<?xml version="1.0" encoding="UTF-8"?><!-- generator="podbean/5.5" -->
<rss version="2.0"
     xmlns:content="http://purl.org/rss/1.0/modules/content/"
     xmlns:wfw="http://wellformedweb.org/CommentAPI/"
     xmlns:dc="http://purl.org/dc/elements/1.1/"
     xmlns:atom="http://www.w3.org/2005/Atom"
     xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd"
     xmlns:googleplay="http://www.google.com/schemas/play-podcasts/1.0"
     xmlns:spotify="http://www.spotify.com/ns/rss"
     xmlns:podcast="https://podcastindex.org/namespace/1.0"
    xmlns:media="http://search.yahoo.com/mrss/">

<channel>
    <title>The Radical AI Podcast</title>
    <atom:link href="https://feed.podbean.com/radicalai/feed.xml" rel="self" type="application/rss+xml"/>
    <link>https://podcasts.apple.com/us/podcast/the-radical-ai-podcast/id1505229145</link>
    <description>Radical people. Radical ideas. Radical Stories. Welcome to the future of Artificial Intelligence Ethics. Welcome to the Radical AI Podcast.</description>
    <pubDate>Wed, 09 Aug 2023 00:05:00 -0600</pubDate>
    <generator>https://podbean.com/?v=5.5</generator>
    <language>en</language>
        <copyright>Copyright 2020 All rights reserved.</copyright>
    <category>Technology</category>
    <ttl>1440</ttl>
    <itunes:type>episodic</itunes:type>
          <itunes:summary>Radical AI is a podcast centering marginalized or otherwise radical voices in industry and the academy for dialogue, collaboration, and debate regarding the field of Artificial Intelligence Ethics and the relationship between the humanities and machine learning.</itunes:summary>
        <itunes:author>Radical AI</itunes:author>
<itunes:category text="Technology" />
    <itunes:owner>
        <itunes:name>Radical AI</itunes:name>
            </itunes:owner>
    	<itunes:block>No</itunes:block>
	<itunes:explicit>false</itunes:explicit>
    <itunes:image href="https://pbcdn1.podbean.com/imglogo/image-logo/7651344/Webpnet-resizeimage_4__b8oyw.jpg" />
    <image>
        <url>https://pbcdn1.podbean.com/imglogo/image-logo/7651344/Webpnet-resizeimage_4__b8oyw.jpg</url>
        <title>The Radical AI Podcast</title>
        <link>https://podcasts.apple.com/us/podcast/the-radical-ai-podcast/id1505229145</link>
        <width>144</width>
        <height>144</height>
    </image>
    <item>
        <title>Stay Radical: A Final Goodbye from Dylan and Jess</title>
        <itunes:title>Stay Radical: A Final Goodbye from Dylan and Jess</itunes:title>
        <link>https://radicalai.podbean.com/e/stay-radical-a-final-goodbye-from-dylan-and-jess/</link>
                    <comments>https://radicalai.podbean.com/e/stay-radical-a-final-goodbye-from-dylan-and-jess/#comments</comments>        <pubDate>Wed, 09 Aug 2023 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/34017b98-c405-3a80-847f-f430e99e07d5</guid>
                                    <description><![CDATA[<p>The Radical AI Podcast has unfortunately reached its end. In this episode, Dylan and Jess say goodbye to the podcast and thank you listeners for your unwavering support throughout the years. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>The Radical AI Podcast has unfortunately reached its end. In this episode, Dylan and Jess say goodbye to the podcast and thank you listeners for your unwavering support throughout the years. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ry4err/Goodbye_mixdown.mp3" length="31183892" type="audio/mpeg"/>
        <itunes:summary><![CDATA[The Radical AI Podcast has unfortunately reached its end. In this episode, Dylan and Jess say goodbye to the podcast and thank you listeners for your unwavering support throughout the years. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1298</itunes:duration>
                <itunes:episode>90</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Twitter vs. Mastodon with Johnathan Flowers</title>
        <itunes:title>Twitter vs. Mastodon with Johnathan Flowers</itunes:title>
        <link>https://radicalai.podbean.com/e/twitter-vs-mastodon-with-johnathan-flowers/</link>
                    <comments>https://radicalai.podbean.com/e/twitter-vs-mastodon-with-johnathan-flowers/#comments</comments>        <pubDate>Wed, 26 Apr 2023 00:10:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/f3ff01ca-220a-3d88-a6e7-6d45a8b70c2d</guid>
                                    <description><![CDATA[<p>In this episode, we interview Dr. Johnathan Flowers about Twitter vs. Mastodon 101, the power dynamics of the fediverse, and potential paths forward in our digital lives.</p>
<p> </p>
<p>Johnathan Flowers is an Assistant Professor of Philosophy at California State University, Northridge. His research areas include African American intellectual history, Japanese Aesthetics, American Pragmatism, Philosophy of Disability, and Philosophy of Technology. Johnathan also works in the area of Science and Technology Studies, where he applies insights from American Pragmatism, Philosophy of Race, and Disability Studies to current issues in human/computer interaction, artificial intelligence and machine learning. </p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this episode, we interview Dr. Johnathan Flowers about Twitter vs. Mastodon 101, the power dynamics of the fediverse, and potential paths forward in our digital lives.</p>
<p> </p>
<p>Johnathan Flowers is an Assistant Professor of Philosophy at California State University, Northridge. His research areas include African American intellectual history, Japanese Aesthetics, American Pragmatism, Philosophy of Disability, and Philosophy of Technology. Johnathan also works in the area of Science and Technology Studies, where he applies insights from American Pragmatism, Philosophy of Race, and Disability Studies to current issues in human/computer interaction, artificial intelligence and machine learning. </p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/g94qav/Johnathan_Flowers_mixdown86ber.mp3" length="103482844" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this episode, we interview Dr. Jonathan Flowers about Twitter vs. Mastodon 101, the power dynamics of the fediverse, and potential paths forward in our digital lives.
 
Johnathan Flowers is an Assistant Professor of Philosophy at California State University, Northridge. His research areas include African American intellectual history, Japanese Aesthetics, American Pragmatism, Philosophy of Disability, and Philosophy of Technology. Johnathan also works in the area of Science and Technology Studies, where he applies insights from American Pragmatism, Philosophy of Race, and Disability Studies to current issues in human/computer interaction, artificial intelligence and machine learning. 
 
Full show notes for this episode can be found at Radicalai.org.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>4311</itunes:duration>
                <itunes:episode>89</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>More than a Glitch, Technochauvinism, and Algorithmic Accountability with Meredith Broussard</title>
        <itunes:title>More than a Glitch, Technochauvinism, and Algorithmic Accountability with Meredith Broussard</itunes:title>
        <link>https://radicalai.podbean.com/e/more-than-a-glitch-confronting-race-gender-and-ability-bias-in-tech-with-meredith-broussard/</link>
                    <comments>https://radicalai.podbean.com/e/more-than-a-glitch-confronting-race-gender-and-ability-bias-in-tech-with-meredith-broussard/#comments</comments>        <pubDate>Wed, 22 Mar 2023 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/8e6a6135-71c5-3f23-8830-8ba5588b0004</guid>
                                    <description><![CDATA[<p>In this episode, we discuss Meredith Broussard's influential new book, More than a Glitch: Confronting Race, Gender, and Ability Bias in Tech – published by MIT Press.</p>
<p>
 </p>
<p>Meredith is a data journalist, an associate professor at the Arthur L. Carter Journalism Institute of New York University, a research director at the NYU Alliance for Public Interest Technology, and the author of several books, including “More Than a Glitch” (which we cover in this episode) and “Artificial Unintelligence: How Computers Misunderstand the World.” Her academic research focuses on artificial intelligence in investigative reporting and ethical AI, with a particular interest in using data analysis for social good.</p>
<p> </p>
<p> </p>
<p>Full show notes for this episode, including the link to buy Meredith's new book, can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this episode, we discuss Meredith Broussard's influential new book, <em>More than a Glitch: Confronting Race, Gender, and Ability Bias in Tech</em> – published by MIT Press.</p>
<p><br>
 </p>
<p>Meredith is a data journalist, an associate professor at the Arthur L. Carter Journalism Institute of New York University, a research director at the NYU Alliance for Public Interest Technology, and the author of several books, including “More Than a Glitch” (which we cover in this episode) and “Artificial Unintelligence: How Computers Misunderstand the World.” Her academic research focuses on artificial intelligence in investigative reporting and ethical AI, with a particular interest in using data analysis for social good.</p>
<p> </p>
<p> </p>
<p>Full show notes for this episode, including the link to buy Meredith's new book, can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/bp9sr9/Broussard_mixdown.mp3" length="92832198" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this episode, we discuss Meredith Broussard's influential new book, More than a Glitch: Confronting Race, Gender, and Ability Bias in Tech – published by MIT Press.
 
Meredith is a data journalist, an associate professor at the Arthur L. Carter Journalism Institute of New York University, a research director at the NYU Alliance for Public Interest Technology, and the author of several books, including “More Than a Glitch” (which we cover in this episode) and “Artificial Unintelligence: How Computers Misunderstand the World.” Her academic research focuses on artificial intelligence in investigative reporting and ethical AI, with a particular interest in using data analysis for social good.
 
 
Full show notes for this episode, including the link to buy Meredith's new book, can be found at Radicalai.org.]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3867</itunes:duration>
                <itunes:episode>88</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>The Limitations of ChatGPT with Emily M. Bender and Casey Fiesler</title>
        <itunes:title>The Limitations of ChatGPT with Emily M. Bender and Casey Fiesler</itunes:title>
        <link>https://radicalai.podbean.com/e/more-auto-complete-than-search-engine-correcting-the-ethical-record-of-chatgpt-with-emily-m-bender-and-casey-fiesler/</link>
                    <comments>https://radicalai.podbean.com/e/more-auto-complete-than-search-engine-correcting-the-ethical-record-of-chatgpt-with-emily-m-bender-and-casey-fiesler/#comments</comments>        <pubDate>Wed, 01 Mar 2023 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/c7a2f57b-55da-3045-b1e6-e627b90676ac</guid>
                                    <description><![CDATA[<p>In this episode, we unpack: is ChatGPT Ethical? In what ways? </p>
<p> </p>
<p>We interview Dr. Emily M. Bender and Dr. Casey Fiesler about the limitations of ChatGPT – we cover ethical considerations, bias and discrimination, and the importance of algorithmic literacy in the face of chatbots.

</p>
<p> </p>
<p>Emily M. Bender is a Professor of Linguistics and an Adjunct Professor in the School of Computer Science and the Information School at the University of Washington, where she has been on the faculty since 2003. Her research interests include multilingual grammar engineering, computational semantics, and the societal impacts of language technology. Emily was also recently nominated as a Fellow of the American Association for the Advancement of Science (AAAS).</p>
<p> </p>
<p> </p>
<p>Casey Fiesler is an associate professor in Information Science at University of Colorado Boulder. She researches and teaches in the areas of technology ethics, internet law and policy, and online communities. Also a public scholar, she is a frequent commentator and speaker on topics of technology ethics and policy, and her research has been covered everywhere from The New York Times to Teen Vogue.</p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this episode, we unpack: is ChatGPT Ethical? In what ways? </p>
<p> </p>
<p>We interview Dr. Emily M. Bender and Dr. Casey Fiesler about the limitations of ChatGPT – we cover ethical considerations, bias and discrimination, and the importance of algorithmic literacy in the face of chatbots.<br>
<br>
</p>
<p> </p>
<p>Emily M. Bender is a Professor of Linguistics and an Adjunct Professor in the School of Computer Science and the Information School at the University of Washington, where she has been on the faculty since 2003. Her research interests include multilingual grammar engineering, computational semantics, and the societal impacts of language technology. Emily was also recently nominated as a Fellow of the American Association for the Advancement of Science (AAAS).</p>
<p> </p>
<p> </p>
<p>Casey Fiesler is an associate professor in Information Science at University of Colorado Boulder. She researches and teaches in the areas of technology ethics, internet law and policy, and online communities. Also a public scholar, she is a frequent commentator and speaker on topics of technology ethics and policy, and her research has been covered everywhere from The New York Times to Teen Vogue.</p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/8s5gvt/Bender-Fiesler_CGPT_mixdown830qi.mp3" length="89349308" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this episode, we unpack: is ChatGPT Ethical? In what ways? 
 
We interview Dr. Emily M. Bender and Dr. Casey Fiesler about the limitations of ChatGPT – we cover ethical considerations, bias and discrimination, and the importance of algorithmic literacy in the face of chatbots.
 
Emily M. Bender is a Professor of Linguistics and an Adjunct Professor in the School of Computer Science and the Information School at the University of Washington, where she has been on the faculty since 2003. Her research interests include multilingual grammar engineering, computational semantics, and the societal impacts of language technology. Emily was also recently nominated as a Fellow of the American Association for the Advancement of Science (AAAS).
 
 
Casey Fiesler is an associate professor in Information Science at University of Colorado Boulder. She researches and teaches in the areas of technology ethics, internet law and policy, and online communities. Also a public scholar, she is a frequent commentator and speaker on topics of technology ethics and policy, and her research has been covered everywhere from The New York Times to Teen Vogue.
 
Full show notes for this episode can be found at Radicalai.org.]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3722</itunes:duration>
                <itunes:episode>87</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>ChatGPT: What is it? How does it work? Should we be excited? Or scared? with Deep Dhillon</title>
        <itunes:title>ChatGPT: What is it? How does it work? Should we be excited? Or scared? with Deep Dhillon</itunes:title>
        <link>https://radicalai.podbean.com/e/chatgpt-what-is-it-how-does-it-work-should-we-be-excited-or-scared-with-deep-dhillon/</link>
                    <comments>https://radicalai.podbean.com/e/chatgpt-what-is-it-how-does-it-work-should-we-be-excited-or-scared-with-deep-dhillon/#comments</comments>        <pubDate>Wed, 25 Jan 2023 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/393371fa-adcf-3b56-ab21-07c4d0abb06f</guid>
                                    <description><![CDATA[<p>ChatGPT – what is it? How does it work? Should we be excited? Or scared?</p>
<p>The recent natural language chatbot has been getting ALL the hype. In this episode we interview Deep Dhillon about the ins and outs of ChatGPT!</p>
<p>Deep is the co-founder and leader of technology development at Xyonix, where his mission is to find novel value in clients' data. Deep has experience as a technology executive; conceptualizing, architecting and deploying advanced applications, leveraging machine learning, natural language processing and data science to build smarter businesses and more powerful products.</p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>ChatGPT – what is it? How does it work? Should we be excited? Or scared?</p>
<p>The recent natural language chatbot has been getting ALL the hype. In this episode we interview Deep Dhillon about the ins and outs of ChatGPT!</p>
<p>Deep is the co-founder and leader of technology development at Xyonix, where his mission is to find novel value in clients' data. Deep has experience as a technology executive; conceptualizing, architecting and deploying advanced applications, leveraging machine learning, natural language processing and data science to build smarter businesses and more powerful products.</p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/c4q9vd/Deep_mixdown.mp3" length="82862537" type="audio/mpeg"/>
        <itunes:summary><![CDATA[ChatGPT – what is it? How does it work? Should we be excited? Or scared?
The recent natural language chatbot has been getting ALL the hype. In this episode we interview Deep Dhillon about the ins and outs of ChatGPT!
Deep is the co-founder and leader of technology development at Xyonix, where his mission is to find novel value in clients' data. Deep has experience as a technology executive; conceptualizing, architecting and deploying advanced applications, leveraging machine learning, natural language processing and data science to build smarter businesses and more powerful products.
Full show notes for this episode can be found at Radicalai.org.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3452</itunes:duration>
                <itunes:episode>86</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Sounds, Sights, Smells, and Senses: Let’s Talk Data with Jordan Wirfs-Brock</title>
        <itunes:title>Sounds, Sights, Smells, and Senses: Let’s Talk Data with Jordan Wirfs-Brock</itunes:title>
        <link>https://radicalai.podbean.com/e/hearing-seeing-smelling-tasting-and-touching-data-with-jordan-wirfs-brock/</link>
                    <comments>https://radicalai.podbean.com/e/hearing-seeing-smelling-tasting-and-touching-data-with-jordan-wirfs-brock/#comments</comments>        <pubDate>Wed, 30 Nov 2022 00:31:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/10c78bd6-25f1-3af7-a21e-2d18649aeadc</guid>
                                    <description><![CDATA[<p>What can our senses teach us about data? What can data teach us about our senses? </p>
<p> </p>
<p>In this episode, we interview Jordan Wirfs-Brock about how we can explore data through all of our senses, especially through the sense of sound.</p>
<p> </p>
<p>Jordan Wirfs-Brock recently completed a PhD in Information Science at the University of Colorado Boulder and will be joining Whitman College as an assistant professor in Computer Science in January. Her research explores how to bring data into our everyday lives as a creative material by developing data representations that are participatory and engage all of our senses, especially sound. In the past, she has worked as a data journalist covering the energy industry and as a civic technologist helping non-profit organizations understand their communities through data.</p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What can our senses teach us about data? What can data teach us about our senses? </p>
<p> </p>
<p>In this episode, we interview Jordan Wirfs-Brock about how we can explore data through all of our senses, especially through the sense of sound.</p>
<p> </p>
<p>Jordan Wirfs-Brock recently completed a PhD in Information Science at the University of Colorado Boulder and will be joining Whitman College as an assistant professor in Computer Science in January. Her research explores how to bring data into our everyday lives as a creative material by developing data representations that are participatory and engage all of our senses, especially sound. In the past, she has worked as a data journalist covering the energy industry and as a civic technologist helping non-profit organizations understand their communities through data.</p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ymuvrt/JWB_mixdown.mp3" length="75679428" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What can our senses teach us about data? What can data teach us about our senses? 
 
In this episode, we interview Jordan Wirfs-Brock about how we can explore data through all of our senses, especially through the sense of sound.
 
Jordan Wirfs-Brock recently completed a PhD in Information Science at the University of Colorado Boulder and will be joining Whitman College as an assistant professor in Computer Science in January. Her research explores how to bring data into our everyday lives as a creative material by developing data representations that are participatory and engage all of our senses, especially sound. In the past, she has worked as a data journalist covering the energy industry and as a civic technologist helping non-profit organizations understand their communities through data.
 
Full show notes for this episode can be found at Radicalai.org.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3152</itunes:duration>
                <itunes:episode>85</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>How to Stay Safe Online with Seyi Akiwowo</title>
        <itunes:title>How to Stay Safe Online with Seyi Akiwowo</itunes:title>
        <link>https://radicalai.podbean.com/e/how-to-stay-safe-online-with-seyi-akiwowo/</link>
                    <comments>https://radicalai.podbean.com/e/how-to-stay-safe-online-with-seyi-akiwowo/#comments</comments>        <pubDate>Wed, 26 Oct 2022 00:15:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/2bff302c-22f7-3594-b762-9e7d657d6d88</guid>
                                    <description><![CDATA[<p>How can technology be designed to fight online abuse and harassment? What is the difference between cancel culture and appropriate accountability? How can you stay safe online?</p>
<p> </p>
<p> </p>
<p>In this episode we interview Seyi Akiwowo to discuss her newly released book: How to Stay Safe Online: A digital self-care toolkit for developing resilience and allyship.</p>
<p> </p>
<p> </p>
<p>Seyi is the founder and CEO of Glitch, a charity that’s been on a mission to end online abuse by making digital citizens of us all since 2017. Seyi is also an author, a consultant and writer within the political and tech space, and a former TED speaker.</p>
<p>

</p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How can technology be designed to fight online abuse and harassment? What is the difference between cancel culture and appropriate accountability? How can you stay safe online?</p>
<p> </p>
<p> </p>
<p>In this episode we interview Seyi Akiwowo to discuss her newly released book: <em>How to Stay Safe Online: A digital self-care toolkit for developing resilience and allyship.</em></p>
<p> </p>
<p> </p>
<p>Seyi is the founder and CEO of Glitch, a charity that’s been on a mission to end online abuse by making digital citizens of us all since 2017. Seyi is also an author, a consultant and writer within the political and tech space, and a former TED speaker.</p>
<p><br>
<br>
</p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/5sad9y/Seyi_mixdown.mp3" length="73748849" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How can technology be designed to fight online abuse and harassment? What is the difference between cancel culture and appropriate accountability? How can you stay safe online?
 
 
In this episode we interview Seyi Akiwowo to discuss her newly released book: How to Stay Safe Online: A digital self-care toolkit for developing resilience and allyship.
 
 
Seyi is the founder and CEO of Glitch, a charity that’s been on a mission to end online abuse by making digital citizens of us all since 2017. Seyi is also an author, a consultant and writer within the political and tech space, and a former TED speaker.

Full show notes for this episode can be found at Radicalai.org.]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3072</itunes:duration>
                <itunes:episode>84</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Data Privacy and Women’s Rights with Rebecca Finlay</title>
        <itunes:title>Data Privacy and Women’s Rights with Rebecca Finlay</itunes:title>
        <link>https://radicalai.podbean.com/e/data-privacy-and-women-s-health-with-rebecca-finlay/</link>
                    <comments>https://radicalai.podbean.com/e/data-privacy-and-women-s-health-with-rebecca-finlay/#comments</comments>        <pubDate>Wed, 28 Sep 2022 00:09:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/21d008cd-9af0-38bc-a6f3-f6032eeeab20</guid>
                                    <description><![CDATA[<p>What is the reality of data privacy after the overruling of Roe v. Wade? </p>
<p> </p>
<p>In this episode, we interview Rebecca Finlay about protecting user data privacy and human rights, following the US Supreme Court ruling of Dobbs v. Jackson Women’s Health Organization. </p>
<p> </p>
<p>Rebecca Finlay is the CEO of the non-profit, Partnership on AI overseeing the organization’s mission and strategy. In this role, Rebecca ensures that the Partnership on AI and their global community of Partners work together so that developments in AI advance positive outcomes for people and society.</p>
<p> </p>
<p>
Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is the reality of data privacy after the overruling of Roe v. Wade? </p>
<p> </p>
<p>In this episode, we interview Rebecca Finlay about protecting user data privacy and human rights, following the US Supreme Court ruling of Dobbs v. Jackson Women’s Health Organization. </p>
<p> </p>
<p>Rebecca Finlay is the CEO of the non-profit, Partnership on AI overseeing the organization’s mission and strategy. In this role, Rebecca ensures that the Partnership on AI and their global community of Partners work together so that developments in AI advance positive outcomes for people and society.</p>
<p> </p>
<p><br>
Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/jmji2t/Rebecca_PAI1_mixdownb74cw.mp3" length="64471079" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the reality of data privacy after the overruling of Roe v. Wade? 
 
In this episode, we interview Rebecca Finlay about protecting user data privacy and human rights, following the US Supreme Court ruling of Dobbs v. Jackson Women’s Health Organization. 
 
Rebecca Finlay is the CEO of the non-profit, Partnership on AI overseeing the organization’s mission and strategy. In this role, Rebecca ensures that the Partnership on AI and their global community of Partners work together so that developments in AI advance positive outcomes for people and society.
 
Full show notes for this episode can be found at Radicalai.org.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2685</itunes:duration>
                <itunes:episode>83</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Digital Lethargy with Tung-Hui Hu</title>
        <itunes:title>Digital Lethargy with Tung-Hui Hu</itunes:title>
        <link>https://radicalai.podbean.com/e/digital-lethargy-with-tung-hui-hu/</link>
                    <comments>https://radicalai.podbean.com/e/digital-lethargy-with-tung-hui-hu/#comments</comments>        <pubDate>Wed, 31 Aug 2022 00:20:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/0d5d0ce1-eb28-3dfd-98a1-a474b6c80be2</guid>
                                    <description><![CDATA[<p>What is Digital Lethargy?  How can we adapt to an age of disconnection? How can art act as a force of resistance? </p>
<p> </p>
<p>In this episode we interview Tung-Hui Hu about digital exhaustion in the modern day, and his new upcoming book from MIT Press: “Digital Lethargy: Dispatches from an Age of Disconnection.”</p>
<p> </p>
<p>Tung-Hui is an associate professor of English at the University of Michigan and the author of A Prehistory of the Cloud from MIT Press. He is on the advisory board of the McLuhan Centre for Culture and Technology and is also a poet.</p>
<p> </p>
<p>
Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is Digital Lethargy?  How can we adapt to an age of disconnection? How can art act as a force of resistance? </p>
<p> </p>
<p>In this episode we interview Tung-Hui Hu about digital exhaustion in the modern day, and his new upcoming book from MIT Press: “Digital Lethargy: Dispatches from an Age of Disconnection.”</p>
<p> </p>
<p>Tung-Hui is an associate professor of English at the University of Michigan and the author of <em>A Prehistory of the Cloud</em> from MIT Press. He is on the advisory board of the McLuhan Centre for Culture and Technology and is also a poet.</p>
<p> </p>
<p><br>
Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/cj4px2/Huai_mixdown.mp3" length="86169421" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is Digital Lethargy?  How can we adapt to an age of disconnection? How can art act as a force of resistance? 
 
In this episode we interview Tung-Hui Hu about digital exhaustion in the modern day, and his new upcoming book from MIT Press: “Digital Lethargy: Dispatches from an Age of Disconnection.”
 
Tung-Hui is an associate professor of English at the University of Michigan and the author of A Prehistory of the Cloud from MIT Press. He is on the advisory board of the McLuhan Centre for Culture and Technology and is also a poet.
 
Full show notes for this episode can be found at Radicalai.org.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3590</itunes:duration>
                <itunes:episode>82</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Should the Government use AI? with Shion Guha</title>
        <itunes:title>Should the Government use AI? with Shion Guha</itunes:title>
        <link>https://radicalai.podbean.com/e/should-the-government-use-ai-with-shion-guha/</link>
                    <comments>https://radicalai.podbean.com/e/should-the-government-use-ai-with-shion-guha/#comments</comments>        <pubDate>Wed, 27 Jul 2022 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/04f9e6d8-6b5c-33d5-8605-59e0936cb2ac</guid>
                                    <description><![CDATA[<p>How does the government use algorithms? How do algorithms impact social services, policing, and other social services? And where does Silicon Valley fit in?</p>
<p> </p>
<p>In this episode we interview Shion Guha about how governments adopt algorithms to enforce public policy.</p>
<p> </p>
<p>Shion is an Assistant Professor in the Faculty of Information at University of Toronto. His research fits into the field of Human-Centered Data Science, which he helped develop. Shion explores the intersection between AI and public policy by researching algorithmic decision-making in public services such as criminal justice, child welfare, and healthcare.</p>
<p>
Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How does the government use algorithms? How do algorithms impact social services, policing, and other social services? And where does Silicon Valley fit in?</p>
<p> </p>
<p>In this episode we interview Shion Guha about how governments adopt algorithms to enforce public policy.</p>
<p> </p>
<p>Shion is an Assistant Professor in the Faculty of Information at University of Toronto. His research fits into the field of Human-Centered Data Science, which he helped develop. Shion explores the intersection between AI and public policy by researching algorithmic decision-making in public services such as criminal justice, child welfare, and healthcare.</p>
<p><br>
Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/xh67ce/Shion_mixdown1.mp3" length="81549249" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How does the government use algorithms? How do algorithms impact social services, policing, and other social services? And where does Silicon Valley fit in?
 
In this episode we interview Shion Guha about how governments adopt algorithms to enforce public policy.
 
Shion is an Assistant Professor in the Faculty of Information at University of Toronto. His research fits into the field of Human-Centered Data Science, which he helped develop. Shion explores the intersection between AI and public policy by researching algorithmic decision-making in public services such as criminal justice, child welfare, and healthcare.
Full show notes for this episode can be found at Radicalai.org.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3397</itunes:duration>
                <itunes:episode>81</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Envisioning a Decolonial Digital Mental Health with Sachin Pendse, Munmun De Choudhury, and Neha Kumar</title>
        <itunes:title>Envisioning a Decolonial Digital Mental Health with Sachin Pendse, Munmun De Choudhury, and Neha Kumar</itunes:title>
        <link>https://radicalai.podbean.com/e/envisioning-a-decolonial-digital-mental-health-with-sachin-pendse-munmun-de-choudhury-and-neha-kumar/</link>
                    <comments>https://radicalai.podbean.com/e/envisioning-a-decolonial-digital-mental-health-with-sachin-pendse-munmun-de-choudhury-and-neha-kumar/#comments</comments>        <pubDate>Wed, 29 Jun 2022 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/4d7cf6fd-88a0-3940-89d6-32757ba0932f</guid>
                                    <description><![CDATA[<p>In this episode we have a panel discussion about decolonial digital mental health with three leading experts on the topic: Sachin Pendse, Munmun De Choudhury, and Neha Kumar</p>
<p> </p>
<p>Sachin is a PhD student in Human-Centered Computing at Georgia Tech, researching the role that technology plays in addressing barriers that prevent people from receiving consistent mental health care.</p>
<p> </p>
<p>Munmun is the Associate Professor in the <a href='http://www.ic.gatech.edu/'>School of Interactive Computing</a> at <a href='http://www.gatech.edu/'>Georgia Tech</a>. She founded and directs the <a href='http://socweb.cc.gatech.edu/'>Social Dynamics and Wellbeing Lab</a> that seeks to develop technologies for improving our mental well-being.</p>
<p> </p>
<p>Neha is an Associate Professor at Georgia Tech and leads the Technology and Design for Empowerment lab with a focus on the intersection of human-centered computing and global development. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this episode we have a panel discussion about decolonial digital mental health with three leading experts on the topic: Sachin Pendse, Munmun De Choudhury, and Neha Kumar</p>
<p> </p>
<p>Sachin is a PhD student in Human-Centered Computing at Georgia Tech, researching the role that technology plays in addressing barriers that prevent people from receiving consistent mental health care.</p>
<p> </p>
<p>Munmun is the Associate Professor in the <a href='http://www.ic.gatech.edu/'>School of Interactive Computing</a> at <a href='http://www.gatech.edu/'>Georgia Tech</a>. She founded and directs the <a href='http://socweb.cc.gatech.edu/'>Social Dynamics and Wellbeing Lab</a> that seeks to develop technologies for improving our mental well-being.</p>
<p> </p>
<p>Neha is an Associate Professor at Georgia Tech and leads the Technology and Design for Empowerment lab with a focus on the intersection of human-centered computing and global development. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/xs6nyd/Sachin_et_al_mixdownavxl6.mp3" length="83839439" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this episode we have a panel discussion about decolonial digital mental health with three leading experts on the topic: Sachin Pendse, Munmun De Choudhury, and Neha Kumar
 
Sachin is a PhD student in Human-Centered Computing at Georgia Tech, researching the role that technology plays in addressing barriers that prevent people from receiving consistent mental health care.
 
Munmun is the Associate Professor in the School of Interactive Computing at Georgia Tech. She founded and directs the Social Dynamics and Wellbeing Lab that seeks to develop technologies for improving our mental well-being.
 
Neha is an Associate Professor at Georgia Tech and leads the Technology and Design for Empowerment lab with a focus on the intersection of human-centered computing and global development. 
 
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3492</itunes:duration>
                <itunes:episode>80</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Visualizing Our Lives Through Data with Jaime Snyder</title>
        <itunes:title>Visualizing Our Lives Through Data with Jaime Snyder</itunes:title>
        <link>https://radicalai.podbean.com/e/visualizing-our-lives-in-data-with-jaime-snyder/</link>
                    <comments>https://radicalai.podbean.com/e/visualizing-our-lives-in-data-with-jaime-snyder/#comments</comments>        <pubDate>Wed, 25 May 2022 00:15:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/e99ca6c5-501d-320c-ba39-b97ba300e0e0</guid>
                                    <description><![CDATA[<p>How do we see ourselves in data? What is self-tracking and how can we design for visualizing the data of our bodies and mental health? How do we make visualized data more accessible? </p>
<p> </p>
<p>In this episode, we interview Jaime Snyder about the data visualization of COVID, mental health, and more. </p>
<p> </p>
<p>Jaime Snyder is an Associate Professor in the Information School at the University of Washington in Seattle. She leads the Visualization Studies Research Studio and is also an Adjunct Associate Professor in the UW Department of Human-Centered Design and Engineering. Snyder’s research draws on her background as an artist and information science scholar to explore the creation and use of visual representations of information, data, and knowledge in collaborative and coordinated contexts.</p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How do we see ourselves in data? What is self-tracking and how can we design for visualizing the data of our bodies and mental health? How do we make visualized data more accessible? </p>
<p> </p>
<p>In this episode, we interview Jaime Snyder about the data visualization of COVID, mental health, and more. </p>
<p> </p>
<p>Jaime Snyder is an Associate Professor in the Information School at the University of Washington in Seattle. She leads the Visualization Studies Research Studio and is also an Adjunct Associate Professor in the UW Department of Human-Centered Design and Engineering. Snyder’s research draws on her background as an artist and information science scholar to explore the creation and use of visual representations of information, data, and knowledge in collaborative and coordinated contexts.</p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/8hxsah/Jaime_Snyder_mixdownag9vt.mp3" length="85315808" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How do we see ourselves in data? What is self-tracking and how can we design for visualizing the data of our bodies and mental health? How do we make visualized data more accessible? 
 
In this episode, we interview Jaime Snyder about the data visualization of COVID, mental health, and more. 
 
Jaime Snyder is an Associate Professor in the Information School at the University of Washington in Seattle. She leads the Visualization Studies Research Studio and is also an Adjunct Associate Professor in the UW Department of Human-Centered Design and Engineering. Snyder’s research draws on her background as an artist and information science scholar to explore the creation and use of visual representations of information, data, and knowledge in collaborative and coordinated contexts.
 
Full show notes for this episode can be found at Radicalai.org.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3554</itunes:duration>
                <itunes:episode>79</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Let’s Talk About Sex: Digital Pornography and LGBTQIA+ Censorship w/ Alex Monea</title>
        <itunes:title>Let’s Talk About Sex: Digital Pornography and LGBTQIA+ Censorship w/ Alex Monea</itunes:title>
        <link>https://radicalai.podbean.com/e/let-s-talk-about-sex-digital-pornography-and-lgbtqia-censorship-w-alex-monea/</link>
                    <comments>https://radicalai.podbean.com/e/let-s-talk-about-sex-digital-pornography-and-lgbtqia-censorship-w-alex-monea/#comments</comments>        <pubDate>Wed, 27 Apr 2022 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/d7468564-20a8-3763-a3af-7d77cf812686</guid>
                                    <description><![CDATA[<p>What is the history of digital pornography? How do algorithms perpetuate LGBTQIA+ content censorship? What is the role that content moderation and corporate ownership plays in perpetuating misogyny and heteronormativity?</p>
<p> </p>
<p>In this episode, we interview Alex Monea about the history of digital pornography and LGBTQIA+ content censorship. We also discuss his new book published by MIT Press: "The Digital Closet: How the Internet Became Straight"</p>
<p> </p>
<p>Alexander Monea is Assistant Professor in the English Department and Cultural Studies PhD Program at George Mason University. He researches data ethics and the intersection between computation and marginalization.</p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is the history of digital pornography? How do algorithms perpetuate LGBTQIA+ content censorship? What is the role that content moderation and corporate ownership plays in perpetuating misogyny and heteronormativity?</p>
<p> </p>
<p>In this episode, we interview Alex Monea about the history of digital pornography and LGBTQIA+ content censorship. We also discuss his new book published by MIT Press: "The Digital Closet: How the Internet Became Straight"</p>
<p> </p>
<p>Alexander Monea is Assistant Professor in the English Department and Cultural Studies PhD Program at George Mason University. He researches data ethics and the intersection between computation and marginalization.</p>
<p> </p>
<p>Full show notes for this episode can be found at <a href='http://radicalai.org/'>Radicalai.org</a>.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/fv6kre/Alex_Monet_mixdown69ihz.mp3" length="84717920" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the history of digital pornography? How do algorithms perpetuate LGBTQIA+ content censorship? What is the role that content moderation and corporate ownership plays in perpetuating misogyny and heteronormativity?
 
In this episode, we interview Alex Monea about the history of digital pornography and LGBTQIA+ content censorship. We also discuss his new book published by MIT Press: "The Digital Closet: How the Internet Became Straight"
 
Alexander Monea is Assistant Professor in the English Department and Cultural Studies PhD Program at George Mason University. He researches data ethics and the intersection between computation and marginalization.
 
Full show notes for this episode can be found at Radicalai.org.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3529</itunes:duration>
                <itunes:episode>78</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>New Year, New You: Welcome Back to the Radical AI Podcast</title>
        <itunes:title>New Year, New You: Welcome Back to the Radical AI Podcast</itunes:title>
        <link>https://radicalai.podbean.com/e/new-year-new-you-welcome-back-to-the-radical-ai-podcast/</link>
                    <comments>https://radicalai.podbean.com/e/new-year-new-you-welcome-back-to-the-radical-ai-podcast/#comments</comments>        <pubDate>Wed, 20 Apr 2022 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/0349bde8-1206-3945-9237-e63597837cec</guid>
                                    <description><![CDATA[<p>Curious about where we've been and where we're going? Listen to this minisode! Dylan and Jess discuss the podcast, the new season, and much more! </p>
<p>Full show notes for this episode can be found at <a href='https://www.radicalai.org/'>radicalai.org</a>. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Curious about where we've been and where we're going? Listen to this minisode! Dylan and Jess discuss the podcast, the new season, and much more! </p>
<p>Full show notes for this episode can be found at <a href='https://www.radicalai.org/'>radicalai.org</a>. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/8jy8gs/Minisode_Season_3_Launch9ppjb.mp3" length="37157531" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Curious about where we've been and where we're going? Listen to this minisode! Dylan and Jess discuss the podcast, the new season, and much more! 
Full show notes for this episode can be found at radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1547</itunes:duration>
                <itunes:episode>77</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Measurementality #7: Why AI Registries are Critical for Metrics of Accountability with Sara Jordan and Anand Rao</title>
        <itunes:title>Measurementality #7: Why AI Registries are Critical for Metrics of Accountability with Sara Jordan and Anand Rao</itunes:title>
        <link>https://radicalai.podbean.com/e/measurementality-7-why-ai-registries-are-critical-for-metrics-of-accountability-with-sara-jordan-and-anand-rao/</link>
                    <comments>https://radicalai.podbean.com/e/measurementality-7-why-ai-registries-are-critical-for-metrics-of-accountability-with-sara-jordan-and-anand-rao/#comments</comments>        <pubDate>Sun, 19 Dec 2021 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/965107e0-0ab0-3f35-835c-dda322e905ed</guid>
                                    <description><![CDATA[<p class="sqsrte-large">In this 7th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by exploring how AI development can better engage with governance and privacy with Sara Jordan and Anand Rao</p>
<p class="sqsrte-large">Sara Jordan is Senior Researcher, Artificial Intelligence and Ethics at the Future of Privacy Forum</p>
<p class="sqsrte-large">Anand Rao is a Principal with PwC’s US Advisory practice</p>
]]></description>
                                                            <content:encoded><![CDATA[<p class="sqsrte-large">In this 7th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by exploring how AI development can better engage with governance and privacy with Sara Jordan and Anand Rao</p>
<p class="sqsrte-large">Sara Jordan is Senior Researcher, Artificial Intelligence and Ethics at the Future of Privacy Forum</p>
<p class="sqsrte-large">Anand Rao is a Principal with PwC’s US Advisory practice</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/c9a2xk/MMT_7_Draft_for_Review_mixdownbp1ht.mp3" length="75607227" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this 7th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by exploring how AI development can better engage with governance and privacy with Sara Jordan and Anand Rao
Sara Jordan is Senior Researcher, Artificial Intelligence and Ethics at the Future of Privacy Forum
Anand Rao is a Principal with PwC’s US Advisory practice]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3149</itunes:duration>
                <itunes:episode>76</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Decolonial AI 101 with Raziye Buse Çetin</title>
        <itunes:title>Decolonial AI 101 with Raziye Buse Çetin</itunes:title>
        <link>https://radicalai.podbean.com/e/decolonial-ai-101-with-raziye-buse-cetin/</link>
                    <comments>https://radicalai.podbean.com/e/decolonial-ai-101-with-raziye-buse-cetin/#comments</comments>        <pubDate>Wed, 08 Dec 2021 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/a43f89f3-bde3-3675-8e28-3a5c2b2800f5</guid>
                                    <description><![CDATA[<p>What is Decolonial AI? How can we apply a postcolonial lens to AI design?</p>
<p>In this episode we interview Raziye Buse Çetin about Colonial, Decolonial, and Postcolonial AI -- and the Newly released Decolonial AI Manyfesto.</p>
<p>Buse is an AI policy and ethics researcher and consultant. Her work revolves around ethics, impact, and governance of AI systems. She combines her lived experience with her interest in postcolonial studies, intersectional feminism and science and technology studies (STS) to develop critical thinking about AI technologies and narratives around it. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is Decolonial AI? How can we apply a postcolonial lens to AI design?</p>
<p>In this episode we interview Raziye Buse Çetin about Colonial, Decolonial, and Postcolonial AI -- and the Newly released Decolonial AI Manyfesto.</p>
<p>Buse is an AI policy and ethics researcher and consultant. Her work revolves around ethics, impact, and governance of AI systems. She combines her lived experience with her interest in postcolonial studies, intersectional feminism and science and technology studies (STS) to develop critical thinking about AI technologies and narratives around it. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/wsq3e6/Buse_mixdown.mp3" length="60341484" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is Decolonial AI? How can we apply a postcolonial lens to AI design?
In this episode we interview Raziye Buse Çetin about Colonial, Decolonial, and Postcolonial AI -- and the Newly released Decolonial AI Manyfesto.
Buse is an AI policy and ethics researcher and consultant. Her work revolves around ethics, impact, and governance of AI systems. She combines her lived experience with her interest in postcolonial studies, intersectional feminism and science and technology studies (STS) to develop critical thinking about AI technologies and narratives around it. 
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2513</itunes:duration>
                <itunes:episode>75</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Design Justice 101 with Sasha Costanza-Chock</title>
        <itunes:title>Design Justice 101 with Sasha Costanza-Chock</itunes:title>
        <link>https://radicalai.podbean.com/e/design-justice-101-with-sasha-costanza-chock/</link>
                    <comments>https://radicalai.podbean.com/e/design-justice-101-with-sasha-costanza-chock/#comments</comments>        <pubDate>Wed, 03 Nov 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/8258169f-2350-3fd5-a7a6-10ff2bf8cfd1</guid>
                                    <description><![CDATA[<p>What is Design Justice? How can we employ it to disrupt power systems supporting the matrix of domination?</p>
<p>In this episode, we interview Sasha Costanza-Chock about the 101 of Design Justice and how we can use it as a force for collective liberation.</p>
<p>Sasha Costanza-Chock is a researcher and designer who works to support community-led processes that build shared power, dismantle the matrix of domination, and advance ecological survival. Sasha is the Director of Research & Design at the Algorithmic Justice League and is the author of Design Justice: Community-Led Practices to Build the Worlds We Need.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is Design Justice? How can we employ it to disrupt power systems supporting the matrix of domination?</p>
<p>In this episode, we interview Sasha Costanza-Chock about the 101 of Design Justice and how we can use it as a force for collective liberation.</p>
<p>Sasha Costanza-Chock is a researcher and designer who works to support community-led processes that build shared power, dismantle the matrix of domination, and advance ecological survival. Sasha is the Director of Research & Design at the Algorithmic Justice League and is the author of Design Justice: Community-Led Practices to Build the Worlds We Need.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/brg2qp/Sasha_mixdown.mp3" length="79435401" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is Design Justice? How can we employ it to disrupt power systems supporting the matrix of domination?
In this episode, we interview Sasha Costanza-Chock about the 101 of Design Justice and how we can use it as a force for collective liberation.
Sasha Costanza-Chock is a researcher and designer who works to support community-led processes that build shared power, dismantle the matrix of domination, and advance ecological survival. Sasha is the Director of Research & Design at the Algorithmic Justice League and is the author of Design Justice: Community-Led Practices to Build the Worlds We Need.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3308</itunes:duration>
                <itunes:episode>74</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>What Causes AI to Fail? with the AI Today Podcast</title>
        <itunes:title>What Causes AI to Fail? with the AI Today Podcast</itunes:title>
        <link>https://radicalai.podbean.com/e/what-causes-ai-to-fail-with-the-ai-today-podcast/</link>
                    <comments>https://radicalai.podbean.com/e/what-causes-ai-to-fail-with-the-ai-today-podcast/#comments</comments>        <pubDate>Fri, 15 Oct 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/05dbe006-37b2-39d4-9bbd-090de9f254cd</guid>
                                    <description><![CDATA[<p>What causes AI to fail from a business/industry perspective and beyond? What metrics are used to measure and indicate failure? And how can we improve the field of AI by learning from these failures? </p>
<p>To answer these questions we interview Kathleen Walch and Ron Schmelzer of Cognilytica’s AI Today podcast. </p>
<p>Ron and Kathleen are both principal analysts, managing partners and founders of Cognilytica. Cognilytica is a research, advisory, and education firm focused on advanced big data analytics, cognitive technologies, and evolving areas of Artificial Intelligence and machine learning. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What causes AI to fail from a business/industry perspective and beyond? What metrics are used to measure and indicate failure? And how can we improve the field of AI by learning from these failures? </p>
<p>To answer these questions we interview Kathleen Walch and Ron Schmelzer of Cognilytica’s AI Today podcast. </p>
<p>Ron and Kathleen are both principal analysts, managing partners and founders of Cognilytica. Cognilytica is a research, advisory, and education firm focused on advanced big data analytics, cognitive technologies, and evolving areas of Artificial Intelligence and machine learning. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/n7st8m/AIToday_mixdown.mp3" length="76704165" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What causes AI to fail from a business/industry perspective and beyond? What metrics are used to measure and indicate failure? And how can we improve the field of AI by learning from these failures? 
To answer these questions we interview Kathleen Walch and Ron Schmelzer of Cognilytica’s AI Today podcast. 
Ron and Kathleen are both principal analysts, managing partners and founders of Cognilytica. Cognilytica is a research, advisory, and education firm focused on advanced big data analytics, cognitive technologies, and evolving areas of Artificial Intelligence and machine learning. 
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3195</itunes:duration>
                <itunes:episode>73</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Measurementality #6: Authentic Accountability for Successful AI with Yoav Schlesinger</title>
        <itunes:title>Measurementality #6: Authentic Accountability for Successful AI with Yoav Schlesinger</itunes:title>
        <link>https://radicalai.podbean.com/e/measurementality-6-authentic-accountability-for-successful-ai-with-yoav-schlesinger/</link>
                    <comments>https://radicalai.podbean.com/e/measurementality-6-authentic-accountability-for-successful-ai-with-yoav-schlesinger/#comments</comments>        <pubDate>Mon, 11 Oct 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/b8312f22-3d30-3388-b1ce-9bc270dd8881</guid>
                                    <description><![CDATA[<p class="sqsrte-large">In this 6th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how we can build more authentic systems of accountability for creating AI with Yoav Schlesinger.</p>
<p class="sqsrte-large">Yoav Schlesinger is the Principal of Ethical AI Practice for Salesforce.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p class="sqsrte-large">In this 6th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how we can build more authentic systems of accountability for creating AI with Yoav Schlesinger.</p>
<p class="sqsrte-large">Yoav Schlesinger is the Principal of Ethical AI Practice for Salesforce.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/2msfq5/MMT_6_Draft_for_Post_mixdown9s9ar.mp3" length="63318003" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this 6th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how we can build more authentic systems of accountability for creating AI with Yoav Schlesinger.
Yoav Schlesinger is the Principal of Ethical AI Practice for Salesforce.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2637</itunes:duration>
                <itunes:episode>72</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Predicting Mental Illness Through AI with Stevie Chancellor</title>
        <itunes:title>Predicting Mental Illness Through AI with Stevie Chancellor</itunes:title>
        <link>https://radicalai.podbean.com/e/predicting-mental-illness-through-ai-with-stevie-chancellor/</link>
                    <comments>https://radicalai.podbean.com/e/predicting-mental-illness-through-ai-with-stevie-chancellor/#comments</comments>        <pubDate>Wed, 06 Oct 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/d2cd4f9e-0a23-3430-82a0-5a84db7867cb</guid>
                                    <description><![CDATA[<p>How is AI used to predict mental illness? What are the benefits and challenges to its use?</p>
<p>In this episode we interview Stevie Chancellor about AI, mental health, and the benefits and challenges of machine learning systems that are used to predict mental illness. </p>
<p>Stevie Chancellor is an Assistant Professor in the Department of Computer Science & Engineering at the University of Minnesota - Twin Cities. Her research combines human-computer interaction and machine learning approaches to build and critically evaluate machine learning systems for pressing social issues, focusing on high-risk health behaviors in online communities.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How is AI used to predict mental illness? What are the benefits and challenges to its use?</p>
<p>In this episode we interview Stevie Chancellor about AI, mental health, and the benefits and challenges of machine learning systems that are used to predict mental illness. </p>
<p>Stevie Chancellor is an Assistant Professor in the Department of Computer Science & Engineering at the University of Minnesota - Twin Cities. Her research combines human-computer interaction and machine learning approaches to build and critically evaluate machine learning systems for pressing social issues, focusing on high-risk health behaviors in online communities.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/tkgagx/Stevie_mixdown.mp3" length="80515660" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How is AI used to predict mental illness? What are the benefits and challenges to its use?
In this episode we interview Stevie Chancellor about AI, mental health, and the benefits and challenges of machine learning systems that are used to predict mental illness. 
Stevie Chancellor is an Assistant Professor in the Department of Computer Science & Engineering at the University of Minnesota - Twin Cities. Her research combines human-computer interaction and machine learning approaches to build and critically evaluate machine learning systems for pressing social issues, focusing on high-risk health behaviors in online communities.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3354</itunes:duration>
                <itunes:episode>71</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Measurementality #5: Intergenerational Collaboration with Sinead Bovell</title>
        <itunes:title>Measurementality #5: Intergenerational Collaboration with Sinead Bovell</itunes:title>
        <link>https://radicalai.podbean.com/e/measurementality-5-intergenerational-collaboration-with-sinead-bovell/</link>
                    <comments>https://radicalai.podbean.com/e/measurementality-5-intergenerational-collaboration-with-sinead-bovell/#comments</comments>        <pubDate>Sun, 19 Sep 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/70020ac4-0bc7-3397-898f-34675757ab3a</guid>
                                    <description><![CDATA[<p class="sqsrte-large">In this 5th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how existing metrics regarding youth and intergenerational collaboration are being globally measured today. </p>
<p>To discuss this topic we interview Sinead Bovell.</p>
<p>Sinead is the founder and CEO of WAYE, a tech education company that prepares the next generation of leaders for a future with advanced technologies, with a focus on non-traditional and minority markets.</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p class="sqsrte-large">In this 5th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how existing metrics regarding youth and intergenerational collaboration are being globally measured today. </p>
<p>To discuss this topic we interview Sinead Bovell.</p>
<p>Sinead is the founder and CEO of WAYE, a tech education company that prepares the next generation of leaders for a future with advanced technologies, with a focus on non-traditional and minority markets.</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/must2y/MMT_5_final_mixdownbs10b.mp3" length="51568316" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this 5th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how existing metrics regarding youth and intergenerational collaboration are being globally measured today. 
To discuss this topic we interview Sinead Bovell.
Sinead is the founder and CEO of WAYE, a tech education company that prepares the next generation of leaders for a future with advanced technologies, with a focus on non-traditional and minority markets.
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2148</itunes:duration>
                <itunes:episode>70</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Indigenous AI 101 with Jason Edward Lewis</title>
        <itunes:title>Indigenous AI 101 with Jason Edward Lewis</itunes:title>
        <link>https://radicalai.podbean.com/e/indigenous-ai-101-with-jason-edward-lewis/</link>
                    <comments>https://radicalai.podbean.com/e/indigenous-ai-101-with-jason-edward-lewis/#comments</comments>        <pubDate>Wed, 08 Sep 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/6351eedd-4e1c-321d-bdb5-dfb41ad1629b</guid>
                                    <description><![CDATA[<p>What is Indigenous AI and how might it drive our technology design and implementation?</p>
<p>To answer this question and more in this episode we interview Jason Edward Lewis about Indigenous AI Protocols and a paper he co-authored entitled “Position Paper on Indigenous Protocol and Artificial Intelligence.”</p>
<p>Jason Edward Lewis is a Hawaiian and Samoan digital media theorist, poet, and software designer. Jason also founded Obx Laboratory for Experimental Media and is the University Research Chair in Computational Media and the Indigenous Future Imaginary as well as a Professor of Computation Arts at Concordia University, Montreal. Jason directs the Initiative for Indigenous Futures, and co-directs the Indigenous Futures Research Centre, the Indigenous Protocol and AI Workshops, the Aboriginal Territories in Cyberspace research network, and the Skins Workshops on Aboriginal Storytelling and Video Game Design.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is Indigenous AI and how might it drive our technology design and implementation?</p>
<p>To answer this question and more in this episode we interview Jason Edward Lewis about Indigenous AI Protocols and a paper he co-authored entitled “Position Paper on Indigenous Protocol and Artificial Intelligence.”</p>
<p>Jason Edward Lewis is a Hawaiian and Samoan digital media theorist, poet, and software designer. Jason also founded Obx Laboratory for Experimental Media and is the University Research Chair in Computational Media and the Indigenous Future Imaginary as well as a Professor of Computation Arts at Concordia University, Montreal. Jason directs the Initiative for Indigenous Futures, and co-directs the Indigenous Futures Research Centre, the Indigenous Protocol and AI Workshops, the Aboriginal Territories in Cyberspace research network, and the Skins Workshops on Aboriginal Storytelling and Video Game Design.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/3tdryi/JEL_mixdown.mp3" length="93018673" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is Indigenous AI and how might it drive our technology design and implementation?
To answer this question and more in this episode we interview Jason Edward Lewis about Indigenous AI Protocols and a paper he co-authored entitled “Position Paper on Indigenous Protocol and Artificial Intelligence.”
Jason Edward Lewis is a Hawaiian and Samoan digital media theorist, poet, and software designer. Jason also founded Obx Laboratory for Experimental Media and is the University Research Chair in Computational Media and the Indigenous Future Imaginary as well as a Professor of Computation Arts at Concordia University, Montreal. Jason directs the Initiative for Indigenous Futures, and co-directs the Indigenous Futures Research Centre, the Indigenous Protocol and AI Workshops, the Aboriginal Territories in Cyberspace research network, and the Skins Workshops on Aboriginal Storytelling and Video Game Design.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3875</itunes:duration>
                <itunes:episode>69</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Casteist Technology and Digital Brahminism with Thenmozhi Soundararajan and Seema Hari</title>
        <itunes:title>Casteist Technology and Digital Brahminism with Thenmozhi Soundararajan and Seema Hari</itunes:title>
        <link>https://radicalai.podbean.com/e/casteist-technology-and-digital-brahminism-with-thenmozhi-soundararajan-and-seema-hari/</link>
                    <comments>https://radicalai.podbean.com/e/casteist-technology-and-digital-brahminism-with-thenmozhi-soundararajan-and-seema-hari/#comments</comments>        <pubDate>Fri, 18 Jun 2021 10:48:22 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/0776d378-e0ac-3cf5-a72b-726c8273a420</guid>
                                    <description><![CDATA[<p>What is Casteist Technology and Digital Brahminism and how can we best engage to enact change? </p>
<p>Join 2021 Radical AI Intern Nikhil Dharmaraj as he interviews Thenmozhi Soundararajan and Seema Hari about technology, casteism, and surveillance.</p>
<p>Thenmozhi Soundararajan is a Dalit rights artist, technologist, and theorist. Currently, Thenmozhi is the co-Founder and Executive Director of Equality Labs, a Dalit Civil Rights organization that uses community research, cultural and political organizing, popular education and digital security to build power to end caste apartheid, white supremacy, gender-based violence, and religious intolerance.</p>
<p>Seema Hari is an engineer and an anti-caste and anti-colorism activist.</p>
<p>Full show notes and guest bios for this episode can be found at Radicalai.org. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is Casteist Technology and Digital Brahminism and how can we best engage to enact change? </p>
<p>Join 2021 Radical AI Intern Nikhil Dharmaraj as he interviews Thenmozhi Soundararajan and Seema Hari about technology, casteism, and surveillance.</p>
<p>Thenmozhi Soundararajan is a Dalit rights artist, technologist, and theorist. Currently, Thenmozhi is the co-Founder and Executive Director of Equality Labs, a Dalit Civil Rights organization that uses community research, cultural and political organizing, popular education and digital security to build power to end caste apartheid, white supremacy, gender-based violence, and religious intolerance.</p>
<p>Seema Hari is an engineer and an anti-caste and anti-colorism activist.</p>
<p>Full show notes and guest bios for this episode can be found at Radicalai.org. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/w523mt/NK_mixdown.mp3" length="71285966" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is Casteist Technology and Digital Brahminism and how can we best engage to enact change? 
Join 2021 Radical AI Intern Nikhil Dharmaraj as he interviews Thenmozhi Soundararajan and Seema Hari about technology, casteism, and surveillance.
Thenmozhi Soundararajan is a Dalit rights artist, technologist, and theorist. Currently, Thenmozhi is the co-Founder and Executive Director of Equality Labs, a Dalit Civil Rights organization that uses community research, cultural and political organizing, popular education and digital security to build power to end caste apartheid, white supremacy, gender-based violence, and religious intolerance.
Seema Hari is an engineer and an anti-caste and anti-colorism activist.
Full show notes and guest bios for this episode can be found at Radicalai.org. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2969</itunes:duration>
                <itunes:episode>68</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Measurementality #4: What are we Optimizing for? with Laura Musikanski and Jonathan Stray</title>
        <itunes:title>Measurementality #4: What are we Optimizing for? with Laura Musikanski and Jonathan Stray</itunes:title>
        <link>https://radicalai.podbean.com/e/measurementality-4-what-are-we-optimizing-for-with-laura-musikanski-and-jonathan-stray/</link>
                    <comments>https://radicalai.podbean.com/e/measurementality-4-what-are-we-optimizing-for-with-laura-musikanski-and-jonathan-stray/#comments</comments>        <pubDate>Wed, 16 Jun 2021 10:55:12 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/690d239a-ae75-3b98-b0dc-cd7e5cc03d2e</guid>
                                    <description><![CDATA[<p class="sqsrte-large">In this 4th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how existing metrics regarding human wellbeing along with environmental flourishing are being globally measured today. </p>
<p><a href='https://www.linkedin.com/in/laura-musikanski-7214361/'>Laura Musikanski</a> is the Executive Director of <a href='https://www.happycounts.org/'>The Happiness Alliance</a> and Chair of <a href='https://standards.ieee.org/standard/7010-2020.html'>IEEE 7010-2020</a></p>
<p><a href='https://www.linkedin.com/in/jonathanstray/'>Jonathan Stray</a> is a Visiting Scholar at Center for Human-Compatible AI and a former research partner at The Partnership on AI as well as being the author of <a href='https://medium.com/partnership-on-ai/aligning-ai-to-human-values-means-picking-the-right-metrics-855859e6f047'>Aligning AI to Human Values means Picking the Right Metrics</a></p>
]]></description>
                                                            <content:encoded><![CDATA[<p class="sqsrte-large">In this 4th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how existing metrics regarding human wellbeing along with environmental flourishing are being globally measured today. </p>
<p><a href='https://www.linkedin.com/in/laura-musikanski-7214361/'>Laura Musikanski</a> is the Executive Director of <a href='https://www.happycounts.org/'>The Happiness Alliance</a> and Chair of <a href='https://standards.ieee.org/standard/7010-2020.html'>IEEE 7010-2020</a></p>
<p><a href='https://www.linkedin.com/in/jonathanstray/'>Jonathan Stray</a> is a Visiting Scholar at Center for Human-Compatible AI and a former research partner at The Partnership on AI as well as being the author of <a href='https://medium.com/partnership-on-ai/aligning-ai-to-human-values-means-picking-the-right-metrics-855859e6f047'>Aligning AI to Human Values means Picking the Right Metrics</a></p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/geyv4a/MMT4_mixdown.mp3" length="66594354" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this 4th episode of Measurementality we'll be "identifying what counts in the algorithmic age" by analyzing how existing metrics regarding human wellbeing along with environmental flourishing are being globally measured today. 
 Laura Musikanski is the Executive Director of The Happiness Alliance and Chair of IEEE 7010-2020
Jonathan Stray is a Visiting Scholar at Center for Human-Compatible AI and a former research partner at The Partnership on AI as well as being the author of Aligning AI to Human Values means Picking the Right Metrics]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2774</itunes:duration>
                <itunes:episode>67</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Feminist AI 101 with Eleanor Drage and Kerry Mackereth</title>
        <itunes:title>Feminist AI 101 with Eleanor Drage and Kerry Mackereth</itunes:title>
        <link>https://radicalai.podbean.com/e/feminist-ai-101-with-eleanor-drage-and-kerry-machereth/</link>
                    <comments>https://radicalai.podbean.com/e/feminist-ai-101-with-eleanor-drage-and-kerry-machereth/#comments</comments>        <pubDate>Wed, 02 Jun 2021 00:07:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/22c7a80b-725b-3b0e-92c7-d48f4a8c7766</guid>
                                    <description><![CDATA[<p>What is Feminist AI and how and why should we design and implement it?</p>
<p>To answer this question and more in this episode we interview Eleanor Drage and Kerry Mackereth about the ins and outs of Feminist AI.</p>
<p>Eleanor and Kerry are both postdoctoral researchers who are working on the “Gender and Technology” research project at the “University of Cambridge Centre for Gender Studies” and in association with the Leverhulme Centre for the Future of Intelligence. In this project, they are working to provide the AI sector with practical tools to create more equitable AI informed by intersectional feminist knowledge.</p>
<p>Full show notes and guest bios for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is Feminist AI and how and why should we design and implement it?</p>
<p>To answer this question and more in this episode we interview Eleanor Drage and Kerry Mackereth about the ins and outs of Feminist AI.</p>
<p>Eleanor and Kerry are both postdoctoral researchers who are working on the “Gender and Technology” research project at the “University of Cambridge Centre for Gender Studies” and in association with the Leverhulme Centre for the Future of Intelligence. In this project, they are working to provide the AI sector with practical tools to create more equitable AI informed by intersectional feminist knowledge.</p>
<p>Full show notes and guest bios for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/fwavqi/E-K_mixdown.mp3" length="60684935" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is Feminist AI and how and why should we design and implement it?
To answer this question and more in this episode we interview Eleanor Drage and Kerry Mackereth about the ins and outs of Feminist AI.
Eleanor and Kerry are both postdoctoral researchers who are working on the “Gender and Technology” research project at the “University of Cambridge Centre for Gender Studies” and in association with the Leverhulme Centre for the Future of Intelligence. In this project, they are working to provide the AI sector with practical tools to create more equitable AI informed by intersectional feminist knowledge.
Full show notes and guest bios for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2527</itunes:duration>
                <itunes:episode>66</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Decentralizing AI with Divya Siddarth</title>
        <itunes:title>Decentralizing AI with Divya Siddarth</itunes:title>
        <link>https://radicalai.podbean.com/e/decentralizing-ai-with-divya-siddarth/</link>
                    <comments>https://radicalai.podbean.com/e/decentralizing-ai-with-divya-siddarth/#comments</comments>        <pubDate>Thu, 27 May 2021 10:39:33 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/bfd01cc4-7895-35f4-97bc-f9fe1694f3b9</guid>
                                    <description><![CDATA[<p>What is decentralized AI and how and why should we design and implement it?</p>
<p>To answer this question and more in this episode we interview Divya Siddarth about decentralizing AI, democratization, and how we can utilize the logic of social movements to influence our technology design.</p>
<p>Divya Siddarth is a Political Economist and Social Technologist at the Microsoft Office of the CTO working to understand, preserve, and extend democracy through technological progress and innovation. She previously taught classes at Stanford University on building technology for good and creating a more secure world for political activism and engagement in civil society. Divya also spent a few years in India as a research fellow, working with activists and politicians to think through democratized alternatives to existing tech platforms.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is decentralized AI and how and why should we design and implement it?</p>
<p>To answer this question and more in this episode we interview Divya Siddarth about decentralizing AI, democratization, and how we can utilize the logic of social movements to influence our technology design.</p>
<p>Divya Siddarth is a Political Economist and Social Technologist at the Microsoft Office of the CTO working to understand, preserve, and extend democracy through technological progress and innovation. She previously taught classes at Stanford University on building technology for good and creating a more secure world for political activism and engagement in civil society. Divya also spent a few years in India as a research fellow, working with activists and politicians to think through democratized alternatives to existing tech platforms.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/4npsz5/Divya_mixdown.mp3" length="76473691" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is decentralized AI and how and why should we design and implement it?
To answer this question and more in this episode we interview Divya Siddarth about decentralizing AI, democratization, and how we can utilize the logic of social movements to influence our technology design.
Divya Siddarth is a Political Economist and Social Technologist at the Microsoft Office of the CTO working to understand, preserve, and extend democracy through technological progress and innovation. She previously taught classes at Stanford University on building technology for good and creating a more secure world for political activism and engagement in civil society. Divya also spent a few years in India as a research fellow, working with activists and politicians to think through democratized alternatives to existing tech platforms.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3185</itunes:duration>
                <itunes:episode>65</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Killer Robots and Value Sensitive Design with Steven Umbrello</title>
        <itunes:title>Killer Robots and Value Sensitive Design with Steven Umbrello</itunes:title>
        <link>https://radicalai.podbean.com/e/killer-robots-and-value-sensitive-design-with-steven-umbrello/</link>
                    <comments>https://radicalai.podbean.com/e/killer-robots-and-value-sensitive-design-with-steven-umbrello/#comments</comments>        <pubDate>Wed, 05 May 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/73234811-b68c-3cfa-b284-446b21ae6826</guid>
                                    <description><![CDATA[<p>What is Value Sensitive Design and how can it inform the development and deployment of killer robots and autonomous weapon systems?</p>
<p>On this week's episode we welcome Steven Umbrello to the show.</p>
<p>Steven Umbrello currently serves as the Managing Director at the Institute for Ethics and Emerging Technologies. His main area of research revolves around Value Sensitive Design otherwise known as (VSD), its philosophical foundations, and its potential application to emerging technologies such as artificial intelligence and Industry 4.0.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is Value Sensitive Design and how can it inform the development and deployment of killer robots and autonomous weapon systems?</p>
<p>On this week's episode we welcome Steven Umbrello to the show.</p>
<p>Steven Umbrello currently serves as the Managing Director at the Institute for Ethics and Emerging Technologies. His main area of research revolves around Value Sensitive Design otherwise known as (VSD), its philosophical foundations, and its potential application to emerging technologies such as artificial intelligence and Industry 4.0.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/f3qdyf/Steven_Umbrello_mixdown9t1io.mp3" length="95021726" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is Value Sensitive Design and how can it inform the development and deployment of killer robots and autonomous weapon systems?
On this week's episode we welcome Steven Umbrello to the show.
Steven Umbrello currently serves as the Managing Director at the Institute for Ethics and Emerging Technologies. His main area of research revolves around Value Sensitive Design otherwise known as (VSD), its philosophical foundations, and its potential application to emerging technologies such as artificial intelligence and Industry 4.0.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3958</itunes:duration>
                <itunes:episode>64</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Measurementality #3: Counting Mental Health and Caregiving in Technology and AI</title>
        <itunes:title>Measurementality #3: Counting Mental Health and Caregiving in Technology and AI</itunes:title>
        <link>https://radicalai.podbean.com/e/measurementality-3-counting-mental-health-and-caregiving-in-technology-and-ai/</link>
                    <comments>https://radicalai.podbean.com/e/measurementality-3-counting-mental-health-and-caregiving-in-technology-and-ai/#comments</comments>        <pubDate>Sun, 02 May 2021 16:03:43 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/7808ef0d-27dc-3fde-bf41-462048d9c4c5</guid>
                                    <description><![CDATA[<p class="sqsrte-large">In this 3rd episode of Measurementality, we discuss mental health, wellness, indicators, metrics, and designing for the future with invited guests Amandeep Gill and Riane Eisler.</p>
<p>Riane Eisler is president of the Center for Partnership Systems and Editor-in-Chief of the Interdisciplinary Journal of Partnership Studies at the University of Minnesota. </p>
<p>Ambassador Amandeep Gill is Director of the Global Health Centre project on International Digital Health & AI Research Collaborative (I-DAIR).</p>
]]></description>
                                                            <content:encoded><![CDATA[<p class="sqsrte-large">In this 3rd episode of Measurementality, we discuss mental health, wellness, indicators, metrics, and designing for the future with invited guests Amandeep Gill and Riane Eisler.</p>
<p>Riane Eisler is president of the Center for Partnership Systems and Editor-in-Chief of the Interdisciplinary Journal of Partnership Studies at the University of Minnesota. </p>
<p>Ambassador Amandeep Gill is Director of the Global Health Centre project on International Digital Health & AI Research Collaborative (I-DAIR).</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/baf2ha/MMT3_mixdown.mp3" length="81764641" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this 3rd episode of Measurementality, we discuss mental health, wellness, indicators, metrics, and designing for the future with invited guests Amandeep Gill and Riane Eisler.
Riane Eisler is president of the Center for Partnership Systems and Editor-in-Chief of the Interdisciplinary Journal of Partnership Studies at the University of Minnesota. 
Ambassador Amandeep Gill is Director of the Global Health Centre project on International Digital Health & AI Research Collaborative (I-DAIR).]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3406</itunes:duration>
                <itunes:episode>63</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Design, Disability, Creativity, and Accessibility with Cynthia Bennett</title>
        <itunes:title>Design, Disability, Creativity, and Accessibility with Cynthia Bennett</itunes:title>
        <link>https://radicalai.podbean.com/e/designing-for-lived-disability-with-cynthia-bennett/</link>
                    <comments>https://radicalai.podbean.com/e/designing-for-lived-disability-with-cynthia-bennett/#comments</comments>        <pubDate>Wed, 21 Apr 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/e84ccac1-8731-3aa8-a689-e7eda4150c62</guid>
                                    <description><![CDATA[<p>How can we center the lived experiences and creativity of people with disabilities in the design of our technology?</p>
<p>On this week's episode we welcome Cynthia Bennett to the show.</p>
<p>Cynthia Bennett is a postdoctoral researcher at Carnegie Mellon University’s Human-Computer Interaction Institute. Her research focuses on the intersection of power, disability, design, and accessibility. Cynthia centers the lived experiences and creativity of people with disabilities as starting points for developing accessible and justice-oriented applications of technology. Cynthia is also a disabled scholar who is committed to raising the participation of disabled people in academia and the tech industry.  </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How can we center the lived experiences and creativity of people with disabilities in the design of our technology?</p>
<p>On this week's episode we welcome Cynthia Bennett to the show.</p>
<p>Cynthia Bennett is a postdoctoral researcher at Carnegie Mellon University’s Human-Computer Interaction Institute. Her research focuses on the intersection of power, disability, design, and accessibility. Cynthia centers the lived experiences and creativity of people with disabilities as starting points for developing accessible and justice-oriented applications of technology. Cynthia is also a disabled scholar who is committed to raising the participation of disabled people in academia and the tech industry.  </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/29mu23/Cynthia_Bennet_mixdowna0kmx.mp3" length="85974909" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How can we center the lived experiences and creativity of people with disabilities in the design of our technology?
On this week's episode we welcome Cynthia Bennett to the show.
Cynthia Bennett is a postdoctoral researcher at Carnegie Mellon University’s Human-Computer Interaction Institute. Her research focuses on the intersection of power, disability, design, and accessibility. Cynthia centers the lived experiences and creativity of people with disabilities as starting points for developing accessible and justice-oriented applications of technology. Cynthia is also a disabled scholar who is committed to raising the participation of disabled people in academia and the tech industry.  
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3581</itunes:duration>
                <itunes:episode>62</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Atlas of AI with Kate Crawford</title>
        <itunes:title>Atlas of AI with Kate Crawford</itunes:title>
        <link>https://radicalai.podbean.com/e/atlas-of-ai-with-kate-crawford/</link>
                    <comments>https://radicalai.podbean.com/e/atlas-of-ai-with-kate-crawford/#comments</comments>        <pubDate>Wed, 07 Apr 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/f9b66acb-bb7f-37a3-813e-e3d601f19548</guid>
                                    <description><![CDATA[<p>What is the Atlas of AI? Why is it important? How is AI an industry of extraction? How is AI impacting the planet? What can be done? </p>
<p>To answer these questions and more we welcome to the show Dr. Kate Crawford to discuss Kate's new book Atlas of AI: Power, Politics, and the Planetary Costs of Artificial Intelligence.</p>
<p>Dr. Kate Crawford is a leading scholar of the social and political implications of artificial intelligence. She is a Research Professor of Communication and STS at USC Annenberg, a Senior Principal Researcher at Microsoft Research in New York City, and the inaugural Visiting Chair for AI and Justice at the École Normale Supérieure in Paris. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is the Atlas of AI? Why is it important? How is AI an industry of extraction? How is AI impacting the planet? What can be done? </p>
<p>To answer these questions and more we welcome to the show Dr. Kate Crawford to discuss Kate's new book <em>Atlas of AI: Power, Politics, and the Planetary Costs of Artificial Intelligence</em>.</p>
<p>Dr. Kate Crawford is a leading scholar of the social and political implications of artificial intelligence. She is a Research Professor of Communication and STS at USC Annenberg, a Senior Principal Researcher at Microsoft Research in New York City, and the inaugural Visiting Chair for AI and Justice at the École Normale Supérieure in Paris. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/cgascy/Kate_Crawford_mixdown8bfoj.mp3" length="86239217" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the Atlas of AI? Why is it important? How is AI an industry of extraction? How is AI impacting the planet? What can be done? 
To answer these questions and more we welcome to the show Dr. Kate Crawford to discuss Kate's new book Atlas of AI: Power, Politics, and the Planetary Costs of Artificial Intelligence.
Dr. Kate Crawford is a leading scholar of the social and political implications of artificial intelligence. She is a Research Professor of Communication and STS at USC Annenberg, a Senior Principal Researcher at Microsoft Research in New York City, and the inaugural Visiting Chair for AI and Justice at the École Normale Supérieure in Paris. 
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3592</itunes:duration>
                <itunes:episode>61</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Defining Bias with Su Lin Blodgett</title>
        <itunes:title>Defining Bias with Su Lin Blodgett</itunes:title>
        <link>https://radicalai.podbean.com/e/defining-bias-with-su-lin-blodgett/</link>
                    <comments>https://radicalai.podbean.com/e/defining-bias-with-su-lin-blodgett/#comments</comments>        <pubDate>Wed, 31 Mar 2021 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/bc0ee192-0107-3cd0-90fb-a8b9dae94387</guid>
                                    <description><![CDATA[<p>How do we define bias? Is all bias the same? Is it possible to eliminate bias completely in our AI systems? Should we?</p>
<p>To answer these questions and more we welcome to the show Su Lin Blodgett.</p>
<p>Su Lin is a postdoctoral researcher in the Fairness, Accountability, Transparency, and Ethics (FATE) group at Microsoft Research Montréal. She is broadly interested in examining the social implications of Natural Language Processing, or NLP technologies, and in using NLP approaches to examine language variation and change. She previously completed her Ph.D. in computer science at the University of Massachusetts Amherst.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How do we <em>define</em> bias? Is all bias the same? Is it possible to eliminate bias completely in our AI systems? Should we?</p>
<p>To answer these questions and more we welcome to the show Su Lin Blodgett.</p>
<p>Su Lin is a postdoctoral researcher in the Fairness, Accountability, Transparency, and Ethics (FATE) group at Microsoft Research Montréal. She is broadly interested in examining the social implications of Natural Language Processing, or NLP technologies, and in using NLP approaches to examine language variation and change. She previously completed her Ph.D. in computer science at the University of Massachusetts Amherst.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ka44vm/su_lin_mixdown7iphl.mp3" length="79489324" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How do we define bias? Is all bias the same? Is it possible to eliminate bias completely in our AI systems? Should we?
To answer these questions and more we welcome to the show Su Lin Blodgett.
Su Lin is a postdoctoral researcher in the Fairness, Accountability, Transparency, and Ethics (FATE) group at Microsoft Research Montréal. She is broadly interested in examining the social implications of Natural Language Processing, or NLP technologies, and in using NLP approaches to examine language variation and change. She previously completed her Ph.D. in computer science at the University of Massachusetts Amherst.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3311</itunes:duration>
                <itunes:episode>60</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Measurementality #2: Children's Data and Sustainability</title>
        <itunes:title>Measurementality #2: Children's Data and Sustainability</itunes:title>
        <link>https://radicalai.podbean.com/e/measurementality-2-childrens-data-and-sustainability/</link>
                    <comments>https://radicalai.podbean.com/e/measurementality-2-childrens-data-and-sustainability/#comments</comments>        <pubDate>Sun, 21 Mar 2021 09:02:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/6b91a10f-5643-39c4-936a-75d725bdeecb</guid>
                                    <description><![CDATA[<p>Welcome to the second episode of our Measurementality series in partnership with IEEESA!</p>
<p>Our topics today are children and sustainability. </p>
<p>We interview <a href='https://www.linkedin.com/in/alexsandypentland/'>Sandy Pentland</a> of MIT and <a href='https://5rightsfoundation.com/about-us/our-team.html'>Baroness Beeban Kidron</a> of the 5Rights Foundation.  </p>
<p>Focusing on the key goal of our series, "defining what counts in the algorithmic age," guests will discuss issues like data privacy for children, data agency for all, and how metrics like the United Nations Sustainable Development Goals and other human rights oriented metrics are being utilized in the design of Artificial Intelligence Systems (AIS).</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Welcome to the second episode of our Measurementality series in partnership with IEEESA!</p>
<p>Our topics today are children and sustainability. </p>
<p>We interview <a href='https://www.linkedin.com/in/alexsandypentland/'>Sandy Pentland</a> of MIT and <a href='https://5rightsfoundation.com/about-us/our-team.html'>Baroness Beeban Kidron</a> of the 5Rights Foundation.  </p>
<p>Focusing on the key goal of our series, "defining what counts in the algorithmic age," guests will discuss issues like data privacy for children, data agency for all, and how metrics like the United Nations Sustainable Development Goals and other human rights oriented metrics are being utilized in the design of Artificial Intelligence Systems (AIS).</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/xnv8ua/MMT_26v8jq.mp3" length="101391803" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Welcome to the second episode of our Measurementality series in partnership with IEEESA!
Our topics today are children and sustainability. 
We interview Sandy Pentland of MIT and Baroness Beeban Kidron of the 5Rights Foundation.  
Focusing on the key goal of our series, "defining what counts in the algorithmic age," guests will discuss issues like data privacy for children, data agency for all, and how metrics like the United Nations Sustainable Development Goals and other human rights oriented metrics are being utilized in the design of Artificial Intelligence Systems (AIS).
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3168</itunes:duration>
                <itunes:episode>59</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Your Computer Is on Fire with Mar Hicks &amp; Kavita Philip</title>
        <itunes:title>Your Computer Is on Fire with Mar Hicks &amp; Kavita Philip</itunes:title>
        <link>https://radicalai.podbean.com/e/your-computer-is-on-fire-with-mar-hicks-kavita-philip/</link>
                    <comments>https://radicalai.podbean.com/e/your-computer-is-on-fire-with-mar-hicks-kavita-philip/#comments</comments>        <pubDate>Wed, 10 Mar 2021 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/609619c5-7190-3d1c-9c64-58eb65b01eb4</guid>
                                    <description><![CDATA[<p>How do we challenge techno-utopianism? How do we dismantle systems of oppression in technology? </p>
<p>To answer these questions and more we welcome to the show two editors of the new collection from MIT Press Your Computer Is on Fire, Mar Hicks and Kavita Philip.</p>
<p>Mar Hicks is an author, historian, and professor doing research on the history of computing, labor, and how hidden technological dynamics change the core narratives of the history of computing in unexpected ways. Hicks's multiple award-winning book, Programmed Inequality, looks at how the British lost their early lead in computing by discarding women computer workers, and what this cautionary tale tells us about current issues in high tech. Their current project looks at resistance and queerness in the history of technology.</p>
<p>Kavita Philip is a historian of science and technology who has written about nineteenth-century environmental knowledge in British India, information technology in post-colonial India, and the intersections of art, science fiction, and social activism with science and technology. She is author of Civilizing Natures (2004), and Studies in Unauthorized Reproduction (forthcoming, MIT Press), as well as co-editor of five volumes curating new interdisciplinary work in radical history, art, activism, computing, and public policy.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How do we challenge techno-utopianism? How do we dismantle systems of oppression in technology? </p>
<p>To answer these questions and more we welcome to the show two editors of the new collection from MIT Press <em>Your Computer Is on Fire</em>, Mar Hicks and Kavita Philip.</p>
<p>Mar Hicks is an author, historian, and professor doing research on the history of computing, labor, and how hidden technological dynamics change the core narratives of the history of computing in unexpected ways. Hicks's multiple award-winning book, Programmed Inequality, looks at how the British lost their early lead in computing by discarding women computer workers, and what this cautionary tale tells us about current issues in high tech. Their current project looks at resistance and queerness in the history of technology.</p>
<p>Kavita Philip is a historian of science and technology who has written about nineteenth-century environmental knowledge in British India, information technology in post-colonial India, and the intersections of art, science fiction, and social activism with science and technology. She is author of Civilizing Natures (2004), and Studies in Unauthorized Reproduction (forthcoming, MIT Press), as well as co-editor of five volumes curating new interdisciplinary work in radical history, art, activism, computing, and public policy.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ruxz8x/Mar_mixdown.mp3" length="84629407" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How do we challenge techno-utopianism? How do we dismantle systems of oppression in technology? 
To answer these questions and more we welcome to the show two editors of the new collection from MIT Press Your Computer Is on Fire, Mar Hicks and Kavita Philip.
Mar Hicks is an author, historian, and professor doing research on the history of computing, labor, and how hidden technological dynamics change the core narratives of the history of computing in unexpected ways. Hicks's multiple award-winning book, Programmed Inequality, looks at how the British lost their early lead in computing by discarding women computer workers, and what this cautionary tale tells us about current issues in high tech. Their current project looks at resistance and queerness in the history of technology.
Kavita Philip is a historian of science and technology who has written about nineteenth-century environmental knowledge in British India, information technology in post-colonial India, and the intersections of art, science fiction, and social activism with science and technology. She is author of Civilizing Natures (2004), and Studies in Unauthorized Reproduction (forthcoming, MIT Press), as well as co-editor of five volumes curating new interdisciplinary work in radical history, art, activism, computing, and public policy.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3525</itunes:duration>
                <itunes:episode>58</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #9 - Misinformation &amp; Free Expression with Jasmine McNealy &amp; Claire Wardle</title>
        <itunes:title>All Tech is Human Series #9 - Misinformation &amp; Free Expression with Jasmine McNealy &amp; Claire Wardle</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-9-misinformation-free-expression-with-jasmine-mcnealy-claire-wardle/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-9-misinformation-free-expression-with-jasmine-mcnealy-claire-wardle/#comments</comments>        <pubDate>Wed, 03 Mar 2021 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/752b6b59-7bf8-3f96-ac88-72054b2b0e47</guid>
                                    <description><![CDATA[<p>This conversation explores the question: How can we reduce misinformation and disinformation on social media platforms while also ensuring that platforms promote the free exchange of ideas? </p>
<p>Guests in this episode include Dr. Jasmine McNealy (Associate Professor of Telecommunication at the University of Florida, Harvard Berkman Klein Center affiliate, media & law expert) and Dr. Claire Wardle (co-founder and director of First Draft, leading expert on user generated content, verification and misinformation).</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>This conversation explores the question: How can we reduce misinformation and disinformation on social media platforms while also ensuring that platforms promote the free exchange of ideas? </p>
<p>Guests in this episode include Dr. Jasmine McNealy (Associate Professor of Telecommunication at the University of Florida, Harvard Berkman Klein Center affiliate, media & law expert) and Dr. Claire Wardle (co-founder and director of First Draft, leading expert on user generated content, verification and misinformation).</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/e9cpd8/ATIH_9_mixdownbiksa.mp3" length="82880042" type="audio/mpeg"/>
        <itunes:summary><![CDATA[This conversation explores the question: How can we reduce misinformation and disinformation on social media platforms while also ensuring that platforms promote the free exchange of ideas? 
Guests in this episode include Dr. Jasmine McNealy (Associate Professor of Telecommunication at the University of Florida, Harvard Berkman Klein Center affiliate, media & law expert) and Dr. Claire Wardle (co-founder and director of First Draft, leading expert on user generated content, verification and misinformation).
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3452</itunes:duration>
                <itunes:episode>57</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Social Inequality in the Digital Economy with Zanele Munyikwa</title>
        <itunes:title>Social Inequality in the Digital Economy with Zanele Munyikwa</itunes:title>
        <link>https://radicalai.podbean.com/e/social-inequality-in-the-digital-economy-with-zanele-munyikwa/</link>
                    <comments>https://radicalai.podbean.com/e/social-inequality-in-the-digital-economy-with-zanele-munyikwa/#comments</comments>        <pubDate>Wed, 24 Feb 2021 00:15:21 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/ebfe8ba2-e804-331a-b899-69c2878ef4f2</guid>
                                    <description><![CDATA[<p>How does the Digital Economy perpetuate social inequality? </p>
<p>In this episode we interview Zanele Munyikwa. Zanele is a PhD student in Management Science and Information Technology at MIT Sloan. She is a computational social scientist who uses causal inference and machine learning techniques to study the digital economy, technology, and the future of work. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How does the Digital Economy perpetuate social inequality? </p>
<p>In this episode we interview Zanele Munyikwa. Zanele is a PhD student in Management Science and Information Technology at MIT Sloan. She is a computational social scientist who uses causal inference and machine learning techniques to study the digital economy, technology, and the future of work. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/qxec86/Zanele_mixdown.mp3" length="53761652" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How does the Digital Economy perpetuate social inequality? 
In this episode we interview Zanele Munyikwa. Zanele is a PhD student in Management Science and Information Technology at MIT Sloan. She is a computational social scientist who uses causal inference and machine learning techniques to study the digital economy, technology, and the future of work. 
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2239</itunes:duration>
                <itunes:episode>56</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Measurementality #1: Defining What Counts in the Algorithmic Age</title>
        <itunes:title>Measurementality #1: Defining What Counts in the Algorithmic Age</itunes:title>
        <link>https://radicalai.podbean.com/e/measurementality-1-defining-what-counts-in-the-algorithmic-age/</link>
                    <comments>https://radicalai.podbean.com/e/measurementality-1-defining-what-counts-in-the-algorithmic-age/#comments</comments>        <pubDate>Sun, 14 Feb 2021 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/911d2a0a-f049-3105-af70-63435c8d4c4e</guid>
                                    <description><![CDATA[<p>Join John C. Havens of the IEEE Standards Association along with Jess and Dylan, the co-hosts of the popular podcast, Radical AI as they discuss the new Measurementality content series, including topics such as: How is success measured today in the world of Artificial Intelligence Systems (AIS)? What is the positive future we’re working to build with AIS? And, what are the measures of success for that future? We'll also be discussing how the Measurementality series features a call to action for listeners and the AIS community at large to respond to these questions to contribute to two reports helping us define and frame 'what counts in the algorithmic age.'</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Join John C. Havens of the IEEE Standards Association along with Jess and Dylan, the co-hosts of the popular podcast, Radical AI as they discuss the new Measurementality content series, including topics such as: How is success measured today in the world of Artificial Intelligence Systems (AIS)? What is the positive future we’re working to build with AIS? And, what are the measures of success for that future? We'll also be discussing how the Measurementality series features a call to action for listeners and the AIS community at large to respond to these questions to contribute to two reports helping us define and frame 'what counts in the algorithmic age.'</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/wwbyim/MMT1_mixdown.mp3" length="49120015" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Join John C. Havens of the IEEE Standards Association along with Jess and Dylan, the co-hosts of the popular podcast, Radical AI as they discuss the new Measurementality content series, including topics such as: How is success measured today in the world of Artificial Intelligence Systems (AIS)? What is the positive future we’re working to build with AIS? And, what are the measures of success for that future? We'll also be discussing how the Measurementality series features a call to action for listeners and the AIS community at large to respond to these questions to contribute to two reports helping us define and frame 'what counts in the algorithmic age.']]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2046</itunes:duration>
                <itunes:episode>55</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Anti-Trust: Congress and the Tech Lobby with Anna Lenhart</title>
        <itunes:title>Anti-Trust: Congress and the Tech Lobby with Anna Lenhart</itunes:title>
        <link>https://radicalai.podbean.com/e/anti-trust-congress-and-the-tech-lobby-with-anna-lenhart/</link>
                    <comments>https://radicalai.podbean.com/e/anti-trust-congress-and-the-tech-lobby-with-anna-lenhart/#comments</comments>        <pubDate>Wed, 10 Feb 2021 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/d588e4db-fae6-3619-a263-9ae9977391c8</guid>
                                    <description><![CDATA[<p>What should you know about Anti-Trust regulation nationally and internationally? How does the tech sector drive policy?   </p>
<p>In this episode we interview Anna Lenhart, a researcher for technology policy and democracy at University of Maryland’s iSchool Ethics & Values in Design Lab. She recently served as a TechCongress Fellow with the House Judiciary Committee Antitrust Subcommittee and supported the investigation into Facebook, Google, Amazon and Apple.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What should you know about Anti-Trust regulation nationally and internationally? How does the tech sector drive policy?   </p>
<p>In this episode we interview Anna Lenhart, a researcher for technology policy and democracy at University of Maryland’s iSchool Ethics & Values in Design Lab. She recently served as a TechCongress Fellow with the House Judiciary Committee Antitrust Subcommittee and supported the investigation into Facebook, Google, Amazon and Apple.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/jjfazi/Anna_Lenhart_mixdownaeee3.mp3" length="76440380" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What should you know about Anti-Trust regulation nationally and internationally? How does the tech sector drive policy?   
In this episode we interview Anna Lenhart, a researcher for technology policy and democracy at University of Maryland’s iSchool Ethics & Values in Design Lab. She recently served as a TechCongress Fellow with the House Judiciary Committee Antitrust Subcommittee and supported the investigation into Facebook, Google, Amazon and Apple.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3184</itunes:duration>
                <itunes:episode>54</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #8 - Improving Social Media: Content Moderation &amp; Democracy with Sarah T. Roberts &amp; Murtaza Shaikh</title>
        <itunes:title>All Tech is Human Series #8 - Improving Social Media: Content Moderation &amp; Democracy with Sarah T. Roberts &amp; Murtaza Shaikh</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-8-improving-social-media-content-moderation-democracy-with-sarah-t-roberts-murtaza-shaikh/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-8-improving-social-media-content-moderation-democracy-with-sarah-t-roberts-murtaza-shaikh/#comments</comments>        <pubDate>Wed, 27 Jan 2021 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/d95da3f6-716d-3fac-ac36-cbaf7998c010</guid>
                                    <description><![CDATA[<p>This conversation explores the topic Improving Social Media: Content Moderation & Democracy with invited panelists Sarah T. Roberts and Murtaza Shaikh</p>
<p>Sarah T. Roberts is the co-founder and Co-Director of the UCLA Center for Critical Internet Inquiry, and the author of Behind the Screen: Content Moderation in the Shadows of Social Media.</p>
<p>Murtaza Shaikh is the Senior Advisor on Hate Speech, Social Media and Minorities to the UN Special Rapporteur on Minority Issues</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>This conversation explores the topic <em>Improving Social Media: Content Moderation & Democracy </em>with invited panelists Sarah T. Roberts and Murtaza Shaikh</p>
<p>Sarah T. Roberts is the co-founder and Co-Director of the UCLA Center for Critical Internet Inquiry, and the author of Behind the Screen: Content Moderation in the Shadows of Social Media.</p>
<p>Murtaza Shaikh is the Senior Advisor on Hate Speech, Social Media and Minorities to the UN Special Rapporteur on Minority Issues</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/dstc9j/ATIH_8_mixdown9k9zy.mp3" length="93052678" type="audio/mpeg"/>
        <itunes:summary><![CDATA[This conversation explores the topic Improving Social Media: Content Moderation & Democracy with invited panelists Sarah T. Roberts and Murtaza Shaikh
Sarah T. Roberts is the co-founder and Co-Director of the UCLA Center for Critical Internet Inquiry, and the author of Behind the Screen: Content Moderation in the Shadows of Social Media.
Murtaza Shaikh is the Senior Advisor on Hate Speech, Social Media and Minorities to the UN Special Rapporteur on Minority Issues
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3876</itunes:duration>
                <itunes:episode>53</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Ability and Accessibility in AI with Meredith Ringel Morris</title>
        <itunes:title>Ability and Accessibility in AI with Meredith Ringel Morris</itunes:title>
        <link>https://radicalai.podbean.com/e/accessibility-and-disability-in-ai-with-meredith-ringel-morris/</link>
                    <comments>https://radicalai.podbean.com/e/accessibility-and-disability-in-ai-with-meredith-ringel-morris/#comments</comments>        <pubDate>Wed, 20 Jan 2021 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/9c1fffd0-fb6d-3367-ba6e-8cff768a9b71</guid>
                                    <description><![CDATA[<p>What should you know about Ability and Accessibility in AI and responsible technology development? </p>
<p>In this episode we interview Meredith Ringel Morris, a computer scientist conducting research in the areas of human-computer interaction (HCI), computer-supported cooperative work (CSCW), social computing, and accessibility. Her current research focus is on accessibility, particularly on the intersection of accessibility and social technologies.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What should you know about Ability and Accessibility in AI and responsible technology development? </p>
<p>In this episode we interview Meredith Ringel Morris, a computer scientist conducting research in the areas of human-computer interaction (HCI), computer-supported cooperative work (CSCW), social computing, and accessibility. Her current research focus is on accessibility, particularly on the intersection of accessibility and social technologies.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/epug7d/Merrie_Morris3_mixdownbg2n6.mp3" length="82446360" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What should you know about Ability and Accessibility in AI and responsible technology development? 
In this episode we interview Meredith Ringel Morris, a computer scientist conducting research in the areas of human-computer interaction (HCI), computer-supported cooperative work (CSCW), social computing, and accessibility. Her current research focus is on accessibility, particularly on the intersection of accessibility and social technologies.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3434</itunes:duration>
                <itunes:episode>52</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>2020 Hindsight: The Radical AI Podcast New Years Spectacular!</title>
        <itunes:title>2020 Hindsight: The Radical AI Podcast New Years Spectacular!</itunes:title>
        <link>https://radicalai.podbean.com/e/2020-hindsight-the-radical-ai-podcast-new-years-spectacular/</link>
                    <comments>https://radicalai.podbean.com/e/2020-hindsight-the-radical-ai-podcast-new-years-spectacular/#comments</comments>        <pubDate>Wed, 30 Dec 2020 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/64d9cf46-4f7a-38a9-8241-70ce20234006</guid>
                                    <description><![CDATA[<p>What a year! Join us to review 2020 and to release a special surprise! HINT: the surprise can be found on our website at Radicalai.org ;)</p>
<p>As always, if you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What a year! Join us to review 2020 and to release a special surprise! HINT: the surprise can be found on our website at Radicalai.org ;)</p>
<p>As always, if you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/d5wi2t/RAI_Year_in_Review_2020_mixdownbgkkk.mp3" length="46374988" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What a year! Join us to review 2020 and to release a special surprise! HINT: the surprise can be found on our website at Radicalai.org ;)
As always, if you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1931</itunes:duration>
                <itunes:episode>51</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #7 - The Business Case for AI Ethics with William Griffin and Alayna Kennedy</title>
        <itunes:title>All Tech is Human Series #7 - The Business Case for AI Ethics with William Griffin and Alayna Kennedy</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-7-the-business-case-for-ai-ethics-with-william-griffin-and-alayna-kennedy/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-7-the-business-case-for-ai-ethics-with-william-griffin-and-alayna-kennedy/#comments</comments>        <pubDate>Wed, 16 Dec 2020 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/b0e5a75a-3cf8-300d-bf51-05f3a77be31b</guid>
                                    <description><![CDATA[<p>What is the business case for AI Ethics? </p>
<p>This conversation explores the topic with invited panelists William Griffin and Alayna Kennedy.</p>
<p>William Griffin is the Chief Ethics Officer of Hypergiant, an organization that works with partners to create powerful technology solutions and smarter, more efficient human workforces.</p>
<p>Alayna Kennedy is a data scientist at IBM, working on creating ethical algorithms and aligning human and machine values.</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p><em>What is the business case for AI Ethics? </em></p>
<p>This conversation explores the topic with invited panelists William Griffin and Alayna Kennedy.</p>
<p>William Griffin is the Chief Ethics Officer of Hypergiant, an organization that works with partners to create powerful technology solutions and smarter, more efficient human workforces.</p>
<p>Alayna Kennedy is a data scientist at IBM, working on creating ethical algorithms and aligning human and machine values.</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/2e43v3/ATIH_7_mixdown9ale2.mp3" length="91647934" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the business case for AI Ethics? 
This conversation explores the topic with invited panelists William Griffin and Alayna Kennedy.
William Griffin is the Chief Ethics Officer of Hypergiant, an organization that works with partners to create powerful technology solutions and smarter, more efficient human workforces.
Alayna Kennedy is a data scientist at IBM, working on creating ethical algorithms and aligning human and machine values.
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3818</itunes:duration>
                <itunes:episode>50</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Mentorship Through the Black in AI Academic Program with Moses Namara</title>
        <itunes:title>Mentorship Through the Black in AI Academic Program with Moses Namara</itunes:title>
        <link>https://radicalai.podbean.com/e/mentorship-through-the-black-in-ai-academic-program-with-moses-namara/</link>
                    <comments>https://radicalai.podbean.com/e/mentorship-through-the-black-in-ai-academic-program-with-moses-namara/#comments</comments>        <pubDate>Wed, 09 Dec 2020 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/8f1d3862-cdba-3dbf-b753-948884f77efe</guid>
                                    <description><![CDATA[<p class="p1">In this episode, we interview Moses Namara of Black in AI about the new Black in AI Academic Program, a program that serves as a resource to support Black junior researchers as they apply to graduate programs, navigate graduate school, and enter the postgraduate job market.</p>
<p class="p1">Moses Namara is a Facebook Research Fellow and Ph.D. candidate in Human-Centered Computing (HCC) at Clemson University. He uses interdisciplinary research methods from computer science, psychology, and the social sciences to understand the principles behind users' adoption and use of technology, decision-making, and privacy attitudes and behaviors. His research interests are in the field of usable privacy and security and human-computer interaction.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p class="p1">In this episode, we interview Moses Namara of Black in AI about the new Black in AI Academic Program, a program that serves as a resource to support Black junior researchers as they apply to graduate programs, navigate graduate school, and enter the postgraduate job market.</p>
<p class="p1">Moses Namara is a Facebook Research Fellow and Ph.D. candidate in Human-Centered Computing (HCC) at Clemson University. He uses interdisciplinary research methods from computer science, psychology, and the social sciences to understand the principles behind users' adoption and use of technology, decision-making, and privacy attitudes and behaviors. His research interests are in the field of usable privacy and security and human-computer interaction.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/jgahg5/moses_mixdown.mp3" length="78951538" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this episode, we interview Moses Namara of Black in AI about the new Black in AI Academic Program, a program that serves as a resource to support Black junior researchers as they apply to graduate programs, navigate graduate school, and enter the postgraduate job market.
Moses Namara is a Facebook Research Fellow and Ph.D. candidate in Human-Centered Computing (HCC) at Clemson University. He uses interdisciplinary research methods from computer science, psychology, and the social sciences to understand the principles behind users' adoption and use of technology, decision-making, and privacy attitudes and behaviors. His research interests are in the field of usable privacy and security and human-computer interaction.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3289</itunes:duration>
                <itunes:episode>49</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Industry AI Ethics 101 with Kathy Baxter</title>
        <itunes:title>Industry AI Ethics 101 with Kathy Baxter</itunes:title>
        <link>https://radicalai.podbean.com/e/industry-ai-ethics-101-with-kathy-baxter/</link>
                    <comments>https://radicalai.podbean.com/e/industry-ai-ethics-101-with-kathy-baxter/#comments</comments>        <pubDate>Wed, 02 Dec 2020 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/965d86e7-65cf-3b47-b83c-f4b6dcc27803</guid>
                                    <description><![CDATA[<p>What do you need to know about AI Ethics in the tech industry?</p>
<p>To explore this question we welcome Kathy Baxter to the show.</p>
<p>Kathy is an Architect of Ethical AI Practice at Salesforce, where she develops research-informed best practices to educate Salesforce employees, customers, and the industry on the development of responsible AI. Prior to Salesforce, she worked at Google, eBay, and Oracle in User Experience Research.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What do you need to know about AI Ethics in the tech industry?</p>
<p>To explore this question we welcome Kathy Baxter to the show.</p>
<p>Kathy is an Architect of Ethical AI Practice at Salesforce, where she develops research-informed best practices to educate Salesforce employees, customers, and the industry on the development of responsible AI. Prior to Salesforce, she worked at Google, eBay, and Oracle in User Experience Research.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/nj95ib/Kathy_Baxter_mixdownbs37n.mp3" length="75981168" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What do you need to know about AI Ethics in the tech industry?
To explore this question we welcome Kathy Baxter to the show.
Kathy is an Architect of Ethical AI Practice at Salesforce, where she develops research-informed best practices to educate Salesforce employees, customers, and the industry on the development of responsible AI. Prior to Salesforce, she worked at Google, eBay, and Oracle in User Experience Research.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3165</itunes:duration>
                <itunes:episode>48</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Our Messy Robot Relationships with Kate Darling</title>
        <itunes:title>Our Messy Robot Relationships with Kate Darling</itunes:title>
        <link>https://radicalai.podbean.com/e/our-messy-robot-relationships-with-kate-darling/</link>
                    <comments>https://radicalai.podbean.com/e/our-messy-robot-relationships-with-kate-darling/#comments</comments>        <pubDate>Wed, 25 Nov 2020 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/d4e03f3b-5bdc-36b2-8a7b-d79534f21a2c</guid>
                                    <description><![CDATA[<p>Have you ever seen a robot and called it cute? Have you ever seen a drone and felt afraid? Have you ever apologized to Siri or yelled at your Roomba to get out of the way? Have you ever named your car?</p>
<p>Our relationships with robots are complex and messy. To explore this topic, we interview Kate Darling, a leading expert in Robot Ethics and a Research Specialist at the MIT Media Lab. Kate researches the near-term effects of robotic technology, with a particular interest in law, social, and ethical issues.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Have you ever seen a robot and called it cute? Have you ever seen a drone and felt afraid? Have you ever apologized to Siri or yelled at your Roomba to get out of the way? Have you ever named your car?</p>
<p>Our relationships with robots are complex and messy. To explore this topic, we interview Kate Darling, a leading expert in Robot Ethics and a Research Specialist at the MIT Media Lab. Kate researches the near-term effects of robotic technology, with a particular interest in law, social, and ethical issues.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/vy3wat/Kate_Darling_mixdown6v99r.mp3" length="86627550" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Have you ever seen a robot and called it cute? Have you ever seen a drone and felt afraid? Have you ever apologized to Siri or yelled at your Roomba to get out of the way? Have you ever named your car?
Our relationships with robots are complex and messy. To explore this topic, we interview Kate Darling, a leading expert in Robot Ethics and a Research Specialist at the MIT Media Lab. Kate researches the near-term effects of robotic technology, with a particular interest in law, social, and ethical issues.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3609</itunes:duration>
                <itunes:episode>47</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #6 - Coded Bias, AI, and the Future of Civil Rights with Timnit Gebru, Meredith Broussard, &amp; Shalini Kantayya</title>
        <itunes:title>All Tech is Human Series #6 - Coded Bias, AI, and the Future of Civil Rights with Timnit Gebru, Meredith Broussard, &amp; Shalini Kantayya</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-6-coded-bias-ai-and-the-future-of-civil-rights-with-timnit-gebru-meredith-broussard-shalini-kantayya/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-6-coded-bias-ai-and-the-future-of-civil-rights-with-timnit-gebru-meredith-broussard-shalini-kantayya/#comments</comments>        <pubDate>Wed, 18 Nov 2020 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/6024c222-d62f-34ab-8723-4b09e0612f86</guid>
                                    <description><![CDATA[<p>How will Artificial Intelligence define the future of Civil Rights? </p>
<p>To celebrate the NYC theater release of the film Coded Bias we present this Livestreamed conversation featuring Shalini Kantayya (director, Coded Bias), Meredith Broussard (Author, Artificial Unintelligence), and Timnit Gebru (Co-Lead, Ethical Artificial Intelligence Team at Google) </p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How will Artificial Intelligence define the future of Civil Rights? </p>
<p>To celebrate the NYC theater release of the film <em>Coded Bias</em> we present this Livestreamed conversation featuring Shalini Kantayya (director, <em>Coded Bias),</em> Meredith Broussard (Author, <em>Artificial Unintelligence), </em>and Timnit Gebru (Co-Lead, Ethical Artificial Intelligence Team at Google) </p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/3ujhqi/ATIH_6__mixdown8nov8.mp3" length="98724374" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How will Artificial Intelligence define the future of Civil Rights? 
To celebrate the NYC theater release of the film Coded Bias we present this Livestreamed conversation featuring Shalini Kantayya (director, Coded Bias), Meredith Broussard (Author, Artificial Unintelligence), and Timnit Gebru (Co-Lead, Ethical Artificial Intelligence Team at Google) 
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>4113</itunes:duration>
                <itunes:episode>46</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Robot Regulation: What Is It and Why Does It Matter? with Ryan Calo</title>
        <itunes:title>Robot Regulation: What Is It and Why Does It Matter? with Ryan Calo</itunes:title>
        <link>https://radicalai.podbean.com/e/robot-regulation-what-is-it-and-why-does-it-matter-with-ryan-calo/</link>
                    <comments>https://radicalai.podbean.com/e/robot-regulation-what-is-it-and-why-does-it-matter-with-ryan-calo/#comments</comments>        <pubDate>Wed, 11 Nov 2020 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/79bbb301-0ed2-3a0b-b3ec-46dec04226db</guid>
                                    <description><![CDATA[<p>What is robot regulation and why does it matter?</p>
<p>To answer this question we welcome to the show Ryan Calo. </p>
<p>Ryan is a professor at the University of Washington School of Law. He is a faculty co-director of the University of Washington Tech Policy Lab, a unique, interdisciplinary research unit that spans the School of Law, Information School, and Paul G. Allen School of Computer Science and Engineering. Ryan’s research broadly encompasses law and emerging technology.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is robot regulation and why does it matter?</p>
<p>To answer this question we welcome to the show Ryan Calo. </p>
<p>Ryan is a professor at the University of Washington School of Law. He is a faculty co-director of the University of Washington Tech Policy Lab, a unique, interdisciplinary research unit that spans the School of Law, Information School, and Paul G. Allen School of Computer Science and Engineering. Ryan’s research broadly encompasses law and emerging technology.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/r658qn/Ryan_Calo_mixdownbsi1q.mp3" length="86257584" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is robot regulation and why does it matter?
To answer this question we welcome to the show Ryan Calo. 
Ryan is a professor at the University of Washington School of Law. He is a faculty co-director of the University of Washington Tech Policy Lab, a unique, interdisciplinary research unit that spans the School of Law, Information School, and Paul G. Allen School of Computer Science and Engineering. Ryan’s research broadly encompasses law and emerging technology.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3593</itunes:duration>
                <itunes:episode>45</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Transparency as a Political Choice with Rumman Chowdhury &amp; Mona Sloane</title>
        <itunes:title>Transparency as a Political Choice with Rumman Chowdhury &amp; Mona Sloane</itunes:title>
        <link>https://radicalai.podbean.com/e/transparency-as-a-political-choice-with-mona-sloane-rumman-chowdhury/</link>
                    <comments>https://radicalai.podbean.com/e/transparency-as-a-political-choice-with-mona-sloane-rumman-chowdhury/#comments</comments>        <pubDate>Wed, 04 Nov 2020 00:05:00 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/aaaf53aa-3135-3af1-9376-96cd55b98484</guid>
                                    <description><![CDATA[<p>What is the relationship between the government and artificial intelligence?</p>
<p>To unpack this timely question we interview Mona Sloane and Rumman Chowdhury.</p>
<p>Mona Sloane is a sociologist working on inequality in the context of AI design and policy. Mona is a Fellow with NYU’s Institute for Public Knowledge (IPK), where she convenes the ‘Co-Opting AI’ series and co-curates the ‘The Shift’ series. She is also an Adjunct Professor at NYU’s Tandon School of Engineering.</p>
<p>Rumman Chowdhury studies artificial intelligence and humanity. She is currently the Global Lead for Responsible AI at Accenture Applied Intelligence, where she works with C-suite clients to create cutting-edge technical solutions for ethical, explainable and transparent AI.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is the relationship between the government and artificial intelligence?</p>
<p>To unpack this timely question we interview Mona Sloane and Rumman Chowdhury.</p>
<p>Mona Sloane is a sociologist working on inequality in the context of AI design and policy. Mona is a Fellow with NYU’s Institute for Public Knowledge (IPK), where she convenes the ‘Co-Opting AI’ series and co-curates the ‘The Shift’ series. She is also an Adjunct Professor at NYU’s Tandon School of Engineering.</p>
<p>Rumman Chowdhury studies artificial intelligence and humanity. She is currently the Global Lead for Responsible AI at Accenture Applied Intelligence, where she works with C-suite clients to create cutting-edge technical solutions for ethical, explainable and transparent AI.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/purt8n/Rumman_Mona_mixdown719ka.mp3" length="87838234" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the relationship between the government and artificial intelligence?
To unpack this timely question we interview Mona Sloane and Rumman Chowdhury.
Mona Sloane is a sociologist working on inequality in the context of AI design and policy. Mona is a Fellow with NYU’s Institute for Public Knowledge (IPK), where she convenes the ‘Co-Opting AI’ series and co-curates the ‘The Shift’ series. She is also an Adjunct Professor at NYU’s Tandon School of Engineering.
Rumman Chowdhury studies artificial intelligence and humanity. She is currently the Global Lead for Responsible AI at Accenture Applied Intelligence, where she works with C-suite clients to create cutting-edge technical solutions for ethical, explainable and transparent AI.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3659</itunes:duration>
                <itunes:episode>44</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #5 - Social Media's Role in the US Election with Dipayan Ghosh &amp; Vera Zakem</title>
        <itunes:title>All Tech is Human Series #5 - Social Media's Role in the US Election with Dipayan Ghosh &amp; Vera Zakem</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-5-social-medias-role-in-the-us-election-with-dipayan-ghosh-vera-zakem/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-5-social-medias-role-in-the-us-election-with-dipayan-ghosh-vera-zakem/#comments</comments>        <pubDate>Sun, 01 Nov 2020 00:51:16 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/47067c6d-ee55-34fa-ad40-74d33b86c8b2</guid>
                                    <description><![CDATA[<p>The 2016 US election made it clear that social media companies play a profound role in how voters are informed and influenced. What role should social media companies be playing in the upcoming US election?</p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring <a href='https://dipayanghosh.com/'>Dipayan Ghosh</a> (co-director of the Digital Platforms & Democracy Project at the Harvard Kennedy School, author of <a href='https://www.amazon.com/Terms-Disservice-Silicon-Valley-Destructive-ebook'>Terms of Disservice</a>, & former public policy advisor at Facebook) & <a href='https://www.zakemglobal.com/'>Vera Zakem</a> (Senior Policy and Technology Advisor, Institute for Security and Technology, CEO of Zakem Global Strategies, & former strategy and research at Twitter).</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>The 2016 US election made it clear that social media companies play a profound role in how voters are informed and influenced. What role should social media companies be playing in the upcoming US election?</p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring <a href='https://dipayanghosh.com/'>Dipayan Ghosh</a> (co-director of the Digital Platforms & Democracy Project at the Harvard Kennedy School, author of <a href='https://www.amazon.com/Terms-Disservice-Silicon-Valley-Destructive-ebook'>Terms of Disservice</a>, & former public policy advisor at Facebook) & <a href='https://www.zakemglobal.com/'>Vera Zakem</a> (Senior Policy and Technology Advisor, Institute for Security and Technology, CEO of Zakem Global Strategies, & former strategy and research at Twitter).</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/67hamc/ATIH_5_mixdown8akge.mp3" length="96577194" type="audio/mpeg"/>
        <itunes:summary><![CDATA[The 2016 US election made it clear that social media companies play a profound role in how voters are informed and influenced. What role should social media companies be playing in the upcoming US election?
In partnership with All Tech is Human we present this Livestreamed conversation featuring Dipayan Ghosh (co-director of the Digital Platforms & Democracy Project at the Harvard Kennedy School, author of Terms of Disservice, & former public policy advisor at Facebook) & Vera Zakem (Senior Policy and Technology Advisor, Institute for Security and Technology, CEO of Zakem Global Strategies, & former strategy and research at Twitter).
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>4023</itunes:duration>
                <itunes:episode>43</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Facebook Ads, Propaganda, and Global Politics with Nayantara Ranganathan and Manuel Beltrán</title>
        <itunes:title>Facebook Ads, Propaganda, and Global Politics with Nayantara Ranganathan and Manuel Beltrán</itunes:title>
        <link>https://radicalai.podbean.com/e/facebook-ads-propaganda-and-global-politics-with-nayantara-ranganathan-and-manuel-beltran/</link>
                    <comments>https://radicalai.podbean.com/e/facebook-ads-propaganda-and-global-politics-with-nayantara-ranganathan-and-manuel-beltran/#comments</comments>        <pubDate>Wed, 28 Oct 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/78a41f36-c76f-33ac-b554-66896bafa6e5</guid>
                                    <description><![CDATA[<p>What should you know about propaganda and political ads in the age of information? How do they impact democracy across the globe? </p>
<p>To cover this important topic, we welcome to the show Nayantara Ranganathan and Manuel Beltrán.</p>
<p>Nayantara Ranganathan is a lawyer and researcher studying the politics and culture of digital technologies. At the Internet Democracy Project, she worked on applying feminist methods of research and practice to questions of data governance. Within her independent research, she is exploring how technology is remaking law and regulation in its own image.</p>
<p>Manuel Beltrán is an artist and activist. He researches and lectures on art, activism, social movements, post-digital culture and new media. As an activist, he was involved in the Indignados movement in Spain, the Gezi Park protests in Turkey and several forms of independent activism and cyber-activism in Europe and beyond. </p>
<p>Together, Nayantara and Manuel founded the Persuasion Lab, a project exploring new forms of political propaganda on social media. They are also both members of the Real Facebook Oversight Board.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What should you know about propaganda and political ads in the age of information? How do they impact democracy across the globe? </p>
<p>To cover this important topic, we welcome to the show Nayantara Ranganathan and Manuel Beltrán.</p>
<p>Nayantara Ranganathan is a lawyer and researcher studying the politics and culture of digital technologies. At the Internet Democracy Project, she worked on applying feminist methods of research and practice to questions of data governance. Within her independent research, she is exploring how technology is remaking law and regulation in its own image.</p>
<p>Manuel Beltrán is an artist and activist. He researches and lectures on art, activism, social movements, post-digital culture and new media. As an activist, he was involved in the Indignados movement in Spain, the Gezi Park protests in Turkey and several forms of independent activism and cyber-activism in Europe and beyond. </p>
<p>Together, Nayantara and Manuel founded the Persuasion Lab, a project exploring new forms of political propaganda on social media. They are also both members of the Real Facebook Oversight Board.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/73qidr/adwatch_mixdown.mp3" length="82155406" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What should you know about propaganda and political ads in the age of information? How do they impact democracy across the globe? 
To cover this important topic, we welcome to the show Nayantara Ranganathan and Manuel Beltrán.
Nayantara Ranganathan is a lawyer and researcher studying the politics and culture of digital technologies. At the Internet Democracy Project, she worked on applying feminist methods of research and practice to questions of data governance. Within her independent research, she is exploring how technology is remaking law and regulation in its own image.
Manuel Beltrán is an artist and activist. He researches and lectures on art, activism, social movements, post-digital culture and new media. As an activist, he was involved in the Indignados movement in Spain, the Gezi Park protests in Turkey and several forms of independent activism and cyber-activism in Europe and beyond. 
Together, Nayantara and Manuel founded the Persuasion Lab, a project exploring new forms of political propaganda on social media. They are also both members of the Real Facebook Oversight Board.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3422</itunes:duration>
                <itunes:episode>42</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Voter Fraud, Media Regulation, and Civic Design with Shannon McGregor and Whitney Quesenbery</title>
        <itunes:title>Voter Fraud, Media Regulation, and Civic Design with Shannon McGregor and Whitney Quesenbery</itunes:title>
        <link>https://radicalai.podbean.com/e/go-vote-social-media-and-the-us-2020-election-with-shannon-mcgregor-and-whitney-quesenbery/</link>
                    <comments>https://radicalai.podbean.com/e/go-vote-social-media-and-the-us-2020-election-with-shannon-mcgregor-and-whitney-quesenbery/#comments</comments>        <pubDate>Wed, 21 Oct 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/ff15b4a3-78b0-3bde-933f-da5f0e1eab4c</guid>
                                    <description><![CDATA[<p>What is the role that technology and social media should play in voting practices? What should global citizens know about the impact that technology being used in the US 2020 election voting processes might have on the future of democracy worldwide? </p>
<p>In this episode we interview Shannon McGregor and Whitney Quesenbery.</p>
<p>Shannon McGregor is an Assistant Professor in the UNC Hussman School of Journalism and Media and a Senior Researcher with UNC’s Center for Information, Technology, and Public Life. Her research addresses the role of social media and their data in political processes, with a focus on political communication, journalism, public opinion, and gender.</p>
<p>Whitney Quesenbery is the executive director of the Center for Civic Design. Whitney is also co-author of two influential Brennan Center reports that show just how much design matters in elections. She was previously chair for Human Factors and Privacy for the Election Assistance Commission's committee working towards developing voting system guidelines.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is the role that technology and social media should play in voting practices? What should global citizens know about the impact that technology being used in the US 2020 election voting processes might have on the future of democracy worldwide? </p>
<p>In this episode we interview Shannon McGregor and Whitney Quesenbery.</p>
<p>Shannon McGregor is an Assistant Professor in the UNC Hussman School of Journalism and Media and a Senior Researcher with UNC’s Center for Information, Technology, and Public Life. Her research addresses the role of social media and their data in political processes, with a focus on political communication, journalism, public opinion, and gender.</p>
<p>Whitney Quesenbery is the executive director of the Center for Civic Design. Whitney is also co-author of two influential Brennan Center reports that show just how much design matters in elections. She was previously chair for Human Factors and Privacy for the Elections Assistance Commission's committee working towards developing voting system guidelines.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/63sxdm/Whitney_Shan_mixdownau3cd.mp3" length="83119446" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the role that technology and social media should play in voting practices? What should global citizens know about the impact that technology being used in the US 2020 election voting processes might have on the future of democracy worldwide? 
In this episode we interview Shannon McGregor and Whitney Quesenbery.
Shannon McGregor is an Assistant Professor in the UNC Hussman School of Journalism and Media and a Senior Researcher with UNC’s Center for Information, Technology, and Public Life. Her research addresses the role of social media and their data in political processes, with a focus on political communication, journalism, public opinion, and gender.
Whitney Quesenbery is the executive director of the Center for Civic Design. Whitney is also co-author of two influential Brennan Center reports that show just how much design matters in elections. She was previously chair for Human Factors and Privacy for the Elections Assistance Commission's committee working towards developing voting system guidelines.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3462</itunes:duration>
                <itunes:episode>41</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Why We Do This: Reflecting on Six Months of Radical AI with Dylan and Jess</title>
        <itunes:title>Why We Do This: Reflecting on Six Months of Radical AI with Dylan and Jess</itunes:title>
        <link>https://radicalai.podbean.com/e/why-we-do-this-reflecting-on-six-months-of-radical-ai-with-dylan-and-jess/</link>
                    <comments>https://radicalai.podbean.com/e/why-we-do-this-reflecting-on-six-months-of-radical-ai-with-dylan-and-jess/#comments</comments>        <pubDate>Wed, 14 Oct 2020 00:29:33 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/0a5849b4-acb0-3c99-8a10-39eb66744fb1</guid>
                                    <description><![CDATA[<p>In this special episode of The Radical AI Podcast Dylan and Jess pull back the curtain to reflect on six months of the show! From qualitative research to ontological horseplay - this episode has it all!</p>
<p>Full show notes for this episode can be found at <a href='https://www.radicalai.org/'>radicalai.org</a>. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
<p> </p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this special episode of The Radical AI Podcast Dylan and Jess pull back the curtain to reflect on six months of the show! From qualitative research to ontological horseplay - this episode has it all!</p>
<p>Full show notes for this episode can be found at <a href='https://www.radicalai.org/'>radicalai.org</a>. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
<p> </p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/nmrwqg/6_months_mixdown6b0uk.mp3" length="57516046" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this special episode of The Radical AI Podcast Dylan and Jess pull back the curtain to reflect on six months of the show! From qualitative research to ontological horseplay - this episode has it all!
Full show notes for this episode can be found at radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 
 
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2396</itunes:duration>
                <itunes:episode>40</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>More than Fake News: Fighting Media Manipulation with Claire Leibowicz and Emily Saltz from the Partnership on AI</title>
        <itunes:title>More than Fake News: Fighting Media Manipulation with Claire Leibowicz and Emily Saltz from the Partnership on AI</itunes:title>
        <link>https://radicalai.podbean.com/e/fake-news-fighting-media-manipulation-with-claire-leibowicz-and-emily-saltz-from-the-partnership-on-ai/</link>
                    <comments>https://radicalai.podbean.com/e/fake-news-fighting-media-manipulation-with-claire-leibowicz-and-emily-saltz-from-the-partnership-on-ai/#comments</comments>        <pubDate>Wed, 07 Oct 2020 00:23:15 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/8d0ef0ef-e297-36c5-89fc-23a42555b330</guid>
                                    <description><![CDATA[<p>What is media integrity? What is media manipulation? What do you need to know about fake news?</p>
<p>To answer these questions and more we welcome to the show Claire Leibowicz and Emily Saltz -- two representatives from the Partnership on AI’s AI and Media Integrity team.</p>
<p>Claire Leibowicz is a Program Lead directing the strategy and execution of projects in the Partnership on AI’s AI and Media Integrity portfolio. Claire also oversees PAI’s AI and Media Integrity Steering Committee.</p>
<p>Emily Saltz is a Research Fellow at Partnership on AI for the PAI/First Draft Media Manipulation Research Fellowship. Prior to joining PAI, Emily was UX Lead for The News Provenance Project at The New York Times.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is media integrity? What is media manipulation? What do you need to know about fake news?</p>
<p>To answer these questions and more we welcome to the show Claire Leibowicz and Emily Saltz -- two representatives from the Partnership on AI’s AI and Media Integrity team.</p>
<p>Claire Leibowicz is a Program Lead directing the strategy and execution of projects in the Partnership on AI’s AI and Media Integrity portfolio. Claire also oversees PAI’s AI and Media Integrity Steering Committee.</p>
<p>Emily Saltz is a Research Fellow at Partnership on AI for the PAI/First Draft Media Manipulation Research Fellowship. Prior to joining PAI, Emily was UX Lead for The News Provenance Project at The New York Times.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/yw4ru3/PAI_Media_mixdownaun0d.mp3" length="90497482" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is media integrity? What is media manipulation? What do you need to know about fake news?
To answer these questions and more we welcome to the show Claire Leibowicz and Emily Saltz -- two representatives from the Partnership on AI’s AI and Media Integrity team.
Claire Leibowicz is a Program Lead directing the strategy and execution of projects in the Partnership on AI’s AI and Media Integrity portfolio. Claire also oversees PAI’s AI and Media Integrity Steering Committee.
Emily Saltz is a Research Fellow at Partnership on AI for the PAI/First Draft Media Manipulation Research Fellowship. Prior to joining PAI, Emily was UX Lead for The News Provenance Project at The New York Times.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3770</itunes:duration>
                <itunes:episode>39</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>The State of the Union of Surveillance: Are Things Getting Better? with Liz O'Sullivan</title>
        <itunes:title>The State of the Union of Surveillance: Are Things Getting Better? with Liz O'Sullivan</itunes:title>
        <link>https://radicalai.podbean.com/e/the-state-of-the-union-of-surveillance-is-it-getting-better-with-liz-osullivan/</link>
                    <comments>https://radicalai.podbean.com/e/the-state-of-the-union-of-surveillance-is-it-getting-better-with-liz-osullivan/#comments</comments>        <pubDate>Wed, 30 Sep 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/3416f69d-f6dc-3050-939f-8d820f3fe27f</guid>
                                    <description><![CDATA[<p>What should you know about the state of surveillance in the world today? What can we do as consumers to stop unintentionally contributing to surveillance? The Facial Recognition industry had a reckoning after the murder of George Floyd - are things getting better?</p>
<p>To answer these questions we welcome Liz O'Sullivan to the show.</p>
<p>Liz O'Sullivan is the Surveillance Technology Oversight Project's technology director. She is also the co-founder and vice president of commercial operations at Arthur AI, an AI explainability and bias monitoring startup. Liz has been featured in articles on ethical AI in the NY Times, The Intercept, and The Register, and has written about AI for the ACLU and The Campaign to Stop Killer Robots. She has spent 10 years in tech, mainly in the AI space, most recently as the head of image annotations for the computer vision startup, Clarifai. Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p>

</p>
<p> </p>
<p> </p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What should you know about the state of surveillance in the world today? What can we do as consumers to stop unintentionally contributing to surveillance? The Facial Recognition industry had a reckoning after the murder of George Floyd - are things getting better?</p>
<p>To answer these questions we welcome Liz O'Sullivan to the show.</p>
<p>Liz O'Sullivan is the Surveillance Technology Oversight Project's technology director. She is also the co-founder and vice president of commercial operations at Arthur AI, an AI explainability and bias monitoring startup. Liz has been featured in articles on ethical AI in the NY Times, The Intercept, and The Register, and has written about AI for the ACLU and The Campaign to Stop Killer Robots. She has spent 10 years in tech, mainly in the AI space, most recently as the head of image annotations for the computer vision startup, Clarifai. Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p><br>
<br>
</p>
<p> </p>
<p> </p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/msuyqf/Liz_O_Sulli_mixdownbgdhr.mp3" length="83775494" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What should you know about the state of surveillance in the world today? What can we do as consumers to stop unintentionally contributing to surveillance? The Facial Recognition industry had a reckoning after the murder of George Floyd - are things getting better?
To answer these questions we welcome Liz O'Sullivan to the show.
Liz O'Sullivan is the Surveillance Technology Oversight Project's technology director. She is also the co-founder and vice president of commercial operations at Arthur AI, an AI explainability and bias monitoring startup. Liz has been featured in articles on ethical AI in the NY Times, The Intercept, and The Register, and has written about AI for the ACLU and The Campaign to Stop Killer Robots. She has spent 10 years in tech, mainly in the AI space, most recently as the head of image annotations for the computer vision startup, Clarifai. Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod

 
 
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3490</itunes:duration>
                <itunes:episode>38</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Checklists and Principles and Values, Oh My! Practices for Co-Designing Ethical Technologies with Michael Madaio</title>
        <itunes:title>Checklists and Principles and Values, Oh My! Practices for Co-Designing Ethical Technologies with Michael Madaio</itunes:title>
        <link>https://radicalai.podbean.com/e/checklists-and-principles-and-values-oh-my-practices-for-co-designing-ethical-technologies-with-michael-madaio/</link>
                    <comments>https://radicalai.podbean.com/e/checklists-and-principles-and-values-oh-my-practices-for-co-designing-ethical-technologies-with-michael-madaio/#comments</comments>        <pubDate>Wed, 23 Sep 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/10f284fb-b169-382c-8698-3f1be3238621</guid>
                                    <description><![CDATA[<p>What are the limitations of using checklists for fairness? What are the alternatives? How do we effectively design ethical AI systems around our collective values? </p>
<p>To answer these questions we welcome Dr. Michael Madaio to the show.</p>
<p>Madaio is a postdoc at Microsoft Research working with the FATE (Fairness, Accountability, Transparency, and Ethics in AI) research group. Michael works at the intersection of human-computer interaction, AI/ML, and public interest technology, where he uses human-centered methods to understand how we might equitably co-design data-driven technologies in the public interest with impacted stakeholders. </p>
<p>Michael, along with other collaborators at Microsoft FATE, authored the paper: “Co-Designing Checklists to Understand Organizational Challenges and Opportunities around Fairness in AI”, which is one of the major focuses of this interview! </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p>

</p>
<p> </p>
<p> </p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What are the limitations of using checklists for fairness? What are the alternatives? How do we effectively design ethical AI systems around our collective values? </p>
<p>To answer these questions we welcome Dr. Michael Madaio to the show.</p>
<p>Madaio is a postdoc at Microsoft Research working with the FATE (Fairness, Accountability, Transparency, and Ethics in AI) research group. Michael works at the intersection of human-computer interaction, AI/ML, and public interest technology, where he uses human-centered methods to understand how we might equitably co-design data-driven technologies in the public interest with impacted stakeholders. </p>
<p>Michael, along with other collaborators at Microsoft FATE, authored the paper: “Co-Designing Checklists to Understand Organizational Challenges and Opportunities around Fairness in AI”, which is one of the major focuses of this interview! </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p><br>
<br>
</p>
<p> </p>
<p> </p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ffpw8n/MichaelMadaio_mixdown.mp3" length="84742664" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What are the limitations of using checklists for fairness? What are the alternatives? How do we effectively design ethical AI systems around our collective values? 
To answer these questions we welcome Dr. Michael Madaio to the show.
Madaio is a postdoc at Microsoft Research working with the FATE (Fairness, Accountability, Transparency, and Ethics in AI) research group. Michael works at the intersection of human-computer interaction, AI/ML, and public interest technology, where he uses human-centered methods to understand how we might equitably co-design data-driven technologies in the public interest with impacted stakeholders. 
Michael, along with other collaborators at Microsoft FATE, authored the paper: “Co-Designing Checklists to Understand Organizational Challenges and Opportunities around Fairness in AI”, which is one of the major focuses of this interview! 
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod

 
 
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3530</itunes:duration>
                <itunes:episode>37</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Resistance Against the Tech to Prison Pipeline with the Coalition for Critical Technology</title>
        <itunes:title>Resistance Against the Tech to Prison Pipeline with the Coalition for Critical Technology</itunes:title>
        <link>https://radicalai.podbean.com/e/resistance-against-the-tech-to-prison-pipeline-with-the-coalition-for-critical-technology/</link>
                    <comments>https://radicalai.podbean.com/e/resistance-against-the-tech-to-prison-pipeline-with-the-coalition-for-critical-technology/#comments</comments>        <pubDate>Wed, 16 Sep 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/bf4360d7-26af-3ea9-84ee-34cb907d29fc</guid>
                                    <description><![CDATA[<p>What is the tech to prison pipeline? How can we build infrastructures of resistance to it? What role does academia play in perpetuating carceral technology?</p>
<p>To answer these questions we welcome to the show Sonja Solomun and Audrey Beard, two representatives from the Coalition for Critical Technology. </p>
<p>Sonja Solomun works on the politics of media and technology, including the history of digital platforms, polarization, and on fair and accountable governance of technology. She is currently the Research Director of the Centre for Media, Technology and Democracy at McGill’s Max Bell School of Public Policy and is finishing her PhD at the Department of Communication Studies at McGill University.</p>
<p>Audrey Beard is a critical AI researcher who explores the politics of artificial intelligence systems and who earned their Master's in Computer Science at Rensselaer Polytechnic Institute.</p>
<p>Audrey and Sonja co-founded the Coalition for Critical Technology, along with NM Amadeo, Chelsea Barabas, Theo Dryer, and Beth Semel. The mission of the Coalition for Critical Technology is to work towards justice by resisting technologies that exacerbate inequality, reinforce racism, and support the carceral state.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p>

</p>
<p> </p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is the tech to prison pipeline? How can we build infrastructures of resistance to it? What role does academia play in perpetuating carceral technology?</p>
<p>To answer these questions we welcome to the show Sonja Solomun and Audrey Beard, two representatives from the Coalition for Critical Technology. </p>
<p>Sonja Solomun works on the politics of media and technology, including the history of digital platforms, polarization, and on fair and accountable governance of technology. She is currently the Research Director of the Centre for Media, Technology and Democracy at McGill’s Max Bell School of Public Policy and is finishing her PhD at the Department of Communication Studies at McGill University.</p>
<p>Audrey Beard is a critical AI researcher who explores the politics of artificial intelligence systems and who earned their Master's in Computer Science at Rensselaer Polytechnic Institute.</p>
<p>Audrey and Sonja co-founded the Coalition for Critical Technology, along with NM Amadeo, Chelsea Barabas, Theo Dryer, and Beth Semel. The mission of the Coalition for Critical Technology is to work towards justice by resisting technologies that exacerbate inequality, reinforce racism, and support the carceral state.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p><br>
<br>
</p>
<p> </p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/xrj4vr/CCT_mixdown2.mp3" length="85666640" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the tech to prison pipeline? How can we build infrastructures of resistance to it? What role does academia play in perpetuating carceral technology?
To answer these questions we welcome to the show Sonja Solomun and Audrey Beard, two representatives from the Coalition for Critical Technology. 
Sonja Solomun works on the politics of media and technology, including the history of digital platforms, polarization, and on fair and accountable governance of technology. She is currently the Research Director of the Centre for Media, Technology and Democracy at McGill’s Max Bell School of Public Policy and is finishing her PhD at the Department of Communication Studies at McGill University.
Audrey Beard is a critical AI researcher who explores the politics of artificial intelligence systems and who earned their Master's in Computer Science at Rensselaer Polytechnic Institute.
Audrey and Sonja co-founded the Coalition for Critical Technology, along with NM Amadeo, Chelsea Barabas, Theo Dryer, and Beth Semel. The mission of the Coalition for Critical Technology is to work towards justice by resisting technologies that exacerbate inequality, reinforce racism, and support the carceral state.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod

 
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3569</itunes:duration>
                <itunes:episode>36</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #4 - Building the Next Generation of Responsible Technologists &amp; Changemakers with Rumman Chowdhury and Yoav Schlesinger</title>
        <itunes:title>All Tech is Human Series #4 - Building the Next Generation of Responsible Technologists &amp; Changemakers with Rumman Chowdhury and Yoav Schlesinger</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-4-building-the-next-generation-of-responsible-technologists-changemakers-with-rumman-chowdhury-and-yoav-schlesinger/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-4-building-the-next-generation-of-responsible-technologists-changemakers-with-rumman-chowdhury-and-yoav-schlesinger/#comments</comments>        <pubDate>Sun, 13 Sep 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/6b0bfeb2-7ea4-369b-8e28-ccfd082cba8b</guid>
                                    <description><![CDATA[<p>How can we inform and inspire the next generation of responsible technologists and changemakers? How do you get involved as someone new to the responsible AI field?</p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring <a href='https://twitter.com/ruchowdh'>Rumman Chowdhury</a> (Responsible AI Lead at Accenture) and <a href='https://twitter.com/yschlesinger'>Yoav Schlesinger</a> (Principal, Ethical AI Practice at Salesforce).</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How can we inform and inspire the next generation of responsible technologists and changemakers? How do you get involved as someone new to the responsible AI field?</p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring <a href='https://twitter.com/ruchowdh'>Rumman Chowdhury</a> (Responsible AI Lead at Accenture) and <a href='https://twitter.com/yschlesinger'>Yoav Schlesinger</a> (Principal, Ethical AI Practice at Salesforce).</p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/grpvdj/ATIH_4_mixdown7tsk3.mp3" length="80529058" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How can we inform and inspire the next generation of responsible technologists and changemakers? How do you get involved as someone new to the responsible AI field?
In partnership with All Tech is Human we present this Livestreamed conversation featuring Rumman Chowdhury (Responsible AI Lead at Accenture) and Yoav Schlesinger (Principal, Ethical AI Practice at Salesforce).
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3355</itunes:duration>
                <itunes:episode>35</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Democratizing AI: Inclusivity, Accountability, &amp; Collaboration with Anima Anandkumar</title>
        <itunes:title>Democratizing AI: Inclusivity, Accountability, &amp; Collaboration with Anima Anandkumar</itunes:title>
        <link>https://radicalai.podbean.com/e/democratizing-ai-inclusivity-accountability-collaboration-with-anima-anandkumar/</link>
                    <comments>https://radicalai.podbean.com/e/democratizing-ai-inclusivity-accountability-collaboration-with-anima-anandkumar/#comments</comments>        <pubDate>Wed, 09 Sep 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/060e4569-cc7a-32c8-a08f-a018e40a44d2</guid>
                                    <description><![CDATA[<p>What are current attitudes towards AI Ethics from within the tech industry? How can we make computer science a more inclusive discipline for women? What does it mean to democratize AI? Why should we? How can we?</p>
<p>To answer these questions and more we welcome Dr. Anima Anandkumar to the show.</p>
<p> Anima holds dual positions in academia and industry. In academia - she is a professor in the Caltech Computing and Mathematical Sciences department. In industry - she is the director of machine learning research at NVIDIA. At NVIDIA, she is leading the research group that develops next-generation AI algorithms. Anima is also the youngest named chair professor at Caltech, where she co-leads the AI4science initiative.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What are current attitudes towards AI Ethics from within the tech industry? How can we make computer science a more inclusive discipline for women? What does it mean to democratize AI? Why should we? How can we?</p>
<p>To answer these questions and more we welcome Dr. Anima Anandkumar to the show.</p>
<p> Anima holds dual positions in academia and industry. In academia - she is a professor in the Caltech Computing and Mathematical Sciences department. In industry - she is the director of machine learning research at NVIDIA. At NVIDIA, she is leading the research group that develops next-generation AI algorithms. Anima is also the youngest named chair professor at Caltech, where she co-leads the AI4science initiative.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ubd8rz/Anima_mixdown.mp3" length="86263218" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What are current attitudes towards AI Ethics from within the tech industry? How can we make computer science a more inclusive discipline for women? What does it mean to democratize AI? Why should we? How can we?
To answer these questions and more we welcome Dr. Anima Anandkumar to the show.
 Anima holds dual positions in academia and industry. In academia - she is a professor in the Caltech Computing and Mathematical Sciences department. In industry - she is the director of machine learning research at NVIDIA. At NVIDIA, she is leading the research group that develops next-generation AI algorithms. Anima is also the youngest named chair professor at Caltech, where she co-leads the AI4science initiative.
 
Full show notes for this episode can be found at Radicalai.org. 
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3593</itunes:duration>
                <itunes:episode>34</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Designing for Intelligibility: Building Responsible AI with Jenn Wortman Vaughan</title>
        <itunes:title>Designing for Intelligibility: Building Responsible AI with Jenn Wortman Vaughan</itunes:title>
        <link>https://radicalai.podbean.com/e/designing-for-intelligibility-building-responsible-ai-with-jenn-wortman-vaughan/</link>
                    <comments>https://radicalai.podbean.com/e/designing-for-intelligibility-building-responsible-ai-with-jenn-wortman-vaughan/#comments</comments>        <pubDate>Wed, 02 Sep 2020 00:31:36 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/c697849e-44ba-37cc-baa6-798a302e8088</guid>
                                    <description><![CDATA[<p>What are the differences between explainability, intelligibility, interpretability, and transparency in Responsible AI? What is human-centered machine learning? Should we be regulating machine learning transparency? </p>
<p> </p>
<p>To answer these questions and more we welcome Dr. Jenn Wortman Vaughan to the show.</p>
<p> </p>
<p>Jenn is a Senior Principal Researcher at Microsoft Research. She has been leading efforts at Microsoft around transparency, intelligibility, and explanation under the umbrella of Aether, their company-wide initiative focused on responsible AI. Jenn’s research focuses broadly on the interaction between people and AI, with a passion for AI that augments, rather than replaces, human abilities.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What are the differences between explainability, intelligibility, interpretability, and transparency in Responsible AI? What is human-centered machine learning? Should we be regulating machine learning transparency? </p>
<p> </p>
<p>To answer these questions and more we welcome Dr. Jenn Wortman Vaughan to the show.</p>
<p> </p>
<p>Jenn is a Senior Principal Researcher at Microsoft Research. She has been leading efforts at Microsoft around transparency, intelligibility, and explanation under the umbrella of Aether, their company-wide initiative focused on responsible AI. Jenn’s research focuses broadly on the interaction between people and AI, with a passion for AI that augments, rather than replaces, human abilities.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/xte5zd/JWV_mixdown.mp3" length="105426956" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What are the differences between explainability, intelligibility, interpretability, and transparency in Responsible AI? What is human-centered machine learning? Should we be regulating machine learning transparency? 
 
To answer these questions and more we welcome Dr. Jenn Wortman Vaughan to the show.
 
Jenn is a Senior Principal Researcher at Microsoft Research. She has been leading efforts at Microsoft around transparency, intelligibility, and explanation under the umbrella of Aether, their company-wide initiative focused on responsible AI. Jenn’s research focuses broadly on the interaction between people and AI, with a passion for AI that augments, rather than replaces, human abilities.
 
Full show notes for this episode can be found at Radicalai.org. 
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>4392</itunes:duration>
                <itunes:episode>33</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #3 - Big Tech, Power, &amp; Diplomacy with Alexis Wichowski &amp; Rana Sarkar</title>
        <itunes:title>All Tech is Human Series #3 - Big Tech, Power, &amp; Diplomacy with Alexis Wichowski &amp; Rana Sarkar</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-3-big-tech-power-diplomacy-with-alexis-wichowski-rana-sarkar/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-3-big-tech-power-diplomacy-with-alexis-wichowski-rana-sarkar/#comments</comments>        <pubDate>Wed, 26 Aug 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/f1b0c9ae-b5f8-3827-b28b-6aef132db47e</guid>
                                    <description><![CDATA[<p>How should diplomacy and international cooperation adjust to the significant global power that major tech companies wield? </p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring Alexis Wichowski (adjunct associate professor in Columbia University’s School of International and Public Affairs, teaching in the Technology, Media, and Communications specialization) and Rana Sarkar (Consul General of Canada for San Francisco and Silicon Valley, with accreditation for Northern California and Hawaii.)</p>
<p> </p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p> </p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How should diplomacy and international cooperation adjust to the significant global power that major tech companies wield? </p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring Alexis Wichowski (adjunct associate professor in Columbia University’s School of International and Public Affairs, teaching in the Technology, Media, and Communications specialization) and Rana Sarkar (Consul General of Canada for San Francisco and Silicon Valley, with accreditation for Northern California and Hawaii.)</p>
<p> </p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p> </p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/iedepq/ATIH_3_mixdown7qjt0.mp3" length="79518068" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How should diplomacy and international cooperation adjust to the significant global power that major tech companies wield? 
In partnership with All Tech is Human we present this Livestreamed conversation featuring Alexis Wichowski (adjunct associate professor in Columbia University’s School of International and Public Affairs, teaching in the Technology, Media, and Communications specialization) and Rana Sarkar (Consul General of Canada for San Francisco and Silicon Valley, with accreditation for Northern California and Hawaii.)
 
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
 
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3312</itunes:duration>
                <itunes:episode>32</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Is Uber Moral? The Ethical Crisis of the Gig Economy with Veena Dubal</title>
        <itunes:title>Is Uber Moral? The Ethical Crisis of the Gig Economy with Veena Dubal</itunes:title>
        <link>https://radicalai.podbean.com/e/is-uber-evil-the-moral-crisis-of-the-gig-economy-with-veena-dubal/</link>
                    <comments>https://radicalai.podbean.com/e/is-uber-evil-the-moral-crisis-of-the-gig-economy-with-veena-dubal/#comments</comments>        <pubDate>Wed, 19 Aug 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/8bacc018-0150-3386-ab72-c470cabf2290</guid>
                                    <description><![CDATA[<p>What is precarious work and how does it impact the psychology of labor? How might platforms like Uber and Lyft be negatively impacting their workers? How do gig economy apps control the lives of those who use them for work?</p>
<p> </p>
<p>To answer these questions and more we welcome Dr. Veena Dubal to the show.</p>
<p> </p>
<p>Veena is a professor of Law at UC Hastings. Veena received her J.D. and PhD from UC Berkeley, where she conducted an ethnography of the San Francisco taxi industry. Veena’s research focuses on the intersection of law, technology, and precarious work. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is precarious work and how does it impact the psychology of labor? How might platforms like Uber and Lyft be negatively impacting their workers? How do gig economy apps control the lives of those who use them for work?</p>
<p> </p>
<p>To answer these questions and more we welcome Dr. Veena Dubal to the show.</p>
<p> </p>
<p>Veena is a professor of Law at UC Hastings. Veena received her J.D. and PhD from UC Berkeley, where she conducted an ethnography of the San Francisco taxi industry. Veena’s research focuses on the intersection of law, technology, and precarious work. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/f65x32/Veena_mixdown2.mp3" length="85960119" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is precarious work and how does it impact the psychology of labor? How might platforms like Uber and Lyft be negatively impacting their workers? How do gig economy apps control the lives of those who use them for work?
 
To answer these questions and more we welcome Dr. Veena Dubal to the show.
 
Veena is a professor of Law at UC Hastings. Veena received her J.D. and PhD from UC Berkeley, where she conducted an ethnography of the San Francisco taxi industry. Veena’s research focuses on the intersection of law, technology, and precarious work. 
 
Full show notes for this episode can be found at Radicalai.org. 
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3581</itunes:duration>
                <itunes:episode>31</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Finding Joy in Meaningful Work: AI for Social Good in Social Work &amp; Social Justice with Eric Rice</title>
        <itunes:title>Finding Joy in Meaningful Work: AI for Social Good in Social Work &amp; Social Justice with Eric Rice</itunes:title>
        <link>https://radicalai.podbean.com/e/finding-joy-in-meaningful-work-ai-for-social-good-in-social-work-social-justice-with-eric-rice/</link>
                    <comments>https://radicalai.podbean.com/e/finding-joy-in-meaningful-work-ai-for-social-good-in-social-work-social-justice-with-eric-rice/#comments</comments>        <pubDate>Wed, 12 Aug 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/da5d8044-550c-3bee-bdb6-f8dae3d1333c</guid>
                                    <description><![CDATA[<p>Where is the limit in the use of technology to solve societal problems? How can Social Work utilize AI to address social injustice? To answer these questions and more we welcome Dr. Eric Rice to the show. </p>
<p> </p>
<p>Eric is an associate professor and the founding co-director of the USC Center for Artificial Intelligence in Society, a joint venture of the USC Suzanne Dworak-Peck School of Social Work and the USC Viterbi School of Engineering. Rice received a BA from the University of Chicago, and an MA and PhD in Sociology from Stanford University. Eric’s research focuses on community outreach, network science, and the use of social networking technology by high-risk youth.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Where is the limit in the use of technology to solve societal problems? How can Social Work utilize AI to address social injustice? To answer these questions and more we welcome Dr. Eric Rice to the show. </p>
<p> </p>
<p>Eric is an associate professor and the founding co-director of the USC Center for Artificial Intelligence in Society, a joint venture of the USC Suzanne Dworak-Peck School of Social Work and the USC Viterbi School of Engineering. Rice received a BA from the University of Chicago, and an MA and PhD in Sociology from Stanford University. Eric’s research focuses on community outreach, network science, and the use of social networking technology by high-risk youth.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/xtgabz/Eric_Rice_mixdown6auvd.mp3" length="85705345" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Where is the limit in the use of technology to solve societal problems? How can Social Work utilize AI to address social injustice? To answer these questions and more we welcome Dr. Eric Rice to the show. 
 
Eric is an associate professor and the founding co-director of the USC Center for Artificial Intelligence in Society, a joint venture of the USC Suzanne Dworak-Peck School of Social Work and the USC Viterbi School of Engineering. Rice received a BA from the University of Chicago, and an MA and PhD in Sociology from Stanford University. Eric’s research focuses on community outreach, network science, and the use of social networking technology by high-risk youth.
 
Full show notes for this episode can be found at Radicalai.org. 
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3570</itunes:duration>
                <itunes:episode>30</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>What Should You Know About AI for Social Good? - Panel with Anamika Barman-Adhikari, Fei Fang, &amp; Amulya Yadav - BONUS EPISODE</title>
        <itunes:title>What Should You Know About AI for Social Good? - Panel with Anamika Barman-Adhikari, Fei Fang, &amp; Amulya Yadav - BONUS EPISODE</itunes:title>
        <link>https://radicalai.podbean.com/e/what-should-you-know-about-ai-for-social-good-panel-with-anamika-barman-adhikari-fei-fang-amulya-yadav-bonus-episode/</link>
                    <comments>https://radicalai.podbean.com/e/what-should-you-know-about-ai-for-social-good-panel-with-anamika-barman-adhikari-fei-fang-amulya-yadav-bonus-episode/#comments</comments>        <pubDate>Sun, 09 Aug 2020 08:30:26 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/3e68601d-9c64-315c-8854-abef49d4f386</guid>
                                    <description><![CDATA[<p>What is AI for Social Good? </p>
<p>In this special bonus panel episode on AI for Social Good 101, we interviewed Dr. Anamika Barman-Adhikari, Dr. Fei Fang, and Dr. Amulya Yadav.  </p>
<p>Anamika Barman-Adhikari is an Associate Professor of social work at the University of Denver. She received her Ph.D. in Social Work from University of Southern California.</p>
<p>Fei Fang is an Assistant Professor at the Institute for Software Research in the School of Computer Science at Carnegie Mellon University. Before joining CMU, she was a Postdoctoral Fellow at the Center for Research on Computation and Society (CRCS) at Harvard University.</p>
<p>Amulya Yadav is an Assistant Professor in the College of Information Sciences and Technology at Penn State University. He also holds an affiliate faculty appointment at the Center for Artificial Intelligence in Society at USC.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is AI for Social Good? </p>
<p>In this special bonus panel episode on AI for Social Good 101, we interviewed Dr. Anamika Barman-Adhikari, Dr. Fei Fang, and Dr. Amulya Yadav.  </p>
<p>Anamika Barman-Adhikari is an Associate Professor of social work at the University of Denver. She received her Ph.D. in Social Work from University of Southern California.</p>
<p>Fei Fang is an Assistant Professor at the Institute for Software Research in the School of Computer Science at Carnegie Mellon University. Before joining CMU, she was a Postdoctoral Fellow at the Center for Research on Computation and Society (CRCS) at Harvard University.</p>
<p>Amulya Yadav is an Assistant Professor in the College of Information Sciences and Technology at Penn State University. He also holds an affiliate faculty appointment at the Center for Artificial Intelligence in Society at USC.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/rdrxrz/AI4SG_Panel_mixdown8iols.mp3" length="83404173" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is AI for Social Good? 
In this special bonus panel episode on AI for Social Good 101, we interviewed Dr. Anamika Barman-Adhikari, Dr. Fei Fang, and Dr. Amulya Yadav.  
Anamika Barman-Adhikari is an Associate Professor of social work at the University of Denver. She received her Ph.D. in Social Work from University of Southern California.
Fei Fang is an Assistant Professor at the Institute for Software Research in the School of Computer Science at Carnegie Mellon University. Before joining CMU, she was a Postdoctoral Fellow at the Center for Research on Computation and Society (CRCS) at Harvard University.
Amulya Yadav is an Assistant Professor in the College of Information Sciences and Technology at Penn State University. He also holds an affiliate faculty appointment at the Center for Artificial Intelligence in Society at USC.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3474</itunes:duration>
                <itunes:episode>29</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Ethically Aligned Design &amp; Applied AI Ethics with John C. Havens</title>
        <itunes:title>Ethically Aligned Design &amp; Applied AI Ethics with John C. Havens</itunes:title>
        <link>https://radicalai.podbean.com/e/ethically-aligned-design-applied-ai-ethics-with-john-c-havens/</link>
                    <comments>https://radicalai.podbean.com/e/ethically-aligned-design-applied-ai-ethics-with-john-c-havens/#comments</comments>        <pubDate>Wed, 05 Aug 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/a59f315f-f9bd-3ecb-8c8e-da931ef0a4f0</guid>
                                    <description><![CDATA[<p>What is IEEE and what is their “ethically aligned design” initiative? How can positive visions for the future help us create better technology? What do kindness and wellbeing have to do with AI Ethics?</p>
<p>To answer these questions and more we welcome John C. Havens to the show. </p>
<p>John is the current Executive Director of the Global Initiative on Ethics of Autonomous and Intelligent Systems at The Institute of Electrical and Electronics Engineers (IEEE). He is a contributing writer for Mashable, The Guardian, and The Huffington Post. John is the author of Heartificial Intelligence: Embracing Our Humanity to Maximize Machines, among others. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is IEEE and what is their “ethically aligned design” initiative? How can positive visions for the future help us create better technology? What do kindness and wellbeing have to do with AI Ethics?</p>
<p>To answer these questions and more we welcome John C. Havens to the show. </p>
<p>John is the current Executive Director of the Global Initiative on Ethics of Autonomous and Intelligent Systems at The Institute of Electrical and Electronics Engineers (IEEE). He is a contributing writer for Mashable, The Guardian, and The Huffington Post. John is the author of Heartificial Intelligence: Embracing Our Humanity to Maximize Machines, among others. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/cssknu/John_C_Havens_mixdownb9ke1.mp3" length="87936417" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is IEEE and what is their “ethically aligned design” initiative? How can positive visions for the future help us create better technology? What do kindness and wellbeing have to do with AI Ethics?
To answer these questions and more we welcome John C. Havens to the show. 
John is the current Executive Director of the Global Initiative on Ethics of Autonomous and Intelligent Systems at The Institute of Electrical and Electronics Engineers (IEEE). He is a contributing writer for Mashable, The Guardian, and The Huffington Post. John is the author of Heartificial Intelligence: Embracing Our Humanity to Maximize Machines, among others. 
 
Full show notes for this episode can be found at Radicalai.org. 
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3663</itunes:duration>
                <itunes:episode>28</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #2 - Data Discrimination &amp; Algorithmic Bias with Safiya Noble &amp; Meredith Broussard</title>
        <itunes:title>All Tech is Human Series #2 - Data Discrimination &amp; Algorithmic Bias with Safiya Noble &amp; Meredith Broussard</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-2-data-discrimination-algorithmic-bias-with-safiya-noble-meredith-broussard/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-2-data-discrimination-algorithmic-bias-with-safiya-noble-meredith-broussard/#comments</comments>        <pubDate>Wed, 29 Jul 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/a5e375aa-eb5b-38ba-879a-7cc76e457337</guid>
                                    <description><![CDATA[<p>How can we reduce data discrimination & algorithmic bias that perpetuate gender and racial inequalities?</p>
<p> </p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring Safiya Noble (Associate Professor at the University of California, Los Angeles (UCLA) in the Department of Information Studies and author of Algorithms of Oppression: How Search Engines Reinforce Racism) and</p>
<p>Meredith Broussard (Associate Professor at the Arthur L. Carter Journalism Institute of New York University and the author of Artificial Unintelligence: How Computers Misunderstand the World). </p>
<p> </p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p> </p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How can we reduce data discrimination & algorithmic bias that perpetuate gender and racial inequalities?</p>
<p> </p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring Safiya Noble (Associate Professor at the University of California, Los Angeles (UCLA) in the Department of Information Studies and author of <em>Algorithms of Oppression: How Search Engines Reinforce Racism</em>) and</p>
<p>Meredith Broussard (Associate Professor at the Arthur L. Carter Journalism Institute of New York University and the author of <em>Artificial Unintelligence: How Computers Misunderstand the World</em>). </p>
<p> </p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p> </p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/vfhtby/atih_2_mixdown6fe82.mp3" length="71038898" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How can we reduce data discrimination & algorithmic bias that perpetuate gender and racial inequalities?
 
In partnership with All Tech is Human we present this Livestreamed conversation featuring Safiya Noble (Associate Professor at the University of California, Los Angeles (UCLA) in the Department of Information Studies and author of Algorithms of Oppression: How Search Engines Reinforce Racism) and
Meredith Broussard (Associate Professor at the Arthur L. Carter Journalism Institute of New York University and the author of Artificial Unintelligence: How Computers Misunderstand the World). 
 
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
 
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2959</itunes:duration>
                <itunes:episode>27</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Ghost Work and the Role of Compassion in Tech Ethics with Mary Gray</title>
        <itunes:title>Ghost Work and the Role of Compassion in Tech Ethics with Mary Gray</itunes:title>
        <link>https://radicalai.podbean.com/e/ghost-work-and-the-role-of-compassion-in-tech-ethics-with-mary-gray/</link>
                    <comments>https://radicalai.podbean.com/e/ghost-work-and-the-role-of-compassion-in-tech-ethics-with-mary-gray/#comments</comments>        <pubDate>Wed, 22 Jul 2020 01:30:48 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/672a6b70-dc1e-393c-8593-4c99b5a6a159</guid>
                                    <description><![CDATA[<p>In what way does technology make us more or less visible to each other? What is Ghost Work and how might it impact the future of work? How can AI Ethicists relate more intimately with compassion? To answer these questions and more we welcome Dr. Mary L. Gray to the show. </p>
<p> </p>
<p>Dr. Mary L. Gray is a Senior Principal Researcher at Microsoft Research and Faculty Associate at Harvard University’s Berkman Klein Center for Internet and Society. Along with her research, Mary teaches at Indiana University, maintaining an appointment as an Associate Professor of the Media School, with affiliations in American Studies, Anthropology, and Gender Studies. She is also the co-author, with Siddharth Suri, of Ghost Work: How to Stop Silicon Valley from Building a New Global Underclass. Mary is an anthropologist and media scholar by training, and focuses on how everyday uses of technologies transform people’s lives.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In what way does technology make us more or less visible to each other? What is Ghost Work and how might it impact the future of work? How can AI Ethicists relate more intimately with compassion? To answer these questions and more we welcome Dr. Mary L. Gray to the show. </p>
<p> </p>
<p>Dr. Mary L. Gray is a Senior Principal Researcher at Microsoft Research and Faculty Associate at Harvard University’s Berkman Klein Center for Internet and Society. Along with her research, Mary teaches at Indiana University, maintaining an appointment as an Associate Professor of the Media School, with affiliations in American Studies, Anthropology, and Gender Studies. She is also the co-author, with Siddharth Suri, of Ghost Work: How to Stop Silicon Valley from Building a New Global Underclass. Mary is an anthropologist and media scholar by training, and focuses on how everyday uses of technologies transform people’s lives.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/a2ksrf/mary_gray_mixdown874l5.mp3" length="88616871" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In what way does technology make us more or less visible to each other? What is Ghost Work and how might it impact the future of work? How can AI Ethicists relate more intimately with compassion? To answer these questions and more we welcome Dr. Mary L. Gray to the show. 
 
Dr. Mary L. Gray is a Senior Principal Researcher at Microsoft Research and Faculty Associate at Harvard University’s Berkman Klein Center for Internet and Society. Along with her research, Mary teaches at Indiana University, maintaining an appointment as an Associate Professor of the Media School, with affiliations in American Studies, Anthropology, and Gender Studies. She is also the co-author, with Siddharth Suri, of Ghost Work: How to Stop Silicon Valley from Building a New Global Underclass. Mary is an anthropologist and media scholar by training, and focuses on how everyday uses of technologies transform people’s lives.
 
Full show notes for this episode can be found at Radicalai.org. 
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3692</itunes:duration>
                <itunes:episode>26</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Minisode #4 - Advice, Love, and Gratitude: Happy Three Months From Radical AI</title>
        <itunes:title>Minisode #4 - Advice, Love, and Gratitude: Happy Three Months From Radical AI</itunes:title>
        <link>https://radicalai.podbean.com/e/minisode-4-advice-love-and-gratitude-happy-three-months-from-radical-ai/</link>
                    <comments>https://radicalai.podbean.com/e/minisode-4-advice-love-and-gratitude-happy-three-months-from-radical-ai/#comments</comments>        <pubDate>Sun, 19 Jul 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/a18016d8-94e9-5e4e-a57f-b58a37f1be36</guid>
                                    <description><![CDATA[<p>Happy Three Months of the Radical AI Podcast! In this minisode we share the compiled advice given from our guests over our first 18 interviews, celebrate the Radical AI Community, and give thanks for the journey thus far. </p>
<p>Thank you so much for your support! </p>
<p>As always, show notes can be found on radicalai.org - along with a written blogpost with extended quotes of advice taken from our conversations with our guests. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Happy Three Months of the Radical AI Podcast! In this minisode we share the compiled advice given from our guests over our first 18 interviews, celebrate the Radical AI Community, and give thanks for the journey thus far. </p>
<p>Thank you so much for your support! </p>
<p>As always, show notes can be found on radicalai.org - along with a written blogpost with extended quotes of advice taken from our conversations with our guests. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ij5fi4/Minisode_4_mixdown1_6wr8a.mp3" length="39774452" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Happy Three Months of the Radical AI Podcast! In this minisode we share the compiled advice given from our guests over our first 18 interviews, celebrate the Radical AI Community, and give thanks for the journey thus far. 
Thank you so much for your support! 
As always, show notes can be found on radicalai.org - along with a written blogpost with extended quotes of advice taken from our conversations with our guests. ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1656</itunes:duration>
                <itunes:episode>25</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Surveillance, Stigma &amp; Sociotechnical Design for HIV in Dating and Hookup Platforms with Calvin Liang, Jevan Hutson, and Os Keyes</title>
        <itunes:title>Surveillance, Stigma &amp; Sociotechnical Design for HIV in Dating and Hookup Platforms with Calvin Liang, Jevan Hutson, and Os Keyes</itunes:title>
        <link>https://radicalai.podbean.com/e/surveillance-stigma-sociotechnical-design-for-hiv-in-dating-and-hookup-platforms-with-calvin-liang-jevan-hutson-and-os-keyes/</link>
                    <comments>https://radicalai.podbean.com/e/surveillance-stigma-sociotechnical-design-for-hiv-in-dating-and-hookup-platforms-with-calvin-liang-jevan-hutson-and-os-keyes/#comments</comments>        <pubDate>Wed, 15 Jul 2020 07:29:58 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/3bce2779-65a1-5cb2-89e9-14bf95016863</guid>
                                    <description><![CDATA[<p>In this episode we interview the interdisciplinary research team of Calvin Liang, Jevan Hutson, and Os Keyes around the motivation and research behind their paper: "Surveillance, Stigma & Sociotechnical Design for HIV". This paper analyzes the approaches that 49 online dating and hookup platforms have taken when designing for HIV disclosure. Calvin, Jevan, and Os point to bottom-up, communal, and queer approaches for design as a way of potentially making the tension between disclosure and risk easier to safely navigate. Their paper will be published in First Monday's Special Issue on HIV/AIDS and Digital Media in the Fall.</p>
<p>Calvin and Os are PhD students in Human-Centered Design and Engineering at the University of Washington. </p>
<p>Jevan is a data justice advocate, human-computer interaction researcher, and recent graduate of the University of Washington School of Law. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this episode we interview the interdisciplinary research team of Calvin Liang, Jevan Hutson, and Os Keyes around the motivation and research behind their paper: "Surveillance, Stigma & Sociotechnical Design for HIV". This paper analyzes the approaches that 49 online dating and hookup platforms have taken when designing for HIV disclosure. Calvin, Jevan, and Os point to bottom-up, communal, and queer approaches for design as a way of potentially making the tension between disclosure and risk easier to safely navigate. Their paper will be published in First Monday's Special Issue on HIV/AIDS and Digital Media in the Fall.</p>
<p>Calvin and Os are PhD students in Human-Centered Design and Engineering at the University of Washington. </p>
<p>Jevan is a data justice advocate, human-computer interaction researcher, and recent graduate of the University of Washington School of Law. </p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/5996w8/JCO__mixdown2_8934k.mp3" length="75817015" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this episode we interview the interdisciplinary research team of Calvin Liang, Jevan Hutson, and Os Keyes around the motivation and research behind their paper: "Surveillance, Stigma & Sociotechnical Design for HIV". This paper analyzes the approaches that 49 online dating and hookup platforms have taken when designing for HIV disclosure. Calvin, Jevan, and Os point to bottom-up, communal, and queer approaches for design as a way of potentially making the tension between disclosure and risk easier to safely navigate. Their paper will be published in First Monday's Special Issue on HIV/AIDS and Digital Media in the Fall.
Calvin and Os are PhD students in Human-Centered Design and Engineering at the University of Washington. 
Jevan is a data justice advocate, human-computer interaction researcher, and recent graduate of the University of Washington School of Law. 
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3158</itunes:duration>
                <itunes:episode>24</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>All Tech is Human Series #1 - Building Anti-Racist Technology &amp; Culture with Mutale Nkonde &amp; Charlton McIlwain</title>
        <itunes:title>All Tech is Human Series #1 - Building Anti-Racist Technology &amp; Culture with Mutale Nkonde &amp; Charlton McIlwain</itunes:title>
        <link>https://radicalai.podbean.com/e/all-tech-is-human-series-1-building-anti-racist-technology-culture-with-mutale-nkonde-charlton-mcilwain/</link>
                    <comments>https://radicalai.podbean.com/e/all-tech-is-human-series-1-building-anti-racist-technology-culture-with-mutale-nkonde-charlton-mcilwain/#comments</comments>        <pubDate>Sun, 12 Jul 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/7da18e06-fff5-514e-b7f0-96d9f7cfe401</guid>
                                    <description><![CDATA[<p>How can we ensure that our technological systems do not reproduce existing inequalities?</p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring <a href='http://mutale.tech/'>Mutale Nkonde</a> (CEO of AI for the People & fellow at the Digital Society Lab at Stanford) & <a href='https://charltonmcilwain.com/'>Charlton McIlwain</a> (author of Black Software: The Internet & Racial Justice, From the AfroNet to Black Lives Matter, as well as Vice Provost for Faculty Engagement and Development at NYU). </p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How can we ensure that our technological systems do not reproduce existing inequalities?</p>
<p>In partnership with All Tech is Human we present this Livestreamed conversation featuring <a href='http://mutale.tech/'>Mutale Nkonde</a> (CEO of AI for the People & fellow at the Digital Society Lab at Stanford) & <a href='https://charltonmcilwain.com/'>Charlton McIlwain</a> (author of Black Software: The Internet & Racial Justice, From the AfroNet to Black Lives Matter, as well as Vice Provost for Faculty Engagement and Development at NYU). </p>
<p>This conversation is moderated by <a href='http://www.alltechishuman.org/'>All Tech Is Human</a>'s <a href='http://www.davidpolgar.com/'>David Ryan Polgar</a>. The organizational partner for the event is <a href='https://thebridgework.com/'>TheBridge</a>.</p>
<p>The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. </p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ul0t03/ATIH_1_mixdown_bdbo4.mp3" length="89386845" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How can we ensure that our technological systems do not reproduce existing inequalities?
In partnership with All Tech is Human we present this Livestreamed conversation featuring Mutale Nkonde (CEO of AI for the People & fellow at the Digital Society Lab at Stanford) & Charlton McIlwain (author of Black Software: The Internet & Racial Justice, From the AfroNet to Black Lives Matter, as well as Vice Provost for Faculty Engagement and Development at NYU). 
This conversation is moderated by All Tech Is Human's David Ryan Polgar. The organizational partner for the event is TheBridge.
The conversation does not stop here! For each of the episodes in our series with All Tech is Human, you can find a detailed “continue the conversation” page on our website radicalai.org. For each episode we will include all of the action items we just debriefed as well as annotated resources that were mentioned by the guest speakers during the livestream, ways to get involved, relevant podcast episodes, books, and other publications. 
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3724</itunes:duration>
                <itunes:episode>23</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Science Fiction, Science Fact, and AI Consciousness with Beth Singler</title>
        <itunes:title>Science Fiction, Science Fact, and AI Consciousness with Beth Singler</itunes:title>
        <link>https://radicalai.podbean.com/e/science-fiction-science-fact-and-ai-consciousness-with-beth-singler/</link>
                    <comments>https://radicalai.podbean.com/e/science-fiction-science-fact-and-ai-consciousness-with-beth-singler/#comments</comments>        <pubDate>Wed, 08 Jul 2020 00:07:03 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/eaae0181-f1e1-520b-a43a-3c213bda8481</guid>
                                    <description><![CDATA[<p>How can Science Fiction be used to get the public involved in the AI Ethics conversation? What are religious studies and how can they relate to AI? Why is it important to distinguish between Science Fiction and Science Fact when it comes to the future of AI? </p>
<p>To answer these questions and more we welcome Dr. Beth Singler to the show. </p>
<p>Dr. Beth Singler is a Junior Research Fellow in Artificial Intelligence at the University of Cambridge. Previously, Beth was the post-doctoral Research Associate on the “Human Identity in an age of Nearly-Human Machines” project at the Faraday Institute for Science and Religion. Through her research, Beth explores the social, ethical, philosophical, and religious implications of advances in Artificial Intelligence and robotics.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How can Science Fiction be used to get the public involved in the AI Ethics conversation? What are religious studies and how can they relate to AI? Why is it important to distinguish between Science Fiction and Science Fact when it comes to the future of AI? </p>
<p>To answer these questions and more we welcome Dr. Beth Singler to the show. </p>
<p>Dr. Beth Singler is a Junior Research Fellow in Artificial Intelligence at the University of Cambridge. Previously, Beth was the post-doctoral Research Associate on the “Human Identity in an age of Nearly-Human Machines” project at the Faraday Institute for Science and Religion. Through her research, Beth explores the social, ethical, philosophical, and religious implications of advances in Artificial Intelligence and robotics.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/xkj6mb/Beth_Singler_mixdown_9csox.mp3" length="73643583" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How can Science Fiction be used to get the public involved in the AI Ethics conversation? What are religious studies and how can they relate to AI? Why is it important to distinguish between Science Fiction and Science Fact when it comes to the future of AI? 
To answer these questions and more we welcome Dr. Beth Singler to the show. 
Dr. Beth Singler is a Junior Research Fellow in Artificial Intelligence at the University of Cambridge. Previously, Beth was the post-doctoral Research Associate on the “Human Identity in an age of Nearly-Human Machines” project at the Faraday Institute for Science and Religion. Through her research, Beth explores the social, ethical, philosophical, and religious implications of advances in Artificial Intelligence and robotics.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3068</itunes:duration>
                <itunes:episode>22</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>The Power of Linguistics: Unpacking Natural Language Processing Ethics with Emily M. Bender</title>
        <itunes:title>The Power of Linguistics: Unpacking Natural Language Processing Ethics with Emily M. Bender</itunes:title>
        <link>https://radicalai.podbean.com/e/the-power-of-language-to-transform-the-world-unpacking-natural-language-processing-with-emily-bender/</link>
                    <comments>https://radicalai.podbean.com/e/the-power-of-language-to-transform-the-world-unpacking-natural-language-processing-with-emily-bender/#comments</comments>        <pubDate>Wed, 01 Jul 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/e506efa6-17d6-5d82-b27b-313215efe238</guid>
                                    <description><![CDATA[<p>What are the societal impacts and ethics of Natural Language Processing (or NLP)? How can language be a form of power? How can we effectively teach ethics in the NLP classroom? How can we promote healthy interdisciplinary collaboration in the development of NLP products?</p>
<p>To answer these questions and more we welcome Dr. Emily M. Bender to the show.</p>
<p>Dr. Emily M. Bender researches linguistics, computational linguistics, and ethical issues in Natural Language Processing. Emily is currently a Professor in the Department of Linguistics and an Adjunct Professor in the Department of Computer Science and Engineering at the University of Washington. She is also the faculty director of the CLMS program and the director of the Computational Linguistics Laboratory.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What are the societal impacts and ethics of Natural Language Processing (or NLP)? How can language be a form of power? How can we effectively teach ethics in the NLP classroom? How can we promote healthy interdisciplinary collaboration in the development of NLP products?</p>
<p>To answer these questions and more we welcome Dr. Emily M. Bender to the show.</p>
<p>Dr. Emily M. Bender researches linguistics, computational linguistics, and ethical issues in Natural Language Processing. Emily is currently a Professor in the Department of Linguistics and an Adjunct Professor in the Department of Computer Science and Engineering at the University of Washington. She is also the faculty director of the CLMS program and the director of the Computational Linguistics Laboratory.</p>
<p>Full show notes for this episode can be found at Radicalai.org. </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/nnh1gm/Emily_Bender_mixdown_7lgjw.mp3" length="86992407" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What are the societal impacts and ethics of Natural Language Processing (or NLP)? How can language be a form of power? How can we effectively teach ethics in the NLP classroom? How can we promote healthy interdisciplinary collaboration in the development of NLP products?
To answer these questions and more we welcome Dr. Emily M. Bender to the show.
Dr. Emily M. Bender researches linguistics, computational linguistics, and ethical issues in Natural Language Processing. Emily is currently a Professor in the Department of Linguistics and an Adjunct Professor in the Department of Computer Science and Engineering at the University of Washington. She is also the faculty director of the CLMS program and the director of the Computational Linguistics Laboratory.
Full show notes for this episode can be found at Radicalai.org. 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3624</itunes:duration>
                <itunes:episode>21</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>AI &amp; Racial Bias with Renée Cummings - BONUS EPISODE</title>
        <itunes:title>AI &amp; Racial Bias with Renée Cummings - BONUS EPISODE</itunes:title>
        <link>https://radicalai.podbean.com/e/ai-racial-bias-with-renee-cummings-bonus-episode/</link>
                    <comments>https://radicalai.podbean.com/e/ai-racial-bias-with-renee-cummings-bonus-episode/#comments</comments>        <pubDate>Sun, 28 Jun 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/b2ae627d-c47d-5cf4-8625-71ab2df6fd03</guid>
                                    <description><![CDATA[<p>Sponsored by Ethical Intelligence, this bonus episode features a presentation delivered by Renée Cummings as a workshop given on 06/24/20. We also welcome Ethical Intelligence CEO Olivia Gambelin to the show as a guest host. </p>
<p>Renée Cummings is a criminologist and international criminal justice consultant who specializes in Artificial Intelligence (AI); ethical AI, bias in AI, diversity and inclusion in AI, algorithmic authenticity and accountability, data integrity and equity, AI for social good and social justice in AI policy and governance. She is the CEO of Urban AI. </p>
<p>Full show notes for this episode can be found at Radicalai.org. Slides referenced can be found by contacting Ethical Intelligence at ethicalintelligence.co</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Sponsored by Ethical Intelligence, this bonus episode features a presentation delivered by Renée Cummings as a workshop given on 06/24/20. We also welcome Ethical Intelligence CEO Olivia Gambelin to the show as a guest host. </p>
<p>Renée Cummings is a criminologist and international criminal justice consultant who specializes in Artificial Intelligence (AI); ethical AI, bias in AI, diversity and inclusion in AI, algorithmic authenticity and accountability, data integrity and equity, AI for social good and social justice in AI policy and governance. She is the CEO of Urban AI. </p>
<p>Full show notes for this episode can be found at Radicalai.org. Slides referenced can be found by contacting Ethical Intelligence at ethicalintelligence.co</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/jkv4zu/Renee_mixdown.mp3" length="61691977" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Sponsored by Ethical Intelligence, this bonus episode features a presentation delivered by Renée Cummings as a workshop given on 06/24/20. We also welcome Ethical Intelligence CEO Olivia Gambelin to the show as a guest host. 
Renée Cummings is a criminologist and international criminal justice consultant who specializes in Artificial Intelligence (AI); ethical AI, bias in AI, diversity and inclusion in AI, algorithmic authenticity and accountability, data integrity and equity, AI for social good and social justice in AI policy and governance. She is the CEO of Urban AI. 
Full show notes for this episode can be found at Radicalai.org. Slides referenced can be found by contacting Ethical Intelligence at ethicalintelligence.co
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2570</itunes:duration>
                <itunes:episode>20</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>IBM, Microsoft, and Amazon Disavow Facial Recognition Technology: What Do You Need to Know? with Deb Raji</title>
        <itunes:title>IBM, Microsoft, and Amazon Disavow Facial Recognition Technology: What Do You Need to Know? with Deb Raji</itunes:title>
        <link>https://radicalai.podbean.com/e/ibm-microsoft-and-amazon-disavow-facial-recognition-technology-what-do-you-need-to-know-with-deb-raji/</link>
                    <comments>https://radicalai.podbean.com/e/ibm-microsoft-and-amazon-disavow-facial-recognition-technology-what-do-you-need-to-know-with-deb-raji/#comments</comments>        <pubDate>Wed, 24 Jun 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/18627fc8-22a2-5b92-9f2a-087bad810ff1</guid>
                                    <description><![CDATA[<p>What does it mean that IBM, Microsoft, Amazon, and others have distanced themselves from developing facial recognition technology and providing facial recognition data to vendors? Should you be skeptical? Where is the hope? To answer these questions and more we welcome Deb Raji to the show. </p>
<p>Deb is a tech fellow at the AI Now Institute, working on critical perspectives to evaluation practice in AI, conducting audits on deployed AI systems and facial recognition, and AI auditing policy. She has worked closely with the Algorithmic Justice League initiative and on several projects to highlight cases of bias in computer vision. Deb was named one of MIT Technology Review’s 35 Innovators Under 35 for her research on the harms of racially biased data in facial recognition technologies.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What does it mean that IBM, Microsoft, Amazon, and others have distanced themselves from developing facial recognition technology and providing facial recognition data to vendors? Should you be skeptical? Where is the hope? To answer these questions and more we welcome Deb Raji to the show. </p>
<p>Deb is a tech fellow at the AI Now Institute, working on critical perspectives to evaluation practice in AI, conducting audits on deployed AI systems and facial recognition, and AI auditing policy. She has worked closely with the Algorithmic Justice League initiative and on several projects to highlight cases of bias in computer vision. Deb was named one of MIT Technology Review’s 35 Innovators Under 35 for her research on the harms of racially biased data in facial recognition technologies.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/wv4srm/Deb_Raji_mixdown_6ljdy.mp3" length="77617423" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What does it mean that IBM, Microsoft, Amazon, and others have distanced themselves from developing facial recognition technology and providing facial recognition data to vendors? Should you be skeptical? Where is the hope? To answer these questions and more we welcome Deb Raji to the show. 
Deb is a tech fellow at the AI Now Institute, working on critical perspectives to evaluation practice in AI, conducting audits on deployed AI systems and facial recognition, and AI auditing policy. She has worked closely with the Algorithmic Justice League initiative and on several projects to highlight cases of bias in computer vision. Deb was named one of MIT Technology Review’s 35 Innovators Under 35 for her research on the harms of racially biased data in facial recognition technologies.
Full show notes for this episode can be found at Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3233</itunes:duration>
                <itunes:episode>19</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Emoji Design, White Accountability, and the Ethical Future of Chatbots with Miriam Sweeney</title>
        <itunes:title>Emoji Design, White Accountability, and the Ethical Future of Chatbots with Miriam Sweeney</itunes:title>
        <link>https://radicalai.podbean.com/e/emoji-design-white-accountability-and-the-ethical-future-of-chatbots-with-miriam-sweeney/</link>
                    <comments>https://radicalai.podbean.com/e/emoji-design-white-accountability-and-the-ethical-future-of-chatbots-with-miriam-sweeney/#comments</comments>        <pubDate>Wed, 17 Jun 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/23ea362b-ded0-5af3-9b2b-9b5946a1fde0</guid>
                                    <description><![CDATA[<p>What are the ethics of emoji design and why does it matter? What are some of the ethical concerns we should have about chatbots and virtual assistants? How can these technologies perpetuate racial and gender stereotypes? To answer these questions and more The Radical AI Podcast welcomes Dr. Miriam Sweeney to the show. </p>
<p> </p>
<p>Dr. Miriam Sweeney is an assistant professor in the School of Library & Information Studies at the University of Alabama. She is a critical cultural digital media scholar who studies anthropomorphic design, virtual assistants, voice interfaces, and AI through the lenses of race, gender, and sexuality. Her current project, Facing Our Computers: Identity, Interfaces, and Intimate Data, explores the linkages between identity, design, and dataveillance in AI voice assistants, digital assistants, and chatbot interfaces. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
 ]]></description>
                                                            <content:encoded><![CDATA[<p>What are the ethics of emoji design and why does it matter? What are some of the ethical concerns we should have about chatbots and virtual assistants? How can these technologies perpetuate racial and gender stereotypes? To answer these questions and more The Radical AI Podcast welcomes Dr. Miriam Sweeney to the show. </p>
<p> </p>
<p>Dr. Miriam Sweeney is an assistant professor in the School of Library & Information Studies at the University of Alabama. She is a critical cultural digital media scholar who studies anthropomorphic design, virtual assistants, voice interfaces, and AI through the lenses of race, gender, and sexuality. Her current project, Facing Our Computers: Identity, Interfaces, and Intimate Data, explores the linkages between identity, design, and dataveillance in AI voice assistants, digital assistants, and chatbot interfaces. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
 ]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/f0mtn3/Miriam_mixdown.mp3" length="77748856" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What are the ethics of emoji design and why does it matter? What are some of the ethical concerns we should have about chatbots and virtual assistants? How can these technologies perpetuate racial and gender stereotypes? To answer these questions and more The Radical AI Podcast welcomes Dr. Miriam Sweeney to the show. 
 
Dr. Miriam Sweeney is an assistant professor in the School of Library & Information Studies at the University of Alabama. She is a critical cultural digital media scholar who studies anthropomorphic design, virtual assistants, voice interfaces, and AI through the lenses of race, gender, and sexuality. Her current project, Facing Our Computers: Identity, Interfaces, and Intimate Data, explores the linkages between identity, design, and dataveillance in AI voice assistants, digital assistants, and chatbot interfaces. 
 
Full show notes for this episode can be found at Radicalai.org
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3239</itunes:duration>
                <itunes:episode>18</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Minisode #3 - Coded Bias Debrief, Uprooting Colonialism in Tech, Unpacking Objectivity, and How to Take Action!</title>
        <itunes:title>Minisode #3 - Coded Bias Debrief, Uprooting Colonialism in Tech, Unpacking Objectivity, and How to Take Action!</itunes:title>
        <link>https://radicalai.podbean.com/e/minisode-3-coded-bias-debrief-uprooting-colonialism-in-tech-unpacking-objectivity-and-how-to-take-action/</link>
                    <comments>https://radicalai.podbean.com/e/minisode-3-coded-bias-debrief-uprooting-colonialism-in-tech-unpacking-objectivity-and-how-to-take-action/#comments</comments>        <pubDate>Sun, 14 Jun 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/d3636302-d28e-5546-90c1-eb470cddc0f2</guid>
                                    <description><![CDATA[<p>In this Minisode hosts Dylan and Jess reflect on current events, systemic racism in tech and beyond, how to stay connected to the deeply systemic work that is needed to uproot colonialism, and much more. </p>
<p>Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. </p>
<p>As always we invite you to please subscribe, rate, and leave a review to show your support!</p>
<p>Show notes and transcript available at radicalai.org  </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this Minisode hosts Dylan and Jess reflect on current events, systemic racism in tech and beyond, how to stay connected to the deeply systemic work that is needed to uproot colonialism, and much more. </p>
<p>Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. </p>
<p>As always we invite you to please subscribe, rate, and leave a review to show your support!</p>
<p>Show notes and transcript available at radicalai.org  </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/foht6p/minisode_3_mixdown1_7tz5m.mp3" length="39284943" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this Minisode hosts Dylan and Jess reflect on current events, systemic racism in tech and beyond, how to stay connected to the deeply systemic work that is needed to uproot colonialism, and much more. 
Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. 
As always we invite you to please subscribe, rate, and leave a review to show your support!
Show notes and transcript available at radicalai.org  ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1636</itunes:duration>
                <itunes:episode>17</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Data as Protest: Data for Black Lives with Yeshi Milner</title>
        <itunes:title>Data as Protest: Data for Black Lives with Yeshi Milner</itunes:title>
        <link>https://radicalai.podbean.com/e/data-as-protest-data-for-black-lives-with-yeshi-milner/</link>
                    <comments>https://radicalai.podbean.com/e/data-as-protest-data-for-black-lives-with-yeshi-milner/#comments</comments>        <pubDate>Wed, 10 Jun 2020 00:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/a02df154-d718-5ab2-859f-fa2c47395081</guid>
                                    <description><![CDATA[<p>How can we claim agency over data systems to fight for racial justice? What is Data for Black Lives? How can you join the movement? To answer these questions and more we welcome Yeshi Milner to the show.</p>
<p> </p>
<p>Yeshi Milner is the co-founder and executive director of Data for Black Lives. Raised in Miami, FL, Yeshi began organizing against the school-to-prison pipeline at Power U Center for Social Change as a high school senior. There she developed a lifelong commitment to movement building as a vehicle for creating and sustaining large-scale social change. More recently, Yeshi was a campaign manager at Color of Change, where she spearheaded several major national initiatives, including OrganizeFor, the only online petition platform dedicated to building the political voice of Black people.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
 ]]></description>
                                                            <content:encoded><![CDATA[<p>How can we claim agency over data systems to fight for racial justice? What is Data for Black Lives? How can you join the movement? To answer these questions and more we welcome Yeshi Milner to the show.</p>
<p> </p>
<p>Yeshi Milner is the co-founder and executive director of Data for Black Lives. Raised in Miami, FL, Yeshi began organizing against the school-to-prison pipeline at Power U Center for Social Change as a high school senior. There she developed a lifelong commitment to movement building as a vehicle for creating and sustaining large-scale social change. More recently, Yeshi was a campaign manager at Color of Change, where she spearheaded several major national initiatives, including OrganizeFor, the only online petition platform dedicated to building the political voice of Black people.</p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
 ]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/5vss6t/Yeshi_mixdown2.mp3" length="65834219" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How can we claim agency over data systems to fight for racial justice? What is Data for Black Lives? How can you join the movement? To answer these questions and more we welcome Yeshi Milner to the show.
 
Yeshi Milner is the co-founder and executive director of Data for Black Lives. Raised in Miami, FL, Yeshi began organizing against the school-to-prison pipeline at Power U Center for Social Change as a high school senior. There she developed a lifelong commitment to movement building as a vehicle for creating and sustaining large-scale social change. More recently, Yeshi was a campaign manager at Color of Change, where she spearheaded several major national initiatives, including OrganizeFor, the only online petition platform dedicated to building the political voice of Black people.
 
Full show notes for this episode can be found at Radicalai.org
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2742</itunes:duration>
                <itunes:episode>16</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Confronting Our Reality: Racial Representation and Systemic Transformation with Dr. Timnit Gebru</title>
        <itunes:title>Confronting Our Reality: Racial Representation and Systemic Transformation with Dr. Timnit Gebru</itunes:title>
        <link>https://radicalai.podbean.com/e/confronting-our-reality-racial-representation-and-systemic-transformation-with-dr-timnit-gebru/</link>
                    <comments>https://radicalai.podbean.com/e/confronting-our-reality-racial-representation-and-systemic-transformation-with-dr-timnit-gebru/#comments</comments>        <pubDate>Wed, 03 Jun 2020 00:02:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/d8ee6b68-f46a-550e-999e-70895234ee80</guid>
                                    <description><![CDATA[<p>How do we respond to the racism in the world we have been given? What does it mean to transform technology systems in the spirit of justice and equity? How do we engage with diversity and representation without reducing our efforts to simple branding and lip service?</p>
<p> </p>
<p>To answer these questions and more the Radical AI Podcast welcomes one of our heroes Dr. Timnit Gebru to the show. </p>
<p> </p>
<p>Dr. Timnit Gebru is a research scientist at Google on the ethical AI team and a co-founder of Black in AI. Timnit previously did her postdoc at Microsoft Research for the FATE (Fairness Transparency Accountability and Ethics in AI) group, where she studied algorithmic bias and the ethical implications underlying any data mining project. She received her Ph.D. from the Stanford Artificial Intelligence Laboratory, studying computer vision under Fei-Fei Li.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How do we respond to the racism in the world we have been given? What does it mean to transform technology systems in the spirit of justice and equity? How do we engage with diversity and representation without reducing our efforts to simple branding and lip service?</p>
<p> </p>
<p>To answer these questions and more the Radical AI Podcast welcomes one of our heroes Dr. Timnit Gebru to the show. </p>
<p> </p>
<p>Dr. Timnit Gebru is a research scientist at Google on the ethical AI team and a co-founder of Black in AI. Timnit previously did her postdoc at Microsoft Research for the FATE (Fairness Transparency Accountability and Ethics in AI) group, where she studied algorithmic bias and the ethical implications underlying any data mining project. She received her Ph.D. from the Stanford Artificial Intelligence Laboratory, studying computer vision under Fei-Fei Li.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/440o4w/Timnit_mixdown.mp3" length="111350055" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How do we respond to the racism in the world we have been given? What does it mean to transform technology systems in the spirit of justice and equity? How do we engage with diversity and representation without reducing our efforts to simple branding and lip service?
 
To answer these questions and more the Radical AI Podcast welcomes one of our heroes Dr. Timnit Gebru to the show. 
 
Dr. Timnit Gebru is a research scientist at Google on the ethical AI team and a co-founder of Black in AI. Timnit previously did her postdoc at Microsoft Research for the FATE (Fairness Transparency Accountability and Ethics in AI) group, where she studied algorithmic bias and the ethical implications underlying any data mining project. She received her Ph.D. from the Stanford Artificial Intelligence Laboratory, studying computer vision under Fei-Fei Li.]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>4639</itunes:duration>
                <itunes:episode>15</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>The History that Defines our Technological Future with Archivist Eun Seo Jo - BONUS EPISODE</title>
        <itunes:title>The History that Defines our Technological Future with Archivist Eun Seo Jo - BONUS EPISODE</itunes:title>
        <link>https://radicalai.podbean.com/e/the-history-that-defines-our-technological-future-with-archivist-eun-seo-jo-bonus-episode/</link>
                    <comments>https://radicalai.podbean.com/e/the-history-that-defines-our-technological-future-with-archivist-eun-seo-jo-bonus-episode/#comments</comments>        <pubDate>Sun, 31 May 2020 00:01:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/8e6467e4-2c36-5a39-a9c5-cc78a6b590db</guid>
                                    <description><![CDATA[<p>How does your data tell your story? Is historical data political? What do our archives have to do with defining the future of our technology?</p>
<p>To answer these questions and more The Radical AI Podcast welcomes Stanford PhD student and archivist Eun Seo Jo to the show. </p>
<p> </p>
<p>Eun Seo Jo is a PhD student in History at Stanford University. Her research broadly covers applications of machine learning on historical data and the ethical concerns of using socio-cultural data for AI research and systems. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
 
 ]]></description>
                                                            <content:encoded><![CDATA[<p>How does your data tell your story? Is historical data political? What do our archives have to do with defining the future of our technology?</p>
<p>To answer these questions and more The Radical AI Podcast welcomes Stanford PhD student and archivist Eun Seo Jo to the show. </p>
<p> </p>
<p>Eun Seo Jo is a PhD student in History at Stanford University. Her research broadly covers applications of machine learning on historical data and the ethical concerns of using socio-cultural data for AI research and systems. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
 
 ]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/70d545/Eun_Seo_Jo__mixdown2_bq5m0.mp3" length="92750353" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How does your data tell your story? Is historical data political? What do our archives have to do with defining the future of our technology?
To answer these questions and more The Radical AI Podcast welcomes Stanford PhD student and archivist Eun Seo Jo to the show. 
 
Eun Seo Jo is a PhD student in History at Stanford University. Her research broadly covers applications of machine learning on historical data and the ethical concerns of using socio-cultural data for AI research and systems. 
 
Full show notes for this episode can be found at Radicalai.org
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3864</itunes:duration>
                <itunes:episode>14</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Robot Rights? Exploring Algorithmic Colonization with Abeba Birhane</title>
        <itunes:title>Robot Rights? Exploring Algorithmic Colonization with Abeba Birhane</itunes:title>
        <link>https://radicalai.podbean.com/e/robot-rights-exploring-algorithmic-colonization-with-abeba-birhane/</link>
                    <comments>https://radicalai.podbean.com/e/robot-rights-exploring-algorithmic-colonization-with-abeba-birhane/#comments</comments>        <pubDate>Wed, 27 May 2020 00:01:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/fc293f98-74ed-598d-a293-c60a1cc85b69</guid>
                                    <description><![CDATA[<p>Should we grant robots rights? What is moral relationality and how can it be useful for designing machine learning algorithms? What is the algorithmic colonization of Africa and why is it harmful? To answer these questions and more The Radical AI Podcast welcomes Abeba Birhane to the show. </p>
<p> </p>
<p>Abeba Birhane is a PhD candidate in cognitive science at University College Dublin in the School of Computer Science. She studies the relationships between emerging technologies, personhood and society. Specifically, Abeba explores how technology can shape what it means to be human. Abeba’s work is incredibly interdisciplinary - bridging the fields of cognitive science, psychology, computer science, critical data studies, and philosophy. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Should we grant robots rights? What is moral relationality and how can it be useful for designing machine learning algorithms? What is the algorithmic colonization of Africa and why is it harmful? To answer these questions and more The Radical AI Podcast welcomes Abeba Birhane to the show. </p>
<p> </p>
<p>Abeba Birhane is a PhD candidate in cognitive science at University College Dublin in the School of Computer Science. She studies the relationships between emerging technologies, personhood and society. Specifically, Abeba explores how technology can shape what it means to be human. Abeba’s work is incredibly interdisciplinary - bridging the fields of cognitive science, psychology, computer science, critical data studies, and philosophy. </p>
<p> </p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p> </p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/j38cwe/Abeba_Birhane_mixdown_6g3yp.mp3" length="81826657" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Should we grant robots rights? What is moral relationality and how can it be useful for designing machine learning algorithms? What is the algorithmic colonization of Africa and why is it harmful? To answer these questions and more The Radical AI Podcast welcomes Abeba Birhane to the show. 
 
Abeba Birhane is a PhD candidate in cognitive science at University College Dublin in the School of Computer Science. She studies the relationships between emerging technologies, personhood and society. Specifically, Abeba explores how technology can shape what it means to be human. Abeba’s work is incredibly interdisciplinary - bridging the fields of cognitive science, psychology, computer science, critical data studies, and philosophy. 
 
Full show notes for this episode can be found at Radicalai.org
 
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3409</itunes:duration>
                <itunes:episode>13</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Tech Journalism and Ethics: Where is the Truth Anyway? with Karen Hao</title>
        <itunes:title>Tech Journalism and Ethics: Where is the Truth Anyway? with Karen Hao</itunes:title>
        <link>https://radicalai.podbean.com/e/tech-journalism-and-ethics-where-is-the-truth-anyway-with-karen-hao/</link>
                    <comments>https://radicalai.podbean.com/e/tech-journalism-and-ethics-where-is-the-truth-anyway-with-karen-hao/#comments</comments>        <pubDate>Wed, 20 May 2020 03:01:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/ed270e02-ff3a-5d19-9406-51058ccd91c2</guid>
                                    <description><![CDATA[<p>What is the role of journalism in telling the stories of tech ethics? How can journalism bridge the gap between technology and public policy? How do we measure truth in journalism, research and beyond? </p>
<p>To answer these questions and more The Radical AI Podcast welcomes Karen Hao to the show. </p>
<p>Karen is the artificial intelligence reporter for MIT Technology Review. She covers the ethics and social impacts of technology as well as its applications for social good. Karen also writes the AI newsletter, “the Algorithm”, which thoughtfully examines the field’s latest news and research. Previously, Karen was a reporter and data scientist at Quartz and an application engineer at the first startup to spin out of Google X.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is the role of journalism in telling the stories of tech ethics? How can journalism bridge the gap between technology and public policy? How do we measure truth in journalism, research and beyond? </p>
<p>To answer these questions and more The Radical AI Podcast welcomes Karen Hao to the show. </p>
<p>Karen is the artificial intelligence reporter for MIT Technology Review. She covers the ethics and social impacts of technology as well as its applications for social good. Karen also writes the AI newsletter, “the Algorithm”, which thoughtfully examines the field’s latest news and research. Previously, Karen was a reporter and data scientist at Quartz and an application engineer at the first startup to spin out of Google X.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/bx1a8z/Karen_Hao_mixdown_8fdd0.mp3" length="47550645" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the role of journalism in telling the stories of tech ethics? How can journalism bridge the gap between technology and public policy? How do we measure truth in journalism, research and beyond? 
To answer these questions and more The Radical AI Podcast welcomes Karen Hao to the show. 
Karen is the artificial intelligence reporter for MIT Technology Review. She covers the ethics and social impacts of technology as well as its applications for social good. Karen also writes the AI newsletter, “the Algorithm”, which thoughtfully examines the field’s latest news and research. Previously, Karen was a reporter and data scientist at Quartz and an application engineer at the first startup to spin out of Google X.
Full show notes for this episode can be found at Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1980</itunes:duration>
                <itunes:episode>12</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Minisode #2 - Racism and Sexism in AI, Love and Authenticity in Tech, and Celebrating One Month Since Launch!</title>
        <itunes:title>Minisode #2 - Racism and Sexism in AI, Love and Authenticity in Tech, and Celebrating One Month Since Launch!</itunes:title>
        <link>https://radicalai.podbean.com/e/minisode-2-racism-and-sexism-in-ai-love-and-authenticity-in-tech-and-celebrating-one-month-since-launch/</link>
                    <comments>https://radicalai.podbean.com/e/minisode-2-racism-and-sexism-in-ai-love-and-authenticity-in-tech-and-celebrating-one-month-since-launch/#comments</comments>        <pubDate>Sun, 17 May 2020 00:15:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/77fef7e4-45db-5b0b-bee2-271c91d675da</guid>
                                    <description><![CDATA[<p>In this Minisode hosts Dylan and Jess celebrate one month since launch and debrief racism and sexism in AI, labor movements in tech spaces, theology, and so much more!</p>
<p>Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. </p>
<p>As always we invite you to please subscribe, rate, and leave a review to show your support!  </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this Minisode hosts Dylan and Jess celebrate one month since launch and debrief racism and sexism in AI, labor movements in tech spaces, theology, and so much more!</p>
<p>Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. </p>
<p>As always we invite you to please subscribe, rate, and leave a review to show your support!  </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ukwxcw/Minisode2_mixdown39rkse.mp3" length="42984603" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this Minisode hosts Dylan and Jess celebrate one month since launch and debrief racism and sexism in AI, labor movements in tech spaces, theology, and so much more!
Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. 
As always we invite you to please subscribe, rate, and leave a review to show your support!  ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1790</itunes:duration>
                <itunes:episode>11</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Labor and Innovation: Exploring the Power of Design and Storytelling with Lilly Irani</title>
        <itunes:title>Labor and Innovation: Exploring the Power of Design and Storytelling with Lilly Irani</itunes:title>
        <link>https://radicalai.podbean.com/e/labor-and-innovation-exploring-the-power-of-design-and-storytelling-with-lilly-irani/</link>
                    <comments>https://radicalai.podbean.com/e/labor-and-innovation-exploring-the-power-of-design-and-storytelling-with-lilly-irani/#comments</comments>        <pubDate>Tue, 12 May 2020 22:34:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/1f132232-1aa5-501a-a9b7-07b312a9fa99</guid>
                                    <description><![CDATA[<p>What is the intersection between labor justice movements and the AI technology industry? How can we use design and ethnography to address the relationship between technology, power, and liberation? To answer these questions and more The Radical AI Podcast welcomes Dr. Lilly Irani to the show. </p>
<p>Dr. Lilly Irani is an associate professor of communication and science studies at the University of California, San Diego. She is a cofounder and maintainer of digital labor activism tool Turkopticon, and author of the book Chasing Innovation: Making Entrepreneurial Citizens in Modern India. Dr. Irani’s research broadly investigates the cultural politics of high-tech work practices with a focus on how actors produce “innovation” cultures.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What is the intersection between labor justice movements and the AI technology industry? How can we use design and ethnography to address the relationship between technology, power, and liberation? To answer these questions and more The Radical AI Podcast welcomes Dr. Lilly Irani to the show. </p>
<p>Dr. Lilly Irani is an associate professor of communication and science studies at the University of California, San Diego. She is a cofounder and maintainer of digital labor activism tool Turkopticon, and author of the book Chasing Innovation: Making Entrepreneurial Citizens in Modern India. Dr. Irani’s research broadly investigates the cultural politics of high-tech work practices with a focus on how actors produce “innovation” cultures.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/4i865k/LillyIrani_mixdown_027mm8v.mp3" length="79268817" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What is the intersection between labor justice movements and the AI technology industry? How can we use design and ethnography to address the relationship between technology, power, and liberation? To answer these questions and more The Radical AI Podcast welcomes Dr. Lilly Irani to the show. 
Dr. Lilly Irani is an associate professor of communication and science studies at the University of California, San Diego. She is a cofounder and maintainer of digital labor activism tool Turkopticon, and author of the book Chasing Innovation: Making Entrepreneurial Citizens in Modern India. Dr. Irani’s research broadly investigates the cultural politics of high-tech work practices with a focus on how actors produce “innovation” cultures.
Full show notes for this episode can be found at Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3302</itunes:duration>
                <itunes:episode>10</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Love, Challenge, and Hope: Building a Movement to Dismantle the New Jim Code with Ruha Benjamin</title>
        <itunes:title>Love, Challenge, and Hope: Building a Movement to Dismantle the New Jim Code with Ruha Benjamin</itunes:title>
        <link>https://radicalai.podbean.com/e/love-challenge-and-hope-building-a-movement-to-dismantle-the-new-jim-code-with-ruha-benjamin/</link>
                    <comments>https://radicalai.podbean.com/e/love-challenge-and-hope-building-a-movement-to-dismantle-the-new-jim-code-with-ruha-benjamin/#comments</comments>        <pubDate>Wed, 06 May 2020 02:30:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/564df00d-654b-5f02-ab3f-451c8662baf9</guid>
                                    <description><![CDATA[<p>How is racism embedded in technological systems? How do we address the root causes of discrimination? How do we as designers and consumers of AI technology reclaim our agency and create a world of equity for all? To answer these questions and more The Radical AI Podcast welcomes Dr. Ruha Benjamin to the show.  </p>
<p>Dr. Benjamin is Associate Professor of African American Studies at Princeton University and founder of the Just Data Lab. She is author of People’s Science: Bodies and Rights on the Stem Cell Frontier (2013) and Race After Technology: Abolitionist Tools for the New Jim Code (2019) among other publications. Her work investigates the social dimensions of science, medicine, and technology with a focus on the relationship between innovation and inequity, health and justice, knowledge, and power.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How is racism embedded in technological systems? How do we address the root causes of discrimination? How do we as designers and consumers of AI technology reclaim our agency and create a world of equity for all? To answer these questions and more The Radical AI Podcast welcomes Dr. Ruha Benjamin to the show.  </p>
<p>Dr. Benjamin is Associate Professor of African American Studies at Princeton University and founder of the Just Data Lab. She is author of People’s Science: Bodies and Rights on the Stem Cell Frontier (2013) and Race After Technology: Abolitionist Tools for the New Jim Code (2019) among other publications. Her work investigates the social dimensions of science, medicine, and technology with a focus on the relationship between innovation and inequity, health and justice, knowledge, and power.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/bxb65m/Ruha_mixdown2.mp3" length="85339751" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How is racism embedded in technological systems? How do we address the root causes of discrimination? How do we as designers and consumers of AI technology reclaim our agency and create a world of equity for all? To answer these questions and more The Radical AI Podcast welcomes Dr. Ruha Benjamin to the show.  
Dr. Benjamin is Associate Professor of African American Studies at Princeton University and founder of the Just Data Lab. She is author of People’s Science: Bodies and Rights on the Stem Cell Frontier (2013) and Race After Technology: Abolitionist Tools for the New Jim Code (2019) among other publications. Her work investigates the social dimensions of science, medicine, and technology with a focus on the relationship between innovation and inequity, health and justice, knowledge, and power.
Full show notes for this episode can be found at Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3555</itunes:duration>
                <itunes:episode>9</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Racism and Sexism in AI Technology? Navigating Systems of Power with Sarah Myers West</title>
        <itunes:title>Racism and Sexism in AI Technology? Navigating Systems of Power with Sarah Myers West</itunes:title>
        <link>https://radicalai.podbean.com/e/racism-and-sexism-in-ai-technology-navigating-systems-of-power-with-sarah-myers-west/</link>
                    <comments>https://radicalai.podbean.com/e/racism-and-sexism-in-ai-technology-navigating-systems-of-power-with-sarah-myers-west/#comments</comments>        <pubDate>Wed, 29 Apr 2020 02:30:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/e5643b69-62ba-5229-9ff0-cf7acc218b1c</guid>
                                    <description><![CDATA[<p>Can you separate technology from power? Is technology ever objective? How do we build technology that meets the needs of everyone? To answer these questions and more The Radical AI Podcast welcomes Dr. Sarah Myers West to the show. </p>
<p>Dr. Sarah Myers West is a postdoctoral researcher at the AI Now Institute. Her research centers on the critical study of technology and culture, with an emphasis on historical and ethnographic methods.</p>
<p>Dr. West is currently working on a project that addresses the politics of diversity and inclusion in technological communities by exploring the nexus of artificial intelligence, gender, and intersectionality. She received her doctoral degree from the Annenberg School for Communication and Journalism at the University of Southern California in 2018, where her dissertation examined the cultural history and politics of encryption technologies from the 1960s to the present day.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Can you separate technology from power? Is technology ever objective? How do we build technology that meets the needs of everyone? To answer these questions and more The Radical AI Podcast welcomes Dr. Sarah Myers West to the show. </p>
<p>Dr. Sarah Myers West is a postdoctoral researcher at the AI Now Institute. Her research centers on the critical study of technology and culture, with an emphasis on historical and ethnographic methods.</p>
<p>Dr. West is currently working on a project that addresses the politics of diversity and inclusion in technological communities by exploring the nexus of artificial intelligence, gender, and intersectionality. She received her doctoral degree from the Annenberg School for Communication and Journalism at the University of Southern California in 2018, where her dissertation examined the cultural history and politics of encryption technologies from the 1960s to the present day.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/jjc97k/SMW_RAI_mixdown2.mp3" length="63506129" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Can you separate technology from power? Is technology ever objective? How do we build technology that meets the needs of everyone? To answer these questions and more The Radical AI Podcast welcomes Dr. Sarah Myers West to the show. 
Dr. Sarah Myers West is a postdoctoral researcher at the AI Now Institute. Her research centers on the critical study of technology and culture, with an emphasis on historical and ethnographic methods.
Dr. West is currently working on a project that addresses the politics of diversity and inclusion in technological communities by exploring the nexus of artificial intelligence, gender, and intersectionality. She received her doctoral degree from the Annenberg School for Communication and Journalism at the University of Southern California in 2018, where her dissertation examined the cultural history and politics of encryption technologies from the 1960s to the present day.
Full show notes for this episode can be found at Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2645</itunes:duration>
                <itunes:episode>8</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Is God in your iPhone? Black Liberation Theology, Accessibility, and Digital Citizenry with Shamika Goddard </title>
        <itunes:title>Is God in your iPhone? Black Liberation Theology, Accessibility, and Digital Citizenry with Shamika Goddard </itunes:title>
        <link>https://radicalai.podbean.com/e/is-god-in-your-iphone-black-liberation-theology-accessibility-and-digital-citizenry-with-shamika-goddard/</link>
                    <comments>https://radicalai.podbean.com/e/is-god-in-your-iphone-black-liberation-theology-accessibility-and-digital-citizenry-with-shamika-goddard/#comments</comments>        <pubDate>Wed, 22 Apr 2020 02:45:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/df28b3d1-1c99-5809-83ba-a44188fb5d16</guid>
                                    <description><![CDATA[<p>What does it mean to be an embodied black woman in technology spaces? What is techno-womanism? Do spirituality and liberation have a place in our conversations about technology? To answer these questions and more The Radical AI Podcast welcomes Shamika Goddard to the show. </p>
<p>Shamika was born and raised in San Antonio, TX, and is the oldest of four children. Attending math and engineering camps for fun in junior high and high school, Shamika was excited about learning and eager to help save the world. After graduating from Stanford University, she served a year with AmeriCorps through Reading Partners in Queens and decided to stay in New York City. She went on to study technology and ethics at Union Theological Seminary in the city of New York and is thrilled to be serving others as a Tech Chaplain. She currently attends CU Boulder's iSchool studying technology, ethics, and social justice issues.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>What does it mean to be an embodied black woman in technology spaces? What is techno-womanism? Do spirituality and liberation have a place in our conversations about technology? To answer these questions and more The Radical AI Podcast welcomes Shamika Goddard to the show. </p>
<p>Shamika was born and raised in San Antonio, TX, and is the oldest of four children. Attending math and engineering camps for fun in junior high and high school, Shamika was excited about learning and eager to help save the world. After graduating from Stanford University, she served a year with AmeriCorps through Reading Partners in Queens and decided to stay in New York City. She went on to study technology and ethics at Union Theological Seminary in the city of New York and is thrilled to be serving others as a Tech Chaplain. She currently attends CU Boulder's iSchool studying technology, ethics, and social justice issues.</p>
<p>Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod</p>
<p> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/xjnqsf/shamika_mixdown11.mp3" length="89645852" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What does it mean to be an embodied black woman in technology spaces? What is techno-womanism? Do spirituality and liberation have a place in our conversations about technology? To answer these questions and more The Radical AI Podcast welcomes Shamika Goddard to the show. 
Shamika was born and raised in San Antonio, TX, and is the oldest of four children. Attending math and engineering camps for fun in junior high and high school, Shamika was excited about learning and eager to help save the world. After graduating from Stanford University, she served a year with AmeriCorps through Reading Partners in Queens and decided to stay in New York City. She went on to study technology and ethics at Union Theological Seminary in the city of New York and is thrilled to be serving others as a Tech Chaplain. She currently attends CU Boulder's iSchool studying technology, ethics, and social justice issues.
Full show notes for this episode can be found at Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3734</itunes:duration>
                <itunes:episode>7</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Minisode #1 - Contact Tracing, Social Power, and a Thank You! </title>
        <itunes:title>Minisode #1 - Contact Tracing, Social Power, and a Thank You! </itunes:title>
        <link>https://radicalai.podbean.com/e/minisode-1-contact-tracing-social-power-and-a-thank-you/</link>
                    <comments>https://radicalai.podbean.com/e/minisode-1-contact-tracing-social-power-and-a-thank-you/#comments</comments>        <pubDate>Sun, 19 Apr 2020 03:30:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/f4664321-96e3-58b4-8b00-8336589aea4f</guid>
                                    <description><![CDATA[<p>In this Minisode hosts Jess and Dylan debrief the breaking news of contact tracing apps, socio-political power structures, and reveal future guests for the show.</p>
<p>Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. </p>
<p>As always we invite you to please subscribe, rate, and leave a review to show your support!  </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>In this Minisode hosts Jess and Dylan debrief the breaking news of contact tracing apps, socio-political power structures, and reveal future guests for the show.</p>
<p>Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. </p>
<p>As always we invite you to please subscribe, rate, and leave a review to show your support!  </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/tazabm/Minisode_2_mixdown.mp3" length="26560241" type="audio/mpeg"/>
        <itunes:summary><![CDATA[In this Minisode hosts Jess and Dylan debrief the breaking news of contact tracing apps, socio-political power structures, and reveal future guests for the show.
Every month The Radical AI Podcast releases a Minisode reviewing the previous month's episodes and updating listeners on insider news from the Radical AI world. 
As always we invite you to please subscribe, rate, and leave a review to show your support!  ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1106</itunes:duration>
                <itunes:episode>6</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Apple &amp; Google Partner to Promote Coronavirus Contact Tracing. Should You be Worried? with Seda Gurses</title>
        <itunes:title>Apple &amp; Google Partner to Promote Coronavirus Contact Tracing. Should You be Worried? with Seda Gurses</itunes:title>
        <link>https://radicalai.podbean.com/e/apple-google-partner-to-promote-coronavirus-contact-tracing-should-you-be-worried-with-seda-gurses/</link>
                    <comments>https://radicalai.podbean.com/e/apple-google-partner-to-promote-coronavirus-contact-tracing-should-you-be-worried-with-seda-gurses/#comments</comments>        <pubDate>Wed, 15 Apr 2020 04:15:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/15a860a9-7deb-5d25-9a85-db4522dcdcfc</guid>
                                    <description><![CDATA[<p>What does it mean that Apple and Google have partnered to create contact tracing apps? Should you be worried that your medical data is at stake? What are the ramifications of this unprecedented collaboration of two tech behemoths? To answer these questions and more The Radical AI Podcast welcomes Seda Gurses to the show.</p>
<p>Seda is currently an Associate Professor in the Department of Multi-Actor Systems at the Faculty of Technology Policy and Management, at TU Delft and an affiliate at the COSIC Group at the Department of Electrical Engineering (ESAT), KU Leuven. Her work focuses on privacy-enhancing and protective optimization technologies (PETs and POTs), privacy engineering, as well as questions around software infrastructures, social justice, and political economy as they intersect with computer science. Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
 
 ]]></description>
                                                            <content:encoded><![CDATA[<p>What does it mean that Apple and Google have partnered to create contact tracing apps? Should you be worried that your medical data is at stake? What are the ramifications of this unprecedented collaboration of two tech behemoths? To answer these questions and more The Radical AI Podcast welcomes Seda Gurses to the show.</p>
<p>Seda is currently an Associate Professor in the Department of Multi-Actor Systems at the Faculty of Technology Policy and Management, at TU Delft and an affiliate at the COSIC Group at the Department of Electrical Engineering (ESAT), KU Leuven. Her work focuses on privacy-enhancing and protective optimization technologies (PETs and POTs), privacy engineering, as well as questions around software infrastructures, social justice, and political economy as they intersect with computer science. Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
 
 ]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/hrgmzt/Seda_mixdown2.mp3" length="64072027" type="audio/mpeg"/>
        <itunes:summary><![CDATA[What does it mean that Apple and Google have partnered to create contact tracing apps? Should you be worried that your medical data is at stake? What are the ramifications of this unprecedented collaboration of two tech behemoths? To answer these questions and more The Radical AI Podcast welcomes Seda Gurses to the show.
Seda is currently an Associate Professor in the Department of Multi-Actor Systems at the Faculty of Technology Policy and Management, at TU Delft and an affiliate at the COSIC Group at the Department of Electrical Engineering (ESAT), KU Leuven. Her work focuses on privacy-enhancing and protective optimization technologies (PETs and POTs), privacy engineering, as well as questions around software infrastructures, social justice, and political economy as they intersect with computer science. Full show notes for this episode can be found at Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod 
 
 ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2669</itunes:duration>
                <itunes:episode>5</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Have Classification Algorithms Gone Too Far? Exploring Gender in AI with Morgan Klaus Scheuerman</title>
        <itunes:title>Have Classification Algorithms Gone Too Far? Exploring Gender in AI with Morgan Klaus Scheuerman</itunes:title>
        <link>https://radicalai.podbean.com/e/3-have-classification-algorithms-gone-too-far-exploring-gender-in-ai-with-morgan-klaus-scheuerman/</link>
                    <comments>https://radicalai.podbean.com/e/3-have-classification-algorithms-gone-too-far-exploring-gender-in-ai-with-morgan-klaus-scheuerman/#comments</comments>        <pubDate>Thu, 09 Apr 2020 04:10:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/50e75965-2cc8-5e45-9bb4-80d3dbd3bcc6</guid>
                                    <description><![CDATA[<p>How does a machine experience your identity? What do women and gender studies have to do with technology design? How do we create more dialogue between industry and the academy? To answer these questions and more The Radical AI Podcast welcomes Morgan Klaus Scheuerman to the show.</p>
<p>Morgan Klaus Scheuerman is an Information Science Ph.D. student at the University of Colorado interested in exploring the ways individuals with diverse gender identities interact with technology. Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How does a machine experience your identity? What do women and gender studies have to do with technology design? How do we create more dialogue between industry and the academy? To answer these questions and more The Radical AI Podcast welcomes Morgan Klaus Scheuerman to the show.</p>
<p>Morgan Klaus Scheuerman is an Information Science Ph.D. student at the University of Colorado interested in exploring the ways individuals with diverse gender identities interact with technology. Full show notes for this episode can be found at Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/3qd8nc/Morgan_mixdown4.mp3" length="83514310" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How does a machine experience your identity? What do women and gender studies have to do with technology design? How do we create more dialogue between industry and the academy? To answer these questions and more The Radical AI Podcast welcomes Morgan Klaus Scheuerman to the show.
Morgan Klaus Scheuerman is an Information Science Ph.D. student at the University of Colorado interested in exploring the ways individuals with diverse gender identities interact with technology. Full show notes for this episode can be found at Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3548</itunes:duration>
                <itunes:episode>4</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Are We Being Watched? Unpacking AI Surveillance with Kandrea Wade</title>
        <itunes:title>Are We Being Watched? Unpacking AI Surveillance with Kandrea Wade</itunes:title>
        <link>https://radicalai.podbean.com/e/are-we-being-watched-unpacking-ai-surveillance-with-kandrea-wade/</link>
                    <comments>https://radicalai.podbean.com/e/are-we-being-watched-unpacking-ai-surveillance-with-kandrea-wade/#comments</comments>        <pubDate>Thu, 09 Apr 2020 04:05:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/2c6afa3e-dd6b-5ca5-a81e-41e528dea552</guid>
                                    <description><![CDATA[<p>Are we being watched? Who is responsible for ensuring the ethical use of our data? What does it mean to be data literate? To answer these questions and more The Radical AI Podcast welcomes Kandrea Wade to the show.</p>
<p>Kandrea Wade is a PhD student in the Information Science department at CU Boulder focusing on algorithmic identity and the digital surveillance of marginalized groups. Full show notes for this episode can be found on Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Are we being watched? Who is responsible for ensuring the ethical use of our data? What does it mean to be data literate? To answer these questions and more The Radical AI Podcast welcomes Kandrea Wade to the show.</p>
<p>Kandrea Wade is a PhD student in the Information Science department at CU Boulder focusing on algorithmic identity and the digital surveillance of marginalized groups. Full show notes for this episode can be found on Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/t98ujr/Kandrea_mixdown.mp3" length="70079755" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Are we being watched? Who is responsible for ensuring the ethical use of our data? What does it mean to be data literate? To answer these questions and more The Radical AI Podcast welcomes Kandrea Wade to the show.
Kandrea Wade is a PhD student in the Information Science department at CU Boulder focusing on algorithmic identity and the digital surveillance of marginalized groups. Full show notes for this episode can be found on Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>2919</itunes:duration>
                <itunes:episode>3</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Can a Machine Ever be Moral? Robot Politeness and Persuasion with Tom Williams</title>
        <itunes:title>Can a Machine Ever be Moral? Robot Politeness and Persuasion with Tom Williams</itunes:title>
        <link>https://radicalai.podbean.com/e/2-can-a-robot-ever-be-moral-with-tom-williams/</link>
                    <comments>https://radicalai.podbean.com/e/2-can-a-robot-ever-be-moral-with-tom-williams/#comments</comments>        <pubDate>Thu, 09 Apr 2020 04:00:00 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/149771fa-a9ff-547a-99cc-f32d22a86dd5</guid>
                                    <description><![CDATA[<p>Are robots moral? How can we tell? What do language, politeness, and persuasion have to do with robot morality? To answer these questions and more The Radical AI Podcast welcomes Dr. Tom Williams to the show.</p>
<p>Tom Williams is an Assistant Professor of Computer Science at the Colorado School of Mines, where he directs the Mines Interactive Robotics Research Lab. Full show notes for this episode can be found on Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Are robots moral? How can we tell? What do language, politeness, and persuasion have to do with robot morality? To answer these questions and more The Radical AI Podcast welcomes Dr. Tom Williams to the show.</p>
<p>Tom Williams is an Assistant Professor of Computer Science at the Colorado School of Mines, where he directs the Mines Interactive Robotics Research Lab. Full show notes for this episode can be found on Radicalai.org</p>
<p>If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ry39kv/Tom_Williams_mixdown.mp3" length="92295253" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Are robots moral? How can we tell? What do language, politeness, and persuasion have to do with robot morality? To answer these questions and more The Radical AI Podcast welcomes Dr. Tom Williams to the show.
Tom Williams is an Assistant Professor of Computer Science at the Colorado School of Mines, where he directs the Mines Interactive Robotics Research Lab. Full show notes for this episode can be found on Radicalai.org
If you enjoy this episode please make sure to subscribe, submit a rating and review, and connect with us on twitter at twitter.com/radicalaipod ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>3845</itunes:duration>
                <itunes:episode>2</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
    <item>
        <title>Trailer</title>
        <itunes:title>Trailer</itunes:title>
        <link>https://radicalai.podbean.com/e/trailer-radical-ai-podcast/</link>
                    <comments>https://radicalai.podbean.com/e/trailer-radical-ai-podcast/#comments</comments>        <pubDate>Sun, 29 Mar 2020 11:37:44 -0600</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/70297e24-b62c-5959-a508-1e0532b6182a</guid>
                                    <description><![CDATA[<p>The Radical AI Podcast will officially launch on 4/10/20!  </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>The Radical AI Podcast will officially launch on 4/10/20!  </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ue2baa/trailer-ableton-draft2.mp3" length="4155558" type="audio/mpeg"/>
        <itunes:summary><![CDATA[The Radical AI Podcast will officially launch on 4/10/20!  ]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>103</itunes:duration>
                        <itunes:episodeType>trailer</itunes:episodeType>
            </item>
    <item>
        <title>Welcome to Radical AI!</title>
        <itunes:title>Welcome to Radical AI!</itunes:title>
        <link>https://radicalai.podbean.com/e/1-welcome-to-radical-ai/</link>
                    <comments>https://radicalai.podbean.com/e/1-welcome-to-radical-ai/#comments</comments>        <pubDate>Thu, 05 Mar 2020 16:26:27 -0700</pubDate>
        <guid isPermaLink="false">radicalai.podbean.com/3b47f1c2-869b-5d0f-a293-6b3e8e222aa9</guid>
                                    <description><![CDATA[<p>Who are we? Why do we exist? What is our origin story? Join hosts Dylan and Jess as they begin from the beginning: just what is Radical AI anyway?</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Who are we? Why do we exist? What is our origin story? Join hosts Dylan and Jess as they begin from the beginning: just what is Radical AI anyway?</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ksafjm/DylJess_Ep_1_mixdown.mp3" length="36253855" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Who are we? Why do we exist? What is our origin story? Join hosts Dylan and Jess as they begin from the beginning: just what is Radical AI anyway?]]></itunes:summary>
        <itunes:author>Radical AI</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1510</itunes:duration>
                <itunes:episode>1</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
            </item>
</channel>
</rss>
