<?xml version="1.0" encoding="UTF-8"?><!-- generator="podbean/5.5" -->
<rss version="2.0"
     xmlns:content="http://purl.org/rss/1.0/modules/content/"
     xmlns:wfw="http://wellformedweb.org/CommentAPI/"
     xmlns:dc="http://purl.org/dc/elements/1.1/"
     xmlns:atom="http://www.w3.org/2005/Atom"
     xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd"
     xmlns:googleplay="http://www.google.com/schemas/play-podcasts/1.0"
     xmlns:spotify="http://www.spotify.com/ns/rss"
     xmlns:podcast="https://podcastindex.org/namespace/1.0"
    xmlns:media="http://search.yahoo.com/mrss/">

<channel>
    <title>GenAI Learner</title>
    <atom:link href="https://feed.podbean.com/genai-learner/feed.xml" rel="self" type="application/rss+xml"/>
    <link>https://genai-learner.podbean.com</link>
    <description><![CDATA[<p><span>Dive deep into the exciting realm of Generative AI without the jargon! 🚀 Here, we transform the latest GenAI technologies – sourced from pioneering research papers and top blogs – into easy-to-follow podcast discussions. Join our community of AI enthusiasts, learn something new every week, and become a GenAI expert with us!</span></p>]]></description>
    <pubDate>Wed, 18 Mar 2026 23:33:12 -0300</pubDate>
    <generator>https://podbean.com/?v=5.5</generator>
    <language>en</language>
        <copyright>Copyright 2025 All rights reserved.</copyright>
    <category>Technology</category>
    <ttl>1440</ttl>
    <itunes:type>episodic</itunes:type>
          <itunes:summary>Dive deep into the exciting realm of Generative AI without the jargon! 🚀 Here, we transform the latest GenAI technologies – sourced from pioneering research papers and top blogs – into easy-to-follow podcast discussions. Join our community of AI enthusiasts, learn something new every week, and become a GenAI expert with us!</itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
<itunes:category text="Technology" />
    <itunes:owner>
        <itunes:name>hogarthian.art</itunes:name>
            </itunes:owner>
    	<itunes:block>No</itunes:block>
	<itunes:explicit>false</itunes:explicit>
	<itunes:new-feed-url>https://feed.podbean.com/genai-learner/feed.xml</itunes:new-feed-url>
    <itunes:image href="https://pbcdn1.podbean.com/imglogo/image-logo/21502942/GenAI_Learner_Icon_1500_x_1500_px_1_78gbc.png" />
    <image>
        <url>https://pbcdn1.podbean.com/imglogo/image-logo/21502942/GenAI_Learner_Icon_1500_x_1500_px_1_78gbc.png</url>
        <title>GenAI Learner</title>
        <link>https://genai-learner.podbean.com</link>
        <width>144</width>
        <height>144</height>
    </image>
    <item>
        <title>Beyond Singletasking: Building an Operating System for Your GPU</title>
        <itunes:title>Beyond Singletasking: Building an Operating System for Your GPU</itunes:title>
        <link>https://genai-learner.podbean.com/e/beyond-singletasking-building-an-operating-system-for-your-gpu/</link>
                    <comments>https://genai-learner.podbean.com/e/beyond-singletasking-building-an-operating-system-for-your-gpu/#comments</comments>        <pubDate>Wed, 18 Mar 2026 23:33:12 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/dd4e648d-ad72-3da9-9971-f9a842b11966</guid>
                                    <description><![CDATA[<p>Tired of wasted compute? UC Berkeley is addressing the inefficiencies of exclusive GPU access by proposing a unified resource management layer to enable multitasking, potentially reclaiming the 90% of resources often left idle during inference—explained in plain English on the GenAI learner podcast.

Paper: https://arxiv.org/abs/2508.08448</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Tired of wasted compute? UC Berkeley is addressing the inefficiencies of exclusive GPU access by proposing a unified resource management layer to enable multitasking, potentially reclaiming the 90% of resources often left idle during inference—explained in plain English on the GenAI learner podcast.<br>
<br>
Paper: https://arxiv.org/abs/2508.08448</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/sdt3vm66m9b57w2m/An_Operating_System_for_GPU_Multitasking.mp3" length="19776919" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Tired of wasted compute? UC Berkeley is addressing the inefficiencies of exclusive GPU access by proposing a unified resource management layer to enable multitasking, potentially reclaiming the 90% of resources often left idle during inference—explained in plain English on the GenAI learner podcast.
Paper: https://arxiv.org/abs/2508.08448]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1236</itunes:duration>
                <itunes:episode>29</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/kvcached-simply-explained-square-small.jpg" />    </item>
    <item>
        <title>Scaling AI: Think Operators, Not Models</title>
        <itunes:title>Scaling AI: Think Operators, Not Models</itunes:title>
        <link>https://genai-learner.podbean.com/e/scaling-ai-think-operators-not-models/</link>
                    <comments>https://genai-learner.podbean.com/e/scaling-ai-think-operators-not-models/#comments</comments>        <pubDate>Sat, 15 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/f25878f8-1d3a-3d6f-b355-dafdc0c33865</guid>
                                    <description><![CDATA[<p>Scaling large AI models to meet dynamic traffic is slow and leads to significant resource waste. Researchers at Microsoft Azure Research and Rice University are rethinking this process, finding that scaling the entire model as a monolith is inefficient. Their breakthrough, "operator-level autoscaling," scales just the specific bottleneck parts (operators) of the model instead of the whole thing. This new approach is far more efficient, preserving performance while using up to 40% fewer GPUs and 35% less energy. </p>
<p>Arxiv: <a href='https://arxiv.org/abs/2511.02248'>https://arxiv.org/abs/2511.02248</a> </p>
<p>The GenAI Learner podcast explains this new, efficient approach in simple terms.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Scaling large AI models to meet dynamic traffic is slow and leads to significant resource waste. Researchers at Microsoft Azure Research and Rice University are rethinking this process, finding that scaling the entire model as a monolith is inefficient. Their breakthrough, "operator-level autoscaling," scales just the specific bottleneck <em>parts</em> (operators) of the model instead of the whole thing. This new approach is far more efficient, preserving performance while using up to 40% fewer GPUs and 35% less energy. </p>
<p>Arxiv: <a href='https://arxiv.org/abs/2511.02248'>https://arxiv.org/abs/2511.02248</a> </p>
<p>The GenAI Learner podcast explains this new, efficient approach in simple terms.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/fqzegwx8d83jvt4r/Scaling_LLMs_Beyond_The_Monolith.mp3" length="11587009" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Scaling large AI models to meet dynamic traffic is slow and leads to significant resource waste. Researchers at Microsoft Azure Research and Rice University are rethinking this process, finding that scaling the entire model as a monolith is inefficient. Their breakthrough, "operator-level autoscaling," scales just the specific bottleneck parts (operators) of the model instead of the whole thing. This new approach is far more efficient, preserving performance while using up to 40% fewer GPUs and 35% less energy. 
Arxiv: https://arxiv.org/abs/2511.02248 
The GenAI Learner podcast explains this new, efficient approach in simple terms.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>724</itunes:duration>
                <itunes:episode>28</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/rethinking_ai_autoscaling-workflow.png" />    </item>
    <item>
        <title>Can AI Learn Like Humans? The Novel Games Benchmark</title>
        <itunes:title>Can AI Learn Like Humans? The Novel Games Benchmark</itunes:title>
        <link>https://genai-learner.podbean.com/e/can-ai-learn-like-humans-the-novel-games-benchmark/</link>
                    <comments>https://genai-learner.podbean.com/e/can-ai-learn-like-humans-the-novel-games-benchmark/#comments</comments>        <pubDate>Thu, 13 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/fee1757d-81ff-3627-a0fc-1523fcb74191</guid>
                                    <description><![CDATA[<p>Researchers at MIT and Harvard argue that true intelligence requires constructing internal world models, proposing a generative game benchmark to prove if AI can adapt to unseen environments without millions of training steps—tune into GenAI Learner for the details.

https://arxiv.org/pdf/2507.12821</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Researchers at MIT and Harvard argue that true intelligence requires constructing internal world models, proposing a generative game benchmark to prove if AI can adapt to unseen environments without millions of training steps—tune into GenAI Learner for the details.<br>
<br>
https://arxiv.org/pdf/2507.12821</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/zw95a26gfnx2czfy/Building_Adaptive_AI_World_Models.mp3" length="11976965" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Researchers at MIT and Harvard argue that true intelligence requires constructing internal world models, proposing a generative game benchmark to prove if AI can adapt to unseen environments without millions of training steps—tune into GenAI Learner for the details.
https://arxiv.org/pdf/2507.12821]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>748</itunes:duration>
                <itunes:episode>27</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/AdaptiveWorldModel-keycomponents.png" />    </item>
    <item>
        <title>The Surprising Limits of RL in LLMs: Why Optimization Kills Deep Reasoning Capacity</title>
        <itunes:title>The Surprising Limits of RL in LLMs: Why Optimization Kills Deep Reasoning Capacity</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-surprising-limits-of-rl-in-llms-why-optimization-kills-deep-reasoning-capacity/</link>
                    <comments>https://genai-learner.podbean.com/e/the-surprising-limits-of-rl-in-llms-why-optimization-kills-deep-reasoning-capacity/#comments</comments>        <pubDate>Wed, 12 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/26b0b863-108a-3493-8ed9-973b3f78ead6</guid>
                                    <description><![CDATA[<p>The Surprising Limits of RL in LLM Reasoning</p>
<p>Arxiv: https://arxiv.org/pdf/2504.13837</p>
<p>The promise of RL for LLM growth hits a wall: Tsinghua University's study shows RLVR only improves efficiency but is bounded by and does not elicit novel reasoning in base models—get the non-technical scoop on the "GenAI learner" podcast.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>The Surprising Limits of RL in LLM Reasoning</p>
<p>Arxiv: https://arxiv.org/pdf/2504.13837</p>
<p>The promise of RL for LLM growth hits a wall: Tsinghua University's study shows RLVR only improves efficiency but is bounded by and does not elicit novel reasoning in base models—get the non-technical scoop on the "GenAI learner" podcast.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/nia4454zc6wm2tpn/The_Surprising_Limits_of_RL_in_LLMs_Why_Optimization_Kills_Dee9puhe.mp3" length="13701047" type="audio/mpeg"/>
        <itunes:summary><![CDATA[The Surprising Limits of RL in LLM Reasoning
Arxiv: https://arxiv.org/pdf/2504.13837
The promise of RL for LLM growth hits a wall: Tsinghua University's study shows RLVR only improves efficiency but is bounded by and does not elicit novel reasoning in base models—get the non-technical scoop on the "GenAI learner" podcast.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>856</itunes:duration>
                <itunes:episode>26</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/RLVR-training-workflow2.png" />    </item>
    <item>
        <title>Trillion-Parameter Failure: How Tiny Recursion Models Beat GPT-4 on Structured Reasoning with 0.01% the Scale</title>
        <itunes:title>Trillion-Parameter Failure: How Tiny Recursion Models Beat GPT-4 on Structured Reasoning with 0.01% the Scale</itunes:title>
        <link>https://genai-learner.podbean.com/e/trillion-parameter-failure-how-tiny-recursion-models-beat-gpt-4-on-structured-reasoning-with-001-the-scale/</link>
                    <comments>https://genai-learner.podbean.com/e/trillion-parameter-failure-how-tiny-recursion-models-beat-gpt-4-on-structured-reasoning-with-001-the-scale/#comments</comments>        <pubDate>Tue, 11 Nov 2025 08:55:55 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/975da5f1-ad76-35d1-8d51-8d3d177c0c15</guid>
                                    <description><![CDATA[<p>Research from Samsung SAIL Montréal introduces the Tiny Recursive Model (TRM), which uses a single, 2-layer network to outperform massive LLMs on tough puzzles like ARC-AGI. </p>
<p>Arxiv: <a href='https://arxiv.org/pdf/2510.04871'>https://arxiv.org/pdf/2510.04871</a> </p>
<p>Hear the simple breakdown on GenAI learner!</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Research from Samsung SAIL Montréal introduces the Tiny Recursive Model (TRM), which uses a single, 2-layer network to outperform massive LLMs on tough puzzles like ARC-AGI. </p>
<p>Arxiv: <a href='https://arxiv.org/pdf/2510.04871'>https://arxiv.org/pdf/2510.04871</a> </p>
<p>Hear the simple breakdown on GenAI learner!</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/aqzx6zmdkf95ra2d/Trillion-Parameter_Failure_How_Tiny_Recursion_Models_Beat_GPT-9l37n.mp3" length="18982379" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Research from Samsung SAIL Montréal introduces the Tiny Recursive Model (TRM), which uses a single, 2-layer network to outperform massive LLMs on tough puzzles like ARC-AGI. 
Arxiv: https://arxiv.org/pdf/2510.04871 
Hear the simple breakdown on GenAI learner!]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1186</itunes:duration>
                <itunes:episode>25</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/TRM-Training-Workflow.png" />    </item>
    <item>
        <title>The LLM Committee: Why 182,000 AI Models Aren't Enough and How Ensembles Beat the Single Perfect Oracle?</title>
        <itunes:title>The LLM Committee: Why 182,000 AI Models Aren't Enough and How Ensembles Beat the Single Perfect Oracle?</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-llm-commitee-why-182000-ai-models-arent-enough-and-how-ensembles-beat-the-single-perfect-oracle/</link>
                    <comments>https://genai-learner.podbean.com/e/the-llm-commitee-why-182000-ai-models-arent-enough-and-how-ensembles-beat-the-single-perfect-oracle/#comments</comments>        <pubDate>Mon, 10 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/e4fa4176-b350-3e38-a34c-33ddfa11e9bb</guid>
                                    <description><![CDATA[<p>Ensemble LLMs: The Power of Multiple AI Minds</p>
<p>Arxiv: <a href='https://arxiv.org/pdf/2502.18036'>https://arxiv.org/pdf/2502.18036</a> </p>
<p>
The LLM Committee: Why 182,000 AI Models Aren't Enough and How Ensembles Beat the Single Perfect Oracle? Why rely on one LLM when you can use many? Beihang University's survey on LLM Ensemble details how leveraging individual model strengths with multiple LLMs leads to better results. </p>
<p>
Get the simple explanation on GenAI Learner.

</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Ensemble LLMs: The Power of Multiple AI Minds</p>
<p>Arxiv: <a href='https://arxiv.org/pdf/2502.18036'>https://arxiv.org/pdf/2502.18036</a> </p>
<p><br>
The LLM Committee: Why 182,000 AI Models Aren't Enough and How Ensembles Beat the Single Perfect Oracle? Why rely on one LLM when you can use many? Beihang University's survey on LLM Ensemble details how leveraging individual model strengths with multiple LLMs leads to better results. </p>
<p><br>
Get the simple explanation on <em>GenAI Learner</em>.<br>
<br>
</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/8n6qt32fgf2xmsr8/The_LLM_Committee_Why_182_000_AI_Models_Aren_t_Enough_and_How_7qdq7.mp3" length="17725576" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Ensemble LLMs: The Power of Multiple AI Minds
Arxiv: https://arxiv.org/pdf/2502.18036 
The LLM Committee: Why 182,000 AI Models Aren't Enough and How Ensembles Beat the Single Perfect Oracle? Why rely on one LLM when you can use many? Beihang University's survey on LLM Ensemble details how leveraging individual model strengths with multiple LLMs leads to better results. 
Get the simple explanation on GenAI Learner.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1107</itunes:duration>
                <itunes:episode>24</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/LLM-Ensemble-keycomponents.png" />    </item>
    <item>
        <title>TransferEngine Deep Dive: How Unordered RDMA Breaks Vendor Lock?</title>
        <itunes:title>TransferEngine Deep Dive: How Unordered RDMA Breaks Vendor Lock?</itunes:title>
        <link>https://genai-learner.podbean.com/e/transferengine-deep-dive-how-unordered-rdma-breaks-vendor-lock/</link>
                    <comments>https://genai-learner.podbean.com/e/transferengine-deep-dive-how-unordered-rdma-breaks-vendor-lock/#comments</comments>        <pubDate>Sun, 09 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/5165987f-14bf-30bd-a651-27205f29a982</guid>
                                    <description><![CDATA[<p>Cloud wars over custom hardware? Perplexity AI solved it. 
Discover the TransferEngine provides a portable, vendor-agnostic RDMA point-to-point communication interface for LLM systems, avoiding hardware lock-in with a simple breakdown on the GenAI learner podcast.</p>
<p>
Arxiv: <a href='https://arxiv.org/abs/2510.27656'>https://arxiv.org/abs/2510.27656</a> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Cloud wars over custom hardware? Perplexity AI solved it. <br>
Discover the TransferEngine provides a portable, vendor-agnostic RDMA point-to-point communication interface for LLM systems, avoiding hardware lock-in with a simple breakdown on the GenAI learner podcast.</p>
<p><br>
Arxiv: <a href='https://arxiv.org/abs/2510.27656'>https://arxiv.org/abs/2510.27656</a> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/nueip7968hwjdhwa/TransferEngine_Deep_Dive_How_Unordered_RDMA_Breaks_Vendor_Lock6q5ik.mp3" length="14345958" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Cloud wars over custom hardware? Perplexity AI solved it. Discover the TransferEngine provides a portable, vendor-agnostic RDMA point-to-point communication interface for LLM systems, avoiding hardware lock-in with a simple breakdown on the GenAI learner podcast.
Arxiv: https://arxiv.org/abs/2510.27656 ]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>896</itunes:duration>
                <itunes:episode>23</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/TransferEngine-keycomponents.png" />    </item>
    <item>
        <title>PaperCoder Unlocked: How Multi-Agent AI Solves Science Reproducibility</title>
        <itunes:title>PaperCoder Unlocked: How Multi-Agent AI Solves Science Reproducibility</itunes:title>
        <link>https://genai-learner.podbean.com/e/papercoder-unlocked-how-multi-agent-ai-solves-science-reproducibility/</link>
                    <comments>https://genai-learner.podbean.com/e/papercoder-unlocked-how-multi-agent-ai-solves-science-reproducibility/#comments</comments>        <pubDate>Sat, 08 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/f063f778-a0ae-3371-8107-2aa4795eb8b4</guid>
                                    <description><![CDATA[<p>Straight from KAIST, the revolutionary PaperCoder automates functional code generation from raw machine learning papers, and the "GenAI learner" podcast breaks down this multi-agent LLM framework simply.

Arxiv: https://arxiv.org/abs/2504.17192</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Straight from KAIST, the revolutionary PaperCoder automates functional code generation from raw machine learning papers, and the "GenAI learner" podcast breaks down this multi-agent LLM framework simply.<br>
<br>
Arxiv: https://arxiv.org/abs/2504.17192</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ysxkrw7uhifzqu7r/PaperCoder_Unlocked_How_Multi-Agent_AI_Solves_Science_s_Reprodbsjzf.mp3" length="12932420" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Straight from KAIST, the revolutionary PaperCoder automates functional code generation from raw machine learning papers, and the "GenAI learner" podcast breaks down this multi-agent LLM framework simply.
Arxiv: https://arxiv.org/abs/2504.17192]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>808</itunes:duration>
                <itunes:episode>22</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/papercoder-system-component.png" />    </item>
    <item>
        <title>AXIOM: How Gradient Free AI Smashes Deep Reinforcement Learning</title>
        <itunes:title>AXIOM: How Gradient Free AI Smashes Deep Reinforcement Learning</itunes:title>
        <link>https://genai-learner.podbean.com/e/axiom-how-gradient-free-ai-smashes-deep-reinforcement-learning/</link>
                    <comments>https://genai-learner.podbean.com/e/axiom-how-gradient-free-ai-smashes-deep-reinforcement-learning/#comments</comments>        <pubDate>Tue, 04 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/5f537d88-393c-3b65-aa49-baf53dddb330</guid>
                                    <description><![CDATA[<p>How to Learn Games in Minutes (No NNs!)</p>
<p>Researchers at VERSES AI built a new AI agent that masters games in minutes without using neural networks or gradient optimization. </p>
<p>Arxiv: https://arxiv.org/abs/2505.24784</p>
<p>The GenAI Learner podcast breaks down how this "gradient-free" method works.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>How to Learn Games in Minutes (No NNs!)</p>
<p>Researchers at VERSES AI built a new AI agent that masters games in minutes without using neural networks or gradient optimization. </p>
<p>Arxiv: https://arxiv.org/abs/2505.24784</p>
<p>The GenAI Learner podcast breaks down how this "gradient-free" method works.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/4gfyxy79734set7v/AXIOM_How_Gradient-Free_AI_Smashes_Deep_Reinforcement_Learning8tlfn.mp3" length="17818780" type="audio/mpeg"/>
        <itunes:summary><![CDATA[How to Learn Games in Minutes (No NNs!)
Researchers at VERSES AI built a new AI agent that masters games in minutes without using neural networks or gradient optimization. 
Arxiv: https://arxiv.org/abs/2505.24784
The GenAI Learner podcast breaks down how this "gradient-free" method works.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1113</itunes:duration>
                <itunes:episode>21</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/AXIOM-workflow.png" />    </item>
    <item>
        <title>Meta’s COCONUT: Reasoning Without Words</title>
        <itunes:title>Meta’s COCONUT: Reasoning Without Words</itunes:title>
        <link>https://genai-learner.podbean.com/e/meta-s-coconut-reasoning-without-words/</link>
                    <comments>https://genai-learner.podbean.com/e/meta-s-coconut-reasoning-without-words/#comments</comments>        <pubDate>Mon, 03 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/f26d9650-98f2-35f0-91a7-e47940ae6922</guid>
                                    <description><![CDATA[<p>Researchers at Meta just taught LLMs to reason without language, letting them explore multiple paths simultaneously. 
Arxiv: <a href='https://arxiv.org/abs/2412.06769v1'>https://arxiv.org/abs/2412.06769v1</a>
 
The GenAI Learner podcast breaks down how this "COCONUT" method works in simple terms.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Researchers at Meta just taught LLMs to reason without language, letting them explore multiple paths simultaneously. <br>
Arxiv: <a href='https://arxiv.org/abs/2412.06769v1'>https://arxiv.org/abs/2412.06769v1</a><br>
 <br>
The GenAI Learner podcast breaks down how this "COCONUT" method works in simple terms.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/5mr8y43mvqc6q6ez/coconut.mp3" length="14819087" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Researchers at Meta just taught LLMs to reason without language, letting them explore multiple paths simultaneously. Arxiv: https://arxiv.org/abs/2412.06769v1 The GenAI Learner podcast breaks down how this "COCONUT" method works in simple terms.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>926</itunes:duration>
                <itunes:episode>20</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/coconut-workflow-1.png" />    </item>
    <item>
        <title>Evolve Your AI Agent Without Gradients: EvoTest</title>
        <itunes:title>Evolve Your AI Agent Without Gradients: EvoTest</itunes:title>
        <link>https://genai-learner.podbean.com/e/evolve-your-ai-agent-without-gradients-evotest/</link>
                    <comments>https://genai-learner.podbean.com/e/evolve-your-ai-agent-without-gradients-evotest/#comments</comments>        <pubDate>Sun, 02 Nov 2025 07:00:00 -0400</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/5b00050a-f210-3a04-975e-0c90c2388e48</guid>
                                    <description><![CDATA[<p>A new system from Microsoft Research shows how EvoTest evolves an agent's entire configuration using narrative transcript analysis, outperforming gradient-based and reflection methods. 
Arxiv: <a href='https://arxiv.org/pdf/2510.13220'>https://arxiv.org/pdf/2510.13220</a> 
Get the simple breakdown on GenAI learner!</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>A new system from Microsoft Research shows how EvoTest evolves an agent's entire configuration using narrative transcript analysis, outperforming gradient-based and reflection methods. <br>
Arxiv: <a href='https://arxiv.org/pdf/2510.13220'>https://arxiv.org/pdf/2510.13220</a> <br>
Get the simple breakdown on GenAI learner!</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/wcmg94dif67sdy6j/EvoTest.mp3" length="15639959" type="audio/mpeg"/>
        <itunes:summary><![CDATA[A new system from Microsoft Research shows how EvoTest evolves an agent's entire configuration using narrative transcript analysis, outperforming gradient-based and reflection methods. Arxiv: https://arxiv.org/pdf/2510.13220 Get the simple breakdown on GenAI learner!]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>977</itunes:duration>
                <itunes:episode>19</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/EvoTest-key-components.png" />    </item>
    <item>
        <title>Smarter, Cheaper AGI: Beating the $1 Million AI Challenge</title>
        <itunes:title>Smarter, Cheaper AGI: Beating the $1 Million AI Challenge</itunes:title>
        <link>https://genai-learner.podbean.com/e/smarter-cheaper-agi-beating-the-1-million-ai-challenge/</link>
                    <comments>https://genai-learner.podbean.com/e/smarter-cheaper-agi-beating-the-1-million-ai-challenge/#comments</comments>        <pubDate>Sat, 01 Nov 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/01e3b6b5-2cbc-3072-9003-0b80c2a49957</guid>
                                    <description><![CDATA[<p>An individual's DreamCoder-inspired method proved to be the most performance-cost efficient approach to the complex ARC-AGI Prize challenge.</p>
<p>Original Article: <a href='https://substack.com/home/post/p-172998849'>https://substack.com/home/post/p-172998849</a> </p>
<p>Understand the details with our non-technical explanation on GenAI Learner.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>An individual's DreamCoder-inspired method proved to be the most performance-cost efficient approach to the complex ARC-AGI Prize challenge.</p>
<p>Original Article: <a href='https://substack.com/home/post/p-172998849'>https://substack.com/home/post/p-172998849</a> </p>
<p>Understand the details with our non-technical explanation on GenAI Learner.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/i4ui6c9eq2e84n5p/eric-pang-agi.mp3" length="14284100" type="audio/mpeg"/>
        <itunes:summary><![CDATA[An individual's DreamCoder-inspired method proved to be the most performance-cost efficient approach to the complex ARC-AGI Prize challenge.
Original Article: https://substack.com/home/post/p-172998849 
Understand the details with our non-technical explanation on GenAI Learner.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>892</itunes:duration>
                <itunes:episode>18</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/eric-pang-sys-overview.png" />    </item>
    <item>
        <title>English vs. Python: How an AI Beat the ARC-AGI Test</title>
        <itunes:title>English vs. Python: How an AI Beat the ARC-AGI Test</itunes:title>
        <link>https://genai-learner.podbean.com/e/english-vs-python-how-an-ai-beat-the-arc-agi-test-1761827912/</link>
                    <comments>https://genai-learner.podbean.com/e/english-vs-python-how-an-ai-beat-the-arc-agi-test-1761827912/#comments</comments>        <pubDate>Fri, 31 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/de64c39c-976b-31d4-a279-14e57b0abf97</guid>
                                    <description><![CDATA[<p>Jeremy Berman's Substack reveals a state-of-the-art 79.6% ARC-AGI score by using an evolutionary process that refines plain English instructions instead of code—tune into GenAI learner for a simple breakdown.

<a href='https://jeremyberman.substack.com/p/how-i-got-the-highest-score-on-arc-agi-again'>https://jeremyberman.substack.com/p/how-i-got-the-highest-score-on-arc-agi-again</a> </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Jeremy Berman's Substack reveals a state-of-the-art 79.6% ARC-AGI score by using an evolutionary process that refines plain English instructions instead of code—tune into GenAI learner for a simple breakdown.<br>
<br>
<a href='https://jeremyberman.substack.com/p/how-i-got-the-highest-score-on-arc-agi-again'>https://jeremyberman.substack.com/p/how-i-got-the-highest-score-on-arc-agi-again</a> </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/rrfa6gb39kjdw9zz/Natural_Language_Beats_Python.mp3" length="17236145" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Jeremy Berman's Substack reveals a state-of-the-art 79.6% ARC-AGI score by using an evolutionary process that refines plain English instructions instead of code—tune into GenAI learner for a simple breakdown. https://jeremyberman.substack.com/p/how-i-got-the-highest-score-on-arc-agi-again ]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1077</itunes:duration>
                <itunes:episode>17</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/evolutionary_test_time_workflow.png" />    </item>
    <item>
        <title>Evolving LLM Solutions: The ARC-AGI Breakthrough</title>
        <itunes:title>Evolving LLM Solutions: The ARC-AGI Breakthrough</itunes:title>
        <link>https://genai-learner.podbean.com/e/english-vs-python-how-an-ai-beat-the-arc-agi-test/</link>
                    <comments>https://genai-learner.podbean.com/e/english-vs-python-how-an-ai-beat-the-arc-agi-test/#comments</comments>        <pubDate>Thu, 30 Oct 2025 08:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/da788117-81ed-39dd-b32a-f7218e3b6e96</guid>
                                    <description><![CDATA[<p>Hear how Anthropic's Sonnet 3.5 smashed the ARC-AGI record by using Evolutionary Test-time Compute to overcome generalization limits. </p>
<p>Substack: <a href='https://jeremyberman.substack.com/p/how-i-got-a-record-536-on-arc-agi'>https://jeremyberman.substack.com/p/how-i-got-a-record-536-on-arc-agi</a> </p>
<p>Get the simple breakdown on the GenAI learner podcast.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Hear how Anthropic's Sonnet 3.5 smashed the ARC-AGI record by using Evolutionary Test-time Compute to overcome generalization limits. </p>
<p>Substack: <a href='https://jeremyberman.substack.com/p/how-i-got-a-record-536-on-arc-agi'>https://jeremyberman.substack.com/p/how-i-got-a-record-536-on-arc-agi</a> </p>
<p>Get the simple breakdown on the GenAI learner podcast.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/jifj7pxi69umwguu/Evolution_Time_Compute.mp3" length="19880573" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Hear how Anthropic's Sonnet 3.5 smashed the ARC-AGI record by using Evolutionary Test-time Compute to overcome generalization limits. 
Substack: https://jeremyberman.substack.com/p/how-i-got-a-record-536-on-arc-agi 
Get the simple breakdown on the GenAI learner podcast.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1242</itunes:duration>
                <itunes:episode>16</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/test-time-workflow.png" />    </item>
    <item>
        <title>Stop Measuring AI Skill, Start Measuring AGI Efficiency</title>
        <itunes:title>Stop Measuring AI Skill, Start Measuring AGI Efficiency</itunes:title>
        <link>https://genai-learner.podbean.com/e/stop-measuring-ai-skill-start-measuring-agi-efficiency/</link>
                    <comments>https://genai-learner.podbean.com/e/stop-measuring-ai-skill-start-measuring-agi-efficiency/#comments</comments>        <pubDate>Wed, 29 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/38d0fe49-0012-377a-ac73-5c8d24986ee0</guid>
                                    <description><![CDATA[<p>Google, Inc.'s François Chollet argues that we should stop measuring AI's performance and start measuring intelligence as the efficiency of skill acquisition over a range of tasks, accounting for priors and experience. </p>
<p> </p>
<p>Arxiv: <a href='https://arxiv.org/abs/1911.01547'>https://arxiv.org/abs/1911.01547</a> </p>
<p> </p>
<p>Get the simple breakdown on "GenAI learner."</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Google, Inc.'s François Chollet argues that we should stop measuring AI's performance and start measuring intelligence as the efficiency of skill acquisition over a range of tasks, accounting for priors and experience. </p>
<p> </p>
<p>Arxiv: <a href='https://arxiv.org/abs/1911.01547'>https://arxiv.org/abs/1911.01547</a> </p>
<p> </p>
<p>Get the simple breakdown on "GenAI learner."</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/ez2ykmvpf34tmr5z/Measuring_AI_Intelligence.mp3" length="18888756" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Google, Inc.'s François Chollet argues that we should stop measuring AI's performance and start measuring intelligence as the efficiency of skill acquisition over a range of tasks, accounting for priors and experience. 
 
Arxiv: https://arxiv.org/abs/1911.01547 
 
Get the simple breakdown on "GenAI learner."]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1180</itunes:duration>
                <itunes:episode>15</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/Measuring_AI_Intelligence_workflow.png" />    </item>
    <item>
        <title>The Benchmark That Broke AI's Best</title>
        <itunes:title>The Benchmark That Broke AI's Best</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-benchmark-that-broke-ais-best/</link>
                    <comments>https://genai-learner.podbean.com/e/the-benchmark-that-broke-ais-best/#comments</comments>        <pubDate>Tue, 28 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/3120ba39-7883-34b7-9482-e06cb6afd956</guid>
                                    <description><![CDATA[<p>The ARC Prize Foundation just dropped ARC-AGI-2, a new, harder AI benchmark designed to assess general fluid intelligence at higher cognitive complexity levels. </p>
<p>Arxiv:  <a href='https://arxiv.org/abs/2505.11831'>https://arxiv.org/abs/2505.11831</a> </p>
<p>Tune into GenAI learner for the simple breakdown of why it's so challenging for modern AI.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>The ARC Prize Foundation just dropped ARC-AGI-2, a new, harder AI benchmark designed to assess general fluid intelligence at higher cognitive complexity levels. </p>
<p>Arxiv:  <a href='https://arxiv.org/abs/2505.11831'>https://arxiv.org/abs/2505.11831</a> </p>
<p>Tune into GenAI learner for the simple breakdown of why it's so challenging for modern AI.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/nuxh3rucqbin8hn6/ARC-AGI-2.mp3" length="13892890" type="audio/mpeg"/>
        <itunes:summary><![CDATA[The ARC Prize Foundation just dropped ARC-AGI-2, a new, harder AI benchmark designed to assess general fluid intelligence at higher cognitive complexity levels. 
Arxiv:  https://arxiv.org/abs/2505.11831 
Tune into GenAI learner for the simple breakdown of why it's so challenging for modern AI.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>868</itunes:duration>
                <itunes:episode>14</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/arc-2025-workflow-1.png" />    </item>
    <item>
        <title>AGI's New Secret: Why Models Must Train on the Fly</title>
        <itunes:title>AGI's New Secret: Why Models Must Train on the Fly</itunes:title>
        <link>https://genai-learner.podbean.com/e/agis-new-secret-why-models-must-train-on-the-fly/</link>
                    <comments>https://genai-learner.podbean.com/e/agis-new-secret-why-models-must-train-on-the-fly/#comments</comments>        <pubDate>Mon, 27 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/8b664213-3f1a-34b6-93ab-d2951a9a0d60</guid>
                                    <description><![CDATA[<p>ARC Prize 2024 revealed that Test-Time Training (TTT) and program synthesis drove the state-of-the-art ARC-AGI score from 33% to 55.5%. </p>
<p>Arxiv: <a href='https://arxiv.org/abs/2412.04604'>https://arxiv.org/abs/2412.04604</a> </p>
<p>Tune into GenAI Learner for a simple, non-technical explanation of this breakthrough and what it means for AGI progress.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>ARC Prize 2024 revealed that Test-Time Training (TTT) and program synthesis drove the state-of-the-art ARC-AGI score from 33% to 55.5%. </p>
<p>Arxiv: <a href='https://arxiv.org/abs/2412.04604'>https://arxiv.org/abs/2412.04604</a> </p>
<p>Tune into GenAI Learner for a simple, non-technical explanation of this breakthrough and what it means for AGI progress.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/nhufpyudtxjn8bxv/arc-agi-2024.mp3" length="17135835" type="audio/mpeg"/>
        <itunes:summary><![CDATA[ARC Prize 2024 revealed that Test-Time Training (TTT) and program synthesis drove the state-of-the-art ARC-AGI score from 33% to 55.5%. 
Arxiv: https://arxiv.org/abs/2412.04604 
Tune into GenAI Learner for a simple, non-technical explanation of this breakthrough and what it means for AGI progress.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1070</itunes:duration>
                <itunes:episode>13</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/arc-agi-2024-blueprint.png" />    </item>
    <item>
        <title>LLMs Self-Verify with Just One Token: Introducing LaSeR</title>
        <itunes:title>LLMs Self-Verify with Just One Token: Introducing LaSeR</itunes:title>
        <link>https://genai-learner.podbean.com/e/llms-self-verify-with-just-one-token-introducing-laser/</link>
                    <comments>https://genai-learner.podbean.com/e/llms-self-verify-with-just-one-token-introducing-laser/#comments</comments>        <pubDate>Sun, 26 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/a3d9f3a9-e843-39b1-9da2-b9051888222c</guid>
                                    <description><![CDATA[<p>Researchers from Tencent and Renmin University of China discovered the reasoning reward equals a last-token self-rewarding score, a game-changer for efficient LLM verification—get the simple breakdown on GenAI Learner.</p>
<p>
Arxiv: <a href='https://www.arxiv.org/abs/2510.14943'>https://www.arxiv.org/abs/2510.14943</a>   </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Researchers from Tencent and Renmin University of China discovered the reasoning reward equals a last-token self-rewarding score, a game-changer for efficient LLM verification—get the simple breakdown on GenAI Learner.</p>
<p><br>
Arxiv: <a href='https://www.arxiv.org/abs/2510.14943'>https://www.arxiv.org/abs/2510.14943</a>   </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/nbtv9vb42xuqf6um/LaSeR.mp3" length="13563120" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Researchers from Tencent and Renmin University of China discovered the reasoning reward equals a last-token self-rewarding score, a game-changer for efficient LLM verification—get the simple breakdown on GenAI Learner.
Arxiv: https://www.arxiv.org/abs/2510.14943   ]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>847</itunes:duration>
                <itunes:episode>12</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/LaSeR_training.png" />    </item>
    <item>
        <title>The Accuracy Cliff: Why LLMs Fail Complex Questions</title>
        <itunes:title>The Accuracy Cliff: Why LLMs Fail Complex Questions</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-accuracy-cliff-why-llms-fail-complex-questions/</link>
                    <comments>https://genai-learner.podbean.com/e/the-accuracy-cliff-why-llms-fail-complex-questions/#comments</comments>        <pubDate>Sat, 25 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/156921a4-8434-3f1a-812e-ca845dc7bd30</guid>
                                    <description><![CDATA[<p>Wonder why LLMs struggle with multi-step logic? A new paper from MBZUAI shows the Fano-style accuracy upper bound proves single-pass LLM reasoning collapses when task complexity exceeds output capacity. 
Arxiv: <a href='https://arxiv.org/pdf/2509.21199'>https://arxiv.org/pdf/2509.21199</a> 
We break down the 'Accuracy Cliff' on GenAI learner.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Wonder why LLMs struggle with multi-step logic? A new paper from MBZUAI shows the Fano-style accuracy upper bound proves single-pass LLM reasoning collapses when task complexity exceeds output capacity. <br>
Arxiv: <a href='https://arxiv.org/pdf/2509.21199'>https://arxiv.org/pdf/2509.21199</a> <br>
We break down the 'Accuracy Cliff' on GenAI learner.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/p2xnegcasm8y3dpx/fano-style.mp3" length="19366065" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Wonder why LLMs struggle with multi-step logic? A new paper from MBZUAI shows the Fano-style accuracy upper bound proves single-pass LLM reasoning collapses when task complexity exceeds output capacity. Arxiv: https://arxiv.org/pdf/2509.21199 We break down the 'Accuracy Cliff' on GenAI learner.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1210</itunes:duration>
                <itunes:episode>11</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/fano-style-workflow-1.png" />    </item>
    <item>
        <title>Folding Context: How LLMs Solve Massive, Long-Horizon Tasks</title>
        <itunes:title>Folding Context: How LLMs Solve Massive, Long-Horizon Tasks</itunes:title>
        <link>https://genai-learner.podbean.com/e/folding-context-how-llms-solve-massive-long-horizon-tasks/</link>
                    <comments>https://genai-learner.podbean.com/e/folding-context-how-llms-solve-massive-long-horizon-tasks/#comments</comments>        <pubDate>Fri, 24 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/b328c515-b1b1-3054-96b5-5e4867876acb</guid>
                                    <description><![CDATA[<p>Engineers from ByteDance and Carnegie Mellon University just scaled LLM agents 10x with Context-Folding, a method that summarizes complex sub-tasks to manage memory. 
Arxiv: <a href='https://arxiv.org/abs/2510.11967'>https://arxiv.org/abs/2510.11967</a> 
Get the simple breakdown on GenAI Learner.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Engineers from ByteDance and Carnegie Mellon University just scaled LLM agents 10x with Context-Folding, a method that summarizes complex sub-tasks to manage memory. <br>
Arxiv: <a href='https://arxiv.org/abs/2510.11967'>https://arxiv.org/abs/2510.11967</a> <br>
Get the simple breakdown on GenAI Learner.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/5ihuu5imarnf33ff/Context_Folding.mp3" length="14973732" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Engineers from ByteDance and Carnegie Mellon University just scaled LLM agents 10x with Context-Folding, a method that summarizes complex sub-tasks to manage memory. Arxiv: https://arxiv.org/abs/2510.11967 Get the simple breakdown on GenAI Learner.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>935</itunes:duration>
                <itunes:episode>10</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/context-folding-agent.png" />    </item>
    <item>
        <title>RouterArena: The Great LLM Router Battle</title>
        <itunes:title>RouterArena: The Great LLM Router Battle</itunes:title>
        <link>https://genai-learner.podbean.com/e/routerarena-the-great-llm-router-battle/</link>
                    <comments>https://genai-learner.podbean.com/e/routerarena-the-great-llm-router-battle/#comments</comments>        <pubDate>Thu, 23 Oct 2025 12:39:32 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/cf3ec6d1-704c-3b1e-b321-d600436a15ad</guid>
                                    <description><![CDATA[<p>Researchers at Rice University have launched ROUTERARENA, the first-ever open platform for comparing and ranking different LLM routers. </p>
<p>Arxiv: <a href='https://arxiv.org/abs/2510.00202'>https://arxiv.org/abs/2510.00202</a>
</p>
<p>Listen to the GenAI Learner podcast to understand this new benchmark in simple terms.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Researchers at Rice University have launched ROUTERARENA, the first-ever open platform for comparing and ranking different LLM routers. </p>
<p>Arxiv: <a href='https://arxiv.org/abs/2510.00202'>https://arxiv.org/abs/2510.00202</a><br>
</p>
<p>Listen to the GenAI Learner podcast to understand this new benchmark in simple terms.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/j8qa5ctfx2cafzzq/ROUTERARENA.mp3" length="13764158" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Researchers at Rice University have launched ROUTERARENA, the first-ever open platform for comparing and ranking different LLM routers. 
Arxiv: https://arxiv.org/abs/2510.00202
Listen to the GenAI Learner podcast to understand this new benchmark in simple terms.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>860</itunes:duration>
                <itunes:episode>9</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/RouterArena-diagram-1.png" />    </item>
    <item>
        <title>ScaleRL by Meta: Making AI Training Predictable</title>
        <itunes:title>ScaleRL by Meta: Making AI Training Predictable</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-title-of-scalerl/</link>
                    <comments>https://genai-learner.podbean.com/e/the-title-of-scalerl/#comments</comments>        <pubDate>Wed, 22 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/35d829a4-a8d0-3686-80a2-8c3091dc7e1a</guid>
                                    <description><![CDATA[<p>Researchers at Meta developed "ScaleRL," a groundbreaking recipe that makes LLM reinforcement learning training predictable, just like pre-training. 
Paper: <a href='https://arxiv.org/pdf/2510.13786'>https://arxiv.org/pdf/2510.13786</a></p>
<p>
Hear it broken down simply on the GenAI Learner podcast.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Researchers at Meta developed "ScaleRL," a groundbreaking recipe that makes LLM reinforcement learning training predictable, just like pre-training. <br>
Paper: <a href='https://arxiv.org/pdf/2510.13786'>https://arxiv.org/pdf/2510.13786</a></p>
<p><br>
Hear it broken down simply on the GenAI Learner podcast.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/b8d2nehsjq9wbmiu/ScaleRL.mp3" length="16228864" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Researchers at Meta developed "ScaleRL," a groundbreaking recipe that makes LLM reinforcement learning training predictable, just like pre-training. Paper: https://arxiv.org/pdf/2510.13786
Hear it broken down simply on the GenAI Learner podcast.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1014</itunes:duration>
                <itunes:episode>8</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/ScaleRL-workflow.png" /><podcast:transcript url="https://mcdn.podbean.com/mf/web/jdgntqbbuwfz5rar/ScaleRL-sixuj2-Optimized.vtt" type="text/vtt" /><podcast:chapters url="https://mcdn.podbean.com/mf/web/zaa7zmwgbdqqbdai/ScaleRL-sixuj2-Optimized_chapters.json" type="application/json" />    </item>
    <item>
        <title>82% GPU Savings by Alibaba: The Token-Level LLM Hack</title>
        <itunes:title>82% GPU Savings by Alibaba: The Token-Level LLM Hack</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-title-of-aegaeon/</link>
                    <comments>https://genai-learner.podbean.com/e/the-title-of-aegaeon/#comments</comments>        <pubDate>Tue, 21 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/6de67384-acef-3c4a-8b8c-fe2d0ba6b38d</guid>
                                    <description><![CDATA[<p>Stop wasting money on idle GPUs! Directly from the top-tier SOSP '25 conference, researchers from Peking University and Alibaba Group reveal how Aegaeon uses token-level auto-scaling to achieve an astounding 82% GPU resource saving in production.</p>
<p>Paper: <a href='https://ennanzhai.github.io/pub/sosp25-aegaeon.pdf'>https://ennanzhai.github.io/pub/sosp25-aegaeon.pdf</a> </p>
<p>
Get the simple breakdown on GenAI learner.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Stop wasting money on idle GPUs! Directly from the top-tier SOSP '25 conference, researchers from Peking University and Alibaba Group reveal how Aegaeon uses token-level auto-scaling to achieve an astounding 82% GPU resource saving in production.</p>
<p>Paper: <a href='https://ennanzhai.github.io/pub/sosp25-aegaeon.pdf'>https://ennanzhai.github.io/pub/sosp25-aegaeon.pdf</a> </p>
<p><br>
Get the simple breakdown on GenAI learner.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/dcgin7m758qiiteh/Aegaeon.mp3" length="16908465" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Stop wasting money on idle GPUs! Directly from the top-tier SOSP '25 conference, researchers from Peking University and Alibaba Group reveal how Aegaeon uses token-level auto-scaling to achieve an astounding 82% GPU resource saving in production.
Paper: https://ennanzhai.github.io/pub/sosp25-aegaeon.pdf 
Get the simple breakdown on GenAI learner.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1056</itunes:duration>
                <itunes:episode>7</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/aegaeon-workflow.png" /><podcast:transcript url="https://mcdn.podbean.com/mf/web/8d57tqavi29txhib/Aegaeon-xitqgz-Optimized.vtt" type="text/vtt" /><podcast:chapters url="https://mcdn.podbean.com/mf/web/pyf4e3i5kjzhbzcx/Aegaeon-xitqgz-Optimized_chapters.json" type="application/json" />    </item>
    <item>
        <title>Stop Guessing: Routing LLMs by Human Preference</title>
        <itunes:title>Stop Guessing: Routing LLMs by Human Preference</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-title-of-arch-router/</link>
                    <comments>https://genai-learner.podbean.com/e/the-title-of-arch-router/#comments</comments>        <pubDate>Mon, 20 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/7ff9ac87-9d04-3ab1-9a49-dfc3fff4eecd</guid>
                                    <description><![CDATA[<p>Researchers at Katanemo Labs, Inc. built Arch-Router, a compact 1.5B model that aligns LLM routing with subjective human preferences using a Domain-Action Taxonomy. Hugging Face's CTO spotlighted it for their new Omni-router! 
Arxiv: <a href='https://arxiv.org/abs/2506.16655'>https://arxiv.org/abs/2506.16655</a>

Get the simple breakdown on the GenAI learner podcast. </p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Researchers at Katanemo Labs, Inc. built Arch-Router, a compact 1.5B model that aligns LLM routing with subjective human preferences using a Domain-Action Taxonomy. Hugging Face's CTO spotlighted it for their new Omni-router! <br>
Arxiv: <a href='https://arxiv.org/abs/2506.16655'>https://arxiv.org/abs/2506.16655</a><br>
<br>
Get the simple breakdown on the GenAI learner podcast. </p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/h79e5q6y8rvcupue/arch-router.mp3" length="16495104" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Researchers at Katanemo Labs, Inc. built Arch-Router, a compact 1.5B model that aligns LLM routing with subjective human preferences using a Domain-Action Taxonomy. Hugging Face's CTO spotlighted it for their new Omni-router! Arxiv: https://arxiv.org/abs/2506.16655 Get the simple breakdown on the GenAI learner podcast. ]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1030</itunes:duration>
                <itunes:episode>6</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/arch-router-workflow.png" /><podcast:transcript url="https://mcdn.podbean.com/mf/web/vx2m5zputyhnmvg5/arch-router-83735m-Optimized.vtt" type="text/vtt" /><podcast:chapters url="https://mcdn.podbean.com/mf/web/9494zu8qh8nzryjh/arch-router-83735m-Optimized_chapters.json" type="application/json" />    </item>
    <item>
        <title>LLMs Get a Smart Router: Multi-Step Coordination via RL</title>
        <itunes:title>LLMs Get a Smart Router: Multi-Step Coordination via RL</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-title-of-router-r1/</link>
                    <comments>https://genai-learner.podbean.com/e/the-title-of-router-r1/#comments</comments>        <pubDate>Sun, 19 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/da4e2cea-a8f6-33d3-a1ae-578bc0f4d5d0</guid>
                                    <description><![CDATA[<p>Researchers at the University of Illinois at Urbana-Champaign built Router-R1, an RL-based framework that teaches LLMs multi-round routing and aggregation for superior, cost-aware complex task solving.</p>
<p>Arxiv: <a href='https://arxiv.org/pdf/2506.09033'>https://arxiv.org/pdf/2506.09033</a> </p>
<p>Get the simple, non-technical breakdown on the "GenAI learner" podcast!</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Researchers at the University of Illinois at Urbana-Champaign built Router-R1, an RL-based framework that teaches LLMs multi-round routing and aggregation for superior, cost-aware complex task solving.</p>
<p>Arxiv: <a href='https://arxiv.org/pdf/2506.09033'>https://arxiv.org/pdf/2506.09033</a> </p>
<p>Get the simple, non-technical breakdown on the "GenAI learner" podcast!</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/tcti85kcgf9japty/Router-R1.mp3" length="14548250" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Researchers at the University of Illinois at Urbana-Champaign built Router-R1, an RL-based framework that teaches LLMs multi-round routing and aggregation for superior, cost-aware complex task solving.
Arxiv: https://arxiv.org/pdf/2506.09033 
Get the simple, non-technical breakdown on the "GenAI learner" podcast!]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>909</itunes:duration>
                <itunes:episode>5</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/Router-R1-overview.png" /><podcast:transcript url="https://mcdn.podbean.com/mf/web/36vamtyqm87ax7fr/Router-R1-didgy3-Optimized.vtt" type="text/vtt" /><podcast:chapters url="https://mcdn.podbean.com/mf/web/3m869r8gsmv68dwv/Router-R1-didgy3-Optimized_chapters.json" type="application/json" />    </item>
    <item>
        <title>Teaching AI to Google Itself: The RL Way</title>
        <itunes:title>Teaching AI to Google Itself: The RL Way</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-title-of-search-r1/</link>
                    <comments>https://genai-learner.podbean.com/e/the-title-of-search-r1/#comments</comments>        <pubDate>Sat, 18 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/d6fdf40a-cd76-36bc-8770-ef525a70d0c8</guid>
                                    <description><![CDATA[<p>Discover how researchers from Google Cloud AI Research are pioneering new methods. SEARCH-R1 trains LLMs with reinforcement learning to autonomously generate multi-turn search queries. Get the non-technical breakdown on GenAI Learner.</p>
<p>
Arxiv: https://arxiv.org/abs/2503.09516</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Discover how researchers from Google Cloud AI Research are pioneering new methods. SEARCH-R1 trains LLMs with reinforcement learning to autonomously generate multi-turn search queries. Get the non-technical breakdown on <em>GenAI Learner</em>.</p>
<p><br>
Arxiv: https://arxiv.org/abs/2503.09516</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/4fugdi9qebj3y6nb/Search-R1.mp3" length="14419518" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Discover how researchers from Google Cloud AI Research are pioneering new methods. SEARCH-R1 trains LLMs with reinforcement learning to autonomously generate multi-turn search queries. Get the non-technical breakdown on GenAI Learner.
Arxiv: https://arxiv.org/abs/2503.09516]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>901</itunes:duration>
                <itunes:episode>4</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/SearchR1-overview.png" /><podcast:transcript url="https://mcdn.podbean.com/mf/web/dx2yrkew8zbtu7b2/Search-R1-fgygjp-Optimized.vtt" type="text/vtt" /><podcast:chapters url="https://mcdn.podbean.com/mf/web/uvi4u9xkhni747fa/Search-R1-fgygjp-Optimized_chapters.json" type="application/json" />    </item>
    <item>
        <title>Agentic Context Engineering</title>
        <itunes:title>Agentic Context Engineering</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-title-of-ace/</link>
                    <comments>https://genai-learner.podbean.com/e/the-title-of-ace/#comments</comments>        <pubDate>Fri, 17 Oct 2025 07:00:00 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/1cee51b7-cb8c-30b3-832c-e77f35064d2e</guid>
                                    <description><![CDATA[<p>Arxiv: <a href='https://arxiv.org/abs/2510.04618'>https://arxiv.org/abs/2510.04618 </a></p>
<p>This recent paper from Stanford University and UC Berkeley introduces Agentic Context Engineering (ACE), a novel framework designed to enhance the performance of large language models (LLMs) in complex applications like agents and domain-specific reasoning by evolving their context, such as prompts and memory. ACE addresses key limitations of prior context adaptation methods, specifically brevity bias (where context becomes too concise and loses crucial details) and context collapse (where iterative rewriting erodes information over time), by treating contexts as comprehensive, evolving "playbooks." The framework uses a modular architecture—consisting of a Generator, Reflector, and Curator—to incrementally accumulate, refine, and organize strategies, which significantly boosts accuracy on benchmarks like AppWorld and financial analysis tasks while drastically reducing adaptation latency and cost compared to strong baselines. Overall, ACE demonstrates that detailed, self-improving contexts enable more scalable and efficient LLM systems.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>Arxiv: <a href='https://arxiv.org/abs/2510.04618'>https://arxiv.org/abs/2510.04618 </a></p>
<p>This recent paper from Stanford University and UC Berkeley introduces Agentic Context Engineering (ACE), a novel framework designed to enhance the performance of large language models (LLMs) in complex applications like agents and domain-specific reasoning by evolving their context, such as prompts and memory. ACE addresses key limitations of prior context adaptation methods, specifically brevity bias (where context becomes too concise and loses crucial details) and context collapse (where iterative rewriting erodes information over time), by treating contexts as comprehensive, evolving "playbooks." The framework uses a modular architecture—consisting of a Generator, Reflector, and Curator—to incrementally accumulate, refine, and organize strategies, which significantly boosts accuracy on benchmarks like AppWorld and financial analysis tasks while drastically reducing adaptation latency and cost compared to strong baselines. Overall, ACE demonstrates that detailed, self-improving contexts enable more scalable and efficient LLM systems.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/md7275mgdn3pam45/ACE.mp3" length="16606281" type="audio/mpeg"/>
        <itunes:summary><![CDATA[Arxiv: https://arxiv.org/abs/2510.04618 
This recent paper from Stanford University and UC Berkeley introduces Agentic Context Engineering (ACE), a novel framework designed to enhance the performance of large language models (LLMs) in complex applications like agents and domain-specific reasoning by evolving their context, such as prompts and memory. ACE addresses key limitations of prior context adaptation methods, specifically brevity bias (where context becomes too concise and loses crucial details) and context collapse (where iterative rewriting erodes information over time), by treating contexts as comprehensive, evolving "playbooks." The framework uses a modular architecture—consisting of a Generator, Reflector, and Curator—to incrementally accumulate, refine, and organize strategies, which significantly boosts accuracy on benchmarks like AppWorld and financial analysis tasks while drastically reducing adaptation latency and cost compared to strong baselines. Overall, ACE demonstrates that detailed, self-improving contexts enable more scalable and efficient LLM systems.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1037</itunes:duration>
                <itunes:episode>3</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/ACE_workflow.png" /><podcast:transcript url="https://mcdn.podbean.com/mf/web/rvbvqdkytwxbryie/ACE-64k9et-Optimized.vtt" type="text/vtt" /><podcast:chapters url="https://mcdn.podbean.com/mf/web/htc7q3kt8ddb2j29/ACE-64k9et-Optimized_chapters.json" type="application/json" />    </item>
    <item>
        <title>Self-Adapting LLMs: SEAL Framework</title>
        <itunes:title>Self-Adapting LLMs: SEAL Framework</itunes:title>
        <link>https://genai-learner.podbean.com/e/the-title-of-seal/</link>
                    <comments>https://genai-learner.podbean.com/e/the-title-of-seal/#comments</comments>        <pubDate>Thu, 16 Oct 2025 18:17:25 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/140ee97a-698a-365f-b257-5f0cfb3f0e92</guid>
                                    <description><![CDATA[<p>This recent MIT paper details a novel framework called Self-Adapting Language Models (SEAL), which allows large language models (LLMs) to modify their own weights through self-generated training data and update instructions, termed self-edits. The core mechanism involves an outer reinforcement learning (RL) loop that trains the model to create effective self-edits, rewarding generations that improve performance on downstream tasks, and an inner supervised finetuning (SFT) loop that applies the resulting weight updates. Experiments demonstrate SEAL's effectiveness in two key domains: knowledge incorporation of new factual information and few-shot generalization by autonomously selecting data augmentations and optimization hyperparameters. Overall, SEAL offers a versatile approach for enabling LLMs to overcome their static nature and engage in self-directed adaptation in response to new inputs and tasks.</p>
]]></description>
                                                            <content:encoded><![CDATA[<p>This recent MIT paper details a novel framework called Self-Adapting Language Models (SEAL), which allows large language models (LLMs) to modify their own weights through self-generated training data and update instructions, termed self-edits. The core mechanism involves an outer reinforcement learning (RL) loop that trains the model to create effective self-edits, rewarding generations that improve performance on downstream tasks, and an inner supervised finetuning (SFT) loop that applies the resulting weight updates. Experiments demonstrate SEAL's effectiveness in two key domains: knowledge incorporation of new factual information and few-shot generalization by autonomously selecting data augmentations and optimization hyperparameters. Overall, SEAL offers a versatile approach for enabling LLMs to overcome their static nature and engage in self-directed adaptation in response to new inputs and tasks.</p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/hhnkmmae8cj5k5rk/SEAL.mp3" length="13940537" type="audio/mpeg"/>
        <itunes:summary><![CDATA[This recent MIT paper details a novel framework called Self-Adapting Language Models (SEAL), which allows large language models (LLMs) to modify their own weights through self-generated training data and update instructions, termed self-edits. The core mechanism involves an outer reinforcement learning (RL) loop that trains the model to create effective self-edits, rewarding generations that improve performance on downstream tasks, and an inner supervised finetuning (SFT) loop that applies the resulting weight updates. Experiments demonstrate SEAL's effectiveness in two key domains: knowledge incorporation of new factual information and few-shot generalization by autonomously selecting data augmentations and optimization hyperparameters. Overall, SEAL offers a versatile approach for enabling LLMs to overcome their static nature and engage in self-directed adaptation in response to new inputs and tasks.]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>871</itunes:duration>
                <itunes:episode>2</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/Gemini_Generated_Image_8ncnrg8ncnrg8ncn.png" /><podcast:transcript url="https://mcdn.podbean.com/mf/web/3kxi6rvpzmgmzjjg/SEAL-iiwqym-Optimized.vtt" type="text/vtt" /><podcast:chapters url="https://mcdn.podbean.com/mf/web/f5yicr3jhutp5ftw/SEAL-iiwqym-Optimized_chapters.json" type="application/json" />    </item>
    <item>
        <title>METIS: Quality-Aware RAG with Configuration Adaptation and Scheduling</title>
        <itunes:title>METIS: Quality-Aware RAG with Configuration Adaptation and Scheduling</itunes:title>
        <link>https://genai-learner.podbean.com/e/metis-quality-aware-rag-with-configuration-adaptation-and-scheduling/</link>
                    <comments>https://genai-learner.podbean.com/e/metis-quality-aware-rag-with-configuration-adaptation-and-scheduling/#comments</comments>        <pubDate>Thu, 16 Oct 2025 17:13:52 -0300</pubDate>
        <guid isPermaLink="false">genai-learner.podbean.com/5030f0ca-9a02-3001-8ce6-081c993d09df</guid>
                                    <description><![CDATA[<p>SOSP 2025 Paper READING ---- METIS: Fast Quality-Aware RAG Systems with Configuration Adaptation </p>
<p><a href='https://arxiv.org/pdf/2412.10543'>https://arxiv.org/pdf/2412.10543</a></p>
]]></description>
                                                            <content:encoded><![CDATA[<p>SOSP 2025 Paper READING ---- METIS: Fast Quality-Aware RAG Systems with Configuration Adaptation </p>
<p><a href='https://arxiv.org/pdf/2412.10543'>https://arxiv.org/pdf/2412.10543</a></p>
]]></content:encoded>
                                    
        <enclosure url="https://mcdn.podbean.com/mf/web/h8nnctyvj93zkbgm/METIS.mp3" length="21227238" type="audio/mpeg"/>
        <itunes:summary><![CDATA[SOSP 2025 Paper READING ---- METIS: Fast Quality-Aware RAG Systems with Configuration Adaptation 
https://arxiv.org/pdf/2412.10543]]></itunes:summary>
        <itunes:author>hogarthian.art</itunes:author>
        <itunes:explicit>false</itunes:explicit>
        <itunes:block>No</itunes:block>
        <itunes:duration>1326</itunes:duration>
                <itunes:episode>1</itunes:episode>
        <itunes:episodeType>full</itunes:episodeType>
        <itunes:image href="https://pbcdn1.podbean.com/imglogo/ep-logo/pbblog21502942/Gemini_Generated_Image_9u2wh49u2wh49u2w.png" />    </item>
</channel>
</rss>
