<?xml version="1.0" encoding="utf-8" standalone="yes"?><rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>Abdoul Majid O. Thiombiano | SmileLab Research Group</title><link>https://mkaouer.net/author/abdoul-majid-o.-thiombiano/</link><atom:link href="https://mkaouer.net/author/abdoul-majid-o.-thiombiano/index.xml" rel="self" type="application/rss+xml"/><description>Abdoul Majid O. Thiombiano</description><generator>Wowchemy (https://wowchemy.com)</generator><language>en-us</language><lastBuildDate>Wed, 01 Jan 2025 00:00:00 +0000</lastBuildDate><image><url>https://mkaouer.net/media/icon_hu0d5b11ddbeb42a9e2cb9cf069e1704a9_40581_512x512_fill_lanczos_center_3.png</url><title>Abdoul Majid O. Thiombiano</title><link>https://mkaouer.net/author/abdoul-majid-o.-thiombiano/</link></image><item><title>Distil-xLSTM: Learning Attention Mechanisms through Recurrent Structures</title><link>https://mkaouer.net/publication/thiombiano-2025-distilxlstm/</link><pubDate>Wed, 01 Jan 2025 00:00:00 +0000</pubDate><guid>https://mkaouer.net/publication/thiombiano-2025-distilxlstm/</guid><description/></item><item><title>From apologies to insights: extracting topics from ChatGPT apologetic responses</title><link>https://mkaouer.net/publication/hnich-2025-from/</link><pubDate>Wed, 01 Jan 2025 00:00:00 +0000</pubDate><guid>https://mkaouer.net/publication/hnich-2025-from/</guid><description/></item><item><title>MoxE: Mixture of xLSTM Experts with Entropy-Aware Routing for Efficient Language Modeling</title><link>https://mkaouer.net/publication/thiombiano-2025-moxe/</link><pubDate>Wed, 01 Jan 2025 00:00:00 +0000</pubDate><guid>https://mkaouer.net/publication/thiombiano-2025-moxe/</guid><description/></item><item><title>Assessing Large Language Models Effectiveness in Outdated Method Renaming</title><link>https://mkaouer.net/publication/mrad-2024-assessing/</link><pubDate>Mon, 01 Jan 2024 00:00:00 +0000</pubDate><guid>https://mkaouer.net/publication/mrad-2024-assessing/</guid><description/></item></channel></rss>