@article{Danyaro_Abdullahi_Abdallah_Chiroma_2025,
  title={Hallucinations in Large Language Models for Education: Challenges and Mitigation},
  volume={4},
  number={6},
  url={http://dx.doi.org/10.22161/ijtle.4.6.2},
  DOI={10.22161/ijtle.4.6.2},
  journal={International Journal of Teaching, Learning and Education},
  publisher={AI Publications},
  author={Danyaro, Kamaluddeen Usman and Abdullahi, Shamsu and Abdallah, Abdallah Saleh and Chiroma, Haruna},
  year={2025},
  pages={13--19},
  abstractNote={Large Language Models (LLMs) are increasingly being adopted in education to support teaching, learning, and assessment. While they offer benefits such as personalised learning and automated feedback, their tendency to generate hallucinations (plausible but factually incorrect or fabricated information) poses a critical challenge. In an educational context, hallucinations risk misleading students, compromising academic integrity, and eroding trust in AI-assisted learning. This paper examines hallucinations in education, highlighting their causes, risks, and implications. Unlike prior surveys that address hallucinations broadly, our work focuses specifically on education, where the consequences extend to academic honesty, critical thinking and equitable access. We provide a domain-specific analysis of how hallucinations emerge in tutoring systems, assessment and instructional content. Furthermore, we review technical and pedagogical mitigation strategies, such as prompt engineering, fine-tuning, dynamic course content integration and redesigned assessment practices. The paper contributes a framework that links technical solutions with education safeguards, emphasising that mitigating hallucinations is not limited to algorithmic advances but also requires institutional policies and critical AI literacy. By addressing these challenges, we aim to inform more reliable, equitable and trustworthy deployment of LLMs in education.}
}