@inproceedings{f09e12a04f054156b453cd55e81c1e09,
  author    = {Liberato, Juan Pi{\~n}eros and Alhafni, Bashar and {Al Khalil}, Muhamed and Habash, Nizar},
  title     = {Strategies for {Arabic} Readability Modeling},
  booktitle = {{ArabicNLP} 2024 - 2nd {Arabic} Natural Language Processing Conference, Proceedings of the Conference},
  series    = {{ArabicNLP} 2024 - 2nd {Arabic} Natural Language Processing Conference, Proceedings of the Conference},
  editor    = {Habash, Nizar and Bouamor, Houda and Eskander, Ramy and Tomeh, Nadi and {Abu Farha}, Ibrahim and Abdelali, Ahmed and Touileb, Samia and Hamed, Injy and Onaizan, Yaser and Alhafni, Bashar and Antoun, Wissam and Khalifa, Salam and Haddad, Hatem and Zitouni, Imed and AlKhamissi, Badr and Almatham, Rawan and Mrini, Khalil},
  publisher = {Association for Computational Linguistics (ACL)},
  pages     = {55--66},
  year      = {2024},
  note      = {Publisher Copyright: {\textcopyright}2024 Association for Computational Linguistics.; 2nd Arabic Natural Language Processing Conference, ArabicNLP 2024 ; Conference date: 16-08-2024},
  abstract  = {Automatic readability assessment is relevant to building NLP applications for education, content analysis, and accessibility. However, Arabic readability assessment is a challenging task due to Arabic{\textquoteright}s morphological richness and limited readability resources. In this paper, we present a set of experimental results on Arabic readability assessment using a diverse range of approaches, from rule-based methods to Arabic pretrained language models. We report our results on a newly created corpus at different textual granularity levels (words and sentence fragments). Our results show that combining different techniques yields the best results, achieving an overall macro F1 score of 86.7 at the word level and 87.9 at the fragment level on a blind test set. We make our code, data, and pretrained models publicly available.},
  language  = {English (US)},
}