@inproceedings{c58761902d12441e8167142794cc0ed3,
  author    = {Anand, Akhil S. and Kveen, Jens Erik and Abu-Dakka, Fares and Gr{\o}tli, Esten Ingar and Gravdahl, Jan Tommy},
  title     = {Addressing Sample Efficiency and Model-bias in Model-based Reinforcement Learning},
  booktitle = {Proceedings - 21st IEEE International Conference on Machine Learning and Applications, ICMLA 2022},
  series    = {Proceedings - 21st IEEE International Conference on Machine Learning and Applications, ICMLA 2022},
  editor    = {Wani, M. Arif and Kantardzic, Mehmed and Palade, Vasile and Neagu, Daniel and Yang, Longzhi and Chan, Kit-Yan},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {1--6},
  year      = {2022},
  doi       = {10.1109/ICMLA55696.2022.00009},
  language  = {English (US)},
  keywords  = {Model based reinforcement learning, model predictive control, sample efficient learning},
  abstract  = {Model-based reinforcement learning promises to be an effective way to bring reinforcement learning to real-world robotic systems by offering a sample efficient learning approach compared to model-free reinforcement learning. However, model-based reinforcement learning approaches at present struggle to match the performance of model-free ones. This work attempts to fill this gap by improving the performance of model-based reinforcement learning while further improving its sample efficiency. To improve the sample efficiency, an exploration strategy is formulated which maximizes the information gain. The asymptotic performance is improved by compensating for the model-bias using a model-free critic. We have evaluated our proposed approach on four reinforcement learning benchmarking tasks in the OpenAI Gym framework.},
  note      = {Publisher Copyright: {\textcopyright} 2022 IEEE.; 21st IEEE International Conference on Machine Learning and Applications, ICMLA 2022 ; Conference date: 12-12-2022 Through 14-12-2022},
}