@inbook{df45b81057024f20bd11336ab7b53122,
  title         = {Improved Learning of Dynamics Models for Control},
  abstract      = {Model-based reinforcement learning (MBRL) plays an important role in developing control strategies for robotic systems. However, when dealing with complex platforms, it is difficult to model systems dynamics with analytic models. While data-driven tools offer an alternative to tackle this problem, collecting data on physical systems is non-trivial. Hence, smart solutions are required to effectively learn dynamics models with small amount of examples. In this paper we present an extension to Data As Demonstrator for handling controlled dynamics in order to improve the multiple-step prediction capabilities of the learned dynamics models. Results show the efficacy of our algorithm in developing LQR, iLQR, and open-loop trajectory-based control strategies on simulated benchmarks as well as physical robot platforms.},
  keywords      = {Dynamics learning, Optimal control, Reinforcement learning, Sequential prediction},
  author        = {Venkatraman, Arun and Capobianco, Roberto and Pinto, Lerrel and Hebert, Martial and Nardi, Daniele and Bagnell, J. Andrew},
  note          = {Funding Information: Acknowledgements. This material is based upon work supported in part by: National Science Foundation Graduate Research Fellowship Grant No. DGE-1252522, National Science Foundation NRI Purposeful Prediction Award No. 1227234, and ONR contract N000141512365. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation. Publisher Copyright: {\textcopyright} 2017, Springer International Publishing AG.},
  year          = {2017},
  doi           = {10.1007/978-3-319-50115-4_61},
  language      = {English (US)},
  series        = {Springer Proceedings in Advanced Robotics},
  publisher     = {Springer Science and Business Media B.V.},
  pages         = {703--713},
  booktitle     = {Springer Proceedings in Advanced Robotics},
  internal-note = {NOTE(review): booktitle merely duplicates series, which looks like an auto-export artifact -- the actual book/proceedings title, series volume number, and editors are missing; the ISBN embedded in the DOI (978-3-319-50115-4) presumably identifies the parent volume -- confirm and fill in. Also confirm whether @inproceedings or @incollection would be a more accurate entry type than @inbook for this chapter.},
}