@inbook{0c7fa498eece42258bbe92fc20ac2295,
title = "Neural Galerkin schemes for sequential-in-time solving of partial differential equations with deep networks",
abstract = "This survey discusses Neural Galerkin schemes that leverage nonlinear parametrizations such as deep networks to numerically solve time-dependent partial differential equations (PDEs) in a variational sense. Neural Galerkin schemes build on the Dirac-Frenkel variational principle to train networks by minimizing the residual sequentially over time, which is in contrast to many other methods that approximate PDE solution fields with deep networks globally in time. Because of the sequential-in-time training, Neural Galerkin solutions inherently respect causality and approximate solution fields locally in time so that often fewer parameters are required than by global-in-time methods. Additionally, the sequential-in-time training enables adaptively sampling the spatial domain to efficiently evaluate the residual objectives over time, which is key for numerically realizing the expressive power of deep networks and other nonlinear parametrizations in high dimensions and when solution features are local such as wave fronts.",
keywords = "Curse of dimensionality, Deep networks, Dirac-Frenkel variational principle, Kolmogorov n-width, Model reduction, Time-dependent partial differential equations",
author = "Jules Berman and Paul Schwerdtner and Benjamin Peherstorfer",
note = "Publisher Copyright: {\textcopyright} 2024 Elsevier B.V.",
year = "2024",
month = jan,
doi = "10.1016/bs.hna.2024.05.006",
language = "English (US)",
isbn = "9780443239847",
series = "Handbook of Numerical Analysis",
publisher = "Elsevier B.V.",
pages = "389--418",
editor = "Siddhartha Mishra and Alex Townsend",
booktitle = "Numerical Analysis Meets Machine Learning",
}