@inproceedings{rakhimov2021latent,
title = "Latent video transformer",
abstract = "The video generation task can be formulated as the prediction of future video frames given some past frames. Recent generative models for video face the problem of high computational requirements; some models require up to 512 Tensor Processing Units for parallel training. In this work, we address this problem by modeling the dynamics in a latent space. After transforming frames into the latent space, our model predicts the latent representations of the next frames in an autoregressive manner. We demonstrate the performance of our approach on the BAIR Robot Pushing and Kinetics-600 datasets. The approach reduces the training requirements to 8 Graphics Processing Units while maintaining comparable generation quality.",
keywords = "Deep learning, Generative adversarial networks, Video generation",
author = "Ruslan Rakhimov and Denis Volkhonskiy and Alexey Artemov and Denis Zorin and Evgeny Burnaev",
note = "Funding Information: The authors acknowledge the usage of the Skoltech CDISE HPC cluster Zhores for obtaining the results presented in this paper. The authors were supported by the Russian Science Foundation under Grant 19-41-04109. They also thank Vage Egiazarian for thoughtful discussions of the model and the experiments. Publisher Copyright: {\textcopyright} 2021 by SCITEPRESS – Science and Technology Publications, Lda. All rights reserved. Conference: 16th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP 2021), 8--10 February 2021.",
year = "2021",
language = "English (US)",
series = "VISIGRAPP 2021 - Proceedings of the 16th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications",
publisher = "SciTePress",
pages = "101--112",
editor = "Farinella, {Giovanni Maria} and Radeva, Petia and Braz, Jose and Bouatouch, Kadi",
booktitle = "Proceedings of the 16th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP 2021) - VISAPP",
}