@inproceedings{cd6245bcb34848418bd776900a0a48a9,
  title     = {One-Step Time-Dependent Future Video Frame Prediction with a Convolutional Encoder-Decoder Neural Network},
  abstract  = {There is an inherent need for autonomous cars, drones, and other robots to have a notion of how their environment behaves and to anticipate changes in the near future. In this work, we focus on anticipating future appearance given the current frame of a video. Existing work focuses on either predicting the future appearance as the next frame of a video, or predicting future motion as optical flow or motion trajectories starting from a single video frame. This work stretches the ability of CNNs (Convolutional Neural Networks) to predict an anticipation of appearance at an arbitrarily given future time, not necessarily the next video frame. We condition our predicted future appearance on a continuous time variable that allows us to anticipate future frames at a given temporal distance, directly from the input video frame. We show that CNNs can learn an intrinsic representation of typical appearance changes over time and successfully generate realistic predictions at a deliberate time difference in the near future.},
  keywords  = {Action forecasting, Appearance prediction, CNNs, Future video frame prediction, Generative models, Scene understanding},
  author    = {Vukotic, Vedran and Pintea, Silvia and Raymond, Christian and Gravier, Guillaume and van Gemert, Jan},
  year      = {2017},
  doi       = {10.1007/978-3-319-68560-1_13},
  language  = {English},
  isbn      = {978-3-319-68559-5},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer},
  pages     = {140--151},
  editor    = {Battiato, S. and Gallo, G. and Schettini, R. and Stanco, F.},
  booktitle = {Image Analysis and Processing -- {ICIAP} 2017, Part I},
  note      = {Image Analysis and Processing - ICIAP 2017 : 19th International Conference ; Conference date: 11-09-2017 Through 15-09-2017},
}