@online{850,
  ids         = {senina2014coherent},
  author      = {Senina, Anna and Rohrbach, Marcus and Qiu, Wei and Friedrich, Annemarie and Amin, Sikandar and Andriluka, Mykhaylo and Pinkal, Manfred and Schiele, Bernt},
  title       = {Coherent Multi-sentence Video Description with Variable Level of Detail},
  langid      = {english},
  url         = {http://arxiv.org/abs/1403.6173},
  eprint      = {1403.6173},
  eprinttype  = {arXiv},
  eprintclass = {cs.CV},
  year        = {2014},
  abstract    = {Humans can easily describe what they see in a coherent way and at varying level of detail. However, existing approaches for automatic video description are mainly focused on single sentence generation and produce descriptions at a fixed level of detail. In this paper, we address both of these limitations: for a variable level of detail we produce coherent multi-sentence descriptions of complex videos. We follow a two-step approach where we first learn to predict a semantic representation (SR) from video and then generate natural language descriptions from the SR. To produce consistent multi-sentence descriptions, we model across-sentence consistency at the level of the SR by enforcing a consistent topic. We also contribute both to the visual recognition of objects proposing a hand-centric approach as well as to the robust generation of sentences using a word lattice. Human judges rate our multi-sentence descriptions as more readable, correct, and relevant than related work. To understand the difference between more detailed and shorter descriptions, we collect and analyze a video description corpus of three levels of detail.},
}