% Encoding: UTF-8
@COMMENT{BibTeX export based on data in FAU CRIS: https://cris.fau.de/}
@COMMENT{For any questions please write to cris-support@fau.de}
@inproceedings{faucris.107217264,
abstract = {In this paper we present a method to efficiently cull large parts of a scene prior to shadow map computations for many-lights settings. Our method is agnostic to how the light sources are generated and thus works with any method of light distribution. Our approach is based on previous work in culling for ray traversal to speed up area light sampling. Applied to shadow mapping, our method works for high- and low-resolution shadow maps and, in contrast to previous work on many-lights rendering, neither entails scene approximations nor imposes limits on light range, while still providing significant gains in performance. In contrast to standard culling methods, shadow map rendering itself is sped up by a factor of 1.5 to 8.6, while the combined speedup of shadow map rendering, lookup, and shading ranges from 1.1 to 4.2.},
author = {Selgrad, Kai and Müller, Jonas and Reintges, Christian and Stamminger, Marc},
booktitle = {Eurographics Symposium on Rendering - Experimental Ideas \& Implementations},
doi = {10.2312/sre.20161208},
faupublication = {yes},
isbn = {978-3-03868-019-2},
note = {UnivIS-Import:2017-01-09:Pub.2016.tech.IMMD.IMMD9.fastsh},
pages = {41--47},
peerreviewed = {Yes},
title = {{Fast Shadow Map Rendering for Many-Lights Settings}},
venue = {Dublin},
year = {2016}
}
@inproceedings{faucris.118788824,
abstract = {We present a novel technique for rendering depth of field that addresses difficult overlap cases, such as close, but out-of-focus, geometry in the near field. Such scene configurations are not handled well by state-of-the-art post-processing approaches, since essential information is missing due to occlusion.

Our proposed algorithm renders the scene from a single camera position and computes a layered image in a single pass by constructing per-pixel lists. These lists can be filtered progressively to generate differently blurred representations of the scene. We show how this structure can be exploited to generate depth of field in real time, even in complicated scene constellations.