@Book{ 4968,
title = {The Sense of Touch and its Rendering: Progress in Haptics Research},
year = {2008},
pages = {284},
web_url = {http://www.springerlink.com/content/978-3-540-79034-1},
publisher = {Springer},
address = {Berlin, Germany},
series = {Springer Tracts in Advanced Robotics; 45},
state = {published},
ISBN = {978-3-540-79035-8},
DOI = {10.1007/978-3-540-79035-8},
author = {Bicchi A, Buss M, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Peer A}
}
@Article{ PariseKE2014,
title = {Natural auditory scene statistics shapes human spatial hearing},
journal = {Proceedings of the National Academy of Sciences of the United States of America},
year = {2014},
month = {4},
volume = {111},
number = {16},
pages = {6104–6108},
abstract = {Human perception, cognition, and action are laced with seemingly arbitrary mappings. In particular, sound has a strong spatial connotation: Sounds are high and low, melodies rise and fall, and pitch systematically biases perceived sound elevation. The origins of such mappings are unknown. Are they the result of physiological constraints, do they reflect natural environmental statistics, or are they truly arbitrary? We recorded natural sounds from the environment, analyzed the elevation-dependent filtering of the outer ear, and measured frequency-dependent biases in human sound localization. We find that auditory scene statistics reveals a clear mapping between frequency and elevation. Perhaps more interestingly, this natural statistical mapping is tightly mirrored in both ear-filtering properties and in perceived sound location. This suggests that both sound localization behavior and ear anatomy are fine-tuned to the statistics of natural auditory scenes, likely providing the basis for the spatial connotation of human hearing.},
web_url = {http://www.pnas.org/content/111/16/6104.full.pdf+html},
state = {published},
DOI = {10.1073/pnas.1322705111},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Knorre K and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ MayerDE2013,
title = {Duration perception in crossmodally-defined intervals},
journal = {Acta Psychologica},
year = {2014},
month = {3},
volume = {147},
pages = {2-9},
abstract = {How humans perform duration judgments with multisensory stimuli is an ongoing debate. Here, we investigated how sub-second duration judgments are achieved by asking participants to compare the duration of a continuous sound to the duration of an empty interval in which onset and offset were marked by signals of different modalities using all combinations of visual, auditory and tactile stimuli. The pattern of perceived durations across five stimulus durations (ranging from 100 ms to 900 ms) follows the Vierordt Law. Furthermore, intervals with a sound as onset (audio-visual, audio-tactile) are perceived longer than intervals with a sound as offset. No modality ordering effect is found for visual-tactile intervals. To infer whether a single modality-independent or multiple modality-dependent time-keeping mechanisms exist we tested whether perceived duration follows a summative or a multiplicative distortion pattern by fitting a model to all modality combinations and durations. The results confirm that perceived duration depends on sensory latency (summative distortion). Instead, we did not find evidence for multiplicative distortions. The results of the model and the behavioural data support the concept of a single time-keeping mechanism that allows for judgments of durations marked by multisensory stimuli.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0001691813001613},
state = {published},
DOI = {10.1016/j.actpsy.2013.07.009},
author = {Mayer KM{kama}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ HartcherOBrienDE2014,
title = {The Duration of Uncertain Times: Audiovisual Information about Intervals Is Integrated in a Statistically Optimal Fashion},
journal = {PLoS ONE},
year = {2014},
month = {3},
volume = {9},
number = {3},
pages = {1-8},
abstract = {Often multisensory information is integrated in a statistically optimal fashion where each sensory source is weighted according to its precision. This integration scheme is statistically optimal because it theoretically results in unbiased perceptual estimates with the highest precision possible. There is a current lack of consensus about how the nervous system processes multiple sensory cues to elapsed time. In order to shed light upon this, we adopt a computational approach to pinpoint the integration strategy underlying duration estimation of audio/visual stimuli. One of the assumptions of our computational approach is that the multisensory signals redundantly specify the same stimulus property. Our results clearly show that despite claims to the contrary, perceived duration is the result of an optimal weighting process, similar to that adopted for estimates of space. That is, participants weight the audio and visual information to arrive at the most precise, single duration estimate possible. The work also disentangles how different integration strategies – i.e., considering the time of onset/offset of signals – might alter the final estimate. As such we provide the first concrete evidence of an optimal integration strategy in human duration estimates.},
web_url = {http://www.plosone.org/article/fetchObject.action;jsessionid=622211C1AF144AE639C6007134B76D65?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0089339&representation=PDF},
state = {published},
DOI = {10.1371/journal.pone.0089339},
EPUB = {e89339},
author = {Hartcher-O'Brien J{jhartcher}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ RohdeWKE2013,
title = {The Human Touch: Skin Temperature during the Rubber Hand Illusion in Manual and Automated Stroking Procedures},
journal = {PLoS ONE},
year = {2013},
month = {11},
volume = {8},
number = {11},
pages = {1-8},
abstract = {A difference in skin temperature between the hands has been identified as a physiological correlate of the rubber hand illusion (RHI). The RHI is an illusion of body ownership, where participants perceive body ownership over a rubber hand if they see it being stroked in synchrony with their own occluded hand. The current study set out to replicate this result, i.e., psychologically induced cooling of the stimulated hand using an automated stroking paradigm, where stimulation was delivered by a robot arm (PHANToM™ force-feedback device). After we found no evidence for hand cooling in two experiments using this automated procedure, we reverted to a manual stroking paradigm, which is closer to the one employed in the study that first produced this effect. With this procedure, we observed a relative cooling of the stimulated hand in both the experimental and the control condition. The subjective experience of ownership, as rated by the participants, by contrast, was strictly linked to synchronous stroking in all three experiments. This implies that hand-cooling is not a strict correlate of the subjective feeling of hand ownership in the RHI. Factors associated with the differences between the two designs (differences in pressure of tactile stimulation, presence of another person) that were thus far considered irrelevant to the RHI appear to play a role in bringing about this temperature effect.},
web_url = {http://www.plosone.org/article/fetchObject.action;jsessionid=C0BFE6044490D27BA7C63E92C7D4A954?uri=info%3Adoi%2F10.1371%2Fjournal.pone.0080688&representation=PDF},
state = {published},
DOI = {10.1371/journal.pone.0080688},
EPUB = {e80688},
author = {Rohde M{marohde}{Research Group Multisensory Perception and Action}, Wold A{wold}{Research Group Multisensory Perception and Action}, Karnath H-O and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ PariseHE2013,
title = {Cross-correlation between Auditory and Visual Signals Promotes Multisensory Integration},
journal = {Multisensory Research},
year = {2013},
month = {7},
volume = {26},
number = {3},
pages = {307–316},
abstract = {Humans are equipped with multiple sensory channels that provide both redundant and complementary information about the objects and events in the world around them. A primary challenge for the brain is therefore to solve the ‘correspondence problem’, that is, to bind those signals that likely originate from the same environmental source, while keeping separate those unisensory inputs that likely belong to different objects/events. Whether multiple signals have a common origin or not must, however, be inferred from the signals themselves through a causal inference process. Recent studies have demonstrated that cross-correlation, that is, the similarity in temporal structure between unimodal signals, represents a powerful cue for solving the correspondence problem in humans. Here we provide further evidence for the role of the temporal correlation between auditory and visual signals in multisensory integration. Capitalizing on the well-known fact that sensitivity to crossmodal conflict is inversely related to the strength of coupling between the signals, we measured sensitivity to crossmodal spatial conflicts as a function of the cross-correlation between the temporal structures of the audiovisual signals. Observers’ performance was systematically modulated by the cross-correlation, with lower sensitivity to crossmodal conflict being measured for correlated as compared to uncorrelated audiovisual signals. These results therefore provide support for the claim that cross-correlation promotes multisensory integration. A Bayesian framework is proposed to interpret the present results, whereby stimulus correlation is represented on the prior distribution of expected crossmodal co-occurrence.},
web_url = {http://booksandjournals.brillonline.com/content/10.1163/22134808-00002417},
state = {published},
DOI = {10.1163/22134808-00002417},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Harrar V, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Spence C}
}
@Article{ vanDamHE2013,
title = {Switching between visuomotor mappings: Learning absolute mappings or relative shifts},
journal = {Journal of Vision},
year = {2013},
month = {2},
volume = {13},
number = {2:26},
pages = {1-12},
abstract = {Adaptation to specific visuomotor mappings becomes faster when switching back and forth between them. What is learned when repeatedly switching between the visuomotor mappings: the absolute mappings or the relative shift between the mappings? To test this, we trained participants in a rapid pointing task using a unique color cue as context for each mapping between pointing location and visual feedback. After extensive training, participants adapted to a new mapping using a neutral contextual cue. For catch trials (a change in cue and no visual feedback) different adaptation performances are predicted depending on how the mappings are encoded. When encoding an absolute mapping for each cue, participants would fall back to the mapping associated with the cue irrespective of the state they are currently in. In contrast, when a shift in mapping is encoded for the cue, pointing performance will shift relative to the current mapping by an amount equal to the difference between the previously learned mappings. Results indicate that the contextual cues signal absolute visuomotor mappings rather than relative shifts between mappings.},
web_url = {http://www.journalofvision.org/content/13/2/26.full.pdf+html},
state = {published},
DOI = {10.1167/13.2.26},
author = {van Dam LCJ{vandam}{Research Group Multisensory Perception and Action}, Hawellek DJ{dhawid}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ HelbigERPTMSN2011,
title = {The neural mechanisms of reliability weighted integration of shape information from vision and touch},
journal = {NeuroImage},
year = {2012},
month = {4},
volume = {60},
number = {2},
pages = {1063–1072},
abstract = {Behaviourally, humans have been shown to integrate multisensory information in a statistically-optimal fashion by averaging the individual unisensory estimates according to their relative reliabilities. This form of integration is optimal in that it yields the most reliable (i.e. least variable) multisensory percept. The present study investigates the neural mechanisms underlying integration of visual and tactile shape information at the macroscopic scale of the regional BOLD response. Observers discriminated the shapes of ellipses that were presented bimodally (visual-tactile) or visually alone. A 2×5 factorial design manipulated (i) the presence vs. absence of tactile shape information and (ii) the reliability of the visual shape information (five levels). We then investigated whether regional activations underlying tactile shape discrimination depended on the reliability of visual shape. Indeed, in primary somatosensory cortices (bilateral BA2) and the superior parietal lobe the responses to tactile shape input were increased when the reliability of visual shape information was reduced. Conversely, tactile inputs suppressed visual activations in the right posterior fusiform, when the visual signal was blurred and unreliable. Somatosensory and visual cortices may sustain integration of visual and tactile shape information either via direct connections from visual areas or top-down effects from higher order parietal areas.},
web_url = {http://www.sciencedirect.com/science/article/pii/S1053811911011475},
state = {published},
DOI = {10.1016/j.neuroimage.2011.09.072},
author = {Helbig HB{helbig}{Research Group Multisensory Perception and Action}, Ernst MO{marc}{Research Group Multisensory Perception and Action}, Ricciardi E{ricciardi}, Pietrini P, Thielscher A{thielscher}{Department High-Field Magnetic Resonance}, Mayer KM{kama}{Research Group Multisensory Perception and Action}, Schultz J{johannes} and Noppeney U{unoppe}{Research Group Cognitive Neuroimaging}}
}
@Article{ MachullaDFE2011,
title = {Multisensory simultaneity recalibration: storage of the aftereffect in the absence of counterevidence},
journal = {Experimental Brain Research},
year = {2012},
month = {3},
volume = {217},
number = {1},
pages = {89-97},
abstract = {Recent studies show that repeated exposure to an asynchrony between auditory and visual stimuli shifts the point of subjective simultaneity. Usually, the measurement stimuli used to assess this aftereffect are interleaved with short re-exposures to the asynchrony. In a first experiment, we show that the aftereffect declines during measurement in spite of the use of re-exposures. In a second experiment, we investigate whether the observed decline is either due to a dissipation of the aftereffect with the passage of time, or the result of using measurement stimuli with a distribution of asynchronies different from the exposure stimulus. To this end, we introduced a delay before measuring the aftereffects and we compared the magnitude of the aftereffect with and without delay. We find that the aftereffect does not dissipate during the delay but instead is stored until new sensory information in the form of measurement stimuli is presented as counterevidence (i.e., stimuli with an asynchrony that differs from the one used during exposure).},
web_url = {http://www.springerlink.com/content/u7337tv890047w92/fulltext.pdf},
state = {published},
DOI = {10.1007/s00221-011-2976-5},
author = {Machulla TK{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Fr\"ohlich E{efroehl}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ PariseSE2012,
title = {When Correlation Implies Causation in Multisensory Integration},
journal = {Current Biology},
year = {2012},
month = {1},
volume = {22},
number = {1},
pages = {46-49},
abstract = {Inferring which signals have a common underlying cause, and hence should be integrated, represents a primary challenge for a perceptual system dealing with multiple sensory inputs [ [1], [2] and [3]]. This challenge is often referred to as the correspondence problem or causal inference. Previous research has demonstrated that spatiotemporal cues, along with prior knowledge, are exploited by the human brain to solve this problem [ [4], [5], [6], [7], [8] and [9]]. Here we explore the role of correlation between the fine temporal structure of auditory and visual signals in causal inference. Specifically, we investigated whether correlated signals are inferred to originate from the same distal event and hence are integrated optimally [10]. In a localization task with visual, auditory, and combined audiovisual targets, the improvement in precision for combined relative to unimodal targets was statistically optimal only when audiovisual signals were correlated. This result demonstrates that humans use the similarity in the temporal structure of multisensory signals to solve the correspondence problem, hence inferring causation from correlation.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0960982211013212},
state = {published},
DOI = {10.1016/j.cub.2011.11.039},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Spence C and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ SoumanRSFTUDBE2011,
title = {CyberWalk: Enabling unconstrained omnidirectional walking through virtual environments},
journal = {ACM Transactions on Applied Perception},
year = {2011},
month = {11},
volume = {8},
number = {4:25},
pages = {1-22},
abstract = {Despite many recent developments in Virtual Reality, an effective locomotion interface which allows for normal walking through large virtual environments was still lacking until recently. Here, we describe the new CyberWalk omnidirectional treadmill system, which makes it possible for users to walk endlessly in any direction, while never leaving the confines of the limited walking surface. The treadmill system improves on previous designs, both in its mechanical features and in the control system employed to keep users close to the centre of the treadmill. As a result, users are able to start walking, vary their walking speed and direction, and stop walking like they would on a normal, stationary surface. The treadmill system was validated in two experiments, in which both the walking behaviour and the performance in a basic spatial updating task were compared to that during normal overground walking. The results suggest that walking on the CyberWalk treadmill is very close to normal walking, especially after some initial familiarization. Moreover, we did not find a detrimental effect of treadmill walking in the spatial updating task. The CyberWalk system constitutes a significant step forward to bringing the real world into the laboratory or workplace.},
web_url = {http://dl.acm.org/citation.cfm?id=2043607&dl=ACM&coll=DL&CFID=62552168&CFTOKEN=60220994},
state = {published},
DOI = {10.1145/2043603.2043607},
author = {Souman JL{souman}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Robuffo Giordano P{robu_pa}{Department Human Perception, Cognition and Action}, Schwaiger M, Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Th\"ummel T, Ulbrich H, De Luca A, B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ FrissenCSE2011,
title = {Integration of vestibular and proprioceptive signals for spatial updating},
journal = {Experimental Brain Research},
year = {2011},
month = {7},
volume = {212},
number = {2},
pages = {163-176},
abstract = {Spatial updating during self-motion typically involves the appropriate integration of both visual and non-visual cues, including vestibular and proprioceptive information. Here, we investigated how human observers combine these two non-visual cues during full-stride curvilinear walking. To obtain a continuous, real-time estimate of perceived position, observers were asked to continuously point toward a previously viewed target in the absence of vision. They did so while moving on a large circular treadmill under various movement conditions. Two conditions were designed to evaluate spatial updating when information was largely limited to either proprioceptive information (walking in place) or vestibular information (passive movement). A third condition evaluated updating when both sources of information were available (walking through space) and were either congruent or in conflict. During both the passive movement condition and while walking through space, the pattern of pointing behavior demonstrated evidence of accurate egocentric updating. In contrast, when walking in place, perceived self-motion was underestimated and participants always adjusted the pointer at a constant rate, irrespective of changes in the rate at which the participant moved relative to the target. The results are discussed in relation to the maximum likelihood estimation model of sensory integration. They show that when the two cues were congruent, estimates were combined, such that the variance of the adjustments was generally reduced. Results also suggest that when conflicts were introduced between the vestibular and proprioceptive cues, spatial updating was based on a weighted average of the two inputs.},
web_url = {http://www.springerlink.com/content/cgju26276732uln0/fulltext.pdf},
state = {published},
DOI = {10.1007/s00221-011-2717-9},
author = {Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Campos JL{camposjl}{Department Human Perception, Cognition and Action}, Souman JL{souman}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 5923,
title = {Effects of visual–haptic asynchronies and loading–unloading movements on compliance perception},
journal = {Brain Research Bulletin},
year = {2011},
month = {6},
volume = {85},
number = {5},
pages = {245-259},
abstract = {Spring compliance is perceived by combining the sensed force exerted by the spring with the displacement caused by the action (sensed through vision and proprioception). We investigated the effect of delay of visual and force information with respect to proprioception to understand how visual–haptic perception of compliance is achieved. First, we confirm an earlier result that force delay increases perceived compliance. Furthermore, we find that perceived compliance decreases with a delay in the visual information. These effects of delay on perceived compliance would not be present if the perceptual system would utilize all force–displacement information available during the interaction. Both delays generate a bias in compliance which is opposite in the loading and unloading phases of the interaction. To explain these findings, we propose that information during the loading phase of the spring displacement is weighted more than information obtained during unloading. We confirm this hypothesis by showing that sensitivity to compliance during loading movements is much higher than during unloading movements. Moreover, we show that visual and proprioceptive information about the hand position are used for compliance perception depending on the sensitivity to compliance. Finally, by analyzing participants’ movements we show that these two factors (loading/unloading and reliability) account for the change in perceived compliance due to visual and force delays.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6SYT-4YH4PW9-1-2&_cdi=4843&_user=29041&_pii=S0361923010000535&_origin=&_coverDate=06%2F30%2F2011&_sk=999149994&view=c&wchp=dGLbVzW-zSkWz&md5=9431a6ab71ceec50cfd48eed7c02f605&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.brainresbull.2010.02.009},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Kn\"orlein B, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Harders M}
}
@Article{ 5822,
title = {Tactile suppression of displacement},
journal = {Experimental Brain Research},
year = {2010},
month = {10},
volume = {206},
number = {3},
pages = {299-310},
abstract = {In vision, the discovery of the phenomenon of saccadic suppression of displacement has made important contributions to the understanding of Helmholtz's stable world problem. Here we report a similar phenomenon in the tactile modality. When scanning a single Braille dot with two fingers of the same hand, subjects were asked to decide whether the dot was stationary or whether it jumped from one location to another. The stimulus was created using refreshable Braille devices which have dots that can be swiftly raised and recessed. In some conditions, the dot jumped from one location to another by amounts of 2.5 and 5 mm. By monitoring the subjects' finger positions we could ensure that the jumps, if any, occurred when the dot was not touched by either finger. In some other conditions the dot did not move. We found that in certain conditions, jumping dots were felt to be stationary. If the jump was orthogonal to the finger movements, tactile suppression of displacement occurred effectively when the jump was 2.5 mm, but when the jump was 5 mm, subjects easily detected it. If the jump was lateral, the suppression effect occurred as well, but less often when the artificial movement of the dot opposed the movement of the finger. In such cases, the stimulus appeared sooner than when the brain could predict it from finger movement, supporting a predictive rather than a postdictive differential processing hypothesis.},
file_url = {/fileadmin/user_upload/files/publications/Ziat_etal_EBR_2010_5822[0].pdf},
web_url = {http://www.springerlink.com/content/pv8t307120rwn768/fulltext.pdf},
state = {published},
DOI = {10.1007/s00221-010-2407-z},
author = {Ziat M, Hayward V, Chapman EC, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Lenay C}
}
@Article{ 6723,
title = {Humans do not have direct access to retinal flow during walking},
journal = {Journal of Vision},
year = {2010},
month = {9},
volume = {10},
number = {11:14},
pages = {1-12},
abstract = {Perceived visual speed has been reported to be reduced during walking. This reduction has been attributed to a partial subtraction of walking speed from visual speed (F. H. Durgin & K. Gigone, 2007; F. H. Durgin, K. Gigone, & R. Scott, 2005). We tested whether observers still have access to the retinal flow before subtraction takes place. Observers performed a 2IFC visual speed discrimination task while walking on a treadmill. In one condition, walking speed was identical in the two intervals, while in a second condition walking speed differed between intervals. If observers have access to the retinal flow before subtraction, any changes in walking speed across intervals should not affect their ability to discriminate retinal flow speed. Contrary to this direct access hypothesis, we found that observers were worse at discrimination when walking speed differed between intervals. The results therefore suggest that observers do not have access to retinal flow before subtraction. We also found that the amount of subtraction depended on the visual speed presented, suggesting that the interaction between the processing of visual input and of self-motion is more complex than previously proposed.},
web_url = {http://www.journalofvision.org/content/10/11/14.full.pdf+html},
state = {published},
DOI = {10.1167/10.11.14},
author = {Souman JL{souman}{Research Group Multisensory Perception and Action}, Freeman TCA{freemant}, Eikmeier V{eikmeier}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 6500,
title = {Learning to use an invisible visual signal for perception},
journal = {Current Biology},
year = {2010},
month = {9},
volume = {20},
number = {20},
pages = {1860-1863},
abstract = {How does the brain construct a percept from sensory signals? One approach to this fundamental question is to investigate perceptual learning as induced by exposure to statistical regularities in sensory signals [1–7]. Recent studies showed that exposure to novel correlations between sensory signals can cause a signal to have new perceptual effects [2, 3]. In those studies, however, the signals were clearly visible. The automaticity of the learning was therefore difficult to determine. Here we investigate whether learning of this sort, which causes new effects on appearance, can be low level and automatic by employing a visual signal whose perceptual consequences were made invisible: a vertical disparity gradient masked by other depth cues. This approach excluded high-level influences such as attention or consciousness. Our stimulus for probing perceptual appearance was a rotating cylinder. During exposure, we introduced a new contingency between the invisible signal and the rotation direction of the cylinder. When subsequently presenting an ambiguously rotating version of the cylinder, we found that the invisible signal influenced the perceived rotation direction. This demonstrates that perception can rapidly undergo structure learning by automatically picking up novel contingencies between sensory signals, thus automatically recruiting signals for novel uses during the construction of a percept.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6VRT-51618H5-7-G&_cdi=6243&_user=29041&_pii=S0960982210011656&_origin=search&_coverDate=10%2F26%2F2010&_sk=999799979&view=c&wchp=dGLbVzW-zSkWA&md5=ade64c780791a37d8086effaeda3b349&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.cub.2010.09.047},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Backus B{backus}{Research Group Multisensory Perception and Action}}
}
@Article{ 6737,
title = {Decisions Made Better},
journal = {Science},
year = {2010},
month = {8},
volume = {329},
number = {5995},
pages = {1022-1023},
abstract = {Under certain circumstances, joint decisions of a group can be better than those of the individuals.},
file_url = {/fileadmin/user_upload/files/publications/Ernst_Science%20%202010_6737[0].pdf},
web_url = {http://www.sciencemag.org/cgi/reprint/329/5995/1022.pdf},
state = {published},
DOI = {10.1126/science.1194920},
author = {Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 6485,
title = {Preexposure disrupts learning of location-contingent perceptual biases for ambiguous stimuli},
journal = {Journal of Vision},
year = {2010},
month = {7},
volume = {10},
number = {8:15},
pages = {1-17},
abstract = {The perception of a bistable stimulus as one or the other interpretation can be biased by prior presentations of that stimulus. Such learning effects have been found to be long lasting even after small amounts of training. The effectiveness of training may be influenced by preexposure to the ambiguous stimulus. Here we investigate the role of preexposure for learning a position-dependent perceptual bias. We used rotating Necker Cubes as the bistable stimuli, which were presented at two locations: above or below fixation. On training trials, additional depth cues disambiguated the rotation direction contingent on the location. On test trials, the rotating cube was presented without disambiguation cues. Without preexposure to the ambiguous stimulus, subjects learned to perceive the cube to be rotating in the trained direction for both locations. However, subjects that were preexposed to the ambiguous stimulus did not learn the trained percept–location contingency, even though the preexposure was very short compared to the subsequent training. Preexposure to the disambiguated stimulus did not interfere with learning. This indicates a fundamental difference between ambiguous test and disambiguated training trials for learning a perceptual bias. In short, small variations in paradigm can have huge effects for the learning of perceptual biases for ambiguous stimuli.},
web_url = {http://www.journalofvision.org/content/10/8/15},
state = {published},
DOI = {10.1167/10.8.15},
author = {van Dam LCJ{vandam}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 6368,
title = {Eye Movements: Illusions in Slow Motion},
journal = {Current Biology},
year = {2010},
month = {4},
volume = {20},
number = {8},
pages = {R357-R359},
abstract = {A recent study has shown that a range of different motion illusions occurring during smooth pursuit eye-movements (e.g., the Aubert-Fleischl Phenomenon, Filehne Illusion) can be explained as optimal percepts based on a simple model derived from the Bayesian framework.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6VRT-4YY8KWD-H-1&_cdi=6243&_user=29041&_pii=S0960982210002903&_origin=search&_coverDate=04%2F27%2F2010&_sk=999799991&view=c&wchp=dGLzVlb-zSkWb&md5=7ea11c767d05d138419ca25648131fc7&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.cub.2010.03.009},
author = {Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 5872,
title = {Visually Guided Haptic Search},
journal = {IEEE Transactions on Haptics},
year = {2010},
month = {3},
volume = {3},
number = {1},
pages = {63-72},
abstract = {In this study we investigate the influence of visual feedback on haptic exploration. A haptic search task was designed in which subjects had to haptically explore a virtual display using a force feedback device and to determine whether a target was present among distractor items. While the target was recognizable only haptically, visual feedback of finger position or possible target positions could be given. Our results show that subjects could use visual feedback on possible target positions even in the absence of feedback on finger position. When there was no feedback on possible target locations, subjects scanned the whole display systematically. When feedback on finger position was present, subjects could make well-directed movements back to areas of interest. This was not the case without feedback on finger position, indicating that showing finger position helps forming a spatial representation of the display. In addition, we show that response time models of visual serial search do not generally apply for haptic serial search. Consequently, in tele-operation systems, for instance, it is helpful to show the position of the probe even if visual information on the scene is poor.},
web_url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5235142&abstractAccess=no&userType=},
state = {published},
DOI = {10.1109/TOH.2009.43},
author = {Plaisier MA{plaisier}{Research Group Multisensory Perception and Action}, Kappers AML, Tiest WMB and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 5774,
title = {Within- and cross-modal distance information disambiguates visual size perception},
journal = {PLoS Computational Biology},
year = {2010},
month = {3},
volume = {6},
number = {3},
pages = {1-10},
abstract = {Perception is fundamentally underconstrained because different combinations of object properties can generate the same sensory information. To disambiguate sensory information into estimates of scene properties, our brains incorporate prior knowledge and additional “auxiliary” (i.e., not directly relevant to desired scene property) sensory information to constrain perceptual interpretations. For example, knowing the distance to an object helps in perceiving its size. The literature contains few demonstrations of the use of prior knowledge and auxiliary information in combined visual and haptic disambiguation and almost no examination of haptic disambiguation of vision beyond “bistable” stimuli. Previous studies have reported humans integrate multiple unambiguous sensations to perceive single, continuous object properties, like size or position. Here we test whether humans use visual and haptic information, individually and jointly, to disambiguate size from distance. We presented participants with a ball moving in depth with a changing diameter. Because no unambiguous distance information is available under monocular viewing, participants rely on prior assumptions about the ball's distance to disambiguate their size percept. Presenting auxiliary binocular and/or haptic distance information augments participants' prior distance assumptions and improves their size judgment accuracy—though binocular cues were trusted more than haptic. Our results suggest both visual and haptic distance information disambiguate size perception, and we interpret these results in the context of probabilistic perceptual reasoning.},
web_url = {http://www.ploscompbiol.org/article/info%3Adoi%2F10.1371%2Fjournal.pcbi.1000697},
state = {published},
DOI = {10.1371/journal.pcbi.1000697},
EPUB = {e1000697},
author = {Battaglia PW{batt0086}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Research Group Multisensory Perception and Action}, Ernst MO{marc}{Research Group Multisensory Perception and Action}, Schrater PR, Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Kersten D{kersten}}
}
@Article{ 4902,
title = {Making virtual walking real: perceptual evaluation of a new treadmill control algorithm},
journal = {ACM Transactions on Applied Perception},
year = {2010},
month = {2},
volume = {7},
number = {2:11},
pages = {1-14},
abstract = {For us humans, walking is our most natural way of moving through the world. One of the major challenges in present research on navigation in virtual reality is to enable users to physically walk through virtual environments. Although treadmills, in principle, allow users to walk for extended periods of time through large virtual environments, existing setups largely fail to produce a truly immersive sense of navigation. Partially, this is because of inadequate control of treadmill speed as a function of walking behavior. Here, we present a new control algorithm that allows users to walk naturally on a treadmill, including starting to walk from standstill, stopping, and varying walking speed. The treadmill speed control consists of a feedback loop based on the measured user position relative to a given reference position, plus a feed-forward term based on online estimation of the user's walking velocity. The purpose of this design is to make the treadmill compensate fully for any persistent walker motion, while keeping the accelerations exerted on the user as low as possible. We evaluated the performance of the algorithm by conducting a behavioral experiment in which we varied its most important parameters. Participants walked at normal walking speed and then, on an auditory cue, abruptly stopped. After being brought back to the center of the treadmill by the control algorithm, they rated how smoothly the treadmill had changed its velocity in response to the change in walking speed. Ratings, in general, were quite high, indicating good control performance. Moreover, ratings clearly depended on the control algorithm parameters that were varied. Ratings were especially affected by the way the treadmill reversed its direction of motion. In conclusion, controlling treadmill speed in such a way that changes in treadmill speed are unobtrusive and do not disturb VR immersiveness is feasible on a normal treadmill with a straightforward control algorithm.},
file_url = {/fileadmin/user_upload/files/publications/ACM_TAP_2010_4902[0].pdf},
web_url = {http://portal.acm.org/citation.cfm?id=1670671.1670675&coll=portal&dl=GUIDE&idx=J932&part=transaction&WantType=Transactions&title=ACM%20Transactions%20on%20Applied%20Perception%20(TAP)&CFID=90896224&CFTOKEN=54062865},
state = {published},
DOI = {10.1145/1670671.1670675},
author = {Souman JL{souman}{Research Group Multisensory Perception and Action}, Robuffo Giordano P{robu_pa}, Frissen I{ifrissen}{Research Group Multisensory Perception and Action}, De Luca A and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 5870,
title = {Recalibration of multisensory simultaneity: Cross-modal transfer coincides with a change in perceptual latency},
journal = {Journal of Vision},
year = {2009},
month = {12},
volume = {9},
number = {12:7},
pages = {1-16},
abstract = {After exposure to asynchronous sound and light stimuli, perceived audio-visual synchrony changes to compensate for the asynchrony. Here we investigate to what extent this audio-visual recalibration effect transfers to visual-tactile and audio-tactile simultaneity perception in order to infer the mechanisms responsible for temporal recalibration. Results indicate that audio-visual recalibration of simultaneity can transfer to audio-tactile and visual-tactile stimuli depending on the way in which the multisensory stimuli are presented. With presentation of co-located multisensory stimuli, we found a change in the perceptual latency of the visual stimuli. Presenting auditory stimuli through headphones, on the other hand, induced a change in the perceptual latency of the auditory stimuli. We argue that the difference in transfer depends on the relative trust in the auditory and visual estimates. Interestingly, these findings were confirmed by showing that audio-visual recalibration influences simple reaction time to visual and auditory stimuli. Presenting co-located stimuli during asynchronous exposure induced a change in reaction time to visual stimuli, while with headphones the change in reaction time occurred for the auditory stimuli. These results indicate that the perceptual latency is altered with repeated exposure to asynchronous audio-visual stimuli in order to compensate (at least in part) for the presented asynchrony.},
web_url = {http://journalofvision.org/9/12/7/DiLuca-2009-jov-9-12-7.pdf},
state = {published},
DOI = {10.1167/9.12.7},
author = {Di Luca M{max}{Research Group Multisensory Perception and Action}, Machulla T-K{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 5773,
title = {Walking Straight into Circles},
journal = {Current Biology},
year = {2009},
month = {9},
volume = {19},
number = {18},
pages = {1538-1542},
abstract = {Common belief has it that people who get lost in unfamiliar terrain often end up walking in circles. Although uncorroborated by empirical data, this belief has widely permeated popular culture. Here, we tested the ability of humans to walk on a straight course through unfamiliar terrain in two different environments: a large forest area and the Sahara desert. Walking trajectories of several hours were captured via global positioning system, showing that participants repeatedly walked in circles when they could not see the sun. Conversely, when the sun was visible, participants sometimes veered from a straight course but did not walk in circles. We tested various explanations for this walking behavior by assessing the ability of people to maintain a fixed course while blindfolded. Under these conditions, participants walked in often surprisingly small circles (diameter < 20 m), though rarely in a systematic direction. These results rule out a general explanation in terms of biomechanical asymmetries or other general biases [1], [2], [3], [4], [5] and [6]. Instead, they suggest that veering from a straight course is the result of accumulating noise in the sensorimotor system, which, without an external directional reference to recalibrate the subjective straight ahead, may cause people to walk in circles.},
web_url = {http://www.sciencedirect.com/science?_ob=PdfDownloadURL&_uoikey=B6VRT-4X1YFNH-4&_tockey=%23toc%236243%239999%23999999999%2399999%23FLA%23&_orig=search&_acct=C000003178&_version=1&_userid=29041&md5=b5},
state = {published},
DOI = {10.1016/j.cub.2009.07.053},
author = {Souman JL{souman}{Research Group Multisensory Perception and Action}, Frissen I{ifrissen}{Research Group Multisensory Perception and Action}, Sreenivasa MN{manu}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 5577,
title = {Perceptual Learning: Inverting the Size–Weight Illusion},
journal = {Current Biology},
year = {2009},
month = {1},
volume = {19},
number = {1},
pages = {R23-R25},
abstract = {When one lifts two objects of equal weight and appearance but different size, the smaller object usually feels heavier. New results show that this size–weight illusion can be inverted after extensive training with objects in which the natural size–weight relationship is artificially reversed.},
file_url = {/fileadmin/user_upload/files/publications/Ernst_CB_dispatch_5577[0].pdf},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6VRT-4VC1WB9-F-1&_cdi=6243&_user=29041&_pii=S0960982208014085&_orig=search&_coverDate=01%2F13%2F2009&_sk=999809998&view=c&wchp=dGLzVlz-zSkzV&md5=e11cd5cc73e2eb5bd6c449cd4ac427d4&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.cub.2008.10.039},
author = {Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 4814,
title = {Walking along curved paths of different angles: the relationship between head and trunk turning},
journal = {Experimental Brain Research},
year = {2008},
month = {10},
volume = {191},
number = {3},
pages = {313-320},
abstract = {Walking along a curved path requires coordinated motor actions of the entire body. Here, we investigate the relationship between head and trunk movements during walking. Previous studies have found that the head systematically turns into turns before the trunk does. This has been found to occur at a constant distance rather than at a constant time before a turn. We tested whether this anticipatory head behavior is spatially invariant for turns of different angles. Head and trunk positions and orientations were measured while participants walked around obstacles in 45°, 90°, 135° or 180° turns. The radius of the turns was either imposed or left free. We found that the head started to turn into the direction of the turn at a constant distance before the obstacle (~1.1 m) for turn angles up to 135°. During turns, the head was consistently oriented more into the direction of the turn than the trunk. This difference increased for larger turning angles and reached its maximum later in the turn for larger turns. Walking speeds decreased monotonically for increasing turn angles. Imposing fixed turn radii only affected the point at which the trunk started to turn into a turn. Our results support the view that anticipatory head movements during turns occur in order to gather advance visual information about the trajectory and potential obstacles.},
web_url = {http://www.springerlink.com/content/u662315377204m32/fulltext.pdf},
state = {published},
DOI = {10.1007/s00221-008-1525-3},
author = {Sreenivasa MN{manu}{Research Group Multisensory Perception and Action}, Frissen I{ifrissen}{Research Group Multisensory Perception and Action}, Souman JL{souman}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 5018,
title = {Material Properties Determine How Force and Position Signals Combine in Haptic Shape Perception},
journal = {Acta Psychologica},
year = {2008},
month = {6},
volume = {128},
number = {2},
pages = {264-273},
abstract = {When integrating estimates from redundant sensory signals, humans seem to weight these estimates according to their reliabilities. In the present study, human observers used active touch to judge the curvature of a shape. The curvature was specified by positional and force signals: When a finger slides across a surface, the finger's position follows the surface geometry (position signal). At the same time it is exposed to patterns of forces depending on the gradient of the surface (force signal; Robles-de-la Torre & Hayward, 2001). We show that variations in the surface's material properties (compliance, friction) influence the sensorily available position and force signals, as well as the noise associated with these signals. Along with this, material properties affect the weights given to the position and force signals for curvature judgements. Our findings are consistent with the notion of an observer who weights signal estimates according to their reliabilities. That is, signal weights shifted with the signal noise, which in the present case resulted from active exploration.},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6V5T-4S4BDJX-1-9&_cdi=5795&_user=29041&_orig=search&_coverDate=06%2F30%2F2008&_sk=998719997&view=c&wchp=dGLbVzz-zSkzV&md5=6a11255057cdc19a1fdb3073e9f68902&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.actpsy.2008.02.002},
author = {Drewing K{kdrewing}, Wiecki TV{wiecki} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 5166,
title = {Multisensory Integration: A Late Bloomer},
journal = {Current Biology},
year = {2008},
month = {6},
volume = {18},
number = {12},
pages = {R519-R521},
abstract = {Under many circumstances, human adults integrate information from the different sensory modalities in a statistically optimal fashion. New results suggest that optimal multisensory integration only develops in middle childhood.},
file_url = {/fileadmin/user_upload/files/publications/Ernst_CB_dispatch_5166[0].pdf},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6VRT-4STRYB5-K-1&_cdi=6243&_user=29041&_orig=search&_coverDate=06%2F24%2F2008&_sk=999819987&view=c&wchp=dGLbVtb-zSkWb&md5=ec89be41fb927e3f00bf8008b3995221&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.cub.2008.05.002},
author = {Ernst M{marc}{Research Group Multisensory Perception and Action}}
}
@Article{ 4965,
title = {The statistical determinants of adaptation rate in human reaching},
journal = {Journal of Vision},
year = {2008},
month = {4},
volume = {8},
number = {4:20},
pages = {1-19},
abstract = {Rapid reaching to a target is generally accurate, but also contains random and systematic error. Random errors result from noise in visual measurement, motor planning, and reach execution. Systematic error results from systematic changes in the mapping between the visual estimate of target location and the motor command necessary to reach the target (e.g. new spectacles, muscular fatigue). Humans maintain accurate reaching by recalibrating the visuomotor system, but no widely accepted computational model of the process exists. Given certain boundary conditions, a statistically optimal solution is a Kalman filter. We compared human to Kalman-filter behavior to determine how humans take into account the statistical properties of errors and the reliability with which those errors can be measured. For most conditions, human and Kalman-filter behavior was similar: Increasing measurement uncertainty caused similar decreases in recalibration rate; directionally asymmetric uncertainty caused different rates in different directions; more variation in systematic error increased recalibration rate. However, behavior differed in one respect: Inserting random error by perturbing feedback position caused slower adaptation in Kalman filters, but had no effect in humans. This difference may be due to how biological systems remain responsive to changes in environmental statistics. We discuss the implications of this work.},
web_url = {http://journalofvision.org/8/4/20/},
state = {published},
DOI = {10.1167/8.4.20},
author = {Burge J{jburge}, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Banks MS{martybanks}}
}
@Article{ 4631,
title = {Trimodal integration of visual, tactile and auditory signals for the perception of sequences of events},
journal = {Brain Research Bulletin},
year = {2008},
month = {2},
volume = {75},
number = {6},
pages = {753-760},
abstract = {We investigated the interactions between visual, tactile and auditory sensory signals for the perception of sequences of events. Sequences of flashes, taps and beeps were presented simultaneously. For each session, subjects were instructed to count the number of events presented in one modality (Target) and to ignore the stimuli presented in the other modalities (Background). The number of events presented in the background sequence could differ from the number of events in the target sequence. For each session, we quantified the Background-evoked bias by comparing subjects' responses with and without Background (Target presented alone). Nine combinations between vision, touch and audition were tested. In each session but two, the Background significantly biased the Target. Vision was the most susceptible to Background-evoked bias and the least efficient in biasing the other two modalities. By contrast, audition was the least susceptible to Background-evoked bias and the most efficient in biasing the other two modalities. These differences were strongly correlated to the relative reliability of each modality. In line with this, the evoked biases were larger when the Background consisted of two instead of only one modality. These results show that for the perception of sequences of events: (1) vision, touch and audition are automatically integrated; (2) the respective contributions of the three modalities to the integrated percept differ; (3) the relative contribution of each modality depends on its relative reliability (1/variability); (4) task-irrelevant stimuli have more weight when presented in two rather than only one modality.},
file_url = {/fileadmin/user_upload/files/publications/trimodal_BRB7666_4631[0].pdf},
web_url = {http://dx.doi.org/10.1016/j.brainresbull.2008.01.009},
state = {published},
DOI = {10.1016/j.brainresbull.2008.01.009},
author = {Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}, Dammeier F{dammeier}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 4400,
title = {Visual-haptic cue weighting is independent of modality-specific attention},
journal = {Journal of Vision},
year = {2008},
month = {1},
volume = {8},
number = {1:21},
pages = {1-16},
abstract = {Some object properties (e.g., size, shape, and depth information) are perceived through multiple sensory modalities. Such redundant sensory information is integrated into a unified percept. The integrated estimate is a weighted average of the sensory estimates, where higher weight is attributed to the more reliable sensory signal. Here we examine whether modality-specific attention can affect multisensory integration. Selectively reducing attention in one sensory channel can reduce the relative reliability of the estimate derived from this channel and might thus alter the weighting of the sensory estimates. In the present study, observers performed unimodal (visual and haptic) and bimodal (visual-haptic) size discrimination tasks. They either performed the primary task alone or they performed a secondary task simultaneously (dual task). The secondary task consisted of a same/different judgment of rapidly presented visual letter sequences, and so might be expected to withdraw attention predominantly from the visual rather than the haptic channel. Comparing size discrimination performance in single- and dual-task conditions, we found that vision-based estimates were more affected by the secondary task than the haptics-based estimates, indicating that indeed attention to vision was more reduced than attention to haptics. This attentional manipulation, however, did not affect the cue weighting in the bimodal task. Bimodal discrimination performance was better than unimodal performance in both single- and dual-task conditions, indicating that observers still integrate visual and haptic size information in the dual-task condition, when attention is withdrawn from vision. These findings indicate that visual-haptic cue weighting is independent of modality-specific attention.},
file_url = {/fileadmin/user_upload/files/publications/Helbig-Ernst-2008-jov-8-1-21_4400[0].pdf},
web_url = {http://journalofvision.org/8/1/21/Helbig-2008-jov-8-1-21.pdf},
state = {published},
DOI = {10.1167/8.1.21},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 4171,
title = {Knowledge about a common source can promote visual-haptic integration},
journal = {Perception},
year = {2007},
month = {10},
volume = {36},
number = {10},
pages = {1523-1533},
file_url = {/fileadmin/user_upload/files/publications/HelbigErnst_2007_Perception_4171[0].pdf},
web_url = {http://www.perceptionweb.com/perception/fulltext/p36/p5851.pdf},
state = {published},
DOI = {10.1068/p5851},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 4459,
title = {Multisensory Recognition of Actively Explored Objects},
journal = {Canadian Journal of Experimental Psychology},
year = {2007},
month = {9},
volume = {61},
number = {3},
pages = {242-253},
abstract = {Shape recognition can be achieved through vision or touch, raising the issue of how this information is shared across modalities. Here we provide a short review of previous findings on cross-modal object recognition and we provide new empirical data on multisensory recognition of actively explored objects. It was previously shown that, similar to vision, haptic recognition of objects fixed in space is orientation specific and that cross-modal object recognition performance was relatively efficient when these views of the objects were matched across the sensory modalities [Newell, Ernst, Tjan & Bülthoff, 2001]. For actively explored, i.e. spatially unconstrained, objects we now found a cost in cross-modal relative to within-modal recognition performance. At first, this may seem to be in contrast to findings by Newell et al. (2001). However, a detailed video analysis of the visual and haptic exploration behaviour during learning and recognition revealed that one view of the objects was predominantly explored relative to all others. Thus, active visual and haptic exploration is not balanced across object views. The cost in recognition performance across modalities for actively explored objects could be attributed to the fact that the predominantly learned object view was not appropriately matched between learning and recognition test in the cross-modal conditions. Thus, it seems that participants naturally adopt an exploration strategy during visual and haptic object learning that involves constraining the orientation of the objects. Although this strategy ensures good within-modal performance, it is not optimal for achieving the best recognition performance across modalities.},
file_url = {/fileadmin/user_upload/files/publications/Ernst_etal07_CJEP_proofs_4459[0].pdf},
web_url = {http://content.apa.org/journals/cep/61/3/242},
state = {published},
DOI = {10.1037/cjep2007025},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Lange C{clange}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Newell FN{fiona}{Department Human Perception, Cognition and Action}}
}
@Article{ 4418,
title = {Signal reliability modulates auditory-tactile integration for event counting},
journal = {NeuroReport},
year = {2007},
month = {7},
volume = {18},
number = {11},
pages = {1157-1161},
abstract = {Sequences of auditory beeps and tactile taps were simultaneously presented and participants were instructed to focus on one of these modalities and to ignore the other. We tested whether (i) the two sensory channels bias one another and (ii) the interaction depends on the relative reliability of the channels. Audition biased tactile perception and touch biased auditory perception. Lowering the reliability of the auditory channel (i.e. the intensity of the beeps) decreased the effect of audition on touch and increased the effect of touch on audition. These results show that simultaneous auditory and tactile stimuli tend to be automatically integrated in a reliability-dependent manner.},
file_url = {/fileadmin/user_upload/files/publications/bresciani07NR_4418[0].pdf},
web_url = {http://gateway.ovid.com/ovidweb.cgi?T=JS&NEWS=N&PAGE=fulltext&AN=00001756-200707160-00009&LSLINK=80&D=ovft},
state = {published},
DOI = {10.1097/WNR.0b013e3281ace0ca},
author = {Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 4432,
title = {Learning to integrate arbitrary signals from vision and touch},
journal = {Journal of Vision},
year = {2007},
month = {6},
volume = {7},
number = {5:7},
pages = {1-14},
web_url = {http://journalofvision.org/7/5/7/},
state = {published},
DOI = {10.1167/7.5.7},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 4169,
title = {Optimal integration of shape information from vision and touch},
journal = {Experimental Brain Research},
year = {2007},
month = {1},
volume = {179},
number = {4},
pages = {595-606},
file_url = {/fileadmin/user_upload/files/publications/HelbigErnst_2007_EBR_4169[0].pdf},
web_url = {http://springerlink.metapress.com/content/fn8234766p471u6m/fulltext.pdf},
state = {published},
DOI = {10.1007/s00221-006-0814-y},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 3787,
title = {Vision and touch are automatically integrated for the perception of sequences of events},
journal = {Journal of Vision},
year = {2006},
month = {4},
volume = {6},
number = {5},
pages = {554-564},
abstract = {The purpose of the present experiment was to investigate the integration of sequences of visual and tactile events. Participants were presented with sequences of visual flashes and tactile taps simultaneously and instructed to count either the flashes (session 1) or the taps (session 2). The number of flashes could differ from the number of taps by ±1. For both sessions, the perceived number of events was significantly influenced by the number of events presented in the task-irrelevant modality. Touch had a stronger influence on vision than vision on touch. Interestingly, touch was the more reliable of the two modalities, giving less variable estimates when presented alone. For both sessions, the perceptual estimates were less variable when stimuli were presented in both modalities than when the task-relevant modality was presented alone. These results indicate that even when one signal is explicitly task-irrelevant, sensory information tends to be automatically integrated across modalities. They also suggest that the relative weight of each sensory channel in the integration process depends on its relative reliability. The results are described using a Bayesian probabilistic model for multimodal integration that accounts for the coupling between the sensory estimates.},
file_url = {/fileadmin/user_upload/files/publications/Bresciani-2006-jov-6-5-2_3787[0].pdf},
web_url = {http://www.journalofvision.org/6/5/2/Bresciani-2006-jov-6-5-2.pdf},
state = {published},
DOI = {10.1167/6.5.2},
author = {Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}, Dammeier F{dammeier}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 2886,
title = {Integration of force and position cues for shape perception through active touch},
journal = {Brain Research},
year = {2006},
month = {2},
volume = {1078},
number = {1},
pages = {92-100},
abstract = {This article systematically explores cue integration within active touch. Our research builds upon a recently made distinction between position and force cues for haptic shape perception: When sliding a finger across a bumpy surface, the finger follows the surface geometry (position cue). At the same time the finger is exposed to forces related to the slope of the surface (force cue). Experiment 1 independently varied force and position cues to the curvature of 3D-arches. Perceived curvature could be well described as a weighted average of the two cues. Experiment 2 found more weight of the position cue for more convex high arches and higher weight of the force cue for less convex shallow arches, probably mediated through a change in relative cue reliability. Both findings are in good agreement with the Maximum-Likelihood-Estimation (MLE) model for cue integration and thus carry this model over to the domain of active haptic perception.},
file_url = {/fileadmin/user_upload/files/publications/B5.Drewing_2006_2886[0].pdf},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&amp;_imagekey=B6SYR-4J9X340-1-K&amp;_cdi=4841&amp;_user=29041&amp;_orig=browse&amp;_coverDate=03%2F17%2F2006&amp;_sk=989219998&amp;view=c&amp;wch},
state = {published},
DOI = {10.1016/j.brainres.2005.12.026},
author = {Drewing K{kdrewing} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
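% The weighted-average description in the abstract above can be written compactly. A minimal sketch in generic notation (the symbols below are illustrative and not taken from the entry itself): given position- and force-based curvature estimates \hat{C}_p and \hat{C}_f,
% \[ \hat{C} = w_p \hat{C}_p + w_f \hat{C}_f, \qquad w_p + w_f = 1, \]
% where, under the MLE account cited in the abstract, each weight is assumed to be proportional to the reliability (inverse variance) of its cue, i.e. w_p \propto 1/\sigma_p^2 and w_f \propto 1/\sigma_f^2.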
@Article{ 3546,
title = {Focus Cues Affect Perceived Depth},
journal = {Journal of Vision},
year = {2005},
month = {12},
volume = {5},
number = {10},
pages = {834-862},
abstract = {Depth information from focus cues (accommodation and the gradient of retinal blur) is typically incorrect in three-dimensional
(3-D) displays because the light comes from a planar display surface. If the visual system incorporates
information from focus cues into its calculation of 3-D scene parameters, this could cause distortions in perceived depth
even when the 2-D retinal images are geometrically correct. In Experiment 1 we measured the direct contribution of focus
cues to perceived slant by varying independently the physical slant of the display surface and the slant of a simulated
surface specified by binocular disparity (binocular viewing) or perspective/texture (monocular viewing). In the binocular
condition, slant estimates were unaffected by display slant. In the monocular condition, display slant had a systematic
effect on slant estimates. Estimates were consistent with a weighted average of slant from focus cues and slant from
disparity/texture, where the cue weights are determined by the reliability of each cue. In Experiment 2, we examined
whether focus cues also have an indirect effect on perceived slant via the distance estimate used in disparity scaling. We
varied independently the simulated distance and the focal distance to a disparity-defined 3-D stimulus. Perceived slant
was systematically affected by changes in focal distance. Accordingly, depth constancy (with respect to simulated
distance) was significantly reduced when focal distance was held constant compared to when it varied appropriately with
the simulated distance to the stimulus. The results of both experiments show that focus cues can contribute to estimates
of 3-D scene parameters. Inappropriate focus cues in typical 3-D displays may therefore contribute to distortions in
perceived space.},
file_url = {/fileadmin/user_upload/files/publications/Watt_etal_JOV_05_3546[0].pdf},
web_url = {http://jvis.org/5/10/7/Watt-2005-jov-5-10-7.pdf},
state = {published},
DOI = {10.1167/5.10.7},
author = {Watt SJ, Akeley K, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Banks M{martybanks}}
}
@Article{ 3785,
title = {The combination of vision and touch depends on spatial proximity},
journal = {Journal of Vision},
year = {2005},
month = {12},
volume = {5},
number = {11},
pages = {1013-1023},
abstract = {The nervous system often combines visual and haptic information about object properties such that the
combined estimate is more precise than with vision or haptics alone. We examined how the system determines
when to combine the signals. Presumably, signals should not be combined when they come from different
objects. The likelihood that signals come from different objects is highly correlated with the spatial separation
between the signals, so we asked how the spatial separation between visual and haptic signals affects their
combination. To do this, we first created conditions for each observer in which the effect of combination (the
increase in discrimination precision with two modalities relative to performance with one modality) should be
maximal. Then under these conditions we presented visual and haptic stimuli separated by different spatial
distances and compared human performance with predictions of a model that combined signals optimally. We
found that discrimination precision was essentially optimal when the signals came from the same location, and
that discrimination precision was poorer when the signals came from different locations. Thus, the mechanism of
visual-haptic combination is specialized for signals that coincide in space.},
file_url = {/fileadmin/user_upload/files/publications/JOV-00052-2005_in_press_[0].pdf},
web_url = {http://journalofvision.org/5/11/7/},
state = {published},
DOI = {10.1167/5.11.7},
author = {Gepshtein S, Burge J{jburge}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Banks M{martybanks}}
}
@Article{ 2785,
title = {Texture and haptic cues in slant discrimination: Reliability-based cue weighting without statistically optimal cue combination},
journal = {Journal of the Optical Society of America A},
year = {2005},
month = {5},
volume = {22},
number = {5},
pages = {801-809},
abstract = {A number of models of depth cue combination suggest that the final depth percept results from a weighted average of independent depth estimates based on the different cues available. The weight of each cue in such an average is thought to depend on the reliability of each cue. In principle, such a depth estimation could be statistically optimal in the sense of producing the minimum variance unbiased estimator that can be constructed from the available information. Here we test such models using visual and haptic depth information. Different texture types produce differences in slant discrimination performance, providing a means for testing a reliability-sensitive cue combination model using texture as one of the cues to slant. Our results show that the weights for the cues were generally sensitive to their reliability, but fell short of statistically optimal combination: we find reliability-based re-weighting, but not statistically optimal cue combination.},
file_url = {/fileadmin/user_upload/files/publications/pdf2785.pdf},
web_url = {http://www.opticsinfobase.org/josaa/abstract.cfm?uri=josaa-22-5-801},
state = {published},
DOI = {10.1364/JOSAA.22.000801},
author = {Rosas P{pedror}, Wagemans J, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Wichmann FA{felix}{Department Empirical Inference}}
}
@Article{ 2614,
title = {Feeling what you hear: auditory signals can modulate tactile taps perception},
journal = {Experimental Brain Research},
year = {2005},
month = {4},
volume = {162},
number = {2},
pages = {172-180},
abstract = {We tested whether auditory sequences of beeps can modulate the tactile perception of sequences of taps (two to four taps per sequence) delivered to the index fingertip. In the first experiment, the auditory and tactile sequences were presented simultaneously. The number of beeps delivered in the auditory sequence was either the same as, less than, or more than the number of taps of the simultaneously presented tactile sequence. Though task irrelevant (subjects were instructed to focus on the tactile stimuli), the auditory stimuli systematically modulated subjects' tactile perception; in other words, subjects' responses depended significantly on the number of delivered beeps. Such modulation only occurred when the auditory and tactile stimuli were similar enough. In the second experiment, we tested whether the automatic auditory-tactile integration depends on simultaneity or whether a bias can be evoked when the auditory and tactile sequences are presented in temporal asynchrony. Audition significantly modulated tactile perception when the stimuli were presented simultaneously, but this effect gradually disappeared when a temporal asynchrony was introduced between auditory and tactile stimuli. These results show that when provided with auditory and tactile sensory signals that are likely to be generated by the same stimulus, the central nervous system (CNS) tends to automatically integrate these signals.},
file_url = {/fileadmin/user_upload/files/publications/pdf2614.pdf},
web_url = {http://www.springerlink.com/content/b8uplmgtv3hj2q4c/fulltext.pdf},
state = {published},
DOI = {10.1007/s00221-004-2128-2},
author = {Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Drewing K{kdrewing}, Bouyer G, Maury V and Kheddar A}
}
@Article{ 3352,
title = {First Evaluation of A Novel Tactile Display Exerting Shear Force via Lateral Displacement},
journal = {ACM Transactions on Applied Perception},
year = {2005},
month = {4},
volume = {2},
number = {2},
pages = {118-131},
abstract = {Based on existing knowledge on human tactile movement perception, we constructed a prototype of a novel tactile multipin
display that controls lateral pin displacement and thus produces shear force. Two experiments focus on the question of whether
the prototype display generates tactile stimulation that is appropriate for the sensitivity of human tactile perception. In
particular, Experiment I studied human resolution for distinguishing between different directions of pin displacement and
Experiment II explored the perceptual integration of information resulting from the displacement of multiple pins. Both experiments
demonstrated that humans can discriminate between directions of the displacements, and also that the technically
realized resolution of the display exceeds the perceptual resolution (>14°). Experiment II demonstrated that the human brain
does not process stimulation from the different pins of the display independently of one another, at least concerning direction. The
acquired psychophysical knowledge based on this new technology will in return be used to improve the design of the display.},
file_url = {/fileadmin/user_upload/files/publications/pdf3352.pdf},
file_url2 = {/fileadmin/user_upload/files/publications/Drewing_Fritschi_Zopf_Ernst_Buss_3352[1].pdf},
state = {published},
DOI = {10.1145/1060581.1060586},
author = {Drewing K{kdrewing}, Fritschi M{fritschi}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Zopf R{rzopf}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Buss M}
}
@Article{ 2612,
title = {Experience can change the "light-from-above" prior},
journal = {Nature Neuroscience},
year = {2004},
month = {10},
volume = {7},
number = {10},
pages = {1057-1058},
abstract = {To interpret complex and ambiguous input, the human visual system uses prior knowledge or assumptions about the world. We show that the light-from-above prior, used to extract information about shape from shading, is modified in response to active experience with the scene. The resultant adaptation is not specific to the learned scene but generalizes to a different task, demonstrating that priors are constantly adapted by interactive experience with the environment.},
file_url = {/fileadmin/user_upload/files/publications/pdf2612.pdf},
web_url = {http://www.nature.com/neuro/journal/v7/n10/pdf/nn1312.pdf},
state = {published},
DOI = {10.1038/nn1312},
author = {Adams WJ, Graf EW and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Article{ 2507,
title = {Merging the Senses into a Robust Percept},
journal = {Trends in Cognitive Sciences},
year = {2004},
month = {4},
volume = {8},
number = {4},
pages = {162-169},
abstract = {For perceiving the environment our brain uses multiple sources of sensory information derived from several different modalities, including vision, touch and audition. All these different sources of information have to be efficiently merged to form a coherent and robust percept. Here we highlight some of the mechanisms underlying this merging of the senses in the brain. We show that depending on the type of information different combination and integration strategies are used and that prior knowledge is often required for interpreting the sensory signals.},
file_url = {/fileadmin/user_upload/files/publications/pdf2507.pdf},
web_url = {http://www.sciencedirect.com/science?_ob=MImg&_imagekey=B6VH9-4BVP99P-1-N&_cdi=6061&_user=29041&_orig=search&_coverDate=04%2F30%2F2004&_sk=999919995&view=c&wchp=dGLzVzz-zSkzV&md5=7e20182e5625df31980e8f9a12c4a338&ie=/sdarticle.pdf},
state = {published},
DOI = {10.1016/j.tics.2004.02.002},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Article{ 1631,
title = {Combining Sensory Information: Mandatory Fusion Within, but Not Between, Senses},
journal = {Science},
year = {2002},
month = {11},
volume = {298},
number = {5598},
pages = {1627-1630},
abstract = {Humans use multiple sources of sensory information to estimate environmental properties. For example, the eyes and hands both provide relevant information about an object's shape. The eyes pick up shape information from the object's projected outline, its disparity gradient, texture gradient, shading, and more. The hands supply tactile and haptic shape information (respectively, static and active cues). When multiple cues are available, it would be sensible to combine them in a way that yields a more accurate estimate of the object property in question than any single-cue estimate would. By combining information from multiple sources, the nervous system might lose access to single-cue information. Here we report that single-cue information is indeed lost when cues from within the same sensory modality (disparity and texture gradients in vision) are combined, but not when cues from different modalities (vision and haptics) are combined. When one considers the nature of within- and inter-modal information, this difference is perfectly reasonable.},
file_url = {/fileadmin/user_upload/files/publications/pdf1631.pdf},
web_url = {http://www.sciencemag.org/cgi/reprint/298/5598/1627.pdf},
state = {published},
DOI = {10.1126/science.1075396},
author = {Hillis JM, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Banks MS{martybanks} and Landy MS}
}
@Article{ 1005,
title = {Humans Integrate Visual and Haptic Information in a Statistically Optimal Fashion},
journal = {Nature},
year = {2002},
month = {1},
volume = {415},
number = {6870},
pages = {429-433},
abstract = {When a person looks at an object while exploring it with their hand, vision and touch both provide information for estimating the properties of the object. Vision frequently dominates the integrated visual-haptic percept, for example when judging size, shape or position, but in some circumstances the percept is clearly affected by haptics. Here we propose that a general principle, which minimizes variance in the final estimate, determines the degree to which vision or haptics dominates. This principle is realized by using maximum-likelihood estimation to combine the inputs. To investigate cue combination quantitatively, we first measured the variances associated with visual and haptic estimation of height. We then used these measurements to construct a maximum-likelihood integrator. This model behaved very similarly to humans in a visual-haptic task. Thus, the nervous system seems to combine visual and haptic information in a fashion that is similar to a maximum-likelihood integrator. Visual dominance occurs
when the variance associated with visual estimation is lower than that associated with haptic estimation.},
file_url = {/fileadmin/user_upload/files/publications/pdf1005.pdf},
web_url = {http://www.nature.com/nature/journal/v415/n6870/pdf/415429a.pdf},
state = {published},
DOI = {10.1038/415429a},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Banks MS{martybanks}}
}
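% The maximum-likelihood integration rule referred to in the abstract above combines the single-cue estimates as a reliability-weighted average. A minimal sketch in generic notation (symbols are illustrative and assume independent, roughly Gaussian noise on the two estimates): with visual and haptic size estimates \hat{S}_V and \hat{S}_H of variances \sigma_V^2 and \sigma_H^2,
% \[ \hat{S}_{VH} = w_V \hat{S}_V + w_H \hat{S}_H, \qquad w_V = \frac{1/\sigma_V^2}{1/\sigma_V^2 + 1/\sigma_H^2}, \quad w_H = 1 - w_V, \]
% which minimizes the variance of the combined estimate,
% \[ \sigma_{VH}^2 = \frac{\sigma_V^2\,\sigma_H^2}{\sigma_V^2 + \sigma_H^2} \le \min(\sigma_V^2, \sigma_H^2). \]
% Visual dominance then corresponds to \sigma_V^2 < \sigma_H^2 (i.e. w_V > w_H).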
@Article{ 1200,
title = {Viewpoint Dependence in Visual and Haptic Object Recognition},
journal = {Psychological Science},
year = {2001},
month = {1},
volume = {12},
number = {1},
pages = {37-42},
abstract = {On the whole, people recognize objects best when they see the objects from a familiar view and worse when they see the objects from views that were previously occluded from sight. Unexpectedly, we found haptic object recognition to be viewpoint-specific as well, even though hand movements were unrestricted. This viewpoint dependence was due to the hands' preferring the back "view" of the objects. Furthermore, when the sensory modalities (visual vs. haptic) differed between learning an object and recognizing it, recognition performance was best when the objects were rotated back-to-front between learning and recognition. Our data indicate that the visual system recognizes the front view of objects best, whereas the hand recognizes objects best from the back.},
file_url = {/fileadmin/user_upload/files/publications/pdf1200.pdf},
web_url = {http://pss.sagepub.com/content/12/1/37.full.pdf+html},
state = {published},
DOI = {10.1111/1467-9280.00307},
author = {Newell F{fiona}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Tjan BS{tjan}{Department Human Perception, Cognition and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Article{ 79,
title = {Touch can change visual slant perception},
journal = {Nature Neuroscience},
year = {2000},
month = {1},
volume = {3},
number = {1},
pages = {69-73},
abstract = {The visual system uses several signals to deduce the three-dimensional structure of the environment, including binocular disparity, texture
gradients, shading and motion parallax. Although each of these sources of information is independently insufficient to yield reliable three-dimensional structure
from everyday scenes, the visual system combines them by weighting the available information; altering the weights would therefore change the perceived
structure. We report that haptic feedback (active touch) increases the weight of a consistent surface-slant signal relative to inconsistent signals. Thus, the
appearance of a subsequently viewed surface is changed: the surface appears slanted in the direction specified by the haptically reinforced signal.},
file_url = {/fileadmin/user_upload/files/publications/pdf79.pdf},
web_url = {http://www.nature.com/neuro/journal/v3/n1/pdf/nn0100_69.pdf},
state = {published},
DOI = {10.1038/71140},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Banks MS{martybanks} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Article{ 1626,
title = {Calorimetry of archaeal tetraether lipid: indication of a novel metastable thermotropic phase in the main phospholipid of Thermoplasma acidophilum cultured at 59°C},
journal = {Chemistry and Physics of Lipids},
year = {1998},
month = {7},
volume = {94},
number = {1},
pages = {1-12},
abstract = {The main glycophospholipid (MPL) from the archaeon Thermoplasma acidophilum is composed of a di-isopranol-2,3-glycerotetraether. The fraction of pentacyclizations of its hydrocarbon chains increases with the growth temperature of the source organism (39 and 59°C), the respective lipids being named MPL39 and MPL59. MPL has a main phase transition between −15 and −30°C. Non-hydrated and hydrated samples of MPL59 have been studied by differential thermal analysis (DTA). Non-hydrated MPL59 does not exhibit any phase transition. Computer simulation of an unhydrated MPL molecule with four pentacycles and another without pentacyclations demonstrates similar behavior, i.e. the MPL molecules form coils with both polar ends coming close together. The molecule without pentacyclation coils faster than that with pentacycles. With hydrated samples, DTA scanning conditions were varied. Under certain conditions, the shape of the calorimetric scans, i.e. the occurrence of an additional (endothermic) phase transition peak at +17°C and the enthalpy changes of the phase transitions, indicates a (metastable) solid-analogue phase in MPL59 in addition to the well-known liquid–crystalline phase. Only lipid samples from T. acidophilum with a high degree of acyclic hydrocarbon chains (MPL39) had thus far been reported to form a metastable solid-analogue phase (Blöcher, D., Gutermann, R., Henkel, B., Ring, K., 1990. Biochim. Biophys. Acta 1024, 54–60). A phase transition model is presented for MPL59 which includes the existence of a metastable solid-analogue phase.},
web_url = {http://www.sciencedirect.com/science/article/pii/S0009308498000048},
state = {published},
DOI = {10.1016/S0009-3084(98)00004-8},
author = {Ernst M{marc}, Freisleben H-J, Antonopoulos E, Henkel L, Mlekusch W and Reibnegger G}
}
@Inproceedings{ Ernst2010,
title = {Using Interactive Technology to Study Human Perception and Action},
year = {2010},
month = {10},
pages = {1-2},
web_url = {http://hfr2010.wordpress.com/},
event_name = {3rd Workshop for Young Researchers on Human-Friendly Robotics (HFR 2010)},
event_place = {Tübingen, Germany},
state = {published},
author = {Ernst M{marc}{Research Group Multisensory Perception and Action}}
}
@Inproceedings{ 5026,
title = {Motion Primitives of Dancing},
journal = {Haptics: Perception, Devices and Scenarios (Eurohaptics 2008)},
year = {2008},
month = {6},
pages = {838-843},
abstract = {In this work, we analyze whether oscillatory motion between two extreme positions could be used to create a robotic dancing partner that provides natural haptic feedback. To this end, we compared the pattern of hand movements performed following a pacing signal while participants were instructed to either move rhythmically or to dance. Furthermore, we analyzed the influence of the frequency and type of pacing signal on the two kinds of movements. Trajectories were analyzed in terms of: frequency of movement, spatial and temporal synchronization, and jerk.
Results indicate that it is easier to perform synchronized movements while dancing, even though these movements partially deviate from the pacing frequency. Dance movements are in fact more complex than the ones produced to keep the rhythm and for this reason they should be modeled accordingly in order to provide realistic haptic feedback.},
web_url = {http://www.disam.upm.es/~eurohaptics2008/},
editor = {Ferre, M.},
publisher = {Springer},
address = {Berlin, Germany},
booktitle = {Haptics: Perception, Devices and Scenarios},
event_name = {6th International Conference EuroHaptics 2008},
event_place = {Madrid, Spain},
state = {published},
ISBN = {978-3-540-69057-3},
DOI = {10.1007/978-3-540-69057-3_106},
author = {Groten R, H\"olldampf J, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Buss M}
}
@Inproceedings{ 4073,
title = {An instance of tactile suppression: Active exploration impairs tactile sensitivity for the direction of lateral movement},
journal = {Proceedings of the EuroHaptics 2006 International Conference (EH 2006)},
year = {2006},
month = {7},
pages = {351-355},
abstract = {We call the phenomenon of reduced tactile sensitivity during voluntarily executed body movements “tactile suppression”. This is in analogy to saccadic suppression, where visual sensitivity is reduced during voluntarily executed eye movements [1]. Here we investigate tactile suppression using an integrated tactile/kinesthetic display – consisting of a tactile shear force device [2] mounted on a hyper-redundant haptic display (ViSHaRD10 [3]). To quantify the tactile suppression effect we measured subjects’ motion-direction discrimination performance for tactile stimuli moving laterally on the index finger under various active and passive exploration conditions. In the baseline condition (“static”) only tactile stimuli were provided using the shear-force device while the arm was held still. In the “active” condition subjects had to discriminate the direction of tactile motion while actively executing arm movements at the same time. Finally, in the “passive” condition the kinesthetic device passively moved the subjects’ arm, while the subject was performing the discrimination task. Compared to the “static” condition, results indicate a significant decrease in tactile sensitivity during active movements, whereas passive movements seem to have a minor effect on tactile discrimination performance.},
file_url = {/fileadmin/user_upload/files/publications/Eurohaptics-paper_4073[0].pdf},
web_url = {http://lsc.univ-evry.fr/~eurohaptics/index.shtml},
editor = {Kheddar, A. , B. Bayart},
address = {Paris, France},
event_name = {EuroHaptics International Conference (EH 2006)},
event_place = {Paris, France},
state = {published},
author = {Vitello MP{vitello}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Fritschi M{fritschi}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Inproceedings{ 4058,
title = {Integration of Kinesthetic and Tactile Display: A Modular Design Concept},
journal = {Proceedings of the EuroHaptics 2006 International Conference (EH 2006)},
year = {2006},
month = {7},
pages = {607-612},
abstract = {This paper describes the systematic design of a modular setup for several integrated kinesthetic and cutaneous (tactile) display configurations. The proposed modular integration of a kinesthetic display and several tactile displays in serial configuration provides a versatile experimental setup to explore the integration of the kinesthetic and tactile modalities of human perception. The kinesthetic base display is a hyper-redundant device and sufficiently powerful to carry each of the compact tactile displays. In addition to a detailed description of the partly novel displays, a series of preliminary
evaluation experiments is presented.},
file_url = {/fileadmin/user_upload/files/publications/eh_2006_115_final_[0].pdf},
web_url = {http://lsc.univ-evry.fr/~eurohaptics/index.shtml},
editor = {Kheddar, A. , B. Bayart},
address = {Paris, France},
event_name = {EuroHaptics International Conference (EH 2006)},
event_place = {Paris, France},
state = {published},
author = {Fritschi M{fritschi}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Buss M}
}
@Inproceedings{ 4046,
title = {Using Multidimensional Scaling to Quantify the Fidelity of Haptic Rendering of Deformable Objects},
journal = {Proceedings of the EuroHaptics 2006 International Conference (EH 2006)},
year = {2006},
month = {7},
pages = {289-295},
abstract = {In this paper, we examine the application of a psychophysical evaluation technique to quantify the fidelity of haptic rendering methods. The technique is based on multidimensional scaling (MDS) analysis of similarity ratings provided by users comparing pairs of
haptically-presented objects. Unbeknownst to the participants, both real and virtual deformable objects were presented to them. In addition, virtual objects were presented either under a higher-fidelity rendering condition or under a lower-fidelity condition in which force filtering and proxy-point filtering were removed. We hypothesized
that reducing the fidelity of virtual rendering would exaggerate the difference between real and virtual objects. MDS analysis of pairwise similarity data provided quantitative confirmation that users perceived a clear difference between real and virtual objects in the
lower-fidelity, but not in the higher-fidelity condition. In the latter, a single perceptual dimension, corresponding to stiffness, sufficed to explain similarity data, while two perceptual dimensions were needed in the former condition. This study demonstrates how MDS analysis provides an opportunity to visualize and quantify the perceptual effects of changes in rendering parameters and how it can be used in the evaluation of haptic rendering scenarios.},
file_url = {/fileadmin/user_upload/files/publications/leskovsky_cooke_eurohaptics_2006_4046[0].pdf},
web_url = {http://lsc.univ-evry.fr/~eurohaptics/index.shtml},
editor = {Kheddar, A. , B. Bayart},
address = {Paris, France},
event_name = {EuroHaptics International Conference (EH 2006)},
event_place = {Paris, France},
state = {published},
author = {Leskovsky P, Cooke T{tmcooke}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Harders M}
}
@Inproceedings{ 3784,
title = {Material Properties Determine How we Integrate Shape Signals in Active Touch},
journal = {Proceedings of the 1st Joint Worldhaptic Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems},
year = {2005},
month = {3},
pages = {1-6},
abstract = {When sliding a finger across a bumpy surface, the finger follows the surface geometry (position signal). At the same time the finger is exposed to forces related to the slope of the surface (force signal) [1]. For haptic shape perception the brain uses both signals, integrating them by weighted averaging [2]. This is consistent with the Maximum-Likelihood-Estimation (MLE) model of signal integration, previously only applied to passive perception.
The model further predicts that signal weight is proportional to signal reliability. Here, we tested this prediction for the integration of force and position signals into perceived curvature by manipulating material properties of the curve. Low as compared to high compliance decreased the reliability and so the weight of the sensorily transduced position signal. High as compared to low friction decreased the reliability and so the weight of the transduced force signal. These results demonstrate that the MLE model extends to situations involving active touch.},
file_url = {fileadmin/user_upload/files/publications/WHC-2005-Drewing.pdf},
web_url = {http://www.worldhaptics.org/2005/FinalProgramme.html},
event_name = {1st Joint Worldhaptic Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems (WorldHaptics 2005)},
event_place = {Pisa, Italy},
state = {published},
author = {Drewing K{kdrewing}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Wiecki T{wiecki}}
}
@Inproceedings{ 2824,
title = {Construction and Psychophysical Evaluation of a Novel Tactile Shear Force Display},
journal = {Proceedings of the 13th IEEE International Workshop on Robot and Human Interactive Communication (ROMAN 2004)},
year = {2004},
month = {9},
pages = {509-513},
abstract = {This work presents the prototype of a shear force display for the finger tip and a first psychophysical evaluation. In order to explore whether the stimuli produced by the display are appropriate for human perception, we studied human discrimination performance for distinguishing between different directions of pin movement. In a second step we explored the perceptual integration of multi-pin movements. This basic psychophysical knowledge, which did not exist so far because the technology was not yet available, will in turn be used to improve the design of the display.},
web_url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=1374812},
publisher = {IEEE Operations Center},
address = {Piscataway, NJ, USA},
event_name = {13th IEEE International Workshop on Robot and Human Interactive Communication (RO-MAN 2004)},
event_place = {Kurashiki, Japan},
state = {published},
ISBN = {0-7803-8570-5},
DOI = {10.1109/ROMAN.2004.1374812},
author = {Fritschi M{fritschi}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Drewing K{kdrewing}, Zopf R{rzopf}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Buss M}
}
@Inproceedings{ 2925,
title = {Tactile Feedback Systems},
journal = {Workshop at the 2004 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2004)},
year = {2004},
month = {9},
pages = {1-21},
abstract = {Within haptics, tactile feedback is one of the more recent modalities for human-system interaction. Research in tactile feedback using pin-array type actuators has been going on for some years. A survey of technological achievements, human sensing capabilities, and psychophysical evaluation in this area is presented. The focus then turns to novel approaches in actuator technology and to tactile feedback systems providing shear force (tangential force to the finger-tip).},
file_url = {fileadmin/user_upload/files/publications/TWF1.pdf},
event_name = {Workshop "Touch and Haptics": 2004 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2004)},
event_place = {Sendai, Japan},
state = {published},
author = {Fritschi M{fritschi}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Buss M, Drewing K{kdrewing}, Zopf R{rzopf}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Inproceedings{ 2890,
title = {Auditory modulation of tactile taps perception},
journal = {Proceedings of EuroHaptics 2004},
year = {2004},
month = {6},
pages = {198-202},
abstract = {We tested whether the tactile perception of sequences of taps delivered on the index fingertip can be modulated by sequences of auditory beeps. In the first experiment, the tactile and auditory sequences were always presented
simultaneously, and were structurally either similar or dissimilar. In the second experiment, the auditory and tactile sequences were always structurally similar but
not always presented simultaneously. When structurally similar and presented simultaneously, the auditory sequences significantly modulated tactile taps perception. This automatic combination of “redundant-like” tactile and auditory signals likely constitutes an optimization process taking advantage of multimodal redundancy for perceptual estimates.},
file_url = {fileadmin/user_upload/files/publications/EuroHaptics-2004-Bresciani.pdf},
web_url = {http://www.lsr.ei.tum.de/eurohaptics2004/index.shtml},
editor = {Buss, M. , M. Fritschi},
publisher = {Institute of Automatic Control Engineering},
address = {München, Germany},
event_name = {4th International Conference EuroHaptics 2004},
event_place = {München, Germany},
state = {published},
ISBN = {3-9809614-0-0},
author = {Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Drewing K{kdrewing}, Bouyer G, Maury V and Kheddar A}
}
@Inproceedings{ 2722,
title = {Bimanual Size Estimation: No Automatic Integration of Information across the Hands},
journal = {Proceedings of EuroHaptics 2004},
year = {2004},
month = {6},
pages = {520-523},
abstract = {Sensory input is often integrated to gain a single estimate of the underlying physical property. Here we investigate if size estimates from the left and right hand are automatically integrated. Six subjects participated in a bimanual matching task. Subjects were presented (virtual) objects to be felt with either hand or with both hands. Their task was to reproduce the sizes after presentation. The bimanual stimuli either had the same size for each hand or there was a size conflict between the hands. We showed that there is no automatic integration and subjects retained access to both hands' size estimates.},
file_url = {/fileadmin/user_upload/files/publications/pdf2722.pdf},
web_url = {http://www.lsr.ei.tum.de/eurohaptics2004/index.shtml},
editor = {Buss, M. , M. Fritschi},
publisher = {Institute of Automatic Control Engineering},
address = {München, Germany},
event_name = {4th International Conference EuroHaptics 2004},
event_place = {München, Germany},
state = {published},
ISBN = {3-9809614-0-0},
author = {Lange C{clange}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Klatzky RL{bobby} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Inproceedings{ 2744,
title = {Construction and first evaluation of a newly developed tactile Shear Force Display},
journal = {Proceedings of EuroHaptics 2004},
year = {2004},
month = {6},
pages = {508-511},
abstract = {At present, tactile displays are constructed either as shape or vibrotactile displays. While shape displays render the shape of objects to the skin, vibrotactile devices display high-frequency but small-amplitude patterns of forces. Existing tactile displays of both types are based on an array of small pins, which move normal to the contact surface. That is, the pins create a pattern of indentation into the skin. Usually, the devices are applied to the human finger pad. However, in order to produce a realistic tactile impression of the environment it is probably as important to provide forces lateral to the human skin, so-called shear forces. This is particularly reasonable when considering perceptions evoked by movements of the skin relative to the environment, e.g. when stroking with the finger across a surface. We aim at technically realizing a third type of tactile display which can provide shear forces. The poster presents the prototype of a shear force display for the finger tip and a first psychophysical evaluation. In order to explore whether the stimuli produced
by the display are appropriate for human perception, we studied, in a first step, human discrimination performance for distinguishing between different directions of pin movement. This basic psychophysical knowledge, which did not exist so far because the technology was not yet available, will in turn be used to improve the design of the display.},
file_url = {fileadmin/user_upload/files/publications/EuroHaptics-2004-Fritschi.pdf},
web_url = {http://www.lsr.ei.tum.de/eurohaptics2004/index.shtml},
editor = {Buss, M. , M. Fritschi},
publisher = {Institute of Automatic Control Engineering},
address = {München, Germany},
event_name = {4th International Conference EuroHaptics 2004},
event_place = {München, Germany},
state = {published},
ISBN = {3-9809614-0-0},
author = {Fritschi M{fritschi}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Drewing K{kdrewing}, Zopf R{rzopf}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Buss M}
}
@Inproceedings{ 2768,
title = {Effect of attention on multimodal cue integration},
journal = {Proceedings of EuroHaptics 2004},
year = {2004},
month = {6},
pages = {524-527},
abstract = {Humans gather information about their environment from multiple sensory channels. It seems that cues from separate sensory modalities (e.g. vision and haptics) are combined
in a statistically optimal way according to a maximum-likelihood estimator [1]. Ernst and Banks showed that for bi-modal perceptual estimates, the weight attributed
to one sensory channel changes when its relative reliability is modified by increasing the noise associated with its signal. Because increasing the attentional load of a given sensory channel is likely to change its reliability, we assume that such a modification would also alter the weight of the different cues for multimodal perceptual estimates. Here we examine this hypothesis using a dual-task paradigm. Subjects’ main task is to estimate the size of a raised bar using vision alone, haptics alone, or both modalities combined. Their performance in the main-task condition alone is compared to the performance obtained when an additional visual ‘distractor’ task is performed simultaneously with the main task (Dual-Task Paradigm). We found that vision-based estimates are more affected by a visual ‘distractor’ than the haptics-based estimates. Our findings substantiate that attention influences the weighting of the different sensory channels for multimodal perceptual estimates. That is, when attention is withdrawn from the visual modality, the haptic estimates are consequently weighted higher in visual-haptic size discrimination. In further experiments, we will examine the influence of a haptic ‘distractor’ task.
We would expect that a haptic ‘distractor’ interferes to a greater extent with the haptic primary task. The vision-based estimates in the main task should be less affected. We will then further examine whether cue integration is still statistically optimal.},
file_url = {fileadmin/user_upload/files/publications/EuroHaptics-2004-Helbig.pdf},
web_url = {http://www.lsr.ei.tum.de/eurohaptics2004/index.shtml},
editor = {Buss, M. , M. Fritschi},
publisher = {Institute of Automatic Control Engineering},
address = {München, Germany},
event_name = {4th International Conference EuroHaptics 2004},
event_place = {München, Germany},
state = {published},
ISBN = {3-9809614-0-0},
author = {Helbig H{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Inproceedings{ 2743,
title = {Roughness and spatial density judgments on visual and haptic textures using virtual reality},
journal = {Proceedings of EuroHaptics 2004},
year = {2004},
month = {6},
pages = {203-206},
abstract = {The purpose of this study is to investigate multimodal visual-haptic texture perception for which we used virtual reality techniques. Participants judged a broad range of textures according to their roughness and their spatial
density under visual, haptic and visual-haptic exploration conditions. Participants were well able to differentiate between the different textures, both by using the roughness and the spatial density judgment. When provided with visual-haptic textures, subjects' performance increased (for both judgments), indicating sensory combination of visual and haptic texture information. Most interestingly,
performance for density and roughness judgments did not differ significantly, indicating that these estimates are highly correlated. This may be due to the fact that our textures were generated in virtual reality using a haptic point-force display (PHANToM). In conclusion, it seems that the roughness and spatial density estimates were based on the same physical parameters, given the display technology used.},
file_url = {fileadmin/user_upload/files/publications/EuroHaptics-2004-Drewing.pdf},
web_url = {http://www.lsr.ei.tum.de/eurohaptics2004/index.shtml},
editor = {Buss, M. , M. Fritschi},
publisher = {Institute of Automatic Control Engineering},
address = {München, Germany},
event_name = {4th International Conference EuroHaptics 2004},
event_place = {München, Germany},
state = {published},
ISBN = {3-9809614-0-0},
author = {Drewing K{kdrewing}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Lederman SJ and Klatzky R{bobby}}
}
@Inproceedings{ 2508,
title = {Cross-modal perception of actively explored objects},
journal = {Proceedings EuroHaptics 2003},
year = {2003},
month = {7},
pages = {291-299},
abstract = {Many objects in our world can be picked up and freely manipulated, thus allowing information about an object to be available to both the visual and haptic systems. However, we understand very little about how object information is shared across the modalities. Under constrained viewing, cross-modal object recognition is most efficient when the same surface of an object is presented to the visual and haptic systems [5]. Here we tested cross-modal recognition of novel objects under active manipulation and unconstrained viewing of the objects. These objects were designed such that each surface of the object provided unique information. In Experiment 1, participants were allowed 30 seconds to learn the objects visually or haptically. Haptic learning resulted in relatively poor haptic recognition performance relative to visual recognition. In Experiment 2, we increased the learning time for haptic exploration and found equivalent haptic and visual recognition, but a cost in cross-modal recognition. In Experiment 3, participants learned the objects using both modalities together, vision alone or haptics alone. Recognition performance was tested using both modalities together. We found that recognition performance was significantly better when objects were learned by both modalities than either of the modalities alone. Our results suggest that efficient cross-modal performance depends on the spatial correspondence of object surface information across modalities.},
file_url = {/fileadmin/user_upload/files/publications/cross_modal_perception_of_actively_explored_objects_2508[0].pdf},
web_url = {http://www.eurohaptics.org/},
editor = {Oakley, I. , S. O'Modhrain, F. Newell},
publisher = {Trinity College Dublin},
address = {Dublin, Ireland},
event_name = {EuroHaptics International Conference 2003},
event_place = {Dublin, Ireland},
state = {published},
author = {Newell F{fiona}{Department Human Perception, Cognition and Action}, B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Inproceedings{ 2393,
title = {Learning to Combine Arbitrary Signals from Vision and Touch},
journal = {Proceedings EuroHaptics 2003},
year = {2003},
month = {7},
pages = {276-290},
abstract = {When different perceptual signals of the same physical property are integrated–e.g., the size of an object, which can be seen and felt–they form a more reliable sensory estimate [3]. This, however, implies that the sensory
system already knows which signals belong together and how they are related. In a Bayesian model of cue integration this prior knowledge can be made explicit. Here, we examine whether such a relationship between two arbitrary
sensory signals from vision and touch can be learned from their statistical co-occurrence such that they become integrated. In the Bayesian model this means changing the prior distribution over the stimuli. To this end, we trained subjects with stimuli that are usually uncorrelated in the world–the luminance of an object (visual signal) and its stiffness (haptic signal). In the training phase we
presented only combinations of these signals which were highly correlated. Before and after training we measured discrimination performance with distributions of stimuli which were either congruent with the correlation during
training or incongruent. The incongruent stimuli came from an anti-correlated distribution compared to the stimuli during training. If subjects were sensitive to the correlation between the signals then we would expect to see a change in their prior knowledge about what combinations of stimuli are usually encountered. Accordingly, this should change their discrimination performance
between pre- and post-test. We found a significant interaction between the two factors pre/post-test and congruent/incongruent. After training, discrimination
thresholds for the incongruent stimuli are increased relative to the thresholds for congruent stimuli, suggesting that subjects learned to combine the two signals
effectively.},
file_url = {/fileadmin/user_upload/files/publications/EuroHaptics-2003-Jaekel.pdf},
web_url = {http://www.eurohaptics.org/},
editor = {Oakley, I. , S. O'Modhrain, F. Newell},
publisher = {Trinity College Dublin},
address = {Dublin, Ireland},
event_name = {EuroHaptics International Conference 2003},
event_place = {Dublin, Ireland},
state = {published},
author = {J\"akel F{frank} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
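The coupling-prior account outlined in the abstract above can be illustrated numerically. The following is a minimal Python sketch, not the authors' code: the grid resolution, noise levels, and the function name map_estimate are illustrative assumptions. A bivariate Gaussian prior over the visual and haptic stimulus values plays the role of the learned coupling; with correlation rho = 0 the two estimates remain essentially independent, while a high rho pulls them together, mimicking integration after training.

# Minimal sketch (not from the cited paper): Bayesian combination of two
# arbitrary signals (e.g. luminance and stiffness) through a learned
# "coupling prior". All parameter values are illustrative assumptions.
import numpy as np

def map_estimate(x_v, x_h, sigma_v, sigma_h, rho, sigma_prior):
    """MAP estimates of the two stimuli given noisy measurements x_v, x_h.

    The prior over (s_v, s_h) is a zero-mean bivariate Gaussian with
    correlation rho and marginal s.d. sigma_prior.
    """
    s_v, s_h = np.meshgrid(np.linspace(-3, 3, 301), np.linspace(-3, 3, 301))
    # independent Gaussian likelihoods for the two measurements
    log_lik = (-(x_v - s_v) ** 2 / (2 * sigma_v ** 2)
               - (x_h - s_h) ** 2 / (2 * sigma_h ** 2))
    # correlated Gaussian coupling prior over the stimulus pair
    log_prior = -(s_v ** 2 - 2 * rho * s_v * s_h + s_h ** 2) / (
        2 * sigma_prior ** 2 * (1 - rho ** 2))
    log_post = log_lik + log_prior
    i, j = np.unravel_index(np.argmax(log_post), log_post.shape)
    return s_v[i, j], s_h[i, j]

# "Before training" (rho = 0): estimates stay close to the measurements.
print(map_estimate(1.0, -1.0, 0.5, 0.5, rho=0.0, sigma_prior=2.0))
# "After training" (rho close to 1): estimates are pulled towards each other.
print(map_estimate(1.0, -1.0, 0.5, 0.5, rho=0.9, sigma_prior=2.0))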
@Inproceedings{ 2509,
title = {Combining Sensory Information to Improve Visualization},
journal = {Proceedings of the Conference on Visualization ’02 (VIS ’02)},
year = {2002},
month = {11},
pages = {571-574},
abstract = {Seemingly effortlessly the human brain reconstructs the three-dimensional environment surrounding us from the light pattern striking the eyes. This seems to be true across almost all viewing and lighting conditions. One important factor for this apparent easiness is the redundancy of information provided by the sensory organs. For example, perspective distortions, shading, motion parallax, or the disparity between the two eyes' images are all, at least partly, redundant signals which provide us with information about the three-dimensional layout of the visual scene. Our brain uses all these different sensory signals and combines the available information into a coherent percept. In displays visualizing data, however, the information is often highly reduced and abstracted, which may lead to an altered perception and therefore a misinterpretation of the visualized data. In this panel we will discuss mechanisms involved in the combination of sensory information and their implications for simulations using computer displays, as well as problems resulting from current display technology such as cathode-ray tubes.},
file_url = {/fileadmin/user_upload/files/publications/combining_sensory_information_to_improve_visualization_2509[0].pdf},
web_url = {http://dl.acm.org/citation.cfm?id=602202&bnc=1},
editor = {Moorhead, R. , M. Joy},
publisher = {IEEE},
address = {Piscataway, NJ, USA},
event_name = {IEEE Conference on Visualization 2002 (VIS '02)},
event_place = {Boston, MA, USA},
state = {published},
ISBN = {0-7803-7498-3},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Banks MS{martybanks}, Wichmann FA{felix}{Department Empirical Inference}, Maloney L and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Inbook{ VanDamPE2013,
title = {Modeling multisensory integration},
year = {2014},
month = {10},
pages = {209-230},
web_url = {http://site.ebrary.com/lib/alltitles/docDetail.action?docID=10966771},
editor = {Bennett, D. , J. Hill},
publisher = {MIT Press},
address = {Cambridge, MA, USA},
booktitle = {Sensory Integration and the Unity of Consciousness},
state = {published},
ISBN = {978-0-262-02778-6},
author = {Van Dam LCJ{vandam}{Research Group Multisensory Perception and Action}, Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Inbook{ FrissenCSE2013,
title = {Enabling Unconstrained Omnidirectional Walking Through Virtual Environments: An Overview of the CyberWalk Project},
year = {2013},
pages = {113-144},
abstract = {The CyberWalk treadmill is the first truly omnidirectional treadmill of its size that allows for near natural walking through arbitrarily large Virtual Environments. The platform represents advances in treadmill and virtual reality technology and engineering, but it is also a major step towards having a single setup that allows the study of human locomotion and its many facets. This chapter focuses on the human behavioral research that was conducted to understand human locomotion from the perspective of specifying design criteria for the CyberWalk. The first part of this chapter describes research on the biomechanics of human walking, in particular, the nature of natural unconstrained walking and the effects of treadmill walking on characteristics of gait. The second part of this chapter describes the multisensory nature of walking, with a focus on the integration of vestibular and proprioceptive information during walking. The third part of this chapter describes research on large-scale human navigation and identifies possible causes for the human tendency to veer from a straight path, and even walk in circles when no external references are made available. The chapter concludes with a summary description of the features of the CyberWalk platform that were informed by this collection of research findings and briefly highlights the current and future scientific potential for this platform.},
web_url = {http://link.springer.com/content/pdf/10.1007%2F978-1-4419-8432-6_6.pdf},
editor = {Steinicke, F. , Y. Visell, J. Campos, A. Lécuyer},
publisher = {Springer},
address = {New York, NY, USA},
booktitle = {Human Walking in Virtual Environments: Perception, Technology, and Applications},
state = {published},
ISBN = {978-1-4419-8431-9},
DOI = {10.1007/978-1-4419-8432-6_6},
author = {Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Campos JL{camposjl}{Department Human Perception, Cognition and Action}, Sreenivasa M{manu}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Inbook{ ErnstDM2011,
title = {Multisensory perception: from integration to remapping},
year = {2011},
pages = {225-250},
abstract = {The brain receives information about the environment from all the sensory modalities, including vision, touch, and audition. To interact efficiently with the environment, this information must eventually converge to form a reliable and accurate multimodal percept. This process is often complicated by the existence of noise at every level of signal processing, which makes the sensory information derived from the world unreliable and inaccurate. There are several ways in which the nervous system may minimize the negative consequences of noise in terms of reliability and accuracy. Two key strategies are to combine redundant sensory estimates and to use prior knowledge. This chapter elaborates further on how these strategies may be used by the nervous system to obtain the best possible estimates from noisy signals.},
web_url = {http://www.oxfordscholarship.com/view/10.1093/acprof:oso/9780195387247.001.0001/acprof-9780195387247-chapter-12},
editor = {Trommershäuser, J. , K.P. Körding, M.S. Landy},
publisher = {Oxford University Press},
address = {New York, NY, USA},
booktitle = {Sensory Cue Integration},
state = {published},
ISBN = {978-0-19-538724-7},
DOI = {10.1093/acprof:oso/9780195387247.001.0001},
author = {Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Inbook{ 4964,
title = {Haptic perception in interaction with other senses},
year = {2008},
month = {12},
pages = {235-249},
abstract = {Human perception is inherently multisensory: we perceive the world simultaneously with multiple senses. While strolling the farmers market, for example, we might become aware of the presence of a delicious fruit by its characteristic smell. We might use our senses of vision and touch to identify the fruit by its typical size and shape and touch it to select only the one with the distinctive soft texture that signals ripeness. When we take a bite of the fruit, we taste its characteristic flavour and hear a slight smacking sound which confirms that the fruit we perceive with our senses of vision, touch, audition, smell and taste is a ripe, delicious peach. That is, in the natural environment the information delivered by our sense of touch is combined with information gathered by each of the other senses to create a robust percept. Combining information from multiple systems is essential because no information-processing system, neither technical nor biological, is powerful enough to provide a precise and accurate sensory estimate under all conditions.},
web_url = {http://www.springerlink.com/content/978-3-7643-7611-6},
editor = {Grunwald, M.},
publisher = {Birkhäuser},
address = {Basel, Switzerland},
booktitle = {Human Haptic Perception: Basics and Applications},
state = {published},
DOI = {10.1007/978-3-7643-7612-3_18},
author = {Helbig HB{helbig}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Inbook{ 4973,
title = {Design and Evaluation of Haptic Soft Tissue Interaction},
year = {2008},
month = {8},
pages = {225-244},
abstract = {This chapter examines the application of a psychophysical evaluation technique to quantify the fidelity of haptic rendering methods. The technique is based on multidimensional scaling analysis of similarity ratings provided by users comparing pairs of haptically-presented objects. Unbeknownst to the participants, both real and virtual deformable objects were presented. In addition, virtual objects were either rendered under high fidelity condition or under lower-fidelity condition in which filtering quality was reduced. The analysis of pairwise similarity data provides quantitative confirmation that users perceived a clear difference between real and virtual objects in the lower-fidelity, but not in the higher-fidelity condition. In the latter, a single perceptual dimension, corresponding to stiffness, sufficed to explain similarity data, while two perceptual dimensions were needed in the former condition. This demonstrates how multidimensional scaling analysis can be used in the evaluation of haptic rendering scenarios, providing perceptual maps of real and virtual objects. It offers an opportunity to visualize and quantify the perceptual effects of changes in rendering parameters.},
file_url = {/fileadmin/user_upload/files/publications/productFlyer_978-3-540-79034-1_4973[0].pdf},
web_url = {http://www.springerlink.com/content/r5w8266m04k3/?p=187299a776f14e1e916b5ba9e2149305&pi=0},
editor = {Bicchi, A. , M. Buss, M.O. Ernst, A. Peer},
publisher = {Springer},
address = {Berlin, Germany},
series = {Springer Tracts in Advanced Robotics},
booktitle = {The Sense of Touch and its Rendering: Progress in Haptics Research},
state = {published},
ISBN = {978-3-540-79035-8},
DOI = {10.1007/978-3-540-79035-8_11},
author = {Harders M, Leskovsky P, Cooke T{tmcooke}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Szekely G}
}
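The multidimensional scaling analysis described above can be illustrated in a few lines of NumPy. This is a rough, self-contained sketch of classical (Torgerson) MDS rather than the chapter's actual analysis pipeline, and the 4x4 dissimilarity matrix (two "real" and two "virtual" objects) is invented purely for illustration.

# Rough sketch of classical MDS: embed objects so that Euclidean distances
# approximate a given pairwise dissimilarity matrix. Not the chapter's code.
import numpy as np

def classical_mds(D, n_dims=1):
    """Return coordinates whose distances approximate the dissimilarities D."""
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n           # centering matrix
    B = -0.5 * J @ (D ** 2) @ J                   # double-centred Gram matrix
    eigval, eigvec = np.linalg.eigh(B)
    order = np.argsort(eigval)[::-1][:n_dims]     # keep the largest eigenvalues
    return eigvec[:, order] * np.sqrt(np.maximum(eigval[order], 0))

# Hypothetical dissimilarity ratings between two real and two virtual objects
D = np.array([[0.0, 0.2, 1.0, 1.1],
              [0.2, 0.0, 0.9, 1.0],
              [1.0, 0.9, 0.0, 0.3],
              [1.1, 1.0, 0.3, 0.0]])
print(classical_mds(D, n_dims=1).ravel())         # one recovered perceptual axis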
@Inbook{ 4969,
title = {Human Haptic Perception and the Design of Haptic-Enhanced Virtual Environments},
year = {2008},
month = {8},
pages = {61-106},
abstract = {This chapter presents an overview of interesting scientific findings related to human haptic perception and discusses the usability of these scientific findings for the design and development of virtual environments including haptic rendering. The first section of the chapter deals with pure haptic perception whereas the second and third sections are devoted to the integration of kinesthetic information with other sensory inputs like vision and audition.},
web_url = {http://www.springerlink.com/content/r5w8266m04k3/?p=7d9502a06cb24f06bf2f43b98a404ce4&pi=0},
editor = {Bicchi, A. , M. Buss, M.O. Ernst, A. Peer},
publisher = {Springer},
address = {Berlin, Germany},
series = {Springer Tracts in Advanced Robotics},
booktitle = {The Sense of Touch and its Rendering: Progress in Haptics Research},
state = {published},
ISBN = {978-3-540-79035-8},
DOI = {10.1007/978-3-540-79035-8_5},
author = {Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}, Drewing K{kdrewing} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Inbook{ 4970,
title = {Multi-Modal VR Systems},
year = {2008},
month = {8},
pages = {179-206},
abstract = {This chapter presents novel multi-modal and integrated systems developed in the laboratories of the Institute of Automatic Control Engineering, Technische Universität München. First, kinesthetic, tactile, visual and acoustic hardware used for multi-modal systems are introduced individually. Then the integration of the hardware into multi-modal VR systems and chosen applications are explained. The kinesthetic-tactile integrated systems are evaluated. The objective of the evaluations has been the study of the psychophysical correlation between the tactile and the kinesthetic portion of haptic information.},
web_url = {http://www.springerlink.com/content/r5w8266m04k3/?p=8aab62ce5f38455f9c6ff5d16b696a5b&pi=0},
editor = {Bicchi, A. , M. Buss, M.O. Ernst, A. Peer},
publisher = {Springer},
address = {Berlin, Germany},
series = {Springer Tracts in Advanced Robotics},
booktitle = {The Sense of Touch and its Rendering: Progress in Haptics Research},
state = {published},
ISBN = {978-3-540-79035-8},
DOI = {10.1007/978-3-540-79035-8_9},
author = {Fritschi M{fritschi}{Research Group Multisensory Perception and Action}, Esen H, Buss M and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Inbook{ 2918,
title = {A Bayesian view on multimodal cue integration},
year = {2005},
month = {11},
number = {Chapter 6},
pages = {105-131},
abstract = {We perceive our own body and the world surrounding us via multiple sources of sensory information derived from several modalities, including vision, touch and audition. To enable interactions with the environment this information has to converge into a coherent and unambiguous multimodal percept of the body and the world. But how does the brain come up with such a unique percept? In this chapter I review a model that in the statistical sense describes an optimal integration mechanism. The benefit of integrating sensory information comes from a reduction in variance of the final perceptual estimate. Furthermore, I point out how this integration scheme can be incorporated in a larger framework using Bayesian decision theory (BDT).},
file_url = {/fileadmin/user_upload/files/publications/Ernst_BayesIntegration_2006_2918[0].pdf},
web_url = {http://www.oup.com/us/catalog/general/subject/Psychology/Cognitive/~~/dmlldz11c2EmY2k9OTc4MDE5NTE3ODM3MQ==},
editor = {Knoblich, G. , M. Grosjean, I. Thornton, M. Shiffrar},
publisher = {Oxford University Press},
address = {Oxford, UK},
series = {Advances in Visual Cognition},
booktitle = {Human Body Perception From The Inside Out},
state = {published},
ISBN = {0-19-517837-8},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
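The variance-reduction benefit of integration referred to in this abstract is the standard maximum-likelihood (reliability-weighted) result. Written in common textbook notation (the symbols below are generic, not the chapter's own), for visual and haptic estimates \hat{S}_V and \hat{S}_H with noise variances \sigma_V^2 and \sigma_H^2:

\[
\hat{S}_{VH} = w_V \hat{S}_V + w_H \hat{S}_H,
\qquad
w_i = \frac{1/\sigma_i^{2}}{1/\sigma_V^{2} + 1/\sigma_H^{2}},
\qquad
\sigma_{VH}^{2} = \frac{\sigma_V^{2}\,\sigma_H^{2}}{\sigma_V^{2} + \sigma_H^{2}} \le \min\!\left(\sigma_V^{2}, \sigma_H^{2}\right),
\]

so the combined estimate is never less reliable than the better of the two single-modality estimates.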
@Techreport{ 1545,
title = {Viewpoint dependence in visual and haptic object recognition},
year = {2000},
month = {3},
number = {80},
abstract = {On the whole, we recognise objects best when we see
them from a familiar view and worse from views that were previously occluded from sight. Unexpectedly, we found haptic object recognition to also be viewpoint-specific, even though hand movements were unrestricted. This was due to the hands preferring the back "view" of the objects. Furthermore, when the sensory modalities (visual vs. haptic) differed between learning an object and recognising it, we found that recognition performance was best when the objects were also rotated back-to-front between learning and recognition. Our data indicate that the visual system recognises the front view of the objects best whereas the hand recognises the objects best from the back.},
file_url = {/fileadmin/user_upload/files/publications/pdf1545.pdf},
state = {published},
author = {Newell F{fiona}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Tjan BS{tjan}{Department Human Perception, Cognition and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ SennaPE2014,
title = {Analogous motion illusion in vision and audition},
year = {2014},
month = {6},
day = {13},
pages = {201},
abstract = {In visual perception, observers usually tend to underestimate the speed of moving objects. Given that in natural visual scenes static–or slowly moving–objects are more likely to occur, this finding has been interpreted in terms of a Bayesian prior for low velocity, supposedly encoding the statistics of natural visual scenes. Here we investigate whether an analogous tendency to underestimate velocity also occurs in audition. Given that in natural scenes objects are mostly static, if priors for velocity are modality-independent, observers should present the same bias towards slow motion also in audition. Alternatively, given that to produce sounds objects must move, if priors for velocity are modality-specific, participants should overestimate the velocity of moving sounds. In a psychophysical task, a set of loudspeakers was arranged along a circle to create a virtual auditory space in which sounds rotate around participants. On each trial, participants were presented with two consecutive moving sounds, and they had to judge which one was faster in a two-alternative forced choice task. Given that the influence of priors on perceptual estimates becomes stronger as the stimuli become noisier, we experimentally manipulated the signal-to-noise ratio of the moving auditory stimuli. Results showed that noisier stimuli appeared to move slower. This finding suggests the presence of an analogous prior for low velocity in both vision and audition. Notably, such a prior seems to operate in a modality-independent fashion by encoding the statistical properties of the world, rather than the properties of the stimuli reaching the individual senses.},
web_url = {http://uvtapp.uvt.nl/fsw/spits.ws.frmShowpage?v_page_id=3859096609314761},
event_name = {15th International Multisensory Research Forum (IMRF 2014)},
event_place = {Amsterdam, The Netherlands},
state = {published},
author = {Senna I, Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
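The claim that noisier stimuli appear to move more slowly follows directly from the Gaussian case of the slow-motion prior described above. With a measured speed v_m (likelihood variance \sigma_m^2) and a zero-mean Gaussian prior on speed (variance \sigma_p^2), the posterior mean is (notation assumed here, not taken from the abstract):

\[
\hat{v} = \frac{\sigma_p^{2}}{\sigma_p^{2} + \sigma_m^{2}}\, v_m ,
\]

so increasing the measurement noise \sigma_m^{2} shrinks the estimate further towards zero velocity.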
@Poster{ RohdeEA2014,
title = {Sensorimotor adaptation to delayed visual feedback of the whole field of view},
year = {2014},
month = {6},
day = {13},
pages = {190},
abstract = {If humans wear prism goggles that spatially displace their visual field, they adapt their behaviour and perception with training and compensate for the visuomotor mismatch. This adaptation is of a semi-permanent nature, i.e., it persists for some time after the goggles are removed (aftereffect) and transfers to tasks that participants were not explicitly trained on. Here we tested whether this kind of adaptation is also possible in the temporal domain. Participants wore a head-mounted display with two cameras in front of the eyes that relay images in real time to the display. They were trained for one hour to perform sensorimotor tasks wearing this device (e.g., arranging blocks, walking down a corridor, playing musical instruments). In the delay condition, visual feedback from the cameras was artificially delayed by 150 ms during training. In the control condition, no additional visual lag was present. Participants improved their coordination markedly with training and reported that interaction with the world felt more coherent after training. To measure temporal aftereffects, participants were tested in three sensorimotor tasks without delay before and after training: manual tracking, rotational synchronization, and target interception. These test tasks were not used during training. We observed a transfer of delay adaptation to these tasks in the order of 50-100 ms. As expected, the aftereffects were stronger in the delay condition than in the control condition. These results show that adaptation to delayed feedback of the whole visual field is semi-permanent, with aftereffects and generalization across tasks, as in spatial prism adaptation.},
web_url = {http://uvtapp.uvt.nl/fsw/spits.ws.frmShowpage?v_page_id=3859096609314761},
event_name = {15th International Multisensory Research Forum (IMRF 2014)},
event_place = {Amsterdam, The Netherlands},
state = {published},
author = {Rohde M{marohde}{Research Group Multisensory Perception and Action}, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Altan G}
}
@Poster{ HorrDBE2014,
title = {The shape of things to come: temporal recalibration changes responses to unisensory stimuli},
year = {2014},
month = {6},
day = {12},
pages = {120},
abstract = {If the audio and visual streams in a movie are poorly aligned, the discrepancy between the actors’ speech and lip movements is very disturbing in the beginning. However, after a short time the constant delay in one modality becomes barely noticeable. This phenomenon is termed temporal recalibration, that is, the adaptation of synchrony perception to multisensory asynchronies in the environment. Here, we investigate whether and how temporal recalibration may be achieved via adaptation of the response towards unisensory stimuli. To this aim, we exposed participants to three minutes of either -150, 0 or +150ms audiovisual asynchrony. Subsequently, we tested participants’ unisensory detection threshold, reaction time, and gap detection. We observe remarkable changes with auditory-only trials following visual-leading audiovisual exposure: increases in detection threshold (i.e., sensitivity is lower), faster reactions, and improved gap detection performance. We do not find effects of adaptation using visual stimuli. These findings indicate that asynchronous exposure leads to changes in the neural response to unisensory stimuli. We suggest that such changes are the precursors of temporal recalibration: For the delayed stimulus during exposure accumulation of sensory evidence becomes higher early after stimulus onset, which, in accordance with a negative feedback mechanism, has the cost of an overall lowering of activation compared to the stimulus leading during exposure.},
web_url = {http://uvtapp.uvt.nl/fsw/spits.ws.frmShowpage?v_page_id=1795640707648017},
event_name = {15th International Multisensory Research Forum (IMRF 2014)},
event_place = {Amsterdam, The Netherlands},
state = {published},
author = {Horr NK, Di Luca M{max}{Research Group Multisensory Perception and Action}, Barnett-Cowan M{mbc} and Ernst M{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ PariseKE2013_2,
title = {On pitch-elevation mapping: Nature, nurture and behaviour},
year = {2013},
month = {9},
pages = {77},
abstract = {The association between sound frequency and spatial elevation is a remarkable example of cross-dimensional sensory mapping. In a wide range of cognitive, perceptual, attentional, and linguistic functions, humans consistently display a positive, sometimes absolute, association between sound frequency and spatial elevation, whereby increasing frequency is mapped to increasing elevation. However, a comprehensive account for the origins of such a pervasive cross-dimensional link is still missing. Here we demonstrate that the frequency-elevation mapping observed in human behaviour is already present in both the statistics of the acoustic stimuli in the environment, and in the filtering properties of the external ear. Specifically, we singled out the effects of head- and world-centred elevation and, through a combined analysis of environmental sounds and anthropometric measures, we show that, (1) in world-centred coordinates, high sounds are statistically more likely to come from higher elevations; moreover, (2) due to the external ear, sounds coming from higher head-centred elevations have more energy at high frequencies. To link these findings to human cognition, in a psychophysical task observers localized narrow band noises with different central frequencies, while head- and world-centred elevations were put into conflict by tilting participants’ body. Sound frequency systematically biased localization in both head- and world-centred coordinates in agreement with the mappings measured in the ear and the environment. We argue that, in a shorter timescale, humans learn the statistics of the auditory signals; while, in a longer timescale, evolution might tune the filtering properties of the external ear to the statistics of the acoustic environment.},
web_url = {https://portal.g-node.org/abstracts/bc13/#/doi/nncn.bc2013.0060},
event_name = {Bernstein Conference 2013},
event_place = {Tübingen, Germany},
state = {published},
DOI = {10.12751/nncn.bc2013.0060},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Knorre K and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ PariseKE2013,
title = {On pitch-elevation mapping: Nature, nurture and behaviour},
journal = {Multisensory Research},
year = {2013},
month = {6},
day = {4},
volume = {26},
number = {1},
pages = {190},
abstract = {The association between auditory pitch and spatial elevation is one of the most fascinating examples of cross-dimensional mappings: in a wide range of cognitive, perceptual, attentional and linguistic tasks, humans consistently display a positive, sometimes absolute, association between auditory pitch and spatial elevation. However, the origins of such a pervasive mapping are still largely unknown. Through a combined analysis of environmental sounds and anthropometric measures, we demonstrate that, statistically speaking, this mapping is already present in both the distal and the proximal stimulus. Specifically, in the environment, high sounds are more likely to come from above; moreover, due to the filtering properties of the external ear, sounds coming from higher elevations have more energy at high frequencies. Next, we investigated whether the internalized mapping depends on the statistics of the proximal, or of the distal stimulus. In a psychophysical task, participants had to localize narrow band-pass noises with different central frequencies, while head- and world-centred reference frames were put into conflict by tilting participants’ body orientation. The frequency of the sounds systematically biased localization in both head- and world-centred coordinates, and, remarkably, in agreement with the mappings measured in both the distal and proximal stimulus. These results clearly demonstrate that the cognitive mapping between pitch and elevation mirrors the statistical properties of the auditory signals. We argue that, in a shorter time-scale, humans learn the statistical properties of auditory signals; while, in a longer timescale, the evolution of the acoustic properties of the external ear itself is shaped by the statistics of the acoustic environment.},
web_url = {http://booksandjournals.brillonline.com/content/10.1163/22134808-000s0143},
event_name = {14th International Multisensory Research Forum (IMRF 2013)},
event_place = {Jerusalem, Israel},
state = {published},
DOI = {10.1163/22134808-000S0143},
author = {Parise CV{cesare}, Knorre K and Ernst MO{marc}{Department Human Perception, Cognition and Action}}
}
@Poster{ PariseH2012,
title = {When correlation implies causation in multisensory integration},
journal = {Frontiers in Computational Neuroscience},
year = {2012},
month = {9},
day = {14},
volume = {Conference Abstract: Bernstein Conference 2012},
pages = {177},
abstract = {Inferring which signals have a common underlying cause, and hence should be integrated, represents a primary challenge for a perceptual system dealing with multiple sensory inputs. This challenge is often referred to as the correspondence problem or causal inference. Previous
research has demonstrated that spatiotemporal cues, along with prior knowledge, are exploited by the human brain to solve this problem. Here we explore the role of correlation
between the fine temporal structure of auditory and visual signals in causal inference. Specifically, we investigated whether correlated signals are inferred to originate from the same distal event and hence are integrated optimally. In a localization task with visual, auditory, and combined audiovisual targets, the improvement in precision for combined relative to unimodal targets was statistically optimal only when audiovisual signals were correlated. This
result demonstrates that humans use the similarity in the temporal structure of multisensory signals to solve the correspondence problem, hence inferring causation from correlation.},
web_url = {http://www.frontiersin.org/10.3389/conf.fncom.2012.55.00063/event_abstract?sname=Bernstein_Conference_2012},
event_name = {Bernstein Conference 2012},
event_place = {München, Germany},
state = {published},
DOI = {10.3389/conf.fncom.2012.55.00063},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Spence CV and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ PariseSE2012_2,
title = {When correlation implies causation in multisensory integration},
journal = {Journal of Vision},
year = {2012},
month = {8},
volume = {12},
number = {9},
pages = {611},
abstract = {Inferring which signals have a common underlying cause, and hence should be integrated, represents a primary challenge for a perceptual system dealing with multiple sensory inputs. This challenge is often referred to as the correspondence problem or causal inference. Previous research has demonstrated that spatiotemporal cues, along with prior knowledge, are exploited by the human brain to solve this problem. Here we explore the role of correlation between the fine temporal structure of auditory and visual signals in causal inference. Specifically, we investigated whether correlated signals are inferred to originate from the same distal event and hence are integrated optimally. In a localization task with visual, auditory, and combined audiovisual targets, the improvement in precision for combined relative to unimodal targets was statistically optimal only when audiovisual signals were correlated. This result demonstrates that humans use the similarity in the temporal structure of multisensory signals to solve the correspondence problem, hence inferring causation from correlation.},
web_url = {http://www.journalofvision.org/content/12/9/611.abstract},
event_name = {12th Annual Meeting of the Vision Sciences Society (VSS 2012)},
event_place = {Naples, FL, USA},
state = {published},
author = {Parise C{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Spence C and Ernst M{marc}{Research Group Multisensory Perception and Action}}
}
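The use of temporal correlation as a cue for causal inference, as described in the two abstracts above, can be caricatured with a toy decision rule. This is an illustration only, not the authors' analysis: the signal lengths, noise levels, and the 0.5 decision threshold are arbitrary assumptions.

# Toy illustration: correlation of the fine temporal structure of two signals
# used as a cue to decide between a common and an independent cause.
import numpy as np

rng = np.random.default_rng(0)

def common_cause_score(audio, video):
    """Pearson correlation at lag zero between the two signal envelopes."""
    a = (audio - audio.mean()) / audio.std()
    v = (video - video.mean()) / video.std()
    return float(np.mean(a * v))

source = rng.standard_normal(500)                       # shared underlying event
audio = source + 0.3 * rng.standard_normal(500)
video_same = source + 0.3 * rng.standard_normal(500)    # common cause
video_other = rng.standard_normal(500)                  # independent cause

for label, video in [("common cause", video_same), ("independent", video_other)]:
    r = common_cause_score(audio, video)
    decision = "integrate" if r > 0.5 else "segregate"
    print(f"{label}: r = {r:.2f} -> {decision}")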
@Poster{ PariseHSE2011,
title = {Multisensory integration: When correlation implies causation},
journal = {i-Perception},
year = {2011},
month = {10},
volume = {2},
number = {8},
pages = {901},
abstract = {Humans are equipped with multiple sensory channels, jointly providing both redundant and complementary information. A primary challenge for the brain is therefore to make sense of these multiple sources of information and bind together those signals originating from the same source while segregating them from other inputs. Whether multiple signals have a common origin or not, however, must be inferred from the signals themselves (causal inference, cf. “the correspondence problem”). Previous studies have demonstrated that spatial coincidence, temporal simultaneity, and prior knowledge are exploited to solve the correspondence problem. Here we demonstrate that cross-correlation, a measure of similarity between signals, constitutes an additional cue to solve the correspondence problem. Capitalizing on the well-known fact that sensitivity to crossmodal conflicts is inversely proportional to the strength of coupling between the signals, we measured sensitivity to crossmodal spatial conflicts as a function of the cross-correlation between audiovisual signals. Cross-correlation (time-lag 0ms) modulated observers’ performance, with lower sensitivity to crossmodal conflicts being measured for correlated than for uncorrelated audiovisual signals. The current results demonstrate that cross-correlation promotes multisensory integration. A Bayesian framework is proposed to interpret the present results whereby stimulus correlation is represented on the prior distribution of expected crossmodal co-occurrence.},
web_url = {http://imrf.mcmaster.ca/IMRF/ocs3/index.php/imrf/2011/paper/view/349},
web_url2 = {http://i-perception.perceptionweb.com/journal/I/volume/2/article/ic901},
event_name = {12th International Multisensory Research Forum (IMRF 2011)},
event_place = {Toulouse, France},
state = {published},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Harrar V, Spence C and Ernst M{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ PariseHSE2011_2,
title = {Multisensory integration: When correlation implies causation},
year = {2011},
month = {9},
web_url = {http://www.bccn-tuebingen.de/events/bernstein-symposium-series-2011/symposium-d.html},
event_name = {Bernstein Cluster D Symposium: Multisensory Perception and Action},
event_place = {Tübingen, Germany},
state = {published},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Harrar V, Spence C and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ PariseHSE2011_3,
title = {Multisensory integration: When correlation implies causation},
journal = {Perception},
year = {2011},
month = {9},
volume = {40},
number = {ECVP Abstract Supplement},
pages = {185},
abstract = {Humans are equipped with multiple sensory channels, jointly providing both redundant and complementary information. A primary challenge for the brain is therefore to make sense of these multiple sources of information and bind together those signals originating from the same source while segregating them from other inputs. Whether multiple signals have a common origin or not, however, must be inferred from the signals themselves (causal inference, cf “the correspondence problem”). Previous studies have demonstrated that spatial coincidence, temporal simultaneity, and prior knowledge are exploited to solve the correspondence problem. Here we demonstrate that crosscorrelation, a measure of similarity between signals, constitutes an additional cue to solve the correspondence problem. Capitalizing on the well-known fact that sensitivity to crossmodal conflicts is inversely proportional to the strength of coupling between the signals, we measured sensitivity to crossmodal spatial conflicts as a function of the crosscorrelation between audiovisual signals. Crosscorrelation (time-lag 0 ms) modulated observers’ performance, with lower sensitivity to crossmodal conflicts being measured for correlated than for uncorrelated audiovisual signals. The current results demonstrate that crosscorrelation promotes multisensory integration. A Bayesian framework is proposed to interpret the present results whereby stimulus correlation is represented on the prior distribution of expected crossmodal co-occurrence.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v110608},
event_name = {34th European Conference on Visual Perception},
event_place = {Toulouse, France},
state = {published},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Harrar V, Spence C and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ diLucaMBE2011,
title = {Recalibration of audiovisual simultaneity},
year = {2011},
month = {9},
web_url = {http://www.bccn-tuebingen.de/events/bernstein-symposium-series-2011/symposium-d.html},
event_name = {Bernstein Cluster D Symposium: Multisensory Perception and Action},
event_place = {Tübingen, Germany},
state = {published},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Barnett-Cowan M{mbc}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 6404,
title = {"Where is the sun?": The sun is "up" in the eye of the beholder},
journal = {Perception},
year = {2010},
month = {8},
volume = {39},
number = {ECVP Abstract Supplement},
pages = {146},
abstract = {In environments where orientation is ambiguous, the visual system uses prior knowledge about lighting coming from above to recognize objects, reorient the body, and determine which way is up (where is the sun?). It has been shown that when observers are tilted to the side relative to gravity, the orientation of the light-from-above prior will change in a direction between the orientation of the body, gravity and the visual surround. The contribution of ocular torsion in this change of the light-from-above prior has been acknowledged but not specifically addressed. Here we test the hypothesis that when lighting direction is the only available visual orientation cue, change in orientation of the light-from-above prior is accounted for by ocular torsion. Observers made convex-concave judgments of a central shaded disk, flanked by three similarly- and three oppositely-shaded disks. Lighting was tested every 15° in roll in the fronto-parallel plane. Observers were tested when upright, supine, and tilted every 30° in roll relative to gravity. Our results show that change of the light-from-above prior is well predicted from a sum of two sines; one consistent with predicted ocular torsion, the other consistent with an additional component varying with twice the frequency of body tilt.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v100409},
event_name = {33rd European Conference on Visual Perception},
event_place = {Lausanne, Switzerland},
state = {published},
author = {Barnett-Cowan M{mbc}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
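One plausible way to write down the "sum of two sines" fit mentioned in this abstract (the symbols are assumptions, not the authors' notation) is

\[
\hat{\theta}_{\mathrm{light}}(\theta_{\mathrm{body}})
= \theta_{0}
+ a_{1}\sin\!\left(\theta_{\mathrm{body}} + \phi_{1}\right)
+ a_{2}\sin\!\left(2\,\theta_{\mathrm{body}} + \phi_{2}\right),
\]

with the first sinusoid following the predicted ocular torsion and the second varying at twice the frequency of body tilt.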
@Poster{ 6498,
title = {An invisible signal can be made accessible to consciousness by training the perceptual system to use it for a novel purpose},
year = {2010},
month = {6},
volume = {4},
pages = {8-9},
abstract = {The perceptual appearance of a visual stimulus can be changed by presenting stimuli that are similar, but that differ along specific dimensions, to the observer in advance. Many negative adaptation aftereffects are familiar to students of perception, for example. A different example is “cue recruitment” (Haijiang et al., 2006): a visual signal that has no effect on some attribute of appearance can often be made to affect that attribute through the use of classical (Pavlovian) conditioning procedures. In that case, the signal has come to be treated as a new cue by the visual system, insofar as it now participates in the construction of some new aspect of appearance that it previously did not. We asked whether this learning requires that the signal be visible, i.e. whether it must have a consciously accessible perceptual consequence, of any sort, during training. To do this we employed an invisible visual signal, namely, a vertical gradient of vertical disparity obtained by slightly magnifying the image in one eye. This signal is measured by the visual system, but it had no influence on any of the perceptual attributes that observers’ visual systems computed from the displays, in which horizontal lines depicted a rotating cylinder. During training we made the eye of vertical magnification (EVM) contingent on the rotation direction of the cylinder. After training we presented an ambiguous version of the cylinder and found that EVM influenced the perceived direction of rotation consistent with contingency during training. Thus, a signal need not be visible for the adult visual system to give it new use as a participant in the construction of visual appearances. Haijiang, Q., Saunders, J. A., Stone, R. W., & Backus, B. T. (2006). Demonstration of cue recruitment: Change in visual appearance by means of Pavlovian conditioning. PNAS, 103, 483–486.},
web_url = {http://www.theassc.org/the_4th_annual_meeting_of_the_association_for_the_scientific_study_of_consciousness_assc_4},
event_name = {4th Annual Meeting of the Association for the Scientific Study of Consciousness (ASSC 4)},
event_place = {Brussels, Belgium},
state = {published},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Backus BT{backus}{Research Group Multisensory Perception and Action}}
}
@Poster{ 6421,
title = {Change of the "light-from-above" prior when the body is tilted relative to gravity},
year = {2010},
month = {6},
volume = {11},
number = {114},
abstract = {In environments where orientation is ambiguous, the visual system uses prior knowledge about lighting coming from above to recognize objects, reorient the body, and determine which way is up (where is the sun?). It has been shown that when observers are tilted to the side relative to gravity, the orientation of the light-from-above prior will change in a direction between the orientation of the body, gravity and the visual surround. The contribution of ocular torsion in this change of the light-from-above prior has been acknowledged but not specifically addressed. Here we test the hypothesis that when lighting direction is the only available visual orientation cue, change in orientation of the light-from-above prior is accounted for by ocular torsion. Observers made convex-concave judgments of a central shaded disk, flanked by three similarly- and three oppositely-shaded disks. Lighting was tested every 15° in roll in the fronto-parallel plane. Observers were tested when upright, supine, and tilted every 30° in roll relative to gravity. Our results show that change of the light-from-above prior is well predicted from a sum of two sines; one consistent with predicted ocular torsion, the other consistent with an additional component varying with twice the frequency of body tilt.},
web_url = {http://imrf.mcmaster.ca/IMRF/ocs2/index.php/imrf/2010/paper/view/114},
event_name = {11th International Multisensory Research Forum (IMRF 2010)},
event_place = {Liverpool, UK},
state = {published},
author = {Barnett-Cowan M{mbc}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 6497,
title = {Multiple criteria for multisensory signals},
year = {2010},
month = {6},
volume = {11},
number = {374},
abstract = {Perceptual judgments are classically regarded as involving both a sensory and a decisional component. An optimal observer engaged in a signal detection task should maximize the correct responses by setting an appropriate decisional criterion according to his/her sensitivity to sensory signals. A large body of literature supports the view that observers set a near-optimal criterion in the detection of a single signal. However, there seems to be a systematic deviation from optimality when observers are required to concurrently judge multiple signals within the visual modality. In this case, observers set only a single criterion for the joint presentation of the compound signals (Gorea & Sagi 2000). This result has been interpreted as an indication that humans are unable to simultaneously handle multiple signal representations within a sensory modality. Literature, however, remains silent as to the crossmodal case. In a 2IFC task we asked participants to concurrently report whether or not visual and/or auditory stimuli changed position within each trial. Our results show that with multisensory signals observers can simultaneously set separate criteria for each modality. It is still an open question whether separate criteria are also set when multisensory signals are integrated.},
web_url = {http://imrf.mcmaster.ca/IMRF/ocs2/index.php/imrf/2010/paper/view/374},
event_name = {11th International Multisensory Research Forum (IMRF 2010)},
event_place = {Liverpool, UK},
state = {published},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 6660,
title = {Nonlinear temporal distortions in vision and audition},
year = {2010},
month = {6},
volume = {11},
number = {393},
abstract = {Perceived time is not veridical but distorted and differs across the senses. Here we ask, which points in the perception of a temporal event contribute most significantly to these multisensory distortions? To this end, we investigated perceptual estimates of temporal landmarks (onset, peak amplitude, and offset) for a Gaussian standard signal (s=150ms). Particularly we were interested in how the perception of these landmarks differs across vision and audition. Participants undertook a temporal order judgment task comparing the onset, peak and offset landmarks of the standard stimuli to short spike-like stimuli (s=5ms) in vision or audition. All four combinations were tested: V-v, A-a, V-a, A-v. Results demonstrate that the visual as compared to auditory standard stimuli were perceived shorter. More interestingly, we found a compression in the perceived duration for onset-peak intervals compared to peak-offset intervals. This compression effect was more pronounced in the visual modality. Discrimination thresholds were worse for offset judgements compared to onset or peak judgements in both modalities. The differences in perceived duration can potentially be used to explain multisensory illusions such as the flash lag effect and perceived crossmodal asynchronies. We quantitatively explain these distortion effects using models of signal processing.},
web_url = {http://imrf.mcmaster.ca/IMRF/ocs2/index.php/imrf/2010/paper/view/393},
event_name = {11th International Multisensory Research Forum (IMRF 2010)},
event_place = {Liverpool, UK},
state = {published},
author = {Hartcher-O'Brien J{jhartcher}{Research Group Multisensory Perception and Action}, Telgen S{telgen}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 6484,
title = {We know our errors that result from noise},
year = {2010},
month = {6},
volume = {11},
number = {347},
abstract = {When performing a sensorimotor task, e.g. pointing to visual targets, we constantly make errors. Those errors can be random as a result of sensorimotor noise or they can be systematic indicating that the sensorimotor system is miscalibrated. It is generally assumed that in the absence of visual feedback we are unaware of the random pointing errors due to noise. Here we show the opposite.
Subjects performed a rapid pointing task to visual targets presented on a touch screen. They were encouraged to hit as accurately as possible by earning points when hitting close to the target. Pointing was conducted open-loop: visual feedback was prevented at the onset of the pointing movement. After the movement was completed, subjects had to indicate whether they believed they had landed left or right of the target. Results show that subjects’ left/right-discriminability was well above chance. It was still above chance when subjects were instructed to point slowly, enabling them to correct for any unintended movement error.
Together this indicates that, even though we cannot control the noise, we are aware of the errors it results in. This finding has major implications for models of sensorimotor control in which noise is considered an unconscious error.},
web_url = {http://imrf.mcmaster.ca/IMRF/ocs2/index.php/imrf/2010/paper/view/347},
event_name = {11th International Multisensory Research Forum (IMRF 2010)},
event_place = {Liverpool, UK},
state = {published},
author = {van Dam LCJ{vandam}{Research Group Multisensory Perception and Action}, Pape A-A{antopia}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ 6490,
title = {Audiovisual integration: the duration of uncertain times},
journal = {Journal of Vision},
year = {2010},
month = {5},
volume = {10},
number = {7},
pages = {1408},
abstract = {Despite continual temporal discrepancies between sensory inputs, signals arising from the same event are bound together into a coherent percept. It has been suggested that multiple timekeepers monitor the different sensory streams, producing differences in perceived duration of events. Given this, what is the integration strategy adopted for combining sensory information in the time domain? Specifically, if the brain has information about the duration of an event from more than one source, can the uncertainty of the duration estimate decrease, and can the Maximum Likelihood Estimate (MLE) model predict such a change? Using a 2AFC procedure, participants had to judge which interval was longer (1st or 2nd) for auditory, visual and audiovisual stimuli. Each trial contained 2 intervals: a standard stimulus (sampled from one of three durations), and a comparison interval whose duration changed randomly in relation to standard stimulus duration. The reliability of the auditory stimulus was manipulated to produce the unimodal weighting scheme. Data was fit with a cumulative Gaussian psychometric function from which PSE and JND were extracted.
Results for unimodal trials showed JND changes that depended upon the duration of the standard, according to Weber's law. JND values also decreased with decreases in signal noise. Comparison of the present bimodal results with MLE predictions revealed optimal integration of auditory and visual duration cues. Additionally the results show that the integration of uncertain visual and auditory duration signals is a weighted average of these signals. That is, PSE shifts in perceived duration tended to reflect MLE predictions with shifts following the more reliable unimodal signal. These results are the first to demonstrate ‘optimal’ integration of sensory information in the time domain and contradict other studies applying MLE to this stimulus feature.},
web_url = {http://www.journalofvision.org/content/10/7/1408.abstract},
event_name = {10th Annual Meeting of the Vision Sciences Society (VSS 2010)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/10.7.1408},
author = {Hartcher-O'Brien J{jhartcher}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
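The MLE predictions tested in this abstract can be computed from the unimodal fits alone. The following is a minimal Python sketch, not the study's analysis code; the example JND and PSE values are made up for illustration.

# Minimal check of the optimal-integration predictions for duration judgments:
# bimodal JND and PSE predicted from the two unimodal psychometric fits.
import numpy as np

def mle_bimodal_jnd(jnd_a, jnd_v):
    """Predicted audiovisual JND if duration cues are integrated optimally."""
    return np.sqrt((jnd_a ** 2 * jnd_v ** 2) / (jnd_a ** 2 + jnd_v ** 2))

def mle_bimodal_pse(pse_a, pse_v, jnd_a, jnd_v):
    """Predicted bimodal PSE: reliability-weighted average of the unimodal PSEs."""
    w_a = jnd_v ** 2 / (jnd_a ** 2 + jnd_v ** 2)   # weight of the auditory cue
    return w_a * pse_a + (1 - w_a) * pse_v

jnd_a, jnd_v = 60.0, 100.0     # hypothetical unimodal JNDs (ms)
pse_a, pse_v = 480.0, 530.0    # hypothetical unimodal PSEs (ms)
print(mle_bimodal_jnd(jnd_a, jnd_v))                 # ~51.4 ms, below both unimodal JNDs
print(mle_bimodal_pse(pse_a, pse_v, jnd_a, jnd_v))   # closer to the more reliable (auditory) PSE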
@Poster{ 6488,
title = {Does audiovisual temporal recalibration store without stimulation?},
journal = {Journal of Vision},
year = {2010},
month = {5},
volume = {10},
number = {7},
pages = {1414},
abstract = {Recent studies have investigated adaptation to temporal discrepancies between different sensory modalities by first exposing participants to asynchronous multisensory signals, and subsequently assessing the magnitude of the adaptation effect (the size of the shift in subjective simultaneity). Although never reported, there is reason to assume that the strength of the adaptation effect declines during this measurement period. Usually, short re-exposures are interleaved with testing to prevent such a decline. In the present study, we show that a decrease in the strength of adaptation can still take place, even when a common re-exposure procedure is used. In a second experiment, we investigated whether the observed decline is due to: (1) a dissipation of adaptation with the passage of time or, (2) a new adaptation induced by the test stimuli. We find that temporal adaptation does not dissipate with time but is stored until new sensory information, i.e., stimuli that differ from those used during the adaptation procedure, is presented. An alternative explanation, namely that adaptation decays over time but is re-established before the first test trial due to the experimental procedure we chose, is addressed in a control experiment. This finding is discussed in terms of Helson's adaptation level (AL) theory [1947, Adaptation-level as frame of reference for prediction of psychophysical data. The American Journal of Psychology, 60, 1–29], according to which the null point of any perceptual dimension, in our case the perception of simultaneity on the dimension of temporal order, is a summarizing statistic of all stimuli presented in the past. Any single stimulus pulls the AL toward its own value, and any single stimulus is judged as though it was being compared with the current AL.},
web_url = {http://www.journalofvision.org/content/10/7/1414},
event_name = {10th Annual Meeting of the Vision Sciences Society (VSS 2010)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/10.7.1414},
author = {Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 6489,
title = {Influence of asynchrony on the perception of visual-haptic compliance},
journal = {Journal of Vision},
year = {2010},
month = {5},
volume = {10},
number = {7},
pages = {852},
abstract = {Compliance of deformable materials is perceived through signals about resistive force and displacement. During visual-haptic interactions, visual and proprioceptive signals about material displacement are combined over time with the force signal. Here we asked whether multisensory compliance perception is affected by the timing of signals by introducing an asynchrony between the participant's movement (sensed proprioceptively) and force information or visual information. Visual-proprioceptive asynchronies are obtained by making participants see a delayed video of their haptic interaction with an object rather than the real interaction. Force-proprioceptive asynchronies are instead obtained by making participants compress a virtual object with their hand and sense the resistive force generated by a force feedback device.
Results indicate that force-proprioceptive asynchronies can significantly alter the perception of object stiffness. Moreover, we find that perceived compliance changes also as a function of the delay of visual information. These effects of asynchrony on perceived compliance would not be present if all force-displacement information were utilized equally over time, as both delays generate a bias in compliance which is opposite in the compression and release phases of the interaction. To explain these findings we hypothesized instead that information during object compression is weighted more than information obtained during object release and that visual and proprioceptive information about the hand position are used for compliance perception depending on the relative reliability of the estimate obtained. We confirm these hypotheses by showing that sensitivity to compliance is much higher during object compression and that degradation of visual and proprioceptive information can modify the weights assigned to the two sources. Moreover, by analyzing participants' movements and feedback forces we show that the two hypothesized factors (compression-release and visual-proprioceptive reliability) can account for the change in perceived compliance due to force-proprioceptive and force-displacement asynchronies.},
web_url = {http://www.journalofvision.org/content/10/7/852.abstract},
event_name = {10th Annual Meeting of the Vision Sciences Society (VSS 2010)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/10.7.852},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Kn\"orlein B, Harders M and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 6443,
title = {Mapping Shape to Visuomotor Mapping: Generalization to Novel Shapes},
journal = {Journal of Vision},
year = {2010},
month = {5},
volume = {10},
number = {7},
pages = {1077},
abstract = {The accuracy of visually guided motor movements largely depends on the stability of the sensory environment that defines the required response mapping. Thus, as the environment keeps changing, we constantly have to adapt our motor responses to stay accurate. The more sensory information we receive about the current state of the environment, the more accurate we may be. Recruitment of additional cues that correlate with the environment can therefore aid in this adaptation process. It has previously been shown that subjects recruit previously irrelevant cues to help them switch between two specific visuomotor mappings (e.g. Martin et al., 1996; van Dam et al., 2008). However, in rapidly changing environments additional cues will only be of real benefit if it is possible to learn a more continuous correlation between the cue and the required visuomotor response. Here we investigate transfer of explicitly trained cue-element/response-mapping combinations to other cue elements from the same continuous scale (a shape morph). In our experiment, subjects performed a rapid pointing task to targets for which we manipulated the visuomotor mapping. During training, subjects simultaneously learned two mappings to two different target shapes. The target shapes were taken from a set of shape morphs (we morphed between spiky and circular shapes). After five sessions of 180 training trials, using catch trials, we tested subjects' performance on different target shape morphs that could either come from an interpolation or an extrapolation along the shape morph axis. Results show that for 7 out of the 12 subjects learning is not restricted to the trained shapes but interpolates and partially also extrapolates to other shapes along the morph axis. We conclude that participants implicitly learned the newly defined shape axis when trained with two distinct visuomotor mappings and that they generalized their visuomotor mappings to this new dimension.},
web_url = {http://www.journalofvision.org/content/10/7/1077.abstract},
event_name = {10th Annual Meeting of the Vision Sciences Society (VSS 2010)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/10.7.1077},
author = {Ernst MO{marc}{Research Group Multisensory Perception and Action} and van Dam L{vandam}{Research Group Multisensory Perception and Action}}
}
@Poster{ 6483,
title = {Pre-exposure interferes with perceptual learning for ambiguous stimuli},
journal = {Journal of Vision},
year = {2010},
month = {5},
volume = {10},
number = {7},
pages = {1140},
abstract = {The perception of a bistable stimulus is influenced by prior presentations of that stimulus. Such effects can be long-lasting: e.g. position-dependent learned biases can persist for days, and reversing them requires extensive retraining (Haijiang et al., 2006). The effectiveness of training may therefore be influenced by pre-exposure to the ambiguous stimulus. Here we investigate the role of pre-exposure for learning a position-dependent perceptual bias. We used rotating Necker cubes as the bistable stimuli that could be presented either above or below fixation. On training trials, additional cues (binocular disparity and occlusion) disambiguated the rotation direction for the cube. On test trials the rotating cube was presented without disambiguation cues. Subjects reported whether the front face of the cube and a moving dot moved in the same or opposite directions. Subjects received feedback about the correctness of their response. Using 350 training trials, subjects were exposed to different rotation directions for the above and below fixation locations of the cube. Following a 5-minute break, a post-test (80 test trials) was performed. Separate subjects either directly started with the training, or were pre-exposed to the ambiguous stimulus in a pre-test (80 test trials). Subjects who started the training immediately perceived the cube, on average, to be rotating in the trained direction for both locations on 83% of the post-test trials, replicating previous results. However, for the pre-exposed subjects, consistency with the trained percept-location contingency was only 58% in the post-test. In control conditions we simulated the pre-test using disambiguated trials and initially presented subjects with the contingency opposite to the one they would subsequently be exposed to during training. Post-test consistency with the trained contingency was 78%. This shows that the pre-exposure interference does not necessarily depend on the initial perceptual history, suggesting a fundamental difference between test and training trials.},
web_url = {http://www.journalofvision.org/content/10/7/1140.short},
event_name = {10th Annual Meeting of the Vision Sciences Society (VSS 2010)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/10.7.1140},
author = {van Dam LCJ{vandam}{Research Group Multisensory Perception and Action}, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Backus BT{backus}{Research Group Multisensory Perception and Action}}
}
@Poster{ 5763,
title = {Learning times do not alter adaptation rates in rapid reaching tasks},
journal = {Journal of Vision},
year = {2009},
month = {8},
volume = {9},
number = {8},
pages = {1154},
abstract = {Humans recalibrate the mapping between their visual and motor systems when they perceive systematic changes in the environment. Two main factors influence the rate of this recalibration: the extent to which the current mapping is reliable (mapping uncertainty) and the extent to which the visual feedback is reliable (feedback uncertainty). As an optimal adaptor, the Kalman filter takes these factors into account and hence may be best suited to model such a system. This model makes different predictions depending on the nature of the feedback noise. For correlated noise (random walk), the mapping randomly shifts with every trial, which should increase the mapping uncertainty and therefore increase the adaptation rate. On the other hand, uncorrelated noise in the feedback (Gaussian noise around a constant mapping) should increase feedback uncertainty and therefore decrease the adaptation rate. To test these predictions for the visuo-motor system, we used a rapid pointing task similar to the one used by Burge, Ernst & Banks (2008). We also systematically varied the trial times over which subjects could learn the statistics of the environment. We replicated the result for random walks and found that adaptation was indeed faster. The expected decrease in the adaptation rate for the uncorrelated Gaussian noise, however, could not be found. Surprisingly, we did not find any significant effect of prolonged learning time on the adaptation rate in either noise environment. Hence, it appears that the temporal window for the estimation of the statistics underlying the visuomotor mapping is relatively small. The results further indicate that the assumption the Kalman filter makes about the stationary statistics of its measurement and system noise may not be accurate. Hence, in future work it might be useful to alter the model in order to account for dynamic statistics of the measurement and internal noise.},
web_url = {http://www.journalofvision.org/9/8/1154/},
event_name = {9th Annual Meeting of the Vision Sciences Society (VSS 2009)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/9.8.1154},
author = {Narain D{dnarain}, van Dam L{vandam}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ 5759,
title = {Recruitment of an invisible depth cue},
journal = {Journal of Vision},
year = {2009},
month = {8},
volume = {9},
number = {8},
pages = {34},
abstract = {Cue recruitment occurs when a sensory signal is put into correlation with trusted cues and subsequently influences perceptual interpretation as the trusted cues do. In all cue recruitment experiments to date, the signal has been well above detection threshold and easily visible. For example, it has been shown that object position and motion can be recruited as a cue to influence the interpretation of the ambiguous Necker Cube (Haijiang et al., 2006). Here we asked whether a signal that is not visible on its own could be recruited as a cue. Vertical size ratio (VSR, the ratio of vertical angles subtended by an object at the two eyes) is normally used to correct for relative position of the head when interpreting horizontal disparity (Gillam & Lawergren, 1983; Backus et al., 1999) but it is not visible in displays consisting of horizontal lines only because there are no horizontal discontinuities. We manipulated VSR of displays simulating a cylinder composed of horizontal lines that rotated about a horizontal axis. On training trials, the rotation direction of the cylinder was unambiguously specified by horizontal disparity and occlusion cues and these trusted cues were correlated with the VSR cue to be recruited. On test trials, the display did not contain horizontal disparity or occlusion, so that the rotation direction specified by the trusted cues was ambiguous. If, however, participants made use of the VSR cue in the test display, the rotation direction could become unambiguous after training. For 8 out of 9 participants, apparent rotation on test trials became contingent on the value of VSR. We conclude that a signal need not have perceptual consequences by itself for the system to assign it a new use during the construction of appearances.},
web_url = {http://journalofvision.org/9/8/34/},
event_name = {9th Annual Meeting of the Vision Sciences Society (VSS 2009)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/9.8.34},
author = {Ernst MO{marc}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Research Group Multisensory Perception and Action} and Backus B{backus}{Research Group Multisensory Perception and Action}}
}
@Poster{ 5911,
title = {Short-term dynamics of perceptual bias for bistable stimuli},
journal = {Journal of Vision},
year = {2009},
month = {8},
volume = {9},
number = {8},
pages = {41},
abstract = {The perception of a bistable stimulus is influenced by prior presentations of that stimulus. This influence has been characterized as priming, and more recently as a memory process that biases perception on independent multiple timescales on the order of seconds to minutes (e.g. Brascamp et al., 2008; Pastukhov & Braun, 2008). Cue recruitment studies show that learned bias can last for days, and that reversing it requires extensive retraining (Haijiang et al., 2006). These very-long-term biases are measured using trial sequences of disambiguated and ambiguous stimuli, so it is important to understand how recent stimulus presentations interact with such biases on a given trial.
We investigated short-term temporal influence using rotating Necker cubes. Observers viewed a randomly permuted sequence of 480 trials, each consisting of a 2-sec movie depicting a rotating cube just above fixation. The sequence included 320 “Training” trials (rotation direction disambiguated by stereo and occlusion cues) randomly interleaved with 160 “Test” trials (no disambiguating cues). Observers reported whether a dot moving right-to-left or left-to-right through fixation on each trial moved in the same direction as the front or the back of the cube.
Reported rotation on Test trials correlated positively with reported rotation for both recent Test trials and recent Training trials. Correlations were highest with the immediately preceding trial, diminishing for earlier trials. This temporal pattern of correlations indicates an active influence from recent trials. The short-term influence of the Test trials was at least as great as that of the Training trials. It therefore seems likely that a perceptual decision itself can affect perceptual decisions on subsequent trials. Some observers developed a bias to perceive all Test trials in one direction, suggesting an interaction of recent history with an internal state variable that governs a long-term perceptual bias.},
web_url = {http://www.journalofvision.org/9/8/41/},
event_name = {9th Annual Meeting of the Vision Sciences Society (VSS 2009)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/9.8.41},
author = {Fuller S, Backus BT{backus}{Research Group Multisensory Perception and Action}, van Dam LCJ{vandam}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ 5770,
title = {The time course of proprioceptive drift in the rubber hand illusion},
journal = {Journal of Vision},
year = {2009},
month = {8},
volume = {9},
number = {8},
pages = {712},
abstract = {In the well-known rubber hand illusion (RHI, Botvinick & Cohen, Nature, 1998), synchronous tactile stimulation of a subject's invisible hand and a visible rubber hand (placed at a fixed lateral distance) induces a drift of perceived location of the real hand towards the rubber hand. The purpose of this study was to investigate the spatio-temporal characteristics of this proprioceptive drift.
To this end, we measured the perceived position of the participants' index finger in relation to a small visible probe using an adaptive two-staircase method and a forced-choice task. We determined perceived finger location by fitting a psychometric function to the responses. Prior to introducing the rubber hand at a distance of 17cm from the real hand, we determined the perceived finger location in darkness. The time course of the RHI was then determined in three phases: pre-test while only looking at the rubber hand, prolonged synchronous tactile stimulation, and post-test again without touch but with visible rubber hand. The synchronous stimulation was fully controlled using two PHANToM force-feedback devices.
The perceived finger location immediately shifted 1.4cm towards the rubber hand when in view. This shift rose to an average of 6.3cm after 8min of tactile stimulation. The distribution of responses indicates that the proprioceptive drift is truly gradual. As previous work suggests (cf. Holmes et al., P&P, 2006; Tsakiris & Haggard, JEP, 2005), these findings show that the RHI involves both immediate effects that result from multisensory integration and gradual recalibration effects with a time constant of several minutes. After 5min of post-test, a drift of 4.9cm remained, showing that recalibration produces an after-effect with respect to both the baseline measure recorded in darkness and the pre-test. We will further investigate the determinants of the gradual build-up and decay of the drift.},
web_url = {http://journalofvision.org/9/8/712/},
event_name = {9th Annual Meeting of the Vision Sciences Society (VSS 2009)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/9.8.712},
author = {Rohde M{marohde}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ 5957,
title = {Walking changes perceived visual speed of both expanding and contracting optic flow fields},
journal = {10th International Multisensory Research Forum (IMRF 2009)},
year = {2009},
month = {7},
volume = {10},
pages = {288},
web_url = {http://imrf.mcmaster.ca/IMRF/2009/pdf/program_IMRF_2009.pdf},
state = {published},
author = {Souman JL{souman}{Research Group Multisensory Perception and Action}, Eikmeier V{eikmeier}{Research Group Multisensory Perception and Action}, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Freeman TCA{freemant}}
}
@Poster{ 5697,
title = {Timing cue reliability},
year = {2008},
month = {10},
abstract = {Multiple cues are integrated by weighting individual cue estimates according to their reliabilities. This has been shown repeatedly under static, prolonged viewing conditions, but in real-world situations viewing conditions may change and the reliability of cues can vary over time. Here we ask whether the perceptual system instantaneously detects reliability changes and adapts the cue weights accordingly.
Subjects binocularly viewed a spinning surface slanted in depth. In the display there was a slight discrepancy between the slant specified by the stereoscopic cue and the one specified by motion. We modulated the reliability of the motion cue by changing the speed of rotation. Increasing the speed of rotation increased the reliability of the motion cue and consequently biased the perceived slant towards the slant defined by the motion cue. This manipulation, however, did not affect the magnitude of slant specified by the motion cue. By modulating the rotation speed at different frequencies the surface was perceived to oscillate in depth.
Perceived oscillation amplitude decreased with higher modulation frequency, and the phase shift between rotation modulation and perceived oscillation increased. As a control, we simulated an actual oscillation of the surface and did not obtain such a response pattern. This result indicates that quick changes of reliability, defined by changes in rotation speed, have smaller perceptual consequences. Cue weights do not change instantaneously, likely because there is latency in updating the reliability of the motion cue when the spinning surface changes its speed of rotation.},
file_url = {fileadmin/user_upload/files/publications/DiLucaKnillErnst.pdf},
web_url = {http://www.allpsych.uni-giessen.de/compws/},
event_name = {Workshop "Cue combination - Unifying Perceptual Theory"},
event_place = {Rauischholzhausen, Germany},
state = {published},
digital = {1},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Knill D and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ ZiatHCEL2008,
title = {Revisiting the fingertip-fovea analogy},
journal = {Perception},
year = {2008},
month = {8},
volume = {37},
number = {ECVP Abstract Supplement},
pages = {50},
abstract = {Foveated perception refers to the ability to cyclically attend to small samples of the world at high resolution. Yet, the conscious experience is that of a stable and uniform world. 'Foveated touch' could be similar to foveated vision. When scanning a textured surface with several fingers, one feels a single surface, not several; in vision, a textured surface looks uniform, not like a collection of patches. Vision can give rise to visual suppression of image displacement, or failure to detect absolute displacement of a feature. We investigated what could be called tactile suppression of stimulus displacement. We designed an experiment where subjects scanned a single Braille dot with two fingers, but the stimulus changed absolute location during the short instant when the contact left one finger and shifted to the other. There were instances when the perceptual system did not accurately remap the stimulus during the transition from one finger to the other and failed to detect the change, thereby making the world appear more stable than it really was.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v080175},
event_name = {31st European Conference on Visual Perception},
event_place = {Utrecht, Netherlands},
state = {published},
author = {Ziat M, Hayward V, Chapman CE, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Lenay C}
}
@Poster{ 5322,
title = {Switching between visuomotor mappings: learning absolute mappings or relative shifts?},
journal = {Perception},
year = {2008},
month = {8},
volume = {37},
number = {ECVP Abstract Supplement},
pages = {109},
abstract = {Adaptation to specific visuomotor conflicts becomes faster with repetition. What is learned when repeatedly switching between different visuomotor mappings: the absolute mappings or the relative shift between the mappings? To test this, we trained participants in a rapid pointing task using a unique colour cue for each mapping between pointing location and visual feedback. After extensive training, participants adapted to a new mapping using a neutral cue. For catch trials (a change in cue and no visual feedback), different adaptation performances are predicted depending on how the mappings are encoded. When encoding an absolute mapping for each cue, participants would fall back to the mapping associated with the cue irrespective of the mapping prior to the cue-switch. In contrast, when a shift in mapping is encoded for the cue-switch, pointing performance will shift relative to the mapping prior to the cue-switch by an amount equal to the difference between the previously learned mappings. Results indicate that the
cues signal absolute visuomotor mappings rather than relative shifts between mappings.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v080387},
event_name = {31st European Conference on Visual Perception},
event_place = {Utrecht, Netherlands},
state = {published},
author = {van Dam LCJ{vandam}{Research Group Multisensory Perception and Action}, Hawellek DJ{dhawid}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ SoumanFSE2007,
title = {Walking in circles: the role of visual information in navigation},
journal = {Perception},
year = {2008},
month = {8},
volume = {37},
number = {ECVP Abstract Supplement},
pages = {41},
abstract = {Common myth has it that people who get lost in unfamiliar terrain end up walking in circles. We tested whether this is true and what role visual information plays. Participants walked for several hours under various conditions of visual information. Their task was to walk as straight as possible in the direction indicated at the beginning of the experiment. GPS was used to register their walking paths. Participants often walked in circles when blindfolded, although only a few exhibited a consistent bias in one direction. In a forest, with ample visual information at short distance but few distant landmarks, participants walked in circles under an overcast sky. However, in sunny weather they walked perfectly straight. In the Sahara desert, finally, participants walked in circles only during the night when the moon was not visible, not when either the moon or the sun was visible. The results suggest that visual information is critical for walking straight. Furthermore, the mere availability of optic flow is not sufficient; participants needed distant landmarks to walk straight.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v080146},
event_name = {31st European Conference on Visual Perception},
event_place = {Utrecht, Netherlands},
state = {published},
author = {Souman JL{souman}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Sreenivasa MN{manu}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 5477,
title = {Multisensory integration of non-visual sensory information for the perceptual estimation of walking speed},
year = {2008},
month = {7},
volume = {9},
number = {181},
pages = {266},
abstract = {A variety of sources of sensory information (e.g., visual, inertial and proprioceptive) are available for the estimation of walking speed. However, little is known about how they are integrated. We present a series of experiments, using a 2-IFC walking speed judgment task, investigating the relative contributions of the inertial and proprioceptive information. We used a circular treadmill equipped with a motorized handlebar to manipulate inertial and proprioceptive inputs independently. In one experiment we directly compared walking-in-place (WIP) and walking-through-space (WTS). We found that WIP is perceived as slower than WTS. The WIP condition creates a special conflict situation because the proprioceptive cue indicates motion whereas the inertial cue indicates an absence of motion through space. In another experiment we presented a range of conflicts by combining a single proprioceptive input with different inertial inputs. We found that the inertial input is weighted more heavily when it indicates a faster walking speed than proprioception. Conversely, it receives less weight if it indicates a lower speed. This suggests that the inertial cue becomes more reliable with increasing velocity. Our findings show a more important role for inertial information in the perception of walking speed than has previously been suggested in the literature.},
web_url = {http://imrf.mcmaster.ca/IMRF/2008/pdf/FullProgramIMRF08.pdf},
event_name = {9th International Multisensory Research Forum (IMRF 2008)},
event_place = {Hamburg, Germany},
state = {published},
author = {Frissen I{ifrissen}{Department Human Perception, Cognition and Action}, Souman JL{souman}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}}
}
@Poster{ 5024,
title = {Amodal multimodal integration},
journal = {Journal of Vision},
year = {2008},
month = {6},
volume = {8},
number = {6},
pages = {526},
abstract = {Recently it has been shown that congruent visual and haptic signals are integrated in a statistically optimal fashion. Spatial separation between the signals can preclude this integration. Here we investigate whether optimal integration occurs between an amodally completed visual stimulus and its haptic counterpart. Thus, we ask whether integration occurs despite the sensory information not being derived from the same spatial location. This may indicate that subjects inferred that the visually specified parts of the stimulus and the haptic information have a common cause and thus should be integrated.
The visual stimulus was a disparity-defined bar that was partially occluded (amodal completion condition). The bar could also be touched behind the occluder using two fingers. Subjects' task was to discriminate the size of two successively presented bars using a 2-IFC paradigm, where one interval contained conflicting haptic and visual information. Performance in the amodal completion condition was not different from a condition in which the occluder was removed (visual-haptic condition). Both conditions were consistent with an optimal integration strategy.
More interestingly, integration deviated from optimality when we introduced a slight modification to the visual stimulus - small gaps between the bar and the occluder (gap condition). This manipulation interfered with the amodal completion process and consequently subjects relied almost completely on the haptic information for discriminating the size of the bars.
These findings suggest that visual and haptic information can be combined optimally even when the visual information is not directly specified by sensory input but results from amodal completion. In conclusion, it seems that the perceptual system determines when to combine visual and haptic information based on the likelihood that the signals belong to the same object (i.e., whether there is a causal relationship between the signals) and not only on signal co-location.},
web_url = {http://www.journalofvision.org/content/8/6/526},
event_name = {8th Annual Meeting of the Vision Sciences Society (VSS 2008)},
event_place = {Naples, FL, USA},
state = {published},
digital = {1},
DOI = {10.1167/8.6.526},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Adams W}
}
@Poster{ 5172,
title = {Humans use stereo and haptic distance cues to improve physical object size estimates},
journal = {Journal of Vision},
year = {2008},
month = {6},
volume = {8},
number = {6},
pages = {1090},
abstract = {An object's visual image size is an ambiguous cue to its physical size. But if the object's distance is known, the physical size may be disambiguated and more accurately estimated. We asked whether humans use distance cues to improve size judgments. We presented participants with a virtual ball that changed in physical size (imagine a balloon inflating or deflating) as well as distance simultaneously, and asked them to discriminate whether the physical size increased or decreased. With only visual image size information, size-change discrimination was poor. When additional haptic and/or stereo distance-change cues were provided, size-change judgments improved significantly. We conclude that the brain exploits its knowledge of how image size, physical size, and distance are related to improve perceptual size judgments. We compared participants' use of distance cues with predictions of an ideal observer that incorporates distance cues in proportion to their reliability to quantify human behavior. We independently measured participants' stereo and haptic distance discrimination performance, applied these empirical reliability measurements in the ideal model, and found participants use stereo information to a similar degree as the ideal observer, but use haptic information less than the ideal observer. This result was confirmed by an additional conflict condition in which haptic and stereo distance-change cues indicated different values and their relative use could be measured. Lastly, we ran a condition in which participants gripped the object with two fingers, so that a direct size-change cue was available, and found participants integrated direct and indirect size-change cues to improve performance.},
web_url = {http://www.journalofvision.org/8/6/1090/},
event_name = {8th Annual Meeting of the Vision Sciences Society (VSS 2008)},
event_place = {Naples, FL, USA},
state = {published},
digital = {1},
DOI = {10.1167/8.6.1090},
author = {Battaglia P{batt0086}{Research Group Multisensory Perception and Action}, Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Schrater P, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Kersten D{kersten}{Department Human Perception, Cognition and Action}}
}
@Poster{ 5149,
title = {The effect of walking on perceived visual speed depends on visual speed},
journal = {Journal of Vision},
year = {2008},
month = {6},
volume = {8},
number = {6},
pages = {1146},
abstract = {Perceived visual speed has been reported to be reduced during walking compared to standing still. This so-called ‘subtraction effect’ has been attributed to an automatic subtraction of part of the walking speed from the visual speed (Durgin et al., 2005). We investigated how general this subtraction effect is, by varying both visual speed and walking speed in a series of experiments. Observers judged the visual speed of a simulated ground plane (presented through a HMD) in a 2IFC task. In one interval, they walked in place on a treadmill, in the other they stood still. In different experiments, the interval with the visual standard speed, the order of the intervals, and the walking speed were varied. In all experiments, observers consistently reported the perceived visual speed for the lowest standard speed to be lower during walking than during standing still. However, most observers also perceived the highest standard speed as faster during walking than during standing still, which is clearly incompatible with the subtraction effect. We tested the apparent interaction between visual speed and walking in another experiment by presenting the exact same visual speed in the two intervals and asking the observers again to judge which of the two appeared to be faster. As in the previous experiments, the visual speed was reported to be faster during standing for slow visual speeds; this gradually changed into the opposite for faster visual speeds. Taken together, the results question the generality of the subtraction effect and raise doubts regarding the hypothesized functional role of this effect.},
web_url = {http://journalofvision.org/8/6/1146/},
event_name = {8th Annual Meeting of the Vision Sciences Society (VSS 2008)},
event_place = {Naples, FL, USA},
state = {published},
DOI = {10.1167/8.6.1146},
author = {Souman JL{souman}{Department Human Perception, Cognition and Action}, Frissen I{ifrissen}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}}
}
@Poster{ 4963,
title = {Perceived visual speed while walking: more than subtraction},
year = {2007},
month = {10},
pages = {61-62},
web_url = {http://www.esf.org/conferences/07226},
event_name = {ESF-EMBO Symposium on Three Dimensional Sensory and Motor Space: Perceptual Consequences of Motor Action},
event_place = {Sant Feliu de Guixols, Spain},
state = {published},
author = {Souman JL{souman}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 5034,
title = {Altering cue reliability changes visual: auditory perception within seconds},
journal = {Perception},
year = {2007},
month = {8},
volume = {36},
number = {ECVP Abstract Supplement},
pages = {172},
abstract = {Multimodal (visual & auditory) information is combined by weighting unimodal signals by their relative reliabilities. Reliability in real-world stimuli, however, does not remain constant. Here we ask whether the system adjusts the weights online in the McGurk effect. Subjects were presented with an audiovisual recording of an actor's face producing a series of syllables. Such syllables were composed either of consistent multimodal information, or of an auditory /ba/ paired with lip movements of /ga/. Subjects' task was to continuously report the perceived syllable. We varied the reliability of the visual information by changing the visibility of the face. With inconsistent multimodal information, increased reliability of the visual signal biased perception towards the illusory /da/ percept, whereas decreased reliability biased perception towards the auditory /ba/. However, changes in reliability had perceptual consequences only after 3–4 s. Therefore, the reliability estimate of the sensory signals is not instantaneous but continuously updated with a time-constant of a few seconds.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v070511},
event_name = {30th European Conference on Visual Perception},
event_place = {Arezzo, Italy},
state = {published},
digital = {1},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 5035,
title = {Perceived visual speed while walking: Adding to subtraction},
journal = {Perception},
year = {2007},
month = {8},
volume = {36},
number = {ECVP Abstract Supplement},
pages = {169},
abstract = {Perceived visual speed has been reported to be reduced during walking compared to standing still. This effect has been attributed to an automatic subtraction of part of the walking speed from the visual speed (Durgin et al., 2005, Journal of Experimental Psychology: Human Perception and Performance, 31, 339–353). Here, we show that both the magnitude and the direction of this 'reduction' depend on visual speed. Observers compared visual speed of a simulated ground plane (presented through an HMD) while standing and walking (1.1 m/s). PSEs, estimated for three standard speeds during walking (1.0, 2.0, 3.0 m/s simulated speed), increased approximately linearly with the standard speed, with a slope > 1. For the lowest standard speed, the PSEs were lower than the standard speed, whereas they were higher for the highest standard speed. The latter is clearly incompatible with an automatic subtraction effect. The results suggest that, contrary to what Durgin et al. (2005) claim, the effect of walking on perceived visual speed is not independent of the visual speed and raise questions regarding the functional role of the subtraction effect.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v070512},
event_name = {30th European Conference on Visual Perception},
event_place = {Arezzo, Italy},
state = {published},
author = {Souman JL{souman}{Research Group Multisensory Perception and Action}, Frissen I{ifrissen}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4522,
title = {Perception of crossmodal simultaneity is not transitive},
journal = {Perception},
year = {2007},
month = {8},
volume = {36},
number = {ECVP Abstract Supplement},
pages = {209},
abstract = {When signals in different modalities are physically asynchronous, observers may still perceive them as being simultaneous due to differential physical transmission and physiological conduction delays.
If sensory signals in different modalities are processed independently of each other as assumed by independent-channels models, then the relative timings that lead to perceived simultaneity should be transitive across several modality pairs. For instance, if modality A has to be presented 20 ms before modality B to seem simultaneous with it and modality B 10 ms before modality C, then A should be presented 30 ms before C to seem simultaneous with it.
Using temporal order judgments we measured the point of subjective simultaneity (PSS) in three different modality pairs (visual-auditory, tactile-auditory, visual-tactile). Our results indicate that PSS are not transitive. Thus, we infer that signals are not processed independently from each other. Perceived signal timing in one modality depends on which other modality it is paired with. Therefore, independent-channels models cannot account for processes underlying decisions about simultaneity of signals in different modalities.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v070526},
event_name = {30th European Conference on Visual Perception},
event_place = {Arezzo, Italy},
state = {published},
author = {Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ ConradVE2007,
title = {Spatio-temporal grouping in perceptual rivalry},
journal = {Perception},
year = {2007},
month = {8},
volume = {36},
number = {ECVP Abstract Supplement},
pages = {87},
abstract = {Perceptual rivalry occurs when stimuli have multiple interpretations which are equally probable. For example, two distributions of dots, one translating leftward and one rightward can be perceived as a 3-D cylinder rotating clockwise or counterclockwise. Repetitive presentation of an ambiguous stimulus can stabilize one perceptual interpretation. Here we examined how unambiguous spatio-temporal contexts affect stabilization of ambiguous structure-from-motion stimuli. Using an intermittent-presentation paradigm we stabilized one interpretation of the ambiguous cylinder and introduced contextual information by providing an unambiguous version of the 3-D cylinder. We manipulated spatial distance and temporal proximity between ambiguous stimulus and unambiguous context. The task was to report perceived rotation direction of the ambiguous cylinder. We found that stabilization was more likely to be disrupted by unambiguous context that had appeared in corresponding locations in preceding frames. Context simultaneously presented with the stimulus at a different spatial location had little effect. This shows that temporal contexts were weighted more than spatial contexts, and suggests that the visual system analyses recent perceptual history to interpret the present input.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v070492},
event_name = {30th European Conference on Visual Perception},
event_place = {Arezzo, Italy},
state = {published},
author = {Conrad V{conrad}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Vuong QC{qvuong}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4519,
title = {Temporal adaptation influences non-adapted modality pairs},
journal = {Perception},
year = {2007},
month = {8},
volume = {36},
number = {ECVP Abstract Supplement},
pages = {207},
abstract = {Repeated presentations of asynchronous audiovisual signals recalibrate the point of subjective simultaneity. It is not clear whether this effect is the result of an adaptation mechanism specific to the audiovisual modality pair or whether it is due to a mechanism common to all modalities. Only in the latter case would we expect repeated asynchronous audiovisual information to influence perceived simultaneity in other modality pairs (audiotactile or visuotactile).
We presented a series of asynchronous audiovisual signals to the participants (SOA: 200ms, -200ms) and then estimated the point of subjective simultaneity for three modality pairs (audiovisual, audiotactile, visuotactile).
Consistent with previous research, perceived simultaneity in the audiovisual modality pair changed for the two SOAs. Subjective simultaneity shifted also in the audiotactile modality pair. Hence, we conclude that the three tested modalities share a common adaptation mechanism. Moreover, since the visuotactile modality pair was not significantly affected by the manipulation, audiovisual adaptation is likely the result of a phenomenal shift of the auditory events in time.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v070528},
event_name = {30th European Conference on Visual Perception},
event_place = {Arezzo, Italy},
state = {published},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4794,
title = {Auditory and visual stimuli alter tactile motion perception},
year = {2007},
month = {7},
volume = {10},
pages = {100},
abstract = {Recently Sekuler et al. [1] investigated the motion-bounce illusion and found that sound can
influence ambiguous visual motion perception. Here we investigated the motion-bounce illusion
in the tactile-auditory and the tactile-visual domain, respectively. Using a vibro-tactile
belt consisting of 7 vibrators we generated tactile apparent motion stimuli by sequentially activating
neighboring motors with an onset of 200ms between the motors. Starting at the left
and the right hip, two tactile motion stimuli run towards each other. On the body midline an
ambiguous event was perceived: either the transition of both stimuli with continuing motion
trajectories or a bounce event followed by the reversal of the movement direction. Presenting
just the tactile motion stimulus resulted in an ambiguous percept. In the tactile-auditory
condition the presentation of an auditory beep 200ms before the collision was sufficient to disambiguate
the percept such that a significantly higher proportion of bounces was reported. The
presentation of the sound at the time of the tactile collision event or 400ms before reduced the
proportion of bounces reported, which became indistinguishable from the uni-modal baseline condition.
In the tactile-visual condition the presentation of a flashlight at the time of the tactile
collision, 200ms or 400ms before the collision induced a higher proportion of bounce reports.
This indicates that the temporal window of audio-tactile integration is comparable to that found
in the visual-auditory domain. These results suggest that similar supramodal mechanisms exist
for apparent motion perception.},
file_url = {/fileadmin/user_upload/files/publications/TWK_2007_Abstract_[0].pdf},
web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=vitello01},
event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)},
event_place = {Tübingen, Germany},
state = {published},
author = {Vitello M{vitello}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4521,
title = {Crossmodal simultaneity is not transitive},
year = {2007},
month = {7},
volume = {10},
pages = {85},
abstract = {Sensory processing times can greatly differ between the senses. Hence, signals from different modalities that are presented with a delay corresponding to the processing time difference between these modalities appear simultaneous to the observer.
We hypothesized that if processing is independent for each modality and if there is a common mechanism for the perception of simultaneity across modalities then subjective simultaneity should be transitive. For example, if modality A has to be presented 20ms before B and modality B 10ms before C to be perceived as synchronous, then modality A should be presented 30ms before C to seem synchronous with it.
Observers judged the temporal order of three different modality pairs (visual-tactile, tactile-auditory, and visual-auditory) for eleven stimulus onset asynchronies. Stimuli from the three conditions were not blocked but presented randomly to prevent attentional prior-entry effects that might lead to artifactual intransitivity.
From the responses, we determined the presentation delay leading to subjective simultaneity. To appear synchronous the visual signal has to be presented 34ms before the tactile, the tactile 55ms before the auditory, and the visual 28ms before the auditory. These results deviate significantly from transitivity. We conclude that either stimulus processing time in one modality depends on which other modality it is paired with, or the notion of a common mechanism for crossmodal simultaneity has to be rejected.},
web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=machulla01},
event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)},
event_place = {Tübingen, Germany},
state = {published},
digital = {1},
author = {Machulla TK{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4568,
title = {Head-Trunk Relation Before and During a Turn: the Effect of Turn Angle},
year = {2007},
month = {7},
volume = {10},
pages = {79},
abstract = {During walking, the behavior of the head and trunk is closely coupled. This becomes particularly
clear while taking a turn. Here, we investigate this coupling during two phases of turning,
before and during. Before a turn people make anticipatory orientations of the head into the
direction of the turn. Previous research suggests that this anticipation occurs at a constant
distance before the curve for different walking speeds. However, in most studies participants
only performed 90° turns. We tested whether anticipation distance is invariant across different
turn angles. As the turn progresses the head continues to look further into the turn than the
trunk, slowly converging towards it by the end of the turn. An additional question here is the dependence
of relative yaw between head and trunk on the turn angle. To answer these questions
we measured head-trunk angles across a range of different turn angles. Participants followed
predefined paths around obstacles with the radius of turn indicated by circles drawn on the
floor. Turning angles ranged from 45° to 180° in steps of 45°. The position and orientation of
both the head and trunk were measured using an optical tracking system. Two parameters were
calculated from the data: head anticipation and maximum relative yaw. Head anticipation is the
distance in space where the head starts to look into the upcoming turn. Maximum relative yaw
is the maximum difference occurring between the yaw angle of the head and the trunk during
a turn. Both head anticipation and maximum relative yaw increased with turn angle, although
maximum relative yaw leveled off after 135°. In a second experiment, participants followed
the same paths as in Experiment 1, but were not constrained in the turn radius. Results showed
that turn radius decreased with increasing turn angle. Nevertheless, we found the same pattern
of results as in Experiment 1. In conclusion, the relation between head and trunk both before
and during a turn is dependent on the angle of turn one is about to make.},
web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=sreenivasa01},
event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)},
event_place = {Tübingen, Germany},
state = {published},
author = {Sreenivasa M{manu}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Souman JL{souman}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4608,
title = {Optimal integration of spatiotemporal information across vision and touch},
year = {2007},
month = {7},
volume = {8},
number = {64},
abstract = {The brain integrates spatial (e.g., size, location) as well as temporal (e.g., event perception) information across different sensory modalities (e.g., Ernst & Banks, 2002; Bresciani et al., 2006) in a statistically optimal manner to come up with the most reliable percept. That is, the variance (just-noticeable difference, JND) of the multisensory perceptual estimate is maximally reduced. Here we asked whether this holds also for spatiotemporal information encoded by different sensory modalities.
To study this question, we visually presented observers with a dot moving along a line. In the haptic condition, the observer's finger was passively moved along the line using a robot device. Observers had to discriminate the length of two lines presented in a 2-IFC task either visually alone, haptically alone or bimodally. To judge the length of a line, spatial information (position of the moving dot or finger) had to be accumulated in time.
The bimodal discrimination performance (JND) was significantly improved relative to the performance in the uni-modal tasks and did not differ from the predictions of an optimal integration model. This result indicates that observers adopt an optimal integration strategy to integrate spatial information accumulated in time.},
web_url = {http://imrf.mcmaster.ca/IMRF/2007/viewabstract.php?id=64&symposium=0},
event_name = {8th International Multisensory Research Forum (IMRF 2007)},
event_place = {Sydney, Australia},
state = {published},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4563,
title = {Perceived duration in crossmodally defined intervals},
year = {2007},
month = {7},
volume = {10},
pages = {68},
abstract = {Sensory modalities do not act in isolation; they can influence one another. One intriguing example
is temporal ventriloquism, where auditory and visual signals presented asynchronously
appear to be closer in time than they physically are. The characteristics of this perceptual phenomenon
are still not completely defined. In particular, it is not clear whether the order of presentation
has an influence on the effect, whether there are temporal grouping effects for other
modalities, and whether this effect only occurs for certain asynchronies of the signals. This
study aims to fill this gap by investigating how crossmodal temporal grouping
affects perceived duration when onset and offset of a stimulus are defined by signals of different
modalities in all combinations of auditory, visual, and tactile stimuli. To test this, a two
interval forced choice (2IFC) paradigm was used, where participants had to judge which of two
intervals was shorter. One interval consisted of a lasting sound (filled interval), the other interval
was defined by two signals of different modalities at the onset and offset (empty interval).
These two signals could be auditory (beep), visual (flash of a LED) or tactile (vibration on the
participants’ left index finger). Different stimulus onset asynchronies (SOA) between 100 and
900ms of the crossmodal interval were tested. Moreover, the order of the signals forming the
empty interval was reversed. The duration of the filled interval was 30 to 170% of the duration
of the empty interval. For each SOA of the empty interval, the point of subjective equality
(PSE) for the duration was computed, which is the duration of the filled interval perceived to
be equal to the empty interval. Results confirmed that for audiovisual intervals grouping effects
were more salient with larger SOAs between the signals defining the empty interval. When the
light preceded the sound, grouping effects were also found for 500 and 700ms. For audiotactile
intervals, instead, grouping effects occurred between 500 and 900ms but only when vibration
preceded sound. Lastly, for visuotactile intervals, grouping occurred at SOA of 500ms but only
when light preceded vibration. From these results, we can conclude that audiovisual temporal
ventriloquism is only an instance of a more general crossmodal grouping effect that occurs with
various modalities. Moreover, our data indicate that this effect acts differently on specific
modality combinations. Temporal grouping is affected by both the time between the signals
and their order of presentation.},
web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=mayer01},
event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)},
event_place = {Tübingen, Germany},
state = {published},
author = {Mayer KM{kama}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4518,
title = {Perceived timing across modalities},
year = {2007},
month = {7},
volume = {2007},
pages = {15},
abstract = {Crossmodal stimuli can be perceived as being simultaneous even if they are not physically synchronous. This phenomenon has been attributed to different conduction delays. In this work we tested whether time in different modalities is processed independently or whether crossmodal interactions influence the perception of synchrony. (1) If unimodal timing is processed independently, perceived simultaneity across modality pairs should be transitive. For example, if modality A has to be presented 20ms before modality B to appear simultaneous and modality B 10ms before modality C, then A should be presented 30ms before C to appear simultaneous. Subjects made Temporal Order Judgments (TOJ) of asynchronous signals in three modality pairs (audio-visual, audio-tactile, visual-tactile). The Points of Subjective Simultaneity (PSS) calculated for each modality pair are not transitive, indicating that perceived time is not processed independently in each modality. (2) It has been shown that PSS of audio-visual signals can be recalibrated by the repeated presentation of asynchronous stimuli. It is not clear whether this effect is the result of an adaptation mechanism specific to the audio-visual modality pair or whether it is due to a common crossmodal mechanism. Using the same type of measurements, we show that PSS following presentation of an asynchronous audio-visual stimulus is not constant in the audio-tactile modality pair. Hence, crossmodal timing is also affected by a common adaptation mechanism. Since PSS for visual-tactile stimuli was not affected, audio-visual adaptation effects are likely the result of a phenomenal shift of the auditory events in time. Our results indicate that perceived timing in one modality depends on which other modality it is paired with and that perceived simultaneity also changes for non-adapted modality pairs. These results are not consistent with independent-channels models of crossmodal timing, but rather indicate that time perception is affected by crossmodal interactions.},
file_url = {fileadmin/user_upload/files/publications/Perception-Action-Symposium-2007-DiLuca.pdf},
web_url = {http://www.hcsnet.edu.au/hcsnetevents/2007/pasymposium/},
event_name = {International Intersensory Research Symposium 2007: Perception and Action},
event_place = {Sydney, Australia},
state = {published},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4892,
title = {Perceived Visual Speed while Walking: More than Subtraction},
year = {2007},
month = {7},
volume = {10},
pages = {155},
abstract = {Perceived visual speed has been reported to be reduced during walking compared to standing
still. This so-called ‘subtraction effect’ has been attributed to an automatic subtraction of part
of the walking speed from the visual speed [1]. In this study, we investigated how general this
subtraction effect is, by varying visual speed, walking speed and the order of the intervals in
which observers walked or stood still. Five observers judged the visual speed of a simulated
ground plane that was presented on an HMD in a 2IFC task. In one interval, they judged the
visual speed while walking in place on a treadmill (0.6, 1.0, or 1.4 m/s), and they did the same
while standing still in the other interval. Simulated visual standard speed, presented during
walking, was 1.0, 2.0, or 3.0 m/s. All observers compared the three visual standard speeds
during the three walking speeds against a range of visual test speeds during standing still and
indicated in which of the two intervals the visual speed appeared to be higher. For three of the
observers the order of the intervals was standing—walking, while it was reversed for the other
two observers. From the speed judgments, the PSEs in the nine conditions were estimated
by fitting psychometric functions. Surprisingly, the PSEs were hardly affected by walking
speed. Visual standard speed strongly affected visual speed judgments for the observers who
first stood still and then walked. The lowest standard speed was reported to be perceived as
slower during walking than during standing still, while the opposite was true for the highest
standard speed. When observers first walked and then stood still, this effect did not occur.
Taken together, the results question the generality of the subtraction effect and raise doubts
regarding the hypothesized functional role of this effect.},
web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=souman01},
event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)},
event_place = {Tübingen, Germany},
state = {published},
author = {Souman JL{souman}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4453,
title = {Sound alters tactile motion perception},
year = {2007},
month = {7},
volume = {8},
number = {68},
abstract = {Recently Sekuler et al. (Nature, 1997) investigated the motion-bounce illusion and found that sound can influence ambiguous visual motion perception. Here we investigated the motion-bounce illusion in the tactile-auditory domain. Using a vibro-tactile belt consisting of 7 vibrators we generated tactile apparent motion stimuli by sequentially activating neighboring motors with an onset of 200 ms between the motors. Starting at the left and the right hip, two tactile motion stimuli run towards each other. On the body midline an ambiguous event was perceived: either the transition of both stimuli with continuing motion trajectories or a bounce event followed by the reversal of the movement direction. Presenting just the tactile motion stimulus resulted in an ambiguous percept. The presentation of an auditory beep at the time of the collision was sufficient to disambiguate the percept such that a significantly higher proportion of bounces was reported. The presentation of the sound 200 ms or 400 ms before the tactile collision event reduced the proportion of bounces reported, which became indistinguishable from the uni-modal baseline condition. This indicates that the temporal window of audio-tactile integration is comparable to that found in the visual-auditory domain. These results suggest that similar supramodal mechanisms exist for apparent motion perception.},
file_url = {/fileadmin/user_upload/files/publications/IMRF_2007_Abstract_[0].pdf},
web_url = {http://imrf.mcmaster.ca/IMRF/2007/viewabstract.php?id=68&symposium=0},
event_name = {8th International Multisensory Research Forum (IMRF 2007)},
event_place = {Sydney, Australia},
state = {published},
author = {Vitello M{vitello}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4851,
title = {Spatio-Temporal Grouping in Perceptual Rivalry},
year = {2007},
month = {7},
volume = {10},
number = {43},
abstract = {Perceptual rivalry occurs when stimuli have multiple interpretations which are equally probable. For example, two distributions of dots, one translating leftward and one rightward, can be perceived as a 3D-cylinder rotating clockwise or counterclockwise. Repetitive presentation of the ambiguous stimulus can stabilize one perceptual interpretation. Here we examined how unambiguous spatio-temporal contexts affected stabilization of ambiguous structure-from-motion stimuli. Using an intermittent presentation paradigm we stabilized one interpretation of the ambiguous cylinder and introduced contextual information by providing an unambiguous version of the 3D-cylinder. We manipulated spatial distance and temporal proximity between ambiguous stimulus and unambiguous context. The task was to report the perceived rotation direction of the ambiguous cylinder. We found that stabilization was more likely to be disrupted by unambiguous context that had appeared in corresponding locations in preceding frames. Context simultaneously presented with the stimulus at a different spatial location had little effect. This shows that temporal contexts were weighted more than spatial contexts, and suggests that the visual system analyses recent perceptual history to interpret the present input.},
web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=conrad01},
event_name = {10th Tübinger Wahrnehmungskonferenz (TWK 2007)},
event_place = {Tübingen, Germany},
state = {published},
author = {Conrad V{conrad}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Vuong QC{qvuong}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4684,
title = {Temporal calibration between the visual, auditory and tactile senses: A psychophysical approach},
year = {2007},
month = {7},
volume = {1},
pages = {36-37},
abstract = {Human observers acquire information about physical properties of the environment through different sensory
modalities. For natural events, these sensory signals show a specific temporal, spatial and contextual configuration that
aids the integration into a coherent multisensory percept. For multimodal virtual environments, however, signals have
to be created and displayed separately for different modalities, which may result in a miscalibration of these signals.
This, in turn, can greatly reduce the observer’s sense of immersion and presence.
Using psychophysical methods, we investigate fundamental questions regarding how the temporal alignment of signals
from the visual, auditory and tactile modalities is achieved. A first project examines the perception of subjective
simultaneity of signals. Simultaneity detection poses a non-trivial matching problem to the human brain: physical and
neural transmission times differ greatly between the senses. As there is only partial compensation for these differential
delays, subjective simultaneity may result from presenting stimuli with a physical delay. Here, we are interested in
whether this phenomenon reflects an amodal timing mechanism that works across all modalities in a uniform fashion.
Further, we look at the sensitivity for asynchrony detection for different modality pairs as well as at interindividual
differences.
In a second project, we examine the ability of the human cognitive system to adapt to asynchronous information in
different modalities. Adaptation may be used to reduce the disruptive effects of temporal miscalibration between
signals in different modalities. We are interested in the strength of adaptation as well as the mechanism underlying this
effect.
Future projects aim at the investigation of
- the precise relationship between the perception of synchrony and multimodal integration,
- the influence of prior knowledge about a common origin of signals on the perception of synchrony
- the influence of timing on the perception of cause and effect
- the neural basis of the detection of synchrony
In conclusion, our research seeks to understand the mechanisms underlying temporal calibration between different
sensory modalities, with the goal of identifying factors that foster multimodal integration and, in turn, the sense of
presence.},
web_url = {http://peach.iti.gr/PeachSS1_final.pdf},
event_name = {1st Peach Summer School},
event_place = {Santorini, Greece},
state = {published},
author = {Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4516,
title = {How long does it take to adjust a weight?},
journal = {Journal of Vision},
year = {2007},
month = {6},
volume = {7},
number = {9},
pages = {92},
abstract = {Cue integration has been demonstrated to be close to optimal under temporally constant stimulus conditions. That is, cues are assigned different weights according to their relative reliabilities. In real-world situations, however, stimulus conditions constantly change. For example, depending on the viewing situation the reliability of cues may change over time. Here we ask whether the system takes such continuous changes in reliability into account by adjusting the cue weights online. Subjects were binocularly presented with a spinning disk slanted in depth. Thus one cue was disparity, the other motion. There was a ±30 deg conflict between the slants defined by the two cues. We varied the reliability of the motion cue by sinusoidally changing the speed of rotation at different frequencies (0.067, 0.1, 0.2 Hz). Decreasing the speed of rotation decreases the reliability of the motion cue. However, it does not affect the magnitude of slant specified by the motion cue. Subjects’ task was to continuously adjust the angle of a two-line probe according to the perceived slant. We found that increasing the motion cue reliability with faster rotations biased perceived slant towards the slant defined by the motion cue. The surface was therefore perceived to oscillate in depth according to the modulation of speed. The oscillation amplitude decreased with higher modulation frequency. The phase shift between rotation modulation and perceived oscillation increased with frequency. As a control, we repeated the task in order to estimate subjects’ reaction time for adjusting the probe. In this control the slant of the surface was actually oscillating in depth. By subtracting the reaction time from the phase shifts obtained in the experimental conditions we estimated that the time it takes to update the weights is less than a second.},
web_url = {http://www.journalofvision.org/7/9/92/},
event_name = {7th Annual Meeting of the Vision Sciences Society (VSS 2007)},
event_place = {Sarasota, FL, USA},
state = {published},
digital = {1},
DOI = {10.1167/7.9.92},
author = {Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Knill D}
}
@Poster{ 4515,
title = {Integration of alternating cues to slant},
journal = {Journal of Vision},
year = {2007},
month = {6},
volume = {7},
number = {9},
pages = {90},
abstract = {Several studies showed that cue integration is close to optimal when two or more cues are available simultaneously. However, most of these studies consider only constant cues. Here we investigate how different depth cues interact when they are not presented simultaneously but they are alternating. We ask whether there is fusion of cues in time and how the interaction between cues depends on the frequency of alternation.
To study this, we presented two surfaces in alternation at six different frequencies (from 0.8 to 15 Hz). One surface was defined by a random-dot pattern displayed in stereo (disparity-defined surface); the other was defined by a monocularly viewed regular texture (texture-defined surface). The angle between the two surfaces was always +20 or −20 degrees. Participants had to indicate whether the texture-defined surface was slanted to the left or to the right. The orientation of the two surfaces was varied jointly using a double staircase procedure to find the orientation at which the texture-defined surface appeared frontoparallel.
Results indicate that there is a significant interaction between the cues depending on frequency. That is, the orientation of the stimulus needed to see the texture-defined surface as frontoparallel depended on the sign of conflict and the frequency of alternation. At high frequencies (above 6 Hz) there was a perceptual bias of the texture-defined surface in the direction of the disparity-defined surface, indicating integration of the signals. At low frequency (0.8 Hz), however, this interaction not only disappeared, it reversed direction, indicating a contrast effect. This contrast may be explained as an aftereffect resulting from adaptation to the disparity-cue slant. We conclude that simultaneity between cues is not necessary for integration to occur. There seems to be a temporal window for integration on the order of 150 ms.},
web_url = {http://www.journalofvision.org/7/9/90/},
event_name = {7th Annual Meeting of the Vision Sciences Society (VSS 2007)},
event_place = {Sarasota, FL, USA},
state = {published},
digital = {1},
DOI = {10.1167/7.9.90},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4607,
title = {Neural systems involved in visual-tactile integration of shape information},
year = {2007},
month = {6},
volume = {2007},
pages = {3},
abstract = {The brain integrates multisensory information to create a coherent and more reliable perceptual estimate of the environment. This multisensory estimate is a linear combination of the individual unimodal estimates that are weighted by their relative reliabilities (e.g., Ernst and Banks, Nature, 2002).
Here we explored the neural substrates underlying visual-tactile integration in shape processing. To identify multisensory integration sites, we correlated behavioural data with neural activity evoked by multisensory integration.
Observers were presented with elliptical shapes that they could see and/or touch. Observers’ task was to judge the shape of the ellipse. Introducing conflicts between seen and felt shape allowed us to examine whether participants relied more on visual or tactile information (relative weight of vision and touch). To manipulate the weight attributed to vision, we degraded visual information.
We observed a decrease in visual weight when vision was degraded and thus became less reliable. Discrimination performance increased when both modalities were presented together, indicating that visual and tactile shape information is indeed fused.
The BOLD response bilaterally in the anterior IPS is modulated by visual input. The change in BOLD signal in these areas correlates with the cue weights, suggesting that this activity reflects the relative weighting of vision and touch.},
file_url = {fileadmin/user_upload/files/publications/Psychologie_und_Gehirn_2007-Helbig.pdf},
web_url = {https://eldorado.tu-dortmund.de/handle/2003/24421},
event_name = {Fachtagung Psychologie und Gehirn 2007},
event_place = {Dortmund, Germany},
state = {published},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Noppeney U{unoppe}{Department Human Perception, Cognition and Action}{Research Group Cognitive Neuroimaging} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
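The reliability-weighted linear combination invoked in the abstract above can be written out explicitly; the following is a minimal sketch assuming independent Gaussian noise on the visual and haptic estimates (the symbols are illustrative and not taken from the entry):
\[
\hat{S}_{VH} = w_V \hat{S}_V + w_H \hat{S}_H, \qquad
w_V = \frac{1/\sigma_V^2}{1/\sigma_V^2 + 1/\sigma_H^2}, \qquad
w_H = 1 - w_V, \qquad
\sigma_{VH}^2 = \frac{\sigma_V^2\,\sigma_H^2}{\sigma_V^2 + \sigma_H^2}.
\]
Under these assumptions the bimodal variance is never larger than the smaller of the two unimodal variances, which is the gain in discrimination performance that the visual-tactile experiments above test for.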
@Poster{ 4609,
title = {Neural systems involved in visual-tactile integration of shape information},
year = {2007},
month = {1},
event_name = {42nd Winter Seminar: Biophysical Chemistry, Molecular Biology and Cybernetics of Cell Functions},
event_place = {Klosters, Switzerland},
state = {published},
author = {Helbig H{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Noppeney U{unoppe}{Department Human Perception, Cognition and Action}{Research Group Cognitive Neuroimaging} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4180,
title = {Automatic integration of visual, tactile and auditory signals for the perception of sequences of events},
journal = {Perception},
year = {2006},
month = {8},
volume = {35},
number = {ECVP Abstract Supplement},
pages = {102},
abstract = {Sequences of visual flashes, tactile taps, and auditory beeps were presented simultaneously. For each session, subjects were instructed to count the number of events presented in one modality (focal modality) and to ignore the other modalities (background). The number of events presented in the background modality(ies) could differ from the number of events in the focal modality. The experiment consisted of nine different sessions, all nine combinations between visual, tactile, and auditory signals being tested. In each session, the perceived number of events in the focal modality was significantly influenced by the background signal(s). The visual modality, which had the largest intrinsic variance (focal modality presented alone), was the most susceptible to background-evoked bias and the least efficient in biasing the other two modalities. Conversely, the auditory modality, which had the smallest intrinsic variance, was the least susceptible to background-evoked bias and the most efficient in biasing the other two modalities. These results show that visual, tactile, and auditory sensory signals tend to be automatically integrated for the perception of sequences of events. They also suggest that the relative weight of each sensory signal in the integration process depends on its intrinsic relative reliability.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v060466},
event_name = {29th European Conference on Visual Perception},
event_place = {St. Petersburg, Russia},
state = {published},
author = {Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}, Dammeier F{dammeier}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ BompasE2006,
title = {Perceptual similarity between grey levels depends on learnt sensorimotor correlation between grey levels and eye movements},
journal = {Perception},
year = {2006},
month = {8},
volume = {35},
number = {ECVP Abstract Supplement},
pages = {88},
abstract = {We propose that perceived similarity between two grey levels is related to the way they have been previously associated through eye saccades, actually reflecting their probability of co-occurrence in space and time. In the present study, we modified relative perceptual distances between grey levels by artificially associating certain pairs of grey levels with small eye saccades and other pairs with large eye saccades. In the test stage, we varied the luminance of a test patch (5 levels from 15 to 21 cd/m²) to determine the luminance perceived as 'right-in-between' that of two reference stimuli, dark and light (10 and 26 cd/m²). Each trial involved simultaneous presentation of one test grey and the two reference greys. The subject's task was to indicate whether the test patch looked more similar to the light or dark reference grey. Adaptation trials involved successive presentation of one of the test greys centred on the screen, followed by the dark stimulus with a 10° eccentricity or the light stimulus with a 20° eccentricity, with equal probability. A control condition was employed without eccentricity difference. Only in the main condition does the post-test show an increased relative similarity between the test greys and the dark patch.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v060456},
event_name = {29th European Conference on Visual Perception},
event_place = {St. Petersburg, Russia},
state = {published},
author = {Bompas A{aline}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ FrissenSE2006,
title = {Aftereffects of prolonged locomotion on a circular treadmill},
year = {2006},
month = {6},
volume = {7},
pages = {31},
abstract = {Vestibular activity, motor command efference copies, and proprioception, among others, contribute to self-motion perception. According to Durgin et al. (2005) these sources are recalibrated when they are in conflict with the global self-motion percept. We tested this hypothesis by having participants walk blindfolded on a circular treadmill, under different conditions which varied in speed and direction of treadmill rotation independent of the participants’ walking speed. Recalibration was assessed with two tasks. Participants either stood in place and judged when the treadmill had rotated 360° (passive task), or walked 360° on a stationary treadmill (active task). Durgin’s results indicate that participants should undershoot relative to pretest performance in the active task when the treadmill had rotated in the walking direction and that they should overshoot when it was moving against the walking direction. For the passive task the opposite pattern was predicted. However, we obtained an overshoot in both tasks increasing with the duration of adaptation. One possible source for the difference between Durgin’s and our results might be that his participants had visual information about their location in space at the start of the pre- and post-tests. In our study, disorientation might have accumulated, leading to an increasing overshoot.},
web_url = {http://imrf.mcmaster.ca/IMRF/2006/viewabstract.php?id=175},
event_name = {7th International Multisensory Research Forum (IMRF 2006)},
event_place = {Dublin, Ireland},
state = {published},
author = {Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Souman JL{souman}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4084,
title = {Integration of shape information from vision and touch: Optimal perception and neural correlates},
journal = {Neuroimage},
year = {2006},
month = {6},
volume = {31},
number = {Supplement 1},
pages = {S158},
web_url = {http://www.sciencedirect.com/science/article/pii/S1053811906004575},
event_name = {Twelfth Annual Meeting of the Organization for Human Brain Mapping (HBM 2006)},
event_place = {Firenze, Italy},
state = {published},
DOI = {10.1016/j.neuroimage.2006.04.176},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ricciardi E, Pietrini P and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4085,
title = {Integration of shape information from vision and touch: Optimal perception and neural correlates},
journal = {Journal of Vision},
year = {2006},
month = {6},
volume = {6},
number = {6},
pages = {179},
abstract = {Recently, Ernst and Banks (2002) showed that visual-haptic size information is integrated in a statistically optimal manner, i.e. visual and haptic size estimates are weighted according to their reliabilities. Here we investigate whether the same is true for visual-haptic shape information. We further explored the neural substrates underlying visual-haptic integration in shape processing using fMRI and examined whether neural activity elicited by multisensory integration correlates with cue weighting.
For this we used ridges of elliptical objects. Subjects saw the front of the object and/or they felt the back. The elongation of the elliptical ridges on both sides of the objects could differ. Subjects’ task was to decide whether the ellipse was elongated vertically or horizontally. This way we could study the weight of vision and touch during shape discrimination. We varied the weight given to vision by degrading the visual information, using blur.
The psychophysical experiments showed that visual and haptic shape information is integrated in a statistically optimal way even when the visual information is displayed via a mirror. That is, we observed a decrease in visual weight when vision was degraded and thus less reliable. Furthermore, we found an increase in discrimination performance when both modalities were presented together.
We also determined neural activity with fMRI while individuals were performing the same ellipse discrimination task. When the visual reliability was reduced in the visual-haptic task, neural responses decreased in the lateral occipital cortex while they increased in the anterior intraparietal cortex, a brain region strongly involved in multisensory integration.},
web_url = {http://www.journalofvision.org/content/6/6/179},
event_name = {6th Annual Meeting of the Vision Sciences Society (VSS 2006)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/6.6.179},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ricciardi E, Pietrini P and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4042,
title = {Visual bias of perceived tactile location},
journal = {Journal of Vision},
year = {2006},
month = {6},
volume = {6},
number = {6},
pages = {181},
abstract = {The primary source of information to determine where on the body we are being touched is derived from the somatosensory system. However, can visual information influence the perceived location of touch on the body? Ten participants localized a brief air puff (ca. 250 ms) applied to the smooth ventral surface of the right forearm somewhere between wrist and elbow. Localization was measured with a 2AFC paradigm in which participants judged the location of the tactile stimulus relative to a visual reference using two opposing 1-up/2-down staircases. Participants’ task was to indicate whether the air puff was closer or further from the wrist relative to the reference. In one condition the visual reference was a line drawn on the forearm midway between elbow and wrist (AIR ONLY). In another the air puff was accompanied by a temporally synchronous line of laser light projected onto the reference location (AIR+LASER). We expected the synchronous light to facilitate multimodal integration and therefore affect discrimination performance without introducing a bias. In the AIR ONLY condition the PSE was on average on the reference location. Surprisingly, in the AIR+LASER condition we did not find a change in discriminability relative to the AIR ONLY condition. We found, however, a significant shift of the PSE by 0.9 cm towards the elbow. This bias was evident in eight out of the ten participants. This demonstration of a visual effect on tactile localization may indicate that judging the location of a visual reference on the body is not free from biases.},
web_url = {http://journalofvision.org/6/6/181/},
event_name = {6th Annual Meeting of the Vision Sciences Society (VSS 2006)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/6.6.181},
author = {Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 3979,
title = {Active arm movement impairs tactile discrimination performance},
year = {2006},
month = {3},
volume = {9},
pages = {87},
abstract = {It is well known that sensory consequences of self-produced movements can be predicted and
that this prediction can be used to attenuate the sensory effects. It has been shown that the
cerebellum is involved in predicting the sensory feedback of self-produced movements and
thus it can modulate somatosensory cortex activity [1].
In this study we investigate performance in a tactile discrimination task during rest and
compare it to performance during active arm movement. Participants’
distal phalanx of the left index finger was glued to a metal pin of a custom developed Lateral-
Pin-Stroke-Device (LPSD) in order to provide pure skin stretch and to avoid cues derived from
pin slip parallel to the skin. The device is able to move the pin on a defined trajectory in any
radial lateral direction initiating from a central starting point. The task was to judge whether
the second of two pin strokes was shifted clockwise or counterclockwise compared to the first.
This experiment was performed under two different conditions—a static condition where no
arm movement was required and an active condition where participants had to perform an arm
movement in forward direction while they were doing the discrimination task.
Preliminary results show that direction discrimination performance is much higher in the
static condition. Participants were no longer able to reach the 84% discrimination threshold
in the active condition. These results are consistent with our expectations, i.e., a significantly
impaired tactile sensation during active movement. In analogy to saccadic suppression we will
discuss our results in terms of tactile suppression. Results indicate a slight anisotropy, with a
higher threshold in the upwards (towards the fingertip) than in the rightwards direction.
This seems to be in line with Keyson and Houtsma’s [2] findings.},
web_url = {http://www.twk.tuebingen.mpg.de/twk06/abstract.php?_load_id=vitello01},
event_name = {9th Tübingen Perception Conference (TWK 2006)},
event_place = {Tübingen, Germany},
state = {published},
author = {Vitello MP{vitello}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4832,
title = {Haptic Exploration Behavior During Bimodal Object Recognition},
year = {2006},
month = {3},
volume = {9},
pages = {79},
abstract = {For the purpose of object recognition, the combination of complementary information derived
from the different sensory systems (vision and touch) should result in a rich representation of
the object in memory and may consequently enhance recognition performance.
The purpose of this study is to investigate the haptic exploration behavior during bimodal
object recognition. Specifically, we asked whether the exploration behavior for recognition
depends on what modality was used during learning of an object.
Analogous to [1], we designed an old/new object recognition task using novel objects, each built
from 6 Lego bricks. In each condition subjects had to learn 4 novel object shapes either
haptically (H), visually (V), or combined haptically and visually (VH). In a following recognition
phase we added 4 distractor objects and subjects had to decide whether the presented
object was known (old) or unknown (new) from the learning phase. The recognition phase was
always bimodal (VH).
We tested 12 subjects in all three conditions (V-to-VH, H-to-VH, VH-to-VH) measuring
recognition performance. Subjects’ exploration behavior was recorded on video tape. For
technical reasons, the recordings of only 8 subjects were used for the video analysis. For the
analysis, the video was cut into single clips, each showing the exploration of one stimulus. These
clips were replayed in randomized order to raters, who judged the ‘hapticality’ of subjects’
exploration behavior on a scale with 9 possible answers ranging from ‘subject just held and
turned the stimulus’ to ‘subject explored the stimulus with fingers’. We found that ‘hapticality’
during visual-haptic recognition was judged largest for the exploration following haptic learning (H-to-VH),
smallest for the one following visual learning (V-to-VH), and intermediate for visuo-haptic
learning (VH-to-VH).
This suggests that, when recognizing the objects, subjects match their exploration behavior to
the one used during learning. They use the haptic modality for recognition when the object was
learned haptically. When the object was learned visually, they predominantly use the visual
modality during recognition, ignoring touch. Also when the object was learned bimodally (VH),
the exploration strategy was matched between learning and recognition test, here indicating
that the haptic modality was used almost exclusively
for actively manipulating the object but not for exploring its shape.},
web_url = {http://www.twk.tuebingen.mpg.de/twk06/abstract.php?_load_id=lange01},
event_name = {9th Tübingen Perception Conference (TWK 2006)},
event_place = {Tübingen, Germany},
state = {published},
author = {Newell FN{fiona}{Department Human Perception, Cognition and Action}, Lange C{clange}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 3898,
title = {Integration of shape information from vision and touch: Optimal perception and neural correlates},
year = {2006},
month = {3},
volume = {9},
pages = {78},
abstract = {Recently, Ernst and Banks (2002) showed that visual-haptic size information is integrated in a
statistically optimal manner, i.e. visual and haptic size estimates are weighted according to their
reliabilities. Here we investigate whether the same is true for visual-haptic shape information.
We further explored the neural substrates underlying visual-haptic integration in shape processing
using fMRI and examined whether neural activity elicited by multisensory integration
correlates with cue weighting.
For this we used ridges of elliptical objects that subjects could see and/or feel. Subjects
saw the front of the object and they felt the back. The elongation of the elliptical ridges on
both sides of the objects could differ and subjects’ task was to decide whether the ellipse was
elongated vertically or horizontally. This way we could study the weight of vision and touch
during shape discrimination. We varied the weight given to vision by degrading the visual
information, using blur.
The psychophysical experiments showed that visual and haptic shape information is integrated
in a statistically optimal way even when the visual information is displayed via a mirror.
That is, we observed a decrease in visual weight when vision was degraded and thus less reliable.
Furthermore, we found an increase in discrimination performance when both modalities
were presented together. These results were crucial since the fMRI experiments relied on presenting
objects in a mirror.
We also determined neural activity with fMRI while individuals were performing the same
ellipse discrimination task. When the visual reliability is reduced in the visual-haptic task,
neural responses decreased in the lateral occipital cortex while increased in the anterior intraparietal
cortex, a brain region strongly involved in multisensory integration.},
web_url = {http://www.twk.tuebingen.mpg.de/twk06/abstract.php?_load_id=helbig01},
event_name = {9th Tübingen Perception Conference (TWK 2006)},
event_place = {Tübingen, Germany},
state = {published},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Ricciardi E, Pietrini P and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 4388,
title = {Texture and haptic cues in slant discrimination: combination is sensitive to reliability but not statistically optimal},
year = {2006},
month = {3},
volume = {48},
pages = {80},
event_name = {48. Tagung Experimentell Arbeitender Psychologen (TeaP 2006)},
event_place = {Mainz, Germany},
state = {published},
author = {Rosas P{pedror}, Wagemans J, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Wichmann FA{felix}{Department Empirical Inference}}
}
@Poster{ 4831,
title = {Visual Influence on Tactile Localization},
year = {2006},
month = {3},
volume = {9},
pages = {77},
abstract = {The primary source of information to determine where on the body we are being touched is
derived from the somatosensory system. However, can visual information influence the perceived
location of touch on the body? Ten participants localized a brief air puff (about 250 ms)
applied to the smooth ventral surface of the right forearm somewhere between wrist and elbow.
Localization was measured with a 2AFC paradigm in which participants judged the location of
the tactile stimulus relative to a visual reference using two opposing 1-up/2-down staircases.
Participants’ task was to indicate whether the air puff was closer or further from the wrist relative
to the reference. In one condition the visual reference was a line drawn on the forearm
midway between elbow and wrist (AIR ONLY). In another, the air puff was accompanied by a
temporally synchronous line of laser light projected onto the reference location (AIR+LASER).
The experiment was analogous to that of Bertelson and Aschersleben [1], which investigated
visual influences on auditory localization. Based on this, we expected the synchronous light to
facilitate multimodal integration and therefore affect tactile discrimination performance without
introducing a bias. In the AIR ONLY condition the PSE was on average on the reference
location. Surprisingly, in the AIR+LASER condition we did not find a change in discriminability
relative to the AIR ONLY condition. We found, however, a significant shift of the PSE by
0.9 cm towards the elbow. This bias was evident in eight out of the ten participants. This
demonstration of a visual effect on tactile localization may indicate that judging the location of
a visual reference on the body is not free from biases.},
web_url = {http://www.twk.tuebingen.mpg.de/twk06/abstract.php?_load_id=frissen01},
event_name = {9th Tübingen Perception Conference (TWK 2006)},
event_place = {Tübingen, Germany},
state = {published},
author = {Frissen I{ifrissen}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 3544,
title = {From Independence to Fusion: A Comprehensive Model for Sensory Integration},
journal = {Journal of Vision},
year = {2005},
month = {9},
volume = {5},
number = {8},
pages = {650},
abstract = {Recently we demonstrated that humans integrate visual and haptic information in a statistically optimal way (Ernst & Banks, 2002). That is, subjects make optimal use of the information provided in order to reach the decision necessary for the task. As shown by Hillis et al. (2002), however, this does not necessarily imply that the sensory signals are completely fused into a unified percept. If subjects completely fused the signals, by definition they would no longer retain access to the individual sources of information. In contrast, Hillis et al. found some weaker form of interaction between the sensory signals. The degree of interaction between the sensory signals can be taken as a definition of the strength of coupling between the signals: there is no coupling if the signals are independent; there is maximal coupling if the signals are fused. Using Bayesian decision theory I here propose a comprehensive model that can account for both results. The prior used in this model represents the probability of the physical relationship (mapping) between the signals derived by the sensory systems. This probability has a narrow tuning if the mapping between the physical signals is relatively constant (e.g., the mapping between texture and disparity signals). If the mapping changes easily (e.g., the mapping between visual and haptic signals), the distribution of possible mappings reflected in such a prior is wider. This is called the “coupling prior” because the tuning of the prior will determine the level of interaction, i.e., the strength of coupling. I will further present data from a visual-haptic discrimination experiment that support these theoretical considerations. Taken together, I propose that such a Bayesian model that uses a “Coupling Prior” for describing sensory interactions is a convenient theoretical framework for understanding multisensory integration as a continuous process between independence and complete fusion.},
web_url = {http://www.journalofvision.org/content/5/8/650.short},
event_name = {Fifth Annual Meeting of the Vision Sciences Society (VSS 2005)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/5.8.650},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
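The coupling prior described in the abstract above can be illustrated with a minimal sketch, assuming Gaussian likelihoods for the two unimodal estimates \(\hat{s}_1, \hat{s}_2\) (with variances \(\sigma_1^2, \sigma_2^2\)) and a zero-mean Gaussian prior with variance \(\sigma_c^2\) on the difference between the underlying signals; the notation is illustrative, not taken from the entry:
\[
\hat{s}_1^{\mathrm{MAP}} = \hat{s}_1 + \frac{\sigma_1^2}{\sigma_1^2 + \sigma_2^2 + \sigma_c^2}\,(\hat{s}_2 - \hat{s}_1).
\]
In this sketch, \(\sigma_c^2 \to \infty\) recovers independent estimates and \(\sigma_c^2 \to 0\) recovers complete fusion with reliability-based weights, matching the continuum between independence and fusion described in the abstract.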
@Poster{ 3545,
title = {Localization, not perturbation, affects visuomotor recalibration},
journal = {Journal of Vision},
year = {2005},
month = {9},
volume = {5},
number = {8},
pages = {871},
abstract = {The visuomotor system recalibrates when visual and motor maps are in conflict, bringing the maps back into correspondence. For recalibration to occur, a conflict has to be detected. Ernst and Endreß (VSS '04) showed that the rate of recalibration in a one-dimensional visually guided pointing task depends on the uncertainty of the feedback: faster recalibration with less uncertainty. In the present work, we examined two-dimensional recalibration and how the form of visual feedback affects it. Subjects pointed with an unseen hand to a brief visual target. Visual feedback was given indicating where the pointing movement landed. We introduced a constant conflict between pointing and feedback location and examined the changes in pointing as the subject adapted. We asked whether differential vertical and horizontal uncertainty in the visual feedback affects recalibration rate differentially, or whether rate is determined by the total uncertainty. We also varied feedback uncertainty in two ways. (1) We blurred the visual feedback, thereby reducing its localizability; in this condition, uncertainty could be determined on-line from one feedback stimulus. (2) We introduced random trial-by-trial perturbations in the feedback; in this condition, uncertainty had to be learned over time. In both cases, the distributions determining the vertical and horizontal uncertainties were 2D Gaussians. Adaptation profiles (changes over time in the pointing location relative to the visual feedback) changed only in response to changes in localizability. Recalibration was slowest in the direction of greatest uncertainty when uncertainty was due to blur, but rate was unaffected by trial-by-trial variation. This means that subjects do not estimate uncertainty over time in order to adjust reaching. Rather, they adjust trial by trial based mostly on feedback from the previous trial.},
web_url = {http://www.journalofvision.org/content/5/8/871.abstract},
event_name = {Fifth Annual Meeting of the Vision Sciences Society (VSS 2005)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/5.8.871},
author = {Burge J{jburge}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Banks M{martybanks}}
}
@Poster{ 3495,
title = {Looking in the mirror does not prevent multimodal integration},
journal = {Journal of Vision},
year = {2005},
month = {9},
volume = {5},
number = {8},
pages = {750},
abstract = {Ernst & Banks (2002) showed that humans integrate visual and haptic signals in a statistically optimal way if they are derived from the same spatial location. Integration seems to be broken if there is a spatial discrepancy between the signals (Gepshtein et al., VSS 04).
Can cognitive factors facilitate integration even when the signals are presented at two spatial locations? We conducted two experiments, one in which visual and haptic information was presented at the same location. In the second experiment, subjects looked at the object through a mirror while touching it. This way there was a spatial offset between the two information sources. If cognitive factors are sufficient for integration to occur, i.e. knowledge that the object seen in the mirror is the same as the one touched, we expect no difference between the two experimental results. If integration breaks due to the spatial discrepancy, we expect subjects' percept to be less biased by multimodal information.
To study integration participants looked at an object through a distortion lens. This way, for both the “mirrored” and “direct vision” conditions there was a slight shape conflict between the visual and haptic modalities. After looking at and feeling the object simultaneously participants reported the perceived shape by either visually or haptically matching it to a reference object.
Both experiments revealed that the shape percept was in-between the haptically and visually specified shapes. Importantly, there was no significant difference between the two experimental results regardless of whether subjects matched the shape visually or haptically. However, we found a significant difference between matching by touch and matching by vision. Haptic judgments are biased towards the haptic input and vice versa.
In conclusion, multimodal signals seem to be combined if observers have high-level cognitive knowledge about the signals belonging to the same object, even when there is a spatial discrepancy.},
file_url = {/fileadmin/user_upload/files/publications/pdf3495.pdf},
web_url = {http://www.journalofvision.org/content/5/8/750.abstract},
event_name = {Fifth Annual Meeting of the Vision Sciences Society (VSS 2005)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/5.8.750},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 3245,
title = {Simple Stimulus Metrics vs. Gestalt in High-Level Aftereffects},
journal = {Journal of Vision},
year = {2005},
month = {9},
volume = {5},
number = {8},
pages = {250},
abstract = {High-level visual aftereffects (AEs) arise when adaptation to stimuli such as shapes, faces, or spatial configurations affects the subsequent perception of comparable figures. Like classical AEs, high-level AEs are characterized by a percept that is distorted in feature space in the opposite direction of the vector between adaptation and test stimulus. Shape-contrast AEs have been reported for aspect ratio, convexity and taper, but the combined effect of such parameters remained unexplored. In the present experiment the adapting stimulus consisted of two arcs. While keeping the flexion of the arcs constant we varied the distance between them in a range of ±8.2°, allowing us to examine the effects of (1) aspect ratio and (2) convexity/concavity. The test stimulus was a closed ellipsoid of about 7.8° in diameter. Perceptual distortions were assessed with repetitive adaptation and testing in the context of a staircase procedure converging to the point of subjective circularity. We found significant main effects for both factors and the interaction, i.e. smaller aspect ratios lead to stronger effects and convex stimuli result in larger effects than concave ones. The direct spatial correspondence between the location of the adapting and test stimulus was not critical for inducing an AE. We repeated the experiment with a set of smaller stimuli of roughly 1° and the distance between the curves scaled down proportionately. Results in both experiments were comparable, although the interaction was not significant in experiment 2. The results suggest that simple stimulus metrics, such as the absolute size of the curves and distance between them, are less important in creating this AE than the overall shape created by the pair of arcs together.},
web_url = {http://www.journalofvision.org/content/5/8/250.short},
event_name = {Fifth Annual Meeting of the Vision Sciences Society (VSS 2005)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/5.8.250},
author = {M\"uller K-M{kaim}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Leopold DA{davidl}}
}
@Poster{ 3542,
title = {Using a Kalman Filter to predict visuomotor adaptation behavior},
journal = {Perception},
year = {2005},
month = {8},
volume = {34},
number = {ECVP Abstract Supplement},
pages = {245-246},
abstract = {The sensorimotor system recalibrates when the visual and motor maps are in conflict, bringing the maps back into correspondence. We investigated the rate at which this recalibration occurs. The Kalman filter is a reasonable statistical model for describing visuomotor adaptation. It predicts that the rate of adaptation is dependent on the reliability of the feedback signal. It also predicts that random trial-to-trial perturbation of the feedback signal should have little or no effect on the adaptation rate. We tested these predictions using a pointing task. Subjects pointed with the unseen hand to a brief visual target. Visual feedback was then provided to indicate where the pointing movement had landed. During the experiment, we introduced a constant conflict between the pointing and feedback locations, and we examined the changes in pointing as the subject adapted. From the change in pointing position over trials we determined the adaptation rate. In experiment 1, we tested whether the reliability of the feedback affected adaptation rate by blurring the visual feedback and thereby reducing its localisability. Six levels of blur were used and spatial discrimination measurements confirmed that the blur was effective in altering stimulus localisability. We also constructed a Kalman filter model of the task. We found that adaptation rates of the filter and of the subjects decreased when blur was increased (i.e., with less reliable feedback). In experiment 2, the reliability of the visual feedback signal was manipulated by randomly perturbing the feedback signal on a trial-by-trial basis. Again, in good agreement with the prediction of the Kalman filter, we found no significant effect on adaptation rate as we manipulated the amount of perturbation. Taken together, these results provide evidence that human visuomotor adaptation behaviour is well modeled by a Kalman filter that uses weighted information from previous trials, including the reliability of the information, to update the visuomotor map.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v050541},
web_url2 = {http://ecvp2005.neuralcorrelate.com/},
event_name = {28th European Conference on Visual Perception},
event_place = {A Coruña, Spain},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Burge J{jburge} and Banks M{martybanks}}
}
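For a single remapped dimension, the Kalman-filter account described in the abstract above reduces to a scalar update of the estimated visuomotor offset from the feedback error on each trial; the following is a minimal sketch with illustrative symbols (\(f_t\) feedback location, \(\sigma_{\mathrm{fb}}^2\) feedback variance, \(Q\) drift noise), not the authors' exact model:
\[
P_t^{-} = P_{t-1} + Q, \qquad
K_t = \frac{P_t^{-}}{P_t^{-} + \sigma_{\mathrm{fb}}^2}, \qquad
\hat{m}_t = \hat{m}_{t-1} + K_t\,(f_t - \hat{m}_{t-1}), \qquad
P_t = (1 - K_t)\,P_t^{-}.
\]
A larger feedback variance (e.g., more blur) lowers the gain \(K_t\) and hence slows adaptation, whereas trial-by-trial perturbations of \(f_t\) leave the gain unchanged unless \(\sigma_{\mathrm{fb}}^2\) itself is re-estimated, consistent with the two predictions tested in the abstract.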
@Poster{ 3543,
title = {When touch and vision meet in the brain},
year = {2005},
month = {7},
volume = {14},
pages = {65},
event_name = {14th ESCoP Meeting 2005},
event_place = {Kansas City, MO, USA},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 3494,
title = {Attention does not affect multisensory cue weighting},
year = {2005},
month = {6},
volume = {6},
pages = {39},
abstract = {Humans gather information about their environment from multiple sensory channels. It seems that cues from separate sensory modalities (e.g., vision and haptics) are combined in a statistically optimal way according to a maximum-likelihood estimator (Ernst & Banks, 2002). Ernst and Banks showed that for bi-modal perceptual estimates, the weight attributed to one sensory channel changes when its relative reliability is modified by increasing the noise associated to its signal.
Here we address the question of whether selectively increasing the attentional load on one sensory channel affects the weighting of cues from different sensory channels.
In our experiment, subjects’ main task was to estimate the size of a raised bar using vision alone, haptics alone, or both modalities combined. Their performance when performing the main task alone is compared to the performance obtained when a concurrent visual distractor task is performed. We found that vision-based estimates are more affected by a visual distractor than haptics-based estimates. Thus, attention is indeed selectively detracted from the visual modality. Moreover, we found that the cue weighting is not affected by adding the visual distractor task.
Therefore we can conclude that multisensory integration occurs at an early stage of processing and is not affected by attention.},
file_url = {/fileadmin/user_upload/files/publications/pdf3494.pdf},
web_url = {http://imrf.mcmaster.ca/IMRF/2005/viewabstract.php?id=51},
event_name = {6th International Multisensory Research Forum (IMRF 2005)},
event_place = {Trento, Italy},
state = {published},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 3344,
title = {Integration of visual-haptic shape information},
year = {2005},
month = {3},
file_url = {/fileadmin/user_upload/files/publications/pdf3344.pdf},
web_url = {http://www.worldhaptics.org/2005/},
event_name = {1st Joint Worldhaptic Conference and Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems (WorldHaptics 2005)},
event_place = {Pisa, Italy},
state = {published},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 3238,
title = {Cognitive factors facilitate multimodal integration},
year = {2005},
month = {2},
volume = {8},
pages = {72},
abstract = {Ernst & Banks (2002) showed that humans integrate visual and haptic signals in a statistically optimal way if they are derived from the same spatial location. Integration seems to be broken if there is a spatial discrepancy between the signals (Gepshtein et al., in press). Can cognitive factors facilitate integration even when the signals are presented at two spatial locations? We conducted two experiments, one in which visual and haptic information was presented at the same location. In the second experiment, subjects looked at the object through a mirror while touching it. This way there was a spatial offset between the two information sources. If cognitive factors are sufficient for integration to occur, i.e. knowledge that the object seen in the mirror is the same as the one touched, we expect no difference between the two experimental results. If integration breaks due to the spatial discrepancy, we expect subjects’ percept to be less biased by multimodal information. To study integration, participants looked at an object through a distortion lens. This way, for both the “mirrored” and “direct vision” conditions, there was a slight shape conflict between the visual and haptic modalities. After looking at and feeling the object simultaneously participants reported the perceived shape by either visually or haptically matching it to a reference object. Both experiments revealed that the shape percept was in-between the haptically and visually specified shapes. Importantly, there was no significant difference between the two experimental results regardless of whether subjects matched the shape visually or haptically. However, we found a significant difference between matching by touch and matching by vision. Haptic judgments are biased towards the haptic input and vice versa. In conclusion, multimodal signals seem to be combined if observers have high-level cognitive knowledge about the signals belonging to the same object, even when there is a spatial discrepancy.},
web_url = {http://www.twk.tuebingen.mpg.de/twk05/abstract.php?_load_id=helbig01},
event_name = {8th Tübingen Perception Conference (TWK 2005)},
event_place = {Tübingen, Germany},
state = {published},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ LangeE2005,
title = {Judging Size by Hand: No Benefit for Bimanual Estimates},
year = {2005},
month = {2},
volume = {8},
pages = {73},
abstract = {When redundant sources of sensory information are available, integrating these sources is beneficial
for a system in order to decrease the perceptual noise and so to increase the accuracy
of the overall percept [1]. However, a potential cost may come from incorrect binding of the
different sources, which will generally evoke perceptual illusions [2].
Here we ask whether humans take advantage of a bimanual size estimate originating from
an object of constant size. The stimulus was a cylindrical object that subjects felt with their
left and right hand simultaneously. To display the bimanual haptic stimulus we used two
PHANToM force-feedback devices and measured size discrimination performance using a
2IFC paradigm. Subjects’ task was to decide which of two bimanual stimuli was bigger. From
the resulting psychometric functions we determined the JND for bimanual vs unimanual size
discrimination and the point of subjective equality (PSE).
There was no difference between the bimanual and the unimanual JNDs, which indicates that subjects
did not benefit from having two size estimates available in the bimanual situation. We
therefore conclude that there is no integration of size estimates between hands when an object
is touched bimanually. We speculate that this failure of integration results from the fact that,
naturally, an object is touched at slightly different spatial locations by the two hands, and so the
information is not truly redundant, even though subjects were told that they were touching an
object of constant size. Not being able to integrate information that comes from two different
spatial locations may help to prevent misbinding of different sensory sources.},
web_url = {http://www.twk.tuebingen.mpg.de/twk05/programm.php},
event_name = {8th Tübingen Perception Conference (TWK 2005)},
event_place = {Tübingen, Germany},
state = {published},
author = {Lange C{clange}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
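For orientation, the quantitative prediction tested in the entry above follows from inverse-variance-weighted integration (generic symbols, not taken from the abstract): if the left- and right-hand size estimates were independent with variances \sigma_L^2 and \sigma_R^2, optimal integration would give
\[
\sigma_{\mathrm{bi}}^2 = \frac{\sigma_L^2\,\sigma_R^2}{\sigma_L^2 + \sigma_R^2} \le \min(\sigma_L^2,\sigma_R^2),
\qquad
\sigma_L = \sigma_R = \sigma \;\Rightarrow\; \mathrm{JND}_{\mathrm{bi}} = \mathrm{JND}_{\mathrm{uni}}/\sqrt{2},
\]
i.e. roughly a 29% reduction of the bimanual JND; the absence of such a reduction is what the abstract takes as evidence against integration across the hands.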
@Poster{ 2923,
title = {Combining sensory information from vision and touch},
journal = {International Journal of Psychophysiology},
year = {2004},
month = {9},
volume = {54},
number = {1-2},
pages = {57-58},
web_url = {http://www.sciencedirect.com/science/article/pii/S0167876004000790},
event_name = {12th World Congress of Psychophysiology: The Olympics of the Brain},
event_place = {Thessaloniki, Greece},
state = {published},
DOI = {10.1016/j.ijpsycho.2004.05.004},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 3068,
title = {Feeling what you hear: Auditory signals can modulate the perception of tactile taps},
journal = {Perception},
year = {2004},
month = {8},
volume = {33},
number = {ECVP Abstract Supplement},
pages = {143},
abstract = {Investigating multisensory integration, Shams et al (2000 Nature 408 788) recently found that the number of perceived visual flashes could be altered by a sequence of beeps presented simultaneously. Here, we tested whether auditory sequences of beeps can modulate the tactile perception of sequences of taps (2 to 4 taps per sequence). In experiment 1, the auditory and tactile sequences were presented simultaneously. The number of beeps delivered in the auditory sequence was either the same as, less than, or more than the number of tactile taps. Though task-irrelevant (subjects were instructed to focus on the tactile stimuli), the auditory stimuli significantly modulated subjects' tactile perception. Such modulation occurred only when the auditory and tactile stimuli were structurally similar. In experiment 2, we tested whether auditory - tactile interaction depends on simultaneity or whether a bias can be evoked without temporal overlap between the auditory and tactile sequences. Audition significantly modulated tactile perception when the stimuli were presented simultaneously, but this effect gradually disappeared when a temporal asynchrony was introduced between auditory and tactile stimuli. These results show that when provided with auditory and tactile signals that are likely to be generated by the same stimulus, the brain tends to automatically combine these signals.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v040389},
event_name = {27th European Conference on Visual Perception},
event_place = {Budapest, Hungary},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action} and Drewing K{kdrewing}}
}
@Poster{ 2927,
title = {Re-learning the light source prior},
journal = {Journal of Vision},
year = {2004},
month = {8},
volume = {4},
number = {8},
pages = {294},
abstract = {In order to make sense of complex and ambiguous visual input, the visual system makes use of prior knowledge, or assumptions about the structure of the world. The use of these ‘priors’ is neatly incorporated into a Bayesian framework, which has been successfully employed to model many aspects of human visual perception. Priors are usually assumed to be based on observers' previous experience and the statistics of natural scenes. Little research, however, has examined how these priors are formed and adapted or how general or context-specific they are. Here we consider the ‘light from above’ prior that is used by the visual system to extract shape from shading. Observers viewed monocular disks with shading gradients at various orientations. The reported shape (convex or concave) as a function of stimulus orientation was used to recover the observer's assumed light position. During a training phase, observers could also ‘touch’ the disks. The stimulus orientations which were presented as haptically convex were consistent with a light source ±30° from the observer's original assumed light position. Following the training, observers again judged the stimulus shape from purely visual information. In a control experiment, observers made lightness judgements of a Mach-card type stimulus, before and after haptic training with the concave / convex disk stimuli. Firstly, our results confirm that observers assume a light position that is roughly overhead. Secondly, we found that haptic information can disambiguate the shading cue. Thirdly, using haptic feedback, observers were trained to use a slightly shifted light direction for their prior. Finally, the shift in prior light source direction was not specific to the trained task, but affected visual perception in a separate lightness judgement task.},
web_url = {http://www.journalofvision.org/content/4/8/294.abstract},
event_name = {Fourth Annual Meeting of the Vision Sciences Society (VSS 2004)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/4.8.294},
author = {Adams WJ, Graf EW and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 2926,
title = {The quality of feedback does affect the rate of visuomotor adaptation},
journal = {Journal of Vision},
year = {2004},
month = {8},
volume = {4},
number = {8},
pages = {286},
abstract = {Under many conditions, the human visuomotor system quickly adapts when confronted with spatially discrepant stimuli. This adaptability can easily be demonstrated with for example the change in pointing behavior when first wearing prism glasses. Here we asked whether the quality of feedback, that is its reliability, has an effect on the rate of adaptation. The hypothesis was that the system should adapt more quickly if the feedback was more reliable. To investigate this question we conducted two adaptation experiments: One pointing experiment (closed-loop), in which we measured the rate of adaptation to a lateral prismatic displacement, and a grasping experiment (open-loop), in which we measured adaptation to a size conflict of visually magnified objects. The experiments were conducted in three phases: A pre-adaptation phase to establish baseline performance, an adaptation phase in which the visuomotor conflict was introduced, and a post-adaptation phase to determine the after-effect. We determined the rate of adaptation from the change in visuomotor behavior during the second and third phases. In several conditions the reliability of feedback was manipulated. In the pointing experiment we varied the feedback reliability by blurring the target stimulus, which was a Gaussian blob of 10% contrast (sigma = 4, 32, 48 deg). In the grasping experiment we presented rectangular blocks of five different sizes for open-loop grasping. Here the feedback was manipulated by either adding noise to the visual display or by varying the haptic presentation time. The effectiveness of all feedback manipulations was assessed by measuring discrimination thresholds. To our surprise, in both experiments, closed-loop pointing and open-loop grasping, there was no significant effect of the reliability of feedback on the rate of adaptation. We conclude that the rate of visuomotor adaptation depends on the sensory estimate that determines the conflict but not on its reliability.},
web_url = {http://www.journalofvision.org/4/8/286/},
event_name = {Fourth Annual Meeting of the Vision Sciences Society (VSS 2004)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/4.8.286},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Endress V}
}
@Poster{ 2928,
title = {What is an inter-sensory object? Optimal combination of vision and touch depends on their spatial coincidence},
journal = {Journal of Vision},
year = {2004},
month = {8},
volume = {4},
number = {8},
pages = {140},
abstract = {Recent work showed that humans combine visual and haptic information about object size in a way that approaches statistical optimality: The precision of combined estimates is higher than with vision or touch alone (Ernst & Banks, 2002; Gepshtein & Banks, 2003). If the brain combines the visual and haptic signals optimally when they appear to come from the same object, the precision of combination should be greater when the signals originate from the same location in space. We examined this by varying the spatial offset between the visual and haptic stimuli. In a 2-IFC procedure, each interval contained visual and haptic stimuli, spatially superimposed or separated by up to 10 cm. The visual stimuli were random-dot stereograms of two parallel surfaces; the haptic stimuli were two parallel surfaces created by force-feedback devices. Observers indicated the interval containing the greater perceived inter-surface distance. The increase in precision with two cues as opposed to one cue should be greatest when visual and haptic weights are equal, so we equated the weights for each observer by finding the surface slant at which vision and haptics were equally precise (Gepshtein & Banks, 2003). We found that inter-modality, just-noticeable differences (JND) for object size grew as a function of spatial separation between the visual and haptic stimuli. With no separation, JNDs were close to optimal. With large separations, JNDs worsened. We examined whether this effect of spatial coincidence is affected by scene layout; for example, when the lack of coincidence is “explained” by occlusion of the haptic stimulus.},
web_url = {http://www.journalofvision.org/4/8/140/},
event_name = {Fourth Annual Meeting of the Vision Sciences Society (VSS 2004)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/4.8.140},
author = {Gepshtein S, Burge J{jburge}, Banks MS{martybanks} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 2772,
title = {Einfluss von Aufmerksamkeit auf die Integration visueller und haptischer Information},
journal = {Experimentelle Psychologie},
year = {2004},
month = {4},
volume = {46},
pages = {106},
abstract = {Humans integrate visual and haptic sensory impressions in a statistically optimal fashion [Ernst & Banks, Nature, 415, (2002)]. In doing so, the different sources of information are weighted according to their reliability. The question is to what extent attention modulates this integration process.
To address this, we used a dual-task paradigm to examine the influence of secondary tasks on the weighting of visual and haptic size information. The primary task was to discriminate bars by their size, visually and/or haptically; from this we determined the weighting of the visual and haptic size information. In parallel, either visual or haptic secondary tasks were performed (visual: orientation discrimination of lines; haptic: discrimination of friction coefficients).
The prediction was that the haptic secondary task would have a stronger influence on haptic size discrimination, and vice versa. This should in turn result in a stronger weighting of the visual modality when a haptic secondary task is performed, and vice versa. These predictions were confirmed experimentally. Whether the integration remains optimal is currently being examined.},
web_url = {http://www.allpsych.uni-giessen.de/teap/index.php},
editor = {Kerzel, D., V. Franz, K. Gegenfurtner},
event_name = {46. Tagung Experimentell Arbeitender Psychologen (TeaP 2004)},
event_place = {Giessen, Germany},
state = {published},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 2626,
title = {Integration kinaesthetischer Information in der haptischen Formwahrnehmung},
journal = {Experimentelle Psychologie},
year = {2004},
month = {4},
volume = {46},
pages = {64},
web_url = {http://www.allpsych.uni-giessen.de/teap/index.php},
editor = {Kerzel, D., V. Franz, K. Gegenfurtner},
event_name = {46. Tagung Experimentell Arbeitender Psychologen (TeaP 2004)},
event_place = {Giessen, Germany},
state = {published},
author = {Drewing K{kdrewing} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 2515,
title = {Cue Reliabilities Affect Cue Integration in Haptic Shape Perception},
year = {2004},
month = {2},
volume = {7},
pages = {123},
abstract = {When sliding a finger across a bumpy surface, the finger follows the geometry of the bumps/holes providing positional cues for the shape. At the same time the finger is opposed by forces related to the steepness of the bumps/holes. With a specific device Robles-de-la-Torre and Hayward [1] dissociated positional and force cues in the haptic perception of small-scale bumps and holes: Participants in this experiment reported to predominantly feel the class of shapes (bumps or holes) indicated by the force cues. Drewing and Ernst [2] extended this research by disentangling force and position cues to the perception of curves more systematically and by also quantifying the perceived curvature. The result was that the perceived curvature could be predicted from weighted averaging of the two cues. This is consistent with current models of cue integration [e.g., 3]. These integration models further predict that the cue weight is proportional to the cue's reliability. Here, we aimed at testing this prediction for the integration of force and position cues to haptic shape by manipulating the shapes' material properties: high softness can be assumed to decrease the reliability of the position cue as compared to low softness, and high friction to decrease the reliability of the force cue. Using the PHANToM force-feedback device we constructed haptic curve stimuli. We systematically intermixed force and position cues indicating curvatures of 14 and 24 /m. Using the method of double-staircases, we measured the point of subjective equality (PSE) of the curvature of these as compared to `natural' stimuli (i.e., with consistent position and force cues). From the PSE data we determined the cue weights. This was done under each combination of material properties (low vs high softness X low vs high friction). We found that material properties affected the cue weights in a manner consistent with our predictions. These results further confirm the validity of existing models of cue integration in haptic shape perception.},
web_url = {http://www.twk.tuebingen.mpg.de/twk04/index.php},
event_name = {7th Tübingen Perception Conference (TWK 2004)},
event_place = {Tübingen, Germany},
state = {published},
author = {Drewing K{kdrewing}, Wiecki T{wiecki} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
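One standard way to obtain cue weights from PSE data of the kind described above is the weighted-average model; the formula below is an illustration under that assumption and may differ in detail from the analysis used in the poster. If a conflict stimulus carries a position-cue curvature c_p and a force-cue curvature c_f, and perceived curvature is \hat{c} = w_p c_p + w_f c_f with w_p + w_f = 1, then the point of subjective equality against consistent comparison stimuli satisfies
\[
\mathrm{PSE} = w_p\,c_p + w_f\,c_f
\quad\Longrightarrow\quad
w_f = \frac{\mathrm{PSE} - c_p}{c_f - c_p},\qquad w_p = 1 - w_f,
\]
so shifts of the PSE under the softness and friction manipulations map directly onto changes of the estimated cue weights.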
@Poster{ 2513,
title = {Feeling What You Hear: An Auditory-Evoked Tactile Illusion},
year = {2004},
month = {2},
volume = {7},
pages = {73},
abstract = {Previous research indicated that sound can bias visual [14] as well as tactile perception [5,6]. The present experiment tested whether auditory stimuli can alter the tactile perception of sequences of taps (2 to 4 taps per sequence) delivered on the index fingertip. The taps were delivered using a PHANToM force feedback device. The subjects did not have any visual or auditory feedback about the tactile stimulation and their task was to report after each sequence how many taps they felt. In the first experiment, for some trials, auditory sequences of beeps were presented concomitantly with the tactile sequences (through earphones). The number of beeps diffused in the auditory sequence could be the same as, less, or more than the number of taps of the simultaneously presented tactile sequence. Though irrelevant (subjects were instructed to focus on the tactile stimuli), the auditory stimuli systematically biased subjects' tactile perception, i.e. subjects' responses depended significantly on the number of diffused beeps. The results also suggested that for such an auditory-tactile interaction to occur, a certain amount of structural congruency between the simultaneously presented stimuli is required. Indeed, the diffusion of an auditory stimulus obviously incongruent with the tactile sequence failed to evoke any bias of tactile perception. In the second experiment, we tested whether the auditory-tactile interaction also requires temporal congruency or whether a bias can be evoked without temporal overlapping between the auditory and tactile presented sequences. The tactile and auditory stimuli were the same as in the first experiment (the structurally incongruent auditory stimulus was not used here) but the auditory sequences were presented either simultaneously with, before the beginning, or after the end of the tactile sequences. Audition strongly biased tactile perception when the stimuli were temporally concomitant (reproduction of the results obtained in the first experiment). With a temporally asynchronous audio-tactile stimulus the interaction gradually disappeared. We conclude that auditory and tactile sensory signals are integrated when they both provide redundant information in good temporal coherence.},
web_url = {http://www.twk.tuebingen.mpg.de/twk04/index.php},
event_name = {7th Tübingen Perception Conference (TWK 2004)},
event_place = {Tübingen, Germany},
state = {published},
author = {Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Drewing K{kdrewing}, Bouyer G, Maury V and Kheddar A}
}
@Poster{ 2532,
title = {Texture and Haptic Cues in Slant Discrimination: Measuring the Effect of Texture Type},
year = {2004},
month = {2},
volume = {7},
pages = {165},
abstract = {In a number of models of depth cue combination the depth percept is constructed via a weighted average combination of independent depth estimations. The influence of each cue in such an average depends on the reliability of the source of information [1,5]. In particular, Ernst and Banks (2002) formulate such combination as that of the minimum variance unbiased estimator that can be constructed from the available cues. We have observed systematic differences in slant discrimination performance of human observers when different types of textures were used as cue to slant [4]. If the depth percept behaves as described above, our measurements of the slopes of the psychometric functions provide the predicted weights for the texture cue for the ranked texture types. However, the results for slant discrimination obtained when combining these texture types with object motion are difficult to reconcile with the minimum variance unbiased estimator model [3]. This apparent failure of such a model might be explained by the existence of a coupling of texture and motion, violating the assumption of independence of cues. Hillis, Ernst, Banks, and Landy (2002) [2] have shown that while for between-modality combination the human visual system has access to the single-cue information, for within-modality combination (visual cues) the single-cue information is lost. This suggests a coupling between visual cues and independence between visual and haptic cues. Then, in the present study we combined the different texture types with haptic information in a slant discrimination task, to test whether in the between-modality condition these cues are combined as predicted by an unbiased, minimum variance estimator model. The measured weights for the cues were consistent with a combination rule sensitive to the reliability of the sources of information, but did not match the predictions of a statistically optimal combination.},
file_url = {/fileadmin/user_upload/files/publications/pdf2532.pdf},
web_url = {http://www.twk.tuebingen.mpg.de/twk04/index.php},
editor = {Bülthoff, H. H., H. A. Mallot, R. Ulrich, F. A. Wichmann},
event_name = {7th Tübingen Perception Conference (TWK 2004)},
event_place = {Tübingen, Germany},
state = {published},
author = {Rosas P{pedror}, Wichmann FA{felix}{Department Empirical Inference}, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Wagemans J}
}
@Poster{ 2514,
title = {The Quality of Feedback does not Affect the Rate of Visuomotor Adaptation},
year = {2004},
month = {2},
volume = {7},
pages = {81},
abstract = {Under many conditions, the human visuomotor system quickly adapts when confronted with spatially discrepant stimuli. This adaptability can easily be demonstrated with for example the change in pointing behavior when first wearing prism glasses. Here we asked whether the quality of feedback, that is its reliability, has an effect on the rate of adaptation. The hypothesis was that the system should adapt more quickly if the feedback was more reliable. To investigate this question we conducted two adaptation experiments: One pointing experiment (closed-loop), in which we measured the rate of adaptation to a lateral prismatic displacement, and a grasping experiment (open-loop), in which we measured adaptation to a size conflict of visually magnified objects. The experiments were conducted in three phases: A pre-adaptation phase to establish baseline performance, an adaptation phase in which the visuomotor conflict was introduced, and a post-adaptation phase to determine the after-effect. We determined the rate of adaptation from the change in visuomotor behavior during the second and third phases. In several conditions the reliability of feedback was manipulated. In the pointing experiment we varied the feedback reliability by blurring the target stimulus, which was a Gaussian blob of 10% contrast (sigma = 4, 32, 48 deg). In the grasping experiment we presented rectangular blocks of five different sizes for open-loop grasping. Here the feedback was manipulated by either adding noise to the visual display or by varying the haptic presentation time. The effectiveness of all feedback manipulations was assessed by measuring discrimination thresholds. To our surprise, in both experiments, closed-loop pointing and open-loop grasping, there was no significant effect of the reliability of feedback on the rate of adaptation. We conclude that the rate of visuomotor adaptation depends on the sensory estimate that determines the conflict but not on its reliability.},
web_url = {http://www.twk.tuebingen.mpg.de/twk04/index.php},
event_name = {7th Tübingen Perception Conference (TWK 2004)},
event_place = {Tübingen, Germany},
state = {published},
author = {Endress V{vera}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 2479,
title = {Texture and haptic cues in slant discrimination: Measuring the effect of texture type on cue combination},
journal = {Journal of Vision},
year = {2003},
month = {12},
volume = {3},
number = {12},
pages = {26},
abstract = {In a number of models of depth cue combination the depth percept is constructed via a weighted average combination of independent depth estimations. The influence of each cue in such average depends on the reliability of the source of information. (Young, Landy, & Maloney, 1993; Ernst & Banks, 2002.) In particular, Ernst & Banks (2002) formulate the combination performed by the human brain as that of the minimum variance unbiased estimator that can be constructed from the available cues.
Using slant discrimination and slant judgment via probe adjustment as tasks, we have observed systematic differences in performance of human observers when a number of different types of textures were used as cue to slant (Rosas, Wichmann & Wagemans, 2003). If the depth percept behaves as described above, our measurements of the slopes of the psychometric functions provide the predicted weights for the texture cue for the ranked texture types. We have combined these texture types with object motion but the obtained results are difficult to reconcile with the unbiased minimum variance estimator model (Rosas & Wagemans, 2003). This apparent failure of such model might be explained by the existence of a coupling of texture and motion, violating the assumption of independence of cues. Hillis, Ernst, Banks, & Landy (2002) have shown that while for between-modality combination the human visual system has access to the single-cue information, for within-modality combination (visual cues: disparity and texture) the single-cue information is lost, suggesting a coupling between these cues. Then, in the present study we combine the different texture types with haptic information in a slant discrimination task, to test whether in the between-modality condition the texture cue and the haptic cue to slant are combined as predicted by an unbiased, minimum variance estimator model.},
web_url = {http://www.journalofvision.org/3/12/26/},
event_name = {2003 Fall Vision Meeting of the Optical Society of America},
event_place = {Tucson, AZ, USA},
state = {published},
DOI = {10.1167/3.12.26},
author = {Rosas P{pedror}, Wichmann FA{felix}{Department Empirical Inference}, Ernst MO{marc}{Research Group Multisensory Perception and Action} and Wagemans J}
}
@Poster{ 2346,
title = {Integration of Force and Position Cues in Haptic Curvature Perception},
year = {2003},
month = {11},
volume = {44},
pages = {112},
abstract = {When one slides a finger across a surface with a bump on it, the finger follows the geometry of the bump,
providing positional cues for the shape. At the same time, the finger is opposed by forces related to the steepness of the bump. With a specific device, Robles-de-la-Torre and Hayward (2001) dissociated positional and force cues in the haptic perception of small-scale bumps and holes: Participants in this experiment reported feeling the shape
indicated by the force cues and not those indicated by the positional cues. We extended this research by systematically disentangling the contributions of these two cues to the perception of curvature. Using the PHANToM haptic device, we presented virtual curves, in which we
intermixed force and position cues related to curvatures between 0 and 16/m. Participants compared these with pseudonatural curves. Our results suggest that perceived curvature is a weighted average of both positional and force cues.},
web_url = {http://www.psychonomic.org/past-meeting.html},
event_name = {44th Annual Meeting of The Psychonomic Society},
event_place = {Vancouver, Canada},
state = {published},
author = {Drewing K{kdrewing} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ ErnstJ2004,
title = {Learning to Combine Arbitrary Signals from Vision and Touch},
year = {2003},
month = {11},
volume = {44},
pages = {1},
abstract = {Humans integrate visual and haptic size information in a statistically optimal fashion (Ernst & Banks, 2002). Combining such size estimates is reasonable, because, naturally, both these size cues are correlated. The purpose of this study is to investigate whether cue combination is learned on the basis of a correlation between cues. Therefore, we took naturally uncorrelated cues—the object’s
luminance (visual cue) and stiffness (haptic cue)—and trained 12 subjects for 1 h in an environment in which these cues were correlated. To test whether training had an effect, we compared subjects’ discrimination performance before and after training for two intermixed conditions: In one condition, the cues were consistent with the
correlation during training (congruent); in the other, the cues were anticorrelated relative to training (incongruent). We predict that discrimination performance becomes slightly better for stimuli with congruent cues and worse for stimuli with incongruent cues. In agreement with our prediction, we found a significant interaction between pre- and post-test for the two congruent and incongruent conditions (p < .001). This indicates that subjects picked up the correlation during training and learned to combine two arbitrary cues. We conclude that combination of cues can be learned on the basis of the statistics of their
co-occurrence.},
web_url = {http://www.psychonomic.org/past-meeting.html},
event_name = {44th Annual Meeting of The Psychonomic Society},
event_place = {Vancouver, Canada},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and J\"akel F{frank}{Department Empirical Inference}}
}
@Poster{ BanksE2003,
title = {A biologically plausible model of cue combination},
journal = {Journal of Vision},
year = {2003},
month = {10},
volume = {3},
number = {9},
pages = {31},
abstract = {There are now numerous demonstrations that different sources of sensory information contribute to a perceptual estimate in accordance with their statistical reliabilities. Specifically, when combining two or more sensory cues about an object property, the system weights the cues in proportion to their reciprocal variances. In so doing it minimizes the variance of the estimate of the object property. Of course, variances change from one object property to the next and from one situation to another. Does the brain have to calculate or learn the variances associated with each cue for each property and situation? We propose a biologically plausible model in which explicit calculation of variances (or weights) is unnecessary. Consider the combination of information from two senses. In the model there are two populations of neurons, one for each sense. Each neuron is characterized by its tuning function for the object property in question and by the statistics of its responses (modeled after V1 neurons). The distribution of response across each population indicates the most likely value of the object property and the uncertainty (according to that sense). Multiplication of these two distributions (point-by-point where the two populations are in registration concerning the object property being estimated) yields another distribution. The peak of this distribution (obtained by fitting a smooth function) is the model's estimate of the object property in question. The model's behavior is quite similar to a maximum-likelihood integrator for a wide variety of situations. When the difference between the two inputs is relatively small, the combined estimate shifts toward the input of lower variance and has lower variance than either input by itself. When the difference between the two inputs is large, the model exhibits statistical robustness. The model can be expanded to incorporate inputs from several sensory cues.},
web_url = {http://www.journalofvision.org/content/3/9/31.abstract},
event_name = {Third Annual Meeting of the Vision Sciences Society (VSS 2003)},
event_place = {Sarasota, FL, USA},
state = {published},
author = {Banks MS{marty} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
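Why the point-by-point multiplication scheme described above approximates a maximum-likelihood integrator can be seen from the Gaussian case (a generic illustration, not taken from the poster): the product of two Gaussian population profiles is again Gaussian,
\[
\mathcal{N}(s;\mu_1,\sigma_1^2)\;\mathcal{N}(s;\mu_2,\sigma_2^2)\;\propto\;
\mathcal{N}\!\left(s;\;\frac{\mu_1/\sigma_1^2 + \mu_2/\sigma_2^2}{1/\sigma_1^2 + 1/\sigma_2^2},\;
\Bigl(\tfrac{1}{\sigma_1^2} + \tfrac{1}{\sigma_2^2}\Bigr)^{-1}\right),
\]
so the peak of the product sits at the reliability-weighted mean and its width is reduced, without any explicit computation of weights or variances.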
@Poster{ ErnstJ2003_2,
title = {Learning to fuse unrelated cues},
journal = {Journal of Vision},
year = {2003},
month = {10},
volume = {3},
number = {9},
pages = {32},
abstract = {Humans integrate visual and haptic size information in a statistically optimal fashion (Ernst & Banks, 2002). That is, the perceived size is a weighted average of the individual estimates with weights proportional to their inverse variances. More importantly, the fused percept has lower variance than each individual estimate. Fusion of visual and haptic size estimates is reasonable because in the natural environment these cues to an object's size are highly correlated. The purpose of this study is to investigate whether cue fusion is learned based on the correlation between cues. Therefore, we took naturally uncorrelated cues — the luminance of an object (visual cue) and its stiffness (haptic cue) — and trained 6 subjects for approximately one hour in an environment where these cues were correlated. To test whether training had an effect we compared subjects' discrimination performance before and after training for two intermixed conditions: One condition in which the two cues were consistent with the correlation during training (congruent) and the other condition in which the two cues were anti-correlated relative to the training phase (incongruent). If training had an effect we would predict that the stimuli with congruent cues elicit an improvement in discrimination performance relative to the incongruent condition, because if the cues are fused after training the variance of the combined estimate should get lower. In agreement with our prediction we found a significant interaction between pre- and post-test for the two congruent and incongruent conditions (F[1,5]=20.3; p<0.01). This indicates that subjects indeed picked up the correlation in the training phase and fused the two cues. We conclude that fusion of cues can be learned on a relatively short timeframe based on the statistics of their co-occurrence.},
web_url = {http://www.journalofvision.org/content/3/9/32.abstract},
event_name = {Third Annual Meeting of the Vision Sciences Society (VSS 2003)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/3.9.32},
author = {Ernst MO{marc}{Research Group Multisensory Perception and Action} and J\"akel F{frank}{Department Empirical Inference}}
}
@Poster{ 2213,
title = {Learning to Fuse Unrelated Cues},
year = {2003},
month = {2},
volume = {6},
pages = {103},
abstract = {Fusion of different cues can improve the reliability of perceptual estimates. E.g., a more accurate size estimate can be achieved when visual and haptic size information is used in combination (Ernst & Banks, 2002, Nature, 429-433). To combine different cues the observer has to know which cues belong together (i.e., the correspondence problem has to be solved). Here we examine whether subjects can learn to fuse two arbitrary cues based on their statistical co-occurrence. To this end, we trained 6 subjects for about an hour with highly correlated stimuli that are usually uncorrelated, the luminance of an object (visual cue) and its stiffness (haptic cue). To test for learning, we measured subjects' discrimination performances before (pre-test) and after (post-test) training. The discrimination task had two intermixed conditions: One condition in which the cues were consistent with the correlation during training (congruent) and another condition in which they were anti-correlated (incongruent). If training had an effect we would predict that the stimuli with congruent cues elicit an improvement in discrimination performance relative to the incongruent condition, because if the cues were truly fused after training the variance of the combined estimate should be lower. In agreement with our prediction we found a significant interaction between pre- and post-test for the two congruent and incongruent conditions (F[1,5]=20.3; p<0.01). This indicates that subjects indeed picked up the correlation in the training phase and fused the two cues. We conclude that fusion of cues can be learned on a relatively short time-frame based on the statistics of their co-occurrence.},
web_url = {http://www.twk.tuebingen.mpg.de/twk03/},
editor = {Bülthoff, H. H., K. R. Gegenfurtner, H. A. Mallot, R. Ulrich, F. A. Wichmann},
event_name = {6. Tübinger Wahrnehmungskonferenz (TWK 2003)},
event_place = {Tübingen, Germany},
state = {published},
author = {J\"akel F{frank} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ 1633,
title = {Discriminating the odd: Boundaries of visual-haptic integration},
journal = {Journal of Vision},
year = {2002},
month = {11},
volume = {2},
number = {7},
pages = {402},
abstract = {We investigated the degree to which visual-haptic fusion occurs. If the nervous system uses a statistically optimal integration rule, the combined visual-haptic percept is a weighted average of the visual and haptic estimates and its variance is lower than that of either estimate alone. We used an oddity task to investigate whether the combined estimate is used in discrimination or whether independent visual and haptic estimates are used. Three horizontal bars were presented sequentially. Two of them were identical and had equal visual and haptic heights (standard stimulus). The third had a visual and/or haptic height differing from the standard (odd stimulus). Subjects indicated which of the three intervals contained the odd stimulus. If subjects used visual or haptic information independently without combining them, then discrimination would occur whenever the visual or haptic height in the odd stimulus differed from the height in the standard by more than the vision-alone or haptic-alone threshold. In contrast, if subjects relied on the combined visual-haptic estimate, discrimination should be most difficult when the visual and haptic heights differed in opposite directions from the standard's height (such that weighted averages of odd and standard stimuli are equal) and easiest when they differed in the same direction from the standard (weighted averages different). We found that discrimination was indeed most difficult when the weighted averages were the same and easiest when they differed. Thus, the fused visual-haptic percept is used for discrimination. However, if the conflict between the visual and haptic stimuli is too large, this difference in discrimination performance is not observed. In other words, visual-haptic fusion breaks with large conflicts. In some conditions, metameric behavior is observed: discrimination would be better if subjects shut the eyes or removed the hand from the bar.},
web_url = {http://journalofvision.com/2/7/402/},
event_name = {Second Annual Meeting of the Vision Sciences Society (VSS 2002)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/2.7.402},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Banks MS{martybanks}}
}
@Poster{ 1934,
title = {Screen cues to flatness do affect 3d percepts},
journal = {Journal of Vision},
year = {2002},
month = {11},
volume = {2},
number = {7},
pages = {297},
abstract = {3d displays on digital media are often perceived as different from the portrayed object or scene, even when the display creates the “correct” 2d retinal images. In principle, there are at least three depth cues created by digital displays that could contribute to such distortions: 1) inappropriate focus cues, 2) pixelization, and 3) inappropriate motion parallax during head movements. We measured the contribution of these inappropriate screen cues to perceived slant by varying independently the slant specified by the computer graphics algorithm (“computed slant”) and the physical slant of the CRT on which the stimuli were presented (“screen slant”). Planes with different computed and screen slants were presented (tilt = 0 deg) and observers indicated the amount of perceived slant. Precise spatial calibration ensured that retinal-image shapes, texture gradients, and disparity gradients were determined by only the computed slant. Observers were unaware of the screen slant from trial to trial. Across different experiments, we examined the influence of display type (monocular vs. binocular), screen distance (30–200 cm), head stabilization (bite bar, chin rest, and free), amount of slant, and conflict between computed and screen slant. Screen slant had a significant effect on perceived slant in a wide variety of conditions. The effect was larger in monocular than in binocular viewing conditions, at short distances, with head unstabilized, and at large screen slants. We used regression analyses to determine the effective weight given inappropriate screen cues across the various conditions. These results show that inappropriate screen cues can have a significant effect on 3d percepts and that the size of the effect depends strongly on viewing condition.},
web_url = {http://journalofvision.com/2/7/297/},
event_name = {Second Annual Meeting of the Vision Sciences Society (VSS 2002)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/2.7.297},
author = {Watt SJ, Banks MS{martybanks}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Zumer JM}
}
@Poster{ 1634,
title = {Variance predicts visual-haptic adaptation in shape perception},
journal = {Journal of Vision},
year = {2002},
month = {11},
volume = {2},
number = {7},
pages = {670},
abstract = {When people are exposed repeatedly to a conflict in visually and haptically specified shapes, they adapt and the apparent conflict is eventually eliminated. The inter-modal adaptation literature suggests that the conflict is resolved by adapting the haptic shape estimator. Another possibility is that both estimators adapt by amounts that depend on their relative variances. Thus, the visual estimator could adapt if its variance were high enough. Is relative reliability the better predictor of visual-haptic adaptation? We examined this by manipulating the variance of the visual signal during inter-modal adaptation and then measuring changes in the within-modal (vision-alone and haptics-alone) shape percepts. The stimulus was a 3D object with a rectangular front surface. It was specified visually by random-dot stereograms and haptically by PHANToM force-feedback devices. In pre- and post-tests, observers judged whether its front surface was taller or shorter than it was wide. For each modality, we found the aspect ratio that was perceptually a square. During adaptation, a conflict was created between the visually and haptically specified shapes by independently altering the visual and haptic aspect ratios of the front surface. The variance of the visual estimator (determined by dot number) was either low or high. We assessed the amount of visual and haptic adaptation by comparing pre- and post-test shape estimates. When the visual estimator's variance was low, essentially all of the adaptation occurred in the haptic estimator. When the visual estimator's variance was high, we observed visual and haptic adaptation. These results suggest that the relative reliability of visual and haptic estimators determines the relative amounts of visual and haptic adaptation.},
web_url = {http://journalofvision.com/2/7/670/},
event_name = {Second Annual Meeting of the Vision Sciences Society (VSS 2002)},
event_place = {Sarasota, FL, USA},
state = {published},
DOI = {10.1167/2.7.670},
author = {Girshick AR, Banks MS{martybanks}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Cooper R and Jacobs RA}
}
@Poster{ 1635,
title = {Integration of simultaneous visual and haptic information},
year = {2002},
month = {2},
volume = {5},
pages = {47},
abstract = {When a person looks at an object while exploring it with their hand, vision and touch
both provide useful information for estimating the properties of the object. Here, we
investigated the fusion of visual and haptic information and its limits. We propose that a
general principle, which minimizes variance in the final estimate, determines the degree
to which vision or haptics determines the integrated, visual-haptic percept. This principle
is realized by using maximum-likelihood estimation (MLE) to combine the inputs. To
investigate cue combination quantitatively, we first measured the variances associated
with visual and haptic estimation of height. Those measurements were then used to construct
the MLE integrator. The model and humans behaved very similarly in a visual-haptic
height discrimination task. Thus, the nervous system seems to combine visual and
haptic height information in a fashion quite similar to MLE integration. In a second study
we used an oddity task to investigate the breakdown of visual-haptic fusion. Three horizontal
bars were presented sequentially. Two of them were identical and had equal visual
and haptic heights (standard stimulus); the third had a visual and/or haptic height differing
from the standard (odd stimulus). Subjects indicated which of the three intervals contained
the odd stimulus. If subjects relied on the fused visual-haptic estimate, discrimination
should be most difficult when the weighted average of the visual and haptic heights
corresponds to the standard's height and easiest if both heights of the odd stimulus are
either bigger or smaller than the standard's height. In contrast, if subjects used the visual
or haptic information independently without fusing them, discrimination would occur
whenever either the visual or the haptic height in the odd stimulus differed noticeably
from the standard's height. We found that discrimination was indeed most difficult when
the weighted averages were the same and easiest when they differed. Thus, the fused
visual-haptic percept is used for discriminating between the stimuli. However, if the conflict
between the visual and haptic stimuli became too large, this difference in discrimination
performance was not observed anymore. In other words, visual-haptic fusion breaks
with large conflicts. Remarkably, we observed metameric behavior in some conditions.
That is, discrimination would be better if subjects simply shut their eyes or removed their
hand from the bar instead of using the fused visual-haptic information.},
web_url = {http://www.twk.tuebingen.mpg.de/twk02/},
editor = {Bülthoff, H. H., K. A. Gegenfurtner, H. A. Mallot, R. Ulrich},
event_name = {5. Tübinger Wahrnehmungskonferenz (TWK 2002)},
event_place = {Tübingen, Germany},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Banks MS{martybanks}}
}
@Poster{ 228,
title = {Haptic feedback affects visual perception of surfaces},
journal = {Perception},
year = {1999},
month = {8},
volume = {28},
number = {ECVP Abstract Supplement},
pages = {106},
abstract = {To derive an estimate of surface slant, the visual system combines information from several cues, each weighted according to its reliability. We asked whether
the weights change after providing haptic feedback consistent with one cue only. During the feedback phase of the experiment, subjects pushed a cube across
various planes. The cube and the planes were seen and felt. Visually the plane had texture and disparity gradients specifying different slants. Haptic sensations
for the cube and plane were provided with a force-feedback device (PHANToM); the haptic feedback for the plane was consistent with its texture gradient
only. Before and after the feedback phase, subjects made slant settings (without haptic feedback) from which we determined the weights assigned to texture
and disparity. The texture weight increased significantly from 25% to 38%. We also conducted two control experiments that showed that the change in weight
does not occur over time without the texture - disparity conflict and that the weight change was not caused by visual experience alone. We conclude that giving
haptic feedback consistent with one cue causes its weight to increase in a purely visual task. Thus, haptic feedback affects visual surface perception. Calibration
of the visual system is affected by visuomotor interaction.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v990187},
event_name = {22nd European Conference on Visual Perception},
event_place = {Trieste, Italy},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Banks MS{martybanks} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 227,
title = {Haptic feedback affects slant perception},
journal = {Investigative Ophthalmology & Visual Science},
year = {1999},
month = {5},
volume = {40},
number = {4},
pages = {802},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1999)},
event_place = {Fort Lauderdale, FL, USA},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Banks MS{martybanks} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 226,
title = {Visual and haptic recognition of objects: Effects of viewpoint},
journal = {Investigative Ophthalmology & Visual Science},
year = {1999},
month = {5},
volume = {40},
number = {4},
pages = {398},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1999)},
event_place = {Fort Lauderdale, FL, USA},
state = {published},
author = {B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Newell F{fiona}{Department Human Perception, Cognition and Action} and Tjan BS{tjan}{Department Human Perception, Cognition and Action}}
}
@Poster{ ErnstNTB1999,
title = {Visual and haptic recognition of objects: effects of transfer and viewpoint},
year = {1999},
month = {2},
pages = {103},
abstract = {We investigated the nature of object recognition in two sensory systems, namely the visual and haptic systems. Specifically, we investigated view dependent recognition performance in these systems.
Our stimuli were 3D, unfamiliar objects, which were constructed from six red Lego parts stacked randomly. The objects were presented to the subjects in a fixed orientation and position. Subjects learned four target objects either visually or haptically. Subjects then
performed an old/new recognition task either within the same modality as learning or in the other modality. Furthermore, the target objects were presented either in the same orientation as during the learning session or rotated by 180 deg. In Experiment 1 the objects were rotated about the vertical axis and in Experiment 2 we tested rotations about the horizontal or depth axis.
In all experiments we found an effect of view within both the visual and haptic systems: Objects were easier to recognize when there was no change in orientation from learning to test. That is, recognition performance dropped from 75% to 65%. However, when recognition was performed across modalities then recognition was better if the object was rotated but only when the rotation involved an exchange of the front and back of the object.
Our results suggest that orientation specificity is a common property of both the visual and haptic systems. Furthermore, the view of the object facing the observer is more salient for the visual system, whereas the side of the object, which is best accessible to the fingers and thus easier to explore, is more salient for the haptic system. With the stimuli we used this was the back side of the objects. This suggests that integration of information
across the fingers is equivalent to seeing an object from a particular view.},
web_url = {http://www.twk.tuebingen.mpg.de/twk99/},
event_name = {2. Tübinger Wahrnehmungskonferenz (TWK 99)},
event_place = {Tübingen, Germany},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}, Newell FN{fiona}{Department Human Perception, Cognition and Action}, Tjan BS{tjan}{Department Human Perception, Cognition and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 1630,
title = {Size Discrimination of seen and grasped objects and the effect of presentation time},
journal = {Perception},
year = {1998},
month = {8},
volume = {27},
number = {ECVP Abstract Supplement},
pages = {12-13},
abstract = {We investigated visuo-motor integration in grasping by studying haptic, visual, and cross-modal size discrimination. Our main interest concerned the effect of presentation time on discrimination performance and the differences between intramodal and cross-modal thresholds. The experiments were conducted in a virtual environment in which two force-feedback devices (PHANToM™) provided haptic information to the thumb and the index finger. Stereoscopically rendered objects were used for the visual presentation. In a two-interval forced-choice paradigm subjects had to determine which interval contained the larger object (we used cubes in all cases). Depending on condition, subjects either saw or felt each cube for a specified time. Feeling a cube required subjects to perform a two-finger grasp. The intramodal tasks were repeated with an appropriate mask between the two presentations; in these runs haptic masking consisted of randomly disturbing the finger span by the force-feedback devices. Intramodal thresholds (±4% visual-visual; ±7% haptic-haptic) were significantly smaller than cross-modal thresholds (±13% visual-haptic; ±14% haptic-visual). Gradually decreasing the presentation time in the intramodal conditions to less than 50 ms increased the thresholds monotonically, but significantly less so for the visual-visual condition (from ±4% to ±6%) than for the haptic-haptic condition (from ±7% to ±20%). We found no significant effect of masking on these thresholds. Visuo-motor adaptation studies have shown that the coordinate transformation from vision to touch exhibits a considerable amount of plasticity. We hypothesise that the continuous recalibration of this transformation during the experiment constitutes the reason for the inflated cross-modal thresholds. Furthermore, we conclude that acquiring precise size information is much slower in the haptic modality than it is in vision.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v980004},
event_name = {21st European Conference on Visual Perception},
event_place = {Oxford, UK},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, van Veen H-J{veen}{Department Human Perception, Cognition and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ NewellETB1998,
title = {Visual and haptic recognition of unfamiliar three-dimensional objects: effects of transfer},
journal = {Perception},
year = {1998},
month = {8},
volume = {27},
number = {ECVP Abstract Supplement},
pages = {135},
abstract = {We investigated whether the representation of objects is modality-independent or modality-specific for visual and haptic memory. Recent studies have shown that the representation of verbal material is modality-independent (Easton, Srinivas, and Green, 1997 Journal of Experimental Psychology: Learning, Memory, and Cognition 23 153 - 163). Other studies have reported modality-specific representations for familiar objects (Easton, Green, and Srinivas, 1997 Psychonomic Bulletin and Review 4 403 - 410). However, we argue that verbal material may be coded in a lexicon which is shared by the visual and haptic systems and therefore the results are equivocal. Also, property differences between common objects may promote better recognition performance within modalities suggesting differences in strategy not representation.
In our experiments objects were constructed from six parts (Lego™ standard bricks) arranged randomly in stacks. All objects were made of the same material with the same overall size and aspect ratio. We used a recognition memory paradigm to test subjects' ability to recognise objects that were studied either haptically or visually. At test, the objects were presented either within or transferred across modalities. A cost of transfer was expected if vision and haptics did not share the same representation. In experiment 1, subjects studied each target object, visually or haptically, for 30 s. We found a cost in transfer on recognition performance. However, this cost was mainly due to the high number of correct responses within the visual modality. In experiment 2, the study time for haptic recognition was increased so that haptic performance was equivalent to visual performance. Again, we found a cost of transfer. We conclude that the visual system and the haptic system do not share the same representations.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v980006},
event_name = {21st European Conference on Visual Perception},
event_place = {Oxford, UK},
state = {published},
author = {Newell FN{fiona}{Department Human Perception, Cognition and Action}, Ernst MO{marc}{Department Human Perception, Cognition and Action}, Tjan B{tjan}{Department Human Perception, Cognition and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 1629,
title = {Grasping with conflicting visual and haptic information},
journal = {Investigative Ophthalmology & Visual Science},
year = {1998},
month = {5},
volume = {39},
number = {4},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1998)},
event_place = {Fort Lauderdale, FL, USA},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, van Veen H-J{veen}{Department Human Perception, Cognition and Action}, Goodale MA and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 1627,
title = {Measuring Visuo-Motor Accuracy for Estimating Surface Orientation: Texture and Stereo cues compared},
journal = {Investigative Ophthalmology & Visual Science},
year = {1998},
month = {5},
volume = {39},
number = {4},
pages = {914},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1998)},
event_place = {Fort Lauderdale, FL, USA},
state = {published},
author = {Knill D, Kersten D{kersten}{Department Human Perception, Cognition and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Poster{ ErnstvGB1998,
title = {Greifen in Situationen mit visueller und haptischer Information in Konflikt},
year = {1998},
month = {2},
pages = {163},
abstract = {When the visually perceived geometry of space changes, for example by putting on optical spectacles, we have to adapt our visuomotor system accordingly in order to handle objects in that space successfully. We investigated adaptation processes of the visuomotor system by manipulating the size of objects in a virtual environment and analysing the grasping behaviour of our participants. The experimental setup consisted of a computer monitor mounted upside down and a mirror. The participants' task was to grasp real cubes of different sizes (2, 3, 4 and 5 cm) while three-dimensional computer-generated images of these cubes (virtual cubes) were presented to them via the mirror. The mirror was adjusted so that the images of the cubes coincided with the real cubes and the participants received no visual information other than the mirror image. At the beginning of each trial the images remained visible until the grasping movement was initiated. The entire grasping movement was recorded with an infrared camera [OPTOTRAK 3020], which provides the position of markers attached to the hand in all three dimensions. During the experiment we changed the size of the virtual cubes by 1 cm, either in small steps [1-2 mm] or in one large jump, thereby creating a conflict between the visual and the haptic information. When only a limited number of objects is used, as was the case here, the conflict can be resolved in two ways: either by recalibrating the visuomotor system or by means of a learned representation of the objects, provided the images can be unambiguously assigned to the objects. Whereas in the first experiment all cubes looked identical, in the second experiment the cubes were coloured differently according to their size in order to make it easier to assign an image to a cube size. In the third experiment we used two groups of cubes that differed in their visual texture. We introduced the size conflict in only one of the two groups to see whether a separate adaptation of the visuomotor system is possible for the two groups. In the first and second experiments adaptation occurred within a few grasps (cf. Gentilucci et al., Exp. Brain Res. 1995). Sudden size changes of 1 cm had less influence on the grasps when the cubes were colour-coded and thus easier to identify. The third experiment showed that adaptation for a separate group of objects is not possible. In none of the experiments did the participants notice the size conflict. We conclude from these results that adaptation of the visuomotor system occurs quickly and that the size representation of the objects plays a role in it. However, since a separate adaptation for one group of objects is not possible, the entire visuomotor system is presumably recalibrated. This hypothesis is currently being investigated in more detail.},
web_url = {http://www.twk.tuebingen.mpg.de/twk98/},
event_name = {1. Tübinger Wahrnehmungskonferenz (TWK 98)},
event_place = {Tübingen, Germany},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}, van Veen HAHC{veen}{Department Human Perception, Cognition and Action}, Goodale MNA and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 274,
title = {Greifen in Situationen mit visueller und haptischer Information im Konflikt},
year = {1998},
month = {2},
volume = {1},
pages = {163},
abstract = {When the visually perceived geometry of space changes, for example by putting on optical spectacles, we have to adapt our visuomotor system accordingly in order to handle objects in that space successfully. We investigated adaptation processes of the visuomotor system by manipulating the size of objects in a virtual environment and analysing the grasping behaviour of our participants. The experimental setup consisted of a computer monitor mounted upside down and a mirror. The participants' task was to grasp real cubes of different sizes (2, 3, 4 and 5 cm) while three-dimensional computer-generated images of these cubes (virtual cubes) were presented to them via the mirror. The mirror was adjusted so that the images of the cubes coincided with the real cubes and the participants received no visual information other than the mirror image. At the beginning of each trial the images remained visible until the grasping movement was initiated. The entire grasping movement was recorded with an infrared camera [OPTOTRAK 3020], which provides the position of markers attached to the hand in all three dimensions. During the experiment we changed the size of the virtual cubes by 1 cm, either in small steps [1-2 mm] or in one large jump, thereby creating a conflict between the visual and the haptic information. When only a limited number of objects is used, as was the case here, the conflict can be resolved in two ways: either by recalibrating the visuomotor system or by means of a learned representation of the objects, provided the images can be unambiguously assigned to the objects. Whereas in the first experiment all cubes looked identical, in the second experiment the cubes were coloured differently according to their size in order to make it easier to assign an image to a cube size. In the third experiment we used two groups of cubes that differed in their visual texture. We introduced the size conflict in only one of the two groups to see whether a separate adaptation of the visuomotor system is possible for the two groups. In the first and second experiments adaptation occurred within a few grasps (cf. Gentilucci et al., Exp. Brain Res. 1995). Sudden size changes of 1 cm had less influence on the grasps when the cubes were colour-coded and thus easier to identify. The third experiment showed that adaptation for a separate group of objects is not possible. In none of the experiments did the participants notice the size conflict. We conclude from these results that adaptation of the visuomotor system occurs quickly and that the size representation of the objects plays a role in it. However, since a separate adaptation for one group of objects is not possible, the entire visuomotor system is presumably recalibrated. This hypothesis is currently being investigated in more detail.},
web_url = {http://www.twk.tuebingen.mpg.de/twk98/},
editor = {B\"ulthoff HH, Fahle M, Gegenfurtner KA and Mallot HA},
event_name = {1. Tübinger Wahrnehmungskonferenz (TWK 98)},
event_place = {Tübingen, Germany},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, van Veen H-J{veen}{Department Human Perception, Cognition and Action}, Goodale MA and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 403,
title = {Can learning one grasp facilitate novel grasps?},
journal = {Perception},
year = {1997},
month = {8},
volume = {26},
number = {ECVP Abstract Supplement},
pages = {99},
abstract = {We investigated whether knowledge acquired during repetitive grasping can be used to grasp a similar object differing in position or size. We conducted two experiments using a mirror to project a computer-generated image to the location of an object to be grasped. Subjects saw the image until initiation of the grasp but were unable to see either their hand or the real object. The training phase consisted of repetitive grasps to a single cube in a fixed position displaying a corresponding image. In the test phase we used the same cube in different positions but displayed only a small position-marker (experiment 1). In experiment 2 subjects grasped for differently sized cubes in the trained position. To indicate size changes we displayed appropriately sized cubes at a different location. In the subsequent control phase of each experiment subjects saw fully rendered cubes in appropriate positions and sizes instead of the position-marker or size cue.
Performance in the test and control phase was similar for all measured grasp parameters, including maximum preshape aperture, maximum speed, and grasp duration. In experiment 2, in which the size of the cubes changed, variability in grasp duration (±110 ms vs ±40 ms) and maximum preshape aperture (±10 mm vs ±4 mm) was greater in the test phase than in the control phase, indicating increased uncertainty in grasping.
Had subjects learned a single motor routine they would not have been able to grasp so well for objects differing in position or size. Together with our previous results (Ernst et al, 1997, paper presented at ARVO) these findings indicate that subjects can make use of stored representations of an object's position and size to produce an appropriate grasp under open-loop conditions.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v970228},
event_name = {20th European Conference on Visual Perception},
event_place = {Helsinki, Finland},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, van Veen H-J{veen}{Department Human Perception, Cognition and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Poster{ 404,
title = {Can we use virtual objects in grasping studies?},
journal = {Investigative Ophthalmology & Visual Science},
year = {1997},
month = {5},
volume = {38},
number = {4},
pages = {1008},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1997)},
event_place = {Fort Lauderdale, FL, USA},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, van Veen H-J{veen}{Department Human Perception, Cognition and Action}, Goodale MA and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Thesis{ 176,
title = {Psychophysikalische Untersuchungen zur visuomotorischen Integration beim Menschen: visuelle und haptische Wahrnehmung virtueller und realer Objekte},
year = {2001},
ISBN = {3-932694-91-0},
pages = {158},
state = {published},
type = {PhD},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Thesis{ 1628,
title = {Untersuchungen zur Oligomeren Struktur des Bande 3-Proteins der menschlichen Erythrozytenmembran in Lösungen mit β-D-Dodecylmaltosid},
year = {1996},
state = {published},
type = {Diplom},
author = {Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Miscellaneous{ 4739,
title = {Mathematik der Wahrnehmung: Wendepunkte},
journal = {Akademische Mitteilungen zwölf: Fünf Sinne},
year = {2007},
pages = {32-37},
state = {published},
author = {Wichmann F{felix}{Department Empirical Inference} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Miscellaneous{ 3903,
title = {Multisensorische Wahrnehmung des Menschen},
journal = {Jahrbuch der Max-Planck-Gesellschaft},
year = {2005},
volume = {2005},
pages = {353-359},
abstract = {For perceiving the environment our brain uses multiple sources of sensory information derived from several different modalities, including vision, touch and audition. Some sources of sensory information derived from different modalities provide information about the same object property or event. For example, the size of an object can both be seen with the eyes and felt with the hands. This we call redundant sources of sensory information. In this report we will show how such redundant sources of sensory information are used by the human brain in order to interact with the environment in a goal-directed fashion. Further, we show what role prior knowledge about the statistical regularities in the world plays and how it can affect the process of perception. As a model for describing such somatosensory interactions we here use Bayesian Decision Theory (BDT).},
file_url = {/fileadmin/user_upload/files/publications/Jahrbuch2005-Buelthoff_3903[0].pdf},
web_url = {http://cms.mpg.de/mpg-export/mpg/website/bilderBerichteDokumente/dokumentation/jahrbuch/2005/biologische_kybernetik/forschungsSchwerpunkt/index.html},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Conference{ PariseE2014,
title = {What is the origin of cross-modal correspondences?},
year = {2014},
month = {6},
day = {12},
pages = {21},
abstract = {There are many seemingly arbitrary associations between different perceptual properties across modalities, such as the frequency of a tone and spatial elevation, or the color of an object and temperature. Such associations are often termed crossmodal correspondences, and they represent a hallmark of human and animal perception. The pervasiveness of crossmodal correspondences, however, is at odds with their apparent arbitrariness: why encode arbitrary mappings across sensory attributes in such a consistent manner? Aren't they misleading unless they represent some fundamental properties of the world around us? Over the last few years a number of studies have demonstrated that crossmodal correspondences are not arbitrary at all: they faithfully represent the statistics of natural scenes, which can be learnt over time and exploited to better process multisensory information. Here, we provide an overview of the most recent evidence, with a particular emphasis on the mapping between auditory pitch and spatial elevation, a celebrated case of crossmodal correspondence whose apparent arbitrariness has baffled neuroscientists for more than a century. By combining a direct measurement of environmental statistics with bioacoustics, psychophysics, and Bayesian modeling, we have recently shown that this mapping is not only present in the environment: it is also directly encoded in the bioacoustics, that is, in the shape of the human outer ear. Taken together, current evidence calls for a thorough characterization of environmental statistics as the most critical step towards a comprehensive understanding of the origins and the roles of cross-modal correspondences in perception, cognition, and action.},
web_url = {http://uvtapp.uvt.nl/fsw/spits.ws.frmShowpage?v_page_id=3859096609314761},
event_name = {15th International Multisensory Research Forum (IMRF 2014)},
event_place = {Amsterdam, The Netherlands},
state = {published},
author = {Parise C{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Research Group Multisensory Perception and Action}}
}
@Conference{ PariseE2013_2,
title = {Multisensory mechanisms for perceptual disambiguation: A classification image study on the stream-bounce illusion},
journal = {Perception},
year = {2013},
month = {8},
day = {29},
volume = {42},
number = {ECVP Abstract Supplement},
pages = {239},
abstract = {Sensory information is inherently ambiguous, and observers must resolve such ambiguity to infer the actual state of the world. Here, we take the stream-bounce illusion as a tool to investigate disambiguation from a cue-integration perspective, and explore how humans gather and combine sensory information to resolve ambiguity. In a classification task, we presented two bars moving in opposite directions along the same trajectory, meeting at the centre. Observers classified such ambiguous displays as streaming or bouncing. Stimuli were embedded in audiovisual noise to estimate the perceptual templates used for the classification. Such templates, the classification images, describe the spatiotemporal noise properties selectively associated to either percept. Results demonstrate that audiovisual noise strongly biased perception. Computationally, observers’ performance is well explained by a simple model involving a matching stage, where the sensory signals are cross-correlated with the internal templates, and an integration stage, where matching estimates are linearly combined. These results reveal analogous integration principles for categorical stimulus properties (stream/bounce decisions) and continuous estimates (object size, position…). Finally, the time-course of the templates reveals that most of the decisional weight is assigned to information gathered before the crossing of the stimuli, thus highlighting a predictive nature of perceptual disambiguation.},
web_url = {http://www.ecvp.uni-bremen.de/~ecvpprog/abstract738.html},
event_name = {36th European Conference on Visual Perception (ECVP 2013)},
event_place = {Bremen, Germany},
state = {published},
DOI = {10.1068/v130738},
author = {Parise CV{cesare} and Ernst M{marc}{Department Human Perception, Cognition and Action}}
}
@Conference{ ErnstR2013,
title = {Does causal binding affect temporal recalibration?},
journal = {Multisensory Research},
year = {2013},
month = {6},
day = {6},
volume = {26},
number = {1},
pages = {48},
abstract = {Temporal recalibration has been shown for multisensory stimuli that are passively perceived, such as visual–haptic or visual–auditory events. We have recently proposed models based on Bayesian inference showing that these recalibration mechanisms are in line with optimal estimation schemes. Similarly, temporal recalibration has been shown for sensorimotor events. Sensorimotor events are unique in that, when acting on the world, we are the cause of the changes that occur and that we perceive. This imposes a unique temporal order on the cause–effect relationship between the action and the subsequent percept of the change. In some recent studies we investigated how this cause–effect relationship impacts sensorimotor temporal recalibration.},
web_url = {http://booksandjournals.brillonline.com/content/10.1163/22134808-000s0030},
event_name = {14th International Multisensory Research Forum (IMRF 2013)},
event_place = {Jerusalem, Israel},
state = {published},
DOI = {10.1163/22134808-000S0030},
author = {Ernst M{marc} and Rohde M{marohde}}
}
@Conference{ pariseE2013,
title = {Multisensory mechanisms for perceptual disambiguation: A classification image study on the stream-bounce illusion},
year = {2013},
month = {6},
day = {6},
volume = {14},
abstract = {Sensory information is inherently ambiguous, and a given signal can in principle correspond to infinite states of the world. A primary task for the observer is therefore to disambiguate sensory information and accurately infer the actual state of the world.
Here, we take the stream-bounce illusion as a tool to investigate perceptual disambiguation from a cue-integration perspective, and explore how humans gather and combine sensory information to resolve ambiguity.
In a classification task, we presented two bars moving in opposite directions along the same trajectory meeting at the centre. We asked observers to classify such ambiguous displays as streaming or bouncing. Stimuli were embedded in dynamic audiovisual noise, so that through a reverse correlation analysis, we could estimate the perceptual templates used for the classification. Such templates, the classification images, describe the spatiotemporal statistical properties of the noise, which are selectively associated to either percept. Our results demonstrate that the features of both visual and auditory noise, and interactions thereof, strongly biased the final percept towards streaming or bouncing.
Computationally, participants' performance is explained by a model involving a matching stage, where the perceptual system cross-correlates the sensory signals with the internal templates, and an integration stage, where matching estimates are linearly combined to determine the final percept. These results demonstrate that observers use analogous MLE-like integration principles for categorical stimulus properties (stream/bounce decisions) as they do for continuous estimates (object size, position, etc.).
Finally, the time-course of the classification images reveals that most of the decisional weight for disambiguation is assigned to information gathered before the physical crossing of the stimuli, thus highlighting the predictive nature of perceptual disambiguation.},
web_url = {http://shoreserv.mcmaster.ca/IMRF/ocs5/index.php/imrf/2013/paper/view/159},
event_name = {14th International Multisensory Research Forum (IMRF 2013)},
event_place = {Jerusalem, Israel},
state = {published},
author = {Parise CV{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
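The two classification-image abstracts above (Parise and Ernst, ECVP and IMRF 2013) describe a two-stage model: a matching stage that cross-correlates the noisy audiovisual signals with internal templates, and an integration stage that combines the matching outputs linearly into a stream/bounce decision. The sketch below only illustrates that idea with assumed names and toy data (the templates, weights, and noise are hypothetical); it is not the authors' analysis code.

```python
import numpy as np

def match(signal, template):
    """Matching stage: cross-correlate a noisy signal with an internal
    template and return the peak correlation as the matching evidence."""
    s = signal - signal.mean()          # remove offsets so the correlation
    t = template - template.mean()      # reflects shape, not baseline level
    return float(np.correlate(s, t, mode="full").max())

def stream_bounce_decision(visual_signal, auditory_signal,
                           v_templates, a_templates, w_v=0.5, w_a=0.5):
    """Integration stage: linearly combine visual and auditory matching
    evidence for 'stream' vs 'bounce' and report the stronger percept."""
    evidence = {
        label: w_v * match(visual_signal, v_templates[label])
             + w_a * match(auditory_signal, a_templates[label])
        for label in ("stream", "bounce")
    }
    return max(evidence, key=evidence.get), evidence

# Toy demonstration with arbitrary templates and random noise.
rng = np.random.default_rng(0)
t = np.linspace(0.0, 1.0, 100)
v_templates = {"stream": np.sin(2 * np.pi * t), "bounce": np.cos(2 * np.pi * t)}
a_templates = v_templates                     # assume shared template shape
percept, evidence = stream_bounce_decision(
    rng.normal(size=100), rng.normal(size=100), v_templates, a_templates)
print(percept, evidence)
```

The linear combination of matching evidence is the same weighted-averaging scheme the abstracts relate to MLE-style cue integration for continuous estimates.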
@Conference{ HartcherO039BrienDE2013,
title = {A sense of time: Non-linear distortions in perceived time across the senses},
journal = {Multisensory Research},
year = {2013},
month = {6},
volume = {26},
number = {1},
pages = {139},
abstract = {Perceived time is not veridical but distorted, and it differs across the senses. Here we ask which points in the perception of a temporal event contribute most to these multisensory distortions. To this end, we investigated perceptual estimates of temporal landmarks (onset, peak amplitude, and offset) for a Gaussian standard signal, and examined how the perception of these landmarks differs across vision and audition. Participants were asked to compare the onset, peak and offset landmarks of the long visual/auditory standard stimuli to short spike-like stimuli in vision or audition. Four combinations were tested: V–v, A–a, V–a, A–v. Results demonstrate that the visual standard stimuli were perceived as shorter than the auditory ones. More interestingly, we found a compression of perceived duration for onset-peak intervals compared to peak-offset intervals. This compression effect was more pronounced in the visual modality. We compared the distortion results to those obtained with a second signal envelope type and demonstrated the same pattern of distortions for this temporally more veridical envelope. However, the delayed onset, advanced peak and offset were the same across the two signals. The differences in perceived duration can potentially be used to explain multisensory illusions such as the flash-lag effect and perceived crossmodal asynchronies. Such distortions can be accounted for by simulated neural response functions.},
web_url = {http://booksandjournals.brillonline.com/content/10.1163/22134808-000s0103},
event_name = {14th International Multisensory Research Forum (IMRF 2013)},
event_place = {Jerusalem, Israel},
state = {published},
DOI = {10.1163/22134808-000S0103},
author = {Hartcher-O'Brien J{jhartcher}, Di Luca M{max} and Ernst MO{marc}}
}
@Conference{ DwarakanathPHE2012,
title = {Motion parallax serves as an independent cue in sound source disambiguation},
year = {2012},
month = {11},
volume = {13},
pages = {6},
abstract = {In the absence of dominant cues to the distance of a sound source from the observer, estimating absolute or relative distance becomes difficult. Motion parallax may contribute to this estimation; however, its role as an independent cue has not yet been investigated. To address this issue, we designed an experiment in which the distance of the sound source varied logarithmically along the depth plane of the observer, distance-related loudness cues were eliminated using perceptual loudness equalization, and subjects moved laterally to and fro while the sounds were generated under three conditions: simultaneous playback, sequential playback, and simultaneous playback of phase-interrupted sounds. With sequential presentation of the low and high sounds, subjects showed a substantial improvement in distance estimates relative to the baseline static condition. Improvement was also observed for the simultaneous phase-interrupted sound condition. Here we demonstrate for the first time the existence of auditory motion parallax from lateral self-motion and show that it aids distance estimation of sound position. Interestingly, a bias to perceive low-frequency sounds as farther away was also observed. Auditory depth perception is improved by lateral observer motion, which alters the inter-aural difference cues available.},
web_url = {http://www.neuroschool-tuebingen-nena.de/},
event_name = {13th Conference of the Junior Neuroscientists of Tübingen (NeNA 2012)},
event_place = {Schramberg, Germany},
state = {published},
author = {Dwarakanath A{adwarakanath}, Parise C{cesare}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Hartcher-O'Brien J{jhartcher}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Conference{ 6444,
title = {Seeing ahead: adaptation to delays in visual feedback recalibrates visuomotor simultaneity perception},
journal = {Perception},
year = {2010},
month = {8},
volume = {39},
number = {ECVP Abstract Supplement},
pages = {66},
abstract = {The human capacity to adjust perceptually and behaviourally to spatial perturbations (eg, prism goggles) has fascinated researchers for a long time. To study whether such perceptual adaptation through sensorimotor skill learning also occurs in the temporal domain, we trained 10 participants on a visuomotor control task with feedback delays. Before and after adaptation, simultaneity perception was tested in a separate visuomotor temporal order judgment task (subject motion before or after visual stimulus?). Participants steered a moving dot through a maze with a stylus/graphics tablet for ca. 30 minutes with a 200 ms visual feedback delay. Over training, the point of subjective simultaneity (PSS) shifted 45±7 ms towards perceiving the visual stimulus first. No PSS shift occurred in the no-delay control group (two-sample t-test: p<0.001). A negative aftereffect in task performance (drop from 0.86 to 0.32) was found in the experimental but not in the control group (sign test: p=0.002); its magnitude tended to correlate with the PSS shift. We conclude that adaptation to feedback delays in a specific visuomotor control task leads to a more general recalibration of perceived visuomotor simultaneity, a result with potential relevance for human-computer interaction in the presence of transmission delays.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v100097},
event_name = {33rd European Conference on Visual Perception},
event_place = {Lausanne, Switzerland},
state = {published},
author = {Rohde M{marohde}{Research Group Multisensory Perception and Action}, van Dam L{vandam}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Conference{ 5608,
title = {The CyberWalk Platform: Human-Machine Interaction Enabling Unconstrained Walking through VR},
year = {2008},
month = {10},
day = {24},
number = {12},
abstract = {In recent years, Virtual Reality (VR) has become increasingly realistic and immersive. Both the visual and auditory rendering of virtual environments have been improved significantly, thanks to developments in both hardware and software. In contrast, the possibilities for physical navigation through virtual environments (VE) are still relatively rudimentary. Most commonly, users can ‘move’ through high-fidelity virtual environments using a mouse or a joystick. Of course, the most natural way to navigate through VR would be to walk. For small-scale virtual environments one can simply walk within a confined space. The VE can be presented by a cave-like projection system, or by means of a head-mounted display combined with head-tracking. For larger VEs, however, this quickly becomes impractical or even impossible.},
file_url = {fileadmin/user_upload/files/publications/The_Cyberwalk_platform_HMI_enabling_unconstrained_walking_through_VR.pdf},
web_url = {http://wpage.unina.it/agodesa/workshop.html},
event_name = {First Workshop for Young Researchers on Human-friendly robotics},
event_place = {Napoli, Italy},
state = {published},
author = {Robuffo Giordano P{robu_pa}{Department Human Perception, Cognition and Action}, Souman JL{souman}{Department Human Perception, Cognition and Action}, Mattone R, Luca AD, Ernst MO{marc}{Department Human Perception, Cognition and Action} and B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action}}
}
@Conference{ 5778,
title = {Multisensory perception during locomotion},
journal = {International Journal of Psychology},
year = {2008},
month = {7},
day = {22},
volume = {43},
number = {3-4},
pages = {182},
abstract = {A multitude of sensory signals and perceptual processes are involved in the control of human walking. In turn, human walking behaviour also has a profound effect on the way we perceive the world. To investigate the interaction between multisensory perception and locomotion we constructed different locomotion environments containing a treadmill and a visualization system. We focus here on the perception of visual motion and its interaction with walking speed. In several studies we found that motion perception is increased for high visual speeds and decreased for low visual speeds. The results will be discussed in terms of multisensory integration.},
web_url = {http://www.icp2008.de/},
event_name = {XXIX. International Congress of Psychology (ICP 2008)},
event_place = {Berlin, Germany},
state = {published},
DOI = {10.1080/00207594.2008.10108484},
author = {Ernst M{marc}{Research Group Multisensory Perception and Action} and Souman JL{souman}{Research Group Multisensory Perception and Action}}
}
@Conference{ 5173,
title = {Amodal Multimodal Integration},
year = {2008},
month = {7},
volume = {9},
pages = {285},
abstract = {Recently it has been shown that congruent visual and haptic signals are integrated in a statistically optimal fashion. Spatial separation between the signals can preclude this integration. Here we investigate whether optimal integration occurs between an amodally completed visual stimulus and its haptic counterpart. Thus, we ask whether integration occurs despite the sensory information not being derived from the same spatial location. This may indicate that subjects inferred that the visually specified parts of the stimulus and the haptic information have a common cause and thus should be integrated. The visual stimulus was a disparity-defined bar that was partially occluded (amodal completion condition). The bar could also be touched behind the occluder using two fingers. Subjects' task was to discriminate the size of two successively presented bars using a 2-IFC paradigm, where one interval contained conflicting haptic and visual information. Performance in the amodal completion condition was not different from a condition in which the occluder was removed (visual-haptic condition). Both conditions were consistent with an optimal integration strategy. More interestingly, integration deviated from optimality when we introduced a slight modification to the visual stimulus – small gaps between the bar and the occluder (gap condition). This manipulation interfered with the amodal completion process and consequently subjects relied almost completely on the haptic information for discriminating the size of the bars. These findings suggest that visual and haptic information can be combined optimally even when visual information is not directly specified by sensory information, but results from amodal completion. In conclusion, it seems that the perceptual system determines when to combine visual and haptic information based on the likelihood the signals have of belonging to the same object (i.e. if there is a causal relationship between the signals) and not only on signal co-location.},
web_url = {http://imrf.mcmaster.ca/IMRF/2008/pdf/FullProgramIMRF08.pdf},
event_name = {9th International Multisensory Research Forum (IMRF 2008)},
event_place = {Hamburg, Germany},
state = {published},
digital = {1},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Adams W}
}
@Conference{ 5064,
title = {Recalibration of Audiovisual Synchrony: What is changing?},
year = {2008},
month = {3},
day = {4},
volume = {50},
abstract = {Both physical and physiological transmission times can differ between audition and vision. Under certain conditions, the brain reduces perceived asynchrony by adapting to this temporal discrepancy. In two experiments we investigated whether this recalibration is specific to auditory and visual stimuli, or whether other modality combinations (audiotactile, visuotactile) are affected as well.
We presented asynchronous audiovisual signals, with either the auditory or the visual signal leading. Then, using temporal order judgments, we measured observers' points of subjective simultaneity for three modality combinations. Results indicate an adjustment of perceived simultaneity for the audiovisual and the visuotactile modality pairs. We conclude that audiovisual adaptation is the result of a change in the processing latencies of visual events. In a second experiment, we corroborate this finding: we demonstrate that reaction times to visual signals, but not to tactile or auditory signals, change as a result of audiovisual recalibration.},
web_url = {http://www.teap.de/index.php/teap/marburg2008},
event_name = {50. Tagung Experimentell Arbeitender Psychologen (TeaP 2008)},
event_place = {Marburg, Germany},
state = {published},
author = {Machulla T{tonja}{Research Group Multisensory Perception and Action}{Research Group Multisensory Perception and Action}, Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Conference{ 4517,
title = {The time course of multimodal percepts induced by reliability changes},
year = {2007},
month = {7},
volume = {8},
number = {105},
abstract = {A desirable goal in combining multimodal information is to maximize the reliability of the percept. This may be done using optimal strategies, where unimodal signals are weighted by their relative reliabilities. In real-world situations, stimulus conditions frequently change and with that the reliability of the signals. Here we ask whether the system takes such changes in reliability into account by adjusting the weights online in the McGurk effect.
Subjects were presented with an audiovisual recording of an actor's face producing a series of syllables. Such syllables were composed either of consistent multimodal information, or of an auditory /ba/ and a lip movement of /ga/. Subjects' task was to continuously report which syllable they perceived. We varied the reliability of the visual information by changing the visibility of the face.
With inconsistent multimodal information, we found that increased reliability of the visual signal biased perception towards the illusory /da/ percept whereas decreased reliability biased perception towards the auditory /ba/. However, changes in reliability had perceptual consequences only after a delay of 3-4 seconds. From this we concluded that the reliability estimate of the sensory signals is not instantaneous but continuously updated with a time constant of a few seconds.
FP6 EC project ImmerSence (IST-2006-02714)},
web_url = {http://imrf.mcmaster.ca/IMRF/2007/viewabstract.php?id=105},
event_name = {8th International Multisensory Research Forum (IMRF 2007)},
event_place = {Sydney, Australia},
state = {published},
digital = {1},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Conference{ 4632,
title = {Timing in the perception of reliability and signal changes},
year = {2007},
month = {7},
event_name = {The University of Queensland},
event_place = {Brisbane, Australia},
state = {published},
digital = {1},
author = {Di Luca M{max}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Conference{ AdamsGe2006,
title = {Learning a new prior: Light from above},
journal = {Perception},
year = {2006},
month = {8},
volume = {35},
number = {ECVP Abstract Supplement},
pages = {115},
abstract = {To interpret complex and ambiguous visual input, the visual system uses prior knowledge, or assumptions about the world. These 'priors' could be hard-wired, or learnt in response to statistical regularities in the environment. Here, we consider the 'light from above' prior used by the visual system to extract shape from shading. Observers viewed monocular disks with shading gradients at various orientations. Reported shape (convex or concave) as a function of stimulus orientation was used to recover each observer's assumed light position. During training, observers also 'touched' the disks. The haptic (felt) shape of the training stimuli was consistent with a light source shifted by ±30° from the observer's original assumed light position. After training, observers again judged the stimulus shape from purely visual information. Additionally, observers made lightness judgments of a Mach-card type stimulus, before and after haptic training with the concave/convex disk stimuli. Initially, our observers assumed a light position that was roughly overhead. However, after haptic feedback, observers learned to use a shifted light direction for their prior. Importantly, this learning was not specific to the trained task, but generalised such that it affected visual perception in a separate lightness judgment task.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v060042},
event_name = {29th European Conference on Visual Perception},
event_place = {St. Petersburg, Russia},
state = {published},
author = {Adams WJ, Graf EW and Ernst MO{marc}{Research Group Multisensory Perception and Action}}
}
@Conference{ BurgeEB2006,
title = {Modeling the dynamics of visuo-motor adaptation behavior with a Kalman Filter},
year = {2006},
month = {6},
volume = {7},
pages = {42},
abstract = {When the introduction of a prism makes visually guided reaching biased and inaccurate, adaptation occurs to restore accuracy. Bias in everyday life usually accumulates from a series of small changes, a process well simulated with a random walk. If bias were the only source of error, recalibration would be simple: correct the last error. But reaching behavior is also subject to random error. How does the visuo-motor system balance the need to filter random error with the need to adapt to time-varying bias? We investigated whether the Kalman filter, the optimal algorithm for this problem, models the dynamics of visuo-motor adaptation. The filter predicts that adaptation rate will be determined by the relative variances of current measurements and changing bias: rate should decrease with feedback variance and increase with variance in bias. Subjects pointed rapidly with an unseen hand to a brief visual target. Visual feedback indicated the endpoint of the motor movement. Feedback variance was increased with blur. The relationship between visual feedback location and the movement endpoint was altered with a random walk. Trial-by-trial pointing was measured. Subjects performed with a high level of efficiency and responded to changes in relative variances as predicted by the Kalman filter.},
web_url = {http://imrf.mcmaster.ca/IMRF/2006/viewabstract.php?id=184&symposium=0},
event_name = {7th International Multisensory Research Forum (IMRF 2006)},
event_place = {Dublin, Ireland},
state = {published},
author = {Burge J, Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Banks MS{martybanks}}
}
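The Burge, Ernst and Banks abstract above casts trial-by-trial visuomotor adaptation as Kalman filtering: the adaptation rate (the Kalman gain) should fall as feedback variance grows and rise as the variance of the drifting bias grows. The following is a minimal one-dimensional sketch of that standard filter; the parameter names (`q_bias`, `r_feedback`) and the simulated random walk are illustrative assumptions, not values or code from the study.

```python
import numpy as np

def kalman_adaptation(feedback_errors, q_bias, r_feedback):
    """Track a slowly drifting visuomotor bias from noisy visual feedback.

    q_bias     : variance of the random-walk change in bias per trial
    r_feedback : variance of the visual feedback (grows with blur)
    Returns per-trial bias estimates and the Kalman gains (adaptation rates).
    """
    bias_hat, p = 0.0, 1.0               # initial estimate and its uncertainty
    estimates, gains = [], []
    for y in feedback_errors:
        p += q_bias                      # predict: the bias may have drifted
        k = p / (p + r_feedback)         # gain = trial-by-trial adaptation rate
        bias_hat += k * (y - bias_hat)   # correct toward the observed error
        p *= (1.0 - k)
        estimates.append(bias_hat)
        gains.append(k)
    return np.array(estimates), np.array(gains)

# Illustrative simulation: a random-walk bias observed through noisy feedback.
rng = np.random.default_rng(1)
true_bias = np.cumsum(rng.normal(0.0, 0.1, 200))     # drifting bias
feedback = true_bias + rng.normal(0.0, 0.5, 200)     # blurred visual feedback
est, gains = kalman_adaptation(feedback, q_bias=0.01, r_feedback=0.25)
print(gains[-1])   # steady-state gain: smaller when r_feedback is larger
```

The steady-state gain captures the prediction tested in the poster: more feedback blur (larger `r_feedback`) lowers the gain and slows adaptation, while a more volatile bias (larger `q_bias`) raises it.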
@Conference{ 4074,
title = {Tactile suppression and visual attention: Effects on tactile discrimination performance},
year = {2006},
month = {6},
volume = {7},
pages = {50},
abstract = {The aim was to investigate tactile discrimination performance under various active and passive conditions and explore the influence of visual information. Participants had to discriminate the direction of lateral pin strokes on their fingertip under three conditions. In one condition (static) only tactile stimulation was provided. In a second condition (active) the shear force device was mounted on a kinesthetic feedback device so that tactile stimulation was accompanied by active arm movements. In a third condition (passive) the arm was moved passively using the kinesthetic device while subjects performed the discrimination task. In this first experiment vision was not controlled. Therefore, to investigate the influence of vision on tactile discrimination performance, participants had to perform the tactile discrimination task in the active condition with either direct gaze on their hand, gaze on a live image of their hand, or without sight of their hand. Results show that tactile discrimination performance was higher in the static compared to the active condition. Moreover, participants performed better when they were gazing at their hand compared to the no-sight condition. We conclude that active movement impairs tactile discrimination performance. However, visual spatial attention can compensate to some degree for this loss of tactile sensibility.},
file_url = {/fileadmin/user_upload/files/publications/IMRF_2006_abstract_final_[0].pdf},
web_url = {http://imrf.mcmaster.ca/IMRF/2006/viewabstract.php?id=172},
event_name = {7th International Multisensory Research Forum (IMRF 2006)},
event_place = {Dublin, Ireland},
state = {published},
author = {Vitello MP{vitello}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Conference{ 4083,
title = {There can be only one! Integrating vision and touch at different egocentric locations},
year = {2006},
month = {6},
volume = {7},
pages = {44-45},
abstract = {Ernst and Banks (2002) showed that humans integrate visual and haptic signals in a statistically optimal fashion. Integration seems to be broken if there is a spatial discrepancy between the signals (Gepshtein et al., 2005). Does knowledge that two signals belong to the same object facilitate integration even when they are presented at discrepant locations? In our experiment, participants had to judge the shape of visual-haptic objects. In one condition, visual and haptic object information was presented at the same location, whereas in the other condition there was a spatial offset between the two information sources; however, subjects knew that the signals belonged together. In both conditions, we introduced a slight conflict between the visually and haptically perceived shape and asked participants to report the felt (seen) shape. If integration breaks due to the spatial discrepancy, we expect subjects' percept to be less biased by the visual (haptic) information. We found that in both conditions the shape percept was in-between the haptically and visually specified shapes and did not differ significantly between conditions. This finding suggests that multimodal signals are combined if observers have reason to assume that they belong to the same event, even when there is a spatial discrepancy.},
web_url = {http://imrf.mcmaster.ca/IMRF/2006/viewabstract.php?id=59},
event_name = {7th International Multisensory Research Forum (IMRF 2006)},
event_place = {Dublin, Ireland},
state = {published},
author = {Helbig HB{helbig}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Conference{ ErnstBD2006,
title = {Vision and touch are automatically integrated for the perception of sequences of events},
year = {2006},
month = {6},
volume = {7},
pages = {14},
abstract = {The purpose of the present experiment was to investigate the integration of sequences of visual and tactile events. Participants were presented with sequences of visual flashes and tactile taps simultaneously and instructed to count either the flashes (session 1) or the taps (session 2). The number of flashes could differ from the number of taps by ±1. For both sessions, the perceived number of events was significantly influenced by the number of events presented in the task-irrelevant modality. Touch had a stronger influence on vision than vision on touch. Interestingly, touch was the more reliable of the two modalities – less variable estimates when presented alone. For both sessions, the perceptual estimates were less variable when stimuli were presented in both modalities than when the task-relevant modality was presented alone. These results indicate that even when one signal is explicitly task-irrelevant, sensory information tends to be automatically integrated across modalities. They also suggest that the relative weight of each sensory channel in the integration process depends on its relative reliability. The results are described using a Bayesian probabilistic model for multimodal integration that accounts for the coupling between the sensory estimates.},
web_url = {http://imrf.mcmaster.ca/IMRF/2006/viewabstract.php?id=191&symposium=0},
event_name = {7th International Multisensory Research Forum (IMRF 2006)},
event_place = {Dublin, Ireland},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Bresciani J-P{bresciani}{Department Human Perception, Cognition and Action} and Dammeier F{dammeier}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Conference{ ErnstBB2005,
title = {Resolving visual-tactual incongruity depends on sensory reliability},
year = {2005},
month = {6},
volume = {6},
pages = {15},
abstract = {The visuomotor system recalibrates when visual and motor maps are in conflict, bringing the maps back into correspondence. For recalibration to occur, a conflict has to be detected. Here we investigate the effect of signal reliability on the rate of recalibration.
In a first study we showed that the rate of recalibration in a one-dimensional visually guided pointing task depends on the uncertainty of the feedback: faster recalibration with less uncertainty. We further examined two-dimensional recalibration and how the specific form of visual feedback affects it. Subjects pointed with an unseen hand to a brief visual target. Visual feedback was given indicating where the point landed. We introduced a constant incongruity between pointing (tactual) and feedback (visual) location and examined the changes in pointing as the subject recalibrated. With this task we asked whether differential vertical and horizontal uncertainty in the visual feedback affects recalibration rate differentially, or whether rate is determined by the total uncertainty. We also varied feedback uncertainty in two ways. (1) We blurred the visual feedback, thereby reducing its localizability; in this condition, uncertainty could be determined on-line from one feedback stimulus. (2) We introduced random trial-by-trial perturbations in the feedback; in this condition, uncertainty had to be learned over time. In both cases, the distributions determining the vertical and horizontal uncertainties were 2D Gaussians.
Recalibration profiles (changes over time in the point location relative to the visual feedback) changed only in response to changes in localizability. Recalibration was slowest in the direction of greatest uncertainty when uncertainty was due to blur, but rate was unaffected by trial-by-trial variation. This means that subjects do not estimate uncertainty over time in order to adjust reaching. Rather, they adjust trial by trial based mostly on feedback from the previous trial.},
web_url = {http://imrf.mcmaster.ca/IMRF/2005/viewabstract.php?id=11},
event_name = {6th International Multisensory Research Forum (IMRF 2005)},
event_place = {Trento, Italy},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}, Burge J{jburge} and Banks MS{martybanks}}
}
@Conference{ Ernst2004,
title = {Optimal Integration of Multimodal Information: Conditions and Limits},
year = {2004},
month = {6},
volume = {5},
abstract = {The human brain uses multiple sources of sensory information to construct a representation of the environment. For example, when feeling objects, the eyes and hands both simultaneously provide relevant information about an object's size or shape. The visual system estimates shape using binocular disparity, perspective projection, and many other signals. The hands supply haptic shape information by means of tactile and proprioceptive signals. Naturally, no sensory apparatus is perfect. That is, there might be a small measurement error or noise in the transmission of the neural signals. In consequence, the signals obtained from the same object property, whether from different modalities or from different signals within the same modality, do not necessarily agree. In other words, due to the measurement uncertainty, two estimates of the same object property will never give rise to exactly the same sensory estimate. Therefore, the question arises of how the brain handles these potentially discrepant signals. Does it prefer one signal over others and discard information from non-preferred signals? Or does the brain process signals in a more sensible way by finding a reasonable compromise between all the available signals? We addressed these questions in a recent series of experiments by exploring how the brain combines signals about an object's size from the visual and haptic modality. The potential advantage of combining information across signals is noise reduction, i.e., a decrease in variance in the combined estimate. In theory, estimates with the lowest possible variance are achieved by using the Maximum-Likelihood rule described in [1] to combine the signals. This rule states that the combined estimate is a linear weighted average, with weights that are proportional to the inverse variance of each individual estimate. By conducting a discrimination experiment, we recently confirmed that the brain combines signals in the statistically optimal way [1]. When the relative visual reliability decreases, the visual weight decreases as well and the combined percept is closer to the haptically specified size. However, combining signals may not only be beneficial, but may also come at a cost: loss of access to single-cue information. In a second study we report that single-cue information is indeed partially lost when cues from within the same sensory modality (disparity and texture gradients in vision) are combined, but not when different modalities (vision and haptics) are combined [2]. In the case of vision and touch we found that subjects had simultaneous access to all three representations: the visual, the haptic and the combined representation. That is, subjects have the benefit of combining the signals, yet no information is lost by doing so. This principle may account for the robustness of object manipulation in everyday life. At present we are investigating how top-down influences and learning mechanisms affect the integration behavior.},
web_url = {http://imrf.mcmaster.ca/IMRF/2004/},
event_name = {5th International Multisensory Research Forum (IMRF 2004)},
event_place = {Barcelona, Spain},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
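@Comment{A minimal worked example of the Maximum-Likelihood rule stated in the abstract above: the combined estimate is a weighted average with weights proportional to the inverse variance of each single-cue estimate, and its variance is lower than either single-cue variance. The sizes and variances below are illustrative values, not data from the experiments.

# Python: inverse-variance (maximum-likelihood) cue combination (illustrative)
visual_size, visual_var = 5.2, 0.4    # assumed visual size estimate and its variance
haptic_size, haptic_var = 4.8, 1.0    # assumed haptic size estimate and its variance

w_visual = (1.0 / visual_var) / (1.0 / visual_var + 1.0 / haptic_var)
w_haptic = 1.0 - w_visual

combined_size = w_visual * visual_size + w_haptic * haptic_size
combined_var = 1.0 / (1.0 / visual_var + 1.0 / haptic_var)

print(combined_size)  # closer to the more reliable (here: visual) estimate
print(combined_var)   # smaller than both single-cue variances
}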
@Conference{ 2347,
title = {Cue Integration in the Haptic Perception of Virtual Shapes},
year = {2003},
month = {10},
volume = {6},
pages = {39},
abstract = {The sense of presence in virtual environments may be greatly improved by the display of haptic virtual reality. Current haptic display technology, however, remains largely unsatisfactory and expensive. One way to overcome existing technical limitations might be to “cheat” the haptic system by exploiting its principles. Importantly, human perception of an environmental property normally relies upon the integration of several different cues, which may, at least in part, be technologically substituted for one another. A promising recent starting point for such substitution is the experimental demonstration that haptic perception of three-dimensional shapes can be evoked by just two-dimensional forces (Robles-de-la-Torre & Hayward, 2001: Nature). The experiment dissociated positional and force cues in the perception of small-scale bumps: when sliding a finger across a bump on a surface, the finger follows the geometry of the bump, providing positional cues to the shape. At the same time, the finger is opposed by forces related to the steepness of the bump. Participants in this experiment reported feeling the shape indicated by the force cues and not by the positional cues. The present study extended this research. We aimed to disentangle the contributions of force and position cues to haptic shape perception more systematically and to explore their integration principles. For that purpose, we constructed a set of virtual standard curves in which we intermixed force and position cues corresponding to curvatures of 0, 8 and 16 /m using the PHANToM haptic device. Participants compared these to curves in which both cues were correlated (i.e., “natural” curves), following the method of constant stimuli. We fitted psychometric functions to the data set from each participant and each standard curve, thus obtaining PSEs (points of subjective equality) and 84% discrimination thresholds. Most importantly, both force and position cues of the standard curves systematically contributed to the perceived curvature indicated by the PSEs. Moreover, for each participant, perceived curvature could be well described as a weighted average of the curvatures conveyed by the force and position cues. The appropriateness of this simple linear model fits with previous findings from visual and visuo-haptic cue integration (see, e.g., Ernst & Banks, 2002: Nature). Note that, in our experiment, force cues on objects that were planes according to position cues evoked the impression of curves. Moreover, for haptic displays these results imply that, at least in part, one cue may be substituted for the other in a predictable manner.},
file_url = {fileadmin/user_upload/files/publications/Presence-2003-Drewing.pdf},
web_url = {http://www.temple.edu/ispr/prev_conferences/proceedings/2003/},
event_name = {6th Annual Workshop on Presence (Presence 2003)},
event_place = {Aalborg, Denmark},
state = {published},
author = {Drewing K{kdrewing} and Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
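@Comment{A minimal sketch of the linear cue-weighting model the abstract above uses to describe the PSE data: perceived curvature is modelled as w * (force-cue curvature) + (1 - w) * (position-cue curvature), and w is recovered by least squares. The PSE values below are invented for illustration, not the participants' data.

# Python: recovering the force-cue weight from PSEs (illustrative data)
force_curv    = [0.0, 8.0, 16.0, 0.0, 16.0]   # curvature conveyed by force cues (1/m)
position_curv = [8.0, 0.0, 0.0, 16.0, 8.0]    # curvature conveyed by position cues (1/m)
pse           = [2.9, 5.3, 10.4, 6.1, 13.2]   # hypothetical points of subjective equality

# least-squares estimate of w in: pse = position + w * (force - position)
num = sum((f - p) * (y - p) for f, p, y in zip(force_curv, position_curv, pse))
den = sum((f - p) ** 2 for f, p in zip(force_curv, position_curv))
w_force = num / den

print(round(w_force, 2))        # relative weight given to the force cue
print(round(1.0 - w_force, 2))  # relative weight given to the position cue
}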
@Conference{ ErnstJ2003,
title = {Learning to combine arbitrary signals from vision and touch},
year = {2003},
month = {6},
volume = {4},
abstract = {When different perceptual signals of the same physical property are integrated (e.g., the size of an object, which can be seen and felt), they form a more reliable sensory estimate. This, however, implies that the sensory system already knows which signals belong together and how they are related. In a Bayesian model of cue integration this prior knowledge can be made explicit. Here, we examine whether such a relationship between two arbitrary sensory signals from vision and touch can be learned from their statistical co-occurrence such that they become integrated. In the Bayesian model this means changing the prior distribution over the stimuli. To this end, we trained subjects with stimuli that are usually uncorrelated in the world: the luminance of an object (visual signal) and its stiffness (haptic signal). In the training phase we presented only combinations of these signals that were highly correlated. Before and after training we measured discrimination performance with distributions of stimuli that were either congruent with the correlation during training or incongruent. The incongruent stimuli came from a distribution anti-correlated relative to training. If subjects were sensitive to the correlation between the signals, we would expect to see a change in their prior knowledge about which combinations of stimuli are usually encountered. Accordingly, this should change their discrimination performance between pre- and post-test. We found a significant interaction between the two factors pre/post-test and congruent/incongruent. After training, discrimination thresholds for the incongruent stimuli were increased relative to the thresholds for congruent stimuli, suggesting that subjects learned to combine the two signals effectively.},
web_url = {http://imrf.mcmaster.ca/IMRF/2003/papers.php},
event_name = {4th International Multisensory Research Forum (IMRF 2003)},
event_place = {Hamilton, Canada},
state = {published},
author = {Ernst M{marc}{Research Group Multisensory Perception and Action} and J\"akel F{frank}}
}
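@Comment{A minimal sketch of the Bayesian account in the abstract above: learning the luminance-stiffness correlation is modelled as a correlated Gaussian prior over the two signals, which pulls the two noisy estimates toward each other after training. All numbers (noise levels, prior correlation) are illustrative assumptions, not fitted parameters from the study.

# Python: posterior mean for two signals under a correlated Gaussian prior (illustrative)
import numpy as np

meas = np.array([0.8, -0.5])      # noisy luminance and stiffness measurements (arbitrary units)
noise_var = np.array([0.3, 0.3])  # assumed measurement variances

rho = 0.9                         # prior correlation learned during training (0 before training)
prior_var = 1.0
prior_cov = prior_var * np.array([[1.0, rho], [rho, 1.0]])

# Gaussian posterior: precision-weighted combination of the zero-mean prior and the likelihood
post_precision = np.linalg.inv(prior_cov) + np.diag(1.0 / noise_var)
post_mean = np.linalg.solve(post_precision, meas / noise_var)

print(post_mean)  # with rho = 0.9 the two estimates are pulled together; with rho = 0 they are not
}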
@Conference{ NewellBE2003,
title = {Multisensory perception of actively explored objects},
year = {2003},
month = {6},
volume = {4},
number = {76},
abstract = {Many objects in our world can be picked up and freely manipulated, thus allowing information about an object to be available to both the visual and haptic systems. However, we understand very little about how object information is shared across the modalities. Under constrained viewing, cross-modal object recognition is most efficient when the same surface of an object is presented to the visual and haptic systems (Newell et al. 2001). Here we tested cross-modal recognition under active manipulation and unconstrained viewing of the objects. In Experiment 1, participants were allowed 30 seconds to learn unfamiliar objects visually or haptically. Haptic learning resulted in relatively poor haptic recognition performance compared with visual recognition. In Experiment 2, we increased the learning time for haptic exploration and found equivalent haptic and visual recognition, but a cost in cross-modal recognition. In Experiment 3, participants learned the objects using both modalities together, vision alone, or haptics alone. Recognition performance was tested using both modalities together. We found that recognition performance was significantly better when objects were learned by both modalities than by either modality alone. Our results suggest that efficient cross-modal performance depends on the spatial correspondence of object information across modalities.},
web_url = {http://imrf.mcmaster.ca/IMRF/2003/papers.php},
event_name = {4th International Multisensory Research Forum (IMRF 2003)},
event_place = {Hamilton, Canada},
state = {published},
author = {Newell FN{fiona}{Department Human Perception, Cognition and Action}, B\"ulthoff HH{hhb}{Department Human Perception, Cognition and Action} and Ernst M{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action}}
}
@Conference{ 1933,
title = {Using visual and haptic information for discriminating objects},
journal = {Perception},
year = {2002},
month = {8},
volume = {31},
number = {ECVP Abstract Supplement},
pages = {147},
abstract = {When feeling objects, vision and touch simultaneously provide information about size or shape. The purpose of this study was to determine which representation we use for discriminating between objects that differ only in their visual and/or haptic size. We consider three representations: visual-alone, haptic-alone, and combined visual-haptic. If the information is integrated optimally, the combined visual-haptic percept is a weighted average of the two sizes. To measure discrimination performance, subjects indicated the odd one of three sequentially presented stimuli. These were horizontal bars, two of which were identical and had equal visual and haptic heights (standard). The third (odd) stimulus had a different visual and/or haptic height. If subjects used the visual-alone or haptic-alone representations, discrimination would occur whenever the visual or haptic height in the odd stimulus differed from the standard by more than the threshold. If subjects relied on the combined representation, no discrimination should occur when the visual and haptic heights differ in opposite directions such that their weighted averages are equal, and discrimination should be best when both heights are either bigger or smaller than the standard. We found that discrimination was indeed most difficult when the weighted averages were equal. Thus, the combined visual-haptic percept is used for discrimination. However, if the conflict between visual and haptic heights became too large, discrimination improved, indicating that we can also access the visual-alone and haptic-alone representations.},
web_url = {http://www.perceptionweb.com/abstract.cgi?id=v020467},
event_name = {25th European Conference on Visual Perception},
event_place = {Glasgow, UK},
state = {published},
author = {Ernst MO{marc}{Department Human Perception, Cognition and Action}{Research Group Multisensory Perception and Action} and Banks MS{martybanks}}
}
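@Comment{A minimal worked example of the prediction in the abstract above: if discrimination relies on the combined visual-haptic size (a weighted average), an odd stimulus whose visual and haptic heights differ from the standard in opposite directions can have the same combined size as the standard and should then be hardest to discriminate. The weights and heights below are illustrative assumptions.

# Python: combined-size prediction for the oddity task (illustrative numbers)
w_visual, w_haptic = 0.7, 0.3   # assumed cue weights (sum to 1)
standard = 55.0                 # standard bar height (mm), equal visual and haptic heights

def combined(visual_height, haptic_height):
    # weighted-average (combined) size estimate
    return w_visual * visual_height + w_haptic * haptic_height

# odd stimulus with an opposing conflict chosen so the weighted average matches the standard
odd_visual = 55.0 - 3.0
odd_haptic = 55.0 + 3.0 * w_visual / w_haptic

print(combined(standard, standard))      # 55.0
print(combined(odd_visual, odd_haptic))  # also 55.0 -> predicted to be hardest to discriminate
}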