%
% This file was created by the Typo3 extension
% sevenpack version 0.7.14
%
% --- Timezone: CET
% Creation date: 2018-11-20
% Creation time: 00-38-58
% --- Number of references
% 106
%
@Article { MichelRBHV2013,
title = {The contribution of shape and surface information in the other-race face effect},
journal = {Visual Cognition},
year = {2013},
month = dec,
volume = {21},
number = {9--10},
pages = {1202--1223},
abstract = {Faces from another race are generally more difficult to recognize than faces from one's own race. However, faces provide multiple cues for recognition and it remains unknown what are the relative contribution of these cues to this ``other-race effect''. In the current study, we used three-dimensional laser-scanned head models which allowed us to independently manipulate two prominent cues for face recognition: the facial shape morphology and the facial surface properties (texture and colour). In Experiment 1, Asian and Caucasian participants implicitly learned a set of Asian and Caucasian faces that had both shape and surface cues to facial identity. Their recognition of these encoded faces was then tested in an old/new recognition task. For these face stimuli, we found a robust other-race effect: Both groups were more accurate at recognizing own-race than other-race faces. Having established the other-race effect, in Experiment 2 we provided only shape cues for recognition and in Experiment 3 we provided only surface cues for recognition. Caucasian participants continued to show the other-race effect when only shape information was available, whereas Asian participants showed no effect. When only surface information was available, there was a weak pattern for the other-race effect in Asians. Performance was poor in this latter experiment, so this pattern needs to be interpreted with caution. Overall, these findings suggest that Asian and Caucasian participants rely differently on shape and surface cues to recognize own-race faces, and that they continue to use the same cues for other-race faces, which may be suboptimal for these faces.},
department = {Department B{\"u}lthoff},
web_url = {http://www.tandfonline.com/doi/abs/10.1080/13506285.2013.823141},
DOI = {10.1080/13506285.2013.823141},
author = {Michel, C and Rossion, B and B{\"u}lthoff, I and Hayward, WG and Vuong, QC}
}
@Article { ZhaoB2013_2,
title = {The other-race effect in face recognition is sensitive to face format at encoding},
journal = {Visual Cognition},
year = {2013},
month = oct,
volume = {21},
number = {6},
pages = {722--725},
note = {21st Annual Meeting on Object Perception, Attention, and Memory (OPAM 2013)},
department = {Department B{\"u}lthoff},
web_url = {http://www.tandfonline.com/doi/abs/10.1080/13506285.2013.844971\#.UtaaUfsViQA},
DOI = {10.1080/13506285.2013.844971},
author = {Zhao, M and B{\"u}lthoff, I}
}
@Article { GaissertWFB2012,
title = {Haptic Categorical Perception of Shape},
journal = {PLoS One},
year = {2012},
month = aug,
volume = {7},
number = {8},
pages = {1--7},
abstract = {Categorization and categorical perception have been extensively studied, mainly in vision and audition. In the haptic domain, our ability to categorize objects has also been demonstrated in earlier studies. Here we show for the first time that categorical perception also occurs in haptic shape perception. We generated a continuum of complex shapes by morphing between two volumetric objects. Using similarity ratings and multidimensional scaling we ensured that participants could haptically discriminate all objects equally. Next, we performed classification and discrimination tasks. After a short training with the two shape categories, both tasks revealed categorical perception effects. Training leads to between-category expansion resulting in higher discriminability of physical differences between pairs of stimuli straddling the category boundary. Thus, even brief training can alter haptic representations of shape. This suggests that the weights attached to various haptic shape features can be changed dynamically in response to top-down information about class membership.},
department = {Department B{\"u}lthoff},
web_url = {http://www.plosone.org/article/info\%3Adoi\%2F10.1371\%2Fjournal.pone.0043062},
DOI = {10.1371/journal.pone.0043062},
EPUB = {e43062},
author = {Gaissert, N and Waterkamp, S and Fleming, RW and B{\"u}lthoff, I}
}
@Article { Bulthoff2012_16,
title = {Review: {L'empreinte Des Sens}},
journal = {Perception},
year = {2012},
month = jul,
volume = {41},
number = {7},
pages = {881--882},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/41/7/881},
DOI = {10.1068/p4107rvw},
author = {B{\"u}lthoff, I}
}
@Article { ArmannB2011,
title = {Male and female faces are only perceived categorically when linked to familiar identities -- And when in doubt, he is a male},
journal = {Vision Research},
year = {2012},
month = jun,
volume = {63},
pages = {69--80},
abstract = {Categorical perception (CP) is a fundamental cognitive process that enables us to sort similar objects in the world into meaningful categories with clear boundaries between them. CP has been found for high-level stimuli like human faces, more precisely, for the perception of face identity, expression and ethnicity. For sex however, which represents another important and biologically relevant dimension of human faces, results have been equivocal so far. Here, we reinvestigate CP for sex using newly created face stimuli to control two factors that to our opinion might have influenced the results in earlier studies. Our new stimuli are (a) derived from single face identities, so that changes of sex are not confounded with changes of identity information, and (b) ``normalized'' in their degree of maleness and femaleness, to counteract natural variations of perceived masculinity and femininity of faces that might obstruct evidence of categorical perception. Despite careful normalization, we did not find evidence of CP for sex using classical test procedures, unless participants were specifically familiarized with the face identities before testing. These results support the single-route hypothesis, stating that sex and identity information in faces are not processed in parallel, in contrast to what was suggested in the classical Bruce and Young model of face perception.
Besides, interestingly, our participants show a consistent bias, before and after perceptual normalization of the male--female range of the test morph continua, to judge faces as male rather than female.},
department = {Department B{\"u}lthoff},
web_url = {http://www.sciencedirect.com/science/article/pii/S0042698912001496},
DOI = {10.1016/j.visres.2012.05.005},
author = {Armann, R and B{\"u}lthoff, I}
}
@Article { 4689,
title = {Gaze behavior in face comparison: The roles of sex, task, and symmetry},
journal = {Attention, Perception and Psychophysics},
year = {2009},
month = jul,
volume = {71},
number = {5},
pages = {1107--1126},
abstract = {Knowing where people look on a face provides an objective insight into the information entering the visual system and into cognitive processes involved in face perception. In the present study, we recorded eye movements of human participants while they compared two faces presented simultaneously. Observers' viewing behavior and performance was examined in two tasks of parametrically varying difficulty, using two types of face stimuli (sex morphs and identity morphs). The frequency, duration, and temporal sequence of fixations on previously defined areas of interest in the faces were analyzed. As was expected, viewing behavior and performance varied with difficulty. Interestingly, observers compared predominantly the inner halves of the face stimuli--a result inconsistent with the general left-hemiface bias reported for single faces. Furthermore, fixation patterns and performance differed between tasks, independently of stimulus type. Moreover, we found differences in male and female participants' viewing behaviors, but only when the sex of the face stimuli was task relevant.},
department = {Department B{\"u}lthoff},
web_url = {http://app.psychonomic-journals.org/content/71/5/1107.full.pdf+html},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
language = {en},
DOI = {10.3758/APP.71.5.1107},
author = {Armann, R and B{\"u}lthoff, I}
}
@Article { 2385,
title = {Categorical perception of sex occurs in familiar but not unfamiliar faces},
journal = {Visual Cognition},
year = {2004},
month = oct,
volume = {11},
number = {7},
pages = {823--855},
abstract = {We investigated whether male and female faces are discrete categories at the perceptual level and whether familiarization plays a role in the categorical perception of sex. We created artificial sex continua between male and female faces using a 3-D morphing algorithm and used classical categorization and discrimination tasks to investigate categorical perception of sex. In Experiments 1 and 2, 3-D morphs were computed between individual male and female faces. In Experiments 3 and 4, we used face continua in which only the sex of the facial features changed, while the identity characteristics of the facial features remained constant. When the faces were unfamiliar (Experiments 1 and 3), we failed to find evidence for categorical perception of sex. In Experiments 2 and 4, we familiarized participants with the individual face images by instructing participants to learn the names of the individuals in the endpoint face images (Experiment 2) or to classify face images along a continuum as male or female using a feedback procedure (Experiment 4). In both these experiments we found evidence for a categorical effect for sex after familiarization. Our findings suggest that despite the importance of face perception in our everyday world, sex information present in faces is not naturally perceived categorically. Categorical perception of sex was only found after training with the face stimulus set. Our findings have implications for functional models of face processing which suggest two independent processing routes, one for facial expression and one for identity: We propose that sex perception is closely linked with the processing of facial identity.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/categorical_perception_of_sex_occurs_in_familiar_but_not_infamiliar_faces_2385[0].pdf},
department = {Department B{\"u}lthoff},
web_url = {http://www.tandfonline.com/doi/abs/10.1080/13506280444000012},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1080/13506280444000012},
author = {B{\"u}lthoff, I and Newell, F}
}
@Article { 184,
title = {Effects of parametric manipulation of inter-stimulus similarity on 3D object categorization},
journal = {Spatial Vision},
year = {1999},
month = jan,
volume = {12},
number = {1},
pages = {107--123},
abstract = {To explore the nature of the representation space of 3D objects, we studied human performance in forced-choice categorization of objects composed of four geon-like parts emanating from a common center. Two categories were defined by prototypical objects, distinguished by qualitative properties of their parts (bulging vs waist-like limbs). Subjects were trained to discriminate between the two prototypes (shown briefly, from a number of viewpoints, in stereo) in a 1-interval forced-choice task, until they reached a 90\% correct-response performance level. After training, in the first experiment, 11 subjects were tested on shapes obtained by varying the prototypical parameters both orthogonally (ORTHO) and in parallel (PARA) to the line connecting the prototypes in the parameter space. For the eight subjects who performed above chance, the error rate increased with the ORTHO parameter-space displacement between the stimulus and the corresponding prototype; the effect of the PARA displacement was weaker. Thus, the parameter-space location of the stimuli mattered more than the qualitative contrasts, which were always present. To find out whether both prototypes or just the nearest one to the test shape influenced the decision, in the second experiment we varied the similarity between the categories. Specifically, in the test stage trials the distance between the two prototypes could assume one of three values (FAR, INTERMEDIATE, and NEAR). For the 13 subjects who performed above chance, the error rate (on physically identical stimuli) in the NEAR condition was higher than in the other two conditions. The results of the two experiments contradict the prediction of theories that postulate exclusive reliance on qualitative contrasts, and support the notion of a representation space in which distances to more than one reference point or prototype are encoded (Edelman, 1998).},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf184.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://www.ingentaconnect.com/search/download?pub=infobike\%3a\%2f\%2fvsp\%2fspv\%2f1999\%2f00000012\%2f00000001\%2fart00006\&mimetype=application\%2fpdf\&exitTargetId=1309268380820},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1163/156856899X00067},
author = {Edelman, S and B{\"u}lthoff, HH and B{\"u}lthoff, I}
}
@Article { 148,
title = {Top-down influences on stereoscopic depth-perception},
journal = {Nature Neuroscience},
year = {1998},
month = jul,
volume = {1},
number = {3},
pages = {254--257},
abstract = {The interaction between depth perception and object recognition has important implications for the nature of mental object representations and models of hierarchical organization of visual processing. It is often believed that the computation of depth influences subsequent high-level object recognition processes, and that depth processing is an early vision task that is largely immune to 'top-down' object-specific influences, such as object recognition. Here we present experimental evidence that challenges both these assumptions in the specific context of stereoscopic depth-perception. We have found that observers' recognition of familiar dynamic three-dimensional (3D) objects is unaffected even when the objects' depth structure is scrambled, as long as their two-dimensional (2D) projections are unchanged. Furthermore, the observers seem perceptually unaware of the depth anomalies introduced by scrambling. We attribute the latter result to a top-down recognition-based influence whereby expectations about a familiar object's 3D structure override the true stereoscopic information.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf148.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://www.nature.com/neuro/journal/v1/n3/pdf/nn0798_254.pdf},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1038/699},
author = {B{\"u}lthoff, I and B{\"u}lthoff, HH and Sinha, P}
}
@Article { 576,
title = {Illusory motion from shadows},
journal = {Nature},
year = {1996},
month = jan,
volume = {379},
number = {6560},
pages = {31},
department = {Department B{\"u}lthoff},
web_url = {http://www.nature.com/nature/journal/v379/n6560/pdf/379031a0.pdf},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1038/379031a0},
author = {Kersten, D and Knill, DC and Mamassian, P and B{\"u}lthoff, I}
}
@Article { 833,
title = {{GABA}-antagonist inverts movement and object detection in flies},
journal = {Brain Research},
year = {1987},
month = mar,
volume = {407},
number = {1},
pages = {152--158},
abstract = {Movement detection is one of the most elementary visual computations performed by vertebrates as well as invertebrates. However, comparatively little is known about the biophysical mechanisms underlying this computation. It has been proposed on both physiological1.8.21 and theoretical2.15.23 grounds that inhibition plays a crucial role in the directional selectivity of elementary movement detectors (EMDs). For the first time, we have studied electrophysiological and behavioral changes induced in flies after application of picrotoxinin, an antagonist of GABA. The results show that inhibitory interactions play an important role in movement detection in flies. Furthermore, our behavioral results suggest that the computation of object position is based primarily on movement detection.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf833.pdf},
department = {Department G{\"o}tz},
web_url = {http://www.sciencedirect.com/science/article/pii/0006899387912303},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1016/0006-8993(87)91230-3},
author = {B{\"u}lthoff, HH and B{\"u}lthoff, I}
}
@Article { 823,
title = {Combining Neuropharmacology and Behavior to Study Motion Detection in Flies},
journal = {Biological Cybernetics},
year = {1987},
month = feb,
volume = {55},
number = {5},
pages = {313--320},
abstract = {The optomotor following response, a behavior based on movement detection was recorded in the fruitfly Drosophila melanogaster before and after the injection of picrotoxinin, an antagonist of the inhibitory neurotransmitter GABA. The directional selectivity of this response was transiently abolished or inverted after injection. This result is in agreement with picrotoxinin-induced modifications observed in electrophysiological activity of direction-selective cells in flies (B{\"u}lthoff and Schmid 1983; Schmid and B{\"u}lthoff, in preparation). Furthermore, walking and flying flies treated with picrotoxinin followed more actively motion from back to front instead of front to back as in normal animals. Since the difference in the responses to front to back and back to front motions is proposed to be the basis of fixation behavior in flies (Reichardt 1973) our results support this notion and are inconsistent with schemes explaining fixation by alternative mechanisms.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/combining_neuropharmacology_and_behavior_to_study_motion_detection_in_flies_823[0].pdf},
department = {Department G{\"o}tz},
web_url = {http://www.springerlink.com/content/y056t6h64564n574/fulltext.pdf},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1007/BF02281977},
author = {B{\"u}lthoff, HH and B{\"u}lthoff, I}
}
@Article { 1128,
title = {Deoxyglucose mapping of nervous activity induced in {Drosophila} brain by visual movement. 3. Outer rhabdomeres absent {JK84}, small optic lobes {KS58} and no object fixation {EB12}, visual mutants},
journal = {Journal of Comparative Physiology},
year = {1986},
month = mar,
volume = {158},
number = {2},
pages = {195--202},
abstract = {Autoradiographs of the brains of the visual mutants outer rhabdomeres absent JK84 (ora), small optic lobes KS58 (KS58) and no object fixation EB12 (B12) have been obtained by the deoxyglucose method. The patterns of metabolic activity in the optic lobes of the visually stimulated mutants is compared with that of similarly stimulated wildtype (WT) flies which was described in Part I of this work (Buchner et al. 1984b).
In the mutant KS58 the optomotor following response to movement is nearly normal despite a 40--45\% reduction of volume in the visual neuropils, medulla and lobula complex. In B12 flies the volume of these neuropils and the optomotor response are reduced. In autoradiographs of both mutants the pattern of neuronal activity induced by stimulation with moving gratings does not differ substantially from that in the WT. It suggests that only neurons irrelevant to movement detection are affected by the mutation. However, in the lobula plate of some KS58 flies and in the second chiasma of all B12 flies, the pattern of metabolic activity differs from that observed in WT flies. Up to now no causal relation has been found between the modifications described in behaviour or anatomy and those observed in the labelling of these mutants.
In the ommatidia of ora flies the outer rhabdomeres are lacking while the central photoreceptors appear to be normal. Stimulus-specific labelling is absent in the visual neuropil of these mutants stimulated with movement or flicker. This result underlines the importance of the outer rhabdomeres for visual tasks, especially for movement detection.},
department = {Department G{\"o}tz},
web_url = {http://link.springer.com/content/pdf/10.1007\%2FBF01338562.pdf},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1007/BF01338562},
author = {B{\"u}lthoff, I}
}
@Article { 1129,
title = {Freeze-substitution of {Drosophila} heads for subsequent 3H-2-deoxyglucose autoradiography},
journal = {Journal of Neuroscience Methods},
year = {1985},
month = may,
volume = {13},
number = {3--4},
pages = {183--190},
abstract = {High resolution of [3H]2-deoxyglucose labelling was obtained in autoradiographs of Drosophila brains after freeze-substitution in anhydrous acetone at -76\(^{\circ}\)C. This method was applied to preparations which received visual, olfactory and mechanosensory stimulation. The autoradiographs were compared to those obtained after freeze-drying. Freeze-substitution, which has proved to be technically simple, rapid and inexpensive, yields a good quality of tissue preservation and hence is recommended for tissue dehydration prior to autoradiography.},
department = {Department G{\"o}tz},
web_url = {http://www.sciencedirect.com/science/article/pii/0165027085900664},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1016/0165-0270(85)90066-4},
author = {Rodrigues, V and B{\"u}lthoff, I}
}
@Article { 1127,
title = {Deoxyglucose mapping of nervous activity induced in {Drosophila} brain by visual movement. 2. Optomotor blind {H31} and lobula plate-less {N684} visual mutants},
journal = {Journal of Comparative Physiology},
year = {1985},
month = jan,
volume = {156},
number = {1},
pages = {25--34},
abstract = {The pattern of visually induced local metabolic activity in the optic lobes of two structural mutants of Drosophila melanogaster is compared with the corresponding wildtype pattern which has been reported in Part I of this work (Buchner et al. 1984b). Individual optomotor-blind H31 (omb) flies lacking normal giant HS-neurons were tested behaviourally, and those with strongly reduced responses to visual movement were processed for 3H-deoxyglucose autoradiography. The distribution of metabolic activity in the optic lobes of omb apparently does not differ substantially from that found in wildtype. In the mutant lobula plate-less N684 (lop) the small rudiment of the lobula plate which lacks many small-field input neurons does not show any stimulus-specific labelling. The data provide further support for the hypothesis that small-field input neurons to the lobula plate are the cellular substrate of the direction-specific labelling in Drosophila (see Buchner et al. 1984b).},
department = {Department G{\"o}tz},
web_url = {http://link.springer.com/content/pdf/10.1007\%2FBF00610663.pdf},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1007/BF00610663},
author = {B{\"u}lthoff, I and Buchner, E}
}
@Article { 1126,
title = {Deoxyglucose mapping of nervous activity induced in {Drosophila} brain by visual movement. 1. Wildtype},
journal = {Journal of Comparative Physiology},
year = {1984},
month = jul,
volume = {155},
number = {4},
pages = {471--483},
abstract = {Local metabolic activity was mapped in the brain of Drosophila by the radioactive deoxyglucose technique. The distribution of label in serial autoradiographs allows us to draw the following conclusions concerning neuronal processing of visual movement information in the brain of Drosophila.
1. The visual stimuli used (homogeneous flicker, moving gratings, reversing contrast gratings) cause only a small increase in metabolic activity in the first visual neuropil (lamina).
2. In the second visual neuropil (medulla) at least four layers respond to visual movement and reversing contrast gratings by increased metabolic activity; homogeneous flicker is less effective.
3. With the current autoradiographic resolution (2--3 \(\mu\)m) no directional selectivity can be detected in the medulla.
4. In the lobula, the anterior neuromere of the third visual neuropil, movement-specific activity is observed in three layers, two of which are more strongly labelled by ipsilateral front-to-back than by back-to-front movement.
5. In its posterior counterpart, the lobula plate, four movement-sensitive layers can be identified in which label accumulation specifically depends on the direction of the movement: Ipsilateral front-to-back movement labels a superficial anterior layer, back-to-front movement labels an inner anterior layer, upward movement labels an inner posterior layer and downward movement labels a superficial posterior layer.
6. A considerable portion of the stimulus-enhanced labelling of medulla and lobula complex is restricted to those columns which connect to the stimulated ommatidia. This retinotopic distribution of label suggests the involvement of movement-sensitive small-field neurons.
7. Certain axonal profiles connecting the lobula plate and the lateral posterior protocerebrum are labelled by ipsilateral front-to-back movement. Presumably different structures in the same region are labelled by ipsilateral downward movement. Conspicuously labelled foci and commissures in the central brain cannot yet be associated with a particular stimulus.
The results are discussed in the light of present anatomical and physiological knowledge of the visual movement detection system of flies.},
department = {Department G{\"o}tz},
web_url = {http://link.springer.com/content/pdf/10.1007\%2FBF00611912.pdf},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
DOI = {10.1007/BF00611912},
author = {Buchner, E and Buchner, S and B{\"u}lthoff, I}
}
@Inproceedings { 251,
title = {Prime-orientation dependence in detection of camouflaged biological motion},
year = {1998},
month = aug,
pages = {314--319},
department = {Department B{\"u}lthoff},
editor = {Grondin, S. and Lacouture, Y.},
publisher = {International Society for Psychophysics},
address = {Quebec, Canada},
booktitle = {Fechner Day 98},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Quebec, Canada},
event_name = {14th Annual Meeting of the International Society for Psychophysics},
author = {Pavlova, MA and Sokolov, AN and B{\"u}lthoff, I}
}
@Inproceedings { 344,
title = {Recovery of a priori known structure from biological motion},
year = {1998},
month = jul,
pages = {64--68},
department = {Department B{\"u}lthoff},
editor = {Bril, B. and Ledebt, A. and Dietrich, G. and Roby-Brami, A.},
publisher = {Editions EDK},
address = {Paris, France},
booktitle = {Advances in Perception-Action Coupling},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Pont-{\`a}-Mousson, France},
event_name = {Fifth European Workshop on Ecological Psychology (EWEP 5)},
ISBN = {2842540123},
author = {Pavlova, MA and Sokolov, AN and B{\"u}lthoff, I}
}
@Inbook { Bulthoff2010_4,
title = {Recognition},
year = {2010},
pages = {863--864},
abstract = {In a broad sense, the term recognition refers to the explicit feeling of familiarity that occurs when, for example, we view an object or hear a voice that we have experienced previously. It has been widely investigated in the visual domain, and this entry is thus based mainly on this field of research. By definition, accurate recognition can only occur for objects or sets of objects that we have experienced (seen) in the past. Recognition is fundamental to interpreting perceptual experiences, as it gives explicit meaning to our visual input.},
department = {Department B{\"u}lthoff},
web_url = {https://ia601806.us.archive.org/25/items/Encyclopedia_of_Perception_Volume_1_and_2/Encyclopedia_of_Perception_Volume_1_and_2.pdf},
editor = {Goldstein, E. B.},
publisher = {Sage},
address = {Los Angeles, CA, USA},
booktitle = {Encyclopedia of Perception},
ISBN = {978-1-4129-4081-8},
author = {B{\"u}lthoff, I}
}
@Inbook { 3812,
title = {The role of familiarity in the recognition of static and dynamic objects},
year = {2006},
month = oct,
pages = {315--325},
abstract = {Although the perception of our world is experienced as effortless, the processes that underlie object recognition in the brain are often difficult to determine. In this article we review the effects of familiarity on the recognition of moving or static objects. In particular, we concentrate on exemplar-level stimuli such as walking humans, unfamiliar objects and faces. We found that the perception of these objects can be affected by their familiarity; for example the learned view of an object or the learned dynamic pattern can influence object perception. Deviations in the viewpoint from the familiar viewpoint, or changes in the temporal pattern of the objects can result in some reduction of efficiency in the perception of the object. Furthermore, more efficient sex categorization and cross-modal matching was found for familiar than for unfamiliar faces. In sum, we find that our perceptual system is organized around familiar events and that perception is most efficient with these learned events.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/Visual\%20Perception_Part_1_315-325_middle_3812[0].pdf},
department = {Department B{\"u}lthoff},
web_url = {http://www.elsevier.com/wps/find/bookdescription.cws_home/710077/description\#description},
editor = {Martinez-Conde, S. and Macknick, S. and Martinez, L. and Alonso, J.-M. and Tse, P.},
publisher = {Elsevier},
address = {Amsterdam, Netherlands},
series = {Progress in Brain Research ; 154A},
booktitle = {Visual Perception Part 1: Fundamentals of vision: Low and Mid-level processes in perception},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
language = {en},
ISBN = {978-0-444-52966-4},
DOI = {10.1016/S0079-6123(06)54017-8},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Inbook { 3351,
title = {Objektwahrnehmung},
year = {2006},
pages = {165--172},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/Handbuch_der_allgemeinen_Psychologie_165-172_middle_3351[0].pdf},
department = {Department B{\"u}lthoff},
web_url = {https://www.hogrefe.de/shop/handbuch-der-allgemeinen-psychologie-kognition-65550.html},
editor = {Funke, J. and Frensch, P. A.},
publisher = {Hogrefe},
address = {G{\"o}ttingen, Germany},
series = {Handbuch der Psychologie ; 5},
booktitle = {Handbuch der Allgemeinen Psychologie: Kognition},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
language = {de},
ISBN = {978-3-8017-1846-6},
author = {B{\"u}lthoff, I and B{\"u}lthoff, HH}
}
@Inbook { 1132,
title = {Image-Based Recognition of Biological Motion, Scenes, and Objects},
year = {2003},
pages = {146-172},
abstract = {In this chapter we will review experiments using both explicit and implicit tasks to investigate object recognition using familiar objects (faces), unusual renderings of familiar objects (point-light walker), and novel scenes. While it is unlikely that participants would have already seen the particular renderings of familiar objects used in an experiment, they have definitely seen similar objects. For this reason, unfamiliar objects are used in many experiments to circumvent the problem of uncontrolled variations in prior exposure to objects. Another reason for using unfamiliar objects is that they allow us precise control over the types of features that are available for discrimination. How our visual system represents familiar and unfamiliar three-dimensional objects for the purpose of recognition is a difficult and passionately discussed issue. At the theoretical level a key question that any representational scheme has to address is how much the internal model depends on the viewing parameters. We will present 2 types of models regarding this issue and also address the question of whether the recognition process is more analytic or more holistic.},
department = {Department B{\"u}lthoff},
web_url = {http://psycnet.apa.org/psycinfo/2003-88086-006},
editor = {Peterson, M. A. and Rhodes, G.},
publisher = {Oxford University Press},
address = {New York, NY, USA},
booktitle = {Perception of Faces, Objects, and Scenes: Analytic and Holistic Processes},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
ISBN = {0-19-516538-1},
DOI = {10.1093/acprof:oso/9780195313659.003.0007},
author = {B{\"u}lthoff, I and B{\"u}lthoff, HH}
}
@Inbook { 1130,
title = {Mapping nervous activity in visual mutants of Drosophila melanogaster with the deoxyglucose method},
year = {1984},
pages = {171-175},
editor = {Clement, P. and Ramousse, R.},
publisher = {Editions du Centre National de la Recherche Scientifique},
address = {Paris, France},
booktitle = {La vision chez les invert{\'e}br{\'e}s},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
ISBN = {2-222-03585-6},
author = {Nicod, I}
}
@Techreport { 3637,
title = {Categorical perception of gender: No evidence for unfamiliar faces},
year = {2005},
month = {10},
number = {094},
abstract = {We investigated whether male and female faces are discrete categories at the perceptual level. We
created artificial gender continua between male and female faces using a 3D-morphing algorithm and used classical
categorization and discrimination tasks to investigate categorical perception of gender. In Experiments 1 and 3,
3D morphs were computed between male and female faces. The results of the discrimination task suggest that the
gender of unfamiliar faces is not categorically perceived. When participants were familiarized with the male and
female endpoint faces before testing (Experiment 3), a categorical effect was found. In Experiment 2, only shape
or texture of unfamiliar 3D morphs was indicative of gender, while other information (e.g. texture or shape) was
kept constant. Again there was no evidence of a categorical effect in the discrimination task. In Experiments 1, 2
and 3, changes in the gender of a face were also coupled with changes in identity which may have confounded the
findings. In Experiments 4 and 5, we used face continua in which only the gender of the facial features changed,
while the characteristic of the facial features remained constant. When the faces were unfamiliar (Experiment 4),
there was no evidence of categorical perception of gender. In Experiment 5, participants learned to classify the
face images in two gender categories using a feedback procedure. A clear categorical effect for gender was present
after training. Our findings suggest that despite the importance of faces, gender information present in faces is not
naturally perceived categorically. Consequently participants showed categorical perception of gender only after
training with the face stimulus set.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/techreport_094_[0].pdf},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
institution = {Max Planck Institute for Biological Cybernetics, T{\"u}bingen, Germany},
language = {en},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Techreport { 1512,
title = {View-based representations for dynamic 3D object recognition},
year = {1997},
month = {2},
number = {47},
abstract = {Much of the experimental and computational modeling research on human recognition processes has focused exclusively on the domain of static three-dimensional (3D) objects. The issue of the nature of internal representations underlying dynamic 3D object recognition is largely unexplored. Here we examine this issue, with emphasis on view-point dependency, using variants of biological motion sequences of the kind described by Johansson (1973).
Our first experiment investigated whether observers exhibit the well-known canonical view-point effect while recognizing 3D biological motion sequences. Results showing a markedly impaired recognition performance with sequences recorded from unusual view-points provide preliminary evidence for the role of view-point familiarity and the inability of the visual system to extract view-independent representations. Next, to examine whether the motion
traces used for recognition preserve 3D information, or are largely 2D, we developed a special class of biological motion sequences. The distinguishing characteristic of these sequences was that while they preserve the `normal' 2D
projections from one view-point, their 3D structures were randomized.
View-points preserving the `normal' 2D projections yielded vivid biological motion percepts, whereas other viewpoints yielded percepts of randomly moving dots. In the final set of experiments we examined whether this result could be
an outcome of a recognition-dependent top-down suppression of anomalies in 3D structures. Our results indicate that subjects' expectations about 3D structure can suppress the bottom-up depth information provided by binocular stereo. Taken together, these findings suggest that biological motion sequences are represented by the human visual system as 2D traces rather than as 3D structural descriptions, and that the perception of 3D structure may be based not only upon low-level processes but also upon recognition-dependent
top-down influences.},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
institution = {Max Planck Institute for Biological Cybernetics, T{\"u}bingen, Germany},
author = {B{\"u}lthoff, I and B{\"u}lthoff, HH and Sinha, P}
}
@Techreport { 1504,
title = {Features of the representation space for 3D objects},
year = {1996},
month = {9},
number = {40},
abstract = {To explore the nature of the representation space of 3D objects, we studied human performance in forced-choice classification of objects composed of four geon-like parts, emanating from a common center. The two class prototypes were distinguished by qualitative contrasts (bulging vs.\textbackslash waist-like limbs). Subjects were trained to discriminate between the two prototypes (shown briefly, from a number of viewpoints, in stereo) in a 1-interval forced-choice task, until they reached a 90\% correct-response performance level. In the first experiment, 11 subjects were tested on shapes obtained by varying the prototypical parameters both orthogonally (Ortho) and in parallel (Para) to the line connecting the prototypes in the parameter space. For the eight subjects who performed above chance, the error rate increased with the Ortho parameter-space displacement between the stimulus and the corresponding prototype (the effect of the Para displacement was marginal). Clearly, the parameter-space location of the stimuli mattered more than the qualitative contrasts (which were always present). To find out whether both prototypes or just the nearest neighbor of the test shape influenced the decision, in the second experiment we tested 18 new subjects on a fixed set of shapes, while the test-stage distance between the two classes assumed one of three values (Far, Intermediate, and Near). For the 13 subjects who performed above chance, the error rate (on physically identical stimuli) in the Near condition was higher than in the other two conditions. The results of the two experiments contradict the prediction of theories that postulate exclusive reliance on qualitative contrasts, and support the notion of a metric representation space, with the subjects' performance determined by distances to more than one reference point or prototype.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf1504.pdf},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
institution = {Max Planck Institute for Biological Cybernetics, T{\"u}bingen, Germany},
author = {Edelman, S and B{\"u}lthoff, HH and B{\"u}lthoff, I}
}
@Poster { DobsSBG2013,
title = {Attending to expression or identity of dynamic faces engages different cortical areas},
year = {2013},
month = {11},
day = {10},
volume = {43},
number = {186.03},
abstract = {Identity and facial expression of faces we interact with are represented as invariant and changeable aspects, respectively - what are the cortical mechanisms that allow us to selectively extract information about these two important cues? We had subjects attend to either identity or expression of the same dynamic face stimuli and decoded concurrently measured fMRI activity to ask whether distinct cortical areas were differentially engaged in these tasks.
We measured fMRI activity (3x3x3mm, 34 slices, TR=1.5, 4T) from 6 human subjects (2 female) as they performed a change-detection task on dynamic face stimuli. At trial onset, a cue (letters ‘E’ or ‘I’) was presented (0.5s) which instructed subjects to attend to either the expression or the identity of animations of faces (8 presentations per trial of 2s movie clips depicting 1 of 2 facial identities expressing happiness or anger). Subjects were to report (by button press) changes in the cued dimension (these occurred in 20\% of trials) and ignore changes in the uncued dimension. Subjects successfully attended to the cued dimension (mean d’=2.4 for cued and d’=-1.9 for uncued dimension), and sensitivity did not differ across tasks (F(1,10)=0.19, p>0.6). Subjects performed 18-20 7min scans (20 trials/scan in pseudorandom order) in 2 sessions.
We built linear classifiers to decode the attended dimension. Face-sensitive areas were defined in separate localizer scans as clusters of voxels responding more to faces than to houses. To independently determine the voxels to be included in the analyses, we ran a task localizer in which 10s grey screen was alternated with 10s of stimuli+task. For each area, we selected the 100 voxels whose signal correlated best with task/no task alternations. BOLD signal in these voxels was averaged over 3-21s of each trial of the main experiment, concatenated across subjects and sessions and used to build the classifiers.
We found that we could decode the attended dimension on cross-validated data from many visual cortical areas (percentage correct classifications: FFA: 68\%, MT: 73\%, OFA: 79\%, STS: 68\%, V1: 77\%; p<0.05, permutation test). However, while ventral face-sensitive areas (OFA, FFA) showed larger BOLD signal during attention-to-identity than attention-to-expression trials (p<0.001, t-test), motion processing areas (MT, STS) showed the opposite effect (p<0.001, t-test). Our results suggest that attending to expression or identity engages areas involved in stimulus-specific processing of these two dimensions. Moreover, attending to expression encoded in facial motion recruits motion processing areas, while attending to face identity activates ventral face-sensitive areas.},
department = {Department B{\"u}lthoff},
web_url = {http://www.sfn.org/annual-meeting/neuroscience-2013},
event_place = {San Diego, CA, USA},
event_name = {43rd Annual Meeting of the Society for Neuroscience (Neuroscience 2013)},
author = {Dobs, K and Schultz, J and B{\"u}lthoff, I and Gardner, JL}
}
@Poster { ZhaoB2013,
title = {Learning Faces from Multiple Viewpoints Eliminates the Other-Race Effect},
journal = {Perception},
year = {2013},
month = {8},
volume = {42},
number = {ECVP Abstract Supplement},
pages = {204},
abstract = {People recognize own-race faces more accurately than those of other races. This other-race effect (ORE) has been frequently observed when faces are learned from static, single view images. However, the single-view face learning may prevent the acquisition of useful information (e.g., 3D face shape) for recognizing unfamiliar, other-race faces. Here we tested whether learning faces from multiple viewpoints reduces the ORE. In Experiment 1 participants learned faces from a single viewpoint (left or right 15\(^{\circ}\) view) and were tested with front view (0\(^{\circ}\) view) using an old/new recognition task. They showed better recognition performances for own-race faces than that for other-race faces, demonstrating the ORE in face recognition across viewpoints. In Experiment 2 participants learned each face from four viewpoints (in order, left 45\(^{\circ}\), left 15\(^{\circ}\), right 15\(^{\circ}\), and right 45\(^{\circ}\) views) and were tested in the same way as in Experiment 1. Participants recognized own- and other-race faces equally well, eliminating the ORE. These results suggest that learning faces from multiple viewpoints improves the recognition of other-race faces more than that for own-race faces, and that the previously observed ORE is caused in part by the non-optimal encoding condition for other-race faces.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/42/1_suppl.toc},
event_place = {Bremen, Germany},
event_name = {36th European Conference on Visual Perception (ECVP 2013)},
DOI = {10.1177/03010066130420S101},
author = {Zhao, M and B{\"u}lthoff, I}
}
@Poster { BrielmannBA2013,
title = {Looking at faces from different angles: Europeans fixate different features in Asian and Caucasian faces},
journal = {Perception},
year = {2013},
month = {8},
volume = {42},
number = {ECVP Abstract Supplement},
pages = {204},
abstract = {The other-race effect is the widely known difficulty at recognizing faces of another race. Further, it has been clearly established in eye tracking studies that observers of different cultural background exhibit different viewing strategies. Whether those viewing strategies depend also on the type of faces shown (same-race vs. other-race faces) is under much debate. Using eye tracking, we investigated whether European observers look at different facial features when viewing Asian and Caucasian faces in a face race categorization task. Additionally, to investigate the influence of viewpoints on gaze patterns, we presented faces in frontal, half profile and profile views. Even though fixation patterns generally changed across views, fixations to the eyes were more frequent for Caucasian faces and fixations to the nose were more frequent for Asian faces, independent of face orientation. In contrast, how fixations to cheeks, mouth and outline regions changed according to the face’s race was also dependent on face orientations. In sum, our results indicate that we mainly look at prominent facial features, albeit which features are fixated most often critically depends on face race and orientation.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/42/1_suppl.toc},
event_place = {Bremen, Germany},
event_name = {36th European Conference on Visual Perception (ECVP 2013)},
DOI = {10.1177/03010066130420S101},
author = {Brielmann, A and B{\"u}lthoff, I and Armann, R}
}
@Poster { DobsBBVCS2013,
title = {Quantifying Human Sensitivity to Spatio-Temporal Information in Dynamic Faces},
journal = {Perception},
year = {2013},
month = {8},
volume = {42},
number = {ECVP Abstract Supplement},
pages = {197},
abstract = {A great deal of social information is conveyed by facial motion. However, understanding how observers use the natural timing and intensity information conveyed by facial motion is difficult because of the complexity of these motion cues. Here, we systematically manipulated animations of facial expressions to investigate observers’ sensitivity to changes in facial motion. We filmed and motion-captured four facial expressions and decomposed each expression into time courses of semantically meaningful local facial actions (e.g., eyebrow raise). These time courses were used to animate a 3D head model with either the original time courses or approximations of them. We then tested observers’ perceptual sensitivity to these changes using matching-to-sample tasks. When viewing two animations (original vs. approximation), observers chose original animations as most similar to the video of the expression. In a second experiment, we used several measures of stimulus similarity to explain observers’ choice of which approximation was most similar to the original animation when viewing two different approximations. We found that high-level cues about spatio-temporal characteristics of facial motion (e.g., onset and peak of eyebrow raise) best explained observers’ choices. Our results demonstrate the usefulness of our method; and importantly, they reveal observers’ sensitivity to natural facial dynamics.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/42/1_suppl.toc},
event_place = {Bremen, Germany},
event_name = {36th European Conference on Visual Perception (ECVP 2013)},
DOI = {10.1177/03010066130420S101},
author = {Dobs, K and B{\"u}lthoff, I and Breidt, M and Vuong, QC and Curio, C and Schultz, JW}
}
@Poster { JungBTLA2013,
title = {The Role of Race in Summary Representations of Faces},
journal = {Journal of Vision},
year = {2013},
month = {7},
volume = {13},
number = {9},
pages = {861},
abstract = {One possibility to overcome the processing limitation of the visual system is to attend selectively to relevant information only. Another strategy is to process sets of objects as ensembles and represent their average characteristics instead of individual group members (e.g., mean size, brightness, orientation). Recent evidence suggests that ensemble representation might occur even for human faces (for a summary, see Alvarez, 2011), i.e., observers can extract the mean emotion, sex, and identity from a set of faces (Habermann \& Whitney, 2007; de Fockert \& Wolfenstein, 2009). Here, we extend this line of research into the realm of face race: Can we extract the ''mean race'' of a set of faces when no conscious perception of single individuals is possible? Moreover, does the visual system process own- and other-race faces differently at this stage? Face stimuli had the same (average) male identity but were morphed, at different levels, in between Asian and Caucasian appearance. Following earlier studies (e.g., Habermann \& Whitney, 2007, 2010), observers were briefly (250ms) presented with random sets of 12 of these faces. They were then asked to adjust a test face to the perceived mean race of the set by ''morphing'' it between Asian and Caucasian appearance. The results show that for most participants the response error distribution is significantly different from random, while their responses are centered around the real stimulus set mean - suggesting that they are able to extract ''mean race''. Also, we find a bias towards responding more ''Asian'' than the actual mean of a face set. All participants tested so far are South Korean (from Seoul), indicating that even at this early (unconscious) processing stage, the visual system distinguishes between own- and other-race faces, giving more weight to the former. Follow-up experiments on Caucasian participants will be performed to validate this observation.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/13/9/861.short},
event_place = {Naples, FL, USA},
event_name = {13th Annual Meeting of the Vision Sciences Society (VSS 2013)},
DOI = {10.1167/13.9.861},
author = {Jung, W-M and B{\"u}lthoff, I and Thornton, I and Lee, S-W and Armann, R}
}
@Poster { EsinsSKWB2012_2,
title = {Comparing the other race effect and congenital prosopagnosia using a three-experiment test battery},
year = {2012},
month = {11},
volume = {13},
pages = {38},
abstract = {Congenital prosopagnosia, an innate impairment in recognizing faces, as well as the other-race-effect, the disadvantage in recognizing faces of foreign races, both influence face recognition abilities. Here we compared both phenomena by testing three groups: German congenital
prosopagnosics (cPs), unimpaired German and unimpaired South Korean participants (n=23 per group), on three tests with Caucasian faces. First we ran the Cambridge Face Memory
Test (Duchaine \& Nakayama, 2006 Neuropsychologia 44 576-585). Participants had to recognize Caucasian target faces in a 3AFC task. German controls performed better than
Koreans (p=0.009) who performed better than prosopagnosics (p=0.0001). Variation of the individual performances was larger for cPs than for Koreans (p = 0.028). In the second experiment, participants rated the similarity of Caucasian faces (in-house 3D face-database) which differed parametrically in features or second order relations (configuration). We found differences between sensitivities to change type (featural or configural, p=0) and between
groups (p=0.005) and an interaction between both factors (p = 0.019). During the third experiment, participants had to learn exemplars of artificial objects (greebles), natural objects (shells), and faces and recognize them among distractors. The results showed an interaction (p = 0.005) between stimulus type and participant group: cPs were better for non-face stimuli and worse for face stimuli than the other groups. Our results suggest that congenital
prosopagnosia and the other-race-effect affect face perception in different ways. The broad range in performance for the cPs directs the focus of our future research towards looking for different forms of congenital prosopagnosia.},
department = {Department B{\"u}lthoff},
web_url = {http://www.neuroschool-tuebingen-nena.de/},
event_place = {Schramberg, Germany},
event_name = {13th Conference of the Junior Neuroscientists of T{\"u}bingen (NeNA 2012): Science and Education as Social Transforming Agents},
author = {Esins, J and Schultz, J and Kim, BR and Wallraven, C and B{\"u}lthoff, I}
}
@Poster { EsinsBKS2012,
title = {Can a test battery reveal subgroups in congenital prosopagnosia?},
journal = {Perception},
year = {2012},
month = {9},
volume = {41},
number = {ECVP Abstract Supplement},
pages = {113},
abstract = {Congenital prosopagnosia, the innate impairment in recognizing faces, exhibits diverse deficits. Due to this heterogeneity the possible existence of subgroups of the impairment was suggested (eg Kress and Daum, 2003 Behavioural Neurology 14 109-21). We examined 23 congenital prosopagnosics (cPAs) identified via a screening questionnaire (as used in Stollhoff, Jost, Elze, and Kennerknecht, 2011 PLoS ONE 6 e15702) and 23 age-, gender and educationally matched controls with a battery consisting of nine different tests. These included well known tests like the Cambridge Face Memory Test (CFMT, Duchaine and Nakayama, 2006 Neuropsychologia 44 576-85), a Famous Face Test (FFT), and new, own tests about object and face recognition. As expected, cPAs had lower CFMT and FFT scores than the controls. Analyses of the performance patterns across the nine tests suggest the existence of subgroups within both cPAs and controls. These groups could not be revealed only based on the CFMT and FFT scores, indicating the necessity of tests addressing different, specific aspects of object and face perception for the identification of subgroups. Current work focuses on characterizing the subgroups and identifying the most useful tests.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/41/1_suppl.toc},
event_place = {Alghero, Italy},
event_name = {35th European Conference on Visual Perception},
DOI = {10.1177/03010066120410S101},
author = {Esins, J and B{\"u}lthoff, I and Kennerknecht, I and Schultz, J}
}
@Poster { DobsBCS2012,
title = {Investigating factors influencing the perception of identity from facial motion},
journal = {Journal of Vision},
year = {2012},
month = {8},
volume = {12},
number = {9},
pages = {35},
abstract = {Previous research has shown that facial motion can convey information about identity in addition to facial form (e.g. Hill \& Johnston, 2001). The present study aims at finding whether identity judgments vary depending on the kinds of facial movements and the task performed. To this end, we used a recent facial motion capture and animation system (Curio et al., 2006). We recorded different actors performing classic emotional facial movements (e.g. happy, sad) and non-emotional facial movements occurring in social interactions (e.g. greetings, farewell). Only non-rigid components of these facial movements were used to animate one single avatar head. In a between-subject design, four groups of participants performed identity judgments based on emotional or social facial movements in a same-different (SD) or a delayed matching-to-sample task (XAB). In the SD task, participants watched two distinct facial movements (e.g. happy and sad) and had to choose whether the same or different actors performed these facial movements. In the XAB task, participants saw one target facial movement X (e.g. happy) performed by one actor followed by two facial movements of another kind (e.g. sad) performed by two actors. Participants chose which of the latter facial movements was performed by the same actor as the one performing X. Prior to the experiment, participants were familiarized with the actors by watching them perform facial movements not subsequently tested. Participants were able to judge actor identities correctly in all conditions, except for the SD task performed on the emotional stimuli. Sensitivity to identity as measured by d-prime was higher in the XAB than in the SD task. Furthermore, performance was higher for social than for emotional stimuli. Our findings reveal an effect of task on identity judgments based on facial motion, and suggest that such judgments are easier when facial movements are less stereotypical.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/12/9/35.abstract},
institution = {Max Planck Institute for Biological Cybernetics},
event_place = {Naples, FL, USA},
event_name = {12th Annual Meeting of the Vision Sciences Society (VSS 2012)},
DOI = {10.1167/12.9.35},
author = {Dobs, K and B{\"u}lthoff, I and Curio, C and Schultz, J}
}
@Poster { Bulthoff2012_5,
title = {What gives a face its ethnicity?},
journal = {Journal of Vision},
year = {2012},
month = {8},
volume = {12},
number = {9},
pages = {1282},
abstract = {We can quickly and easily judge faces in terms of their ethnicity. What is the basis for our decision? Other studies have used either eye tracking (e.g., Armann \& B{\"u}lthoff 2009) or the Bubbles method (e.g., Gosselin \& Schyns 2001) in categorization tasks to investigate which facial features are used for sex or identity classification. The first method investigates which parts are preferentially looked at while the latter method shows which facial regions, when shown in isolation during the task, lead to correct classification. Here we measured the influence of facial features on ethnicity classification when they are embedded in the face of the other ethnicity. Asian and Caucasian faces of our 3D face database (http://faces.kyb.tuebingen.mpg.de) had been paired according to sex, age and appearance. We used 18 pairs of those Asian-Caucasian faces to create a variety of mixed-race faces. Mixed-race faces were obtained by exchanging one of the following facial features between both faces of a pair: mouth, nose, facial contour, shape, texture (skin) and eyes. We showed original and modified faces one by one in a simple ethnicity classification task. All faces were turned 20 degrees to the side for a more informative view of nose shape, face shape and facial contour while eyes and mouth and general face textures were still fully visible. Because of skin color differences between exchanged parts and original faces, all 3D faces were rendered as grey-level images. The results of 24 Caucasian participants show that the eyes and the texture of a face are major determinants for ethnicity classification, more than face shape and face contour, while mouth and nose had weak influence. Response times showed that participants were faster at classifying less ambiguous faces.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/12/9/1282.abstract},
event_place = {Naples, FL, USA},
event_name = {12th Annual Meeting of the Vision Sciences Society (VSS 2012)},
DOI = {10.1167/12.9.1282},
author = {B{\"u}lthoff, I}
}
@Poster { EsinsSKWB2012,
title = {Comparing the other-race-effect and congenital Prosopagnosia using a three-experiment test battery},
journal = {i-Perception},
year = {2012},
month = {7},
volume = {3},
number = {9},
pages = {688},
abstract = {Congenital prosopagnosia, an innate impairment in recognizing faces, as well as the other-race-effect, the disadvantage in recognizing faces of foreign races, both influence face recognition abilities.
Here we compared both phenomena by testing three groups: German congenital prosopagnosics (cPs), unimpaired German and unimpaired South Korean participants (n=23 per group), on three tests with Caucasian faces.
First we ran the Cambridge Face Memory Test (Duchaine \& Nakayama, 2006 Neuropsychologia 44 576-585). Participants had to recognize Caucasian target faces in a 3AFC task. German controls performed better than Koreans (p=0.009) who performed better than prosopagnosics (p=0.0001). Variation of the individual performances was larger for cPs than for Koreans (p = 0.028).
In the second experiment, participants rated the similarity of Caucasian faces (in-house 3D face-database) which differed parametrically in features or second order relations (configuration). We found differences between sensitivities to change type (featural or configural, p=0) and between groups (p=0.005) and an interaction between both factors (p = 0.019).
During the third experiment, participants had to learn exemplars of artificial objects (greebles), natural objects (shells), and faces and recognize them among distractors. The results showed an interaction (p = 0.005) between stimulus type and participant group: cPs were better for non-face stimuli and worse for face stimuli than the other groups.
Our results suggest that congenital prosopagnosia and the other-race-effect affect face perception in different ways. The broad range in performance for the cPs directs the focus of our future research towards looking for different forms of congenital prosopagnosia.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/2012/APCV-2012-Poster-Esins.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://i-perception.perceptionweb.com/journal/I/volume/3/article/if688},
event_place = {Incheon, South Korea},
event_name = {8th Asia-Pacific Conference on Vision (APCV 2012)},
DOI = {10.1068/if688},
author = {Esins, J and Schultz, J and Kim, BR and Wallraven, C and B{\"u}lthoff, I}
}
@Poster { JungAB2012,
title = {What gives a face its race?},
journal = {i-Perception},
year = {2012},
month = {7},
volume = {3},
number = {9},
pages = {697},
abstract = {What gives a face its race? By biological criteria, human “races” do not exist (e.g., Cosmides et al., 2003). Nevertheless, every-day life and research from various fields show that we robustly and reliably perceive humans as belonging to different race groups. Here, we investigate the bases for our quick and easy judgments, by measuring the influence of manipulated facial features on race classification. Asian and Caucasian faces of our 3-dimensional face database (http://faces.kyb.tuebingen.mpg.de) were paired according to sex, age and overall appearance. With these Asian-Caucasian face pairs we created a variety of mixed-race faces, by exchanging facial features between both faces of a pair: eyes, nose, mouth, “outer” features, shape or texture. Original and modified faces were shown in a simple race classification task. We tested 24 Westerners (Germany) and 24 Easterners (South Korea). In both groups, eyes and texture were major determinants for race classification, followed by face shape, and then outer features, mouth, nose, which only had a weak influence on perceived face. Eastern participants classified Caucasian original faces better than Asian original faces, while Western participants were similarly good at classifying both races. Western participants - but not their Eastern counterparts - were less susceptible to eye, shape and texture manipulations in other-race faces than in their own-race faces. A closer look at the data suggests that this effect mainly originates from differences in processing male and female faces in Western participants only. Our results provide more evidence of differences between observers from different cultural and ethnic backgrounds in face perception and processing.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/2012/APCV-2012-Jung.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://i-perception.perceptionweb.com/journal/I/volume/3/article/if697},
event_place = {Incheon, South Korea},
event_name = {8th Asia-Pacific Conference on Vision (APCV 2012)},
DOI = {10.1068/if697},
author = {Jung, W and Armann, R and B{\"u}lthoff, I}
}
@Poster { GaissertWvB2011,
title = {Cross-modal transfer in visual and haptic object categorization},
journal = {Perception},
year = {2011},
month = {9},
volume = {40},
number = {ECVP Abstract Supplement},
pages = {134},
abstract = {When humans have to categorize objects they often rely on shape as a deterministic feature. However, shape is not exclusive to the visual modality: the haptic system is also an expert in identifying shapes. This raises the question whether humans store separate modality-dependent shape representations or if one multimodal representation is formed. To better understand how humans categorize objects based on shape we created a set of computer-generated amoeba-like objects varing in defined shape steps. These objects were then printed using a 3D printer to generate tangible stimuli. In a discrimination task and a categorization task, participants either visually or haptically explored the objects. We found that both modalities lead to highly similar categorization behavior indicating that the processes underlying categorization are highly similar in both modalities. Next, participants were trained on special shape categories by using the visual modality alone or by using the haptic modality alone. As expected, visual training increased visual performance and haptic training increased haptic performance. Moreover, we found that visual training on shape categories greatly improved haptic performance and vice versa. Our results point to a shared representation underlying both modalities, which accounts for the surprisingly strong transfer of training across the senses.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/40/1_suppl.toc},
event_place = {Toulouse, France},
event_name = {34th European Conference on Visual Perception},
DOI = {10.1177/03010066110400S102},
author = {Gaissert, N and Waterkamp, S and van Dam, L and B{\"u}lthoff, I}
}
@Poster { DobsKBSC2011,
title = {Investigating idiosyncratic facial dynamics with motion retargeting},
journal = {Perception},
year = {2011},
month = {9},
volume = {40},
number = {ECVP Abstract Supplement},
pages = {115},
abstract = {3D facial animation systems allow the creation of well-controlled stimuli to study face processing. Despite this high level of control, such stimuli often lack naturalness due to artificial facial dynamics (eg linear morphing). The present study investigates the extent to which human visual perception can be fooled by artificial facial motion. We used a system that decomposes facial motion capture data into time courses of basic action shapes (Curio et al, 2006 APGV 1 77–84). Motion capture data from four short facial expressions were input to the system. The resulting time courses and five approximations were retargeted onto a 3D avatar head using basic action shapes created manually in Poser. Sensitivity to the subtle modifications was measured in a matching task using video sequences of the actor performing the corresponding expressions as target. Participants were able to identify the unmodified retargeted facial motion above chance level under all conditions. Furthermore, matching performance for the different approximations varied with expression. Our findings highlight the sensitivity of human perception for subtle facial dynamics. Moreover, the action shape-based system will allow us to further investigate the perception of idiosyncratic facial motion using well-controlled facial animation stimuli.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/40/1_suppl.toc},
event_place = {Toulouse, France},
event_name = {34th European Conference on Visual Perception},
DOI = {10.1177/03010066110400S102},
author = {Dobs, K and Kleiner, M and B{\"u}lthoff, I and Schultz, J and Curio, C}
}
@Poster { LeeBAWB2011,
title = {The other-race effect is not ubiquitous},
journal = {Journal of Vision},
year = {2011},
month = {9},
volume = {11},
number = {11},
pages = {626},
abstract = {race (the other-race effect or ORE) has been widely cited. Nevertheless, recognizing the identity of a face is a complex task among many others; hence it might be premature to conclude that own-race faces are always easier to
process. We investigated whether same-race faces still have a processing advantage over other-race faces when only ethnicity-related information is available to differentiate between faces. We morphed the ethnicity of 20 Caucasians and 20 Asians faces toward their other-race counterpart while keeping their idiosyncratic, identity-related features. Morphing was done at three levels (20\%, 50\%, and 80\% toward the other race). The task for two groups of participants (25 T{\"u}bingen and 26 Seoul participants) was to report which face looks more Caucasian (or Asian) after looking at the original face and a morphed face sharing the same idiosyncratic features. Both faces were presented side by side on a computer monitor in one task and sequentially
in another task. Importantly, we found no evidence for an ORE in participants’ performance and no performance difference between T{\"u}bingen and Seoul participants. Both groups were equally good and equally fast at
comparing the ethnicity of two faces regardless of the task, the ethnicity of the faces and the question asked. However, we did find evidence that Seoul and T{\"u}bingen participants used different viewing strategies. By investigating their eye-movements in the sequential task, we found that the ethnicity of participants affected fixation durations on specific areas of the face, especially
the nose. Also, the type of questions asked and stimulus race altered the pattern of eye movements. These results suggest that although Caucasians and Asians were equally good at dealing with ethnicity information of both races, they might employ different viewing strategies.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/11/11/626.abstract},
event_place = {Naples, FL, USA},
event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)},
DOI = {10.1167/11.11.626},
author = {Lee, RK and B{\"u}lthoff, I and Armann, R and Wallraven, C and B{\"u}lthoff, HH}
}
@Poster { EsinsBS2011,
title = {The role of featural and configural information for perceived similarity between faces},
journal = {Journal of Vision},
year = {2011},
month = {9},
volume = {11},
number = {11},
pages = {673},
abstract = {An important aspect of face recognition involves the role of featural and configurational information for face perception (e.g. Tanaka and Farah, 1993; Yovel and Duchaine, 2006; Rotshtein et al, 2007). In our study, we investigated the influence of featural and configural information on perceived similarity between faces. Eight pairs of male faces were chosen from our digital face database (http://faces.kyb.tuebingen.mpg.de). The texture and the face shape for both faces in a pair were equalized to create 2 basis faces that differed only in their inner facial features and their configuration, but not in face shape or texture. A computer algorithm allowed to parametrically morph the features, the configuration, or both between the two basis faces of a pair. In our case the morphing was done in 25\% steps. 24 participants rated the similarity between pairs of the created faces using a 7-point Likert scale. The faces to compare came from the same basis face pair and could differ either in features or in configuration by 0\%, 25\%, 50\%, 75\% or 100\%. The results revealed that for the same amount of morphing, faces differing by their features are perceived as less similar than faces differing by their configurations. These findings replicate previous results obtained with less natural or less controlled conditions. Furthermore, we found that linear increases of the difference between both faces in configural or featural information resulted in a nonlinear increase of perceived dissimilarity. An important aspect for the relevance of our results is how natural the face stimuli look like. We asked 24 participants to rate the naturalness of all stimuli including the original faces and the created faces. Despite numerous manipulations, the vast majority of our created face stimuli were rated as natural as the original faces.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/2011/VSS-2011-Esins.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/11/11/673.abstract},
event_place = {Naples, FL, USA},
event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)},
DOI = {10.1167/11.11.673},
author = {Esins, J and B{\"u}lthoff, I and Schultz, J}
}
@Poster { BulthoffSMT2011,
title = {Using avatars to explore height/pitch effects when
learning new faces},
journal = {Journal of Vision},
year = {2011},
month = {9},
volume = {11},
number = {11},
pages = {596},
abstract = {In a previous series of desktop experiments we found no evidence that individuals' height influenced their representation of others' faces or their ability to process faces viewed from above or below (VSS 2009). However, in those experiments face orientation and body height were ambiguous as isolated faces were shown on a computer screen to an observer sitting on a chair. To address those concerns and to specifically examine the influence of learned viewpoint, we created a virtual museum containing 20 full-bodied avatars (statues) that were either sitting or standing. Using a head-mounted display, observers walked through this virtual space three times, approached each statue and viewed them from any horizontal (yaw) angle without time restrictions. We equated eye-level - and thus simulated height – for all participants and restricted their vertical movement to ensure that the faces of sitting avatars were always viewed from above and standing avatars from below. After familiarization, recognition was tested using a standard old-new paradigm in which 2D images of the learnt faces were shown from various viewpoints. Results showed a clear influence of learned viewpoint. Faces that had been learned from above (below) were recognized more quickly and accurately in that orientation than from the opposite orientation. Thus, recognition of specific, newly learned faces appears to be view-dependent in terms of pitch angle. Our failure to find a height effect in our previous study suggests that the variety of views of human faces experienced during a lifetime and possibly the preponderance of conversational situations between humans at close range typically counteracts any influence that body size might have on a person's viewing experience of others' faces.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/11/11/596.abstract},
event_place = {Naples, FL, USA},
event_name = {11th Annual Meeting of the Vision Sciences Society (VSS 2011)},
DOI = {10.1167/11.11.596},
author = {B{\"u}lthoff, I and Shrimpton, S and Mohler, BJ and Thornton, IM}
}
@Poster { 6787,
title = {No other-race effect found in a task using faces differing only in race-specifying information},
journal = {Perception},
year = {2010},
month = {8},
volume = {39},
number = {ECVP Abstract Supplement},
pages = {90},
abstract = {Generally, faces of one’s own ethnicity are better remembered than faces of another race. The mechanisms of this other race effect (ORE) are still unresolved. The present study investigates whether expertise for own-race results in ORE in a discrimination task when only race-specifying information varies between faces, with no interference of identity change and no memory load. If expertise is an important factor for ORE, Caucasian participants, for example, should better discriminate between two Caucasian faces presented side by side than between two Asian faces. We tested participants in Seoul and T{\"u}bingen with pairs of Asian or Caucasian faces. Their task was to tell which face of the pair was either more Asian or more Caucasian. Although we found that Asian face pairs were unexpectedly but consistently better discriminated than Caucasian faces, this Asian advantage did not differ between both city groups. Our results show furthermore that Seoul and T{\"u}bingen participants’ discrimination performance was similar for Asian and Caucasian faces. These findings suggests that when there is no memory component involved in the task and when face appearance only differs in race-specifying information, own-race expertise does not result in better performance for own-race faces.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/39/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Lausanne, Switzerland},
event_name = {33rd European Conference on Visual Perception},
language = {en},
DOI = {10.1177/03010066100390S101},
author = {B{\"u}lthoff, I and Lee, RK and Wallraven, C and B{\"u}lthoff, HH}
}
@Poster { 6728,
title = {Race-specific norms for coding face identity and a functional role for norms},
journal = {Journal of Vision},
year = {2010},
month = {5},
volume = {10},
number = {7},
pages = {706},
abstract = {High-level perceptual aftereffects have revealed that faces are coded relative to norms that are dynamically updated by experience. The nature of these norms and the advantage of such a norm-based representation, however, are not yet fully understood. Here, we used adaptation techniques to get insight into the perception of faces of different race categories. We measured identity aftereffects for adapt-test pairs that were opposite a race-specific average and pairs that were opposite a ‘generic’ average, made by morphing together Asian and Caucasian faces. Aftereffects were larger following exposure to anti-faces that were created relative to the race-specific (Asian and Caucasian) averages than to anti-faces created using the mixed-race average. Since adapt-test pairs that lie opposite to each other in face space generate larger identity aftereffects than non-opposite test pairs, these results suggest that Asian and Caucasian faces are coded using race-specific norms. We also found that identification thresholds were lower when targets were distributed around the race-specific norms than around the mixed-race norm, which is also consistent with a functional role for race-specific norms.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/10/7/706.abstract},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Naples, FL, USA},
event_name = {10th Annual Meeting of the Vision Sciences Society (VSS 2010)},
language = {en},
DOI = {10.1167/10.7.706},
author = {Armann, RGM and Jeffery, L and Calder, A and B{\"u}lthoff, I and Rhodes, G}
}
@Poster { Bulthoff2009,
title = {Sex categorization is influenced by facial information about identity},
journal = {Perception},
year = {2009},
month = {8},
volume = {38},
number = {ECVP Abstract Supplement},
pages = {78},
abstract = {According to Bruce and Young's (1986 British Journal of Psychology 77 305 - 327) classic model of face recognition, sex-related information about a face is accessed independently of information about identity. Therefore familiarity with a face should not influence sex categorization. This issue of independence has remained controversial as it has been supported in some studies and questioned in others. Here we used faces that were presented in two conditions: sex-unchanged and sex-changed. Participants were very familiar with some of the identities. For all participants, the unchanged familiar faces presented congruent identity and sex information while the sex-changed familiar faces presented incongruent identity and sex information. Participants performed a sex categorization task on all familiar and unfamiliar faces presented in the unchanged and sex-changed condition. They were asked to ignore identity and base their responses solely on the sex appearance of the faces. Our results show that participants were slower and less correct for sex-changed than for unchanged familiar faces while those differences did not appear for unfamiliar faces. These results indicate that sex and identity are not independent as participants could not ignore identity information while doing a sex categorization task.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/38/1_suppl.toc},
event_place = {Regensburg, Germany},
event_name = {32nd European Conference on Visual Perception},
DOI = {10.1177/03010066090380S101},
author = {B{\"u}lthoff, I}
}
@Poster { 5943,
title = {From unsupervised to supervised categorization in vision and haptics},
year = {2009},
month = {7},
volume = {10},
number = {679},
pages = {172--173},
abstract = {Categorization studies have primarily focused on the visual percept of objects. But in every-day life humans combine percepts from different modalities. To better understand this cue combination and to learn more about the mechanisms underlying categorization, we performed different categorization tasks visually and haptically and compared the two modalities. All experiments used the same set of complex, parametrically-defined, shell-like objects based on three shape parameters (see figure and [Gaissert, N., C. Wallraven and H. H. B{\"u}lthoff: Analyzing perceptual representations of complex, parametrically-defined shapes using MDS. Eurohaptics 2008, 265-274]). For the visual task, we used printed pictures of the objects, whereas for the haptic experiments, 3D plastic models were generated using a 3D printer and explored by blindfolded participants using both hands.
Three different categorization tasks were performed in which all objects were presented to participants simultaneously. In an unsupervised task participants had to categorize the objects in as many groups as they liked to. In a semi-supervised task participants had to form exactly three groups. In a supervised task participants received three prototype objects (see figure) and had to sort all other objects into three categories defined by the prototypes. The categorization was repeated until the same groups were formed twice in a row. The amount of repetitions needed across modalities was the same, showing that the task was equally hard visually and haptically. For more detailed analyses we generated similarity matrices based on which stimulus was paired with which other stimulus. As a measure of consistency -- within and across modalities as well as within and across tasks -- we calculated cross correlations between these matrices (see figure). Correlations within modalities were always higher than across modalities. In addition, as expected, the more constrained the task, the more consistently participants grouped the stimuli. Critically, multi-dimensional scaling analysis of the similarity matrices showed that all three shape parameters were perceived visually and haptically in all categorization tasks, but that the weighting of the parameters was dependent on the modality. In line with our previous results, this demonstrates the remarkable robustness of visual and haptic processing of complex shapes.},
department = {Department B{\"u}lthoff},
web_url = {http://imrf.mcmaster.ca/IMRF/ocs/index.php/meetings/2009/paper/view/679},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {New York, NY, USA},
event_name = {10th International Multisensory Research Forum (IMRF 2009)},
language = {en},
author = {Gaissert, N and Wallraven, C and B{\"u}lthoff, I}
}
@Poster { 5693,
title = {Categorical perception of male and female faces and the single-route hypothesis},
journal = {Perception},
year = {2008},
month = {8},
volume = {37},
number = {ECVP Abstract Supplement},
pages = {117},
abstract = {Categorical perception (CP) has been demonstrated for face identity and facial expression, while conflicting results have been reported for sex. Furthermore, the question whether processing of sex and identity information is linked remains open. Based on extensive ratings of faces and sex morphs from our face database, we created 'controlled' male and female faces with similar perceived degrees of 'maleness' and 'femaleness'. We then examined CP of sex for these faces with classical discrimination and classification tasks using sex continua. Participants were naive (1), or had been familiarized with average faces of both sexes (2), or with the 'controlled' male and female faces (3). Our results confirm the lack of naturally occurring CP for sex in (1). Furthermore, they provide more evidence for the linked processing of sex and identity, as only participants in (3) showed clear CP. We found no evidence that familiarization with sex information (as given by average male and female faces) transfers to individual faces.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/37/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Utrecht, Netherlands},
event_name = {31st European Conference on Visual Perception},
language = {en},
DOI = {10.1177/03010066080370S101},
author = {Armann, R and B{\"u}lthoff, I}
}
@Poster { 5021,
title = {The effect of context in face and object recognition},
journal = {Perception},
year = {2007},
month = {8},
volume = {36},
number = {ECVP Abstract Supplement},
pages = {146},
abstract = {Whether recognition and categorization are parallel or serial processes remains controversial. To
address this, we investigated whether face recognition is influenced by task-irrelevant face categ-
ories. We examined the recognition of a target face presented in the context of other faces of
the same or different racial category using a same/different matching task. Caucasian partici-
pants were presented during learning with a set of six faces displaying one target face among
different numbers of same-race faces. Participants recognized Caucasian targets better when five
same-race faces rather than a single same-race face were present in the set, while this effect was
absent for Asian targets. Surprisingly, participants recognized Asian targets better in sets with
equal numbers of Asian and Caucasian context faces. Similar experiments, but with novel
objects, were conducted in which categories were defined by similarity or expertise. These factors
did not fully account for the context effects observed with faces. Overall, the results suggest
that face recognition and categorization interact but other factors such as task difficulty may
also affect face recognition.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/36/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Arezzo, Italy},
event_name = {30th European Conference on Visual Perception},
language = {en},
DOI = {10.1177/03010066070360S101},
author = {B{\"u}lthoff, I and Vuong, QC}
}
@Poster { 4882,
title = {Looking Down, Looking Up: Does Stature Influence Face Recognition?},
year = {2007},
month = {7},
volume = {10},
pages = {109},
abstract = {In the German population, men are on average 13 cm taller than women [1]. Smaller people,
many of them women, look at other faces from below (viewing angle) while tall people look
at others from above. The minimal distance between 2 persons not engaged in mutual gaze
is around 50 cm [2]. Thus, with regard to male and female average statures, in close-up situations,
the average viewing angle between males and females is around 13 deg. Do people
have therefore different “preferred” representations of faces depending on their stature? More
specifically, are tall and small people more efficient at processing face seen “from above” and
from “below” respectively? Furthermore, do observers have different “preferred” representations
of male and female faces because men are on average taller than women? To investigate
the influence of stature and sex on face recognition, we first investigated whether efficiency in a
sex classification task might be influenced by face orientation. To maximize stature differences
between participants, we tested two groups: small women (under 165cm) and tall men (over
180cm). If face representation is influenced by stature, we expect small women to be more
efficient (faster) at processing faces viewed as seen from below and vice-versa for tall men.
Furthermore, because of natural average stature differences between men and women, efficient
categorization of male and female faces might depend on their orientation. We used unfamiliar
male and female faces shown at pitch angles between -18 deg (looking downward) to +18 deg
(looking upward). We tested participants in a speeded sex classification task. Male and female
participants saw 220 faces one by one and had to classify them as male or female as fast as
possible. Classification accuracy was high (over 95\%). Analysis of reaction times does not
show any relation between stature of observer, sex of shown face and its pitch orientation, thus
suggesting that face processing with regards to sex is not influenced predominantly by stature
of observer or sex of presented face.},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=buelthoff01},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {10th T{\"u}binger Wahrnehmungskonferenz (TWK 2007)},
language = {en},
author = {B{\"u}lthoff, I and Wolf, T and Thornton, IM}
}
@Poster { 4881,
title = {Sex Matters When You Ask the Right Question: What Affects Eye Movements in Face Comparison Tasks?},
year = {2007},
month = {7},
volume = {10},
pages = {108},
abstract = {Knowing where people look in a face provides an objective insight onto the information entering
the visual system and into the cognitive processes involved in face perception. Eye-tracking
studies on face perception have mostly investigated observers’ viewing behavior when studying
single faces. However, in day-to-day situations, humans also compare faces or match a person’s
face to a photograph. During comparison, facial information remains visually accessible,
freeing observers from time and encoding constraints [1]. Here, we recorded eye movements of
human participants while they compared two faces presented simultaneously. We used (i) two
different tasks (discrimination or categorization), and (ii) faces differing either in identity or in
sex. In addition, we varied (iii) task difficulty, i.e. the similarity of the two faces in a pair. Eye
movements to previously defined areas of interest (AOIs) on the faces were analyzed in terms
of frequency, duration and the temporal pattern of fixations made. We found that the eyes were
fixated most often in the discrimination tasks (37\% of all fixations) but the nose in the categorization
task (34.5\%), while the total number of fixations increased with task difficulty. Faces
differing in sex were more difficult to discriminate than faces differing in identity (63\% versus
76\% correct responses), which was also reflected in more fixations to face pairs differing in
sex (14.4 versus 11.8 fixations per trial). With increasing task difficulty, fixations to only some
AOIs increased, in accordance with the literature (more to the eyes in the sex and more over
all areas in the identity discrimination tasks; [2]). Unexpectedly, we found a striking effect of
tasks on performance measures, as over 80\% of participants could detect the more feminine of
two faces (categorization task) even at the most similar level, but for the same face pairs their
performance in a discrimination task was less than 30\% correct. Another interesting finding
is that observers mostly compared the inner halves of the two faces of a pair, instead of the
corresponding features (e.g., the left eye of the left face with the left eye of the right face). This
viewing behavior remained the same in a control experiment where participants’ head was not
fixed. Quite surprisingly, female participants fixated significantly more often the eyes of the
face stimuli than male participants, but only when the sex of the faces was a relevant feature in
the task.},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk07/abstract.php?_load_id=armann01},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {10th T{\"u}binger Wahrnehmungskonferenz (TWK 2007)},
language = {en},
author = {Armann, R and B{\"u}lthoff, I}
}
@Poster { 4690,
title = {Sex matters when you ask the right question: What affects eye movements in face comparison tasks?},
journal = {Journal of Vision},
year = {2007},
month = {6},
volume = {7},
number = {9},
pages = {5},
abstract = {Eye-tracking studies on face perception have mostly investigated observer's eye movement behavior when studying single faces. However, in day-to-day situations, humans also compare faces or try to match a person's face to a photograph. During comparison, facial information remains visually accessible. This frees observers from time and encoding constraints (Galpin \& Underwood, 2005). Here, we present eye movement data of participants required to compare two faces that were presented side by side. We used (1) two different tasks (discrimination or categorization), and (2) two types of face stimuli: faces differing either in identity or in sex. In addition, we varied for (3) task difficulty i.e. the similarity of the two faces in a pair. Eye-fixations in predefined facial regions were recorded and analyzed, for example, with regards to their frequency and duration. Our findings reveal, for instance, that the eyes were fixated more often in the discrimination tasks (38\% of all fixations) than in the categorization task (29\%), while the total number of fixations increased significantly with increasing task difficulty (p < 0.001 in all cases, N=20). Faces differing in sex were more difficult to discriminate than faces differing in identity (63 \% versus 76 \% correct responses), which was reflected by increased fixations to face pairs that differed in sex (14.4 versus 11.8 fixations per trial). Unexpectedly, we found a striking effect of tasks on performance measures, as over 80 \% of participants could detect the more feminine of two faces (categorization task) even at the most similar level, but for the same face pairs their performance in a discrimination task was less than 30 \% correct. Viewing behavior of male and female participants differed, but only when the sex of the faces was relevant for the task.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/Poster_VSS_2007_[0].pdf},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/7/9/5/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Sarasota, FL, USA},
event_name = {7th Annual Meeting of the Vision Sciences Society (VSS 2007)},
language = {en},
DOI = {10.1167/7.9.5},
author = {Armann, R and B{\"u}lthoff, I}
}
@Poster { 5036,
title = {The role of surface and shape information in the other-race face effect},
journal = {Journal of Vision},
year = {2007},
month = {6},
volume = {7},
number = {9},
pages = {7},
abstract = {Both shape and surface dimensions play an important role in face (e.g. O'Toole et al., 1999) and race recognition (Hill et al., 1995). However, the relative contribution of these cues to other-race (OR) face recognition has not been investigated. Some facial properties may be diagnostic in one race but not in the other (e.g. Valentine, 1991). Observers of different races would rely on facial cues that are diagnostic for their own-race faces, a phenomenon which could partly explain our relative difficulty at recognizing OR faces at the individual level (the so-called other-race effect). Here, we tested this hypothesis by examining the relative role of shape and surface properties in the other-race effect (ORE). For this purpose, we used Asian and Caucasian faces from the MPI face database (Vetter \& Blanz, 1999) so that we could vary both shape and surface information, only shape information (in which the surface texture was averaged across individual faces of the same race), or only surface information (in which shape was averaged). The ORE was measured in Asian and Caucasian participants using an old/new recognition task. When faces varied along both shape and surface dimensions, Asians and Caucasians showed a strong ORE (i.e. a better recognition performance for same- than other-race faces). With faces varying along only shape dimensions, the ORE was no longer observed in Asians, but remained present in Caucasians. Finally, when presented with faces varying only along surface dimensions, the ORE was not found for Caucasians whereas it was present in Asians. These results suggest that the difficulty in recognizing OR faces for Asian observers can be partly due to their inability to discriminate among surface properties of OR faces, whereas ORE for Caucasian participants would be mainly due to their inability to discriminate among shape cues of OR faces.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/7/9/7/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Sarasota, FL, USA},
event_name = {7th Annual Meeting of the Vision Sciences Society (VSS 2007)},
language = {en},
DOI = {10.1167/7.9.7},
author = {Michel, C and Rossion, B and Hayward, W and B{\"u}lthoff, I and Vuong, Q}
}
@Poster { BulthoffN2006,
title = {Cross-modal interaction can modulate face distinctiveness},
journal = {Perception},
year = {2006},
month = {8},
volume = {35},
number = {ECVP Abstract Supplement},
pages = {204},
abstract = {We had shown that memory for a face can be influenced by the distinctiveness of an utterance to which it has been associated (B{\"u}lthoff and Newell, 2004 Perception 33 Supplement, 108). Furthermore, recognition of a face can be primed by a paired utterance, suggesting that there is a tight, cross-modal coupling between visual and auditory stimuli and that face distinctiveness can be influenced by cross-modal interaction with auditory stimuli like utterances. When instrumental sounds are used instead of utterances, the perceptual quality of auditory stimuli seemed also to affect memory for faces. Here we further investigated whether instrumental sounds can also prime face recognition. Our results show that this is not the case; arbitrary auditory stimuli do not prime recognition of faces. This suggests that utterances are easier to associate closely with faces than arbitrary sounds. We also investigated whether the observed priming effect of utterances might have been based on the use of different first names in each utterance. We repeated the priming experiment using the same utterances, but name information was removed. A significant priming effect was observed. Thus the semantic information related to the first name is not decisive for the priming effect of utterances on face recognition.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/35/1_suppl.toc},
event_place = {St. Petersburg},
event_name = {29th European Conference on Visual Perception},
DOI = {10.1177/03010066060350S101},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { 4059,
title = {Voices, not arbitrary sounds, prime the recognition of familiar faces},
journal = {Journal of Vision},
year = {2006},
month = {6},
volume = {6},
number = {6},
pages = {10},
abstract = {Our previous studies have shown that memory for a face can be affected by the distinctiveness of a voice to which it had been paired (B{\"u}lthoff \& Newell, ECVP2004). Moreover, we showed that voices can prime face recognition, suggesting a tight, cross-modal coupling between both types of stimuli. Further investigations however, seemed to suggest that non person-related audio stimuli could also affect memory for faces. For example, faces that had been associated with distinctive instrumental sounds were indeed better recognized in an old/new task than faces paired to typical sounds. Here we investigated whether these arbitrary sounds can also prime face recognition. Our results suggest that arbitrary audio stimuli do not prime recognition of faces. This finding suggests that attentional differences may have resulted in better recognition performance for faces paired to distinctive sounds in the explicit old/new task. Voices are easier to associate closely to faces. We also investigated whether
the voice priming effect found earlier might be based on the use of different first names in each audio stimulus, that is, whether the effect was based on semantic rather than perceptual information. We repeated the priming experiment using the same voice stimuli, but name information was removed. The results show that there is still a significant priming effect of voices to faces, albeit weaker than in the full voice experiment. The semantic information related to the first name helps but is not decisive for the priming effect of voices on face recognition.},
department = {Department B{\"u}lthoff},
web_url = {http://journalofvision.org/6/6/10/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Sarasota, FL, USA},
event_name = {6th Annual Meeting of the Vision Sciences Society (VSS 2006)},
language = {en},
DOI = {10.1167/6.6.10},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { 4829,
title = {Face Distinctiveness can be Modulated by Cross-Modal Interaction with Auditory Stimuli},
year = {2006},
month = {3},
volume = {9},
pages = {72},
abstract = {In this study we ask whether visually typical faces can become perceptually distinctive when
they are paired to auditory stimuli that are distinctive. In a first set of experiments (B{\"u}lthoff
\& Newell, ECVP 2004), we had investigated the effect of voice distinctiveness on face recognition.
Memory for a face can be influenced by the distinctiveness of an utterance to which
it has been associated. Furthermore, recognition of a familiar face can be primed by a paired
utterance. These findings suggest that there is a tight, cross-modal coupling between the faces
presented and the associated utterances and that face distinctiveness can be influenced by cross-modal
interaction with auditory stimuli like voices. In another set of experiment, we used instrumental
sounds instead of voices and showed that arbitrary auditory stimuli could also affect
memory for faces. Faces that had been paired with distinctive instrumental sounds were better
recognized in an old/new task than faces paired to typical instrumental sounds. Here we
investigated whether these instrumental sounds can also prime face recognition although these
auditory stimuli are not associated to faces naturally as voices are. Our results suggest that this
is not the case; arbitrary audio stimuli do not prime recognition of faces. This finding suggests
that attentional differences may have resulted in better recognition performance for faces paired
to distinctive sounds in the old/new task. It also suggests that utterances are easier to associate
closely to faces than arbitrary sounds. In a last set of experiments we investigated whether the
voice priming effect shown in the first set of experiments might be based on the use of different
first names in each utterance. Thus, we asked whether semantic rather than perceptual information
was determinant in the used utterances. We repeated the priming experiment using the
same voice stimuli, but name information was removed. The results show that there is still a
significant priming effect of voices to faces, albeit weaker than in the full voice experiment.
The semantic information related to the first name helps but is not decisive for the priming
effect of voices on face recognition.},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk06/abstract.php?_load_id=buelthoff01},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {9th T{\"u}bingen Perception Conference (TWK 2006)},
language = {en},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { 3301,
title = {Sensitivity to changes in identity, caricature and sex in face recognition},
year = {2005},
month = {2},
volume = {8},
pages = {124},
abstract = {It is known that we are quite accurate at judging the sex of unfamiliar faces [1]. Furthermore sex categorization is performed more rapidly, on average, than familiarity or identity decisions [2]. In one of our recent studies on face perception, with unfamiliar faces [3] we were surprised
to find that discrimination performance was much lower for faces differing in sex quality than when the facial features were morphed between two identities. Here, we investigated if this observation holds also for familiar faces. The motivation for this series of experiments was to
find out if memory of familiar faces was showing similar differences; participants being more inaccurate when they had to remember the specific femininity or masculinity of a well known face than when identity-related changes of facial features were involved. Participants had to
identify the veridical faces of familiar work colleagues among ten distractor faces that were morphing variations of the original faces. Distractor faces varied either in identity, caricature or sex. In the identity face sets, distractor faces were morphs between the original face and
unfamiliar faces mixed in different proportions. In the caricature face sets, distractors were different caricatures of the original face. Finally, in the sex face sets, distractor faces were different feminized and masculinized versions of the veridical face. Participants performed best when the original face was presented among identity distractors. They had a tendency to choose feature enhancing caricatures over the original faces in caricature sets. Participants were very poor at finding the original faces in the sex sets. Generally our findings with unfamiliar faces show that sex-related changes in facial features are less obvious to the observers than
identity-related changes. Furthermore our study on familiar faces suggests that we do not retain sex-related facial information in memory as accurately as identity-related information. These results have important implications for models of face recognition and how facial features are
represented in the brain.},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk05/programm.php},
editor = {B{\"u}lthoff, H. H. and Mallot, H. A. and Ulrich, R. and Wichmann, F. A.},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {8th T{\"u}bingen Perception Conference (TWK 2005)},
author = {B{\"u}lthoff, I}
}
@Poster { 3067,
title = {Interactions between audition and vision for face recognition},
journal = {Perception},
year = {2004},
month = {9},
volume = {33},
number = {ECVP 2004 Abstract Supplement},
pages = {108},
abstract = {We can recognise distinctive faces more easily than typical ones. We investigated whether this distinctiveness effect appears for visually typical faces when these faces have been associated with features that are distinctive in another sensory modality. Participants first learned a set of unfamiliar faces. During learning, half of these faces were paired with distinctive auditory stimuli and half with typical stimuli. In experiment 1, the auditory stimuli were voices. We found that recognition performance in a visual recognition test was significantly (p < 0.005) better for faces that had been paired with distinctive voices. In experiment 2, we tested whether voice information improved face recognition directly by association or whether distinctiveness effects were due to enhanced attention during learning. In a priming experiment, participants recognised a face significantly faster (p <0.05) when this face was preceded by its congruent voice. Thus the quality of auditory information can affect recognition in another modality like vision. In experiment 3, the stimuli consisted of non-speech sounds. In this experiment, we tested whether voices and faces represent a special case of cross-modal memory enhancement or whether this distinctiveness effect occurs also with more arbitrary associations. Recognition performance in a visual recognition test suggests that a similar effect is present.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/33/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
institution = {Max-Planck-Institut f{\"u}r biologische Kybernetik; Trinity College, Ireland, T{\"u}bingen, Germany;},
event_place = {Budapest, Hungary},
event_name = {27th European Conference on Visual Perception},
language = {en},
DOI = {10.1068/ecvp04a},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { 2631,
title = {Haptic Magnitude Estimates of Size for Graspable Shapes},
year = {2004},
month = {2},
volume = {7},
pages = {122},
abstract = {Studies of visual size perception with the method of magnitude estimation have shown a linear
relationship between actual sizes and magnitude estimates [1]. Similar studies for touch do
not yield unequivocal evidence for a linear relationship; in some cases, a positively accelerated
power function described best the relationship between stimulus sizes and estimates [2].
We have investigated haptic magnitude estimation for length in two haptic experiments with
different methods of haptic exploration (whole hand, finger span).
The haptic stimuli consisted of 15 rectangular shapes. The only difference from one shape
to another was the length of the horizontal side, which ranged from 40 mm to 68 mm in equal
intervals. For all shapes, the depth and height were 10 mm and 40 mm, respectively.
In the Multiple cues Experiment, blindfolded participants used their dominant hand to feel
each shape freely. The shape was presented fixed flat onto a support, so they could feel the
entire shape under their hand. The participants' task was to give a modulus-free magnitude
estimate for the horizontal side. All shapes were presented once in random order in each block.
In the Single cue Experiment, blindfolded participants were restricted to grasping the horizontal
side of a shape between the thumb and index finger of their dominant hand. Their task
was to give a magnitude estimate for the length of that side.
Magnitude estimates for side length could be fitted by a two-parameter linear function with
a high goodness-of-fit statistic in both experiments ($R^2 \approx .97$). Thus, when participants were
given a size range of 40 to 68 mm, their magnitude estimates increased linearly with each
physical increment, independently of the exploration method used.
Because of the small range of total size variation present in the shape set, we do not conclude
from our results that haptic magnitude estimation of unidimensional size is generally
linear. It should be noted that the present linear functions had a negative y-intercept and that
when a power function was fit to the data, the exponent was greater than 1.0 in both experiments,
and goodness-of-fit was also high. Our results suggest, however, that haptic perception
of size can safely be considered linear within this small part of the size continuum. These results
are important for considering further psychophysical studies with shapes within this size
range.},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk04/index.php},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {7th T{\"u}bingen Perception Conference (TWK 2004)},
language = {en},
author = {B{\"u}lthoff, I and Klatzky, RL and Newell, FN}
}
@Poster { 2630,
title = {Interaction between vision and audition in face recognition},
journal = {Abstracts of the Psychonomic Society},
year = {2003},
month = {11},
volume = {8},
pages = {57},
abstract = {Face studies have shown that distinctive faces are more easily recognized than typical faces in memory tasks. We investigated whether a cross-modal interaction between auditory and visual stimuli exists for face distinctiveness. During training, participants were presented with faces from two sets. In one set all faces were accompanied by characteristic auditory stimuli (d-faces). In the other set, all faces were accompanied by typical auditory stimuli (s-faces). Face stimuli were counterbalanced across auditory conditions. We measured recognition performance in an old/new recognition task. Face recognition alone was tested. Our results show that participants were significantly better (t(12) = 3.89, p< 0.005) at recognizing d-faces than s-faces in the test session. These results show that there is an interaction between different sensory inputs and that typicality of stimuli in one modality can be modified by concomitantly presented stimuli in other sensory modalities.},
department = {Department B{\"u}lthoff},
web_url = {http://c.ymcdn.com/sites/www.psychonomic.org/resource/resmgr/Annual_Meeting/Past_and_Future_Meetings/Abstracts03.pdf},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Vancouver, Canada},
event_name = {44th Annual Meeting of The Psychonomic Society},
author = {B{\"u}lthoff, I and Newell, FN and B{\"u}lthoff, HH}
}
@Poster { BulthoffN2003_2,
title = {Interaction between vision and speech in face recognition},
journal = {Journal of Vision},
year = {2003},
month = {10},
volume = {3},
number = {9},
pages = {825},
abstract = {Many face studies have shown that in memory tasks, distinctive faces are more easily recognized than typical faces. All these studies were performed with visual information only. We investigated whether a cross-modal interaction between auditory and visual stimuli exists for face distinctiveness. Our experimental question was: Can visually typical faces become perceptually distinctive when they are accompanied by voice stimuli that are distinctive? In a training session, participants were presented with faces from two sets. In one set all faces were accompanied by characteristic auditory stimuli during learning (d-faces: different languages, intonations, accents, etc.). In the other set, all faces were accompanied by typical auditory stimuli during learning(s-faces: same words, same language). Face stimuli were counterbalanced across auditory conditions. We measured recognition performance in an old/new recognition task. Face recognition alone was tested. Our results show that participants were significantly better (t(12) = 3.89, p< 0.005) at recognizing d-faces than s-faces in the test session. These results show that there is an interaction between different sensory inputs and that typicality of stimuli in one modality can be modified by concomitantly presented stimuli in other sensory modalities.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/3/9/825.abstract},
event_place = {Sarasota, FL, USA},
event_name = {Third Annual Meeting of the Vision Sciences Society (VSS 2003)},
DOI = {10.1167/3.9.825},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { BulthoffN2003,
title = {Cross-modal Aspect of Face Distinctiveness},
year = {2003},
month = {2},
volume = {6},
pages = {147},
abstract = {Various factors have been identified that influence
face recognition. Despite the diversity
of the studies on face recognition, mostly factors related to visual information have
been investigated so far. Among factors like facial motion, orientation and illumination,
the distinctiveness of faces has been extensively studied. It is well known that
distinctive faces are more easily recognized than typical faces in memory tasks. In our
study we have addressed the question whether factors that are not of visual nature
might also influence
face recognition. More specifically, our experimental question was:
can visually typical faces become perceptually distinctive when they are accompanied
by voice stimuli that are distinctive and can these faces therefore become in this way
more easily recognizable? In a training session, participants saw faces from two sets.
In one set all faces were accompanied by characteristic auditory stimuli during learning
(d-faces: different languages, intonations, accents, etc.). In the other set, all faces were
accompanied by typical auditory stimuli during learning (s-faces: same words, same language).
Face stimuli were counterbalanced across auditory conditions. Face recognition
alone was tested. We measured recognition performance in an old/new recognition task.
Our results show that participants were significantly better (t(12) = 3.89, p< 0.005) at
recognizing d-faces than s-faces in the test session. Thus, our results demonstrate the
perceptual quality of auditory stimuli (distinctive or typical) presented simultaneously
with face stimuli can modify face recognition performance in a subsequent memory
task and that typicality of stimuli in one modality can be modified by concomitantly
presented stimuli in other sensory modalities.},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk03/},
event_place = {T{\"u}bingen, Germany},
event_name = {6. T{\"u}binger Wahrnehmungskonferenz (TWK 2003)},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { Bulthoff2002,
title = {No categorical perception of face gender found with different discrimination tasks},
journal = {Journal of Vision},
year = {2002},
month = {11},
volume = {2},
number = {7},
pages = {620},
abstract = {Faces are easily categorized as male or female. But is this categorization done at the perceptual level? In previous studies (ECVP 2001), we found no categorical perception of gender for face stimuli using two discrimination tasks: either simultaneous same-different task or delayed matching-to-sample. This conflicts with results of another study using a different task (Campanella et al, Visual Cognition, 2001). Here we tested whether categorical perception of gender might become apparent if we used a discrimination task (sequential same-different task) more similar to that used by Campanella et al. We employed the same type of stimuli as in our previous experiments. The face stimuli were created by generating series of morphs between pairs of male and female 3D faces (gender continua). We also generated a gender continuum based on an average face. While gender-related information was present in this latter continuum, the stimuli lacked individual characteristic facial features that might induce identity-related categorical perception. If male and female faces belong to perceptually distinct gender categories, we would expect that two faces that straddle the gender boundary are more easily discriminated than two faces that belonged to the same gender category. In our previous experiments we never found any evidence of categorical perception for unfamiliar faces. Our present results confirm these findings. We found no evidence that participants could discriminate more easily between faces that straddle the gender category. Thus no categorical effect for face gender was revealed when sequential same-different discrimination task was used. The conflicting results obtained by both studies do not appear to be due to the different discrimination tasks employed.},
department = {Department B{\"u}lthoff},
web_url = {http://www.journalofvision.org/content/2/7/620.abstract},
event_place = {Sarasota, FL, USA},
event_name = {Second Annual Meeting of the Vision Sciences Society (VSS 2002)},
DOI = {10.1167/2.7.620},
author = {B{\"u}lthoff, I}
}
@Poster { 1131,
title = {Face gender is not perceived categorically},
year = {2002},
month = {2},
volume = {5},
pages = {84},
abstract = {In previous studies, we investigated whether male and female faces are perceived as distinct categories at the perceptual level and found no evidence of categorical perception using various discrimination tasks. In the present study we tested whether categorical perception of our stimuli might become apparent with yet another discrimination task, a sequential same-different task. The face stimuli used in all our experiments were derived from a database of 200 3D-laser scans of male and female faces (http://faces.kyb.tuebingen.mpg.de). Series of 3D-morphs were computed between individual male and female faces using the method of Blanz \& Vetter (1999). Additionally, all faces of the database were used to compute average male and female faces to generate another series of morphs which was devoid of any individual features. One prediction of categorical perception is that two face stimuli that belong to different gender categories should be easier to discriminate than two face stimuli belonging to the same gender. In all our studies including the present one, most face pairs that straddle the gender category were not more easily discriminated than same category pairs. Thus, despite the use of different discrimination tasks, we found no categorical effect for face gender with our face stimuli, even when exemplar specific effects are eliminated as it is the case with average faces. We will discuss these results and compare them to the conflicting results of Campanella et al. (2001) who carried out similar experiments with different morphing techniques},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk02/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {5. T{\"u}binger Wahrnehmungskonferenz (TWK 2002)},
author = {B{\"u}lthoff, I}
}
@Poster { 1111,
title = {Recognizing faces across different views: does caricaturing help?},
year = {2002},
month = {2},
volume = {5},
pages = {83},
abstract = {Caricatured faces are recognized as quickly and accurately as (and sometimes faster and
better than) the veridical versions (Benson \& Perrett, 1994). This “caricature effect” (CE)
has been demonstrated only for the frontal view of faces and only when the caricatures
were presented during the testing phase. First, we investigated whether the caricature
effect generalizes across changes in viewpoint (frontal, three-quarter, and profile). Second,
we examined the effect of presenting caricatured faces during the learning phase,
which we term the “reverse caricature effect” (RCE). Face recognition performance was
tested using two tasks: an old/new recognition paradigm and a sequential matching task.
Observers learned faces either in the frontal, three-quarter, or profile views, and were
tested with all three viewpoints. Half of the subjects participated in the CE condition
(learning with veridicals, testing with caricatures) and the other half of the subjects participated
in the RCE condition (learning with caricatures, testing with veridicals). The
caricatures were created using a 3D face morphing algorithm (Blanz \& Vetter, 1999).
Accuracy sensitivity was measured using d’. For the CE condition, caricatures were recognized
more accurately than veridical versions of the same face (mean d’: sequential
matching: caricature=1.15, veridical=1.09; Old/New: caricature=1.42, veridical=1.18).
This difference was (nearly) significant (sequential matching: F(2,58)=28, p<0.001; Old/
New: F(1, 162)=3.19, p=0.076). The interaction between face caricature level and viewpoint
(testing view and/or learning view) was not significant. This suggests that the caricature
effect generalizes across viewpoint. Similar results were found for the RCE condition.
These results are discussed within the framework of a face space model for
representing faces.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf1111.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk02/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {5. T{\"u}binger Wahrnehmungskonferenz (TWK 2002)},
author = {Knappmeyer, B and Tappe, C and B{\"u}lthoff, I}
}
@Poster { 1133,
title = {Gender, average heads and categorical perception},
journal = {Journal of Vision},
year = {2001},
month = {12},
volume = {1},
number = {3},
pages = {281},
abstract = {Background: Our visual system uses a sophisticated mechanism called categorical perception to discriminate between highly similar objects. Small perceptual differences are enhanced thus creating clear boundaries between groups of items. Purpose: Although it seems to be an easy task to classify people by gender, we wondered whether facial information was sufficient for this purpose. Using the morphing technique of Blanz and Vetter (1999) we built an average three-dimensional head model from a database of 200 laser-scanned faces. We constructed an artificial gender continuum of this average head and used the faces in categorization and discrimination experiments. Results: Gender information was present in our face set and was easily identified by the participants. However when we tested for the existence of a categorical effect, we found no evidence of enhanced discrimination for faces straddling the gender category boundary. In previous studies we found also no evidence of categorical perception when using faces of individuals (Buelthoff \& Newell, 2000). Our results with average faces confirm the previous findings and avoid any personal distinctive features that might interfere with the analysis. Furthermore, the use of average faces insures to have endpoint faces situated at approximately equal distance from the gender boundary. Conclusion: The absence of a categorical effect is surprising. Categorical perception has been shown repeatedly for other information displayed by faces (expressions and identity). Although we can tell quite reliably the sex of a face, there is no evidence of a distorted perceptual space for face gender. Furthermore our results show that categorical perception does not always exist when similar items are categorized, not even for an important category like faces. Clearly, despite its enormous importance for social interactions we have not learned to deal with the gender of faces very effectively.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf1133.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://journalofvision.org/1/3/281},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Sarasota, FL, USA},
event_name = {First Annual Meeting of the Vision Sciences Society (VSS 2001)},
DOI = {10.1167/1.3.281},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { 1134,
title = {Average faces and gender categories: no evidence of categorical perception},
journal = {Perception},
year = {2001},
month = {8},
volume = {30},
number = {ECVP Abstract Supplement},
pages = {54},
abstract = {Categorical perception is a sophisticated mechanism which allows our visual system to discriminate between highly similar objects. Perceptually, physical differences between groups of objects are enhanced as compared to equal-sized differences within a group of objects, thus creating clear boundaries between groups of items. Humans are expert in face recognition. Does a categorical perception mechanism help us to differentiate between male and female faces?
Using a three-dimensional morphing technique, we built an average.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/30/1_suppl/1.full.pdf+html},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Kuşadasi, Turkey},
event_name = {Twenty-fourth European Conference on Visual Perception},
DOI = {10.1177/03010066010300S101},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { 1114,
title = {The caricature effect across viewpoint changes in face perception},
year = {2000},
month = {11},
day = {16},
abstract = {The finding that caricatures are recognized more quickly and accurately than veridical faces has been demonstrated only for frontal views of human faces (e.g., Benson \& Perrett, 1994). In the present study, we investigated whether there is also a caricature effect for three-quarter and profile views. Furthermore, we examined what happens to the caricature advantage when generalizing across view changes. We applied a 3D caricature algorithm to laser scanned head models. In a sequential matching task, we systematically varied the view of the target faces (left/right profile, left/right three-quarter, full-face), the view of the test faces (left/right profile, left/right three-quarter, full-face) and the face type (anticaricature, veridical, caricature). The caricature effect was replicated for frontal views. We also found a clear caricature advantage for three-quarter and profile views. When generalizing across views, the caricature advantage was present for the majority of view change conditions. In a few conditions, there was an anticaricature advantage.},
department = {Department B{\"u}lthoff},
web_url = {http://www.opam.net/opam2000/OPAM_2000_Pro.pdf},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {New Orleans, LA, USA},
event_name = {8th Annual Workshop on Object Perception and Memory (OPAM 2000)},
author = {Cheng, CY and Knappmeyer, B and B{\"u}lthoff, I}
}
@Poster { 109,
title = {Investigating categorical perception of gender with 3-D morphs of familiar faces},
journal = {Perception},
year = {2000},
month = {8},
volume = {29},
number = {ECVP Abstract Supplement},
pages = {57},
abstract = {We could find no evidence for categorical perception of face gender using unfamiliar human faces (I B{\"u}lthoff et al, 1998 Perception 27 Supplement, 127a). Therefore we have investigated whether familiarising participants with the stimuli prior to testing might favour categorical perception.
We created artificial gender continua using 3-D morphs between laser-scanned heads. The observers had to classify all faces according to their gender in a classification task. If perception of face gender is categorical, we would expect participants to classify the morphs into two distinct gender categories. Furthermore, they should differentiate pairs of morphs that straddle the gender boundary more accurately than other pairs in a discrimination task. The participants were familiarised before testing with half of the faces used for creating the morphs. They could categorise most familiar and unfamiliar faces into distinctive gender categories. Thus, they could extract the gender information and use it to classify the images. On the other hand, we found no evidence of increased discriminability for the morph pairs that straddle the gender boundary. Apparently, observers did not perceive the gender of a face categorically, even when these faces were familiar to them.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf109.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/29/1_suppl/1.full.pdf+html},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Groningen, Netherlands},
event_name = {23rd European Conference on Visual Perception (ECVP 2000)},
DOI = {10.1177/03010066000290S101},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { 110,
title = {There is no categorical effect for the discrimination of face gender using 3D-morphs of laser scans of heads},
journal = {Investigative Ophthalmology \& Visual Science},
year = {2000},
month = {5},
volume = {41},
number = {4},
pages = {S225},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf110.pdf},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Fort Lauderdale, FL, USA},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 2000)},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Poster { 287,
title = {Geschlechtswahrnehmung von Gesichtern, die durch 3D-Morph-Verfahren erzeugt wurden},
year = {1999},
month = {2},
pages = {52},
abstract = {Zeigt die Bestimmung der Geschlechtszugeh{\"o}rigkeit von Gesichtern die charakteristischen Merkmale der kategorischen Wahrnehmung?
Durch ein automatisiertes 3D-Morph-Verfahren wurden aus 3D-Laser-scans von m{\"a}nnlichen und weiblichen K{\"o}pfen Misch-Gesichter synthetisiert. Das Morph-Verfahren erlaubt sowohl die Textur als auch die Form eines Gesichtes zu ver{\"a}ndern, so da{\ss} Pigmentation und Form zwischen m{\"a}nnlichen und weiblichen Gesichtern kontinuierlich angepa{\ss}t werden k{\"o}nnen. Andere geschlechtsspezifische Merkmale wie Frisur, Bart, Make-up oder Schmuck wurden weggelassen oder computergraphisch entfernt. Alle Gesichter wurden in frontaler oder seitlicher Ansicht (3/4-view) mit neutralem Gesichtsausdruck pr{\"a}sentiert. Versuchspersonen haben zuerst eine Diskriminationsaufgabe (XAB-Test) durchgef{\"u}hrt und danach wurde die subjektive Geschlechtsgrenze entlang des Morph-Kontinuums in einer Kategorisierungsaufgabe bestimmt.
Es zeigte sich f{\"u}r alle Versuchspersonen die typische Stufenfunktion in der Kategorisierungsaufgabe. Im XAB-Test war es jedoch f{\"u}r die Versuchspersonen nicht einfacher, ein Gesichtspaar zu unterscheiden, das durch die putative kategorische Geschlechtsgrenze getrennt war als f{\"u}r Gesichtspaare an dem mehr weiblichen oder m{\"a}nnlichen Ende des Morph-Kontinuums.
Unsere Experimente zeigen, da{\ss} das Geschlecht eines Gesichts nicht kategorisch wahrgenommen wird.},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/pdf287.pdf},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk99/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {2. T{\"u}binger Wahrnehmungskonferenz (TWK 99)},
author = {B{\"u}lthoff, I and Newell, FN and Vetter, T}
}
@Poster { BulthoffNVB1998,
title = {Gender perception of 3-D head laser scans},
journal = {Perception},
year = {1998},
month = {8},
volume = {27},
number = {ECVP Abstract Supplement},
pages = {127},
abstract = {We investigated whether the judgment of face gender shows the typical characteristics of categorical perception. As stimuli we used images of morphs created between pairs of male/female 3-D head laser scans. In experiment 1, texture and shape were morphed between both faces. In experiment 2, either the average texture of all faces was mapped onto the shape continuum between the two faces or we mapped the texture continuum between each face pair onto an average shape face. Thus, either the shape or the texture remained constant in any one condition. The subjects viewed these morphs first in a discrimination task (XAB) and then in a categorisation task which was used to locate the subjective gender boundary between each male/female face pair. Although we found that subjects could categorise the face images by their gender in the categorisation task and that texture alone is a better gender indicator than shape alone, the subjects did not discriminate more easily between face images situated at the category boundary in any of our discrimination experiments. We argue that we do not perceive the gender of a face categorically and that more cues are needed to decide the gender of a person than those provided by the faces only.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/27/1_suppl.toc},
event_place = {Oxford, UK},
event_name = {21st European Conference on Visual Perception},
DOI = {10.1177/03010066980270S101},
author = {B{\"u}lthoff, I and Newell, FN and Vetter, T and B{\"u}lthoff, HH}
}
@Poster { 1123,
title = {Gender perception of 3D head laser scans},
journal = {Perception},
year = {1998},
month = {8},
volume = {27},
number = {ECVP Abstract Supplement},
pages = {127},
abstract = {We investigated whether the judgment of face gender shows the typical characteristics of categorical perception. As stimuli we used images of morphs created between pairs of male/female 3-D head laser scans. In experiment 1, texture and shape were morphed between both faces. In experiment 2, either the average texture of all faces was mapped onto the shape continuum between the two faces or we mapped the texture continuum between each face pair onto an average shape face. Thus, either the shape or the texture remained constant in any one condition. The subjects viewed these morphs first in a discrimination task (XAB) and then in a categorisation task which was used to locate the subjective gender boundary between each male/female face pair. Although we found that subjects could categorise the face images by their gender in the categorisation task and that texture alone is a better gender indicator than shape alone, the subjects did not discriminate more easily between face images situated at the category boundary in any of our discrimination experiments. We argue that we do not perceive the gender of a face categorically and that more cues are needed to decide the gender of a person than those provided by the faces only.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/27/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Oxford, UK},
event_name = {21st European Conference on Visual Perception},
DOI = {10.1177/03010066980270S101},
author = {B{\"u}lthoff, I and Newell, FN and Vetter, T and B{\"u}lthoff, HH}
}
@Poster { 1065,
title = {Perception of a camouflaged point-light walker: a differential priming effect},
journal = {Perception},
year = {1998},
month = {8},
volume = {27},
number = {ECVP Abstract Supplement},
pages = {123},
abstract = {Recently, we showed that recovery of a priori known structure from biological motion leveled off with changing display orientation (eg Pavlova and Sokolov, 1997 Perception 26 Supplement, 92). How does image-plane rotation of a prime affect detection of a camouflaged point-light walker? At each of five randomly presented display orientations between upright and inverted (0\(^{\circ}\), 45\(^{\circ}\), 90\(^{\circ}\), 135\(^{\circ}\), and 180\(^{\circ}\)), viewers saw a sequence of displays (each display for 1 s). Half of them comprised a camouflaged point-light walker, and half a 'scrambled-walker' mask. In a confidence-rating procedure, observers judged whether a walker was present. Prior to each experimental sequence, they were primed (for 10 s) either with an upright-, 45\(^{\circ}\)-, 90\(^{\circ}\)-, or 180\(^{\circ}\)-oriented sample of the walker. Pronounced priming effects were found only with an upright-oriented prime: it improved detectability for the same-oriented displays, and to a lesser extent for 45\(^{\circ}\). With 45\(^{\circ}\)-prime, sensitivity for 0\(^{\circ}\)-, 45\(^{\circ}\)-, and 90\(^{\circ}\)-oriented displays was higher than for 135\(^{\circ}\) and 180\(^{\circ}\). However, with 90\(^{\circ}\)- and 180\(^{\circ}\)-primes ROC curves for all orientations were situated close to one another. These findings indicate that the priming effect in biological motion is partly independent of the relative orientation of priming and primed displays. Moreover, it occurs only if a prime corresponds to a limited range of deviations from upright orientation within which display is spontaneously recognisable despite a discrepancy between event kinematics and dynamics (Pavlova, 1996 Perception 25 Supplement, 6). The primacy of dynamic constraints in the perception of structure from biological motion is discussed.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/27/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Oxford, UK},
event_name = {21st European Conference on Visual Perception},
DOI = {10.1177/03010066980270S101},
author = {Pavlova, MA and B{\"u}lthoff, I and Sokolov, AN}
}
@Poster { 1125,
title = {Effects of shape and texture on the perceptual categorization of gender in faces},
journal = {Investigative Ophthalmology \& Visual Science},
year = {1998},
month = {5},
volume = {39},
number = {4},
pages = {173},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Fort Lauderdale, FL, USA},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1998)},
author = {Newell, FN and B{\"u}lthoff, I and Vetter, T and B{\"u}lthoff, HH}
}
@Poster { 1124,
title = {Is the gender of a face categorically perceived?},
journal = {Investigative Ophthalmology \& Visual Science},
year = {1998},
month = {5},
volume = {39},
number = {4},
pages = {171},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Fort Lauderdale, FL, USA},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1998)},
author = {B{\"u}lthoff, I and Newell, FN and Vetter, T and B{\"u}lthoff, HH}
}
@Poster { 283,
title = {Masking a point-light walker},
year = {1998},
month = {2},
pages = {120},
abstract = {In spite of potential perceptual ambiguity of a point-light walking figure, with upright display orientation observers can readily recover the invariant structure from biological
motion. However, regardless of the same low-level relations between moving dots within upright and inverted orientation, perception of a point-light walker is dramatically impeded with 180\(^{\circ}\)-display inversion. Spontaneous recognition was found to improve abruptly with changing display orientation from inverted to upright (Pavlova, 1996, Perception 25, Suppl.). This evidence implies that the visual system implements additional
processing constraints for the unambiguous interpretation of biological motion.
We used a masking paradigm to study the processing constraints in biological motion perception. At each of randomly presented five orientations (0\(^{\circ}\), 45\(^{\circ}\), 90\(^{\circ}\), 135\(^{\circ}\), and 180\(^{\circ}\)), viewers saw a sequence of 210 displays. Half of them comprised a canonical 11 point-light walker, and half a partly distorted walker, in which rigid pair-wise connections between moving dots were perturbed. A 66-dot “scrambled-walker” mask camouflaged both figures. Prior to each experimental sequence, a sample of a canonical walker in respective orientation was demonstrated. Observers judged whether a canonical figure was present. A jackknife estimation of the ROC parameters indicated that detectability leveled off with changing orientation from upright to 135\(^{\circ}\), and then slightly increased to display inversion. However, even with 135\(^{\circ}\) and 180\(^{\circ}\) it was above chance. For orientations 0\(^{\circ}\), 45\(^{\circ}\) and 90\(^{\circ}\), perceptual learning to detect a canonical walker proceeded rather
rapidly in the course of the experiment.
Comparison with the data on spontaneous recognition of biological motion suggests that display orientation affects bottom-up processing of biological motion more strongly than
top-down.
We suppose that some processing constraints (such as axis-of-symmetry, dynamic constraints) in perception of biological motion be hierarchically nested. Dynamic constraints appear to be the most powerful: the highest detectability was found with upright orientation.
While with changing orientation these constraints lose their strength, other processing constraints are getting more influential. For instance, the lower sensitivity for 135\(^{\circ}\) as compared to 180\(^{\circ}\) might be accounted for by the axis-of-symmetry constraint that is implemented by the visual system at 180\(^{\circ}\). Likewise, due to the inefficiency of this constraint, biological motion pattern is perceived as more multistable with 90\(^{\circ}\)-150\(^{\circ}\), as compared to 180\(^{\circ}\) display orientation.},
department = {Department B{\"u}lthoff},
web_url = {http://www.twk.tuebingen.mpg.de/twk98/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {1. T{\"u}binger Wahrnehmungskonferenz (TWK 98)},
author = {Pavlova, MA and Sokolov, AN and B{\"u}lthoff, I}
}
@Poster { 568,
title = {Interdependence of feature dimensions in the representation of 3D objects},
journal = {Investigative Ophthalmology \& Visual Science},
year = {1996},
month = {4},
volume = {37},
number = {3},
pages = {S1125},
abstract = {Purpose. The dimensions of the representation space of 3D objects may be independent, if nonaccidental - generic or qualitative shape contrasts serve as the distinguishing features. Alternatively, the dimensions can be interdependent, as predicted by some theories that postulate metric feature-space representations. To explore this issue, we studied human performance in forced-choice classification of objects composed of 4 geon-like parts, emanating from a common center. Methods. The two class prototypes were distinguished by qualitative contrasts (cross-section shape; bulge/waist), and by metric parameters (degree of bulge/waist, taper ratio). Subjects were trained to discriminate between the two prototypes (shown briefly, from a number of viewpoints, in stereo) in a 1-interval forced-choice task, until they reached a 90\% correct-response performance level. Subsequent trials involved both original and modified versions of the prototypes; the latter were obtained by varying the metric parameters both orthogonally (ORTHO) and in parallel (PARA) to the line connecting the prototypes in the parameter space. Results. 8 out of 11 subjects succeeded to learn the task within the allotted time. For these subjects, the error rates increased progressively with the parameter-space displacement between the stimulus and the corresponding prototype. The effect of ORTHO displacement was significant: F(1, 68) = 3.6, p < 0.06. There was also a hint of a marginal PARA displacement effect: F(1, 68) = 1.9, p = 0.17 Conclusions. Theories that postulate exclusive reliance on qualitative contrasts (such as Biederman's Recognition By Components) predict near-perfect discrimination performance for stimuli derived from the prototypes both by PARA and by ORTHO parameter-space displacement. Our results contradict this prediction, and support the notion of a metric representation space, in which any displacement away from the familiar region incurs performance costs.},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Fort Lauderdale, FL, USA},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology},
author = {Edelman, S and B{\"u}lthoff, HH and B{\"u}lthoff, I}
}
@Poster { 566,
title = {Top-down influence of recognition on stereoscopic depth perception},
journal = {Investigative Ophthalmology \& Visual Science},
year = {1996},
month = {4},
volume = {37},
number = {3},
pages = {1125},
abstract = {Purpose. Last year we demonstrated that the recognition of biological motion sequences is consistent with a view-based recognition framework. We found that anomalies in the depth structure of 3D objects had an intriguing lack of influence on subject ratings of its figural goodness. In the present work, we attempt to explain this result by showing a strong top-down influence from high-level vision (object recognition) on early vision (stereoscopic depth perception). Methods. We used biological motion sequences of the kind first described by Johansson (Percep. \& Psychophysics, 14, 201-211, 1973) to study the perception of 3D structure of human-like versus randomly moving dots displayed in stereo. The depth structure of the human sequence was altered by adding controlled amounts of depth noise (that left the 2D projections largely unchanged). ''Random'' sequences were created by adding x-y positional noise to the ''Human'' sequences. In a 2AFC task, participants had to decide whether 3 randomly chosen dots from stereoscopically displayed dot motion sequence appeared at the same distance from the observer. Results. Subject performance was significantly (p < 0.005) better with ''random'' sequences than with ''human'' ones. In a human sequence triples drawn from the same limb were often perceived as being in one depth plane irrespective of their actual ''distorted'' 3D configuration. Conclusions. Those results indicate the existence of top-down object-specific influences that suppress the perception of deviations from the expected 3D structure in a motion sequence. The absence of such an influence for novel structures might account for subjects' better performance with the random sequences.},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Fort Lauderdale, FL, USA},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1996)},
author = {B{\"u}lthoff, I and Sinha, P and B{\"u}lthoff, HH}
}
@Poster { 1120,
title = {Recognizing biological motion sequences},
journal = {Perception},
year = {1995},
month = {8},
volume = {24},
number = {ECVP Abstract Supplement},
pages = {112},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {T{\"u}bingen, Germany},
event_name = {18th European Conference on Visual Perception},
author = {B{\"u}lthoff, I and Sinha, P}
}
@Poster { 1122,
title = {View-based representations for biological motion sequences},
journal = {Investigative Ophthalmology \& Visual Science},
year = {1995},
month = {5},
volume = {36},
number = {4},
pages = {S417},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Fort Lauderdale, FL, USA},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1995)},
author = {Sinha, P and B{\"u}lthoff, HH and B{\"u}lthoff, I}
}
@Poster { 680,
title = {General lighting can overcome accidental viewing},
journal = {Investigative Ophthalmology \& Visual Science},
year = {1994},
month = {5},
volume = {35},
number = {4},
pages = {1741},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Sarasota, FL, USA},
event_name = {Annual Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1994)},
author = {B{\"u}lthoff, I and Kersten, D and B{\"u}lthoff, HH}
}
@Poster { 837,
title = {Pharmacological inversion of directional specificity in movement detectors},
journal = {Investigative Ophthalmology \& Visual Science},
year = {1985},
month = {5},
volume = {26},
number = {3},
pages = {56},
department = {Department G{\"o}tz},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Sarasota, FL, USA},
event_name = {Annual Spring Meeting of the Association for Research in Vision and Ophthalmology (ARVO 1985)},
author = {B{\"u}lthoff, HH and B{\"u}lthoff, I}
}
@Poster { 844,
title = {Umkehrung der Bewegungs- und Objektwahrnehmung durch einen GABA-Antagonisten bei Fliegen},
journal = {Verhandlungen der Deutschen Zoologischen Gesellschaft 1985},
year = {1985},
month = {5},
pages = {223},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/umkehrung_der_bewegungs_und_objektwahrnehmung_durch_einen_gaba_antagonisten_bei_fliegen_844[0].pdf},
department = {Department G{\"o}tz},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Wien, Austria},
event_name = {78. Jahresversammlung der Deutschen Zoologischen Gesellschaft},
author = {B{\"u}lthoff, HH and B{\"u}lthoff, I}
}
@Poster { 840,
title = {Beeinflussung der Bewegungsdetektion durch Neuropharmaka},
journal = {Verhandlungen der Deutschen Zoologischen Gesellschaft 1984},
year = {1984},
month = {6},
pages = {276},
url = {http://www.cyberneum.de/fileadmin/user_upload/files/publications/beeinflussung_der_bewegungsdetektion_durch_neuropharmaka_840[0].pdf},
department = {Department G{\"o}tz},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Giessen, Germany},
event_name = {77. Jahresversammlung der Deutschen Zoologischen Gesellschaft},
author = {B{\"u}lthoff, HH and B{\"u}lthoff, I and Schmid, A}
}
@Conference { Bulthoff2013_10,
title = {What is important in a face? Identity, ethnicity and sex-related information},
year = {2013},
month = {12},
day = {12},
abstract = {We are very good at classifying familiar and unfamiliar faces in terms of their race or sex, but compared to the robust identification of familiar faces, discriminating unfamiliar faces, especially other-race faces, is more difficult (other-race effect). In this talk, I will present three studies investigating what is important in a face for race classification and person identification. First we investigated what gives a face its perceived ethnicity. To this end, mixed-race faces were created by embedding one facial feature (e.g. Caucasian mouth) into the face of the other ethnicity (e.g. Asian face). The perceived ethnicity of these mixed-race faces was assessed in a classification task. The eyes and the texture (skin) proved to be major determinants of ethnicity for Asian and Caucasian participants. Second, we examined what is at the base of the other-race effect. We dissociated ethnicity from identity information by creating Asian and Caucasian faces that shared the same identity (e.g. making a Caucasian face look more Asian), and tested the other-race effect while controlling identity-related facial information. Participants showed equal race discrimination performance for same- and other-race faces. Thus no other-race effect appeared when ethnicity was the only varying factor between the test faces, suggesting that the other-race effect cannot be attributed to face race per se. Finally, we tested what type of facial information is most relevant for the identification of familiar faces. We created both sex-morphs and identity-morphs of very familiar faces, and asked participants to pick the original familiar face among its sex- or identity-morphs. We found a better performance for identity- than sex-manipulated faces, indicating that sex-related facial information is represented less accurately than identity-related information. The implications of these results for models of face representation will be discussed.},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
event_place = {Kaiserslautern, Germany},
event_name = {Technische Universit{\"a}t Kaiserslautern, Fachbereich Sozialwissenschaften: Forschungskolloquium WS 2013/14},
author = {B{\"u}lthoff, I}
}
@Conference { ZhaoB2013_3,
title = {The other-race effect in face recognition is sensitive to face format at encoding},
year = {2013},
month = {11},
day = {14},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {http://www.opam.net/opam2013/program.php},
event_place = {Toronto, ON, Canada},
event_name = {21st Annual Conference on Object Perception, Attention, and Memory (OPAM 2013)},
author = {Zhao, M and B{\"u}lthoff, I}
}
@Conference { Zhao2013,
title = {The other-race effect in face recognition is sensitive to face format at encoding},
year = {2013},
month = {11},
day = {6},
abstract = {People recognize own race faces better than those from other races. This other race effect in face recognition has been attributed to differences in holistic processing (Michel, et al., 2006; Tanaka, et al., 2004), in contact (Hancock \& Rhodes, 2008; Rhodes et al., 2009), and in the motivation to individualize faces (Hugenberg, et al., 2010).
Here I would like to present two studies that tested whether the other race effect is dependent upon the relative engagement of holistic and feature processing at encoding. We manipulated face format at encoding so that the holistic processing was either disrupted or completely removed. The results showed that the other race effect observed under normal face encoding was either eliminated or reversed (i.e., an other race advantage). These results provide a strong support for an encoding dependent account
of the other race effect, which might also underlie the effects of racial contact and face individualization on the other race effect observed in prior research.},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
web_url = {http://www.psychology.hku.hk/index.php?id=1454},
event_place = {Hong Kong, China},
event_name = {The University of Hong Kong: Department of Psychology Seminar},
author = {Zhao, M and B{\"u}lthoff, I}
}
@Conference { Bulthoff2013_6,
title = {Wie viel Wahrheit steckt in der Wahrnehmung? / Quelle
v{\'e}rit{\'e} se cache dans la perception?},
year = {2013},
month = {5},
day = {31},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
event_place = {Bern, Switzerland},
event_name = {Symposium der Schweizerischen Gesellschaft f{\"u}r Histologietechnik},
author = {B{\"u}lthoff, I}
}
@Conference { Bulthoff2012_15,
title = {Face Perception: Using a morphable face model to determine what makes a face look Asian or Caucasian and what makes a face attractive and why?},
year = {2012},
month = {10},
day = {24},
abstract = {For German observers, Koreans look far more similar to each other than Germans do and vice-versa. This phenomenon is referred to as the other-race effect (ORE). So far, this ORE was described in tasks involving faces that did not differ only in ethnicity, but also in identity. In the first study that I will present, we dissociated ethnicity from identity information to create pairs of faces that share similar identity information but differ in ethnicity. For each face pair, participants reported which face looked more Asian or more Caucasian. We tested participants from Korea and Germany. Both groups of participants showed equal performance for same-race (high expertise) and other-race pairs (low expertise). Thus they showed no evidence of an other-race effect when ethnicity was the only varying factor between the faces to compare. Participants’ cultural background, however, affected their eye movement strategy. In our second study about ethnicity, mixed-race (Asian and Caucasian) faces were created by embedding one facial feature of one ethnicity (e.g. Caucasian mouth) in a face of the other ethnicity (e.g. Asian face). The influence of each exchanged facial feature on the ethnicity perception for the face it was embedded in was assessed in an ethnicity classification task. The results show that the eyes and the texture (skin) are major determinants of ethnicity classification for both Asian and Caucasian observers. In the last part of my presentation, I will talk more generally about what makes a face attractive and why.},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
web_url = {http://cse.snu.ac.kr/en/node/5125},
event_place = {Seoul, South Korea},
event_name = {Seoul National University: Department of Computer Science and Engineering},
author = {B{\"u}lthoff, I}
}
@Conference { KimESBW2012,
title = {Mapping the other-race-effect in face recognition using a three-experiment test battery},
journal = {i-Perception},
year = {2012},
month = {7},
day = {15},
volume = {3},
number = {9},
pages = {711},
abstract = {The fact that people are better at recognizing faces of their own race than others is called the other-race-effect (ORE). Most studies use only a single test to map and determine the characteristics of the ORE, however. Here, we investigated how two groups of fifteen age-matched Korean and German participants recognize Asian and Caucasian faces with three experiments as part of testing a new battery for characterizing face-processing performance. Participants first underwent the standard Cambridge face memory test in which they had to learn Caucasian target faces at varying noise levels which then were to be recognized in a forced-choice task. In this task, German participants performed significantly better than Koreans (83\% versus 72\%). The second experiment used a standard old-new recognition task with 20 Caucasian and 20 Asian faces (courtesy of the tarrlab@CMU). Here, Koreans were better with Asian faces (d’-difference=1.23) whereas Germans only showed a tendency towards an ORE (d’-difference=0.44). In the third experiment, participants had to rate the similarity of Caucasian face pairs which varied parametrically along featural and configural dimensions using the morphable faces from the MPI face-database. Here, we found that Korean participants were significantly less sensitive to featural changes than German participants. In conclusion, we were able to demonstrate an ORE for most of our experimental conditions. Interestingly, data from the third experiment suggests that the ORE may be due more to lessened sensitivity to featural than to configural processing for other-race faces. Future studies will extend this new test battery to prosopagnosics.
Acknowledgement: This research was supported by the World Class University (WCU) program through the National Research Foundation of Korea funded by the Ministry of Education, Science, and Technology (R31-1008-000-10008-0).},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {http://i-perception.perceptionweb.com/journal/I/article/if711},
event_place = {Incheon, South Korea},
event_name = {8th Asia-Pacific Conference on Vision (APCV 2012)},
DOI = {10.1068/if711},
author = {Kim, BR and Esins, J and Schultz, J and B{\"u}lthoff, I and Wallraven, C}
}
@Conference { Bulthoff2012_13,
title = {Faces and beauty},
year = {2012},
month = {6},
day = {7},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
event_place = {Radebeul, Germany},
event_name = {10th Anniversary Year of Cognitec Systems GmbH},
author = {B{\"u}lthoff, I}
}
@Conference { Bulthoff2012_14,
title = {Was macht ein Gesicht h{\"u}bsch?},
year = {2012},
month = {2},
day = {9},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
web_url = {http://neuroschool-tuebingen-schuelerlabor.de/fileadmin/user_upload/Dokumente/lab/Lehrerfortbildung_2012.pdf},
event_place = {T{\"u}bingen, Germany},
event_name = {Lehrerfortbildung des Sch{\"u}lerlabors Neurowissenschaften: {\"A}sthetische Empfindungen, Emotionen und neuronale Aktivit{\"a}t},
author = {B{\"u}lthoff, I}
}
@Conference { BulthoffAWB2011,
title = {Investigating the other-race effect in different face recognition tasks},
journal = {i-Perception},
year = {2011},
month = {7},
volume = {2},
number = {4},
pages = {355},
abstract = {Faces convey various types of information like identity, ethnicity, sex or emotion. We investigated whether the well-known other-race effect (ORE) is observable when facial information other than identity varies between test faces. First, in a race comparison task, German and Korean participants compared the ethnicity of two faces sharing similar identity information but differing in ethnicity. Participants reported which face looked more Asian or Caucasian. Their behavioral results showed that Koreans and Germans were equally good at discriminating ethnicity information in Asian and Caucasian faces. The nationality of participants, however, affected their eye-movement strategy when the test faces were shown sequentially, thus, when memory was involved. In the second study, we focused on ORE in terms of recognition of facial expressions. Korean participants viewed Asian and Caucasian faces showing different facial expressions for 100ms to 800ms and reported the emotion of the faces. Surprisingly, under all three presentation times, Koreans were significantly better with Caucasian faces. These two studies suggest that ORE does not appear in all recognition tasks involving other-race faces. Here, when identity information is not involved in the task, we are not better at discriminating ethnicity and facial expressions in same race compared to other race faces.},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {http://i-perception.perceptionweb.com/journal/I/volume/2/article/ic355},
institution = {Max Planck Institute for Biological Cybernetics},
event_place = {Hong Kong},
event_name = {7th Asia-Pacific Conference on Vision (APCV 2011)},
DOI = {10.1068/ic355},
author = {Lee, RK and B{\"u}lthoff, I and Armann, R and Wallraven, C and B{\"u}lthoff, HH}
}
@Conference { Bulthoff2011_10,
title = {Interplay between identity and sex recognition in familiar faces},
year = {2011},
month = {6},
day = {20},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
event_place = {Genova, Italy},
event_name = {Workshop ``Yet another journey through computation''},
author = {B{\"u}lthoff, I}
}
@Conference { 6726,
title = {Faces are represented relative to race-specific norms},
journal = {Perception},
year = {2010},
month = {8},
volume = {39},
number = {ECVP Abstract Supplement},
pages = {155},
abstract = {Recent models of face perception often adopt a framework in which faces are represented as points in a multidimensional space, relative to the average face that serves as a norm. Faces share many visual properties and could be encoded in one face space against one single norm. However, certain face properties may result in grouping of similar faces. How faces might be ‘subclassified’ in face space remains thus to be determined. We studied the processing of faces of different races, using high-level aftereffects, where exposure to one face systematically distorts the perception of a subsequently viewed face towards the ‘opposite’ identity in face space. We measured identity aftereffects for adapt-test pairs that were opposite race-specific (Asian and Caucasian) averages and pairs that were opposite a ‘generic’ average (both races morphed together). Aftereffects were larger for race-specific than for generic anti-faces. Since adapt-test pairs that lie opposite each other in face space generate larger aftereffects than non-opposite test pairs, these results suggest that Asian and Caucasian faces are coded using race-specific norms. Moreover, identification (at low identity strength) of the target faces was easier around the race-specific norms than around the generic norm, indicating that norms also have a functional role in face processing.},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {http://pec.sagepub.com/content/39/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
institution = {Max Planck Institute for Biological Cybernetics},
event_place = {Lausanne, Switzerland},
event_name = {33rd European Conference on Visual Perception},
language = {en},
DOI = {10.1177/03010066100390S101},
author = {Armann, RGM and Jeffery, L and Calder, AJ and B{\"u}lthoff, I and Rhodes, G}
}
@Conference { Bulthoff2010_2,
title = {Interplay between identity and sex recognition in familiar faces},
year = {2010},
month = {6},
day = {18},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
web_url = {http://www.zkw.uni-bremen.de/veranstaltungen.php?arc=1\&lang=de},
event_place = {Bremen, Germany},
event_name = {Universit{\"a}t Bremen: Zentrum f{\"u}r Kognitionswissenschaften (ZKW)},
author = {B{\"u}lthoff, I}
}
@Conference { Bulthoff2010,
title = {Die Wechselwirkung von Identit{\"a}t und Geschlecht bei der Gesichtswahrnehmung},
year = {2010},
month = {6},
pages = {16},
abstract = {Wir untersuchten die Wechselwirkung von identit{\"a}ts- und geschlechtsspezifischen Informationen in der Gesichtswahrnehmung. In Experiment 1 war es die Aufgabe der Probanden, die Originalversion eines bekannten Gesichts neben einer Anzahl modifizierter Versionen dieses Gesichts auszuw{\"a}hlen. Diese Aufgabe war leichter, wenn das Originalgesicht zusammen mit identit{\"a}tsmodifizierten statt mit geschlechtsmodifizierten Versionen des Originalgesichts pr{\"a}sentiert wurde, was den Schluss nahelegt, dass geschlechtsspezifische Informationen nicht akkurat im Ged{\"a}chtnis abgelegt werden. In Experiment 2, modifizierten wir das Geschlecht einer Reihe von Gesichtern, indem wir weibliche Gesichter in m{\"a}nnliche Gesichter transformierten. Probanden hatten gr{\"o}ssere Schwierigkeiten, diese geschlechtsmodifizierten Gesichter als m{\"a}nnliche Gesichter zu klassifizieren, wenn ihnen das Originalgesicht bekannt war. Im Gegensatz zum klassischen Modell der Gesichtsverarbeitung nach Bruce \& Young (1986), weisen unsere Daten darauf hin, dass bei Gesichtern geschlechtsspezifische Informationen nicht unabh{\"a}ngig von identit{\"a}tsspezifischen Informationen verarbeitet werden. Zusammenfassend k{\"o}nnen wir sagen, dass unser visuelles System nicht unbedingt darauf angelegt zu sein scheint, perfekte Modelle auch bekannter Gesichter zu speichern
-- besonders nicht im Hinblick auf das Geschlecht. Vermutlich ist eine akkurate Geschlechtsinformation nicht notwendig - vor allem da sie nur aus den beiden Klassen m{\"a}nnlich und weiblich besteht - so dass identit{\"a}tsspezifische Informationen geschlechtsspezifische Informationen
dominieren, auch wenn dies nicht zweckm{\"a}ssig ist.},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
event_place = {Greifswald, Germany},
event_name = {36. Tagung Psychologie und Gehirn (PuG 2010)},
author = {B{\"u}lthoff, I}
}
@Conference { ArmannB2009,
title = {Categorical perception of male and female faces depends on familiarity},
year = {2009},
month = {4},
pages = {3},
abstract = {The perception of face identity, race and also facial expressions has been shown to be categorical. For another characteristic of faces, sex, results have been conflicting so far. To resolve this controversy, we created male and female faces with similar perceived degrees of 'maleness' and 'femaleness', based on extensive ratings of faces and sex morphs from our face database. We then created sex continua using these controlled stimuli and tested categorical perception (CP) with classical discrimination and classification tasks. Participants were na{\"i}ve (1), or had been familiarized with average faces of both sexes (2), or with the 'controlled' male and female faces (3). Our results confirm the lack of naturally occurring CP for sex in (1). Moreover, since only participants in (3) showed clear CP, our results suggest (as stated in the ``single-route hypothesis'') that the processing of sex and identity information in faces is not independent from each other. We found no evidence that familiarization with sex information (as given by average male and female faces) transfers to individual faces.},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {https://www.psychology.org.au/Assets/Files/2009-Combined-Abstracts.pdf},
event_place = {Wollongong, Australia},
event_name = {2009 Australian Psychology Conferences: 36th Australasian Experimental Psychology Conference},
author = {Armann, R and B{\"u}lthoff, I}
}
@Conference { ArmannB2008,
title = {Categorical Perception of Male and Female Faces and the
Single-Route Hypothesis},
year = {2008},
month = {10},
pages = {13},
abstract = {The cognitive process of categorizing perceptually similar stimuli into qualitatively different categories is fundamental to any systematic acting upon the world, as it helps to reduce the immense number of entities to more manageable fragments and spares us from learning anew each time we encounter an unknown individual. Categories are evident in all sensory modalities and range from relatively simple (e. g., color perception) to the most abstract human concepts, as for example faces. Categorical perception (CP) has been shown for face identity (e. g., Beale \& Keil 1995), ethnicity (Levin \& Beale, 2000), and facial expression (Calder et al., 1996). Astonishingly, for sex, a natural facial characteristic consisting of only two biologically relevant categories, conflicting results have been reported so far. CP for sex has been shown (Campanella et al., 2001) when sex information was varied linearly (by morphing) between male and female face identities, thus intermixing identity and sex information. When sex continua were created based on single face identities (B{\"u}lthoff and Newell, 2004), no CP for sex was found in na{\"i}ve participants.
So the question remained open whether or not there is CP for the perception of sex as a facial dimension or if processing of the sex of a face is directly linked to processing
of the face's identity, as proposed by the ``single-route hypothesis'' (e. g., by Rossion, 2002, Ganel \& Goshen-Gottstein, 2002; B{\"u}lthoff \& Newell, 2004). To overcome one potential constraint of earlier studies, i. e., 'asymmetric' sex morph continua, we performed extensive ratings of faces and sex morphs from our face database, to create 'controlled' male and female faces with similar perceived degrees of 'maleness' and 'femaleness'. We then examined CP of sex for these faces with classical discrimination and classification experiments. Critically, we manipulated the degree of familiarization of the faces prior to testing, as follows. Observers were either na{\"i}ve, or familiarized with the average male and female face of all faces, or the endpoint identities of the morph continua, or with other
male and female faces with the same perceived degree of maleness and femaleness as the test faces. Our results
confirm the lack of naturally occurring CP for sex and provide more evidence for the linked processing of sex and identity, as participants showed clear CP only after familiarization with the test face identities.},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
event_place = {Ellwangen, Germany},
event_name = {9th Conference of the Junior Neuroscientists of T{\"u}bingen (NeNa 2008)},
author = {Armann, R and B{\"u}lthoff, I}
}
@Conference { 5471,
title = {Visual and haptic perceptual representations of complex 3-D objects},
journal = {Perception},
year = {2008},
month = {8},
volume = {37},
number = {ECVP Abstract Supplement},
pages = {125},
abstract = {In this study we combined two new techniques to investigate visual and haptic perceptual representations of three-dimensional, parametrically-defined shapes. We generated a 3-D object space of shell-shaped objects by altering three model parameters defining shell shape. We created 21 equidistant plastic models of the objects with a 3-D printing device. Haptic exploration was done by having blindfolded participants explore these objects with both hands and no restrictions to the exploratory procedure. To ensure visual interaction without any haptic information, visual representation of these objects were presented to participants via a head-mounted display. Participants manipulated a position-tracked physical substitute to rotate the objects on the display. Pairwise similarity ratings were performed and analysed using multidimensional scaling techniques. Both visual and haptic perceptual representations were highly consistent with the underlying physical three-dimensional parameter space. Interestingly, haptic exploration resulted in a more precise perceptual representation than the visual condition. Additionally, very similar MDS maps of the visual and the haptic exploration provide evidence that one shared perceptual space is underlying both modalities.},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {http://pec.sagepub.com/content/37/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
institution = {Max Planck Institute for Biological Cybernetics},
event_place = {Utrecht, Netherlands},
event_name = {31st European Conference on Visual Perception},
language = {en},
DOI = {10.1177/03010066080370S101},
author = {Gaissert, N and Wallraven, C and B{\"u}lthoff, I}
}
@Conference { Bulthoff2006,
title = {Shape perception for object recognition and face categorization},
year = {2006},
month = {7},
day = {5},
department = {Department B{\"u}lthoff},
talk_type = {Invited Lecture},
event_place = {M{\"u}nchen, Germany},
event_name = {Ludwig-Maximilians-University Munich: Department of Psychology},
author = {B{\"u}lthoff, I}
}
@Conference { 3815,
title = {Investigating face recognition with voices and face morphs},
year = {2006},
month = {1},
abstract = {Investigating face recognition with voices and face morphs
Humans can easily identify faces at the individual level although faces belong to a class of objects with high similarity between exemplars. Characterizing conditions for which faces are more easily recognized allows us to better understand the mechanisms underlying face recognition.
Numerous studies have shown that distinctive faces are better recognized than typical faces. Those results have implication for the mental representation of faces. In a set of experiments we tested cross-modal effects of distinctiveness. More specifically we asked whether distinctive voices can improve memory for otherwise typical faces. Our results suggest that the quality of information in one modality, i.e., audition, can affect recognition in another modality, i.e., vision; thus showing that face distinctiveness can be of multi-modal nature.
Because we encounter faces of only two sexes but recognize faces of innumerable different identities, it is often implicitly assumed that sex classification is an easier task than identification. We investigated how sensitive we are to variations of identity-related features or sex-related features of highly familiar faces. The results suggest that while extracting and processing sex-related information from a face is a comparatively easy task, we do not seem to retain sex-related facial information in memory as accurately as identity-related information. These results have implications for models of face representation and face processing.},
department = {Department B{\"u}lthoff},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {G{\"o}ttingen, Germany},
event_name = {Face Mini-Symposium: Georg-August-Universit{\"a}t G{\"o}ttingen, Zentrum f{\"u}r Neurobiologie des Verhaltens},
language = {en},
author = {B{\"u}lthoff, I}
}
@Conference { 3590,
title = {Accuracy in face recognition: Better performance for face identification with changes in identity and caricature but not with changes in sex},
journal = {Journal of Vision},
year = {2005},
month = {9},
volume = {5},
number = {8},
pages = {379},
abstract = {Because we encounter faces of only two sexes but recognize faces of innumerable different identities, it is often implicitly assumed that sex determination is easier than identification in face recognition. Many studies support this assumption. For example, we are very accurate at telling the sex of unfamiliar faces in photographs (Bruce, et al., 1993. Perception, 22, 131–52) and sex categorization is performed more rapidly, on average, than familiarity or identity decisions (Bruyer, Galvez, \& Prairial, 1993. British Journal of Psychology, 84, 433–441). The question that we investigated here is how sensitive we are to variations of identity-related features or sex-related features in familiar faces. 38 participants had to pick out the veridical faces of ten familiar work colleagues from amongst distractor faces that were variations of the original faces. Distractor faces varied either in identity, caricature or sex. In the identity face sets, distractor faces were various morphs between the original face and two unfamiliar faces. In the caricature face sets, distractors were various caricatures of the original face. Finally, in the sex face sets, distractor faces were various feminized and masculinized versions of the original face. Participants were most accurate at identifying the original face amongst distractors in the identity sets. They had a tendency to choose positive caricatures over the original faces in caricature sets. However, participants were very poor at finding the original faces in the sex sets. The results suggest that while extracting and processing sex-related information from a face is a comparatively easy task, we do not seem to retain sex-related facial information in memory as accurately as identity-related information. These results have implications for models of face representation and face processing.},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {http://journalofvision.org/5/8/379/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Sarasota, FL, USA},
event_name = {Fifth Annual Meeting of the Vision Sciences Society (VSS 2005)},
language = {en},
DOI = {10.1167/5.8.379},
author = {B{\"u}lthoff, I and Newell, F}
}
@Conference { 3813,
title = {Shape perception for object recognition and face categorization},
journal = {Perception},
year = {2005},
month = {8},
volume = {34},
number = {ECVP Abstract Supplement},
pages = {21},
abstract = {Even though shape is the basis of object recognition, there is still an on-going debate about how it is perceived and represented in the brain. An important question is how various visual cues, like disparity and texture, are integrated into a unique shape percept. Different visual information has also been shown to play an ancillary role in shape perception. For example, cast shadows can help disambiguate shape perception (Kersten et al, 1996 Nature 379 31) while 2D retinal motion information can help organize dots into meaningful shapes despite incongruent depth information (B{\"u}lthoff et al, 1998 Nature Neuroscience 1 254 - 257).
Shape perception is also important for object categorization. For example, faces varying in shape and texture may be perceptually grouped into different categories (a phenomenon known as categorical perception). Previous studies have shown that faces varying in expressions, identity or race are perceived categorically (e.g. Levin \& Angelone, 2002 Perception 31 567 - 578). We did not find a similar effect for faces varying in masculinity/femininity (B{\"u}lthoff \& Newell, 2004 Visual Cognition 11 823 - 855). This difference in perception for sex and identity is supported by new studies showing a lack of sensitivity to sex changes in familiar faces, while changes in identity are easily noticed. These results have implications for the nature of shape representations of faces in the brain.},
department = {Department B{\"u}lthoff},
web_url = {http://pec.sagepub.com/content/34/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {A Coru{\~n}a, Spain},
event_name = {28th European Conference on Visual Perception},
language = {en},
DOI = {10.1177/03010066050340S101},
author = {B{\"u}lthoff, I}
}
@Conference { 3066,
title = {Distinctive auditory information improves visual face recognition},
journal = {Journal of Vision},
year = {2004},
month = {8},
volume = {4},
number = {8},
pages = {139},
abstract = {Face recognition studies have shown that distinctiveness can improve recognition. Distinctiveness effects have also been found in stimuli other than faces suggesting that it is a general mechanism. Here we tested cross-modal effects of distinctiveness and asked whether distinctive voices can improve memory for otherwise typical faces. In all experiments participants first learned a set of static, unfamiliar faces. During learning, half of these faces were paired with distinctive voices and half were paired with typical voices. Face stimuli were counterbalanced across these voice conditions. In Experiment 1 we found that recognition performance in a visual recognition test was significantly (p},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {http://journalofvision.org/4/8/139/},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Sarasota, FL, USA},
event_name = {Fourth Annual Meeting of the Vision Sciences Society (VSS 2004)},
DOI = {10.1167/4.8.139},
author = {B{\"u}lthoff, I and Newell, FN}
}
@Conference { 1121,
title = {Features of the representation space for 3D objects},
journal = {Perception},
year = {1996},
month = {9},
volume = {25},
number = {ECVP Abstract Supplement},
pages = {49--50},
abstract = {To explore the nature of the representation space of 3-D objects, we studied human performance in forced-choice classification of objects composed of four geon-like parts, emanating from a common centre. The two class prototypes were distinguished by qualitative contrasts (cross-section shape; bulge/waist), and by metric parameters (degree of bulge/waist, taper ratio). Subjects were trained to discriminate between the two prototypes (shown briefly, from a number of viewpoints, in stereo) in a 1-interval forced-choice task, until they reached a 90\% correct-response performance level. In experiment 1, eleven subjects were tested on shapes obtained by varying the prototypical parameters both orthogonally (Ortho), and in parallel (Para) to the line connecting the prototypes in the parameter space. For the eight subjects who performed above chance, the error rate increased with the Ortho parameter-space displacement between the stimulus and the corresponding prototype: F1,68=3.6, p<0.06 (the effect of the Para displacement was marginal). Clearly, the parameter-space location of the stimuli mattered more than the qualitative contrasts (which were always present). To find out whether both prototypes or just the nearest neighbour of the test shape influenced the decision, in experiment 2 eight new subjects were tested on a fixed set of shapes, while the test-stage distance between the two classes assumed one of three values (Far, Intermediate, or Near). For the six subjects who performed above chance, the error rate (on physically identical stimuli) in the Near condition was higher than in the other two conditions: F1,89=3.7, p<0.06. 
The results of the two experiments contradict the prediction of theories that postulate exclusive reliance on qualitative contrasts, and support the notion of a metric representation space with the subjects' performance determined by distances to more than one reference point or prototype (cf Edelman, 1995 Minds and Machines 5 45 - 68).},
department = {Department B{\"u}lthoff},
talk_type = {Abstract Talk},
web_url = {http://pec.sagepub.com/content/25/1_suppl.toc},
institute = {Biologische Kybernetik},
organization = {Max-Planck-Gesellschaft},
event_place = {Strasbourg, France},
event_name = {19th European Conference on Visual Perception},
author = {B{\"u}lthoff, HH and Edelman, S and B{\"u}lthoff, I}
}