ROI-based decoding analysis of the Haxby dataset

In this script we reproduce the data analysis conducted by
Haxby et al. in “Distributed and Overlapping Representations of Faces and
Objects in Ventral Temporal Cortex”.

Specifically, we look at decoding accuracy for different objects in
three different masks: the full ventral stream (mask_vt), the house
selective areas (mask_house) and the face selective areas (mask_face),
that have been defined via a standard GLM-based analysis.

### Fetch data using nilearn dataset fetcher ################################
# NOTE(review): fetch_haxby(n_subjects=1) is the legacy signature — newer
# nilearn uses `subjects=`; kept as-is to match the environment this
# example was written for.
from nilearn import datasets
haxby_dataset = datasets.fetch_haxby(n_subjects=1)

# print basic information on the dataset
print('First subject anatomical nifti image (3D) is located at: %s' %
      haxby_dataset.anat[0])
print('First subject functional nifti image (4D) is located at: %s' %
      haxby_dataset.func[0])

# Load nilearn NiftiMasker, the practical masking and unmasking tool
from nilearn.input_data import NiftiMasker

# load labels
import numpy as np
labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
stimuli = labels['labels']

# identify resting state labels in order to be able to remove them
# (labels are read as bytes, hence the b"rest" comparison)
resting_state = stimuli == b"rest"

# find names of remaining active labels
categories = np.unique(stimuli[np.logical_not(resting_state)])

# extract tags indicating to which acquisition run a tag belongs
session_labels = labels["chunks"][np.logical_not(resting_state)]

# The classifier: a support vector classifier
from sklearn.svm import SVC
classifier = SVC(C=1., kernel="linear")

# A classifier to set the chance level
from sklearn.dummy import DummyClassifier
dummy_classifier = DummyClassifier()

# Make a data splitting object for cross validation
# NOTE(review): sklearn.cross_validation / LeaveOneLabelOut were removed in
# scikit-learn 0.20 (replaced by model_selection.LeaveOneGroupOut, which
# takes `groups=` in cross_val_score); kept for compatibility with the
# sklearn version this example targets.
from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
cv = LeaveOneLabelOut(session_labels)

func_filename = haxby_dataset.func[0]
mask_names = ['mask_vt', 'mask_face', 'mask_house']

# Per-mask, per-category cross-validated f1 scores, plus the chance level
# estimated with a dummy classifier on the same splits.
mask_scores = {}
mask_chance_scores = {}

for mask_name in mask_names:
    print("Working on mask %s" % mask_name)
    # For decoding, standardizing is often very important
    mask_filename = haxby_dataset[mask_name][0]
    masker = NiftiMasker(mask_img=mask_filename, standardize=True)
    # Mask the 4D functional image and drop the resting-state volumes
    masked_timecourses = masker.fit_transform(
        func_filename)[np.logical_not(resting_state)]

    mask_scores[mask_name] = {}
    mask_chance_scores[mask_name] = {}

    for category in categories:
        print("Processing %s %s" % (mask_name, category))
        task_mask = np.logical_not(resting_state)
        # One-vs-rest target: is this volume the current category?
        classification_target = (stimuli[task_mask] == category)
        mask_scores[mask_name][category] = cross_val_score(
            classifier, masked_timecourses, classification_target,
            cv=cv, scoring="f1")
        mask_chance_scores[mask_name][category] = cross_val_score(
            dummy_classifier, masked_timecourses, classification_target,
            cv=cv, scoring="f1")

        print("Scores: %1.2f +- %1.2f" % (
            mask_scores[mask_name][category].mean(),
            mask_scores[mask_name][category].std()))

# make a rudimentary diagram
import matplotlib.pyplot as plt
plt.figure()

tick_position = np.arange(len(categories))
plt.xticks(tick_position, categories, rotation=45)

for color, mask_name in zip('rgb', mask_names):
    score_means = [mask_scores[mask_name][category].mean()
                   for category in categories]
    plt.bar(tick_position, score_means, label=mask_name,
            width=.25, color=color)
    # Overlay the chance level as hollow bars
    score_chance = [mask_chance_scores[mask_name][category].mean()
                    for category in categories]
    plt.bar(tick_position, score_chance,
            width=.25, edgecolor='k', facecolor='none')
    tick_position = tick_position + .2

plt.ylabel('Classification accuracy (f1 score)')
plt.xlabel('Visual stimuli category')
plt.legend(loc='best')
plt.title('Category-specific classification accuracy for different masks')
plt.tight_layout()
plt.show()

Total running time of the example: 42.20 seconds
( 0 minutes 42.20 seconds)