@article{DalyPred2015,
title = {Music-induced emotions can be predicted from a combination of brain activity and acoustic features},
author = {Ian Daly and Duncan Williams and James Hallowell and Faustina Hwang and Alexis Kirke and Asad Malik and James Weaver and Eduardo Miranda and Slawomir J. Nasuto},
url = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/music-induced-emotions-can-be-predicted-from-a-combination-of-brain-activity-and-acoustic-features.pdf},
doi = {10.1016/j.bandc.2015.08.003},
year = {2015},
date = {2015-12-01},
journal = {Brain and Cognition},
volume = {101},
pages = {1-11},
abstract = {It is widely acknowledged that music can communicate and induce a wide range of emotions in the listener. However, music is a highly complex audio signal composed of a wide range of complex time- and frequency-varying components. Additionally, music-induced emotions are known to differ greatly between listeners. Therefore, it is not immediately clear what emotions will be induced in a given individual by a piece of music. We attempt to predict the music-induced emotional response in a listener by measuring the activity in the listener's electroencephalogram (EEG). We combine these measures with acoustic descriptors of the music, an approach that allows us to consider music as a complex set of time-varying acoustic features, independently of any specific music theory. Regression models are found which allow us to predict the music-induced emotions of our participants with a correlation between the actual and predicted responses of up to r = 0.234, p < 0.001. This regression fit suggests that over 20% of the variance of the participants' music-induced emotions can be predicted by their neural activity and the properties of the music. Given the large amount of noise, non-stationarity, and non-linearity in both EEG and music, this is an encouraging result. Additionally, the combination of measures of brain activity and acoustic features describing the music played to our participants allows us to predict music-induced emotions with significantly higher accuracies than either feature type alone (p < 0.01).},
keywords = {Acoustic features, EEG, Emotion, Music},
pubstate = {published},
tppubtype = {article}
}

It is widely acknowledged that music can communicate and induce a wide range of emotions in the listener. However, music is a highly complex audio signal composed of a wide range of complex time- and frequency-varying components. Additionally, music-induced emotions are known to differ greatly between listeners. Therefore, it is not immediately clear what emotions will be induced in a given individual by a piece of music. We attempt to predict the music-induced emotional response in a listener by measuring the activity in the listener's electroencephalogram (EEG). We combine these measures with acoustic descriptors of the music, an approach that allows us to consider music as a complex set of time-varying acoustic features, independently of any specific music theory. Regression models are found which allow us to predict the music-induced emotions of our participants with a correlation between the actual and predicted responses of up to r = 0.234, p < 0.001. This regression fit suggests that over 20% of the variance of the participants' music-induced emotions can be predicted by their neural activity and the properties of the music. Given the large amount of noise, non-stationarity, and non-linearity in both EEG and music, this is an encouraging result. Additionally, the combination of measures of brain activity and acoustic features describing the music played to our participants allows us to predict music-induced emotions with significantly higher accuracies than either feature type alone (p < 0.01).
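A minimal sketch of the kind of analysis this abstract describes, not the authors' actual pipeline: combine EEG-derived features with acoustic features in a single regression model and score the fit by the correlation between actual and predicted emotional responses. The feature matrices, trial counts, and use of scikit-learn ridge regression are all illustrative assumptions; the arrays are random placeholders standing in for extracted features and self-reports.

```python
# Sketch only: predict reported emotion from combined EEG + acoustic features,
# then report the correlation between actual and predicted responses.
import numpy as np
from scipy.stats import pearsonr
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold

rng = np.random.default_rng(0)
n_trials = 120
eeg_features = rng.normal(size=(n_trials, 32))       # placeholder: e.g., band power per channel
acoustic_features = rng.normal(size=(n_trials, 10))  # placeholder: e.g., tempo, spectral centroid
valence = rng.normal(size=n_trials)                  # placeholder: self-reported response

X = np.hstack([eeg_features, acoustic_features])     # combined feature set
predicted = np.zeros(n_trials)

# Cross-validated fit so predictions are scored on held-out trials.
for train, test in KFold(n_splits=5, shuffle=True, random_state=0).split(X):
    model = Ridge(alpha=1.0).fit(X[train], valence[train])
    predicted[test] = model.predict(X[test])

r, p = pearsonr(valence, predicted)
print(f"correlation between actual and predicted responses: r={r:.3f}, p={p:.3g}")
```

Comparing the same score for EEG-only and acoustic-only feature sets against the combined set mirrors the comparison reported in the abstract.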

@article{WilliamsRD2015,
title = {Investigating Perceived Emotional Correlates of Rhythmic Density in Algorithmic Music Composition},
author = {Duncan Williams and Alexis Kirke and Eduardo Miranda and Ian Daly and James Hallowell and James Weaver and Asad Malik and Etienne Roesch and Faustina Hwang and Slawomir Nasuto},
doi = {10.1145/2749466},
year = {2015},
date = {2015-07-01},
journal = {ACM Transactions on Applied Perception (TAP)},
volume = {12},
number = {3},
pages = {1-21},
abstract = {Affective algorithmic composition is a growing field that combines perceptually motivated affective computing strategies with novel music generation. This article presents work toward the development of one application. The long-term goal is to develop a responsive and adaptive system for inducing affect that is both controlled and validated by biophysical measures. Literature documenting perceptual responses to music identifies a variety of musical features and possible affective correlations, but perceptual evaluations of these musical features for the purposes of inclusion in a music generation system are not readily available. A discrete feature, rhythmic density (a function of note duration in each musical bar, regardless of tempo), was selected because it was shown to be well-correlated with affective responses in existing literature. A prototype system was then designed to produce controlled degrees of variation in rhythmic density via a transformative algorithm. A two-stage perceptual evaluation of a stimulus set created by this prototype was then undertaken. First, listener responses from a pairwise scaling experiment were analyzed via Multidimensional Scaling Analysis (MDS). The statistical best-fit solution was rotated such that stimuli with the largest range of variation were placed across the horizontal plane in two dimensions. In this orientation, stimuli with deliberate variation in rhythmic density appeared farther from the source material used to generate them than from stimuli generated by random permutation. Second, the same stimulus set was then evaluated according to the order suggested in the rotated two-dimensional solution in a verbal elicitation experiment. A Verbal Protocol Analysis (VPA) found that listener perception of the stimulus set varied in at least two commonly understood emotional descriptors, which might be considered affective correlates of rhythmic density. Thus, these results further corroborate previous studies in which musical parameters were monitored for changes in emotional expression, suggest that similarly parameterized control of perceived emotional content can be achieved in an affective algorithmic composition system, and provide a methodology for evaluating and including further possible musical features in such a system. Some suggestions regarding the test procedure and analysis techniques are also documented here.},
keywords = {Affective composition, Emotion, Music generation},
pubstate = {published},
tppubtype = {article}
}

Affective algorithmic composition is a growing field that combines perceptually motivated affective computing strategies with novel music generation. This article presents work toward the development of one application. The long-term goal is to develop a responsive and adaptive system for inducing affect that is both controlled and validated by biophysical measures. Literature documenting perceptual responses to music identifies a variety of musical features and possible affective correlations, but perceptual evaluations of these musical features for the purposes of inclusion in a music generation system are not readily available. A discrete feature, rhythmic density (a function of note duration in each musical bar, regardless of tempo), was selected because it was shown to be well-correlated with affective responses in existing literature. A prototype system was then designed to produce controlled degrees of variation in rhythmic density via a transformative algorithm. A two-stage perceptual evaluation of a stimulus set created by this prototype was then undertaken. First, listener responses from a pairwise scaling experiment were analyzed via Multidimensional Scaling Analysis (MDS). The statistical best-fit solution was rotated such that stimuli with the largest range of variation were placed across the horizontal plane in two dimensions. In this orientation, stimuli with deliberate variation in rhythmic density appeared farther from the source material used to generate them than from stimuli generated by random permutation. Second, the same stimulus set was then evaluated according to the order suggested in the rotated two-dimensional solution in a verbal elicitation experiment. A Verbal Protocol Analysis (VPA) found that listener perception of the stimulus set varied in at least two commonly understood emotional descriptors, which might be considered affective correlates of rhythmic density. Thus, these results further corroborate previous studies in which musical parameters were monitored for changes in emotional expression, suggest that similarly parameterized control of perceived emotional content can be achieved in an affective algorithmic composition system, and provide a methodology for evaluating and including further possible musical features in such a system. Some suggestions regarding the test procedure and analysis techniques are also documented here.
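A rough sketch, not the published analysis, of the MDS step described above: embed pairwise dissimilarity ratings of the stimuli in two dimensions and rotate the solution so the direction of largest variation lies along the horizontal axis. The dissimilarity matrix here is random placeholder data standing in for listener ratings, and the use of scikit-learn's metric MDS with an SVD-based rotation is an assumption about one reasonable way to do it.

```python
# Sketch only: 2-D MDS configuration from a precomputed dissimilarity matrix,
# rotated so the first principal axis of the configuration is horizontal.
import numpy as np
from sklearn.manifold import MDS

rng = np.random.default_rng(1)
n_stimuli = 9
ratings = rng.uniform(0, 1, size=(n_stimuli, n_stimuli))
dissim = (ratings + ratings.T) / 2        # symmetrise the placeholder ratings
np.fill_diagonal(dissim, 0.0)             # zero self-dissimilarity

mds = MDS(n_components=2, dissimilarity="precomputed", random_state=0)
coords = mds.fit_transform(dissim)

# Rotate the configuration so the axis of largest variation is horizontal.
coords -= coords.mean(axis=0)
_, _, vt = np.linalg.svd(coords, full_matrices=False)
rotated = coords @ vt.T                   # columns ordered by variance explained

for i, (x, y) in enumerate(rotated):
    print(f"stimulus {i}: x={x:+.2f}, y={y:+.2f}")
```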

@inbook{Daly2014mu,
title = {Machine learning to identify neural correlates of music and emotions},
author = {Ian Daly and Etienne Roesch and James Weaver and Slawomir J. Nasuto},
editor = {Eduardo Reck Miranda and Julien Castet},
url = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/Machine-learning-to-identify-neural-correlates-of-music-and-emotions.pdf},
doi = {10.1007/978-1-4471-6584-2_5},
isbn = {978-1-4471-6583-5},
year = {2014},
date = {2014-10-04},
pages = {89-103},
publisher = {Springer},
abstract = {While music is widely understood to induce an emotional response in the listener, the exact nature of that response and its neural correlates are not yet fully explored. Furthermore, the large number of features which may be extracted from, and used to describe, neurological data, music stimuli, and emotional responses, means that the relationships between these datasets produced during music listening tasks or the operation of a brain–computer music interface (BCMI) are likely to be complex and multidimensional. As such, they may not be apparent from simple visual inspection of the data alone. Machine learning, which is a field of computer science that aims at extracting information from data, provides an attractive framework for uncovering stable relationships between datasets and has been suggested as a tool by which neural correlates of music and emotion may be revealed. In this chapter, we provide an introduction to the use of machine learning methods for identifying neural correlates of musical perception and emotion. We then provide examples of machine learning methods used to study the complex relationships between neurological activity, musical stimuli, and/or emotional responses.},
keywords = {EEG, Emotion, Machine learning, Models of emotion, Music},
pubstate = {published},
tppubtype = {inbook}
}

While music is widely understood to induce an emotional response in the listener, the exact nature of that response and its neural correlates are not yet fully explored. Furthermore, the large number of features which may be extracted from, and used to describe, neurological data, music stimuli, and emotional responses, means that the relationships between these datasets produced during music listening tasks or the operation of a brain–computer music interface (BCMI) are likely to be complex and multidimensional. As such, they may not be apparent from simple visual inspection of the data alone. Machine learning, which is a field of computer science that aims at extracting information from data, provides an attractive framework for uncovering stable relationships between datasets and has been suggested as a tool by which neural correlates of music and emotion may be revealed. In this chapter, we provide an introduction to the use of machine learning methods for identifying neural correlates of musical perception and emotion. We then provide examples of machine learning methods used to study the complex relationships between neurological activity, musical stimuli, and/or emotional responses.
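One illustrative example of the approach this chapter introduces, and an assumption rather than the chapter's specific method: train a classifier to predict a felt-emotion label from EEG features, then inspect which features carry the most weight as candidate neural correlates. The data, channel count, and choice of a logistic-regression classifier are placeholders.

```python
# Sketch only: classify emotion labels from EEG features and inspect weights.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(2)
n_trials, n_channels = 200, 16
band_power = rng.normal(size=(n_trials, n_channels))   # placeholder: e.g., alpha power per channel
emotion_label = rng.integers(0, 2, size=n_trials)      # placeholder: e.g., low vs. high valence

clf = LogisticRegression(max_iter=1000)
acc = cross_val_score(clf, band_power, emotion_label, cv=5).mean()
print(f"cross-validated accuracy: {acc:.2f}")

# Channels with the largest-magnitude weights are candidate correlates,
# subject to the usual caveats about interpreting linear-model weights.
clf.fit(band_power, emotion_label)
top = np.argsort(np.abs(clf.coef_[0]))[::-1][:3]
print("highest-weighted channels:", top)
```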

@article{Williams2014,
title = {Investigating affect in algorithmic composition systems},
author = {Duncan Williams and Alexis Kirke and Eduardo Miranda and Etienne Roesch and Ian Daly and Slawomir Nasuto},
doi = {10.1177/0305735614543282},
year = {2014},
date = {2014-09-15},
journal = {Psychology of Music},
pages = {1-24},
abstract = {There has been a significant amount of work implementing systems for algorithmic composition with the intention of targeting specific emotional responses in the listener, but a full review of this work is not currently available. This gap creates a shared obstacle to those entering the field. Our aim is thus to give an overview of progress in the area of these affectively driven systems for algorithmic composition. Performative and transformative systems are included and differentiated where appropriate, highlighting the challenges these systems now face if they are to be adapted to, or have already incorporated, some form of affective control. Possible real-time applications for such systems, utilizing affectively driven algorithmic composition and biophysical sensing to monitor and induce affective states in the listener, are suggested.},
keywords = {Emotion, Music, Music generation},
pubstate = {published},
tppubtype = {article}
}

There has been a significant amount of work implementing systems for algorithmic composition with the intention of targeting specific emotional responses in the listener, but a full review of this work is not currently available. This gap creates a shared obstacle to those entering the field. Our aim is thus to give an overview of progress in the area of these affectively driven systems for algorithmic composition. Performative and transformative systems are included and differentiated where appropriate, highlighting the challenges these systems now face if they are to be adapted to, or have already incorporated, some form of affective control. Possible real-time applications for such systems, utilizing affectively driven algorithmic composition and biophysical sensing to monitor and induce affective states in the listener, are suggested.

abstract = {This paper presents an EEG study into the neural correlates of music-induced emotions. We presented participants with a large dataset containing musical pieces in different styles, and asked them to report on their induced emotional responses. We found neural correlates of music-induced emotion in a number of frequencies over the pre-frontal cortex. Additionally, we found a set of patterns of functional connectivity, defined by inter-channel coherence measures, to be significantly different between groups of music-induced emotional responses.},
keywords = {Asymmetry, EEG, Emotion, Functional connectivity, Music},
pubstate = {published},
tppubtype = {article}
}

This paper presents an EEG study into the neural correlates of music-induced emotions. We presented participants with a large dataset containing musical pieces in different styles, and asked them to report on their induced emotional responses.

We found neural correlates of music-induced emotion in a number of frequencies over the pre-frontal cortex. Additionally, we found a set of patterns of functional connectivity, defined by inter-channel coherence measures, to be significantly different between groups of music-induced emotional responses.
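A minimal sketch of the two kinds of measure this abstract mentions, under the assumption of raw multichannel EEG sampled at 256 Hz: band power over pre-frontal channels and inter-channel coherence as a functional-connectivity estimate. The sampling rate, channel labels, frequency band, and signal itself are placeholders, and this is not the study's actual processing pipeline.

```python
# Sketch only: pre-frontal alpha-band power and inter-channel coherence.
import numpy as np
from scipy.signal import coherence, welch

fs = 256                                   # assumed sampling rate (Hz)
rng = np.random.default_rng(3)
eeg = rng.normal(size=(2, fs * 60))        # placeholder: two pre-frontal channels, 60 s
ch_names = ["Fp1", "Fp2"]                  # hypothetical channel labels

# Band power (here alpha, 8-13 Hz) per channel via Welch's method.
freqs, psd = welch(eeg, fs=fs, nperseg=fs * 2)
alpha = (freqs >= 8) & (freqs <= 13)
for name, spectrum in zip(ch_names, psd):
    print(f"{name} alpha power: {spectrum[alpha].mean():.4f}")

# Inter-channel coherence between the two channels in the same band.
f, cxy = coherence(eeg[0], eeg[1], fs=fs, nperseg=fs * 2)
print(f"mean alpha coherence {ch_names[0]}-{ch_names[1]}: {cxy[(f >= 8) & (f <= 13)].mean():.3f}")
```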