@inproceedings{Daly2015ACII,
title = {Identifying music-induced emotions from {EEG} for use in brain-computer music interfacing},
author = {Ian Daly and Duncan Williams and Asad Malik and James Weaver and Faustina Hwang and Alexis Kirke and Eduardo Miranda and Slawomir J. Nasuto},
url = {https://www.computer.org/csdl/proceedings/acii/2015/9953/00/07344685.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of the 4th workshop on affective brain-computer interfaces at the ACII 2015},
pages = {923--929},
abstract = {Brain-computer music interfaces (BCMI) provide a method to modulate an individual's affective state via the selection or generation of music according to their current affective state. Potential applications of such systems may include entertainment or therapeutic applications. We outline a proposed design for such a BCMI and seek a method for automatically differentiating different music induced affective states. Band-power features are explored for use in automatically identifying music-induced affective states. Additionally, a linear discriminant analysis classifier and a support vector machine are evaluated with respect to their ability to classify music induced affective states from the electroencephalogram recorded during a BCMI calibration task. Accuracies of up to 79.5\% ($p < 0.001$) are achieved with the support vector machine.},
keywords = {Affective computing, BCMI, Classification, EEG, Music generation},
pubstate = {published},
tppubtype = {conference}
}

Brain-computer music interfaces (BCMI) provide a method to modulate an individual's affective state via the selection or generation of music according to their current affective state. Potential applications of such systems may include entertainment or therapeutic applications. We outline a proposed design for such a BCMI and seek a method for automatically differentiating different music induced affective states. Band-power features are explored for use in automatically identifying music-induced affective states. Additionally, a linear discriminant analysis classifier and a support vector machine are evaluated with respect to their ability to classify music induced affective states from the electroencephalogram recorded during a BCMI calibration task. Accuracies of up to 79.5% (p < 0.001) are achieved with the support vector machine.

@inproceedings{Daly2015CEEC,
title = {Towards Human-Computer Music Interaction: Evaluation of an Affectively-Driven Music Generator Via Galvanic Skin Response Measures},
author = {Ian Daly and Asad Malik and James Weaver and Faustina Hwang and Slawomir J. Nasuto and Duncan Williams and Alexis Kirke and Eduardo Miranda},
doi = {10.1109/CEEC.2015.7332705},
internal-note = {Review 2024: removed url field; the linked PDF filename matched the Daly2015ACII paper title, not this paper. DOI retained as the canonical locator.},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of the Seventh Computer Science and Electronic Engineering Conference 2015 (CEEC'15)},
pages = {87--92},
publisher = {IEEE},
abstract = {An affectively driven music generation system is described and evaluated. The system is developed for the intended eventual use in human-computer interaction systems such as brain-computer music interfaces. It is evaluated for its ability to induce changes in a listener's affective state. The affectively-driven algorithmic composition system was used to generate a stimulus set covering 9 discrete sectors of a 2-dimensional affective space by means of a 16 channel feedforward artificial neural network. This system was used to generate 90 short pieces of music with specific affective intentions, 10 stimuli for each of the 9 sectors in the affective space. These pieces were played to 20 healthy participants, and it was observed that the music generation system induced the intended affective states in the participants. This is further verified by inspecting the galvanic skin response recorded from participants.},
keywords = {BCMI, GSR, Music generation},
pubstate = {published},
tppubtype = {conference}
}

An affectively driven music generation system is described and evaluated. The system is developed for the intended eventual use in human-computer interaction systems such as brain-computer music interfaces. It is evaluated for its ability to induce changes in a listener's affective state. The affectively-driven algorithmic composition system was used to generate a stimulus set covering 9 discrete sectors of a 2-dimensional affective space by means of a 16 channel feedforward artificial neural network. This system was used to generate 90 short pieces of music with specific affective intentions, 10 stimuli for each of the 9 sectors in the affective space. These pieces were played to 20 healthy participants, and it was observed that the music generation system induced the intended affective states in the participants. This is further verified by inspecting the galvanic skin response recorded from participants.

@article{WilliamsRD2015,
title = {Investigating Perceived Emotional Correlates of Rhythmic Density in Algorithmic Music Composition},
author = {Duncan Williams and Alexis Kirke and Eduardo Miranda and Ian Daly and James Hallowell and James Weaver and Asad Malik and Etienne Roesch and Faustina Hwang and Slawomir Nasuto},
doi = {10.1145/2749466},
year = {2015},
date = {2015-07-01},
journal = {{ACM} Transactions on Applied Perception (TAP)},
volume = {12},
number = {3},
pages = {1--21},
abstract = {Affective algorithmic composition is a growing field that combines perceptually motivated affective computing strategies with novel music generation. This article presents work toward the development of one application. The long-term goal is to develop a responsive and adaptive system for inducing affect that is both controlled and validated by biophysical measures. Literature documenting perceptual responses to music identifies a variety of musical features and possible affective correlations, but perceptual evaluations of these musical features for the purposes of inclusion in a music generation system are not readily available. A discrete feature, rhythmic density (a function of note duration in each musical bar, regardless of tempo), was selected because it was shown to be well-correlated with affective responses in existing literature. A prototype system was then designed to produce controlled degrees of variation in rhythmic density via a transformative algorithm. A two-stage perceptual evaluation of a stimulus set created by this prototype was then undertaken. First, listener responses from a pairwise scaling experiment were analyzed via Multidimensional Scaling Analysis (MDS). The statistical best-fit solution was rotated such that stimuli with the largest range of variation were placed across the horizontal plane in two dimensions. In this orientation, stimuli with deliberate variation in rhythmic density appeared farther from the source material used to generate them than from stimuli generated by random permutation. Second, the same stimulus set was then evaluated according to the order suggested in the rotated two-dimensional solution in a verbal elicitation experiment. A Verbal Protocol Analysis (VPA) found that listener perception of the stimulus set varied in at least two commonly understood emotional descriptors, which might be considered affective correlates of rhythmic density.
Thus, these results further corroborate previous studies wherein musical parameters are monitored for changes in emotional expression and that some similarly parameterized control of perceived emotional content in an affective algorithmic composition system can be achieved and provide a methodology for evaluating and including further possible musical features in such a system. Some suggestions regarding the test procedure and analysis techniques are also documented here.},
keywords = {Affective composition, Emotion, Music generation},
pubstate = {published},
tppubtype = {article}
}

Affective algorithmic composition is a growing field that combines perceptually motivated affective computing strategies with novel music generation. This article presents work toward the development of one application. The long-term goal is to develop a responsive and adaptive system for inducing affect that is both controlled and validated by biophysical measures. Literature documenting perceptual responses to music identifies a variety of musical features and possible affective correlations, but perceptual evaluations of these musical features for the purposes of inclusion in a music generation system are not readily available. A discrete feature, rhythmic density (a function of note duration in each musical bar, regardless of tempo), was selected because it was shown to be well-correlated with affective responses in existing literature. A prototype system was then designed to produce controlled degrees of variation in rhythmic density via a transformative algorithm. A two-stage perceptual evaluation of a stimulus set created by this prototype was then undertaken. First, listener responses from a pairwise scaling experiment were analyzed via Multidimensional Scaling Analysis (MDS). The statistical best-fit solution was rotated such that stimuli with the largest range of variation were placed across the horizontal plane in two dimensions. In this orientation, stimuli with deliberate variation in rhythmic density appeared farther from the source material used to generate them than from stimuli generated by random permutation. Second, the same stimulus set was then evaluated according to the order suggested in the rotated two-dimensional solution in a verbal elicitation experiment. A Verbal Protocol Analysis (VPA) found that listener perception of the stimulus set varied in at least two commonly understood emotional descriptors, which might be considered affective correlates of rhythmic density. 
Thus, these results further corroborate previous studies wherein musical parameters are monitored for changes in emotional expression and that some similarly parameterized control of perceived emotional content in an affective algorithmic composition system can be achieved and provide a methodology for evaluating and including further possible musical features in such a system. Some suggestions regarding the test procedure and analysis techniques are also documented here.

@inproceedings{Williams2015,
title = {Dynamic game soundtrack generation in response to a continuously varying emotional trajectory},
author = {Duncan Williams and Alexis Kirke and Joel Eaton and Eduardo Miranda and Ian Daly and James Weaver and Etienne Roesch and Faustina Hwang and Slawomir Nasuto},
url = {http://www.aes.org/e-lib/browse.cfm?elib=17593},
year = {2015},
date = {2015-02-11},
booktitle = {Proceedings of the 56th International Conference: Audio for Games (February 2015)},
pages = {2},
abstract = {Dynamic soundtracking presents various practical and aesthetic challenges to composers working with games. This paper presents an implementation of a system addressing some of these challenges with an affectively-driven music generation algorithm based on a second order Markov-model. The system can respond in real-time to emotional trajectories derived from 2-dimensions of affect on the circumplex model (arousal and valence), which are mapped to five musical parameters. A transition matrix is employed to vary the generated output in continuous response to the affective state intended by the gameplay.},
keywords = {Affective composition, Markov model, Music generation},
pubstate = {published},
tppubtype = {conference}
}

Dynamic soundtracking presents various practical and aesthetic challenges to composers working with games. This paper presents an implementation of a system addressing some of these challenges with an affectively-driven music generation algorithm based on a second order Markov-model. The system can respond in real-time to emotional trajectories derived from 2-dimensions of affect on the circumplex model (arousal and valence), which are mapped to five musical parameters. A transition matrix is employed to vary the generated output in continuous response to the affective state intended by the gameplay.

@article{Williams2014,
title = {Investigating affect in algorithmic composition systems},
author = {Duncan Williams and Alexis Kirke and Eduardo Miranda and Etienne Roesch and Ian Daly and Slawomir Nasuto},
doi = {10.1177/0305735614543282},
year = {2014},
date = {2014-09-15},
journal = {Psychology of Music},
pages = {1--24},
abstract = {There has been a significant amount of work implementing systems for algorithmic composition with the intention of targeting specific emotional responses in the listener, but a full review of this work is not currently available. This gap creates a shared obstacle to those entering the field. Our aim is thus to give an overview of progress in the area of these affectively driven systems for algorithmic composition. Performative and transformative systems are included and differentiated where appropriate, highlighting the challenges these systems now face if they are to be adapted to, or have already incorporated, some form of affective control. Possible real-time applications for such systems, utilizing affectively driven algorithmic composition and biophysical sensing to monitor and induce affective states in the listener are suggested.},
keywords = {Emotion, Music, Music generation},
pubstate = {published},
tppubtype = {article}
}

There has been a significant amount of work implementing systems for algorithmic composition with the intention of targeting specific emotional responses in the listener, but a full review of this work is not currently available. This gap creates a shared obstacle to those entering the field. Our aim is thus to give an overview of progress in the area of these affectively driven systems for algorithmic composition. Performative and transformative systems are included and differentiated where appropriate, highlighting the challenges these systems now face if they are to be adapted to, or have already incorporated, some form of affective control. Possible real-time applications for such systems, utilizing affectively driven algorithmic composition and biophysical sensing to monitor and induce affective states in the listener are suggested.

@inproceedings{Williams2014conf,
title = {Evaluating perceptual separation in a pilot system for affective composition},
author = {Duncan Williams and Alexis Kirke and Eduardo Miranda and Ian Daly and Etienne Roesch and James Weaver and Slawomir J. Nasuto},
url = {http://cmr.soc.plymouth.ac.uk/pubs/ICMC_2014_DW.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the joint Sound and Music Computing Conference and International Computer Music Conference},
abstract = {Research evaluating perceptual responses to music has identified many structural features as correlates that might be incorporated in computer music systems for affectively charged algorithmic composition and/or expressive music performance. In order to investigate the possible integration of isolated musical features to such a system, a discrete feature known to correlate some with emotional responses -- rhythmic density -- was selected from a literature review and incorporated into a prototype system. This system produces variation in rhythm density via a transformative process. A stimulus set created using this system was then subjected to a perceptual evaluation.

Pairwise comparisons were used to scale differences between 48 stimuli. Listener responses were analysed with Multidimensional scaling (MDS). The 2-Dimensional solution was then rotated to place the stimuli with the largest range of variation across the horizontal plane.

Stimuli with variation in rhythmic density were placed further from the source material than stimuli that were generated by random permutation. This, combined with the striking similarity between the MDS scaling and that of the 2-dimensional emotional model used by some affective algorithmic composition systems, suggests that
isolated musical feature manipulation can now be used to parametrically control affectively charged automated composition in a larger system.},
keywords = {Affective composition, Music generation},
pubstate = {published},
tppubtype = {conference}
}

Research evaluating perceptual responses to music has identified many structural features as correlates that might be incorporated in computer music systems for affectively charged algorithmic composition and/or expressive music performance. In order to investigate the possible integration of isolated musical features to such a system, a discrete feature known to correlate somewhat with emotional responses – rhythmic density – was selected from a literature review and incorporated into a prototype system. This system produces variation in rhythm density via a transformative process. A stimulus set created using this system was then subjected to a perceptual evaluation.

Pairwise comparisons were used to scale differences between 48 stimuli. Listener responses were analysed with Multidimensional scaling (MDS). The 2-Dimensional solution was then rotated to place the stimuli with the largest range of variation across the horizontal plane.

Stimuli with variation in rhythmic density were placed further from the source material than stimuli that were generated by random permutation. This, combined with the striking similarity between the MDS scaling and that of the 2-dimensional emotional model used by some affective algorithmic composition systems, suggests that
isolated musical feature manipulation can now be used to parametrically control affectively charged automated composition in a larger system.