@conference{Daly2015ACII,
title = {Identifying music-induced emotions from EEG for use in brain-computer music interfacing},
author = {Ian Daly and Duncan Williams and Asad Malik and James Weaver and Faustina Hwang and Alexis Kirke and Eduardo Miranda and Slawomir J. Nasuto},
url = {https://www.computer.org/csdl/proceedings/acii/2015/9953/00/07344685.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of the 4th Workshop on Affective Brain-Computer Interfaces at ACII 2015},
pages = {923--929},
abstract = {Brain-computer music interfaces (BCMI) provide a method to modulate an individual's affective state via the selection or generation of music according to their current affective state. Potential applications of such systems may include entertainment or therapeutic applications. We outline a proposed design for such a BCMI and seek a method for automatically differentiating music-induced affective states. Band-power features are explored for use in automatically identifying music-induced affective states. Additionally, a linear discriminant analysis classifier and a support vector machine are evaluated with respect to their ability to classify music-induced affective states from the electroencephalogram recorded during a BCMI calibration task. Accuracies of up to 79.5% (p < 0.001) are achieved with the support vector machine.},
keywords = {Affective computing, BCMI, Classification, EEG, Music generation},
pubstate = {published},
tppubtype = {conference}
}

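As a rough illustration of the band-power-plus-classifier pipeline this abstract describes, the Python sketch below extracts log band-power features with Welch's method and scores a linear SVM by cross-validation. The sampling rate, frequency bands, epoch shapes, and the choice of scipy/scikit-learn are illustrative assumptions, not the paper's actual configuration.

import numpy as np
from scipy.signal import welch
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

FS = 256                                                       # assumed sampling rate (Hz)
BANDS = {"theta": (4, 8), "alpha": (8, 13), "beta": (13, 30)}  # assumed bands

def band_power_features(epochs):
    """epochs: (n_trials, n_channels, n_samples) -> (n_trials, n_channels * n_bands)."""
    freqs, psd = welch(epochs, fs=FS, nperseg=FS, axis=-1)
    feats = []
    for lo, hi in BANDS.values():
        idx = (freqs >= lo) & (freqs < hi)
        feats.append(np.log(psd[..., idx].sum(axis=-1)))       # log band power per channel
    return np.concatenate(feats, axis=-1)

# Synthetic stand-in for EEG recorded during a calibration task
rng = np.random.default_rng(0)
epochs = rng.standard_normal((120, 32, 3 * FS))   # 120 trials, 32 channels, 3 s
labels = rng.integers(0, 2, size=120)             # two hypothetical affective states

X = band_power_features(epochs)
print(cross_val_score(SVC(kernel="linear"), X, labels, cv=5).mean())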

@conference{Daly2015CEEC,
title = {Towards Human-Computer Music Interaction: Evaluation of an Affectively-Driven Music Generator Via Galvanic Skin Response Measures},
author = {Ian Daly and Asad Malik and James Weaver and Faustina Hwang and Slawomir J. Nasuto and Duncan Williams and Alexis Kirke and Eduardo Miranda},
url = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/Identifying-music-induced-emotions-from-EEG-for-use-in-brain-computer-music-interfacing.pdf},
doi = {10.1109/CEEC.2015.7332705},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of the Seventh Computer Science and Electronic Engineering Conference 2015 (CEEC'15)},
pages = {87--92},
publisher = {IEEE},
abstract = {An affectively-driven music generation system is described and evaluated. The system is developed for eventual use in human-computer interaction systems such as brain-computer music interfaces. It is evaluated for its ability to induce changes in a listener's affective state. The affectively-driven algorithmic composition system was used to generate a stimulus set covering 9 discrete sectors of a 2-dimensional affective space by means of a 16-channel feedforward artificial neural network. The system generated 90 short pieces of music with specific affective intentions: 10 stimuli for each of the 9 sectors of the affective space. These pieces were played to 20 healthy participants, and it was observed that the music generation system induced the intended affective states in the participants. This is further verified by inspecting the galvanic skin response recorded from the participants.},
keywords = {BCMI, GSR, Music generation},
pubstate = {published},
tppubtype = {conference}
}

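For intuition about the kind of mapping this abstract mentions, here is a toy feedforward network that turns a target point in the 2-dimensional valence-arousal space into a handful of musical control parameters, sampled once per affective sector. The architecture, the specific output parameters, and their ranges are hypothetical stand-ins; the paper's 16-channel network and composition system are not reproduced here.

import numpy as np

rng = np.random.default_rng(1)

class TinyAffectToMusicNet:
    """Toy net: (valence, arousal) -> normalised musical parameters."""
    def __init__(self, n_hidden=16, n_out=4):
        self.w1 = rng.standard_normal((2, n_hidden)) * 0.5   # untrained, illustrative
        self.w2 = rng.standard_normal((n_hidden, n_out)) * 0.5

    def __call__(self, valence, arousal):
        h = np.tanh(np.array([valence, arousal]) @ self.w1)
        out = 1.0 / (1.0 + np.exp(-(h @ self.w2)))           # squash to [0, 1]
        return {
            "tempo_bpm": 60 + 120 * out[0],     # hypothetical tempo range
            "note_density": out[1],             # relative notes per beat
            "pitch_centre": 48 + 36 * out[2],   # MIDI note number
            "loudness": out[3],                 # relative dynamic
        }

net = TinyAffectToMusicNet()
# One target per sector: valence and arousal each in {-1, 0, 1} gives 9 sectors
for v in (-1, 0, 1):
    for a in (-1, 0, 1):
        print((v, a), net(v, a))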

@article{Daly2014tempoBCI,
title = {Investigating music tempo as a feedback mechanism for closed-loop BCI control},
author = {Ian Daly and Duncan Williams and Faustina Hwang and Alexis Kirke and Asad Malik and Etienne Roesch and James Weaver and Eduardo Miranda and Slawomir J. Nasuto},
url = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/tempoBCI.pdf},
doi = {10.1080/2326263X.2014.979728},
year = {2014},
date = {2014-10-17},
journal = {Brain-Computer Interfaces},
volume = {1},
number = {3},
pages = {158--169},
abstract = {The feedback mechanism used in a brain-computer interface (BCI) forms an integral part of the closed-loop learning process required for successful operation of a BCI. However, ultimate success of the BCI may be dependent upon the modality of the feedback used. This study explores the use of music tempo as a feedback mechanism in BCI and compares it to the more commonly used visual feedback mechanism. Three different feedback modalities are compared for a kinaesthetic motor imagery BCI: visual, auditory via music tempo, and a combined visual and auditory feedback modality. Visual feedback is provided via the position, on the y-axis, of a moving ball. In the music feedback condition, the tempo of a piece of continuously generated music is dynamically adjusted via a novel music-generation method. All the feedback mechanisms allowed users to learn to control the BCI. However, users were not able to maintain as stable control with the music tempo feedback condition as they could in the visual feedback and combined conditions. Additionally, the combined condition exhibited significantly less inter-user variability, suggesting that multi-modal feedback may lead to more robust results. Finally, common spatial patterns are used to identify participant-specific spatial filters for each of the feedback modalities. The mean optimal spatial filter obtained for the music feedback condition is observed to be more diffuse and weaker than the mean spatial filters obtained for the visual and combined feedback conditions.},
keywords = {BCI, BCMI, ERD, Motor imagery, Music, Tempo},
pubstate = {published},
tppubtype = {article}
}

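Common spatial patterns, the spatial-filtering step named at the end of the abstract, can be sketched compactly as a generalised eigendecomposition of per-class covariance matrices. The covariance estimator, lack of regularisation, and data shapes below are simplifying assumptions rather than the paper's exact procedure.

import numpy as np
from scipy.linalg import eigh

def csp_filters(epochs_a, epochs_b, n_filters=4):
    """epochs_*: (n_trials, n_channels, n_samples) per class.
    Returns an (n_filters, n_channels) spatial filter matrix."""
    def mean_cov(epochs):
        return np.mean([np.cov(trial) for trial in epochs], axis=0)
    ca, cb = mean_cov(epochs_a), mean_cov(epochs_b)
    # Generalised eigenproblem: ca w = lambda (ca + cb) w
    vals, vecs = eigh(ca, ca + cb)
    order = np.argsort(vals)
    # Filters from both ends of the spectrum maximise variance for one class
    picks = np.r_[order[: n_filters // 2], order[-(n_filters // 2):]]
    return vecs[:, picks].T

rng = np.random.default_rng(2)
imagery = rng.standard_normal((40, 16, 512))  # assumed: 40 trials, 16 channels
rest = rng.standard_normal((40, 16, 512))
print(csp_filters(imagery, rest).shape)       # (4, 16)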

@conference{Daly2014tempoconf,
title = {Brain-computer music interfacing for continuous control of musical tempo},
author = {Ian Daly and Duncan Williams and Faustina Hwang and Alexis Kirke and Asad Malik and Etienne Roesch and James Weaver and Eduardo Miranda and Slawomir J. Nasuto},
url = {http://dx.doi.org/10.3217/978-3-85125-378-8-4},
doi = {10.3217/978-3-85125-378-8-4},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of the Graz Brain-Computer Interface Conference 2014},
abstract = {A brain-computer music interface (BCMI) is developed to allow continuous modification of the tempo of dynamically generated music. Six out of seven participants are able to control the BCMI with significant accuracy, and their performance is observed to improve over time.},
keywords = {BCI, BCMI, Music, Tempo},
pubstate = {published},
tppubtype = {conference}
}

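One plausible shape for the continuous tempo control this abstract describes is to smooth a normalised BCI control signal and map it linearly onto a tempo range, as in the sketch below. The smoothing constant and BPM range are illustrative guesses, not the system's actual parameters.

import numpy as np

def control_to_tempo(signal, lo_bpm=60.0, hi_bpm=180.0, alpha=0.1):
    """Exponentially smooth a control signal in [0, 1], then map it to BPM."""
    tempos, smoothed = [], signal[0]
    for x in signal:
        smoothed = alpha * x + (1 - alpha) * smoothed
        tempos.append(lo_bpm + (hi_bpm - lo_bpm) * smoothed)
    return np.array(tempos)

rng = np.random.default_rng(3)
print(control_to_tempo(rng.uniform(size=10)).round(1))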