@article{DalyPred2015,
  title     = {Music-induced emotions can be predicted from a combination of brain activity and acoustic features},
  author    = {Daly, Ian and Williams, Duncan and Hallowell, James and Hwang, Faustina and Kirke, Alexis and Malik, Asad and Weaver, James and Miranda, Eduardo and Nasuto, Slawomir J.},
  url       = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/music-induced-emotions-can-be-predicted-from-a-combination-of-brain-activity-and-acoustic-features.pdf},
  doi       = {10.1016/j.bandc.2015.08.003},
  year      = {2015},
  date      = {2015-12-01},
  journal   = {Brain and Cognition},
  volume    = {101},
  pages     = {1--11},
  abstract  = {It is widely acknowledged that music can communicate and induce a wide range of emotions in the listener. However, music is a highly-complex audio signal composed of a wide range of complex time- and frequency-varying components. Additionally, music-induced emotions are known to differ greatly between listeners. Therefore, it is not immediately clear what emotions will be induced in a given individual by a piece of music. We attempt to predict the music-induced emotional response in a listener by measuring the activity in the listener's electroencephalogram (EEG). We combine these measures with acoustic descriptors of the music, an approach that allows us to consider music as a complex set of time-varying acoustic features, independently of any specific music theory. Regression models are found which allow us to predict the music-induced emotions of our participants with a correlation between the actual and predicted responses of up to r=0.234,p<0.001. This regression fit suggests that over 20% of the variance of the participant's music induced emotions can be predicted by their neural activity and the properties of the music. Given the large amount of noise, non-stationarity, and non-linearity in both EEG and music, this is an encouraging result. Additionally, the combination of measures of brain activity and acoustic features describing the music played to our participants allows us to predict music-induced emotions with significantly higher accuracies than either feature type alone (p<0.01).},
  keywords  = {Acoustic features, EEG, Emotion, Music},
  pubstate  = {published},
  tppubtype = {article}
}

It is widely acknowledged that music can communicate and induce a wide range of emotions in the listener. However, music is a highly-complex audio signal composed of a wide range of complex time- and frequency-varying components. Additionally, music-induced emotions are known to differ greatly between listeners. Therefore, it is not immediately clear what emotions will be induced in a given individual by a piece of music. We attempt to predict the music-induced emotional response in a listener by measuring the activity in the listener's electroencephalogram (EEG). We combine these measures with acoustic descriptors of the music, an approach that allows us to consider music as a complex set of time-varying acoustic features, independently of any specific music theory. Regression models are found which allow us to predict the music-induced emotions of our participants with a correlation between the actual and predicted responses of up to r=0.234,p<0.001. This regression fit suggests that over 20% of the variance of the participant's music induced emotions can be predicted by their neural activity and the properties of the music. Given the large amount of noise, non-stationarity, and non-linearity in both EEG and music, this is an encouraging result. Additionally, the combination of measures of brain activity and acoustic features describing the music played to our participants allows us to predict music-induced emotions with significantly higher accuracies than either feature type alone (p<0.01).

@conference{Daly2015ACII,
  title     = {Identifying music-induced emotions from {EEG} for use in brain-computer music interfacing},
  author    = {Daly, Ian and Williams, Duncan and Malik, Asad and Weaver, James and Hwang, Faustina and Kirke, Alexis and Miranda, Eduardo and Nasuto, Slawomir J.},
  url       = {https://www.computer.org/csdl/proceedings/acii/2015/9953/00/07344685.pdf},
  year      = {2015},
  date      = {2015-09-01},
  booktitle = {Proceedings of the 4th workshop on affective brain-computer interfaces at the ACII 2015},
  pages     = {923--929},
  abstract  = {Brain-computer music interfaces (BCMI) provide a method to modulate an individual's affective state via the selection or generation of music according to their current affective state. Potential applications of such systems may include entertainment or therapeutic applications. We outline a proposed design for such a BCMI and seek a method for automatically differentiating different music induced affective states. Band-power features are explored for use in automatically identifying music-induced affective states. Additionally, a linear discriminant analysis classifier and a support vector machine are evaluated with respect to their ability to classify music induced affective states from the electroencephalogram recorded during a BCMI calibration task. Accuracies of up to 79.5% (p < 0.001) are achieved with the support vector machine.},
  keywords  = {Affective computing, BCMI, Classification, EEG, Music generation},
  pubstate  = {published},
  tppubtype = {conference}
}

Brain-computer music interfaces (BCMI) provide a method to modulate an individual's affective state via the selection or generation of music according to their current affective state. Potential applications of such systems may include entertainment or therapeutic applications. We outline a proposed design for such a BCMI and seek a method for automatically differentiating different music induced affective states. Band-power features are explored for use in automatically identifying music-induced affective states. Additionally, a linear discriminant analysis classifier and a support vector machine are evaluated with respect to their ability to classify music induced affective states from the electroencephalogram recorded during a BCMI calibration task. Accuracies of up to 79.5% (p < 0.001) are achieved with the support vector machine.

A fully automated and online artifact removal method for the electroencephalogram (EEG) is developed for use in brain-computer interfacing (BCI). The method (FORCe) is based upon a novel combination of wavelet decomposition, independent component analysis, and thresholding. FORCe is able to operate on a small channel set during online EEG acquisition and does not require additional signals (e.g., electrooculogram signals). Evaluation of FORCe is performed offline on EEG recorded from 13 BCI participants with cerebral palsy (CP) and online with three healthy participants. The method outperforms the state-of-the-art automated artifact removal methods Lagged Auto-Mutual Information Clustering (LAMIC) and Fully Automated Statistical Thresholding for EEG artifact Rejection (FASTER), and is able to remove a wide range of artifact types including blink, electromyogram (EMG), and electrooculogram (EOG) artifacts.

The electroencephalogram (EEG) may be described by a large number of different feature types and automated feature selection methods are needed in order to reliably identify features which correlate with continuous independent variables.

New method

A method is presented for the automated identification of features that differentiate two or more groups in neurological datasets based upon a spectral decomposition of the feature set. Furthermore, the method is able to identify features that relate to continuous independent variables.

Results

The proposed method is first evaluated on synthetic EEG datasets and observed to reliably identify the correct features. The method is then applied to EEG recorded during a music listening task and is observed to automatically identify neural correlates of music tempo changes similar to neural correlates identified in a previous study. Finally, the method is applied to identify neural correlates of music-induced affective states. The identified neural correlates reside primarily over the frontal cortex and are consistent with widely reported neural correlates of emotions.

Comparison with existing methods

The proposed method is compared to the state-of-the-art methods of canonical correlation analysis and common spatial patterns, in order to identify features differentiating synthetic event-related potentials of different amplitudes and is observed to exhibit greater performance as the number of unique groups in the dataset increases.

The electroencephalogram (EEG) may be described by a large number of different feature types and automated feature selection methods are needed in order to reliably identify features which correlate with continuous independent variables.

New method

A method is presented for the automated identification of features that differentiate two or more groups in neurological datasets based upon a spectral decomposition of the feature set. Furthermore, the method is able to identify features that relate to continuous independent variables.

Results

The proposed method is first evaluated on synthetic EEG datasets and observed to reliably identify the correct features. The method is then applied to EEG recorded during a music listening task and is observed to automatically identify neural correlates of music tempo changes similar to neural correlates identified in a previous study. Finally, the method is applied to identify neural correlates of music-induced affective states. The identified neural correlates reside primarily over the frontal cortex and are consistent with widely reported neural correlates of emotions.

Comparison with existing methods

The proposed method is compared to the state-of-the-art methods of canonical correlation analysis and common spatial patterns, in order to identify features differentiating synthetic event-related potentials of different amplitudes and is observed to exhibit greater performance as the number of unique groups in the dataset increases.

Conclusions

The proposed method is able to identify neural correlates of continuous variables in EEG datasets and is shown to outperform canonical correlation analysis and common spatial patterns.

@inbook{Daly2014mu,
  title     = {Machine learning to identify neural correlates of music and emotions},
  author    = {Daly, Ian and Roesch, Etienne and Weaver, James and Nasuto, Slawomir J.},
  editor    = {Miranda, Eduardo Reck and Castet, Julien},
  url       = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/Machine-learning-to-identify-neural-correlates-of-music-and-emotions.pdf},
  doi       = {10.1007/978-1-4471-6584-2_5},
  isbn      = {978-1-4471-6583-5},
  year      = {2014},
  date      = {2014-10-04},
  pages     = {89--103},
  publisher = {Springer},
  abstract  = {While music is widely understood to induce an emotional response in the listener, the exact nature of that response and its neural correlates are not yet fully explored. Furthermore, the large number of features which may be extracted from, and used to describe, neurological data, music stimuli, and emotional responses, means that the relationships between these datasets produced during music listening tasks or the operation of a brain–computer music interface (BCMI) are likely to be complex and multidimensional. As such, they may not be apparent from simple visual inspection of the data alone. Machine learning, which is a field of computer science that aims at extracting information from data, provides an attractive framework for uncovering stable relationships between datasets and has been suggested as a tool by which neural correlates of music and emotion may be revealed. In this chapter, we provide an introduction to the use of machine learning methods for identifying neural correlates of musical perception and emotion. We then provide examples of machine learning methods used to study the complex relationships between neurological activity, musical stimuli, and/or emotional responses.},
  keywords  = {EEG, Emotion, Machine learning, Models of emotion, Music},
  pubstate  = {published},
  tppubtype = {inbook}
}

While music is widely understood to induce an emotional response in the listener, the exact nature of that response and its neural correlates are not yet fully explored. Furthermore, the large number of features which may be extracted from, and used to describe, neurological data, music stimuli, and emotional responses, means that the relationships between these datasets produced during music listening tasks or the operation of a brain–computer music interface (BCMI) are likely to be complex and multidimensional. As such, they may not be apparent from simple visual inspection of the data alone. Machine learning, which is a field of computer science that aims at extracting information from data, provides an attractive framework for uncovering stable relationships between datasets and has been suggested as a tool by which neural correlates of music and emotion may be revealed. In this chapter, we provide an introduction to the use of machine learning methods for identifying neural correlates of musical perception and emotion. We then provide examples of machine learning methods used to study the complex relationships between neurological activity, musical stimuli, and/or emotional responses.

@conference{Wairagkar2014,
  title     = {Novel single trial movement classification based on temporal dynamics of {EEG}},
  author    = {Wairagkar, Maitreyee and Daly, Ian and Hayashi, Yoshikatsu and Nasuto, Slawomir},
  url       = {http://centaur.reading.ac.uk/37412/1/Graz%20conference%202014-Final%20version.pdf},
  year      = {2014},
  date      = {2014-09-01},
  booktitle = {Proceedings of the Graz Brain-computer interface conference 2014},
  abstract  = {Various complex oscillatory processes are involved in the generation of the motor command. The temporal dynamics of these processes were studied for movement detection from single trial electroencephalogram (EEG). Autocorrelation analysis was performed on the EEG signals to find robust markers of movement detection. The evolution of the autocorrelation function was characterised via the relaxation time of the autocorrelation by exponential curve fitting. It was observed that the decay constant of the exponential curve increased during movement, indicating that the autocorrelation function decays slowly during motor execution. Significant differences were observed between movement and no movement tasks. Additionally, a linear discriminant analysis (LDA) classifier was used to identify movement trials with a peak accuracy of 74%.},
  keywords  = {Autocorrelation, BCI, Classification, EEG, ERD, Motor imagery},
  pubstate  = {published},
  tppubtype = {conference}
}

Various complex oscillatory processes are involved in the generation of the motor command. The temporal dynamics of these processes were studied for movement detection from single trial electroencephalogram (EEG). Autocorrelation analysis was performed on the EEG signals to find robust markers of movement detection. The evolution of the autocorrelation function was characterised via the relaxation time of the autocorrelation by exponential curve fitting. It was observed that the decay constant of the exponential curve increased during movement, indicating that the autocorrelation function decays slowly during motor execution. Significant differences were observed between movement and no movement tasks. Additionally, a linear discriminant analysis (LDA) classifier was used to identify movement trials with a peak accuracy of 74%.

We found neural correlates of music-induced emotion in a number of frequencies over the pre-frontal cortex. Additionally, we found a set of patterns of functional connectivity, defined by inter-channel coherence measures, to be significantly different between groups of music-induced emotional responses.},
keywords = {Asymmetry, EEG, Emotion, Functional connectivity, Music},
pubstate = {published},
tppubtype = {article}
}

This paper presents an EEG study into the neural correlates of music-induced emotions. We presented participants with a large dataset containing musical pieces in different styles, and asked them to report on their induced emotional responses.

We found neural correlates of music-induced emotion in a number of frequencies over the pre-frontal cortex. Additionally, we found a set of patterns of functional connectivity, defined by inter-channel coherence measures, to be significantly different between groups of music-induced emotional responses.

@article{Daly2013artComp,
  title     = {Automated artifact removal from the electroencephalogram; a comparative study},
  author    = {Daly, Ian and Nicolaou, Nicoletta and Nasuto, Slawomir J. and Warwick, Kevin},
  url       = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/published_MS_authors_copy.pdf},
  doi       = {10.1177/1550059413476485},
  year      = {2013},
  date      = {2013-10-01},
  journal   = {Clinical EEG and Neuroscience},
  volume    = {44},
  number    = {4},
  pages     = {291--306},
  abstract  = {Contamination of the electroencephalogram (EEG) by artifacts greatly reduces the quality of the recorded signals. There is a need for automated artifact removal methods. However, such methods are rarely evaluated against one another via rigorous criteria, with results often presented based upon visual inspection alone. This work presents a comparative study of automatic methods for removing blink, electrocardiographic, and electromyographic artifacts from the EEG. Three methods are considered; wavelet, blind source separation (BSS), and multivariate singular spectrum analysis (MSSA)-based correction. These are applied to data sets containing mixtures of artifacts. Metrics are devised to measure the performance of each method. The BSS method is seen to be the best approach for artifacts of high signal to noise ratio (SNR). By contrast, MSSA performs well at low SNRs but at the expense of a large number of false positive corrections.},
  keywords  = {Artefact removal, EEG, ICA, MSSA, Wavelets},
  pubstate  = {published},
  tppubtype = {article}
}

Contamination of the electroencephalogram (EEG) by artifacts greatly reduces the quality of the recorded signals. There is a need for automated artifact removal methods. However, such methods are rarely evaluated against one another via rigorous criteria, with results often presented based upon visual inspection alone. This work presents a comparative study of automatic methods for removing blink, electrocardiographic, and electromyographic artifacts from the EEG. Three methods are considered; wavelet, blind source separation (BSS), and multivariate singular spectrum analysis (MSSA)-based correction. These are applied to data sets containing mixtures of artifacts. Metrics are devised to measure the performance of each method. The BSS method is seen to be the best approach for artifacts of high signal to noise ratio (SNR). By contrast, MSSA performs well at low SNRs but at the expense of a large number of false positive corrections.

@article{Daly2012phase,
  title     = {Testing for significance of phase synchronisation dynamics in the {EEG}},
  author    = {Daly, Ian and Sweeney-Reed, Catherine and Nasuto, Slawomir J.},
  url       = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/Testing-for-significance-of-phase-synchronisation-dynamics-in.pdf},
  doi       = {10.1007/s10827-012-0428-2},
  year      = {2013},
  date      = {2013-06-01},
  journal   = {Journal of Computational Neuroscience},
  volume    = {34},
  number    = {3},
  pages     = {411--432},
  abstract  = {A number of tests exist to check for statistical significance of phase synchronisation within the Electroencephalogram (EEG); however, the majority suffer from a lack of generality and applicability. They may also fail to account for temporal dynamics in the phase synchronisation, regarding synchronisation as a constant state instead of a dynamical process. Therefore, a novel test is developed for identifying the statistical significance of phase synchronisation based upon a combination of work characterising temporal dynamics of multivariate time-series and Markov modelling. We show how this method is better able to assess the significance of phase synchronisation than a range of commonly used significance tests. We also show how the method may be applied to identify and classify significantly different phase synchronisation dynamics in both univariate and multivariate datasets.},
  keywords  = {EEG, Functional connectivity, Markov models, Semi-Markov models, Significance testing},
  pubstate  = {published},
  tppubtype = {article}
}

A number of tests exist to check for statistical significance of phase synchronisation within the Electroencephalogram (EEG); however, the majority suffer from a lack of generality and applicability. They may also fail to account for temporal dynamics in the phase synchronisation, regarding synchronisation as a constant state instead of a dynamical process. Therefore, a novel test is developed for identifying the statistical significance of phase synchronisation based upon a combination of work characterising temporal dynamics of multivariate time-series and Markov modelling. We show how this method is better able to assess the significance of phase synchronisation than a range of commonly used significance tests. We also show how the method may be applied to identify and classify significantly different phase synchronisation dynamics in both univariate and multivariate datasets.

@article{Daly2013headArtifacts,
  title     = {On the automated removal of artifacts related to head movement from the {EEG}},
  author    = {Daly, Ian and Billinger, Martin and Scherer, Reinhold and M{\"u}ller-Putz, Gernot},
  url       = {http://www.iandaly.co.uk/newDesign2016/wp-content/uploads/2016/01/On-the-automated-removal-of-artifacts-related-to-head-movement-from-the-EEG.pdf},
  doi       = {10.1109/TNSRE.2013.2254724},
  issn      = {1534-4320},
  year      = {2013},
  date      = {2013-05-01},
  journal   = {IEEE Transactions on Neural Systems and Rehabilitation Engineering},
  volume    = {21},
  number    = {3},
  pages     = {427--434},
  abstract  = {Contamination of the electroencephalogram (EEG) by artifacts related to head movement is a major cause of reduced signal quality. This is a problem in both neuroscience and other uses of the EEG. To attempt to reduce the influence, on the EEG, of artifacts related to head movement, an accelerometer is placed on the head and independent component analysis is applied to attempt to separate artifacts which are statistically related to head movements. To evaluate the method, EEG and accelerometer measurements are made from 14 individuals with Cerebral palsy attempting to control a sensorimotor rhythm based brain-computer interface. Results show that the approach significantly reduces the influence of head movement related artifacts in the EEG.},
  keywords  = {Artefact removal, EEG, EMG, Head movement, ICA},
  pubstate  = {published},
  tppubtype = {article}
}

Contamination of the electroencephalogram (EEG) by artifacts related to head movement is a major cause of reduced signal quality. This is a problem in both neuroscience and other uses of the EEG. To attempt to reduce the influence, on the EEG, of artifacts related to head movement, an accelerometer is placed on the head and independent component analysis is applied to attempt to separate artifacts which are statistically related to head movements. To evaluate the method, EEG and accelerometer measurements are made from 14 individuals with Cerebral palsy attempting to control a sensorimotor rhythm based brain-computer interface. Results show that the approach significantly reduces the influence of head movement related artifacts in the EEG.

@article{Chen2013a,
  title     = {Event-related desynchronization ({ERD}) in the alpha band during a hand mental rotation task},
  author    = {Chen, Xiaogang and Bin, Guangyu and Daly, Ian and Gao, Xiaorong},
  url       = {http://www.ncbi.nlm.nih.gov/pubmed/23458675},
  doi       = {10.1016/j.neulet.2013.02.036},
  year      = {2013},
  date      = {2013-04-29},
  journal   = {Neuroscience Letters},
  volume    = {541},
  pages     = {238--242},
  abstract  = {Recent studies have demonstrated that mentally rotating the hands involves participants engaging in motor imagery processing. However, far less is known about the possible neurophysiological basis of such processing. To contribute to a better understanding of hand mental rotation processing, event-related spectral perturbation (ERSP) methods were applied to electroencephalography (EEG) data collected from participants mentally rotating their hands. Time-frequency analyses revealed that alpha-band power suppression was larger over central-parietal regions. This is in accordance with motor imagery findings suggesting that the motor regions may be involved in processing or detection of kinaesthetic information. Furthermore, the presence of a significant negative correlation between reaction times (RTs) and alpha-band power suppression over central regions is illustrated. These findings are consistent with the neural efficiency hypothesis, which proposes the non-use of many brain regions irrelevant for the task performance as well as the more focused use of specific task-related regions in individuals with better performance. These results indicate that ERSP provides some independent insights into the mental rotation process and further confirms that parietal and motor cortices are involved in mental rotation.},
  keywords  = {EEG, ERD, Hand rotation},
  pubstate  = {published},
  tppubtype = {article}
}

Recent studies have demonstrated that mentally rotating the hands involves participants engaging in motor imagery processing. However, far less is known about the possible neurophysiological basis of such processing. To contribute to a better understanding of hand mental rotation processing, event-related spectral perturbation (ERSP) methods were applied to electroencephalography (EEG) data collected from participants mentally rotating their hands. Time-frequency analyses revealed that alpha-band power suppression was larger over central-parietal regions. This is in accordance with motor imagery findings suggesting that the motor regions may be involved in processing or detection of kinaesthetic information. Furthermore, the presence of a significant negative correlation between reaction times (RTs) and alpha-band power suppression over central regions is illustrated. These findings are consistent with the neural efficiency hypothesis, which proposes the non-use of many brain regions irrelevant for the task performance as well as the more focused use of specific task-related regions in individuals with better performance. These results indicate that ERSP provides some independent insights into the mental rotation process and further confirms that parietal and motor cortices are involved in mental rotation.

A hybrid brain-computer interface (hBCI) may combine two or more BCI paradigms with the objective of improving performance (accuracy, stability, bit rate etc.) over that achievable with a single paradigm. However, the approach taken in some recent hBCI studies did not achieve accuracies significantly better than a single paradigm. Therefore, we introduce a re-weighting method for classifying a hybrid feature set. This approach produces higher accuracies than with the ERD paradigm.

@article{Pfurtscheller2012a,
  title     = {Coupling between intrinsic prefrontal {HbO2} and central {EEG} beta power oscillations in the resting brain},
  author    = {Pfurtscheller, Gert and Daly, Ian and Bauernfeind, G{\"u}nther and M{\"u}ller-Putz, Gernot R.},
  url       = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/journal.pone_.0043640.pdf},
  doi       = {10.1371/journal.pone.0043640},
  year      = {2012},
  date      = {2012-08-24},
  journal   = {PLOS ONE},
  volume    = {7},
  number    = {8},
  pages     = {1--9},
  abstract  = {There is increasing interest in the intrinsic activity in the resting brain, especially that of ultraslow and slow oscillations. Using near-infrared spectroscopy (NIRS), electroencephalography (EEG), blood pressure (BP), respiration and heart rate recordings during 5 minutes of rest, combined with cross spectral and sliding cross correlation calculations, we identified a short-lasting coupling (duration 100s) between prefrontal oxyhemoglobin (HbO2) in the frequency band between 0.07 and 0.13 Hz and central EEG alpha and/or beta power oscillations in 8 of the 9 subjects investigated. The HbO2 peaks preceded the EEG band power peaks by 3.7 s in 6 subjects, with moderate or no coupling between BP and HbO2 oscillations. HbO2 and EEG band power oscillations were approximately in phase with BP oscillations in the 2 subjects with an extremely high coupling (squared coherence >0.8) between BP and HbO2 oscillation. No coupling was identified in one subject. These results indicate that slow precentral (de)oxyhemoglobin concentration oscillations during awake rest can be temporarily coupled with EEG fluctuations in sensorimotor areas and modulate the excitability level in the brains’ motor areas, respectively. Therefore, this provides support for the idea that resting state networks fluctuate with frequencies of between 0.01 and 0.1 Hz (Mantini et.al. PNAS 2007).},
  keywords  = {Beta coupling, EEG, fNIRS, Resting state},
  pubstate  = {published},
  tppubtype = {article}
}

There is increasing interest in the intrinsic activity in the resting brain, especially that of ultraslow and slow oscillations. Using near-infrared spectroscopy (NIRS), electroencephalography (EEG), blood pressure (BP), respiration and heart rate recordings during 5 minutes of rest, combined with cross spectral and sliding cross correlation calculations, we identified a short-lasting coupling (duration 100s) between prefrontal oxyhemoglobin (HbO2) in the frequency band between 0.07 and 0.13 Hz and central EEG alpha and/or beta power oscillations in 8 of the 9 subjects investigated. The HbO2 peaks preceded the EEG band power peaks by 3.7 s in 6 subjects, with moderate or no coupling between BP and HbO2 oscillations. HbO2 and EEG band power oscillations were approximately in phase with BP oscillations in the 2 subjects with an extremely high coupling (squared coherence >0.8) between BP and HbO2 oscillation. No coupling was identified in one subject. These results indicate that slow precentral (de)oxyhemoglobin concentration oscillations during awake rest can be temporarily coupled with EEG fluctuations in sensorimotor areas and modulate the excitability level in the brains’ motor areas, respectively. Therefore, this provides support for the idea that resting state networks fluctuate with frequencies of between 0.01 and 0.1 Hz (Mantini et.al. PNAS 2007).

@conference{Portelli2011,
  title     = {Low Cost Brain Computer Interface First Results},
  author    = {Portelli, Anthony J. and Daly, Ian and Spencer, Mathew and Nasuto, Slawomir J.},
  url       = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/Low-Cost-Brain-Computer-Interface.pdf},
  year      = {2011},
  date      = {2011-09-01},
  booktitle = {Proceedings of the 5th International Brain-Computer Interface Conference 2011},
  abstract  = {Brain Computer Interfacing (BCI) has been previously demonstrated to restore patient communication, meeting with varying degrees of success. Due to the nature of the equipment traditionally used in BCI experimentation (the electroencephalograph) it is mostly confined to clinical and research environments. The required medical safety standards, subsequent cost of equipment and its application/training times are all issues that need to be resolved if BCIs are to be taken out of the lab/clinic and delivered to the home market. The results in this paper demonstrate a system developed with a low cost medical grade EEG amplifier unit in conjunction with the open source BCI2000 software suite thus constructing the cheapest per electrode system available, meeting rigorous clinical safety standards. Discussion of the future of this technology and future work concerning this platform are also introduced.},
  keywords  = {BCI, EEG, Low-cost BCI},
  pubstate  = {published},
  tppubtype = {conference}
}

Brain Computer Interfacing (BCI) has been previously demonstrated to restore patient communication, meeting with varying degrees of success. Due to the nature of the equipment traditionally used in BCI experimentation (the electroencephalograph) it is mostly confined to clinical and research environments. The required medical safety standards, subsequent cost of equipment and its application/training times are all issues that need to be resolved if BCIs are to be taken out of the lab/clinic and delivered to the home market. The results in this paper demonstrate a system developed with a low cost medical grade EEG amplifier unit in conjunction with the open source BCI2000 software suite thus constructing the cheapest per electrode system available, meeting rigorous clinical safety standards. Discussion of the future of this technology and future work concerning this platform are also introduced.

@phdthesis{Daly2011a,
title = {Phase Synchronisation in Brain Computer Interfacing},
author = {Ian Daly},
url = {http://www.iandaly.co.uk/publications/thesis/Phase_Synchronisation_in_Brain_Computer_Interfacing.pdf},
year = {2011},
date = {2011-07-01},
pages = {1-262},
address = {Reading, UK},
school = {School of Systems Engineering, University of Reading},
abstract = {Brain Computer Interfaces (BCIs) are an emerging area of research combining the Neuroscience, Computer Science, Engineering, Mathematics, Human Computer Interaction and Psychology research fields. A BCI enables an individual to exert control of a computer without activation of the efferent nervous system or the muscles. This allows individuals suffering with partial or complete paralysis and associated conditions which prevent muscle movement to control a computer and hence communicate and exert control over their environment.

This thesis first investigates tools for automatically removing artifacts from the Electroencephalogram (EEG), a signal commonly used in the control a BCI. Tools for measuring inter-regional connectivity patterns within the brain via phase synchronisation are then evaluated and extended to provide novel measures of inter-regional connectivity across the entire cortex.

Feature selection approaches are then introduced and evaluated before being applied to select good feature sets for the discrimination of connectivity patterns. These approaches are compared to Markov modelling approaches which model
and classify temporal dependencies in the data.

The resulting tool-set is applied to a novel BCI control paradigm based upon the detection of single finger taps. It is demonstrated that the connectivity features produce significantly better classification accuracies than can be achieved using conventional features traditionally applied in BCI.

A Neural Mass model is coupled with a novel method to generate realistic Phase reset ERPs.

The power spectra of these synthetic ERPs are compared with the spectra of real ERPs and synthetic ERPs generated via the Additive model. Real ERP spectra show similarities with synthetic Phase reset ERPs and synthetic Additive ERPs.},
pubstate = {published},
tppubtype = {phdthesis}
}

Brain Computer Interfaces (BCIs) are an emerging area of research combining the Neuroscience, Computer Science, Engineering, Mathematics, Human Computer Interaction and Psychology research fields. A BCI enables an individual to exert control of a computer without activation of the efferent nervous system or the muscles. This allows individuals suffering with partial or complete paralysis and associated conditions which prevent muscle movement to control a computer and hence communicate and exert control over their environment.

This thesis first investigates tools for automatically removing artifacts from the Electroencephalogram (EEG), a signal commonly used in the control a BCI. Tools for measuring inter-regional connectivity patterns within the brain via phase synchronisation are then evaluated and extended to provide novel measures of inter-regional connectivity across the entire cortex.

Feature selection approaches are then introduced and evaluated before being applied to select good feature sets for the discrimination of connectivity patterns. These approaches are compared to Markov modelling approaches which model
and classify temporal dependencies in the data.

The resulting tool-set is applied to a novel BCI control paradigm based upon the detection of single finger taps. It is demonstrated that the connectivity features produce significantly better classification accuracies than can be achieved using conventional features traditionally applied in BCI.

A Neural Mass model is coupled with a novel method to generate realistic Phase reset ERPs.

The power spectra of these synthetic ERPs are compared with the spectra of real ERPs and synthetic ERPs generated via the Additive model. Real ERP spectra show similarities with synthetic Phase reset ERPs and synthetic Additive ERPs.

@conference{IanDalySlawomirJ.Nasuto2008,
title = {Towards natural human computer interaction in {BCI}},
author = {Ian Daly and Slawomir J. Nasuto and Kevin Warwick},
url = {http://www.iandaly.co.uk/wp-content/uploads/2016/01/Towards-natural-human-computer-interaction-in-BCI.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the International Symposium on Artificial Intelligence and Simulated Behaviour, Aberdeen, UK},
organization = {AISB},
abstract = {BCI systems require correct classification of signals interpreted from the brain for useful operation. To this end this paper investigates a method proposed in [1] to correctly classify a series of images presented to a group of subjects in [2].

We show that it is possible to use the proposed methods to correctly recognise the original stimuli presented to a subject from analysis of their EEG. Additionally we use a verification set to show that the trained classification method can be applied to a different set of data.

We go on to investigate the issue of invariance in EEG signals. That is, the brain representation of similar stimuli is recognisable across different subjects.

Finally we consider the usefulness of the methods investigated towards an improved BCI system and discuss how it could potentially lead to great improvements in the ease of use for the end user by offering an alternative, more intuitive control based mode of operation.},
keywords = {BCI, Classification, EEG, Speech},
pubstate = {published},
tppubtype = {conference}
}

BCI systems require correct classification of signals interpreted from the brain for useful operation. To this end this paper investigates a method proposed in [1] to correctly classify a series of images presented to a group of subjects in [2].

We show that it is possible to use the proposed methods to correctly recognise the original stimuli presented to a subject from analysis of their EEG. Additionally we use a verification set to show that the trained classification method can be applied to a different set of data.

We go on to investigate the issue of invariance in EEG signals. That is, the brain representation of similar stimuli is recognisable across different subjects.

Finally we consider the usefulness of the methods investigated towards an improved BCI system and discuss how it could potentially lead to great improvements in the ease of use for the end user by offering an alternative, more intuitive control based mode of operation.