@ARTICLE{Dimitrova-Grekow_Teodora_Speech_2019,
  author       = {Dimitrova-Grekow, Teodora and Klis, Aneta and Igras-Cybulska, Magdalena},
  title        = {Speech Emotion Recognition Based on Voice Fundamental Frequency},
  journal      = {Archives of Acoustics},
  volume       = {44},
  number       = {2},
  pages        = {277--286},
  year         = {2019},
  publisher    = {Polish Academy of Sciences, Institute of Fundamental Technological Research, Committee on Acoustics},
  type         = {Article},
  howpublished = {online},
  doi          = {10.24425/aoa.2019.128491},
  URL          = {http://www.journals.pan.pl/Content/112091/PDF/aoa.2019.128491.pdf},
  keywords     = {emotion recognition, speech signal analysis, voice analysis, fundamental frequency, speech corpora},
  abstract     = {The human voice is one of the basic means of communication, and it also readily conveys the speaker's emotional state. This paper presents experiments on emotion recognition in human speech based on the fundamental frequency. The AGH Emotional Speech Corpus was used; this database consists of audio samples of seven emotions acted by 12 speakers (6 female and 6 male). We explored phrases of all the emotions, both all together and in various combinations. The Fast Fourier Transform and magnitude spectrum analysis were applied to extract the fundamental tone from the speech audio samples. After extracting several statistical features of the fundamental frequency, we studied whether they carry information about the emotional state of the speaker by applying different AI methods. The resulting data were analysed with classifiers from the WEKA data mining toolkit: K-Nearest Neighbours with local induction, Random Forest, Bagging, JRip, and the Random Subspace Method. The results show that the fundamental frequency is a promising choice for further experiments.},
}
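
The abstract describes a pipeline of FFT-based magnitude spectrum analysis, fundamental-frequency (F0) extraction, and statistical F0 features fed to classifiers. The Python sketch below (BibTeX ignores text outside entries) illustrates that general idea only; it is not the authors' implementation. The frame and hop sizes, the 60-400 Hz search band, the crude spectral-peak F0 estimate, and the feature set are all illustrative assumptions.

# Minimal sketch (not the paper's exact pipeline): frame-wise F0 estimation
# from FFT magnitude-spectrum peaks, then simple statistics of the F0 contour.
import numpy as np

def estimate_f0_contour(signal, sr, frame_len=2048, hop=512,
                        fmin=60.0, fmax=400.0):
    """Return per-frame F0 estimates (Hz) via magnitude-spectrum peak picking."""
    window = np.hanning(frame_len)
    freqs = np.fft.rfftfreq(frame_len, d=1.0 / sr)
    band = (freqs >= fmin) & (freqs <= fmax)   # assumed plausible F0 range
    f0 = []
    for start in range(0, len(signal) - frame_len + 1, hop):
        frame = signal[start:start + frame_len] * window
        mag = np.abs(np.fft.rfft(frame))       # magnitude spectrum of the frame
        peak = np.argmax(mag[band])            # strongest bin inside the F0 band
        f0.append(freqs[band][peak])
    return np.asarray(f0)

def f0_statistics(f0):
    """A few statistical descriptors of the F0 contour (illustrative set)."""
    return {
        "mean": float(np.mean(f0)),
        "std": float(np.std(f0)),
        "min": float(np.min(f0)),
        "max": float(np.max(f0)),
        "range": float(np.max(f0) - np.min(f0)),
    }

if __name__ == "__main__":
    sr = 16000
    t = np.arange(sr) / sr
    demo = np.sin(2 * np.pi * 180 * t)         # synthetic 180 Hz tone as a stand-in
    print(f0_statistics(estimate_f0_contour(demo, sr)))

In the paper, features of this kind were then classified in WEKA (K-Nearest Neighbours with local induction, Random Forest, Bagging, JRip, Random Subspace Method); the statistics dictionary above is only a placeholder for whatever feature vector such a classifier would consume.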