@article{Qian_Kun_Teaching_2018,
  author    = {Qian, Kun and Janott, Christoph and Zhang, Zixing and Deng, Jun and Baird, Alice and Heiser, Clemens and Hohenhorst, Winfried and Herzog, Michael and Hemmert, Werner and Schuller, Bj{\"o}rn},
  title     = {Teaching Machines on Snoring: A Benchmark on Computer Audition for Snore Sound Excitation Localisation},
  journal   = {Archives of Acoustics},
  volume    = {43},
  number    = {3},
  pages     = {465--475},
  year      = {2018},
  publisher = {Polish Academy of Sciences, Institute of Fundamental Technological Research, Committee on Acoustics},
  doi       = {10.24425/123918},
  url       = {http://www.journals.pan.pl/Content/108118/PDF/123918.pdf},
  keywords  = {snore sound, obstructive sleep apnea, acoustic features, machine learning},
  type      = {Artyku{\l}y / Articles},
  note      = {Online},
  abstract  = {This paper proposes a comprehensive study on machine listening for localisation of snore sound excitation. Here we investigate the effects of varied frame sizes, and overlap of the analysed audio chunk for extracting low-level descriptors. In addition, we explore the performance of each kind of feature when it is fed into varied classifier models, including support vector machines, k-nearest neighbours, linear discriminant analysis, random forests, extreme learning machines, kernel-based extreme learning machines, multilayer perceptrons, and deep neural networks. Experimental results demonstrate that, wavelet packet transform energy can outperform most other features. A deep neural network trained with subband energy ratios reaches the highest performance achieving an unweighted average recall of 72.8\% from four types for snoring.},
}