@inproceedings{4e62870220fc41318389e872e131a32c,
  title     = {The Representation of Speech in Deep Neural Networks},
  author    = {Scharenborg, Odette and {van der Gouw}, Nikki and Larson, Martha and Marchiori, Elena},
  editor    = {Kompatsiaris, Ioannis and Huet, Benoit and Mezaris, Vasileios and Gurrin, Cathal and Cheng, Wen-Huang and Vrochidis, Stefanos},
  booktitle = {MultiMedia Modeling},
  edition   = {Part II},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer},
  pages     = {194--205},
  year      = {2019},
  doi       = {10.1007/978-3-030-05716-9_16},
  isbn      = {978-3-030-05715-2},
  language  = {English},
  keywords  = {Deep neural networks, Speech representations, Visualizations},
  abstract  = {In this paper, we investigate the connection between how people understand speech and how speech is understood by a deep neural network. A na{\"i}ve, general feed-forward deep neural network was trained for the task of vowel/consonant classification. Subsequently, the representations of the speech signal in the different hidden layers of the DNN were visualized. The visualizations allow us to study the distance between the representations of different types of input frames and observe the clustering structures formed by these representations. In the different visualizations, the input frames were labeled with different linguistic categories: sounds in the same phoneme class, sounds with the same manner of articulation, and sounds with the same place of articulation. We investigate whether the DNN clusters speech representations in a way that corresponds to these linguistic categories and observe evidence that the DNN does indeed appear to learn structures that humans use to understand speech without being explicitly trained to do so.},
  note      = {Accepted author manuscript; 25th International Conference on MultiMedia Modeling, MMM 2019; Conference date: 08-01-2019 through 11-01-2019},
}