@inproceedings{3dd8820adc0e4430ac8850ea3f77d28e,
  title     = {Automatic Smile and Frown Recognition with Kinetic Earables},
  abstract  = {In this paper, we introduce inertial signals obtained from an earable placed in the ear canal as a new compelling sensing modality for recognising two key facial expressions: Smile and frown. Borrowing principles from Facial Action Coding Systems, we first demonstrate that an inertial measurement unit of an earable can capture facial muscle deformation activated by a set of temporal microexpressions. Building on these observations, we then present three different learning schemes - shallow models with statistical features, hidden Markov model, and deep neural networks to automatically recognise smile and frown expressions from inertial signals. The experimental results show that in controlled non-conversational settings, we can identify smile and frown with high accuracy (F1 score: 0.85).},
  keywords  = {Earable, FACS, Kinetic modeling, Smile and frown recognition},
  author    = {Lee, Seungchul and Min, Chulhong and Montanari, Alessandro and Mathur, Akhil and Chang, Youngjae and Song, Junehwa and Kawsar, Fahim},
  year      = {2019},
  doi       = {10.1145/3311823.3311869},
  language  = {English},
  isbn      = {978-1-4503-6547-5},
  series    = {ACM International Conference Proceeding Series},
  publisher = {ACM},
  pages     = {1--4},
  booktitle = {Proceedings of the 10th Augmented Human International Conference ({AH} 2019)},
  address   = {United States},
  note      = {10th Augmented Human International Conference, AH 2019 ; Conference date: 11-03-2019 Through 12-03-2019},
}