@inproceedings{c420a64e2db447c785490e23f6b9fb99,
title = "RCEA: Real-time, Continuous Emotion Annotation for Collecting Precise Mobile Video Ground Truth Labels",
abstract = "Collecting accurate and precise emotion ground truth labels for mobile video watching is essential for ensuring meaningful predictions. However, video-based emotion annotation techniques either rely on post-stimulus discrete self-reports, or allow real-time, continuous emotion annotations (RCEA) only for desktop settings. Following a user-centric approach, we designed an RCEA technique for mobile video watching, and validated its usability and reliability in a controlled, indoor (N=12) and later outdoor (N=20) study. Drawing on physiological measures, interaction logs, and subjective workload reports, we show that (1) RCEA is perceived to be usable for annotating emotions while mobile video watching, without increasing users' mental workload (2) the resulting time-variant annotations are comparable with intended emotion attributes of the video stimuli (classification error for valence: 8.3%; arousal: 25%). We contribute a validated annotation technique and associated annotation fusion method, that is suitable for collecting fine-grained emotion annotations while users watch mobile videos.",
keywords = "annotation, continuous, emotion, labels, mobile, real-time, video",
author = "Tianyi Zhang and {El Ali}, Abdallah and Chen Wang and Alan Hanjalic and Pablo Cesar",
note = "Virtual/online event due to COVID-19 ; 2020 ACM CHI Conference on Human Factors in Computing Systems, CHI 2020 ; Conference date: 25-04-2020 Through 30-04-2020",
year = "2020",
month = apr,
day = "21",
doi = "10.1145/3313831.3376808",
language = "English",
series = "Conference on Human Factors in Computing Systems - Proceedings",
publisher = "ACM",
booktitle = "CHI 2020 - Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems",
address = "United States",
}