@inproceedings{1d4b2d3727dc411e80fec720473311ec,
  title     = {Evaluating {BERT}-based Rewards for Question Generation with Reinforcement Learning},
  abstract  = {Question generation systems aim to generate natural language questions that are relevant to a given piece of text, and can usually be answered by just considering this text. Prior works have identified a range of shortcomings (including semantic drift and exposure bias) and thus have turned to the reinforcement learning paradigm to improve the effectiveness of question generation. As part of it, different reward functions have been proposed. As typically these reward functions have been empirically investigated in different experimental settings (different datasets, models and parameters) we lack a common framework to fairly compare them. In this paper, we first categorize existing rewards systematically. We then provide such a fair empirical evaluation of different reward functions (including three we propose here for QG) in a common framework. We find rewards that model answerability to be the most effective.},
  keywords  = {question generation, reinforcement learning, reward functions},
  author    = {Zhu, Peide and Hauff, Claudia},
  year      = {2021},
  doi       = {10.1145/3471158.3472240},
  language  = {English},
  publisher = {Association for Computing Machinery (ACM)},
  pages     = {261--270},
  booktitle = {ICTIR 2021 - Proceedings of the 2021 ACM SIGIR International Conference on Theory of Information Retrieval},
  address   = {New York, NY, USA},
  note      = {11th ACM SIGIR International Conference on Theory of Information Retrieval, ICTIR 2021 ; Conference date: 11-07-2021 Through 11-07-2021},
}