@inproceedings{203,
  author    = {Pan, Menghai and Huang, Weixiao and Li, Yanhua and Zhou, Xun and Liu, Zhenming and Bao, Jie and Zheng, Yu and Luo, Jun},
  title     = {Is Reinforcement Learning the Choice of Human Learners?: A Case Study of Taxi Drivers},
  booktitle = {Proceedings of the 28th International Conference on Advances in Geographic Information Systems},
  publisher = {Association for Computing Machinery},
  year      = {2020},
  month     = nov,
  articleno = {357},
  numpages  = {10},
  doi       = {10.1145/3397536.3422246},
  url       = {https://par.nsf.gov/biblio/10225178},
  abstract  = {Learning to make optimal decisions is a common yet complicated task. While computer agents can learn to make decisions by running reinforcement learning (RL), it remains unclear how human beings learn. In this paper, we perform the first data-driven case study on taxi drivers to validate whether humans mimic RL to learn. We categorize drivers into three groups based on their performance trends and analyze the correlations between human drivers and agents trained using RL. We discover that drivers that become more efficient at earning over time exhibit similar learning patterns to those of agents, whereas drivers that become less efficient tend to do the opposite. Our study (1) provides evidence that some human drivers do adapt RL when learning, (2) enhances the deep understanding of taxi drivers' learning strategies, (3) offers a guideline for taxi drivers to improve their earnings, and (4) develops a generic analytical framework to study and validate human learning strategies.},
}