@article{674,
  author   = {Ahuja, Ritesh and Zeighami, Sepanta and Ghinita, Gabriel and Shahabi, Cyrus},
  title    = {A Neural Approach to Spatio-Temporal Data Release with User-Level Differential Privacy},
  abstract = {Several "data-for-good" projects [1, 5, 12] initiated by major companies (e.g., Meta, Google) release to the public spatio-temporal datasets to benefit COVID-19 spread modeling [17, 47, 64] and understand human mobility [14, 24]. Most often, spatio-temporal data are provided in the form of snapshot high resolution population density information, where the released statistics capture population counts in small areas for short time periods. Since high resolution is required for utility (e.g., in modeling COVID hotspots) privacy risks are elevated. To prevent malicious actors from using the data to infer sensitive details about individuals, the released datasets must be first sanitized. Typically, [1, 5, 7, 12], differential privacy (DP) is employed as protection model, due to its formal protection guarantees that prevent an adversary to learn whether a particular individual's data has been included in the release or not.},
  journal  = {Proceedings of the ACM on Management of Data},
  year     = {2023},
  month    = may,
  volume   = {1},
  number   = {1},
  pages    = {25},
  issn     = {2836-6573},
  doi      = {10.1145/3588701},
  url      = {https://par.nsf.gov/biblio/10431849},
}