Rethinking machine unlearning for large language models
Authors
Venue
Nature Machine Intelligence
Abstract
Comprehensive review of machine unlearning in LLMs, aiming to eliminate undesirable data influence (sensitive or illegal information) while maintaining essential knowledge generation. Envisions LLM unlearning as a pivotal element in life-cycle management for developing safe, secure, trustworthy, and resource-efficient generative AI.
Tags
Links
BibTeX
Local Entry
@article{liu2025rethinkingllmunlearning,
  title    = {Rethinking Machine Unlearning for Large Language Models},
  author   = {Liu, Sijia and Yao, Yuanshun and Jia, Jinghan and Casper, Stephen and Baracaldo, Nathalie and Hase, Peter and Yao, Yuguang and Liu, Chris Yuhao and Xu, Xiaojun and Li, Hang and Varshney, Kush R. and Bansal, Mohit and Koyejo, Sanmi and Liu, Yang},
  journal  = {Nature Machine Intelligence},
  year     = {2025},
  doi      = {10.1038/s42256-025-00985-0},
  url      = {https://www.nature.com/articles/s42256-025-00985-0},
  abstract = {Comprehensive review of machine unlearning in LLMs, aiming to eliminate undesirable data influence (sensitive or illegal information) while maintaining essential knowledge generation. Envisions LLM unlearning as a pivotal element in life-cycle management for developing safe, secure, trustworthy, and resource-efficient generative AI.},
}
From AUTO:OPENALEX
@article{liu2025rethinkingllmunlearning-openalex,
  title   = {Rethinking Machine Unlearning for Large Language Models},
  author  = {Liu, Sijia and Yao, Yuanshun and Jia, Jinghan and Casper, Stephen and Baracaldo, Nathalie and Hase, Peter and Yao, Yuguang and Liu, Chris Yuhao and Xu, Xiaojun and Li, Hang and Varshney, Kush R. and Bansal, Mohit and Koyejo, Oluwasanmi and Liu, Yang},
  journal = {Nature Machine Intelligence},
  year    = {2025},
  doi     = {10.1038/s42256-025-00985-0},
}