@inproceedings{1091,
  author    = {Ayushman Das and Shu-Ching Chen and Mei-Ling Shyu and Saad Sadiq},
  title     = {Enabling Synergistic Knowledge Sharing and Reasoning in Large Language Models with Collaborative Multi-Agents},
  booktitle = {2023 IEEE 9th International Conference on Collaboration and Internet Computing (CIC)},
  year      = {2023},
  month     = nov,
  publisher = {IEEE},
  pages     = {7},
  chapter   = {92},
  isbn      = {979-8-3503-3912-3},
  doi       = {10.1109/CIC58953.2023.00021},
  url       = {https://par.nsf.gov/biblio/10523458},
  keywords  = {large language model (LLM), multi-agent system (MAS), knowledge sharing, reasoning},
  abstract  = {Despite the significant advancements in the field of Natural Language Processing (NLP), Large Language Models (LLMs) have shown limitations in performing complex tasks that require arithmetic, commonsense, and symbolic reasoning. Reasoning frameworks like ReAct, Chain-of-thought (CoT), Tree-of-thoughts (ToT), etc. have shown success but with limitations in solving long-form complex tasks. To address this, we propose a knowledge-sharing and collaborative multi-agent assisted framework on LLMs that leverages the capabilities of existing reasoning frameworks and the collaborative skills of multi-agent systems (MASs). The objectives of the proposed framework are to overcome the limitations of LLMs, enhance their reasoning capabilities, and improve their performance in complex tasks. It involves generating natural language rationales and in-context few-shot learning via prompting, and integrates the reasoning techniques with efficient knowledge-sharing and communication driven agent networks. The potential benefits of the proposed framework include saving time and money, improved efficiency for computationally intensive reasoning, and the ability to incorporate multiple collaboration strategies for dynamically changing environments.},
}