Add 1 files
Browse files- 2511/2511.03506.md +1053 -0
2511/2511.03506.md
ADDED
|
@@ -0,0 +1,1053 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Title: HaluMem: Evaluating Hallucinations in Memory Systems of Agents
|
| 2 |
+
|
| 3 |
+
URL Source: https://arxiv.org/html/2511.03506
|
| 4 |
+
|
| 5 |
+
Published Time: Tue, 11 Nov 2025 01:57:34 GMT
|
| 6 |
+
|
| 7 |
+
Markdown Content:
|
| 8 |
+
1]China Telecom Research Institute 2]MemTensor (Shanghai) Technology 3]Harbin Engineering University
|
| 9 |
+
|
| 10 |
+
(November 9, 2025)
|
| 11 |
+
|
| 12 |
+
###### Abstract
|
| 13 |
+
|
| 14 |
+
Memory systems are key components that enable AI systems such as LLMs and AI agents to achieve long-term learning and sustained interaction. However, during memory storage and retrieval, these systems frequently exhibit memory hallucinations, including fabrication, errors, conflicts, and omissions. Existing evaluations of memory hallucinations are primarily end-to-end question answering, which makes it difficult to localize the operational stage within the memory system where hallucinations arise. To address this, we introduce the Hallucination in Memory Benchmark (HaluMem), the first operation level hallucination evaluation benchmark tailored to memory systems. HaluMem defines three evaluation tasks (memory extraction, memory updating, and memory question answering) to comprehensively reveal hallucination behaviors across different operational stages of interaction. To support evaluation, we construct user-centric, multi-turn human-AI interaction datasets, HaluMem-Medium and HaluMem-Long. Both include about 15k memory points and 3.5k multi-type questions. The average dialogue length per user reaches 1.5k and 2.6k turns, with context lengths exceeding 1M tokens, enabling evaluation of hallucinations across different context scales and task complexities. Empirical studies based on HaluMem show that existing memory systems tend to generate and accumulate hallucinations during the extraction and updating stages, which subsequently propagate errors to the question answering stage. Future research should focus on developing interpretable and constrained memory operation mechanisms that systematically suppress hallucinations and improve memory reliability.
|
| 15 |
+
|
| 16 |
+
1 Introduction
|
| 17 |
+
--------------
|
| 18 |
+
|
| 19 |
+
Each interaction between a user and an LLM may contain personalized information about the user [shi2024wildfeedback, zhao2025do]. However, such information is often forgotten once the conversation ends, making it difficult for the model to continuously understand the user, adapt to persona shifts, or generate personalized responses [liu2023thinkinmemory, zhang2024guided]. To ensure that LLMs maintain coherence and personalization in long-term interactions, it is crucial to develop a mechanism capable of recording, updating, and utilizing user information, which constitutes the core function of a memory system.
|
| 20 |
+
|
| 21 |
+
A memory system serves as the fundamental infrastructure for organizing and managing information based on the history of human–AI conversations. It extracts, structures, and continuously updates key information generated across multi-turn interactions between users and AI systems, retrieving and injecting this information into the model as needed to support personalization and long-term consistency [li2025memos_long, memobase, kang2025memory, rasmussen2025zep, supermemory]. Specifically, a memory system identifies stable user profiles, narratives, and events from dialogues and stores them as plaintext entries enriched with metadata. When new queries or tasks arise, the system retrieves and selectively integrates relevant memories based on the current intent and context, enabling the AI system to “remember and correctly utilize” user information, thereby preserving semantic coherence, behavioral consistency, and preference alignment. Representative systems such as MemOS [li2025memos_long], Mem0 [chhikara2025mem0], Zep [rasmussen2025zep], Supermemory [supermemory], and Memobase [memobase] continuously record user profiles, events, and evolving preferences, supporting the creation, revision, and tracking of memories to construct a system-level memory layer with structured management capabilities.
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
|
| 25 |
+
Figure 1: Examples of operation-level hallucination in a memory system.
|
| 26 |
+
|
| 27 |
+
Although these systems significantly improve the organization and controllability of memory, they are commonly affected by the phenomenon of memory hallucination (Figure [1](https://arxiv.org/html/2511.03506v2#S1.F1 "Figure 1 ‣ 1 Introduction ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents")), which manifests as fabricated, erroneous, conflicting, or missing information during the processes of memory extraction or updating [DBLP:journals/corr/abs-2507-18910, DBLP:conf/fllm/AgrawalKA024, math13050856]. Such issues undermine the accuracy and consistency of memory. More importantly, these upstream hallucinations are often amplified during the generation stage, further inducing generation hallucination [10.1145/3703155] and compromising the overall reliability of the system. To effectively mitigate these hallucination phenomena, it is essential to establish a systematic hallucination evaluation mechanism for memory systems. However, existing methods for evaluating hallucinations in memory systems remain limited. Most mainstream studies adopt end-to-end, question–answer-based evaluation frameworks that assess memory quality indirectly through the final output performance of AI systems, making it difficult to determine at which stage of the memory process the hallucination originates.
|
| 28 |
+
|
| 29 |
+
To address this issue, we propose the Hallucination in Memory Benchmark (HaluMem), the first operation-level hallucination evaluation benchmark for memory systems, which comprises two datasets, HaluMem‑Medium and HaluMem‑Long. HaluMem constructs an evaluation framework that encompasses three types of tasks: memory extraction, memory updating, and memory question answering, in order to comprehensively reveal the hallucination behaviors of memory systems across different operational levels of interaction.
|
| 30 |
+
|
| 31 |
+
However, achieving operation-level evaluation of hallucinations in memory systems is a nontrivial task, as it requires a multi-turn conversational dataset that can comprehensively represent the processes of memory generation, updating, and retrieval across interactions.[^1] Such a dataset is used as input to the memory system under evaluation and requires the system to identify and process memories of different operational types that accumulate throughout the conversation. To this end, we provide a multi-turn conversation dataset with precise annotations for each memory operation and its corresponding result, referred to as a memory point. By comparing the memory points produced by a memory system with the annotated ground-truth memory points, we can perform fine-grained evaluation to determine whether hallucinations occur in memory extraction, memory updating, or question answering (i.e., memory retrieval). Specifically, we measure accuracy and coverage to assess hallucinations caused by errors or fabrications in memory extraction, use consistency to evaluate hallucinations arising from errors or omissions in memory updating, and identify hallucinations in question answering that result from incorrect references or fabricated content.

[^1]: The dataset we designed is a user-centered multi-turn human–AI conversation dataset. This design is motivated by the fact that memory systems are inherently intended to support personalized and long-term human–AI interactions. Their core functionality lies in organizing, storing, and updating user-related memories throughout continuous dialogue, which enables a more realistic evaluation of how hallucinations occur when memory systems organize and update memories around the user.
|
| 32 |
+
|
| 33 |
+
Based on this design, we construct two benchmark datasets: HaluMem-Medium and HaluMem-Long. Both datasets contain approximately 15,000 memory points and more than 3,400 evaluation queries, with each user involved in over one thousand conversational turns on average. The latter further extends the average dialogue context length per user to the scale of millions of tokens, allowing examination of hallucination behaviors in ultra-long conversations.
|
| 34 |
+
|
| 35 |
+
The main contributions of this work are summarized as follows:
|
| 36 |
+
|
| 37 |
+
* •We propose HaluMem, the first operation-level benchmark for evaluating memory hallucinations, which overcomes the limitations of prior end-to-end evaluation methods by systematically revealing hallucination phenomena across three operational dimensions: memory extraction, memory updating, and memory-based question answering.
|
| 38 |
+
* •We construct an extensive multi-turn evaluation dataset for human–AI interactions and design two user-centered benchmarks, HaluMem-Medium and HaluMem-Long, to evaluate hallucination behaviors of memory systems under different contextual scales and task complexities.
|
| 39 |
+
* •Through stage-wise evaluation, we reveal the cumulative and amplifying effects of hallucinations across memory extraction, updating, and question answering, providing a new analytical perspective for understanding and mitigating hallucinations in memory systems.
|
| 40 |
+
|
| 41 |
+
2 Related Work
|
| 42 |
+
--------------
|
| 43 |
+
|
| 44 |
+
### 2.1 Memory System
|
| 45 |
+
|
| 46 |
+
Large Language Models (LLMs) and AI Agents built upon them possess implicit memory capabilities, where knowledge is primarily embedded within model parameters through large-scale pre-training and fine-tuning, thereby forming a parameterized form of long-term memory. Although such implicit memory enables LLMs to demonstrate strong knowledge recall and reasoning abilities during inference and generation, it suffers from poor manageability—the internal memory of the model cannot be explicitly accessed, updated, or deleted, and there is no mechanism for controlling its lifecycle. When encountering outdated or conflicting information, the model often struggles to revise or replace old knowledge, which can lead to memory hallucination, manifesting as the generation of erroneous, obsolete, or inconsistent content.
|
| 47 |
+
|
| 48 |
+
Table 1: Comparison of Various Memory Systems.
|
| 49 |
+
|
| 50 |
+
Method Memory Type Memory Operation Manageability Graph Support
|
| 51 |
+
Supermemory [supermemory] | Plain Text (with Metadata) | CUDE | Fair–Excellent | Yes
|
| 52 |
+
Memobase [memobase] | Plain Text (with Metadata) | CUD | Excellent | No
|
| 53 |
+
Zep [rasmussen2025zep] | Plain Text (with Metadata) | CUD | Fair–Excellent | Yes
|
| 54 |
+
Mem0 [chhikara2025mem0] | Plain Text (with Metadata) | CUD | Fair–Excellent | Yes
|
| 55 |
+
MemOS [li2025memos_long] | Parameter; Activation; Plain Text (with Metadata) | CUD | Fair–Excellent | Yes
|
| 56 |
+
|
| 57 |
+
Various forms of external memory modules have been proposed to address the limitations of parameterized memory. Early external memory mechanisms were primarily represented by Retrieval-Augmented Generation (RAG). In particular, RAG [lewis2020rag] introduces an external plaintext knowledge retrieval mechanism. Before generation, relevant documents are retrieved from a vector database and incorporated into the model’s input, enabling controllable and updatable external memory. This approach offers high manageability, and because the external memory is transparent and editable, it exhibits a relatively low degree of memory hallucination. However, traditional RAG systems primarily rely on text-based memory structures and lack explicit modeling of inter-entity relationships, meaning they do not support graph structures. Consequently, they remain limited in handling complex knowledge reasoning and maintaining long-term consistency. Building upon this, GraphRAG [edge2025graphrag] further integrates a knowledge graph structure, organizing and retrieving knowledge in the form of entity–relation pairs. By leveraging graph indexing and multi-hop path retrieval, GraphRAG significantly enhances the representational capacity and retrieval accuracy of structured knowledge, leading to improved performance in relational reasoning. Nevertheless, the construction and maintenance of graph structures entail high costs, and synchronization during updates introduces additional complexity. As a result, GraphRAG demonstrates moderate manageability, inferior to that of RAG, and may introduce additional memory hallucinations due to inconsistent updates among graph nodes or edges.
|
| 58 |
+
|
| 59 |
+
With the growing demand for AI systems capable of personalized interaction and long-term learning, researchers have begun exploring memory systems that possess genuine long-term maintainability and operational controllability, as summarized in Table [1](https://arxiv.org/html/2511.03506v2#S2.T1 "Table 1 ‣ 2.1 Memory System ‣ 2 Related Work ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents"). Supermemory [supermemory] provides long-term memory for language models by combining document retrieval and user-specific memory. It integrates both retrieval-augmented generation and agent memory, using a contextual graph that captures temporal, relational, and personal information, enabling consistent and personalized responses across interactions. Memobase [memobase] focuses on user-level long-term memory by recording preferences and interaction histories in a plaintext structure. During interactions, it dynamically generates context snippets from user profiles and recent events to enable personalized recall, though some risk of hallucination may occur during memory extraction. Zep [rasmussen2025zep] introduces a context engineering framework that integrates agent memory, Graph RAG, and context assembly capabilities, with its core component Graphiti enabling temporally-aware synthesis of conversational and business data for personalized long-term context. Mem0 [chhikara2025mem0] employs a metadata-enriched plaintext storage format that supports comprehensive memory operations (Create/Extract, Update, Delete, and Expand/Enrich), incorporating conflict detection and memory merging to ensure consistency and traceability. MemOS [li2025memos_long] attempts to abstract memory as a system-level resource by unifying the management of three types of memory: parametric memory, activation memory, and explicit (plaintext) memory. 
Through lifecycle control, version management, and migration mechanisms, MemOS enables cross-model and cross-session memory sharing and integration. However, while graph structures enhance the expressiveness of memory representation, they also increase management complexity and make the system more prone to hallucination.
|
| 60 |
+
|
| 61 |
+
### 2.2 Evaluating Hallucinations in Memory Systems
|
| 62 |
+
|
| 63 |
+
Table 2: Comparison of Hallucination Evaluation Benchmarks
|
| 64 |
+
|
| 65 |
+
Feature HaluMem PersonaMem LOCOMO LongMemEval PrefEval
|
| 66 |
+
Evaluation Granularity Operation-level End-to-end End-to-end End-to-end End-to-end
|
| 67 |
+
Evaluation Timing After each session After all sessions After all sessions After all sessions After all sessions
|
| 68 |
+
Evaluation Tasks | Memory Extraction, Memory Updating, Memory QA | Multiple Choice | QA, Summarization, Generation | QA, Memory Recall | Generation, Classification
|
| 69 |
+
Memory Type Persona, Event, Relationship Persona Persona, Event Persona, Event Persona
|
| 70 |
+
Memory Update Yes Yes No Yes Yes
|
| 71 |
+
Conversation Time Span | 10–20 years | Several years* | Several months | ~2.5 years | –
|
| 72 |
+
Avg Length / Session | 8.3k tokens | 6k tokens | 477 tokens | 3k tokens | –
|
| 73 |
+
Max Context Length 1M tokens 1M tokens 9k tokens 1.5M tokens 100k tokens
|
| 74 |
+
Question Num | 3,467 | ~6,000 | 7,512 | 500 | 3,000
|
| 75 |
+
|
| 76 |
+
* •* “Several years” for PersonaMem is inferred from the paper and dataset, not explicitly labeled.
|
| 77 |
+
|
| 78 |
+
Hallucinations in memory systems can be divided into two types: memory hallucinations and generation hallucinations. The former refers to inconsistencies or errors that occur during the processes of storing, updating, or retrieving information within a memory system, such as fabricated memories, outdated memories, unresolved conflicts, or incorrect retrievals. The latter refers to hallucinations that arise during the generation phase of language models, where the model produces outputs inconsistent with factual truth or the given context. These two types of hallucinations are closely interrelated: memory hallucinations often act as upstream causes of generation hallucinations, while generation hallucinations may further amplify or obscure memory-related errors.
|
| 79 |
+
|
| 80 |
+
Generation hallucinations are the most extensively studied type of hallucination in current research and are typically divided into two categories: factual hallucinations and faithfulness hallucinations [huang2025survey]. Factual hallucinations assess whether the model output aligns with objective facts, whereas faithfulness hallucinations evaluate whether the output remains faithful to the given context or source information. Around these two categories, researchers have proposed a variety of mature evaluation methods and metric systems, including: external-retrieval-based factual verification [DBLP:journals/corr/abs-2502-13622], which compares generated content with external knowledge bases; model-internal-state-based reliability assessment [su-etal-2024-unsupervised, DBLP:conf/iclr/0026L0GWTFY24], which analyzes attention distributions or activation patterns to estimate hallucination risk; behavior-based evaluation of output consistency and verifiability [liang-etal-2024-learning]; and uncertainty- or LLM-discriminator-based automated hallucination detection [kang2025uncertaintyquantificationhallucinationdetection, DBLP:journals/corr/abs-2505-08200]. These approaches have substantially improved the interpretability and quantifiability of hallucinations at the generation level, leading to relatively mature and systematic detection frameworks for generation hallucinations.
|
| 81 |
+
|
| 82 |
+
In contrast, research on memory hallucinations remains at an early stage. Representative studies include LoCoMo, LongMemEval, PrefEval, and PersonaMem (Table [2](https://arxiv.org/html/2511.03506v2#S2.T2 "Table 2 ‣ 2.2 Evaluation Hallucinations in Memory Systems ‣ 2 Related Work ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents")). The early benchmark LoCoMo [maharana2024evaluating] focuses on memory retention under long-context settings. It adopts an end-to-end evaluation paradigm, assessing models through question answering, summarization, and generation tasks to test factual recall and event tracking over ultra-long texts. Although the dataset is relatively large (about 7.5k questions), it only covers a time span of several months and lacks an explicit memory updating mechanism, primarily reflecting the model’s capability for static information retention. Subsequently, LongMemEval [wulongmemeval] extends this framework by introducing metrics such as Information Retention Rate and Memory Recall Accuracy, covering approximately 2.5 years of multi-turn dialogue and incorporating explicit memory updates to quantify knowledge consistency across time. This represents a shift from static evaluation toward dynamic memory modeling. In the direction of personalization, PrefEval [zhao2025do] evaluates a model’s ability to maintain and follow user preferences over long-term interactions, using generation and classification tasks to assess preference consistency, though it remains limited to persona-level memory. PersonaMem [jiang2025know] further constructs simulated user personas and event histories, employing multiple-choice evaluations to assess persona consistency, traceability, and update accuracy. With a longer time span (on the order of years), it provides a more representative benchmark for personalized long-term memory assessment.
|
| 83 |
+
|
| 84 |
+
Although these studies provide preliminary frameworks for evaluating memory-related capabilities, they remain predominantly end-to-end and fail to localize hallucinations at the operational level within memory systems.
|
| 85 |
+
|
| 86 |
+
3 Problem Definition
|
| 87 |
+
--------------------
|
| 88 |
+
|
| 89 |
+

|
| 90 |
+
|
| 91 |
+
Figure 2: Comparison between HaluMem and existing hallucination evaluation methods for memory systems.
|
| 92 |
+
|
| 93 |
+
Let there be a memory system $S$ that endows an AI system $A$ (including an LLM or AI agent) with long-term memory and personalization capabilities. The memory system receives a multi-turn dialogue sequence between the user and the assistant, denoted as $D=\{(u_{1},a_{1}),(u_{2},a_{2}),\dots,(u_{N},a_{N})\}$, where $u_{i}$ and $a_{i}$ represent the utterances of the user and the AI at turn $i$, respectively. Each memory point is stored as a plaintext entry, and a single memory is defined as $m$. With respect to the dialogue flow $D$, the memory system involves four types of operations during interaction: (1) Memory Extraction ($E$): extracting newly generated memory points from $D$; (2) Memory Updating ($U$): modifying or deleting existing memories; (3) Memory Retrieval ($R$): recalling memories relevant to the current query[^2]; (4) Memory Question Answering ($Q$): constructing prompts and invoking $A$ to generate responses.

[^2]: Since retrieval $R$ primarily focuses on relevance and recall rate and rarely introduces generative processing by LLMs, this study concentrates on the three stages that directly induce hallucinations, namely $E$, $U$, and $Q$.
|
| 94 |
+
|
| 95 |
+
Existing evaluations of memory systems typically adopt an end-to-end question–answer paradigm. Given a set of dialogue-based queries $\mathcal{Q}=\{q_{j}\}_{j=1}^{J}$ and their corresponding gold answers $\mathcal{Y}^{*}=\{y_{j}^{*}\}_{j=1}^{J}$, the evaluation pipeline can be abstracted as
|
| 96 |
+
|
| 97 |
+
$$\hat{M}=U\left(E(D)\right),\quad\hat{R}_{j}=R(\hat{M},q_{j}),\quad\hat{y}_{j}=A\left(\hat{R}_{j},q_{j}\right).$$
|
| 98 |
+
|
| 99 |
+
End-to-end evaluation is measured using answer-level metrics such as accuracy or F1 score:
|
| 100 |
+
|
| 101 |
+
$$\text{Acc}_{\text{e2e}}=\frac{1}{J}\sum_{j=1}^{J}\mathbb{I}\left[\hat{y}_{j}=y_{j}^{*}\right].$$
|
| 102 |
+
|
| 103 |
+
When $\hat{y}_{j}\neq y_{j}^{*}$, the metric $\text{Acc}_{\text{e2e}}$ cannot identify the source of the error. It remains unclear whether the hallucination arises from the extraction stage $E$, where incorrect or fabricated memories are introduced, from the updating stage $U$, where old memories are mistakenly modified or not properly refreshed, or from the question-answering stage $Q$, where unsupported generative content is produced despite correct memories being available. The lack of traceability prevents the development of targeted mitigation strategies.
|
| 104 |
+
|
| 105 |
+
To enable a localized and diagnostic evaluation, we construct fine-grained annotations and define gold standards for each stage. (1) Extraction gold standard: $G^{\mathrm{ext}}=\{m_{i}\}_{i=1}^{K}$, representing the set of memory points that should be newly added during the dialogue. (2) Updating gold standard: $G^{\text{upd}}=\{m^{\text{old}}\to m^{\text{new}}\}$, representing the set of memory point pairs before and after updates during the dialogue. (3) Question–answer dataset: for each query $q_{j}$, a gold answer $y_{j}^{*}$ is provided. The system outputs are defined as follows:
|
| 106 |
+
|
| 107 |
+
$$\hat{M}^{\text{ext}}=E(D),\quad\hat{G}^{\text{upd}}=U(\hat{M}^{\text{ext}},D),\quad\hat{y}_{j}=A\left(R(\hat{M},q_{j}),q_{j}\right),$$
|
| 108 |
+
|
| 109 |
+
where $\hat{M}$ denotes the set of memory points representing the current state of the memory system when query $q_{j}$ is processed. By providing stage-specific gold standards and evaluation metrics for $E$, $U$, and $Q$, the proposed HaluMem benchmark enables operation-level hallucination evaluation within memory systems.
|
| 110 |
+
|
| 111 |
+
4 Methodology for Constructing HaluMem
|
| 112 |
+
--------------------------------------
|
| 113 |
+
|
| 114 |
+

|
| 115 |
+
|
| 116 |
+
Figure 3: Framework of the HaluMem Construction Pipeline.
|
| 117 |
+
|
| 118 |
+
To systematically evaluate memory systems in realistic scenarios, we construct the Hallucination in Memory Benchmark (HaluMem). To ensure the quality of the dataset and the controllability of the construction process, we design a user-centered, six-stage procedure based on a progressive expansion strategy.
|
| 119 |
+
|
| 120 |
+
Stage 1: Persona Construction. This stage initiates the HaluMem dataset construction by creating virtual users with complete persona profiles to simulate real participants in later human–AI dialogues. Each persona includes three parts: Core Profile Information, Dynamic State Information, and Preference Information. The core profile captures stable background traits; the dynamic state reflects current circumstances such as career, health, and relationships; and the preferences define personal tastes. Each user receives six to eight randomly assigned preferences across areas like food, music, and film. The core profile provides a static foundation, while the dynamic and preference elements, which evolve randomly, add realism, diversity, and rich material for memory extraction. An initial timestamp ensures that all personas reflect a consistent starting point in time. To enhance the authenticity of virtual users, user seeds are randomly sampled from the Persona Hub 3 3 3 A collection of one billion diverse personas automatically curated from web data[tao2024personahub], and rule‑based procedures are applied to generate structured persona drafts. GPT‑4o then verifies and refines them. See Appendix [E.1](https://arxiv.org/html/2511.03506v2#A5.SS1 "E.1 User Profile Example in Stage 1 ‣ Appendix E Examples from the Process of Constructing HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") for an example.
|
| 121 |
+
|
| 122 |
+
Stage 2: Life Skeleton. After generating persona profiles, the second stage builds a life skeleton that defines each user’s evolutionary trajectory. Each user receives several core career events centered on life goals, which serve as anchors for the evolution of dynamic information. Updates to social status, career transitions, or health conditions are typically associated with these career events. Preference information evolves separately through probabilistic modifications or deletions, independent of these career events. These probabilistic rules ensure a diverse yet coherent evolution. The life skeleton captures the user’s potential future states and serves as a structured script for later memory addition, modification, and deletion, maintaining the complexity and consistency of the evaluation scenarios.
|
| 123 |
+
|
| 124 |
+
Stage 3: Event Flow. As the core component of dataset construction, the third stage aims to transform the abstract “life skeleton” generated in Stage 2 into a structured and narrative event flow. The objective is to “eventify” discrete evolution instructions, constructing for each persona a complete memory timeline that integrates initial states, career development, and daily preference changes, thereby balancing narrative coherence with machine interpretability. The core of this stage includes three types of events:
|
| 125 |
+
|
| 126 |
+
* •Init Events: Generated from the user’s initial profile, covering core, dynamic, and preference information. They serve as the starting point of the memory timeline, simulating the user’s first self-introduction.
|
| 127 |
+
* •Career Events: Derived from the life skeleton built in Stage 2, representing the main storyline of user development. Each career event is divided into sub-stages and instantiated with dynamic details (e.g., promotions, illnesses) to enrich the narrative.
|
| 128 |
+
* •Daily Events: Generated from the evolution of user preferences, independent of career progression. Each preference change becomes a concrete life scenario recorded as an atomic event with pre- and post-change states and their cause.
|
| 129 |
+
|
| 130 |
+
Within this framework, career events serve as the narrative backbone, while init and daily events provide necessary background and contextual details. Through the integration and chronological alignment of the three event types, this stage produces a coherent and complete event sequence that functions as the user’s memory transaction log. See Appendix [E.2](https://arxiv.org/html/2511.03506v2#A5.SS2 "E.2 Event Structure Examples in Stage 3 ‣ Appendix E Examples from the Process of Constructing HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") for event examples.
|
| 131 |
+
|
| 132 |
+
Stage 4: Session Summaries and Memory Points. This stage transforms the structured event flow from Stage 3 into realistic session summaries and detailed memory points. For each event, we create a human–AI dialogue scenario shaped by the user’s motivation. The system has access to the current persona profile, along with all prior events and memory points, ensuring logical, causal, and consistent generation. As events unfold, the persona profile is dynamically updated to reflect the user’s evolving state. Each memory point includes its content, type (persona, event, or relationship), and importance, with updated entries preserving replaced information for traceability. More details are provided in Appendices [A.1](https://arxiv.org/html/2511.03506v2#A1.SS1 "A.1 Definition of Memory Types ‣ Appendix A Supplementary Details of HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") and [E.3](https://arxiv.org/html/2511.03506v2#A5.SS3 "E.3 Examples of Memory Points, Dialogues, and QA Pairs in Stages 4–6 ‣ Appendix E Examples from the Process of Constructing HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents").
|
| 133 |
+
|
| 134 |
+
Stage 5: Session Generation. This stage converts the structured event flow and memory points from previous stages into complete, multi-turn dialogues that are context-rich, goal-driven, and adversarially challenging. The process has three steps: adversarial content injection, multi-turn dialogue generation, and memory self-verification. Adversarial content injection adds distractor memories 4 4 4 False but similar memories that the AI naturally uses while the user stays silent, mimicking realistic information contamination. Memory self-verification checks and refines each memory point for consistency with the generated dialogues. Overall, this stage simulates how memory is formed, maintained, and challenged in realistic conversations, producing data that test long-term memory performance and hallucination resistance. Examples appear in Appendix [E.3](https://arxiv.org/html/2511.03506v2#A5.SS3 "E.3 Examples of Memory Points, Dialogues, and QA Pairs in Stages 4–6 ‣ Appendix E Examples from the Process of Constructing HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents").
|
| 135 |
+
|
| 136 |
+
Stage 6: Question Generation. The final stage constructs a set of memory-related question–answer pairs based on the sessions and memory points generated previously. Six categories of memory evaluation questions are predefined, and the number and types of questions are programmatically allocated according to event type and complexity to ensure balanced coverage. For each career event, all its sub-stages are integrated into a single unit to increase reasoning depth and complexity. Each question–answer pair is annotated with a difficulty level and accompanied by traceable evidence, explicitly linking the answer to the supporting memory points. See Appendices [A.2](https://arxiv.org/html/2511.03506v2#A1.SS2 "A.2 Definition of Question Types ‣ Appendix A Supplementary Details of HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") and [E.3](https://arxiv.org/html/2511.03506v2#A5.SS3 "E.3 Examples of Memory Points, Dialogues, and QA Pairs in Stages 4–6 ‣ Appendix E Examples from the Process of Constructing HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") for details.
|
| 137 |
+
|
| 138 |
+
Human Annotation. To verify the quality of HaluMem, we conducted human annotation on part of the sessions in HaluMem-Medium, covering both memory points and question–answer pairs. For each user, 35 sessions were randomly selected, totaling 700 sessions (over 50% of the dataset). Eight annotators with at least a bachelor’s degree rated each session on Correctness, Relevance, and Consistency. After 10 days of annotation, the results showed a correctness rate of 95.70%, an average relevance score of 9.58, and an average consistency score of 9.45. These results demonstrate the high quality and reliability of the HaluMem benchmark. More details are provided in Appendix [C](https://arxiv.org/html/2511.03506v2#A3 "Appendix C Annotation Guidelines and Instructions ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents").
|
| 139 |
+
|
| 140 |
+
Overall, we constructed two datasets: HaluMem-Medium and HaluMem-Long. HaluMem-Medium includes 30,073 rounds of dialogue from 20 users, with an average context length of about 160k tokens, 14,948 memory points, and 3,467 QA pairs. HaluMem-Long extends each user’s context to 1M tokens through inserted irrelevant dialogues 5 5 5 Mainly sourced from ELI5 [fan2019eli5], [GPT-OSS-120B-Distilled-Reasoning-math](https://huggingface.co/datasets/Jackrong/GPT-OSS-120B-Distilled-Reasoning-math), and factual QA pairs generated using GPT-4o., containing 53,516 rounds in total. Details are given in Appendices [A.3](https://arxiv.org/html/2511.03506v2#A1.SS3 "A.3 Dataset Statistics ‣ Appendix A Supplementary Details of HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") and [A.4](https://arxiv.org/html/2511.03506v2#A1.SS4 "A.4 Construction Details of HaluMem‑Long ‣ Appendix A Supplementary Details of HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents").
|
| 141 |
+
|
| 142 |
+
5 Evaluation Framework of HaluMem
|
| 143 |
+
---------------------------------
|
| 144 |
+
|
| 145 |
+
For each user, the session-level evaluation procedure of HaluMem is defined as follows: (1) Dialogue sessions $D^{1},D^{2},\dots,D^{S}$ are sequentially fed, in chronological order, into the memory system $S$. (2) If the current session $D^{s}$ contains reference memory points or QA tasks 6 6 6 The evaluation of QA tasks follows the conventional end-to-end procedure. Therefore, in Figure [4](https://arxiv.org/html/2511.03506v2#S5.F4 "Figure 4 ‣ 5 Evaluation Framework of HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents"), we only illustrate the simplified testing process for hallucination evaluation in memory extraction and memory updating., the corresponding evaluation process (extraction, updating, or question answering) is triggered immediately after $S$ completes processing that session, and the results are recorded. (3) After processing all sessions, the metrics of the three categories of tasks are aggregated to obtain the overall system performance.
|
| 146 |
+
|
| 147 |
+

|
| 148 |
+
|
| 149 |
+
Figure 4: Hallucination evaluation process
|
| 150 |
+
|
| 151 |
+
To support this evaluation workflow, the system is required to provide the following three types of API interfaces: (1) Add Dialogue API: inputs dialogues and automatically performs memory extraction; (2) Get Dialogue Memory API: retrieves the memory points extracted by the system from a specified session; (3) Retrieve Memory API: retrieves the user’s most relevant memory content based on a given query.
|
| 152 |
+
|
| 153 |
+
Based on the above workflow and interface design, HaluMem conducts operation-level evaluation of memory systems across three essential tasks: Memory Extraction, Memory Updating, and Memory Question Answering.
|
| 154 |
+
|
| 155 |
+
### 5.1 Memory Extraction
|
| 156 |
+
|
| 157 |
+
The memory extraction task evaluates whether the system can correctly identify and store key information from dialogues while avoiding fabricated or irrelevant memories. For each dialogue session $D^{s}$ that contains reference memories, the benchmark provides a gold memory set $G^{\mathrm{ext}}_{s}=\{m_{i}^{s}\}_{i=1}^{K_{s}}$ that should be extracted. The system output after processing $D^{s}$ is the extracted memory set $\widehat{M}^{\mathrm{ext}}_{s}=\{\hat{m}_{j}^{s}\}_{j=1}^{\widehat{K}_{s}}$, which is used for evaluation.
|
| 158 |
+
|
| 159 |
+
##### Memory Integrity (Anti-Amnesia)
|
| 160 |
+
|
| 161 |
+
This metric measures whether the system omits crucial information that should be extracted:
|
| 162 |
+
|
| 163 |
+
$$\text{Memory Recall}=\frac{N_{\mathrm{correct}}}{N_{\mathrm{should}}},\quad\text{Weighted Memory Recall}=\frac{\sum_{i=1}^{N_{\mathrm{should}}}w_{i}\cdot s_{i}}{\sum_{i=1}^{N_{\mathrm{should}}}w_{i}},\tag{1}$$
|
| 164 |
+
|
| 165 |
+
where $N_{\mathrm{should}}=|G^{\mathrm{ext}}_{s}|$, $N_{\mathrm{correct}}$ denotes the number of correctly extracted memories, $w_{i}$ represents the importance weight of the $i$-th memory, and $s_{i}\in\{1,0.5,0\}$ indicates the extraction score (completely extracted, partially extracted, or omitted).
|
| 166 |
+
|
| 167 |
+
##### Memory Accuracy (Anti-Hallucination)
|
| 168 |
+
|
| 169 |
+
This metric evaluates whether the extracted memories are factual and free from hallucination:
|
| 170 |
+
|
| 171 |
+
$$\text{Memory Accuracy}=\frac{\sum_{j=1}^{N_{\mathrm{extract}}}s_{j}}{N_{\mathrm{extract}}},\quad\text{Target Memory Precision}=\frac{\sum_{j\in M_{T}}s_{j}}{|M_{T}|},\tag{2}$$
|
| 172 |
+
|
| 173 |
+
where $N_{\mathrm{extract}}=|\widehat{M}^{\mathrm{ext}}_{s}|$, and $M_{T}$ denotes the set of target memories that match the reference ones.
|
| 174 |
+
|
| 175 |
+
##### False Memory Resistance (FMR)
|
| 176 |
+
|
| 177 |
+
This metric measures the system’s ability to resist hallucination when facing distracting content that the AI mentions but the user does not confirm:
|
| 178 |
+
|
| 179 |
+
$$\text{FMR}=\frac{N_{\mathrm{miss}}}{N_{D}},\tag{3}$$
|
| 180 |
+
|
| 181 |
+
where $N_{D}$ represents the total number of distractor memories and $N_{\mathrm{miss}}$ denotes the number of distractors successfully ignored by the system; a higher value indicates stronger resistance.
|
| 182 |
+
|
| 183 |
+
### 5.2 Memory Updating
|
| 184 |
+
|
| 185 |
+
The memory updating task evaluates whether the system can correctly modify, merge, or replace existing memories during new dialogues so that consistency is maintained without introducing hallucinations. For each dialogue session $D^{s}$ that contains annotated updates, the gold update set is defined as
|
| 186 |
+
|
| 187 |
+
$$G^{\mathrm{upd}}_{s}=\{(m^{\mathrm{old}}\rightarrow m^{\mathrm{new}})\}.$$
|
| 188 |
+
|
| 189 |
+
The system output is denoted as $\widehat{G}^{\mathrm{upd}}_{s}$.
|
| 190 |
+
|
| 191 |
+
Typical memory update hallucinations include: (1) incorrect modification of old information, (2) omission of new information, and (3) version conflicts or self-contradictions. Therefore, the following metrics are defined to evaluate memory update hallucination:
|
| 192 |
+
|
| 193 |
+
$$\text{Memory Update Accuracy}=\frac{N_{\mathrm{correct\text{-}upd}}}{N_{\mathrm{target\text{-}upd}}},\tag{4}$$
|
| 194 |
+
$$\text{Memory Update Hallucination Rate}=\frac{N_{\mathrm{wrong\text{-}upd}}}{N_{\mathrm{target\text{-}upd}}},$$
|
| 195 |
+
$$\text{Memory Update Omission Rate}=\frac{N_{\mathrm{missed\text{-}upd}}}{N_{\mathrm{target\text{-}upd}}},$$
|
| 196 |
+
|
| 197 |
+
where $N_{\mathrm{target\text{-}upd}}=|G^{\mathrm{upd}}_{s}|$, $N_{\mathrm{correct\text{-}upd}}$ is the number of correctly updated items, $N_{\mathrm{wrong\text{-}upd}}$ is the number of incorrect or hallucinated updates, and $N_{\mathrm{missed\text{-}upd}}$ is the number of updates that should have been made but were not.
|
| 198 |
+
|
| 199 |
+
### 5.3 Memory Question Answering
|
| 200 |
+
|
| 201 |
+
The memory question-answering task evaluates the end-to-end performance of the system, including extraction, updating, retrieval, and generation. For each question $q_{j}$, the system uses the Retrieve Memory API to obtain relevant memories $\widehat{R}(q_{j})$. The retrieved set $\widehat{R}(q_{j})$ and the question are then passed to the AI system $A$ to generate an answer $\hat{y}_{j}$. The generated answer is compared with the reference answer $y_{j}^{*}$, and the following metrics are defined:
|
| 202 |
+
|
| 203 |
+
$$\mathrm{QA\text{-}Accuracy}=\frac{N_{\mathrm{correct}}}{N_{\mathrm{total}}},\tag{5}$$
|
| 204 |
+
$$\mathrm{QA\text{-}Hallucination}=\frac{N_{\mathrm{hallucinated}}}{N_{\mathrm{total}}},$$
|
| 205 |
+
$$\mathrm{QA\text{-}Omission}=\frac{N_{\mathrm{omitted}}}{N_{\mathrm{total}}},$$
|
| 206 |
+
|
| 207 |
+
where N total N_{\mathrm{total}} denotes the total number of test questions, N correct N_{\mathrm{correct}} represents the number of questions that are correctly answered, N hallucinated N_{\mathrm{hallucinated}} indicates the number of questions that contain fabricated or incorrect information, and N omitted N_{\mathrm{omitted}} refers to the number of questions that are left unanswered due to missing memories.
|
| 208 |
+
|
| 209 |
+
6 Experiments
|
| 210 |
+
-------------
|
| 211 |
+
|
| 212 |
+
### 6.1 Experimental Setup
|
| 213 |
+
|
| 214 |
+
We conducted a comprehensive evaluation of several state-of-the-art memory systems on HaluMem, including Mem0 (both standard and graph versions) [chhikara2025mem0], Memobase [memobase], Supermemory [supermemory], and Zep [rasmussen2025zep].7 7 7 MemOS will also be included in the evaluation as soon as possible. Each memory system was independently evaluated in two subsets, HaluMem-Medium and HaluMem-Long, with efforts made to ensure consistent parameter configurations across evaluations.
|
| 215 |
+
|
| 216 |
+
To automate the evaluation of three core tasks, memory extraction, memory updating, and memory question answering, we use GPT-4o for consistency determination and scoring. We designed various prompt templates to guide the automated evaluation by GPT-4o (See Appendix [D.2](https://arxiv.org/html/2511.03506v2#A4.SS2 "D.2 Prompts for Scoring in Memory Evaluation Tasks ‣ Appendix D Prompts ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") for details.). In the memory updating task, we retrieved the 10 most relevant memories from the memory system for each memory labeled as "update type" for verification. In the memory question answering task, we retrieved 20 most relevant memories for each question to assist in generating answers, using GPT-4o uniformly as the answer generation model. The prompt templates used for answer generation across different memory systems are provided in Appendix [D.1](https://arxiv.org/html/2511.03506v2#A4.SS1 "D.1 Prompts for Memory Question Answering Task ‣ Appendix D Prompts ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents").
|
| 217 |
+
|
| 218 |
+
Some memory systems required specific configurations due to their unique interfaces and constraints; details are provided in Appendix [B](https://arxiv.org/html/2511.03506v2#A2 "Appendix B Special Configurations for Some Memory Systems ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents").
|
| 219 |
+
|
| 220 |
+
### 6.2 Experimental Results
|
| 221 |
+
|
| 222 |
+
Following the evaluation procedure outlined in Section [5](https://arxiv.org/html/2511.03506v2#S5 "5 Evaluation Framework of HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents"), we conducted comprehensive evaluations of all memory systems across the three tasks in the HaluMem benchmark. The results were aggregated, and all metrics introduced in Section [5](https://arxiv.org/html/2511.03506v2#S5 "5 Evaluation Framework of HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") were subsequently computed.
|
| 223 |
+
|
| 224 |
+
#### 6.2.1 Overall Evaluation on HaluMem
|
| 225 |
+
|
| 226 |
+
Table 3: Evaluation results of all memory systems on HaluMem. “R” denotes Recall, “Target P” denotes Target Memory Precision, “Acc.” denotes Accuracy, “FMR” denotes False Memory Resistance, “C” denotes Correct Rate (Accuracy), “H” denotes Hallucination Rate, and “O” denotes Omission Rate. The values in parentheses in the “Target P” and “Acc.” columns represent the number of extracted memories. Color scale reflects performance (red = worse, green = better); Best values in bold.
|
| 227 |
+
|
| 228 |
+
Dataset System Memory Integrity Memory Accuracy Memory Updating Question Answering
|
| 229 |
+
R↑\uparrow Weighted R↑\uparrow Target P↑\uparrow Acc.↑\uparrow FMR↑\uparrow C↑\uparrow H↓\downarrow O↓\downarrow C↑\uparrow H↓\downarrow O↓\downarrow
|
| 230 |
+
Medium Mem0 42.91%65.03%86.26%(10556)60.86%(16291)56.80%25.50%0.45%74.02%53.02%19.17%27.81%
|
| 231 |
+
Mem0-Graph 43.28%65.52%87.20%(10567)61.86%(16230)55.70%24.50%0.26%75.24%54.66%19.28%26.06%
|
| 232 |
+
Memobase 14.55%25.88%92.24%(5443)32.29%(17081)80.78%5.20%0.55%94.25%35.33%29.97%34.71%
|
| 233 |
+
Supermemory 41.53%64.76%90.32%(14134)60.83%(22551)51.77%16.37%1.15%82.47%54.07%22.24%23.69%
|
| 234 |
+
Zep-----47.28%0.42%52.31%55.47%21.92%22.62%
|
| 235 |
+
Long Mem0 3.23%11.89%88.01%(1134)46.01%(2433)87.65%1.45%0.03%98.51%28.11%17.29%54.60%
|
| 236 |
+
Mem0-Graph 2.24%10.76%87.32%(785)41.26%(1866)88.36%1.47%0.04%98.40%32.44%21.82%45.74%
|
| 237 |
+
Memobase 6.18%14.68%88.56%(3077)25.61%(11795)85.39%4.10%0.36%95.38%33.60%29.46%36.96%
|
| 238 |
+
Supermemory 53.02%70.73%85.82%(24483)29.71%(77134)36.86%17.01%0.58%82.42%53.77%22.21%24.02%
|
| 239 |
+
Zep-----37.35%0.48%62.14%50.19%22.51%27.30%
|
| 240 |
+
|
| 241 |
+
* •Note: since Zep does not provide a Get Dialogue Memory API, it is not possible to calculate Memory Integrity or Memory Accuracy. For details, see Appendix [B](https://arxiv.org/html/2511.03506v2#A2 "Appendix B Special Configurations for Some Memory Systems ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents").
|
| 242 |
+
|
| 243 |
+
Table [3](https://arxiv.org/html/2511.03506v2#S6.T3 "Table 3 ‣ 6.2.1 Overall Evaluation on HaluMem ‣ 6.2 Experimental Results ‣ 6 Experiments ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") presents the evaluation results of all memory systems on three tasks: memory extraction, memory updating, and memory question answering. The evaluation metrics for memory extraction include memory integrity and memory accuracy.
|
| 244 |
+
|
| 245 |
+
Overall, almost all memory systems perform worse on HaluMem-Long than on HaluMem-Medium, with Mem0, Mem0-Graph, and Memobase showing particularly notable declines. Notably, except for Supermemory and Zep, all other systems extract significantly fewer memories on HaluMem-Long than on HaluMem-Medium. This indicates that future memory systems need to improve their ability to process irrelevant information and distinguish between high- and low-value memories.
|
| 246 |
+
|
| 247 |
+
In the memory extraction task, all systems achieve recall (R) rates below 60% in terms of memory integrity, indicating that many reference memory points are not extracted. The better performance in weighted memory recall (weighted R) compared to recall suggests that the systems can prioritize important memory points. Regarding memory accuracy, all systems have accuracy (Acc.) below 62%, reflecting a high proportion of hallucinations, although performance on Target P is relatively good. Supermemory performs the worst on FMR because it tends to extract excessive information without effectively filtering distractions or unhelpful content. Other systems adopt more conservative strategies and thus perform better in FMR. In summary, future memory systems should strike a balance among coverage of important memories, extraction accuracy, and resistance to interference, aiming for both high quality and reliability in memory retrieval.
|
| 248 |
+
|
| 249 |
+
In the memory updating task, all systems achieve correct update rates below 50%, and except for Supermemory, their performance drops considerably on HaluMem-Long. Systems showing better performance in memory integrity also tend to exhibit higher update accuracy, but all suffer omission rates above 50%. This issue primarily stems from insufficient coverage in memory extraction: when the pre-update memories are not extracted, related updates cannot be properly processed. Moreover, the fact that all systems exhibit hallucination rates below 2% does not necessarily imply strong hallucination suppression, since very few samples actually enter the update stage. Overall, current systems face a clear bottleneck in memory updating: the extraction and updating stages lack stable linkage, resulting in low accuracy and high omission rates.
|
| 250 |
+
|
| 251 |
+
In the memory question-answering task, the best-performing systems are also those that perform well in memory integrity and memory updating, further highlighting the crucial role of memory extraction. For example, Mem0 and Mem0-Graph show clear performance declines on HaluMem-Long compared to HaluMem-Medium, which strongly correlates with their substantial reduction in extracted memory points. However, all systems achieve answer accuracies below 56%, with both hallucination rate and omission rate remaining high, and their overall performance further decreases on HaluMem-Long. This demonstrates that current memory systems’ QA performance depends heavily on the sufficiency and accuracy of upstream memory extraction, and remains prone to factual deviation and memory confusion under interference or extended context conditions.
|
| 252 |
+
|
| 253 |
+
#### 6.2.2 Performance on Different Memory Types
|
| 254 |
+
|
| 255 |
+
Table 4: Typewise accuracy on event, persona, and relationship memory.
|
| 256 |
+
|
| 257 |
+
Dataset System Event Persona Relationship
|
| 258 |
+
Medium Mem0 29.69%33.74%27.77%
|
| 259 |
+
Mem0-Graph 30.02%33.71%26.60%
|
| 260 |
+
Memobase 5.12%13.38%6.79%
|
| 261 |
+
Supermemory 28.66%32.11%20.67%
|
| 262 |
+
Zep 44.83%∗49.75%∗38.81%∗
|
| 263 |
+
Long Mem0 0.92%3.01%2.18%
|
| 264 |
+
Mem0-Graph 1.10%2.00%1.59%
|
| 265 |
+
Memobase 4.09%5.32%4.21%
|
| 266 |
+
Supermemory 38.48%40.85%32.61%
|
| 267 |
+
Zep 35.76%∗39.07%∗31.16%∗
|
| 268 |
+
|
| 269 |
+
* •* The memory entries of Zep include only those from the memory updating task. For details, see Appendix [B](https://arxiv.org/html/2511.03506v2#A2 "Appendix B Special Configurations for Some Memory Systems ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents").
|
| 270 |
+
|
| 271 |
+
Table [4](https://arxiv.org/html/2511.03506v2#S6.T4 "Table 4 ‣ 6.2.2 Performance on Different Memory Types ‣ 6.2 Experimental Results ‣ 6 Experiments ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") reports the extraction accuracy of each memory system for event, persona, and relationship memories, which include all memory points from both the memory extraction and updating tasks while excluding distractor memories. Zep achieves the best performance on HaluMem-Medium, whereas Supermemory performs best on HaluMem-Long. However, Mem0, Mem0-Graph, and Memobase show a marked decline in long-context scenarios, suggesting difficulty in consistently capturing valuable information in complex dialogues. Supermemory is the only one whose performance on HaluMem-Long surpasses that on HaluMem-Medium, probably because it extracts a larger number of memory points in the long-context condition. Across memory types, Persona memories yield slightly higher accuracy, indicating that static personal traits are easier to capture, whereas understanding event dynamics and relationship changes remains challenging. Overall, all systems still show low performance across the three memory categories, indicating significant limitations in current memory modeling.
|
| 272 |
+
|
| 273 |
+
#### 6.2.3 Performance on Different Question Types
|
| 274 |
+
|
| 275 |
+

|
| 276 |
+
|
| 277 |
+
Figure 5: Performance of the Memory System Across Different Question Types
|
| 278 |
+
|
| 279 |
+
Figure [5](https://arxiv.org/html/2511.03506v2#S6.F5 "Figure 5 ‣ 6.2.3 Performance on Different Question Types ‣ 6.2 Experimental Results ‣ 6 Experiments ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") illustrates the performance of different memory systems across six categories of questions. Overall, the accuracy of all memory systems remains relatively low across most categories, indicating substantial room for improvement. The Mem0 series and Memobase show significantly poorer performance on HaluMem-Long compared to HaluMem-Medium, suggesting a notable degradation under ultra-long context conditions. In contrast, SuperMemory and Zep demonstrate relatively stable behavior and achieve consistently superior overall performance on both datasets. Furthermore, all memory systems perform comparatively better on memory boundary and memory conflict questions, indicating their capability to effectively recognize unknown or misleading information and respond correctly. However, their performance deteriorates substantially on multi-hop inference, dynamic update, and generalization & application questions, suggesting that current memory systems still struggle with complex reasoning and preference tracking.
|
| 280 |
+
|
| 281 |
+
#### 6.2.4 Efficiency Analysis of Memory Systems
|
| 282 |
+
|
| 283 |
+
Table 5: Time consumption of all memory systems during evaluation.
|
| 284 |
+
|
| 285 |
+
Dataset System Dialogue Addition Memory Retrieval Total
|
| 286 |
+
Time (min)Time (min)Time (min)
|
| 287 |
+
Medium Mem0 2768.14 41.66 2809.8
|
| 288 |
+
Mem0-Graph 2840.07 54.65 2894.72
|
| 289 |
+
Memobase 293.30 139.95 433.25
|
| 290 |
+
Supermemory 273.21 95.53 368.74
|
| 291 |
+
Zep-53.34-
|
| 292 |
+
Long Mem0 691.62 39.15 730.77
|
| 293 |
+
Mem0-Graph 870.32 62.42 932.74
|
| 294 |
+
Memobase 239.29 136.19 375.48
|
| 295 |
+
Supermemory 1672.53 137.02 1809.55
|
| 296 |
+
Zep-50.22-
|
| 297 |
+
|
| 298 |
+
Table [5](https://arxiv.org/html/2511.03506v2#S6.T5 "Table 5 ‣ 6.2.4 Efficiency Analysis of Memory Systems ‣ 6.2 Experimental Results ‣ 6 Experiments ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") shows the time consumption of all memory systems during the evaluation process for dialogue addition and memory retrieval, as well as their total runtime. Overall, dialogue addition requires substantially more time than memory retrieval, indicating that the write stage is the primary computational bottleneck. Enhancing the efficiency of memory extraction and updating is thus crucial for improving interactive performance. On HaluMem-Medium, Supermemory performs best in both dialogue addition and total runtime, while Mem0 shows the best retrieval efficiency. However, the dialogue addition time of Mem0 and Mem0-Graph exceeds 2700 minutes, revealing their low processing efficiency during dialogue ingestion and memory construction. On HaluMem-Long, the dialogue addition time for Mem0, Mem0-Graph, and Memobase decreases, mainly because the number of processed memory points is reduced rather than due to performance improvement. In contrast, Supermemory extracts a substantially larger number of memory points, resulting in the highest time cost.
|
| 299 |
+
|
| 300 |
+
7 Conclusion
|
| 301 |
+
------------
|
| 302 |
+
|
| 303 |
+
Most existing benchmarks for memory systems adopt a black box, end to end question answering setup, which makes it difficult to analyze and measure hallucinations introduced by internal memory operations. To address this gap, we present the Hallucination in Memory Benchmark (HaluMem), the first operation level hallucination evaluation benchmark for memory systems. HaluMem conducts a comprehensive assessment of memory hallucinations and overall performance through three tasks: memory extraction, memory updating, and memory question answering. For dataset construction, we design a user-centric, six-stage pipeline based on a progressive expansion strategy, and build two datasets, HaluMem-Medium and HaluMem-Long, whose construction quality is verified through human annotation. In the experimental study, we systematically evaluate multiple advanced memory systems on HaluMem, analyzing performance on the three tasks, extraction accuracy across different memory types, and efficiency. The results reveal persistent bottlenecks in coverage, accuracy, update capability, robustness to interference, and question answering reliability. Future work should improve extraction quality, update logic, semantic understanding, and system efficiency in order to achieve more stable and comprehensive long term memory.
|
| 304 |
+
|
| 305 |
+
\appendixpage
|
| 306 |
+
|
| 307 |
+
Appendix A Supplementary Details of HaluMem
|
| 308 |
+
-------------------------------------------
|
| 309 |
+
|
| 310 |
+
This appendix provides additional statistical information and key definitions of the HaluMem dataset to support a more detailed understanding of its data composition and task taxonomy. The HaluMem dataset consists of two parts: HaluMem-Medium and HaluMem-Long, representing medium- and long-context multi-turn human–AI interaction scenarios, respectively. Each subset contains multiple types of memory points and questions, enabling systematic evaluation of hallucination behaviors in memory systems.
|
| 311 |
+
|
| 312 |
+
Table 6: Statistical Overview of HaluMem Datasets
|
| 313 |
+
|
| 314 |
+
Metrics HaluMem-Medium HaluMem-Long
|
| 315 |
+
Interaction Statistics
|
| 316 |
+
Avg Context Length (tokens/user) 159,910.95 1,007,264.65
|
| 317 |
+
Avg Session Num (per user) 69.35 120.85
|
| 318 |
+
Avg Dialogue Turns per Session 21.68 22.14
|
| 319 |
+
Total Dialogue Turns 30,073 53,516
|
| 320 |
+
Memory Statistics
|
| 321 |
+
Avg Memory Num per Session 10.78 6.18
|
| 322 |
+
Distractor Memories 2,648 2,648
|
| 323 |
+
Update Memories 3,122 3,122
|
| 324 |
+
Persona Memories 9,116 9,116
|
| 325 |
+
Event Memories 4,550 4,550
|
| 326 |
+
Relationship Memories 1,282 1,282
|
| 327 |
+
Total Memories 14,948 14,948
|
| 328 |
+
Question Statistics
|
| 329 |
+
Avg Questions per User 173.35 173.35
|
| 330 |
+
Total Questions 3,467 3,467
|
| 331 |
+
Question Type Distribution:
|
| 332 |
+
Basic Fact Recall 746 746
|
| 333 |
+
Multi-hop Inference 198 198
|
| 334 |
+
Dynamic Update 180 180
|
| 335 |
+
Memory Boundary 828 828
|
| 336 |
+
Memory Conflict 769 769
|
| 337 |
+
Generalization & Application 746 746
|
| 338 |
+
|
| 339 |
+
### A.1 Definition of Memory Types
|
| 340 |
+
|
| 341 |
+
HaluMem categorizes memory content into three core types, reflecting different semantic levels and stability characteristics:
|
| 342 |
+
|
| 343 |
+
* •Persona Memory: Describes user’s identity, interests, habits, beliefs, and other stable characteristics.
|
| 344 |
+
* •Event Memory: Records specific events, experiences, or plans that occurred to the user.
|
| 345 |
+
* •Relationship Memory: Describes user’s relationships, interactions, or views of others.
|
| 346 |
+
|
| 347 |
+
### A.2 Definition of Question Types
|
| 348 |
+
|
| 349 |
+
To comprehensively cover different types of hallucination, HaluMem defines six categories of evaluation questions:
|
| 350 |
+
|
| 351 |
+
* •Basic Fact Recall: Directly ask about single objective facts or user preferences that explicitly appear in the dialogue, without requiring reasoning or information integration.
|
| 352 |
+
* •Multi-hop Inference: Requires synthesizing multiple information fragments from dialogues, and can only derive answers through logical reasoning or temporal reasoning.
|
| 353 |
+
* •Dynamic Update: Tests the ability to track information changes over time, requiring identification of the latest status or preference changes.
|
| 354 |
+
* •Memory Boundary: Tests the system’s ability to identify unknown information by asking about details not mentioned in the input information to examine whether the system will fabricate answers.
|
| 355 |
+
* •Generalization & Application: Based on known user preferences or characteristics, infer reasonable suggestions or judgments in new scenarios.
|
| 356 |
+
* •Memory Conflict: Tests the system’s ability to identify and correct erroneous premises. Questions deliberately contain incorrect information that directly contradicts known memory points, requiring the system to identify contradictions, correct errors, and answer based on correct information.
|
| 357 |
+
|
| 358 |
+
### A.3 Dataset Statistics
|
| 359 |
+
|
| 360 |
+
Table [6](https://arxiv.org/html/2511.03506v2#A1.T6 "Table 6 ‣ Appendix A Supplementary Details of HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") presents the main statistical features of HaluMem-Medium and HaluMem-Long, including context scale, session quantity, memory distribution, and question-type composition. All values are based on the finalized dataset version.
|
| 361 |
+
|
| 362 |
+
### A.4 Construction Details of HaluMem‑Long
|
| 363 |
+
|
| 364 |
+
HaluMem‑Long is built upon HaluMem‑Medium to test memory systems under ultra‑long context scenarios, focusing on robustness and hallucination suppression. Based on each user’s sessions in HaluMem‑Medium, additional irrelevant dialogues were inserted:
|
| 365 |
+
|
| 366 |
+
* •Within sessions: extra unrelated exchanges were added to existing conversations.
|
| 367 |
+
* •Between sessions: new sessions composed entirely of irrelevant dialogues were interleaved.
|
| 368 |
+
|
| 369 |
+
These irrelevant dialogues include:
|
| 370 |
+
|
| 371 |
+
* •Factual Q&A derived partly from the ELI5 dataset [fan2019eli5] and partly generated by us.
|
| 372 |
+
* •Mathematical Q&A drawn from the GPT-OSS-120B-Distilled-Reasoning-math dataset.
|
| 373 |
+
|
| 374 |
+
The ELI5 dataset consists of factual question–answer pairs (e.g., the second QA example), whereas GPT-OSS-120B-Distilled-Reasoning-math contains question–answer pairs involving mathematics (e.g., the third QA example). To further enrich the diversity of irrelevant dialogues, we also sampled factual QA pairs across eight domains using GPT-4o (e.g., the first example), including Historical Figure, Scientific Concept, Country or Place, Famous Invention, Philosophical Theory, Artwork or Painting, Historical Event, and Mathematical Theorem. These QA pairs are used to simulate dialogues between users and the AI driven by instrumental needs in realistic scenarios. They have minimal impact on the user’s original conversations and do not affect the memory system’s personalized memories of the user. See Appendix [E.4](https://arxiv.org/html/2511.03506v2#A5.SS4 "E.4 Examples of irrelevant dialogues ‣ Appendix E Examples from the Process of Constructing HaluMem ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") for examples of irrelevant dialogues.
|
| 375 |
+
|
| 376 |
+
Appendix B Special Configurations for Some Memory Systems
|
| 377 |
+
---------------------------------------------------------
|
| 378 |
+
|
| 379 |
+
This appendix documents the special configurations applied to several memory systems evaluated on HaluMem. While the experimental setup strives to maintain consistent configurations across all evaluated systems, certain memory systems exhibit unique API constraints that necessitate specific adjustments or workarounds. Each subsection below outlines these system-specific configurations to ensure reproducibility.
|
| 380 |
+
|
| 381 |
+
### B.1 Memobase
|
| 382 |
+
|
| 383 |
+
Since Memobase does not provide a Get Dialogue Memory API, we adopted a localized deployment approach and directly accessed the corresponding dialogue memories from its underlying database. Additionally, the Retrieve Memory API of Memobase only supports controlling the maximum length of the returned memory text. Based on test results, we set the maximum length for memory recall in the memory updating task to 250 tokens and the recall length for the memory question answering task to 500 tokens.
|
| 384 |
+
|
| 385 |
+
### B.2 Zep
|
| 386 |
+
|
| 387 |
+
According to our current understanding, the official APIs provided by Zep do not support retrieving all memory points within a specific session, meaning they do not offer functionality equivalent to a Get Dialogue Memory API. Consequently, we were unable to evaluate Zep’s performance on the memory extraction task. We attempted to use the function `thread.get_user_context()` offered by Zep to obtain all memories under a given thread; however, this method only returns recent memories rather than the complete set, which does not meet the evaluation requirements. Moreover, since Zep’s memory processing workflow operates entirely asynchronously, we could not accurately measure the time consumption in the dialogue addition phase and instead recorded only the time cost associated with memory retrieval.
|
| 388 |
+
|
| 389 |
+
Appendix C Annotation Guidelines and Instructions
|
| 390 |
+
-------------------------------------------------
|
| 391 |
+
|
| 392 |
+
### C.1 Annotation Objective
|
| 393 |
+
|
| 394 |
+
Task Background: Given a user’s persona description and multi-turn human-AI dialogue content, memory points and question-answer (QA) pairs are generated using large language models. The generated items must be manually verified to ensure strict grounding in the dialogue content. Specifically, memory points should have explicit evidence in the dialogue, and QA pairs should be relevant to the dialogue, with answers directly inferable from it.
|
| 395 |
+
|
| 396 |
+
Core Objective: Assess whether the content in the Evaluation Item is consistent with the corresponding Dialogue Info.
|
| 397 |
+
|
| 398 |
+

|
| 399 |
+
|
| 400 |
+
Figure 6: Annotation interface.
|
| 401 |
+
|
| 402 |
+
An illustrative screenshot of the annotation interface is provided below (Figure [6](https://arxiv.org/html/2511.03506v2#A3.F6 "Figure 6 ‣ C.1 Annotation Objective ‣ Appendix C Annotation Guidelines and Instructions ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents")).
|
| 403 |
+
|
| 404 |
+
### C.2 Information Fields
|
| 405 |
+
|
| 406 |
+
* •User Persona Info: Basic information about the user provided in the dialogue setting.
|
| 407 |
+
* •Dialogue Info: Multi-turn dialogue content between the user and the AI. Each turn contains one user utterance (user) and one assistant response (assistant).
|
| 408 |
+
* •Evaluation Item: The item to be annotated, which can be either a memory point or a QA pair, as indicated by the Evaluation Type. For memory points, the item is a textual description about the user. For QA pairs, it includes a question and an answer (e.g., Question: xxx; Answer: xxx).
|
| 409 |
+
* •Evaluation Type: Indicates the type of Evaluation Item: “memory” for memory points and “question” for QA pairs.
|
| 410 |
+
* •
|
| 411 |
+
|
| 412 |
+
Evaluation Item Type: Categorizes the memory point or question as follows:
|
| 413 |
+
|
| 414 |
+
* –
|
| 415 |
+
|
| 416 |
+
Memory Points:
|
| 417 |
+
|
| 418 |
+
* *Persona Memory: Describes user’s identity, interests, habits, beliefs, and other stable characteristics.
|
| 419 |
+
* *Event Memory: Records specific events, experiences, or plans that occurred to the user.
|
| 420 |
+
* *Relationship Memory: Describes user’s relationships, interactions, or perspectives on others.
|
| 421 |
+
|
| 422 |
+
* –
|
| 423 |
+
|
| 424 |
+
Questions:
|
| 425 |
+
|
| 426 |
+
* *Basic Fact Recall: Directly asks about single objective facts or user preferences explicitly mentioned in the dialogue, without requiring reasoning or information integration.
|
| 427 |
+
* *Multi-hop Inference: Requires synthesizing multiple pieces of dialogue information, deriving answers through logical or temporal reasoning.
|
| 428 |
+
* *Dynamic Update: Tests the ability to track information changes over time, requiring identification of the latest status or preference changes.
|
| 429 |
+
* *Memory Boundary: Tests the system’s ability to recognize unknown information by querying details not mentioned in the input, assessing whether the system will fabricate answers.
|
| 430 |
+
* *Generalization & Application: Infers reasonable suggestions or judgments in new scenarios based on known user preferences or characteristics.
|
| 431 |
+
* *Memory Conflict: Evaluates the system’s ability to identify and correct erroneous premises. Questions deliberately contain incorrect information contradicting known memory points, requiring the system to identify contradictions, correct errors, and answer based on correct information.
|
| 432 |
+
|
| 433 |
+
### C.3 Annotation Dimensions and Scoring
|
| 434 |
+
|
| 435 |
+
Each memory point and QA pair is evaluated along three dimensions: Correctness, Relevance, and Consistency.
|
| 436 |
+
|
| 437 |
+
* •Evaluation Result: A single-choice judgment of “correct” or “incorrect”. For memory points, this assesses whether the item is supported by the dialogue. For QA pairs, it assesses whether the question and answer can be clearly found in the dialogue.
|
| 438 |
+
* •
|
| 439 |
+
|
| 440 |
+
Scoring (0–10): Two separate scores are assigned:
|
| 441 |
+
|
| 442 |
+
* –Consistency: Measures whether the memory point or question (Evaluation Item) matches its declared type (Evaluation Item Type). 0–3 indicates poor consistency, 4–6 partial consistency, and 7–10 full consistency.
|
| 443 |
+
* –Relevance: Measures whether the memory point or question is related to the dialogue (Dialogue Info) or the user persona (User Persona Info). 0–3 indicates low relevance, 4–6 moderate relevance, and 7–10 high relevance.
|
| 444 |
+
|
| 445 |
+
Appendix D Prompts
|
| 446 |
+
------------------
|
| 447 |
+
|
| 448 |
+
This section presents some of the important prompt templates involved in the paper.
|
| 449 |
+
|
| 450 |
+
### D.1 Prompts for Memory Question Answering Task
|
| 451 |
+
|
| 452 |
+
Figures [7](https://arxiv.org/html/2511.03506v2#A4.F7 "Figure 7 ‣ D.1 Prompts for Memory Question Answering Task ‣ Appendix D Prompts ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents")–[10](https://arxiv.org/html/2511.03506v2#A4.F10 "Figure 10 ‣ D.1 Prompts for Memory Question Answering Task ‣ Appendix D Prompts ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") show the prompt templates used by all memory systems in the memory question answering task to assemble questions and retrieve memory points, which are then fed into GPT-4o to generate responses. All of these memory templates are obtained from the official GitHub repositories of the respective memory systems.
|
| 453 |
+
|
| 454 |
+
Figure 7: Prompt for Mem0 and Mem0-Graph
|
| 455 |
+
|
| 456 |
+
Figure 8: Prompt for Memobase
|
| 457 |
+
|
| 458 |
+
Figure 9: Prompt for Supermemory
|
| 459 |
+
|
| 460 |
+
Figure 10: Prompt for Zep
|
| 461 |
+
|
| 462 |
+
### D.2 Prompts for Scoring in Memory Evaluation Tasks
|
| 463 |
+
|
| 464 |
+
Figures [11](https://arxiv.org/html/2511.03506v2#A4.F11 "Figure 11 ‣ D.2 Prompts for Scoring in Memory Evaluation Tasks ‣ Appendix D Prompts ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents")–[17](https://arxiv.org/html/2511.03506v2#A4.F17 "Figure 17 ‣ D.2 Prompts for Scoring in Memory Evaluation Tasks ‣ Appendix D Prompts ‣ HaluMem: Evaluating Hallucinations in Memory Systems of Agents") respectively illustrate the prompt templates used to guide GPT-4o in scoring for the memory extraction, memory updating, and memory question answering tasks.
|
| 465 |
+
|
| 466 |
+
Figure 11: Prompt for Memory Integrity
|
| 467 |
+
|
| 468 |
+
Figure 12: Prompt for Memory Accuracy (1/3)
|
| 469 |
+
|
| 470 |
+
Figure 13: Prompt for Memory Accuracy (2/3)
|
| 471 |
+
|
| 472 |
+
Figure 14: Prompt for Memory Accuracy (3/3)
|
| 473 |
+
|
| 474 |
+
Figure 15: Prompt for Memory Updating
|
| 475 |
+
|
| 476 |
+
Figure 16: Prompt for Memory Question Answering (1/2)
|
| 477 |
+
|
| 478 |
+
Figure 17: Prompt for Memory Question Answering (2/2)
|
| 479 |
+
|
| 480 |
+
Appendix E Examples from the Process of Constructing HaluMem
|
| 481 |
+
------------------------------------------------------------
|
| 482 |
+
|
| 483 |
+
### E.1 User Profile Example in Stage 1
|
| 484 |
+
|
| 485 |
+
As shown in Listing LABEL:lst:core_info–LABEL:lst:preference_info, these JSON structures respectively illustrate examples of a user’s core profile information, dynamic state information, and preference information generated in Stage 1.
|
| 486 |
+
|
| 487 |
+
Listing 1: Example of a User’s Core Profile Information.
|
| 488 |
+
|
| 489 |
+
1{
|
| 490 |
+
|
| 491 |
+
2"basic_info"
|
| 492 |
+
|
| 493 |
+
3"name"
|
| 494 |
+
|
| 495 |
+
4"gender"
|
| 496 |
+
|
| 497 |
+
5"birth_date"
|
| 498 |
+
|
| 499 |
+
6"location"
|
| 500 |
+
|
| 501 |
+
7},
|
| 502 |
+
|
| 503 |
+
8"age"
|
| 504 |
+
|
| 505 |
+
9"current_age"
|
| 506 |
+
|
| 507 |
+
10"latest_date"
|
| 508 |
+
|
| 509 |
+
11},
|
| 510 |
+
|
| 511 |
+
12"education"
|
| 512 |
+
|
| 513 |
+
13"highest_degree"
|
| 514 |
+
|
| 515 |
+
14"major"
|
| 516 |
+
|
| 517 |
+
15},
|
| 518 |
+
|
| 519 |
+
16"personality"
|
| 520 |
+
|
| 521 |
+
17"mbti"
|
| 522 |
+
|
| 523 |
+
18"tags"
|
| 524 |
+
|
| 525 |
+
19"Innovative Spirit",
|
| 526 |
+
|
| 527 |
+
20"Active Thinking",
|
| 528 |
+
|
| 529 |
+
21"Debate Skills",
|
| 530 |
+
|
| 531 |
+
22"Empathetic"
|
| 532 |
+
|
| 533 |
+
23]
|
| 534 |
+
|
| 535 |
+
24},
|
| 536 |
+
|
| 537 |
+
25"family_life"
|
| 538 |
+
|
| 539 |
+
26"parent_status"
|
| 540 |
+
|
| 541 |
+
27"partner_status"
|
| 542 |
+
|
| 543 |
+
28"child_status"
|
| 544 |
+
|
| 545 |
+
29"parent_members"
|
| 546 |
+
|
| 547 |
+
30{
|
| 548 |
+
|
| 549 |
+
31"member_type"
|
| 550 |
+
|
| 551 |
+
32"birth_date"
|
| 552 |
+
|
| 553 |
+
33"description"
|
| 554 |
+
|
| 555 |
+
34},
|
| 556 |
+
|
| 557 |
+
35{
|
| 558 |
+
|
| 559 |
+
36"member_type"
|
| 560 |
+
|
| 561 |
+
37"birth_date"
|
| 562 |
+
|
| 563 |
+
38"description"
|
| 564 |
+
|
| 565 |
+
39}
|
| 566 |
+
|
| 567 |
+
40],
|
| 568 |
+
|
| 569 |
+
41"partner"
|
| 570 |
+
|
| 571 |
+
42"child_members"
|
| 572 |
+
|
| 573 |
+
43"family_description"
|
| 574 |
+
|
| 575 |
+
44},
|
| 576 |
+
|
| 577 |
+
45"life_goal"
|
| 578 |
+
|
| 579 |
+
46"life_goal_type"
|
| 580 |
+
|
| 581 |
+
47"statement"
|
| 582 |
+
|
| 583 |
+
48"motivation"
|
| 584 |
+
|
| 585 |
+
49"target_metrics"
|
| 586 |
+
|
| 587 |
+
50}
|
| 588 |
+
|
| 589 |
+
51}
|
| 590 |
+
|
| 591 |
+
Listing 2: Example of a User’s Dynamic State Information.
|
| 592 |
+
|
| 593 |
+
1{
|
| 594 |
+
|
| 595 |
+
2"career_status"
|
| 596 |
+
|
| 597 |
+
3"employment_status"
|
| 598 |
+
|
| 599 |
+
4"industry"
|
| 600 |
+
|
| 601 |
+
5"company_name"
|
| 602 |
+
|
| 603 |
+
6"job_title"
|
| 604 |
+
|
| 605 |
+
7"monthly_income"
|
| 606 |
+
|
| 607 |
+
8"savings_amount"
|
| 608 |
+
|
| 609 |
+
9"career_description"
|
| 610 |
+
|
| 611 |
+
10},
|
| 612 |
+
|
| 613 |
+
11"health_status"
|
| 614 |
+
|
| 615 |
+
12"physical_health"
|
| 616 |
+
|
| 617 |
+
13"physical_chronic_conditions"
|
| 618 |
+
|
| 619 |
+
14"mental_health"
|
| 620 |
+
|
| 621 |
+
15"mental_chronic_conditions"
|
| 622 |
+
|
| 623 |
+
16"situation_reason"
|
| 624 |
+
|
| 625 |
+
17},
|
| 626 |
+
|
| 627 |
+
18"social_relationships"
|
| 628 |
+
|
| 629 |
+
19"ThomasSusan"
|
| 630 |
+
|
| 631 |
+
20"relationship_type"
|
| 632 |
+
|
| 633 |
+
21"description"
|
| 634 |
+
|
| 635 |
+
22},
|
| 636 |
+
|
| 637 |
+
23"MartinezDaniel"
|
| 638 |
+
|
| 639 |
+
24"relationship_type"
|
| 640 |
+
|
| 641 |
+
25"description"
|
| 642 |
+
|
| 643 |
+
26},
|
| 644 |
+
|
| 645 |
+
27"WilliamsJoshua"
|
| 646 |
+
|
| 647 |
+
28"relationship_type"
|
| 648 |
+
|
| 649 |
+
29"description"
|
| 650 |
+
|
| 651 |
+
30}
|
| 652 |
+
|
| 653 |
+
31}
|
| 654 |
+
|
| 655 |
+
32}
|
| 656 |
+
|
| 657 |
+
Listing 3: Example of a User’s Preference Information.
|
| 658 |
+
|
| 659 |
+
1{
|
| 660 |
+
|
| 661 |
+
2"Pet Preference"
|
| 662 |
+
|
| 663 |
+
3"memory_points"
|
| 664 |
+
|
| 665 |
+
4{
|
| 666 |
+
|
| 667 |
+
5"type"
|
| 668 |
+
|
| 669 |
+
6"type_description"
|
| 670 |
+
|
| 671 |
+
7"specific_item"
|
| 672 |
+
|
| 673 |
+
8"reason"
|
| 674 |
+
|
| 675 |
+
9},
|
| 676 |
+
|
| 677 |
+
10{
|
| 678 |
+
|
| 679 |
+
11"type"
|
| 680 |
+
|
| 681 |
+
12"type_description"
|
| 682 |
+
|
| 683 |
+
13"specific_item"
|
| 684 |
+
|
| 685 |
+
14"reason"
|
| 686 |
+
|
| 687 |
+
15},
|
| 688 |
+
|
| 689 |
+
16{
|
| 690 |
+
|
| 691 |
+
17"type"
|
| 692 |
+
|
| 693 |
+
18"type_description"
|
| 694 |
+
|
| 695 |
+
19"specific_item"
|
| 696 |
+
|
| 697 |
+
20"reason"
|
| 698 |
+
|
| 699 |
+
21},
|
| 700 |
+
|
| 701 |
+
22{
|
| 702 |
+
|
| 703 |
+
23"type"
|
| 704 |
+
|
| 705 |
+
24"type_description"
|
| 706 |
+
|
| 707 |
+
25"specific_item"
|
| 708 |
+
|
| 709 |
+
26"reason"
|
| 710 |
+
|
| 711 |
+
27}
|
| 712 |
+
|
| 713 |
+
28]
|
| 714 |
+
|
| 715 |
+
29},
|
| 716 |
+
|
| 717 |
+
30"Sports Preference"
|
| 718 |
+
|
| 719 |
+
31...
|
| 720 |
+
|
| 721 |
+
32},
|
| 722 |
+
|
| 723 |
+
33...
|
| 724 |
+
|
| 725 |
+
34}
|
| 726 |
+
|
| 727 |
+
### E.2 Event Structure Examples in Stage 3
|
| 728 |
+
|
| 729 |
+
As shown in Listing LABEL:lst:init_event–Listing LABEL:lst:daily_event, these JSON structures illustrate examples of the three types of events generated in Stage 3. Among them, the init event occurs at the very beginning and provides all the initialization information for a user. The career event, representing a user’s career development process, is relatively more complex. Listing LABEL:lst:career_event presents a sub-stage event ("Recognizing the Need for Change") that belongs to a larger career event ("Transition to New Role Amidst Health Challenges"). In this example, the "related_career_events" field specifies the identifiers of other sub-stage events that belong to the same overarching career event. The daily event is triggered whenever a user’s preference information changes, and thus each instance centers around a specific preference update. In the example shown in Listing LABEL:lst:daily_event, the "related_daily_routine" field lists the identifiers of other daily events that correspond to the same preference type.
|
| 730 |
+
|
| 731 |
+
Listing 4: Example of an Init Event.
|
| 732 |
+
|
| 733 |
+
1{
|
| 734 |
+
|
| 735 |
+
2"event_index"
|
| 736 |
+
|
| 737 |
+
3"event_type"
|
| 738 |
+
|
| 739 |
+
4"event_name"
|
| 740 |
+
|
| 741 |
+
5"event_time"
|
| 742 |
+
|
| 743 |
+
6"event_description"
|
| 744 |
+
|
| 745 |
+
7"initial_fixed"
|
| 746 |
+
|
| 747 |
+
8(The corresponding user’s core profile information will be placed here.)
|
| 748 |
+
|
| 749 |
+
9}
|
| 750 |
+
|
| 751 |
+
10}’
|
| 752 |
+
|
| 753 |
+
Listing 5: Example of a Career Event.
|
| 754 |
+
|
| 755 |
+
1{
|
| 756 |
+
|
| 757 |
+
2"event_index"
|
| 758 |
+
|
| 759 |
+
3"event_type"
|
| 760 |
+
|
| 761 |
+
4"event_name"
|
| 762 |
+
|
| 763 |
+
5"event_time"
|
| 764 |
+
|
| 765 |
+
6"main_conflict"
|
| 766 |
+
|
| 767 |
+
7"stage_result"
|
| 768 |
+
|
| 769 |
+
8"event_start_time"
|
| 770 |
+
|
| 771 |
+
9"event_end_time"
|
| 772 |
+
|
| 773 |
+
10"user_age"
|
| 774 |
+
|
| 775 |
+
11"dynamic_updates"
|
| 776 |
+
|
| 777 |
+
12{
|
| 778 |
+
|
| 779 |
+
13"type_to_update"
|
| 780 |
+
|
| 781 |
+
14"update_direction"
|
| 782 |
+
|
| 783 |
+
15"before_dynamic"
|
| 784 |
+
|
| 785 |
+
16"employment_status"
|
| 786 |
+
|
| 787 |
+
17"industry"
|
| 788 |
+
|
| 789 |
+
18"company_name"
|
| 790 |
+
|
| 791 |
+
19"job_title"
|
| 792 |
+
|
| 793 |
+
20"monthly_income"
|
| 794 |
+
|
| 795 |
+
21"savings_amount"
|
| 796 |
+
|
| 797 |
+
22"career_description"
|
| 798 |
+
|
| 799 |
+
23},
|
| 800 |
+
|
| 801 |
+
24"update_reason"
|
| 802 |
+
|
| 803 |
+
25"after_dynamic"
|
| 804 |
+
|
| 805 |
+
26"employment_status"
|
| 806 |
+
|
| 807 |
+
27"industry"
|
| 808 |
+
|
| 809 |
+
28"company_name"
|
| 810 |
+
|
| 811 |
+
29"job_title"
|
| 812 |
+
|
| 813 |
+
30"monthly_income"
|
| 814 |
+
|
| 815 |
+
31"savings_amount"
|
| 816 |
+
|
| 817 |
+
32"career_description"
|
| 818 |
+
|
| 819 |
+
33},
|
| 820 |
+
|
| 821 |
+
34"changed_keys"
|
| 822 |
+
|
| 823 |
+
35}
|
| 824 |
+
|
| 825 |
+
36],
|
| 826 |
+
|
| 827 |
+
37"stage_description"
|
| 828 |
+
|
| 829 |
+
38"event_description"
|
| 830 |
+
|
| 831 |
+
39"event_result"
|
| 832 |
+
|
| 833 |
+
40"related_career_events"
|
| 834 |
+
|
| 835 |
+
41}
|
| 836 |
+
|
| 837 |
+
Listing 6: Example of a Daily Event.
|
| 838 |
+
|
| 839 |
+
1{
|
| 840 |
+
|
| 841 |
+
2"event_index"
|
| 842 |
+
|
| 843 |
+
3"event_type"
|
| 844 |
+
|
| 845 |
+
4"event_name"
|
| 846 |
+
|
| 847 |
+
5"event_time"
|
| 848 |
+
|
| 849 |
+
6"preference_type"
|
| 850 |
+
|
| 851 |
+
7"step"
|
| 852 |
+
|
| 853 |
+
8"update_direction"
|
| 854 |
+
|
| 855 |
+
9"type_to_update"
|
| 856 |
+
|
| 857 |
+
10"main_conflict"
|
| 858 |
+
|
| 859 |
+
11"update_reason"
|
| 860 |
+
|
| 861 |
+
12"before_preference"
|
| 862 |
+
|
| 863 |
+
13"memory_points"
|
| 864 |
+
|
| 865 |
+
14{
|
| 866 |
+
|
| 867 |
+
15"type"
|
| 868 |
+
|
| 869 |
+
16"type_description"
|
| 870 |
+
|
| 871 |
+
17"specific_item"
|
| 872 |
+
|
| 873 |
+
18"reason"
|
| 874 |
+
|
| 875 |
+
19}
|
| 876 |
+
|
| 877 |
+
20]
|
| 878 |
+
|
| 879 |
+
21},
|
| 880 |
+
|
| 881 |
+
22"after_preference"
|
| 882 |
+
|
| 883 |
+
23"memory_points"
|
| 884 |
+
|
| 885 |
+
24{
|
| 886 |
+
|
| 887 |
+
25"type"
|
| 888 |
+
|
| 889 |
+
26"type_description"
|
| 890 |
+
|
| 891 |
+
27"specific_item"
|
| 892 |
+
|
| 893 |
+
28"reason"
|
| 894 |
+
|
| 895 |
+
29}
|
| 896 |
+
|
| 897 |
+
30]
|
| 898 |
+
|
| 899 |
+
31},
|
| 900 |
+
|
| 901 |
+
32"related_daily_routine"
|
| 902 |
+
|
| 903 |
+
33"changed_index"
|
| 904 |
+
|
| 905 |
+
34"event_description"
|
| 906 |
+
|
| 907 |
+
35}
|
| 908 |
+
|
| 909 |
+
### E.3 Examples of Memory Points, Dialogues, and QA Pairs in Stages 4–6
|
| 910 |
+
|
| 911 |
+
As shown in Listings 7–9 below, these JSON structures respectively illustrate examples of the memory points generated in Stage 4, the human–AI dialogues generated in Stage 5, and the memory question–answer pairs generated in Stage 6. Each memory point contains fields such as "memory_content", "memory_type", "memory_source", "is_update" (indicating whether it is an updated memory point), "original_memories" (previous related memories, if updated), "timestamp", and "importance", which together enrich the representation of each memory point and provide support for subsequent evaluation. Each dialogue round consists of one utterance from the user and one response from the AI assistant, with both the utterance content and timestamps recorded. Each question includes the question text, a reference answer, the relevant memory points required to derive the answer, the question type, and its difficulty level.
|
| 912 |
+
|
| 913 |
+
Listing 7: Example of a Memory Point.
|
| 914 |
+
|
| 915 |
+
1{
|
| 916 |
+
|
| 917 |
+
2"index"
|
| 918 |
+
|
| 919 |
+
3"memory_content"
|
| 920 |
+
|
| 921 |
+
4"memory_type"
|
| 922 |
+
|
| 923 |
+
5"memory_source"
|
| 924 |
+
|
| 925 |
+
6"is_update"
|
| 926 |
+
|
| 927 |
+
7"original_memories"
|
| 928 |
+
|
| 929 |
+
8"Martin Mark is considering a career change due to health impacts from his current role."
|
| 930 |
+
|
| 931 |
+
9],
|
| 932 |
+
|
| 933 |
+
10"timestamp"
|
| 934 |
+
|
| 935 |
+
11"importance"
|
| 936 |
+
|
| 937 |
+
12}
|
| 938 |
+
|
| 939 |
+
Listing 8: Example of a Single Dialogue Turn.
|
| 940 |
+
|
| 941 |
+
1[
|
| 942 |
+
|
| 943 |
+
2{
|
| 944 |
+
|
| 945 |
+
3"role"
|
| 946 |
+
|
| 947 |
+
4"content"
|
| 948 |
+
|
| 949 |
+
5"timestamp"
|
| 950 |
+
|
| 951 |
+
6"dialogue_turn"
|
| 952 |
+
|
| 953 |
+
7},
|
| 954 |
+
|
| 955 |
+
8{
|
| 956 |
+
|
| 957 |
+
9"role"
|
| 958 |
+
|
| 959 |
+
10"content"
|
| 960 |
+
|
| 961 |
+
11"timestamp"
|
| 962 |
+
|
| 963 |
+
12"dialogue_turn"
|
| 964 |
+
|
| 965 |
+
13},
|
| 966 |
+
|
| 967 |
+
14...
|
| 968 |
+
|
| 969 |
+
15]
|
| 970 |
+
|
| 971 |
+
Listing 9: Example of a Question.
|
| 972 |
+
|
| 973 |
+
1{
|
| 974 |
+
|
| 975 |
+
2"question"
|
| 976 |
+
|
| 977 |
+
3"answer"
|
| 978 |
+
|
| 979 |
+
4"evidence"
|
| 980 |
+
|
| 981 |
+
5{
|
| 982 |
+
|
| 983 |
+
6"memory_content"
|
| 984 |
+
|
| 985 |
+
7"memory_type"
|
| 986 |
+
|
| 987 |
+
8}
|
| 988 |
+
|
| 989 |
+
9],
|
| 990 |
+
|
| 991 |
+
10"difficulty"
|
| 992 |
+
|
| 993 |
+
11"question_type"
|
| 994 |
+
|
| 995 |
+
12}
|
| 996 |
+
|
| 997 |
+
### E.4 Examples of Irrelevant Dialogues
|
| 998 |
+
|
| 999 |
+
As shown in Listing LABEL:lst:irrelevant_dialogue, this JSON structure presents several examples of irrelevant dialogues.
|
| 1000 |
+
|
| 1001 |
+
Listing 10: Examples of Irrelevant Dialogues.
|
| 1002 |
+
|
| 1003 |
+
1[
|
| 1004 |
+
|
| 1005 |
+
2{
|
| 1006 |
+
|
| 1007 |
+
3"role"
|
| 1008 |
+
|
| 1009 |
+
4"content"
|
| 1010 |
+
|
| 1011 |
+
5},
|
| 1012 |
+
|
| 1013 |
+
6{
|
| 1014 |
+
|
| 1015 |
+
7"role"
|
| 1016 |
+
|
| 1017 |
+
8"content"
|
| 1018 |
+
|
| 1019 |
+
9},
|
| 1020 |
+
|
| 1021 |
+
10{
|
| 1022 |
+
|
| 1023 |
+
11"role"
|
| 1024 |
+
|
| 1025 |
+
12"content"
|
| 1026 |
+
|
| 1027 |
+
13},
|
| 1028 |
+
|
| 1029 |
+
14{
|
| 1030 |
+
|
| 1031 |
+
15"role"
|
| 1032 |
+
|
| 1033 |
+
16"content"
|
| 1034 |
+
|
| 1035 |
+
17},
|
| 1036 |
+
|
| 1037 |
+
18{
|
| 1038 |
+
|
| 1039 |
+
19"role"
|
| 1040 |
+
|
| 1041 |
+
20"content"
|
| 1042 |
+
|
| 1043 |
+
21},
|
| 1044 |
+
|
| 1045 |
+
22{
|
| 1046 |
+
|
| 1047 |
+
23"role"
|
| 1048 |
+
|
| 1049 |
+
24"content"
|
| 1050 |
+
|
| 1051 |
+
25}
|
| 1052 |
+
|
| 1053 |
+
26]
|