Skip to content

History Management

xronai.history.history_manager.HistoryManager

Manages conversation history for workflows involving supervisors, agents, and tools.

This class works alongside the chat_history in Supervisor and Agent classes, providing persistent storage and advanced querying capabilities while maintaining the conversation structure and relationships.

Attributes:

Name Type Description
workflow_id str

Unique identifier for the workflow.

base_path Path

The root directory for storing all logs.

workflow_path Path

Path to the specific directory for this workflow's logs.

history_file Path

Path to the JSONL file storing the conversation history.

Source code in xronai/history/history_manager.py
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
class HistoryManager:
    """
    Manages conversation history for workflows involving supervisors, agents, and tools.

    This class works alongside the chat_history in Supervisor and Agent classes,
    providing persistent storage and advanced querying capabilities while maintaining
    the conversation structure and relationships.

    Attributes:
        workflow_id (str): Unique identifier for the workflow.
        base_path (Path): The root directory for storing all logs.
        workflow_path (Path): Path to the specific directory for this workflow's logs.
        history_file (Path): Path to the JSONL file storing the conversation history.
    """

    def __init__(self, workflow_id: str, base_path: Optional[str] = None):
        """
        Initialize the HistoryManager.

        Args:
            workflow_id (str): Unique identifier for the workflow.
                             Must be provided by a main supervisor.
            base_path (Optional[str]): The root directory for history logs. 
                                       Defaults to 'xronai_logs'.

        Raises:
            ValueError: If workflow_id is None or workflow directory doesn't exist.
        """
        if not workflow_id:
            raise ValueError("workflow_id must be provided")

        self.workflow_id = workflow_id
        self.base_path = Path(base_path) if base_path else Path("xronai_logs")
        self.workflow_path = self.base_path / self.workflow_id
        self.history_file = self.workflow_path / "history.jsonl"

        # The directory is owned by the main supervisor; this class only appends/reads.
        if not self.workflow_path.exists():
            raise ValueError(f"Workflow directory does not exist: {self.workflow_path}. "
                             "It should be created by the main supervisor.")

    def append_message(self,
                       message: Dict[str, Any],
                       sender_type: EntityType,
                       sender_name: str,
                       parent_id: Optional[str] = None,
                       tool_call_id: Optional[str] = None,
                       supervisor_chain: Optional[List[str]] = None) -> str:
        """
        Append a message to the conversation history.

        This method is called alongside chat_history updates to maintain
        persistent storage of the conversation.

        Args:
            message (Dict[str, Any]): The message to append (same format as chat_history)
            sender_type (EntityType): Type of the sender
            sender_name (str): Name of the sender
            parent_id (Optional[str]): ID of the parent message in conversation
            tool_call_id (Optional[str]): ID of related tool call if applicable
            supervisor_chain (Optional[List[str]]): List of supervisors in the delegation chain

        Returns:
            str: Generated message ID for reference in future messages

        Example:
            >>> msg_id = history_manager.append_message(
            ...     message={"role": "user", "content": "Hello"},
            ...     sender_type=EntityType.USER,
            ...     sender_name="user"
            ... )

            >>> msg_id = history_manager.append_message(
            ...     message={"role": "assistant", "content": "Process data"},
            ...     sender_type=EntityType.MAIN_SUPERVISOR,
            ...     sender_name="MainSupervisor",
            ...     supervisor_chain=["MainSupervisor", "AssistantSupervisor"]
            ... )
        """
        message_id = str(uuid.uuid4())

        # Prepare entry with metadata
        entry = {
            'message_id': message_id,
            # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
            # datetime.now(timezone.utc) would add a "+00:00" offset to stored
            # timestamps — confirm downstream consumers before changing.
            'timestamp': datetime.utcnow().isoformat(),
            'workflow_id': self.workflow_id,
            # NOTE(review): assumes EntityType is JSON-serializable (e.g. a str
            # enum) since the entry goes straight through json.dumps — confirm.
            'sender_type': sender_type,
            'sender_name': sender_name,
            'parent_id': parent_id,
            'tool_call_id': tool_call_id,
            'supervisor_chain': supervisor_chain or [],  # Empty list if None
            **message  # Include original message fields (they win on key collision)
        }

        # Append to history file
        with open(self.history_file, 'a') as f:
            f.write(json.dumps(entry) + '\n')

        return message_id

    def load_chat_history(self, entity_name: str) -> List[Dict[str, Any]]:
        """
        Load and reconstruct the LLM-compatible conversation history for a given entity (supervisor or agent).

        This function extracts only those messages from the full workflow history that were 
        truly exchanged with or delegated to this entity, ensuring that synthetic user prompts, 
        agent LLM responses, tool calls, and tool results are correctly ordered and threaded 
        as would be expected by any LLM for conversation continuation. Irrelevant messages 
        intended for other agents are excluded.

        Args:
            entity_name (str): The name of the entity for which to load chat history 
                (e.g. supervisor, assistant supervisor, or agent).

        Returns:
            List[Dict[str, Any]]: Ordered list of chat messages in the expected chat_history 
                format for initializing or restoring the entity's LLM context.

        Example:
            >>> agent.chat_history = history_manager.load_chat_history("AgentName")
        """
        if not self.history_file.exists():
            return []

        with open(self.history_file, "r") as f:
            all_msgs = [json.loads(line) for line in f]

        system = next((m for m in all_msgs if m["role"] == "system" and m["sender_name"] == entity_name), None)
        history = []
        if system:
            history.append(self._format_for_chat_history(system))

        # A user message belongs to this entity if it was delegated to it
        # (last hop of supervisor_chain) or if this entity replied to it.
        delegated_user_msgs = []
        for m in all_msgs:
            if m["role"] == "user":
                if m.get("supervisor_chain") and m["supervisor_chain"] and m["supervisor_chain"][-1] == entity_name:
                    delegated_user_msgs.append(m)
                else:
                    if any(n["role"] == "assistant" and n["sender_name"] == entity_name and
                           n.get("parent_id") == m["message_id"] for n in all_msgs):
                        delegated_user_msgs.append(m)

        delegated_user_msgs.sort(key=lambda x: x["timestamp"])

        for user_msg in delegated_user_msgs:
            history.append(self._format_for_chat_history(user_msg))

            # Breadth-first walk of the reply tree under this user message.
            queue = collections.deque()

            children = [m for m in all_msgs if m.get("parent_id") == user_msg["message_id"]]
            children = [m for m in children if m["sender_name"] == entity_name or m["role"] == "tool"]
            children.sort(key=lambda x: x["timestamp"])

            for ch in children:
                queue.append(ch)
            while queue:
                msg = queue.popleft()
                formatted = self._format_for_chat_history(msg)
                if formatted not in history:
                    history.append(formatted)
                if msg["role"] == "assistant" and msg.get("tool_calls"):
                    for tool_call in msg["tool_calls"]:
                        tool_msgs = [
                            t for t in all_msgs if t["role"] == "tool" and t.get("tool_call_id") == tool_call["id"] and
                            t.get("parent_id") == msg["message_id"]
                        ]
                        tool_msgs.sort(key=lambda x: x["timestamp"])
                        for tmsg in tool_msgs:
                            tfmt = self._format_for_chat_history(tmsg)
                            if tfmt not in history:
                                history.append(tfmt)

                            wrapups = [
                                mm for mm in all_msgs
                                if mm.get("parent_id") == tmsg["message_id"] and mm["sender_name"] == entity_name
                            ]
                            wrapups.sort(key=lambda x: x["timestamp"])
                            for wmsg in wrapups:
                                queue.append(wmsg)

        return history

    def get_frontend_history(self) -> List[Dict[str, Any]]:
        """
        Get complete conversation history formatted for frontend display.

        Enriches each message with 'delegation_path' and 'display_name'
        fields, then threads messages by parent/child relationships.

        Returns:
            List[Dict[str, Any]]: Complete conversation history with proper threading

        Example:
            >>> history = history_manager.get_frontend_history()
        """
        if not self.history_file.exists():
            return []

        with open(self.history_file, 'r') as f:
            messages = [json.loads(line) for line in f]

        # Add delegation chain information for display
        for msg in messages:
            if msg.get('supervisor_chain'):
                msg['delegation_path'] = ' → '.join(msg['supervisor_chain'])

            # Format display name based on sender type and chain.
            # NOTE(review): sender_type was round-tripped through JSON, so this
            # comparison assumes EntityType members equal their str values — confirm.
            if msg['sender_type'] in [EntityType.MAIN_SUPERVISOR, EntityType.ASSISTANT_SUPERVISOR]:
                msg['display_name'] = f"{msg['sender_type']}: {msg['sender_name']}"
                if msg.get('supervisor_chain'):
                    msg['display_name'] += f" ({msg['delegation_path']})"
            else:
                msg['display_name'] = ("User" if msg['sender_type'] == EntityType.USER else msg['sender_name'])

        return self._build_conversation_thread(messages)

    def _format_for_chat_history(self, msg: Dict[str, Any]) -> Dict[str, Any]:
        """
        Format a raw persisted message as an LLM-compatible chat turn.

        Converts a stored message (from history.jsonl) into the minimal 
        chat message dict expected by the LLM OpenAI-compatible API: 
        always includes 'role' and 'content', and optionally adds 
        'tool_calls' (for assistant tool call steps) or 'tool_call_id' and 
        'name' (for tool response steps).

        Args:
            msg (Dict[str, Any]): Raw message object loaded from the workflow history.

        Returns:
            Dict[str, Any]: A minimal chat message ready for LLM dialog replay,
                compatible with openai.ChatCompletion and similar APIs.

        Returns Example:
            # For role='assistant'
            {'role': 'assistant', 'content': '...' [, 'tool_calls': [...] ]}
            # For role='tool'
            {'role': 'tool', 'content': '42', 'tool_call_id': 'call_xyz', 'name': 'calculate'}
        """
        formatted = {
            "role": msg["role"],
            "content": msg.get("content", ""),
        }
        if "tool_calls" in msg and msg["tool_calls"]:
            formatted["tool_calls"] = msg["tool_calls"]
        if msg["role"] == "tool":
            formatted["tool_call_id"] = msg.get("tool_call_id")
            formatted["name"] = msg["sender_name"]  # who returned tool output
        return formatted

    def has_system_message(self, entity_name: str) -> bool:
        """
        Check if system message exists for an entity in the current workflow.

        Args:
            entity_name (str): Name of the entity to check

        Returns:
            bool: True if system message exists, False otherwise
        """
        if not self.history_file.exists():
            return False

        with open(self.history_file, 'r') as f:
            for line in f:
                msg = json.loads(line)
                if (msg['role'] == 'system' and msg['sender_name'] == entity_name and
                        msg['workflow_id'] == self.workflow_id):
                    return True
        return False

    def _sort_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Sort messages based on their relationships and timestamps.

        Args:
            messages (List[Dict[str, Any]]): Messages to sort

        Returns:
            List[Dict[str, Any]]: Sorted messages
        """
        return sorted(messages, key=lambda x: x.get('timestamp', ''))

    def _build_conversation_thread(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Build a threaded conversation structure.

        Args:
            messages (List[Dict[str, Any]]): Raw messages from history

        Returns:
            List[Dict[str, Any]]: Threaded conversation structure
        """
        messages = self._sort_messages(messages)

        threaded = []
        message_map = {}

        for msg in messages:
            msg_copy = msg.copy()
            msg_copy['responses'] = []
            message_map[msg['message_id']] = msg_copy

            # Timestamp sort guarantees parents are mapped before children,
            # so a missing parent means this message starts a new thread.
            if msg['parent_id'] and msg['parent_id'] in message_map:
                message_map[msg['parent_id']]['responses'].append(msg_copy)
            else:
                threaded.append(msg_copy)

        return threaded

    def clear_history(self) -> None:
        """Clear the entire conversation history for the current workflow."""
        if self.history_file.exists():
            self.history_file.unlink()
            self.history_file.touch()

    def get_messages_by_entity(self, entity_name: str) -> List[Dict[str, Any]]:
        """
        Get all messages sent by a specific entity.

        Args:
            entity_name (str): Name of the entity

        Returns:
            List[Dict[str, Any]]: All messages sent by the entity, sorted by
                timestamp. Empty list if no history has been recorded yet.
        """
        # Guard against a missing log file, consistent with the other readers
        # (load_chat_history / get_frontend_history / has_system_message).
        if not self.history_file.exists():
            return []

        messages = []
        with open(self.history_file, 'r') as f:
            for line in f:
                msg = json.loads(line)
                if msg['sender_name'] == entity_name:
                    messages.append(msg)
        return self._sort_messages(messages)

__init__(workflow_id, base_path=None)

Initialize the HistoryManager.

Parameters:

Name Type Description Default
workflow_id str

Unique identifier for the workflow. Must be provided by a main supervisor.

required
base_path Optional[str]

The root directory for history logs. Defaults to 'xronai_logs'.

None

Raises:

Type Description
ValueError

If workflow_id is None or workflow directory doesn't exist.

Source code in xronai/history/history_manager.py
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
def __init__(self, workflow_id: str, base_path: Optional[str] = None):
    """
    Set up the filesystem paths backing a workflow's history log.

    Args:
        workflow_id (str): Unique identifier for the workflow.
                           Must be provided by a main supervisor.
        base_path (Optional[str]): The root directory for history logs.
                                   Defaults to 'xronai_logs'.

    Raises:
        ValueError: If workflow_id is falsy or the workflow directory is missing.
    """
    if not workflow_id:
        raise ValueError("workflow_id must be provided")

    root = Path(base_path) if base_path else Path("xronai_logs")
    workflow_dir = root / workflow_id

    self.workflow_id = workflow_id
    self.base_path = root
    self.workflow_path = workflow_dir
    self.history_file = workflow_dir / "history.jsonl"

    # The directory must already have been created by the main supervisor.
    if not workflow_dir.exists():
        raise ValueError(f"Workflow directory does not exist: {self.workflow_path}. "
                         "It should be created by the main supervisor.")

append_message(message, sender_type, sender_name, parent_id=None, tool_call_id=None, supervisor_chain=None)

Append a message to the conversation history.

This method is called alongside chat_history updates to maintain persistent storage of the conversation.

Parameters:

Name Type Description Default
message Dict[str, Any]

The message to append (same format as chat_history)

required
sender_type EntityType

Type of the sender

required
sender_name str

Name of the sender

required
parent_id Optional[str]

ID of the parent message in conversation

None
tool_call_id Optional[str]

ID of related tool call if applicable

None
supervisor_chain Optional[List[str]]

List of supervisors in the delegation chain

None

Returns:

Name Type Description
str str

Generated message ID for reference in future messages

Example

msg_id = history_manager.append_message( ... message={"role": "user", "content": "Hello"}, ... sender_type=EntityType.USER, ... sender_name="user" ... )

msg_id = history_manager.append_message( ... message={"role": "assistant", "content": "Process data"}, ... sender_type=EntityType.MAIN_SUPERVISOR, ... sender_name="MainSupervisor", ... supervisor_chain=["MainSupervisor", "AssistantSupervisor"] ... )

Source code in xronai/history/history_manager.py
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
def append_message(self,
                   message: Dict[str, Any],
                   sender_type: EntityType,
                   sender_name: str,
                   parent_id: Optional[str] = None,
                   tool_call_id: Optional[str] = None,
                   supervisor_chain: Optional[List[str]] = None) -> str:
    """
    Persist one conversation message to the workflow's JSONL history log.

    Mirrors the in-memory chat_history updates so the conversation
    survives restarts and can be queried later.

    Args:
        message (Dict[str, Any]): The message to append (same format as chat_history)
        sender_type (EntityType): Type of the sender
        sender_name (str): Name of the sender
        parent_id (Optional[str]): ID of the parent message in conversation
        tool_call_id (Optional[str]): ID of related tool call if applicable
        supervisor_chain (Optional[List[str]]): List of supervisors in the delegation chain

    Returns:
        str: Generated message ID, usable as parent_id for later messages.

    Example:
        >>> msg_id = history_manager.append_message(
        ...     message={"role": "user", "content": "Hello"},
        ...     sender_type=EntityType.USER,
        ...     sender_name="user"
        ... )

        >>> msg_id = history_manager.append_message(
        ...     message={"role": "assistant", "content": "Process data"},
        ...     sender_type=EntityType.MAIN_SUPERVISOR,
        ...     sender_name="MainSupervisor",
        ...     supervisor_chain=["MainSupervisor", "AssistantSupervisor"]
        ... )
    """
    new_id = str(uuid.uuid4())

    # Metadata envelope; the caller's message fields are merged on top and
    # win on any key collision (same precedence as the original dict merge).
    record = {
        'message_id': new_id,
        'timestamp': datetime.utcnow().isoformat(),
        'workflow_id': self.workflow_id,
        'sender_type': sender_type,
        'sender_name': sender_name,
        'parent_id': parent_id,
        'tool_call_id': tool_call_id,
        'supervisor_chain': supervisor_chain if supervisor_chain else [],
    }
    record.update(message)

    # One JSON object per line (JSONL), appended atomically enough for our use.
    with open(self.history_file, 'a') as log:
        log.write(json.dumps(record) + '\n')

    return new_id

clear_history()

Clear the entire conversation history for the current workflow.

Source code in xronai/history/history_manager.py
346
347
348
349
350
def clear_history(self) -> None:
    """Reset this workflow's history log to an empty file (no-op if absent)."""
    if not self.history_file.exists():
        return
    self.history_file.unlink()
    self.history_file.touch()

get_frontend_history()

Get complete conversation history formatted for frontend display.

Returns:

Type Description
List[Dict[str, Any]]

List[Dict[str, Any]]: Complete conversation history with proper threading

Example

history = history_manager.get_frontend_history()

Source code in xronai/history/history_manager.py
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
def get_frontend_history(self) -> List[Dict[str, Any]]:
    """
    Get complete conversation history formatted for frontend display.

    Enriches each message with 'delegation_path' and 'display_name'
    fields, then threads messages by parent/child relationships.

    Returns:
        List[Dict[str, Any]]: Complete conversation history with proper threading

    Example:
        >>> history = history_manager.get_frontend_history()
    """
    if not self.history_file.exists():
        return []

    # (Removed a dead `messages = []` that was immediately overwritten.)
    with open(self.history_file, 'r') as f:
        messages = [json.loads(line) for line in f]

    # Add delegation chain information for display
    for msg in messages:
        if msg.get('supervisor_chain'):
            msg['delegation_path'] = ' → '.join(msg['supervisor_chain'])

        # Format display name based on sender type and chain.
        # NOTE(review): sender_type was round-tripped through JSON, so this
        # comparison assumes EntityType members equal their str values — confirm.
        if msg['sender_type'] in [EntityType.MAIN_SUPERVISOR, EntityType.ASSISTANT_SUPERVISOR]:
            msg['display_name'] = f"{msg['sender_type']}: {msg['sender_name']}"
            if msg.get('supervisor_chain'):
                msg['display_name'] += f" ({msg['delegation_path']})"
        else:
            msg['display_name'] = ("User" if msg['sender_type'] == EntityType.USER else msg['sender_name'])

    return self._build_conversation_thread(messages)

get_messages_by_entity(entity_name)

Get all messages related to a specific entity.

Parameters:

Name Type Description Default
entity_name str

Name of the entity

required

Returns:

Type Description
List[Dict[str, Any]]

List[Dict[str, Any]]: All messages related to the entity

Source code in xronai/history/history_manager.py
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
def get_messages_by_entity(self, entity_name: str) -> List[Dict[str, Any]]:
    """
    Get all messages sent by a specific entity.

    Args:
        entity_name (str): Name of the entity

    Returns:
        List[Dict[str, Any]]: All messages sent by the entity, sorted by
            timestamp. Empty list if no history has been recorded yet.
    """
    # Guard against a missing log file, consistent with the other readers
    # (load_chat_history / get_frontend_history / has_system_message);
    # previously this raised FileNotFoundError on a fresh workflow.
    if not self.history_file.exists():
        return []

    messages = []
    with open(self.history_file, 'r') as f:
        for line in f:
            msg = json.loads(line)
            if msg['sender_name'] == entity_name:
                messages.append(msg)
    return self._sort_messages(messages)

has_system_message(entity_name)

Check if system message exists for an entity in the current workflow.

Parameters:

Name Type Description Default
entity_name str

Name of the entity to check

required

Returns:

Name Type Description
bool bool

True if system message exists, False otherwise

Source code in xronai/history/history_manager.py
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
def has_system_message(self, entity_name: str) -> bool:
    """
    Report whether a system message was recorded for an entity in this workflow.

    Args:
        entity_name (str): Name of the entity to check

    Returns:
        bool: True if a matching system message exists, False otherwise.
    """
    if not self.history_file.exists():
        return False

    with open(self.history_file, 'r') as f:
        records = (json.loads(line) for line in f)
        # any() short-circuits on the first match, like the original loop.
        return any(rec['role'] == 'system' and
                   rec['sender_name'] == entity_name and
                   rec['workflow_id'] == self.workflow_id
                   for rec in records)

load_chat_history(entity_name)

Load and reconstruct the LLM-compatible conversation history for a given entity (supervisor or agent).

This function extracts only those messages from the full workflow history that were truly exchanged with or delegated to this entity, ensuring that synthetic user prompts, agent LLM responses, tool calls, and tool results are correctly ordered and threaded as would be expected by any LLM for conversation continuation. Irrelevant messages intended for other agents are excluded.

Parameters:

Name Type Description Default
entity_name str

The name of the entity for which to load chat history (e.g. supervisor, assistant supervisor, or agent).

required

Returns:

Type Description
List[Dict[str, Any]]

List[Dict[str, Any]]: Ordered list of chat messages in the expected chat_history format for initializing or restoring the entity's LLM context.

Example

agent.chat_history = history_manager.load_chat_history("AgentName")

Source code in xronai/history/history_manager.py
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
def load_chat_history(self, entity_name: str) -> List[Dict[str, Any]]:
    """
    Load and reconstruct the LLM-compatible conversation history for a given entity (supervisor or agent).

    This function extracts only those messages from the full workflow history that were 
    truly exchanged with or delegated to this entity, ensuring that synthetic user prompts, 
    agent LLM responses, tool calls, and tool results are correctly ordered and threaded 
    as would be expected by any LLM for conversation continuation. Irrelevant messages 
    intended for other agents are excluded.

    Args:
        entity_name (str): The name of the entity for which to load chat history 
            (e.g. supervisor, assistant supervisor, or agent).

    Returns:
        List[Dict[str, Any]]: Ordered list of chat messages in the expected chat_history 
            format for initializing or restoring the entity's LLM context.

    Example:
        >>> agent.chat_history = history_manager.load_chat_history("AgentName")
    """
    if not self.history_file.exists():
        return []

    # Load the entire workflow log; relevance filtering happens in memory below.
    with open(self.history_file, "r") as f:
        all_msgs = [json.loads(line) for line in f]

    # The entity's system prompt (if any) always leads the reconstructed history.
    system = next((m for m in all_msgs if m["role"] == "system" and m["sender_name"] == entity_name), None)
    history = []
    if system:
        history.append(self._format_for_chat_history(system))

    # A user message belongs to this entity if it was delegated to it (entity is
    # the last hop of supervisor_chain) or if this entity produced an assistant
    # reply whose parent_id points back to it.
    delegated_user_msgs = []
    for m in all_msgs:
        if m["role"] == "user":
            if m.get("supervisor_chain") and m["supervisor_chain"] and m["supervisor_chain"][-1] == entity_name:
                delegated_user_msgs.append(m)
            else:
                if any(n["role"] == "assistant" and n["sender_name"] == entity_name and
                       n.get("parent_id") == m["message_id"] for n in all_msgs):
                    delegated_user_msgs.append(m)

    # Timestamps are ISO-8601 strings, so lexicographic sort is chronological.
    delegated_user_msgs.sort(key=lambda x: x["timestamp"])

    for user_msg in delegated_user_msgs:
        history.append(self._format_for_chat_history(user_msg))

        # Breadth-first walk of the reply tree rooted at this user message.
        queue = collections.deque()

        # Direct children: this entity's own turns, plus any tool results.
        children = [m for m in all_msgs if m.get("parent_id") == user_msg["message_id"]]
        children = [m for m in children if m["sender_name"] == entity_name or m["role"] == "tool"]
        children.sort(key=lambda x: x["timestamp"])

        for ch in children:
            queue.append(ch)
        while queue:
            msg = queue.popleft()
            formatted = self._format_for_chat_history(msg)
            # Dedup: the same turn can be reachable via multiple parent paths.
            if formatted not in history:
                history.append(formatted)
            if msg["role"] == "assistant" and msg.get("tool_calls"):
                # Interleave each tool result directly after the assistant turn
                # that requested it, matching tool_call_id and parent_id.
                for tool_call in msg["tool_calls"]:
                    tool_msgs = [
                        t for t in all_msgs if t["role"] == "tool" and t.get("tool_call_id") == tool_call["id"] and
                        t.get("parent_id") == msg["message_id"]
                    ]
                    tool_msgs.sort(key=lambda x: x["timestamp"])
                    for tmsg in tool_msgs:
                        tfmt = self._format_for_chat_history(tmsg)
                        if tfmt not in history:
                            history.append(tfmt)

                        # Follow-up turns this entity produced after seeing the
                        # tool output; enqueue so the BFS keeps descending.
                        wrapups = [
                            mm for mm in all_msgs
                            if mm.get("parent_id") == tmsg["message_id"] and mm["sender_name"] == entity_name
                        ]
                        wrapups.sort(key=lambda x: x["timestamp"])
                        for wmsg in wrapups:
                            queue.append(wmsg)

    return history