mhamzaanjum380 committed
Commit aee413d · verified · 1 Parent(s): d6187cd

Upload 2 files

Files changed (2)
  1. explore_metadata.ipynb +601 -0
  2. metadata.jsonl +0 -0
explore_metadata.ipynb ADDED
@@ -0,0 +1,601 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "source": [
+ "# Load metadata.jsonl\n",
+ "import json\n",
+ "# Load the metadata.jsonl file\n",
+ "with open('metadata.jsonl', 'r') as jsonl_file:\n",
+ "    json_list = list(jsonl_file)"
+ ],
+ "metadata": {
+ "id": "jErfXbqHx1T3"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "type(json_list)"
+ ],
+ "metadata": {
+ "id": "RCcbpQD3x1Pp"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "json_QA = []\n",
+ "for json_str in json_list:\n",
+ "    json_data = json.loads(json_str)\n",
+ "    json_QA.append(json_data)"
+ ],
+ "metadata": {
+ "id": "F-6MzF9Zx1LR"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "json_QA[0]"
+ ],
+ "metadata": {
+ "id": "guJYoExXx1Fv"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import random\n",
+ "\n",
+ "random_samples = random.sample(json_QA, 1)\n",
+ "for sample in random_samples:\n",
+ "    print(\"=\" * 75)\n",
+ "    print(f\"Task ID: {sample['task_id']}\")\n",
+ "    print(f\"Question: {sample['Question']}\")\n",
+ "    print(f\"Level: {sample['Level']}\")\n",
+ "    print(f\"Final Answer: {sample['Final answer']}\")\n",
+ "    print(f\"Annotator Metadata: \")\n",
+ "    print(f\" ├── Steps: \")\n",
+ "    for step in sample['Annotator Metadata']['Steps'].split('\\n'):\n",
+ "        print(f\" │ ├── {step}\")\n",
+ "    print(f\" ├── Number of steps: {sample['Annotator Metadata']['Number of steps']}\")\n",
+ "    print(f\" ├── How long did this take?: {sample['Annotator Metadata']['How long did this take?']}\")\n",
+ "    print(f\" ├── Tools:\")\n",
+ "    for tool in sample['Annotator Metadata']['Tools'].split('\\n'):\n",
+ "        print(f\" │ ├── {tool}\")\n",
+ "    print(f\" └── Number of tools: {sample['Annotator Metadata']['Number of tools']}\")\n",
+ "print(\"=\" * 75)"
+ ],
+ "metadata": {
+ "id": "9lHV1amUx1A4"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import os\n",
+ "from dotenv import load_dotenv\n",
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
+ "from langchain_community.vectorstores import FAISS"
+ ],
+ "metadata": {
+ "id": "A5EaWko_x086"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\")"
+ ],
+ "metadata": {
+ "id": "pNY9Q1egx04l"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from langchain.schema import Document\n",
+ "\n",
+ "docs = []\n",
+ "for sample in json_QA:\n",
+ "    content = f\"Question : {sample['Question']}\\n\\nFinal answer : {sample['Final answer']}\"\n",
+ "    doc = Document(\n",
+ "        page_content=content,\n",
+ "        metadata={\n",
+ "            \"source\": sample['task_id']\n",
+ "        }\n",
+ "    )\n",
+ "    docs.append(doc)"
+ ],
+ "metadata": {
+ "id": "iZLSkNl_x00a"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "docs[2]"
+ ],
+ "metadata": {
+ "id": "gOWff8jhB9RT"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "db = FAISS.from_documents(documents=docs, embedding=embeddings)"
+ ],
+ "metadata": {
+ "id": "7GUXBcBQx0qk"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "db.save_local(\"qa_index\")"
+ ],
+ "metadata": {
+ "id": "o8wwBjRw5mXL"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "folder_path = \"qa_index\"\n",
+ "# Load the saved index into new_db\n",
+ "new_db = FAISS.load_local(folder_path, embeddings=embeddings, allow_dangerous_deserialization=True)"
+ ],
+ "metadata": {
+ "id": "IzUkiQIjx0nF"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "retriever = new_db.as_retriever()"
+ ],
+ "metadata": {
+ "id": "PqHNQ5DX96OF"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "query = \"On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?\"\n",
+ "docs = retriever.invoke(query)\n",
+ "docs[0]"
+ ],
+ "metadata": {
+ "id": "hTpWn1hXx0j3"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# list of the tools used in all the samples\n",
+ "from collections import Counter, OrderedDict\n",
+ "\n",
+ "tools = []\n",
+ "for sample in json_QA:\n",
+ "    for tool in sample['Annotator Metadata']['Tools'].split('\\n'):\n",
+ "        tool = tool[2:].strip().lower()\n",
+ "        if tool.startswith(\"(\"):\n",
+ "            tool = tool[11:].strip()\n",
+ "        tools.append(tool)\n",
+ "tools_counter = OrderedDict(Counter(tools))\n",
+ "print(\"List of tools used in all samples:\")\n",
+ "print(\"Total number of tools used:\", len(tools_counter))\n",
+ "for tool, count in tools_counter.items():\n",
+ "    print(f\" ├── {tool}: {count}\")"
+ ],
+ "metadata": {
+ "id": "PXjVbAEQx0gU"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "system_prompt = \"\"\"\n",
+ "You are a helpful assistant tasked with answering questions using a set of tools.\n",
+ "If the tool is not available, you can try to find the information online. You can also use your own knowledge to answer the question.\n",
+ "You need to provide a step-by-step explanation of how you arrived at the answer.\n",
+ "==========================\n",
+ "Here are a few examples showing you how to answer the question step by step.\n",
+ "\"\"\"\n",
+ "\n",
+ "for i, samples in enumerate(random_samples):\n",
+ "    system_prompt += f\"\\nQuestion {i+1}: {samples['Question']}\\nSteps:\\n{samples['Annotator Metadata']['Steps']}\\nTools:\\n{samples['Annotator Metadata']['Tools']}\\nFinal Answer: {samples['Final answer']}\\n\"\n",
+ "system_prompt += \"\\n==========================\\n\"\n",
+ "system_prompt += \"Now, please answer the following question step by step.\\n\""
+ ],
+ "metadata": {
+ "id": "s_Nny7csx0cb"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# save the system_prompt to a file\n",
+ "with open('system_prompt.txt', 'w') as f:\n",
+ "    f.write(system_prompt)"
+ ],
+ "metadata": {
+ "id": "mgVVvO8zx0Yj"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# load the system prompt from the file\n",
+ "with open('system_prompt.txt', 'r') as f:\n",
+ "    system_prompt = f.read()\n",
+ "print(system_prompt)"
+ ],
+ "metadata": {
+ "id": "tGRnor1Ox0UZ"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Start building Agent"
+ ],
+ "metadata": {
+ "id": "_Dv0qdFZ_c8i"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from langgraph.graph import MessagesState, START, StateGraph\n",
+ "from langgraph.prebuilt import tools_condition\n",
+ "from langgraph.prebuilt import ToolNode\n",
+ "from langchain_google_genai import ChatGoogleGenerativeAI\n",
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
+ "from langchain_community.tools.tavily_search import TavilySearchResults\n",
+ "from langchain_community.document_loaders import WikipediaLoader\n",
+ "from langchain_community.document_loaders import ArxivLoader\n",
+ "from langchain_community.vectorstores import FAISS\n",
+ "from langchain.tools.retriever import create_retriever_tool\n",
+ "from langchain_core.messages import HumanMessage, SystemMessage\n",
+ "from langchain_core.tools import tool"
+ ],
+ "metadata": {
+ "id": "25fNKGasx0Qk"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-mpnet-base-v2\")"
+ ],
+ "metadata": {
+ "id": "sEmFrORkx0Mp"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "vector_store = new_db.as_retriever()"
+ ],
+ "metadata": {
+ "id": "DelgLC92x0JS"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "TkxZGipCxvsH"
+ },
+ "outputs": [],
+ "source": [
+ "question_retrieve_tool = create_retriever_tool(\n",
+ "    vector_store,\n",
+ "    \"Question_Retriever\",\n",
+ "    \"Find similar questions in the vector database for the given question.\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "@tool\n",
+ "def multiply(a: int, b: int) -> int:\n",
+ "    \"\"\"Multiply two numbers.\n",
+ "\n",
+ "    Args:\n",
+ "        a: first int\n",
+ "        b: second int\n",
+ "    \"\"\"\n",
+ "    return a * b\n",
+ "\n",
+ "@tool\n",
+ "def add(a: int, b: int) -> int:\n",
+ "    \"\"\"Add two numbers.\n",
+ "\n",
+ "    Args:\n",
+ "        a: first int\n",
+ "        b: second int\n",
+ "    \"\"\"\n",
+ "    return a + b\n",
+ "\n",
+ "@tool\n",
+ "def subtract(a: int, b: int) -> int:\n",
+ "    \"\"\"Subtract two numbers.\n",
+ "\n",
+ "    Args:\n",
+ "        a: first int\n",
+ "        b: second int\n",
+ "    \"\"\"\n",
+ "    return a - b\n",
+ "\n",
+ "@tool\n",
+ "def divide(a: int, b: int) -> float:\n",
+ "    \"\"\"Divide two numbers.\n",
+ "\n",
+ "    Args:\n",
+ "        a: first int\n",
+ "        b: second int\n",
+ "    \"\"\"\n",
+ "    if b == 0:\n",
+ "        raise ValueError(\"Cannot divide by zero.\")\n",
+ "    return a / b\n",
+ "\n",
+ "@tool\n",
+ "def modulus(a: int, b: int) -> int:\n",
+ "    \"\"\"Get the modulus of two numbers.\n",
+ "\n",
+ "    Args:\n",
+ "        a: first int\n",
+ "        b: second int\n",
+ "    \"\"\"\n",
+ "    return a % b\n",
+ "\n",
+ "@tool\n",
+ "def wiki_search(query: str) -> str:\n",
+ "    \"\"\"Search Wikipedia for a query and return maximum 2 results.\n",
+ "\n",
+ "    Args:\n",
+ "        query: The search query.\"\"\"\n",
+ "    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()\n",
+ "    formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
+ "        [\n",
+ "            f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content}\\n</Document>'\n",
+ "            for doc in search_docs\n",
+ "        ])\n",
+ "    return {\"wiki_results\": formatted_search_docs}\n",
+ "\n",
+ "@tool\n",
+ "def web_search(query: str) -> str:\n",
+ "    \"\"\"Search Tavily for a query and return maximum 3 results.\n",
+ "\n",
+ "    Args:\n",
+ "        query: The search query.\"\"\"\n",
+ "    # TavilySearchResults returns a list of result dicts, so pass the query positionally\n",
+ "    search_docs = TavilySearchResults(max_results=3).invoke(query)\n",
+ "    formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
+ "        [\n",
+ "            f'<Document source=\"{doc[\"url\"]}\"/>\\n{doc[\"content\"]}\\n</Document>'\n",
+ "            for doc in search_docs\n",
+ "        ])\n",
+ "    return {\"web_results\": formatted_search_docs}\n",
+ "\n",
+ "@tool\n",
+ "def arvix_search(query: str) -> str:\n",
+ "    \"\"\"Search Arxiv for a query and return maximum 3 results.\n",
+ "\n",
+ "    Args:\n",
+ "        query: The search query.\"\"\"\n",
+ "    search_docs = ArxivLoader(query=query, load_max_docs=3).load()\n",
+ "    formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
+ "        [\n",
+ "            f'<Document source=\"{doc.metadata.get(\"source\", \"\")}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content[:1000]}\\n</Document>'\n",
+ "            for doc in search_docs\n",
+ "        ])\n",
+ "    return {\"arvix_results\": formatted_search_docs}\n",
+ "\n",
+ "@tool\n",
+ "def similar_question_search(question: str) -> str:\n",
+ "    \"\"\"Search the vector database for similar questions and return the first results.\n",
+ "\n",
+ "    Args:\n",
+ "        question: the question human provided.\"\"\"\n",
+ "    matched_docs = new_db.similarity_search(question, k=3)\n",
+ "    formatted_search_docs = \"\\n\\n---\\n\\n\".join(\n",
+ "        [\n",
+ "            f'<Document source=\"{doc.metadata[\"source\"]}\" page=\"{doc.metadata.get(\"page\", \"\")}\"/>\\n{doc.page_content[:1000]}\\n</Document>'\n",
+ "            for doc in matched_docs\n",
+ "        ])\n",
+ "    return {\"similar_questions\": formatted_search_docs}\n"
+ ],
+ "metadata": {
+ "id": "fjaTIMVwFQJX"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "tools = [\n",
+ "    multiply,\n",
+ "    add,\n",
+ "    subtract,\n",
+ "    divide,\n",
+ "    modulus,\n",
+ "    wiki_search,\n",
+ "    web_search,\n",
+ "    arvix_search,\n",
+ "    similar_question_search,\n",
+ "    question_retrieve_tool\n",
+ "]"
+ ],
+ "metadata": {
+ "id": "9NVPKEV0GAFi"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [],
+ "metadata": {
+ "id": "K9zA9G1uqBGj"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n",
+ "llm_with_tools = llm.bind_tools(tools)"
+ ],
+ "metadata": {
+ "id": "qas0W-ImGBte"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# load the system prompt from the file\n",
+ "with open('system_prompt.txt', 'r') as f:\n",
+ "    system_prompt = f.read()\n",
+ "\n",
+ "\n",
+ "# System message\n",
+ "sys_msg = SystemMessage(content=system_prompt)"
+ ],
+ "metadata": {
+ "id": "wVmI8Rf5GBpb"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Node\n",
+ "def assistant(state: MessagesState):\n",
+ "    \"\"\"Assistant node\"\"\"\n",
+ "    return {\"messages\": [llm_with_tools.invoke([sys_msg] + state[\"messages\"])]}\n",
+ "\n",
+ "# Build graph\n",
+ "builder = StateGraph(MessagesState)\n",
+ "builder.add_node(\"assistant\", assistant)\n",
+ "builder.add_node(\"tools\", ToolNode(tools))\n",
+ "builder.add_edge(START, \"assistant\")\n",
+ "builder.add_conditional_edges(\n",
+ "    \"assistant\",\n",
+ "    tools_condition,\n",
+ ")\n",
+ "builder.add_edge(\"tools\", \"assistant\")\n",
+ "\n",
+ "# Compile graph\n",
+ "graph = builder.compile()"
+ ],
+ "metadata": {
+ "id": "gBXKT6YtGBkU"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "from IPython.display import Image, display\n",
+ "\n",
+ "display(Image(graph.get_graph(xray=True).draw_mermaid_png()))"
+ ],
+ "metadata": {
+ "id": "Clsd8J7fGBfl"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "question = \"\"\n",
+ "messages = [HumanMessage(content=question)]\n",
+ "messages = graph.invoke({\"messages\": messages})"
+ ],
+ "metadata": {
+ "id": "G0tvlcKnGBbr"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "for m in messages['messages']:\n",
+ "    m.pretty_print()"
+ ],
+ "metadata": {
+ "id": "uIpDcVbjG-hN"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }
metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff