Memory
The Memory pattern enables an agent to retain information learned from earlier interactions and use it later. It is inspired by the MemGPT paper, which defines two memory types:
- Core memory: Small, high-value facts always included in the prompt (e.g., user name, key preferences). Updated via a tool; injected into the system context each turn.
- Archival memory: Larger, long-tail information stored externally and retrieved on demand (e.g., detailed notes, historical results). Not included by default; the agent searches it explicitly when needed.
Memory is beneficial in use cases such as:
- Personalization: Persist user preferences or profile details.
- Long-running tasks: Track decisions or partial results across sessions.
- Knowledge recall: Store and retrieve references, notes, or summaries.
Archival memory is typically implemented with vector search (a minimal sketch follows this list):
- Chunking: Split information into manageable text chunks.
- Embedding: Convert each chunk into a vector using an embedding model (e.g., text-embedding-3-small).
- Storage: Save vectors in a database optimized for similarity search.
- Retrieval: Embed the query and return the most similar stored chunks.
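To make these steps concrete, here is a minimal TypeScript sketch of the chunk → embed → store → retrieve pipeline, assuming the official `openai` npm package for embeddings. The in-memory `vectorStore` array and the `chunkText`/`archive`/`searchArchive` helpers are illustrative stand-ins (not part of the agent library used below), and a production system would use a real vector database. Note that the implementation examples that follow deliberately stub this step out with a substring match; see their TODO comments.

```ts
import OpenAI from "openai";

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

interface StoredChunk {
  text: string;
  vector: number[];
}

const vectorStore: StoredChunk[] = []; // stand-in for a real vector database

// 1. Chunking: naive fixed-size splits; real systems split on semantic boundaries.
function chunkText(text: string, size = 500): string[] {
  const chunks: string[] = [];
  for (let i = 0; i < text.length; i += size) {
    chunks.push(text.slice(i, i + size));
  }
  return chunks;
}

// 2 + 3. Embedding and storage: embed each chunk and keep the vector alongside the text.
async function archive(text: string): Promise<void> {
  const pieces = chunkText(text);
  const res = await client.embeddings.create({
    model: "text-embedding-3-small",
    input: pieces,
  });
  for (const d of res.data) {
    vectorStore.push({ text: pieces[d.index], vector: d.embedding });
  }
}

function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let na = 0;
  let nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na) * Math.sqrt(nb));
}

// 4. Retrieval: embed the query and return the most similar stored chunks.
async function searchArchive(query: string, topK = 3): Promise<string[]> {
  const res = await client.embeddings.create({
    model: "text-embedding-3-small",
    input: query,
  });
  const queryVector = res.data[0].embedding;
  return [...vectorStore]
    .sort(
      (a, b) =>
        cosineSimilarity(b.vector, queryVector) -
        cosineSimilarity(a.vector, queryVector),
    )
    .slice(0, topK)
    .map((c) => c.text);
}
```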
Implementation
The examples below demonstrate a basic in-memory core store and a placeholder archival search, implemented in TypeScript, Rust, and Go.
```ts
import {
  Agent,
  type AgentItem,
  type AgentItemMessage,
} from "@hoangvvo/llm-agent";
import { zodTool } from "@hoangvvo/llm-agent/zod";
import type { TextPart } from "@hoangvvo/llm-sdk";
import { z } from "zod";
import { getModel } from "./get-model.ts";

// Memory pattern example: provide tools + instructions for core/archival memory.
interface MemoryBlock {
  id: string;
  content: string;
}

class InMemoryStore {
  core = new Map<string, string>();
  archival = new Map<string, string>();

  fetchCore(): MemoryBlock[] {
    return [...this.core.entries()].map(([id, content]) => ({ id, content }));
  }

  updateCore(block: MemoryBlock): MemoryBlock[] {
    const { id, content } = block;
    if (!content.trim()) this.core.delete(id);
    else this.core.set(id, content);
    return this.fetchCore();
  }

  searchArchival(query: string): MemoryBlock[] {
    // TODO: Replace with semantic vector search using embeddings.
    const q = query.toLowerCase();
    return [...this.archival.entries()]
      .filter(
        ([id, c]) =>
          id.toLowerCase().includes(q) || c.toLowerCase().includes(q),
      )
      .map(([id, content]) => ({ id, content }));
  }

  updateArchival(block: MemoryBlock): void {
    const { id, content } = block;
    if (!content.trim()) this.archival.delete(id);
    else this.archival.set(id, content);
  }
}

const MEMORY_PROMPT = `You can remember information learned from interactions with the user in two types of memory called core memory and archival memory.
Core memory is always available in your conversation context, providing essential, foundational context for keeping track of key details about the user.
As core memory is limited in size, it is important to only store the most important information. For other less important details, use archival memory.
Archival memory is infinite size, but is held outside of your immediate context, so you must explicitly run a search operation to see data inside it.
Archival memory is used to remember less significant details about the user or information found during the conversation. When the user mentions a name, topic, or details you don't know, search your archival memory to see if you have any information about it.`;

const coreMemoryPrompt = (memories: MemoryBlock[]) =>
  `Core memories (JSON list):\n${JSON.stringify(memories)}`;

const store = new InMemoryStore();
const model = getModel("openai", "gpt-4o");

const memoryAgent = new Agent<void>({
  name: "memory",
  model,
  instructions: [
    MEMORY_PROMPT,
    `You cannot see prior conversation turns beyond what is provided in the current input.
When a user shares a durable preference or profile detail, call core_memory_update to store it.
When asked to recall such facts and it's not present in the current input, rely on the core memories in this prompt.
For less important or long-tail info, use archival_memory_search before answering.`,
    async () => coreMemoryPrompt(store.fetchCore()),
  ],
  tools: [
    zodTool({
      name: "core_memory_update",
      description:
        "Update or add a core memory block. Returns all core memories after the update.",
      parameters: z.object({ id: z.string(), content: z.string() }),
      async execute({ id, content }) {
        console.log(
          `[memory.core_memory_update] id=${id} len=${content.length}`,
        );
        const memoryId = id?.trim()
          ? id
          : Math.random().toString(36).slice(2, 11);
        const updated = store.updateCore({ id: memoryId, content });
        return {
          content: [
            { type: "text", text: JSON.stringify({ core_memories: updated }) },
          ],
          is_error: false,
        };
      },
    }),
    zodTool({
      name: "archival_memory_search",
      description: "Search for memories in the archival memory",
      parameters: z.object({ query: z.string() }),
      async execute({ query }) {
        console.log(`[memory.archival_memory_search] query="${query}"`);
        // TODO: Replace with semantic vector search using embeddings.
        const results = store.searchArchival(query);
        return {
          content: [{ type: "text", text: JSON.stringify({ results }) }],
          is_error: false,
        };
      },
    }),
    zodTool({
      name: "archival_memory_update",
      description: "Update or add a memory block in the archival memory",
      parameters: z.object({ id: z.string(), content: z.string() }),
      async execute({ id, content }) {
        console.log(
          `[memory.archival_memory_update] id=${id} len=${content.length}`,
        );
        // TODO: store vector embedding for semantic search
        const memoryId = id?.trim()
          ? id
          : Math.random().toString(36).slice(2, 11);
        store.updateArchival({ id: memoryId, content });
        const result =
          content.trim() === ""
            ? { success: true, action: "deleted" }
            : {
                success: true,
                action: "updated",
                memory: { id: memoryId, content },
              };
        return {
          content: [{ type: "text", text: JSON.stringify(result) }],
          is_error: false,
        };
      },
    }),
  ],
});

// Demo: four independent turns to show core + archival memory
// Turn 1 — store a core memory
const items1: AgentItem[] = [
  {
    type: "message",
    role: "user",
    content: [
      { type: "text", text: "Remember that my favorite color is blue." },
    ],
  },
];
console.log(
  `[user] ${((items1[0] as AgentItemMessage).content[0] as TextPart).text}`,
);
const res1 = await memoryAgent.run({ context: undefined, input: items1 });
console.dir(res1.content, { depth: null });

// Turn 2 — recall using core memory (no prior messages)
const items2: AgentItem[] = [
  {
    type: "message",
    role: "user",
    content: [{ type: "text", text: "What's my favorite color?" }],
  },
];
console.log(
  `[user] ${((items2[0] as AgentItemMessage).content[0] as TextPart).text}`,
);
const res2 = await memoryAgent.run({ context: undefined, input: items2 });
console.dir(res2.content, { depth: null });

// Turn 3 — store less-important info in archival memory
const items3: AgentItem[] = [
  {
    type: "message",
    role: "user",
    content: [
      {
        type: "text",
        text:
          "I captured some background notes titled 'q3-report-research' for future reference: " +
          "Key data sources for the Q3 report include Salesforce pipeline exports, Google Analytics weekly sessions, and the paid ads spend spreadsheet. " +
          "Please tuck this away so you can look it up later.",
      },
    ],
  },
];
console.log(
  `[user] ${((items3[0] as AgentItemMessage).content[0] as TextPart).text}`,
);
const res3 = await memoryAgent.run({ context: undefined, input: items3 });
console.dir(res3.content, { depth: null });

// Turn 4 — recall via archival search (no prior messages)
const items4: AgentItem[] = [
  {
    type: "message",
    role: "user",
    content: [
      {
        type: "text",
        text: "Can you pull up what we have under 'q3-report-research'?",
      },
    ],
  },
];
console.log(
  `[user] ${((items4[0] as AgentItemMessage).content[0] as TextPart).text}`,
);
const res4 = await memoryAgent.run({ context: undefined, input: items4 });
console.dir(res4.content, { depth: null });
```
The same example in Rust:

```rust
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
};

use dotenvy::dotenv;
use llm_agent::{Agent, AgentItem, AgentRequest, AgentTool, AgentToolResult, InstructionParam};
use llm_sdk::{JSONSchema, Message, Part};
use serde::{Deserialize, Serialize};
use serde_json::json;

// Memory pattern example: core + archival memory tools and instructions.
#[derive(Clone, Default)]
struct Store {
    core: Arc<Mutex<HashMap<String, String>>>,
    archival: Arc<Mutex<HashMap<String, String>>>,
}

#[derive(Clone, Serialize, Deserialize)]
struct MemoryBlock {
    id: String,
    content: String,
}

impl Store {
    fn fetch_core(&self) -> Vec<MemoryBlock> {
        self.core
            .lock()
            .unwrap()
            .iter()
            .map(|(id, content)| MemoryBlock {
                id: id.clone(),
                content: content.clone(),
            })
            .collect()
    }

    fn update_core(&self, b: MemoryBlock) -> Vec<MemoryBlock> {
        let mut core = self.core.lock().unwrap();
        if b.content.trim().is_empty() {
            core.remove(&b.id);
        } else {
            core.insert(b.id, b.content);
        }
        drop(core);
        self.fetch_core()
    }

    fn search_archival(&self, query: &str) -> Vec<MemoryBlock> {
        // TODO: Replace with semantic vector search using embeddings.
        let q = query.to_lowercase();
        self.archival
            .lock()
            .unwrap()
            .iter()
            .filter(|(id, c)| id.to_lowercase().contains(&q) || c.to_lowercase().contains(&q))
            .map(|(id, content)| MemoryBlock {
                id: id.clone(),
                content: content.clone(),
            })
            .collect()
    }

    fn update_archival(&self, b: MemoryBlock) {
        let mut arch = self.archival.lock().unwrap();
        if b.content.trim().is_empty() {
            arch.remove(&b.id);
        } else {
            arch.insert(b.id, b.content);
        }
    }
}

type Ctx = ();

struct CoreMemoryUpdate {
    store: Store,
}

#[async_trait::async_trait]
impl AgentTool<Ctx> for CoreMemoryUpdate {
    fn name(&self) -> String {
        "core_memory_update".into()
    }
    fn description(&self) -> String {
        "Update or add a core memory block. Returns all core memories after the update.".into()
    }
    fn parameters(&self) -> JSONSchema {
        json!({
            "type": "object",
            "properties": {"id": {"type": "string"}, "content": {"type": "string"}},
            "required": ["id", "content"],
            "additionalProperties": false
        })
    }
    async fn execute(
        &self,
        args: serde_json::Value,
        _context: &Ctx,
        _state: &llm_agent::RunState,
    ) -> Result<AgentToolResult, Box<dyn std::error::Error + Send + Sync>> {
        #[derive(Deserialize)]
        struct In {
            id: String,
            content: String,
        }
        let mut input: In = serde_json::from_value(args)?;
        println!(
            "[memory.core_memory_update] id={} len={}",
            input.id,
            input.content.len()
        );
        if input.id.trim().is_empty() {
            input.id = rand_id();
        }
        let updated = self.store.update_core(MemoryBlock {
            id: input.id,
            content: input.content,
        });
        let body = json!({"core_memories": updated}).to_string();
        Ok(AgentToolResult {
            content: vec![Part::text(body)],
            is_error: false,
        })
    }
}

struct ArchivalSearch {
    store: Store,
}

#[async_trait::async_trait]
impl AgentTool<Ctx> for ArchivalSearch {
    fn name(&self) -> String {
        "archival_memory_search".into()
    }
    fn description(&self) -> String {
        "Search for memories in the archival memory".into()
    }
    fn parameters(&self) -> JSONSchema {
        json!({
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
            "additionalProperties": false
        })
    }
    async fn execute(
        &self,
        args: serde_json::Value,
        _context: &Ctx,
        _state: &llm_agent::RunState,
    ) -> Result<AgentToolResult, Box<dyn std::error::Error + Send + Sync>> {
        #[derive(Deserialize)]
        struct In {
            query: String,
        }
        let input: In = serde_json::from_value(args)?;
        println!("[memory.archival_memory_search] query=\"{}\"", input.query);
        // TODO: Replace with semantic vector search using embeddings
        let results = self.store.search_archival(&input.query);
        let body = json!({"results": results}).to_string();
        Ok(AgentToolResult {
            content: vec![Part::text(body)],
            is_error: false,
        })
    }
}

struct ArchivalUpdate {
    store: Store,
}

#[async_trait::async_trait]
impl AgentTool<Ctx> for ArchivalUpdate {
    fn name(&self) -> String {
        "archival_memory_update".into()
    }
    fn description(&self) -> String {
        "Update or add a memory block in the archival memory".into()
    }
    fn parameters(&self) -> JSONSchema {
        json!({
            "type": "object",
            "properties": {"id": {"type": "string"}, "content": {"type": "string"}},
            "required": ["id", "content"],
            "additionalProperties": false
        })
    }
    async fn execute(
        &self,
        args: serde_json::Value,
        _context: &Ctx,
        _state: &llm_agent::RunState,
    ) -> Result<AgentToolResult, Box<dyn std::error::Error + Send + Sync>> {
        #[derive(Deserialize)]
        struct In {
            id: String,
            content: String,
        }
        let mut input: In = serde_json::from_value(args)?;
        println!(
            "[memory.archival_memory_update] id={} len={}",
            input.id,
            input.content.len()
        );
        if input.id.trim().is_empty() {
            input.id = rand_id();
        }
        self.store.update_archival(MemoryBlock {
            id: input.id.clone(),
            content: input.content.clone(),
        });
        let resp = if input.content.trim().is_empty() {
            json!({"success": true, "action": "deleted"})
        } else {
            json!({"success": true, "action": "updated", "memory": {"id": input.id, "content": input.content}})
        };
        Ok(AgentToolResult {
            content: vec![Part::text(resp.to_string())],
            is_error: false,
        })
    }
}
// Placeholder id generator: derives an id from the process id, so every call
// within one run returns the same value; swap in a real unique-id generator
// (e.g. a UUID) for production use.
fn rand_id() -> String {
    format!("{:x}", std::process::id())
}
#[tokio::main]
async fn main() {
    dotenv().ok();

    // Use OpenAI gpt-4o via env var OPENAI_API_KEY
    let model = Arc::new(llm_sdk::openai::OpenAIModel::new(
        "gpt-4o",
        llm_sdk::openai::OpenAIModelOptions {
            api_key: std::env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY must be set"),
            ..Default::default()
        },
    ));

    let store = Store::default();

    let memory_prompt = r#"You can remember information learned from interactions with the user in two types of memory called core memory and archival memory.
Core memory is always available in your conversation context, providing essential, foundational context for keeping track of key details about the user.
As core memory is limited in size, it is important to only store the most important information. For other less important details, use archival memory.
Archival memory is infinite size, but is held outside of your immediate context, so you must explicitly run a search operation to see data inside it.
Archival memory is used to remember less significant details about the user or information found during the conversation. When the user mentions a name, topic, or details you don't know, search your archival memory to see if you have any information about it."#;

    let rules_prompt = r#"You cannot see prior conversation turns beyond what is provided in the current input.
When a user shares a durable preference or profile detail, call core_memory_update to store it.
When asked to recall such facts and it's not present in the current input, rely on the core memories in this prompt.
For less important or long-tail info, use archival_memory_search before answering."#;

    let agent = Agent::new(
        llm_agent::AgentParams::new("memory", model.clone())
            .add_instruction(memory_prompt)
            .add_instruction(rules_prompt)
            .add_instruction(InstructionParam::AsyncFunc(Box::new({
                let store = store.clone();
                move |_| {
                    let store = store.clone();
                    Box::pin(async move {
                        let blocks = store.fetch_core();
                        Ok(format!(
                            "Core memories (JSON list):\n{}",
                            serde_json::to_string(&blocks).unwrap()
                        ))
                    })
                }
            })))
            .add_tool(CoreMemoryUpdate {
                store: store.clone(),
            })
            .add_tool(ArchivalSearch {
                store: store.clone(),
            })
            .add_tool(ArchivalUpdate {
                store: store.clone(),
            }),
    );

    // Four independent sessions (agent cannot see prior turns except via memory)
    // Turn 1 — store a core memory
    let items1: Vec<AgentItem> = vec![AgentItem::Message(Message::user(vec![Part::text(
        "Remember that my favorite color is blue.",
    )]))];
    println!("[user] Remember that my favorite color is blue.");
    let res1 = agent
        .run(AgentRequest {
            context: (),
            input: items1,
        })
        .await
        .expect("run failed");
    println!("res1: {:#?}", res1.content);

    // Turn 2 — recall using core memory (no prior messages)
    let items2: Vec<AgentItem> = vec![AgentItem::Message(Message::user(vec![Part::text(
        "What's my favorite color?",
    )]))];
    println!("[user] What's my favorite color?");
    let res2 = agent
        .run(AgentRequest {
            context: (),
            input: items2,
        })
        .await
        .expect("run failed");
    println!("res2: {:#?}", res2.content);

    // Turn 3 — capture background notes for later lookup
    let turn3 = "I captured some background notes titled 'q3-report-research' for future \
                 reference: "
        .to_string()
        + "Key data sources for the Q3 report include Salesforce pipeline exports, Google \
           Analytics weekly sessions, and the paid ads spend spreadsheet. "
        + "Please tuck this away so you can look it up later.";
    let items3: Vec<AgentItem> = vec![AgentItem::Message(Message::user(vec![Part::text(&turn3)]))];
    println!("[user] {}", turn3);
    let res3 = agent
        .run(AgentRequest {
            context: (),
            input: items3,
        })
        .await
        .expect("run failed");
    println!("res3: {:#?}", res3.content);

    // Turn 4 — fetch the saved background notes
    let turn4 = "Can you pull up what we have under 'q3-report-research'?";
    let items4: Vec<AgentItem> = vec![AgentItem::Message(Message::user(vec![Part::text(turn4)]))];
    println!("[user] {}", turn4);
    let res4 = agent
        .run(AgentRequest {
            context: (),
            input: items4,
        })
        .await
        .expect("run failed");
    println!("res4: {:#?}", res4.content);
}
```
And in Go:

```go
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "log"
    "os"
    "strings"

    llmagent "github.com/hoangvvo/llm-sdk/agent-go"
    llmsdk "github.com/hoangvvo/llm-sdk/sdk-go"
    "github.com/hoangvvo/llm-sdk/sdk-go/openai"
    "github.com/joho/godotenv"
    "github.com/sanity-io/litter"
)

// Memory pattern example with core + archival memory tools and instructions.
type MemoryBlock struct {
    ID      string         `json:"id"`
    Content string         `json:"content"`
    Meta    map[string]any `json:"metadata,omitempty"`
}

type Store struct {
    Core     map[string]string
    Archival map[string]string
}

func NewStore() *Store {
    return &Store{Core: map[string]string{}, Archival: map[string]string{}}
}

func (s *Store) FetchCore() []MemoryBlock {
    res := make([]MemoryBlock, 0, len(s.Core))
    for id, content := range s.Core {
        res = append(res, MemoryBlock{ID: id, Content: content})
    }
    return res
}

func (s *Store) UpdateCore(b MemoryBlock) []MemoryBlock {
    if strings.TrimSpace(b.Content) == "" {
        delete(s.Core, b.ID)
    } else {
        s.Core[b.ID] = b.Content
    }
    return s.FetchCore()
}

func (s *Store) SearchArchival(query string) []MemoryBlock {
    // TODO: Replace with semantic vector search using embeddings.
    q := strings.ToLower(query)
    res := []MemoryBlock{}
    for id, content := range s.Archival {
        idLower := strings.ToLower(id)
        contentLower := strings.ToLower(content)
        if strings.Contains(idLower, q) || strings.Contains(contentLower, q) {
            res = append(res, MemoryBlock{ID: id, Content: content})
        }
    }
    return res
}

func (s *Store) UpdateArchival(b MemoryBlock) {
    if strings.TrimSpace(b.Content) == "" {
        delete(s.Archival, b.ID)
    } else {
        s.Archival[b.ID] = b.Content
    }
}

// No context required for this example
type Ctx = struct{}

// Tools
type CoreMemoryUpdateTool struct{ S *Store }

func (t *CoreMemoryUpdateTool) Name() string { return "core_memory_update" }

func (t *CoreMemoryUpdateTool) Description() string {
    return "Update or add a core memory block. Returns all core memories after the update."
}

func (t *CoreMemoryUpdateTool) Parameters() llmsdk.JSONSchema {
    return llmsdk.JSONSchema{
        "type": "object",
        "properties": map[string]any{
            "id":      map[string]any{"type": "string"},
            "content": map[string]any{"type": "string"},
        },
        "required":             []string{"id", "content"},
        "additionalProperties": false,
    }
}

func (t *CoreMemoryUpdateTool) Execute(ctx context.Context, params json.RawMessage, _ Ctx, _ *llmagent.RunState) (llmagent.AgentToolResult, error) {
    var in struct{ ID, Content string }
    if err := json.Unmarshal(params, &in); err != nil {
        return llmagent.AgentToolResult{}, err
    }
    fmt.Printf("[memory.core_memory_update] id=%s len=%d\n", in.ID, len(in.Content))
    id := strings.TrimSpace(in.ID)
    if id == "" {
        id = randID()
    }
    updated := t.S.UpdateCore(MemoryBlock{ID: id, Content: in.Content})
    b, _ := json.Marshal(map[string]any{"core_memories": updated})
    return llmagent.AgentToolResult{Content: []llmsdk.Part{llmsdk.NewTextPart(string(b))}, IsError: false}, nil
}

type ArchivalSearchTool struct{ S *Store }

func (t *ArchivalSearchTool) Name() string { return "archival_memory_search" }

func (t *ArchivalSearchTool) Description() string {
    return "Search for memories in the archival memory"
}

func (t *ArchivalSearchTool) Parameters() llmsdk.JSONSchema {
    return llmsdk.JSONSchema{
        "type":                 "object",
        "properties":           map[string]any{"query": map[string]any{"type": "string"}},
        "required":             []string{"query"},
        "additionalProperties": false,
    }
}

func (t *ArchivalSearchTool) Execute(ctx context.Context, params json.RawMessage, _ Ctx, _ *llmagent.RunState) (llmagent.AgentToolResult, error) {
    var in struct{ Query string }
    if err := json.Unmarshal(params, &in); err != nil {
        return llmagent.AgentToolResult{}, err
    }
    fmt.Printf("[memory.archival_memory_search] query=\"%s\"\n", in.Query)
    // TODO: Replace substring search with semantic vector search using embeddings
    results := t.S.SearchArchival(in.Query)
    b, _ := json.Marshal(map[string]any{"results": results})
    return llmagent.AgentToolResult{Content: []llmsdk.Part{llmsdk.NewTextPart(string(b))}, IsError: false}, nil
}

type ArchivalUpdateTool struct{ S *Store }

func (t *ArchivalUpdateTool) Name() string { return "archival_memory_update" }

func (t *ArchivalUpdateTool) Description() string {
    return "Update or add a memory block in the archival memory"
}

func (t *ArchivalUpdateTool) Parameters() llmsdk.JSONSchema {
    return llmsdk.JSONSchema{
        "type": "object",
        "properties": map[string]any{
            "id":      map[string]any{"type": "string"},
            "content": map[string]any{"type": "string"},
        },
        "required":             []string{"id", "content"},
        "additionalProperties": false,
    }
}

func (t *ArchivalUpdateTool) Execute(ctx context.Context, params json.RawMessage, _ Ctx, _ *llmagent.RunState) (llmagent.AgentToolResult, error) {
    var in struct{ ID, Content string }
    if err := json.Unmarshal(params, &in); err != nil {
        return llmagent.AgentToolResult{}, err
    }
    fmt.Printf("[memory.archival_memory_update] id=%s len=%d\n", in.ID, len(in.Content))
    id := strings.TrimSpace(in.ID)
    if id == "" {
        id = randID()
    }
    t.S.UpdateArchival(MemoryBlock{ID: id, Content: in.Content})
    var resp map[string]any
    if strings.TrimSpace(in.Content) == "" {
        resp = map[string]any{"success": true, "action": "deleted"}
    } else {
        resp = map[string]any{"success": true, "action": "updated", "memory": map[string]any{"id": id, "content": in.Content}}
    }
    b, _ := json.Marshal(resp)
    return llmagent.AgentToolResult{Content: []llmsdk.Part{llmsdk.NewTextPart(string(b))}, IsError: false}, nil
}

func main() {
    godotenv.Load("../.env")

    apiKey := os.Getenv("OPENAI_API_KEY")
    if apiKey == "" {
        log.Fatal("OPENAI_API_KEY must be set")
    }
    model := openai.NewOpenAIModel("gpt-4o", openai.OpenAIModelOptions{APIKey: apiKey})

    store := NewStore()

    // Instructions: static memory guide + dynamic core memories snapshot
    memPrompt := `You can remember information learned from interactions with the user in two types of memory called core memory and archival memory.
Core memory is always available in your conversation context, providing essential, foundational context for keeping track of key details about the user.
As core memory is limited in size, it is important to only store the most important information. For other less important details, use archival memory.
Archival memory is infinite size, but is held outside of your immediate context, so you must explicitly run a search operation to see data inside it.
Archival memory is used to remember less significant details about the user or information found during the conversation. When the user mentions a name, topic, or details you don't know, search your archival memory to see if you have any information about it.`

    rulesPrompt := `You cannot see prior conversation turns beyond what is provided in the current input.
When a user shares a durable preference or profile detail, call core_memory_update to store it.
When asked to recall such facts and it's not present in the current input, rely on the core memories in this prompt.
For less important or long-tail info, use archival_memory_search before answering.`

    coreInstr := func(ctx context.Context, _ Ctx) (string, error) {
        blocks := store.FetchCore()
        b, _ := json.Marshal(blocks)
        return "Core memories (JSON list):\n" + string(b), nil
    }

    agent := llmagent.NewAgent("memory", model,
        llmagent.WithInstructions(
            llmagent.InstructionParam[Ctx]{String: &memPrompt},
            llmagent.InstructionParam[Ctx]{String: &rulesPrompt},
            llmagent.InstructionParam[Ctx]{Func: coreInstr},
        ),
        llmagent.WithTools(
            &CoreMemoryUpdateTool{S: store},
            &ArchivalSearchTool{S: store},
            &ArchivalUpdateTool{S: store},
        ),
    )

    // Demo: four independent sessions (agent cannot see prior turns except via memory)
    ctx := context.Background()

    // Turn 1 — store a core memory
    items1 := []llmagent.AgentItem{
        llmagent.NewAgentItemMessage(llmsdk.NewUserMessage(llmsdk.NewTextPart("Remember that my favorite color is blue."))),
    }
    fmt.Println("[user] Remember that my favorite color is blue.")
    res1, err := agent.Run(ctx, llmagent.AgentRequest[Ctx]{Context: Ctx{}, Input: items1})
    if err != nil {
        log.Fatal(err)
    }
    litter.Dump(res1.Content)

    // Turn 2 — recall using core memory
    items2 := []llmagent.AgentItem{
        llmagent.NewAgentItemMessage(llmsdk.NewUserMessage(llmsdk.NewTextPart("What's my favorite color?"))),
    }
    fmt.Println("[user] What's my favorite color?")
    res2, err := agent.Run(ctx, llmagent.AgentRequest[Ctx]{Context: Ctx{}, Input: items2})
    if err != nil {
        log.Fatal(err)
    }
    litter.Dump(res2.Content)

    // Turn 3 — capture background notes for later lookup (archival)
    turn3 := "I captured some background notes titled 'q3-report-research' for future reference: " +
        "Key data sources for the Q3 report include Salesforce pipeline exports, Google Analytics weekly sessions, and the paid ads spend spreadsheet. " +
        "Please tuck this away so you can look it up later."
    items3 := []llmagent.AgentItem{
        llmagent.NewAgentItemMessage(llmsdk.NewUserMessage(llmsdk.NewTextPart(turn3))),
    }
    fmt.Println("[user] " + turn3)
    res3, err := agent.Run(ctx, llmagent.AgentRequest[Ctx]{Context: Ctx{}, Input: items3})
    if err != nil {
        log.Fatal(err)
    }
    litter.Dump(res3.Content)

    // Turn 4 — fetch the saved background notes via search
    turn4 := "Can you pull up what we have under 'q3-report-research'?"
    items4 := []llmagent.AgentItem{
        llmagent.NewAgentItemMessage(llmsdk.NewUserMessage(llmsdk.NewTextPart(turn4))),
    }
    fmt.Println("[user] " + turn4)
    res4, err := agent.Run(ctx, llmagent.AgentRequest[Ctx]{Context: Ctx{}, Input: items4})
    if err != nil {
        log.Fatal(err)
    }
    litter.Dump(res4.Content)
}

func randID() string {
    // simple pseudo id: derived from the process id, so it is constant within
    // a run; replace with a real unique-id generator in production
    return fmt.Sprintf("%x", os.Getpid())
}
```