Planner-Executor
The planner-executor pattern gives the model a private to-do list so it can tackle multi-step work without forgetting what comes next. It is popular for coding agents and investigative flows because the model can revise the plan between turns, mark items complete, and keep the bookkeeping out of the user-facing conversation.
sequenceDiagram
  participant User
  participant Agent
  participant Plan as Plan store
  participant Tool as update_plan tool
  User->>Agent: task prompt
  Agent->>Plan: load current plan (if any)
  Agent->>Tool: update_plan(explanation, steps)
  Tool-->>Agent: plan accepted
  Agent->>Plan: persist snapshot
  Agent->>User: execute step / respond
  Agent->>Plan: mark progress next turn
Each cycle the model may adjust the plan before acting; your code inspects the proposal, tweaks it if necessary, and persists the snapshot so you can resume later or render progress in the UI.
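For example, a minimal persistence hook could write each accepted snapshot to disk and reload it when a session resumes. The sketch below is an illustration, not part of the SDK: it assumes Node's node:fs/promises and a hypothetical file path, and the snapshot shape mirrors the tool payload described next.

import { readFile, writeFile } from "node:fs/promises";

// Illustrative snapshot helpers; the path and shape are assumptions for this sketch.
const SNAPSHOT_PATH = "./plan-snapshot.json";

interface PlanSnapshot {
  explanation: string;
  plan: { status: "pending" | "in_progress" | "complete"; step: string }[];
}

// Call after each accepted update_plan so a restarted process can resume.
async function savePlanSnapshot(snapshot: PlanSnapshot): Promise<void> {
  await writeFile(SNAPSHOT_PATH, JSON.stringify(snapshot, null, 2), "utf8");
}

// Returns null when no snapshot exists yet.
async function loadPlanSnapshot(): Promise<PlanSnapshot | null> {
  try {
    return JSON.parse(await readFile(SNAPSHOT_PATH, "utf8")) as PlanSnapshot;
  } catch {
    return null;
  }
}

Where the snapshot lives (file, database, KV store) is up to you; the point is that the bookkeeping happens outside the conversation the user sees.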
The only tool required is update_plan with payload { explanation?: string; plan: { status: "pending" | "in_progress" | "complete"; step: string; }[] }, so validate it, cap the number of steps if you want, and reject plans that skip prerequisites. Well-crafted instructions remind the model to update the plan before long actions, mark finished items as complete, and keep the list short for clarity.
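One way to enforce those checks, sketched here under assumed limits rather than anything the SDK prescribes: run a small validator before the plan is stored, and return any error string to the model so it can repair the plan on its next turn. The step cap and the prerequisite rule are illustrative choices.

type PlanStep = {
  status: "pending" | "in_progress" | "complete";
  step: string;
};

const MAX_PLAN_STEPS = 10; // arbitrary cap for this sketch

// Returns an error message for invalid plans, or null when the plan is acceptable.
function validatePlan(plan: PlanStep[]): string | null {
  if (plan.length === 0) return "Plan must contain at least one step.";
  if (plan.length > MAX_PLAN_STEPS)
    return `Plan has ${plan.length} steps; merge related work into at most ${MAX_PLAN_STEPS}.`;
  // Reject plans that skip prerequisites: a step may not be started or
  // completed while an earlier step is still pending.
  for (let i = 0; i < plan.length; i++) {
    const earlierPending = plan.slice(0, i).some((p) => p.status === "pending");
    if (plan[i].status !== "pending" && earlierPending) {
      return `Step ${i + 1} cannot be ${plan[i].status} while earlier steps are pending.`;
    }
  }
  return null;
}

In the update_plan handler, a non-null result can be returned as the tool output with is_error set instead of persisting the plan, which prompts the model to fix it on the next turn.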
Implementation
import { Agent, type AgentItem } from "@hoangvvo/llm-agent";
import { zodTool } from "@hoangvvo/llm-agent/zod";
import { z } from "zod";
import { getModel } from "./get-model.ts";

interface PlanItem {
  status: "pending" | "in_progress" | "complete";
  step: string;
}

class PlanStore {
  #plan: PlanItem[] = [];
  #explanation = "";

  list(): PlanItem[] {
    return this.#plan.slice();
  }

  set(plan: PlanItem[], explanation: string) {
    this.#plan = plan.slice();
    this.#explanation = explanation;
  }

  explanation(): string {
    return this.#explanation;
  }
}

const planStore = new PlanStore();

function formatPlan(): string {
  const list = planStore.list();
  const lines: string[] = [];
  lines.push(
    `\n─ PLAN (internal) · ${list.length} item${list.length === 1 ? "" : "s"}`,
  );
  const expl = planStore.explanation();
  if (expl) lines.push(`Explanation: ${expl}`);
  if (list.length === 0) {
    lines.push("(empty)");
  } else {
    const symbol = (s: PlanItem["status"]) =>
      s === "complete" ? "✓" : s === "in_progress" ? "▸" : "○";
    for (const t of list) {
      lines.push(`${symbol(t.status)} ${t.step}`);
    }
  }
  return lines.join("\n");
}

function clearAndRenderScreen(messages: string[]) {
  // Clear the console for a clean redraw
  try {
    console.clear();
  } catch {
    process.stdout.write("\x1b[2J\x1b[H");
  }
  // Print assistant messages back-to-back
  if (messages.length > 0) {
    process.stdout.write(messages.join("\n\n") + "\n\n");
  }
  // Always render internal plan at the bottom
  process.stdout.write(formatPlan() + "\n");
}

const updatePlanTool = zodTool({
  name: "update_plan",
  description:
    "Replace internal plan with a new list of steps (status + step) and optional explanation.",
  parameters: z.object({
    explanation: z.string(),
    plan: z
      .array(
        z
          .object({
            status: z.enum(["pending", "in_progress", "complete"]),
            step: z.string(),
          })
          .strict(),
      )
      .nonempty(),
  }),
  async execute({ explanation, plan }) {
    planStore.set(plan, explanation);
    return {
      content: [
        { type: "text", text: JSON.stringify({ ok: true, explanation, plan }) },
      ],
      is_error: false,
    };
  },
});
const model = getModel("openai", "gpt-4o");
const agent = new Agent<void>({
  name: "planner-executor",
  model,
  instructions: [
    `You are a planner–executor assistant.
Break the user's goal into clear, actionable steps using the tool update_plan (explanation, plan: [{status, step}]).
Use the TODO tools strictly as your internal plan: NEVER reveal or enumerate TODO items to the user. Do not mention the words TODO, task list, or the names of tools.
Keep user-visible replies concise and focused on results and next-step confirmations.
Work iteratively: plan an initial set of high-level steps, then refine/execute one major step per turn, marking completed items along the way via tools.
When the work is complete, respond with the final deliverable and a brief one-paragraph summary of what you did.`,
    () => {
      const rows = planStore
        .list()
        .map((p, i) => `${i + 1}. [${p.status}] ${p.step}`)
        .join("\n");
      const expl = planStore.explanation();
      return `INTERNAL PLAN:\n${rows}\nExplanation: ${expl}`;
    },
  ],
  tools: [updatePlanTool],
  max_turns: 20,
});

// Demo: multi-turn execution for a complex task
const items: AgentItem[] = [
  {
    type: "message",
    role: "user",
    content: [
      {
        type: "text",
        text:
          "You are hired to produce a concise PRD (Product Requirements Document) for a travel booking app. " +
          "Do high-level planning and execution across turns: outline the PRD structure, then draft sections " +
          "(Overview, Target Users, Core Features, MVP Scope, Non-Goals, Success Metrics, Risks), and finally " +
          "produce the final PRD in markdown. Keep replies brief and focused on progress/results only.",
      },
    ],
  },
];

const messages: string[] = [];
clearAndRenderScreen(messages);
for (let turn = 1; ; turn += 1) {
  const res = await agent.run({ input: items, context: undefined });

  // Capture only assistant-visible text to display back-to-back
  const visibleText = res.content
    .filter((p) => p.type === "text")
    .map((p) => (p as any).text as string)
    .join("\n");
  if (visibleText.trim()) messages.push(visibleText.trim());

  clearAndRenderScreen(messages);

  // Append agent output items to the conversation
  items.push(...res.output);
  // Stop when a plan exists and all steps are marked complete
  const plan = planStore.list();
  const havePlan = plan.length > 0;
  const allDone = havePlan && plan.every((p) => p.status === "complete");
  if (allDone) break;

  // Otherwise continue to the next turn
  items.push({
    type: "message",
    role: "user",
    content: [{ type: "text", text: "NEXT" }],
  });
}

// Final render to ensure the last state persists on screen
clearAndRenderScreen(messages);
use std::{
    io::Write,
    sync::{Arc, Mutex},
};

use dotenvy::dotenv;
use llm_agent::{Agent, AgentItem, AgentRequest, AgentTool, AgentToolResult};
use llm_sdk::{JSONSchema, Message, Part};
use serde::{Deserialize, Serialize};
use serde_json::json;

#[derive(Clone, Serialize, Deserialize)]
struct PlanItem {
    status: String,
    step: String,
}

#[derive(Default, Clone)]
struct Store {
    m: Arc<Mutex<Vec<PlanItem>>>,
    explanation: Arc<Mutex<String>>,
}

impl Store {
    fn list(&self) -> Vec<PlanItem> {
        self.m.lock().unwrap().clone()
    }
    fn set(&self, next: Vec<PlanItem>, explanation: String) {
        *self.m.lock().unwrap() = next;
        *self.explanation.lock().unwrap() = explanation;
    }
    fn explanation(&self) -> String {
        self.explanation.lock().unwrap().clone()
    }
}

fn format_todos(s: &Store) -> String {
    let list = s.list();
    let mut out = String::new();
    out.push_str(&format!("\n─ PLAN (internal) · {} items\n", list.len()));
    let expl = s.explanation();
    if !expl.is_empty() {
        out.push_str(&format!("Explanation: {}\n", expl));
    }
    if list.is_empty() {
        out.push_str("(empty)\n");
        return out;
    }
    for t in list {
        let sym = match t.status.trim() {
            "in_progress" => "▸",
            "complete" => "✓",
            _ => "○",
        };
        out.push_str(&format!("{} {}\n", sym, t.step));
    }
    out
}

fn clear_and_render(messages: &[String], s: &Store) {
    // Clear screen and position cursor at (1,1)
    print!("\x1B[2J\x1B[1;1H");
    let _ = std::io::stdout().flush();
    if !messages.is_empty() {
        println!("{}\n", messages.join("\n\n"));
    }
    print!("{}", format_todos(s));
    let _ = std::io::stdout().flush();
}
type Ctx = ();
struct UpdatePlan {
    s: Store,
}

#[async_trait::async_trait]
impl AgentTool<Ctx> for UpdatePlan {
    fn name(&self) -> String {
        "update_plan".into()
    }
    fn description(&self) -> String {
        "Replace internal plan with explanation and steps".into()
    }
    fn parameters(&self) -> JSONSchema {
        json!({
            "type": "object",
            "properties": {
                "explanation": { "type": "string" },
                "plan": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "status": { "type": "string", "enum": ["pending", "in_progress", "complete"] },
                            "step": { "type": "string" }
                        },
                        "required": ["status", "step"],
                        "additionalProperties": false
                    }
                }
            },
            "required": ["explanation", "plan"],
            "additionalProperties": false
        })
    }
    async fn execute(
        &self,
        args: serde_json::Value,
        _ctx: &Ctx,
        _state: &llm_agent::RunState,
    ) -> Result<AgentToolResult, Box<dyn std::error::Error + Send + Sync>> {
        #[derive(Deserialize)]
        struct In {
            explanation: String,
            plan: Vec<PlanItem>,
        }
        let p: In = serde_json::from_value(args)?;
        self.s.set(p.plan.clone(), p.explanation.clone());
        Ok(AgentToolResult {
            content: vec![Part::text(
                json!({"ok": true, "explanation": p.explanation, "plan": p.plan}).to_string(),
            )],
            is_error: false,
        })
    }
}

#[tokio::main]
async fn main() {
    dotenv().ok();
    let model = Arc::new(llm_sdk::openai::OpenAIModel::new(
        "gpt-4o",
        llm_sdk::openai::OpenAIModelOptions {
            api_key: std::env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY must be set"),
            ..Default::default()
        },
    ));

    let store = Store::default();
    let overview = "You are a planner–executor assistant.\nBreak the user's goal into clear, \
                    actionable steps using the tool update_plan (explanation, plan: [{status, \
                    step}]).\nUse the plan strictly as your internal plan: NEVER reveal or \
                    enumerate plan items to the user. Do not mention the words TODO, task list, \
                    or the names of tools.\nKeep user-visible replies concise and focused on \
                    results and next-step confirmations.\nWork iteratively: plan an initial set \
                    of high-level steps, then refine/execute one major step per turn, marking \
                    completed items along the way via tools.\nWhen the work is complete, respond \
                    with the final deliverable and a brief one-paragraph summary of what you did.";

    let agent = Agent::new(
        llm_agent::AgentParams::new("planner-executor", model)
            .add_instruction(overview)
            .add_tool(UpdatePlan { s: store.clone() })
            .max_turns(20),
    );

    let mut items: Vec<AgentItem> = vec![AgentItem::Message(Message::user(vec![Part::text(
        "You are hired to produce a concise PRD (Product Requirements Document) for a travel \
         booking app. Do high-level planning and execution across turns: outline the PRD \
         structure, then draft sections (Overview, Target Users, Core Features, MVP Scope, \
         Non-Goals, Success Metrics, Risks), and finally produce the final PRD in markdown. Keep \
         replies brief and focused on progress/results only.",
    )]))];

    let mut messages: Vec<String> = vec![];
    clear_and_render(&messages, &store);

    loop {
        let res = agent
            .run(AgentRequest {
                context: (),
                input: items.clone(),
            })
            .await
            .expect("run failed");
        let mut visible: Vec<String> = vec![];
        for p in &res.content {
            if let Part::Text(t) = p {
                visible.push(t.text.clone())
            }
        }
        if !visible.is_empty() {
            messages.push(visible.join("\n").trim().to_string());
        }
        clear_and_render(&messages, &store);

        items.extend(res.output);
        let list = store.list();
        let all_done = !list.is_empty() && list.iter().all(|t| t.status.trim() == "complete");
        if all_done {
            break;
        }
        items.push(AgentItem::Message(Message::user(vec![Part::text("NEXT")])))
    }

    clear_and_render(&messages, &store);
}
package main
import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "strings"

    llmagent "github.com/hoangvvo/llm-sdk/agent-go"
    llmsdk "github.com/hoangvvo/llm-sdk/sdk-go"
    "github.com/hoangvvo/llm-sdk/sdk-go/openai"
    "github.com/joho/godotenv"
)

// Internal TODO model and store
type Todo struct {
    Status string `json:"status"`
    Step   string `json:"step"`
}

type Store struct {
    m           map[string]*Todo
    explanation string
}
func NewStore() *Store { return &Store{m: map[string]*Todo{}, explanation: ""} }

// List returns plan items in the order they were submitted (keys are the
// numeric indexes assigned by ResetWith), so the rendered plan keeps the
// model's ordering rather than Go's random map iteration order.
func (s *Store) List() []*Todo {
    out := make([]*Todo, 0, len(s.m))
    for i := 0; i < len(s.m); i++ {
        if t, ok := s.m[fmt.Sprintf("%d", i)]; ok {
            out = append(out, t)
        }
    }
    return out
}

func (s *Store) Explanation() string { return s.explanation }

func (s *Store) ResetWith(plan []Todo, explanation string) {
    s.m = map[string]*Todo{}
    for i := range plan {
        it := plan[i]
        key := fmt.Sprintf("%d", i)
        itCopy := it
        s.m[key] = &itCopy
    }
    s.explanation = explanation
}
func formatTodos(s *Store) string {
    list := s.List()
    var b strings.Builder
    fmt.Fprintf(&b, "\n─ PLAN (internal) · %d items\n", len(list))
    if s.Explanation() != "" {
        fmt.Fprintf(&b, "Explanation: %s\n", s.Explanation())
    }
    if len(list) == 0 {
        b.WriteString("(empty)\n")
        return b.String()
    }
    for _, t := range list {
        sym := "○"
        if strings.EqualFold(strings.TrimSpace(t.Status), "in_progress") {
            sym = "▸"
        }
        if strings.EqualFold(strings.TrimSpace(t.Status), "complete") {
            sym = "✓"
        }
        fmt.Fprintf(&b, "%s %s\n", sym, t.Step)
    }
    return b.String()
}
func clearAndRender(messages []string, s *Store) {
    // Clear the console with ANSI escape codes
    fmt.Print("\033[2J\033[H")
    if len(messages) > 0 {
        fmt.Println(strings.Join(messages, "\n\n"))
        fmt.Println()
    }
    fmt.Print(formatTodos(s))
}
// No context for this example
type Ctx = struct{}

// Single tool: update_plan
type UpdatePlanTool struct{ S *Store }

func (t *UpdatePlanTool) Name() string { return "update_plan" }

func (t *UpdatePlanTool) Description() string {
    return "Replace internal plan with explanation and steps"
}

func (t *UpdatePlanTool) Parameters() llmsdk.JSONSchema {
    // Strict schema: all properties required, no additional
    item := map[string]any{
        "type": "object",
        "properties": map[string]any{
            "status": map[string]any{"type": "string", "enum": []string{"pending", "in_progress", "complete"}},
            "step":   map[string]any{"type": "string"},
        },
        "required":             []string{"status", "step"},
        "additionalProperties": false,
    }
    m := llmsdk.JSONSchema{"type": "object"}
    m["properties"] = map[string]any{
        "explanation": map[string]any{"type": "string"},
        "plan":        map[string]any{"type": "array", "items": item},
    }
    m["required"] = []string{"explanation", "plan"}
    m["additionalProperties"] = false
    return m
}

func (t *UpdatePlanTool) Execute(_ context.Context, params json.RawMessage, _ Ctx, _ *llmagent.RunState) (llmagent.AgentToolResult, error) {
    var p struct {
        Explanation string `json:"explanation"`
        Plan        []Todo `json:"plan"`
    }
    if err := json.Unmarshal(params, &p); err != nil {
        return llmagent.AgentToolResult{}, err
    }
    t.S.ResetWith(p.Plan, p.Explanation)
    body, _ := json.Marshal(map[string]any{"ok": true, "explanation": p.Explanation, "plan": t.S.List()})
    return llmagent.AgentToolResult{Content: []llmsdk.Part{llmsdk.NewTextPart(string(body))}, IsError: false}, nil
}
func main() {
    godotenv.Load("../.env")
    apiKey := os.Getenv("OPENAI_API_KEY")
    if apiKey == "" {
        panic("OPENAI_API_KEY must be set")
    }

    model := openai.NewOpenAIModel("gpt-4o", openai.OpenAIModelOptions{APIKey: apiKey})

    store := NewStore()

    // Build agent
    overview := `You are a planner–executor assistant.
Break the user's goal into clear, actionable steps using the tool update_plan (explanation, plan: [{status, step}]).
Use the plan strictly as your internal plan: NEVER reveal or enumerate plan items to the user. Do not mention the words TODO, task list, or the names of tools.
Keep user-visible replies concise and focused on results and next-step confirmations.
Work iteratively: plan an initial set of high-level steps, then refine/execute one major step per turn, marking completed items along the way via tools.
When the work is complete, respond with the final deliverable and a brief one-paragraph summary of what you did.`

    agent := llmagent.NewAgent("planner-executor", model,
        llmagent.WithInstructions(
            llmagent.InstructionParam[Ctx]{String: &overview},
            // Dynamic instruction: inject internal plan
            llmagent.InstructionParam[Ctx]{Func: func(_ context.Context, _ Ctx) (string, error) {
                var b strings.Builder
                b.WriteString("INTERNAL PLAN:\n")
                list := store.List()
                for i, it := range list {
                    fmt.Fprintf(&b, "%d. [%s] %s\n", i+1, it.Status, it.Step)
                }
                if store.Explanation() != "" {
                    fmt.Fprintf(&b, "Explanation: %s\n", store.Explanation())
                }
                return b.String(), nil
            }},
        ),
        llmagent.WithTools(&UpdatePlanTool{S: store}),
        llmagent.WithMaxTurns[Ctx](20),
    )

    // Conversation
    items := []llmagent.AgentItem{
        llmagent.NewAgentItemMessage(llmsdk.NewUserMessage(llmsdk.NewTextPart(
            "You are hired to produce a concise PRD (Product Requirements Document) for a travel booking app. " +
                "Do high-level planning and execution across turns: outline the PRD structure, then draft sections " +
                "(Overview, Target Users, Core Features, MVP Scope, Non-Goals, Success Metrics, Risks), and finally " +
                "produce the final PRD in markdown. Keep replies brief and focused on progress/results only.",
        ))),
    }

    var messages []string
    clearAndRender(messages, store)

    ctx := context.Background()
    for turn := 1; ; turn++ {
        res, err := agent.Run(ctx, llmagent.AgentRequest[Ctx]{Context: Ctx{}, Input: items})
        if err != nil {
            panic(err)
        }
        // Append assistant-visible text
        var visible []string
        for _, p := range res.Content {
            if p.TextPart != nil {
                visible = append(visible, p.TextPart.Text)
            }
        }
        if len(visible) > 0 {
            messages = append(messages, strings.TrimSpace(strings.Join(visible, "\n")))
        }
        clearAndRender(messages, store)

        // Append output
        items = append(items, res.Output...)
        // Stop when a plan exists and every step is marked complete
        list := store.List()
        allDone := len(list) > 0
        for _, t := range list {
            allDone = allDone && strings.EqualFold(strings.TrimSpace(t.Status), "complete")
        }
        if allDone {
            break
        }
        items = append(items, llmagent.NewAgentItemMessage(llmsdk.NewUserMessage(llmsdk.NewTextPart("NEXT"))))
    }

    clearAndRender(messages, store)
}
Pair this pattern with the Run Session lifecycle so each user or request gets an isolated copy of the plan.
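A sketch of that isolation, assuming you lift the module-level planStore and update_plan tool from the TypeScript example into a per-session factory (the factory name here is hypothetical; it reuses Agent, zodTool, z, model, and PlanStore from the listing above):

// Hypothetical per-session factory: every session owns its own PlanStore and
// an update_plan tool bound to that store, so concurrent runs never see each
// other's plans.
function createPlannerSession() {
  const store = new PlanStore();
  const updatePlan = zodTool({
    name: "update_plan",
    description: "Replace internal plan with steps and an explanation.",
    parameters: z.object({
      explanation: z.string(),
      plan: z
        .array(
          z.object({
            status: z.enum(["pending", "in_progress", "complete"]),
            step: z.string(),
          }),
        )
        .nonempty(),
    }),
    async execute({ explanation, plan }) {
      store.set(plan, explanation);
      return {
        content: [{ type: "text", text: JSON.stringify({ ok: true }) }],
        is_error: false,
      };
    },
  });
  const agent = new Agent<void>({
    name: "planner-executor",
    model,
    instructions: ["...same planner-executor instructions as above..."],
    tools: [updatePlan],
    max_turns: 20,
  });
  return { agent, store };
}

However the session is managed, the important part is that nothing plan-related lives at module scope.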