Text generation
The text modality is represented as `TextPart` objects.
TypeScript

```ts
interface TextPart {
  type: "text";
  text: string;
  citations?: Citation[];
}
```

Rust

```rust
pub struct TextPart {
    pub text: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub citations: Option<Vec<Citation>>,
}
```

Go

```go
type TextPart struct {
	Text      string     `json:"text"`
	Citations []Citation `json:"citations,omitempty"`
}
```

Generate Text

Use `generate()` to call the language model with `TextPart` objects.

TypeScript

```ts
import { getModel } from "./get-model.ts";

const model = getModel("openai", "gpt-4o");

const response = await model.generate({
  messages: [
    {
      role: "user",
      content: [{ type: "text", text: "Tell me a story." }],
    },
    {
      role: "assistant",
      content: [
        { type: "text", text: "What kind of story would you like to hear?" },
      ],
    },
    {
      role: "user",
      content: [{ type: "text", text: "A fairy tale." }],
    },
  ],
});

console.dir(response, { depth: null });
```

Rust

```rust
use dotenvy::dotenv;
use llm_sdk::{LanguageModelInput, Message, Part};

mod common;

#[tokio::main]
async fn main() {
    dotenv().ok();

    let model = common::get_model("openai", "gpt-4o");

    let response = model
        .generate(LanguageModelInput {
            messages: vec![
                Message::user(vec![Part::text("Tell me a story.")]),
                Message::assistant(vec![Part::text(
                    "Sure! What kind of story would you like to hear?",
                )]),
                Message::user(vec![Part::text("a fairy tale")]),
            ],
            ..Default::default()
        })
        .await
        .unwrap();

    println!("{response:#?}");
}
```

Go

```go
package main

import (
	"context"
	"log"

	llmsdk "github.com/hoangvvo/llm-sdk/sdk-go"
	"github.com/hoangvvo/llm-sdk/sdk-go/examples"
	"github.com/sanity-io/litter"
)

func main() {
	model := examples.GetModel("openai", "gpt-4o")

	response, err := model.Generate(context.Background(), &llmsdk.LanguageModelInput{
		Messages: []llmsdk.Message{
			llmsdk.NewUserMessage(
				llmsdk.NewTextPart("Tell me a story."),
			),
			llmsdk.NewAssistantMessage(
				llmsdk.NewTextPart("What kind of story would you like to hear?"),
			),
			llmsdk.NewUserMessage(
				llmsdk.NewTextPart("A fairy tale."),
			),
		},
	})
	if err != nil {
		log.Fatalf("Generation failed: %v", err)
	}

	litter.Dump(response)
}
```
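To read the generated text back out, collect the text parts from the response. A minimal TypeScript sketch, assuming the response exposes its parts as a `content` array of the part types shown above (the shape dumped by the examples):

```ts
// Join the text of every text part in the model response.
// Assumes `response.content` is an array of parts such as TextPart.
const text = response.content
  .filter((part): part is TextPart => part.type === "text")
  .map((part) => part.text)
  .join("");

console.log(text);
```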
Stream Text

Text generation can also be streamed using the `stream()` method. In streamed responses, a `TextPart` is represented as a `TextPartDelta`.
TypeScript

```ts
interface TextPartDelta {
  type: "text";
  text: string;
  citation?: CitationDelta;
}
```

Rust

```rust
pub struct TextPartDelta {
    pub text: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub citation: Option<CitationDelta>,
}
```

Go

```go
type TextPartDelta struct {
	Text     string         `json:"text"`
	Citation *CitationDelta `json:"citation,omitempty"`
}
```

Individual text chunks can be combined to produce the final text output.
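Conceptually, combining text chunks just means concatenating the deltas in arrival order. A minimal TypeScript sketch over the `TextPartDelta` type above (the `StreamAccumulator` used in the examples below handles this for you, along with merging every other part type):

```ts
// Concatenate streamed text deltas into the final text output.
function combineTextDeltas(deltas: TextPartDelta[]): string {
  return deltas.map((delta) => delta.text).join("");
}
```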
TypeScript

```ts
import { StreamAccumulator } from "@hoangvvo/llm-sdk";
import { getModel } from "./get-model.ts";

const model = getModel("openai", "gpt-4o");

const response = model.stream({
  messages: [
    {
      role: "user",
      content: [{ type: "text", text: "Tell me a story." }],
    },
    {
      role: "assistant",
      content: [
        { type: "text", text: "What kind of story would you like to hear?" },
      ],
    },
    {
      role: "user",
      content: [{ type: "text", text: "A fairy tale." }],
    },
  ],
});

const accumulator = new StreamAccumulator();

let current = await response.next();
while (!current.done) {
  console.dir(current.value, { depth: null });
  accumulator.addPartial(current.value);
  current = await response.next();
}

const finalResponse = accumulator.computeResponse();
console.dir(finalResponse, { depth: null });
```

Rust

```rust
use dotenvy::dotenv;
use futures::stream::StreamExt;
use llm_sdk::{LanguageModelInput, Message, Part, StreamAccumulator};

mod common;

#[tokio::main]
async fn main() {
    dotenv().ok();

    let model = common::get_model("openai", "gpt-4o");

    let mut stream = model
        .stream(LanguageModelInput {
            messages: vec![
                Message::user(vec![Part::text("Tell me a story.")]),
                Message::assistant(vec![Part::text(
                    "Sure! What kind of story would you like to hear?",
                )]),
                Message::user(vec![Part::text("A fairy tale.")]),
            ],
            ..Default::default()
        })
        .await
        .unwrap();

    let mut accumulator = StreamAccumulator::new();

    while let Some(partial_response) = stream.next().await {
        let partial_response = partial_response.unwrap();
        accumulator.add_partial(partial_response.clone()).unwrap();
        println!("{partial_response:#?}");
    }

    let final_response = accumulator.compute_response();
    println!("{final_response:#?}");
}
```

Go

```go
package main

import (
	"context"
	"log"

	llmsdk "github.com/hoangvvo/llm-sdk/sdk-go"
	"github.com/hoangvvo/llm-sdk/sdk-go/examples"
	"github.com/sanity-io/litter"
)

func main() {
	model := examples.GetModel("openai", "gpt-4o")

	response, err := model.Stream(context.Background(), &llmsdk.LanguageModelInput{
		Messages: []llmsdk.Message{
			llmsdk.NewUserMessage(
				llmsdk.NewTextPart("Tell me a story."),
			),
			llmsdk.NewAssistantMessage(
				llmsdk.NewTextPart("What kind of story would you like to hear?"),
			),
			llmsdk.NewUserMessage(
				llmsdk.NewTextPart("A fairy tale."),
			),
		},
	})
	if err != nil {
		log.Fatalf("Stream failed: %v", err)
	}

	accumulator := llmsdk.NewStreamAccumulator()

	for response.Next() {
		current := response.Current()
		litter.Dump(current)

		if err := accumulator.AddPartial(*current); err != nil {
			log.Printf("Failed to add partial: %v", err)
		}
	}

	if err := response.Err(); err != nil {
		log.Fatalf("Stream error: %v", err)
	}

	finalResponse, err := accumulator.ComputeResponse()
	if err != nil {
		log.Fatalf("Failed to compute response: %v", err)
	}

	litter.Dump(finalResponse)
}
```
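The response computed by the accumulator mirrors the shape of a non-streaming `generate()` result, so the same text-extraction approach shown earlier should apply to streamed output as well.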