Restore a conversation using the same ID and persistence directory:
# Later, in a different session
del conversation

# Deserialize the conversation from the persistence directory.
print("Deserializing conversation...")
conversation = Conversation(
    agent=agent,
    callbacks=[conversation_callback],
    workspace=cwd,
    persistence_dir=persistence_dir,
    conversation_id=conversation_id,  # same ID -> same saved state
)
conversation.send_message("Continue task")
conversation.run()  # Continues from saved state
When you set a persistence_dir, your conversation will be persisted to a directory structure where each
conversation has its own subdirectory. By default, the persistence directory is workspace/conversations/
(unless you specify a custom path).

Directory structure:
workspace/conversations
<conversation-id-1>
base_state.json
events
event-00000-<event-id>.json
event-00001-<event-id>.json
...
<conversation-id-2>
...
Each conversation directory contains:
base_state.json: The core conversation state including agent configuration, execution status, statistics, and metadata
events/: A subdirectory containing individual event files, each named with a sequential index and event ID (e.g., event-00000-abc123.json)
The collection of event files in the events/ directory represents the same trajectory data you would find in the trajectory.json file from OpenHands V0, but split into individual files for better performance and granular access.
import os
import uuid

from pydantic import SecretStr

from openhands.sdk import (
    LLM,
    Agent,
    Conversation,
    Event,
    LLMConvertibleEvent,
    get_logger,
)
from openhands.sdk.tool import Tool
from openhands.tools.file_editor import FileEditorTool
from openhands.tools.terminal import TerminalTool

logger = get_logger(__name__)

# --- LLM configuration (driven entirely by environment variables) ---
api_key = os.getenv("LLM_API_KEY")
assert api_key is not None, "LLM_API_KEY environment variable is not set."
model = os.getenv("LLM_MODEL", "anthropic/claude-sonnet-4-5-20250929")
base_url = os.getenv("LLM_BASE_URL")
llm = LLM(
    usage_id="agent",
    model=model,
    base_url=base_url,
    api_key=SecretStr(api_key),
)

# --- Tools available to the agent ---
cwd = os.getcwd()
tools = [
    Tool(name=TerminalTool.name),
    Tool(name=FileEditorTool.name),
]

# --- MCP tools (fetch server lets the agent retrieve web pages) ---
mcp_config = {
    "mcpServers": {
        "fetch": {"command": "uvx", "args": ["mcp-server-fetch"]},
    }
}

# --- Agent ---
agent = Agent(llm=llm, tools=tools, mcp_config=mcp_config)

llm_messages = []  # collect raw LLM messages


def conversation_callback(event: Event):
    """Accumulate every LLM-convertible event as a raw LLM message."""
    if isinstance(event, LLMConvertibleEvent):
        llm_messages.append(event.to_llm_message())


# A fixed conversation ID plus a persistence_dir is what makes the
# conversation restorable later.
conversation_id = uuid.uuid4()
persistence_dir = "./.conversations"

conversation = Conversation(
    agent=agent,
    callbacks=[conversation_callback],
    workspace=cwd,
    persistence_dir=persistence_dir,
    conversation_id=conversation_id,
)
conversation.send_message(
    "Read https://github.com/OpenHands/OpenHands. Then write 3 facts "
    "about the project into FACTS.txt."
)
conversation.run()

conversation.send_message("Great! Now delete that file.")
conversation.run()

print("=" * 100)
print("Conversation finished. Got the following LLM messages:")
for i, message in enumerate(llm_messages):
    print(f"Message {i}: {str(message)[:200]}")

# --- Conversation persistence round-trip ---
# State is already on disk; dropping the object simulates ending the session.
print("Serializing conversation...")
del conversation

# Deserialize the conversation: same ID + persistence_dir restores state.
print("Deserializing conversation...")
conversation = Conversation(
    agent=agent,
    callbacks=[conversation_callback],
    workspace=cwd,
    persistence_dir=persistence_dir,
    conversation_id=conversation_id,
)
print("Sending message to deserialized conversation...")
conversation.send_message("Hey what did you create? Return an agent finish action")
conversation.run()

# --- Report cost ---
cost = llm.metrics.accumulated_cost
print(f"EXAMPLE_COST: {cost}")
You can run the example code as-is.
The model name should follow the LiteLLM convention: provider/model_name (e.g., anthropic/claude-sonnet-4-5-20250929, openai/gpt-4o).
The LLM_API_KEY should be the API key for your chosen provider.
ChatGPT Plus/Pro subscribers: You can use LLM.subscription_login() to authenticate with your ChatGPT account and access Codex models without consuming API credits. See the LLM Subscriptions guide for details.