Skip to content

Migrating from LangGraph Cloud

This guide helps you migrate your existing LangGraph Cloud workflows to DuraGraph with minimal code changes.

| Benefit | Description |
| --- | --- |
| Open Source | Self-host and customize DuraGraph to your needs |
| Cost Control | No per-execution pricing, pay only for infrastructure |
| Data Sovereignty | Keep your data in your own infrastructure |
| LangGraph Compatible | Drop-in API compatibility with LangGraph Cloud |
| Enterprise Ready | Production-ready with PostgreSQL and NATS JetStream |
| Feature | LangGraph Cloud | DuraGraph | Status |
| --- | --- | --- | --- |
| Assistants API | ✅ | ✅ | Full parity |
| Threads API | ✅ | ✅ | Full parity |
| Runs API | ✅ | ✅ | Full parity |
| SSE Streaming | ✅ | ✅ | Full parity |
| Thread State | ✅ | ✅ | Full parity |
| Checkpoints | ✅ | ✅ | Full parity |
| Human-in-the-Loop | ✅ | ✅ | Full parity |
| Search/Count | ✅ | ✅ | Full parity |
| Webhooks | ✅ | 🚧 | Coming Q1 |
| Crons | ✅ | 🚧 | Coming Q2 |
| Store API | ✅ | 🚧 | Coming Q2 |
| LangGraph Studio | ✅ | — | Use DuraGraph Dashboard |
| LangGraph Cloud | DuraGraph | Method | Status |
| --- | --- | --- | --- |
| /ok | /ok | GET | ✅ |
| /info | /info | GET | ✅ |
| /health | /health | GET | ✅ |
| /metrics | /metrics | GET | ✅ |
| LangGraph Cloud | DuraGraph | Method | Status |
| --- | --- | --- | --- |
| /assistants | /api/v1/assistants | POST | ✅ |
| /assistants | /api/v1/assistants | GET | ✅ |
| /assistants/{id} | /api/v1/assistants/{id} | GET | ✅ |
| /assistants/{id} | /api/v1/assistants/{id} | PATCH | ✅ |
| /assistants/{id} | /api/v1/assistants/{id} | DELETE | ✅ |
| /assistants/search | /api/v1/assistants/search | POST | ✅ |
| /assistants/count | /api/v1/assistants/count | POST | ✅ |
| /assistants/{id}/versions | /api/v1/assistants/{id}/versions | GET | ✅ |
| /assistants/{id}/schemas | /api/v1/assistants/{id}/schemas | GET | ✅ |
| LangGraph Cloud | DuraGraph | Method | Status |
| --- | --- | --- | --- |
| /threads | /api/v1/threads | POST | ✅ |
| /threads | /api/v1/threads | GET | ✅ |
| /threads/{id} | /api/v1/threads/{id} | GET | ✅ |
| /threads/{id} | /api/v1/threads/{id} | PATCH | ✅ |
| /threads/{id} | /api/v1/threads/{id} | DELETE | ✅ |
| /threads/search | /api/v1/threads/search | POST | ✅ |
| /threads/count | /api/v1/threads/count | POST | ✅ |
| /threads/{id}/messages | /api/v1/threads/{id}/messages | POST | ✅ |
| /threads/{id}/state | /api/v1/threads/{id}/state | GET | ✅ |
| /threads/{id}/state | /api/v1/threads/{id}/state | POST | ✅ |
| /threads/{id}/history | /api/v1/threads/{id}/history | GET | ✅ |
| /threads/{id}/copy | /api/v1/threads/{id}/copy | POST | ✅ |
| LangGraph Cloud | DuraGraph | Method | Status |
| --- | --- | --- | --- |
| /threads/{id}/runs | /api/v1/threads/{id}/runs | POST | ✅ |
| /threads/{id}/runs | /api/v1/threads/{id}/runs | GET | ✅ |
| /threads/{id}/runs/{run_id} | /api/v1/threads/{id}/runs/{run_id} | GET | ✅ |
| /threads/{id}/runs/{run_id} | /api/v1/threads/{id}/runs/{run_id} | DELETE | ✅ |
| /threads/{id}/runs/{run_id}/cancel | /api/v1/threads/{id}/runs/{run_id}/cancel | POST | ✅ |
| /threads/{id}/runs/{run_id}/resume | /api/v1/threads/{id}/runs/{run_id}/resume | POST | ✅ |
| /threads/{id}/runs/{run_id}/join | /api/v1/threads/{id}/runs/{run_id}/join | GET | ✅ |
| /threads/{id}/runs/stream | /api/v1/threads/{id}/runs/stream | POST | ✅ |
| /runs | /api/v1/runs | POST | ✅ |
| /runs/wait | /api/v1/runs/wait | POST | ✅ |
| /runs/stream | /api/v1/runs/stream | POST | ✅ |
| /runs/batch | /api/v1/runs/batch | POST | ✅ |

The only required change is updating the base URL:

# Before (LangGraph Cloud)
from langgraph_sdk import get_client

client = get_client(url="https://api.smith.langchain.com")

# After (DuraGraph)
from langgraph_sdk import get_client

client = get_client(url="http://localhost:8081")  # Or your DuraGraph instance

# Before (LangGraph Cloud)
client = get_client(
    url="https://api.smith.langchain.com",
    api_key="lsv2_..."
)

# After (DuraGraph) - JWT or API Key
client = get_client(
    url="http://your-duragraph.com",
    api_key="your-duragraph-token"
)
import asyncio
from langgraph_sdk import get_client


async def main():
    """End-to-end example: create an assistant, a thread, a run, then stream events."""
    # Connect to DuraGraph (same SDK as LangGraph Cloud)
    client = get_client(url="http://localhost:8081")

    # Create an assistant
    assistant = await client.assistants.create(
        graph_id="chatbot",
        name="My Chatbot",
        config={"model": "gpt-4"}
    )
    print(f"Created assistant: {assistant['assistant_id']}")

    # Create a thread
    thread = await client.threads.create()
    print(f"Created thread: {thread['thread_id']}")

    # Start a run
    run = await client.runs.create(
        thread_id=thread["thread_id"],
        assistant_id=assistant["assistant_id"],
        input={"messages": [{"role": "user", "content": "Hello!"}]}
    )

    # Stream events as they arrive over SSE
    async for event in client.runs.stream(
        thread_id=thread["thread_id"],
        run_id=run["run_id"]
    ):
        print(f"Event: {event}")


asyncio.run(main())
import { Client } from '@langchain/langgraph-sdk';

// End-to-end example: create an assistant, a thread, then stream a run.
async function main() {
  // Connect to DuraGraph
  const client = new Client({
    apiUrl: 'http://localhost:8081',
  });

  // Create an assistant
  const assistant = await client.assistants.create({
    graphId: 'chatbot',
    name: 'My Chatbot',
    config: { model: 'gpt-4' },
  });
  console.log(`Created assistant: ${assistant.assistant_id}`);

  // Create a thread
  const thread = await client.threads.create();
  console.log(`Created thread: ${thread.thread_id}`);

  // Start a run with streaming
  const stream = client.runs.stream(thread.thread_id, assistant.assistant_id, {
    input: { messages: [{ role: 'user', content: 'Hello!' }] },
  });
  for await (const event of stream) {
    console.log('Event:', event);
  }
}

main();
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

const baseURL = "http://localhost:8081/api/v1"

func main() {
	ctx := context.Background()

	// Create an assistant.
	assistantPayload := map[string]interface{}{
		"name":  "My Chatbot",
		"model": "gpt-4",
	}
	// NOTE: errors are ignored here for brevity; production code should check them
	// (on error the returned map is nil and the type assertion below would panic).
	assistant, _ := post(ctx, "/assistants", assistantPayload)
	fmt.Printf("Created assistant: %s\n", assistant["assistant_id"])

	// Create a thread.
	thread, _ := post(ctx, "/threads", map[string]interface{}{})
	fmt.Printf("Created thread: %s\n", thread["thread_id"])

	// Start a run on the thread.
	runPayload := map[string]interface{}{
		"assistant_id": assistant["assistant_id"],
		"input": map[string]interface{}{
			"messages": []map[string]string{
				{"role": "user", "content": "Hello!"},
			},
		},
	}
	threadID := thread["thread_id"].(string)
	run, _ := post(ctx, fmt.Sprintf("/threads/%s/runs", threadID), runPayload)
	fmt.Printf("Created run: %s\n", run["run_id"])
}

// post sends payload as JSON to baseURL+path and decodes the JSON object response.
func post(ctx context.Context, path string, payload map[string]interface{}) (map[string]interface{}, error) {
	body, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, "POST", baseURL+path, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var result map[string]interface{}
	if err := json.Unmarshal(data, &result); err != nil {
		return nil, err
	}
	return result, nil
}

DuraGraph fully supports LangGraph’s Command pattern for human-in-the-loop workflows:

# Resume a run that's waiting for human input
await client.runs.resume(
thread_id=thread_id,
run_id=run_id,
command={
"resume": "approved", # Or any value to pass to the graph
"update": {"key": "value"}, # Optional state updates
}
)
# Create run with interrupt points
run = await client.runs.create(
thread_id=thread_id,
assistant_id=assistant_id,
input={"message": "Process this"},
interrupt_before=["approval_node"], # Pause before this node
interrupt_after=["review_node"], # Pause after this node
)
| Setting | LangGraph Cloud | DuraGraph |
| --- | --- | --- |
| Base URL | https://api.smith.langchain.com | http://your-instance:8081 |
| API Prefix | / | /api/v1/ |
| Auth Header | X-API-Key or Authorization | Authorization: Bearer <token> |
| Streaming | SSE | SSE (compatible) |
| Checkpoints | Managed | PostgreSQL-backed |
Terminal window
# Clone DuraGraph
git clone https://github.com/duragraph/duragraph.git
cd duragraph
# Start DuraGraph
docker compose up -d
# Run conformance tests against your instance
API_BASE_URL=http://localhost:8081/api/v1 pytest tests/conformance/

Run both systems in parallel during migration:

import os

# Feature flag for gradual migration: set USE_DURAGRAPH=true to route
# traffic to DuraGraph while leaving LangGraph Cloud as the fallback.
USE_DURAGRAPH = os.getenv("USE_DURAGRAPH", "false").lower() == "true"

if USE_DURAGRAPH:
    client = get_client(url="http://your-duragraph.com")
else:
    client = get_client(url="https://api.smith.langchain.com")

Connection refused:

Terminal window
# Check if DuraGraph is running
curl http://localhost:8081/health
# Check container status
docker compose ps

Authentication errors:

Terminal window
# Verify your token
curl -H "Authorization: Bearer YOUR_TOKEN" http://localhost:8081/api/v1/assistants

Run stuck in queued:

Terminal window
# Check server logs
docker compose logs server
# Verify NATS connection
docker compose logs nats

Streaming not working:

Terminal window
# Test SSE endpoint directly (-N disables buffering; quote the URL so the
# shell does not glob the "?")
curl -N "http://localhost:8081/api/v1/stream?run_id=YOUR_RUN_ID"

Monitor latency during migration:

import time


async def benchmark():
    """Measure end-to-end latency of creating a run and waiting for it to finish."""
    start = time.time()
    run = await client.runs.create(...)  # fill in thread/assistant/input
    await client.runs.join(thread_id, run["run_id"])
    latency = time.time() - start
    print(f"End-to-end latency: {latency:.2f}s")
  1. ✅ Start DuraGraph locally: docker compose up -d
  2. ✅ Update your SDK base URL
  3. ✅ Run your existing tests
  4. ✅ Compare performance metrics
  5. Deploy to production
  6. Set up monitoring