# Stream Agent Real-Time UI

## User Intent

"How do I build a real-time streaming chat UI with AI responses?"

## Operation

- **SDK Method:** `streamAgent()`
- **Use Case:** Real-time streaming chat interface with tool calling
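The snippets below assume a configured `Graphlit` client. As a minimal setup sketch (the environment variable names are an assumption about how credentials are supplied; adapt them to your deployment):

```typescript
import { Graphlit } from 'graphlit-client';

// Assumed credential variables; substitute whatever your project uses.
const requiredEnv = [
  'GRAPHLIT_ORGANIZATION_ID',
  'GRAPHLIT_ENVIRONMENT_ID',
  'GRAPHLIT_JWT_SECRET'
];
for (const name of requiredEnv) {
  if (!process.env[name]) {
    throw new Error(`Missing environment variable: ${name}`);
  }
}

// The no-argument constructor is used throughout this page; it is assumed
// here to read credentials from the environment.
const graphlit = new Graphlit();
```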
## Complete Code Example (TypeScript)

```typescript
import { Graphlit } from 'graphlit-client';
import { AgentStreamEvent } from 'graphlit-client/dist/generated/graphql-types';
const graphlit = new Graphlit();
// UI State
let conversationId: string | undefined;
let currentMessage = '';
let isTyping = false;
await graphlit.streamAgent(
'What are the key findings in the research papers?',
async (event: AgentStreamEvent) => {
switch (event.type) {
case 'conversation_started':
// UI: Store conversation ID, show typing indicator
conversationId = event.conversationId;
isTyping = true;
updateUI({ showTyping: true });
break;
case 'message_update':
if (event.isStreaming) {
// UI: Append text chunk to message bubble (real-time)
currentMessage += event.message.message;
updateMessageBubble(currentMessage);
} else {
// UI: Message complete, hide typing, show final metadata
isTyping = false;
finalizeMessage({
text: event.message.message, // final event carries the complete text
tokens: event.message.tokens,
model: event.message.model
});
currentMessage = ''; // Reset for next message
}
break;
case 'tool_update':
// UI: Show tool execution card with status
updateToolCard({
name: event.toolCall.name,
status: event.status, // 'preparing' | 'executing' | 'completed' | 'failed'
arguments: event.toolCall.arguments,
result: event.result,
error: event.error
});
break;
case 'reasoning_update':
// UI: Show expandable "Thinking..." section (Claude extended thinking)
updateReasoningBlock({
content: event.reasoning,
isVisible: true
});
break;
case 'conversation_completed':
// UI: Hide typing indicator, show token count badge
isTyping = false;
updateUI({
showTyping: false,
metadata: {
tokens: event.message.tokens,
model: event.message.model,
throughput: event.message.throughput
}
});
break;
case 'error':
// UI: Show error toast, enable retry button
showError({
message: event.error.message,
code: event.error.code,
recoverable: event.error.recoverable
});
if (event.error.recoverable) {
showRetryButton();
}
break;
}
},
conversationId, // Continue existing conversation
undefined, // Use default specification
[], // Tools (optional)
{} // Tool handlers (optional)
);
```

## Event Types → UI Patterns
### 1. conversation_started

**When:** The conversation begins.

**UI Actions:**
- Store `conversationId` for subsequent turns
- Show the typing indicator
- Scroll to the bottom of the chat

```typescript
case 'conversation_started':
setConversationId(event.conversationId);
setIsTyping(true);
scrollToBottom();
break;
```

### 2. message_update
**When:** Message chunks arrive while streaming, then the message completes.

**UI Actions:**
- Append each chunk to the message bubble
- When `isStreaming` is `false`, finalize the message

```typescript
case 'message_update':
if (event.isStreaming) {
// Stream chunk by chunk
appendTextToMessageBubble(event.message.message);
} else {
// Final message with metadata
finalizeMessageBubble({
fullText: event.message.message,
tokens: event.message.tokens,
model: event.message.model,
throughput: event.message.throughput
});
}
break;
```

### 3. tool_update
**When:** The AI calls a tool/function.

**UI Actions:**
- Show a tool execution card
- Update its status: preparing → executing → completed/failed
- Display the result or error

```typescript
case 'tool_update':
const toolCard = findOrCreateToolCard(event.toolCall.id);
switch (event.status) {
case 'preparing':
toolCard.setStatus('Preparing...');
toolCard.setIcon('spinner');
break;
case 'executing':
toolCard.setStatus(`Executing ${event.toolCall.name}`);
toolCard.showArguments(event.toolCall.arguments);
break;
case 'completed':
toolCard.setStatus('Completed');
toolCard.setIcon('check');
toolCard.showResult(event.result);
break;
case 'failed':
toolCard.setStatus('Failed');
toolCard.setIcon('error');
toolCard.showError(event.error);
break;
}
break;
```

### 4. reasoning_update
**When:** The model is thinking (Claude extended thinking).

**UI Actions:**
- Show an expandable "Thinking..." section
- Display the reasoning content

```typescript
case 'reasoning_update':
showReasoningPanel({
content: event.reasoning,
isCollapsible: true,
defaultExpanded: false
});
break;
```

### 5. conversation_completed
**When:** The full response is complete.

**UI Actions:**
- Hide the typing indicator
- Enable the input field
- Show a token count badge
- Update conversation metadata

```typescript
case 'conversation_completed':
setIsTyping(false);
enableInputField();
showMetadataBadge({
tokens: event.message.tokens,
model: event.message.model,
duration: calculateDuration()
});
break;
```
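The `calculateDuration()` helper above is not part of the SDK. One simple way to back it (a sketch using plain timestamps) is to record when the conversation started:

```typescript
// Hypothetical timing helpers backing calculateDuration() above.
let startedAt = 0;

function markConversationStarted(): void {
  startedAt = Date.now(); // call on 'conversation_started'
}

function calculateDuration(): number {
  return Date.now() - startedAt; // elapsed milliseconds, read on 'conversation_completed'
}
```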
### 6. error

**When:** An error occurs.

**UI Actions:**
- Show an error toast/banner
- If `recoverable` is `true`, show a retry button
- Log the error for debugging

```typescript
case 'error':
showErrorToast({
title: 'Error',
message: event.error.message,
severity: event.error.recoverable ? 'warning' : 'error'
});
if (event.error.recoverable) {
showRetryButton(() => {
// Retry the same prompt
retryLastMessage();
});
}
break;
```

## Multi-Turn Conversation

```typescript
// Store conversation ID across turns
let conversationId: string | undefined;
// First message
await graphlit.streamAgent(
'What is quantum computing?',
async (event) => {
if (event.type === 'conversation_started') {
conversationId = event.conversationId; // Store for next turn
}
// ... handle other events
}
);
// Second message (with context from first)
await graphlit.streamAgent(
'Can you give an example?',
async (event) => {
// ... handle events
},
conversationId // Same conversation = has context
);
```

## Tool Calling UI

```typescript
const tools = [
{
name: 'searchDocuments',
description: 'Search through documents',
schema: JSON.stringify({
type: 'object',
properties: {
query: { type: 'string' }
}
})
}
];
const toolHandlers = {
searchDocuments: async (args: { query: string }) => {
const results = await graphlit.queryContents({
search: args.query
});
return { results };
}
};
await graphlit.streamAgent(
'Find information about AI safety',
async (event) => {
if (event.type === 'tool_update') {
// UI: Show tool card
// "🔍 Searching documents for 'AI safety'..."
// Then show results when completed
}
},
conversationId,
undefined,
tools,
toolHandlers
);
```

## Cancellation

```typescript
const abortController = new AbortController();
// Start streaming
const streamPromise = graphlit.streamAgent(
'Write a long essay...',
async (event) => {
// ... handle events
},
undefined,
undefined,
[],
{},
{
abortSignal: abortController.signal // Pass abort signal
}
);
// User clicks "Stop" button
stopButton.onClick(() => {
abortController.abort(); // Cancel streaming
showMessage('Generation stopped');
});
```
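Note that the example above never awaits `streamPromise`. How an aborted stream surfaces (a rejection versus an early, partial completion) is not specified here and may vary by SDK version, so guarding the await is a reasonable precaution. A sketch, continuing the example above:

```typescript
try {
  await streamPromise;
} catch (error: unknown) {
  // Assumption: an aborted stream may surface as a rejection.
  // Treat a user-initiated abort as normal, not as a failure.
  if (abortController.signal.aborted) {
    console.log('Streaming cancelled by user');
  } else {
    throw error;
  }
}
```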
## Production Pattern (React Example)

```tsx
import { useState } from 'react';
import { Graphlit } from 'graphlit-client';
import { AgentStreamEvent } from 'graphlit-client/dist/generated/graphql-types';
function ChatInterface() {
const [conversationId, setConversationId] = useState<string>();
const [messages, setMessages] = useState<Message[]>([]);
const [currentChunk, setCurrentChunk] = useState('');
const [isStreaming, setIsStreaming] = useState(false);
const [tools, setTools] = useState<ToolStatus[]>([]);
const graphlit = new Graphlit();
async function sendMessage(prompt: string) {
setIsStreaming(true);
setCurrentChunk('');
await graphlit.streamAgent(
prompt,
async (event: AgentStreamEvent) => {
switch (event.type) {
case 'conversation_started':
setConversationId(event.conversationId);
break;
case 'message_update':
if (event.isStreaming) {
setCurrentChunk(prev => prev + event.message.message);
} else {
// Finalize message
setMessages(prev => [...prev, {
role: 'assistant',
content: event.message.message,
tokens: event.message.tokens,
model: event.message.model
}]);
setCurrentChunk('');
}
break;
case 'tool_update':
setTools(prev => updateToolStatus(prev, event));
break;
case 'conversation_completed':
setIsStreaming(false);
break;
case 'error':
setIsStreaming(false);
showError(event.error.message);
break;
}
},
conversationId
);
}
return (
<div className="chat-interface">
<MessageList messages={messages} />
{currentChunk && <StreamingMessage text={currentChunk} />}
{isStreaming && <TypingIndicator />}
{tools.length > 0 && <ToolStatusCards tools={tools} />}
<InputField onSend={sendMessage} disabled={isStreaming} />
</div>
);
}
```
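The `updateToolStatus` helper used above is not an SDK function. A rough sketch of what it might do, with a `ToolStatus` shape assumed purely for illustration:

```typescript
interface ToolStatus {
  id: string;
  name: string;
  status: 'preparing' | 'executing' | 'completed' | 'failed';
  result?: unknown;
  error?: unknown;
}

// Upsert the tool call referenced by a 'tool_update' event into local UI state.
function updateToolStatus(
  previous: ToolStatus[],
  event: {
    toolCall: { id: string; name: string; arguments?: string };
    status: ToolStatus['status'];
    result?: unknown;
    error?: unknown;
  }
): ToolStatus[] {
  const next: ToolStatus = {
    id: event.toolCall.id,
    name: event.toolCall.name,
    status: event.status,
    result: event.result,
    error: event.error
  };
  const index = previous.findIndex((tool) => tool.id === next.id);
  return index >= 0
    ? previous.map((tool, i) => (i === index ? next : tool))
    : [...previous, next];
}
```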
## Key Differences: streamAgent vs promptConversation

| | `streamAgent` | `promptConversation` |
| --- | --- | --- |
| Streaming | ✅ Real-time chunks | ❌ Waits for the complete response |
| Tool calling | ✅ Supported | ❌ Not supported |
| Citations | ❌ Not available | ✅ Returns citations |
| UI complexity | Higher (event handling) | Lower (single response) |
| Use case | Chat UI, streaming | Simple Q&A, citations |
**When to use `streamAgent`:**
- Building a chat UI with real-time streaming
- You need tool/function calling
- You want to show the AI's "thinking" process

**When to use `promptConversation`** (see the sketch after this list):
- Simple Q&A without streaming
- You need citations/sources
- You don't need tool calling
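For comparison, a minimal non-streaming call might look like the sketch below. The exact `promptConversation` signature and response shape (for example, where `citations` lives on the returned message) may differ across SDK versions, so treat the field access as an assumption and check the generated types in your version of `graphlit-client`.

```typescript
import { Graphlit } from 'graphlit-client';

const graphlit = new Graphlit();

// Single-shot Q&A: no streaming events to handle, and the response
// can include citations back to the source content.
const response = await graphlit.promptConversation(
  'What are the key findings in the research papers?'
);

// Assumed response shape; adjust to the generated GraphQL types.
const message = response.promptConversation?.message;
console.log(message?.message);
console.log(message?.citations);
```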
## Common Issues

**Issue: Events arrive out of order**
Solution: This shouldn't happen. Make sure your handler isn't mutating shared state from multiple places in a way that reorders updates.

**Issue: Message chunks duplicated**
Solution: Only append text when `isStreaming` is `true`. The final message arrives with `isStreaming: false` and carries the complete text, so replace rather than append, as in the sketch below.
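A minimal guard that matches the `message_update` pattern above (the `updateMessageBubble` helper is a placeholder UI function, as in the earlier examples):

```typescript
const updateMessageBubble = (text: string): void => {
  // placeholder: render the text into the active chat bubble
};

let currentMessage = '';

function handleMessageUpdate(event: { isStreaming?: boolean; message: { message: string } }): void {
  if (event.isStreaming) {
    currentMessage += event.message.message; // streaming delta chunk
  } else {
    currentMessage = event.message.message;  // final event: complete text, replace
  }
  updateMessageBubble(currentMessage);
}
```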
**Issue: Conversation ID not available**
Solution: Wait for the `conversation_started` event before using `conversationId`.

**Issue: Tools not executing**
Solution: Verify that the `tools` array and the `toolHandlers` object are both passed correctly.

**Issue: Can't cancel streaming**
Solution: Pass an `abortSignal` in the options parameter.