Build Knowledge Graph from Meeting Recordings
User Intent
Operation
Prerequisites
Complete Code Example (TypeScript)
// Graphlit SDK client plus the generated GraphQL enum types used below.
// Fixes vs. original: `ObservableTypes` was imported twice from the same
// path; `DeepgramModels` and `EntityExtractionServiceTypes` were used later
// in the example but never imported ("ExtractionServiceTypes" was a typo for
// the latter).
import { Graphlit } from 'graphlit-client';
import {
  FilePreparationServiceTypes,   // preparation connector kinds (e.g. Deepgram)
  DeepgramModels,                // Deepgram transcription model identifiers
  EntityExtractionServiceTypes,  // extraction connector kinds (e.g. ModelText)
  ObservableTypes                // entity types observable in content
} from 'graphlit-client/dist/generated/graphql-types';

// Client instance used by every call below.
const graphlit = new Graphlit();
console.log('=== Building Knowledge Graph from Meeting ===\n');

// Step 1: Build a two-stage workflow — transcribe the recording, then
// extract typed entities from the transcript.
console.log('Step 1: Creating workflow...');

// Preparation stage: Deepgram audio transcription.
const preparationJob = {
  connector: {
    type: FilePreparationServiceTypes.Deepgram,
    audioTranscription: {
      model: DeepgramModels.Nova2 // Fast, accurate
    }
  }
};

// Extraction stage: pull typed entities out of the transcript text.
const extractionJob = {
  connector: {
    type: EntityExtractionServiceTypes.ModelText,
    extractedTypes: [
      ObservableTypes.Person,       // Participants, mentioned people
      ObservableTypes.Organization, // Companies discussed
      ObservableTypes.Product,      // Products, services mentioned
      ObservableTypes.Event,        // Action items, deadlines, follow-ups
      ObservableTypes.Place,        // Locations mentioned
      ObservableTypes.Category      // Topics, projects, themes
    ]
  }
};

const workflow = await graphlit.createWorkflow({
  name: "Meeting Entity Extraction",
  preparation: { jobs: [preparationJob] },
  extraction: { jobs: [extractionJob] }
});
console.log(`✓ Workflow: ${workflow.createWorkflow.id}\n`);

// Step 2: Ingest the meeting recording through that workflow.
console.log('Step 2: Ingesting meeting recording...');
const meeting = await graphlit.ingestUri(
  'https://example.com/meetings/q4-planning.mp4',
  "Q4 Planning Meeting",
  undefined,
  undefined,
  undefined,
  { id: workflow.createWorkflow.id } // run the transcription+extraction workflow
);
console.log(`✓ Ingested: ${meeting.ingestUri.id}\n`);
// Step 3: Poll until transcription + extraction finish.
// Fix vs. original: the loop was unbounded — if the service never reported
// completion the script would spin forever. A timeout now aborts with an
// error instead.
console.log('Step 3: Transcribing and extracting entities...');
console.log('(This may take several minutes for long recordings)\n');

const POLL_INTERVAL_MS = 5000;
const MAX_WAIT_MS = 30 * 60 * 1000; // 30 minutes; raise for very long recordings
const pollStart = Date.now();

let isDone = false;
let lastStatus = '';
while (!isDone) {
  const status = await graphlit.isContentDone(meeting.ingestUri.id);
  isDone = status.isContentDone.result;
  if (!isDone) {
    if (Date.now() - pollStart >= MAX_WAIT_MS) {
      throw new Error(`Timed out waiting for content ${meeting.ingestUri.id} to finish processing`);
    }
    // Only print the status line when it changes, to avoid log spam.
    const newStatus = ' Processing...';
    if (newStatus !== lastStatus) {
      console.log(newStatus);
      lastStatus = newStatus;
    }
    await new Promise(resolve => setTimeout(resolve, POLL_INTERVAL_MS));
  }
}
console.log('✓ Processing complete\n');
// Step 4: Fetch the fully processed content (metadata, transcript, observations).
console.log('Step 4: Retrieving meeting transcript and entities...');
const meetingDetails = await graphlit.getContent(meeting.ingestUri.id);
const content = meetingDetails.content;
console.log(`✓ Meeting: ${content.name}`);
console.log(` Duration: ${content.audio?.duration || 0} seconds`);
console.log(` Entities: ${content.observations?.length || 0}\n`);

// Step 5: Show the first 500 characters of the transcript.
console.log('Step 5: Transcript excerpt...\n');
const transcript = content.markdown || content.text || '';
console.log(transcript.slice(0, 500));
console.log(transcript.length > 500 ? '...\n' : '\n');

// Step 6: Group observed entities by type, deduplicating names per type.
console.log('Step 6: Analyzing entities...\n');
const byType = new Map<string, Set<string>>();
for (const obs of content.observations ?? []) {
  const names = byType.get(obs.type) ?? new Set<string>();
  names.add(obs.observable.name);
  byType.set(obs.type, names);
}

// Print up to five example names per type (Map preserves insertion order).
for (const [type, entities] of byType) {
  console.log(`${type} (${entities.size}):`);
  for (const name of [...entities].slice(0, 5)) {
    console.log(` - ${name}`);
  }
  if (entities.size > 5) {
    console.log(` ... and ${entities.size - 5} more`);
  }
  console.log();
}
/**
 * Format a time offset in seconds as M:SS (e.g. 83.4 -> "1:23").
 * Extracted because the same minutes/seconds math was duplicated in
 * Step 7 and Step 8 of the original; output is byte-identical.
 */
const formatTimestamp = (totalSeconds: number): string => {
  const minutes = Math.floor(totalSeconds / 60);
  const seconds = Math.floor(totalSeconds % 60);
  return `${minutes}:${seconds.toString().padStart(2, '0')}`;
};

// Step 7: Show where the first few people are mentioned in the recording.
console.log('Step 7: Entity mentions with timestamps...\n');
const people = content.observations?.filter(obs =>
  obs.type === ObservableTypes.Person
) || [];
people.slice(0, 3).forEach(person => {
  console.log(`${person.observable.name}:`);
  person.occurrences?.slice(0, 3).forEach(occ => {
    // Only occurrences with a time range carry a usable timestamp.
    if (occ.startTime !== undefined && occ.endTime !== undefined) {
      console.log(` ${formatTimestamp(occ.startTime)} - Confidence: ${occ.confidence.toFixed(2)}`);
    }
  });
  console.log();
});

// Step 8: Treat extracted Event entities as action items / deadlines.
console.log('Step 8: Action items and deadlines...\n');
const events = content.observations?.filter(obs =>
  obs.type === ObservableTypes.Event
) || [];
if (events.length > 0) {
  console.log('Identified action items:');
  events.forEach(event => {
    console.log(` - ${event.observable.name}`);
    // Show when the item was first mentioned, if timed.
    const firstMention = event.occurrences?.[0];
    if (firstMention?.startTime !== undefined) {
      console.log(` Mentioned at: ${formatTimestamp(firstMention.startTime)}`);
    }
  });
} else {
  console.log('No action items identified');
}
console.log('\n✓ Meeting analysis complete!');

Step-by-Step Explanation
Step 1: Create Transcription Workflow
Step 2: Supported Audio/Video Formats
Step 3: Processing Timeline
Step 4: Transcript Structure
Step 5: Entity Extraction from Transcript
Step 6: Timestamp Analysis
Step 7: Speaker Diarization
Configuration Options
Choosing Transcription Service
Audio Quality Preprocessing
Variations
Variation 1: Multi-Meeting Analysis
Variation 2: Action Item Tracker
Variation 3: Meeting Sentiment & Topic Analysis
Variation 4: Searchable Meeting Archive
Variation 5: Meeting Summary Generation
Common Issues & Solutions
Issue: Poor Transcription Quality
Issue: Processing Takes Too Long
Issue: No Speaker Diarization
Issue: Missing Action Items
Developer Hints
Transcription Service Selection
Audio Format Best Practices
Cost Optimization
Meeting Entity Quality
Performance Tips
Production Patterns
Pattern from Meeting Intelligence Apps
Enterprise Use Cases
Last updated