Deploy EDGE Processing
Real-Time Video Analysis at the Network Edge
Process video frames in under 15ms at 275+ global edge locations. Run AI inference, detect scenes, moderate content, and apply custom effects in real time. Enterprise-grade reliability with automatic failover.
Prerequisites
What you'll need before deploying to EDGE
Install the WAVE CLI for local development and deployment
npm install -g @wave/cli

Authenticate with your WAVE API key

wave auth:login

Node.js 18+, Python 3.9+, or Go 1.21+

node --version

Processing Types
Choose the right processing pattern for your use case
Implementation Guide
Complete code examples in your preferred language
// Scene detection with histogram analysis
export default async function processFrame(frame, ctx) {
  // frame.data = raw RGBA pixel data (Uint8Array)
  // frame.width, frame.height = dimensions
  // frame.timestamp = milliseconds since stream start
  // frame.receivedAt = wall-clock arrival time, used for latency metrics below
  // ctx = execution context with stream metadata
  try {
    // Compute luminance histogram for scene detection
    const histogram = computeHistogram(frame.data, frame.width, frame.height);

    // Compare with previous frame histogram
    const prevHistogram = ctx.state.get('lastHistogram');
    const distance = prevHistogram
      ? computeHistogramDistance(histogram, prevHistogram)
      : 0;

    // Scene change if histogram distance exceeds threshold
    const isSceneChange = distance > 0.35;

    // Extract additional metadata
    const metadata = {
      timestamp: frame.timestamp,
      frameNumber: ctx.state.get('frameCount', 0) + 1,
      sceneChange: isSceneChange,
      histogramDistance: distance,
      dominantColors: extractDominantColors(histogram),
      brightness: calculateBrightness(histogram),
      contrast: calculateContrast(histogram),
    };

    // Update state for next frame comparison
    ctx.state.set('lastHistogram', histogram);
    ctx.state.set('frameCount', metadata.frameNumber);

    // Emit event for PULSE analytics
    if (isSceneChange) {
      await ctx.emit('edge:scene-change', {
        streamId: ctx.stream.id,
        timestamp: frame.timestamp,
        sceneIndex: ctx.state.get('sceneCount', 0) + 1,
        metadata,
      });
      ctx.state.increment('sceneCount');
    }

    // Track processing metrics
    ctx.metrics.histogram('edge.processing_ms', Date.now() - frame.receivedAt);
    ctx.metrics.increment('edge.frames_processed');

    return {
      action: isSceneChange ? 'capture_thumbnail' : 'continue',
      metadata,
    };
  } catch (error) {
    ctx.metrics.increment('edge.processing_errors');
    ctx.logger.error('Frame processing failed', { error: error.message });
    throw error;
  }
}

function computeHistogram(data, width, height) {
  const histogram = new Float32Array(256);
  const pixelCount = width * height;
  // Step by 4: each pixel is RGBA; alpha is ignored
  for (let i = 0; i < data.length; i += 4) {
    // Convert RGB to grayscale luminance (Rec. 601 weights)
    const luminance = Math.floor(
      data[i] * 0.299 + data[i + 1] * 0.587 + data[i + 2] * 0.114
    );
    histogram[luminance]++;
  }
  // Normalize so bins sum to 1
  for (let i = 0; i < 256; i++) {
    histogram[i] /= pixelCount;
  }
  return histogram;
}

function computeHistogramDistance(hist1, hist2) {
  // Chi-squared distance for histogram comparison
  let distance = 0;
  for (let i = 0; i < 256; i++) {
    if (hist1[i] + hist2[i] > 0) {
      distance += Math.pow(hist1[i] - hist2[i], 2) / (hist1[i] + hist2[i]);
    }
  }
  // Dividing by 2 bounds the result to [0, 1] for normalized histograms
  return distance / 2;
}

function extractDominantColors(histogram) {
  // Find peaks in histogram for dominant colors
  // @color-validator-ignore - Example output values for documentation
  return { primary: '#3B82F6', secondary: '#10B981', tertiary: '#8B5CF6' };
}

function calculateBrightness(histogram) {
  // Mean luminance, normalized to [0, 1]
  let sum = 0;
  for (let i = 0; i < 256; i++) {
    sum += i * histogram[i];
  }
  return sum / 255;
}

function calculateContrast(histogram) {
  // Standard deviation of normalized luminance
  const brightness = calculateBrightness(histogram);
  let variance = 0;
  for (let i = 0; i < 256; i++) {
    variance += Math.pow(i / 255 - brightness, 2) * histogram[i];
  }
  return Math.sqrt(variance);
}
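You can exercise the handler locally before deploying it. The harness below is a minimal sketch, not the EDGE runtime: the in-memory state, metrics, and logger stubs and the './process-frame.js' import path are illustrative assumptions; only the frame and ctx shapes mirror the comments in the example above.

// Minimal local harness (illustrative) for the processFrame handler above.
// The stubs below are assumptions for testing, not the real EDGE runtime.
// Run with Node 18+ as an ES module.
import processFrame from './process-frame.js'; // hypothetical module path

function makeCtx() {
  const store = new Map();
  return {
    stream: { id: 'local-test' },
    state: {
      get: (key, fallback) => (store.has(key) ? store.get(key) : fallback),
      set: (key, value) => store.set(key, value),
      increment: (key) => store.set(key, (store.get(key) ?? 0) + 1),
    },
    emit: async (event, payload) => console.log(event, payload),
    metrics: { histogram: () => {}, increment: () => {} },
    logger: { error: console.error },
  };
}

// A flat 64x64 RGBA frame where every channel holds `value`
function syntheticFrame(value, timestamp) {
  const data = new Uint8Array(64 * 64 * 4).fill(value);
  return { data, width: 64, height: 64, timestamp, receivedAt: Date.now() };
}

const ctx = makeCtx();
await processFrame(syntheticFrame(30, 0), ctx); // dark frame seeds the state
const result = await processFrame(syntheticFrame(220, 33), ctx); // abrupt brightness jump
console.log(result.metadata.sceneChange); // true

Because the two synthetic frames concentrate all their luminance in different histogram bins, the chi-squared distance reaches its maximum of 1.0, comfortably above the 0.35 threshold.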
GPU Acceleration
Enable GPU processing for AI inference at scale
Available GPU Hardware
Configure GPU acceleration in your edge.config.json
| GPU Type | Memory | Best For | Availability |
|---|---|---|---|
| nvidia-t4 | 16GB | General inference, medium models | All primary regions |
| nvidia-a100 | 40GB | Large models, high throughput | Primary regions only |
| nvidia-l4 | 24GB | Balanced performance/cost | Most regions |
// edge.config.json
{
  "name": "ai-processor",
  "runtime": "python3.11",
  "resources": {
    "cpu": "4-core",
    "memory": "8GB",
    "gpu": "nvidia-t4",      // Request GPU
    "gpuMemory": "16GB"
  },
  "models": [
    {
      "name": "moderation-v3",
      "path": "s3://wave-models/moderation-v3.tflite",
      "accelerator": "gpu",  // Use GPU for this model
      "fallback": "cpu"      // CPU fallback if GPU unavailable
    }
  ]
}
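With accelerator and fallback declared, a handler can invoke the model without branching on hardware: when no GPU is available at a location, inference falls back to CPU per the config. The sketch below is illustrative only, written in JavaScript for consistency with the guide above even though this config declares a Python runtime; ctx.models, predict(), model.accelerator, and the 'blur' action are assumptions, not confirmed EDGE APIs.

// Illustrative sketch only: ctx.models, predict(), model.accelerator, and
// the 'blur' action are assumed names, not confirmed EDGE runtime APIs.
export default async function processFrame(frame, ctx) {
  const model = ctx.models.get('moderation-v3'); // name from edge.config.json above
  const result = await model.predict(frame.data); // GPU, or CPU via the declared fallback
  ctx.metrics.increment(`edge.inference_${model.accelerator}`); // 'gpu' or 'cpu'
  return { action: result.flagged ? 'blur' : 'continue' };
}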
Troubleshooting
Common issues and how to resolve them
Success Stories
How leading companies use WAVE EDGE
"WAVE EDGE processes 4.2 billion frames daily for our real-time content moderation. We detect and blur inappropriate content in under 12ms with 99.97% accuracy. This replaced our previous 3-second batch processing pipeline entirely."
"Scene detection with EDGE auto-generates chapter markers for our VODs. Creators save 2+ hours per stream on manual editing. The edge-deployed AI models detect 15+ types of content violations in real-time."
"Real-time player tracking with EDGE powers our augmented reality overlays. We track 22 players at 120fps with <8ms latency, enabling live stats, heat maps, and tactical visualizations that were impossible before."
Next Steps
Continue building with WAVE
Start Processing Video at the Edge
Deploy real-time processing in 5 minutes. <15ms latency, 275+ global locations, automatic failover.