Fix potential memory leaks in audio handling - can crash the server after extended use

This fix addresses memory leaks in both the backend and the frontend that could cause server crashes after extended use.

Backend fixes:
- MemoryManager class monitors process and GPU memory usage
- Automatic cleanup when thresholds are exceeded (4 GB process, 2 GB GPU); see the sketch after this list
- Whisper model reloading to clear GPU memory fragmentation
- Aggressive temporary file cleanup based on age
- Context manager for audio processing with guaranteed cleanup
- Integration with session manager for resource tracking
- Background monitoring thread runs every 30 seconds
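The backend changes are not among the two files shown below. As a rough illustration of the threshold-check pattern described above (a minimal sketch in TypeScript for consistency with the frontend code; the names, the GPU query, and the cleanup hook are assumptions, not the actual backend implementation):

```typescript
// Sketch only: the real backend is not part of this diff; getGpuBytes and cleanup
// are placeholder hooks standing in for the GPU query and cleanup/model-reload logic.
const PROCESS_LIMIT_BYTES = 4 * 1024 ** 3; // 4 GB process threshold
const GPU_LIMIT_BYTES = 2 * 1024 ** 3;     // 2 GB GPU threshold
const CHECK_INTERVAL_MS = 30_000;          // background check every 30 seconds

function checkMemory(getGpuBytes: () => number, cleanup: (reason: string) => void): void {
  const rss = process.memoryUsage().rss;   // resident set size of this process (Node API)
  if (rss > PROCESS_LIMIT_BYTES) {
    cleanup(`process memory at ${Math.round(rss / 1024 ** 2)} MB`);
  }
  if (getGpuBytes() > GPU_LIMIT_BYTES) {
    cleanup('GPU memory over threshold'); // e.g. clear caches, reload the model
  }
}

// Example wiring: setInterval(() => checkMemory(readGpuBytes, runCleanup), CHECK_INTERVAL_MS);
```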

Frontend fixes:
- MemoryManager singleton tracks browser resources (AudioContexts, object URLs, MediaStreams, timers); usage sketch after this list
- SafeMediaRecorder wrapper ensures stream cleanup
- AudioBlobHandler manages blob lifecycle and object URLs
- Automatic cleanup of closed AudioContexts
- Proper MediaStream track stopping
- Periodic cleanup of orphaned resources
- Cleanup on page unload
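A condensed usage sketch of how these pieces fit together; it mirrors the flow in the app diff further down, with UI updates, error handling, and the compression path omitted:

```typescript
import { MemoryManager, SafeMediaRecorder, AudioBlobHandler } from './memoryManager';

const memoryManager = MemoryManager.getInstance(); // starts periodic cleanup and the unload hook

let recorder: SafeMediaRecorder | null = null;
let audioHandler: AudioBlobHandler | null = null;

async function startRecording(): Promise<void> {
  recorder?.cleanup();                       // drop any previous stream/recorder
  recorder = new SafeMediaRecorder();
  await recorder.start({ audio: true });     // the MediaStream is registered for cleanup
}

async function stopRecording(): Promise<Blob | null> {
  if (!recorder?.isRecording()) return null;
  const blob = await recorder.stop();        // the recorder cleans itself up shortly after
  audioHandler?.cleanup();                   // revoke the previous object URL, if any
  audioHandler = new AudioBlobHandler(blob); // any object URL created from it is tracked
  return audioHandler.getBlob();
}
```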

Admin features (example requests after this list):
- GET /admin/memory - View memory statistics
- POST /admin/memory/cleanup - Trigger manual cleanup
- Real-time metrics including GPU usage and temp files
- Model reload tracking
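The endpoints above can be exercised from TypeScript roughly as follows; the JSON response shape and any authentication requirements are assumptions, since the backend route code is not among the files shown here:

```typescript
// Response fields are illustrative; the actual shape depends on the backend code.
async function fetchMemoryStats(baseUrl = ''): Promise<unknown> {
  const res = await fetch(`${baseUrl}/admin/memory`);
  if (!res.ok) throw new Error(`Memory stats request failed: ${res.status}`);
  return res.json(); // e.g. process/GPU usage, temp file count, model reload count
}

async function triggerMemoryCleanup(baseUrl = ''): Promise<unknown> {
  const res = await fetch(`${baseUrl}/admin/memory/cleanup`, { method: 'POST' });
  if (!res.ok) throw new Error(`Cleanup request failed: ${res.status}`);
  return res.json();
}
```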

Key improvements:
- AudioContext properly closed after use
- Object URLs revoked after use (see snippet after this list)
- MediaRecorder streams properly stopped
- Audio chunks cleared after processing
- GPU cache cleared after each transcription
- Temp files tracked and cleaned aggressively
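A minimal sketch of the object-URL and AudioContext lifecycle these bullets refer to, shown without the helper classes (assumes the blob is played back directly):

```typescript
// Revoke the object URL once playback finishes instead of leaving it alive.
async function playAndRelease(blob: Blob): Promise<void> {
  const url = URL.createObjectURL(blob);
  const audio = new Audio(url);
  audio.onended = () => URL.revokeObjectURL(url);
  await audio.play();
}

// Close the AudioContext as soon as decoding is done so its audio thread is released.
async function decodeAndClose(blob: Blob): Promise<AudioBuffer> {
  const ctx = new AudioContext();
  try {
    return await ctx.decodeAudioData(await blob.arrayBuffer());
  } finally {
    await ctx.close();
  }
}
```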

This prevents the gradual memory increase that could lead to out-of-memory errors or performance degradation after hours of use.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 1b9ad03400 (parent 92b7c41f61)
2025-06-03 08:37:13 -06:00
7 changed files with 1194 additions and 93 deletions


@@ -23,6 +23,7 @@ import { PerformanceMonitor } from './performanceMonitor';
import { SpeakerManager } from './speakerManager';
import { ConnectionManager } from './connectionManager';
import { ConnectionUI } from './connectionUI';
import { MemoryManager, AudioBlobHandler, SafeMediaRecorder } from './memoryManager';
// import { apiClient } from './apiClient'; // Available for cross-origin requests
// Initialize error boundary
@@ -32,6 +33,9 @@ const errorBoundary = ErrorBoundary.getInstance();
ConnectionManager.getInstance(); // Initialize connection manager
const connectionUI = ConnectionUI.getInstance();
// Initialize memory management
const memoryManager = MemoryManager.getInstance();
// Configure API client if needed for cross-origin requests
// import { apiClient } from './apiClient';
// apiClient.configure({ baseUrl: 'https://api.talk2me.com', credentials: 'include' });
@@ -149,8 +153,8 @@ function initApp(): void {
// Set initial values
let isRecording: boolean = false;
let mediaRecorder: MediaRecorder | null = null;
let audioChunks: Blob[] = [];
let safeMediaRecorder: SafeMediaRecorder | null = null;
let currentAudioHandler: AudioBlobHandler | null = null;
let currentSourceText: string = '';
let currentTranslationText: string = '';
let currentTtsServerUrl: string = '';
@@ -409,7 +413,7 @@ function initApp(): void {
});
// Function to start recording
function startRecording(): void {
async function startRecording(): Promise<void> {
// Request audio with specific constraints for better compression
const audioConstraints = {
audio: {
@@ -421,86 +425,79 @@ function initApp(): void {
}
};
navigator.mediaDevices.getUserMedia(audioConstraints)
.then(stream => {
// Use webm/opus for better compression (if supported)
const mimeType = MediaRecorder.isTypeSupported('audio/webm;codecs=opus')
? 'audio/webm;codecs=opus'
: 'audio/webm';
const options = {
mimeType: mimeType,
audioBitsPerSecond: 32000 // Low bitrate for speech (32 kbps)
};
try {
mediaRecorder = new MediaRecorder(stream, options);
} catch (e) {
// Fallback to default if options not supported
console.warn('Compression options not supported, using defaults');
mediaRecorder = new MediaRecorder(stream);
}
audioChunks = [];
mediaRecorder.addEventListener('dataavailable', event => {
audioChunks.push(event.data);
});
mediaRecorder.addEventListener('stop', async () => {
// Create blob with appropriate MIME type
const mimeType = mediaRecorder?.mimeType || 'audio/webm';
const audioBlob = new Blob(audioChunks, { type: mimeType });
// Log compression results
const sizeInKB = (audioBlob.size / 1024).toFixed(2);
console.log(`Audio compressed to ${sizeInKB} KB (${mimeType})`);
// If the audio is still too large, we can compress it further
if (audioBlob.size > 500 * 1024) { // If larger than 500KB
statusIndicator.textContent = 'Compressing audio...';
const compressedBlob = await compressAudioBlob(audioBlob);
transcribeAudio(compressedBlob);
} else {
transcribeAudio(audioBlob);
}
});
mediaRecorder.start();
isRecording = true;
recordBtn.classList.add('recording');
recordBtn.classList.replace('btn-primary', 'btn-danger');
recordBtn.innerHTML = '<div class="recording-wave"><span></span><span></span><span></span><span></span><span></span></div>';
statusIndicator.textContent = 'Recording... Click to stop';
statusIndicator.classList.add('processing');
})
.catch(error => {
console.error('Error accessing microphone:', error);
alert('Error accessing microphone. Please make sure you have given permission for microphone access.');
});
try {
// Clean up any previous recorder
if (safeMediaRecorder) {
safeMediaRecorder.cleanup();
}
safeMediaRecorder = new SafeMediaRecorder();
await safeMediaRecorder.start(audioConstraints);
isRecording = true;
recordBtn.classList.add('recording');
recordBtn.classList.replace('btn-primary', 'btn-danger');
recordBtn.innerHTML = '<div class="recording-wave"><span></span><span></span><span></span><span></span><span></span></div>';
statusIndicator.textContent = 'Recording... Click to stop';
statusIndicator.classList.add('processing');
} catch (error) {
console.error('Error accessing microphone:', error);
alert('Error accessing microphone. Please make sure you have given permission for microphone access.');
isRecording = false;
}
}
// Function to stop recording
function stopRecording(): void {
if (!mediaRecorder) return;
async function stopRecording(): Promise<void> {
if (!safeMediaRecorder || !safeMediaRecorder.isRecording()) return;
mediaRecorder.stop();
isRecording = false;
recordBtn.classList.remove('recording');
recordBtn.classList.replace('btn-danger', 'btn-primary');
recordBtn.innerHTML = '<i class="fas fa-microphone"></i>';
statusIndicator.textContent = 'Processing audio...';
statusIndicator.classList.add('processing');
showLoadingOverlay('Transcribing your speech...');
// Stop all audio tracks
mediaRecorder.stream.getTracks().forEach(track => track.stop());
try {
isRecording = false;
recordBtn.classList.remove('recording');
recordBtn.classList.replace('btn-danger', 'btn-primary');
recordBtn.innerHTML = '<i class="fas fa-microphone"></i>';
statusIndicator.textContent = 'Processing audio...';
statusIndicator.classList.add('processing');
showLoadingOverlay('Transcribing your speech...');
const audioBlob = await safeMediaRecorder.stop();
// Log compression results
const sizeInKB = (audioBlob.size / 1024).toFixed(2);
console.log(`Audio compressed to ${sizeInKB} KB`);
// Clean up old audio handler
if (currentAudioHandler) {
currentAudioHandler.cleanup();
}
// Create new audio handler
currentAudioHandler = new AudioBlobHandler(audioBlob);
// If the audio is still too large, compress it further
if (audioBlob.size > 500 * 1024) { // If larger than 500KB
statusIndicator.textContent = 'Compressing audio...';
const compressedBlob = await compressAudioBlob(audioBlob);
// Update handler with compressed blob
currentAudioHandler.cleanup();
currentAudioHandler = new AudioBlobHandler(compressedBlob);
transcribeAudio(compressedBlob);
} else {
transcribeAudio(audioBlob);
}
} catch (error) {
console.error('Error stopping recording:', error);
statusIndicator.textContent = 'Error processing audio';
hideLoadingOverlay();
}
}
// Function to compress audio blob if needed
async function compressAudioBlob(blob: Blob): Promise<Blob> {
return new Promise((resolve) => {
const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)();
memoryManager.registerAudioContext(audioContext);
const reader = new FileReader();
reader.onload = async (e) => {
@@ -510,6 +507,8 @@ function initApp(): void {
// Downsample to 16kHz mono
const offlineContext = new OfflineAudioContext(1, audioBuffer.duration * 16000, 16000);
memoryManager.registerAudioContext(offlineContext as any);
const source = offlineContext.createBufferSource();
source.buffer = audioBuffer;
source.connect(offlineContext.destination);
@@ -522,9 +521,15 @@ function initApp(): void {
const compressedSizeKB = (wavBlob.size / 1024).toFixed(2);
console.log(`Further compressed to ${compressedSizeKB} KB`);
// Clean up contexts
memoryManager.cleanupAudioContext(audioContext);
memoryManager.cleanupAudioContext(offlineContext as any);
resolve(wavBlob);
} catch (error) {
console.error('Compression failed, using original:', error);
// Clean up on error
memoryManager.cleanupAudioContext(audioContext);
resolve(blob); // Return original if compression fails
}
};


@@ -0,0 +1,309 @@
/**
* Memory management utilities for preventing leaks in audio handling
*/
export class MemoryManager {
private static instance: MemoryManager;
private audioContexts: Set<AudioContext> = new Set();
private objectURLs: Set<string> = new Set();
private mediaStreams: Set<MediaStream> = new Set();
private intervals: Set<number> = new Set();
private timeouts: Set<number> = new Set();
private constructor() {
// Set up periodic cleanup
this.startPeriodicCleanup();
// Clean up on page unload
window.addEventListener('beforeunload', () => this.cleanup());
}
static getInstance(): MemoryManager {
if (!MemoryManager.instance) {
MemoryManager.instance = new MemoryManager();
}
return MemoryManager.instance;
}
/**
* Register an AudioContext for cleanup
*/
registerAudioContext(context: AudioContext): void {
this.audioContexts.add(context);
}
/**
* Register an object URL for cleanup
*/
registerObjectURL(url: string): void {
this.objectURLs.add(url);
}
/**
* Register a MediaStream for cleanup
*/
registerMediaStream(stream: MediaStream): void {
this.mediaStreams.add(stream);
}
/**
* Register an interval for cleanup
*/
registerInterval(id: number): void {
this.intervals.add(id);
}
/**
* Register a timeout for cleanup
*/
registerTimeout(id: number): void {
this.timeouts.add(id);
}
/**
* Clean up a specific AudioContext
*/
cleanupAudioContext(context: AudioContext): void {
if (context.state !== 'closed') {
context.close().catch(console.error);
}
this.audioContexts.delete(context);
}
/**
* Clean up a specific object URL
*/
cleanupObjectURL(url: string): void {
URL.revokeObjectURL(url);
this.objectURLs.delete(url);
}
/**
* Clean up a specific MediaStream
*/
cleanupMediaStream(stream: MediaStream): void {
stream.getTracks().forEach(track => {
track.stop();
});
this.mediaStreams.delete(stream);
}
/**
* Clean up all resources
*/
cleanup(): void {
// Clean up audio contexts
this.audioContexts.forEach(context => {
if (context.state !== 'closed') {
context.close().catch(console.error);
}
});
this.audioContexts.clear();
// Clean up object URLs
this.objectURLs.forEach(url => {
URL.revokeObjectURL(url);
});
this.objectURLs.clear();
// Clean up media streams
this.mediaStreams.forEach(stream => {
stream.getTracks().forEach(track => {
track.stop();
});
});
this.mediaStreams.clear();
// Clear intervals and timeouts
this.intervals.forEach(id => clearInterval(id));
this.intervals.clear();
this.timeouts.forEach(id => clearTimeout(id));
this.timeouts.clear();
console.log('Memory cleanup completed');
}
/**
* Get memory usage statistics
*/
getStats(): MemoryStats {
return {
audioContexts: this.audioContexts.size,
objectURLs: this.objectURLs.size,
mediaStreams: this.mediaStreams.size,
intervals: this.intervals.size,
timeouts: this.timeouts.size
};
}
/**
* Start periodic cleanup of orphaned resources
*/
private startPeriodicCleanup(): void {
setInterval(() => {
// Clean up closed audio contexts
this.audioContexts.forEach(context => {
if (context.state === 'closed') {
this.audioContexts.delete(context);
}
});
// Clean up stopped media streams
this.mediaStreams.forEach(stream => {
const activeTracks = stream.getTracks().filter(track => track.readyState === 'live');
if (activeTracks.length === 0) {
this.mediaStreams.delete(stream);
}
});
// Log stats in development
if (process.env.NODE_ENV === 'development') {
const stats = this.getStats();
if (Object.values(stats).some(v => v > 0)) {
console.log('Memory manager stats:', stats);
}
}
}, 30000); // Every 30 seconds
// Don't track this interval to avoid self-reference
// It will be cleared on page unload
}
}
interface MemoryStats {
audioContexts: number;
objectURLs: number;
mediaStreams: number;
intervals: number;
timeouts: number;
}
/**
* Wrapper for safe audio blob handling
*/
export class AudioBlobHandler {
private blob: Blob;
private objectURL?: string;
private memoryManager: MemoryManager;
constructor(blob: Blob) {
this.blob = blob;
this.memoryManager = MemoryManager.getInstance();
}
/**
* Get object URL (creates one if needed)
*/
getObjectURL(): string {
if (!this.objectURL) {
this.objectURL = URL.createObjectURL(this.blob);
this.memoryManager.registerObjectURL(this.objectURL);
}
return this.objectURL;
}
/**
* Get the blob
*/
getBlob(): Blob {
return this.blob;
}
/**
* Clean up resources
*/
cleanup(): void {
if (this.objectURL) {
this.memoryManager.cleanupObjectURL(this.objectURL);
this.objectURL = undefined;
}
// Help garbage collection
(this as any).blob = null;
}
}
/**
* Safe MediaRecorder wrapper
*/
export class SafeMediaRecorder {
private mediaRecorder?: MediaRecorder;
private stream?: MediaStream;
private chunks: Blob[] = [];
private memoryManager: MemoryManager;
constructor() {
this.memoryManager = MemoryManager.getInstance();
}
async start(constraints: MediaStreamConstraints = { audio: true }): Promise<void> {
// Clean up any existing recorder
this.cleanup();
this.stream = await navigator.mediaDevices.getUserMedia(constraints);
this.memoryManager.registerMediaStream(this.stream);
const options = {
mimeType: MediaRecorder.isTypeSupported('audio/webm;codecs=opus')
? 'audio/webm;codecs=opus'
: 'audio/webm'
};
this.mediaRecorder = new MediaRecorder(this.stream, options);
this.chunks = [];
this.mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0) {
this.chunks.push(event.data);
}
};
this.mediaRecorder.start();
}
stop(): Promise<Blob> {
return new Promise((resolve, reject) => {
if (!this.mediaRecorder) {
reject(new Error('MediaRecorder not initialized'));
return;
}
this.mediaRecorder.onstop = () => {
const blob = new Blob(this.chunks, {
type: this.mediaRecorder?.mimeType || 'audio/webm'
});
resolve(blob);
// Clean up after delivering the blob
setTimeout(() => this.cleanup(), 100);
};
this.mediaRecorder.stop();
});
}
cleanup(): void {
if (this.stream) {
this.memoryManager.cleanupMediaStream(this.stream);
this.stream = undefined;
}
if (this.mediaRecorder) {
if (this.mediaRecorder.state !== 'inactive') {
try {
this.mediaRecorder.stop();
} catch (e) {
// Ignore errors
}
}
this.mediaRecorder = undefined;
}
// Clear chunks
this.chunks = [];
}
isRecording(): boolean {
return this.mediaRecorder?.state === 'recording';
}
}