// Main application JavaScript with PWA support
document.addEventListener('DOMContentLoaded', function() {
    // Register service worker
    if ('serviceWorker' in navigator) {
        registerServiceWorker();
    }

    // Initialize app
    initApp();

    // Initialize the real-time interpreter
    initRealtimeInterpreter();

    // Check for PWA installation prompts
    initInstallPrompt();
});

// Service Worker Registration
async function registerServiceWorker() {
    try {
        const registration = await navigator.serviceWorker.register('/service-worker.js');
        console.log('Service Worker registered with scope:', registration.scope);

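        // Periodic Background Sync is currently exposed only in Chromium-based
        // browsers, and generally only for installed PWAs, which is why the
        // feature and permission checks below are so defensive.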
        // Setup periodic sync if available
        if ('periodicSync' in registration) {
            // Request permission for background sync
            const status = await navigator.permissions.query({
                name: 'periodic-background-sync',
            });

            if (status.state === 'granted') {
                try {
                    // Register for background sync to check for updates
                    await registration.periodicSync.register('translation-updates', {
                        minInterval: 24 * 60 * 60 * 1000, // once per day
                    });
                    console.log('Periodic background sync registered');
                } catch (error) {
                    console.error('Periodic background sync could not be registered:', error);
                }
            }
        }

        // Setup push notification if available
        if ('PushManager' in window) {
            setupPushNotifications(registration);
        }
    } catch (error) {
        console.error('Service Worker registration failed:', error);
    }
}

// Initialize the main application
function initApp() {
    // DOM elements
    const recordBtn = document.getElementById('recordBtn');
    const translateBtn = document.getElementById('translateBtn');
    const sourceText = document.getElementById('sourceText');
    const translatedText = document.getElementById('translatedText');
    const sourceLanguage = document.getElementById('sourceLanguage');
    const targetLanguage = document.getElementById('targetLanguage');
    const playSource = document.getElementById('playSource');
    const playTranslation = document.getElementById('playTranslation');
    const clearSource = document.getElementById('clearSource');
    const clearTranslation = document.getElementById('clearTranslation');
    const statusIndicator = document.getElementById('statusIndicator');
    const progressContainer = document.getElementById('progressContainer');
    const progressBar = document.getElementById('progressBar');
    const audioPlayer = document.getElementById('audioPlayer');

    // Set initial values
    let isRecording = false;
    let mediaRecorder = null;
    let audioChunks = [];
    let currentSourceText = '';
    let currentTranslationText = '';

    // Make sure target language is different from source
    if (targetLanguage.value === sourceLanguage.value) {
        targetLanguage.selectedIndex = 1;
    }

    // Event listeners for language selection
    sourceLanguage.addEventListener('change', function() {
        if (targetLanguage.value === sourceLanguage.value) {
            for (let i = 0; i < targetLanguage.options.length; i++) {
                if (targetLanguage.options[i].value !== sourceLanguage.value) {
                    targetLanguage.selectedIndex = i;
                    break;
                }
            }
        }
    });

    targetLanguage.addEventListener('change', function() {
        if (targetLanguage.value === sourceLanguage.value) {
            for (let i = 0; i < sourceLanguage.options.length; i++) {
                if (sourceLanguage.options[i].value !== targetLanguage.value) {
                    sourceLanguage.selectedIndex = i;
                    break;
                }
            }
        }
    });

    // Record button click event
    recordBtn.addEventListener('click', function() {
        if (isRecording) {
            stopRecording();
        } else {
            startRecording();
        }
    });

    // Function to start recording
    function startRecording() {
        navigator.mediaDevices.getUserMedia({ audio: true })
            .then(stream => {
                mediaRecorder = new MediaRecorder(stream);
                audioChunks = [];

                mediaRecorder.addEventListener('dataavailable', event => {
                    audioChunks.push(event.data);
                });

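                // Note: MediaRecorder usually records into a compressed container
                // (audio/webm or audio/ogg depending on the browser); the
                // 'audio/wav' type on the Blob below is only a label, so the
                // backend is assumed to sniff or convert the real format before
                // transcription.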
                mediaRecorder.addEventListener('stop', () => {
                    const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
                    transcribeAudio(audioBlob);
                });

                mediaRecorder.start();
                isRecording = true;
                recordBtn.classList.add('recording');
                recordBtn.classList.replace('btn-primary', 'btn-danger');
                recordBtn.innerHTML = '<i class="fas fa-stop"></i>';
                statusIndicator.textContent = 'Recording... Click to stop';
            })
            .catch(error => {
                console.error('Error accessing microphone:', error);
                alert('Error accessing microphone. Please make sure you have given permission for microphone access.');
            });
    }

    // Function to stop recording
    function stopRecording() {
        mediaRecorder.stop();
        isRecording = false;
        recordBtn.classList.remove('recording');
        recordBtn.classList.replace('btn-danger', 'btn-primary');
        recordBtn.innerHTML = '<i class="fas fa-microphone"></i>';
        statusIndicator.textContent = 'Processing audio...';

        // Stop all audio tracks
        mediaRecorder.stream.getTracks().forEach(track => track.stop());
    }

    // Function to transcribe audio
    function transcribeAudio(audioBlob) {
        const formData = new FormData();
        formData.append('audio', audioBlob);
        formData.append('source_lang', sourceLanguage.value);

        showProgress();

        fetch('/transcribe', {
            method: 'POST',
            body: formData
        })
        .then(response => response.json())
        .then(data => {
            hideProgress();

            if (data.success) {
                currentSourceText = data.text;
                sourceText.innerHTML = `<p>${data.text}</p>`;
                playSource.disabled = false;
                translateBtn.disabled = false;
                statusIndicator.textContent = 'Transcription complete';

                // Cache the transcription in IndexedDB
                saveToIndexedDB('transcriptions', {
                    text: data.text,
                    language: sourceLanguage.value,
                    timestamp: new Date().toISOString()
                });
            } else {
                sourceText.innerHTML = `<p class="text-danger">Error: ${data.error}</p>`;
                statusIndicator.textContent = 'Transcription failed';
            }
        })
        .catch(error => {
            hideProgress();
            console.error('Transcription error:', error);
            sourceText.innerHTML = `<p class="text-danger">Failed to transcribe audio. Please try again.</p>`;
            statusIndicator.textContent = 'Transcription failed';
        });
    }

    // Translate button click event
    translateBtn.addEventListener('click', function() {
        if (!currentSourceText) {
            return;
        }

        statusIndicator.textContent = 'Translating...';
        showProgress();

        fetch('/translate', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                text: currentSourceText,
                source_lang: sourceLanguage.value,
                target_lang: targetLanguage.value
            })
        })
        .then(response => response.json())
        .then(data => {
            hideProgress();

            if (data.success) {
                currentTranslationText = data.translation;
                translatedText.innerHTML = `<p>${data.translation}</p>`;
                playTranslation.disabled = false;
                statusIndicator.textContent = 'Translation complete';

                // Cache the translation in IndexedDB
                saveToIndexedDB('translations', {
                    sourceText: currentSourceText,
                    sourceLanguage: sourceLanguage.value,
                    targetText: data.translation,
                    targetLanguage: targetLanguage.value,
                    timestamp: new Date().toISOString()
                });
            } else {
                translatedText.innerHTML = `<p class="text-danger">Error: ${data.error}</p>`;
                statusIndicator.textContent = 'Translation failed';
            }
        })
        .catch(error => {
            hideProgress();
            console.error('Translation error:', error);
            translatedText.innerHTML = `<p class="text-danger">Failed to translate. Please try again.</p>`;
            statusIndicator.textContent = 'Translation failed';
        });
    });

    // Play source text
    playSource.addEventListener('click', function() {
        if (!currentSourceText) return;

        playAudio(currentSourceText, sourceLanguage.value);
        statusIndicator.textContent = 'Playing source audio...';
    });

    // Play translation
    playTranslation.addEventListener('click', function() {
        if (!currentTranslationText) return;

        playAudio(currentTranslationText, targetLanguage.value);
        statusIndicator.textContent = 'Playing translation audio...';
    });

    // Function to play audio via TTS
    function playAudio(text, language) {
        showProgress();

        fetch('/speak', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                text: text,
                language: language
            })
        })
        .then(response => response.json())
        .then(data => {
            hideProgress();

            if (data.success) {
                audioPlayer.src = data.audio_url;
                audioPlayer.onended = function() {
                    statusIndicator.textContent = 'Ready';
                };
                audioPlayer.play();
            } else {
                statusIndicator.textContent = 'TTS failed';
                alert('Failed to play audio: ' + data.error);
            }
        })
        .catch(error => {
            hideProgress();
            console.error('TTS error:', error);
            statusIndicator.textContent = 'TTS failed';
        });
    }

    // Clear buttons
    clearSource.addEventListener('click', function() {
        sourceText.innerHTML = '<p class="text-muted">Your transcribed text will appear here...</p>';
        currentSourceText = '';
        playSource.disabled = true;
        translateBtn.disabled = true;
    });

    clearTranslation.addEventListener('click', function() {
        translatedText.innerHTML = '<p class="text-muted">Translation will appear here...</p>';
        currentTranslationText = '';
        playTranslation.disabled = true;
    });

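    // The progress bar below is purely cosmetic: showProgress() animates it in
    // 5% steps every 100 ms and parks it around 90% until hideProgress() snaps
    // it to 100% and hides the container again.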
    // Progress indicator functions
    function showProgress() {
        progressContainer.classList.remove('d-none');
        let progress = 0;
        const interval = setInterval(() => {
            progress += 5;
            if (progress > 90) {
                clearInterval(interval);
            }
            progressBar.style.width = `${progress}%`;
        }, 100);
        progressBar.dataset.interval = interval;
    }

    function hideProgress() {
        const interval = progressBar.dataset.interval;
        if (interval) {
            clearInterval(Number(interval));
        }
        progressBar.style.width = '100%';
        setTimeout(() => {
            progressContainer.classList.add('d-none');
            progressBar.style.width = '0%';
        }, 500);
    }

    // Check TTS server status on page load if the alert element exists
    const ttsServerAlert = document.getElementById('ttsServerAlert');
    if (ttsServerAlert) {
        checkTtsServer();
    }

    // Function to check TTS server status
    function checkTtsServer() {
        const ttsServerMessage = document.getElementById('ttsServerMessage');
        const ttsServerUrl = document.getElementById('ttsServerUrl');

        fetch('/check_tts_server')
        .then(response => response.json())
        .then(data => {
            let currentTtsServerUrl = data.url;
            if (ttsServerUrl) ttsServerUrl.value = currentTtsServerUrl;

            // Load saved API key if available
            const savedApiKey = localStorage.getItem('ttsApiKeySet');
            const ttsApiKey = document.getElementById('ttsApiKey');
            if (ttsApiKey && savedApiKey === 'true') {
                ttsApiKey.placeholder = '••••••• (API key saved)';
            }

            if (ttsServerAlert && ttsServerMessage) {
                if (data.status === 'error' || data.status === 'auth_error') {
                    ttsServerAlert.classList.remove('d-none');
                    ttsServerAlert.classList.remove('alert-success');
                    ttsServerAlert.classList.add('alert-warning');
                    ttsServerMessage.textContent = data.message;

                    if (data.status === 'auth_error') {
                        ttsServerMessage.textContent = 'Authentication error with TTS server. Please check your API key.';
                    }
                } else {
                    ttsServerAlert.classList.remove('d-none');
                    ttsServerAlert.classList.remove('alert-warning');
                    ttsServerAlert.classList.add('alert-success');
                    ttsServerMessage.textContent = 'TTS server is online and ready.';
                    setTimeout(() => {
                        ttsServerAlert.classList.add('d-none');
                    }, 3000);
                }
            }
        })
        .catch(error => {
            console.error('Failed to check TTS server:', error);
            if (ttsServerAlert && ttsServerMessage) {
                ttsServerAlert.classList.remove('d-none');
                ttsServerAlert.classList.remove('alert-success');
                ttsServerAlert.classList.add('alert-warning');
                ttsServerMessage.textContent = 'Failed to check TTS server status.';
            }
        });
    }
}

// Real-time interpreter module
function initRealtimeInterpreter() {
    // DOM elements
    const realtimeBtn = document.getElementById('realtimeBtn');
    const realtimeStatusIndicator = document.getElementById('realtimeStatusIndicator');
    const sourceLanguage = document.getElementById('sourceLanguage');
    const targetLanguage = document.getElementById('targetLanguage');
    const sourceText = document.getElementById('sourceText');
    const translatedText = document.getElementById('translatedText');
    const audioPlayer = document.getElementById('audioPlayer');

    // SocketIO connection
    let socket = null;
    let mediaRecorder = null;
    let audioContext = null;
    let isInterpreting = false;
    let sessionId = null;

    // Audio processing variables
    const bufferSize = 16384; // Increased from 4096 for better speech capture
    let audioProcessor = null;
    let micStream = null;

    // Initialize the audio context
    function initAudioContext() {
        try {
            window.AudioContext = window.AudioContext || window.webkitAudioContext;
            audioContext = new AudioContext();
            return true;
        } catch (e) {
            console.error('Web Audio API is not supported in this browser', e);
            return false;
        }
    }

    // Connect to Socket.IO server
    function connectSocket() {
        if (socket) {
            return; // Already connected
        }

        // Connect to the same host where the Flask app is running
        socket = io.connect(window.location.origin, {
            forceNew: true
        });

        // Socket event handlers
        socket.on('connect', () => {
            console.log('Socket connected');
        });

        socket.on('disconnect', () => {
            console.log('Socket disconnected');
            stopInterpreting();
        });

        socket.on('error', (data) => {
            console.error('Socket error:', data.message);
            realtimeStatusIndicator.textContent = `Error: ${data.message}`;
        });

        socket.on('session_started', (data) => {
            sessionId = data.session_id;
            console.log('Interpreter session started:', sessionId);
            realtimeStatusIndicator.textContent = 'Interpreter active - listening...';
        });

        socket.on('session_ended', () => {
            console.log('Interpreter session ended');
            sessionId = null;
            realtimeStatusIndicator.textContent = 'Interpreter stopped';
        });

        socket.on('chunk_received', () => {
            // This is a confirmation that the server received our audio chunk
            // We can use this to update UI if needed
        });

        socket.on('transcription_result', (data) => {
            if (data.session_id === sessionId) {
                // Update source text with transcription
                if (sourceText.querySelector('p.text-muted')) {
                    sourceText.innerHTML = ''; // Clear placeholder
                }

                const p = document.createElement('p');
                p.textContent = data.text;
                sourceText.appendChild(p);

                // Auto-scroll to bottom
                sourceText.scrollTop = sourceText.scrollHeight;
            }
        });

        socket.on('translation_result', (data) => {
            if (data.session_id === sessionId) {
                // Update translated text
                if (translatedText.querySelector('p.text-muted')) {
                    translatedText.innerHTML = ''; // Clear placeholder
                }

                const p = document.createElement('p');
                p.textContent = data.text;
                translatedText.appendChild(p);

                // Auto-scroll to bottom
                translatedText.scrollTop = translatedText.scrollHeight;
            }
        });

        socket.on('audio_ready', (data) => {
            if (data.session_id === sessionId) {
                // Play the translated audio
                audioPlayer.src = data.audio_url;
                audioPlayer.play();
            }
        });
    }

    // Start the real-time interpreter
    function startInterpreting() {
        if (!initAudioContext()) {
            alert('Your browser does not support the Web Audio API required for the real-time interpreter.');
            return;
        }

        connectSocket();

        // Request microphone access
        navigator.mediaDevices.getUserMedia({ audio: true, video: false })
            .then((stream) => {
                micStream = stream;

                // Start a new interpreter session
                socket.emit('start_interpreter_session', {
                    source_lang: sourceLanguage.value,
                    target_lang: targetLanguage.value
                });

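                // ScriptProcessorNode is deprecated in favour of AudioWorklet,
                // but it is still widely supported and keeps the wiring simple:
                // mic -> processor -> destination, with the actual work done in
                // the onaudioprocess callback below.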
                // Setup audio processing
                const audioInput = audioContext.createMediaStreamSource(stream);
                audioProcessor = audioContext.createScriptProcessor(bufferSize, 1, 1);

                // Connect the audio processing node
                audioInput.connect(audioProcessor);
                audioProcessor.connect(audioContext.destination);

                // Process audio data
                let silenceStart = performance.now();
                let isSilent = true;
                const silenceThreshold = 0.005; // Lower threshold to be more sensitive to speech
                const silenceDelay = 2000; // 2 seconds of silence before stopping

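                // Simple energy-based speech detection: each buffer of 16384
                // samples (roughly a third of a second at a 44.1/48 kHz
                // AudioContext) is reduced to its mean absolute amplitude and
                // compared against silenceThreshold; buffers are only forwarded
                // while speech is detected or within the silenceDelay window.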
                audioProcessor.onaudioprocess = function(e) {
                    if (!isInterpreting) return;

                    // Get audio data
                    const inputData = e.inputBuffer.getChannelData(0);

                    // Check for silence
                    let sum = 0;
                    for (let i = 0; i < inputData.length; i++) {
                        sum += Math.abs(inputData[i]);
                    }
                    const average = sum / inputData.length;

                    // Detect speech vs silence
                    if (average > silenceThreshold) {
                        if (isSilent) {
                            isSilent = false;
                            realtimeStatusIndicator.textContent = 'Interpreting...';
                        }
                        silenceStart = performance.now(); // Reset silence timer
                    } else if (!isSilent && (performance.now() - silenceStart > silenceDelay)) {
                        isSilent = true;
                        realtimeStatusIndicator.textContent = 'Waiting for speech...';
                    }

                    // Convert buffer to WAV format
                    const wavBuffer = convertToWav(inputData);

                    // Send to server if not silent or within silence delay
                    if (!isSilent || (performance.now() - silenceStart <= silenceDelay)) {
                        socket.emit('audio_chunk', {
                            audio: wavBuffer
                        });
                        console.log(`Sent audio chunk: ${wavBuffer.length} bytes`);
                    }
                };

                // Update UI
                isInterpreting = true;
                realtimeBtn.textContent = 'Stop Interpreter';
                realtimeBtn.classList.replace('btn-primary', 'btn-danger');
                realtimeStatusIndicator.textContent = 'Interpreter active - listening...';

                // Disable language selectors during interpretation
                sourceLanguage.disabled = true;
                targetLanguage.disabled = true;
            })
            .catch((error) => {
                console.error('Error accessing microphone:', error);
                realtimeStatusIndicator.textContent = 'Error: Microphone access denied';
                alert('Error accessing microphone. Please make sure you have given permission for microphone access.');
            });
    }

    // Stop the real-time interpreter
    function stopInterpreting() {
        if (!isInterpreting) return;

        // Stop audio processing
        if (audioProcessor) {
            audioProcessor.disconnect();
            audioProcessor = null;
        }

        // Stop microphone stream
        if (micStream) {
            micStream.getTracks().forEach(track => track.stop());
            micStream = null;
        }

        // End the interpreter session
        if (socket && socket.connected) {
            socket.emit('end_interpreter_session');
        }

        // Update UI
        isInterpreting = false;
        realtimeBtn.textContent = 'Start Interpreter';
        realtimeBtn.classList.replace('btn-danger', 'btn-primary');
        realtimeStatusIndicator.textContent = 'Interpreter ready';

        // Re-enable language selectors
        sourceLanguage.disabled = false;
        targetLanguage.disabled = false;
    }

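    // Each processing buffer is wrapped in its own standalone WAV file: a
    // 44-byte RIFF/PCM header followed by 16-bit little-endian samples at the
    // AudioContext's sample rate. The server is assumed to treat every chunk
    // as a complete clip rather than as a continuation of the previous one.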
    // Convert audio buffer to WAV format
    function convertToWav(audioBuffer) {
        const sampleRate = audioContext.sampleRate;
        const numberOfChannels = 1;
        const bitsPerSample = 16;

        // Log audio properties for debugging
        console.log(`Recording audio: ${sampleRate}Hz, ${numberOfChannels} channel(s), ${audioBuffer.length} samples`);

        // Calculate sizes and create ArrayBuffers
        const dataSize = audioBuffer.length * 2; // 16-bit samples
        const totalSize = 44 + dataSize; // 44 bytes for header + data size

        const buffer = new ArrayBuffer(totalSize);
        const view = new DataView(buffer);

        // Write WAV header
        writeString(view, 0, 'RIFF');
        view.setUint32(4, 36 + dataSize, true); // File size - 8
        writeString(view, 8, 'WAVE');
        writeString(view, 12, 'fmt ');
        view.setUint32(16, 16, true); // Format chunk length
        view.setUint16(20, 1, true); // PCM format
        view.setUint16(22, numberOfChannels, true);
        view.setUint32(24, sampleRate, true);
        view.setUint32(28, sampleRate * numberOfChannels * (bitsPerSample / 8), true); // Byte rate
        view.setUint16(32, numberOfChannels * (bitsPerSample / 8), true); // Block align
        view.setUint16(34, bitsPerSample, true);
        writeString(view, 36, 'data');
        view.setUint32(40, dataSize, true);

        // Write audio data
        let offset = 44;
        for (let i = 0; i < audioBuffer.length; i++) {
            // Convert float to int16
            const s = Math.max(-1, Math.min(1, audioBuffer[i]));
            const val = s < 0 ? s * 0x8000 : s * 0x7FFF;
            view.setInt16(offset, val, true);
            offset += 2;
        }

        // Helper function to write strings to DataView
        function writeString(view, offset, string) {
            for (let i = 0; i < string.length; i++) {
                view.setUint8(offset + i, string.charCodeAt(i));
            }
        }

        // Log the created buffer size
        console.log(`Created WAV buffer: ${buffer.byteLength} bytes`);

        return new Uint8Array(buffer);
    }

    // Toggle interpreter on button click
    realtimeBtn.addEventListener('click', function() {
        if (isInterpreting) {
            stopInterpreting();
        } else {
            startInterpreting();
        }
    });

    // Cleanup function for when the page is unloaded
    window.addEventListener('beforeunload', function() {
        stopInterpreting();
        if (socket) {
            socket.disconnect();
        }
    });

    // Initialize status
    realtimeStatusIndicator.textContent = 'Interpreter ready';
}

// IndexedDB functions for offline data storage
function openIndexedDB() {
    return new Promise((resolve, reject) => {
        const request = indexedDB.open('VoiceTranslatorDB', 1);

        request.onupgradeneeded = (event) => {
            const db = event.target.result;

            // Create stores for transcriptions and translations
            if (!db.objectStoreNames.contains('transcriptions')) {
                db.createObjectStore('transcriptions', { keyPath: 'timestamp' });
            }

            if (!db.objectStoreNames.contains('translations')) {
                db.createObjectStore('translations', { keyPath: 'timestamp' });
            }
        };

        request.onsuccess = (event) => {
            resolve(event.target.result);
        };

        request.onerror = (event) => {
            reject(event.target.error);
        };
    });
}

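// Both stores are keyed by their ISO timestamp, so store.add() below creates a
// new record per call and would reject a write with a duplicate timestamp
// rather than overwrite the existing entry.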
function saveToIndexedDB(storeName, data) {
    openIndexedDB().then(db => {
        const transaction = db.transaction([storeName], 'readwrite');
        const store = transaction.objectStore(storeName);
        store.add(data);
    }).catch(error => {
        console.error('Error saving to IndexedDB:', error);
    });
}

function loadSavedTranslations() {
    openIndexedDB().then(db => {
        const transaction = db.transaction(['translations'], 'readonly');
        const store = transaction.objectStore('translations');
        const request = store.getAll();

        request.onsuccess = (event) => {
            const translations = event.target.result;
            if (translations && translations.length > 0) {
                // Could add a history section or recently used translations
                console.log('Loaded saved translations:', translations.length);
            }
        };
    }).catch(error => {
        console.error('Error loading from IndexedDB:', error);
    });
}

// PWA installation prompt
function initInstallPrompt() {
    let deferredPrompt;
    const installButton = document.createElement('button');
    installButton.style.display = 'none';
    installButton.classList.add('btn', 'btn-success', 'fixed-bottom', 'm-3');
    installButton.innerHTML = 'Install Voice Translator <i class="fas fa-download ml-2"></i>';
    document.body.appendChild(installButton);

    window.addEventListener('beforeinstallprompt', (e) => {
        // Prevent Chrome 67 and earlier from automatically showing the prompt
        e.preventDefault();
        // Stash the event so it can be triggered later
        deferredPrompt = e;
        // Update UI to notify the user they can add to home screen
        installButton.style.display = 'block';

        installButton.addEventListener('click', (e) => {
            // Hide our user interface that shows our install button
            installButton.style.display = 'none';
            // Show the prompt
            deferredPrompt.prompt();
            // Wait for the user to respond to the prompt
            deferredPrompt.userChoice.then((choiceResult) => {
                if (choiceResult.outcome === 'accepted') {
                    console.log('User accepted the install prompt');
                } else {
                    console.log('User dismissed the install prompt');
                }
                deferredPrompt = null;
            });
        });
    });
}

// Push notification setup
function setupPushNotifications(swRegistration) {
    // First check if we already have permission
    if (Notification.permission === 'granted') {
        console.log('Notification permission already granted');
        subscribeToPushManager(swRegistration);
    } else if (Notification.permission !== 'denied') {
        // Otherwise, ask for permission
        Notification.requestPermission().then(function(permission) {
            if (permission === 'granted') {
                console.log('Notification permission granted');
                subscribeToPushManager(swRegistration);
            }
        });
    }
}

async function subscribeToPushManager(swRegistration) {
    try {
        // Get the server's public key
        const response = await fetch('/api/push-public-key');
        const data = await response.json();

        // Convert the base64 string to Uint8Array
        function urlBase64ToUint8Array(base64String) {
            const padding = '='.repeat((4 - base64String.length % 4) % 4);
            const base64 = (base64String + padding)
                .replace(/-/g, '+')
                .replace(/_/g, '/');

            const rawData = window.atob(base64);
            const outputArray = new Uint8Array(rawData.length);

            for (let i = 0; i < rawData.length; ++i) {
                outputArray[i] = rawData.charCodeAt(i);
            }
            return outputArray;
        }

        const convertedVapidKey = urlBase64ToUint8Array(data.publicKey);

        // Subscribe to push notifications
        const subscription = await swRegistration.pushManager.subscribe({
            userVisibleOnly: true,
            applicationServerKey: convertedVapidKey
        });

        // Send the subscription details to the server
        await fetch('/api/push-subscribe', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify(subscription)
        });

        console.log('User is subscribed to push notifications');
    } catch (error) {
        console.error('Failed to subscribe to push notifications:', error);
    }
}