feat(mission-control): restore MC tabs - temple, office, memory, claude, pdf-viewer, resume, resume-upload, temple-3d, demos

Also added:
- Memory API endpoints
- Briefs API endpoints
- AnveVoice stats API
- Claude spawn API
- TTS proxy
- Cleopatra voice widget
- api-auth middleware
This commit is contained in:
2026-03-23 16:30:44 +01:00
parent d5575b58e3
commit 45af56d9cf
30 changed files with 5092 additions and 715 deletions
+357
View File
@@ -0,0 +1,357 @@
"use client";
import { useState, useEffect, useRef } from "react";
type Language = "es" | "en";
const TTS_PROXY = "/api/tts-proxy";
// All user-visible copy, keyed by UI language.
// `satisfies` validates that every language carries the exact same set of keys
// (a missing or misspelled key in one locale becomes a compile error) while
// preserving the literal string types for `labels[lang]` lookups.
const labels = {
  es: {
    title: "Cleopatra",
    subtitle: "Asistente de Ventas IA Premium",
    status: {
      listening: "🎤 Escuchando...",
      speaking: "🔊 Hablando...",
      thinking: "⏳ Pensando...",
      ready: "💬 Lista para ayudarte"
    },
    youSaid: "Dijiste:",
    cleopatra: "👑 Cleopatra:",
    micHint: "Toca el micrófono",
    speakingHint: "Habla ahora...",
    tryAgain: "Tengo problemas para conectar.",
    placeholder: "Escribe aquí..."
  },
  en: {
    title: "Cleopatra",
    subtitle: "Premium AI Sales Assistant",
    status: {
      listening: "🎤 Listening...",
      speaking: "🔊 Speaking...",
      thinking: "⏳ Thinking...",
      ready: "💬 Ready to help"
    },
    youSaid: "You said:",
    cleopatra: "👑 Cleopatra:",
    micHint: "Tap the microphone",
    speakingHint: "Speak now...",
    tryAgain: "I'm having trouble connecting.",
    placeholder: "Type here..."
  }
} satisfies Record<"es" | "en", {
  title: string;
  subtitle: string;
  status: { listening: string; speaking: string; thinking: string; ready: string };
  youSaid: string;
  cleopatra: string;
  micHint: string;
  speakingHint: string;
  tryAgain: string;
  placeholder: string;
}>;
/**
 * Bilingual (es/en) voice sales-assistant widget.
 *
 * Flow: mic button -> SpeechRecognition transcript -> POST /api/chat ->
 * reply spoken via the TTS proxy (falling back to browser speechSynthesis).
 * A text input offers the same round-trip without the microphone.
 */
export default function CleopatraVoiceWidget() {
  const [lang, setLang] = useState<Language>("es");
  const [isListening, setIsListening] = useState(false);
  const [isSpeaking, setIsSpeaking] = useState(false);
  const [isThinking, setIsThinking] = useState(false);
  const [transcript, setTranscript] = useState("");   // last recognized user speech
  const [lastReply, setLastReply] = useState("");     // last assistant reply shown/spoken
  const [inputText, setInputText] = useState("");     // typed (non-voice) input
  // Webkit SpeechRecognition has no standard lib.dom typing, hence `any`.
  const recognitionRef = useRef<any>(null);
  const audioRef = useRef<HTMLAudioElement | null>(null);
  const streamRef = useRef<MediaStream | null>(null);
  const t = labels[lang];

  /** Stop mic stream, recognition, proxy audio AND the browser-TTS fallback. */
  const stopAll = () => {
    if (streamRef.current) {
      streamRef.current.getTracks().forEach(track => track.stop());
      streamRef.current = null;
    }
    if (recognitionRef.current) {
      recognitionRef.current.abort();
      recognitionRef.current = null;
    }
    setIsListening(false);
    if (audioRef.current) {
      audioRef.current.pause();
      audioRef.current = null;
    }
    // FIX: previously a speechSynthesis fallback utterance kept talking after
    // stop/unmount because only the <audio> element was paused.
    if (typeof window !== "undefined" && window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
    setIsSpeaking(false);
  };

  // Tear everything down on unmount so the mic/audio don't keep running.
  useEffect(() => {
    return () => {
      stopAll();
    };
    // Intentionally mount-only; stopAll only touches refs and setters.
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, []);

  /** Speak `text` via the TTS proxy; fall back to browser speechSynthesis on failure. */
  const speak = async (text: string) => {
    try {
      setIsSpeaking(true);
      // Use proxy to fetch TTS (handles HTTP->HTTPS)
      const res = await fetch(TTS_PROXY, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ text, lang })
      });
      if (!res.ok) throw new Error("TTS failed");
      const data = await res.json();
      const base64Audio = data.audio;
      if (!base64Audio) throw new Error("No audio");
      // Play the base64 MP3 returned by the proxy.
      const audio = new Audio(`data:audio/mp3;base64,${base64Audio}`);
      audioRef.current = audio;
      audio.onended = () => {
        audioRef.current = null; // FIX: drop the stale ref once playback finishes
        setIsSpeaking(false);
      };
      audio.onerror = () => {
        setIsSpeaking(false);
        console.error("Audio playback error");
      };
      await audio.play();
    } catch (err) {
      console.error("TTS error:", err);
      // Fallback to browser TTS
      window.speechSynthesis.cancel(); // FIX: clear any queued utterance first
      const utterance = new SpeechSynthesisUtterance(text);
      utterance.lang = lang === "es" ? "es-ES" : "en-US";
      utterance.rate = 0.9;
      utterance.onstart = () => setIsSpeaking(true);
      utterance.onend = () => setIsSpeaking(false);
      utterance.onerror = () => setIsSpeaking(false);
      window.speechSynthesis.speak(utterance);
    }
  };

  /** Flip the UI/voice language between Spanish and English. */
  const toggleLang = () => {
    setLang(prev => prev === "es" ? "en" : "es");
  };

  /** Request mic access, then run one SpeechRecognition pass in the current language. */
  const startListening = async () => {
    try {
      stopAll();
      // getUserMedia triggers the permission prompt; the stream itself is only
      // held so it can be released in stopAll — recognition uses its own capture.
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: { echoCancellation: true, noiseSuppression: true, autoGainControl: true }
      });
      streamRef.current = stream;
      const SpeechRecognition = (window as any).webkitSpeechRecognition || (window as any).SpeechRecognition;
      if (!SpeechRecognition) {
        // FIX: the Spanish branch previously showed the English message.
        alert(lang === "es" ? "El reconocimiento de voz no es compatible" : "Speech recognition not supported");
        return;
      }
      const recognition = new SpeechRecognition();
      recognition.continuous = false;       // single utterance per tap
      recognition.interimResults = true;    // live transcript while speaking
      recognition.lang = lang === "es" ? "es-ES" : "en-US";
      recognitionRef.current = recognition;
      recognition.onstart = () => {
        setIsListening(true);
        setTranscript("");
      };
      recognition.onresult = (event: any) => {
        const result = event.results[0];
        const text = result[0].transcript;
        setTranscript(text);
        if (result.isFinal) {
          handleSend(text);
          setIsListening(false);
        }
      };
      recognition.onend = () => setIsListening(false);
      recognition.onerror = (event: any) => {
        console.error("Speech error:", event.error);
        setIsListening(false);
        // "no-speech" is a normal timeout, not worth an alert.
        if (event.error !== "no-speech") {
          alert(lang === "es" ? "Error de voz: " + event.error : "Speech error: " + event.error);
        }
      };
      recognition.start();
    } catch (err) {
      console.error("Mic error:", err);
      alert(lang === "es" ? "No se pudo acceder al micrófono" : "Could not access microphone");
    }
  };

  /** Abort an in-progress recognition without sending anything. */
  const stopListening = () => {
    if (recognitionRef.current) recognitionRef.current.abort();
    setIsListening(false);
  };

  /** Send user text to the chat API, show the reply, and speak it aloud. */
  const handleSend = async (text: string) => {
    if (!text.trim()) return;
    setIsThinking(true);
    setLastReply("");
    try {
      const res = await fetch("/api/chat", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ text, language: lang })
      });
      const data = await res.json();
      const reply = data.response || (lang === "es" ? "Estoy aquí para ayudarte." : "I'm here to help.");
      setLastReply(reply);
      await speak(reply);
    } catch (err) {
      console.error("Error:", err);
      const errorReply = t.tryAgain;
      setLastReply(errorReply);
      await speak(errorReply);
    }
    setIsThinking(false);
  };

  /** Submit handler for the typed-input form. */
  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    if (inputText.trim()) {
      handleSend(inputText);
      setInputText("");
    }
  };

  /** Cut off the current reply (proxy audio or browser-TTS fallback). */
  const skipSpeaking = () => {
    if (audioRef.current) {
      audioRef.current.pause();
      audioRef.current = null;
    }
    // FIX: also silence the speechSynthesis fallback, which pause() cannot stop.
    if (typeof window !== "undefined" && window.speechSynthesis) {
      window.speechSynthesis.cancel();
    }
    setIsThinking(false);
    setIsSpeaking(false);
  };

  return (
    <div className="bg-gradient-to-br from-purple-950 to-indigo-950 p-8 rounded-3xl shadow-2xl max-w-lg mx-auto border border-purple-500/30">
      {/* Language Toggle */}
      <div className="flex justify-end mb-4">
        <button
          onClick={toggleLang}
          className="px-3 py-1 rounded-full text-sm font-bold bg-purple-800/50 hover:bg-purple-700/50 text-white border border-purple-500/30 transition-all"
        >
          {lang === "es" ? "🇪🇸 ES" : "🇺🇸 EN"}
        </button>
      </div>
      {/* Header */}
      <div className="text-center mb-6">
        <div className="text-5xl mb-3">👑</div>
        <h3 className="text-2xl font-bold text-white">{t.title}</h3>
        <p className="text-purple-300 text-sm">{t.subtitle}</p>
      </div>
      {/* Status */}
      <div className="flex justify-center mb-6">
        <div className={`px-5 py-2 rounded-full text-sm font-medium ${
          isListening ? "bg-red-500/80 animate-pulse text-white" :
          isSpeaking ? "bg-green-500/80 animate-pulse text-white" :
          isThinking ? "bg-yellow-500/80 text-white" :
          "bg-gray-700/80 text-gray-300"
        }`}>
          {isListening ? t.status.listening :
           isSpeaking ? t.status.speaking :
           isThinking ? t.status.thinking :
           t.status.ready}
        </div>
      </div>
      {/* Visualizer (bar heights are decorative randomness, not real audio levels) */}
      <div className="flex justify-center items-end gap-1 h-16 mb-6">
        {[...Array(16)].map((_, i) => (
          <div
            key={i}
            className={`w-2 rounded-full transition-all duration-75 ${
              isListening || isSpeaking ? "bg-gradient-to-t from-purple-500 to-pink-500" : "bg-purple-900"
            }`}
            style={{
              height: isListening || isSpeaking ? `${20 + Math.random() * 40}px` : isThinking ? `${10 + Math.sin(Date.now() / 200 + i) * 10 + 10}px` : "8px"
            }}
          />
        ))}
      </div>
      {/* Transcript */}
      {transcript && (
        <div className="bg-black/40 rounded-xl p-4 mb-4 border border-purple-500/20">
          <p className="text-xs text-purple-300 mb-1">{t.youSaid}</p>
          <p className="text-white text-lg">{transcript}</p>
        </div>
      )}
      {/* Reply */}
      {lastReply && (
        <div className="bg-gradient-to-r from-purple-900/60 to-indigo-900/60 rounded-xl p-4 mb-4 border border-green-500/20">
          <p className="text-xs text-green-300 mb-1">{t.cleopatra}</p>
          <p className="text-white">{lastReply}</p>
        </div>
      )}
      {/* Text Input */}
      <form onSubmit={handleSubmit} className="mb-4">
        <div className="flex gap-2">
          <input
            type="text"
            value={inputText}
            onChange={(e) => setInputText(e.target.value)}
            placeholder={t.placeholder}
            className="flex-1 bg-black/40 border border-purple-500/30 rounded-xl px-4 py-3 text-white placeholder-gray-500 focus:outline-none focus:border-purple-500"
          />
          {/* FIX: button previously rendered empty (no glyph, no accessible name) */}
          <button
            type="submit"
            disabled={isThinking || isSpeaking}
            aria-label={lang === "es" ? "Enviar" : "Send"}
            className="px-6 py-3 bg-purple-600 hover:bg-purple-500 disabled:opacity-50 rounded-xl text-white font-bold transition-all"
          >
            ➤
          </button>
        </div>
      </form>
      {/* Controls */}
      <div className="flex justify-center gap-4">
        <button
          onClick={(e) => {
            e.preventDefault();
            e.stopPropagation();
            if (isListening) stopListening();
            else startListening();
          }}
          aria-label={isListening ? "Stop" : "Mic"}
          className={`w-20 h-20 rounded-full flex items-center justify-center text-3xl transition-all shadow-lg ${
            isListening ? "bg-red-500 hover:bg-red-600 animate-pulse ring-4 ring-red-400/50" : "bg-gradient-to-br from-purple-500 to-indigo-500 hover:from-purple-600 hover:to-indigo-600"
          } text-white`}
        >
          {isListening ? "⏹" : "🎤"}
        </button>
        {(isSpeaking || isThinking) && (
          /* FIX: skip button previously rendered empty (no glyph, no accessible name) */
          <button
            onClick={(e) => {
              e.preventDefault();
              skipSpeaking();
            }}
            aria-label={lang === "es" ? "Saltar" : "Skip"}
            className="w-14 h-14 rounded-full bg-orange-500 hover:bg-orange-600 flex items-center justify-center text-2xl"
          >
            ⏭
          </button>
        )}
      </div>
      {/* Hint */}
      <p className="text-center text-xs text-purple-400 mt-5">
        {isListening ? t.speakingHint : t.micHint}
      </p>
    </div>
  );
}