feat: Voice chat in Mission Control with browser mic
This commit is contained in:
@@ -3,7 +3,8 @@
|
||||
import { useState } from "react";
import { motion } from "framer-motion";
import { useMissionControl } from "@/lib/mission-control/store";
import { Task, TaskStatus } from "@/lib/mission-control/types";
import VoiceChat from "./VoiceChat";
|
||||
|
||||
interface ProjectSummary {
|
||||
id: string;
|
||||
@@ -273,6 +274,11 @@ export default function MissionControlDashboard() {
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Voice Chat */}
|
||||
<div className="mt-8">
|
||||
<VoiceChat />
|
||||
</div>
|
||||
|
||||
{/* Quick Actions */}
|
||||
<div className="mt-8 flex gap-4">
|
||||
<button
|
||||
|
||||
@@ -0,0 +1,202 @@
|
||||
"use client";
|
||||
|
||||
import { useEffect, useRef, useState } from "react";
|
||||
import { motion } from "framer-motion";
|
||||
|
||||
interface VoiceChatProps {
|
||||
onTranscript?: (text: string) => void;
|
||||
}
|
||||
|
||||
export default function VoiceChat({ onTranscript }: VoiceChatProps) {
|
||||
const [isListening, setIsListening] = useState(false);
|
||||
const [transcript, setTranscript] = useState("");
|
||||
const [response, setResponse] = useState("");
|
||||
const [isProcessing, setIsProcessing] = useState(false);
|
||||
const [speechSupported, setSpeechSupported] = useState(true);
|
||||
|
||||
const recognitionRef = useRef<any>(null);
|
||||
const messagesEndRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
const SpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition;
|
||||
|
||||
if (!SpeechRecognition) {
|
||||
setSpeechSupported(false);
|
||||
return;
|
||||
}
|
||||
|
||||
const recognition = new SpeechRecognition();
|
||||
recognition.lang = "es-ES";
|
||||
recognition.interimResults = true;
|
||||
recognition.continuous = false;
|
||||
|
||||
recognition.onresult = (event: any) => {
|
||||
const result = Array.from(event.results)
|
||||
.map((res: any) => res[0]?.transcript ?? "")
|
||||
.join("");
|
||||
setTranscript(result);
|
||||
};
|
||||
|
||||
recognition.onend = () => {
|
||||
setIsListening(false);
|
||||
if (transcript.trim()) {
|
||||
handleSend(transcript);
|
||||
}
|
||||
};
|
||||
|
||||
recognition.onerror = (event: any) => {
|
||||
console.error("Speech recognition error:", event.error);
|
||||
setIsListening(false);
|
||||
};
|
||||
|
||||
recognitionRef.current = recognition;
|
||||
|
||||
return () => {
|
||||
if (recognitionRef.current) {
|
||||
recognitionRef.current.abort();
|
||||
}
|
||||
};
|
||||
}, []);
|
||||
|
||||
const toggleListening = () => {
|
||||
if (isListening) {
|
||||
recognitionRef.current?.stop();
|
||||
setIsListening(false);
|
||||
} else {
|
||||
setTranscript("");
|
||||
recognitionRef.current?.start();
|
||||
setIsListening(true);
|
||||
}
|
||||
};
|
||||
|
||||
const handleSend = async (text: string) => {
|
||||
if (!text.trim() || isProcessing) return;
|
||||
|
||||
setIsProcessing(true);
|
||||
const userMessage = text;
|
||||
setTranscript("");
|
||||
|
||||
// Simulate AI response (replace with actual API call)
|
||||
const responses: Record<string, string> = {
|
||||
"hola": "¡Hola! Soy Horus. ¿En qué puedo ayudarte hoy?",
|
||||
"hello": "Hi! I'm Horus. How can I help you today?",
|
||||
"qué tareas": "Tienes varias tareas pendientes en Mission Control. La más crítica es conseguir tu primer cliente pagado.",
|
||||
"what tasks": "You have several pending tasks in Mission Control. The most critical is closing your first paying client.",
|
||||
"estado": "Todo funciona bien. El servidor está en puerto 1284, el firewall configurado, y los briefings están programados para las 6am.",
|
||||
"status": "Everything is running fine. Server on port 1284, firewall configured, briefings scheduled for 6am.",
|
||||
};
|
||||
|
||||
// Simple keyword matching
|
||||
let botResponse = "Entendido. Puedo ayudarte con Mission Control, los proyectos SiteMente, o configurar el sistema.";
|
||||
const lower = userMessage.toLowerCase();
|
||||
|
||||
for (const [key, value] of Object.entries(responses)) {
|
||||
if (lower.includes(key)) {
|
||||
botResponse = value;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Simulate delay
|
||||
await new Promise(resolve => setTimeout(resolve, 800));
|
||||
|
||||
setResponse(botResponse);
|
||||
setIsProcessing(false);
|
||||
|
||||
// Speak the response
|
||||
speak(botResponse);
|
||||
|
||||
if (onTranscript) {
|
||||
onTranscript(userMessage);
|
||||
}
|
||||
};
|
||||
|
||||
const speak = (text: string) => {
|
||||
const utterance = new SpeechSynthesisUtterance(text);
|
||||
utterance.lang = "es-ES";
|
||||
speechSynthesis.speak(utterance);
|
||||
};
|
||||
|
||||
const handleKeyPress = (e: React.KeyboardEvent) => {
|
||||
if (e.key === "Enter" && !e.shiftKey) {
|
||||
e.preventDefault();
|
||||
handleSend(transcript);
|
||||
}
|
||||
};
|
||||
|
||||
if (!speechSupported) {
|
||||
return (
|
||||
<div className="p-4 rounded-xl border border-white/10 bg-white/5">
|
||||
<p className="text-white/60 text-sm">Voice not supported in this browser</p>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="rounded-xl border border-white/10 bg-white/5 overflow-hidden">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between px-4 py-3 border-b border-white/10">
|
||||
<div className="flex items-center gap-2">
|
||||
<span className="text-lg">🎤</span>
|
||||
<span className="font-semibold">Voice Chat</span>
|
||||
</div>
|
||||
<button
|
||||
onClick={toggleListening}
|
||||
className={`flex items-center gap-2 px-3 py-1.5 rounded-full text-sm font-medium transition ${
|
||||
isListening
|
||||
? "bg-red-500/20 text-red-400 animate-pulse"
|
||||
: "bg-brand-pink/20 text-brand-pink hover:bg-brand-pink/30"
|
||||
}`}
|
||||
>
|
||||
<span className={`w-2 h-2 rounded-full ${isListening ? "bg-red-500" : "bg-brand-pink"}`} />
|
||||
{isListening ? "Listening..." : "Click to speak"}
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Messages */}
|
||||
<div className="p-4 space-y-4 min-h-[200px] max-h-[300px] overflow-y-auto">
|
||||
{response && (
|
||||
<motion.div
|
||||
initial={{ opacity: 0, y: 10 }}
|
||||
animate={{ opacity: 1, y: 0 }}
|
||||
className="flex gap-3"
|
||||
>
|
||||
<div className="flex-shrink-0 w-8 h-8 rounded-full bg-brand-pink/20 flex items-center justify-center">
|
||||
👁️
|
||||
</div>
|
||||
<div className="flex-1 p-3 rounded-xl bg-white/10">
|
||||
<p className="text-sm">{response}</p>
|
||||
</div>
|
||||
</motion.div>
|
||||
)}
|
||||
|
||||
{isProcessing && (
|
||||
<div className="flex items-center gap-2 text-white/50">
|
||||
<span className="animate-pulse">Processing...</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Input */}
|
||||
<div className="p-3 border-t border-white/10">
|
||||
<div className="flex gap-2">
|
||||
<input
|
||||
type="text"
|
||||
value={transcript}
|
||||
onChange={(e) => setTranscript(e.target.value)}
|
||||
onKeyPress={handleKeyPress}
|
||||
placeholder="Or type a message..."
|
||||
className="flex-1 bg-white/10 border border-white/20 rounded-lg px-3 py-2 text-sm text-white placeholder:text-white/40 focus:outline-none focus:border-brand-pink"
|
||||
/>
|
||||
<button
|
||||
onClick={() => handleSend(transcript)}
|
||||
disabled={!transcript.trim() || isProcessing}
|
||||
className="px-4 py-2 bg-brand-pink rounded-lg text-sm font-medium disabled:opacity-50 disabled:cursor-not-allowed hover:bg-[#ff7bc0] transition"
|
||||
>
|
||||
Send
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
Reference in New Issue
Block a user