"use client";

import { useEffect, useMemo, useRef, useState } from "react";

// Minimal typings for the (still vendor-prefixed) Web Speech API, which is
// absent from older DOM lib versions.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type SpeechRecognitionInstance = any;
type SpeechRecognitionConstructor = new () => SpeechRecognitionInstance;

type ChatMessage = {
  role: "user" | "assistant";
  content: string;
  timestamp: number;
};

type ApiResponse = {
  response: string;
  shouldCaptureEmail: boolean;
  suggestedActions: string[];
};

type SiteMenteVoiceWidgetProps = {
  initialLang?: "es" | "en";
};

// Suggested first-turn prompts, rendered only while the greeting is the sole message.
const quickActions = {
  es: [
    { label: "¿Cuánto cuesta?", icon: "💰" },
    { label: "Ver casos de éxito", icon: "🎯" },
    { label: "¿Cómo funciona?", icon: "⚙️" },
  ],
  en: [
    { label: "Pricing?", icon: "💰" },
    { label: "Success stories", icon: "🎯" },
    { label: "How it works?", icon: "⚙️" },
  ],
} as const;

const initialGreeting = {
  es: "Hola, soy el cerebro de SiteMente. ¿En qué te puedo ayudar hoy?",
  en: "Hi, I'm the SiteMente brain. How can I help you today?",
} as const;

/**
 * Floating chat widget with voice input (Web Speech API SpeechRecognition)
 * and spoken replies (SpeechSynthesis). Messages are POSTed to
 * `/api/chat/agent` with the current locale and the last six turns of history.
 *
 * @param initialLang - Starting UI language; the widget re-syncs if it changes.
 */
export default function SiteMenteVoiceWidget({
  initialLang = "es",
}: SiteMenteVoiceWidgetProps) {
  const [isOpen, setIsOpen] = useState(false);
  const [lang, setLang] = useState<"es" | "en">(initialLang);
  // FIX: annotate explicitly — the original untyped useState inferred
  // `{ role: string; ... }[]`, losing the "user" | "assistant" union.
  const [messages, setMessages] = useState<ChatMessage[]>([
    {
      role: "assistant",
      content: initialGreeting[initialLang],
      timestamp: Date.now(),
    },
  ]);
  const [input, setInput] = useState("");
  const [voiceMode, setVoiceMode] = useState(true);
  const [isRecording, setIsRecording] = useState(false);
  const [isSpeaking, setIsSpeaking] = useState(false);
  const [isLoading, setIsLoading] = useState(false);
  const [transcript, setTranscript] = useState("");
  const [speechSupported, setSpeechSupported] = useState(true);
  const [showTooltip, setShowTooltip] = useState(false);

  // FIX: the originals were untyped `useRef(null)`, which infers
  // MutableRefObject<null> and rejects every later assignment under strict mode.
  const recognitionRef = useRef<SpeechRecognitionInstance | null>(null);
  const isRecordingRef = useRef(false);
  const transcriptRef = useRef("");
  const messagesEndRef = useRef<HTMLDivElement | null>(null);
  // FIX: recognition.onend is wired in an effect keyed only on [lang]; calling
  // handleSend directly from it would capture a stale closure (stale
  // `messages`, `isLoading`). Route the call through a ref that is refreshed
  // every render — same pattern already used for isRecordingRef/transcriptRef.
  const handleSendRef = useRef<(text: string) => void>(() => {});

  const localeLabel = useMemo(() => (lang === "es" ? "ES" : "EN"), [lang]);

  // Keep internal language in sync if the prop changes after mount.
  useEffect(() => {
    setLang(initialLang);
  }, [initialLang]);

  // Show the "try voice" tooltip once per browser, persisted in localStorage.
  useEffect(() => {
    const seen = window.localStorage.getItem("sitemente:voice-tooltip");
    if (!seen) {
      setShowTooltip(true);
      window.localStorage.setItem("sitemente:voice-tooltip", "1");
      const timeout = window.setTimeout(() => setShowTooltip(false), 4000);
      return () => window.clearTimeout(timeout);
    }
    return undefined;
  }, []);

  // Mirror volatile state into refs so the per-language recognizer callbacks
  // can read current values without being recreated.
  useEffect(() => {
    isRecordingRef.current = isRecording;
  }, [isRecording]);

  useEffect(() => {
    transcriptRef.current = transcript;
  }, [transcript]);

  // (Re)create the speech recognizer whenever the language changes.
  useEffect(() => {
    // FIX: the original cast only declared `webkitSpeechRecognition` yet also
    // read `.SpeechRecognition` — a type error under strict DOM libs. Declare
    // both optional constructors in one cast.
    const SpeechRecognitionImpl =
      typeof window !== "undefined"
        ? (() => {
            const speechWindow = window as typeof window & {
              SpeechRecognition?: SpeechRecognitionConstructor;
              webkitSpeechRecognition?: SpeechRecognitionConstructor;
            };
            return (
              speechWindow.SpeechRecognition ||
              speechWindow.webkitSpeechRecognition
            );
          })()
        : undefined;

    if (!SpeechRecognitionImpl) {
      setSpeechSupported(false);
      return;
    }

    const recognition = new SpeechRecognitionImpl();
    recognition.lang = lang === "es" ? "es-ES" : "en-US";
    recognition.interimResults = true;
    recognition.continuous = false;

    recognition.onresult = (event: SpeechRecognitionInstance) => {
      // Concatenate the best alternative of every result chunk.
      const result = Array.from(event.results)
        .map((res: SpeechRecognitionInstance) => res[0]?.transcript ?? "")
        .join(" ");
      setTranscript(result.trim());
    };
    recognition.onerror = () => {
      setIsRecording(false);
    };
    recognition.onend = () => {
      // NOTE(review): isRecordingRef is synced by an effect, so a manual
      // stopRecording() may race this check and drop the transcript — kept
      // from the original; confirm intended UX before tightening.
      if (isRecordingRef.current) {
        setIsRecording(false);
        const finalTranscript = transcriptRef.current.trim();
        if (finalTranscript) {
          handleSendRef.current(finalTranscript);
        }
        setTranscript("");
      }
    };

    recognitionRef.current = recognition;
  }, [lang]);

  // Keep the newest message pinned into view.
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
  }, [messages, isOpen]);

  // Cancel in-flight speech output when speaking stops or the widget unmounts.
  useEffect(() => {
    if (!isSpeaking) return;
    return () => {
      window.speechSynthesis?.cancel();
    };
  }, [isSpeaking]);

  // Re-localize the greeting in place when the user switches language.
  useEffect(() => {
    setMessages((prev) => {
      if (prev.length === 0) return prev;
      const updated = [...prev];
      if (updated[0].role === "assistant") {
        updated[0] = { ...updated[0], content: initialGreeting[lang] };
      }
      return updated;
    });
  }, [lang]);

  const startRecording = () => {
    if (!speechSupported || !recognitionRef.current) return;
    setTranscript("");
    setIsRecording(true);
    recognitionRef.current.start();
  };

  const stopRecording = () => {
    if (!recognitionRef.current) return;
    recognitionRef.current.stop();
    setIsRecording(false);
  };

  // Read `text` aloud in the current language, replacing any ongoing speech.
  const speak = (text: string) => {
    if (!("speechSynthesis" in window)) return;
    window.speechSynthesis.cancel();
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.lang = lang === "es" ? "es-ES" : "en-US";
    utterance.onstart = () => setIsSpeaking(true);
    utterance.onend = () => setIsSpeaking(false);
    utterance.onerror = () => setIsSpeaking(false);
    window.speechSynthesis.speak(utterance);
  };

  /**
   * Send one user message to the agent endpoint and append the reply.
   * No-ops on blank input or while a previous request is in flight.
   */
  const handleSend = async (text: string) => {
    if (!text.trim() || isLoading) return;

    const userMessage: ChatMessage = {
      role: "user",
      content: text,
      timestamp: Date.now(),
    };
    setMessages((prev) => [...prev, userMessage]);
    setInput("");
    setIsLoading(true);

    try {
      const response = await fetch("/api/chat/agent", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          message: text,
          locale: lang,
          history: messages.slice(-6), // last six turns for context
        }),
      });
      if (!response.ok) {
        throw new Error("Failed to fetch response.");
      }
      const data = (await response.json()) as ApiResponse;
      const assistantMessage: ChatMessage = {
        role: "assistant",
        content: data.response,
        timestamp: Date.now(),
      };
      setMessages((prev) => [...prev, assistantMessage]);
      if (voiceMode) {
        speak(data.response);
      }
    } catch {
      // FIX: the original bound an unused `error`. Show a localized fallback
      // so the failure is visible to the user.
      const fallbackMessage: ChatMessage = {
        role: "assistant",
        content:
          lang === "es"
            ? "Hubo un problema al responder. ¿Quieres intentarlo de nuevo?"
            : "There was a problem responding. Want to try again?",
        timestamp: Date.now(),
      };
      setMessages((prev) => [...prev, fallbackMessage]);
    } finally {
      setIsLoading(false);
    }
  };

  // Refresh the ref after every render so recognition.onend always calls the
  // latest handleSend (see handleSendRef above).
  useEffect(() => {
    handleSendRef.current = handleSend;
  });

  // Recording takes visual priority over speaking; idle also shows the mic.
  const voiceIndicator = isRecording ? "🎤" : isSpeaking ? "🔊" : "🎤";

  // NOTE(review): the JSX below is reconstructed — the original element tags
  // were corrupted in this file. All user-facing strings and handler wiring
  // match the surviving fragments; class names are best-effort and should be
  // reconciled with the project's design system.
  return (
    <>
      {!isOpen && (
        <div className="fixed bottom-6 right-6 z-50 flex flex-col items-end gap-2">
          {showTooltip && (
            <span className="rounded-full bg-black/80 px-3 py-1 text-xs text-white">
              {lang === "es" ? "Prueba la voz" : "Try voice"}
            </span>
          )}
          <button
            type="button"
            onClick={() => setIsOpen(true)}
            className="rounded-full bg-indigo-600 px-4 py-2 text-sm font-semibold text-white shadow-lg"
          >
            ▶ Demo
          </button>
        </div>
      )}
      {isOpen && (
        <div className="fixed inset-0 z-50 flex items-end justify-end">
          {/* Backdrop closes the widget. */}
          <div
            className="absolute inset-0 bg-black/40"
            onClick={() => setIsOpen(false)}
          />
          <div className="relative m-4 flex max-h-[80vh] w-full max-w-sm flex-col overflow-hidden rounded-2xl bg-slate-900 text-white shadow-2xl">
            <header className="flex items-center justify-between gap-2 border-b border-white/10 p-4">
              <div>
                <h2 className="text-sm font-semibold">SiteMente IA</h2>
                <p className="text-xs text-white/60">
                  {lang === "es" ? "El cerebro de tu web" : "Your website brain"}
                </p>
              </div>
              <div className="flex items-center gap-2">
                <span aria-hidden>{voiceIndicator}</span>
                <button
                  type="button"
                  onClick={() => setLang((prev) => (prev === "es" ? "en" : "es"))}
                  className="rounded-full border border-white/20 px-2 py-1 text-xs"
                >
                  {localeLabel}
                </button>
                <button
                  type="button"
                  onClick={() => setVoiceMode((prev) => !prev)}
                  className="rounded-full border border-white/20 px-2 py-1 text-xs"
                  aria-label={voiceMode ? "text mode" : "voice mode"}
                >
                  {voiceMode ? "⌨️" : "🎤"}
                </button>
                <button
                  type="button"
                  onClick={() => setIsOpen(false)}
                  aria-label="Close"
                >
                  ✕
                </button>
              </div>
            </header>

            <div className="flex-1 space-y-3 overflow-y-auto p-4">
              {messages.length === 1 && (
                <div className="flex flex-wrap gap-2">
                  {quickActions[lang].map((action) => (
                    <button
                      key={action.label}
                      type="button"
                      onClick={() => handleSend(action.label)}
                      className="rounded-full border border-white/20 px-3 py-1 text-xs"
                    >
                      {action.icon} {action.label}
                    </button>
                  ))}
                </div>
              )}
              {messages.map((message, index) => (
                <div
                  key={`${message.timestamp}-${index}`}
                  className="flex items-start gap-2"
                >
                  {message.role === "assistant" && (
                    <span className="rounded-full bg-indigo-600 px-2 py-1 text-[10px]">
                      SM
                    </span>
                  )}
                  <div>
                    <p className="text-sm">{message.content}</p>
                    <p className="text-[10px] text-white/40">
                      {new Date(message.timestamp).toLocaleTimeString(
                        lang === "es" ? "es-ES" : "en-US",
                        { hour: "2-digit", minute: "2-digit" }
                      )}
                    </p>
                  </div>
                </div>
              ))}
              {isLoading && <p className="text-xs text-white/60">…</p>}
              {isSpeaking && (
                <p className="text-xs text-white/60">
                  {lang === "es" ? "Hablando..." : "Speaking..."}
                </p>
              )}
              {/* Scroll anchor for the auto-scroll effect. */}
              <div ref={messagesEndRef} />
            </div>

            <footer className="border-t border-white/10 p-4">
              {voiceMode ? (
                <div className="flex flex-col items-center gap-2">
                  {transcript && (
                    <p className="text-xs text-white/70">{transcript}</p>
                  )}
                  <button
                    type="button"
                    onClick={isRecording ? stopRecording : startRecording}
                    className="rounded-full bg-indigo-600 px-6 py-3 text-xl"
                    aria-label={isRecording ? "stop" : "record"}
                  >
                    🎤
                  </button>
                  <p className="text-xs text-white/60">
                    {isRecording
                      ? lang === "es"
                        ? "Escuchando..."
                        : "Listening..."
                      : isSpeaking
                        ? lang === "es"
                          ? "Hablando..."
                          : "Speaking..."
                        : lang === "es"
                          ? "Toca para hablar"
                          : "Tap to talk"}
                  </p>
                  {isSpeaking && (
                    <button
                      type="button"
                      onClick={() => {
                        window.speechSynthesis.cancel();
                        setIsSpeaking(false);
                      }}
                      className="text-xs underline"
                    >
                      ⏹
                    </button>
                  )}
                </div>
              ) : (
                <form
                  className="flex items-center gap-2"
                  onSubmit={(event) => {
                    event.preventDefault();
                    handleSend(input);
                  }}
                >
                  <input
                    value={input}
                    onChange={(event) => setInput(event.target.value)}
                    placeholder={
                      lang === "es"
                        ? "Escribe tu mensaje..."
                        : "Type your message..."
                    }
                    className="flex-1 rounded-full border border-white/20 bg-white/10 px-4 py-2 text-sm text-white placeholder:text-white/50 focus:border-white/50 focus:outline-none"
                  />
                  <button
                    type="submit"
                    className="rounded-full bg-indigo-600 px-4 py-2 text-sm"
                    aria-label="send"
                  >
                    ➤
                  </button>
                </form>
              )}
              {!speechSupported && (
                <p className="mt-2 text-xs text-amber-300">
                  {lang === "es"
                    ? "Tu navegador no soporta voz. Usa el modo texto."
                    : "Your browser doesn't support voice. Use text mode."}
                </p>
              )}
            </footer>
          </div>
        </div>
      )}
    </>
  );
}