"use client";

import { useState, useRef, useEffect, useCallback } from "react";

interface MiniMaxVoiceWidgetProps {
|
|
businessName?: string;
|
|
businessType?: "restaurant" | "real-estate" | "clinic" | "car-rental" | "default";
|
|
theme?: "dark" | "light";
|
|
apiUrl?: string;
|
|
enabled?: boolean; // Toggle on/off for testing
|
|
}
type Lang = "es" | "en";
|
|
|
|
// Exact greeting as per spec
|
|
const SPANISH_GREETING = "Hola, soy el asistente de SiteMente. ¿En qué puedo ayudarte hoy?";
|
|
const ENGLISH_GREETING = "I can also speak English. How can I help you today?";
|
|
|
|
const SPANISH_MISUNDERSTAND = "No he entendido del todo, ¿podrías repetirlo o escribirlo, por favor?";
|
|
const ENGLISH_MISUNDERSTAND = "I didn't quite catch that. Could you repeat or type it, please?";
export default function MiniMaxVoiceWidget({
|
|
businessName = "SiteMente",
|
|
businessType = "restaurant",
|
|
theme = "dark",
|
|
apiUrl = "/api/ai/voice-chat-v2",
|
|
enabled = true
|
|
}: MiniMaxVoiceWidgetProps) {
|
|
const [isListening, setIsListening] = useState(false);
|
|
const [isSpeaking, setIsSpeaking] = useState(false);
|
|
const [messages, setMessages] = useState<{role: "user" | "assistant", content: string}[]>([]);
|
|
const [language, setLanguage] = useState<Lang>("es");
|
|
const [showChat, setShowChat] = useState(false);
|
|
const [error, setError] = useState<string | null>(null);
|
|
const [isInitialized, setIsInitialized] = useState(false);
|
|
|
|
const recognitionRef = useRef<SpeechRecognition | null>(null);
|
|
const synthRef = useRef<SpeechSynthesis | null>(null);
|
|
const messagesEndRef = useRef<HTMLDivElement>(null);
|
|
const inputRef = useRef<HTMLInputElement>(null);
|
|
|
|
// Initialize speech APIs
|
|
useEffect(() => {
|
|
if (typeof window === "undefined" || !enabled) return;
|
|
|
|
// Speech Recognition
|
|
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
|
|
if (SpeechRecognition) {
|
|
recognitionRef.current = new SpeechRecognition();
|
|
recognitionRef.current.continuous = false;
|
|
recognitionRef.current.interimResults = true;
|
|
recognitionRef.current.lang = "es-ES";
|
|
|
|
recognitionRef.current.onresult = (event) => {
|
|
const transcript = Array.from(event.results)
|
|
.map(result => result[0].transcript)
|
|
.join("");
|
|
|
|
if (event.results[0].isFinal && transcript.trim()) {
|
|
handleUserInput(transcript);
|
|
}
|
|
};
|
|
|
|
recognitionRef.current.onerror = (event) => {
|
|
console.error("Speech error:", event.error);
|
|
setIsListening(false);
|
|
if (event.error === "not-allowed") {
|
|
setError("Microphone access denied. Please allow microphone access.");
|
|
} else if (event.error !== "no-speech") {
|
|
setError(`Speech error: ${event.error}`);
|
|
}
|
|
};
|
|
|
|
recognitionRef.current.onend = () => setIsListening(false);
|
|
}
|
|
|
|
// Speech Synthesis
|
|
synthRef.current = window.speechSynthesis;
|
|
|
|
return () => {
|
|
recognitionRef.current?.stop();
|
|
synthRef.current?.cancel();
|
|
};
|
|
}, [enabled]);
|
|
|
|
// Initialize with greeting
|
|
useEffect(() => {
|
|
if (enabled && !isInitialized) {
|
|
setIsInitialized(true);
|
|
setMessages([{ role: "assistant", content: SPANISH_GREETING }]);
|
|
speak(SPANISH_GREETING);
|
|
}
|
|
}, [enabled]);
|
|
|
|
// Scroll to bottom
|
|
useEffect(() => {
|
|
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
|
|
}, [messages]);
|
|
|
|
// Focus input when chat opens
|
|
useEffect(() => {
|
|
if (showChat && inputRef.current) {
|
|
inputRef.current.focus();
|
|
}
|
|
}, [showChat]);
|
|
|
|
// Speak function with exact greeting behavior
|
|
const speak = useCallback((text: string) => {
|
|
if (!synthRef.current || !enabled) return;
|
|
|
|
synthRef.current.cancel();
|
|
|
|
const utterance = new SpeechSynthesisUtterance(text);
|
|
utterance.lang = language === "es" ? "es-ES" : "en-US";
|
|
utterance.rate = 0.9;
|
|
utterance.pitch = 1;
|
|
utterance.volume = 1;
|
|
|
|
utterance.onstart = () => setIsSpeaking(true);
|
|
utterance.onend = () => setIsSpeaking(false);
|
|
utterance.onerror = () => setIsSpeaking(false);
|
|
|
|
synthRef.current.speak(utterance);
|
|
}, [language, enabled]);
|
|
|
|
// Handle user input
|
|
const handleUserInput = async (text: string) => {
|
|
if (!text.trim() || !enabled) return;
|
|
|
|
const userText = text.trim();
|
|
setMessages(prev => [...prev, { role: "user", content: userText }]);
|
|
setIsSpeaking(true);
|
|
|
|
// Detect language - improved logic
|
|
// If user writes ANY English word, switch to English
|
|
const spanishPatterns = /^(hola|gracias|por| favor|quiero|necesito|reserva|precio|dónde|cuándo|cómo|cuánto|tengo|busco|qué|es|son|hay|me|puedo)\s/i;
|
|
const englishPatterns = /^(hello|thanks|please|want|need|book|price|where|how|much|have|can|i|my|your|the|a|an|is|are|do|does|what|who|when|where|why|how)\s/i;
|
|
|
|
let detectedLang: Lang = language;
|
|
|
|
// Check if it's clearly English (more inclusive)
|
|
const isEnglish = englishPatterns.test(userText.toLowerCase());
|
|
const isSpanish = spanishPatterns.test(userText.toLowerCase());
|
|
|
|
// If user writes in English (more than 50% English letters or explicit English words)
|
|
if (!isSpanish && (isEnglish || /[a-z]{3,}/i.test(userText))) {
|
|
// More English characters or explicit English = English
|
|
const englishChars = (userText.match(/[a-zA-Z]/g) || []).length;
|
|
const spanishChars = (userText.match(/[áéíóúñü]/gi) || []).length;
|
|
if (englishChars > spanishChars * 2 || isEnglish) {
|
|
detectedLang = "en";
|
|
}
|
|
} else if (isSpanish) {
|
|
detectedLang = "es";
|
|
}
|
|
|
|
// Update language if detected differently
|
|
if (detectedLang !== language) {
|
|
setLanguage(detectedLang);
|
|
}
|
|
|
|
try {
|
|
const response = await fetch(apiUrl, {
|
|
method: "POST",
|
|
headers: { "Content-Type": "application/json" },
|
|
body: JSON.stringify({
|
|
message: userText,
|
|
language: detectedLang,
|
|
businessType,
|
|
businessName,
|
|
history: messages.slice(-4)
|
|
})
|
|
});
|
|
|
|
if (!response.ok) throw new Error("API failed");
|
|
|
|
const data = await response.json();
|
|
const aiResponse = data.response;
|
|
|
|
setMessages(prev => [...prev, { role: "assistant", content: aiResponse }]);
|
|
speak(aiResponse);
|
|
|
|
} catch (err) {
|
|
console.error("API error:", err);
|
|
const fallback = language === "es"
|
|
? "Lo siento, tengo problemas técnicos. ¿Podrías escribir tu mensaje?"
|
|
: "Sorry, I'm having technical issues. Could you type your message?";
|
|
setMessages(prev => [...prev, { role: "assistant", content: fallback }]);
|
|
setError("API error - check API key");
|
|
speak(fallback);
|
|
}
|
|
};
|
|
|
|
// Toggle microphone
|
|
const toggleListening = () => {
|
|
if (!recognitionRef.current) {
|
|
setError("Speech recognition not supported. Try Chrome.");
|
|
return;
|
|
}
|
|
|
|
if (isListening) {
|
|
recognitionRef.current.stop();
|
|
} else {
|
|
setError(null);
|
|
recognitionRef.current.lang = language === "es" ? "es-ES" : "en-US";
|
|
recognitionRef.current.start();
|
|
setIsListening(true);
|
|
}
|
|
};
|
|
|
|
// Handle text input
|
|
const handleTextSubmit = (e: React.FormEvent) => {
|
|
e.preventDefault();
|
|
const text = inputRef.current?.value;
|
|
if (text?.trim()) {
|
|
handleUserInput(text.trim());
|
|
if (inputRef.current) inputRef.current.value = "";
|
|
}
|
|
};
|
|
|
|
// Theme
|
|
const buttonColor = theme === "dark" ? "bg-brand-pink" : "bg-blue-600";
|
|
const bgColor = theme === "dark" ? "bg-[#1a1625]" : "bg-white";
|
|
const textColor = theme === "dark" ? "text-white" : "text-gray-900";
|
|
const inputBg = theme === "dark" ? "bg-white/10" : "bg-gray-100";
|
|
|
|
if (!enabled) return null;
|
|
|
|
return (
|
|
<div className="fixed bottom-6 right-6 z-50">
|
|
{/* Main Button */}
|
|
<button
|
|
onClick={() => setShowChat(!showChat)}
|
|
className={`${buttonColor} w-14 h-14 rounded-full shadow-lg flex items-center justify-center transition-all hover:scale-110 ${isListening ? "animate-pulse" : ""}`}
|
|
title={showChat ? "Close" : "AI Assistant"}
|
|
>
|
|
{isSpeaking ? "🔊" : isListening ? "👂" : "🎙️"}
|
|
</button>
|
|
|
|
{/* Chat Panel */}
|
|
{showChat && (
|
|
<div className={`absolute bottom-20 right-0 w-80 ${bgColor} rounded-2xl shadow-2xl border border-white/10 overflow-hidden`}>
|
|
{/* Header */}
|
|
<div className={`${theme === "dark" ? "bg-brand-purple" : "bg-blue-600"} p-3 flex items-center justify-between`}>
|
|
<div className="flex items-center gap-2">
|
|
<span className="text-xl">🤖</span>
|
|
<span className="font-semibold text-white text-sm">Asistente SiteMente</span>
|
|
</div>
|
|
<button
|
|
type="button"
|
|
onClick={() => setLanguage(language === "es" ? "en" : "es")}
|
|
className="px-2 py-1 bg-white/20 rounded text-xs text-white hover:bg-white/30 transition"
|
|
>
|
|
{language === "es" ? "🇪🇸 ES" : "🇺🇸 EN"}
|
|
</button>
|
|
</div>
|
|
|
|
{/* Messages */}
|
|
<div className={`h-56 overflow-y-auto p-3 space-y-2 ${textColor}`}>
|
|
{messages.map((msg, i) => (
|
|
<div key={i} className={`flex ${msg.role === "user" ? "justify-end" : "justify-start"}`}>
|
|
<div className={`max-w-[85%] px-3 py-2 rounded-lg text-sm ${
|
|
msg.role === "user"
|
|
? `${buttonColor} text-white`
|
|
: theme === "dark" ? "bg-white/10 text-white" : "bg-gray-100 text-gray-900"
|
|
}`}>
|
|
{msg.content}
|
|
</div>
|
|
</div>
|
|
))}
|
|
{isListening && (
|
|
<div className="flex justify-start">
|
|
<div className="bg-white/10 px-3 py-2 rounded-lg text-sm text-white animate-pulse">
|
|
🎤 Listening...
|
|
</div>
|
|
</div>
|
|
)}
|
|
<div ref={messagesEndRef} />
|
|
</div>
|
|
|
|
{/* Input */}
|
|
<form onSubmit={handleTextSubmit} className={`p-3 border-t ${theme === "dark" ? "border-white/10" : "border-gray-200"}`}>
|
|
<div className="flex gap-2">
|
|
<input
|
|
ref={inputRef}
|
|
type="text"
|
|
placeholder={language === "es" ? "Escribe aquí..." : "Type here..."}
|
|
className={`flex-1 px-3 py-2 rounded-lg text-sm ${inputBg} ${textColor} placeholder-white/50 focus:outline-none focus:ring-2 ${buttonColor}`}
|
|
disabled={isSpeaking}
|
|
/>
|
|
<button
|
|
type="submit"
|
|
disabled={isSpeaking}
|
|
className={`${buttonColor} px-3 py-2 rounded-lg text-white disabled:opacity-50`}
|
|
>
|
|
➤
|
|
</button>
|
|
</div>
|
|
</form>
|
|
|
|
{/* Mic Button */}
|
|
<div className={`p-3 pt-0`}>
|
|
<button
|
|
onClick={toggleListening}
|
|
disabled={isSpeaking}
|
|
className={`w-full py-2 rounded-lg font-semibold text-white transition ${
|
|
isListening
|
|
? "bg-red-500 animate-pulse"
|
|
: `${buttonColor} hover:opacity-90`
|
|
} disabled:opacity-50`}
|
|
>
|
|
{isListening ? "🛑 Detener" : "🎤 Hablar"}
|
|
</button>
|
|
</div>
|
|
|
|
{/* Error */}
|
|
{error && (
|
|
<div className="px-3 pb-2 text-xs text-red-400">
|
|
{error}
|
|
</div>
|
|
)}
|
|
</div>
|
|
)}
|
|
</div>
|
|
);
|
|
}
declare global {
|
|
interface Window {
|
|
SpeechRecognition: typeof SpeechRecognition;
|
|
webkitSpeechRecognition: typeof SpeechRecognition;
|
|
}
|
|
}
|