Add MiniMax Voice Widget V2 - disabled by default for testing
This commit is contained in:
@@ -0,0 +1,314 @@
|
||||
"use client";
|
||||
|
||||
import { useState, useRef, useEffect, useCallback } from "react";
|
||||
|
||||
// Props for the MiniMax voice widget. All fields are optional; defaults are
// supplied in the component signature.
interface MiniMaxVoiceWidgetProps {
  // Sent to the chat backend with each message (default "SiteMente").
  businessName?: string;
  // Sent to the chat backend with each message (default "restaurant");
  // presumably selects a backend persona — confirm against the API.
  businessType?: "restaurant" | "real-estate" | "clinic" | "car-rental" | "default";
  // Visual theme for the floating button and chat panel (default "dark").
  theme?: "dark" | "light";
  // Endpoint the widget POSTs each user turn to (default "/api/ai/voice-chat-v2").
  apiUrl?: string;
  enabled?: boolean; // Toggle on/off for testing
}
|
||||
|
||||
// Conversation languages the widget can detect and speak.
type Lang = "es" | "en";

// Exact greeting as per spec
const SPANISH_GREETING = "Hola, soy el asistente de SiteMente. ¿En qué puedo ayudarte hoy?";
// NOTE(review): ENGLISH_GREETING is never referenced elsewhere in this file —
// confirm whether it should be spoken when the language switches to English.
const ENGLISH_GREETING = "I can also speak English. How can I help you today?";

// Canned fallback lines shown/spoken when the backend call fails.
const SPANISH_MISUNDERSTAND = "No he entendido del todo, ¿podrías repetirlo o escribirlo, por favor?";
const ENGLISH_MISUNDERSTAND = "I didn't quite catch that. Could you repeat or type it, please?";
|
||||
|
||||
export default function MiniMaxVoiceWidget({
|
||||
businessName = "SiteMente",
|
||||
businessType = "restaurant",
|
||||
theme = "dark",
|
||||
apiUrl = "/api/ai/voice-chat-v2",
|
||||
enabled = true
|
||||
}: MiniMaxVoiceWidgetProps) {
|
||||
const [isListening, setIsListening] = useState(false);
|
||||
const [isSpeaking, setIsSpeaking] = useState(false);
|
||||
const [messages, setMessages] = useState<{role: "user" | "assistant", content: string}[]>([]);
|
||||
const [language, setLanguage] = useState<Lang>("es");
|
||||
const [showChat, setShowChat] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const [isInitialized, setIsInitialized] = useState(false);
|
||||
|
||||
const recognitionRef = useRef<SpeechRecognition | null>(null);
|
||||
const synthRef = useRef<SpeechSynthesis | null>(null);
|
||||
const messagesEndRef = useRef<HTMLDivElement>(null);
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
|
||||
// Initialize speech APIs
|
||||
useEffect(() => {
|
||||
if (typeof window === "undefined" || !enabled) return;
|
||||
|
||||
// Speech Recognition
|
||||
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
|
||||
if (SpeechRecognition) {
|
||||
recognitionRef.current = new SpeechRecognition();
|
||||
recognitionRef.current.continuous = false;
|
||||
recognitionRef.current.interimResults = true;
|
||||
recognitionRef.current.lang = "es-ES";
|
||||
|
||||
recognitionRef.current.onresult = (event) => {
|
||||
const transcript = Array.from(event.results)
|
||||
.map(result => result[0].transcript)
|
||||
.join("");
|
||||
|
||||
if (event.results[0].isFinal && transcript.trim()) {
|
||||
handleUserInput(transcript);
|
||||
}
|
||||
};
|
||||
|
||||
recognitionRef.current.onerror = (event) => {
|
||||
console.error("Speech error:", event.error);
|
||||
setIsListening(false);
|
||||
if (event.error === "not-allowed") {
|
||||
setError("Microphone access denied. Please allow microphone access.");
|
||||
} else if (event.error !== "no-speech") {
|
||||
setError(`Speech error: ${event.error}`);
|
||||
}
|
||||
};
|
||||
|
||||
recognitionRef.current.onend = () => setIsListening(false);
|
||||
}
|
||||
|
||||
// Speech Synthesis
|
||||
synthRef.current = window.speechSynthesis;
|
||||
|
||||
return () => {
|
||||
recognitionRef.current?.stop();
|
||||
synthRef.current?.cancel();
|
||||
};
|
||||
}, [enabled]);
|
||||
|
||||
// Initialize with greeting
|
||||
useEffect(() => {
|
||||
if (enabled && !isInitialized) {
|
||||
setIsInitialized(true);
|
||||
setMessages([{ role: "assistant", content: SPANISH_GREETING }]);
|
||||
speak(SPANISH_GREETING);
|
||||
}
|
||||
}, [enabled]);
|
||||
|
||||
// Scroll to bottom
|
||||
useEffect(() => {
|
||||
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
|
||||
}, [messages]);
|
||||
|
||||
// Focus input when chat opens
|
||||
useEffect(() => {
|
||||
if (showChat && inputRef.current) {
|
||||
inputRef.current.focus();
|
||||
}
|
||||
}, [showChat]);
|
||||
|
||||
// Speak function with exact greeting behavior
|
||||
const speak = useCallback((text: string) => {
|
||||
if (!synthRef.current || !enabled) return;
|
||||
|
||||
synthRef.current.cancel();
|
||||
|
||||
const utterance = new SpeechSynthesisUtterance(text);
|
||||
utterance.lang = language === "es" ? "es-ES" : "en-US";
|
||||
utterance.rate = 0.9;
|
||||
utterance.pitch = 1;
|
||||
utterance.volume = 1;
|
||||
|
||||
utterance.onstart = () => setIsSpeaking(true);
|
||||
utterance.onend = () => setIsSpeaking(false);
|
||||
utterance.onerror = () => setIsSpeaking(false);
|
||||
|
||||
synthRef.current.speak(utterance);
|
||||
}, [language, enabled]);
|
||||
|
||||
// Handle user input
|
||||
const handleUserInput = async (text: string) => {
|
||||
if (!text.trim() || !enabled) return;
|
||||
|
||||
const userText = text.trim();
|
||||
setMessages(prev => [...prev, { role: "user", content: userText }]);
|
||||
setIsSpeaking(true);
|
||||
|
||||
// Detect language
|
||||
const spanishWords = ["hola", "gracias", "por favor", "quiero", "necesito", "reserva", "precio", "dónde", "cuándo", "cómo", "cuánto", "tengo", "quiero", "busco", "necesito"];
|
||||
const englishWords = ["hello", "thanks", "please", "want", "need", "book", "price", "where", "how", "much", "have", "looking"];
|
||||
|
||||
const isSpanish = spanishWords.some(w => userText.toLowerCase().includes(w));
|
||||
const isEnglish = englishWords.some(w => userText.toLowerCase().includes(w));
|
||||
|
||||
let detectedLang: Lang = language;
|
||||
if (isSpanish && !isEnglish) detectedLang = "es";
|
||||
else if (isEnglish && !isSpanish) detectedLang = "en";
|
||||
else if (isSpanish && isEnglish && language === "es") detectedLang = "es";
|
||||
|
||||
if (detectedLang !== language) {
|
||||
setLanguage(detectedLang);
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(apiUrl, {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
message: userText,
|
||||
language: detectedLang,
|
||||
businessType,
|
||||
businessName,
|
||||
history: messages.slice(-4)
|
||||
})
|
||||
});
|
||||
|
||||
if (!response.ok) throw new Error("API failed");
|
||||
|
||||
const data = await response.json();
|
||||
const aiResponse = data.response;
|
||||
|
||||
setMessages(prev => [...prev, { role: "assistant", content: aiResponse }]);
|
||||
speak(aiResponse);
|
||||
|
||||
} catch (err) {
|
||||
console.error("API error:", err);
|
||||
const fallback = language === "es" ? SPANISH_MISUNDERSTAND : ENGLISH_MISUNDERSTAND;
|
||||
setMessages(prev => [...prev, { role: "assistant", content: fallback }]);
|
||||
speak(fallback);
|
||||
}
|
||||
};
|
||||
|
||||
// Toggle microphone
|
||||
const toggleListening = () => {
|
||||
if (!recognitionRef.current) {
|
||||
setError("Speech recognition not supported. Try Chrome.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (isListening) {
|
||||
recognitionRef.current.stop();
|
||||
} else {
|
||||
setError(null);
|
||||
recognitionRef.current.lang = language === "es" ? "es-ES" : "en-US";
|
||||
recognitionRef.current.start();
|
||||
setIsListening(true);
|
||||
}
|
||||
};
|
||||
|
||||
// Handle text input
|
||||
const handleTextSubmit = (e: React.FormEvent) => {
|
||||
e.preventDefault();
|
||||
const text = inputRef.current?.value;
|
||||
if (text?.trim()) {
|
||||
handleUserInput(text.trim());
|
||||
if (inputRef.current) inputRef.current.value = "";
|
||||
}
|
||||
};
|
||||
|
||||
// Theme
|
||||
const buttonColor = theme === "dark" ? "bg-brand-pink" : "bg-blue-600";
|
||||
const bgColor = theme === "dark" ? "bg-[#1a1625]" : "bg-white";
|
||||
const textColor = theme === "dark" ? "text-white" : "text-gray-900";
|
||||
const inputBg = theme === "dark" ? "bg-white/10" : "bg-gray-100";
|
||||
|
||||
if (!enabled) return null;
|
||||
|
||||
return (
|
||||
<div className="fixed bottom-6 right-6 z-50">
|
||||
{/* Main Button */}
|
||||
<button
|
||||
onClick={() => setShowChat(!showChat)}
|
||||
className={`${buttonColor} w-14 h-14 rounded-full shadow-lg flex items-center justify-center transition-all hover:scale-110 ${isListening ? "animate-pulse" : ""}`}
|
||||
title={showChat ? "Close" : "AI Assistant"}
|
||||
>
|
||||
{isSpeaking ? "🔊" : isListening ? "👂" : "🎙️"}
|
||||
</button>
|
||||
|
||||
{/* Chat Panel */}
|
||||
{showChat && (
|
||||
<div className={`absolute bottom-20 right-0 w-80 ${bgColor} rounded-2xl shadow-2xl border border-white/10 overflow-hidden`}>
|
||||
{/* Header */}
|
||||
<div className={`${theme === "dark" ? "bg-brand-purple" : "bg-blue-600"} p-3 flex items-center justify-between`}>
|
||||
<div className="flex items-center gap-2">
|
||||
<span className="text-xl">🤖</span>
|
||||
<span className="font-semibold text-white text-sm">Asistente SiteMente</span>
|
||||
</div>
|
||||
<span className="text-xs text-white/70">
|
||||
{language === "es" ? "ES" : "EN"}
|
||||
</span>
|
||||
</div>
|
||||
|
||||
{/* Messages */}
|
||||
<div className={`h-56 overflow-y-auto p-3 space-y-2 ${textColor}`}>
|
||||
{messages.map((msg, i) => (
|
||||
<div key={i} className={`flex ${msg.role === "user" ? "justify-end" : "justify-start"}`}>
|
||||
<div className={`max-w-[85%] px-3 py-2 rounded-lg text-sm ${
|
||||
msg.role === "user"
|
||||
? `${buttonColor} text-white`
|
||||
: theme === "dark" ? "bg-white/10 text-white" : "bg-gray-100 text-gray-900"
|
||||
}`}>
|
||||
{msg.content}
|
||||
</div>
|
||||
</div>
|
||||
))}
|
||||
{isListening && (
|
||||
<div className="flex justify-start">
|
||||
<div className="bg-white/10 px-3 py-2 rounded-lg text-sm text-white animate-pulse">
|
||||
🎤 Listening...
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
<div ref={messagesEndRef} />
|
||||
</div>
|
||||
|
||||
{/* Input */}
|
||||
<form onSubmit={handleTextSubmit} className={`p-3 border-t ${theme === "dark" ? "border-white/10" : "border-gray-200"}`}>
|
||||
<div className="flex gap-2">
|
||||
<input
|
||||
ref={inputRef}
|
||||
type="text"
|
||||
placeholder={language === "es" ? "Escribe aquí..." : "Type here..."}
|
||||
className={`flex-1 px-3 py-2 rounded-lg text-sm ${inputBg} ${textColor} placeholder-white/50 focus:outline-none focus:ring-2 ${buttonColor}`}
|
||||
disabled={isSpeaking}
|
||||
/>
|
||||
<button
|
||||
type="submit"
|
||||
disabled={isSpeaking}
|
||||
className={`${buttonColor} px-3 py-2 rounded-lg text-white disabled:opacity-50`}
|
||||
>
|
||||
➤
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
{/* Mic Button */}
|
||||
<div className={`p-3 pt-0`}>
|
||||
<button
|
||||
onClick={toggleListening}
|
||||
disabled={isSpeaking}
|
||||
className={`w-full py-2 rounded-lg font-semibold text-white transition ${
|
||||
isListening
|
||||
? "bg-red-500 animate-pulse"
|
||||
: `${buttonColor} hover:opacity-90`
|
||||
} disabled:opacity-50`}
|
||||
>
|
||||
{isListening ? "🛑 Detener" : "🎤 Hablar"}
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Error */}
|
||||
{error && (
|
||||
<div className="px-3 pb-2 text-xs text-red-400">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// Chromium-based browsers expose SpeechRecognition under a webkit prefix;
// widen the Window type so both lookups in the init effect type-check.
declare global {
  interface Window {
    SpeechRecognition: typeof SpeechRecognition;
    webkitSpeechRecognition: typeof SpeechRecognition;
  }
}
|
||||
Reference in New Issue
Block a user