"use client";

import { useState, useRef, useEffect, useCallback } from "react";

interface MiniMaxVoiceWidgetProps {
  businessName?: string;
  businessType?: "restaurant" | "real-estate" | "clinic" | "car-rental" | "default";
  theme?: "dark" | "light";
  apiUrl?: string; // Your VPS API endpoint for MiniMax
}

// Language state
type Lang = "es" | "en";

const SPANISH_GREETING = "¡Hola! Soy el asistente de inteligencia artificial. ¿En qué puedo ayudarte hoy?";
const ENGLISH_GREETING = "Hello! I'm the AI assistant. How can I help you today?";

// MiniMax system prompt
const SYSTEM_PROMPT = `You are a friendly AI assistant for a business website.
You help customers with questions about services, hours, bookings, and general inquiries.
Keep responses brief and helpful (2-3 sentences max).
Default language is Spanish. After the first greeting, respond in the language the user uses.
Business type: {businessType}. Business name: {businessName}.`;
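
// NOTE: SYSTEM_PROMPT is not referenced anywhere in this client component.
// The working assumption is that the /api/ai/voice-chat route applies it
// server-side, filling the {businessType} and {businessName} placeholders
// before calling MiniMax (see the illustrative route sketch at the end of
// this file).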

export default function MiniMaxVoiceWidget({
  businessName = "SiteMente",
  businessType = "restaurant",
  theme = "dark",
  apiUrl = "/api/ai/voice-chat"
}: MiniMaxVoiceWidgetProps) {
  const [isListening, setIsListening] = useState(false);
  const [isSpeaking, setIsSpeaking] = useState(false);
  const [messages, setMessages] = useState<{role: "user" | "assistant", content: string}[]>([]);
  const [language, setLanguage] = useState<Lang>("es");
  const [isInitialized, setIsInitialized] = useState(false);
  const [showChat, setShowChat] = useState(false);
  const [error, setError] = useState<string | null>(null);

  const recognitionRef = useRef<SpeechRecognition | null>(null);
  const synthRef = useRef<SpeechSynthesis | null>(null);
  const messagesEndRef = useRef<HTMLDivElement>(null);
  // The recognition handlers below are only re-bound when `language` changes,
  // so route final transcripts through a ref; otherwise they would call a
  // stale closure and send outdated `messages` history to the API.
  const handleUserInputRef = useRef<(text: string) => void>(() => {});

  // Initialize speech recognition and synthesis.
  // Note: speech recognition is not available in every browser (Firefox has no
  // support; Chromium-based browsers expose the webkit-prefixed constructor).
  useEffect(() => {
    if (typeof window === "undefined") return;

    // Speech Recognition (Web Speech API)
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (SpeechRecognition) {
      recognitionRef.current = new SpeechRecognition();
      recognitionRef.current.continuous = false;
      recognitionRef.current.interimResults = true;
      recognitionRef.current.lang = language === "es" ? "es-ES" : "en-US";

      recognitionRef.current.onresult = (event) => {
        const transcript = Array.from(event.results)
          .map(result => result[0].transcript)
          .join("");

        if (event.results[0].isFinal) {
          // Go through the ref so the latest handler (and latest state) is used.
          handleUserInputRef.current(transcript);
        }
      };

      recognitionRef.current.onerror = (event) => {
        console.error("Speech recognition error:", event.error);
        setIsListening(false);
        if (event.error !== "no-speech") {
          setError(`Speech error: ${event.error}`);
        }
      };

      recognitionRef.current.onend = () => {
        setIsListening(false);
      };
    }

    // Speech Synthesis
    synthRef.current = window.speechSynthesis;

    return () => {
      recognitionRef.current?.stop();
      synthRef.current?.cancel();
    };
  }, [language]);

  // Scroll to bottom
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
  }, [messages]);

  // Initialize with greeting.
  // Some browsers (notably Chrome) block speechSynthesis.speak() before the
  // first user gesture, so this initial greeting may be silent; the text is
  // still rendered in the chat panel.
  useEffect(() => {
    if (!isInitialized) {
      setIsInitialized(true);
      const greeting = language === "es" ? SPANISH_GREETING : ENGLISH_GREETING;
      setMessages([{ role: "assistant", content: greeting }]);
      speak(greeting);
    }
  }, []);

  // Speak text using browser TTS
  const speak = useCallback((text: string) => {
    if (!synthRef.current) return;

    synthRef.current.cancel();
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.lang = language === "es" ? "es-ES" : "en-US";
    utterance.rate = 0.9;
    utterance.pitch = 1;

    utterance.onstart = () => setIsSpeaking(true);
    utterance.onend = () => setIsSpeaking(false);
    utterance.onerror = () => setIsSpeaking(false);

    synthRef.current.speak(utterance);
  }, [language]);

  // Handle user input
  const handleUserInput = async (text: string) => {
    if (!text.trim()) return;

    // Add user message
    setMessages(prev => [...prev, { role: "user", content: text }]);

    // Lightweight keyword check to switch the UI language on each message
    const spanishWords = ["hola", "gracias", "por favor", "quiero", "necesito", "reserva", "precio", "dónde", "cuándo"];
    const isSpanish = spanishWords.some(word => text.toLowerCase().includes(word));
    if (isSpanish && language === "en") {
      setLanguage("es");
    } else if (!isSpanish && language === "es" && text.length > 5) {
      setLanguage("en");
    }

    // Disables the mic button (disabled={isSpeaking}) while the request is in flight
    setIsSpeaking(true);

    try {
      // Call MiniMax API
      const response = await fetch(apiUrl, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          message: text,
          language,
          businessType,
          businessName,
          history: messages.slice(-5) // Last 5 messages for context
        })
      });

      if (!response.ok) throw new Error("API request failed");

      const data = await response.json();
      const aiResponse = data.response || data.message || "Lo siento, no entendí. ¿Puedes repetir?";

      setMessages(prev => [...prev, { role: "assistant", content: aiResponse }]);
      speak(aiResponse);

    } catch (err) {
      console.error("API error:", err);
      // Fallback response
      const fallbackResponse = language === "es"
        ? "Lo siento, tuve un problema. ¿Puedes repetir?"
        : "Sorry, I had an issue. Can you repeat that?";
      setMessages(prev => [...prev, { role: "assistant", content: fallbackResponse }]);
      speak(fallbackResponse);
    }
  };

  // Keep the ref pointing at the latest handler so the speech-recognition
  // callback never calls a stale closure.
  handleUserInputRef.current = handleUserInput;

  // Toggle listening
  const toggleListening = () => {
    if (!recognitionRef.current) {
      setError("Speech recognition not supported in this browser");
      return;
    }

    if (isListening) {
      recognitionRef.current.stop();
    } else {
      setError(null);
      recognitionRef.current.lang = language === "es" ? "es-ES" : "en-US";
      recognitionRef.current.start();
      setIsListening(true);
    }
  };

  // Theme colors
  const buttonColor = theme === "dark" ? "bg-brand-pink" : "bg-blue-600";
  const bgColor = theme === "dark" ? "bg-[#1a1625]" : "bg-white";
  const textColor = theme === "dark" ? "text-white" : "text-gray-900";
  const inputBg = theme === "dark" ? "bg-white/10" : "bg-gray-100";

  return (
    <div className="fixed bottom-6 right-6 z-50">
      {/* Chat Toggle */}
      <button
        onClick={() => setShowChat(!showChat)}
        className={`${buttonColor} w-14 h-14 rounded-full shadow-lg flex items-center justify-center transition-all hover:scale-110 ${isListening ? "animate-pulse" : ""}`}
        title={isListening ? "Listening..." : "AI Assistant"}
      >
        {isSpeaking ? "🔊" : isListening ? "👂" : "🎙️"}
      </button>

      {/* Chat Panel */}
      {showChat && (
        <div className={`absolute bottom-20 right-0 w-80 ${bgColor} rounded-2xl shadow-2xl border border-white/10 overflow-hidden`}>
          {/* Header */}
          <div className={`${theme === "dark" ? "bg-brand-purple" : "bg-blue-600"} p-3 flex items-center justify-between`}>
            <div className="flex items-center gap-2">
              <span className="text-xl">🤖</span>
              <span className="font-semibold text-white">
                {businessName} AI
              </span>
            </div>
            <button
              onClick={() => setLanguage(lang => lang === "es" ? "en" : "es")}
              className="text-xs bg-white/20 px-2 py-1 rounded text-white"
            >
              {language === "es" ? "🇪🇸 ES" : "🇬🇧 EN"}
            </button>
          </div>

          {/* Messages */}
          <div className={`h-64 overflow-y-auto p-3 space-y-2 ${textColor}`}>
            {messages.map((msg, i) => (
              <div key={i} className={`flex ${msg.role === "user" ? "justify-end" : "justify-start"}`}>
                <div className={`max-w-[80%] px-3 py-2 rounded-lg text-sm ${
                  msg.role === "user"
                    ? buttonColor + " text-white"
                    : theme === "dark" ? "bg-white/10 text-white" : "bg-gray-100 text-gray-900"
                }`}>
                  {msg.content}
                </div>
              </div>
            ))}
            {isListening && (
              <div className="flex justify-start">
                <div className="bg-white/10 px-3 py-2 rounded-lg text-sm text-white animate-pulse">
                  {language === "es" ? "🎤 Escuchando..." : "🎤 Listening..."}
                </div>
              </div>
            )}
            <div ref={messagesEndRef} />
          </div>

          {/* Controls */}
          <div className={`p-3 border-t ${theme === "dark" ? "border-white/10" : "border-gray-200"} flex gap-2`}>
            <button
              onClick={toggleListening}
              disabled={isSpeaking}
              className={`flex-1 py-2 rounded-lg font-semibold text-white transition ${
                isListening ? "bg-red-500 animate-pulse" : buttonColor + " hover:opacity-90"
              } disabled:opacity-50`}
            >
              {isListening
                ? (language === "es" ? "🛑 Detener" : "🛑 Stop")
                : (language === "es" ? "🎤 Hablar" : "🎤 Speak")}
            </button>
          </div>

          {/* Error */}
          {error && (
            <div className="px-3 pb-2 text-xs text-red-400">
              {error}
            </div>
          )}
        </div>
      )}
    </div>
  );
}
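
/*
  Usage (illustrative): drop the widget into any page or layout. The import
  path and page below are examples, not paths from this project.

  // app/page.tsx
  import MiniMaxVoiceWidget from "@/components/MiniMaxVoiceWidget";

  export default function Home() {
    return (
      <main>
        <MiniMaxVoiceWidget businessName="SiteMente" businessType="restaurant" theme="dark" />
      </main>
    );
  }
*/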

// Type declarations for Web Speech API.
// This assumes the SpeechRecognition constructor type is already available to
// the compiler (e.g. via dom-speech-recognition typings); only the window
// properties are added here.
declare global {
  interface Window {
    SpeechRecognition: typeof SpeechRecognition;
    webkitSpeechRecognition: typeof SpeechRecognition;
  }
}
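
/*
  Example (illustrative only): a Next.js route handler matching the contract this
  widget assumes at apiUrl ("/api/ai/voice-chat"). The request/response shape
  mirrors what the component sends and reads back ({ message, language,
  businessType, businessName, history } in; { response } out). The upstream call
  is a placeholder: MINIMAX_PROXY_URL and its payload are assumptions, not the
  documented MiniMax API -- wire this to your VPS proxy per its own docs.

  // app/api/ai/voice-chat/route.ts
  import { NextResponse } from "next/server";

  export async function POST(req: Request) {
    const { message, language, businessType, businessName, history } = await req.json();

    // Build the system prompt the same way SYSTEM_PROMPT above expects
    // (export it from the widget module, or duplicate it here).
    const system = SYSTEM_PROMPT
      .replace("{businessType}", businessType)
      .replace("{businessName}", businessName);

    const upstream = await fetch(process.env.MINIMAX_PROXY_URL!, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ system, message, language, history }),
    });

    if (!upstream.ok) {
      return NextResponse.json({ response: null }, { status: 502 });
    }

    const data = await upstream.json();
    // The widget reads data.response first and falls back to data.message.
    return NextResponse.json({ response: data.response ?? data.message });
  }
*/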