Clean up: Remove all old widgets, keep only V2 (disabled by default)

This commit is contained in:
root
2026-02-26 19:12:59 +01:00
parent 57a7674b5c
commit 0a4853bcda
6 changed files with 4 additions and 1008 deletions
-98
View File
@@ -1,98 +0,0 @@
import { NextRequest, NextResponse } from "next/server";

// MiniMax API configuration.
// SECURITY FIX: the previous code shipped a hardcoded JWT as a fallback key,
// leaking a credential into the repository. Read from the environment only
// and degrade gracefully when it is missing.
const MINIMAX_API_KEY = process.env.MINIMAX_API_KEY;
const MINIMAX_BASE_URL = "https://api.minimax.chat/v1";

/** One chat turn exchanged with the model. */
interface Message {
  // "system" added (backward-compatible widening) so the instruction prompt
  // can be sent with its proper role instead of masquerading as "assistant".
  role: "system" | "user" | "assistant";
  content: string;
}

/** JSON body posted by the voice-chat widget. */
interface RequestBody {
  message: string;
  language: "es" | "en";
  businessType: string;
  businessName: string;
  history?: Message[];
}

/**
 * POST /api/ai/voice-chat — relays one user message (plus up to five turns
 * of history) to MiniMax chat completion and returns `{ response: string }`.
 * Upstream failures are answered with a canned bilingual apology rather than
 * an error status, so the widget UI never breaks mid-conversation.
 */
export async function POST(request: NextRequest) {
  try {
    const body: RequestBody = await request.json();
    const { message, language, businessType, businessName, history = [] } = body;

    // Single canned apology reused for every upstream-failure path.
    const fallbackResponse = language === "es"
      ? "Lo siento, tuve un problema técnico. ¿Puedes repetir tu pregunta?"
      : "Sorry, I had a technical issue. Can you repeat your question?";

    // Without a key we cannot call MiniMax at all — fail soft instead of
    // sending an Authorization header with a bogus token.
    if (!MINIMAX_API_KEY) {
      console.error("MINIMAX_API_KEY is not configured");
      return NextResponse.json({ response: fallbackResponse });
    }

    // Build conversation context
    const systemPrompt = `You are a friendly AI assistant for ${businessName}, a ${businessType} business.
IMPORTANT RULES:
- Always respond in the SAME language the user uses
- Default to Spanish (respond in Spanish unless user clearly speaks English)
- Keep responses SHORT: 1-3 sentences max
- Be helpful, friendly, and professional
- If asked about pricing, mention: €299-€1,950/month depending on plan
- If asked about booking, say you'll help them book
- Never make up specific prices or details you don't know
Current language mode: ${language}`;

    // Build messages array. BUGFIX: the instructions belong in a "system"
    // turn, not an "assistant" turn, so the model treats them as rules
    // rather than as something it previously said.
    const messages: Message[] = [
      { role: "system", content: systemPrompt }
    ];

    // Add history (last 5 messages for context)
    if (history.length > 0) {
      messages.push(...history.slice(-5));
    }

    // Add current message
    messages.push({ role: "user", content: message });

    // Call MiniMax API
    const response = await fetch(`${MINIMAX_BASE_URL}/text/chatcompletion_v2`, {
      method: "POST",
      headers: {
        "Authorization": `Bearer ${MINIMAX_API_KEY}`,
        "Content-Type": "application/json"
      },
      body: JSON.stringify({
        model: "MiniMax-M2.5",
        messages,
        temperature: 0.7,
        max_tokens: 500
      })
    });

    if (!response.ok) {
      const error = await response.text();
      console.error("MiniMax API error:", error);
      return NextResponse.json({ response: fallbackResponse });
    }

    const data = await response.json();
    // Accept both the OpenAI-style payload and a legacy `reply` field.
    const aiResponse = data.choices?.[0]?.message?.content ||
      data.reply ||
      (language === "es"
        ? "Gracias por tu mensaje. ¿En qué más puedo ayudarte?"
        : "Thanks for your message. How else can I help you?");

    return NextResponse.json({ response: aiResponse });
  } catch (error) {
    console.error("Voice chat API error:", error);
    return NextResponse.json(
      { error: "Internal server error" },
      { status: 500 }
    );
  }
}

// Disable body parsing for streaming if needed
export const runtime = "nodejs";
+4 -2
View File
@@ -8,7 +8,8 @@ import HeroSlider, {
import JsonLdSchemas from "../components/site-mente/JsonLdSchemas"; import JsonLdSchemas from "../components/site-mente/JsonLdSchemas";
import ServicesAndPricing from "../components/site-mente/ServicesAndPricing"; import ServicesAndPricing from "../components/site-mente/ServicesAndPricing";
import SeoMeta from "../components/site-mente/SeoMeta"; import SeoMeta from "../components/site-mente/SeoMeta";
import SiteMenteVoiceWidget from "../components/SiteMenteVoiceWidget"; // Old widget disabled - using V2 only
// import SiteMenteVoiceWidget from "../components/SiteMenteVoiceWidget";
const fadeUp = { const fadeUp = {
hidden: { opacity: 0, y: 24 }, hidden: { opacity: 0, y: 24 },
@@ -781,7 +782,8 @@ export default function HomePage() {
<div className="font-sans text-white bg-[#5e4a8a]"> <div className="font-sans text-white bg-[#5e4a8a]">
<SeoMeta lang={lang} /> <SeoMeta lang={lang} />
<JsonLdSchemas lang={lang} /> <JsonLdSchemas lang={lang} />
<SiteMenteVoiceWidget initialLang={lang} /> {/* Old widget disabled - V2 coming soon */}
{/* <SiteMenteVoiceWidget initialLang={lang} /> */}
<header className="relative overflow-hidden"> <header className="relative overflow-hidden">
<nav className="relative z-10 border-b border-white/10 bg-[#5e4a8a]/90 backdrop-blur"> <nav className="relative z-10 border-b border-white/10 bg-[#5e4a8a]/90 backdrop-blur">
<div className="mx-auto flex w-full max-w-6xl items-center justify-between px-6 py-4"> <div className="mx-auto flex w-full max-w-6xl items-center justify-between px-6 py-4">
-277
View File
@@ -1,277 +0,0 @@
"use client";
import { useState, useRef, useEffect, useCallback } from "react";
interface MiniMaxVoiceWidgetProps {
businessName?: string;
businessType?: "restaurant" | "real-estate" | "clinic" | "car-rental" | "default";
theme?: "dark" | "light";
apiUrl?: string; // Your VPS API endpoint for MiniMax
}
// Language state
type Lang = "es" | "en";
const SPANISH_GREETING = "¡Hola! Soy el asistente de inteligencia artificial. ¿En qué puedo ayudarte hoy?";
const ENGLISH_GREETING = "Hello! I'm the AI assistant. How can I help you today?";
// MiniMax system prompt
const SYSTEM_PROMPT = `You are a friendly AI assistant for a business website.
You help customers with questions about services, hours, bookings, and general inquiries.
Keep responses brief and helpful (2-3 sentences max).
Default language is Spanish. After the first greeting, respond in the language the user uses.
Business type: {businessType}. Business name: {businessName}.`;
export default function MiniMaxVoiceWidget({
businessName = "SiteMente",
businessType = "restaurant",
theme = "dark",
apiUrl = "/api/ai/voice-chat"
}: MiniMaxVoiceWidgetProps) {
const [isListening, setIsListening] = useState(false);
const [isSpeaking, setIsSpeaking] = useState(false);
const [messages, setMessages] = useState<{role: "user" | "assistant", content: string}[]>([]);
const [language, setLanguage] = useState<Lang>("es");
const [isInitialized, setIsInitialized] = useState(false);
const [showChat, setShowChat] = useState(false);
const [error, setError] = useState<string | null>(null);
const recognitionRef = useRef<SpeechRecognition | null>(null);
const synthRef = useRef<SpeechSynthesis | null>(null);
const messagesEndRef = useRef<HTMLDivElement>(null);
// Initialize speech recognition and synthesis
useEffect(() => {
if (typeof window === "undefined") return;
// Speech Recognition (Web Speech API)
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
if (SpeechRecognition) {
recognitionRef.current = new SpeechRecognition();
recognitionRef.current.continuous = false;
recognitionRef.current.interimResults = true;
recognitionRef.current.lang = language === "es" ? "es-ES" : "en-US";
recognitionRef.current.onresult = (event) => {
const transcript = Array.from(event.results)
.map(result => result[0].transcript)
.join("");
if (event.results[0].isFinal) {
handleUserInput(transcript);
}
};
recognitionRef.current.onerror = (event) => {
console.error("Speech recognition error:", event.error);
setIsListening(false);
if (event.error !== "no-speech") {
setError(`Speech error: ${event.error}`);
}
};
recognitionRef.current.onend = () => {
setIsListening(false);
};
}
// Speech Synthesis
synthRef.current = window.speechSynthesis;
return () => {
recognitionRef.current?.stop();
synthRef.current?.cancel();
};
}, [language]);
// Scroll to bottom
useEffect(() => {
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
}, [messages]);
// Initialize with greeting
useEffect(() => {
if (!isInitialized) {
setIsInitialized(true);
const greeting = language === "es" ? SPANISH_GREETING : ENGLISH_GREETING;
setMessages([{ role: "assistant", content: greeting }]);
speak(greeting);
}
}, []);
// Speak text using browser TTS
const speak = useCallback((text: string) => {
if (!synthRef.current) return;
synthRef.current.cancel();
const utterance = new SpeechSynthesisUtterance(text);
utterance.lang = language === "es" ? "es-ES" : "en-US";
utterance.rate = 0.9;
utterance.pitch = 1;
utterance.onstart = () => setIsSpeaking(true);
utterance.onend = () => setIsSpeaking(false);
utterance.onerror = () => setIsSpeaking(false);
synthRef.current.speak(utterance);
}, [language]);
// Handle user input
const handleUserInput = async (text: string) => {
if (!text.trim()) return;
// Add user message
setMessages(prev => [...prev, { role: "user", content: text }]);
// Detect language from first message
const spanishWords = ["hola", "gracias", "por favor", "quiero", "necesito", "reserva", "precio", "dónde", "cuándo"];
const isSpanish = spanishWords.some(word => text.toLowerCase().includes(word));
if (isSpanish && language === "en") {
setLanguage("es");
} else if (!isSpanish && language === "es" && text.length > 5) {
setLanguage("en");
}
setIsSpeaking(true);
try {
// Call MiniMax API
const response = await fetch(apiUrl, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
message: text,
language,
businessType,
businessName,
history: messages.slice(-5) // Last 5 messages for context
})
});
if (!response.ok) throw new Error("API request failed");
const data = await response.json();
const aiResponse = data.response || data.message || "Lo siento, no entendí. ¿Puedes repetir?";
setMessages(prev => [...prev, { role: "assistant", content: aiResponse }]);
speak(aiResponse);
} catch (err) {
console.error("API error:", err);
// Fallback response
const fallbackResponse = language === "es"
? "Lo siento, tuve un problema. ¿Puedes repetir?"
: "Sorry, I had an issue. Can you repeat that?";
setMessages(prev => [...prev, { role: "assistant", content: fallbackResponse }]);
speak(fallbackResponse);
}
};
// Toggle listening
const toggleListening = () => {
if (!recognitionRef.current) {
setError("Speech recognition not supported in this browser");
return;
}
if (isListening) {
recognitionRef.current.stop();
} else {
setError(null);
recognitionRef.current.lang = language === "es" ? "es-ES" : "en-US";
recognitionRef.current.start();
setIsListening(true);
}
};
// Theme colors
const buttonColor = theme === "dark" ? "bg-brand-pink" : "bg-blue-600";
const bgColor = theme === "dark" ? "bg-[#1a1625]" : "bg-white";
const textColor = theme === "dark" ? "text-white" : "text-gray-900";
const inputBg = theme === "dark" ? "bg-white/10" : "bg-gray-100";
return (
<div className="fixed bottom-6 right-6 z-50">
{/* Chat Toggle */}
<button
onClick={() => setShowChat(!showChat)}
className={`${buttonColor} w-14 h-14 rounded-full shadow-lg flex items-center justify-center transition-all hover:scale-110 ${isListening ? "animate-pulse" : ""}`}
title={isListening ? "Listening..." : "AI Assistant"}
>
{isSpeaking ? "🔊" : isListening ? "👂" : "🎙️"}
</button>
{/* Chat Panel */}
{showChat && (
<div className={`absolute bottom-20 right-0 w-80 ${bgColor} rounded-2xl shadow-2xl border border-white/10 overflow-hidden`}>
{/* Header */}
<div className={`${theme === "dark" ? "bg-brand-purple" : "bg-blue-600"} p-3 flex items-center justify-between`}>
<div className="flex items-center gap-2">
<span className="text-xl">🤖</span>
<span className={`font-semibold ${theme === "dark" ? "text-white" : "text-white"}`}>
{businessName} AI
</span>
</div>
<button
onClick={() => setLanguage(lang => lang === "es" ? "en" : "es")}
className="text-xs bg-white/20 px-2 py-1 rounded text-white"
>
{language === "es" ? "🇪🇸 ES" : "🇬🇧 EN"}
</button>
</div>
{/* Messages */}
<div className={`h-64 overflow-y-auto p-3 space-y-2 ${textColor}`}>
{messages.map((msg, i) => (
<div key={i} className={`flex ${msg.role === "user" ? "justify-end" : "justify-start"}`}>
<div className={`max-w-[80%] px-3 py-2 rounded-lg text-sm ${
msg.role === "user"
? buttonColor + " text-white"
: theme === "dark" ? "bg-white/10 text-white" : "bg-gray-100 text-gray-900"
}`}>
{msg.content}
</div>
</div>
))}
{isListening && (
<div className="flex justify-start">
<div className="bg-white/10 px-3 py-2 rounded-lg text-sm text-white animate-pulse">
🎤 Listening...
</div>
</div>
)}
<div ref={messagesEndRef} />
</div>
{/* Controls */}
<div className={`p-3 border-t ${theme === "dark" ? "border-white/10" : "border-gray-200"} flex gap-2`}>
<button
onClick={toggleListening}
disabled={isSpeaking}
className={`flex-1 py-2 rounded-lg font-semibold text-white transition ${
isListening ? "bg-red-500 animate-pulse" : buttonColor + " hover:opacity-90"
} disabled:opacity-50`}
>
{isListening ? "🛑 Stop" : "🎤 Hablar"}
</button>
</div>
{/* Error */}
{error && (
<div className="px-3 pb-2 text-xs text-red-400">
{error}
</div>
)}
</div>
)}
</div>
);
}
// Type declarations for Web Speech API
declare global {
interface Window {
SpeechRecognition: typeof SpeechRecognition;
webkitSpeechRecognition: typeof SpeechRecognition;
}
}
-262
View File
@@ -1,262 +0,0 @@
"use client";
import { useState, useRef, useEffect } from "react";
import SynthflowWidget from "./SynthflowWidget";
interface SiteMenteVoiceWidgetProps {
businessName?: string;
businessType?: "restaurant" | "real-estate" | "clinic" | "car-rental" | "default";
theme?: "dark" | "light";
initialLang?: string;
}
type Mode = "synthflow" | "text" | "off";
const SYNTHFLOW_WIDGET_ID = "0ee1b79c-43c2-41e0-aa6a-d2a560e0ca6a";
export default function SiteMenteVoiceWidget({
businessName = "SiteMente",
businessType = "default",
theme = "dark",
initialLang = "en"
}: SiteMenteVoiceWidgetProps) {
const [mode, setMode] = useState<Mode>("off");
const [showChat, setShowChat] = useState(false);
const [language, setLanguage] = useState<'en' | 'es'>(initialLang as 'en' | 'es');
// Text chat state
const [messages, setMessages] = useState<{role: "user" | "assistant", content: string}[]>([]);
const [input, setInput] = useState("");
const [isSending, setIsSending] = useState(false);
const messagesEndRef = useRef<HTMLDivElement>(null);
// Scroll to bottom of messages
useEffect(() => {
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
}, [messages]);
// Toggle modes
const cycleMode = () => {
const modes: Mode[] = ["off", "text", "synthflow"];
const currentIndex = modes.indexOf(mode);
const nextIndex = (currentIndex + 1) % modes.length;
setModeInternal(modes[nextIndex]);
};
const setModeInternal = (newMode: Mode) => {
setMode(newMode);
};
// Text mode functions
const sendMessage = async () => {
if (!input.trim() || isSending) return;
const userMessage = input.trim();
setInput("");
setIsSending(true);
setMessages(prev => [...prev, { role: "user", content: userMessage }]);
try {
await new Promise(resolve => setTimeout(resolve, 1000));
const englishResponses: Record<string, string> = {
"hello": `Hello! 👋 I'm the AI assistant for ${businessName}. How can I help you today?`,
"hours": `We're open Monday to Sunday. Do you have a specific question?`,
"book": "I can help you book right now. What service are you interested in?",
"contact": `You can call us at +34 XXX XXX XXX or message us here.`,
"price": "We offer packages starting from €299/month. Would you like me to tell you more?",
"menu": "We serve delicious food daily. Would you like to see our menu or make a reservation?",
"reservation": "I can help you make a reservation! What date and time works for you?",
"thanks": "You're welcome! Is there anything else I can help you with?",
"thank you": "You're welcome! Is there anything else I can help you with?",
};
const spanishResponses: Record<string, string> = {
"hola": `¡Hola! 👋 Soy el asistente de ${businessName}. ¿En qué puedo ayudarte hoy?`,
"horario": `Nuestros horarios de atención son de lunes a domingo. ¿Tienes alguna pregunta específica?`,
"reservar": "Para hacer una reserva, puedo ayudarte ahora mismo. ¿Qué servicio te interesa?",
"contacto": `Puedes llamarnos al +34 XXX XXX XXX o escribirnos aquí.`,
};
const lowerInput = userMessage.toLowerCase();
const spanishKeywords = ['hola', 'gracias', 'si', 'no', 'por favor', 'quiero', 'necesito', 'reserva', 'horario', 'precio', 'contacto'];
const isSpanish = spanishKeywords.some(keyword => lowerInput.includes(keyword));
const responses = (language === 'es' || isSpanish) ? spanishResponses : englishResponses;
const defaultResponse = language === 'es'
? "Gracias por tu mensaje. Un miembro de nuestro equipo te responderá pronto."
: "Thanks for your message! A team member will get back to you shortly.";
let response = defaultResponse;
for (const [key, value] of Object.entries(responses)) {
if (lowerInput.includes(key)) {
response = value;
break;
}
}
setMessages(prev => [...prev, { role: "assistant", content: response }]);
} catch (error) {
console.error("Send error:", error);
} finally {
setIsSending(false);
}
};
const handleKeyPress = (e: React.KeyboardEvent) => {
if (e.key === "Enter" && !e.shiftKey) {
e.preventDefault();
sendMessage();
}
};
const buttonColor = theme === "dark" ? "bg-brand-pink" : "bg-blue-600";
const bgColor = theme === "dark" ? "bg-[#1a1625]" : "bg-white";
const textColor = theme === "dark" ? "text-white" : "text-gray-900";
const inputBg = theme === "dark" ? "bg-white/10" : "bg-gray-100";
// Get mode icon
const getModeIcon = () => {
if (mode === "synthflow") return "🎙️";
if (mode === "text") return "💬";
return "⚪";
};
const getModeLabel = () => {
if (mode === "synthflow") return "AI Voice";
if (mode === "text") return "Chat";
return "Off";
};
return (
<div className="fixed bottom-6 right-6 z-50">
{/* Language Toggle */}
<button
onClick={() => setLanguage(lang => lang === 'en' ? 'es' : 'en')}
className={`absolute bottom-28 right-0 ${buttonColor} px-3 py-1.5 rounded-full text-xs text-white shadow-lg mb-2 flex items-center gap-1.5 hover:scale-105 transition-transform`}
title="Toggle Language"
>
{language === 'en' ? '🇪🇸 ES' : '🇬🇧 EN'}
</button>
{/* Mode Toggle Buttons - Direct Selection */}
<div className="absolute bottom-16 right-0 flex flex-col gap-1 mb-2">
<button
onClick={() => setModeInternal("text")}
className={`${mode === "text" ? buttonColor : "bg-white/20"} px-3 py-1.5 rounded-full text-xs text-white shadow-lg flex items-center gap-1.5 hover:scale-105 transition-transform`}
title="Text Chat"
>
💬 Chat
</button>
<button
onClick={() => setModeInternal("synthflow")}
className={`${mode === "synthflow" ? "bg-green-500" : "bg-white/20"} px-3 py-1.5 rounded-full text-xs text-white shadow-lg flex items-center gap-1.5 hover:scale-105 transition-transform`}
title="AI Voice"
>
🎙 Voice
</button>
</div>
{/* Synthflow Widget - Custom WebSocket */}
{mode === "synthflow" && (
<div className="absolute bottom-16 right-0 mb-2">
<SynthflowWidget
apiKey="yCNoizRk4kRcLrR4V27iem3XkFKZizWrjSXvkao-MZI"
assistantId="0ee1b79c-43c2-41e0-aa6a-d2a560e0ca6a"
theme={theme}
/>
</div>
)}
{/* Text Chat Panel */}
{mode === "text" && (
<div className={`absolute bottom-16 right-0 w-80 ${bgColor} border border-white/20 rounded-xl shadow-2xl mb-2 overflow-hidden`}>
{/* Header */}
<div className={`flex items-center justify-between p-3 border-b ${theme === "dark" ? "border-white/10" : "border-gray-200"}`}>
<div className="flex items-center gap-2">
<span className="text-sm font-medium ${textColor}">💬 {businessName}</span>
</div>
<span className="w-2 h-2 rounded-full bg-green-500"></span>
</div>
{/* Messages */}
<div className="h-64 overflow-y-auto p-3 space-y-3">
{messages.length === 0 && (
<div className="text-center text-sm text-gray-500 py-4">
👋 ¡Hola! Escríbeme para ayudarte
</div>
)}
{messages.map((msg, i) => (
<div key={i} className={`flex ${msg.role === "user" ? "justify-end" : "justify-start"}`}>
<div className={`max-w-[80%] rounded-lg px-3 py-2 text-sm ${
msg.role === "user"
? "bg-brand-pink text-white"
: theme === "dark" ? "bg-white/10 text-white" : "bg-gray-100 text-gray-900"
}`}>
{msg.content}
</div>
</div>
))}
{isSending && (
<div className="flex justify-start">
<div className={`max-w-[80%] rounded-lg px-3 py-2 text-sm ${theme === "dark" ? "bg-white/10 text-white/70" : "bg-gray-100 text-gray-500"}`}>
Escribiendo...
</div>
</div>
)}
<div ref={messagesEndRef} />
</div>
{/* Input */}
<div className={`p-3 border-t ${theme === "dark" ? "border-white/10" : "border-gray-200"}`}>
<div className="flex gap-2">
<input
type="text"
value={input}
onChange={(e) => setInput(e.target.value)}
onKeyPress={handleKeyPress}
placeholder={language === 'es' ? "Escribe tu mensaje..." : "Type your message..."}
className={`flex-1 px-3 py-2 rounded-lg text-sm ${inputBg} ${textColor} placeholder-gray-500 focus:outline-none focus:ring-2 focus:ring-brand-pink`}
disabled={isSending}
/>
<button
onClick={sendMessage}
disabled={!input.trim() || isSending}
className={`${buttonColor} p-2 rounded-lg text-white disabled:opacity-50 disabled:cursor-not-allowed`}
>
<svg className="w-5 h-5" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 19l9 2-9-18-9 18 9-2zm0 0v-8" />
</svg>
</button>
</div>
</div>
</div>
)}
{/* Main Button - Toggle chat visibility when in text mode */}
<button
onClick={() => setShowChat(!showChat)}
className={`${buttonColor} w-14 h-14 rounded-full shadow-lg flex items-center justify-center transition-all hover:scale-110 ${
mode === "synthflow" ? "animate-pulse ring-4 ring-green-500/50" : ""
}`}
title={mode === "text" ? (showChat ? "Close" : "Open Chat") : "Click to enable"}
>
{mode === "synthflow" ? (
<svg className="w-6 h-6 text-white" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M19 11a7 7 0 01-7 7m0 0a7 7 0 01-7-7m7 7v4m0 0H8m4 0h4m-4-8a3 3 0 01-3-3V5a3 3 0 116 0v6a3 3 0 01-3 3z" />
</svg>
) : showChat ? (
<svg className="w-6 h-6 text-white" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
</svg>
) : (
<svg className="w-6 h-6 text-white" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M8 12h.01M12 12h.01M16 12h.01M21 12c0 4.418-4.03 8-9 8a9.863 9.863 0 01-4.255-.949L3 20l1.395-3.72C3.512 15.042 3 13.574 3 12c0-4.418 4.03-8 9-8s9 3.582 9 8z" />
</svg>
)}
</button>
</div>
);
}
-235
View File
@@ -1,235 +0,0 @@
"use client";
import { useState, useRef, useEffect, useCallback } from "react";
interface SynthflowWidgetProps {
apiKey: string;
assistantId: string;
theme?: "dark" | "light";
}
export default function SynthflowWidget({
apiKey,
assistantId,
theme = "dark"
}: SynthflowWidgetProps) {
const [isConnected, setIsConnected] = useState(false);
const [isTalking, setIsTalking] = useState(false);
const [status, setStatus] = useState<"idle" | "connecting" | "ready" | "talking" | "error">("idle");
const [transcript, setTranscript] = useState("");
const [error, setError] = useState("");
const wsRef = useRef<WebSocket | null>(null);
const audioContextRef = useRef<AudioContext | null>(null);
const mediaStreamRef = useRef<MediaStream | null>(null);
const audioChunksRef = useRef<Int16Array[]>([]);
const connect = useCallback(async () => {
try {
setStatus("connecting");
setError("");
// Get WebSocket token
const tokenRes = await fetch(`https://widget.synthflow.ai/websocket/token/${assistantId}`, {
headers: {
"Authorization": `Bearer ${apiKey}`
}
});
if (!tokenRes.ok) {
throw new Error("Failed to get token");
}
const { sessionURL } = await tokenRes.json();
// Connect to WebSocket
const ws = new WebSocket(sessionURL);
wsRef.current = ws;
ws.onopen = () => {
console.log("WebSocket connected");
setIsConnected(true);
setStatus("ready");
// Send ready signal
ws.send(JSON.stringify({ type: "status_client_ready" }));
};
ws.onmessage = async (event) => {
if (typeof event.data === "string") {
const data = JSON.parse(event.data);
console.log("WS message:", data);
if (data.type === "transcript") {
setTranscript(data.text || "");
} else if (data.type === "status_agent_ready") {
setStatus("ready");
}
} else if (event.data instanceof Blob) {
// Audio from agent - play it
const arrayBuffer = await event.data.arrayBuffer();
await playAudio(new Int16Array(arrayBuffer));
}
};
ws.onerror = (err) => {
console.error("WS error:", err);
setError("Connection error");
setStatus("error");
};
ws.onclose = () => {
setIsConnected(false);
setStatus("idle");
};
} catch (err: any) {
console.error("Connection failed:", err);
setError(err.message);
setStatus("error");
}
}, [apiKey, assistantId]);
const startRecording = async () => {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
mediaStreamRef.current = stream;
// Create audio context
const audioContext = new AudioContext({ sampleRate: 48000 });
audioContextRef.current = audioContext;
const source = audioContext.createMediaStreamSource(stream);
const processor = audioContext.createScriptProcessor(4096, 1, 1);
processor.onaudioprocess = (e) => {
if (wsRef.current?.readyState === WebSocket.OPEN) {
const inputData = e.inputBuffer.getChannelData(0);
const int16Data = float32ToInt16(inputData);
wsRef.current.send(int16Data);
}
};
source.connect(processor);
processor.connect(audioContext.destination);
setIsTalking(true);
setStatus("talking");
} catch (err: any) {
console.error("Recording error:", err);
setError("Microphone access denied");
}
};
const stopRecording = () => {
if (mediaStreamRef.current) {
mediaStreamRef.current.getTracks().forEach(track => track.stop());
}
if (audioContextRef.current) {
audioContextRef.current.close();
}
setIsTalking(false);
setStatus("ready");
};
const playAudio = async (int16Data: Int16Array) => {
try {
const ctx = new AudioContext({ sampleRate: 16000 });
const buffer = ctx.createBuffer(1, int16Data.length, 16000);
buffer.getChannelData(0).set(int16ToFloat32(int16Data));
const source = ctx.createBufferSource();
source.buffer = buffer;
source.connect(ctx.destination);
source.start();
} catch (err) {
console.error("Play audio error:", err);
}
};
const float32ToInt16 = (float32: Float32Array): Int16Array => {
const int16 = new Int16Array(float32.length);
for (let i = 0; i < float32.length; i++) {
int16[i] = Math.max(-1, Math.min(1, float32[i])) * 0x7FFF;
}
return int16;
};
const int16ToFloat32 = (int16: Int16Array): Float32Array => {
const float32 = new Float32Array(int16.length);
for (let i = 0; i < int16.length; i++) {
float32[i] = int16[i] / 0x7FFF;
}
return float32;
};
const toggleCall = () => {
if (!isConnected) {
connect();
} else if (isTalking) {
stopRecording();
} else {
startRecording();
}
};
const colors = theme === "dark"
? { bg: "#1a1625", text: "#fff", accent: "#ff69b4" }
: { bg: "#fff", text: "#000", accent: "#0066ff" };
return (
<div
className="rounded-xl p-4 shadow-lg"
style={{ backgroundColor: colors.bg, color: colors.text }}
>
<div className="flex flex-col items-center gap-4">
{/* Status indicator */}
<div className="flex items-center gap-2">
<div
className={`w-3 h-3 rounded-full ${
status === "ready" ? "bg-green-500" :
status === "talking" ? "bg-green-500 animate-pulse" :
status === "connecting" ? "bg-yellow-500" :
status === "error" ? "bg-red-500" : "bg-gray-500"
}`}
/>
<span className="text-sm">
{status === "idle" && "Click to start"}
{status === "connecting" && "Connecting..."}
{status === "ready" && "Ready"}
{status === "talking" && "Listening..."}
{status === "error" && error || "Error"}
</span>
</div>
{/* Call button */}
<button
onClick={toggleCall}
className={`w-16 h-16 rounded-full flex items-center justify-center transition-all ${
isTalking
? "bg-red-500 animate-pulse"
: "bg-green-500 hover:scale-110"
}`}
>
{isTalking ? (
<svg className="w-8 h-8 text-white" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M16 8l2-2m0 0l2-2m-2 2l-2-2m2 2l2 2M5 3a2 2 0 00-2 2v1c0 8.284 6.716 15 15 15h1a2 2 0 002-2v-3.28a1 1 0 00-.684-.948l-4.493-1.498a1 1 0 00-1.21.502l-1.13 2.257a11.042 11.042 0 01-5.516-5.517l2.257-1.128a1 1 0 00.502-1.21L9.228 3.683A1 1 0 008.279 3H5z" />
</svg>
) : (
<svg className="w-8 h-8 text-white" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M3 5a2 2 0 012-2h3.28a1 1 0 01.948.684l1.498 4.493a1 1 0 01-.502 1.21l-2.257 1.13a11.042 11.042 0 005.516 5.516l1.13-2.257a1 1 0 011.21-.502l4.493 1.498a1 1 0 01.684.949V19a2 2 0 01-2 2h-1C9.716 21 3 14.284 3 6V5z" />
</svg>
)}
</button>
{/* Transcript */}
{transcript && (
<div className="w-full p-3 rounded-lg bg-white/10 text-sm">
{transcript}
</div>
)}
</div>
</div>
);
}
-134
View File
@@ -1,134 +0,0 @@
# 🎙️ SiteMente Custom Voice Widget
A custom voice AI chat widget using **MiniMax** as the brain, **Web Speech API** for input, and **Browser TTS** for output.
---
## ✅ What's Included
1. **MiniMaxVoiceWidget.tsx** - React component
2. **API endpoint** - `/api/ai/voice-chat`
---
## 🚀 Quick Deploy
### 1. Copy Files to Production
Copy these files to your SiteMente repo:
- `components/MiniMaxVoiceWidget.tsx` → `SiteMente/components/MiniMaxVoiceWidget.tsx`
- `app/api/ai/voice-chat/route.ts` → `SiteMente/app/api/ai/voice-chat/route.ts`
### 2. Update .env.local (if needed)
```env
MINIMAX_API_KEY=your_minimax_key_here
```
### 3. Add to Page
In any page (e.g., `app/page.tsx` or `app/demos/[vertical]/page.tsx`):
```tsx
import MiniMaxVoiceWidget from "@/components/MiniMaxVoiceWidget";
export default function DemoPage() {
return (
<>
{/* Your page content */}
<MiniMaxVoiceWidget
businessName="Restaurante Ejemplo"
businessType="restaurant"
theme="dark"
/>
</>
);
}
```
### 4. Deploy
```bash
git add .
git commit -m "Add custom voice widget"
git push origin develop
```
---
## ⚙️ Configuration Options
| Prop | Type | Default | Description |
|------|------|---------|-------------|
| `businessName` | string | "SiteMente" | Name shown in chat |
| `businessType` | string | "restaurant" | restaurant, real-estate, clinic, car-rental |
| `theme` | string | "dark" | "dark" or "light" |
| `apiUrl` | string | "/api/ai/voice-chat" | Custom API endpoint |
---
## 🌍 Language Behavior
- **Default:** Spanish
- **Switch:** After first user message, detects language and switches automatically
- **Manual toggle:** Button in chat header
---
## 💰 Cost
- **Input (Speech → Text):** FREE (Web Speech API)
- **Output (Text → Speech):** FREE (Browser TTS)
- **Brain (MiniMax):** Your existing API key (~€0.001/msg)
**Total cost: Nearly zero for POCs!**
---
## 🔧 Customization
### Change Voice
In `MiniMaxVoiceWidget.tsx`, find `speak()` function:
```tsx
// Different voices available
const voices = synthRef.current?.getVoices();
// Spanish voices
const spanishVoice = voices?.find(v => v.lang.includes("es"));
utterance.voice = spanishVoice;
```
### Add More Business Types
Edit `SYSTEM_PROMPT` in `route.ts` to customize responses per business type.
---
## 🐛 Troubleshooting
### "Speech recognition not supported"
- Use Chrome, Edge, or Safari (not Firefox)
- HTTPS required (or localhost)
### "API request failed"
- Check MiniMax API key is valid
- Check API endpoint is accessible
### Widget not showing
- Ensure client-side import: `"use client"`
- Check for CSS conflicts
---
## 📦 Next Steps (Optional)
1. **Voice cloning** - ElevenLabs ($5-15/month)
2. **Emotions** - Custom prompts for personality
3. **Multi-turn** - Longer conversation history
4. **Vapi integration** - For phone calls later
---
**Status: Ready to test! 🚀**