import { NextRequest, NextResponse } from 'next/server';
|
|
import { GoogleGenerativeAI } from '@google/generative-ai';
|
|
|
|
export async function POST(req: NextRequest) {
|
|
try {
|
|
const { messages } = await req.json();
|
|
|
|
if (!process.env.GEMINI_API_KEY) {
|
|
return NextResponse.json({ error: 'Gemini API key not configured' }, { status: 500 });
|
|
}
|
|
|
|
const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY);
|
|
const model = genAI.getGenerativeModel({ model: 'gemini-2.0-flash' });
|
|
|
|
const history = messages.slice(0, -1).map((msg: any) => ({
|
|
role: msg.role === 'assistant' ? 'model' : 'user',
|
|
parts: [{ text: msg.content }],
|
|
}));
|
|
|
|
const userMessage = messages[messages.length - 1].content;
|
|
|
|
const chat = model.startChat({
|
|
history: history,
|
|
generationConfig: {
|
|
temperature: 0.7,
|
|
maxOutputTokens: 1000,
|
|
},
|
|
});
|
|
|
|
const result = await chat.sendMessage(userMessage);
|
|
const text = result.response.text();
|
|
|
|
return NextResponse.json({ text });
|
|
} catch (error: any) {
|
|
console.error('Error in chat API:', error);
|
|
return NextResponse.json({ error: error.message }, { status: 500 });
|
|
}
|
|
}
|