// app/api/chat/route.ts

import OpenAI from "openai";
import { OpenAIStream, StreamingTextResponse } from "ai";
import { NextResponse } from "next/server";

// Optional, but recommended: run on the edge runtime.
// See https://vercel.com/docs/concepts/functions/edge-functions
export const runtime = "edge";

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY!,
});
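
// Shape of the extra context the client sends alongside `messages`.
// This interface is a sketch inferred from the fields used in the prompt
// below; adjust it to match your actual client payload.
interface ChatContextBody {
  projectInfo: { name: string; description: string; stack: string[] };
  featureInfo: { name: string; description: string };
  taskInfo: { name: string; description: string };
}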

export async function POST(req: Request) {
  try {
    // Extract `messages` and the project/feature/task context from the request body
    const { messages, ...body } = await req.json();

    const prompt = `You are a chatbot that helps users with questions specific to project tasks.
- Project Details:
    - Name: ${body.projectInfo.name}
    - Description: ${body.projectInfo.description}
    - Tech Stack: ${body.projectInfo.stack.join(", ")}

- Feature Context:
    - Name: ${body.featureInfo.name}
    - Description: ${body.featureInfo.description}

- Task Context:
    - Name: ${body.taskInfo.name}
    - Description: ${body.taskInfo.description}

OPERATION GUIDELINES:

1. Provide information and answer questions specifically related to the project, feature, or task context provided.
2. Do not give generic answers; tailor responses based on the given context.`;

    // Prepend the system prompt so it steers the entire conversation
    messages.unshift({ role: "system", content: prompt });

    // Request a streaming chat completion from the OpenAI API
    const response = await openai.chat.completions.create({
      model: "gpt-3.5-turbo-16k",
      stream: true,
      messages,
    });

    // Convert the response into a friendly text-stream
    const stream = OpenAIStream(response);

    // Respond with the stream
    return new StreamingTextResponse(stream);
  } catch (err) {
    console.error(err);
    // `Error` instances serialize to `{}` in JSON, so surface the message explicitly
    const message = err instanceof Error ? err.message : "Unknown error";
    return NextResponse.json({ error: message }, { status: 500 });
  }
}
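
/*
Example client usage (a sketch, assuming the Vercel AI SDK's `useChat` hook
from "ai/react"; the extra `body` fields are merged into the POST payload and
become the `projectInfo`, `featureInfo`, and `taskInfo` read above):

  "use client";
  import { useChat } from "ai/react";

  export function TaskChat({ projectInfo, featureInfo, taskInfo }) {
    const { messages, input, handleInputChange, handleSubmit } = useChat({
      api: "/api/chat",
      body: { projectInfo, featureInfo, taskInfo },
    });

    return (
      <form onSubmit={handleSubmit}>
        {messages.map((m) => (
          <p key={m.id}>
            {m.role}: {m.content}
          </p>
        ))}
        <input value={input} onChange={handleInputChange} />
      </form>
    );
  }
*/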