Added option to choose OpenAI provider

This commit is contained in:
zaidmukaddam 2024-08-26 21:00:45 +05:30
parent 9498bf1c97
commit 56dd60105d
2 changed files with 80 additions and 32 deletions

View File

@ -6,18 +6,18 @@ import { createOpenAI as createGroq } from '@ai-sdk/openai';
import { z } from 'zod';
const groq = createGroq({
baseURL: 'https://api.groq.com/openai/v1',
apiKey: process.env.GROQ_API_KEY,
baseURL: 'https://api.groq.com/openai/v1',
apiKey: process.env.GROQ_API_KEY,
});
export async function suggestQuestions(history: any[]) {
'use server';
'use server';
const { object } = await generateObject({
model: groq('llama-3.1-70b-versatile'),
temperature: 0,
system:
`You are a search engine query generator. You 'have' to create 3 questions for the search engine based on the message history which has been provided to you.
const { object } = await generateObject({
model: groq('llama-3.1-70b-versatile'),
temperature: 0,
system:
`You are a search engine query generator. You 'have' to create 3 questions for the search engine based on the message history which has been provided to you.
The questions should be open-ended and should encourage further discussion while maintaining the whole context. Limit it to 5-10 words per question.
Always put the user input's context is some way so that the next search knows what to search for exactly.
Try to stick to the context of the conversation and avoid asking questions that are too general or too specific.
@ -26,18 +26,22 @@ For programming based conversations, always generate questions that are about th
For location based conversations, always generate questions that are about the culture, history, or other topics that are related to the location.
For the translation based conversations, always generate questions that may continue the conversation or ask for more information or translations.
Never use pronouns in the questions as they blur the context.`,
messages: history,
schema: z.object({
questions: z.array(z.string()).describe('The generated questions based on the message history.')
}),
});
messages: history,
schema: z.object({
questions: z.array(z.string()).describe('The generated questions based on the message history.')
}),
});
return {
questions: object.questions
};
return {
questions: object.questions
};
}
export async function generateSpeech(text: string, voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer' = "alloy") {
if (process.env.OPENAI_PROVIDER === 'azure') {
if (!process.env.AZURE_OPENAI_API_KEY || !process.env.AZURE_OPENAI_API_URL) {
throw new Error('Azure OpenAI API key and URL are required.');
}
const url = process.env.AZURE_OPENAI_API_URL!;
const response = await fetch(url, {
@ -60,7 +64,39 @@ export async function generateSpeech(text: string, voice: 'alloy' | 'echo' | 'fa
const arrayBuffer = await response.arrayBuffer();
const base64Audio = Buffer.from(arrayBuffer).toString('base64');
return {
audio: `data:audio/mp3;base64,${base64Audio}`,
};
} else if (process.env.OPENAI_PROVIDER === 'openai') {
const openai = new OpenAI();
const response = await openai.audio.speech.create({
model: "tts-1",
voice: voice,
input: text,
});
const arrayBuffer = await response.arrayBuffer();
const base64Audio = Buffer.from(arrayBuffer).toString('base64');
return {
audio: `data:audio/mp3;base64,${base64Audio}`,
};
} else {
const openai = new OpenAI();
const response = await openai.audio.speech.create({
model: "tts-1",
voice: voice,
input: text,
});
const arrayBuffer = await response.arrayBuffer();
const base64Audio = Buffer.from(arrayBuffer).toString('base64');
return {
audio: `data:audio/mp3;base64,${base64Audio}`,
};
}
}

View File

@ -7,7 +7,7 @@ import FirecrawlApp from '@mendable/firecrawl-js';
import { z } from "zod";
import { geolocation } from "@vercel/functions";
// Allow streaming responses up to 30 seconds
// Allow streaming responses up to 60 seconds
export const maxDuration = 60;
const azure = createAzure({
@ -15,12 +15,24 @@ const azure = createAzure({
apiKey: process.env.AZURE_API_KEY,
});
const provider = process.env.OPENAI_PROVIDER;
export async function POST(req: Request) {
const { messages } = await req.json();
const { latitude, longitude, city } = geolocation(req)
let model;
if (provider === "azure") {
model = azure.chat("gpt-4o-mini");
} else if (provider === "openai") {
model = openai.chat("gpt-4o-mini");
} else {
model = openai.chat("gpt-4o-mini");
}
const result = await streamText({
model: azure.chat("gpt-4o-mini"),
model,
messages: convertToCoreMessages(messages),
temperature: 0.72,
topP: 0.95,
@ -263,13 +275,13 @@ When asked a "What is" question, maintain the same format as the question and an
const abortController = new AbortController();
try {
const blobPromise = put(`mplx/image-${Date.now()}.${format}`, Buffer.from(imageData, 'base64'),
{
access: 'public',
abortSignal: abortController.signal,
});
{
access: 'public',
abortSignal: abortController.signal,
});
const timeout = setTimeout(() => {
// Abort the request after 10 seconds
// Abort the request after 2 seconds
abortController.abort();
}, 2000);