'use server';

import { generateObject } from 'ai';
import { createOpenAI as createGroq } from '@ai-sdk/openai';
import { z } from 'zod';
import { load } from 'cheerio';

const groq = createGroq({
|
|
baseURL: 'https://api.groq.com/openai/v1',
|
|
apiKey: process.env.GROQ_API_KEY,
|
|
});
|
|
|
|
export async function suggestQuestions(history: any[]) {
|
|
'use server';
|
|
|
|
const { object } = await generateObject({
|
|
model: groq('llama-3.2-11b-text-preview'),
|
|
temperature: 0,
|
|
system:
|
|
`You are a search engine query generator. You 'have' to create only '3' questions for the search engine based on the message history which has been provided to you.
|
|
The questions should be open-ended and should encourage further discussion while maintaining the whole context. Limit it to 5-10 words per question.
|
|
Always put the user input's context is some way so that the next search knows what to search for exactly.
|
|
Try to stick to the context of the conversation and avoid asking questions that are too general or too specific.
|
|
For weather based converations sent to you, always generate questions that are about news, sports, or other topics that are not related to the weather.
|
|
For programming based conversations, always generate questions that are about the algorithms, data structures, or other topics that are related to it or an improvement of the question.
|
|
For location based conversations, always generate questions that are about the culture, history, or other topics that are related to the location.
|
|
For the translation based conversations, always generate questions that may continue the conversation or ask for more information or translations.
|
|
Never use pronouns in the questions as they blur the context.`,
|
|
messages: history,
|
|
schema: z.object({
|
|
questions: z.array(z.string()).describe('The generated questions based on the message history.')
|
|
}),
|
|
});
|
|
|
|
return {
|
|
questions: object.questions
|
|
};
|
|
}
|
|
|
|
const ELEVENLABS_API_KEY = process.env.ELEVENLABS_API_KEY;
|
|
|
|
export async function generateSpeech(text: string, voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer' = "alloy") {
|
|
|
|
const VOICE_ID = 'JBFqnCBsd6RMkjVDRZzb' // This is the ID for the "George" voice. Replace with your preferred voice ID.
|
|
const url = `https://api.elevenlabs.io/v1/text-to-speech/${VOICE_ID}`
|
|
const method = 'POST'
|
|
|
|
if (!ELEVENLABS_API_KEY) {
|
|
throw new Error('ELEVENLABS_API_KEY is not defined');
|
|
}
|
|
|
|
const headers = {
|
|
Accept: 'audio/mpeg',
|
|
'xi-api-key': ELEVENLABS_API_KEY,
|
|
'Content-Type': 'application/json',
|
|
}
|
|
|
|
const data = {
|
|
text,
|
|
model_id: 'eleven_turbo_v2_5',
|
|
voice_settings: {
|
|
stability: 0.5,
|
|
similarity_boost: 0.5,
|
|
},
|
|
}
|
|
|
|
const body = JSON.stringify(data)
|
|
|
|
const input = {
|
|
method,
|
|
headers,
|
|
body,
|
|
}
|
|
|
|
const response = await fetch(url, input)
|
|
|
|
const arrayBuffer = await response.arrayBuffer();
|
|
|
|
const base64Audio = Buffer.from(arrayBuffer).toString('base64');
|
|
|
|
return {
|
|
audio: `data:audio/mp3;base64,${base64Audio}`,
|
|
};
|
|
}
|
|
|
|
export async function fetchMetadata(url: string) {
|
|
try {
|
|
const response = await fetch(url, { next: { revalidate: 3600 } }); // Cache for 1 hour
|
|
const html = await response.text();
|
|
const $ = load(html);
|
|
|
|
const title = $('head title').text() || $('meta[property="og:title"]').attr('content') || '';
|
|
const description = $('meta[name="description"]').attr('content') || $('meta[property="og:description"]').attr('content') || '';
|
|
|
|
return { title, description };
|
|
} catch (error) {
|
|
console.error('Error fetching metadata:', error);
|
|
return null;
|
|
}
|
|
} |