fix: suggested question generation, switched from groq to gemini.

zaidmukaddam 2024-11-07 15:31:58 +05:30
parent 61c10f767a
commit fb19021c91
4 changed files with 26 additions and 28 deletions

View File

@@ -1,21 +1,23 @@
 'use server';

 import { generateObject } from 'ai';
-import { createOpenAI as createGroq } from '@ai-sdk/openai';
+import { google } from '@ai-sdk/google'
 import { z } from 'zod';
 import { load } from 'cheerio';

-const groq = createGroq({
-  baseURL: 'https://api.groq.com/openai/v1',
-  apiKey: process.env.GROQ_API_KEY,
-});

 export async function suggestQuestions(history: any[]) {
   'use server';
+  console.log(history);

   const { object } = await generateObject({
-    model: groq('llama-3.2-90b-text-preview'),
-    temperature: 0,
+    model: google('gemini-1.5-pro-002',{
+      structuredOutputs: true,
+    }),
+    temperature: 1,
+    maxTokens: 300,
+    topP: 0.95,
+    topK: 40,
     system:
     `You are a search engine query generator. You 'have' to create only '3' questions for the search engine based on the message history which has been provided to you.
     The questions should be open-ended and should encourage further discussion while maintaining the whole context. Limit it to 5-10 words per question.
@@ -54,7 +56,7 @@ export async function generateSpeech(text: string, voice: 'alloy' | 'echo' | 'fa
       'xi-api-key': ELEVENLABS_API_KEY,
       'Content-Type': 'application/json',
     }

     const data = {
       text,
       model_id: 'eleven_turbo_v2_5',
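
For context, a minimal sketch of how the updated action could look end to end with the Gemini provider. The zod schema, the messages wiring, and the return shape are not visible in the hunk above and are illustrative assumptions:

'use server';

import { generateObject } from 'ai';
import { google } from '@ai-sdk/google';
import { z } from 'zod';

export async function suggestQuestions(history: any[]) {
  // Requires GOOGLE_GENERATIVE_AI_API_KEY in the environment for the default google provider.
  // structuredOutputs asks Gemini to emit JSON matching the schema below.
  const { object } = await generateObject({
    model: google('gemini-1.5-pro-002', { structuredOutputs: true }),
    temperature: 1,
    maxTokens: 300,
    topP: 0.95,
    topK: 40,
    system: 'Generate exactly 3 open-ended follow-up search questions (5-10 words each) from the message history.',
    messages: history,
    // Assumed schema: the real one sits outside the shown diff context.
    schema: z.object({
      questions: z.array(z.string()).length(3),
    }),
  });

  return { questions: object.questions };
}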

View File

@@ -261,7 +261,7 @@ const HomeContent = () => {
   const initialQuery = searchParams.get('query') || '';
   const initialModel = searchParams.get('model') || 'azure:gpt4o-mini';
-  const [lastSubmittedQuery, setLastSubmittedQuery] = useState(initialQuery);
+  const lastSubmittedQueryRef = useRef(initialQuery);
   const [hasSubmitted, setHasSubmitted] = useState(!!initialQuery);
   const [selectedModel, setSelectedModel] = useState(initialModel);
   const bottomRef = useRef<HTMLDivElement>(null);
@@ -284,7 +284,7 @@ const HomeContent = () => {
     onFinish: async (message, { finishReason }) => {
       console.log("[finish reason]:", finishReason);
       if (message.content && finishReason === 'stop' || finishReason === 'length') {
-        const newHistory = [...messages, { role: "user", content: lastSubmittedQuery }, { role: "assistant", content: message.content }];
+        const newHistory = [...messages, { role: "user", content: lastSubmittedQueryRef.current }, { role: "assistant", content: message.content }];
         const { questions } = await suggestQuestions(newHistory);
         setSuggestedQuestions(questions);
       }
@@ -1264,33 +1264,30 @@ GPT-4o has been re-enabled! You can use it by selecting the model from the dropd
     }
   }, [messages, suggestedQuestions]);

-  const handleExampleClick = useCallback(async (card: typeof suggestionCards[number]) => {
+  const handleExampleClick = async (card: typeof suggestionCards[number]) => {
     const exampleText = card.text;
     track("search example", { query: exampleText });
-    setLastSubmittedQuery(exampleText.trim());
+    lastSubmittedQueryRef.current = exampleText;
     setHasSubmitted(true);
     setSuggestedQuestions([]);
+    console.log('exampleText', exampleText);
+    console.log('lastSubmittedQuery', lastSubmittedQueryRef.current);

     await append({
       content: exampleText.trim(),
       role: 'user',
     });
-  }, [append, setLastSubmittedQuery, setHasSubmitted, setSuggestedQuestions]);
+  };

   const handleSuggestedQuestionClick = useCallback(async (question: string) => {
     setHasSubmitted(true);
     setSuggestedQuestions([]);
-    setInput(question.trim());

     await append({
       content: question.trim(),
       role: 'user'
     });
-  }, [setInput, append]);
+  }, [append]);

   const handleMessageEdit = useCallback((index: number) => {
     setIsEditingMessage(true);
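
The page-level change swaps useState for a ref so that onFinish, which closes over the render it was created in, does not read a stale query when the streamed response finishes. A minimal sketch of the pattern, with the hook name and message shapes assumed for illustration:

import { useRef } from 'react';

// Illustrative only: a useState value read inside a long-lived callback can be
// stale, because the callback captured an earlier render. A ref is read at call
// time, so .current always holds the most recently submitted query.
function useLastSubmittedQuery(initialQuery: string) {
  const lastSubmittedQueryRef = useRef(initialQuery);

  const record = (text: string) => {
    // Synchronous update, immediately visible to any callback that runs later.
    lastSubmittedQueryRef.current = text.trim();
  };

  const buildHistory = (
    messages: { role: string; content: string }[],
    assistantContent: string,
  ) => [
    ...messages,
    { role: 'user', content: lastSubmittedQueryRef.current },
    { role: 'assistant', content: assistantContent },
  ];

  return { record, buildHistory };
}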

View File

@@ -12,7 +12,7 @@
     "@ai-sdk/anthropic": "^0.0.55",
     "@ai-sdk/azure": "^0.0.51",
     "@ai-sdk/cohere": "latest",
-    "@ai-sdk/google": "^0.0.52",
+    "@ai-sdk/google": "^0.0.55",
     "@ai-sdk/groq": "^0.0.1",
     "@ai-sdk/mistral": "^0.0.41",
     "@ai-sdk/openai": "^0.0.58",

View File

@@ -15,8 +15,8 @@ dependencies:
     specifier: latest
     version: 0.0.28(zod@3.23.8)
   '@ai-sdk/google':
-    specifier: ^0.0.52
-    version: 0.0.52(zod@3.23.8)
+    specifier: ^0.0.55
+    version: 0.0.55(zod@3.23.8)
   '@ai-sdk/groq':
     specifier: ^0.0.1
     version: 0.0.1(zod@3.23.8)
@@ -281,15 +281,14 @@ packages:
       zod: 3.23.8
     dev: false

-  /@ai-sdk/google@0.0.52(zod@3.23.8):
-    resolution: {integrity: sha512-bfsA/1Ae0SQ6NfLwWKs5SU4MBwlzJjVhK6bTVBicYFjUxg9liK/W76P1Tq/qK9OlrODACz3i1STOIWsFPpIOuQ==}
+  /@ai-sdk/google@0.0.55(zod@3.23.8):
+    resolution: {integrity: sha512-dvEMS8Ex2H0OeuFBiT4Q1Kfrxi1ckjooy/PazNLjRQ3w9o9VQq4O24eMQGCuW1Z47qgMdXjhDzsH6qD0HOX6Cw==}
     engines: {node: '>=18'}
     peerDependencies:
       zod: ^3.0.0
     dependencies:
-      '@ai-sdk/provider': 0.0.24
-      '@ai-sdk/provider-utils': 1.0.20(zod@3.23.8)
-      json-schema: 0.4.0
+      '@ai-sdk/provider': 0.0.26
+      '@ai-sdk/provider-utils': 1.0.22(zod@3.23.8)
       zod: 3.23.8
     dev: false