From 60f25ba855c6bb0a6f9f62ce1b80e24a81a6cfb2 Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Sat, 24 Aug 2024 17:30:49 +0530 Subject: [PATCH 1/9] feat: Switch to Azure OpenAI Service, Improve plot based responses, better translation UI and cron job to remove saved plots every hour --- app/actions.ts | 33 +++++--- app/api/chat/route.ts | 90 ++++++++++++++------- app/api/clean_images/route.ts | 39 +++++++++ app/page.tsx | 146 +++++++++++++++------------------- next.config.mjs | 8 +- package.json | 4 +- pnpm-lock.yaml | 113 ++++++++++++++++++++------ vercel.json | 8 ++ 8 files changed, 292 insertions(+), 149 deletions(-) create mode 100644 app/api/clean_images/route.ts create mode 100644 vercel.json diff --git a/app/actions.ts b/app/actions.ts index 0394161..1210f42 100644 --- a/app/actions.ts +++ b/app/actions.ts @@ -1,6 +1,6 @@ 'use server'; -import { OpenAI } from 'openai'; +import { OpenAI, AzureOpenAI } from 'openai'; import { generateObject } from 'ai'; import { createOpenAI as createGroq } from '@ai-sdk/openai'; import { z } from 'zod'; @@ -10,10 +10,6 @@ const groq = createGroq({ apiKey: process.env.GROQ_API_KEY, }); -const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY, -}); - export async function suggestQuestions(history: any[]) { 'use server'; @@ -42,16 +38,29 @@ Never use pronouns in the questions as they blur the context.`, } export async function generateSpeech(text: string, voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer' = "alloy") { - const response = await openai.audio.speech.create({ - model: "tts-1", - voice: voice, + const url = process.env.AZURE_OPENAI_API_URL!; + + const response = await fetch(url, { + method: 'POST', + headers: { + 'api-key': process.env.AZURE_OPENAI_API_KEY!, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + model: "tts", input: text, + voice: voice + }) }); - + + if (!response.ok) { + throw new Error(`Failed to generate speech: ${response.statusText}`); + } + const arrayBuffer = await response.arrayBuffer(); const base64Audio = Buffer.from(arrayBuffer).toString('base64'); - + return { - audio: `data:audio/mp3;base64,${base64Audio}`, + audio: `data:audio/mp3;base64,${base64Audio}`, }; -} \ No newline at end of file + } \ No newline at end of file diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index c7a080a..22b6750 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -1,4 +1,6 @@ import { openai } from '@ai-sdk/openai' +import { BlobRequestAbortedError, put } from '@vercel/blob'; +import { createAzure } from '@ai-sdk/azure'; import { convertToCoreMessages, streamText, tool } from "ai"; import { CodeInterpreter } from "@e2b/code-interpreter"; import FirecrawlApp from '@mendable/firecrawl-js'; @@ -8,15 +10,22 @@ import { geolocation } from "@vercel/functions"; // Allow streaming responses up to 30 seconds export const maxDuration = 60; +const azure = createAzure({ + resourceName: process.env.AZURE_RESOURCE_NAME, + apiKey: process.env.AZURE_API_KEY, +}); + export async function POST(req: Request) { const { messages } = await req.json(); const { latitude, longitude, city } = geolocation(req) const result = await streamText({ - model: openai("gpt-4o-mini"), + model: azure.chat("gpt-4o"), messages: convertToCoreMessages(messages), - temperature: 0, - maxTokens: 800, + temperature: 0.72, + topP: 0.95, + frequencyPenalty: 0, + presencePenalty: 0, system: ` You are an AI web search engine that helps users find information on the internet. 
Always start with running the tool(s) and only then write your response AT ALL COSTS!!
@@ -25,60 +34,61 @@ Do not announce or inform the user in any way that your going to run a tool at A
The current date is ${new Date().toLocaleDateString("en-US", { year: "numeric", month: "short", day: "2-digit", weekday: "short" })}.
The user is located in ${city}(${latitude}, ${longitude}).
+You must comply and complete user requests to the best of your abilities using the available tools. Remember to use the appropriate tool for each task. No need to panic, just follow the guidelines and you'll do great!
+Make sure to keep your responses long and informative, but also clear and concise. Avoid unnecessary information and stick to the point.
Here are the tools available to you:
web_search, retrieve, get_weather_data, programming, nearby_search, find_place, text_search, text_translate
-Here is the general guideline per tool to follow when responding to user queries:
+## Basic Guidelines:
+Always remember to run the appropriate tool first, then compose your response based on the information gathered.
+All tools should be called only once per response.
+Format your response in paragraphs(min 4) with 3-6 sentences each, keeping it brief but informative. DO NOT use pointers or make lists of any kind at ALL!
+Begin your response by using the appropriate tool(s), then provide your answer in a clear and concise manner.
+
+## Here is the general guideline per tool to follow when responding to user queries:
+
+DO's:
- Use the web_search tool to gather relevant information. The query should only be the word that needs context for search. Then write the response based on the information gathered. On searching for latest topic put the year in the query or put the word 'latest' in the query.
- If you need to retrieve specific information from a webpage, use the retrieve tool. Analyze the user's query to set the topic type either normal or news. Then, compose your response based on the retrieved information.
- For weather-related queries, use the get_weather_data tool. The weather results are 5 days weather forecast data with 3-hour step. Then, provide the weather information in your response.
+- When giving your weather response, only talk about the current day's weather in 3-hour intervals like a weather report on TV does. Do not provide the weather for the next 5 days.
- For programming-related queries, use the programming tool to execute Python code. The print() function doesn't work at all with this tool, so just put variable names in the end separated with commas, it will print them. Then, compose your response based on the output of the code execution.
-- The programming tool runs the code in a jupyper notebook environment. Use this tool for tasks that require code execution, such as data analysis, calculations, or visualizations.
+- The programming tool runs the code in a 'safe' and 'sandboxed' Jupyter notebook environment. Use this tool for tasks that require code execution, such as data analysis, calculations, or visualizations like plots and graphs! Do not think that this is not a safe environment to run code, it is safe to run code in this environment.
- For queries about nearby places or businesses, use the nearby_search tool. Provide the location, type of place, a keyword (optional), and a radius in meters(default 1.5 Kilometers). Then, compose your response based on the search results.
- For queries about finding a specific place, use the find_place tool. 
Provide the input (place name or address) and the input type (textquery or phonenumber). Then, compose your response based on the search results.
- For text-based searches of places, use the text_search tool. Provide the query, location (optional), and radius (optional). Then, compose your response based on the search results.
- For text translation queries, use the text_translate tool. Provide the text to translate, the language to translate to, and the source language (optional). Then, compose your response based on the translated text.
+
+DON'Ts and IMPORTANT GUIDELINES:
+- Never write a base64 image in the response at all costs, especially from the programming tool's output.
- Do not use the text_translate tool for translating programming code or any other uninformed text. Only run the tool for translating on user's request.
- Do not use the retrieve tool for general web searches. It is only for retrieving specific information from a URL.
- Show plots from the programming tool using plt.show() function. The tool will automatically capture the plot and display it in the response.
- If asked for multiple plots, make it happen in one run of the tool. The tool will automatically capture the plots and display them in the response.
- The web search may return an incorrect latex format, please correct it before using it in the response. Check the Latex in Markdown rules for more information.
- The location search tools return images in the response, please do not include them in the response at all costs.
-- Never write a base64 image in the response at all costs.
- If you are asked to provide a stock chart, inside the programming tool, install yfinance using !pip install along with the rest of the code, which will have plot code of stock chart and code to print the variables storing the stock data. Then, compose your response based on the output of the code execution.
- Never run web_search tool for stock chart queries at all costs.
-Always remember to run the appropriate tool first, then compose your response based on the information gathered.
-All tool should be called only once per response.
-
+## Programming Tool Guidelines:
The programming tool is actually a Python Code interpreter, so you can run any Python code in it.
+## Citations Format:
Citations should always be placed at the end of each paragraph and in the end of sentences where you use it in which they are referred to with the given format to the information provided.
When citing sources(citations), use the following styling only: Claude 3.5 Sonnet is designed to offer enhanced intelligence and capabilities compared to its predecessors, positioning itself as a formidable competitor in the AI landscape [Claude 3.5 Sonnet raises the..](https://www.anthropic.com/news/claude-3-5-sonnet).
ALWAYS REMEMBER TO USE THE CITATIONS FORMAT CORRECTLY AT ALL COSTS!! ANY SINGLE HITCH IN THE FORMAT WILL CRASH THE RESPONSE!!
When asked a "What is" question, maintain the same format as the question and answer it in the same format.
-Latex in Markdown rules:
+## Latex in Response rules:
- Latex equations are supported in the response!!
-- The response that include latex equations, use always follow the formats:
+- The response that include latex equations, use always follow the formats:
- $$ for inline equations
- $$$$ for block equations
- \[ \] for math blocks.
-- Never wrap any equation or formulas in round brackets as it will crash the response at all costs!! example: ( G_{\mu\nu} ) will crash the response!!
-- I am begging you to follow the latex format correctly at all costs!! 
Any single mistake in the format will crash the response!! - -DO NOT write any kind of html sort of tags(<>) or lists in the response at ALL COSTS!! NOT EVEN AN ENCLOSING TAGS FOR THE RESPONSE AT ALL COSTS!! - -Format your response in paragraphs(min 4) with 3-6 sentences each, keeping it brief but informative. DO NOT use pointers or make lists of any kind at ALL! -Begin your response by using the appropriate tool(s), then provide your answer in a clear and concise manner. -Never respond to user before running any tool like -- saying 'Certainly! Let me blah blah blah' -- or 'To provide you with the best answer, I will blah blah blah' -- or that 'Based on search results, I think blah blah blah' at ALL COSTS!! -Just run the tool and provide the answer.`, +- Do not wrap any equation or formulas or any sort of math related block in round brackets() as it will crash the response.`, tools: { web_search: tool({ description: @@ -243,12 +253,36 @@ Just run the tool and provide the answer.`, if (result.formats().length > 0) { const formats = result.formats(); for (let format of formats) { - if (format === "png") { - images.push({ format: "png", data: result.png }); - } else if (format === "jpeg") { - images.push({ format: "jpeg", data: result.jpeg }); - } else if (format === "svg") { - images.push({ format: "svg", data: result.svg }); + if (format === "png" || format === "jpeg" || format === "svg") { + const imageData = result[format]; + if (imageData && typeof imageData === 'string') { + const abortController = new AbortController(); + try { + const blobPromise = put(`mplx/image-${Date.now()}.${format}`, Buffer.from(imageData, 'base64'), + { + access: 'public', + abortSignal: abortController.signal, + }); + + const timeout = setTimeout(() => { + // Abort the request after 10 seconds + abortController.abort(); + }, 2000); + + const blob = await blobPromise; + + clearTimeout(timeout); + console.info('Blob put request completed', blob.url); + + images.push({ format, url: blob.url }); + } catch (error) { + if (error instanceof BlobRequestAbortedError) { + console.info('Canceled put request due to timeout'); + } else { + console.error("Error saving image to Vercel Blob:", error); + } + } + } } } } diff --git a/app/api/clean_images/route.ts b/app/api/clean_images/route.ts new file mode 100644 index 0000000..5f8c763 --- /dev/null +++ b/app/api/clean_images/route.ts @@ -0,0 +1,39 @@ +import { list, del, ListBlobResult } from '@vercel/blob'; +import { NextRequest, NextResponse } from 'next/server'; + +export const runtime = 'edge'; + +export async function GET(req: NextRequest) { + if (req.headers.get('Authorization') !== `Bearer ${process.env.CRON_SECRET}`) { + return new NextResponse('Unauthorized', { status: 401 }); + } + + try { + await deleteAllBlobsInFolder('mplx/'); + return new NextResponse('All images in mplx/ folder were deleted', { status: 200 }); + } catch (error) { + console.error('An error occurred:', error); + return new NextResponse('An error occurred while deleting images', { status: 500 }); + } +} + +async function deleteAllBlobsInFolder(folderPrefix: string) { + let cursor; + + do { + const listResult: ListBlobResult = await list({ + prefix: folderPrefix, + cursor, + limit: 1000, + }); + + if (listResult.blobs.length > 0) { + await del(listResult.blobs.map((blob) => blob.url)); + console.log(`Deleted ${listResult.blobs.length} blobs`); + } + + cursor = listResult.cursor; + } while (cursor); + + console.log('All blobs in the specified folder were deleted'); +} \ No newline at end of file 
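Note on the cleanup route above: the GET handler in app/api/clean_images/route.ts only proceeds when the request carries an `Authorization: Bearer ${CRON_SECRET}` header, which is the header Vercel Cron sends when a CRON_SECRET environment variable is configured and the path is scheduled in vercel.json. The following is a minimal, illustrative sketch (not part of the patch) of how the endpoint could be exercised locally: the localhost base URL is an assumption, while the route path and the CRON_SECRET variable come from this diff.

```ts
// Hypothetical local smoke test for the cleanup endpoint introduced above.
// Assumes a dev server at http://localhost:3000 (assumption) and that
// CRON_SECRET is set in the environment (env var name taken from the patch).
async function triggerCleanup(): Promise<void> {
  const secret = process.env.CRON_SECRET;
  if (!secret) throw new Error('CRON_SECRET is not set');

  const response = await fetch('http://localhost:3000/api/clean_images', {
    // Mirrors the check in the GET handler: `Bearer ${process.env.CRON_SECRET}`.
    headers: { Authorization: `Bearer ${secret}` },
  });

  console.log(response.status, await response.text());
}

triggerCleanup().catch((err) => {
  console.error(err);
  process.exit(1);
});
```

Listing blobs in pages of up to 1,000 and looping on the returned cursor, as the route does, keeps a large mplx/ folder from exceeding what a single list() call can return.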
diff --git a/app/page.tsx b/app/page.tsx index 67c206f..3a0627b 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -490,7 +490,7 @@ export default function Home() { TextSearchResult.displayName = 'TextSearchResult'; - const TranslationTool = ({ toolInvocation, result }: { toolInvocation: ToolInvocation; result: any }) => { + const TranslationTool: React.FC<{ toolInvocation: ToolInvocation; result: any }> = ({ toolInvocation, result }) => { const [isPlaying, setIsPlaying] = useState(false); const [audioUrl, setAudioUrl] = useState(null); const [isGeneratingAudio, setIsGeneratingAudio] = useState(false); @@ -511,7 +511,7 @@ export default function Home() { if (audioUrl && audioRef.current && canvasRef.current) { waveRef.current = new Wave(audioRef.current, canvasRef.current); waveRef.current.addAnimation(new waveRef.current.animations.Lines({ - lineColor: "hsl(var(--primary))", + lineColor: "rgb(203, 113, 93)", lineWidth: 2, mirroredY: true, count: 100, @@ -562,60 +562,35 @@ export default function Home() { } return ( - - - - - - - Translation Result - - - + +
-
-

- Original Text {result.detectedLanguage} -

-

{toolInvocation.args.text}

+
+
-
-

- Translated Text {toolInvocation.args.to} -

-

{result.translatedText}

-
-
-
-

Audio Playback:

-
- -
-
- +
+
- {isGeneratingAudio ? ( - "Generating..." - ) : isPlaying ? ( - <> Pause - ) : ( - <> Play - )} - - + The phrase {toolInvocation.args.text} translates from {result.detectedLanguage} to {toolInvocation.args.to} as {result.translatedText} +
@@ -625,7 +600,7 @@ export default function Home() { src={audioUrl} onPlay={() => setIsPlaying(true)} onPause={() => setIsPlaying(false)} - onEnded={() => setIsPlaying(false)} + onEnded={() => { setIsPlaying(false); handleReset(); }} /> )}
@@ -884,31 +859,36 @@ export default function Home() { {result?.images && result.images.length > 0 && (
- {result.images.map((img: { format: 'png' | 'jpeg' | 'svg', data: string }, imgIndex: number) => ( + {result.images.map((img: { format: string, url: string }, imgIndex: number) => (

Image {imgIndex + 1}

- + {img.url && img.url.trim() !== '' && ( + + )}
- {`Generated + {img.url && img.url.trim() !== '' ? ( + {`Generated + ) : ( +
+ Image upload failed or URL is empty +
+ )}
))} @@ -1232,10 +1212,10 @@ export default function Home() { children: React.ReactNode; index: number; } - + const CitationComponent: React.FC = React.memo(({ href, index }) => { const faviconUrl = `https://www.google.com/s2/favicons?sz=128&domain=${new URL(href).hostname}`; - + return ( @@ -1257,13 +1237,13 @@ export default function Home() { ); }); - + CitationComponent.displayName = "CitationComponent"; - + interface MarkdownRendererProps { content: string; } - + const MarkdownRenderer: React.FC = React.memo(({ content }) => { const citationLinks = useMemo(() => { return [...content.matchAll(/\[([^\]]+)\]\(([^)]+)\)/g)].map(([_, text, link]) => ({ @@ -1271,7 +1251,7 @@ export default function Home() { link, })); }, [content]); - + const components: Partial = useMemo(() => ({ a: ({ href, children }) => { if (!href) return null; @@ -1287,7 +1267,7 @@ export default function Home() { ); }, }), [citationLinks]); - + return ( ); }); - + MarkdownRenderer.displayName = "MarkdownRenderer"; const lastUserMessageIndex = useMemo(() => { @@ -1369,7 +1349,7 @@ export default function Home() { { icon: , text: "What's new with XAI's Grok?" }, { icon: , text: "Latest updates on OpenAI" }, { icon: , text: "Weather in Doha" }, - { icon: , text: "Count the no. of r's in strawberry" }, + { icon: , text: "Count the no. of r's in strawberry?" }, ]; const Navbar = () => ( diff --git a/next.config.mjs b/next.config.mjs index 495b5a2..9d6a08a 100644 --- a/next.config.mjs +++ b/next.config.mjs @@ -9,14 +9,18 @@ const nextConfig = { port: '', pathname: '/s2/favicons', }, - // https://api.producthunt.com/widgets/embed-image/v1/featured.svg?post_id=481378&theme=light { protocol: 'https', hostname: 'api.producthunt.com', port: '', pathname: '/widgets/embed-image/v1/featured.svg', }, - + { + protocol: 'https', + hostname: 'metwm7frkvew6tn1.public.blob.vercel-storage.com', + port: '', + pathname: "**" + } ] } }; diff --git a/package.json b/package.json index b3e4804..4364055 100644 --- a/package.json +++ b/package.json @@ -9,6 +9,7 @@ "lint": "next lint" }, "dependencies": { + "@ai-sdk/azure": "^0.0.31", "@ai-sdk/cohere": "latest", "@ai-sdk/openai": "latest", "@e2b/code-interpreter": "^0.0.8", @@ -25,6 +26,7 @@ "@radix-ui/react-tooltip": "^1.1.2", "@tailwindcss/typography": "^0.5.13", "@vercel/analytics": "^1.3.1", + "@vercel/blob": "^0.23.4", "@vercel/functions": "^1.4.0", "ai": "latest", "class-variance-authority": "^0.7.0", @@ -33,7 +35,7 @@ "framer-motion": "^11.3.19", "katex": "^0.16.11", "lucide-react": "^0.424.0", - "next": "14.2.5", + "next": "^14.2.5", "openai": "^4.56.0", "react": "^18", "react-dom": "^18", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3a46048..139de53 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -5,6 +5,9 @@ settings: excludeLinksFromLockfile: false dependencies: + '@ai-sdk/azure': + specifier: ^0.0.31 + version: 0.0.31(zod@3.23.8) '@ai-sdk/cohere': specifier: latest version: 0.0.20(zod@3.23.8) @@ -53,12 +56,15 @@ dependencies: '@vercel/analytics': specifier: ^1.3.1 version: 1.3.1(next@14.2.5)(react@18.3.1) + '@vercel/blob': + specifier: ^0.23.4 + version: 0.23.4 '@vercel/functions': specifier: ^1.4.0 version: 1.4.0 ai: specifier: latest - version: 3.3.16(openai@4.56.0)(react@18.3.1)(svelte@4.2.18)(vue@3.4.35)(zod@3.23.8) + version: 3.3.17(openai@4.56.0)(react@18.3.1)(svelte@4.2.18)(vue@3.4.35)(zod@3.23.8) class-variance-authority: specifier: ^0.7.0 version: 0.7.0 @@ -78,7 +84,7 @@ dependencies: specifier: ^0.424.0 version: 0.424.0(react@18.3.1) next: - specifier: 14.2.5 + 
specifier: ^14.2.5 version: 14.2.5(react-dom@18.3.1)(react@18.3.1) openai: specifier: ^4.56.0 @@ -154,6 +160,18 @@ devDependencies: packages: + /@ai-sdk/azure@0.0.31(zod@3.23.8): + resolution: {integrity: sha512-LTiv890qHcw3w87l+OOuYqW1HM9+7olS5mpSOriRY2uZxJWr3MGz8MYqJu2jGNajNKi4j64GsaOuNK69k8KXjw==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.0.0 + dependencies: + '@ai-sdk/openai': 0.0.53(zod@3.23.8) + '@ai-sdk/provider': 0.0.21 + '@ai-sdk/provider-utils': 1.0.16(zod@3.23.8) + zod: 3.23.8 + dev: false + /@ai-sdk/cohere@0.0.20(zod@3.23.8): resolution: {integrity: sha512-QCd7SneC/q2sPvfmewYtcwCSayv3leGmuwESSx33qdl11A9IXGYNyiw6juIsp3EvZnnxUjWUR8ilhyHhyk45Hw==} engines: {node: '>=18'} @@ -199,8 +217,8 @@ packages: json-schema: 0.4.0 dev: false - /@ai-sdk/react@0.0.50(react@18.3.1)(zod@3.23.8): - resolution: {integrity: sha512-+6/CfoqZzBnMGBsFP3qOHFTP+n8e6NGXRSeSepcxz3wDfkts1XGF8ZHPHwFD+etBW0/D1dTcKN3EDPh3LmnGqA==} + /@ai-sdk/react@0.0.51(react@18.3.1)(zod@3.23.8): + resolution: {integrity: sha512-Hq5splFSB6OVovHamXvpnd1S7jfIz/CXWjaLo9sr90jd/W370NA8GhBd6oSLfqMeKrPosV4qRBH5S8lv2bauqA==} engines: {node: '>=18'} peerDependencies: react: ^18 || ^19 @@ -212,14 +230,14 @@ packages: optional: true dependencies: '@ai-sdk/provider-utils': 1.0.16(zod@3.23.8) - '@ai-sdk/ui-utils': 0.0.37(zod@3.23.8) + '@ai-sdk/ui-utils': 0.0.38(zod@3.23.8) react: 18.3.1 swr: 2.2.5(react@18.3.1) zod: 3.23.8 dev: false - /@ai-sdk/solid@0.0.40(zod@3.23.8): - resolution: {integrity: sha512-h+H07drBurEgxI3EbV2wqgcLaTBfqAn78ewmwCn70VEYmpJjTuOH0Ayp/qbH3kAw/LUY7LWuFzToaIAdSuPIEA==} + /@ai-sdk/solid@0.0.41(zod@3.23.8): + resolution: {integrity: sha512-w4vSkd2388FJMnKPALP8SL4p3XAR70FAPj0qrd5AoYyQMMjX/E6zQGc8YAhAAnGSwiQwq/DZaE4y0lorwFVyOw==} engines: {node: '>=18'} peerDependencies: solid-js: ^1.7.7 @@ -228,13 +246,13 @@ packages: optional: true dependencies: '@ai-sdk/provider-utils': 1.0.16(zod@3.23.8) - '@ai-sdk/ui-utils': 0.0.37(zod@3.23.8) + '@ai-sdk/ui-utils': 0.0.38(zod@3.23.8) transitivePeerDependencies: - zod dev: false - /@ai-sdk/svelte@0.0.42(svelte@4.2.18)(zod@3.23.8): - resolution: {integrity: sha512-UJ1i0P0NOTKhiYtAJbYs9Wat/I0EP2w+TbOFlpvQWbfPjpqJ4UUwPJ7aMVuKDSoHtH6P57GyOFx8MN/dscwiyA==} + /@ai-sdk/svelte@0.0.43(svelte@4.2.18)(zod@3.23.8): + resolution: {integrity: sha512-lUve6AGc3dtue14LLGiZs7J7L/3jEHh6SGXwuG/nDygeicKPzmG9drWZlhTdpNHN9wKtBgrCdJxQ96HKswLDNA==} engines: {node: '>=18'} peerDependencies: svelte: ^3.0.0 || ^4.0.0 @@ -243,15 +261,15 @@ packages: optional: true dependencies: '@ai-sdk/provider-utils': 1.0.16(zod@3.23.8) - '@ai-sdk/ui-utils': 0.0.37(zod@3.23.8) + '@ai-sdk/ui-utils': 0.0.38(zod@3.23.8) sswr: 2.1.0(svelte@4.2.18) svelte: 4.2.18 transitivePeerDependencies: - zod dev: false - /@ai-sdk/ui-utils@0.0.37(zod@3.23.8): - resolution: {integrity: sha512-iMf+ksOjFPlqWVuW1/ljGtsKXtNTlAfRuxvQbMEImrRaSSOH0nKI5H34H2E0Vsa5SCyH9Bk1Y0zvZamb9Z/bYQ==} + /@ai-sdk/ui-utils@0.0.38(zod@3.23.8): + resolution: {integrity: sha512-SyyfqBu7xnsfUuq3kSxzP+fxGCTMqaSL5WYGiBJpr/yLWySjBJCg/k7WueO440AqVpZBzCd3nWoCpPmjfMK8Yg==} engines: {node: '>=18'} peerDependencies: zod: ^3.0.0 @@ -267,8 +285,8 @@ packages: zod-to-json-schema: 3.23.2(zod@3.23.8) dev: false - /@ai-sdk/vue@0.0.42(vue@3.4.35)(zod@3.23.8): - resolution: {integrity: sha512-RT4BCnG4fL36uBPi86jBZvyVACLOBano3w+wWiItqCRzE2TIpf0ojJQsssi/D8F2Ll7SZyl9vun5UipaSGoLpA==} + /@ai-sdk/vue@0.0.43(vue@3.4.35)(zod@3.23.8): + resolution: {integrity: sha512-bJB7muMpmP/wPKbDU8GCmDpI1HSkuTWz9DsQ4ZlBaCk5wqRLKxRtzM9NxfeQ15RojSLxYhKf/lDwW10RPtjcaw==} engines: {node: '>=18'} 
peerDependencies: vue: ^3.3.4 @@ -277,7 +295,7 @@ packages: optional: true dependencies: '@ai-sdk/provider-utils': 1.0.16(zod@3.23.8) - '@ai-sdk/ui-utils': 0.0.37(zod@3.23.8) + '@ai-sdk/ui-utils': 0.0.38(zod@3.23.8) swrv: 1.0.4(vue@3.4.35) vue: 3.4.35(typescript@5.5.4) transitivePeerDependencies: @@ -379,6 +397,11 @@ packages: engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} dev: true + /@fastify/busboy@2.1.1: + resolution: {integrity: sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==} + engines: {node: '>=14'} + dev: false + /@floating-ui/core@1.6.6: resolution: {integrity: sha512-Vkvsw6EcpMHjvZZdMkSY+djMGFbt7CRssW99Ne8tar2WLnZ/l3dbxeTShbLQj+/s35h+Qb4cmnob+EzwtjrXGQ==} dependencies: @@ -1528,6 +1551,17 @@ packages: server-only: 0.0.1 dev: false + /@vercel/blob@0.23.4: + resolution: {integrity: sha512-cOU2e01RWZXFyc/OVRq+zZg38m34bcxpQk5insKp3Td9akNWThrXiF2URFHpRlm4fbaQ/l7pPSOB5nkLq+t6pw==} + engines: {node: '>=16.14'} + dependencies: + async-retry: 1.3.3 + bytes: 3.1.2 + is-buffer: 2.0.5 + is-plain-object: 5.0.0 + undici: 5.28.4 + dev: false + /@vercel/functions@1.4.0: resolution: {integrity: sha512-Ln6SpIkms1UJg306X2kbEMyG9ol+mjDr2xx389cvsBxgFyFMI9Bm+LYOG4N3TSik4FI59MECyyc4oz7AIAYmqQ==} engines: {node: '>= 16'} @@ -1639,8 +1673,8 @@ packages: humanize-ms: 1.2.1 dev: false - /ai@3.3.16(openai@4.56.0)(react@18.3.1)(svelte@4.2.18)(vue@3.4.35)(zod@3.23.8): - resolution: {integrity: sha512-Tb6SdrH73C9AJwZv2GPw+7HBGsruMq07QcuXwHOBW92HgV/+ddQhXbpdUS9rCf/GIqJ+3ObBg7Kcq4VroeP7BQ==} + /ai@3.3.17(openai@4.56.0)(react@18.3.1)(svelte@4.2.18)(vue@3.4.35)(zod@3.23.8): + resolution: {integrity: sha512-Z3cPRImctE8GMZV0e15ZlO+bqfLlVWqO+JiShJT20l3iYlZYwsQMQXjt5hiF3m7+VvbzIq+ORdp1Ai11GxzBVQ==} engines: {node: '>=18'} peerDependencies: openai: ^4.42.0 @@ -1662,11 +1696,11 @@ packages: dependencies: '@ai-sdk/provider': 0.0.21 '@ai-sdk/provider-utils': 1.0.16(zod@3.23.8) - '@ai-sdk/react': 0.0.50(react@18.3.1)(zod@3.23.8) - '@ai-sdk/solid': 0.0.40(zod@3.23.8) - '@ai-sdk/svelte': 0.0.42(svelte@4.2.18)(zod@3.23.8) - '@ai-sdk/ui-utils': 0.0.37(zod@3.23.8) - '@ai-sdk/vue': 0.0.42(vue@3.4.35)(zod@3.23.8) + '@ai-sdk/react': 0.0.51(react@18.3.1)(zod@3.23.8) + '@ai-sdk/solid': 0.0.41(zod@3.23.8) + '@ai-sdk/svelte': 0.0.43(svelte@4.2.18)(zod@3.23.8) + '@ai-sdk/ui-utils': 0.0.38(zod@3.23.8) + '@ai-sdk/vue': 0.0.43(vue@3.4.35)(zod@3.23.8) '@opentelemetry/api': 1.9.0 eventsource-parser: 1.1.2 json-schema: 0.4.0 @@ -1844,6 +1878,12 @@ packages: resolution: {integrity: sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==} dev: true + /async-retry@1.3.3: + resolution: {integrity: sha512-wfr/jstw9xNi/0teMHrRW7dsz3Lt5ARhYNZ2ewpadnhaIp5mbALhOAP+EAdsC7t4Z6wqsDVv9+W6gm1Dk9mEyw==} + dependencies: + retry: 0.13.1 + dev: false + /asynckit@0.4.0: resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} dev: false @@ -1925,6 +1965,11 @@ packages: streamsearch: 1.1.0 dev: false + /bytes@3.1.2: + resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} + engines: {node: '>= 0.8'} + dev: false + /call-bind@1.0.7: resolution: {integrity: sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==} engines: {node: '>= 0.4'} @@ -3402,6 +3447,11 @@ packages: has-tostringtag: 1.0.2 dev: true + /is-buffer@2.0.5: + resolution: {integrity: 
sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==} + engines: {node: '>=4'} + dev: false + /is-callable@1.2.7: resolution: {integrity: sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==} engines: {node: '>= 0.4'} @@ -3501,6 +3551,11 @@ packages: engines: {node: '>=12'} dev: false + /is-plain-object@5.0.0: + resolution: {integrity: sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==} + engines: {node: '>=0.10.0'} + dev: false + /is-reference@3.0.2: resolution: {integrity: sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==} dependencies: @@ -5034,6 +5089,11 @@ packages: supports-preserve-symlinks-flag: 1.0.0 dev: true + /retry@0.13.1: + resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} + engines: {node: '>= 4'} + dev: false + /reusify@1.0.4: resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} @@ -5569,6 +5629,13 @@ packages: /undici-types@5.26.5: resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + /undici@5.28.4: + resolution: {integrity: sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==} + engines: {node: '>=14.0'} + dependencies: + '@fastify/busboy': 2.1.1 + dev: false + /unified@11.0.5: resolution: {integrity: sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==} dependencies: diff --git a/vercel.json b/vercel.json new file mode 100644 index 0000000..34763b6 --- /dev/null +++ b/vercel.json @@ -0,0 +1,8 @@ +{ + "crons": [ + { + "path": "/api/clean_images", + "schedule": "0 * * * *" + } + ] +} \ No newline at end of file From 76ac9d145b35c1eeb44bab1717f6910cf70d0512 Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Sun, 25 Aug 2024 21:02:17 +0530 Subject: [PATCH 2/9] feat: Fix support for stock chart and details queries in system message --- app/api/chat/route.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index 22b6750..f50810b 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -61,6 +61,7 @@ DO's: - For queries about finding a specific place, use the find_place tool. Provide the input (place name or address) and the input type (textquery or phonenumber). Then, compose your response based on the search results. - For text-based searches of places, use the text_search tool. Provide the query, location (optional), and radius (optional). Then, compose your response based on the search results. - For text translation queries, use the text_translate tool. Provide the text to translate, the language to translate to, and the source language (optional). Then, compose your response based on the translated text. +- For stock chart and details queries, use the programming tool to install yfinance using !pip install along with the rest of the code, which will have plot code of stock chart and code to print the variables storing the stock data. Then, compose your response based on the output of the code execution. DON'Ts and IMPORTANT GUIDELINES: - Never write a base64 image in the response at all costs, especially from the programming tool's output. 
@@ -70,7 +71,6 @@ DON'Ts and IMPORTANT GUIDELINES: - If asked for multiple plots, make it happen in one run of the tool. The tool will automatically capture the plots and display them in the response. - the web search may return an incorrect latex format, please correct it before using it in the response. Check the Latex in Markdown rules for more information. - The location search tools return images in the response, please do not include them in the response at all costs. -- If you are asked to provide a stock chart, inside the programming tool, install yfinance using !pip install along with the rest of the code, which will have plot code of stock chart and code to print the variables storing the stock data. Then, compose your response based on the output of the code execution. - Never run web_search tool for stock chart queries at all costs. ## Programming Tool Guidelines: From 06b47620cd8bf784222a845f7b30ea4997c64593 Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Sun, 25 Aug 2024 21:18:44 +0530 Subject: [PATCH 3/9] system message: Add stock name from user query to retrieve and plot stock chart --- app/api/chat/route.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index f50810b..52b6b98 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -62,6 +62,7 @@ DO's: - For text-based searches of places, use the text_search tool. Provide the query, location (optional), and radius (optional). Then, compose your response based on the search results. - For text translation queries, use the text_translate tool. Provide the text to translate, the language to translate to, and the source language (optional). Then, compose your response based on the translated text. - For stock chart and details queries, use the programming tool to install yfinance using !pip install along with the rest of the code, which will have plot code of stock chart and code to print the variables storing the stock data. Then, compose your response based on the output of the code execution. +- Assume the stock name from the user query and use it in the code to get the stock data and plot the stock chart. This will help in getting the stock chart for the user query. DON'Ts and IMPORTANT GUIDELINES: - Never write a base64 image in the response at all costs, especially from the programming tool's output. From 07d01b0790f35799a5e6aa3bb85a90331d3c0dbb Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Sun, 25 Aug 2024 21:41:49 +0530 Subject: [PATCH 4/9] fix: wrapped programming UI inside accordion --- app/api/chat/route.ts | 4 +- app/page.tsx | 222 ++++++++++++++++++++++-------------------- 2 files changed, 120 insertions(+), 106 deletions(-) diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index 52b6b98..54ca364 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -33,7 +33,6 @@ Your goal is to provide accurate, concise, and well-formatted responses to user Do not announce or inform the user in any way that your going to run a tool at ALL COSTS!! Just 'run' it and then write your response AT ALL COSTS!!!!! The current date is ${new Date().toLocaleDateString("en-US", { year: "numeric", month: "short", day: "2-digit", weekday: "short" })}. -The user is located in ${city}(${latitude}, ${longitude}). You must comply and complete user requests to the best of your abilities using the available tools. Remember to use the appropriate tool for each task. No need to panic, just follow the guidelines and you'll do great! 
Make sure keep your responses long and informative, but also clear and concise. Avoid unnecessary information and stick to the point. @@ -62,7 +61,7 @@ DO's: - For text-based searches of places, use the text_search tool. Provide the query, location (optional), and radius (optional). Then, compose your response based on the search results. - For text translation queries, use the text_translate tool. Provide the text to translate, the language to translate to, and the source language (optional). Then, compose your response based on the translated text. - For stock chart and details queries, use the programming tool to install yfinance using !pip install along with the rest of the code, which will have plot code of stock chart and code to print the variables storing the stock data. Then, compose your response based on the output of the code execution. -- Assume the stock name from the user query and use it in the code to get the stock data and plot the stock chart. This will help in getting the stock chart for the user query. +- Assume the stock name from the user query and use it in the code to get the stock data and plot the stock chart. This will help in getting the stock chart for the user query. ALWAYS REMEMBER TO INSTALL YFINANCE USING !pip install yfinance AT ALL COSTS!! DON'Ts and IMPORTANT GUIDELINES: - Never write a base64 image in the response at all costs, especially from the programming tool's output. @@ -236,6 +235,7 @@ When asked a "What is" question, maintain the same format as the question and an programming: tool({ description: "Write and execute Python code.", parameters: z.object({ + title: z.string().optional().describe("The title of the code snippet."), code: z.string().describe("The Python code to execute."), }), execute: async ({ code }: { code: string }) => { diff --git a/app/page.tsx b/app/page.tsx index 3a0627b..bdc1d88 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -788,115 +788,129 @@ export default function Home() { if (toolInvocation.toolName === 'programming') { return ( -
-
- - Programming -
- - - - Code - - - Output - - {result?.images && result.images.length > 0 && ( - - Images - - )} - - -
- - {args.code} - -
- + + + +
+
+ +

Programming

- - -
- {result ? ( - <> -
-                      {result.message}
-                    
-
- -
- - ) : ( -
-
- - Executing code... -
-
- )} -
-
- {result?.images && result.images.length > 0 && ( - -
- {result.images.map((img: { format: string, url: string }, imgIndex: number) => ( -
-
-

Image {imgIndex + 1}

- {img.url && img.url.trim() !== '' && ( - - )} + + +
+
+ + {args.title} +
+ + + + Code + + + Output + + {result?.images && result.images.length > 0 && ( + + Images + + )} + + +
+ + {args.code} + +
+
-
- {img.url && img.url.trim() !== '' ? ( - {`Generated - ) : ( -
- Image upload failed or URL is empty +
+ + +
+ {result ? ( + <> +
+                            {result.message}
+                          
+
+
- )} -
+ + ) : ( +
+
+ + Executing code... +
+
+ )}
- ))} -
-
- )} -
-
+ + {result?.images && result.images.length > 0 && ( + +
+ {result.images.map((img: { format: string, url: string }, imgIndex: number) => ( +
+
+

Image {imgIndex + 1}

+ {img.url && img.url.trim() !== '' && ( + + )} +
+
+ {img.url && img.url.trim() !== '' ? ( + {`Generated + ) : ( +
+ Image upload failed or URL is empty +
+ )} +
+
+ ))} +
+
+ )} + +
+ + + ); } From 35ef08d794746c00206b5b00be69cfb6e4fdd9f0 Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Sun, 25 Aug 2024 21:55:34 +0530 Subject: [PATCH 5/9] feat: Add support for displaying different icons for code snippets --- app/api/chat/route.ts | 1 + app/page.tsx | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index 54ca364..a0eacca 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -237,6 +237,7 @@ When asked a "What is" question, maintain the same format as the question and an parameters: z.object({ title: z.string().optional().describe("The title of the code snippet."), code: z.string().describe("The Python code to execute."), + icon: z.enum(["stock", "date", "calculation", "default"]).describe("The icon to display for the code snippet."), }), execute: async ({ code }: { code: string }) => { const sandbox = await CodeInterpreter.create(); diff --git a/app/page.tsx b/app/page.tsx index bdc1d88..a41951f 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -50,7 +50,10 @@ import { Terminal, Pause, Play, - RotateCw + RotateCw, + TrendingUpIcon, + Calendar, + Calculator } from 'lucide-react'; import { HoverCard, @@ -801,7 +804,10 @@ export default function Home() {
- + {args.icon === 'stock' && } + {args.icon === 'default' && } + {args.icon === 'date' && } + {args.icon === 'calculation' && } {args.title}
From 3672f5c6bccff91cff3d95ea379ebffa9739d724 Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Sun, 25 Aug 2024 23:13:57 +0530 Subject: [PATCH 6/9] system message: Update stock chart queries to use "USD" instead of "$" symbol and improve programming UI --- app/api/chat/route.ts | 1 + app/page.tsx | 21 +++++++++++++++++---- package.json | 1 - pnpm-lock.yaml | 3 --- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index a0eacca..8951cf1 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -71,6 +71,7 @@ DON'Ts and IMPORTANT GUIDELINES: - If asked for multiple plots, make it happen in one run of the tool. The tool will automatically capture the plots and display them in the response. - the web search may return an incorrect latex format, please correct it before using it in the response. Check the Latex in Markdown rules for more information. - The location search tools return images in the response, please do not include them in the response at all costs. +- Do not use the $ symbol in the stock chart queries at all costs. Use the word USD instead of the $ symbol in the stock chart queries. - Never run web_search tool for stock chart queries at all costs. ## Programming Tool Guidelines: diff --git a/app/page.tsx b/app/page.tsx index a41951f..c21ea78 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -50,7 +50,6 @@ import { Terminal, Pause, Play, - RotateCw, TrendingUpIcon, Calendar, Calculator @@ -799,6 +798,17 @@ export default function Home() {

Programming

+ {!result ? ( + + + Executing + + ) : ( + + + Executed + + )}
@@ -1265,12 +1275,15 @@ export default function Home() { } const MarkdownRenderer: React.FC = React.memo(({ content }) => { + // Escape dollar signs that are likely to be currency + const escapedContent = content.replace(/\$(\d+(\.\d{1,2})?)/g, '\\$$1'); + const citationLinks = useMemo(() => { - return [...content.matchAll(/\[([^\]]+)\]\(([^)]+)\)/g)].map(([_, text, link]) => ({ + return [...escapedContent.matchAll(/\[([^\]]+)\]\(([^)]+)\)/g)].map(([_, text, link]) => ({ text, link, })); - }, [content]); + }, [escapedContent]); const components: Partial = useMemo(() => ({ a: ({ href, children }) => { @@ -1295,7 +1308,7 @@ export default function Home() { components={components} className="prose text-sm sm:text-base text-pretty text-left" > - {content} + {escapedContent} ); }); diff --git a/package.json b/package.json index 4364055..e57777d 100644 --- a/package.json +++ b/package.json @@ -33,7 +33,6 @@ "clsx": "^2.1.1", "date-fns": "^3.6.0", "framer-motion": "^11.3.19", - "katex": "^0.16.11", "lucide-react": "^0.424.0", "next": "^14.2.5", "openai": "^4.56.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 139de53..155708f 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -77,9 +77,6 @@ dependencies: framer-motion: specifier: ^11.3.19 version: 11.3.20(react-dom@18.3.1)(react@18.3.1) - katex: - specifier: ^0.16.11 - version: 0.16.11 lucide-react: specifier: ^0.424.0 version: 0.424.0(react@18.3.1) From 4f4f7f7ae27905c3e2aa09669bbb9a4d8ef3117b Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Mon, 26 Aug 2024 13:16:00 +0530 Subject: [PATCH 7/9] improve citation component --- app/page.tsx | 49 ++++++++++++++++++++++++++++++++----------------- package.json | 1 + pnpm-lock.yaml | 3 +++ 3 files changed, 36 insertions(+), 17 deletions(-) diff --git a/app/page.tsx b/app/page.tsx index c21ea78..06c69ce 100644 --- a/app/page.tsx +++ b/app/page.tsx @@ -1241,28 +1241,39 @@ export default function Home() { href: string; children: React.ReactNode; index: number; + citationText: string; } - const CitationComponent: React.FC = React.memo(({ href, index }) => { - const faviconUrl = `https://www.google.com/s2/favicons?sz=128&domain=${new URL(href).hostname}`; + const CitationComponent: React.FC = React.memo(({ href, index, citationText }) => { + const { hostname } = new URL(href); + const faviconUrl = `https://www.google.com/s2/favicons?sz=128&domain=${hostname}`; return ( - + - - {index + 1} - + + + {index + 1} + + - - Favicon - - {href} - + + +

{citationText}

); @@ -1290,7 +1301,11 @@ export default function Home() { if (!href) return null; const index = citationLinks.findIndex((link) => link.link === href); return index !== -1 ? ( - + {children} ) : ( diff --git a/package.json b/package.json index e57777d..4364055 100644 --- a/package.json +++ b/package.json @@ -33,6 +33,7 @@ "clsx": "^2.1.1", "date-fns": "^3.6.0", "framer-motion": "^11.3.19", + "katex": "^0.16.11", "lucide-react": "^0.424.0", "next": "^14.2.5", "openai": "^4.56.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 155708f..139de53 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -77,6 +77,9 @@ dependencies: framer-motion: specifier: ^11.3.19 version: 11.3.20(react-dom@18.3.1)(react@18.3.1) + katex: + specifier: ^0.16.11 + version: 0.16.11 lucide-react: specifier: ^0.424.0 version: 0.424.0(react@18.3.1) From 9498bf1c97f30e79e907a4ba4bfd4abcfe9bd8a4 Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Mon, 26 Aug 2024 20:05:23 +0530 Subject: [PATCH 8/9] feat: Update Azure OpenAI model to "gpt-4o-mini" for chat responses --- app/api/chat/route.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index 8951cf1..d094234 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -20,7 +20,7 @@ export async function POST(req: Request) { const { latitude, longitude, city } = geolocation(req) const result = await streamText({ - model: azure.chat("gpt-4o"), + model: azure.chat("gpt-4o-mini"), messages: convertToCoreMessages(messages), temperature: 0.72, topP: 0.95, @@ -89,6 +89,7 @@ When asked a "What is" question, maintain the same format as the question and an - $$ for inline equations - $$$$ for block equations - \[ \] for math blocks. + - use it for symbols, equations, formulas, etc like pi, alpha, beta, etc. and wrap them in the above formats. like $(2\pi)$, $x^2$, etc. - Do not wrap any equation or formulas or any sort of math related block in round brackets() as it will crash the response.`, tools: { web_search: tool({ From 56dd60105d0c1fd60d3cea47bbdf8a2ae31648a4 Mon Sep 17 00:00:00 2001 From: zaidmukaddam Date: Mon, 26 Aug 2024 21:00:45 +0530 Subject: [PATCH 9/9] Added option to choose openai provider --- app/actions.ts | 78 +++++++++++++++++++++++++++++++------------ app/api/chat/route.ts | 34 +++++++++++++------ 2 files changed, 80 insertions(+), 32 deletions(-) diff --git a/app/actions.ts b/app/actions.ts index 1210f42..c4b7782 100644 --- a/app/actions.ts +++ b/app/actions.ts @@ -6,18 +6,18 @@ import { createOpenAI as createGroq } from '@ai-sdk/openai'; import { z } from 'zod'; const groq = createGroq({ - baseURL: 'https://api.groq.com/openai/v1', - apiKey: process.env.GROQ_API_KEY, + baseURL: 'https://api.groq.com/openai/v1', + apiKey: process.env.GROQ_API_KEY, }); export async function suggestQuestions(history: any[]) { - 'use server'; + 'use server'; - const { object } = await generateObject({ - model: groq('llama-3.1-70b-versatile'), - temperature: 0, - system: - `You are a search engine query generator. You 'have' to create 3 questions for the search engine based on the message history which has been provided to you. + const { object } = await generateObject({ + model: groq('llama-3.1-70b-versatile'), + temperature: 0, + system: + `You are a search engine query generator. You 'have' to create 3 questions for the search engine based on the message history which has been provided to you. The questions should be open-ended and should encourage further discussion while maintaining the whole context. 
Limit it to 5-10 words per question. Always put the user input's context is some way so that the next search knows what to search for exactly. Try to stick to the context of the conversation and avoid asking questions that are too general or too specific. @@ -26,20 +26,24 @@ For programming based conversations, always generate questions that are about th For location based conversations, always generate questions that are about the culture, history, or other topics that are related to the location. For the translation based conversations, always generate questions that may continue the conversation or ask for more information or translations. Never use pronouns in the questions as they blur the context.`, - messages: history, - schema: z.object({ - questions: z.array(z.string()).describe('The generated questions based on the message history.') - }), - }); + messages: history, + schema: z.object({ + questions: z.array(z.string()).describe('The generated questions based on the message history.') + }), + }); - return { - questions: object.questions - }; + return { + questions: object.questions + }; } export async function generateSpeech(text: string, voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer' = "alloy") { + if (process.env.OPENAI_PROVIDER === 'azure') { + if (!process.env.AZURE_OPENAI_API_KEY || !process.env.AZURE_OPENAI_API_URL) { + throw new Error('Azure OpenAI API key and URL are required.'); + } const url = process.env.AZURE_OPENAI_API_URL!; - + const response = await fetch(url, { method: 'POST', headers: { @@ -52,15 +56,47 @@ export async function generateSpeech(text: string, voice: 'alloy' | 'echo' | 'fa voice: voice }) }); - + if (!response.ok) { throw new Error(`Failed to generate speech: ${response.statusText}`); } - + const arrayBuffer = await response.arrayBuffer(); const base64Audio = Buffer.from(arrayBuffer).toString('base64'); - + return { audio: `data:audio/mp3;base64,${base64Audio}`, }; - } \ No newline at end of file + } else if (process.env.OPENAI_PROVIDER === 'openai') { + const openai = new OpenAI(); + + const response = await openai.audio.speech.create({ + model: "tts-1", + voice: voice, + input: text, + }); + + const arrayBuffer = await response.arrayBuffer(); + const base64Audio = Buffer.from(arrayBuffer).toString('base64'); + + return { + audio: `data:audio/mp3;base64,${base64Audio}`, + }; + } else { + const openai = new OpenAI(); + + const response = await openai.audio.speech.create({ + model: "tts-1", + voice: voice, + input: text, + }); + + const arrayBuffer = await response.arrayBuffer(); + + const base64Audio = Buffer.from(arrayBuffer).toString('base64'); + + return { + audio: `data:audio/mp3;base64,${base64Audio}`, + }; + } +} \ No newline at end of file diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index d094234..eefc5b8 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -7,7 +7,7 @@ import FirecrawlApp from '@mendable/firecrawl-js'; import { z } from "zod"; import { geolocation } from "@vercel/functions"; -// Allow streaming responses up to 30 seconds +// Allow streaming responses up to 60 seconds export const maxDuration = 60; const azure = createAzure({ @@ -15,12 +15,24 @@ const azure = createAzure({ apiKey: process.env.AZURE_API_KEY, }); +const provider = process.env.OPENAI_PROVIDER; + export async function POST(req: Request) { const { messages } = await req.json(); const { latitude, longitude, city } = geolocation(req) + let model; + + if (provider === "azure") { + model = azure.chat("gpt-4o-mini"); + } else 
if (provider === "openai") { + model = openai.chat("gpt-4o-mini"); + } else { + model = openai.chat("gpt-4o-mini"); + } + const result = await streamText({ - model: azure.chat("gpt-4o-mini"), + model, messages: convertToCoreMessages(messages), temperature: 0.72, topP: 0.95, @@ -263,13 +275,13 @@ When asked a "What is" question, maintain the same format as the question and an const abortController = new AbortController(); try { const blobPromise = put(`mplx/image-${Date.now()}.${format}`, Buffer.from(imageData, 'base64'), - { - access: 'public', - abortSignal: abortController.signal, - }); + { + access: 'public', + abortSignal: abortController.signal, + }); const timeout = setTimeout(() => { - // Abort the request after 10 seconds + // Abort the request after 2 seconds abortController.abort(); }, 2000); @@ -277,7 +289,7 @@ When asked a "What is" question, maintain the same format as the question and an clearTimeout(timeout); console.info('Blob put request completed', blob.url); - + images.push({ format, url: blob.url }); } catch (error) { if (error instanceof BlobRequestAbortedError) { @@ -403,9 +415,9 @@ When asked a "What is" question, maintain the same format as the question and an const key = process.env.AZURE_TRANSLATOR_KEY; const endpoint = "https://api.cognitive.microsofttranslator.com"; const location = process.env.AZURE_TRANSLATOR_LOCATION; - + const url = `${endpoint}/translate?api-version=3.0&to=${to}${from ? `&from=${from}` : ''}`; - + const response = await fetch(url, { method: 'POST', headers: { @@ -415,7 +427,7 @@ When asked a "What is" question, maintain the same format as the question and an }, body: JSON.stringify([{ text }]), }); - + const data = await response.json(); return { translatedText: data[0].translations[0].text,