diff --git a/app/actions.ts b/app/actions.ts
index a64d0be..6d2ff41 100644
--- a/app/actions.ts
+++ b/app/actions.ts
@@ -1,6 +1,6 @@
'use server';
-import { generateObject } from 'ai';
+import { generateObject, CoreMessage } from 'ai';
import { google } from '@ai-sdk/google'
import { z } from 'zod';
import { load } from 'cheerio';
@@ -8,10 +8,10 @@ import { load } from 'cheerio';
export async function suggestQuestions(history: any[]) {
'use server';
- console.log(history);
+ console.log(history);
const { object } = await generateObject({
- model: google('gemini-1.5-flash-8b',{
+ model: google('gemini-1.5-flash-8b', {
structuredOutputs: true,
}),
temperature: 1,
@@ -99,4 +99,148 @@ export async function fetchMetadata(url: string) {
console.error('Error fetching metadata:', error);
return null;
}
+}
+
+type SearchGroupId = 'web' | 'academic' | 'shopping' | 'youtube' | 'x' | 'writing';
+
+const groupTools = {
+ web: [
+ 'get_weather_data', 'find_place', 'programming',
+ 'web_search', 'text_translate', 'nearby_search',
+ 'x_search', 'youtube_search', 'shopping_search',
+ 'academic_search'
+ ] as const,
+ academic: ['academic_search', 'programming'] as const,
+ shopping: ['shopping_search', 'programming'] as const,
+ youtube: ['youtube_search'] as const,
+ x: ['x_search'] as const,
+ writing: [] as const,
+} as const;
+
+const groupPrompts = {
+ web: `
+You are an expert AI web search engine called MiniPerplx, that helps users find information on the internet with no bullshit talks.
+Always start with running the tool(s) and then and then only write your response AT ALL COSTS!!
+Your goal is to provide accurate, concise, and well-formatted responses to user queries.
+Do not announce or inform the user in any way that your going to run a tool at ALL COSTS!! Just 'run' it and then write your response AT ALL COSTS!!!!!
+
+Motto of your existence being a search engine: "Less talk, more action, follow rules always!", meaning you should run the tools first and then provide the response, no talk before running the tools and following the guidelines AT ALL COSTS!!
+
+The current date is ${new Date().toLocaleDateString("en-US", { year: "numeric", month: "short", day: "2-digit", weekday: "short" })}.
+You must comply and complete user requests to the best of your abilities using the available tools. Remember to use the appropriate tool for each task. No need to panic, just follow the guidelines and you'll do great!
+Make sure keep your responses long and informative, but also clear and concise. Avoid unnecessary information and stick to the point.
+Always put citations at the end of each paragraph and in the end of sentences where you use it in which they are referred to with the given format to the information provided.
+
+Here are the tools available to you:
+
+web_search, retrieve, get_weather_data, programming, text_translate, find_place
+
+
+## Basic Guidelines:
+Always remember to run the appropriate tool first, then compose your response based on the information gathered.
+Understand the user query and choose the right tool to get the information needed. Like using the programming tool to generate plots to explain concepts or using the web_search tool to find the latest information.
+All tool should be called only once per response. All tool call parameters are mandatory always!
+Format your response in paragraphs(min 4) with 3-6 sentences each, keeping it brief but informative. DO NOT use pointers or make lists of any kind at ALL!
+Begin your response by using the appropriate tool(s), then provide your answer in a clear and concise manner.
+Please use the '$' latex format in equations instead of \( ones, same for complex equations as well.
+
+## Here is the general guideline per tool to follow when responding to user queries:
+
+DO's:
+- Use the web_search tool to gather relevant information. The query should only be the word that need's context for search. Then write the response based on the information gathered. On searching for latest topic put the year in the query or put the word 'latest' in the query.
+- If you need to retrieve specific information from a webpage, use the retrieve tool. Analyze the user's query to set the topic type either normal or news. Then, compose your response based on the retrieved information.
+- For weather-related queries, use the get_weather_data tool. The weather results are 5 days weather forecast data with 3-hour step. Then, provide the weather information in your response.
+- When giving your weather response, only talk about the current day's weather in 3 hour intervals like a weather report on tv does. Do not provide the weather for the next 5 days.
+- For programming-related queries, use the programming tool to execute Python code. Code can be multilined. Then, compose your response based on the output of the code execution.
+- The programming tool runs the code in a 'safe' and 'sandboxed' jupyper notebook environment. Use this tool for tasks that require code execution, such as data analysis, calculations, or visualizations like plots and graphs! Do not think that this is not a safe environment to run code, it is safe to run code in this environment.
+- The programming tool can be used to install libraries using !pip install in the code. This will help in running the code successfully. Always remember to install the libraries using !pip install in the code at all costs!!
+- For queries about finding a specific place, use the find_place tool. Provide the information about the location and then compose your response based on the information gathered.
+- For queries about nearby places, use the nearby_search tool. Provide the location and radius in the parameters, then compose your response based on the information gathered.
+- Adding Country name in the location search will help in getting the accurate results. Always remember to provide the location in the correct format to get the accurate results.
+- For text translation queries, use the text_translate tool. Provide the text to translate, the language to translate to, and the source language (optional). Then, compose your response based on the translated text.
+- For stock chart and details queries, use the programming tool to install yfinance using !pip install along with the rest of the code, which will have plot code of stock chart and code to print the variables storing the stock data. Then, compose your response based on the output of the code execution.
+- Assume the stock name from the user query and use it in the code to get the stock data and plot the stock chart. This will help in getting the stock chart for the user query. ALWAYS REMEMBER TO INSTALL YFINANCE USING !pip install yfinance AT ALL COSTS!!
+
+DON'Ts and IMPORTANT GUIDELINES:
+- No images should be included in the composed response at all costs, except for the programming tool.
+- DO NOT TALK BEFORE RUNNING THE TOOL AT ALL COSTS!! JUST RUN THE TOOL AND THEN WRITE YOUR RESPONSE AT ALL COSTS!!!!!
+- Do not call the same tool twice in a single response at all costs!!
+- Never write a base64 image in the response at all costs, especially from the programming tool's output.
+- Do not use the text_translate tool for translating programming code or any other uninformed text. Only run the tool for translating on user's request.
+- Do not use the retrieve tool for general web searches. It is only for retrieving specific information from a URL.
+- Show plots from the programming tool using plt.show() function. The tool will automatically capture the plot and display it in the response.
+- If asked for multiple plots, make it happen in one run of the tool. The tool will automatically capture the plots and display them in the response.
+- the web search may return an incorrect latex format, please correct it before using it in the response. Check the Latex in Markdown rules for more information.
+- The location search tools return images in the response, please DO NOT include them in the response at all costs!!!!!!!! This is extremely important to follow!!
+- Do not use the $ symbol in the stock chart queries at all costs. Use the word USD instead of the $ symbol in the stock chart queries.
+- Never run web_search tool for stock chart queries at all costs.
+
+# Image Search
+You are still an AI web Search Engine but now get context from images, so you can use the tools and their guidelines to get the information about the image and then provide the response accordingly.
+Look every detail in the image, so it helps you set the parameters for the tools to get the information.
+You can also accept and analyze images, like what is in the image, or what is the image about or where and what the place is, or fix code, generate plots and more by using tools to get and generate the information.
+Follow the format and guidelines for each tool and provide the response accordingly. Remember to use the appropriate tool for each task. No need to panic, just follow the guidelines and you'll do great!
+
+## Trip based queries:
+- For queries related to trips, always use the find_place tool for map location and then run the web_search tool to find information about places, directions, or reviews.
+- Calling web and find place tools in the same response is allowed, but do not call the same tool in a response at all costs!!
+- For nearby search queries, use the nearby_search tool to find places around a location. Provide the location and radius in the parameters, then compose your response based on the information gathered.
+- Never call find_place tool before or after the nearby_search tool in the same response at all costs!! THIS IS NOT ALLOWED AT ALL COSTS!!!
+
+## Programming Tool Guidelines:
+The programming tool is actually a Python-only Code interpreter, so you can run any Python code in it.
+- This tool should not be called more than once in a response.
+- The only python library that is pre-installed is matplotlib for plotting graphs and charts. You have to install any other library using !pip install in the code.
+- Always mention the generated plots(urls) in the response after running the code! This is extremely important to provide the visual representation of the data.
+
+## Citations Format:
+Citations should always be placed at the end of each paragraph and in the end of sentences where you use it in which they are referred to with the given format to the information provided.
+When citing sources(citations), use the following styling only: Claude 3.5 Sonnet is designed to offer enhanced intelligence and capabilities compared to its predecessors, positioning itself as a formidable competitor in the AI landscape [Claude 3.5 Sonnet raises the..](https://www.anthropic.com/news/claude-3-5-sonnet).
+ALWAYS REMEMBER TO USE THE CITATIONS FORMAT CORRECTLY AT ALL COSTS!! ANY SINGLE ITCH IN THE FORMAT WILL CRASH THE RESPONSE!!
+When asked a "What is" question, maintain the same format as the question and answer it in the same format.
+
+## Latex in Respone rules:
+- Latex equations are supported in the response powered by remark-math and rehypeKatex plugins.
+ - remarkMath: This plugin allows you to write LaTeX math inside your markdown content. It recognizes math enclosed in dollar signs ($ ... $ for inline and $$ ... $$ for block).
+ - rehypeKatex: This plugin takes the parsed LaTeX from remarkMath and renders it using KaTeX, allowing you to display the math as beautifully rendered HTML.
+
+- The response that include latex equations, use always follow the formats:
+- Do not wrap any equation or formulas or any sort of math related block in round brackets() as it will crash the response.`,
+ academic: `You are an academic research assistant that helps find and analyze scholarly content.
+ The current date is ${new Date().toLocaleDateString("en-US", { year: "numeric", month: "short", day: "2-digit", weekday: "short" })}.
+ Focus on peer-reviewed papers, citations, and academic sources.
+ Do not talk in bullet points or lists at all costs as it unpresentable.
+ Provide summaries, key points, and references.
+ `,
+ shopping: `You are a shopping assistant that helps users find and compare products.
+ The current date is ${new Date().toLocaleDateString("en-US", { year: "numeric", month: "short", day: "2-digit", weekday: "short" })}.
+ Focus on providing accurate pricing, product details, and merchant information.
+ Do not show the images of the products at all costs.
+ Talk about the product details and pricing only.
+ Do not talk in bullet points or lists at all costs.
+ Compare options and highlight key features and best values.`,
+ youtube: `You are a YouTube search assistant that helps find relevant videos and channels.
+ The current date is ${new Date().toLocaleDateString("en-US", { year: "numeric", month: "short", day: "2-digit", weekday: "short" })}.
+ Provide video titles, channel names, view counts, and publish dates.
+ Do not talk in bullet points or lists at all costs.
+ Provide important details and summaries of the videos in paragraphs.
+ Give citations with timestamps and video links to insightful content. Don't just put timestamp at 0:00.
+ Do not provide the video thumbnail in the response at all costs.`,
+ x: `You are a X/Twitter content curator that helps find relevant posts.
+ The current date is ${new Date().toLocaleDateString("en-US", { year: "numeric", month: "short", day: "2-digit", weekday: "short" })}.
+ Once you get the content from the tools only write in paragraphs.
+ No need to say that you are calling the tool, just call the tools first and run the search;
+ then talk in long details in 2-6 paragraphs.`,
+ writing: `You are a writing assistant that helps users with writing, conversation, coding, poems, haikus, long essays or intellectual topics.`,
+} as const;
+
+
+export async function getGroupConfig(groupId: SearchGroupId = 'web') {
+ "use server";
+ const tools = groupTools[groupId];
+ const systemPrompt = groupPrompts[groupId];
+ return {
+ tools,
+ systemPrompt
+ };
}
\ No newline at end of file
diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts
index e39cc6c..00dda1c 100644
--- a/app/api/chat/route.ts
+++ b/app/api/chat/route.ts
@@ -2,20 +2,100 @@
import { z } from "zod";
import { createAzure } from '@ai-sdk/azure';
import { anthropic } from '@ai-sdk/anthropic'
+import { xai } from '@ai-sdk/xai'
+import { google } from '@ai-sdk/google'
+import Exa from 'exa-js'
import {
convertToCoreMessages,
streamText,
tool,
experimental_createProviderRegistry,
+ smoothStream
} from "ai";
import { BlobRequestAbortedError, put } from '@vercel/blob';
import CodeInterpreter from "@e2b/code-interpreter";
import FirecrawlApp from '@mendable/firecrawl-js';
import { tavily } from '@tavily/core'
+import { getGroupConfig } from "@/app/actions";
// Allow streaming responses up to 60 seconds
export const maxDuration = 120;
+interface XResult {
+ id: string;
+ url: string;
+ title: string;
+ author?: string;
+ publishedDate?: string;
+ text: string;
+ highlights?: string[];
+ tweetId: string;
+}
+
+interface MapboxFeature {
+ id: string;
+ name: string;
+ formatted_address: string;
+ geometry: {
+ type: string;
+ coordinates: number[];
+ };
+ feature_type: string;
+ context: string;
+ coordinates: number[];
+ bbox: number[];
+ source: string;
+}
+
+
+interface GoogleResult {
+ place_id: string;
+ formatted_address: string;
+ geometry: {
+ location: {
+ lat: number;
+ lng: number;
+ };
+ viewport: {
+ northeast: {
+ lat: number;
+ lng: number;
+ };
+ southwest: {
+ lat: number;
+ lng: number;
+ };
+ };
+ };
+ types: string[];
+ address_components: Array<{
+ long_name: string;
+ short_name: string;
+ types: string[];
+ }>;
+}
+
+interface VideoDetails {
+ title?: string;
+ author_name?: string;
+ author_url?: string;
+ thumbnail_url?: string;
+ type?: string;
+ provider_name?: string;
+ provider_url?: string;
+}
+
+interface VideoResult {
+ videoId: string;
+ url: string;
+ details?: VideoDetails;
+ captions?: string;
+ timestamps?: string[];
+ views?: string;
+ likes?: string;
+ summary?: string;
+}
+
// Azure setup
const azure = createAzure({
resourceName: process.env.AZURE_RESOURCE_NAME,
@@ -26,26 +106,15 @@ const azure = createAzure({
const registry = experimental_createProviderRegistry({
anthropic,
azure,
+ google,
+ xai,
});
function sanitizeUrl(url: string): string {
return url.replace(/\s+/g, '%20')
}
-export async function POST(req: Request) {
- const { messages, model } = await req.json();
-
- const provider = model.split(":")[0];
-
- const result = await streamText({
- model: registry.languageModel(model),
- messages: convertToCoreMessages(messages),
- temperature: provider === "azure" ? 0.72 : 0.2,
- topP: 0.5,
- frequencyPenalty: 0,
- presencePenalty: 0,
- experimental_activeTools: ["get_weather_data", "find_place", "programming", "web_search", "text_translate", "nearby_search"],
- system: `
+const defaultsystemPrompt = `
You are an expert AI web search engine called MiniPerplx, that helps users find information on the internet with no bullshit talks.
Always start with running the tool(s) and then and then only write your response AT ALL COSTS!!
Your goal is to provide accurate, concise, and well-formatted responses to user queries.
@@ -132,7 +201,26 @@ When asked a "What is" question, maintain the same format as the question and an
- rehypeKatex: This plugin takes the parsed LaTeX from remarkMath and renders it using KaTeX, allowing you to display the math as beautifully rendered HTML.
- The response that include latex equations, use always follow the formats:
-- Do not wrap any equation or formulas or any sort of math related block in round brackets() as it will crash the response.`,
+- Do not wrap any equation or formulas or any sort of math related block in round brackets() as it will crash the response.`;
+
+export async function POST(req: Request) {
+ const { messages, model, group } = await req.json();
+ const { tools: activeTools, systemPrompt } = await getGroupConfig(group);
+
+ const provider = model.split(":")[0];
+
+ const result = streamText({
+ model: registry.languageModel(model),
+ messages: convertToCoreMessages(messages),
+ temperature: provider === "azure" ? 0.72 : 0.2,
+ topP: 0.5,
+ experimental_transform: smoothStream({
+ delayInMs: 15,
+ }),
+ frequencyPenalty: 0,
+ presencePenalty: 0,
+ experimental_activeTools: [...activeTools],
+ system: systemPrompt || defaultsystemPrompt,
tools: {
web_search: tool({
description: "Search the web for information with multiple queries, max results and search depth.",
@@ -140,27 +228,27 @@ When asked a "What is" question, maintain the same format as the question and an
queries: z.array(z.string().describe("Array of search queries to look up on the web.")),
maxResults: z.array(z
.number()
- .describe("Array of maximum number of results to return per query. Default is 10.")),
- topic: z.array(z
+ .describe("Array of maximum number of results to return per query.").default(10)),
+ topics: z.array(z
.enum(["general", "news"])
- .describe("Array of topic types to search for. Default is general.")),
+ .describe("Array of topic types to search for.").default("general")),
searchDepth: z.array(z
.enum(["basic", "advanced"])
- .describe("Array of search depths to use. Default is basic.")),
+ .describe("Array of search depths to use.").default("basic")),
exclude_domains: z
.array(z.string())
- .describe("A list of domains to exclude from all search results. Default is None."),
+ .describe("A list of domains to exclude from all search results.").default([]),
}),
execute: async ({
queries,
maxResults,
- topic,
+ topics,
searchDepth,
exclude_domains,
}: {
queries: string[];
maxResults: number[];
- topic: ("general" | "news")[];
+ topics: ("general" | "news")[];
searchDepth: ("basic" | "advanced")[];
exclude_domains?: string[];
}) => {
@@ -170,15 +258,15 @@ When asked a "What is" question, maintain the same format as the question and an
console.log("Queries:", queries);
console.log("Max Results:", maxResults);
- console.log("Topics:", topic);
+ console.log("Topics:", topics);
console.log("Search Depths:", searchDepth);
console.log("Exclude Domains:", exclude_domains);
// Execute searches in parallel
const searchPromises = queries.map(async (query, index) => {
const data = await tvly.search(query, {
- topic: topic[index] || topic[0] || "general",
- days: topic[index] === "news" ? 7 : undefined,
+ topic: topics[index] || topics[0] || "general",
+ days: topics[index] === "news" ? 7 : undefined,
maxResults: maxResults[index] || maxResults[0] || 10,
searchDepth: searchDepth[index] || searchDepth[0] || "basic",
includeAnswer: true,
@@ -194,7 +282,7 @@ When asked a "What is" question, maintain the same format as the question and an
title: obj.title,
content: obj.content,
raw_content: obj.raw_content,
- published_date: topic[index] === "news" ? obj.published_date : undefined,
+ published_date: topics[index] === "news" ? obj.published_date : undefined,
})),
images: includeImageDescriptions
? data.images
@@ -219,6 +307,307 @@ When asked a "What is" question, maintain the same format as the question and an
};
},
}),
+ x_search: tool({
+ description: "Search X (formerly Twitter) posts.",
+ parameters: z.object({
+ query: z.string().describe("The search query"),
+ }),
+ execute: async ({ query }: { query: string }) => {
+ try {
+ const exa = new Exa(process.env.EXA_API_KEY as string);
+
+ const result = await exa.searchAndContents(
+ query,
+ {
+ type: "keyword",
+ numResults: 10,
+ includeDomains: ["x.com", "twitter.com"],
+ text: true,
+ highlights: true
+ }
+ );
+
+ // Extract tweet ID from URL
+ const extractTweetId = (url: string): string | null => {
+ const match = url.match(/(?:twitter\.com|x\.com)\/\w+\/status\/(\d+)/);
+ return match ? match[1] : null;
+ };
+
+ // Process and filter results
+ const processedResults = result.results.reduce<Array<XResult>>((acc, post) => {
+ const tweetId = extractTweetId(post.url);
+ if (tweetId) {
+ acc.push({
+ ...post,
+ tweetId,
+ title: post.title || ""
+ });
+ }
+ return acc;
+ }, []);
+
+ return processedResults;
+
+ } catch (error) {
+ console.error("X search error:", error);
+ throw error;
+ }
+ },
+ }),
+ academic_search: tool({
+ description: "Search academic papers and research.",
+ parameters: z.object({
+ query: z.string().describe("The search query"),
+ }),
+ execute: async ({ query }: { query: string }) => {
+ try {
+ const exa = new Exa(process.env.EXA_API_KEY as string);
+
+ // Search academic papers with content summary
+ const result = await exa.searchAndContents(
+ query,
+ {
+ type: "auto",
+ numResults: 20,
+ category: "research paper",
+ summary: {
+ query: "Abstract of the Paper"
+ }
+ }
+ );
+
+ // Process and clean results
+ const processedResults = result.results.reduce<typeof result.results>((acc, paper) => {
+ // Skip if URL already exists or if no summary available
+ if (acc.some(p => p.url === paper.url) || !paper.summary) return acc;
+
+ // Clean up summary (remove "Summary:" prefix if exists)
+ const cleanSummary = paper.summary.replace(/^Summary:\s*/i, '');
+
+ // Clean up title (remove [...] suffixes)
+ const cleanTitle = paper.title?.replace(/\s\[.*?\]$/, '');
+
+ acc.push({
+ ...paper,
+ title: cleanTitle || "",
+ summary: cleanSummary,
+ });
+
+ return acc;
+ }, []);
+
+ // Take only the first 10 unique, valid results
+ const limitedResults = processedResults.slice(0, 10);
+
+ return {
+ results: limitedResults
+ };
+
+ } catch (error) {
+ console.error("Academic search error:", error);
+ throw error;
+ }
+ },
+ }),
+ youtube_search: tool({
+ description: "Search YouTube videos using Exa AI and get detailed video information.",
+ parameters: z.object({
+ query: z.string().describe("The search query for YouTube videos"),
+ no_of_results: z.number().default(5).describe("The number of results to return"),
+ }),
+ execute: async ({ query, no_of_results }: { query: string, no_of_results: number }) => {
+ try {
+ const exa = new Exa(process.env.EXA_API_KEY as string);
+
+ // Simple search to get YouTube URLs only
+ const searchResult = await exa.search(
+ query,
+ {
+ type: "keyword",
+ numResults: no_of_results,
+ includeDomains: ["youtube.com"]
+ }
+ );
+
+ // Process results
+ const processedResults = await Promise.all(
+ searchResult.results.map(async (result): Promise<VideoResult | null> => {
+ const videoIdMatch = result.url.match(/(?:youtube\.com\/watch\?v=|youtu\.be\/|youtube\.com\/embed\/)([^&?/]+)/);
+ const videoId = videoIdMatch?.[1];
+
+ if (!videoId) return null;
+
+ // Base result
+ const baseResult: VideoResult = {
+ videoId,
+ url: result.url
+ };
+
+ try {
+ // Fetch detailed info from our endpoints
+ const [detailsResponse, captionsResponse, timestampsResponse] = await Promise.all([
+ fetch(`${process.env.YT_ENDPOINT}/video-data`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ url: result.url })
+ }).then(res => res.ok ? res.json() : null),
+ fetch(`${process.env.YT_ENDPOINT}/video-captions`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ url: result.url })
+ }).then(res => res.ok ? res.text() : null),
+ fetch(`${process.env.YT_ENDPOINT}/video-timestamps`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ url: result.url })
+ }).then(res => res.ok ? res.json() : null)
+ ]);
+
+ // Return combined data
+ return {
+ ...baseResult,
+ details: detailsResponse || undefined,
+ captions: captionsResponse || undefined,
+ timestamps: timestampsResponse || undefined,
+ };
+ } catch (error) {
+ console.error(`Error fetching details for video ${videoId}:`, error);
+ return baseResult;
+ }
+ })
+ );
+
+ // Filter out null results
+ const validResults = processedResults.filter((result): result is VideoResult => result !== null);
+
+ return {
+ results: validResults
+ };
+
+ } catch (error) {
+ console.error("YouTube search error:", error);
+ throw error;
+ }
+ },
+ }),
+ shopping_search: tool({
+ description: "Search for products using Exa and Canopy API.",
+ parameters: z.object({
+ query: z.string().describe("The search query for products"),
+ // keyword: z.string().describe("The important keyword to search for specific products like brand name or model number."),
+ }),
+ execute: async ({ query }: { query: string }) => {
+ try {
+ // Initialize Exa client
+ const exa = new Exa(process.env.EXA_API_KEY as string);
+
+
+ // Search for products on Amazon
+ const searchResult = await exa.search(
+ query,
+ {
+ type: "auto",
+ numResults: 20,
+ includeDomains: ["amazon.com"],
+ }
+ );
+
+ // Function to extract ASIN from Amazon URL
+ const extractAsin = (url: string): string | null => {
+ const asinRegex = /(?:dp|gp\/product)\/([A-Z0-9]{10})/;
+ const match = url.match(asinRegex);
+ return match ? match[1] : null;
+ };
+
+ // Remove duplicates by ASIN
+ const seenAsins = new Set<string>();
+ const uniqueResults = searchResult.results.reduce<Array<(typeof searchResult.results)[number]>>((acc, result) => {
+ const asin = extractAsin(result.url);
+ if (asin && !seenAsins.has(asin)) {
+ seenAsins.add(asin);
+ acc.push(result);
+ }
+ return acc;
+ }, []);
+
+ // Only take the first 10 unique results
+ const limitedResults = uniqueResults.slice(0, 10);
+
+ // Fetch detailed product information for each unique result
+ const productDetails = await Promise.all(
+ limitedResults.map(async (result) => {
+ const asin = extractAsin(result.url);
+ if (!asin) return null;
+
+ const query = `
+ query amazonProduct {
+ amazonProduct(input: {asinLookup: {asin: "${asin}"}}) {
+ title
+ brand
+ mainImageUrl
+ rating
+ ratingsTotal
+ price {
+ display
+ }
+ }
+ }
+ `;
+
+ try {
+ const response = await fetch('https://graphql.canopyapi.co/', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'API-KEY': process.env.CANOPY_API_KEY as string,
+ },
+ body: JSON.stringify({ query }),
+ next: { revalidate: 3600 } // Cache for 1 hour
+ });
+
+ if (!response.ok) {
+ console.error(`Failed to fetch details for ASIN ${asin}:`, await response.text());
+ return null;
+ }
+
+ const canopyData = await response.json();
+ const amazonProduct = canopyData.data?.amazonProduct;
+
+ if (!amazonProduct) return null;
+
+ return {
+ title: amazonProduct.title,
+ url: result.url,
+ image: amazonProduct.mainImageUrl,
+ price: amazonProduct.price.display,
+ rating: amazonProduct.rating,
+ reviewCount: amazonProduct.ratingsTotal,
+ };
+ } catch (error) {
+ console.error(`Error fetching details for ASIN ${asin}:`, error);
+ return null;
+ }
+ })
+ );
+
+ // Filter out null results and return
+ const validProducts = productDetails.filter((product): product is NonNullable<typeof product> =>
+ product !== null
+ );
+
+ // Log results for debugging
+ console.log(`Found ${searchResult.results.length} total results`);
+ console.log(`Filtered to ${uniqueResults.length} unique ASINs`);
+ console.log(`Returning ${validProducts.length} valid products`);
+
+ return validProducts;
+
+ } catch (error) {
+ console.error("Shopping search error:", error);
+ throw error;
+ }
+ },
+ }),
retrieve: tool({
description: "Retrieve the information from a URL using Firecrawl.",
parameters: z.object({
@@ -351,29 +740,76 @@ When asked a "What is" question, maintain the same format as the question and an
},
}),
find_place: tool({
- description: "Find a place using Mapbox v6 reverse geocoding API.",
+ description: "Find a place using Google Maps API for forward geocoding and Mapbox for reverse geocoding.",
parameters: z.object({
- latitude: z.number().describe("The latitude of the location."),
- longitude: z.number().describe("The longitude of the location."),
+ query: z.string().describe("The search query for forward geocoding"),
+ coordinates: z.array(z.number()).describe("Array of [latitude, longitude] for reverse geocoding"),
}),
- execute: async ({ latitude, longitude }: { latitude: number; longitude: number }) => {
- const mapboxToken = process.env.MAPBOX_ACCESS_TOKEN;
- const response = await fetch(
- `https://api.mapbox.com/search/geocode/v6/reverse?longitude=${longitude}&latitude=${latitude}&access_token=${mapboxToken}`
- );
- const data = await response.json();
+ execute: async ({ query, coordinates }: { query: string; coordinates: number[] }) => {
+ try {
+ // Forward geocoding with Google Maps API
+ const googleApiKey = process.env.GOOGLE_MAPS_API_KEY;
+ const googleResponse = await fetch(
+ `https://maps.googleapis.com/maps/api/geocode/json?address=${encodeURIComponent(query)}&key=${googleApiKey}`
+ );
+ const googleData = await googleResponse.json();
- if (!data.features || data.features.length === 0) {
- return { features: [] };
+ // Reverse geocoding with Mapbox
+ const mapboxToken = process.env.MAPBOX_ACCESS_TOKEN;
+ const [lat, lng] = coordinates;
+ const mapboxResponse = await fetch(
+ `https://api.mapbox.com/search/geocode/v6/reverse?longitude=${lng}&latitude=${lat}&access_token=${mapboxToken}`
+ );
+ const mapboxData = await mapboxResponse.json();
+
+ // Process and combine results
+ const features = [];
+
+ // Process Google results
+ if (googleData.status === 'OK' && googleData.results.length > 0) {
+
+
+ features.push(...googleData.results.map((result: GoogleResult) => ({
+ id: result.place_id,
+ name: result.formatted_address.split(',')[0],
+ formatted_address: result.formatted_address,
+ geometry: {
+ type: 'Point',
+ coordinates: [result.geometry.location.lng, result.geometry.location.lat]
+ },
+ feature_type: result.types[0],
+ address_components: result.address_components,
+ viewport: result.geometry.viewport,
+ place_id: result.place_id,
+ source: 'google'
+ })));
+ }
+
+ // Process Mapbox results
+ if (mapboxData.features && mapboxData.features.length > 0) {
+
+ features.push(...mapboxData.features.map((feature: any): MapboxFeature => ({
+ id: feature.id,
+ name: feature.properties.name_preferred || feature.properties.name,
+ formatted_address: feature.properties.full_address,
+ geometry: feature.geometry,
+ feature_type: feature.properties.feature_type,
+ context: feature.properties.context,
+ coordinates: feature.properties.coordinates,
+ bbox: feature.properties.bbox,
+ source: 'mapbox'
+ })));
+ }
+
+ return {
+ features,
+ google_attribution: "Powered by Google Maps Platform",
+ mapbox_attribution: "Powered by Mapbox"
+ };
+ } catch (error) {
+ console.error("Geocoding error:", error);
+ throw error;
}
-
- return {
- features: data.features.map((feature: any) => ({
- name: feature.properties.name_preferred || feature.properties.name,
- formatted_address: feature.properties.full_address,
- geometry: feature.geometry,
- })),
- };
},
}),
text_search: tool({
@@ -713,6 +1149,16 @@ When asked a "What is" question, maintain the same format as the question and an
console.log("Called Tool: ", event.chunk.toolName);
}
},
+ onStepFinish(event) {
+ if (event.warnings) {
+ console.log("Warnings: ", event.warnings);
+ }
+ },
+ onFinish(event) {
+ console.log("Fin reason: ", event.finishReason);
+ console.log("Steps ", event.steps);
+ console.log("Messages: ", event.response.messages[event.response.messages.length - 1].content);
+ },
});
return result.toDataStreamResponse();
diff --git a/app/globals.css b/app/globals.css
index 57ab585..84437c8 100644
--- a/app/globals.css
+++ b/app/globals.css
@@ -75,6 +75,11 @@ h1 {
font-family: var(--font-serif);
}
+.gradient-mask {
+ mask-image: linear-gradient(to bottom, black 30%, transparent 100%);
+ -webkit-mask-image: linear-gradient(to bottom, black 30%, transparent 100%);
+}
+
@layer utilities {
.text-balance {
text-wrap: balance;
diff --git a/app/layout.tsx b/app/layout.tsx
index cd82281..923d9f4 100644
--- a/app/layout.tsx
+++ b/app/layout.tsx
@@ -14,7 +14,17 @@ export const metadata: Metadata = {
openGraph: {
url: "https://mplx.run",
siteName: "MiniPerplx",
- }
+ },
+ keywords: [
+ "MiniPerplx",
+ "mplx",
+ "mplx.run",
+ "search engine",
+ "AI",
+ "ai search engine",
+ "perplexity",
+ "minimalistic search engine",
+ ]
};
export const viewport: Viewport = {
diff --git a/app/page.tsx b/app/page.tsx
index ac9a658..4ee5642 100644
--- a/app/page.tsx
+++ b/app/page.tsx
@@ -37,7 +37,8 @@ import {
Menu,
X,
BarChart,
- CircleDot
+ CircleDot,
+ ShoppingBasket
} from "lucide-react"
import NextLink from "next/link"
import {
@@ -53,6 +54,7 @@ import { cn } from '@/lib/utils';
import { Tweet } from 'react-tweet'
import Image from 'next/image';
import { TweetGrid } from '@/components/ui/tweet-grid';
+import { Newspaper, XLogo, YoutubeLogo } from '@phosphor-icons/react';
function BentoCard({
title,
@@ -110,7 +112,7 @@ const TestimonialSection: React.FC = () => {
>
-
+
);
@@ -513,7 +515,7 @@ const FloatingIcon: React.FC<{ Icon: LucideIcon }> = ({ Icon }) => (
)
const FloatingIcons: React.FC = () => {
- const icons = [Search, Zap, Code, Cloud, Link, MapPin, Globe, Mic];
+ const icons = [Search, Zap, Code, Cloud, Link, MapPin, Globe, Mic, Github, XLogo, Newspaper, YoutubeLogo]
return (
@@ -601,9 +603,12 @@ const LandingPage: React.FC = () => {
{ icon: Globe, title: "Web Search", description: "Powered by Tavily AI for comprehensive web results." },
{ icon: Code, title: "Code Interpreter", description: "Utilize e2b.dev for advanced code interpretation and execution." },
{ icon: Cloud, title: "Weather Forecast", description: "Get accurate weather information via OpenWeatherMap." },
- { icon: Link, title: "URL Summary", description: "Summarize web content quickly with FireCrawl's Scrape API." },
- { icon: MapPin, title: "Location Search", description: "Find places and nearby locations using Google Maps API." },
- { icon: Mic, title: "Translation & TTS", description: "Translate text and convert to speech with OpenAI TTS." },
+                        { icon: YoutubeLogo, title: "YouTube Search", description: "Search YouTube for videos and get summarized results." },
+ { icon: XLogo, title: "Search X Posts", description: "Search for posts on X.com" },
+ { icon: Newspaper, title: "Research Paper Search", description: "Search for research papers on arXiv and more" },
+                        { icon: MapPin, title: "Location Search", description: "Find places and nearby locations using Google Maps API, Mapbox and TripAdvisor API." },
+ { icon: Mic, title: "Translation & TTS", description: "Translate text and convert to speech with Elevenlabs TTS and Microsoft's Translation API." },
+ { icon: ShoppingBasket, title: "Product Search", description: "Search for products on Amazon." },
]
const containerVariants = {
@@ -765,6 +770,15 @@ const LandingPage: React.FC = () => {
className="h-12 w-auto"
/>
+
+
+