{
case 'openai-compatible':
const openai = createOpenAI({
apiKey: config.config.ai.apiKey,
- baseURL: config.config.ai.apiBase || 'https://api.openai.com/v1',
+ baseURL: config.config.ai.apiBase || 'https://api.openai.com/v1', // TODO: better default
})
return openai(config.config.ai.model)
default:
diff --git a/i18n.config.ts b/i18n.config.ts
new file mode 100644
index 0000000..88bee68
--- /dev/null
+++ b/i18n.config.ts
@@ -0,0 +1,12 @@
+import en from '~/i18n/en.json'
+import zh from '~/i18n/zh.json'
+
+export default defineI18nConfig(() => ({
+ legacy: false,
+ fallbackLocale: 'en',
+ availableLocales: ['en', 'zh'],
+ messages: {
+ en,
+ zh,
+ },
+}))
diff --git a/i18n/en.json b/i18n/en.json
new file mode 100644
index 0000000..106bd8b
--- /dev/null
+++ b/i18n/en.json
@@ -0,0 +1,74 @@
+{
+ "language": "English",
+ "index": {
+ "projectDescription": "This is a web UI for {0} that allows AI to search online and dig deeper on its own based on specific questions, and then output a research report.\nThis project features streaming AI responses for realtime feedback, and visualization of the research process using a tree structure.\nAll API requests are directly sent from your browser. No remote data stored.",
+ "missingConfigTitle": "Config not set",
+ "missingConfigDescription": "This project requires you to bring your own API keys."
+ },
+ "settings": {
+ "title": "Settings",
+ "disclaimer": "Settings are stored locally in your browser.",
+ "save": "Save",
+ "ai": {
+ "provider": "AI Provider",
+ "apiKey": "API Key",
+ "apiBase": "API Base URL",
+ "model": "Model",
+ "contextSize": "Context Size",
+ "contextSizeHelp": "The maximum size of the context in tokens. This is the maximum number of tokens that will be sent to the model. The default is 128,000 tokens.",
+ "providers": {
+ "openaiCompatible": {
+ "title": "OpenAI Compatible",
+ "description": "Currently only supports OpenAI compatible providers, e.g. Gemini, Together AI, DeepSeek, SiliconCloud, ...",
+ "apiBasePlaceholder": "https://api.openai.com/v1"
+ }
+ }
+ },
+ "webSearch": {
+ "provider": "Web Search Provider",
+ "providerHelp": "Currently only supports Tavily. It provides lots of free quota (1000 credits / month).\nGet one API key at {0}.",
+ "apiKey": "API Key"
+ }
+ },
+ "researchTopic": {
+ "title": "1. Research Topic",
+ "placeholder": "Whatever you want to research...",
+ "numOfQuestions": "Number of Questions",
+ "numOfQuestionsHelp": "The number of follow-up questions to clarify.",
+ "depth": "Depth",
+ "depthHelp": "Number of iterations.",
+ "breadth": "Breadth",
+ "breadthHelp": "Number of searches in the first iteration. The search width of each iteration is half of the previous one.",
+ "start": "Start Research",
+ "researching": "Researching..."
+ },
+ "modelFeedback": {
+ "title": "2. Model Feedback",
+ "description": "The AI will ask you some follow-up questions to help you clarify the research direction.",
+ "waiting": "Waiting for model feedback...",
+ "submit": "Submit Answer",
+ "error": "Error getting feedback: {0}"
+ },
+ "webBrowsing": {
+ "title": "3. Web Browsing",
+ "description": "The AI will then search the web based on our research goal, and iterate until the depth is reached.",
+ "clickToView": "Click a child node to view details.",
+ "nodeDetails": "Node Details",
+ "startNode": {
+ "label": "Start",
+ "description": "This is the beginning of your deep research journey!"
+ },
+ "researchGoal": "Research Goal",
+ "visitedUrls": "Visited URLs",
+ "learnings": "Learnings",
+ "generating": "Generating..."
+ },
+ "researchReport": {
+ "title": "4. Research Report",
+ "exportPdf": "Export PDF",
+ "sources": "Sources",
+ "waiting": "Waiting for report...",
+ "generating": "Generating report...",
+ "error": "Generate report failed: {0}"
+ }
+}
\ No newline at end of file
diff --git a/i18n/zh.json b/i18n/zh.json
new file mode 100644
index 0000000..be2390a
--- /dev/null
+++ b/i18n/zh.json
@@ -0,0 +1,74 @@
+{
+ "language": "中文",
+ "index": {
+ "projectDescription": "Deep Research 是 {0} 的可视化 UI,可以让 AI 根据特定问题联网搜索并自行深挖,并输出研究报告。\n本项目可以流式传输 AI 的回答来实时反馈,并使用树状结构可视化搜索过程。\n全部 API 请求都在浏览器本地完成。",
+ "missingConfigTitle": "需要配置 API",
+ "missingConfigDescription": "本项目需要您自备 AI 和联网搜索服务的配置 (Bring Your Own Key)"
+ },
+ "settings": {
+ "title": "设置",
+ "disclaimer": "所有设置本地保存",
+ "save": "保存",
+ "ai": {
+ "provider": "AI 服务",
+ "apiKey": "API 密钥",
+ "apiBase": "API Base URL",
+ "model": "模型名称",
+ "contextSize": "上下文大小",
+ "contextSizeHelp": "上下文的最大大小(以 token 计)。这是将发送给模型的最大 token 数量。默认值为 128,000 个 token。",
+ "providers": {
+ "openaiCompatible": {
+ "title": "OpenAI Compatible",
+ "description": "目前仅支持与 OpenAI 兼容的提供商,如 Gemini、Together AI、DeepSeek、SiliconCloud……",
+ "apiBasePlaceholder": "https://api.openai.com/v1"
+ }
+ }
+ },
+ "webSearch": {
+ "provider": "联网搜索服务",
+ "providerHelp": "目前仅支持 Tavily,每个月可以免费搜索 1000 次。\n请在 {0} 生成一个 API 密钥。",
+ "apiKey": "API 密钥"
+ }
+ },
+ "researchTopic": {
+ "title": "1. 研究主题",
+ "placeholder": "任何你想了解的内容...",
+ "numOfQuestions": "问题数量",
+ "numOfQuestionsHelp": "AI 询问你的问题数量。这些问题能让 AI 更好地了解你的研究目标。",
+ "depth": "研究深度 (Depth)",
+ "depthHelp": "联网搜索的迭代轮数。",
+ "breadth": "研究广度 (Breadth)",
+ "breadthHelp": "第一次迭代中的搜索次数。后续每轮迭代的搜索次数为上一轮的一半。",
+ "start": "开始研究",
+ "researching": "正在研究..."
+ },
+ "modelFeedback": {
+ "title": "2. 模型反馈",
+ "description": "AI 将会跟你确认一些细节,帮助你明确研究方向。",
+ "waiting": "等待模型反馈...",
+ "submit": "提交回答",
+ "error": "获取反馈失败:{0}"
+ },
+ "webBrowsing": {
+ "title": "3. 联网搜索",
+ "description": "AI 将根据上述信息联网搜索并自动迭代,直到迭代次数 = depth。",
+ "clickToView": "点击下面的节点查看搜索详情。",
+ "nodeDetails": "节点详情",
+ "startNode": {
+ "label": "Start",
+ "description": "这是本次研究的起点"
+ },
+ "researchGoal": "研究目标",
+ "visitedUrls": "访问网址",
+ "learnings": "结论",
+ "generating": "生成中..."
+ },
+ "researchReport": {
+ "title": "4. 研究报告",
+ "exportPdf": "导出 PDF",
+ "sources": "来源",
+ "waiting": "等待报告...",
+ "generating": "生成报告中...",
+ "error": "生成报告失败:{0}"
+ }
+}
\ No newline at end of file
diff --git a/lib/run.ts b/lib/run.ts
deleted file mode 100644
index 6423542..0000000
--- a/lib/run.ts
+++ /dev/null
@@ -1,93 +0,0 @@
-import * as fs from 'fs/promises'
-import * as readline from 'readline'
-
-import { deepResearch, writeFinalReport } from './deep-research'
-import { generateFeedback } from './feedback'
-
-const rl = readline.createInterface({
- input: process.stdin,
- output: process.stdout,
-})
-
-// Helper function to get user input
-function askQuestion(query: string): Promise {
- return new Promise((resolve) => {
- rl.question(query, (answer) => {
- resolve(answer)
- })
- })
-}
-
-// run the agent
-async function run() {
- // Get initial query
- const initialQuery = await askQuestion('What would you like to research? ')
-
- // Get breath and depth parameters
- const breadth =
- parseInt(
- await askQuestion(
- 'Enter research breadth (recommended 2-10, default 4): ',
- ),
- 10,
- ) || 4
- const depth =
- parseInt(
- await askQuestion('Enter research depth (recommended 1-5, default 2): '),
- 10,
- ) || 2
-
- console.log(`Creating research plan...`)
-
- // Generate follow-up questions
- const followUpQuestions = await generateFeedback({
- query: initialQuery,
- })
-
- console.log(
- '\nTo better understand your research needs, please answer these follow-up questions:',
- )
-
- // Collect answers to follow-up questions
- const answers: string[] = []
- for (const question of followUpQuestions) {
- const answer = await askQuestion(`\n${question}\nYour answer: `)
- answers.push(answer)
- }
-
- // Combine all information for deep research
- const combinedQuery = `
-Initial Query: ${initialQuery}
-Follow-up Questions and Answers:
-${followUpQuestions.map((q, i) => `Q: ${q}\nA: ${answers[i]}`).join('\n')}
-`
-
- console.log('\nResearching your topic...')
-
- const { learnings, visitedUrls } = await deepResearch({
- query: combinedQuery,
- breadth,
- depth,
- })
-
- console.log(`\n\nLearnings:\n\n${learnings.join('\n')}`)
- console.log(
- `\n\nVisited URLs (${visitedUrls.length}):\n\n${visitedUrls.join('\n')}`,
- )
- console.log('Writing final report...')
-
- const report = await writeFinalReport({
- prompt: combinedQuery,
- learnings,
- visitedUrls,
- })
-
- // Save report to file
- await fs.writeFile('output.md', report, 'utf-8')
-
- console.log(`\n\nFinal Report:\n\n${report}`)
- console.log('\nReport has been saved to output.md')
- rl.close()
-}
-
-run().catch(console.error)
diff --git a/nuxt.config.ts b/nuxt.config.ts
index b5219d5..cb9f56a 100644
--- a/nuxt.config.ts
+++ b/nuxt.config.ts
@@ -1,6 +1,24 @@
// https://nuxt.com/docs/api/configuration/nuxt-config
export default defineNuxtConfig({
- modules: ['@pinia/nuxt', '@nuxt/ui', '@nuxtjs/color-mode', '@vueuse/nuxt'],
+ modules: [
+ '@pinia/nuxt',
+ '@nuxt/ui',
+ '@nuxtjs/color-mode',
+ '@vueuse/nuxt',
+ '@nuxtjs/i18n',
+ ],
+
+ i18n: {
+ vueI18n: './i18n.config.ts',
+ strategy: 'no_prefix',
+ locales: ['en', 'zh'],
+ detectBrowserLanguage: {
+ alwaysRedirect: true,
+ useCookie: true,
+ cookieKey: 'i18n_redirected',
+ redirectOn: 'root',
+ },
+ },
colorMode: {
preference: 'system',
diff --git a/package.json b/package.json
index 9d4529b..22d1934 100644
--- a/package.json
+++ b/package.json
@@ -14,9 +14,9 @@
"@ai-sdk/ui-utils": "^1.1.11",
"@ai-sdk/vue": "^1.1.11",
"@iconify-json/lucide": "^1.2.26",
- "@mendable/firecrawl-js": "^1.16.0",
"@nuxt/ui": "3.0.0-alpha.12",
"@nuxtjs/color-mode": "^3.5.2",
+ "@nuxtjs/i18n": "9.2.0",
"@pinia/nuxt": "^0.10.0",
"@tailwindcss/typography": "^0.5.16",
"@tavily/core": "^0.0.3",
diff --git a/pages/index.vue b/pages/index.vue
index 4f00efb..c6340ca 100644
--- a/pages/index.vue
+++ b/pages/index.vue
@@ -3,27 +3,29 @@
-
- Deep Research Assistant
-
+
Deep Research
+
-
- This is a web UI for
-
+
+
dzhng/deep-research
-
- . It features streaming AI responses for realtime feedback, and
- viasualization of the research process using a tree structure.
-
- All API requests are directly sent from your browser. No remote data
- stored.
-