diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 7712d974..00000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, 
-or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -flynn.zhang@foxmail.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. 
This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. 
diff --git a/LICENSE b/LICENSE index 542e91f4..abd17ab5 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,211 @@ -MIT License - -Copyright (c) 2023 Zhang Yifei - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +--- + +This product bundles various third-party components under other open source licenses. +This section summarizes those components and their licenses. See licenses/ +for text of these licenses. + +## MIT License + +[ChatGPTNextWeb/ChatGPT-Next-Web](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web?tab=readme-ov-file) diff --git a/README.md b/README.md index 633124ec..c9651bdf 100644 --- a/README.md +++ b/README.md @@ -1,391 +1,36 @@
-icon + -

NextChat (ChatGPT Next Web)

+# WebLLM Chat -English / [简体中文](./README_CN.md) + +Static Badge -One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support. +**AI Conversations, Fully In-Browser.** -一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。 - -[![Web][Web-image]][web-url] -[![Windows][Windows-image]][download-url] -[![MacOS][MacOS-image]][download-url] -[![Linux][Linux-image]][download-url] - -[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/NextChatDev) - -[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) - -[web-url]: https://chatgpt.nextweb.fun -[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases -[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge -[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows -[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple -[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) - -[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA) - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -![cover](./docs/images/cover.png) +[**Chat Now**](https://chat.webllm.ai/)
-## Features - -- **Deploy for free with one-click** on Vercel in under 1 minute -- Compact client (~5MB) on Linux/Windows/MacOS, [download it now](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) -- Fully compatible with self-deployed LLMs, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI) -- Privacy first, all data is stored locally in the browser -- Markdown support: LaTex, mermaid, code highlight, etc. -- Responsive design, dark mode and PWA -- Fast first screen loading speed (~100kb), support streaming response -- New in v2: create, share and debug your chat tools with prompt templates (mask) -- Awesome prompts powered by [awesome-chatgpt-prompts-zh](https://github.com/PlexPt/awesome-chatgpt-prompts-zh) and [awesome-chatgpt-prompts](https://github.com/f/awesome-chatgpt-prompts) -- Automatically compresses chat history to support long conversations while also saving your tokens -- I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia - -## Roadmap - -- [x] System Prompt: pin a user defined prompt as system prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138) -- [x] User Prompt: user can edit and save custom prompts to prompt list -- [x] Prompt Template: create a new chat with pre-defined in-context prompts [#993](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/993) -- [x] Share as image, share to ShareGPT [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) -- [x] Desktop App with tauri -- [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc. -- [ ] Plugins: support network search, calculator, any other apis etc. 
[#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) - -## What's New - -- 🚀 v2.10.1 support Google Gemini Pro model. -- 🚀 v2.9.11 you can use azure endpoint now. -- 🚀 v2.8 now we have a client that runs across all platforms! -- 🚀 v2.7 let's share conversations as image, or share to ShareGPT! -- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/). - -## 主要功能 - -- 在 1 分钟内使用 Vercel **免费一键部署** -- 提供体积极小(~5MB)的跨平台客户端(Linux/Windows/MacOS), [下载地址](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) -- 完整的 Markdown 支持:LaTex 公式、Mermaid 流程图、代码高亮等等 -- 精心设计的 UI,响应式设计,支持深色模式,支持 PWA -- 极快的首屏加载速度(~100kb),支持流式响应 -- 隐私安全,所有数据保存在用户浏览器本地 -- 预制角色功能(面具),方便地创建、分享和调试你的个性化对话 -- 海量的内置 prompt 列表,来自[中文](https://github.com/PlexPt/awesome-chatgpt-prompts-zh)和[英文](https://github.com/f/awesome-chatgpt-prompts) -- 自动压缩上下文聊天记录,在节省 Token 的同时支持超长对话 -- 多国语言支持:English, 简体中文, 繁体中文, 日本語, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia -- 拥有自己的域名?好上加好,绑定后即可在任何地方**无障碍**快速访问 - -## 开发计划 - -- [x] 为每个对话设置系统 Prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138) -- [x] 允许用户自行编辑内置 Prompt 列表 -- [x] 预制角色:使用预制角色快速定制新对话 [#993](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/993) -- [x] 分享为图片,分享到 ShareGPT 链接 [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) -- [x] 使用 tauri 打包桌面应用 -- [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) -- [ ] 插件机制,支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) - -## 最新动态 - -- 🚀 v2.0 已经发布,现在你可以使用面具功能快速创建预制对话了! 
了解更多: [ChatGPT 提示词高阶技能:零次、一次和少样本提示](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)。 -- 💡 想要更方便地随时随地使用本项目?可以试下这款桌面插件:https://github.com/mushan0x0/AI0x0.com -- 🚀 v2.7 现在可以将会话分享为图片了,也可以分享到 ShareGPT 的在线链接。 -- 🚀 v2.8 发布了横跨 Linux/Windows/MacOS 的体积极小的客户端。 -- 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。 - -## Get Started - -> [简体中文 > 如何开始使用](./README_CN.md#开始使用) - -1. Get [OpenAI API Key](https://platform.openai.com/account/api-keys); -2. Click - [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web), remember that `CODE` is your page password; -3. Enjoy :) - -## FAQ - -[简体中文 > 常见问题](./docs/faq-cn.md) - -[English > FAQ](./docs/faq-en.md) - -## Keep Updated - -> [简体中文 > 如何保持代码更新](./README_CN.md#保持更新) - -If you have deployed your own project with just one click following the steps above, you may encounter the issue of "Updates Available" constantly showing up. This is because Vercel will create a new project for you by default instead of forking this project, resulting in the inability to detect updates correctly. - -We recommend that you follow the steps below to re-deploy: - -- Delete the original repository; -- Use the fork button in the upper right corner of the page to fork this project; -- Choose and deploy in Vercel again, [please see the detailed tutorial](./docs/vercel-cn.md). - -### Enable Automatic Updates - -> If you encounter a failure of Upstream Sync execution, please manually sync fork once. - -After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. 
Once enabled, automatic updates will be scheduled every hour: - -![Automatic Updates](./docs/images/enable-actions.jpg) - -![Enable Automatic Updates](./docs/images/enable-actions-sync.jpg) - -### Manually Updating Code - -If you want to update instantly, you can check out the [GitHub documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork) to learn how to synchronize a forked project with upstream code. - -You can star or watch this project or follow author to get release notifications in time. - -## Access Password - -> [简体中文 > 如何增加访问密码](./README_CN.md#配置页面访问密码) - -This project provides limited access control. Please add an environment variable named `CODE` on the vercel environment variables page. The value should be passwords separated by comma like this: - -``` -code1,code2,code3 -``` - -After adding or modifying this environment variable, please redeploy the project for the changes to take effect. - -## Environment Variables - -> [简体中文 > 如何配置 api key、访问密码、接口代理](./README_CN.md#环境变量) - -### `CODE` (optional) - -Access password, separated by comma. - -### `OPENAI_API_KEY` (required) - -Your openai api key, join multiple api keys with comma. - -### `BASE_URL` (optional) - -> Default: `https://api.openai.com` - -> Examples: `http://your-openai-proxy.com` - -Override openai api request base url. - -### `OPENAI_ORG_ID` (optional) - -Specify OpenAI organization ID. - -### `AZURE_URL` (optional) - -> Example: https://{azure-resource-url}/openai/deployments/{deploy-name} - -Azure deploy url. - -### `AZURE_API_KEY` (optional) - -Azure Api Key. - -### `AZURE_API_VERSION` (optional) - -Azure Api Version, find it at [Azure Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions). - -### `GOOGLE_API_KEY` (optional) - -Google Gemini Pro Api Key. - -### `GOOGLE_URL` (optional) - -Google Gemini Pro Api Url. 
- -### `ANTHROPIC_API_KEY` (optional) - -anthropic claude Api Key. - -### `ANTHROPIC_API_VERSION` (optional) - -anthropic claude Api version. - -### `ANTHROPIC_URL` (optional) - -anthropic claude Api Url. - -### `HIDE_USER_API_KEY` (optional) - -> Default: Empty - -If you do not want users to input their own API key, set this value to 1. - -### `DISABLE_GPT4` (optional) - -> Default: Empty - -If you do not want users to use GPT-4, set this value to 1. - -### `ENABLE_BALANCE_QUERY` (optional) - -> Default: Empty - -If you do want users to query balance, set this value to 1. - -### `DISABLE_FAST_LINK` (optional) - -> Default: Empty - -If you want to disable parse settings from url, set this to 1. - -### `CUSTOM_MODELS` (optional) - -> Default: Empty -> Example: `+llama,+claude-2,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` means add `llama, claude-2` to model list, and remove `gpt-3.5-turbo` from list, and display `gpt-4-1106-preview` as `gpt-4-turbo`. - -To control custom models, use `+` to add a custom model, use `-` to hide a model, use `name=displayName` to customize model name, separated by comma. - -User `-all` to disable all default models, `+all` to enable all default models. 
- -### `WHITE_WEBDEV_ENDPOINTS` (可选) - -You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: -- Each address must be a complete endpoint -> `https://xxxx/yyy` -- Multiple addresses are connected by ', ' - -## Requirements - -NodeJS >= 18, Docker >= 20 - -## Development - -> [简体中文 > 如何进行二次开发](./README_CN.md#开发) - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -Before starting development, you must create a new `.env.local` file at project root, and place your api key into it: - -``` -OPENAI_API_KEY= - -# if you are not able to access openai service, use this BASE_URL -BASE_URL=https://chatgpt1.nextweb.fun/api/proxy -``` - -### Local Development - -```shell -# 1. install nodejs and yarn first -# 2. config local env vars in `.env.local` -# 3. run -yarn install -yarn dev -``` - -## Deployment - -> [简体中文 > 如何部署到私人服务器](./README_CN.md#部署) - -### Docker (Recommended) - -```shell -docker pull yidadaa/chatgpt-next-web - -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY=sk-xxxx \ - -e CODE=your-password \ - yidadaa/chatgpt-next-web -``` - -You can start service behind a proxy: - -```shell -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY=sk-xxxx \ - -e CODE=your-password \ - -e PROXY_URL=http://localhost:7890 \ - yidadaa/chatgpt-next-web -``` - -If your proxy needs password, use: - -```shell --e PROXY_URL="http://127.0.0.1:7890 user pass" -``` - -### Shell - -```shell -bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh) -``` - -## Synchronizing Chat Records (UpStash) - -| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Italiano](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md) - -## Documentation - -> Please go to the [docs][./docs] directory 
for more documentation instructions. - -- [Deploy with cloudflare (Deprecated)](./docs/cloudflare-pages-en.md) -- [Frequent Ask Questions](./docs/faq-en.md) -- [How to add a new translation](./docs/translation.md) -- [How to use Vercel (No English)](./docs/vercel-cn.md) -- [User Manual (Only Chinese, WIP)](./docs/user-manual-cn.md) - -## Screenshots - -![Settings](./docs/images/settings.png) - -![More](./docs/images/more.png) - -## Translation - -If you want to add a new translation, read this [document](./docs/translation.md). - -## Donation +## Introduction -[Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa) +WebLLM Chat is an AI chat webapp that combines [WebLLM](https://github.com/mlc-ai/web-llm) with a user-friendly interface to deliver a fully open-source and accessible AI chat application. WebLLM Chat leverages WebGPU to run large language models (LLMs) natively in your browser, offering a seamless AI chatting experience with unprecedented privacy and performance. -## Special Thanks +## Key Features -### Sponsor +- **Browser-Native AI**: Experience cutting-edge language models running natively within your web browser with WebGPU acceleration, eliminating the need for server-side processing or cloud dependencies. +- **User-Friendly Interface**: Enjoy the intuitive and feature-rich user interface of [NextChat](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web), complete with markdown support, dark mode, and a responsive design optimized for various screen sizes. +- **Privacy-Focused**: With all data processing happening locally within your browser, WebLLM Chat prioritizes your privacy and ensures your data remains secure locally. +- **Open Source and Customizable**: Build and customize your own AI-powered applications with our open-source framework. -> 仅列出捐赠金额 >= 100RMB 的用户。 +WebLLM Chat is a pioneering initiative that combines the robust backend capabilities of WebLLM with the user-friendly interface of NextChat. 
As a part of the broader MLC.ai family, this project contributes to our mission of democratizing AI technology by making powerful tools accessible directly to end-users. By integrating with NextChat, WebLLM Chat not only enhances the chatting experience but also broadens the scope for deployment of self-hosted and customizable language models. -[@mushan0x0](https://github.com/mushan0x0) -[@ClarenceDan](https://github.com/ClarenceDan) -[@zhangjia](https://github.com/zhangjia) -[@hoochanlon](https://github.com/hoochanlon) -[@relativequantum](https://github.com/relativequantum) -[@desenmeng](https://github.com/desenmeng) -[@webees](https://github.com/webees) -[@chazzhou](https://github.com/chazzhou) -[@hauy](https://github.com/hauy) -[@Corwin006](https://github.com/Corwin006) -[@yankunsong](https://github.com/yankunsong) -[@ypwhs](https://github.com/ypwhs) -[@fxxxchao](https://github.com/fxxxchao) -[@hotic](https://github.com/hotic) -[@WingCH](https://github.com/WingCH) -[@jtung4](https://github.com/jtung4) -[@micozhu](https://github.com/micozhu) -[@jhansion](https://github.com/jhansion) -[@Sha1rholder](https://github.com/Sha1rholder) -[@AnsonHyq](https://github.com/AnsonHyq) -[@synwith](https://github.com/synwith) -[@piksonGit](https://github.com/piksonGit) -[@ouyangzhiping](https://github.com/ouyangzhiping) -[@wenjiavv](https://github.com/wenjiavv) -[@LeXwDeX](https://github.com/LeXwDeX) -[@Licoy](https://github.com/Licoy) -[@shangmin2009](https://github.com/shangmin2009) +## Community and Contributions -### Contributors +WebLLM Chat thrives on community involvement. We are committed to fostering an inclusive and innovative community where developers and AI enthusiasts can collaborate, contribute, and push the boundaries of what's possible in AI technology. Join us on Discord to connect with fellow developers and contribute to the project. 
- - - +## Acknowledgements -## LICENSE +WebLLM Chat is built upon the remarkable work of the [WebLLM](https://github.com/mlc-ai/web-llm/) and [NextChat](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web) projects. We extend our sincere gratitude to the developers and contributors of these projects for their invaluable efforts in advancing the field of browser-based AI and creating user-friendly chat interfaces. -[MIT](https://opensource.org/license/mit/) +Further more, this project is only possible thanks to the shoulders of open-source ecosystems that we stand on. We want to thank the Apache TVM community and developers of the TVM Unity effort. The open-source ML community members made these models publicly available. PyTorch and Hugging Face communities make these models accessible. We would like to thank the teams behind Vicuna, SentencePiece, LLaMA, Alpaca. We also would like to thank the WebAssembly, Emscripten, and WebGPU communities. Finally, thanks to Dawn and WebGPU developers. diff --git a/README_CN.md b/README_CN.md deleted file mode 100644 index 10b5fd03..00000000 --- a/README_CN.md +++ /dev/null @@ -1,242 +0,0 @@ -
-预览 - -

NextChat

- -一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 - -[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) - -[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA) - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -![主界面](./docs/images/cover.png) - -
- -## 开始使用 - -1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys); -2. 点击右侧按钮开始部署: - [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE; -3. 部署完毕后,即可开始使用; -4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。 - -## 保持更新 - -如果你按照上述步骤一键部署了自己的项目,可能会发现总是提示“存在更新”的问题,这是由于 Vercel 会默认为你创建一个新项目而不是 fork 本项目,这会导致无法正确地检测更新。 -推荐你按照下列步骤重新部署: - -- 删除掉原先的仓库; -- 使用页面右上角的 fork 按钮,fork 本项目; -- 在 Vercel 重新选择并部署,[请查看详细教程](./docs/vercel-cn.md#如何新建项目)。 - -### 打开自动更新 - -> 如果你遇到了 Upstream Sync 执行错误,请手动 Sync Fork 一次! - -当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows,并启用 Upstream Sync Action,启用之后即可开启每小时定时自动更新: - -![自动更新](./docs/images/enable-actions.jpg) - -![启用自动更新](./docs/images/enable-actions-sync.jpg) - -### 手动更新代码 - -如果你想让手动立即更新,可以查看 [Github 的文档](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork) 了解如何让 fork 的项目与上游代码同步。 - -你可以 star/watch 本项目或者 follow 作者来及时获得新功能更新通知。 - -## 配置页面访问密码 - -> 配置密码后,用户需要在设置页手动填写访问码才可以正常聊天,否则会通过消息提示未授权状态。 - -> **警告**:请务必将密码的位数设置得足够长,最好 7 位以上,否则[会被爆破](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518)。 - -本项目提供有限的权限控制功能,请在 Vercel 项目控制面板的环境变量页增加名为 `CODE` 的环境变量,值为用英文逗号分隔的自定义密码: - -``` -code1,code2,code3 -``` - -增加或修改该环境变量后,请**重新部署**项目使改动生效。 - -## 环境变量 - -> 本项目大多数配置项都通过环境变量来设置,教程:[如何修改 Vercel 环境变量](./docs/vercel-cn.md)。 - -### `OPENAI_API_KEY` (必填项) - -OpanAI 密钥,你在 openai 账户页面申请的 api key,使用英文逗号隔开多个 key,这样可以随机轮询这些 key。 - -### `CODE` (可选) - -访问密码,可选,可以使用逗号隔开多个密码。 - -**警告**:如果不填写此项,则任何人都可以直接使用你部署后的网站,可能会导致你的 token 被急速消耗完毕,建议填写此选项。 - -### `BASE_URL` (可选) - -> Default: `https://api.openai.com` - -> Examples: 
`http://your-openai-proxy.com` - -OpenAI 接口代理 URL,如果你手动配置了 openai 接口代理,请填写此选项。 - -> 如果遇到 ssl 证书问题,请将 `BASE_URL` 的协议设置为 http。 - -### `OPENAI_ORG_ID` (可选) - -指定 OpenAI 中的组织 ID。 - -### `AZURE_URL` (可选) - -> 形如:https://{azure-resource-url}/openai/deployments/{deploy-name} - -Azure 部署地址。 - -### `AZURE_API_KEY` (可选) - -Azure 密钥。 - -### `AZURE_API_VERSION` (可选) - -Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。 - -### `GOOGLE_API_KEY` (optional) - -Google Gemini Pro 密钥. - -### `GOOGLE_URL` (optional) - -Google Gemini Pro Api Url. - -### `ANTHROPIC_API_KEY` (optional) - -anthropic claude Api Key. - -### `ANTHROPIC_API_VERSION` (optional) - -anthropic claude Api version. - -### `ANTHROPIC_URL` (optional) - -anthropic claude Api Url. - -### `HIDE_USER_API_KEY` (可选) - -如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 - -### `DISABLE_GPT4` (可选) - -如果你不想让用户使用 GPT-4,将此环境变量设置为 1 即可。 - -### `ENABLE_BALANCE_QUERY` (可选) - -如果你想启用余额查询功能,将此环境变量设置为 1 即可。 - -### `DISABLE_FAST_LINK` (可选) - -如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。 - -### `WHITE_WEBDEV_ENDPOINTS` (可选) - -如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: -- 每一个地址必须是一个完整的 endpoint -> `https://xxxx/xxx` -- 多个地址以`,`相连 - -### `CUSTOM_MODELS` (可选) - -> 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat` 和 `glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo`。 -> 如果你想先禁用所有模型,再启用指定模型,可以使用 `-all,+gpt-3.5-turbo`,则表示仅启用 `gpt-3.5-turbo` - -用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 - -## 开发 - -点击下方按钮,开始二次开发: - -[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) - -在开始写代码之前,需要在项目根目录新建一个 `.env.local` 文件,里面填入环境变量: - -``` -OPENAI_API_KEY= - -# 中国大陆用户,可以使用本项目自带的代理进行开发,你也可以自由选择其他代理地址 -BASE_URL=https://b.nextweb.fun/api/proxy -``` - -### 本地开发 - -1. 安装 nodejs 18 和 yarn,具体细节请询问 ChatGPT; -2. 
执行 `yarn install && yarn dev` 即可。⚠️ 注意:此命令仅用于本地开发,不要用于部署! -3. 如果你想本地部署,请使用 `yarn install && yarn build && yarn start` 命令,你可以配合 pm2 来守护进程,防止被杀死,详情询问 ChatGPT。 - -## 部署 - -### 容器部署 (推荐) - -> Docker 版本需要在 20 及其以上,否则会提示找不到镜像。 - -> ⚠️ 注意:docker 版本在大多数时间都会落后最新的版本 1 到 2 天,所以部署后会持续出现“存在更新”的提示,属于正常现象。 - -```shell -docker pull yidadaa/chatgpt-next-web - -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY=sk-xxxx \ - -e CODE=页面访问密码 \ - yidadaa/chatgpt-next-web -``` - -你也可以指定 proxy: - -```shell -docker run -d -p 3000:3000 \ - -e OPENAI_API_KEY=sk-xxxx \ - -e CODE=页面访问密码 \ - --net=host \ - -e PROXY_URL=http://127.0.0.1:7890 \ - yidadaa/chatgpt-next-web -``` - -如果你的本地代理需要账号密码,可以使用: - -```shell --e PROXY_URL="http://127.0.0.1:7890 user password" -``` - -如果你需要指定其他环境变量,请自行在上述命令中增加 `-e 环境变量=环境变量值` 来指定。 - -### 本地部署 - -在控制台运行下方命令: - -```shell -bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh) -``` - -⚠️ 注意:如果你安装过程中遇到了问题,请使用 docker 部署。 - -## 鸣谢 - -### 捐赠者 - -> 见英文版。 - -### 贡献者 - -[见项目贡献者列表](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors) - -### 相关项目 - -- [one-api](https://github.com/songquanpeng/one-api): 一站式大模型额度管理平台,支持市面上所有主流大语言模型 - -## 开源协议 - -[MIT](https://opensource.org/license/mit/) diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic/[...path]/route.ts deleted file mode 100644 index 4264893d..00000000 --- a/app/api/anthropic/[...path]/route.ts +++ /dev/null @@ -1,189 +0,0 @@ -import { getServerSideConfig } from "@/app/config/server"; -import { - ANTHROPIC_BASE_URL, - Anthropic, - ApiPath, - DEFAULT_MODELS, - ModelProvider, -} from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; -import { collectModelTable } from "@/app/utils/model"; - -const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]); - -async function handle( - req: NextRequest, - { params }: { params: { path: 
string[] } }, -) { - console.log("[Anthropic Route] params ", params); - - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); - } - - const subpath = params.path.join("/"); - - if (!ALLOWD_PATH.has(subpath)) { - console.log("[Anthropic Route] forbidden path ", subpath); - return NextResponse.json( - { - error: true, - msg: "you are not allowed to request " + subpath, - }, - { - status: 403, - }, - ); - } - - const authResult = auth(req, ModelProvider.Claude); - if (authResult.error) { - return NextResponse.json(authResult, { - status: 401, - }); - } - - try { - const response = await request(req); - return response; - } catch (e) { - console.error("[Anthropic] ", e); - return NextResponse.json(prettyObject(e)); - } -} - -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; - -const serverConfig = getServerSideConfig(); - -async function request(req: NextRequest) { - const controller = new AbortController(); - - let authHeaderName = "x-api-key"; - let authValue = - req.headers.get(authHeaderName) || - req.headers.get("Authorization")?.replaceAll("Bearer ", "").trim() || - serverConfig.anthropicApiKey || - ""; - - let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Anthropic, ""); - - let baseUrl = - serverConfig.anthropicUrl || serverConfig.baseUrl || ANTHROPIC_BASE_URL; - - if (!baseUrl.startsWith("http")) { - baseUrl = `https://${baseUrl}`; - } - - if (baseUrl.endsWith("/")) { - baseUrl = baseUrl.slice(0, -1); - } - - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); - - const timeoutId = setTimeout( - () => { - controller.abort(); - }, - 10 * 60 * 1000, - ); - - const fetchUrl = `${baseUrl}${path}`; - - const fetchOptions: RequestInit = { - headers: { - 
"Content-Type": "application/json", - "Cache-Control": "no-store", - [authHeaderName]: authValue, - "anthropic-version": - req.headers.get("anthropic-version") || - serverConfig.anthropicApiVersion || - Anthropic.Vision, - }, - method: req.method, - body: req.body, - redirect: "manual", - // @ts-ignore - duplex: "half", - signal: controller.signal, - }; - - // #1815 try to refuse some request to some models - if (serverConfig.customModels && req.body) { - try { - const modelTable = collectModelTable( - DEFAULT_MODELS, - serverConfig.customModels, - ); - const clonedBody = await req.text(); - fetchOptions.body = clonedBody; - - const jsonBody = JSON.parse(clonedBody) as { model?: string }; - - // not undefined and is false - if (modelTable[jsonBody?.model ?? ""].available === false) { - return NextResponse.json( - { - error: true, - message: `you are not allowed to use ${jsonBody?.model} model`, - }, - { - status: 403, - }, - ); - } - } catch (e) { - console.error(`[Anthropic] filter`, e); - } - } - console.log("[Anthropic request]", fetchOptions.headers, req.method); - try { - const res = await fetch(fetchUrl, fetchOptions); - - console.log( - "[Anthropic response]", - res.status, - " ", - res.headers, - res.url, - ); - // to prevent browser prompt for credentials - const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); - // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); - - return new Response(res.body, { - status: res.status, - statusText: res.statusText, - headers: newHeaders, - }); - } finally { - clearTimeout(timeoutId); - } -} diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts deleted file mode 100644 index ebd19289..00000000 --- a/app/api/google/[...path]/route.ts +++ /dev/null @@ -1,116 +0,0 @@ -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; -import { getServerSideConfig } from "@/app/config/server"; -import { 
GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant"; - -async function handle( - req: NextRequest, - { params }: { params: { path: string[] } }, -) { - console.log("[Google Route] params ", params); - - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); - } - - const controller = new AbortController(); - - const serverConfig = getServerSideConfig(); - - let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; - - if (!baseUrl.startsWith("http")) { - baseUrl = `https://${baseUrl}`; - } - - if (baseUrl.endsWith("/")) { - baseUrl = baseUrl.slice(0, -1); - } - - let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", ""); - - console.log("[Proxy] ", path); - console.log("[Base Url]", baseUrl); - - const timeoutId = setTimeout( - () => { - controller.abort(); - }, - 10 * 60 * 1000, - ); - - const authResult = auth(req, ModelProvider.GeminiPro); - if (authResult.error) { - return NextResponse.json(authResult, { - status: 401, - }); - } - - const bearToken = req.headers.get("Authorization") ?? ""; - const token = bearToken.trim().replaceAll("Bearer ", "").trim(); - - const key = token ? 
token : serverConfig.googleApiKey; - - if (!key) { - return NextResponse.json( - { - error: true, - message: `missing GOOGLE_API_KEY in server env vars`, - }, - { - status: 401, - }, - ); - } - - const fetchUrl = `${baseUrl}/${path}?key=${key}`; - const fetchOptions: RequestInit = { - headers: { - "Content-Type": "application/json", - "Cache-Control": "no-store", - }, - method: req.method, - body: req.body, - // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body - redirect: "manual", - // @ts-ignore - duplex: "half", - signal: controller.signal, - }; - - try { - const res = await fetch(fetchUrl, fetchOptions); - // to prevent browser prompt for credentials - const newHeaders = new Headers(res.headers); - newHeaders.delete("www-authenticate"); - // to disable nginx buffering - newHeaders.set("X-Accel-Buffering", "no"); - - return new Response(res.body, { - status: res.status, - statusText: res.statusText, - headers: newHeaders, - }); - } finally { - clearTimeout(timeoutId); - } -} - -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "bom1", - "cle1", - "cpt1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; diff --git a/app/api/openai/[...path]/route.ts b/app/api/openai/[...path]/route.ts deleted file mode 100644 index 77059c15..00000000 --- a/app/api/openai/[...path]/route.ts +++ /dev/null @@ -1,96 +0,0 @@ -import { type OpenAIListModelResponse } from "@/app/client/platforms/openai"; -import { getServerSideConfig } from "@/app/config/server"; -import { ModelProvider, OpenaiPath } from "@/app/constant"; -import { prettyObject } from "@/app/utils/format"; -import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; -import { requestOpenai } from "../../common"; - -const ALLOWD_PATH = new Set(Object.values(OpenaiPath)); - -function getModels(remoteModelRes: 
OpenAIListModelResponse) { - const config = getServerSideConfig(); - - if (config.disableGPT4) { - remoteModelRes.data = remoteModelRes.data.filter( - (m) => !m.id.startsWith("gpt-4"), - ); - } - - return remoteModelRes; -} - -async function handle( - req: NextRequest, - { params }: { params: { path: string[] } }, -) { - console.log("[OpenAI Route] params ", params); - - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); - } - - const subpath = params.path.join("/"); - - if (!ALLOWD_PATH.has(subpath)) { - console.log("[OpenAI Route] forbidden path ", subpath); - return NextResponse.json( - { - error: true, - msg: "you are not allowed to request " + subpath, - }, - { - status: 403, - }, - ); - } - - const authResult = auth(req, ModelProvider.GPT); - if (authResult.error) { - return NextResponse.json(authResult, { - status: 401, - }); - } - - try { - const response = await requestOpenai(req); - - // list models - if (subpath === OpenaiPath.ListModelPath && response.status === 200) { - const resJson = (await response.json()) as OpenAIListModelResponse; - const availableModels = getModels(resJson); - return NextResponse.json(availableModels, { - status: response.status, - }); - } - - return response; - } catch (e) { - console.error("[OpenAI] ", e); - return NextResponse.json(prettyObject(e)); - } -} - -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; diff --git a/app/client/api.ts b/app/client/api.ts index 7bee546b..c1a56fed 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -6,9 +6,7 @@ import { ServiceProvider, } from "../constant"; import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; -import { ChatGPTApi } from "./platforms/openai"; -import { 
GeminiProApi } from "./platforms/google"; -import { ClaudeApi } from "./platforms/anthropic"; +import { WebLLMApi } from "./webllm"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -94,17 +92,8 @@ interface ChatProvider { export class ClientApi { public llm: LLMApi; - constructor(provider: ModelProvider = ModelProvider.GPT) { - switch (provider) { - case ModelProvider.GeminiPro: - this.llm = new GeminiProApi(); - break; - case ModelProvider.Claude: - this.llm = new ClaudeApi(); - break; - default: - this.llm = new ChatGPTApi(); - } + constructor() { + this.llm = new WebLLMApi(); } config() {} diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts deleted file mode 100644 index e90c8f05..00000000 --- a/app/client/platforms/anthropic.ts +++ /dev/null @@ -1,415 +0,0 @@ -import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant"; -import { ChatOptions, LLMApi, MultimodalContent } from "../api"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; -import { getClientConfig } from "@/app/config/client"; -import { DEFAULT_API_HOST } from "@/app/constant"; -import { RequestMessage } from "@/app/typing"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; - -import Locale from "../../locales"; -import { prettyObject } from "@/app/utils/format"; -import { getMessageTextContent, isVisionModel } from "@/app/utils"; - -export type MultiBlockContent = { - type: "image" | "text"; - source?: { - type: string; - media_type: string; - data: string; - }; - text?: string; -}; - -export type AnthropicMessage = { - role: (typeof ClaudeMapper)[keyof typeof ClaudeMapper]; - content: string | MultiBlockContent[]; -}; - -export interface AnthropicChatRequest { - model: string; // The model that will complete your prompt. - messages: AnthropicMessage[]; // The prompt that you want Claude to complete. 
- max_tokens: number; // The maximum number of tokens to generate before stopping. - stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text. - temperature?: number; // Amount of randomness injected into the response. - top_p?: number; // Use nucleus sampling. - top_k?: number; // Only sample from the top K options for each subsequent token. - metadata?: object; // An object describing metadata about the request. - stream?: boolean; // Whether to incrementally stream the response using server-sent events. -} - -export interface ChatRequest { - model: string; // The model that will complete your prompt. - prompt: string; // The prompt that you want Claude to complete. - max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping. - stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text. - temperature?: number; // Amount of randomness injected into the response. - top_p?: number; // Use nucleus sampling. - top_k?: number; // Only sample from the top K options for each subsequent token. - metadata?: object; // An object describing metadata about the request. - stream?: boolean; // Whether to incrementally stream the response using server-sent events. 
-} - -export interface ChatResponse { - completion: string; - stop_reason: "stop_sequence" | "max_tokens"; - model: string; -} - -export type ChatStreamResponse = ChatResponse & { - stop?: string; - log_id: string; -}; - -const ClaudeMapper = { - assistant: "assistant", - user: "user", - system: "user", -} as const; - -const keys = ["claude-2, claude-instant-1"]; - -export class ClaudeApi implements LLMApi { - extractMessage(res: any) { - console.log("[Response] claude response: ", res); - - return res?.content?.[0]?.text; - } - async chat(options: ChatOptions): Promise { - const visionModel = isVisionModel(options.config.model); - - const accessStore = useAccessStore.getState(); - - const shouldStream = !!options.config.stream; - - const modelConfig = { - ...useAppConfig.getState().modelConfig, - ...useChatStore.getState().currentSession().mask.modelConfig, - ...{ - model: options.config.model, - }, - }; - - const messages = [...options.messages]; - - const keys = ["system", "user"]; - - // roles must alternate between "user" and "assistant" in claude, so add a fake assistant message between two user messages - for (let i = 0; i < messages.length - 1; i++) { - const message = messages[i]; - const nextMessage = messages[i + 1]; - - if (keys.includes(message.role) && keys.includes(nextMessage.role)) { - messages[i] = [ - message, - { - role: "assistant", - content: ";", - }, - ] as any; - } - } - - const prompt = messages - .flat() - .filter((v) => { - if (!v.content) return false; - if (typeof v.content === "string" && !v.content.trim()) return false; - return true; - }) - .map((v) => { - const { role, content } = v; - const insideRole = ClaudeMapper[role] ?? 
"user"; - - if (!visionModel || typeof content === "string") { - return { - role: insideRole, - content: getMessageTextContent(v), - }; - } - return { - role: insideRole, - content: content - .filter((v) => v.image_url || v.text) - .map(({ type, text, image_url }) => { - if (type === "text") { - return { - type, - text: text!, - }; - } - const { url = "" } = image_url || {}; - const colonIndex = url.indexOf(":"); - const semicolonIndex = url.indexOf(";"); - const comma = url.indexOf(","); - - const mimeType = url.slice(colonIndex + 1, semicolonIndex); - const encodeType = url.slice(semicolonIndex + 1, comma); - const data = url.slice(comma + 1); - - return { - type: "image" as const, - source: { - type: encodeType, - media_type: mimeType, - data, - }, - }; - }), - }; - }); - - if (prompt[0]?.role === "assistant") { - prompt.unshift({ - role: "user", - content: ";", - }); - } - - const requestBody: AnthropicChatRequest = { - messages: prompt, - stream: shouldStream, - - model: modelConfig.model, - max_tokens: modelConfig.max_tokens, - temperature: modelConfig.temperature, - top_p: modelConfig.top_p, - // top_k: modelConfig.top_k, - top_k: 5, - }; - - const path = this.path(Anthropic.ChatPath); - - const controller = new AbortController(); - options.onController?.(controller); - - const payload = { - method: "POST", - body: JSON.stringify(requestBody), - signal: controller.signal, - headers: { - "Content-Type": "application/json", - Accept: "application/json", - "x-api-key": accessStore.anthropicApiKey, - "anthropic-version": accessStore.anthropicApiVersion, - Authorization: getAuthKey(accessStore.anthropicApiKey), - }, - }; - - if (shouldStream) { - try { - const context = { - text: "", - finished: false, - }; - - const finish = () => { - if (!context.finished) { - options.onFinish(context.text); - context.finished = true; - } - }; - - controller.signal.onabort = finish; - fetchEventSource(path, { - ...payload, - async onopen(res) { - const contentType = 
res.headers.get("content-type"); - console.log("response content type: ", contentType); - - if (contentType?.startsWith("text/plain")) { - context.text = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [context.text]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - context.text = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - let chunkJson: - | undefined - | { - type: "content_block_delta" | "content_block_stop"; - delta?: { - type: "text_delta"; - text: string; - }; - index: number; - }; - try { - chunkJson = JSON.parse(msg.data); - } catch (e) { - console.error("[Response] parse error", msg.data); - } - - if (!chunkJson || chunkJson.type === "content_block_stop") { - return finish(); - } - - const { delta } = chunkJson; - if (delta?.text) { - context.text += delta.text; - options.onUpdate?.(context.text, delta.text); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); - } catch (e) { - console.error("failed to chat", e); - options.onError?.(e as Error); - } - } else { - try { - controller.signal.onabort = () => options.onFinish(""); - - const res = await fetch(path, payload); - const resJson = await res.json(); - - const message = this.extractMessage(resJson); - options.onFinish(message); - } catch (e) { - console.error("failed to chat", e); - options.onError?.(e as Error); - } - } - } - async usage() { - return { - used: 0, - total: 0, - }; - } - async models() { - // const provider = { - // id: "anthropic", - // providerName: "Anthropic", - // providerType: 
"anthropic", - // }; - - return [ - // { - // name: "claude-instant-1.2", - // available: true, - // provider, - // }, - // { - // name: "claude-2.0", - // available: true, - // provider, - // }, - // { - // name: "claude-2.1", - // available: true, - // provider, - // }, - // { - // name: "claude-3-opus-20240229", - // available: true, - // provider, - // }, - // { - // name: "claude-3-sonnet-20240229", - // available: true, - // provider, - // }, - // { - // name: "claude-3-haiku-20240307", - // available: true, - // provider, - // }, - ]; - } - path(path: string): string { - const accessStore = useAccessStore.getState(); - - let baseUrl: string = ""; - - if (accessStore.useCustomConfig) { - baseUrl = accessStore.anthropicUrl; - } - - // if endpoint is empty, use default endpoint - if (baseUrl.trim().length === 0) { - const isApp = !!getClientConfig()?.isApp; - - baseUrl = isApp - ? DEFAULT_API_HOST + "/api/proxy/anthropic" - : ApiPath.Anthropic; - } - - if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) { - baseUrl = "https://" + baseUrl; - } - - baseUrl = trimEnd(baseUrl, "/"); - - return `${baseUrl}/${path}`; - } -} - -function trimEnd(s: string, end = " ") { - if (end.length === 0) return s; - - while (s.endsWith(end)) { - s = s.slice(0, -end.length); - } - - return s; -} - -function bearer(value: string) { - return `Bearer ${value.trim()}`; -} - -function getAuthKey(apiKey = "") { - const accessStore = useAccessStore.getState(); - const isApp = !!getClientConfig()?.isApp; - let authKey = ""; - - if (apiKey) { - // use user's api key first - authKey = bearer(apiKey); - } else if ( - accessStore.enabledAccessControl() && - !isApp && - !!accessStore.accessCode - ) { - // or use access code - authKey = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode); - } - - return authKey; -} diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts deleted file mode 100644 index a786f527..00000000 --- a/app/client/platforms/google.ts +++ 
/dev/null @@ -1,279 +0,0 @@ -import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; -import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; -import { getClientConfig } from "@/app/config/client"; -import { DEFAULT_API_HOST } from "@/app/constant"; -import { - getMessageTextContent, - getMessageImages, - isVisionModel, -} from "@/app/utils"; - -export class GeminiProApi implements LLMApi { - extractMessage(res: any) { - console.log("[Response] gemini-pro response: ", res); - - return ( - res?.candidates?.at(0)?.content?.parts.at(0)?.text || - res?.error?.message || - "" - ); - } - async chat(options: ChatOptions): Promise { - // const apiClient = this; - let multimodal = false; - const messages = options.messages.map((v) => { - let parts: any[] = [{ text: getMessageTextContent(v) }]; - if (isVisionModel(options.config.model)) { - const images = getMessageImages(v); - if (images.length > 0) { - multimodal = true; - parts = parts.concat( - images.map((image) => { - const imageType = image.split(";")[0].split(":")[1]; - const imageData = image.split(",")[1]; - return { - inline_data: { - mime_type: imageType, - data: imageData, - }, - }; - }), - ); - } - } - return { - role: v.role.replace("assistant", "model").replace("system", "user"), - parts: parts, - }; - }); - - // google requires that role in neighboring messages must not be the same - for (let i = 0; i < messages.length - 1; ) { - // Check if current and next item both have the role "model" - if (messages[i].role === messages[i + 1].role) { - // Concatenate the 'parts' of the current and next item - messages[i].parts = messages[i].parts.concat(messages[i + 1].parts); - // Remove the next item - messages.splice(i + 1, 1); - } else { - // Move to the next item - i++; - } - } - // if (visionModel && messages.length > 1) { - // options.onError?.(new Error("Multiturn chat is not enabled for 
models/gemini-pro-vision")); - // } - const modelConfig = { - ...useAppConfig.getState().modelConfig, - ...useChatStore.getState().currentSession().mask.modelConfig, - ...{ - model: options.config.model, - }, - }; - const requestPayload = { - contents: messages, - generationConfig: { - // stopSequences: [ - // "Title" - // ], - temperature: modelConfig.temperature, - maxOutputTokens: modelConfig.max_tokens, - topP: modelConfig.top_p, - // "topK": modelConfig.top_k, - }, - safetySettings: [ - { - category: "HARM_CATEGORY_HARASSMENT", - threshold: "BLOCK_ONLY_HIGH", - }, - { - category: "HARM_CATEGORY_HATE_SPEECH", - threshold: "BLOCK_ONLY_HIGH", - }, - { - category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", - threshold: "BLOCK_ONLY_HIGH", - }, - { - category: "HARM_CATEGORY_DANGEROUS_CONTENT", - threshold: "BLOCK_ONLY_HIGH", - }, - ], - }; - - const accessStore = useAccessStore.getState(); - - let baseUrl = ""; - - if (accessStore.useCustomConfig) { - baseUrl = accessStore.googleUrl; - } - - const isApp = !!getClientConfig()?.isApp; - - let shouldStream = !!options.config.stream; - const controller = new AbortController(); - options.onController?.(controller); - try { - // let baseUrl = accessStore.googleUrl; - - if (!baseUrl) { - baseUrl = isApp - ? 
DEFAULT_API_HOST + "/api/proxy/google/" + Google.ChatPath(modelConfig.model) - : this.path(Google.ChatPath(modelConfig.model)); - } - - if (isApp) { - baseUrl += `?key=${accessStore.googleApiKey}`; - } - const chatPayload = { - method: "POST", - body: JSON.stringify(requestPayload), - signal: controller.signal, - headers: getHeaders(), - }; - - // make a fetch request - const requestTimeoutId = setTimeout( - () => controller.abort(), - REQUEST_TIMEOUT_MS, - ); - - if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - - let existingTexts: string[] = []; - const finish = () => { - finished = true; - options.onFinish(existingTexts.join("")); - }; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - finish(); - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - fetch( - baseUrl.replace("generateContent", "streamGenerateContent"), - chatPayload, - ) - .then((response) => { - const reader = response?.body?.getReader(); - const decoder = new TextDecoder(); - let partialData = ""; - - return reader?.read().then(function processText({ - done, - value, - }): Promise { - if (done) { - if (response.status !== 200) { - try { - let data = JSON.parse(ensureProperEnding(partialData)); - if (data && data[0].error) { - options.onError?.(new Error(data[0].error.message)); - } else { - options.onError?.(new Error("Request failed")); - } - } catch (_) { - options.onError?.(new Error("Request failed")); - } - } - - console.log("Stream complete"); - // options.onFinish(responseText + remainText); - 
finished = true; - return Promise.resolve(); - } - - partialData += decoder.decode(value, { stream: true }); - - try { - let data = JSON.parse(ensureProperEnding(partialData)); - - const textArray = data.reduce( - (acc: string[], item: { candidates: any[] }) => { - const texts = item.candidates.map((candidate) => - candidate.content.parts - .map((part: { text: any }) => part.text) - .join(""), - ); - return acc.concat(texts); - }, - [], - ); - - if (textArray.length > existingTexts.length) { - const deltaArray = textArray.slice(existingTexts.length); - existingTexts = textArray; - remainText += deltaArray.join(""); - } - } catch (error) { - // console.log("[Response Animation] error: ", error,partialData); - // skip error message when parsing json - } - - return reader.read().then(processText); - }); - }) - .catch((error) => { - console.error("Error:", error); - }); - } else { - const res = await fetch(baseUrl, chatPayload); - clearTimeout(requestTimeoutId); - const resJson = await res.json(); - if (resJson?.promptFeedback?.blockReason) { - // being blocked - options.onError?.( - new Error( - "Message is being blocked for reason: " + - resJson.promptFeedback.blockReason, - ), - ); - } - const message = this.extractMessage(resJson); - options.onFinish(message); - } - } catch (e) { - console.log("[Request] failed to make a chat request", e); - options.onError?.(e as Error); - } - } - usage(): Promise { - throw new Error("Method not implemented."); - } - async models(): Promise { - return []; - } - path(path: string): string { - return "/api/google/" + path; - } -} - -function ensureProperEnding(str: string) { - if (str.startsWith("[") && !str.endsWith("]")) { - return str + "]"; - } - return str; -} diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts deleted file mode 100644 index f3599263..00000000 --- a/app/client/platforms/openai.ts +++ /dev/null @@ -1,390 +0,0 @@ -"use client"; -import { - ApiPath, - DEFAULT_API_HOST, - DEFAULT_MODELS, - 
OpenaiPath, - REQUEST_TIMEOUT_MS, - ServiceProvider, -} from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; - -import { - ChatOptions, - getHeaders, - LLMApi, - LLMModel, - LLMUsage, - MultimodalContent, -} from "../api"; -import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; -import { getClientConfig } from "@/app/config/client"; -import { makeAzurePath } from "@/app/azure"; -import { - getMessageTextContent, - getMessageImages, - isVisionModel, -} from "@/app/utils"; - -export interface OpenAIListModelResponse { - object: string; - data: Array<{ - id: string; - object: string; - root: string; - }>; -} - -interface RequestPayload { - messages: { - role: "system" | "user" | "assistant"; - content: string | MultimodalContent[]; - }[]; - stream?: boolean; - model: string; - temperature: number; - presence_penalty: number; - frequency_penalty: number; - top_p: number; - max_tokens?: number; -} - -export class ChatGPTApi implements LLMApi { - private disableListModels = true; - - path(path: string): string { - const accessStore = useAccessStore.getState(); - - let baseUrl = ""; - - if (accessStore.useCustomConfig) { - const isAzure = accessStore.provider === ServiceProvider.Azure; - - if (isAzure && !accessStore.isValidAzure()) { - throw Error( - "incomplete azure config, please check it in your settings page", - ); - } - - if (isAzure) { - path = makeAzurePath(path, accessStore.azureApiVersion); - } - - baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl; - } - - if (baseUrl.length === 0) { - const isApp = !!getClientConfig()?.isApp; - baseUrl = isApp - ? 
DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI - : ApiPath.OpenAI; - } - - if (baseUrl.endsWith("/")) { - baseUrl = baseUrl.slice(0, baseUrl.length - 1); - } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.OpenAI)) { - baseUrl = "https://" + baseUrl; - } - - console.log("[Proxy Endpoint] ", baseUrl, path); - - return [baseUrl, path].join("/"); - } - - extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? ""; - } - - async chat(options: ChatOptions) { - const visionModel = isVisionModel(options.config.model); - const messages = options.messages.map((v) => ({ - role: v.role, - content: visionModel ? v.content : getMessageTextContent(v), - })); - - const modelConfig = { - ...useAppConfig.getState().modelConfig, - ...useChatStore.getState().currentSession().mask.modelConfig, - ...{ - model: options.config.model, - }, - }; - - const requestPayload: RequestPayload = { - messages, - stream: options.config.stream, - model: modelConfig.model, - temperature: modelConfig.temperature, - presence_penalty: modelConfig.presence_penalty, - frequency_penalty: modelConfig.frequency_penalty, - top_p: modelConfig.top_p, - // max_tokens: Math.max(modelConfig.max_tokens, 1024), - // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. 
- }; - - // add max_tokens to vision model - if (visionModel && modelConfig.model.includes("preview")) { - requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); - } - - console.log("[Request] openai payload: ", requestPayload); - - const shouldStream = !!options.config.stream; - const controller = new AbortController(); - options.onController?.(controller); - - try { - const chatPath = this.path(OpenaiPath.ChatPath); - const chatPayload = { - method: "POST", - body: JSON.stringify(requestPayload), - signal: controller.signal, - headers: getHeaders(), - }; - - // make a fetch request - const requestTimeoutId = setTimeout( - () => controller.abort(), - REQUEST_TIMEOUT_MS, - ); - - if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - console.log("[Response Animation] finished"); - if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); - } - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText); - } - }; - - controller.signal.onabort = finish; - - fetchEventSource(chatPath, { - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[OpenAI] request response content type: ", - contentType, - ); - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - 
return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text); - const choices = json.choices as Array<{ - delta: { content: string }; - }>; - const delta = choices[0]?.delta?.content; - const textmoderation = json?.prompt_filter_results; - - if (delta) { - remainText += delta; - } - - if ( - textmoderation && - textmoderation.length > 0 && - ServiceProvider.Azure - ) { - const contentFilterResults = - textmoderation[0]?.content_filter_results; - console.log( - `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`, - contentFilterResults, - ); - } - } catch (e) { - console.error("[Request] parse error", text, msg); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); - } else { - const res = await fetch(chatPath, chatPayload); - clearTimeout(requestTimeoutId); - - const resJson = await res.json(); - const message = this.extractMessage(resJson); - options.onFinish(message); - } - } catch (e) { - console.log("[Request] failed to make a chat request", e); - options.onError?.(e as Error); - } - } - async usage() { - const formatDate = (d: Date) => - `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d - .getDate() - .toString() - .padStart(2, "0")}`; - const ONE_DAY = 1 * 24 * 60 * 60 * 1000; - const now = new Date(); - const 
startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1); - const startDate = formatDate(startOfMonth); - const endDate = formatDate(new Date(Date.now() + ONE_DAY)); - - const [used, subs] = await Promise.all([ - fetch( - this.path( - `${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`, - ), - { - method: "GET", - headers: getHeaders(), - }, - ), - fetch(this.path(OpenaiPath.SubsPath), { - method: "GET", - headers: getHeaders(), - }), - ]); - - if (used.status === 401) { - throw new Error(Locale.Error.Unauthorized); - } - - if (!used.ok || !subs.ok) { - throw new Error("Failed to query usage from openai"); - } - - const response = (await used.json()) as { - total_usage?: number; - error?: { - type: string; - message: string; - }; - }; - - const total = (await subs.json()) as { - hard_limit_usd?: number; - }; - - if (response.error && response.error.type) { - throw Error(response.error.message); - } - - if (response.total_usage) { - response.total_usage = Math.round(response.total_usage) / 100; - } - - if (total.hard_limit_usd) { - total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100; - } - - return { - used: response.total_usage, - total: total.hard_limit_usd, - } as LLMUsage; - } - - async models(): Promise { - if (this.disableListModels) { - return DEFAULT_MODELS.slice(); - } - - const res = await fetch(this.path(OpenaiPath.ListModelPath), { - method: "GET", - headers: { - ...getHeaders(), - }, - }); - - const resJson = (await res.json()) as OpenAIListModelResponse; - const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-")); - console.log("[Models]", chatModels); - - if (!chatModels) { - return []; - } - - return chatModels.map((m) => ({ - name: m.id, - available: true, - provider: { - id: "openai", - providerName: "OpenAI", - providerType: "openai", - }, - })); - } -} -export { OpenaiPath }; diff --git a/app/client/webllm.ts b/app/client/webllm.ts new file mode 100644 index 00000000..c79ca96e --- /dev/null +++ 
b/app/client/webllm.ts @@ -0,0 +1,54 @@ +import * as webllm from "@mlc-ai/web-llm"; + +import { ChatOptions, LLMApi } from "./api"; +import { ChatCompletionMessageParam } from "@mlc-ai/web-llm"; + +export class WebLLMApi implements LLMApi { + private currentModel = ""; + private engine: webllm.EngineInterface = new webllm.Engine(); + + async initModel( + model: string, + onUpdate?: (message: string, chunk: string) => void, + ) { + this.currentModel = model; + this.engine.setInitProgressCallback((report: webllm.InitProgressReport) => { + onUpdate?.(report.text, report.text); + }); + await this.engine.reload(this.currentModel); + } + + async chat(options: ChatOptions): Promise { + if (options.config.model != this.currentModel) { + await this.initModel(options.config.model, options.onUpdate); + } + + const reply = await this.engine.chat.completions.create({ + stream: false, + messages: options.messages as ChatCompletionMessageParam[], + }); + + if (reply.choices[0].message.content) { + options.onFinish(reply.choices[0].message.content); + } else { + options.onError?.(new Error("Empty response generated by LLM")); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + async models() { + return webllm.prebuiltAppConfig.model_list.map((record) => ({ + name: record.model_id, + available: true, + provider: { + id: "huggingface", + providerName: "huggingface", + providerType: "huggingface", + }, + })); + } +} diff --git a/app/components/chat.tsx b/app/components/chat.tsx index c8a79870..990cbe60 100644 --- a/app/components/chat.tsx +++ b/app/components/chat.tsx @@ -472,7 +472,6 @@ export function ChatActions(props: { props.setAttachImages([]); props.setUploading(false); } - // if current model is not available // switch to first available model const isUnavaliableModel = !models.some((m) => m.name === currentModel); @@ -670,6 +669,8 @@ function _Chat() { const config = useAppConfig(); const fontSize = config.fontSize; + const currentModel = 
chatStore.currentSession().mask.modelConfig.model; + const [showExport, setShowExport] = useState(false); const inputRef = useRef(null); @@ -949,9 +950,6 @@ function _Chat() { session.messages.at(0)?.content !== BOT_HELLO.content ) { const copiedHello = Object.assign({}, BOT_HELLO); - if (!accessStore.isAuthorized()) { - copiedHello.content = Locale.Error.Unauthorized; - } context.push(copiedHello); } diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx index 3b1f5e75..95706e31 100644 --- a/app/components/emoji.tsx +++ b/app/components/emoji.tsx @@ -6,8 +6,7 @@ import EmojiPicker, { import { ModelType } from "../store"; -import BotIcon from "../icons/bot.svg"; -import BlackBotIcon from "../icons/black-bot.svg"; +import MlcIcon from "../icons/mlc.svg"; export function getEmojiUrl(unified: string, style: EmojiStyle) { // Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis @@ -35,12 +34,8 @@ export function AvatarPicker(props: { export function Avatar(props: { model?: ModelType; avatar?: string }) { if (props.model) { return ( -
- {props.model?.startsWith("gpt-4") ? ( - - ) : ( - - )} +
+
); } diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx index 20e240d9..4c4d58f0 100644 --- a/app/components/exporter.tsx +++ b/app/components/exporter.tsx @@ -23,7 +23,7 @@ import CopyIcon from "../icons/copy.svg"; import LoadingIcon from "../icons/three-dots.svg"; import ChatGptIcon from "../icons/chatgpt.png"; import ShareIcon from "../icons/share.svg"; -import BotIcon from "../icons/bot.png"; +import MlcIcon from "../icons/mlc.png"; import DownloadIcon from "../icons/download.svg"; import { useEffect, useMemo, useRef, useState } from "react"; @@ -313,15 +313,7 @@ export function PreviewActions(props: { const onRenderMsgs = (msgs: ChatMessage[]) => { setShouldExport(false); - var api: ClientApi; - if (config.modelConfig.model.startsWith("gemini")) { - api = new ClientApi(ModelProvider.GeminiPro); - } else if (identifyDefaultClaudeModel(config.modelConfig.model)) { - api = new ClientApi(ModelProvider.Claude); - } else { - api = new ClientApi(ModelProvider.GPT); - } - + var api: ClientApi = new ClientApi(); api .share(msgs) .then((res) => { @@ -416,13 +408,9 @@ export function PreviewActions(props: { function ExportAvatar(props: { avatar: string }) { if (props.avatar === DEFAULT_MASK_AVATAR) { return ( - bot +
+ bot +
); } @@ -547,10 +535,8 @@ export function ImagePreviewer(props: {
-
NextChat
-
- github.com/Yidadaa/ChatGPT-Next-Web -
+
Web LLM Chat
+
chat.neet.coffee
& diff --git a/app/components/home.module.scss b/app/components/home.module.scss index b836d2be..97aa6b88 100644 --- a/app/components/home.module.scss +++ b/app/components/home.module.scss @@ -142,7 +142,15 @@ .sidebar-logo { position: absolute; right: 0; - bottom: 18px; + bottom: 20px; + height: calc(100% - 40px); + + svg { + width: 100%; + height: 100%; + + opacity: 0.7; + } } .sidebar-title { @@ -239,6 +247,11 @@ position: relative; display: flex; justify-content: center; + height: calc(100% - 22px); + + svg { + opacity: 0.7; + } } .sidebar-header-bar { @@ -333,6 +346,15 @@ align-items: center; height: 100%; width: 100%; + + .loading-content-logo { + position: relative; + width: 60px; + + svg { + width: 100%; + } + } } .rtl-screen { diff --git a/app/components/home.tsx b/app/components/home.tsx index ffac64fd..263d244f 100644 --- a/app/components/home.tsx +++ b/app/components/home.tsx @@ -6,7 +6,7 @@ import { useState, useEffect } from "react"; import styles from "./home.module.scss"; -import BotIcon from "../icons/bot.svg"; +import MlcIcon from "../icons/mlc.svg"; import LoadingIcon from "../icons/three-dots.svg"; import { getCSSVar, useMobileScreen } from "../utils"; @@ -34,7 +34,11 @@ import { identifyDefaultClaudeModel } from "../utils/checkers"; export function Loading(props: { noLogo?: boolean }) { return (
- {!props.noLogo && } + {!props.noLogo && ( +
+ +
+ )}
); @@ -171,14 +175,7 @@ function Screen() { export function useLoadData() { const config = useAppConfig(); - var api: ClientApi; - if (config.modelConfig.model.startsWith("gemini")) { - api = new ClientApi(ModelProvider.GeminiPro); - } else if (identifyDefaultClaudeModel(config.modelConfig.model)) { - api = new ClientApi(ModelProvider.Claude); - } else { - api = new ClientApi(ModelProvider.GPT); - } + var api: ClientApi = new ClientApi(); useEffect(() => { (async () => { const models = await api.llm.models(); diff --git a/app/components/sidebar.tsx b/app/components/sidebar.tsx index 69b2e71f..4999d8e6 100644 --- a/app/components/sidebar.tsx +++ b/app/components/sidebar.tsx @@ -5,7 +5,7 @@ import styles from "./home.module.scss"; import { IconButton } from "./button"; import SettingsIcon from "../icons/settings.svg"; import GithubIcon from "../icons/github.svg"; -import ChatGptIcon from "../icons/chatgpt.svg"; +import MlcIcon from "../icons/mlc.svg"; import AddIcon from "../icons/add.svg"; import CloseIcon from "../icons/close.svg"; import DeleteIcon from "../icons/delete.svg"; @@ -155,13 +155,13 @@ export function SideBar(props: { className?: string }) { >
- NextChat + Web LLM Chat
- Build your own AI assistant. + Run LLM inside the browser.
- +
diff --git a/app/constant.ts b/app/constant.ts index a3d9c206..9472a345 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -1,3 +1,5 @@ +import { availableParallelism } from "os"; + export const OWNER = "Yidadaa"; export const REPO = "ChatGPT-Next-Web"; export const REPO_URL = `https://github.com/${OWNER}/${REPO}`; @@ -76,6 +78,7 @@ export enum ModelProvider { GPT = "GPT", GeminiPro = "GeminiPro", Claude = "Claude", + WebLLM = "WebLLM", } export const Anthropic = { @@ -164,6 +167,15 @@ const anthropicModels = [ ]; export const DEFAULT_MODELS = [ + { + name: "Web LLM", + available: true, + provider: { + id: "mlc-ai", + providerName: "MLC AI", + providerType: "mlc-ai", + }, + }, ...openaiModels.map((name) => ({ name, available: true, diff --git a/app/icons/bot.png b/app/icons/bot.png deleted file mode 100644 index 80be63bf..00000000 Binary files a/app/icons/bot.png and /dev/null differ diff --git a/app/icons/mlc.png b/app/icons/mlc.png new file mode 100644 index 00000000..28d3b7db Binary files /dev/null and b/app/icons/mlc.png differ diff --git a/app/icons/mlc.svg b/app/icons/mlc.svg new file mode 100644 index 00000000..21ccbd0f --- /dev/null +++ b/app/icons/mlc.svg @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/app/store/chat.ts b/app/store/chat.ts index a5412eaa..edd1ac65 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -363,14 +363,7 @@ export const useChatStore = createPersistStore( ]); }); - var api: ClientApi; - if (modelConfig.model.startsWith("gemini")) { - api = new ClientApi(ModelProvider.GeminiPro); - } else if (identifyDefaultClaudeModel(modelConfig.model)) { - api = new ClientApi(ModelProvider.Claude); - } else { - api = new ClientApi(ModelProvider.GPT); - } + var api: ClientApi = new ClientApi(); // make request api.llm.chat({ @@ -549,14 +542,7 @@ export const useChatStore = createPersistStore( const session = get().currentSession(); const modelConfig = session.mask.modelConfig; - var api: ClientApi; - if 
(modelConfig.model.startsWith("gemini")) { - api = new ClientApi(ModelProvider.GeminiPro); - } else if (identifyDefaultClaudeModel(modelConfig.model)) { - api = new ClientApi(ModelProvider.Claude); - } else { - api = new ClientApi(ModelProvider.GPT); - } + var api: ClientApi = new ClientApi(); // remove error messages if any const messages = session.messages; diff --git a/app/store/update.ts b/app/store/update.ts index 7253caff..2e4d80d0 100644 --- a/app/store/update.ts +++ b/app/store/update.ts @@ -143,7 +143,7 @@ export const useUpdateStore = createPersistStore( })); try { - const api = new ClientApi(ModelProvider.GPT); + var api: ClientApi = new ClientApi(); const usage = await api.llm.usage(); if (usage) { diff --git a/app/styles/globals.scss b/app/styles/globals.scss index 20792cda..67868106 100644 --- a/app/styles/globals.scss +++ b/app/styles/globals.scss @@ -344,6 +344,26 @@ pre { } } +.bot-avatar { + height: 30px; + min-height: 30px; + width: 30px; + min-width: 30px; + display: flex; + align-items: center; + justify-content: center; + border: var(--border-in-light); + box-shadow: var(--card-shadow); + border-radius: 11px; + + svg { + height: 16px; + min-height: 16px; + width: 16px; + min-width: 16px; + } +} + .user-avatar { height: 30px; min-height: 30px; diff --git a/app/utils/hooks.ts b/app/utils/hooks.ts index 55d5d4fc..4c9c6e30 100644 --- a/app/utils/hooks.ts +++ b/app/utils/hooks.ts @@ -11,7 +11,12 @@ export function useAllModels() { [configStore.customModels, accessStore.customModels].join(","), accessStore.defaultModel, ); - }, [accessStore.customModels, configStore.customModels, configStore.models]); + }, [ + accessStore.customModels, + configStore.customModels, + configStore.models, + accessStore.defaultModel, + ]); return models; } diff --git a/app/utils/store.ts b/app/utils/store.ts index 684a1911..e2433dd4 100644 --- a/app/utils/store.ts +++ b/app/utils/store.ts @@ -1,3 +1,5 @@ +import zustymiddleware from "zustymiddlewarets"; + import { create 
} from "zustand"; import { combine, persist } from "zustand/middleware"; import { Updater } from "../typing"; @@ -32,33 +34,33 @@ export function createPersistStore( persistOptions: SecondParam>>, ) { return create( - persist( - combine( - { - ...state, - lastUpdateTime: 0, - }, - (set, get) => { - return { - ...methods(set, get as any), + // persist( + combine( + { + ...state, + lastUpdateTime: 0, + }, + (set, get) => { + return { + ...methods(set, get as any), - markUpdate() { - set({ lastUpdateTime: Date.now() } as Partial< - T & M & MakeUpdater - >); - }, - update(updater) { - const state = deepClone(get()); - updater(state); - set({ - ...state, - lastUpdateTime: Date.now(), - }); - }, - } as M & MakeUpdater; - }, - ), - persistOptions as any, + markUpdate() { + set({ lastUpdateTime: Date.now() } as Partial< + T & M & MakeUpdater + >); + }, + update(updater) { + const state = deepClone(get()); + updater(state); + set({ + ...state, + lastUpdateTime: Date.now(), + }); + }, + } as M & MakeUpdater; + }, ), + // persistOptions as any, + // ), ); } diff --git a/docs/cloudflare-pages-cn.md b/docs/cloudflare-pages-cn.md deleted file mode 100644 index 137bb9dc..00000000 --- a/docs/cloudflare-pages-cn.md +++ /dev/null @@ -1,42 +0,0 @@ -# Cloudflare Pages 部署指南 - -## 如何新建项目 - -在 Github 上 fork 本项目,然后登录到 dash.cloudflare.com 并进入 Pages。 - -1. 点击 "Create a project"。 -2. 选择 "Connect to Git"。 -3. 关联 Cloudflare Pages 和你的 GitHub 账号。 -4. 选中你 fork 的此项目。 -5. 点击 "Begin setup"。 -6. 对于 "Project name" 和 "Production branch",可以使用默认值,也可以根据需要进行更改。 -7. 在 "Build Settings" 中,选择 "Framework presets" 选项并选择 "Next.js"。 -8. 由于 node:buffer 的 bug,暂时不要使用默认的 "Build command"。请使用以下命令: - ``` - npx @cloudflare/next-on-pages@1.5.0 - ``` -9. 对于 "Build output directory",使用默认值并且不要修改。 -10. 不要修改 "Root Directory"。 -11. 
对于 "Environment variables",点击 ">" 然后点击 "Add variable"。按照以下信息填写: - - - `NODE_VERSION=20.1` - - `NEXT_TELEMETRY_DISABLE=1` - - `OPENAI_API_KEY=你自己的API Key` - - `YARN_VERSION=1.22.19` - - `PHP_VERSION=7.4` - - 根据实际需要,可以选择填写以下选项: - - - `CODE= 可选填,访问密码,可以使用逗号隔开多个密码` - - `OPENAI_ORG_ID= 可选填,指定 OpenAI 中的组织 ID` - - `HIDE_USER_API_KEY=1 可选,不让用户自行填入 API Key` - - `DISABLE_GPT4=1 可选,不让用户使用 GPT-4` - - `ENABLE_BALANCE_QUERY=1 可选,启用余额查询功能` - - `DISABLE_FAST_LINK=1 可选,禁用从链接解析预制设置` - -12. 点击 "Save and Deploy"。 -13. 点击 "Cancel deployment",因为需要填写 Compatibility flags。 -14. 前往 "Build settings"、"Functions",找到 "Compatibility flags"。 -15. 在 "Configure Production compatibility flag" 和 "Configure Preview compatibility flag" 中填写 "nodejs_compat"。 -16. 前往 "Deployments",点击 "Retry deployment"。 -17. Enjoy. diff --git a/docs/cloudflare-pages-en.md b/docs/cloudflare-pages-en.md deleted file mode 100644 index c5d55043..00000000 --- a/docs/cloudflare-pages-en.md +++ /dev/null @@ -1,43 +0,0 @@ -# Cloudflare Pages Deployment Guide - -## How to create a new project - -Fork this project on GitHub, then log in to dash.cloudflare.com and go to Pages. - -1. Click "Create a project". -2. Choose "Connect to Git". -3. Connect Cloudflare Pages to your GitHub account. -4. Select the forked project. -5. Click "Begin setup". -6. For "Project name" and "Production branch", use the default values or change them as needed. -7. In "Build Settings", choose the "Framework presets" option and select "Next.js". -8. Do not use the default "Build command" due to a node:buffer bug. Instead, use the following command: - ``` - npx @cloudflare/next-on-pages --experimental-minify - ``` -9. For "Build output directory", use the default value and do not modify it. -10. Do not modify "Root Directory". -11. For "Environment variables", click ">" and then "Add variable". 
Fill in the following information: - - - `NODE_VERSION=20.1` - - `NEXT_TELEMETRY_DISABLE=1` - - `OPENAI_API_KEY=your_own_API_key` - - `YARN_VERSION=1.22.19` - - `PHP_VERSION=7.4` - - Optionally fill in the following based on your needs: - - - `CODE= Optional, access passwords, multiple passwords can be separated by commas` - - `OPENAI_ORG_ID= Optional, specify the organization ID in OpenAI` - - `HIDE_USER_API_KEY=1 Optional, do not allow users to enter their own API key` - - `DISABLE_GPT4=1 Optional, do not allow users to use GPT-4` - - `ENABLE_BALANCE_QUERY=1 Optional, allow users to query balance` - - `DISABLE_FAST_LINK=1 Optional, disable parse settings from url` - - `OPENAI_SB=1 Optional,use the third-party OpenAI-SB API` - -12. Click "Save and Deploy". -13. Click "Cancel deployment" because you need to fill in Compatibility flags. -14. Go to "Build settings", "Functions", and find "Compatibility flags". -15. Fill in "nodejs_compat" for both "Configure Production compatibility flag" and "Configure Preview compatibility flag". -16. Go to "Deployments" and click "Retry deployment". -17. Enjoy. diff --git a/docs/cloudflare-pages-es.md b/docs/cloudflare-pages-es.md deleted file mode 100644 index d9365ec2..00000000 --- a/docs/cloudflare-pages-es.md +++ /dev/null @@ -1,37 +0,0 @@ -# Guía de implementación de Cloudflare Pages - -## Cómo crear un nuevo proyecto - -Bifurca el proyecto en Github, luego inicia sesión en dash.cloudflare.com y ve a Pages. - -1. Haga clic en "Crear un proyecto". -2. Selecciona Conectar a Git. -3. Vincula páginas de Cloudflare a tu cuenta de GitHub. -4. Seleccione este proyecto que bifurcó. -5. Haga clic en "Comenzar configuración". -6. Para "Nombre del proyecto" y "Rama de producción", puede utilizar los valores predeterminados o cambiarlos según sea necesario. -7. En Configuración de compilación, seleccione la opción Ajustes preestablecidos de Framework y seleccione Siguiente.js. -8. 
Debido a los errores de node:buffer, no use el "comando Construir" predeterminado por ahora. Utilice el siguiente comando: - npx https://prerelease-registry.devprod.cloudflare.dev/next-on-pages/runs/4930842298/npm-package-next-on-pages-230 --experimental-minify -9. Para "Generar directorio de salida", utilice los valores predeterminados y no los modifique. -10. No modifique el "Directorio raíz". -11. Para "Variables de entorno", haga clic en ">" y luego haga clic en "Agregar variable". Rellene la siguiente información: - - * `NODE_VERSION=20.1` - * `NEXT_TELEMETRY_DISABLE=1` - * `OPENAI_API_KEY=你自己的API Key` - * `YARN_VERSION=1.22.19` - * `PHP_VERSION=7.4` - - Dependiendo de sus necesidades reales, puede completar opcionalmente las siguientes opciones: - - * `CODE= 可选填,访问密码,可以使用逗号隔开多个密码` - * `OPENAI_ORG_ID= 可选填,指定 OpenAI 中的组织 ID` - * `HIDE_USER_API_KEY=1 可选,不让用户自行填入 API Key` - * `DISABLE_GPT4=1 可选,不让用户使用 GPT-4` -12. Haga clic en "Guardar e implementar". -13. Haga clic en "Cancelar implementación" porque necesita rellenar los indicadores de compatibilidad. -14. Vaya a "Configuración de compilación", "Funciones" y busque "Indicadores de compatibilidad". -15. Rellene "nodejs_compat" en "Configurar indicador de compatibilidad de producción" y "Configurar indicador de compatibilidad de vista previa". -16. Vaya a "Implementaciones" y haga clic en "Reintentar implementación". -17. Disfrutar. diff --git a/docs/cloudflare-pages-ja.md b/docs/cloudflare-pages-ja.md deleted file mode 100644 index 6409a934..00000000 --- a/docs/cloudflare-pages-ja.md +++ /dev/null @@ -1,38 +0,0 @@ -# Cloudflare Pages 導入ガイド - -## 新規プロジェクトの作成方法 -GitHub でこのプロジェクトをフォークし、dash.cloudflare.com にログインして Pages にアクセスします。 - -1. "Create a project" をクリックする。 -2. "Connect to Git" を選択する。 -3. Cloudflare Pages を GitHub アカウントに接続します。 -4. フォークしたプロジェクトを選択します。 -5. "Begin setup" をクリックする。 -6. "Project name" と "Production branch" はデフォルト値を使用するか、必要に応じて変更してください。 -7. 
"Build Settings" で、"Framework presets" オプションを選択し、"Next.js" を選択します。 -8. node:buffer のバグのため、デフォルトの "Build command" は使用しないでください。代わりに、以下のコマンドを使用してください: - ``` - npx https://prerelease-registry.devprod.cloudflare.dev/next-on-pages/runs/4930842298/npm-package-next-on-pages-230 --experimental-minify - ``` -9. "Build output directory" はデフォルト値を使用し、変更しない。 -10. "Root Directory" を変更しない。 -11. "Environment variables" は、">" をクリックし、"Add variable" をクリックします。そして以下の情報を入力します: - - `NODE_VERSION=20.1` - - `NEXT_TELEMETRY_DISABLE=1` - - `OPENAI_API_KEY=your_own_API_key` - - `YARN_VERSION=1.22.19` - - `PHP_VERSION=7.4` - - 必要に応じて、以下の項目を入力してください: - - - `CODE= Optional, access passwords, multiple passwords can be separated by commas` - - `OPENAI_ORG_ID= Optional, specify the organization ID in OpenAI` - - `HIDE_USER_API_KEY=1 Optional, do not allow users to enter their own API key` - - `DISABLE_GPT4=1 Optional, do not allow users to use GPT-4` - -12. "Save and Deploy" をクリックする。 -13. 互換性フラグを記入する必要があるため、"Cancel deployment" をクリックする。 -14. "Build settings" の "Functions" から "Compatibility flags" を見つける。 -15. "Configure Production compatibility flag" と "Configure Preview compatibility flag" の両方に "nodejs_compat "を記入する。 -16. "Deployments" に移動し、"Retry deployment" をクリックします。 -17. お楽しみください。 diff --git a/docs/cloudflare-pages-ko.md b/docs/cloudflare-pages-ko.md deleted file mode 100644 index 68a96232..00000000 --- a/docs/cloudflare-pages-ko.md +++ /dev/null @@ -1,39 +0,0 @@ -## Cloudflare 페이지 배포 가이드 - -## 새 프로젝트를 만드는 방법 -이 프로젝트를 Github에서 포크한 다음 dash.cloudflare.com에 로그인하고 페이지로 이동합니다. - -1. "프로젝트 만들기"를 클릭합니다. -2. "Git에 연결"을 선택합니다. -3. Cloudflare 페이지를 GitHub 계정과 연결합니다. -4. 포크한 프로젝트를 선택합니다. -5. "설정 시작"을 클릭합니다. -6. "프로젝트 이름" 및 "프로덕션 브랜치"의 기본값을 사용하거나 필요에 따라 변경합니다. -7. "빌드 설정"에서 "프레임워크 프리셋" 옵션을 선택하고 "Next.js"를 선택합니다. -8. node:buffer 버그로 인해 지금은 기본 "빌드 명령어"를 사용하지 마세요. 
다음 명령을 사용하세요: - `` - npx https://prerelease-registry.devprod.cloudflare.dev/next-on-pages/runs/4930842298/npm-package-next-on-pages-230 --experimental- minify - ``` -9. "빌드 출력 디렉토리"의 경우 기본값을 사용하고 수정하지 마십시오. -10. "루트 디렉토리"는 수정하지 마십시오. -11. "환경 변수"의 경우 ">"를 클릭한 다음 "변수 추가"를 클릭합니다. 다음에 따라 정보를 입력합니다: - - - node_version=20.1`. - - next_telemetry_disable=1`. - - `OPENAI_API_KEY=자신의 API 키` - - ``yarn_version=1.22.19`` - - ``php_version=7.4``. - - 실제 필요에 따라 다음 옵션을 선택적으로 입력합니다: - - - `CODE= 선택적으로 액세스 비밀번호를 입력하며 쉼표를 사용하여 여러 비밀번호를 구분할 수 있습니다`. - - `OPENAI_ORG_ID= 선택 사항, OpenAI에서 조직 ID 지정` - - `HIDE_USER_API_KEY=1 선택 사항, 사용자가 API 키를 입력하지 못하도록 합니다. - - `DISABLE_GPT4=1 옵션, 사용자가 GPT-4를 사용하지 못하도록 설정` 12. - -12. "저장 후 배포"를 클릭합니다. -13. 호환성 플래그를 입력해야 하므로 "배포 취소"를 클릭합니다. -14. "빌드 설정", "기능"으로 이동하여 "호환성 플래그"를 찾습니다. -"프로덕션 호환성 플래그 구성" 및 "프리뷰 호환성 플래그 구성"에서 "nodejs_compat"를 입력합니다. -16. "배포"로 이동하여 "배포 다시 시도"를 클릭합니다. -17. 즐기세요! \ No newline at end of file diff --git a/docs/faq-cn.md b/docs/faq-cn.md deleted file mode 100644 index 06a96852..00000000 --- a/docs/faq-cn.md +++ /dev/null @@ -1,228 +0,0 @@ -# 常见问题 - -## 如何快速获得帮助? - -1. 询问 ChatGPT / Bing / 百度 / Google 等。 -2. 询问网友。请提供问题的背景信息和碰到问题的详细描述。高质量的提问容易获得有用的答案。 - -# 部署相关问题 - -各种部署方式详细教程参考:https://rptzik3toh.feishu.cn/docx/XtrdduHwXoSCGIxeFLlcEPsdn8b - -## 为什么 Docker 部署版本一直提示更新 - -Docker 版本相当于稳定版,latest Docker 总是与 latest release version 一致,目前我们的发版频率是一到两天发一次,所以 Docker 版本会总是落后最新的提交一到两天,这在预期内。 - -## 如何部署在 Vercel 上 - -1. 注册 Github 账号,fork 该项目 -2. 注册 Vercel(需手机验证,可以用中国号码),连接你的 Github 账户 -3. Vercel 上新建项目,选择你在 Github fork 的项目,按需填写环境变量,开始部署。部署之后,你可以在有梯子的条件下,通过 vercel 提供的域名访问你的项目。 -4. 如果需要在国内无墙访问:在你的域名管理网站,添加一条域名的 CNAME 记录,指向 cname.vercel-dns.com。之后在 Vercel 上设置你的域名访问。 - -## 如何修改 Vercel 环境变量 - -- 进入 vercel 的控制台页面; -- 选中你的 NextChat 项目; -- 点击页面头部的 Settings 选项; -- 找到侧边栏的 Environment Variables 选项; -- 修改对应的值即可。 - -## 环境变量 CODE 是什么?必须设置吗? - -这是你自定义的访问密码,你可以选择: - -1. 不设置,删除该环境变量即可。谨慎:此时任何人可以访问你的项目。 -2. 
部署项目时,设置环境变量 CODE(支持多个密码逗号分隔)。设置访问密码后,用户需要在设置界面输入访问密码才可以使用。参见[相关说明](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E9%85%8D%E7%BD%AE%E9%A1%B5%E9%9D%A2%E8%AE%BF%E9%97%AE%E5%AF%86%E7%A0%81) - -## 为什么我部署的版本没有流式响应 - -> 相关讨论:[#386](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/386) - -如果你使用 nginx 反向代理,需要在配置文件中增加下列代码: - -``` -# 不缓存,支持流式输出 -proxy_cache off; # 关闭缓存 -proxy_buffering off; # 关闭代理缓冲 -chunked_transfer_encoding on; # 开启分块传输编码 -tcp_nopush on; # 开启TCP NOPUSH选项,禁止Nagle算法 -tcp_nodelay on; # 开启TCP NODELAY选项,禁止延迟ACK算法 -keepalive_timeout 300; # 设定keep-alive超时时间为65秒 -``` - -如果你是在 netlify 部署,此问题依然等待解决,请耐心等待。 - -## 我部署好了,但是无法访问 - -请检查排除以下问题: - -- 服务启动了吗? -- 端口正确映射了吗? -- 防火墙开放端口了吗? -- 到服务器的路由通吗? -- 域名正确解析了吗? - -## 什么是代理,如何使用? - -由于 OpenAI 的 IP 限制,中国和其他一些国家/地区无法直接连接 OpenAI API,需要通过代理。你可以使用代理服务器(正向代理),或者已经设置好的 OpenAI API 反向代理。 - -- 正向代理例子:科学上网梯子。docker 部署的情况下,设置环境变量 HTTP_PROXY 为你的代理地址(例如:10.10.10.10:8002)。 -- 反向代理例子:可以用别人搭建的代理地址,或者通过 Cloudflare 免费设置。设置项目环境变量 BASE_URL 为你的代理地址。 - -## 国内服务器可以部署吗? - -可以但需要解决的问题: - -- 需要代理才能连接 github 和 openAI 等网站; -- 国内服务器要设置域名解析的话需要备案; -- 国内政策限制代理访问外网/ChatGPT 相关应用,可能被封。 - -## 为什么 docker 部署后出现网络错误? - -详见讨论:https://github.com/Yidadaa/ChatGPT-Next-Web/issues/1569 - -# 使用相关问题 - -## 为什么会一直提示“出错了,稍后重试吧” - -原因可能有很多,请依次排查: - -- 请先检查你的代码版本是否为最新版本,更新到最新版本后重试; -- 请检查 api key 是否设置正确,环境变量名称必须为全大写加下划线; -- 请检查 api key 是否可用; -- 如果经历了上述步骤依旧无法确定问题,请在 issue 区提交一个新 issue,并附上 vercel 的 runtime log 或者 docker 运行时的 log。 - -## 为什么 ChatGPT 的回复会乱码 - -设置界面 - 模型设置项中,有一项为 `temperature`,如果此值大于 1,那么就有可能造成回复乱码,将其调回 1 以内即可。 - -## 使用时提示“现在是未授权状态,请在设置页输入访问密码”? - -项目通过环境变量 CODE 设置了访问密码。第一次使用时,需要到设置中,输入访问码才可以使用。 - -## 使用时提示 "You exceeded your current quota, ..." - -API KEY 有问题。余额不足。 - -## 使用时遇到 "Error: Loading CSS chunk xxx failed..." 
- -为了减少首屏白屏时间,默认启用了分块编译,技术原理见下: - -- https://nextjs.org/docs/app/building-your-application/optimizing/lazy-loading -- https://stackoverflow.com/questions/55993890/how-can-i-disable-chunkcode-splitting-with-webpack4 -- https://github.com/vercel/next.js/issues/38507 -- https://stackoverflow.com/questions/55993890/how-can-i-disable-chunkcode-splitting-with-webpack4 - -然而 NextJS 的兼容性比较差,在比较老的浏览器上会导致此报错,可以在编译时关闭分块编译。 - -对于 Vercel 平台,在环境变量中增加 `DISABLE_CHUNK=1`,然后重新部署即可; -对于自行编译部署的项目,在构建时使用 `DISABLE_CHUNK=1 yarn build` 构建即可; -对于 Docker 用户,由于 Docker 打包时已经构建完毕,所以暂不支持关闭此特性。 - -注意,关闭此特性后,用户会在第一次访问网站时加载所有资源,如果用户网络状况较差,可能会引起较长时间的白屏,从而影响用户使用体验,所以自行考虑。 - -## 使用时遇到 "NotFoundError: Failed to execute 'removeChild' on 'Node': The node...." -请关闭浏览器自身的自动翻译功能,并关闭所有自动翻译插件。 - -# 网络服务相关问题 - -## Cloudflare 是什么? - -Cloudflare(CF)是一个提供 CDN,域名管理,静态页面托管,边缘计算函数部署等的网络服务供应商。常见的用途:购买和/或托管你的域名(解析、动态域名等),给你的服务器套上 CDN(可以隐藏 ip 免被墙),部署网站(CF Pages)。CF 免费提供大多数服务。 - -## Vercel 是什么? - -Vercel 是一个全球化的云平台,旨在帮助开发人员更快地构建和部署现代 Web 应用程序。本项目以及许多 Web 应用可以一键免费部署在 Vercel 上。无需懂代码,无需懂 linux,无需服务器,无需付费,无需设置 OpenAI API 代理。缺点是需要绑定域名才可以在国内无墙访问。 - -## 如何获得一个域名? - -1. 自己去域名供应商处注册,国外有 Namesilo(支持支付宝), Cloudflare 等等,国内有万网等等; -2. 免费的域名供应商:eu.org(二级域名)等; -3. 问朋友要一个免费的二级域名。 - -## 如何获得一台服务器 - -- 国外服务器供应商举例:亚马逊云,谷歌云,Vultr,Bandwagon,Hostdare,等等; - 国外服务器事项:服务器线路影响国内访问速度,推荐 CN2 GIA 和 CN2 线路的服务器。若服务器在国内访问困难(丢包严重等),可以尝试套 CDN(Cloudflare 等供应商)。 -- 国内服务器供应商:阿里云,腾讯等; - 国内服务器事项:解析域名需要备案;国内服务器带宽较贵;访问国外网站(Github, openAI 等)需要代理。 - -## 什么情况下服务器要备案? - -在中国大陆经营的网站按监管要求需要备案。实际操作中,服务器位于国内且有域名解析的情况下,服务器供应商会执行监管的备案要求,否则会关停服务。通常的规则如下: -|服务器位置|域名供应商|是否需要备案| -|---|---|---| -|国内|国内|是| -|国内|国外|是| -|国外|国外|否| -|国外|国内|通常否| - -换服务器供应商后需要转备案。 - -# OpenAI 相关问题 - -## 如何注册 OpenAI 账号? - -去 chat.openai.com 注册。你需要: - -- 一个良好的梯子(OpenAI 支持地区原生 IP 地址) -- 一个支持的邮箱(例如 Gmail 或者公司/学校邮箱,非 Outlook 或 qq 邮箱) -- 接收短信认证的方式(例如 SMS-activate 网站) - -## 怎么开通 OpenAI API? 怎么查询 API 余额? 
- -官网地址(需梯子):https://platform.openai.com/account/usage -有网友搭建了无需梯子的余额查询代理,请询问网友获取。请鉴别来源是否可靠,以免 API Key 泄露。 - -## 我新注册的 OpenAI 账号怎么没有 API 余额? - -(4 月 6 日更新)新注册账号通常会在 24 小时后显示 API 余额。当前新注册账号赠送 5 美元余额。 - -## 如何给 OpenAI API 充值? - -OpenAI 只接受指定地区的信用卡(中国信用卡无法使用)。一些途径举例: - -1. Depay 虚拟信用卡 -2. 申请国外信用卡 -3. 网上找人代充 - -## 如何使用 GPT-4 的 API 访问? - -- GPT-4 的 API 访问需要单独申请。到以下地址填写你的信息进入申请队列 waitlist(准备好你的 OpenAI 组织 ID):https://openai.com/waitlist/gpt-4-api - 之后等待邮件消息。 -- 开通 ChatGPT Plus 不代表有 GPT-4 权限,两者毫无关系。 - -## 如何使用 Azure OpenAI 接口 - -请参考:[#371](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/371) - -## 为什么我的 Token 消耗得这么快? - -> 相关讨论:[#518](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518) - -- 如果你有 GPT 4 的权限,并且日常在使用 GPT 4 api,那么由于 GPT 4 价格是 GPT 3.5 的 15 倍左右,你的账单金额会急速膨胀; -- 如果你在使用 GPT 3.5,并且使用频率并不高,仍然发现自己的账单金额在飞快增加,那么请马上按照以下步骤排查: - - 去 openai 官网查看你的 api key 消费记录,如果你的 token 每小时都有消费,并且每次都消耗了上万 token,那你的 key 一定是泄露了,请立即删除重新生成。**不要在乱七八糟的网站上查余额。** - - 如果你的密码设置很短,比如 5 位以内的字母,那么爆破成本是非常低的,建议你搜索一下 docker 的日志记录,确认是否有人大量尝试了密码组合,关键字:got access code -- 通过上述两个方法就可以定位到你的 token 被快速消耗的原因: - - 如果 openai 消费记录异常,但是 docker 日志没有问题,那么说明是 api key 泄露; - - 如果 docker 日志发现大量 got access code 爆破记录,那么就是密码被爆破了。 - -## API 是怎么计费的? - -OpenAI 网站计费说明:https://openai.com/pricing#language-models -OpenAI 根据 token 数收费,1000 个 token 通常可代表 750 个英文单词,或 500 个汉字。输入(Prompt)和输出(Completion)分别统计费用。 -|模型|用户输入(Prompt)计费|模型输出(Completion)计费|每次交互最大 token 数| -|----|----|----|----| -|gpt-3.5-turbo|$0.0015 / 1 千 tokens|$0.002 / 1 千 tokens|4096| -|gpt-3.5-turbo-16K|$0.003 / 1 千 tokens|$0.004 / 1 千 tokens|16384| -|gpt-4|$0.03 / 1 千 tokens|$0.06 / 1 千 tokens|8192| -|gpt-4-32K|$0.06 / 1 千 tokens|$0.12 / 1 千 tokens|32768| - -## gpt-3.5-turbo 和 gpt3.5-turbo-0301(或者 gpt3.5-turbo-mmdd)模型有什么区别? 
- -官方文档说明:https://platform.openai.com/docs/models/gpt-3-5 - -- gpt-3.5-turbo 是最新的模型,会不断得到更新。 -- gpt-3.5-turbo-0301 是 3 月 1 日定格的模型快照,不会变化,预期 3 个月后被新快照替代。 diff --git a/docs/faq-en.md b/docs/faq-en.md deleted file mode 100644 index 31b2d58e..00000000 --- a/docs/faq-en.md +++ /dev/null @@ -1,191 +0,0 @@ -# Frequently Asked Questions - -## How to get help quickly? - -1. Ask ChatGPT / Bing / Baidu / Google, etc. -2. Ask online friends. Please provide background information and a detailed description of the problem. High-quality questions are more likely to get useful answers. - -# Deployment Related Questions - -## Why does the Docker deployment version always prompt for updates - -The Docker version is equivalent to the stable version, and the latest Docker is always consistent with the latest release version. Currently, our release frequency is once every one to two days, so the Docker version will always be one to two days behind the latest commit, which is expected. - -## How to deploy on Vercel - -1. Register a Github account and fork this project. -2. Register Vercel (mobile phone verification required, Chinese number can be used), and connect your Github account. -3. Create a new project on Vercel, select the project you forked on Github, fill in the required environment variables, and start deploying. After deployment, you can access your project through the domain provided by Vercel. (Requires proxy in mainland China) - -- If you need to access it directly in China: At your DNS provider, add a CNAME record for the domain name, pointing to cname.vercel-dns.com. Then set up your domain access on Vercel. - -## How to modify Vercel environment variables - -- Enter the Vercel console page; -- Select your chatgpt-next-web project; -- Click on the Settings option at the top of the page; -- Find the Environment Variables option in the sidebar; -- Modify the corresponding values as needed. - -## What is the environment variable CODE? Is it necessary to set it? 
- -This is your custom access password, you can choose: - -1. Do not set it, delete the environment variable. Be cautious: anyone can access your project at this time. -2. When deploying the project, set the environment variable CODE (supports multiple passwords, separated by commas). After setting the access password, users need to enter the access password in the settings page to use it. See [related instructions](https://github.com/Yidadaa/ChatGPT-Next-Web#access-password) - -## Why doesn't the version I deployed have streaming response - -> Related discussion: [#386](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/386) - -If you use nginx reverse proxy, you need to add the following code to the configuration file: - -``` -# No caching, support streaming output -proxy_cache off; # Turn off caching -proxy_buffering off; # Turn off proxy buffering -chunked_transfer_encoding on; # Turn on chunked transfer encoding -tcp_nopush on; # Turn on TCP NOPUSH option, disable Nagle algorithm -tcp_nodelay on; # Turn on TCP NODELAY option, disable delay ACK algorithm -keepalive_timeout 300; # Set keep-alive timeout to 65 seconds -``` - -If you are deploying on netlify, this issue is still waiting to be resolved, please be patient. - -## I've deployed, but it's not accessible - -Please check and troubleshoot the following issues: - -- Is the service started? -- Is the port correctly mapped? -- Is the firewall port open? -- Is the route to the server okay? -- Is the domain name resolved correctly? - -## You may encounter an "Error: Loading CSS chunk xxx failed..." - -To reduce the initial white screen time, Next.js enables chunking by default. 
You can find the technical details here: - -- https://nextjs.org/docs/app/building-your-application/optimizing/lazy-loading -- https://stackoverflow.com/questions/55993890/how-can-i-disable-chunkcode-splitting-with-webpack4 -- https://github.com/vercel/next.js/issues/38507 -- https://stackoverflow.com/questions/55993890/how-can-i-disable-chunkcode-splitting-with-webpack4 - -However, Next.js has limited compatibility with older browsers, which can result in this error. - -You can disable chunking during building. - -For Vercel platform, you can add `DISABLE_CHUNK=1` to the environment variables and redeploy. -For self-deployed projects, you can use `DISABLE_CHUNK=1 yarn build` during the build process. -For Docker users, as the build is already completed during packaging, disabling this feature is currently not supported. - -Note that when you disable this feature, all resources will be loaded on the user's first visit. This may result in a longer white screen time if the user has a poor network connection, affecting the user experience. Please consider this when making a decision. - -# Usage Related Questions - -## Why does it always prompt "An error occurred, please try again later" - -There could be many reasons, please check the following in order: - -- First, check if your code version is the latest version, update to the latest version and try again; -- Check if the api key is set correctly, the environment variable name must be uppercase with underscores; -- Check if the api key is available; -- If you still cannot determine the problem after going through the above steps, please submit a new issue in the issue area and attach the runtime log of vercel or the log of docker runtime. - -## Why does ChatGPT's reply get garbled - -In the settings page - model settings, there is an item called `temperature`. If this value is greater than 1, it may cause garbled replies. Adjust it back to within 1. 
- -## It prompts "Now it's unauthorized, please enter the access password on the settings page" when using? - -The project has set an access password through the environment variable CODE. When using it for the first time, you need to go to settings and enter the access code to use. - -## It prompts "You exceeded your current quota, ..." when using? - -The API KEY is problematic. Insufficient balance. - -## What is a proxy and how to use it? - -Due to IP restrictions of OpenAI, China and some other countries/regions cannot directly connect to OpenAI API and need to go through a proxy. You can use a proxy server (forward proxy) or a pre-configured OpenAI API reverse proxy. - -- Forward proxy example: VPN ladder. In the case of docker deployment, set the environment variable HTTP_PROXY to your proxy address (http://address:port). -- Reverse proxy example: You can use someone else's proxy address or set it up for free through Cloudflare. Set the project environment variable BASE_URL to your proxy address. - -## Can I deploy it on a server in China? - -It is possible but there are issues to be addressed: - -- Proxy is required to connect to websites such as Github and OpenAI; -- Domain name resolution requires filing for servers in China; -- Chinese policy restricts proxy access to foreign websites/ChatGPT-related applications, which may be blocked. - -# Network Service Related Questions - -## What is Cloudflare? - -Cloudflare (CF) is a network service provider offering CDN, domain management, static page hosting, edge computing function deployment, and more. Common use cases: purchase and/or host your domain (resolution, dynamic domain, etc.), apply CDN to your server (can hide IP to avoid being blocked), deploy websites (CF Pages). CF offers most services for free. - -## What is Vercel? - -Vercel is a global cloud platform designed to help developers build and deploy modern web applications more quickly. 
This project and many web applications can be deployed on Vercel with a single click for free. No need to understand code, Linux, have a server, pay, or set up an OpenAI API proxy. The downside is that you need to bind a domain name to access it without restrictions in China. - -## How to obtain a domain name? - -1. Register with a domain provider, such as Namesilo (supports Alipay) or Cloudflare for international providers, and Wanwang for domestic providers in China. -2. Free domain name providers: eu.org (second-level domain), etc. -3. Ask friends for a free second-level domain. - -## How to obtain a server - -- Examples of international server providers: Amazon Web Services, Google Cloud, Vultr, Bandwagon, Hostdare, etc. - International server considerations: Server lines affect access speed in China; CN2 GIA and CN2 lines are recommended. If the server has difficulty accessing in China (serious packet loss, etc.), you can try using a CDN (from providers like Cloudflare). -- Domestic server providers: Alibaba Cloud, Tencent, etc. - Domestic server considerations: Domain name resolution requires filing; domestic server bandwidth is relatively expensive; accessing foreign websites (Github, OpenAI, etc.) requires a proxy. - -# OpenAI-related Questions - -## How to register an OpenAI account? - -Go to chat.openai.com to register. You will need: - -- A good VPN (OpenAI only allows native IP addresses of supported regions) -- A supported email (e.g., Gmail or a company/school email, not Outlook or QQ email) -- A way to receive SMS verification (e.g., SMS-activate website) - -## How to activate OpenAI API? How to check API balance? - -Official website (requires VPN): https://platform.openai.com/account/usage -Some users have set up a proxy to check the balance without a VPN; ask online friends for access. Please verify the source is reliable to avoid API Key leakage. - -## Why doesn't my new OpenAI account have an API balance? 
- -(Updated April 6th) Newly registered accounts usually display API balance within 24 hours. New accounts are currently given a $5 balance. - -## How to recharge OpenAI API? - -OpenAI only accepts credit cards from designated regions (Chinese credit cards cannot be used). If the credit cards from your region is not supported, some options include: - -1. Depay virtual credit card -2. Apply for a foreign credit card -3. Find someone online to top up - -## How to access the GPT-4 API? - -(Updated April 6th) Access to the GPT-4 API requires a separate application. Go to the following address and enter your information to join the waitlist (prepare your OpenAI organization ID): https://openai.com/waitlist/gpt-4-api -Wait for email updates afterwards. - -## How to use the Azure OpenAI interface - -Please refer to: [#371](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/371) - -## Why is my Token consumed so fast? - -> Related discussion: [#518](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518) - -- If you have GPT-4 access and use GPT-4 API regularly, your bill will increase rapidly since GPT-4 pricing is about 15 times higher than GPT-3.5; -- If you are using GPT-3.5 and not using it frequently, but still find your bill increasing fast, please troubleshoot immediately using these steps: - - Check your API key consumption record on the OpenAI website; if your token is consumed every hour and each time consumes tens of thousands of tokens, your key must have been leaked. Please delete it and regenerate it immediately. **Do not check your balance on random websites.** - - If your password is short, such as 5 characters or fewer, the cost of brute-forcing is very low. It is recommended to search docker logs to confirm whether someone has tried a large number of password combinations. 
Keyword: got access code -- By following these two methods, you can locate the reason for your token's rapid consumption: - - If the OpenAI consumption record is abnormal but the Docker log has no issues, it means your API key has been leaked; - - If the Docker log shows a large number of got access code brute-force attempts, your password has been cracked. diff --git a/docs/faq-es.md b/docs/faq-es.md deleted file mode 100644 index 11214a68..00000000 --- a/docs/faq-es.md +++ /dev/null @@ -1,205 +0,0 @@ -# Preguntas frecuentes - -## ¿Cómo puedo obtener ayuda rápidamente? - -1. Pregunte a ChatGPT / Bing / Baidu / Google, etc. -2. Pregunte a los internautas. Sírvase proporcionar información general sobre el problema y una descripción detallada del problema encontrado. Las preguntas de alta calidad facilitan la obtención de respuestas útiles. - -# Problemas relacionados con la implementación - -Referencia tutorial detallada para varios métodos de implementación: https://rptzik3toh.feishu.cn/docx/XtrdduHwXoSCGIxeFLlcEPsdn8b - -## ¿Por qué la versión de implementación de Docker sigue solicitando actualizaciones? - -La versión de Docker es equivalente a la versión estable, la última versión de Docker es siempre la misma que la última versión de lanzamiento, y la frecuencia de lanzamiento actual es de uno a dos días, por lo que la versión de Docker siempre se retrasará con respecto a la última confirmación de uno a dos días, lo que se espera. - -## Cómo implementar en Vercel - -1. Regístrese para obtener una cuenta de Github y bifurque el proyecto -2. Regístrese en Vercel (se requiere verificación de teléfono móvil, puede usar un número chino) y conéctese a su cuenta de Github -3. Cree un nuevo proyecto en Vercel, seleccione el proyecto que bifurcó en Github, complete las variables de entorno según sea necesario e inicie la implementación. Después de la implementación, puede acceder a su proyecto a través del nombre de dominio proporcionado por Vercel con una escalera. -4. 
Si necesitas acceder sin muros en China: En tu sitio web de administración de dominios, agrega un registro CNAME para tu nombre de dominio que apunte a cname.vercel-dns.com. Después de eso, configure el acceso a su dominio en Vercel. - -## Cómo modificar las variables de entorno de Vercel - -* Vaya a la página de la consola de Vercel; -* Seleccione su siguiente proyecto web chatgpt; -* Haga clic en la opción Configuración en el encabezado de la página; -* Busque la opción Variables de entorno en la barra lateral; -* Modifique el valor correspondiente. - -## ¿Qué es la variable de entorno CODE? ¿Es obligatorio configurar? - -Esta es su contraseña de acceso personalizada, puede elegir: - -1. Si no es así, elimine la variable de entorno. Precaución: Cualquier persona puede acceder a tu proyecto en este momento. -2. Cuando implemente el proyecto, establezca la variable de entorno CODE (admite varias comas de contraseña separadas). Después de establecer la contraseña de acceso, debe ingresar la contraseña de acceso en la interfaz de configuración antes de poder usarla. Ver[Instrucciones relacionadas](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E9%85%8D%E7%BD%AE%E9%A1%B5%E9%9D%A2%E8%AE%BF%E9%97%AE%E5%AF%86%E7%A0%81) - -## ¿Por qué la versión que implementé no transmite respuestas? - -> Debates relacionados:[#386](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/386) - -Si utiliza el proxy inverso nginx, debe agregar el siguiente código al archivo de configuración: - - # 不缓存,支持流式输出 - proxy_cache off; # 关闭缓存 - proxy_buffering off; # 关闭代理缓冲 - chunked_transfer_encoding on; # 开启分块传输编码 - tcp_nopush on; # 开启TCP NOPUSH选项,禁止Nagle算法 - tcp_nodelay on; # 开启TCP NODELAY选项,禁止延迟ACK算法 - keepalive_timeout 300; # 设定keep-alive超时时间为65秒 - -Si está implementando en Netlify y este problema aún está pendiente de resolución, tenga paciencia. 
- -## Lo implementé, pero no puedo acceder a él - -Marque para descartar los siguientes problemas: - -* ¿Se ha iniciado el servicio? -* ¿Los puertos están asignados correctamente? -* ¿El firewall está abriendo puertos? -* ¿Es transitable la ruta al servidor? -* ¿Se resuelve correctamente el nombre de dominio? - -## ¿Qué es un proxy y cómo lo uso? - -Debido a las restricciones de IP de OpenAI, China y algunos otros países no pueden conectarse directamente a las API de OpenAI y necesitan pasar por un proxy. Puede usar un servidor proxy (proxy de reenvío) o un proxy inverso de API OpenAI ya configurado. - -* Ejemplo de agente positivo: escalera científica de Internet. En el caso de la implementación de Docker, establezca la variable de entorno HTTP_PROXY en su dirección proxy (por ejemplo: 10.10.10.10:8002). -* Ejemplo de proxy inverso: puede usar una dirección proxy creada por otra persona o configurarla de forma gratuita a través de Cloudflare. Establezca la variable de entorno del proyecto BASE_URL en su dirección proxy. - -## ¿Se pueden implementar servidores domésticos? - -Sí, pero hay que resolverlo: - -* Requiere un proxy para conectarse a sitios como GitHub y openAI; -* Si el servidor doméstico desea configurar la resolución de nombres de dominio, debe registrarse; -* Las políticas nacionales restringen el acceso proxy a las aplicaciones relacionadas con Internet/ChatGPT y pueden bloquearse. - -## ¿Por qué recibo un error de red después de la implementación de Docker? - -Ver Discusión: https://github.com/Yidadaa/ChatGPT-Next-Web/issues/1569 para más detalles - -# Problemas relacionados con el uso - -## ¿Por qué sigues diciendo "Algo salió mal, inténtalo de nuevo más tarde"? 
- -Puede haber muchas razones, por favor solucione los problemas en orden: - -* Compruebe primero si la versión del código es la última versión, actualice a la última versión e inténtelo de nuevo; -* Compruebe si la clave API está configurada correctamente y si el nombre de la variable de entorno debe estar en mayúsculas y subrayado; -* Compruebe si la clave API está disponible; -* Si aún no puede identificar el problema después de los pasos anteriores, envíe un nuevo problema en el campo de problema con el registro de tiempo de ejecución de Verbel o el registro de tiempo de ejecución de Docker. - -## ¿Por qué la respuesta de ChatGPT es confusa? - -Interfaz de configuración: uno de los elementos de configuración del modelo es `temperature`, si este valor es mayor que 1, entonces existe el riesgo de una respuesta confusa, simplemente vuelva a llamarlo a dentro de 1. - -## Al usarlo, aparece "Ahora en un estado no autorizado, ingrese la contraseña de acceso en la pantalla de configuración"? - -El proyecto establece la contraseña de acceso a través de la variable de entorno CODE. Cuando lo use por primera vez, debe ingresar el código de acceso en la configuración para usarlo. - -## Use el mensaje "Excedió su cuota actual, ..." - -Hay un problema con la API KEY. Saldo insuficiente. - -# Problemas relacionados con el servicio de red - -## ¿Qué es Cloudflare? - -Cloudflare (CF) es un proveedor de servicios de red que proporciona CDN, administración de nombres de dominio, alojamiento de páginas estáticas, implementación de funciones de computación perimetral y más. Usos comunes: comprar y/o alojar su nombre de dominio (resolución, nombre de dominio dinámico, etc.), poner un CDN en su servidor (puede ocultar la IP de la pared), desplegar un sitio web (CF Pages). CF ofrece la mayoría de los servicios de forma gratuita. - -## ¿Qué es Vercel? 
- -Vercel es una plataforma global en la nube diseñada para ayudar a los desarrolladores a crear e implementar aplicaciones web modernas más rápido. Este proyecto, junto con muchas aplicaciones web, se puede implementar en Vercel de forma gratuita con un solo clic. Sin código, sin Linux, sin servidores, sin tarifas, sin agente API OpenAI. La desventaja es que necesita vincular el nombre de dominio para poder acceder a él sin muros en China. - -## ¿Cómo obtengo un nombre de dominio? - -1. Vaya al proveedor de nombres de dominio para registrarse, hay Namesilo (soporte Alipay), Cloudflare, etc. en el extranjero, y hay Wanwang en China; -2. Proveedores de nombres de dominio gratuitos: eu.org (nombre de dominio de segundo nivel), etc.; -3. Pídale a un amigo un nombre de dominio de segundo nivel gratuito. - -## Cómo obtener un servidor - -* Ejemplos de proveedores de servidores extranjeros: Amazon Cloud, Google Cloud, Vultr, Bandwagon, Hostdare, etc. - Asuntos de servidores extranjeros: Las líneas de servidor afectan las velocidades de acceso nacional, se recomiendan los servidores de línea CN2 GIA y CN2. Si el servidor es de difícil acceso en China (pérdida grave de paquetes, etc.), puede intentar configurar un CDN (Cloudflare y otros proveedores). -* Proveedores de servidores nacionales: Alibaba Cloud, Tencent, etc.; - Asuntos de servidores nacionales: La resolución de nombres de dominio requiere la presentación de ICP; El ancho de banda del servidor doméstico es más caro; El acceso a sitios web extranjeros (Github, openAI, etc.) requiere un proxy. - -## ¿En qué circunstancias debe grabarse el servidor? - -Los sitios web que operan en China continental deben presentar de acuerdo con los requisitos reglamentarios. En la práctica, si el servidor está ubicado en China y hay resolución de nombres de dominio, el proveedor del servidor implementará los requisitos reglamentarios de presentación, de lo contrario el servicio se cerrará. 
Las reglas habituales son las siguientes: -|ubicación del servidor|proveedor de nombres de dominio|si se requiere la presentación| -|---|---|---| -|Doméstico|Doméstico|Sí| -|nacional|extranjero|sí| -|extranjero|extranjero|no| -|extranjero|nacional|normalmente no| - -Después de cambiar de proveedor de servidores, debe transferir la presentación de ICP. - -# Problemas relacionados con OpenAI - -## ¿Cómo registro una cuenta OpenAI? - -Vaya a chat.openai.com para registrarse. Es necesario: - -* Una buena escalera (OpenAI admite direcciones IP nativas regionales) -* Un buzón compatible (por ejemplo, Gmail o trabajo/escuela, no buzón de Outlook o QQ) -* Cómo recibir autenticación por SMS (por ejemplo, sitio web de activación de SMS) - -## ¿Cómo activo la API de OpenAI? ¿Cómo verifico mi saldo de API? - -Dirección del sitio web oficial (se requiere escalera): https://platform.openai.com/account/usage -Algunos internautas han construido un agente de consulta de saldo sin escalera, por favor pídales a los internautas que lo obtengan. Identifique si la fuente es confiable para evitar la fuga de la clave API. - -## ¿Por qué mi cuenta OpenAI recién registrada no tiene un saldo API? - -(Actualizado el 6 de abril) Las cuentas recién registradas suelen mostrar el saldo de la API después de 24 horas. Se otorga un saldo de $ 5 a una cuenta recién registrada. - -## ¿Cómo puedo recargar la API de OpenAI? - -OpenAI solo acepta tarjetas de crédito en regiones seleccionadas (no se pueden usar tarjetas de crédito chinas). Algunos ejemplos de avenidas son: - -1. Depay tarjeta de crédito virtual -2. Solicitar una tarjeta de crédito extranjera -3. Encuentra a alguien para cobrar en línea - -## ¿Cómo utilizo el acceso a la API de GPT-4? - -* El acceso a la API para GPT-4 requiere una solicitud independiente. 
Ingrese a la cola de la solicitud completando su información en la lista de espera (prepare su ID de organización OpenAI): https://openai.com/waitlist/gpt-4-api - Espere el mensaje de correo después. -* Habilitar ChatGPT Plus no significa permisos GPT-4, y los dos no tienen nada que ver entre sí. - -## Uso de la interfaz de Azure OpenAI - -Por favor consulte:[#371](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/371) - -## ¿Por qué mi token se agota tan rápido? - -> Debates relacionados:[#518](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518) - -* Si tiene permisos de GPT 4 y usa las API de GPT 4 a diario, el monto de su factura aumentará rápidamente porque el precio de GPT 4 es aproximadamente 15 veces mayor que el de GPT 3.5; -* Si está usando GPT 3.5 y no lo usa con mucha frecuencia y aún nota que su factura aumenta rápidamente, siga estos pasos para solucionar problemas ahora: - * Vaya al sitio web oficial de OpenAI para verificar sus registros de consumo de API Key, si su token se consume cada hora y se consumen decenas de miles de tokens cada vez, entonces su clave debe haberse filtrado, elimine y regenere inmediatamente.**No verifique su saldo en un sitio web desordenado.** - * Si su contraseña se acorta, como letras dentro de 5 dígitos, entonces el costo de voladura es muy bajo, se recomienda que busque en el registro de Docker para ver si alguien ha probado muchas combinaciones de contraseñas, palabra clave: got access code -* A través de los dos métodos anteriores, puede localizar la razón por la cual su token se consume rápidamente: - * Si el registro de consumo de OpenAI es anormal, pero no hay ningún problema con el registro de Docker, entonces la clave API se filtra; - * Si el registro de Docker encuentra una gran cantidad de registros de código de acceso de obtención, entonces la contraseña ha sido destruida. - -## ¿Cómo se facturan las API? 
- -Instrucciones de facturación del sitio web de OpenAI: https://openai.com/pricing#language-models\ -OpenAI cobra en función del número de tokens, y 1,000 tokens generalmente representan 750 palabras en inglés o 500 caracteres chinos. Prompt y Completion cuentan los costos por separado.\ -|Modelo|Facturación de entrada de usuario (aviso)|Facturación de salida del modelo (finalización)|Número máximo de tokens por interacción| -|----|----|----|----| -|gpt-3.5|$0.002 / 1 mil tokens|$0.002 / 1 mil tokens|4096| -|gpt-4|$0.03 / 1 mil tokens|$0.06 / 1 mil tokens|8192| -|gpt-4-32K|$0.06 / 1 mil tokens|$0.12 / 1 mil tokens|32768| - -## ¿Cuál es la diferencia entre los modelos GPT-3.5-TURBO y GPT3.5-TURBO-0301 (o GPT3.5-TURBO-MMDD)? - -Descripción de la documentación oficial: https://platform.openai.com/docs/models/gpt-3-5 - -* GPT-3.5-Turbo es el último modelo y se actualiza constantemente. -* GPT-3.5-turbo-0301 es una instantánea del modelo congelada el 1 de marzo, no cambiará y se espera que sea reemplazada por una nueva instantánea en 3 meses. diff --git a/docs/faq-ja.md b/docs/faq-ja.md deleted file mode 100644 index 8d50ffab..00000000 --- a/docs/faq-ja.md +++ /dev/null @@ -1,191 +0,0 @@ -# よくある質問 - -## 早く助けを求めるには? - -1. ChatGPT / Bing / Baidu / Google などに尋ねてください。 -2. オンラインの友達に聞く。背景情報と問題の詳細な説明を提供してください。質の高い質問ほど、有益な回答を得られる可能性が高くなります。 - -# デプロイメントに関する質問 - -## なぜ Docker のデプロイバージョンは常に更新を要求するのか - -Docker のバージョンは安定版と同等であり、最新の Docker は常に最新のリリースバージョンと一致しています。現在、私たちのリリース頻度は1~2日に1回なので、Dockerのバージョンは常に最新のコミットから1~2日遅れており、これは予想されることです。 - -## Vercel での展開方法 - -1. GitHub アカウントを登録し、このプロジェクトをフォークする。 -2. Vercel を登録し(携帯電話認証が必要、中国の番号でも可)、GitHub アカウントを接続する。 -3. 
Vercel で新規プロジェクトを作成し、GitHub でフォークしたプロジェクトを選択し、必要な環境変数を入力し、デプロイを開始する。デプロイ後、Vercel が提供するドメインからプロジェクトにアクセスできます。(中国本土ではプロキシが必要) - -- 中国で直接アクセスする必要がある場合: DNS プロバイダーで、cname.vercel-dns.com を指すドメイン名の CNAME レコードを追加します。その後、Vercel でドメインアクセスを設定してください。 - -## Vercel 環境変数の変更方法 - -- Vercel のコンソールページに入ります; -- chatgpt-next-web プロジェクトを選択してください; -- ページ上部の設定オプションをクリックしてください; -- サイドバーで環境変数オプションを見つけます; -- 必要に応じて対応する値を変更してください。 - -## 環境変数 CODE とは何ですか?設定する必要がありますか? - -カスタムアクセスパスワードです: - -1. 設定しないで、環境変数を削除する。この時、誰でもあなたのプロジェクトにアクセスすることができます。 -2. プロジェクトをデプロイするときに、環境変数 CODE を設定する(カンマ区切りで複数のパスワードをサポート)。アクセスパスワードを設定した後、ユーザーはそれを使用するために設定ページでアクセスパスワードを入力する必要があります。[関連手順](https://github.com/Yidadaa/ChatGPT-Next-Web#access-password) - -## なぜ私がデプロイしたバージョンにはストリーミングレスポンスがないのでしょうか? - -> 関連する議論: [#386](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/386) - -nginx のリバースプロキシを使っている場合、設定ファイルに以下のコードを追加する必要があります: - -``` -# キャッシュなし、ストリーミング出力をサポート -proxy_cache off; # キャッシュをオフにする -proxy_buffering off; # プロキシバッファリングをオフにする -chunked_transfer_encoding on; # チャンク転送エンコーディングをオンにする -tcp_nopush on; # TCP NOPUSH オプションをオンにし、Nagleアルゴリズムを無効にする -tcp_nodelay on; # TCP NODELAY オプションをオンにし、遅延ACKアルゴリズムを無効にする -keepalive_timeout 300; # keep-alive のタイムアウトを 65 秒に設定する -``` - -netlify でデプロイしている場合、この問題はまだ解決待ちです。 - -## デプロイしましたが、アクセスできません。 - -以下の問題を確認し、トラブルシューティングを行ってください: - -- サービスは開始されていますか? -- ポートは正しくマッピングされていますか? -- ファイアウォールのポートは開いていますか? -- サーバーへのルートは問題ありませんか? -- ドメイン名は正しく解決されていますか? - -## "Error: Loading CSS chunk xxx failed..." 
と表示されることがあります。 - -Next.js では、最初のホワイトスクリーンの時間を短縮するために、デフォルトでチャンキングを有効にしています。技術的な詳細はこちらをご覧ください: - -- https://nextjs.org/docs/app/building-your-application/optimizing/lazy-loading -- https://stackoverflow.com/questions/55993890/how-can-i-disable-chunkcode-splitting-with-webpack4 -- https://github.com/vercel/next.js/issues/38507 -- https://stackoverflow.com/questions/55993890/how-can-i-disable-chunkcode-splitting-with-webpack4 - -ただし、Next.js は古いブラウザとの互換性に制限があるため、このエラーが発生することがあります。 - -ビルド時にチャンキングを無効にすることができます。 - -Vercel プラットフォームの場合は、環境変数に `DISABLE_CHUNK=1` を追加して再デプロイします。 -セルフデプロイのプロジェクトでは、ビルド時に `DISABLE_CHUNK=1 yarn build` を使用することができます。 -Docker ユーザーの場合、ビルドはパッケージング時にすでに完了しているため、この機能を無効にすることは現在サポートされていません。 - -この機能を無効にすると、ユーザーの最初の訪問時にすべてのリソースがロードされることに注意してください。その結果、ユーザーのネットワーク接続が悪い場合、ホワイト・スクリーンの時間が長くなり、ユーザーエクスペリエンスに影響を与える可能性があります。この点を考慮の上、ご判断ください。 - -# 使用法に関する質問 - -## なぜいつも "An error occurred, please try again later" と表示されるのですか? - -様々な原因が考えられますので、以下の項目を順番にチェックしてみてください: - -- まず、コードのバージョンが最新版かどうかを確認し、最新版にアップデートしてから再試行してください; -- api キーが正しく設定されているか確認してください。環境変数名は大文字とアンダースコアでなければなりません; -- api キーが使用可能かどうか確認する; -- 上記のステップを踏んでも問題が解決しない場合は、issue エリアに新しい issue を投稿し、vercel のランタイムログまたは docker のランタイムログを添付してください。 - -## ChatGPT の返信が文字化けするのはなぜですか? - -設定画面-機種設定の中に `temperature` という項目があります。この値が 1 より大きい場合、返信が文字化けすることがあります。1 以内に調整してください。 - -## 設定ページでアクセスパスワードを入力してください」と表示される。 - -プロジェクトでは環境変数 CODE でアクセスパスワードを設定しています。初めて使うときは、設定ページでアクセスコードを入力する必要があります。 - -## 使用すると、"You exceeded your current quota, ..." と表示される。 - -API KEY に問題があります。残高不足です。 - -## プロキシとは何ですか? - -OpenAI の IP 制限により、中国をはじめとする一部の国や地域では、OpenAI API に直接接続することができず、プロキシを経由する必要があります。プロキシサーバ(フォワードプロキシ)を利用するか、事前に設定された OpenAI API リバースプロキシを利用します。 - -- フォワードプロキシの例: VPN ラダー。docker デプロイの場合は、環境変数 HTTP_PROXY にプロキシアドレス (http://address:port) を設定します。 -- リバースプロキシの例: 他人のプロキシアドレスを使うか、Cloudflare を通じて無料で設定できる。プロジェクトの環境変数 BASE_URL にプロキシアドレスを設定してください。 - -## 中国のサーバーにデプロイできますか? 
- -可能ですが、対処すべき問題があります: - -- GitHub や OpenAI などのウェブサイトに接続するにはプロキシが必要です; -- GitHub や OpenAI のようなウェブサイトに接続するにはプロキシが必要です; -- 中国の政策により、海外のウェブサイト/ChatGPT 関連アプリケーションへのプロキシアクセスが制限されており、ブロックされる可能性があります。 - -# ネットワークサービス関連の質問 - -## クラウドフレアとは何ですか? - -Cloudflare(CF)は、CDN、ドメイン管理、静的ページホスティング、エッジコンピューティング機能展開などを提供するネットワークサービスプロバイダーです。一般的な使用例: メインの購入やホスティング(解決、ダイナミックドメインなど)、サーバーへの CDN の適用(ブロックされないように IP を隠すことができる)、ウェブサイト(CF Pages)の展開。CF はほとんどのサービスを無料で提供しています。 - -## Vercel とは? - -Vercel はグローバルなクラウドプラットフォームで、開発者がモダンなウェブアプリケーションをより迅速に構築、デプロイできるように設計されています。このプロジェクトや多くのウェブアプリケーションは、ワンクリックで Vercel 上に無料でデプロイできます。コードを理解する必要も、Linux を理解する必要も、サーバーを持つ必要も、お金を払う必要も、OpenAI API プロキシを設定する必要もありません。欠点は、中国の制限なしにアクセスするためにドメイン名をバインドする必要があることだ。 - -## ドメイン名の取得方法 - -1. Namesilo(アリペイ対応)や Cloudflare(海外プロバイダー)、Wanwang(中国国内プロバイダー)などのドメインプロバイダーに登録する。 -2. 無料ドメインプロバイダー: eu.org(セカンドレベルドメイン)など。 -3. 無料セカンドレベルドメインを友人に頼む。 - -## サーバーの取得方法 - -- 海外サーバープロバイダーの例 Amazon Web Services、Google Cloud、Vultr、Bandwagon、Hostdare など。 - 海外サーバーの注意点 サーバー回線は中国でのアクセス速度に影響するため、CN2 GIA、CN2 回線を推奨。もしサーバーが中国でアクセスしにくい場合(深刻なパケットロスなど)、CDN(Cloudflare のようなプロバイダーのもの)を使ってみるとよいでしょう。 -- 国内のサーバープロバイダー アリババクラウド、テンセントなど - 国内サーバーの注意点 ドメイン名の解決にはファイリングが必要。国内サーバーの帯域幅は比較的高い。海外のウェブサイト(GitHub、OpenAI など)へのアクセスにはプロキシが必要。 - -# OpenAI 関連の質問 - -## OpenAI のアカウントを登録するには? - -chat.openai.com にアクセスして登録してください。以下のものが必要です: - -- 優れた VPN (OpenAI はサポートされている地域のネイティブ IP アドレスしか許可しません) -- サポートされているメール (例: Gmail や会社/学校のメール。Outlook や QQ のメールは不可) -- SMS 認証を受ける方法(SMS-activate ウェブサイトなど) - -## OpenAI API を有効にするには?API 残高の確認方法は? - -公式ウェブサイト(VPN が必要): https://platform.openai.com/account/usage -VPN なしで残高を確認するためにプロキシを設定しているユーザーもいます。API キーの漏洩を避けるため、信頼できる情報源であることを確認してください。 - -## OpenAI の新規アカウントに API 残高がないのはなぜですか? - -(4月6日更新) 新規登録アカウントは通常 24 時間以内に API 残高が表示されます。現在、新規アカウントには 5 ドルの残高が与えられています。 - -## OpenAI API へのチャージ方法を教えてください。 - -OpenAI では、指定された地域のクレジットカードのみご利用いただけます(中国のクレジットカードはご利用いただけません)。お住まいの地域のクレジットカードに対応していない場合は、以下の方法があります: - -1. Depay バーチャルクレジットカード -2. 海外のクレジットカードを申し込む -3. 
オンラインでトップアップしてくれる人を探す - -## GPT-4 API にアクセスするには? - -(4月6日更新) GPT-4 API へのアクセスには別途申請が必要です。以下のアドレスにアクセスし、ウェイティングリストに参加するための情報を入力してください(OpenAI の組織 ID をご用意ください): https://openai.com/waitlist/gpt-4-api -その後、メールの更新をお待ちください。 - -## Azure OpenAI インターフェースの使い方 - -次を参照: [#371](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/371) - -## トークンの消費が速いのはなぜですか? - -> 関連する議論: [#518](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518) - -- GPT-4 にアクセスし、GPT-4 の API を定期的に使用している場合、GPT-4 の価格は GPT-3.5 の約 15 倍であるため、請求額が急激に増加します; -- GPT-3.5 を使用しており、頻繁に使用していないにもかかわらず、請求額が急速に増加している場合は、以下の手順で直ちにトラブルシューティングを行ってください: - - OpenAI のウェブサイトで API キーの消費記録を確認してください。トークンが 1 時間ごとに消費され、毎回数万トークンが消費される場合は、キーが流出している可能性があります。すぐに削除して再生成してください。**適当なサイトで残高を確認しないでください。** - - パスワードが 5 文字以下など短い場合、ブルートフォースによるコストは非常に低くなります。誰かが大量のパスワードの組み合わせを試したかどうかを確認するために、docker のログを検索することを推奨する。キーワード:アクセスコードの取得 -- これら 2 つの方法を実行することで、トークンが急速に消費された原因を突き止めることができます: - - OpenAI の消費記録に異常があるが、Docker ログに問題がない場合、API キーが流出したことを意味します; - - Docker ログにアクセスコード取得のブルートフォース試行回数が多い場合は、パスワードがクラックされています。 diff --git a/docs/faq-ko.md b/docs/faq-ko.md deleted file mode 100644 index b0d28917..00000000 --- a/docs/faq-ko.md +++ /dev/null @@ -1,230 +0,0 @@ -# 자주 묻는 질문 - -## 어떻게 빠르게 도움을 받을 수 있나요? - -1. ChatGPT / Bing / Baidu / Google 등에 질문합니다. -2. 인터넷 사용자에게 질문합니다. 문제의 배경 정보와 자세한 문제 설명을 제공하세요. 질 좋은 질문은 유용한 답변을 쉽게 받을 수 있습니다. - -# 배포 관련 질문 - -각종 배포 방법에 대한 자세한 튜토리얼 참조: [링크](https://rptzik3toh.feishu.cn/docx/XtrdduHwXoSCGIxeFLlcEPsdn8b) - -## 왜 Docker 배포 버전이 계속 업데이트 알림을 주나요? - -Docker 버전은 사실상 안정된 버전과 같습니다. latest Docker는 항상 latest release version과 일치합니다. 현재 우리의 발행 빈도는 하루 또는 이틀에 한 번이므로 Docker 버전은 항상 최신 커밋보다 하루나 이틀 뒤처집니다. 이것은 예상된 것입니다. - -## Vercel에서 어떻게 배포하나요? - -1. Github 계정을 등록하고, 이 프로젝트를 포크합니다. -2. Vercel을 등록합니다(휴대폰 인증 필요, 중국 번호 사용 가능), Github 계정을 연결합니다. -3. Vercel에서 새 프로젝트를 생성하고, Github에서 포크한 프로젝트를 선택합니다. 환경 변수를 필요에 따라 입력한 후 배포를 시작합니다. 배포 후에는 VPN이 있는 환경에서 Vercel이 제공하는 도메인으로 프로젝트에 접근할 수 있습니다. -4. 
중국에서 방화벽 없이 접근하려면: 도메인 관리 사이트에서 도메인의 CNAME 레코드를 추가하고, cname.vercel-dns.com을 가리키게 합니다. 그런 다음 Vercel에서 도메인 접근을 설정합니다. - -## Vercel 환경 변수를 어떻게 수정하나요? - -- Vercel의 제어판 페이지로 이동합니다. -- NextChat 프로젝트를 선택합니다. -- 페이지 상단의 Settings 옵션을 클릭합니다. -- 사이드바의 Environment Variables 옵션을 찾습니다. -- 해당 값을 수정합니다. - -## 환경 변수 CODE는 무엇이며, 반드시 설정해야 하나요? - -이것은 당신이 사용자 정의한 접근 비밀번호입니다. 다음 중 하나를 선택할 수 있습니다: - -1. 설정하지 않습니다. 해당 환경 변수를 삭제합니다. 주의: 이 경우 누구나 프로젝트에 접근할 수 있습니다. -2. 프로젝트를 배포할 때 환경 변수 CODE를 설정합니다(여러 비밀번호는 쉼표로 구분). 접근 비밀번호를 설정하면 사용자는 설정 페이지에서 접근 비밀번호를 입력해야만 사용할 수 있습니다. [관련 설명 참조](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/README_CN.md#%E9%85%8D%E7%BD%AE%E9%A1%B5%E9%9D%A2%E8%AE%BF%E9%97%AE%E5%AF%86%E7%A0%81) - -## 왜 내 배포 버전에 스트리밍 응답이 없나요? - -> 관련 토론: [#386](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/386) - -nginx 리버스 프록시를 사용하는 경우, 설정 파일에 다음 코드를 추가해야 합니다: - -```nginx -# 캐시하지 않고, 스트리밍 출력 지원 -proxy_cache off; # 캐시 비활성화 -proxy_buffering off; # 프록시 버퍼링 비활성화 -chunked_transfer_encoding on; # 청크 전송 인코딩 활성화 -tcp_nopush on; # TCP NOPUSH 옵션 활성화, Nagle 알고리즘 금지 -tcp_nodelay on; # TCP NODELAY 옵션 활성화, 지연 ACK 알고리즘 금지 -keepalive_timeout 300; # keep-alive 타임아웃을 65초로 설정 -``` - -netlify에서 배포하는 경우, 이 문제는 아직 해결되지 않았습니다. 기다려 주십시오. - -## 배포했지만 액세스할 수 없는 경우. - -다음의 사항들을 확인해보세요: - -- 서비스가 배포 중인가요? -- 포트가 올바르게 매핑되었나요? -- 방화벽에서 포트가 열렸나요? -- 서버 경로가 유효한가요? -- 도메인 이름이 올바른가요? - -## 프록시란 무엇이며 어떻게 사용하나요? - -중국 및 일부 국가에서는 OpenAI의 IP 제한으로 인해 OpenAI API에 직접 연결할 수 없으며 프록시를 거쳐야 합니다. 프록시 서버(정방향 프록시)를 사용하거나 OpenAI API에 대해 설정된 역방향 프록시를 사용할 수 있습니다. - -- 정방향 프록시 예: 사이언티픽 인터넷 래더. 도커 배포의 경우 환경 변수 HTTP_PROXY를 프록시 주소(예: 10.10.10.10:8002)로 설정합니다. -- 역방향 프록시 예: 다른 사람이 구축한 프록시 주소를 사용하거나 Cloudflare를 통해 무료로 설정할 수 있습니다. 프로젝트 환경 변수 BASE_URL을 프록시 주소로 설정합니다. - -## 국내 서버를 배포할 수 있나요? - -예. 하지만 해결해야 할 문제가 있습니다: - -- github 및 openAI와 같은 사이트에 연결하려면 프록시가 필요합니다; -- 도메인 이름 확인을 설정하려면 국내 서버를 신청해야 합니다; -- 국내 정책에 따라 프록시가 엑스트라넷/ChatGPT 관련 애플리케이션에 액세스하지 못하도록 제한되어 차단될 수 있습니다. - -## 도커 배포 후 네트워크 오류가 발생하는 이유는 무엇인가요? 
- -https://github.com/Yidadaa/ChatGPT-Next-Web/issues/1569 에서 토론을 참조하세요. - -## 사용 관련 문제 - -## "문제가 발생했습니다, 나중에 다시 시도하세요"라는 메시지가 계속 뜨는 이유는 무엇인가요? - -여러 가지 이유가 있을 수 있으니 순서대로 확인해 주세요: - -- 코드 버전이 최신 버전인지 확인하고, 최신 버전으로 업데이트한 후 다시 시도해 주세요; -- API 키가 올바르게 설정되었는지 확인해주세요. 환경 변수 이름은 모두 대문자이며 밑줄이 있어야 합니다; -- API 키가 사용 가능한지 확인해 주세요; -- 위 단계를 수행한 후에도 문제를 확인할 수 없는 경우, 이슈 영역에 신규 이슈를 제출하고 버셀의 런타임 로그 또는 도커 런타임 로그를 첨부해 주시기 바랍니다. - -## ChatGPT 응답이 왜곡되는 이유는 무엇인가요? - -설정 - 모델 설정 섹션에 '온도'에 대한 값이 있는데, 이 값이 1보다 크면 응답이 왜곡될 수 있으니 1 이내로 다시 설정해 주세요. - -## "권한이 없는 상태입니다, 설정 페이지에서 액세스 비밀번호를 입력하세요"? - -프로젝트에서 환경 변수 CODE에 접근 비밀번호를 설정했습니다. 처음 사용할 때는 설정 페이지에서 액세스 코드를 입력해야 합니다. - -## 사용 시 "현재 할당량을 초과했습니다, ..."라는 메시지가 표시됩니다. - -API 키에 문제가 있습니다. 잔액이 부족합니다. - -## "오류: CSS 청크 xxx를 로드하지 못했습니다..."와 함께 사용. - -첫 번째 화이트 스크린 시간을 줄이기 위해 청크 컴파일이 기본적으로 활성화되어 있으며, 기술 원칙은 아래를 참조하세요: - -- https://nextjs.org/docs/app/building-your-application/optimizing/lazy-loading -- https://stackoverflow.com/questions/55993890/how-can-i-disable-chunkcode-splitting-with-webpack4 -- https://github.com/vercel/next.js/issues/38507 -- https://stackoverflow.com/questions/55993890/how-can-i-disable-chunkcode-splitting-with-webpack4 - -그러나 NextJS는 호환성이 좋지 않아 구형 브라우저에서 이 오류가 발생할 수 있으므로 컴파일 시 청크 컴파일을 비활성화할 수 있습니다. - -버셀 플랫폼의 경우 환경 변수에 `DISABLE_CHUNK=1`을 추가하고 다시 배포합니다; -자체 컴파일 및 배포한 프로젝트의 경우, 빌드 시 `DISABLE_CHUNK=1 yarn build`를 사용하여 빌드합니다; -Docker 사용자의 경우, Docker가 프로젝트를 패키징할 때 이미 빌드하기 때문에 이 기능을 해제하는 것은 지원되지 않습니다. - -이 기능을 끄면 사용자가 웹사이트를 처음 방문할 때 모든 리소스를 로드하므로 인터넷 연결 상태가 좋지 않은 경우 흰색 화면이 길게 표시되어 사용자 경험에 영향을 줄 수 있으므로 사용자가 직접 고려하시기 바랍니다. - -"## NotFoundError: '노드': 노드....에서 'removeChild'를 실행하지 못했습니다." 오류가 발생했습니다. -브라우저의 자체 자동 번역 기능을 비활성화하고 모든 자동 번역 플러그인을 닫아주세요. - -## 웹 서비스 관련 문제 - -## 클라우드플레어란 무엇인가요? - -Cloudflare(CF)는 CDN, 도메인 관리, 정적 페이지 호스팅, 엣지 컴퓨팅 기능 배포 등을 제공하는 웹 서비스 제공업체입니다. 일반적인 용도: 도메인 구매 및/또는 호스팅(리졸브, 동적 도메인 등), 서버에 CDN 설치(벽에서 IP를 숨기는 기능), 웹사이트 배포(CF 페이지). CF는 이러한 서비스 대부분을 무료로 제공합니다. - -## Vercel이란 무엇인가요? 
- -Vercel은 개발자가 최신 웹 애플리케이션을 더 빠르게 빌드하고 배포할 수 있도록 설계된 글로벌 클라우드 플랫폼입니다. 이 프로젝트와 많은 웹 애플리케이션을 클릭 한 번으로 Vercel에 무료로 배포할 수 있습니다. 코드, 리눅스, 서버, 수수료가 필요 없고 OpenAI API 프록시를 설정할 필요도 없습니다. 단점은 중국에서 장벽 없이 액세스하려면 도메인 이름을 바인딩해야 한다는 것입니다. - -## 도메인 네임은 어떻게 얻나요? - -1) 도메인 네임 공급업체로 이동하여 해외에서는 Namesilo(알리페이 지원), 클라우드플레어 등, 중국에서는 월드와이드웹과 같은 도메인 네임을 등록합니다. 2) 무료 도메인 네임 공급업체: 예: eBay; -2. 무료 도메인 네임 제공업체: eu.org(두 번째 레벨 도메인 네임) 등..; -3. 친구에게 무료 2단계 도메인 네임을 요청합니다. - -## 서버를 얻는 방법 - -- 외국 서버 제공업체의 예: 아마존 클라우드, 구글 클라우드, 벌터, 밴드왜건, 호스트데어 등; - 해외 서버 문제: 서버 라인은 해당 국가의 액세스 속도에 영향을 미치므로 CN2 GIA 및 CN2 라인 서버를 권장합니다. 국내 서버의 접속에 문제가 있는 경우(심각한 패킷 손실 등) CDN(Cloudflare 및 기타 제공 업체)을 설정해 볼 수 있습니다. -- 국내 서버 제공업체: 알리윈, 텐센트 등; - 국내 서버 문제: 도메인 이름 확인을 신청해야 하며, 국내 서버 대역폭이 더 비싸고, 해외 사이트(Github, openAI 등)에 액세스하려면 프록시가 필요합니다. - -## 서버는 언제 신청해야 하나요? - -중국 본토에서 운영되는 웹사이트는 규제 요건에 따라 신고해야 합니다. 실제로 서버가 중국에 있고 도메인 네임 레졸루션이 있는 경우 서버 제공업체가 규제 신고 요건을 시행하며, 그렇지 않으면 서비스가 종료됩니다. 일반적인 규칙은 다음과 같습니다: -|서버 위치|도메인 네임 공급자|파일링 필요 여부| -|---|---|---| -|국내|국내|예 -|국내|외국|예 -|외국|외국인|아니요 -|외국|국내|일반적으로 아니요| - -서버 공급자를 전환한 후 파일링을 전환해야 합니다. - -## OpenAI 관련 질문 - -## OpenAI 계정은 어떻게 가입하나요? - -chat.openai.com으로 이동하여 등록하세요. 다음이 필요합니다: - -- 유효한 래더(OpenAI는 지역별 기본 IP 주소를 지원합니다) -- 지원되는 이메일 주소(예: Outlook이나 qq가 아닌 Gmail 또는 회사/학교 이메일) -- SMS 인증을 받을 수 있는 방법(예: SMS 활성화 웹사이트) - -## OpenAI API는 어떻게 열 수 있나요? API 잔액은 어떻게 확인하나요? - -공식 웹사이트 주소(래더 필요): https://platform.openai.com/account/usage -일부 사용자는 래더 없이 잔액 조회 에이전트를 구축한 경우가 있으니, 해당 사용자에게 요청해 주시기 바랍니다. API 키 유출을 방지하기 위해 신뢰할 수 있는 소스인지 확인하시기 바랍니다. - -## 새로 등록한 OpenAI 계정에 API 잔액이 없는 이유는 무엇인가요? - -(4월 6일 업데이트) 새로 등록된 계정은 일반적으로 24시간 후에 API 잔액이 표시됩니다. 현재 새로 등록된 계정에는 $5의 잔액이 표시됩니다. - -## OpenAI API를 충전하려면 어떻게 해야 하나요? - -OpenAI는 특정 지역의 신용카드만 사용할 수 있습니다(중국 신용카드는 사용할 수 없음). 충전 방법의 몇 가지 예는 다음과 같습니다: - -1. 가상 신용카드로 결제하기 -2. 해외 신용카드 신청 -3. 온라인에서 신용카드를 충전할 사람 찾기 - -## GPT-4 API 액세스는 어떻게 사용하나요? - -- GPT-4 API 액세스는 별도의 신청이 필요합니다. 
다음 주소로 이동하여 정보를 입력하여 신청 대기열 대기자 명단에 들어가세요(OpenAI 조직 ID를 준비하세요): https://openai.com/waitlist/gpt-4-api. - 그런 다음 이메일 메시지를 기다립니다. -- ChatGPT Plus를 사용하도록 설정했다고 해서 GPT-4 권한이 있는 것은 아니며, 서로 관련이 없습니다. - -## Azure OpenAI 인터페이스 사용 방법 - -참조: [#371](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/371) - -## 내 토큰이 왜 이렇게 빨리 소모되나요? - -> 관련 토론: [#518](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518) - -- GPT 4에 액세스 권한이 있고 매일 GPT 4 API를 사용하는 경우, GPT 4 가격이 GPT 3.5의 약 15배이기 때문에 청구 금액이 급격히 증가합니다; -- GPT 3.5를 자주 사용하지 않는데도 요금이 급격하게 증가하는 경우 아래 단계를 따라 확인하시기 바랍니다: - - 오픈아이 공식 웹사이트로 이동하여 API 키 소비 기록을 확인하고, 매 시간마다 토큰이 소비되고 매번 수만 개의 토큰이 소비된다면 키가 유출된 것이므로 즉시 삭제하고 재생성하시기 바랍니다. 즉시 키를 삭제하고 다시 생성하시기 바랍니다. 지저분한 웹사이트에서 잔액을 확인하지 마세요. ** - - 비밀번호 설정이 5자리 이내의 문자와 같이 매우 짧으면 블라스팅 비용이 매우 낮습니다. 도커의 로그 기록을 검색하여 누군가 많은 수의 비밀번호 조합을 시도했는지 확인하는 것이 좋습니다. 키워드: 액세스 코드를 얻었습니다. -- 이 두 가지 방법을 사용하면 토큰이 소비되는 이유를 빠르게 찾을 수 있습니다: - - 오픈아이 소비 기록은 비정상적이지만 도커 로그는 정상이라면 API 키가 유출되고 있다는 뜻입니다; - - 도커 로그에서 액세스 코드 버스트 레코드가 많이 발견되면 비밀번호가 버스트된 것입니다. - - -## API의 가격은 어떻게 청구되나요? - -OpenAI의 청구 지침은 https://openai.com/pricing#language-models 에서 확인할 수 있습니다. -OpenAI는 토큰 수에 따라 요금을 청구하며, 일반적으로 1000토큰은 영어 단어 750개 또는 중국어 문자 500개를 나타냅니다. 입력(프롬프트)과 출력(완료)은 별도로 청구됩니다. - -|모델|사용자 입력(프롬프트) 청구 |모델 출력(완료) 청구 |인터랙션당 최대 토큰 수 | -|----|----|----|----| -|GPT-3.5-TURBO|$0.0015 / 1천 토큰|$0.002 / 1천 토큰|4096| -|GPT-3.5-TURBO-16K|$0.003 / 1천 토큰|$0.004 / 1천 토큰|16384| |GPT-4|$0.004 / 1천 토큰|16384 -|GPT-3.5-TURBO-16K|$0.003 / 1천 토큰|$0.004 / 1천 토큰|16384| |GPT-4|$0.03 / 1천 토큰|$0.06 / 1천 토큰|8192 -|GPT-4-32K|$0.06 / 1천 토큰|$0.12 / 1천 토큰|32768| - -## gpt-3.5-터보와 gpt3.5-터보-0301(또는 gpt3.5-터보-mmdd) 모델의 차이점은 무엇인가요? - -공식 문서 설명: https://platform.openai.com/docs/models/gpt-3-5 - -- GPT-3.5-TURBO는 최신 모델이며 지속적으로 업데이트될 예정입니다. -- gpt-3.5-turbo-0301은 3월 1일에 고정된 모델의 스냅샷으로, 변경되지 않으며 3개월 후에 새로운 스냅샷으로 대체될 예정입니다. 
\ No newline at end of file diff --git a/docs/images/cover.png b/docs/images/cover.png deleted file mode 100644 index 38157305..00000000 Binary files a/docs/images/cover.png and /dev/null differ diff --git a/docs/images/enable-actions-sync.jpg b/docs/images/enable-actions-sync.jpg deleted file mode 100644 index 4a69da92..00000000 Binary files a/docs/images/enable-actions-sync.jpg and /dev/null differ diff --git a/docs/images/enable-actions.jpg b/docs/images/enable-actions.jpg deleted file mode 100644 index a4f4f0f1..00000000 Binary files a/docs/images/enable-actions.jpg and /dev/null differ diff --git a/docs/images/head-cover.png b/docs/images/head-cover.png deleted file mode 100644 index 7fd4aeb5..00000000 Binary files a/docs/images/head-cover.png and /dev/null differ diff --git a/docs/images/icon.svg b/docs/images/icon.svg index 758a57eb..e122d32f 100644 --- a/docs/images/icon.svg +++ b/docs/images/icon.svg @@ -1 +1,87 @@ - \ No newline at end of file + +image/svg+xml + + + + + + + + + + + + + + + + diff --git a/docs/images/more.png b/docs/images/more.png deleted file mode 100644 index 70c0d315..00000000 Binary files a/docs/images/more.png and /dev/null differ diff --git a/docs/images/settings.png b/docs/images/settings.png deleted file mode 100644 index 31603daa..00000000 Binary files a/docs/images/settings.png and /dev/null differ diff --git a/docs/images/upstash-1.png b/docs/images/upstash-1.png deleted file mode 100644 index 253ee60c..00000000 Binary files a/docs/images/upstash-1.png and /dev/null differ diff --git a/docs/images/upstash-2.png b/docs/images/upstash-2.png deleted file mode 100644 index d1f255d6..00000000 Binary files a/docs/images/upstash-2.png and /dev/null differ diff --git a/docs/images/upstash-3.png b/docs/images/upstash-3.png deleted file mode 100644 index 5b210940..00000000 Binary files a/docs/images/upstash-3.png and /dev/null differ diff --git a/docs/images/upstash-4.png b/docs/images/upstash-4.png deleted file mode 100644 index 
a22ccc99..00000000 Binary files a/docs/images/upstash-4.png and /dev/null differ diff --git a/docs/images/upstash-5.png b/docs/images/upstash-5.png deleted file mode 100644 index 57f8b4f9..00000000 Binary files a/docs/images/upstash-5.png and /dev/null differ diff --git a/docs/images/upstash-6.png b/docs/images/upstash-6.png deleted file mode 100644 index 75770760..00000000 Binary files a/docs/images/upstash-6.png and /dev/null differ diff --git a/docs/images/upstash-7.png b/docs/images/upstash-7.png deleted file mode 100644 index 76fd0ea8..00000000 Binary files a/docs/images/upstash-7.png and /dev/null differ diff --git a/docs/images/vercel/vercel-create-1.jpg b/docs/images/vercel/vercel-create-1.jpg deleted file mode 100644 index f0bbd002..00000000 Binary files a/docs/images/vercel/vercel-create-1.jpg and /dev/null differ diff --git a/docs/images/vercel/vercel-create-2.jpg b/docs/images/vercel/vercel-create-2.jpg deleted file mode 100644 index 157768a8..00000000 Binary files a/docs/images/vercel/vercel-create-2.jpg and /dev/null differ diff --git a/docs/images/vercel/vercel-create-3.jpg b/docs/images/vercel/vercel-create-3.jpg deleted file mode 100644 index 2eaae1f9..00000000 Binary files a/docs/images/vercel/vercel-create-3.jpg and /dev/null differ diff --git a/docs/images/vercel/vercel-env-edit.jpg b/docs/images/vercel/vercel-env-edit.jpg deleted file mode 100644 index 5b115935..00000000 Binary files a/docs/images/vercel/vercel-env-edit.jpg and /dev/null differ diff --git a/docs/images/vercel/vercel-redeploy.jpg b/docs/images/vercel/vercel-redeploy.jpg deleted file mode 100644 index ee3483fa..00000000 Binary files a/docs/images/vercel/vercel-redeploy.jpg and /dev/null differ diff --git a/docs/synchronise-chat-logs-cn.md b/docs/synchronise-chat-logs-cn.md deleted file mode 100644 index 59f27742..00000000 --- a/docs/synchronise-chat-logs-cn.md +++ /dev/null @@ -1,31 +0,0 @@ -# 同步聊天记录 -## 准备工作 -- GitHub账号 -- 拥有自己搭建过的ChatGPT-Next-Web的服务器 -- 
[UpStash](https://upstash.com) - -## 开始教程 -1. 注册UpStash账号 -2. 创建数据库 - - ![注册登录](./images/upstash-1.png) - - ![创建数据库](./images/upstash-2.png) - - ![选择服务器](./images/upstash-3.png) - -3. 找到REST API,分别复制UPSTASH_REDIS_REST_URL和UPSTASH_REDIS_REST_TOKEN(⚠切记⚠:不要泄露Token!) - - ![复制](./images/upstash-4.png) - -4. UPSTASH_REDIS_REST_URL和UPSTASH_REDIS_REST_TOKEN复制到你的同步配置,点击**检查可用性** - - ![同步1](./images/upstash-5.png) - - 如果没什么问题,那就成功了 - - ![同步可用性完成的样子](./images/upstash-6.png) - -5. Success! - - ![好耶~!](./images/upstash-7.png) diff --git a/docs/synchronise-chat-logs-en.md b/docs/synchronise-chat-logs-en.md deleted file mode 100644 index 04d05607..00000000 --- a/docs/synchronise-chat-logs-en.md +++ /dev/null @@ -1,31 +0,0 @@ -# Synchronize Chat Logs with UpStash -## Prerequisites -- GitHub account -- Your own ChatGPT-Next-Web server set up -- [UpStash](https://upstash.com) - -## Getting Started -1. Register for an UpStash account. -2. Create a database. - - ![Register and Login](./images/upstash-1.png) - - ![Create Database](./images/upstash-2.png) - - ![Select Server](./images/upstash-3.png) - -3. Find the REST API and copy UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN (⚠Important⚠: Do not share your token!) - - ![Copy](./images/upstash-4.png) - -4. Copy UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN into your synchronization configuration, then click **Check Availability**. - - ![Synchronize 1](./images/upstash-5.png) - - If everything is in order, you've successfully completed this step. - - ![Sync Availability Check Completed](./images/upstash-6.png) - -5. Success! 
- - ![Great job~!](./images/upstash-7.png) \ No newline at end of file diff --git a/docs/synchronise-chat-logs-es.md b/docs/synchronise-chat-logs-es.md deleted file mode 100644 index 40135f1f..00000000 --- a/docs/synchronise-chat-logs-es.md +++ /dev/null @@ -1,31 +0,0 @@ -# Sincronizzare i Log delle Chat con UpStash -## Prerequisiti -- Account GitHub -- Server ChatGPT-Next-Web di propria configurazione -- [UpStash](https://upstash.com) - -## Per iniziare -1. Registrarsi per un account UpStash. -2. Creare un database. - - ![Registrarsi ed Accedere](./images/upstash-1.png) - - ![Creare un Database](./images/upstash-2.png) - - ![Selezionare il Server](./images/upstash-3.png) - -3. Trovare l'API REST e copiare UPSTASH_REDIS_REST_URL e UPSTASH_REDIS_REST_TOKEN (⚠Importante⚠: Non condividere il token!) - - ![Copia](./images/upstash-4.png) - -4. Copiare UPSTASH_REDIS_REST_URL e UPSTASH_REDIS_REST_TOKEN nella configurazione di sincronizzazione, quindi fare clic su **Verifica la Disponibilità**. - - ![Sincronizzazione 1](./images/upstash-5.png) - - Se tutto è in ordine, hai completato con successo questa fase. - - ![Verifica la Disponibilità della Sincronizzazione Completata](./images/upstash-6.png) - -5. Successo! - - ![Ottimo lavoro~!](./images/upstash-7.png) \ No newline at end of file diff --git a/docs/synchronise-chat-logs-ja.md b/docs/synchronise-chat-logs-ja.md deleted file mode 100644 index ba75110f..00000000 --- a/docs/synchronise-chat-logs-ja.md +++ /dev/null @@ -1,31 +0,0 @@ -# UpStashを使用してチャットログを同期する -## 事前準備 -- GitHubアカウント -- 自分自身でChatGPT-Next-Webのサーバーをセットアップしていること -- [UpStash](https://upstash.com) - -## 始める -1. UpStashアカウントを登録します。 -2. データベースを作成します。 - - ![登録とログイン](./images/upstash-1.png) - - ![データベースの作成](./images/upstash-2.png) - - ![サーバーの選択](./images/upstash-3.png) - -3. REST APIを見つけ、UPSTASH_REDIS_REST_URLとUPSTASH_REDIS_REST_TOKENをコピーします(⚠重要⚠:トークンを共有しないでください!) - - ![コピー](./images/upstash-4.png) - -4. 
UPSTASH_REDIS_REST_URLとUPSTASH_REDIS_REST_TOKENを同期設定にコピーし、次に「可用性を確認」をクリックします。 - - ![同期1](./images/upstash-5.png) - - すべてが正常であれば、このステップは成功です。 - - ![同期可用性チェックが完了しました](./images/upstash-6.png) - -5. 成功! - - ![お疲れ様でした~!](./images/upstash-7.png) \ No newline at end of file diff --git a/docs/synchronise-chat-logs-ko.md b/docs/synchronise-chat-logs-ko.md deleted file mode 100644 index 88e6e2dd..00000000 --- a/docs/synchronise-chat-logs-ko.md +++ /dev/null @@ -1,31 +0,0 @@ -# UpStash를 사용하여 채팅 기록 동기화 -## 사전 준비물 -- GitHub 계정 -- 자체 ChatGPT-Next-Web 서버 설정 -- [UpStash](https://upstash.com) - -## 시작하기 -1. UpStash 계정 등록 -2. 데이터베이스 생성 - - ![등록 및 로그인](./images/upstash-1.png) - - ![데이터베이스 생성](./images/upstash-2.png) - - ![서버 선택](./images/upstash-3.png) - -3. REST API를 찾아 UPSTASH_REDIS_REST_URL 및 UPSTASH_REDIS_REST_TOKEN을 복사합니다 (⚠주의⚠: 토큰을 공유하지 마십시오!) - - ![복사](./images/upstash-4.png) - -4. UPSTASH_REDIS_REST_URL 및 UPSTASH_REDIS_REST_TOKEN을 동기화 구성에 복사한 다음 **가용성 확인**을 클릭합니다. - - ![동기화 1](./images/upstash-5.png) - - 모든 것이 정상인 경우,이 단계를 성공적으로 완료했습니다. - - ![동기화 가용성 확인 완료](./images/upstash-6.png) - -5. 성공! - - ![잘 했어요~!](./images/upstash-7.png) \ No newline at end of file diff --git a/docs/translation.md b/docs/translation.md deleted file mode 100644 index ebe1d6d7..00000000 --- a/docs/translation.md +++ /dev/null @@ -1,12 +0,0 @@ -# How to add a new translation? - -Assume that we are adding a new translation for `new`. - -1. copy `app/locales/en.ts` to `app/locales/new.ts`; -2. edit `new.ts`, change `const en: LocaleType = ` to `const new: PartialLocaleType`, and `export default new;`; -3. edit `app/locales/index.ts`: -4. `import new from './new.ts'`; -5. add `new` to `ALL_LANGS`; -6. add `new: "new lang"` to `ALL_LANG_OPTIONS`; -7. translate the strings in `new.ts`; -8. submit a pull request, and the author will merge it. 
diff --git a/docs/user-manual-cn.md b/docs/user-manual-cn.md deleted file mode 100644 index 6109fcf5..00000000 --- a/docs/user-manual-cn.md +++ /dev/null @@ -1,101 +0,0 @@ -# 用户手册 User Manual - -> No english version yet, please read this doc with ChatGPT or other translation tools. - -本文档用于解释 NextChat 的部分功能介绍和设计原则。 - -## 面具 (Mask) - -### 什么是面具?它和提示词的区别是什么? - -面具 = 多个预设提示词 + 模型设置 + 对话设置。 - -其中预设提示词(Contextual Prompts)一般用于 In-Context Learning,用于让 ChatGPT 生成更加符合要求的输出,也可以增加系统约束或者输入有限的额外知识。 - -模型设置则顾名思义,使用此面具创建的对话都会默认使用对应的模型参数。 - -对话设置是与对话体验相关的一系列设置,我们会在下方的章节中依次介绍。 - -### 如何添加一个预设面具? - -目前仅能够通过编辑源代码的方式添加预设面具,请根据需要编辑 [mask](../app/masks/) 目录下对应语言的文件即可。 - -编辑步骤如下: - -1. 在 NextChat 中配置好一个面具; -2. 使用面具编辑页面的下载按钮,将面具保存为 JSON 格式; -3. 让 ChatGPT 帮你将 json 文件格式化为对应的 ts 代码; -4. 放入对应的 .ts 文件。 - -后续会增加使用旁加载的方式加载面具。 - -## 对话 (Chat) - -### 对话框上方的按钮的作用 - -在默认状态下,将鼠标移动到按钮上,即可查看按钮的文字说明,我们依次介绍: - -- 对话设置:当前对话的设置,它与全局设置的关系,请查看下一小节的说明; -- 颜色主题:点击即可在自动、暗黑、浅色之间轮换; -- 快捷指令:项目内置的快捷填充预设提示词,也可以在对话框中输入 / 进行搜索; -- 所有面具:进入面具页面; -- 清除聊天:插入一个清除标记,标记上方的聊天将不会发给 GPT,效果相当于清除了当前对话,当然,你也可以再次点击该按钮,可取消清除; -- 模型设置:更改当前对话的模型,注意,此按钮只会修改当前对话的模型,并不会修改全局默认模型。 - -### 对话内设置与全局设置的关系 - -目前有两处设置入口: - -1. 页面左下角的设置按钮,进入后是全局设置页; -2. 对话框上方的设置按钮,进入后是对话设置页。 - -在新建对话后,该对话的设置默认与全局设置保持同步,修改全局设置,则新建对话的对话内设置也会被同步修改。 - -一旦用户手动更改过对话内设置,则对话内设置将与全局设置断开同步,此时更改全局设置,将不会对该对话生效。 - -如果想恢复两者的同步关系,可以将“对话内设置 -> 使用全局设置”选项勾选。 - -### 对话内设置项的含义 - -点开对话框上方的按钮,进入对话内设置,内容从上到下依次为: - -- 预设提示词列表:可以增加、删除、排序预设提示词 -- 角色头像:顾名思义 -- 角色名称:顾名思义 -- 隐藏预设对话:隐藏后,预设提示词不会出现在聊天界面 -- 使用全局设置:用于表示当前对话是否使用全局对话设置 -- 模型设置选项:剩余的选项与全局设置选项含义一致,见下一小节 - -### 全局设置项的含义 - -- model / temperature / top_p / max_tokens / presence_penalty / frequency_penalty 均为 ChatGPT 的设置参数,详情请查阅 OpenAI 官方文档,再次不再赘述; -- 注入系统级提示信息、用户输入预处理:详情请看 [https://github.com/Yidadaa/ChatGPT-Next-Web/issues/2144](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/2144) -- 附带历史消息数:用户每次输入消息并发送时,所携带的最近 n 条消息数量; -- 历史消息长度压缩阈值:当已经产生的聊天字数达到该数值以后,则自动触发历史摘要功能; -- 历史摘要:是否启用历史摘要功能。 - -### 什么是历史摘要? 
- -历史摘要功能,也是历史消息压缩功能,是保证长对话场景下保持历史记忆的关键,合理使用该功能可以在不丢失历史话题信息的情况下,节省所使用的 token。 - -由于 ChatGPT API 的长度限制,我们以 3.5 模型为例,它只能接受小于 4096 tokens 的对话消息,一旦超出这个数值,就会报错。 - -同时为了让 ChatGPT 理解我们对话的上下文,往往会携带多条历史消息来提供上下文信息,而当对话进行一段时间之后,很容易就会触发长度限制。 - -为了解决此问题,我们增加了历史记录压缩功能,假设阈值为 1000 字符,那么每次用户产生的聊天记录超过 1000 字符时,都会将没有被总结过的消息,发送给 ChatGPT,让其产生一个 100 字所有的摘要。 - -这样,历史信息就从 1000 字压缩到了 100 字,这是一种有损压缩,但已能满足大多数使用场景。 - -### 什么时候应该关闭历史摘要? - -历史摘要可能会影响 ChatGPT 的对话质量,所以如果对话场景是翻译、信息提取等一次性对话场景,请直接关闭历史摘要功能,并将历史消息数设置为 0。 - -### 当用户发送一条消息时,有哪些信息被发送出去了? - -当用户在对话框输入了一条消息后,发送给 ChatGPT 的消息,包含以下几个部分: - -1. 系统级提示词:用于尽可能贴近 ChatGPT 官方 WebUI 的使用体验,可在设置中关闭此信息; -2. 历史摘要:作为长期记忆,提供长久但模糊的上下文信息; -3. 预设提示词:当前对话内设置的预设提示词,用于 In-Context Learning 或者注入系统级限制; -4. 最近 n 条对话记录:作为短期记忆,提供短暂但精确的上下文信息; -5. 用户当前输入的消息。 diff --git a/docs/vercel-cn.md b/docs/vercel-cn.md deleted file mode 100644 index 51018d5d..00000000 --- a/docs/vercel-cn.md +++ /dev/null @@ -1,39 +0,0 @@ -# Vercel 的使用说明 - -## 如何新建项目 -当你从 Github fork 本项目之后,需要重新在 Vercel 创建一个全新的 Vercel 项目来重新部署,你需要按照下列步骤进行。 - -![vercel-create-1](./images/vercel/vercel-create-1.jpg) -1. 进入 Vercel 控制台首页; -2. 点击 Add New; -3. 选择 Project。 - -![vercel-create-2](./images/vercel/vercel-create-2.jpg) -1. 在 Import Git Repository 处,搜索 chatgpt-next-web; -2. 选中新 fork 的项目,点击 Import。 - -![vercel-create-3](./images/vercel/vercel-create-3.jpg) -1. 在项目配置页,点开 Environmane Variables 开始配置环境变量; -2. 依次新增名为 OPENAI_API_KEY 和 CODE ([访问密码](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/357296986609c14de10bf210871d30e2f67a8784/docs/faq-cn.md#%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F-code-%E6%98%AF%E4%BB%80%E4%B9%88%E5%BF%85%E9%A1%BB%E8%AE%BE%E7%BD%AE%E5%90%97)) 的环境变量; -3. 填入环境变量对应的值; -4. 点击 Add 确认增加环境变量; -5. 请确保你添加了 OPENAI_API_KEY,否则无法使用; -6. 点击 Deploy,创建完成,耐心等待 5 分钟左右部署完成。 - -## 如何增加自定义域名 -[TODO] - -## 如何更改环境变量 -![vercel-env-edit](./images/vercel/vercel-env-edit.jpg) -1. 进去 Vercel 项目内部控制台,点击顶部的 Settings 按钮; -2. 点击左侧的 Environment Variables; -3. 点击已有条目的右侧按钮; -4. 
选择 Edit 进行编辑,然后保存即可。 - -⚠️️ 注意:每次修改完环境变量,你都需要[重新部署项目](#如何重新部署)来让改动生效! - -## 如何重新部署 -![vercel-redeploy](./images/vercel/vercel-redeploy.jpg) -1. 进入 Vercel 项目内部控制台,点击顶部的 Deployments 按钮; -2. 选择列表最顶部一条的右侧按钮; -3. 点击 Redeploy 即可重新部署。 diff --git a/docs/vercel-es.md b/docs/vercel-es.md deleted file mode 100644 index 6cbe533b..00000000 --- a/docs/vercel-es.md +++ /dev/null @@ -1,48 +0,0 @@ -# Instrucciones de uso de Verbel - -## Cómo crear un nuevo proyecto - -Cuando bifurca este proyecto desde Github y necesita crear un nuevo proyecto de Vercel en Vercel para volver a implementarlo, debe seguir los pasos a continuación. - -![vercel-create-1](./images/vercel/vercel-create-1.jpg) - -1. Vaya a la página de inicio de la consola de Vercel; -2. Haga clic en Agregar nuevo; -3. Seleccione Proyecto. - -![vercel-create-2](./images/vercel/vercel-create-2.jpg) - -1. En Import Git Repository, busque chatgpt-next-web; -2. Seleccione el proyecto de la nueva bifurcación y haga clic en Importar. - -![vercel-create-3](./images/vercel/vercel-create-3.jpg) - -1. En la página de configuración del proyecto, haga clic en Variables de entorno para configurar las variables de entorno; -2. Agregar variables de entorno denominadas OPENAI_API_KEY y CODE; -3. Rellenar los valores correspondientes a las variables de entorno; -4. Haga clic en Agregar para confirmar la adición de variables de entorno; -5. Asegúrese de agregar OPENAI_API_KEY, de lo contrario no funcionará; -6. Haga clic en Implementar, créelo y espere pacientemente unos 5 minutos a que se complete la implementación. - -## Cómo agregar un nombre de dominio personalizado - -\[TODO] - -## Cómo cambiar las variables de entorno - -![vercel-env-edit](./images/vercel/vercel-env-edit.jpg) - -1. Vaya a la consola interna del proyecto Vercel y haga clic en el botón Configuración en la parte superior; -2. Haga clic en Variables de entorno a la izquierda; -3. Haga clic en el botón a la derecha de una entrada existente; -4. 
Seleccione Editar para editarlo y, a continuación, guárdelo. - -⚠️️ Nota: Lo necesita cada vez que modifique las variables de entorno[Volver a implementar el proyecto](#如何重新部署)para que los cambios surtan efecto! - -## Cómo volver a implementar - -![vercel-redeploy](./images/vercel/vercel-redeploy.jpg) - -1. Vaya a la consola interna del proyecto Vercel y haga clic en el botón Implementaciones en la parte superior; -2. Seleccione el botón derecho del artículo superior de la lista; -3. Haga clic en Volver a implementar para volver a implementar. diff --git a/docs/vercel-ja.md b/docs/vercel-ja.md deleted file mode 100644 index dfdd034c..00000000 --- a/docs/vercel-ja.md +++ /dev/null @@ -1,48 +0,0 @@ -# Vercel 使用説明書 - -## 新規プロジェクトの作成方法 - -このプロジェクトを GitHub からフォークし、Vercel で新しい Vercel プロジェクトを作成して再デプロイする必要がある場合は、以下の手順に従ってください。 - -![vercel-create-1](./images/vercel/vercel-create-1.jpg) - -1. Vercel コンソールのホームページにアクセスします; -2. 新規追加をクリックする; -3. プロジェクトを選択します。 - -![vercel-create-2](./images/vercel/vercel-create-2.jpg) - -1. Git リポジトリのインポートで、chatgpt-next-web を検索します; -2 .新しいフォークプロジェクトを選択し、インポートをクリックします。 - -![vercel-create-3](./images/vercel/vercel-create-3.jpg) - -1. Project Settings ページで、Environment Variables をクリックして環境変数を設定する; -2. OPENAI_API_KEY と CODE という名前の環境変数を追加します; -3. 環境変数に対応する値を入力します; -4. Add をクリックして、環境変数の追加を確認する; -5. OPENAI_API_KEY を必ず追加してください; -6. Deploy をクリックして作成し、デプロイが完了するまで約 5 分間辛抱強く待つ。 - -## カスタムドメイン名の追加方法 - -\[TODO] - -## 環境変数の変更方法 - -![vercel-env-edit](./images/vercel/vercel-env-edit.jpg) - -1. 内部 Vercel プロジェクトコンソールに移動し、上部の設定ボタンをクリックします; -2. 左側の Environment Variables をクリックします; -3. 既存のエントリーの右側のボタンをクリックします; -4. 編集を選択して編集し、保存する。 - -⚠️️ 注意: [プロジェクトの再デプロイ](#再実装の方法)環境変数を変更するたびに、変更を有効にするために必要です! - -## 再実装の方法 - -![vercel-redeploy](./images/vercel/vercel-redeploy.jpg) - -1. Vercelプロジェクトの内部コンソールに移動し、一番上のDeploymentsボタンをクリックします; -2. リストの一番上の項目の右のボタンを選択します; -3. 
再デプロイをクリックして再デプロイします。 diff --git a/docs/vercel-ko.md b/docs/vercel-ko.md deleted file mode 100644 index 725a827d..00000000 --- a/docs/vercel-ko.md +++ /dev/null @@ -1,39 +0,0 @@ -# Vercel 사용 방법 - -## 새 프로젝트 생성 방법 -이 프로젝트를 Github에서 포크한 후, 다시 배포하려면 Vercel에서 새로운 Vercel 프로젝트를 생성해야 하며, 다음 단계를 따라야 합니다. - -![vercel-create-1](./images/vercel/vercel-create-1.jpg) -1. Vercel 콘솔 홈 페이지로 이동합니다; -2. 새로 추가를 클릭합니다; -3. 프로젝트를 선택합니다. - -![vercel-create-2](./images/vercel/vercel-create-2.jpg) -1. Git 리포지토리 가져오기에서 chatgpt-next-web을 검색합니다. 2. 새 포크를 선택합니다; -2. 새로 포크된 프로젝트를 선택하고 가져오기를 클릭합니다. - -![vercel-create-3](./images/vercel/vercel-create-3.jpg) -1. 프로젝트 구성 페이지에서 환경 변수 설정을 클릭하여 환경 변수 설정을 시작합니다; -2. OPENAI_API_KEY, CODE ([Access Code](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/357296986609c14de10bf210871d30e2f67a8784/docs/faq-cn.md#%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F-code-%E6%98%AF%E4%BB%80%E4%B9%88%E5%BF%85%E9%A1%BB%E8%AE%BE%E7%BD%AE%E5%90%97)). 환경 변수를 설정합니다; -3. 환경 변수의 값을 입력합니다; -4. 추가를 클릭하여 환경 변수 추가를 확인합니다; -5. OPENAI_API_KEY를 추가해야 하며, 그렇지 않으면 작동하지 않습니다; -6. 배포를 클릭하여 도메인 이름 생성을 완료하고 배포가 완료될 때까지 약 5분간 기다립니다. - -## 사용자 정의 도메인 네임 추가 방법 -[TODO] - -## 환경 변수 변경 방법 -![vercel-env-edit](./images/vercel/vercel-env-edit.jpg) -1. 버셀 프로젝트의 내부 콘솔로 이동하여 상단의 설정 버튼을 클릭합니다; -2. 왼쪽의 환경 변수를 클릭합니다; -3. 기존 항목 오른쪽에 있는 버튼을 클릭합니다; -4. 편집을 선택하여 수정하고 저장합니다. - -⚠️️ 참고: 환경 변수를 변경할 때마다 [프로젝트를 재배포](#如何重新部署)해야 변경 사항을 적용할 수 있습니다! - -## 재배포 방법 -![vercel-redeploy](./images/vercel/vercel-redeploy.jpg) -1. 버셀 내부 프로젝트 콘솔로 이동하여 상단의 배포 버튼을 클릭합니다; -2. 목록에서 맨 위 항목 오른쪽에 있는 버튼을 선택합니다; -3. 재배포를 클릭하여 재배포합니다. 
\ No newline at end of file diff --git a/licenses/LICENSE.ChatGPT-Next-Web.txt b/licenses/LICENSE.ChatGPT-Next-Web.txt new file mode 100644 index 00000000..9470dc96 --- /dev/null +++ b/licenses/LICENSE.ChatGPT-Next-Web.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Zhang Yifei + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/next.config.mjs b/next.config.mjs index daaeba46..389c9aed 100644 --- a/next.config.mjs +++ b/next.config.mjs @@ -8,7 +8,7 @@ console.log("[Next] build with chunk: ", !disableChunk); /** @type {import('next').NextConfig} */ const nextConfig = { - webpack(config) { + webpack(config, { isServer }) { config.module.rules.push({ test: /\.svg$/, use: ["@svgr/webpack"], @@ -24,6 +24,16 @@ const nextConfig = { child_process: false, }; + if (!isServer) { + config.resolve.fallback = { + ...config.resolve.fallback, // if you miss it, all the other options in fallback, specified + // by next.js will be dropped. 
Doesn't make much sense, but how it is + fs: false, // the solution + module: false, + perf_hooks: false, + }; + } + return config; }, output: mode, diff --git a/package.json b/package.json index 9dbae820..90f7c192 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { - "name": "nextchat", + "name": "web-llm-chat", "private": false, - "license": "mit", + "license": "Apache-2.0", "scripts": { "dev": "next dev", "build": "cross-env BUILD_MODE=standalone next build", @@ -18,6 +18,7 @@ "dependencies": { "@fortaine/fetch-event-source": "^3.0.6", "@hello-pangea/dnd": "^16.5.0", + "@mlc-ai/web-llm": "^0.2.35", "@next/third-parties": "^14.1.0", "@svgr/webpack": "^6.5.1", "@vercel/analytics": "^0.1.11", @@ -45,8 +46,8 @@ }, "devDependencies": { "@tauri-apps/cli": "1.5.11", - "@types/node": "^20.11.30", - "@types/react": "^18.2.70", + "@types/node": "^20.12.11", + "@types/react": "^18.3.1", "@types/react-dom": "^18.2.7", "@types/react-katex": "^3.0.0", "@types/spark-md5": "^3.0.4", @@ -58,8 +59,9 @@ "husky": "^8.0.0", "lint-staged": "^13.2.2", "prettier": "^3.0.2", - "typescript": "5.2.2", - "webpack": "^5.88.1" + "typescript": "^5.4.5", + "webpack": "^5.88.1", + "zustymiddlewarets": "^1.4.2" }, "resolutions": { "lint-staged/yaml": "^2.2.2" diff --git a/public/favicon.ico b/public/favicon.ico index b5e8234c..805bf39f 100644 Binary files a/public/favicon.ico and b/public/favicon.ico differ diff --git a/yarn.lock b/yarn.lock index 66924bf4..e5c99b66 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1218,6 +1218,11 @@ "@jridgewell/resolve-uri" "3.1.0" "@jridgewell/sourcemap-codec" "1.4.14" +"@mlc-ai/web-llm@^0.2.35": + version "0.2.35" + resolved "https://registry.yarnpkg.com/@mlc-ai/web-llm/-/web-llm-0.2.35.tgz#69259fa5cce0615c851aeef7e75edea03925e174" + integrity sha512-ud56bL4A1jluQSP24TPAMvda08AN89WC7NlpjIqCcYWw3xd4wDYmgJ9RnusqFYE8hLF+jrEYG3yUvAbWDHgfoQ== + "@next/env@13.4.9": version "13.4.9" resolved 
"https://registry.yarnpkg.com/@next/env/-/env-13.4.9.tgz#b77759514dd56bfa9791770755a2482f4d6ca93e" @@ -1594,13 +1599,20 @@ resolved "https://registry.yarnpkg.com/@types/ms/-/ms-0.7.31.tgz#31b7ca6407128a3d2bbc27fe2d21b345397f6197" integrity sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA== -"@types/node@*", "@types/node@^20.11.30": +"@types/node@*": version "20.11.30" resolved "https://registry.yarnpkg.com/@types/node/-/node-20.11.30.tgz#9c33467fc23167a347e73834f788f4b9f399d66f" integrity sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw== dependencies: undici-types "~5.26.4" +"@types/node@^20.12.11": + version "20.12.11" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.12.11.tgz#c4ef00d3507000d17690643278a60dc55a9dc9be" + integrity sha512-vDg9PZ/zi+Nqp6boSOT7plNuthRugEKixDv5sFTIpkE89MmNtEArAShI4mxuX2+UrLEe9pxC1vm2cjm9YlWbJw== + dependencies: + undici-types "~5.26.4" + "@types/parse-json@^4.0.0": version "4.0.0" resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" @@ -1625,7 +1637,7 @@ dependencies: "@types/react" "*" -"@types/react@*", "@types/react@^18.2.70": +"@types/react@*": version "18.2.70" resolved "https://registry.yarnpkg.com/@types/react/-/react-18.2.70.tgz#89a37f9e0a6a4931f4259c598f40fd44dd6abf71" integrity sha512-hjlM2hho2vqklPhopNkXkdkeq6Lv8WSZTpr7956zY+3WS5cfYUewtCzsJLsbW5dEv3lfSeQ4W14ZFeKC437JRQ== @@ -1634,6 +1646,14 @@ "@types/scheduler" "*" csstype "^3.0.2" +"@types/react@^18.3.1": + version "18.3.1" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.3.1.tgz#fed43985caa834a2084d002e4771e15dfcbdbe8e" + integrity sha512-V0kuGBX3+prX+DQ/7r2qsv1NsdfnCLnTgnRJ1pYnxykBhGMz+qj+box5lq7XsO5mtZsBqpjwwTu/7wszPfMBcw== + dependencies: + "@types/prop-types" "*" + csstype "^3.0.2" + "@types/scheduler@*": version "0.16.3" resolved 
"https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.3.tgz#cef09e3ec9af1d63d2a6cc5b383a737e24e6dcf5" @@ -5845,10 +5865,10 @@ typed-array-length@^1.0.4: for-each "^0.3.3" is-typed-array "^1.1.9" -typescript@5.2.2: - version "5.2.2" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.2.2.tgz#5ebb5e5a5b75f085f22bc3f8460fba308310fa78" - integrity sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w== +typescript@^5.4.5: + version "5.4.5" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.4.5.tgz#42ccef2c571fdbd0f6718b1d1f5e6e5ef006f611" + integrity sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ== unbox-primitive@^1.0.2: version "1.0.2" @@ -6197,6 +6217,20 @@ zustand@^4.3.8: dependencies: use-sync-external-store "1.2.0" +zustand@^4.5.1: + version "4.5.2" + resolved "https://registry.yarnpkg.com/zustand/-/zustand-4.5.2.tgz#fddbe7cac1e71d45413b3682cdb47b48034c3848" + integrity sha512-2cN1tPkDVkwCy5ickKrI7vijSjPksFRfqS6237NzT0vqSsztTNnQdHw9mmN7uBdk3gceVXU0a+21jFzFzAc9+g== + dependencies: + use-sync-external-store "1.2.0" + +zustymiddlewarets@^1.4.2: + version "1.4.2" + resolved "https://registry.yarnpkg.com/zustymiddlewarets/-/zustymiddlewarets-1.4.2.tgz#8fff09e275f2349a67af94c873b137c467b58f6c" + integrity sha512-fwXF02TgFtrtxSwgyQg/mlFGU1lDC8bgDDQiKTARt2TqC508jTiBD/6ztb0yZ4Qp+fVBjfbOJN1JLx2PXNW+HQ== + dependencies: + zustand "^4.5.1" + zwitch@^2.0.0: version "2.0.4" resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-2.0.4.tgz#c827d4b0acb76fc3e685a4c6ec2902d51070e9d7"