diff --git a/.github/workflows/deploy-to-node.yaml b/.github/workflows/deploy-to-node.yaml
index a34e550334..f04484c6ba 100644
--- a/.github/workflows/deploy-to-node.yaml
+++ b/.github/workflows/deploy-to-node.yaml
@@ -81,6 +81,7 @@ jobs:
BACKEND_CORS_ORIGINS: ${{ vars.BACKEND_CORS_ORIGINS }}
WEB_INFERENCE_SERVER_HOST: ${{ vars.WEB_INFERENCE_SERVER_HOST }}
WEB_ENABLE_CHAT: ${{ vars.WEB_ENABLE_CHAT }}
+ WEB_BYE: ${{ vars.WEB_BYE }}
WEB_ENABLE_DRAFTS_WITH_PLUGINS: ${{ vars.WEB_ENABLE_DRAFTS_WITH_PLUGINS }}
WEB_NUM_GENERATED_DRAFTS: ${{ vars.WEB_NUM_GENERATED_DRAFTS }}
WEB_CURRENT_ANNOUNCEMENT: ${{ vars.WEB_CURRENT_ANNOUNCEMENT }}
diff --git a/README.md b/README.md
index b0bb4562d6..f8ed80496f 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,10 @@
+
+**Open Assistant has now concluded.** Please see
+[this video](https://www.youtube.com/watch?v=gqtmUHhaplo) for more information.
+

diff --git a/ansible/deploy-to-node.yaml b/ansible/deploy-to-node.yaml
index f36e020710..33898bc19c 100644
--- a/ansible/deploy-to-node.yaml
+++ b/ansible/deploy-to-node.yaml
@@ -284,6 +284,7 @@
INFERENCE_SERVER_API_KEY:
"{{ lookup('ansible.builtin.env', 'WEB_INFERENCE_SERVER_API_KEY') }}"
ENABLE_CHAT: "{{ lookup('ansible.builtin.env', 'WEB_ENABLE_CHAT') }}"
+ BYE: "{{ lookup('ansible.builtin.env', 'WEB_BYE') }}"
ENABLE_DRAFTS_WITH_PLUGINS:
"{{ lookup('ansible.builtin.env',
'WEB_ENABLE_DRAFTS_WITH_PLUGINS')}}"
diff --git a/docs/blog/2023-10-25-open-assistant-is-completed/index.mdx b/docs/blog/2023-10-25-open-assistant-is-completed/index.mdx
new file mode 100644
index 0000000000..f650c1b3cf
--- /dev/null
+++ b/docs/blog/2023-10-25-open-assistant-is-completed/index.mdx
@@ -0,0 +1,20 @@
+---
+title: OpenAssistant is Completed!
+description: OpenAssistant is Completed!
+authors: [yk]
+tags: [open-assistant, youtube]
+image: https://img.youtube.com/vi/gqtmUHhaplo/0.jpg
+---
+
+import ReactPlayer from "react-player";
+
+<ReactPlayer controls width="100%" url="https://youtu.be/gqtmUHhaplo" />
+
+The final published oasst2 dataset can be found on HuggingFace at
+[OpenAssistant/oasst2](https://huggingface.co/datasets/OpenAssistant/oasst2).
+
+
diff --git a/docs/docs/faq.md b/docs/docs/faq.md
index 91c277cb66..8566b9dfe2 100644
--- a/docs/docs/faq.md
+++ b/docs/docs/faq.md
@@ -15,12 +15,12 @@ In this page, there are some of the most frequently asked questions.
-We have released candidate supervised finetuning (SFT) models using both Pythia
-and LLaMa, as well as candidate reward models for reinforcement learning from
-human feedback training using Pythia, which you can try, and are beginning the
-process of applying (RLHF). We have also released the first version of the
-OpenAssistant Conversations dataset
-[here](https://huggingface.co/datasets/OpenAssistant/oasst1).
+This project has concluded. We have released supervised finetuning (SFT) models
+using Llama 2, LLaMa, Falcon, Pythia, and StableLM, as well as models trained
+with reinforcement learning from human feedback (RLHF) and reward models, all
+of which are available [here](https://huggingface.co/OpenAssistant). In
+addition to our models, we have released three OpenAssistant Conversations
+datasets and a [research paper](https://arxiv.org/abs/2304.07327).
@@ -31,9 +31,8 @@ OpenAssistant Conversations dataset
-You can play with our best candidate model
-[here](https://open-assistant.io/chat) and provide thumbs up/down responses to
-help us improve the model in future!
+Our online demonstration is no longer available, but the models remain available
+to download [here](https://huggingface.co/OpenAssistant).
@@ -44,37 +43,18 @@ help us improve the model in future!
-The candidate Pythia SFT models are
+All of our models are
[available on HuggingFace](https://huggingface.co/OpenAssistant) and can be
-loaded via the HuggingFace Transformers library. As such you may be able to use
-them with sufficient hardware. There are also spaces on HF which can be used to
-chat with the OA candidate without your own hardware. However, these models are
-not final and can produce poor or undesirable outputs.
+loaded via the HuggingFace Transformers library, or with other runners once
+converted. As such, you may be able to use them with sufficient hardware. There
+are also Spaces on HF which can be used to chat with the OA models without your
+own hardware. However, some of these models are not final and can produce poor
+or undesirable outputs.
-LLaMa SFT models cannot be released directly due to Meta's license but XOR
+LLaMa (v1) SFT models cannot be released directly due to Meta's license but XOR
weights are released on the HuggingFace org. Follow the process in the README
-there to obtain a full model from these XOR weights.
-
-
-
-
-
-
-### Is there an API available?
-
-
-
-There is no API currently available for Open Assistant. Any mention of an API in
-documentation is referencing the website's internal API. We understand that an
-API is a highly requested feature, but unfortunately, we can't provide one at
-this time due to a couple of reasons. Firstly, the inference system is already
-under high load and running off of compute from our sponsors. Secondly, the
-project's primary goal is currently data collection and model training, not
-providing a product.
-
-However, if you're looking to run inference, you can host the model yourself
-either on your own hardware or with a cloud provider. We appreciate your
-understanding and patience as we continue to develop this project.
+there to obtain a full model from these XOR weights. Llama 2 based models do
+not require XOR weights.
@@ -102,15 +82,13 @@ inference setup and UI locally unless you wish to assist in development.
All Open Assistant code is licensed under Apache 2.0. This means it is available
for a wide range of uses including commercial use.
-The Open Assistant Pythia based models are released as full weights and will be
-licensed under the Apache 2.0 license.
-
-The Open Assistant LLaMa based models will be released only as delta weights
-meaning you will need the original LLaMa weights to use them, and the license
-restrictions will therefore be those placed on the LLaMa weights.
+Open Assistant models are released under the license of their respective base
+models, be that Llama 2, Falcon, Pythia, or StableLM. LLaMa (v1) based models
+are only released as XOR weights, meaning you will need the original LLaMa
+weights to use them.
-The Open Assistant data is released under a Creative Commons license allowing a
-wide range of uses including commercial use.
+The Open Assistant data is released under Apache-2.0 allowing a wide range of
+uses including commercial use.
@@ -138,9 +116,8 @@ you to everyone who has taken part!
-The model code, weights, and data are free. We are additionally hosting a free
-public instance of our best current model for as long as we can thanks to
-compute donation from Stability AI via LAION!
+The model code, weights, and data are free. The free public instance of our
+best models is no longer available due to the project's conclusion.
@@ -151,10 +128,9 @@ compute donation from Stability AI via LAION!
-The current smallest (Pythia) model is 12B parameters and is challenging to run
-on consumer hardware, but can run on a single professional GPU. In future there
-may be smaller models and we hope to make progress on methods like integer
-quantisation which can help run the model on smaller hardware.
+The current smallest models are 7B parameters and are challenging to run on
+consumer hardware, but can run on a single professional GPU or be quantized to
+run on more widely available hardware.
@@ -165,13 +141,7 @@ quantisation which can help run the model on smaller hardware.
-If you want to help in the data collection for training the model, go to the
-website [https://open-assistant.io/](https://open-assistant.io/).
-
-If you want to contribute code, take a look at the
-[tasks in GitHub](https://github.com/orgs/LAION-AI/projects/3) and comment on an
-issue stating your wish to be assigned. You can also take a look at this
-[contributing guide](https://github.com/LAION-AI/Open-Assistant/blob/main/CONTRIBUTING.md).
+This project has now concluded.
@@ -190,104 +160,6 @@ well as accelerate, DeepSpeed, bitsandbytes, NLTK, and other libraries.
-## Questions about the data collection website
-
-
-
-
-### Can I use ChatGPT to help in training Open Assistant, for instance, by generating answers?
-
-
-
-No, it is against their terms of service to use it to help train other models.
-See
-[this issue](https://github.com/LAION-AI/Open-Assistant/issues/471#issuecomment-1374392299).
-ChatGPT-like answers will be removed.
-
-
-
-
-
-
-### What should I do if I don't know how to complete the task as an assistant?
-
-
-Skip it.
-
-
-
-
-
-### Should I fact check the answers by the assistant?
-
-
-
-Yes, you should try. If you are not sure, skip the task.
-
-
-
-
-
-
-### How can I see my score?
-
-
-
-In your [account settings](https://open-assistant.io/account).
-
-
-
-
-
-
-### Can we see how many data points have been collected?
-
-
-
-You can see a regularly updated interface at
-[https://open-assistant.io/stats](https://open-assistant.io/stats).
-
-
-
-
-
-
-### How do I write and label prompts?
-
-
-
-Check the
-[guidelines](https://projects.laion.ai/Open-Assistant/docs/guides/guidelines).
-
-
-
-
-
-
-### Where can I report a bug or create a new feature request?
-
-
-
-In the [GitHub issues](https://github.com/LAION-AI/Open-Assistant/issues).
-
-
-
-
-
-
-### Why am I not allowed to write about this topic, even though it isn't illegal?
-
-
-
-We want to ensure that the Open Assistant dataset is as accessible as possible.
-As such, it's necessary to avoid any harmful or offensive content that could be
-grounds for removal on sites such as Hugging Face. Likewise, we want the model
-to be trained to reject as few questions as possible, so it's important to not
-include prompts that leave the assistant with no other choice but to refuse in
-order to avoid the generation of harmful content.
-
-
-
## Questions about the development process
diff --git a/docs/docs/intro.md b/docs/docs/intro.md
index 326502bfe3..98f50762ca 100644
--- a/docs/docs/intro.md
+++ b/docs/docs/intro.md
@@ -1,3 +1,9 @@
+# Notice
+
+**Open Assistant has now concluded.** Please see
+[this video](https://www.youtube.com/watch?v=gqtmUHhaplo) for more information.
+Thank you to all those who made this project possible.
+
# Introduction
> The FAQ page is available at
diff --git a/website/src/hooks/env/BrowserEnv.ts b/website/src/hooks/env/BrowserEnv.ts
index 4d863c0d9a..8ed2f357f9 100644
--- a/website/src/hooks/env/BrowserEnv.ts
+++ b/website/src/hooks/env/BrowserEnv.ts
@@ -1,6 +1,7 @@
import { createContext, useContext } from "react";
export interface BrowserConfig {
+ BYE: boolean;
ENABLE_CHAT: boolean;
ENABLE_DRAFTS_WITH_PLUGINS: boolean;
NUM_GENERATED_DRAFTS: number;
diff --git a/website/src/pages/api/config.ts b/website/src/pages/api/config.ts
index 36c4a6b53d..33c9d8c70a 100644
--- a/website/src/pages/api/config.ts
+++ b/website/src/pages/api/config.ts
@@ -4,6 +4,7 @@ import { BrowserConfig } from "src/types/Config";
// don't put sensitive information here
const config: BrowserConfig = {
+ BYE: boolean(process.env.BYE),
ENABLE_CHAT: boolean(process.env.ENABLE_CHAT),
ENABLE_DRAFTS_WITH_PLUGINS: boolean(process.env.ENABLE_DRAFTS_WITH_PLUGINS),
NUM_GENERATED_DRAFTS: Number(process.env.NUM_GENERATED_DRAFTS),
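
For context on the new `BYE: boolean(process.env.BYE)` entry: environment variables arrive as strings at runtime, so a plain `Boolean()` cast would treat the string `"false"` as true. Below is a minimal sketch of a string-to-boolean coercion helper with the behavior this line presumably relies on; the project's actual `boolean` utility may differ.

```ts
// Hypothetical coercion helper: only explicit truthy strings map to true.
// Boolean("false") === true, so the global Boolean() is not suitable here.
function boolean(value: unknown): boolean {
  if (typeof value !== "string") return false;
  return ["true", "1", "yes", "on"].includes(value.trim().toLowerCase());
}

console.log(boolean("true"));    // true
console.log(boolean("false"));   // false
console.log(boolean(undefined)); // false
```

With a helper like this, setting `WEB_BYE=true` in the deployment environment flips the browser-visible flag, while leaving it unset keeps the flag off.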
diff --git a/website/src/pages/bye.tsx b/website/src/pages/bye.tsx
new file mode 100644
index 0000000000..0ad9018b04
--- /dev/null
+++ b/website/src/pages/bye.tsx
@@ -0,0 +1,58 @@
+import Image from "next/image";
+import Link from "next/link";
+import { Container } from "src/components/Container";
+export { getStaticProps } from "src/lib/defaultServerSideProps";
+
+const ByePage = () => {
+  // NOTE: the original JSX markup of this page was lost in extraction; the
+  // tags, class names, and image path below are a plausible reconstruction,
+  // with link targets taken from elsewhere in this diff.
+  return (
+    <Container className="flex flex-col items-center gap-2 text-center">
+      <Image src="/images/logos/logo.svg" alt="OpenAssistant" width={100} height={100} />
+      <h1 className="text-2xl font-bold">OpenAssistant has finished!</h1>
+      <p>
+        OpenAssistant collected data from over 13'000 humans and released it to the public. Data, models,
+        and code are publicly available.
+      </p>
+      <p className="font-bold">Links:</p>
+      <ul>
+        <li>
+          <Link href="https://huggingface.co/OpenAssistant">HuggingFace organization</Link>
+        </li>
+        <li>
+          <Link href="https://github.com/LAION-AI/Open-Assistant">GitHub repository</Link>
+        </li>
+        <li>
+          <Link href="https://arxiv.org/abs/2304.07327">Research paper</Link>
+        </li>
+        <li>
+          <Link href="https://www.youtube.com/watch?v=gqtmUHhaplo">Concluding video</Link>
+        </li>
+      </ul>
+      <p>
+        If you're looking to support other open-data projects, check out these:
+      </p>
+      <ul>
+        <li>
+          <Link href="https://laion.ai">LAION</Link>
+        </li>
+      </ul>
+    </Container>
+  );
+};
+
+export default ByePage;
diff --git a/website/src/pages/chat/[id].tsx b/website/src/pages/chat/[id].tsx
index b8bb0b81db..c55645afe1 100644
--- a/website/src/pages/chat/[id].tsx
+++ b/website/src/pages/chat/[id].tsx
@@ -10,7 +10,8 @@ export { getServerSideProps } from "src/lib/defaultServerSideProps";
import useSWRImmutable from "swr/immutable";
const Chat = () => {
- const { query } = useRouter();
+ const router = useRouter();
+ const { query } = router;
const id = query.id as string;
const { t } = useTranslation(["common", "chat"]);
const { data: modelInfos } = useSWRImmutable("/api/chat/models", get, {
diff --git a/website/src/pages/chat/index.tsx b/website/src/pages/chat/index.tsx
index 3e2856b459..68fcdeef90 100644
--- a/website/src/pages/chat/index.tsx
+++ b/website/src/pages/chat/index.tsx
@@ -1,12 +1,22 @@
import Head from "next/head";
+import { useRouter } from "next/router";
import { useTranslation } from "next-i18next";
import React from "react";
import { ChatListBase } from "src/components/Chat/ChatListBase";
import { DashboardLayout } from "src/components/Layout";
export { getStaticProps } from "src/lib/defaultServerSideProps";
+import { useBrowserConfig } from "src/hooks/env/BrowserEnv";
const ChatList = () => {
const { t } = useTranslation();
+ const { BYE } = useBrowserConfig();
+ const router = useRouter();
+
+ React.useEffect(() => {
+ if (BYE) {
+ router.push("/bye");
+ }
+ }, [router, BYE]);
return (
<>
diff --git a/website/src/pages/contributors.tsx b/website/src/pages/contributors.tsx
new file mode 100644
index 0000000000..8f38f71269
--- /dev/null
+++ b/website/src/pages/contributors.tsx
@@ -0,0 +1,13 @@
+import { useRouter } from "next/router";
+import { useEffect } from "react";
+
+const ContributorsPage = () => {
+ const router = useRouter();
+ useEffect(() => {
+ router.push("https://ykilcher.com/oa-contributors");
+ }, [router]);
+
+ return null;
+};
+
+export default ContributorsPage;
diff --git a/website/src/pages/dashboard.tsx b/website/src/pages/dashboard.tsx
index 62d2bbdf99..17282b61d2 100644
--- a/website/src/pages/dashboard.tsx
+++ b/website/src/pages/dashboard.tsx
@@ -1,7 +1,8 @@
import { Button, Card, CardBody, Flex, Heading } from "@chakra-ui/react";
import Head from "next/head";
+import { useRouter } from "next/router";
import { useTranslation } from "next-i18next";
-import { useMemo } from "react";
+import { useEffect, useMemo } from "react";
import { LeaderboardWidget, TaskOption, WelcomeCard } from "src/components/Dashboard";
import { DashboardLayout } from "src/components/Layout";
import { get } from "src/lib/api";
@@ -17,7 +18,8 @@ import useSWR from "swr";
const Dashboard = () => {
const { t } = useTranslation(["dashboard", "common", "tasks"]);
- const { ENABLE_CHAT } = useBrowserConfig();
+ const { ENABLE_CHAT, BYE } = useBrowserConfig();
+ const router = useRouter();
const lang = useCurrentLocale();
const { data } = useSWR(API_ROUTES.AVAILABLE_TASK({ lang }), get, {
refreshInterval: 2 * 60 * 1000, //2 minutes
@@ -55,6 +57,12 @@ const Dashboard = () => {
},
};
+ useEffect(() => {
+ if (BYE) {
+ router.push("/bye");
+ }
+ }, [BYE, router]);
+
return (
<>
diff --git a/website/src/pages/index.tsx b/website/src/pages/index.tsx
index 3c33b94367..60e04cd284 100644
--- a/website/src/pages/index.tsx
+++ b/website/src/pages/index.tsx
@@ -8,16 +8,22 @@ import { CallToAction } from "src/components/CallToAction";
import { Faq } from "src/components/Faq";
import { Hero } from "src/components/Hero";
export { getDefaultServerSideProps as getStaticProps } from "src/lib/defaultServerSideProps";
+import { useBrowserConfig } from "src/hooks/env/BrowserEnv";
const Home = () => {
+ const { BYE } = useBrowserConfig();
const router = useRouter();
const { status } = useSession();
const { t } = useTranslation();
useEffect(() => {
+ if (BYE) {
+ router.push("/bye");
+ }
+
if (status === "authenticated") {
router.push("/dashboard");
}
- }, [router, status]);
+ }, [router, status, BYE]);
return (
<>
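
The `if (BYE) router.push("/bye")` effect above now appears in three places (`chat/index.tsx`, `dashboard.tsx`, and `index.tsx`). A hypothetical hook, not part of this diff, sketching how the duplicated redirect logic could be shared:

```ts
import { useEffect } from "react";
import { useRouter } from "next/router";
import { useBrowserConfig } from "src/hooks/env/BrowserEnv";

// Redirects to the farewell page whenever the BYE flag is enabled.
export const useByeRedirect = () => {
  const { BYE } = useBrowserConfig();
  const router = useRouter();

  useEffect(() => {
    if (BYE) {
      router.push("/bye");
    }
  }, [BYE, router]);
};
```

Each page would then call `useByeRedirect()` once instead of wiring up its own effect.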
diff --git a/website/src/types/Config.ts b/website/src/types/Config.ts
index 087cc55faa..37de5d9070 100644
--- a/website/src/types/Config.ts
+++ b/website/src/types/Config.ts
@@ -1,4 +1,5 @@
export interface BrowserConfig {
+ BYE: boolean;
ENABLE_CHAT: boolean;
ENABLE_DRAFTS_WITH_PLUGINS: boolean; // Whether draft messages should be generated if plugins are in use
NUM_GENERATED_DRAFTS: number;
diff --git a/website/types/env.d.ts b/website/types/env.d.ts
index 58667dd643..c338c506d7 100644
--- a/website/types/env.d.ts
+++ b/website/types/env.d.ts
@@ -9,6 +9,7 @@ declare global {
ADMIN_USERS: string;
MODERATOR_USERS: string;
INFERENCE_SERVER_HOST: string;
+ BYE: boolean;
ENABLE_CHAT: boolean;
ENABLE_DRAFTS_WITH_PLUGINS: boolean;
NUM_GENERATED_DRAFTS: number;
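
One caveat about these `ProcessEnv` declarations: at runtime every `process.env` value is a string or `undefined`, so typing `BYE` as `boolean` documents intent rather than the actual runtime type. That is why `website/src/pages/api/config.ts` coerces the raw value instead of using it directly. A small illustration of the mismatch:

```ts
// Launched as: BYE=true node server.js
// Despite the `BYE: boolean` declaration in env.d.ts, Node exposes every
// environment variable as a string; the declaration only affects the checker.
const raw = process.env.BYE;
console.log(typeof raw); // "string", not "boolean"
```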