Skip to content

Commit

Permalink
Merge pull request #21 from firstbatchxyz/erhant/parser-logic-update
Browse files Browse the repository at this point in the history
feat: historic messages & refactors
  • Loading branch information
erhant authored Nov 17, 2024
2 parents ca1031f + 4c745ad commit f992045
Show file tree
Hide file tree
Showing 14 changed files with 826 additions and 829 deletions.
1,080 changes: 475 additions & 605 deletions Cargo.lock

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]
name = "dkn-oracle"
description = "Dria Knowledge Network: Oracle Node"
version = "0.1.7"
version = "0.1.8"
edition = "2021"
license = "Apache-2.0"
readme = "README.md"
Expand Down Expand Up @@ -36,7 +36,8 @@ bytes = "1.7.1"
rand = "0.8.5"
reqwest = "0.12.5"

# hex, serde
# b64, hex, serde
base64 = "0.22.1"
hex = "0.4.3"
hex-literal = "0.4.1"
serde = "1.0.204"
Expand All @@ -49,4 +50,3 @@ clap = { version = "4.5.13", features = ["derive", "env"] }
# there is a lot of unused stuff here, but everything breaks if you use the minimal set
# because Bundlr SDK is not maintained at all
bundlr-sdk = { version = "0.5.0" }
base64 = "0.22.1"
11 changes: 6 additions & 5 deletions src/commands/coordinator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -92,20 +92,21 @@ pub async fn run_oracle(
}

// handle new tasks with subscription
let event_poller = node
.subscribe_to_tasks()
.await
.wrap_err("could not subscribe to tasks")?;
log::info!(
"Subscribed to LLMOracleCoordinator ({}) as {}",
"Subscribing to LLMOracleCoordinator ({}) as {}",
node.addresses.coordinator,
kinds
.iter()
.map(|kind| kind.to_string())
.collect::<Vec<String>>()
.join(", ")
);
let event_poller = node
.subscribe_to_tasks()
.await
.wrap_err("could not subscribe to tasks")?;

log::info!("Listening for events...");
event_poller
.into_stream()
.for_each(|log| async {
Expand Down
1 change: 1 addition & 0 deletions src/commands/token.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ pub async fn claim_rewards(node: &DriaOracle) -> Result<()> {
// transfer rewards
node.transfer_from(node.addresses.coordinator, node.address(), allowance.amount)
.await?;
log::info!("Rewards claimed: {}.", allowance);
}

Ok(())
Expand Down
21 changes: 12 additions & 9 deletions src/compute/handlers/generation.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use crate::{
compute::WorkflowsExt,
compute::Request,
contracts::{bytes32_to_string, bytes_to_string},
data::Arweave,
mine_nonce, DriaOracle,
Expand All @@ -8,7 +8,7 @@ use alloy::{
primitives::{FixedBytes, U256},
rpc::types::TransactionReceipt,
};
use dkn_workflows::{DriaWorkflowsConfig, Executor};
use dkn_workflows::DriaWorkflowsConfig;
use eyre::{Context, Result};

/// Handles a generation request.
Expand Down Expand Up @@ -50,14 +50,17 @@ pub async fn handle_generation(
// execute task
log::debug!("Executing the workflow");
let protocol_string = bytes32_to_string(&protocol)?;
let executor = Executor::new(model);
let (output, metadata, use_storage) = executor
.execute_raw(&request.input, &protocol_string)
.await?;
let mut input = Request::try_parse_bytes(&request.input).await?;
let output = input.execute(model, Some(node)).await?;
log::debug!("Output: {}", output);

// post-processing
log::debug!("Post-processing the output");
let (output, metadata, use_storage) = Request::post_process(output, &protocol_string).await?;

// do the Arweave trick for large inputs
log::debug!("Uploading to Arweave if required");
let arweave = Arweave::new_from_env().wrap_err("could not create Arweave instance")?;
// uploading to storage
log::debug!("Uploading output to storage");
let arweave = Arweave::new_from_env()?;
let output = if use_storage {
arweave.put_if_large(output).await?
} else {
Expand Down
2 changes: 1 addition & 1 deletion src/compute/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@ mod nonce;
pub use nonce::mine_nonce;

mod workflows;
pub use workflows::WorkflowsExt;
pub use workflows::*;
149 changes: 0 additions & 149 deletions src/compute/workflows/executor.rs

This file was deleted.

56 changes: 3 additions & 53 deletions src/compute/workflows/mod.rs
Original file line number Diff line number Diff line change
@@ -1,56 +1,6 @@
mod executor;
pub use executor::WorkflowsExt;

mod postprocess;

#[cfg(test)]
mod tests {
use super::*;
use alloy::primitives::Bytes;
use dkn_workflows::{Entry, Executor, Model, ProgramMemory};

#[tokio::test]
#[ignore = "run this manually"]
async fn test_ollama_generation() {
dotenvy::dotenv().unwrap();
let executor = Executor::new(Model::Llama3_1_8B);
let (output, _, _) = executor
.execute_raw(&Bytes::from_static(b"What is the result of 2 + 2?"), "")
.await
.unwrap();

// funny test but it should pass
println!("Output:\n{}", output);
// assert!(output.contains('4')); // FIXME: make this use bytes
}

#[tokio::test]
#[ignore = "run this manually"]
async fn test_openai_generation() {
dotenvy::dotenv().unwrap();
let executor = Executor::new(Model::Llama3_1_8B);
let (output, _, _) = executor
.execute_raw(&Bytes::from_static(b"What is the result of 2 + 2?"), "")
.await
.unwrap();

// funny test but it should pass
println!("Output:\n{}", output);
// assert!(output.contains('4')); // FIXME: make this use bytes
}
mod presets;

/// Test the generation workflow with a plain input.
#[tokio::test]
async fn test_workflow_plain() {
dotenvy::dotenv().unwrap();
let executor = Executor::new(Model::GPT4o);
let mut memory = ProgramMemory::new();
let workflow = executor.get_generation_workflow().unwrap();
let input = Entry::try_value_or_str("What is 2 + 2?");
let output = executor
.execute(Some(&input), workflow, &mut memory)
.await
.unwrap();
println!("Output:\n{}", output);
}
}
mod requests;
pub use requests::Request;
52 changes: 52 additions & 0 deletions src/compute/workflows/presets/chat.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
{
"name": "LLM generation",
"description": "Directly generate text with input",
"config": {
"max_steps": 10,
"max_time": 50,
"tools": [""]
},
"external_memory": {
"context": [""],
"question": [""],
"answer": [""]
},
"tasks": [
{
"id": "A",
"name": "Generate with history",
"description": "Expects an array of messages for generation",
"messages": [],
"inputs": [],
"operator": "generation",
"outputs": [
{
"type": "write",
"key": "result",
"value": "__result"
}
]
},
{
"id": "__end",
"name": "end",
"description": "End of the task",
"messages": [{ "role": "user", "content": "End of the task" }],
"inputs": [],
"operator": "end",
"outputs": []
}
],
"steps": [
{
"source": "A",
"target": "__end"
}
],
"return_value": {
"input": {
"type": "read",
"key": "result"
}
}
}
11 changes: 8 additions & 3 deletions src/compute/workflows/presets/generation.json
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,13 @@
{
"id": "A",
"name": "Generate",
"description": "",
"prompt": "{text}",
"description": "Executes a simple generation request",
"messages": [
{
"role": "user",
"content": "{{text}}"
}
],
"inputs": [
{
"name": "text",
Expand All @@ -40,7 +45,7 @@
"id": "__end",
"name": "end",
"description": "End of the task",
"prompt": "End of the task",
"messages": [{ "role": "user", "content": "End of the task" }],
"inputs": [],
"operator": "end",
"outputs": []
Expand Down
Loading

0 comments on commit f992045

Please sign in to comment.