From 75b199c81778e3989713ea81d02b2294437b78e7 Mon Sep 17 00:00:00 2001 From: tuky191 <25343132+tuky191@users.noreply.github.com> Date: Wed, 6 Nov 2024 17:03:04 +0100 Subject: [PATCH] Dockerize Nexus (#5) * initial dockerfile * docker compose for sui network * add ollama * add faucet * use local images * add publish packages * add events * finish docker compose * fix bug * update tags * cleanup * cleanup * un ignore docker .env * use tag testnet-v1.28.3 * run github action on specific paths only * fix python formatting and compose service startup * windows compatibility * add nexusctl * nexusctl * nexusctl * fixes for windows * update just * fixes for just on macos * fix just for linux * use python shell in just * change quotes to single * check if brew is installed * update readme * update readme * reformat nexusctl * change image name * update docker readme * use base config for validator compose * Update docker/README.md Co-authored-by: Christos KK Loverdos * Update docker/nexus/Dockerfile Co-authored-by: Christos KK Loverdos * Update docker/nexus/Dockerfile Co-authored-by: Christos KK Loverdos * use EOF notation * fix ignored build args * make setting up venv in build a separate script * update github action to trigger on PR * use set dockerfile syntax --------- Co-authored-by: Christos KK Loverdos --- .dockerignore | 2 + .gitattributes | 2 + .github/workflows/python.yml | 8 +- .github/workflows/talus-agentic-framework.yml | 7 +- .gitignore | 3 + docker/.env | 12 + docker/README.md | 33 ++ docker/containers.just | 31 ++ docker/docker-compose-nollama.yaml | 3 + docker/docker-compose.yaml | 4 + docker/nexus/Dockerfile | 29 ++ docker/nexus/bin/bootstrap_model.py | 82 ++++ docker/nexus/bin/setup_venv.sh | 29 ++ docker/nexus/bin/start_events.py | 83 ++++ docker/nexus/compose.yaml | 120 +++++ docker/nexusctl.py | 409 ++++++++++++++++++ docker/ollama/Dockerfile | 15 + docker/ollama/compose.yaml | 21 + docker/sui/.gitignore | 2 + docker/sui/Dockerfile | 35 ++ docker/sui/bin/publish_package.sh | 26 ++ docker/sui/compose.yaml | 236 ++++++++++ docker/sui/genesis/.dockerignore | 1 + docker/sui/genesis/Dockerfile | 28 ++ docker/sui/genesis/compose-validators.yaml | 54 +++ docker/sui/genesis/generate.py | 147 +++++++ docker/sui/genesis/overlays/common.yaml | 20 + docker/sui/genesis/requirements.txt | 2 + docker/sui/genesis/static/client.yaml | 10 + docker/sui/genesis/static/fullnode.yaml | 23 + docker/sui/genesis/static/sui.keystore | 3 + examples/.python-version | 1 + examples/README.md | 67 +-- examples/example.just | 27 +- examples/main.py | 313 ++++---------- justfile | 156 +------ nexus_sdk/.python-version | 1 + nexus_sdk/pyproject.toml | 15 + nexus_sdk/src/nexus_sdk/__init__.py | 2 + nexus_sdk/src/nexus_sdk/utils.py | 44 +- offchain/events/src/nexus_events/sui_event.py | 39 +- .../server/controllers/inference.py | 12 +- 42 files changed, 1704 insertions(+), 453 deletions(-) create mode 100644 .dockerignore create mode 100644 .gitattributes create mode 100644 docker/.env create mode 100644 docker/README.md create mode 100644 docker/containers.just create mode 100644 docker/docker-compose-nollama.yaml create mode 100644 docker/docker-compose.yaml create mode 100644 docker/nexus/Dockerfile create mode 100644 docker/nexus/bin/bootstrap_model.py create mode 100644 docker/nexus/bin/setup_venv.sh create mode 100644 docker/nexus/bin/start_events.py create mode 100644 docker/nexus/compose.yaml create mode 100644 docker/nexusctl.py create mode 100644 docker/ollama/Dockerfile create mode 100644 
docker/ollama/compose.yaml create mode 100644 docker/sui/.gitignore create mode 100644 docker/sui/Dockerfile create mode 100644 docker/sui/bin/publish_package.sh create mode 100644 docker/sui/compose.yaml create mode 100644 docker/sui/genesis/.dockerignore create mode 100644 docker/sui/genesis/Dockerfile create mode 100644 docker/sui/genesis/compose-validators.yaml create mode 100755 docker/sui/genesis/generate.py create mode 100644 docker/sui/genesis/overlays/common.yaml create mode 100644 docker/sui/genesis/requirements.txt create mode 100644 docker/sui/genesis/static/client.yaml create mode 100644 docker/sui/genesis/static/fullnode.yaml create mode 100644 docker/sui/genesis/static/sui.keystore create mode 100644 examples/.python-version create mode 100644 nexus_sdk/.python-version create mode 100644 nexus_sdk/pyproject.toml diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..94a346b --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +./offchain/events/build +./offchain/tools/build \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..9842673 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# Force LF line endings for all files +* text=auto eol=lf \ No newline at end of file diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index b9b98f1..7ec07b8 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -1,8 +1,12 @@ # Github workflow to check python code name: Python -on: [push] - +on: + push: + paths: + - "examples/**" + - "nexus_sdk/**" + - "offchain/**" jobs: # https://black.readthedocs.io/en/stable/integrations/github_actions.html formatting-check: diff --git a/.github/workflows/talus-agentic-framework.yml b/.github/workflows/talus-agentic-framework.yml index 965c1f1..7e7cd0b 100644 --- a/.github/workflows/talus-agentic-framework.yml +++ b/.github/workflows/talus-agentic-framework.yml @@ -1,8 +1,13 @@ # Github workflow to build and test the Talus Agentic Framework project name: Talus Agentic Framework -on: [push] +on: + pull_request: + push: + paths: + - "onchain/**" + - "e2e_tests/**" env: # defines what Sui version to install from the Sui's Github release page # https://github.com/MystenLabs/sui/releases diff --git a/.gitignore b/.gitignore index 4935560..68be31b 100644 --- a/.gitignore +++ b/.gitignore @@ -106,6 +106,7 @@ celerybeat.pid # Environments .env +!docker/.env .venv env/ venv/ @@ -162,3 +163,5 @@ nohup.out .idea *.iml .vscode +./docker/sui/genesis/files/ +./docker/sui/genesis/files/.venv diff --git a/docker/.env b/docker/.env new file mode 100644 index 0000000..204055e --- /dev/null +++ b/docker/.env @@ -0,0 +1,12 @@ +SUI_TAG=testnet-v1.28.3 +LLAMA_MODEL_VERSION=llama3.2:1b +LLAMA_MODEL_VERSION_TAG=llama3.2-1b +RPC_URL=http://fullnode1:9000 +WS_URL=ws://fullnode1:9000 +MODEL_URL=http://ollama:11434 +FAUCET_URL=http://faucet:5003/gas +TOOL_URL=http://tools:8080/tool/use +LLM_ASSISTANT_URL=http://tools:8080/predict +OLLAMA_DEVICE_DRIVER=nvidia +OLLAMA_DEVICE_COUNT=all +OLLAMA_DEVICE_CAPABILITIES=gpu diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000..4e3ccff --- /dev/null +++ b/docker/README.md @@ -0,0 +1,33 @@ +# README + +## Infrastructure description + +Local infra consists of following services + +- sui + - 4 validators + - faucet + - fullnode +- nexus + - events + - tools + - examples +- ollama (only on windows and linux) + +There are also a few startup services + +- sui + - build-suitools + - builds a sui 
image to a tag specified in .env + - build-genesis + - runs generate.py to generate new sui genesis.blob and validator.yaml + - publish-package + - builds and publishes the nexus smart contracts from ./onchain directory + - bootstrap-model + - bootstraps a Llama model on the Sui blockchain by creating a node and the model using nexus_sdk, then saves their details for future use. + +## Troubleshooting + +If you encounter trouble building the `build-genesis` image, try switching the context to default. + +`docker context use default` diff --git a/docker/containers.just b/docker/containers.just new file mode 100644 index 0000000..790470e --- /dev/null +++ b/docker/containers.just @@ -0,0 +1,31 @@ +set shell := ["python3", "-c"] + +[private] +default: + @__import__('os').system("just -l containers") + +[private] +[no-cd] +check: + @import os, sys; from subprocess import call; result = call("docker ps | grep -q 'examples'", shell=True); \ + print("Docker environment is already running.") if result == 0 else (print("Docker environment is not running. Starting environment...") or os.system("just containers start")) + +# Builds the Docker containers using Docker Compose +[no-cd] +build: + @print("Building Docker containers..."); __import__('os').system("python3 ./docker/nexusctl.py create") + +# Starts the Docker containers using Docker Compose +[no-cd] +start: + @print("Starting Docker containers..."); __import__('os').system("python3 ./docker/nexusctl.py start") + +# Stops the Docker containers using Docker Compose +[no-cd] +stop: + @print("Stopping Docker containers..."); __import__('os').system("python3 ./docker/nexusctl.py stop") + +# Deletes all Docker volumes related to the project using Docker Compose +[no-cd] +clean: + @print("Deleting Docker volumes..."); __import__('os').system("python3 ./docker/nexusctl.py delete") diff --git a/docker/docker-compose-nollama.yaml b/docker/docker-compose-nollama.yaml new file mode 100644 index 0000000..ef7bd8d --- /dev/null +++ b/docker/docker-compose-nollama.yaml @@ -0,0 +1,3 @@ +include: + - ./sui/compose.yaml + - ./nexus/compose.yaml diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml new file mode 100644 index 0000000..296619f --- /dev/null +++ b/docker/docker-compose.yaml @@ -0,0 +1,4 @@ +include: + - ./sui/compose.yaml + - ./ollama/compose.yaml + - ./nexus/compose.yaml diff --git a/docker/nexus/Dockerfile b/docker/nexus/Dockerfile new file mode 100644 index 0000000..de8c2f2 --- /dev/null +++ b/docker/nexus/Dockerfile @@ -0,0 +1,29 @@ +#syntax=docker/dockerfile:1 + +FROM python:3.10-slim AS builder + +ARG INSTALL_RUST=false + +ENV INSTALL_RUST=${INSTALL_RUST} + +WORKDIR /app + +RUN ls -lta + +COPY . . 
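+# Note: the setup_venv.sh script copied below comes from the extra "nexus" build context
+# (docker/nexus), declared via additional_contexts in the compose files.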
+ +COPY --from=nexus bin/setup_venv.sh /usr/local/bin/setup_venv.sh + +RUN chmod +x /usr/local/bin/setup_venv.sh + +RUN /usr/local/bin/setup_venv.sh + +FROM python:3.10-slim AS runtime + +WORKDIR /app + +COPY --from=builder /app /app + +EXPOSE 8080 + +CMD ["bash", "-c", "source .venv/bin/activate && uvicorn src.nexus_tools.server.main:app --host 0.0.0.0 --port 8080"] diff --git a/docker/nexus/bin/bootstrap_model.py b/docker/nexus/bin/bootstrap_model.py new file mode 100644 index 0000000..4b6108f --- /dev/null +++ b/docker/nexus/bin/bootstrap_model.py @@ -0,0 +1,82 @@ +# Import necessary modules +import json +from pathlib import Path +from nexus_sdk import get_sui_client_with_airdrop, create_node, create_model +import os + +shared_dir = Path(os.getenv("SHARED_DIR", ".")) +package_id_file = Path(shared_dir) / "package_id.json" +keystore_path = Path(shared_dir) / "sui.keystore" + +rpc_url = os.getenv("RPC_URL", "http://localhost:9000") +ws_url = os.getenv("WS_URL", "ws://localhost:9000") +faucet_url = os.getenv("FAUCET_URL", "http://localhost:5003/gas") + + +# Decoupled function to create node and model and save details to a file. +def create_and_save_node_and_model(client, package_id): + node_id = create_example_node(client, package_id) + llama_id, llama_owner_cap_id = create_llama_model(client, package_id, node_id) + + # Save the node details to a JSON file + shared_dir = Path(os.getenv("SHARED_DIR", ".")) + shared_dir.mkdir(parents=True, exist_ok=True) + node_details = { + "node_id": node_id, + "llama_id": llama_id, + "llama_owner_cap_id": llama_owner_cap_id, + } + with open(shared_dir / "node_details.json", "w") as f: + json.dump(node_details, f, indent=4) + + return node_id, llama_id, llama_owner_cap_id + + +# Creates a new node owned object. +def create_example_node(client, package_id): + node_id = create_node(client, package_id, "LocalNode", "CPU", 16) + if not node_id: + raise Exception("Failed to create node") + return node_id + + +# Creates llama model representation on chain. +# Returns the model ID and the model owner capability ID. +def create_llama_model(client, package_id, node_id): + model_id, model_owner_cap_id = create_model( + client=client, + package_id=package_id, + node_id=node_id, + name="llama3.2:1b", + model_hash=b"llama3.2_1b_hash", + url=os.getenv("MODEL_URL", "http://localhost:11434"), + token_price=1000, + capacity=1000000, + num_params=1000000000, + description="llama3.2 1b", + max_context_length=8192, + is_fine_tuned=False, + family="Llama3.2", + vendor="Meta", + is_open_source=True, + datasets=["test"], + ) + if not model_id: + raise Exception("Failed to create model") + return model_id, model_owner_cap_id + + +if __name__ == "__main__": + + client = get_sui_client_with_airdrop( + rpc_url=rpc_url, + ws_url=ws_url, + faucet_url=faucet_url, + keystore_path=keystore_path, + ) + with open(package_id_file, "r") as f: + package_id_list = json.load(f) + package_id = package_id_list[0] + + create_and_save_node_and_model(client, package_id) + print("environment prepared successfully") diff --git a/docker/nexus/bin/setup_venv.sh b/docker/nexus/bin/setup_venv.sh new file mode 100644 index 0000000..de0072b --- /dev/null +++ b/docker/nexus/bin/setup_venv.sh @@ -0,0 +1,29 @@ +#!/bin/sh + +if [ "$INSTALL_RUST" = "true" ]; then + apt-get update + apt-get install -y --no-install-recommends curl build-essential + curl https://sh.rustup.rs -sSf | sh -s -- -y + . 
$HOME/.cargo/env + rustup update + rustup default stable +fi + +pip install uv +uv venv -p "$PYTHON_VERSION" +export OSTYPE=${OSTYPE:-linux-gnu} +. .venv/bin/activate + +if [ "$INSTALL_RUST" = "true" ]; then + . $HOME/.cargo/env +fi + +if [ -f "pyproject.toml" ]; then + uv pip install . +else + for dir in */; do + if [ -f "$dir/pyproject.toml" ]; then + uv pip install "$dir" + fi + done +fi diff --git a/docker/nexus/bin/start_events.py b/docker/nexus/bin/start_events.py new file mode 100644 index 0000000..8ec0491 --- /dev/null +++ b/docker/nexus/bin/start_events.py @@ -0,0 +1,83 @@ +import os +import json +import subprocess +from pathlib import Path + +# Set paths +shared_dir = Path(os.getenv("SHARED_DIR", ".")) +keystore_path = Path(shared_dir) / "sui.keystore" + +# Extract details from JSON files +package_id_path = Path(shared_dir) / "package_id.json" +node_details_path = Path(shared_dir) / "node_details.json" + + +rpc_url = os.getenv("RPC_URL", "http://localhost:9000") +ws_url = os.getenv("WS_URL", "ws://localhost:9000") +tool_url = os.getenv("TOOL_URL", "http://0.0.0.0:8080/tool/use") + +# Load package ID +try: + with open(package_id_path, "r") as f: + package_id = json.load(f)[0] +except (FileNotFoundError, IndexError, json.JSONDecodeError) as e: + print(f"Error: Unable to load package ID from {package_id_path}. Details: {e}") + exit(1) + +# Load node details +try: + with open(node_details_path, "r") as f: + node_details = json.load(f) + model_owner_cap_id = node_details.get("llama_owner_cap_id") +except (FileNotFoundError, json.JSONDecodeError) as e: + print(f"Error: Unable to load node details from {node_details_path}. Details: {e}") + exit(1) + +if not model_owner_cap_id: + print("Error: Model owner capability ID is missing.") + exit(1) + +# Load SUI private key from keystore JSON +try: + with open(keystore_path, "r") as f: + keys = json.load(f) + if not keys: + raise ValueError( + "Sui keystore file is empty. Please check your Sui configuration." + ) + private_key = keys[0] # Assuming the first key is used +except (FileNotFoundError, json.JSONDecodeError, ValueError) as e: + print(f"Error: Unable to load SUI private key from {keystore_path}. Details: {e}") + exit(1) + +# Set environment variables +os.environ["PACKAGE_ID"] = package_id +os.environ["SUI_PRIVATE_KEY"] = private_key +os.environ["MODEL_OWNER_CAP_ID"] = model_owner_cap_id + +# Command to run the Python script +command = [ + "python", + "events/src/nexus_events/sui_event.py", + "--packageid", + package_id, + "--privkey", + private_key, + "--modelownercapid", + model_owner_cap_id, + "--rpc", + rpc_url, + "--ws", + ws_url, + "--toolurl", + tool_url, # New argument for tool URL +] + +print(f"Running command: {' '.join(command)}") + +# Execute the command +try: + subprocess.run(command, check=True) +except subprocess.CalledProcessError as e: + print(f"Error: Failed to execute command. 
Details: {e}") + exit(1) diff --git a/docker/nexus/compose.yaml b/docker/nexus/compose.yaml new file mode 100644 index 0000000..89658ed --- /dev/null +++ b/docker/nexus/compose.yaml @@ -0,0 +1,120 @@ +services: + tools: + container_name: tools + image: talusnetwork/nexus-tools:latest + build: + context: "../../offchain/tools/" + dockerfile: "../../docker/nexus/Dockerfile" + additional_contexts: + nexus: ../../docker/nexus + ports: + - "8080:8080" + restart: unless-stopped + environment: + OLLAMA_HOST: ${MODEL_URL} + logging: + driver: "json-file" + options: + max-file: "10" + max-size: "1g" + depends_on: + build-suitools: + condition: service_completed_successfully + publish-package: + condition: service_completed_successfully + bootstrap-model: + condition: service_completed_successfully + + events: + container_name: events + image: talusnetwork/nexus-events:latest + build: + context: "../../offchain/" + dockerfile: "../docker/nexus/Dockerfile" + additional_contexts: + nexus: ../../docker/nexus + args: + INSTALL_RUST: "true" + command: > + bash -c "source .venv/bin/activate && python start_events.py" + logging: + driver: "json-file" + options: + max-file: "10" + max-size: "1g" + environment: + RPC_URL: ${RPC_URL} + WS_URL: ${WS_URL} + TOOL_URL: ${TOOL_URL} + LLM_ASSISTANT_URL: ${LLM_ASSISTANT_URL} + SHARED_DIR: /app/shared + volumes: + - shared:/app/shared + - ./bin/start_events.py:/app/start_events.py:ro + restart: unless-stopped + depends_on: + build-suitools: + condition: service_completed_successfully + publish-package: + condition: service_completed_successfully + bootstrap-model: + condition: service_completed_successfully + tools: + condition: service_started + + bootstrap-model: + container_name: bootstrap-model + image: talusnetwork/nexus-bootstrap-model:latest + build: + context: "../../nexus_sdk/" + dockerfile: "../docker/nexus/Dockerfile" + additional_contexts: + nexus: ../../docker/nexus + args: + INSTALL_RUST: "true" + environment: + RPC_URL: ${RPC_URL} + WS_URL: ${WS_URL} + MODEL_URL: ${MODEL_URL} + FAUCET_URL: ${FAUCET_URL} + SHARED_DIR: /app/shared + volumes: + - ./bin/bootstrap_model.py:/app/bootstrap_model.py + - shared:/app/shared + command: > + bash -c "source .venv/bin/activate && python bootstrap_model.py" + restart: on-failure + depends_on: + publish-package: + condition: service_completed_successfully + faucet: + condition: service_healthy + + examples: + image: talusnetwork/nexus-examples:latest + container_name: examples + build: + context: "../../nexus_sdk/" + dockerfile: "../docker/nexus/Dockerfile" + additional_contexts: + nexus: ../../docker/nexus + args: + INSTALL_RUST: "true" + environment: + RPC_URL: ${RPC_URL} + WS_URL: ${WS_URL} + SHARED_DIR: /app/shared + volumes: + - ../../examples:/app/examples + - shared:/app/shared + command: ["tail", "-f", "/dev/null"] + restart: on-failure + depends_on: + publish-package: + condition: service_completed_successfully + bootstrap-model: + condition: service_completed_successfully + tools: + condition: service_started + events: + condition: service_started diff --git a/docker/nexusctl.py b/docker/nexusctl.py new file mode 100644 index 0000000..5b3f139 --- /dev/null +++ b/docker/nexusctl.py @@ -0,0 +1,409 @@ +import os +import platform +import subprocess +import sys +import re +import argparse +import time +import signal + +# Set OLLAMA_PID_FILE to a path in the /tmp directory +OLLAMA_PID_FILE = "/tmp/ollama.pid" + + +def load_env_file(env_path): + """Loads environment variables from a .env file and adds them to 
os.environ.""" + try: + with open(env_path, "r") as file: + for line in file: + line = line.strip() + if line and not line.startswith("#"): + key, value = line.split("=", 1) + key, value = key.strip(), value.strip() + os.environ[key] = value + except FileNotFoundError: + print(f"Environment file {env_path} not found.") + sys.exit(1) + except Exception as e: + print(f"Error loading environment file: {e}") + sys.exit(1) + + +def get_docker_compose_version(): + try: + result = subprocess.run( + ["docker", "compose", "version"], capture_output=True, text=True, check=True + ) + version_line = result.stdout.strip() + return version_line + except subprocess.CalledProcessError: + print( + "Error checking Docker Compose version. Make sure Docker Compose is installed." + ) + sys.exit(1) + + +def extract_major_minor(version_line): + try: + match = re.search(r"v?(\d+)\.(\d+)", version_line) + if match: + major = int(match.group(1)) + minor = int(match.group(2)) + return major, minor + else: + print("Could not parse Docker Compose version string.") + sys.exit(1) + except Exception as e: + print(f"Unexpected error while extracting version: {e}") + sys.exit(1) + + +def check_docker_compose_version(): + version_line = get_docker_compose_version() + if version_line: + print(f"Docker Compose version line: {version_line}") + major, minor = extract_major_minor(version_line) + if major < 2 or (major == 2 and minor < 20): + print( + f"Docker Compose version {major}.{minor} is too old. Please update to version 2.20 or higher." + ) + sys.exit(1) + else: + print(f"Docker Compose version {major}.{minor} is sufficient.") + + +def check_brew_installed(): + try: + subprocess.run( + ["brew", "--version"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + check=True, + ) + print("Brew is installed.") + except subprocess.CalledProcessError: + print( + "Brew is not installed. Please install Homebrew from https://brew.sh/ and try again." + ) + sys.exit(1) + except FileNotFoundError: + print( + "Brew is not installed. Please install Homebrew from https://brew.sh/ and try again." 
+ ) + sys.exit(1) + + +def detect_gpu_and_set_env(): + os_type = platform.system() + if os_type == "Windows": + print("Running on Windows") + if ( + subprocess.run( + ["where", "nvidia-smi"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ).returncode + == 0 + ): + print("NVIDIA GPU detected") + os.environ["OLLAMA_DEVICE_DRIVER"] = "nvidia" + os.environ["OLLAMA_DEVICE_COUNT"] = "all" + os.environ["OLLAMA_DEVICE_CAPABILITIES"] = "gpu" + else: + print("No GPU detected") + os.environ["OLLAMA_DEVICE_DRIVER"] = "" + os.environ["OLLAMA_DEVICE_COUNT"] = "" + os.environ["OLLAMA_DEVICE_CAPABILITIES"] = "" + elif os_type == "Linux": + print("Running on Linux") + if ( + subprocess.run( + ["which", "nvidia-smi"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ).returncode + == 0 + ): + print("NVIDIA GPU detected") + os.environ["OLLAMA_DEVICE_DRIVER"] = "nvidia" + os.environ["OLLAMA_DEVICE_COUNT"] = "all" + os.environ["OLLAMA_DEVICE_CAPABILITIES"] = "gpu" + else: + print("No GPU detected") + os.environ["OLLAMA_DEVICE_DRIVER"] = "" + os.environ["OLLAMA_DEVICE_COUNT"] = "" + os.environ["OLLAMA_DEVICE_CAPABILITIES"] = "" + elif os_type == "Darwin": + print("Running on macOS") + os.environ["OLLAMA_DEVICE_DRIVER"] = "" + os.environ["OLLAMA_DEVICE_COUNT"] = "" + os.environ["OLLAMA_DEVICE_CAPABILITIES"] = "" + else: + print(f"Unsupported OS: {os_type}") + sys.exit(1) + + +def start_ollama_serve(): + os_type = platform.system() + if os_type == "Darwin": + # Check if brew is installed + check_brew_installed() + + # Check if ollama is installed with brew + try: + if ( + subprocess.run( + ["brew", "list", "ollama"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ).returncode + != 0 + ): + print("Ollama not found, installing using brew...") + subprocess.run(["brew", "install", "ollama"], check=True) + else: + print("Ollama is already installed.") + except subprocess.CalledProcessError as e: + print(f"Failed to install ollama: {e}") + sys.exit(1) + + # Start the ollama serve process and pull the model + try: + model_version = os.getenv("LLAMA_MODEL_VERSION") + if not model_version: + print("LLAMA_MODEL_VERSION environment variable is not set.") + sys.exit(1) + + # Start ollama serve in the background + print("Starting Ollama server...") + serve_command = "nohup ollama serve &" + subprocess.Popen( + serve_command, + shell=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + # Wait for the server to start and capture the correct PID + print("Waiting for Ollama to start...") + time.sleep(5) # Give it some time to properly start + + # Find the process PID using pgrep + result = subprocess.run( + ["pgrep", "-f", "ollama serve"], capture_output=True, text=True + ) + if result.stdout: + pid = int(result.stdout.strip()) + print(f"Ollama server started with PID: {pid}") + + # Save the PID to a file + with open(OLLAMA_PID_FILE, "w") as pid_file: + pid_file.write(str(pid)) + print(f"Saved Ollama PID to {OLLAMA_PID_FILE}") + else: + print("Failed to find the Ollama serve process.") + sys.exit(1) + + # Pull the specified model + print(f"Pulling model version: {model_version}") + subprocess.run(["ollama", "pull", model_version], check=True) + + except subprocess.CalledProcessError as e: + print(f"Error while starting Ollama or pulling model: {e}") + sys.exit(1) + + +def change_to_docker_directory(): + try: + os.chdir("./docker") + print(f"Changed working directory to: {os.getcwd()}") + except FileNotFoundError: + print( + "Directory './docker' does not exist. 
Please make sure the directory is correct." + ) + sys.exit(1) + except Exception as e: + print(f"Unexpected error while changing directory: {e}") + sys.exit(1) + + +def docker_compose_up(): + change_to_docker_directory() + os_type = platform.system() + + compose_file = "docker-compose.yaml" + if os_type == "Darwin": + # Use a different compose file for macOS + compose_file = "docker-compose-nollama.yaml" + print("Using docker-compose-nollama.yaml for macOS") + + # Override the MODEL_URL environment variable on macOS + os.environ["MODEL_URL"] = "http://host.docker.internal:11434" + + # Load environment variables from the .env file + load_env_file(".env") + + # Ensure the environment has the updated MODEL_URL for macOS + env_vars = os.environ.copy() + if os_type == "Darwin": + env_vars["MODEL_URL"] = "http://host.docker.internal:11434" + + try: + # Use subprocess.run with env argument to override environment variables + subprocess.run( + ["docker", "compose", "-f", compose_file, "up", "-d"], + check=True, + env=env_vars, # Pass our modified environment with correct MODEL_URL + ) + except subprocess.CalledProcessError as e: + print(f"Failed to run Docker Compose up: {e}") + sys.exit(1) + + +def docker_compose_build(): + change_to_docker_directory() + os_type = platform.system() + + compose_file = "docker-compose.yaml" + if os_type == "Darwin": + # Use a different compose file for macOS + compose_file = "docker-compose-nollama.yaml" + print("Using docker-compose-nollama.yaml for macOS") + + # Override the MODEL_URL environment variable on macOS + os.environ["MODEL_URL"] = "http://host.docker.internal:11434" + + # Load environment variables from the .env file + load_env_file(".env") + + # Ensure the environment has the updated MODEL_URL for macOS + env_vars = os.environ.copy() + if os_type == "Darwin": + env_vars["MODEL_URL"] = "http://host.docker.internal:11434" + + try: + # Use subprocess.run with env argument to override environment variables + subprocess.run( + ["docker", "compose", "-f", compose_file, "build"], + check=True, + env=env_vars, # Pass our modified environment with correct MODEL_URL + ) + except subprocess.CalledProcessError as e: + print(f"Failed to run Docker Compose build: {e}") + sys.exit(1) + + +def docker_compose_down(): + change_to_docker_directory() + os_type = platform.system() + + compose_file = "docker-compose.yaml" + if os_type == "Darwin": + # Use a different compose file for macOS + compose_file = "docker-compose-nollama.yaml" + print("Using docker-compose-nollama.yaml for macOS") + + try: + subprocess.run(["docker", "compose", "-f", compose_file, "down"], check=True) + except subprocess.CalledProcessError as e: + print(f"Failed to run Docker Compose down: {e}") + sys.exit(1) + + # If running on macOS, stop the Ollama serve process + if os_type == "Darwin": + stop_ollama_serve() + + +def docker_delete_volumes(): + try: + # List of volumes to delete + volumes_to_delete = [ + "docker_fullnode1-db", + "docker_genesis", + "docker_shared", + "docker_validator1-db", + "docker_validator2-db", + "docker_validator3-db", + "docker_validator4-db", + ] + for volume in volumes_to_delete: + subprocess.run(["docker", "volume", "rm", volume], check=True) + print("All specified Docker volumes deleted successfully.") + except subprocess.CalledProcessError as e: + print(f"Failed to delete Docker volumes: {e}") + sys.exit(1) + + +def stop_ollama_serve(): + """Stops the Ollama server and all related processes if they are running.""" + if os.path.exists(OLLAMA_PID_FILE): + try: + with 
open(OLLAMA_PID_FILE, "r") as pid_file: + pid = int(pid_file.read().strip()) + print(f"Stopping Ollama server with PID: {pid}") + os.kill(pid, signal.SIGTERM) # Send SIGTERM to the main ollama process + + # Remove the PID file + os.remove(OLLAMA_PID_FILE) + print(f"PID file {OLLAMA_PID_FILE} deleted.") + + # Find and kill any remaining related processes (e.g., ollama_llama_server) + related_processes = subprocess.run( + ["pgrep", "-f", "ollama"], capture_output=True, text=True + ) + if related_processes.stdout: + pids = related_processes.stdout.strip().split("\n") + for related_pid in pids: + print(f"Killing related Ollama process with PID: {related_pid}") + os.kill( + int(related_pid), signal.SIGTERM + ) # Use SIGTERM to allow graceful shutdown + except ProcessLookupError: + print( + f"Error stopping Ollama server: No such process with PID {pid}. It may have already been stopped." + ) + except Exception as e: + print(f"Error stopping Ollama server: {e}") + else: + print( + f"PID file {OLLAMA_PID_FILE} not found. Ollama server may not be running." + ) + + +def main(): + parser = argparse.ArgumentParser( + description="Control the Docker Compose deployment and overall environment." + ) + parser.add_argument( + "command", + choices=["start", "stop", "delete", "create"], + help="Command to execute: start, stop, delete, create", + ) + args = parser.parse_args() + + # Check Docker Compose version + check_docker_compose_version() + + # Load environment variables from .env into the environment + load_env_file("./docker/.env") + + # Perform actions based on the command + if args.command == "start": + detect_gpu_and_set_env() + if platform.system() == "Darwin": + start_ollama_serve() + docker_compose_up() + elif args.command == "stop": + detect_gpu_and_set_env() + docker_compose_down() + elif args.command == "delete": + detect_gpu_and_set_env() + docker_compose_down() + docker_delete_volumes() + elif args.command == "create": + detect_gpu_and_set_env() + docker_compose_build() + + +if __name__ == "__main__": + main() diff --git a/docker/ollama/Dockerfile b/docker/ollama/Dockerfile new file mode 100644 index 0000000..bf43a71 --- /dev/null +++ b/docker/ollama/Dockerfile @@ -0,0 +1,15 @@ +#syntax=docker/dockerfile:1 + +FROM ollama/ollama:0.3.2 + +ARG LLAMA_MODEL_VERSION=llama + +RUN apt-get update && apt-get install -y curl + +RUN nohup bash -c "ollama serve &" && \ + until curl -s http://localhost:11434 | grep "Ollama is running"; do echo "Waiting for Ollama to start..."; sleep 2; done && \ + ollama pull ${LLAMA_MODEL_VERSION} + +EXPOSE 11434 + +CMD ["serve"] \ No newline at end of file diff --git a/docker/ollama/compose.yaml b/docker/ollama/compose.yaml new file mode 100644 index 0000000..cf6aa02 --- /dev/null +++ b/docker/ollama/compose.yaml @@ -0,0 +1,21 @@ +services: + ollama: + image: talusnetwork/ollama:${LLAMA_MODEL_VERSION_TAG} + container_name: ollama + deploy: + resources: + reservations: + devices: + - driver: ${OLLAMA_DEVICE_DRIVER} + count: ${OLLAMA_DEVICE_COUNT} + capabilities: ["${OLLAMA_DEVICE_CAPABILITIES}"] + build: + context: "." 
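+      # the Dockerfile pulls ${LLAMA_MODEL_VERSION} during the build, so the model is baked into the image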
+ args: + LLAMA_MODEL_VERSION: ${LLAMA_MODEL_VERSION} + ports: + - "11434:11434" + restart: unless-stopped + depends_on: + build-suitools: + condition: service_completed_successfully diff --git a/docker/sui/.gitignore b/docker/sui/.gitignore new file mode 100644 index 0000000..edfdcee --- /dev/null +++ b/docker/sui/.gitignore @@ -0,0 +1,2 @@ +genesis/.venv +genesis/files \ No newline at end of file diff --git a/docker/sui/Dockerfile b/docker/sui/Dockerfile new file mode 100644 index 0000000..89323ff --- /dev/null +++ b/docker/sui/Dockerfile @@ -0,0 +1,35 @@ +#syntax=docker/dockerfile:1 + +FROM rust:1.81-bullseye AS builder + +ARG PROFILE=release +ARG GIT_REVISION +ARG SUI_TAG=testnet-v1.29.1 + +ENV GIT_REVISION=$GIT_REVISION + +RUN apt-get update && apt-get install -y git cmake clang libpq5 libpq-dev + +WORKDIR /sui + +RUN git clone --depth 1 --branch ${SUI_TAG} https://github.com/MystenLabs/sui.git . + +RUN cargo build --profile ${PROFILE} \ + --bin sui-node \ + --bin sui \ + --bin sui-faucet + +FROM debian:bullseye-slim AS runtime + +WORKDIR /sui + +RUN apt-get update && apt-get install -y libpq5 libpq-dev ca-certificates git jq curl + +COPY --from=builder /sui/target/release/sui-node /usr/local/bin +COPY --from=builder /sui/target/release/sui /usr/local/bin +COPY --from=builder /sui/target/release/sui-faucet /usr/local/bin + +ARG BUILD_DATE +ARG GIT_REVISION +LABEL build-date=$BUILD_DATE +LABEL git-revision=$GIT_REVISION diff --git a/docker/sui/bin/publish_package.sh b/docker/sui/bin/publish_package.sh new file mode 100644 index 0000000..ae20865 --- /dev/null +++ b/docker/sui/bin/publish_package.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Step 0: Check ENV for SHARED_DIR and build PACKAGE_ID_FILE location +SHARED_DIR="${SHARED_DIR:-/shared}" +PACKAGE_ID_FILE="$SHARED_DIR/package_id.json" + +if [ ! -f "$PACKAGE_ID_FILE" ]; then + echo "Package ID file not found. Proceeding to publish package." + + # Step 1: Navigate to /opt/sui/onchain + cd /opt/sui/onchain || { echo "Failed to navigate to /opt/sui/onchain"; exit 1; } + + # Step 2: Run the sui client publish command and extract the package_id + PACKAGE_ID=$(sui client publish --skip-dependency-verification --json | jq -r '.objectChanges[] | select(.type == "published") | .packageId') + + # Step 3: Save the package_id to the specified PACKAGE_ID_FILE + if [ "$PACKAGE_ID" != "null" ] && [ -n "$PACKAGE_ID" ]; then + echo "[\"$PACKAGE_ID\"]" > "$PACKAGE_ID_FILE" + echo "Package ID saved to $PACKAGE_ID_FILE" + else + echo "Failed to extract a valid package ID" + exit 1 + fi +else + echo "Package ID file already exists. No action taken." 
+fi diff --git a/docker/sui/compose.yaml b/docker/sui/compose.yaml new file mode 100644 index 0000000..3e66c7e --- /dev/null +++ b/docker/sui/compose.yaml @@ -0,0 +1,236 @@ +x-validator-base: &validator-base + image: talusnetwork/sui-tools:${SUI_TAG:-latest} + environment: &validator-env + - RUST_BACKTRACE=1 + - RUST_LOG=info,sui_core=info,sui_network=info,sui_node=info,narwhal=info,narwhal-primary::helper=info,jsonrpsee=error + - RPC_WORKER_THREAD=12 + - NEW_CHECKPOINT_WARNING_TIMEOUT_MS=30000 + - NEW_CHECKPOINT_PANIC_TIMEOUT_MS=60000 + logging: &validator-logging + driver: "json-file" + options: + max-file: "10" + max-size: "1g" + restart: on-failure + depends_on: + build-genesis: + condition: service_completed_successfully + build-suitools: + condition: service_completed_successfully + +services: + build-suitools: + container_name: build-suitools + restart: "no" + image: talusnetwork/sui-tools:${SUI_TAG} + build: + context: "." + dockerfile: Dockerfile + args: + SUI_TAG: ${SUI_TAG} + + build-genesis: + image: talusnetwork/build-sui-genesis:${SUI_TAG} + container_name: build-genesis + build: + context: "./genesis" + dockerfile: Dockerfile + args: + SUI_TAG: ${SUI_TAG} + pull_policy: never + volumes: + - genesis:/opt/sui/genesis/files + environment: + - SUI_TAG=${SUI_TAG} + restart: "no" + depends_on: + build-suitools: + condition: service_completed_successfully + + publish-package: + image: talusnetwork/sui-tools:${SUI_TAG} + container_name: publish-package + environment: + - RUST_BACKTRACE=1 + - RUST_LOG=error + - SUI_CONFIG_DIR=/opt/sui/config + - SHARED_DIR=/app/shared + - RPC_URL=${RPC_URL} + + volumes: + - shared:/app/shared + - ./bin/publish_package.sh:/opt/sui/publish_package.sh:ro + - ./genesis/static/client.yaml:/opt/sui/config/client.yaml:rw + - ./genesis/static/sui.keystore:/opt/sui/config/sui.keystore:ro + - ../../onchain:/opt/sui/onchain + command: ["bash", "/opt/sui/publish_package.sh"] + restart: on-failure + depends_on: + build-suitools: + condition: service_completed_successfully + + validator1: + <<: *validator-base + container_name: validator1 + hostname: validator1 + volumes: + - validator1-db:/opt/sui/db:rw + - genesis:/opt/sui/config + command: + [ + "/usr/local/bin/sui-node", + "--config-path", + "/opt/sui/config/validator1-8080.yaml", + ] + + validator2: + <<: *validator-base + container_name: validator2 + hostname: validator2 + volumes: + - validator2-db:/opt/sui/db:rw + - genesis:/opt/sui/config + command: + [ + "/usr/local/bin/sui-node", + "--config-path", + "/opt/sui/config/validator2-8080.yaml", + ] + + validator3: + <<: *validator-base + container_name: validator3 + hostname: validator3 + volumes: + - validator3-db:/opt/sui/db:rw + - genesis:/opt/sui/config + command: + [ + "/usr/local/bin/sui-node", + "--config-path", + "/opt/sui/config/validator3-8080.yaml", + ] + + validator4: + <<: *validator-base + container_name: validator4 + hostname: validator4 + volumes: + - validator4-db:/opt/sui/db:rw + - genesis:/opt/sui/config + command: + [ + "/usr/local/bin/sui-node", + "--config-path", + "/opt/sui/config/validator4-8080.yaml", + ] + + fullnode1: + image: talusnetwork/sui-tools:${SUI_TAG} + hostname: fullnode1 + container_name: fullnode1 + ports: + - "9000:9000" + environment: + - RUST_BACKTRACE=1 + - RUST_LOG=info,sui_core=info,sui_network=info,sui_node=info,narwhal=info,narwhal-primary::helper=info,jsonrpsee=error + - RPC_WORKER_THREAD=12 + - NEW_CHECKPOINT_WARNING_TIMEOUT_MS=30000 + - NEW_CHECKPOINT_PANIC_TIMEOUT_MS=60000 + - SUI_CONFIG_DIR=/opt/sui/config 
+ - RPC_URL=${RPC_URL} + volumes: + - fullnode1-db:/opt/sui/db:rw + - genesis:/opt/sui/config + - ./genesis/static/fullnode.yaml:/opt/sui/config/fullnode.yaml:ro + - ./genesis/static/client.yaml:/opt/sui/config/client.yaml:rw + healthcheck: + test: + [ + "CMD", + "curl", + "-f", + "-X", + "POST", + "-H", + "Content-Type: application/json", + "-d", + '{"jsonrpc":"2.0","method":"sui_getChainIdentifier","id":1}', + "http://localhost:9000", + ] + interval: 5s + timeout: 10s + retries: 3 + start_period: 120s + command: + [ + "/usr/local/bin/sui-node", + "--config-path", + "/opt/sui/config/fullnode.yaml", + ] + restart: on-failure + logging: + driver: "json-file" + options: + max-file: "10" + max-size: "1g" + depends_on: + build-genesis: + condition: service_completed_successfully + build-suitools: + condition: service_completed_successfully + + faucet: + image: talusnetwork/sui-tools:${SUI_TAG} + hostname: faucet + container_name: faucet + environment: + - RUST_BACKTRACE=1 + - RUST_LOG=info,sui_core=info,sui_network=info,sui_node=info,narwhal=info,narwhal-primary::helper=info,jsonrpsee=error + - SUI_CONFIG_DIR=/opt/sui/config + - HOST_IP=0.0.0.0 + - RPC_URL=${RPC_URL} + ports: + - "5003:5003" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:5003"] + interval: 5s + timeout: 10s + retries: 3 + start_period: 120s + volumes: + - genesis:/opt/sui/config + - ./genesis/static/client.yaml:/opt/sui/config/client.yaml:rw + - ./genesis/static/sui.keystore:/opt/sui/config/sui.keystore:ro + command: + [ + "/usr/local/bin/sui-faucet", + "--write-ahead-log", + "/tmp/faucet.wal", + "--host-ip", + "0.0.0.0", + "--amount", + "10000000000000", + ] + restart: on-failure + logging: + driver: "json-file" + options: + max-file: "10" + max-size: "1g" + depends_on: + build-genesis: + condition: service_completed_successfully + build-suitools: + condition: service_completed_successfully + fullnode1: + condition: service_healthy + +volumes: + genesis: + validator1-db: + validator2-db: + validator3-db: + validator4-db: + fullnode1-db: + shared: diff --git a/docker/sui/genesis/.dockerignore b/docker/sui/genesis/.dockerignore new file mode 100644 index 0000000..d17b2ff --- /dev/null +++ b/docker/sui/genesis/.dockerignore @@ -0,0 +1 @@ +./files/* \ No newline at end of file diff --git a/docker/sui/genesis/Dockerfile b/docker/sui/genesis/Dockerfile new file mode 100644 index 0000000..08c3586 --- /dev/null +++ b/docker/sui/genesis/Dockerfile @@ -0,0 +1,28 @@ +# Stage 1: Build sui tools and dependencies +ARG SUI_TAG=testnet-v1.29.1 +FROM talusnetwork/sui-tools:${SUI_TAG} AS sui-builder + +FROM python:3.9-slim-bullseye + +ENV PYTHONUNBUFFERED=1 +ENV TARGET_DIR=/opt/sui/genesis/files + +RUN apt-get update && apt-get install -y libpq5 libpq-dev ca-certificates libc6 + +WORKDIR /opt/sui/genesis + +# Copy necessary sui binary and libraries from the sui-builder stage +COPY --from=sui-builder /usr/local/bin/sui /usr/local/bin/sui + +# Copy the necessary files for genesis creation +COPY overlays overlays +COPY static static +COPY compose-validators.yaml compose-validators.yaml +COPY requirements.txt . +COPY generate.py . 
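+# generate.py merges compose-validators.yaml with overlays/common.yaml, runs sui genesis,
+# and writes the per-validator configs plus genesis.blob into ${TARGET_DIR}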
+ +# Install Python dependencies for genesis preparation +RUN python3 -m pip install -r requirements.txt + +# Set the entrypoint to run the generation script with the target directory +CMD ["/bin/bash", "-c", "python3 generate.py --genesis-template compose-validators.yaml --target-directory ${TARGET_DIR}"] diff --git a/docker/sui/genesis/compose-validators.yaml b/docker/sui/genesis/compose-validators.yaml new file mode 100644 index 0000000..e3db52f --- /dev/null +++ b/docker/sui/genesis/compose-validators.yaml @@ -0,0 +1,54 @@ +accounts: +- address: '0xd59d79516a4ed5b6825e80826c075a12bdd2759aaeb901df2f427f5f880c8f60' + gas_amounts: + - 750000000000000000 + - 750000000000000000 +- address: '0x160ef6ce4f395208a12119c5011bf8d8ceb760e3159307c819bd0197d154d384' + gas_amounts: + - 20000000000000000 + - 20000000000000000 + - 20000000000000000 + - 20000000000000000 + - 20000000000000000 +parameters: + allow_insertion_of_extra_objects: false + epoch_duration_ms: 120000 +validator_config_info: +- commission_rate: 0 + consensus_address: /ip4/127.0.0.1/tcp/8083/http + gas_price: 1000 + name: validator1 + narwhal_primary_address: /dns/validator1/udp/8081 + narwhal_worker_address: /dns/validator1/udp/8082 + network_address: /dns/validator1/tcp/8080/http + p2p_address: /dns/validator1/udp/8084 + stake: 20000000000000000 + genesis: + genesis-file-location: /opt/sui/genesis.blob +- commission_rate: 0 + consensus_address: /ip4/127.0.0.1/tcp/8083/http + gas_price: 1000 + name: validator2 + narwhal_primary_address: /dns/validator2/udp/8081 + narwhal_worker_address: /dns/validator2/udp/8082 + network_address: /dns/validator2/tcp/8080/http + p2p_address: /dns/validator2/udp/8084 + stake: 20000000000000000 +- commission_rate: 0 + consensus_address: /ip4/127.0.0.1/tcp/8083/http + gas_price: 1000 + name: validator3 + narwhal_primary_address: /dns/validator3/udp/8081 + narwhal_worker_address: /dns/validator3/udp/8082 + network_address: /dns/validator3/tcp/8080/http + p2p_address: /dns/validator3/udp/8084 + stake: 20000000000000000 +- commission_rate: 0 + consensus_address: /ip4/127.0.0.1/tcp/8083/http + gas_price: 1000 + name: validator4 + narwhal_primary_address: /dns/validator4/udp/8081 + narwhal_worker_address: /dns/validator4/udp/8082 + network_address: /dns/validator4/tcp/8080/http + p2p_address: /dns/validator4/udp/8084 + stake: 20000000000000000 diff --git a/docker/sui/genesis/generate.py b/docker/sui/genesis/generate.py new file mode 100755 index 0000000..771c867 --- /dev/null +++ b/docker/sui/genesis/generate.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +# Copyright (c) Mysten Labs, Inc. +# SPDX-License-Identifier: Apache-2.0 + + +import argparse +import re +import os.path +import subprocess +import sys +import typing +import yaml + +import hiyapyco # https://pypi.org/project/HiYaPyCo/ + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + +# Use the base directory to construct absolute paths +_COMMON_OVERLAY_PATH = os.path.join(BASE_DIR, "overlays", "common.yaml") + + +def parse_overlays(overlay_type: str) -> str: + overlays: str = "" + + with open(_COMMON_OVERLAY_PATH, "r") as f: + common_overlays = yaml.safe_load(f)[overlay_type] + overlays = yaml.safe_dump(common_overlays) + overlays = hiyapyco.load([overlays]) + + return hiyapyco.dump(overlays) + + +def get_network_addresses(genesis_config: typing.Dict) -> typing.List: + network_adr_pattern = ( + r"/(?Pdns|ip4|ip6|unix)/(?P
<address>[^/]*)(/udp|/tcp)?/(?P<port>\d+)?"
+    )
+    network_addresses = []
+    for validator in genesis_config.get("validator_config_info"):
+        match = re.search(network_adr_pattern, validator["network_address"])
+        network_addresses.append(f'{match.group("address")}-{match.group("port")}.yaml')
+    return network_addresses
+
+
+def set_validator_name(genesis_config: typing.Dict) -> typing.Dict:
+    network_adr_pattern = (
+        r"/(?P<type>dns|ip4|ip6|unix)/(?P<address>
[^/]*)(/udp|/tcp)?/(?P\d+)?" + ) + for validator in genesis_config.get("validator_config_info"): + match = re.search(network_adr_pattern, validator["network_address"]) + validator["name"] = match.group("address") + return genesis_config + + +def main(args: argparse.ArgumentParser) -> None: + # create target directory if it doesn't exist + _ = subprocess.run(["mkdir", "-p", "z", f"{args.target_directory}"], check=True) + + # load genesis template + with open(args.genesis_template, "r") as f: + genesis_config = yaml.safe_load(f) + + validator_network_addresses = get_network_addresses(genesis_config) + + # set the validator name based on their address + genesis_config = set_validator_name(genesis_config) + + # write genesis configuration file + with open(f"{args.target_directory}/genesis.yaml", "w") as f: + f.write(yaml.safe_dump(genesis_config)) + + # run genesis with newly created genesis configuration file + _ = subprocess.run( + [ + "sui", + "genesis", + "--from-config", + f"{args.target_directory}/genesis.yaml", + "--working-dir", + "z", + "-f", + ], + # this should be inherited from the parent process, but just in case + env=os.environ, + check=True, + ) + + # parse validator overlays + overlays = parse_overlays("validator") + + # process validator overlays + for validator in validator_network_addresses: + with open(f"z/{validator}", "r") as f: + validator_config = f.read() + + merged_yaml = hiyapyco.load( + [validator_config, overlays], method=hiyapyco.METHOD_MERGE + ) + merged_yaml = hiyapyco.dump(merged_yaml) + + with open(f"{args.target_directory}/{validator}", "w") as f: + f.write(merged_yaml) + + # move other required files to target + subprocess.run(["mv", "z/genesis.blob", f"{args.target_directory}/"], check=True) + + _ = subprocess.run(["rm", "-rf", "z"], check=True) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "-g", + "--genesis-template", + type=str, + help="template to use for genesis.yaml generation", + required=False, + ) + parser.add_argument( + "-t", + "--target-directory", + type=str, + help="target directory for generated genesis and configuration files", + default="target", + required=False, + ) + parser.add_argument( + "-o", + "--override-generation", + type=str, + help="do not generate and use override configuration instead", + required=False, + ) + parser.add_argument( + "-p", + "--protocol-config-override", + type=str, + help="protocol config override to set", + required=False, + ) + args = parser.parse_args() + + # exit if configuration already exists + if os.path.exists(f"{args.target_directory}/genesis.blob"): + print("configuration already exists, not generating") + sys.exit(0) + + main(args) diff --git a/docker/sui/genesis/overlays/common.yaml b/docker/sui/genesis/overlays/common.yaml new file mode 100644 index 0000000..ac21697 --- /dev/null +++ b/docker/sui/genesis/overlays/common.yaml @@ -0,0 +1,20 @@ +validator: + network-address: /ip4/0.0.0.0/tcp/8080/http + metrics-address: 0.0.0.0:9184 + json-rpc-address: 0.0.0.0:9000 + admin-interface-port: 1337 + genesis: + genesis-file-location: /opt/sui/config/genesis.blob + db-path: /opt/sui/db/authorities_db + consensus-config: + db-path: /opt/sui/db/consensus_db + narwhal-config: + max_header_delay: 2000ms + expensive-safety-check-config: + enable-epoch-sui-conservation-check: false + enable-deep-per-tx-sui-conservation-check: false + force-disable-epoch-sui-conservation-check: false + enable-state-consistency-check: false + 
force-disable-state-consistency-check: false + enable-move-vm-paranoid-checks: false + epoch_duration_ms: 120000 diff --git a/docker/sui/genesis/requirements.txt b/docker/sui/genesis/requirements.txt new file mode 100644 index 0000000..0838732 --- /dev/null +++ b/docker/sui/genesis/requirements.txt @@ -0,0 +1,2 @@ +hiyapyco>=0.5.1 +setuptools diff --git a/docker/sui/genesis/static/client.yaml b/docker/sui/genesis/static/client.yaml new file mode 100644 index 0000000..35cabd3 --- /dev/null +++ b/docker/sui/genesis/static/client.yaml @@ -0,0 +1,10 @@ +--- +keystore: + File: /opt/sui/config/sui.keystore +envs: + - alias: localnet + rpc: "http://fullnode1:9000" + ws: ~ + basic_auth: ~ +active_env: localnet +active_address: "0xd59d79516a4ed5b6825e80826c075a12bdd2759aaeb901df2f427f5f880c8f60" diff --git a/docker/sui/genesis/static/fullnode.yaml b/docker/sui/genesis/static/fullnode.yaml new file mode 100644 index 0000000..1dfe3c9 --- /dev/null +++ b/docker/sui/genesis/static/fullnode.yaml @@ -0,0 +1,23 @@ +--- +db-path: /opt/sui/db +network-address: /ip4/0.0.0.0/tcp/8080/http +json-rpc-address: "0.0.0.0:9000" +metrics-address: "0.0.0.0:9184" +admin-interface-port: 1337 +enable-event-processing: true +grpc-load-shed: ~ +grpc-concurrency-limit: ~ +p2p-config: + listen-address: "0.0.0.0:8084" +genesis: + genesis-file-location: /opt/sui/config/genesis.blob +authority-store-pruning-config: + num-latest-epoch-dbs-to-retain: 3 + epoch-db-pruning-period-secs: 3600 + num-epochs-to-retain: 18446744073709551615 + max-checkpoints-in-batch: 5 + max-transactions-in-batch: 1000 + use-range-deletion: true + pruning-run-delay-seconds: 60 +state-debug-dump-config: + dump-file-directory: /opt/sui/state_debug_dump diff --git a/docker/sui/genesis/static/sui.keystore b/docker/sui/genesis/static/sui.keystore new file mode 100644 index 0000000..169a8ea --- /dev/null +++ b/docker/sui/genesis/static/sui.keystore @@ -0,0 +1,3 @@ +[ + "ACWkfiQ6x7FxJ74IZAgQYRl67TTtLLDlP2RhJjPI6tTQ" +] diff --git a/examples/.python-version b/examples/.python-version new file mode 100644 index 0000000..c8cfe39 --- /dev/null +++ b/examples/.python-version @@ -0,0 +1 @@ +3.10 diff --git a/examples/README.md b/examples/README.md index 3adc560..edf1cfa 100644 --- a/examples/README.md +++ b/examples/README.md @@ -5,76 +5,38 @@ We have built a few examples to showcase Nexus agents. Before you can use the examples or build your own agents, you need to install a few things first, as shown in the next section. -When you run either of the examples, you will be prompted to start two services: - -- [`tools`][tools_README] which you need to start only once for all examples, and -- [`events`][events_README] which you need to start for each example anew and once the example finished, you can stop it. - -You will be given exact instructions on how to start and stop these services when you run either example. - - [Examples](#examples) - [Environment setup](#environment-setup) - [Operating System](#operating-system) - [Helper tools](#helper-tools) - - [Operating System packages](#operating-system-packages) - - [Python and virtual environment](#python-and-virtual-environment) - - [Suibase](#suibase) - - [`PATH`](#path) - - [Ollama](#ollama) + - [Docker](#docker) - [Example: Instagram Post Planner](#example-instagram-post-planner) - [Example: Trip Planner](#example-trip-planner) - [Example: CLI Cluster](#example-cli-cluster) - [Tools](#tools) + - [Events](#events) ## Environment setup ### Operating System -We assume Ubuntu `22.04 LTS`. 
+We support macOS, windows and linux. ### Helper tools You need to install the following tools by following their official installation instructions: -- [`cargo`][cargo] +- [`docker`][docker] - [`just`][just] (on Linux install it with "Pre-Built Binaries" rather than with `apt` because of an outdated version) -- [`uv`][uv] - -We use `just` as a general command runner, and `uv` to manage Python and virtual environments -(`.venv`). The [`justfile`][justfile] contains installation instructions for everything we -describe here, and you can run `just` from this folder, as it will automatically discover the -`justfile` at the top level. - -### Operating System packages - -You can install dependencies with `just apt-setup`. - -### Python and virtual environment - -We install and use Python `3.10`. +- [`python`][python] -From inside the working copy of the repository, run `just venv-setup` to: +We use `just` as a general command runner, just run `just` for available commands. -- install Python, -- create the necessary `.venv`, -- install all the needed dependencies in the `.venv`. +### Docker -### Suibase +We use Docker to create a consistent local environment for all examples, ensuring compatibility across macOS, Windows, and Linux. By packaging dependencies into isolated containers, we aim to provide a uniform environment that minimizes compatibility issues. To run these examples, you’ll need Docker Compose version 2.20 or higher. -Talus smart contracts are written in Sui Move, and until our testnet is ready we use a -compatible (from the Sui Move point of view) chain, based on [`Suibase`][suibase]. - -You can download and install Suibase with `just suibase-setup`. - -### `PATH` - -Make sure `~/.local/bin` is in your `PATH`. Suibase requires this, as it installs its -executables there. - -### Ollama - -For the LLM component we install and use [Ollama][ollama] with the `llama3.2:1b` model. You can -install both with `just ollama-setup`. +**Note for macOS users:** While Ollama can run in a container on macOS, it experiences poor performance due to Docker Desktop for macOS lacking GPU acceleration support. To ensure better performance, Ollama is running directly on the host instead of within a container. ## Example: Instagram Post Planner @@ -102,17 +64,18 @@ Run with `just example cli-cluster`. Agents can use tools to enhance their capabilities. Please refer to the [`tools` README][tools_README] for a list of available tools, and instructions on how to add new ones. +## Events + +Events allow offchain systems to respond to onchain actions, automating tool execution and model inference based on specific triggers. Please refer to the [`events` README][events_README] for more details. 
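+
+As a quick orientation, the typical end-to-end flow looks roughly like this (a sketch, assuming the commands are run from the repository root; `nexusctl` wraps Docker Compose and, on macOS, also manages the host-side Ollama):
+
+```sh
+# build the images, then bring the local stack up
+python3 docker/nexusctl.py create
+python3 docker/nexusctl.py start   # or: just containers start
+
+# run one of the examples inside the `examples` container
+just example cli-cluster
+
+# stop the containers (and the host Ollama on macOS) when you are done
+python3 docker/nexusctl.py stop
+```
+
+`python3 docker/nexusctl.py delete` additionally removes the project's Docker volumes if you want a completely clean slate.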
+ -[cargo]: https://doc.rust-lang.org/cargo/getting-started/installation.html +[docker]: https://docs.docker.com/engine/install/ [just]: https://github.com/casey/just -[uv]: https://github.com/astral-sh/uv -[suibase]: https://suibase.io/ -[ollama]: https://ollama.com/ +[python]: https://www.python.org/downloads/ [tools_README]: ../offchain/tools/README.md [events_README]: ../offchain/events/README.md [ig_post_planner]: ./ig_post_planner.py [trip_planner]: ./trip_planner.py [cli_cluster]: ./cli_cluster.py -[justfile]: ../justfile [design_cluster]: ../onchain/README.md#cluster diff --git a/examples/example.just b/examples/example.just index d9acd1f..c5d0fe6 100644 --- a/examples/example.just +++ b/examples/example.just @@ -1,26 +1,29 @@ -# Examples assume that the dependencies are already installed in venv +set shell := [ "python3", "-c"] [private] -default: - @just -l example +default: version-check + @__import__('os').system("just -l example") + +[private] +version-check: + @import sys; major, minor = sys.version_info[:2]; \ + assert (major, minor) >= (3, 7), "This script requires at least Python 3.7. Please link \"python3\" to Python 3.7 or higher and try again." + # Runs an example that prompts the user for a description of their post. [no-cd] ig-post-planner: - #!/usr/bin/env bash - source .venv/bin/activate - python3 examples/main.py "ig_post_planner" + @__import__('os').system("just containers check") + @__import__('os').system("docker exec -it examples /bin/bash -c \"source .venv/bin/activate && python examples/main.py ig_post_planner\"") # Runs an example that prompts the user for description of their trip. [no-cd] trip-planner: - #!/usr/bin/env bash - source .venv/bin/activate - python3 examples/main.py "trip_planner" + @__import__('os').system("just containers check") + @__import__('os').system("docker exec -it examples /bin/bash -c \"source .venv/bin/activate && python examples/main.py trip_planner\"") # Runs an example that prompts the user for description of their cluster. [no-cd] cli-cluster: - #!/usr/bin/env bash - source .venv/bin/activate - python3 examples/main.py "cli_cluster" + @__import__('os').system("just containers check") + @__import__('os').system("docker exec -it examples /bin/bash -c \"source .venv/bin/activate && python examples/main.py cli_cluster\"") diff --git a/examples/main.py b/examples/main.py index c01da6a..d1a1fd6 100644 --- a/examples/main.py +++ b/examples/main.py @@ -1,48 +1,27 @@ -# -# This script accepts one argument: the name of the example to run. -# -# Available examples: -# - trip_planner -# - ig_post_planner -# - cli_cluster -# -# ```bash -# python main.py ${EXAMPLE_NAME} -# ``` -# -# # Requirements -# -# - Suibase "localnet" CLI -# - Nexus SDK installed -# - `offchain/tools` installed -# - `offchain/events` installed -# -# # Steps -# -# This script prepares all resources necessary to run an example. -# 1. Starts localnet with Suibase -# 2. Publishes the talus package -# 3. Gets the Sui keypair from the environment and airdrops SUI -# 4. Creates a node and a model -# 5. Asks the user to Talus services -# 6. 
Runs the example that the user selected - -import json import os -import re -import subprocess import sys +import subprocess +import json +import argparse +from pathlib import Path from cli_cluster import run_cli_cluster_example from colorama import init as colorama_init from ig_post_planner import run_ig_post_planner_example -from nexus_sdk import get_sui_client, create_node, create_model -from pathlib import Path from trip_planner import run_trip_planner_example +from nexus_sdk import get_sui_client -# we know that this script is located in the ./examples directory, so we go +# We know that this script is located in the ./examples directory, so we go # one level up to get the root directory of the repository repo_root_dir = Path(__file__).resolve().parent.parent +# Define paths to shared resources +shared_dir = Path(os.getenv("SHARED_DIR", ".")) +keystore_path = Path(shared_dir) / "sui.keystore" +package_id_path = Path(shared_dir) / "package_id.json" +node_details_path = Path(shared_dir) / "node_details.json" + +rpc_url = os.getenv("RPC_URL", "http://localhost:9000") +ws_url = os.getenv("WS_URL", "ws://localhost:9000") # Maps example name to a function that runs it. # In essence, this is the source of truth for supported examples. @@ -53,216 +32,94 @@ } -def main(): - colorama_init() - - example_name = sys.argv[1] - if example_name not in EXAMPLES: - raise ValueError( - f"Unknown example name: {example_name}. Available examples: {EXAMPLES.keys()}" - ) - - # 1. - print("Starting localnet...") - start_localnet() - - # 2. - print("Publishing Talus package...") - package_id = publish_talus_package() - - # 3. - print("Preparing Sui address...") - sui_address = get_sui_address() - airdrop_sui(sui_address) - private_key = get_sui_address_private_key(sui_address) - client = get_sui_client(private_key) - - # 4. - print("Creating node and model...") - node_id = create_example_node(client, package_id) - llama_id, llama_owner_cap_id = create_llama_model(client, package_id, node_id) +# Runs given command and returns the output. +def run_command(command, cwd=None): + result = subprocess.run( + command, cwd=cwd, shell=True, capture_output=True, text=True + ) + if result.returncode != 0: + print(f"Error executing command: {command}") + print(f"Error output: {result.stdout}\n\n{result.stderr}") + raise Exception(f"Command failed: {command}") + return result.stdout - # 5. - ask_user_to_start_talus_services(private_key, package_id, llama_owner_cap_id) - # 6. +def load_configuration(): + """Load the required configuration from predefined paths.""" + # Load package ID try: - print() - EXAMPLES[example_name](client, package_id, llama_id, llama_owner_cap_id) - print() - print(f"Example {example_name} finished") - except Exception as e: - print(f"Failed to run example {example_name}: {e}") - - -def start_localnet(): - run_command("localnet start") - - status_output = run_command("localnet status") - - # "OK" is printed in green color, so we cannot do a simple string comparison - ansi_escape = re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]") - clean_status_output = ansi_escape.sub("", status_output) - if "localnet OK" not in clean_status_output: - print() - print("Output of localnet status:") - print(status_output) - raise Exception("Failed to start localnet. Try `$ localnet regen`") + with open(package_id_path, "r") as f: + package_id = json.load(f)[0] + except (FileNotFoundError, IndexError, json.JSONDecodeError) as e: + print(f"Error: Unable to load package ID from {package_id_path}. 
Details: {e}") + sys.exit(1) - -# Assumes localnet being started and Suibase installed in the default path. -def publish_talus_package(): - # TODO: https://github.com/Talus-Network/TAF/issues/9 - run_command( - "localnet publish --skip-dependency-verification", - cwd=repo_root_dir / "onchain", - ) - - package_id = None - published_data_path = os.path.expanduser( - "~/suibase/workdirs/localnet/published-data/talus/most-recent/package-id.json" - ) - if not os.path.exists(published_data_path): - raise FileNotFoundError( - f"Published data file not found at {published_data_path}. Please ensure the Talus package has been published." + # Load node details + try: + with open(node_details_path, "r") as f: + node_details = json.load(f) + llama_id = node_details.get("llama_id") + llama_owner_cap_id = node_details.get("llama_owner_cap_id") + except (FileNotFoundError, json.JSONDecodeError) as e: + print( + f"Error: Unable to load node details from {node_details_path}. Details: {e}" ) - with open(published_data_path) as f: - data = json.load(f) - if not data: - raise ValueError( - "Published data file is empty. Please check your Talus package publication." - ) - package_id = data[0] - - if not package_id: - raise ValueError("Failed to extract PACKAGE_ID from the published data file.") - - return package_id - - -# Uses the suibase CLI to get the currently active address -def get_sui_address(): - return run_command("lsui client active-address").strip() + sys.exit(1) + if not llama_id or not llama_owner_cap_id: + print("Error: Llama ID or Llama Owner Capability ID is missing.") + sys.exit(1) -# Airdrops some SUI to the localnet faucet address and returns the address. -def airdrop_sui(address): - # trims whitespaces and new lines - run_command(f"localnet faucet {address}") - - -# Reads the private key for the given address from the Sui keystore. -def get_sui_address_private_key(for_address): - all_addresses_json = run_command("lsui client addresses --json") - # Find the position of the address in the list of .addresses. - # Each address is a two-element list: [method, public_key] - all_addresses = json.loads(all_addresses_json) - position = None - for i, [_, address] in enumerate(all_addresses["addresses"]): - if address == for_address: - position = i - break - - if position is None: - raise ValueError(f"Address '{for_address}' not found in client addresses") - - keystore_path = os.path.expanduser( - "~/suibase/workdirs/localnet/config/sui.keystore" - ) - if not os.path.exists(keystore_path): - raise FileNotFoundError( - f"Sui client file not found at {keystore_path}. Please ensure Sui is properly set up." + # Load SUI private key from keystore JSON + try: + with open(keystore_path, "r") as f: + keys = json.load(f) + if not keys: + raise ValueError( + "Sui keystore file is empty. Please check your Sui configuration." + ) + private_key = keys[0] # Assuming the first key is used + except (FileNotFoundError, json.JSONDecodeError, ValueError) as e: + print( + f"Error: Unable to load SUI private key from {keystore_path}. Details: {e}" ) + sys.exit(1) - with open(keystore_path) as f: - keys = json.load(f) - if not keys: - raise ValueError( - "Sui keystore file is empty. Please check your Sui configuration." - ) - return keys[position] - - -# Creates a new node owned object. 
-def create_example_node(client, package_id): - node_id = create_node(client, package_id, "LocalNode", "CPU", 16) - if not node_id: - raise Exception("Failed to create node") + return package_id, llama_id, llama_owner_cap_id, private_key - return node_id +def main(): + colorama_init() -# Creates llama model representation on chain. -# -# Returns the model ID and the model owner capability ID. -def create_llama_model(client, package_id, node_id): - model_id, model_owner_cap_id = create_model( - client=client, - package_id=package_id, - node_id=node_id, - name="llama3.2:1b", - model_hash=b"llama3.2_1b_hash", - url="http://localhost:11434", - token_price=1000, - capacity=1000000, - num_params=1000000000, - description="llama3.2 1b", - max_context_length=8192, - is_fine_tuned=False, - family="Llama3.2", - vendor="Meta", - is_open_source=True, - datasets=["test"], + # Argument parsing + parser = argparse.ArgumentParser( + description="Run a specific example with Sui client." ) - if not model_id: - raise Exception("Failed to create model") - - return model_id, model_owner_cap_id - - -# Prints how to start Talus services. -def ask_user_to_start_talus_services(private_key, package_id, model_owner_cap_id): - print() - print("You need to start the Talus services.") - print("Open a new terminal for both the LLM Assistant and the Event Listener.") - print() - - # check if something is running on port 8080 - llm_assistant_cmd = f""" - just start-tools - """ - print("First you need to start the LLM assistant unless it's running already.") - print("Start the LLM Assistant with the following command:") - print(llm_assistant_cmd) - - input("Press enter when ready...") - print() - - inferenced_cmd = f""" - just start-events \\ - --packageid {package_id} \\ - --privkey {private_key} \\ - --modelownercapid {model_owner_cap_id} - """ - print( - "Next, let's start the Event Listener for this example with the following command:" + parser.add_argument( + "example_name", + help="The name of the example to run. Available examples: trip_planner, ig_post_planner, cli_cluster", ) - print() - print(inferenced_cmd) + args = parser.parse_args() - input("Press enter when ready...") + # Validate the example name + example_name = args.example_name + if example_name not in EXAMPLES: + raise ValueError( + f"Unknown example name: {example_name}. Available examples: {list(EXAMPLES.keys())}" + ) + # Load configuration from known paths + package_id, llama_id, llama_owner_cap_id, private_key = load_configuration() + # Create the Sui client -# Runs given command and returns the output. 
-def run_command(command, cwd=None): - result = subprocess.run( - command, cwd=cwd, shell=True, capture_output=True, text=True - ) - if result.returncode != 0: - print(f"Error executing command: {command}") - print(f"Error output: {result.stdout}\n\n{result.stderr}") - raise Exception(f"Command failed: {command}") - return result.stdout + client = get_sui_client(private_key, rpc_url=rpc_url, ws_url=ws_url) + # Run the selected example + try: + print(f"\nRunning example: {example_name}\n") + EXAMPLES[example_name](client, package_id, llama_id, llama_owner_cap_id) + print(f"\nExample {example_name} finished successfully.") + except Exception as e: + print(f"Failed to run example {example_name}: {e}") if __name__ == "__main__": diff --git a/justfile b/justfile index 13e0633..d6a6155 100644 --- a/justfile +++ b/justfile @@ -1,148 +1,24 @@ -python_version := "3.10" -llama_version := "llama3.2:1b" +set shell := [ "python3", "-c"] [private] -default: - @just -l +default: version-check + @__import__('os').system("just -l") +[private] +version-check: + @import sys; major, minor = sys.version_info[:2]; \ + assert (major, minor) >= (3, 7), "This script requires at least Python 3.7. Please link \"python3\" to Python 3.7 or higher and try again." + # Commands for running examples mod example 'examples/example.just' -# Installs `uv`. -uv-setup: - #!/usr/bin/env bash - set -eu - - # See: https://github.com/astral-sh/uv - if ! command -v uv; then - curl -LsSf https://astral.sh/uv/install.sh | sh - fi - - uv --version - -# Installs python using `uv`. -python-setup: uv-setup - #!/usr/bin/env bash - set -eu - export RUST_LOG=warn - - uv python install {{ python_version }} - -# Creates a `.venv` and installs all the dependencies. -venv-setup: python-setup - #!/usr/bin/env bash - set -eu - export RUST_LOG=warn - - # Create the venv - uv venv -p {{ python_version }} - - # Install everything - uv pip install ./nexus_sdk/ - uv pip install ./offchain/events - uv pip install ./offchain/tools - - uv pip install -r ./examples/requirements.txt - - -# lightweight check to see if .venv exists, instead of using `venv-setup` -[private] -venv-exists: - @test -d .venv || (echo "Please run 'just venv-setup' first" && exit 1) - -# Starts a ptpython shell with the `.venv` activated. -python-shell: venv-exists - #!/usr/bin/env bash - source .venv/bin/activate - ptpython - -# Installs ollama. -ollama-setup: - curl -fsSL https://ollama.com/install.sh | sh - ollama pull {{ llama_version }} - -# Installs OS-level dependencies. -apt-setup: - #!/usr/bin/env bash - - # These should already be installed ... - sudo apt install -y git-all curl wget python3 - - sudo apt install -y cmake libssl-dev pkg-config lsof - -# below is from christos PR (https://github.com/Talus-Network/protochain/pull/19): -# Installs `suibase` and sets up `localnet`. -suibase-setup: - #!/usr/bin/env bash - set -euo pipefail - - # Suibase installs everything in ~/.local/bin. - # So this must be in the PATH. - # We abort if it is not because other scripts depend on it. - if [[ ":$PATH:" != *":$HOME/.local/bin:"* ]]; then - echo '=======================================' - echo 'ERROR: ~/.local/bin is NOT in your PATH' - echo 'Suibase installs everything in ~/.local/bin and heavily relies on it.' - echo 'Please add it to your PATH and try again.' - echo '=======================================' - exit 1 - fi - - # install suibase - if [[ ! 
-d ~/suibase ]]; then - echo Installing suibase - echo - git clone https://github.com/sui-base/suibase.git ~/suibase - cd ~/suibase - ./install - - # Because Suibase does not support pagination of Github Sui's releases, - # we just use the latest release. - # While we could use 'force_tag' in the Suibase config, with the cadence - # of Sui releases this would break every now and then. - # Instead, we rely on the fact that the features we use should be stable - # and we don't _expect_ breaking changes. - localnet create - localnet update - else - echo ~/suibase exists - fi - -# Starts LLM and other tools in an uvicorn server on port 8080. -start-tools: - #!/usr/bin/env bash - source .venv/bin/activate - uvicorn offchain.tools.src.nexus_tools.server.main:app --host 0.0.0.0 --port 8080 - -# Starts Sui event listener that invokes tools and submits completions. -# See `offchain/events` for more information about flags/envs. -start-events +args: - #!/usr/bin/env bash - source .venv/bin/activate - python3 offchain/events/src/nexus_events/sui_event.py {{args}} - -############################################ -## devnet -############################################ -# Sets up `devnet` (which is `localnet` from suibase) -devnet-setup: suibase-setup - echo - type lsui localnet - localnet set-active - -devnet-status: devnet-setup - echo - localnet status - echo - localnet links || true - -devnet-start: devnet-setup - #!/usr/bin/env bash - echo - RUST_LOG=warn localnet start - - - - +# Build, Start, Stop, or Clean Up docker containers +mod containers 'docker/containers.just' +# Builds and starts the entire environment +infra-up: version-check + @print("Building and starting the entire environment..."); __import__('os').system("just containers build"); __import__('os').system("just containers start") +# Shuts down and cleans up the environment +infra-down: version-check + @print("Stopping and cleaning up the entire environment..."); __import__('os').system("just containers stop"); __import__('os').system("just containers clean") diff --git a/nexus_sdk/.python-version b/nexus_sdk/.python-version new file mode 100644 index 0000000..c8cfe39 --- /dev/null +++ b/nexus_sdk/.python-version @@ -0,0 +1 @@ +3.10 diff --git a/nexus_sdk/pyproject.toml b/nexus_sdk/pyproject.toml new file mode 100644 index 0000000..dd137e7 --- /dev/null +++ b/nexus_sdk/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "nexus-sdk" +version = "0.1.0" +readme = "README.md" +requires-python = ">=3.10" +dependencies = [ + "pysui==0.52.0", + "asyncio", + "ptpython", + "colorama" +] +description = "Nexus SDK Library" +authors = [ + { name="Talus", email="hi@talus.network" } +] \ No newline at end of file diff --git a/nexus_sdk/src/nexus_sdk/__init__.py b/nexus_sdk/src/nexus_sdk/__init__.py index cbf0083..fff6a88 100644 --- a/nexus_sdk/src/nexus_sdk/__init__.py +++ b/nexus_sdk/src/nexus_sdk/__init__.py @@ -1,6 +1,7 @@ from .node import create_node from .model import create_model from .utils import get_sui_client +from .utils import get_sui_client_with_airdrop from .cluster import ( create_cluster, create_agent_for_cluster, @@ -18,4 +19,5 @@ "execute_cluster", "get_cluster_execution_response", "get_sui_client", + "get_sui_client_with_airdrop", ] diff --git a/nexus_sdk/src/nexus_sdk/utils.py b/nexus_sdk/src/nexus_sdk/utils.py index 104aef4..a5062b2 100644 --- a/nexus_sdk/src/nexus_sdk/utils.py +++ b/nexus_sdk/src/nexus_sdk/utils.py @@ -1,5 +1,8 @@ +import json +from pathlib import Path +from pysui import SuiConfig from 
pysui.sui.sui_clients.sync_client import SuiClient -from pysui.sui.sui_config import SuiConfig +from pysui.abstracts.client_keypair import SignatureScheme # Returns Sui client with the given private key. @@ -15,3 +18,42 @@ def get_sui_client( prv_keys=[private_key], ) ) + + +# Utility function to create a Sui client with airdrop (faucet) +def get_sui_client_with_airdrop( + rpc_url="http://localhost:9000", + ws_url="ws://localhost:9000", + faucet_url="http://localhost:5003/gas", + keystore_path=Path("./sui.keystore"), +): + + if not keystore_path.exists(): + keystore_path.parent.mkdir(parents=True, exist_ok=True) + keystore_path.touch() + sui_config = SuiConfig.user_config(rpc_url=rpc_url, ws_url=ws_url) + + _, address = sui_config.create_new_keypair_and_address( + scheme=SignatureScheme.ED25519 + ) + + sui_config._faucet_url = faucet_url + + client = SuiClient(sui_config) + + result = client.get_gas_from_faucet() + if not result: + raise Exception("Failed to get gas from faucet") + + sui_config._write_keypairs(keystore_path) + print(f"New wallet created and funded. Address: {address}") + return client + else: + with open(keystore_path, "r") as f: + keys = json.load(f) + if not keys: + raise ValueError( + "Sui keystore file is empty. Please check your Sui configuration." + ) + private_key = keys[0] # Assuming the first key is used + return get_sui_client(private_key, rpc_url=rpc_url, ws_url=ws_url) diff --git a/offchain/events/src/nexus_events/sui_event.py b/offchain/events/src/nexus_events/sui_event.py index c8359eb..040ce68 100755 --- a/offchain/events/src/nexus_events/sui_event.py +++ b/offchain/events/src/nexus_events/sui_event.py @@ -10,8 +10,6 @@ from typing import Any import sys import os -import sys -import os import signal root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) @@ -19,14 +17,8 @@ from nexus_tools.server.tools.tools import TOOLS, TOOL_ARGS_MAPPING from pysui.sui.sui_clients.sync_client import SuiClient as SyncClient from pysui.sui.sui_txn import SyncTransaction -from pysui.sui.sui_types.scalars import ObjectID, SuiU64, SuiU8, SuiString, SuiBoolean -from pysui.sui.sui_txresults.complex_tx import ( - SubscribedEvent, - SubscribedEventParms, - Event, -) -from pysui.sui.sui_types.collections import SuiArray -import hashlib +from pysui.sui.sui_types.scalars import ObjectID, SuiString, SuiBoolean +from pysui.sui.sui_txresults.complex_tx import SubscribedEvent from nexus_events.offchain import OffChain import json import unicodedata @@ -40,10 +32,9 @@ off_chain = OffChain() -async def call_use_tool(name, args): +async def call_use_tool(name, args, url): """calls /tool/use endpoint with tool name and args, called by event handler""" - print(f"Calling /tool/use with name: {name}, args: {args}") - url = "http://0.0.0.0:8080/tool/use" + print(f"Calling /tool/use with name: {name}, args: {args}, url: {url}") try: if name not in TOOLS: @@ -89,7 +80,11 @@ def sanitize_text(text): async def prompt_event_handler( - client: SuiClient, package_id: str, model_owner_cap_id: str, event: SubscribedEvent + client: SuiClient, + package_id: str, + model_owner_cap_id: str, + event: SubscribedEvent, + tool_url: str, ) -> Any: """Handler captures the move event type for each received.""" try: @@ -113,7 +108,7 @@ async def prompt_event_handler( tool_args = parsed_json["tool"]["fields"]["args"] print(f"Calling tool '{tool_name}' with args: {tool_args}") - tool_result = await call_use_tool(tool_name, tool_args) + tool_result = await call_use_tool(tool_name, tool_args, tool_url) 
tool_result = tool_result["result"] print(f"tool_result: {tool_result}") @@ -194,11 +189,17 @@ def main(): default=(os.getenv("MODEL_OWNER_CAP_ID")), help="Model owner capability object ID (required)", ) + parser.add_argument( + "--toolurl", + default="http://0.0.0.0:8080/tool/use", + help="URL to call /tool/use endpoint", + ) args = parser.parse_args() package_id = args.packageid model_owner_cap_id = args.modelownercapid + tool_url = args.toolurl config = SuiConfig.user_config( rpc_url=args.rpc, ws_url=args.ws, prv_keys=[args.privkey] @@ -212,6 +213,7 @@ def main(): package_id, model_owner_cap_id, cursor=next_cursor, + tool_url=tool_url, ) @@ -225,6 +227,7 @@ def process_next_event_page( package_id: str, model_owner_cap_id: str, cursor: EventID, + tool_url: str, ): prompt_event_type = f"{package_id}::prompt::RequestForCompletionEvent" event_filter = MoveEventTypeQuery(prompt_event_type) @@ -248,7 +251,11 @@ def process_next_event_page( print(f"Processing {len(events)} events") for event in events: - asyncio.run(prompt_event_handler(client, package_id, model_owner_cap_id, event)) + asyncio.run( + prompt_event_handler( + client, package_id, model_owner_cap_id, event, tool_url + ) + ) # Set the cursor to the last event. # Also next fetch will skip the first event (the last event of this fetch) diff --git a/offchain/tools/src/nexus_tools/server/controllers/inference.py b/offchain/tools/src/nexus_tools/server/controllers/inference.py index 0dff310..4c274c5 100644 --- a/offchain/tools/src/nexus_tools/server/controllers/inference.py +++ b/offchain/tools/src/nexus_tools/server/controllers/inference.py @@ -1,12 +1,22 @@ +import os import ollama +from ollama import Client class Inference: @staticmethod def prompt(prompt, model, max_tokens=1000, temperature=1.0): + # Fetch the URL from environment variable, defaulting to localhost if not provided + ollama_host = os.getenv("OLLAMA_HOST", "http://localhost:11434") + + # Create a custom client with the specified host + client = Client(host=ollama_host) + + # Set up options for the request options = {"temperature": temperature, "num_predict": max_tokens} - response = ollama.chat( + # Make the request using the client + response = client.chat( model=model, options=options, messages=[
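As a rough illustration of the inference change above, the following standalone sketch (assumptions: the standard `ollama` Python client API, an `OLLAMA_HOST` such as the `http://ollama:11434` service address used in the compose environment, and the `llama3.2:1b` model being pulled) shows how a caller might exercise the same host-configurable client pattern:

```python
import os

from ollama import Client

# Point the client at the Ollama service; inside the compose network this is
# typically http://ollama:11434, otherwise fall back to a local instance.
client = Client(host=os.getenv("OLLAMA_HOST", "http://localhost:11434"))

response = client.chat(
    model="llama3.2:1b",
    options={"temperature": 1.0, "num_predict": 1000},
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)

# The reply text is carried under message.content in the response payload.
print(response["message"]["content"])
```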