SELA end2end #1621

Open · wants to merge 7 commits into base: main

Changes from 5 commits
20 changes: 20 additions & 0 deletions metagpt/ext/sela/README.md
@@ -50,6 +50,26 @@ cd metagpt/ext/sela
pip install -r requirements.txt
```

#### Quick Start

- **Example: Running SELA on the House Price Prediction Task**
- To run the project, simply execute the following command:
```bash
python run_sela.py
```
- Explanation of `run_sela.py` (set `data_dir` to your local dataset path before running):
```python
requirement = ("Optimize dataset using MCTS with 10 rollouts. "
"This is a 05_house-prices-advanced-regression-techniques dataset."
"Your goal is to predict the target column `SalePrice`."
"Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target."
"Report rmse on the eval data. Do not plot or make any visualizations.")
data_dir = "Path/to/dataset"

sela = SELA()
await sela.run(requirement, data_dir)
```
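- Under the hood, `SELA.run` asks an LLM to parse the natural-language requirement into an experiment configuration (`exp_mode`, `rollouts`, `max_depth`, etc.) plus dataset info, then dispatches the matching runner (`MCTSRunner` by default); see `runner/sela.py` below.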

#### Running Experiments

- **Examples:**
6 changes: 5 additions & 1 deletion metagpt/ext/sela/data/dataset.py
@@ -113,7 +113,11 @@ def get_split_dataset_path(dataset_name, config):
datasets_dir = config["datasets_dir"]
if dataset_name in config["datasets"]:
dataset = config["datasets"][dataset_name]
data_path = os.path.join(datasets_dir, dataset["dataset"])
        # If `datasets_dir` already ends with the dataset folder name, use it as-is; otherwise join the two.
if datasets_dir.rpartition("/")[-1] == dataset["dataset"]:
data_path = datasets_dir
else:
data_path = Path(datasets_dir) / dataset["dataset"]
split_datasets = {
"train": os.path.join(data_path, "split_train.csv"),
**Contributor comment:** maybe use `Path` from `pathlib` instead of `os.path`
"dev": os.path.join(data_path, "split_dev.csv"),
10 changes: 6 additions & 4 deletions metagpt/ext/sela/insights/instruction_generator.py
@@ -34,18 +34,20 @@


class InstructionGenerator:
data_config = DATA_CONFIG

def __init__(self, state, use_fixed_insights, from_scratch):
def __init__(self, state, use_fixed_insights, from_scratch, data_config=None):
self.data_config = data_config if data_config is not None else DATA_CONFIG
self.state = state
self.file_path = state["exp_pool_path"]
if state["custom_dataset_dir"]:
with open(f"{state['custom_dataset_dir']}/description.md", "r", encoding="utf-8") as file:
self.dataset_info = file.read()
else:
dataset_info_path = (
f"{self.data_config['datasets_dir']}/{state['dataset_config']['dataset']}/dataset_info.json"
f"{self.data_config['datasets_dir']}/dataset_info.json"
if self.data_config["datasets_dir"].rpartition("/")[-1] == state["dataset_config"]["dataset"]
else f"{self.data_config['datasets_dir']}/{state['dataset_config']['dataset']}/dataset_info.json"
)
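            # Hypothetical simplification (not part of this PR): with a shared helper
            # like the resolve_data_path sketch in dataset.py above, this conditional
            # could become:
            #   dataset_info_path = str(resolve_data_path(
            #       self.data_config["datasets_dir"], state["dataset_config"]["dataset"]
            #   ) / "dataset_info.json")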

with open(dataset_info_path, "r") as file:
self.dataset_info = json.load(file)
self.use_fixed_insights = use_fixed_insights
22 changes: 22 additions & 0 deletions metagpt/ext/sela/run_sela.py
@@ -0,0 +1,22 @@
import fire
from runner.sela import SELA

requirement = (
"Optimize dataset using MCTS with 10 rollouts. "
"This is a 05_house-prices-advanced-regression-techniques dataset."
"Your goal is to predict the target column `SalePrice`."
"Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target."
"Report rmse on the eval data. Do not plot or make any visualizations."
)

data_dir = "/Path/to/SELA-datasets/05_house-prices-advanced-regression-techniques"


async def main():
    # Initialize SELA and run the end-to-end experiment
sela = SELA()
await sela.run(requirement, data_dir)


if __name__ == "__main__":
fire.Fire(main)
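
If the installed python-fire version does not await coroutine functions, an equivalent entry point (a minimal sketch) is:

```python
import asyncio

if __name__ == "__main__":
    asyncio.run(main())
```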
6 changes: 3 additions & 3 deletions metagpt/ext/sela/runner/mcts.py
@@ -12,7 +12,7 @@
class MCTSRunner(Runner):
result_path: str = "results/mcts"

def __init__(self, args, tree_mode=None, **kwargs):
def __init__(self, args, data_config=None, tree_mode=None, **kwargs):
if args.special_instruction == "image":
self.start_task_id = 1 # start from datapreprocessing if it is image task
else:
@@ -23,7 +23,7 @@ def __init__(self, args, tree_mode=None, **kwargs):
elif args.eval_func == "mlebench":
self.eval_func = node_evaluate_score_mlebench

super().__init__(args, **kwargs)
super().__init__(args, data_config=data_config, **kwargs)
self.tree_mode = tree_mode

async def run_experiment(self):
@@ -35,7 +35,7 @@ async def run_experiment(self):
mcts = Random(root_node=None, max_depth=depth, use_fixed_insights=use_fixed_insights)
else:
mcts = MCTS(root_node=None, max_depth=depth, use_fixed_insights=use_fixed_insights)
best_nodes = await mcts.search(state=self.state, args=self.args)
best_nodes = await mcts.search(state=self.state, args=self.args, data_config=self.data_config)
best_node = best_nodes["global_best"]
dev_best_node = best_nodes["dev_best"]
score_dict = best_nodes["scores"]
4 changes: 2 additions & 2 deletions metagpt/ext/sela/runner/runner.py
@@ -13,13 +13,13 @@

class Runner:
result_path: str = "results/base"
data_config = DATA_CONFIG
start_task_id = 1

def __init__(self, args, **kwargs):
def __init__(self, args, data_config=None, **kwargs):
self.args = args
self.start_time_raw = datetime.datetime.now()
self.start_time = self.start_time_raw.strftime("%Y%m%d%H%M")
self.data_config = data_config if data_config is not None else DATA_CONFIG
self.state = create_initial_state(
self.args.task,
start_task_id=self.start_task_id,
155 changes: 155 additions & 0 deletions metagpt/ext/sela/runner/sela.py
@@ -0,0 +1,155 @@
import argparse
import json
import os
from typing import Optional

from metagpt.ext.sela.runner.custom import CustomRunner
from metagpt.ext.sela.runner.mcts import MCTSRunner
from metagpt.ext.sela.runner.random_search import RandomSearchRunner
from metagpt.ext.sela.runner.runner import Runner
from metagpt.llm import LLM
from metagpt.utils.common import CodeParser

SELA_INSTRUCTION = """
**Collaborator comment:** suggest renaming this constant to `REQ_PARSING_PROMPT`

You are an assistant for configuring machine learning experiments.

Given the requirement:
{requirement}

Your task:
1. Extract **experiment configurations** from the requirement if explicitly mentioned, such as:
- "rollouts: 10"
- "exp_mode: mcts"
- "max_depth: 4"

2. Extract **experiment data information**, including:
- **dataset**: Dataset name
- **metric**: Evaluation metric
- **target_col**: Target column
- **user_requirement**: Specific instructions or dataset handling requirements

Output a JSON object with two parts:
- "config": A dictionary of explicitly mentioned configurations, using keys:
- "task": str (a noun based on the dataset name, customizable, e.g., "titanic")
- "exp_mode": str (e.g., "mcts", "rs", "base", "custom", "greedy", "autogluon")
- "rollouts": int
- "max_depth": int
- "rs_mode": str (e.g., "single", "set")
- "special_instruction": str (e.g., "text", "image")
- "data_info": A dictionary of experiment data information, with keys:
- "dataset": str (e.g., "04_titanic")
- "metric": str (e.g., "f1", "rmse")
- "target_col": str (e.g., "Survived")
- "user_requirement": str

Example output:
{{
"config": {{
"task": "titanic",
"exp_mode": "mcts",
"rollouts": 10
}},
"data_info": {{
"dataset": "04_titanic",
"metric": "f1",
"target_col": "Survived",
"user_requirement": "Predict the target column `Survived`. Perform data analysis, preprocessing, feature engineering, and modeling. Report f1 on eval data. Do not include visualizations."
}}
}}

Return only the JSON object.
"""
DEFAULT_CONFIG = {
"name": "",
"reflection": True,
"no_reflection": False,
"exp_mode": "mcts",
"rollouts": 10,
"load_tree": False,
"role_timeout": 1000,
"use_fixed_insights": False,
"low_is_better": False,
"start_task_id": 2,
"from_scratch": True,
"eval_func": "sela",
"custom_dataset_dir": None,
"max_depth": 4,
"rs_mode": "single",
"is_multimodal": True,
"num_experiments": 1,
"external_eval": True,
"no_external_eval": False,
"special_instruction": None,
}


class SELA:
def __init__(self, use_llm: bool = True):
"""
Initialize the SELA class.
Args:
        use_llm: Whether to use an LLM to parse the requirement.
"""
self.llm = LLM() if use_llm else None

async def _parse_requirement(self, requirement: str) -> dict:
"""
Use LLM to analyze the experiment requirement and extract configurations.
"""
if not self.llm:
raise ValueError("LLM is not initialized. Cannot parse the requirement.")
response = await self.llm.aask(SELA_INSTRUCTION.format(requirement=json.dumps(requirement)))
print(f"LLM Response: {response}")
parsed_response = self._parse_json(response)
return {
"config": {**DEFAULT_CONFIG, **parsed_response.get("config", {})},
"data_info": parsed_response.get("data_info", {}),
}

@staticmethod
def _parse_json(json_string: str) -> dict:
"""
Extract and parse JSON content from the given string using CodeParser.
"""
try:
json_code = CodeParser.parse_code("", json_string, "json")
**Collaborator comment:** you don't need the try/except here; let the error be raised up
return json.loads(json_code)
except ValueError:
raise ValueError(f"Invalid JSON format: {json_string}")
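
A minimal sketch of the method after applying that suggestion (assuming callers are happy to see the raw parsing error propagate):

```python
@staticmethod
def _parse_json(json_string: str) -> dict:
    # Let CodeParser/json errors propagate instead of re-wrapping them.
    json_code = CodeParser.parse_code("", json_string, "json")
    return json.loads(json_code)
```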

def _select_runner(self, config: argparse.Namespace, data_config: dict):
"""
Select the appropriate experiment runner based on the experiment mode.
"""
runners = {
"mcts": lambda: MCTSRunner(config, data_config),
"greedy": lambda: MCTSRunner(tree_mode="greedy"),
"random": lambda: MCTSRunner(tree_mode="random"),
"rs": lambda: RandomSearchRunner(config),
"base": lambda: Runner(config),
"custom": lambda: CustomRunner(config),
}
if config.exp_mode not in runners:
raise ValueError(f"Invalid exp_mode: {config.exp_mode}")
return runners[config.exp_mode]()

async def run(self, requirement: str, data_dir: Optional[str] = None):
"""
Run the experiment with the given requirement and data directory.
"""
        if not data_dir or not os.path.exists(data_dir):
raise FileNotFoundError(f"Dataset directory not found: {data_dir}")

config_all = await self._parse_requirement(requirement)
config_exp, data_info = config_all["config"], config_all["data_info"]

data_config = {
"datasets_dir": data_dir,
"work_dir": "../../workspace",
"role_dir": "storage/SELA",
"datasets": {config_exp.get("task"): data_info},
}

await self._select_runner(argparse.Namespace(**config_exp), data_config).run_experiment()
4 changes: 2 additions & 2 deletions metagpt/ext/sela/search/tree_search.py
@@ -410,15 +410,15 @@ def get_score_order_dict(self):
scores["test_raw"].append(node.raw_reward["test_score"])
return scores

async def search(self, state: dict, args):
async def search(self, state: dict, args, data_config):
reflection = args.reflection
load_tree = args.load_tree
rollouts = args.rollouts
from_scratch = args.from_scratch
role, root = initialize_di_root_node(state, reflection=reflection)
self.root_node = root
self.instruction_generator = InstructionGenerator(
state=state, use_fixed_insights=self.use_fixed_insights, from_scratch=from_scratch
state=state, use_fixed_insights=self.use_fixed_insights, from_scratch=from_scratch, data_config=data_config
)
await self.instruction_generator.initialize()
