diff --git a/CHANGELOG.md b/CHANGELOG.md
index 85c4c0a41..0c20493be 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,10 +7,14 @@
 ## Dependencies
 - Allow numpy >= 2.x (#1146)
 
+## Examples
+- Add warmstarting example (#1120)
+
 # 2.2.0
 
-## Features
+## Examples
 - Add example to specify total budget (fidelity units) instead of n_trials for multi-fidelity/Hyperband (#1121)
+- Add example for warmstarting (#1120)
 
 ## Dependencies
 - Update numpy NaN (#1122) and restrict numpy
diff --git a/docs/advanced_usage/5.1_warmstarting.rst b/docs/advanced_usage/5.1_warmstarting.rst
new file mode 100644
index 000000000..fb255681e
--- /dev/null
+++ b/docs/advanced_usage/5.1_warmstarting.rst
@@ -0,0 +1,120 @@
+Warmstarting SMAC
+======================================
+
+With the ask-and-tell interface, we can warmstart SMAC: rich information about previous
+trials can be communicated to SMAC using `TrialInfo` and `TrialValue` instances.
+
+These two objects provide the following fields:
+
+.. code-block:: python
+
+    class TrialValue:
+        """Values of a trial.
+
+        Parameters
+        ----------
+        cost : float | list[float]
+        time : float, defaults to 0.0
+        status : StatusType, defaults to StatusType.SUCCESS
+        starttime : float, defaults to 0.0
+        endtime : float, defaults to 0.0
+        additional_info : dict[str, Any], defaults to {}
+        """
+
+    class TrialInfo:
+        """Information about a trial.
+
+        Parameters
+        ----------
+        config : Configuration
+        instance : str | None, defaults to None
+        seed : int | None, defaults to None
+        budget : float | None, defaults to None
+        """
+
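+For example, a single pre-evaluated configuration could be reported to an already constructed
+facade object (called `smac` here) roughly as follows. This is only a sketch: `config` and
+`cost` are assumed to be an existing `Configuration` and the objective value measured for it.
+
+.. code-block:: python
+
+    from smac.runhistory.dataclasses import TrialInfo, TrialValue
+
+    # Describe which configuration (and seed) was evaluated ...
+    info = TrialInfo(config=config, seed=0)
+
+    # ... and what the evaluation returned (the time field is optional).
+    value = TrialValue(cost=cost, time=0.5)
+
+    # Report the finished trial to SMAC.
+    smac.tell(info, value)
+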
+
+Usage Example
+~~~~~~~~~~~~~
+See `examples/1_basics/8_warmstart.py`.
+
+
+.. code-block:: python
+
+    from __future__ import annotations
+
+    from smac.scenario import Scenario
+    from smac.facade import HyperparameterOptimizationFacade
+    from ConfigSpace import Configuration, ConfigurationSpace, Float
+    from smac.runhistory.dataclasses import TrialValue, TrialInfo
+
+
+    class Rosenbrock2D:
+        @property
+        def configspace(self) -> ConfigurationSpace:
+            cs = ConfigurationSpace(seed=0)
+            x0 = Float("x0", (-5, 10), default=-3)
+            x1 = Float("x1", (-5, 10), default=-4)
+            cs.add([x0, x1])
+
+            return cs
+
+        def evaluate(self, config: Configuration, seed: int = 0) -> float:
+            """The 2-dimensional Rosenbrock function as a toy model.
+            The Rosenbrock function is well known in the optimization community and
+            often serves as a toy problem. It can be defined for arbitrary
+            dimensions. The minimum is always at x_i = 1 with a function value of
+            zero. All input parameters are continuous. The search domain for
+            all x's is the interval [-5, 10].
+            """
+            x1 = config["x0"]
+            x2 = config["x1"]
+
+            cost = 100.0 * (x2 - x1**2.0) ** 2.0 + (1 - x1) ** 2.0
+            return cost
+
+
+    if __name__ == "__main__":
+        SEED = 12345
+        task = Rosenbrock2D()
+
+        # Previous evaluations
+        # The parameter values need to be wrapped in Configuration objects tied to the configuration space
+        configurations = [
+            Configuration(task.configspace, {'x0': 1, 'x1': 2}),
+            Configuration(task.configspace, {'x0': -1, 'x1': 3}),
+            Configuration(task.configspace, {'x0': 5, 'x1': 5}),
+        ]
+        costs = [task.evaluate(c, seed=SEED) for c in configurations]
+
+        # Define optimization problem and budget
+        scenario = Scenario(task.configspace, deterministic=False, n_trials=30)
+        intensifier = HyperparameterOptimizationFacade.get_intensifier(scenario, max_config_calls=1)
+        smac = HyperparameterOptimizationFacade(
+            scenario,
+            task.evaluate,
+            intensifier=intensifier,
+            overwrite=True,
+
+            # Replace the default initial design with the previously evaluated configurations
+            initial_design=HyperparameterOptimizationFacade.get_initial_design(
+                scenario,
+                n_configs=0,  # Do not use the default initial design
+                additional_configs=configurations,  # Use the previously evaluated configurations as initial design
+                # This only passes the configurations, not their costs!
+                # To actually reuse the pre-evaluated results, the corresponding
+                # trials have to be told to SMAC, as done below.
+            )
+        )
+
+        # Convert the previously evaluated configurations into TrialInfo and TrialValue instances
+        trial_infos = [TrialInfo(config=c, seed=SEED) for c in configurations]
+        trial_values = [TrialValue(cost=c) for c in costs]
+
+        # Warmstart SMAC with the trial information and values
+        for info, value in zip(trial_infos, trial_values):
+            smac.tell(info, value)
+
+        # Optimize as usual
+        smac.optimize()
+
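+Instead of calling `smac.optimize()`, the warmstarted facade can also be driven manually via
+ask and tell. The following sketch assumes the `smac` facade and the `task` object from the
+example above:
+
+.. code-block:: python
+
+    for _ in range(10):
+        # Ask SMAC which configuration to evaluate next ...
+        info = smac.ask()
+
+        # ... evaluate it ourselves ...
+        cost = task.evaluate(info.config, seed=info.seed)
+
+        # ... and report the result back to SMAC.
+        smac.tell(info, TrialValue(cost=cost))
+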
+For more details on ask and tell, consult `advanced_usage/5_ask_and_tell`.
diff --git a/docs/advanced_usage/index.rst b/docs/advanced_usage/index.rst
index 51fdf764a..87feb3b8a 100644
--- a/docs/advanced_usage/index.rst
+++ b/docs/advanced_usage/index.rst
@@ -18,6 +18,7 @@ Navigation
    3_multi_objective
    4_instances
    5_ask_and_tell
+   5.1_warmstarting
    6_commandline
    7_stopping_criteria
    8_logging
diff --git a/examples/1_basics/8_warmstart.py b/examples/1_basics/8_warmstart.py
new file mode 100644
index 000000000..eef95d173
--- /dev/null
+++ b/examples/1_basics/8_warmstart.py
@@ -0,0 +1,84 @@
+"""
+Warmstarting SMAC
+======================================
+
+With the ask-and-tell interface, we can warmstart SMAC: rich information about previous
+trials can be communicated to SMAC using `TrialInfo` and `TrialValue` instances.
+For more details on ask and tell, consult `advanced_usage/5_ask_and_tell`.
+"""
+from __future__ import annotations
+
+from smac.scenario import Scenario
+from smac.facade import HyperparameterOptimizationFacade
+from ConfigSpace import Configuration, ConfigurationSpace, Float
+from smac.runhistory.dataclasses import TrialValue, TrialInfo
+
+
+class Rosenbrock2D:
+    @property
+    def configspace(self) -> ConfigurationSpace:
+        cs = ConfigurationSpace(seed=0)
+        x0 = Float("x0", (-5, 10), default=-3)
+        x1 = Float("x1", (-5, 10), default=-4)
+        cs.add([x0, x1])
+
+        return cs
+
+    def evaluate(self, config: Configuration, seed: int = 0) -> float:
+        """The 2-dimensional Rosenbrock function as a toy model.
+        The Rosenbrock function is well known in the optimization community and
+        often serves as a toy problem. It can be defined for arbitrary
+        dimensions. The minimum is always at x_i = 1 with a function value of
+        zero. All input parameters are continuous. The search domain for
+        all x's is the interval [-5, 10].
+        """
+        x1 = config["x0"]
+        x2 = config["x1"]
+
+        cost = 100.0 * (x2 - x1**2.0) ** 2.0 + (1 - x1) ** 2.0
+        return cost
+
+
+if __name__ == "__main__":
+    SEED = 12345
+    task = Rosenbrock2D()
+
+    # Previous evaluations
+    # The parameter values need to be wrapped in Configuration objects tied to the configuration space
+    configurations = [
+        Configuration(task.configspace, {'x0': 1, 'x1': 2}),
+        Configuration(task.configspace, {'x0': -1, 'x1': 3}),
+        Configuration(task.configspace, {'x0': 5, 'x1': 5}),
+    ]
+    costs = [task.evaluate(c, seed=SEED) for c in configurations]
+
+    # Define optimization problem and budget
+    scenario = Scenario(task.configspace, deterministic=False, n_trials=30)
+    intensifier = HyperparameterOptimizationFacade.get_intensifier(scenario, max_config_calls=1)
+    smac = HyperparameterOptimizationFacade(
+        scenario,
+        task.evaluate,
+        intensifier=intensifier,
+        overwrite=True,
+
+        # Replace the default initial design with the previously evaluated configurations
+        initial_design=HyperparameterOptimizationFacade.get_initial_design(
+            scenario,
+            n_configs=0,  # Do not use the default initial design
+            additional_configs=configurations,  # Use the previously evaluated configurations as initial design
+            # This only passes the configurations, not their costs!
+            # To actually reuse the pre-evaluated results, the corresponding
+            # trials have to be told to SMAC, as done below.
+        )
+    )
+
+    # Convert the previously evaluated configurations into TrialInfo and TrialValue instances
+    trial_infos = [TrialInfo(config=c, seed=SEED) for c in configurations]
+    trial_values = [TrialValue(cost=c) for c in costs]
+
+    # Warmstart SMAC with the trial information and values
+    for info, value in zip(trial_infos, trial_values):
+        smac.tell(info, value)
+
+    # Optimize as usual
+    smac.optimize()
\ No newline at end of file
diff --git a/tests/test_runhistory/test_runhistory_encoder.py b/tests/test_runhistory/test_runhistory_encoder.py
index 638c7fdbf..767f2ad12 100644
--- a/tests/test_runhistory/test_runhistory_encoder.py
+++ b/tests/test_runhistory/test_runhistory_encoder.py
@@ -104,7 +104,6 @@ def test_transform(runhistory, make_scenario, configspace_small, configs):
     assert Y.tolist() != Y1.tolist()
     assert ((X <= upper) & (X >= lower)).all()
 
 
-
 def test_transform_conditionals(runhistory, make_scenario, configspace_large):
     scenario = make_scenario(configspace_large)