diff --git a/DESCRIPTION b/DESCRIPTION
index b0d0955b..47a032c2 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -102,7 +102,6 @@ Collate:
'ResultAssignerSurrogate.R'
'Surrogate.R'
'SurrogateLearner.R'
- 'SurrogateLearnerAsync.R'
'SurrogateLearnerCollection.R'
'TunerADBO.R'
'TunerAsyncMbo.R'
diff --git a/NAMESPACE b/NAMESPACE
index fde8732f..e5b96257 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -27,7 +27,6 @@ export(ResultAssignerArchive)
export(ResultAssignerSurrogate)
export(Surrogate)
export(SurrogateLearner)
-export(SurrogateLearnerAsync)
export(SurrogateLearnerCollection)
export(TunerADBO)
export(TunerAsyncMbo)
diff --git a/man-roxygen/field_archive_surrogate_is_async.R b/man-roxygen/field_archive_surrogate_is_async.R
new file mode 100644
index 00000000..f835cec9
--- /dev/null
+++ b/man-roxygen/field_archive_surrogate_is_async.R
@@ -0,0 +1,2 @@
+#' @field archive_is_async (`logical(1)`)\cr
+#' Whether the [bbotk::Archive] is an asynchronous one.
diff --git a/man-roxygen/field_properties.R b/man-roxygen/field_properties.R
index c9fc652b..299eb381 100644
--- a/man-roxygen/field_properties.R
+++ b/man-roxygen/field_properties.R
@@ -2,4 +2,4 @@
#' Set of properties of the optimizer.
#' Must be a subset of [`bbotk_reflections$optimizer_properties`][bbotk::bbotk_reflections].
#' MBO in principle is very flexible and by default we assume that the optimizer has all properties.
-#' When fully initialized, properties are determined based on the `loop_function` and `surrogate`.
+#' When fully initialized, properties are determined based on the loop, e.g., the `loop_function`, and `surrogate`.
diff --git a/man/AcqFunction.Rd b/man/AcqFunction.Rd
index d102b453..24f48a24 100644
--- a/man/AcqFunction.Rd
+++ b/man/AcqFunction.Rd
@@ -69,6 +69,7 @@ Set of required packages.}
\itemize{
\item \href{#method-AcqFunction-new}{\code{AcqFunction$new()}}
\item \href{#method-AcqFunction-update}{\code{AcqFunction$update()}}
+\item \href{#method-AcqFunction-reset}{\code{AcqFunction$reset()}}
\item \href{#method-AcqFunction-eval_many}{\code{AcqFunction$eval_many()}}
\item \href{#method-AcqFunction-eval_dt}{\code{AcqFunction$eval_dt()}}
\item \href{#method-AcqFunction-clone}{\code{AcqFunction$clone()}}
@@ -147,6 +148,18 @@ Can be implemented by subclasses.
\if{html}{\out{
}}
\describe{
-\item{\code{optimizer}}{(\link[bbotk:Optimizer]{bbotk::Optimizer}).}
+\item{\code{optimizer}}{(\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}).}
\item{\code{terminator}}{(\link[bbotk:Terminator]{bbotk::Terminator}).}
diff --git a/man/AcqOptimizer.Rd b/man/AcqOptimizer.Rd
--- a/man/AcqOptimizer.Rd
+++ b/man/AcqOptimizer.Rd
@@ -122,6 +122,7 @@ Set of hyperparameters.}
\item \href{#method-AcqOptimizer-format}{\code{AcqOptimizer$format()}}
\item \href{#method-AcqOptimizer-print}{\code{AcqOptimizer$print()}}
\item \href{#method-AcqOptimizer-optimize}{\code{AcqOptimizer$optimize()}}
+\item \href{#method-AcqOptimizer-reset}{\code{AcqOptimizer$reset()}}
\item \href{#method-AcqOptimizer-clone}{\code{AcqOptimizer$clone()}}
}
}
@@ -137,7 +138,7 @@ Creates a new instance of this \link[R6:R6Class]{R6} class.
\subsection{Arguments}{
\if{html}{\out{
}}
\describe{
-\item{\code{optimizer}}{(\link[bbotk:Optimizer]{bbotk::Optimizer}).}
+\item{\code{optimizer}}{(\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch}).}
\item{\code{terminator}}{(\link[bbotk:Terminator]{bbotk::Terminator}).}
@@ -186,6 +187,18 @@ Optimize the acquisition function.
\subsection{Returns}{
\code{\link[data.table:data.table]{data.table::data.table()}} with 1 row per candidate.
}
+}
+\if{html}{\out{
}}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-AcqOptimizer-reset}{}}}
+\subsection{Method \code{reset()}}{
+Reset the acquisition function optimizer.
+
+Currently not used.
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{AcqOptimizer$reset()}\if{html}{\out{
}}
+}
+
}
\if{html}{\out{
}}
\if{html}{\out{
}}
diff --git a/man/ResultAssigner.Rd b/man/ResultAssigner.Rd
index 01e0211a..864d27a6 100644
--- a/man/ResultAssigner.Rd
+++ b/man/ResultAssigner.Rd
@@ -74,7 +74,7 @@ Assigns the result, i.e., the final point(s) to the instance.
\subsection{Arguments}{
\if{html}{\out{
}}
\describe{
-\item{\code{instance}}{(\link[bbotk:OptimInstanceBatchSingleCrit]{bbotk::OptimInstanceBatchSingleCrit} | \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit})\cr
+\item{\code{instance}}{(\link[bbotk:OptimInstanceBatchSingleCrit]{bbotk::OptimInstanceBatchSingleCrit} | \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit} | \link[bbotk:OptimInstanceAsyncSingleCrit]{bbotk::OptimInstanceAsyncSingleCrit} | \link[bbotk:OptimInstanceAsyncMultiCrit]{bbotk::OptimInstanceAsyncMultiCrit})\cr
The \link[bbotk:OptimInstance]{bbotk::OptimInstance} the final result should be assigned to.}
}
\if{html}{\out{
}}
diff --git a/man/Surrogate.Rd b/man/Surrogate.Rd
index db339406..03a341a8 100644
--- a/man/Surrogate.Rd
+++ b/man/Surrogate.Rd
@@ -25,6 +25,9 @@ Id used when printing.}
\item{\code{archive}}{(\link[bbotk:Archive]{bbotk::Archive} | \code{NULL})\cr
\link[bbotk:Archive]{bbotk::Archive} of the \link[bbotk:OptimInstance]{bbotk::OptimInstance}.}
+\item{\code{archive_is_async}}{(\code{logical(1)})\cr
+Whether the \link[bbotk:Archive]{bbotk::Archive} is an asynchronous one.}
+
\item{\code{n_learner}}{(\code{integer(1)})\cr
Returns the number of surrogate models.}
@@ -67,6 +70,7 @@ Retrieves the currently active predict type, e.g. \code{"response"}.}
\itemize{
\item \href{#method-Surrogate-new}{\code{Surrogate$new()}}
\item \href{#method-Surrogate-update}{\code{Surrogate$update()}}
+\item \href{#method-Surrogate-reset}{\code{Surrogate$reset()}}
\item \href{#method-Surrogate-predict}{\code{Surrogate$predict()}}
\item \href{#method-Surrogate-format}{\code{Surrogate$format()}}
\item \href{#method-Surrogate-print}{\code{Surrogate$print()}}
@@ -110,7 +114,7 @@ Parameter space description depending on the subclass.}
\if{latex}{\out{\hypertarget{method-Surrogate-update}{}}}
\subsection{Method \code{update()}}{
Train learner with new data.
-Subclasses must implement \verb{$private.update()}.
+Subclasses must implement \code{private$.update()} and \code{private$.update_async()}.
\subsection{Usage}{
\if{html}{\out{
}}\preformatted{Surrogate$update()}\if{html}{\out{
}}
}
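To make the contract above concrete, here is a minimal R sketch of a subclass wiring up the private hooks. The hook names come from the documentation above; the class name and bodies are hypothetical and not the real SurrogateLearner implementation:

# Sketch only: a Surrogate subclass implementing the private hooks.
MySurrogate = R6::R6Class("MySurrogate",
  inherit = mlr3mbo::Surrogate,
  private = list(
    .update = function() {
      # fit the wrapped model on the data of a batch archive
    },
    .update_async = function() {
      # fit on an asynchronous archive, imputing pending evaluations
    },
    .reset = function() {
      # drop the fitted model (see the reset() method documented below)
    }
  )
)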
@@ -120,6 +124,20 @@ Subclasses must implement \verb{$private.update()}.
}
}
\if{html}{\out{
}}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-Surrogate-reset}{}}}
+\subsection{Method \code{reset()}}{
+Reset the surrogate model.
+Subclasses must implement \code{private$.reset()}.
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{Surrogate$reset()}\if{html}{\out{
}}
+}
+
+\subsection{Returns}{
+\code{NULL}
+}
+}
+\if{html}{\out{
}}
\if{html}{\out{
}}
\if{latex}{\out{\hypertarget{method-Surrogate-predict}{}}}
\subsection{Method \code{predict()}}{
diff --git a/man/SurrogateLearner.Rd b/man/SurrogateLearner.Rd
index 27f6f69d..b2531b5b 100644
--- a/man/SurrogateLearner.Rd
+++ b/man/SurrogateLearner.Rd
@@ -30,6 +30,11 @@ Should errors during updating the surrogate be caught and propagated to the \cod
the failed acquisition function optimization (as a result of the failed surrogate) appropriately by, e.g., proposing a randomly sampled point for evaluation?
Default is \code{TRUE}.
}
+\item{\code{impute_method}}{\code{character(1)}\cr
+Method to impute missing values in the case of updating on an asynchronous \link[bbotk:ArchiveAsync]{bbotk::ArchiveAsync} with pending evaluations.
+Can be \code{"mean"} to use mean imputation or \code{"random"} to sample values uniformly at random between the empirical minimum and maximum.
+Default is \code{"random"}.
+}
}
}
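As a usage sketch (assuming the parameter is exposed via the surrogate's param_set, as the entry above suggests, and that mlr3learners and ranger are installed):

library(mlr3mbo)
library(mlr3learners)  # provides lrn("regr.ranger")

surrogate = srlrn(lrn("regr.ranger"))
# switch from the default "random" imputation to mean imputation
surrogate$param_set$values$impute_method = "mean"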
@@ -110,6 +115,7 @@ Retrieves the currently active predict type, e.g. \code{"response"}.}
diff --git a/man/SurrogateLearnerAsync.Rd b/man/SurrogateLearnerAsync.Rd
deleted file mode 100644
index 5a2b4303..00000000
--- a/man/SurrogateLearnerAsync.Rd
+++ /dev/null
@@ -1,106 +0,0 @@
-% Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/SurrogateLearnerAsync.R
-\name{SurrogateLearnerAsync}
-\alias{SurrogateLearnerAsync}
-\title{Surrogate Model Containing a Single Learner}
-\description{
-Surrogate model containing a single \link[mlr3:LearnerRegr]{mlr3::LearnerRegr}.
-The surrogate model is updated on the \link[mlr3tuning:ArchiveAsyncTuning]{mlr3tuning::ArchiveAsyncTuning}.
-Unevaluated points are imputed with the mean or random values.
-}
-\section{Parameters}{
-
-\describe{
-\item{\code{assert_insample_perf}}{\code{logical(1)}\cr
-Should the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr} be asserted after updating the surrogate?
-If the assertion fails (i.e., the insample performance based on the \code{perf_measure} does not meet the
-\code{perf_threshold}), an error is thrown.
-Default is \code{FALSE}.
-}
-\item{\code{perf_measure}}{\link[mlr3:MeasureRegr]{mlr3::MeasureRegr}\cr
-Performance measure which should be use to assert the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr}.
-Only relevant if \code{assert_insample_perf = TRUE}.
-Default is \link[mlr3:mlr_measures_regr.rsq]{mlr3::mlr_measures_regr.rsq}.
-}
-\item{\code{perf_threshold}}{\code{numeric(1)}\cr
-Threshold the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr} should be asserted against.
-Only relevant if \code{assert_insample_perf = TRUE}.
-Default is \code{0}.
-}
-\item{\code{catch_errors}}{\code{logical(1)}\cr
-Should errors during updating the surrogate be caught and propagated to the \code{loop_function} which can then handle
-the failed acquisition function optimization (as a result of the failed surrogate) appropriately by, e.g., proposing a randomly sampled point for evaluation?
-Default is \code{TRUE}.
-}
-\item{\code{impute_method}}{\code{character(1)}\cr
-Method to impute missing values in the surrogate model.
-}
-}
-}
-
-\section{Super classes}{
-\code{\link[mlr3mbo:Surrogate]{mlr3mbo::Surrogate}} -> \code{\link[mlr3mbo:SurrogateLearner]{mlr3mbo::SurrogateLearner}} -> \code{SurrogateLearnerAsync}
-}
-\section{Methods}{
-\subsection{Public methods}{
-\itemize{
-\item \href{#method-SurrogateLearnerAsync-new}{\code{SurrogateLearnerAsync$new()}}
-\item \href{#method-SurrogateLearnerAsync-clone}{\code{SurrogateLearnerAsync$clone()}}
-}
-}
-\if{html}{\out{
-
Inherited methods
-
-
-}}
-\if{html}{\out{
}}
-\if{html}{\out{
}}
-\if{latex}{\out{\hypertarget{method-SurrogateLearnerAsync-new}{}}}
-\subsection{Method \code{new()}}{
-Creates a new instance of this \link[R6:R6Class]{R6} class.
-\subsection{Usage}{
-\if{html}{\out{
}}\preformatted{SurrogateLearnerAsync$new(learner, archive = NULL, cols_x = NULL, col_y = NULL)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{
}}
-\describe{
-\item{\code{learner}}{(\link[mlr3:LearnerRegr]{mlr3::LearnerRegr}).}
-
-\item{\code{archive}}{(\link[bbotk:Archive]{bbotk::Archive} | \code{NULL})\cr
-\link[bbotk:Archive]{bbotk::Archive} of the \link[bbotk:OptimInstance]{bbotk::OptimInstance}.}
-
-\item{\code{cols_x}}{(\code{character()} | \code{NULL})\cr
-Column id's of variables that should be used as features.
-By default, automatically inferred based on the archive.}
-
-\item{\code{col_y}}{(\code{character(1)} | \code{NULL})\cr
-Column id of variable that should be used as a target.
-By default, automatically inferred based on the archive.}
-}
-\if{html}{\out{
}}
-}
-}
-\if{html}{\out{
}}
-\if{html}{\out{
}}
-\if{latex}{\out{\hypertarget{method-SurrogateLearnerAsync-clone}{}}}
-\subsection{Method \code{clone()}}{
-The objects of this class are cloneable with this method.
-\subsection{Usage}{
-\if{html}{\out{
}}\preformatted{SurrogateLearnerAsync$clone(deep = FALSE)}\if{html}{\out{
}}
-}
-
-\subsection{Arguments}{
-\if{html}{\out{
}}
-\describe{
-\item{\code{deep}}{Whether to make a deep clone.}
-}
-\if{html}{\out{
}}
-}
-}
-}
diff --git a/man/SurrogateLearnerCollection.Rd b/man/SurrogateLearnerCollection.Rd
index 83783625..74c232a7 100644
--- a/man/SurrogateLearnerCollection.Rd
+++ b/man/SurrogateLearnerCollection.Rd
@@ -32,6 +32,11 @@ Should errors during updating the surrogate be caught and propagated to the \cod
the failed acquisition function optimization (as a result of the failed surrogate) appropriately by, e.g., proposing a randomly sampled point for evaluation?
Default is \code{TRUE}.
}
+\item{\code{impute_method}}{\code{character(1)}\cr
+Method to impute missing values in the case of updating on an asynchronous \link[bbotk:ArchiveAsync]{bbotk::ArchiveAsync} with pending evaluations.
+Can be \code{"mean"} to use mean imputation or \code{"random"} to sample values uniformly at random between the empirical minimum and maximum.
+Default is \code{"random"}.
+}
}
}
@@ -68,6 +73,8 @@ if (requireNamespace("mlr3learners") &
surrogate$learner
+ surrogate$learner[["y1"]]$model
+
surrogate$learner[["y2"]]$model
}
}
@@ -116,6 +123,7 @@ Retrieves the currently active predict type, e.g. \code{"response"}.}
diff --git a/man/acqo.Rd b/man/acqo.Rd
index 43114a0c..b697cf1c 100644
--- a/man/acqo.Rd
+++ b/man/acqo.Rd
@@ -7,8 +7,8 @@
acqo(optimizer, terminator, acq_function = NULL, callbacks = NULL, ...)
}
\arguments{
-\item{optimizer}{(\link[bbotk:Optimizer]{bbotk::Optimizer})\cr
-\link[bbotk:Optimizer]{bbotk::Optimizer} that is to be used.}
+\item{optimizer}{(\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch})\cr
+\link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch} that is to be used.}
\item{terminator}{(\link[bbotk:Terminator]{bbotk::Terminator})\cr
\link[bbotk:Terminator]{bbotk::Terminator} that is to be used.}
diff --git a/man/default_acqfunction.Rd b/man/default_acqfunction.Rd
index 66f6ddaa..e7b53720 100644
--- a/man/default_acqfunction.Rd
+++ b/man/default_acqfunction.Rd
@@ -7,15 +7,17 @@
default_acqfunction(instance)
}
\arguments{
-\item{instance}{(\link[bbotk:OptimInstance]{bbotk::OptimInstance}).}
+\item{instance}{(\link[bbotk:OptimInstance]{bbotk::OptimInstance})\cr
+An object that inherits from \link[bbotk:OptimInstance]{bbotk::OptimInstance}.}
}
\value{
\link{AcqFunction}
}
\description{
Chooses a default acquisition function, i.e. the criterion used to propose future points.
-For single-objective optimization, defaults to \link{mlr_acqfunctions_ei}.
-For multi-objective optimization, defaults to \link{mlr_acqfunctions_smsego}.
+For synchronous single-objective optimization, defaults to \link{mlr_acqfunctions_ei}.
+For synchronous multi-objective optimization, defaults to \link{mlr_acqfunctions_smsego}.
+For asynchronous single-objective optimization, defaults to \link{mlr_acqfunctions_stochastic_cb}.
}
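A sketch of this dispatch on a synchronous single-objective instance; the instance construction follows the usual bbotk pattern and is not specific to this changeset:

library(bbotk)
library(mlr3mbo)
library(paradox)

objective = ObjectiveRFun$new(
  fun = function(xs) list(y = xs$x^2),
  domain = ps(x = p_dbl(lower = -5, upper = 5)),
  codomain = ps(y = p_dbl(tags = "minimize")))
instance = OptimInstanceBatchSingleCrit$new(
  objective = objective,
  terminator = trm("evals", n_evals = 10L))

class(default_acqfunction(instance))[1L]  # expected: "AcqFunctionEI"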
\seealso{
Other mbo_defaults:
diff --git a/man/default_surrogate.Rd b/man/default_surrogate.Rd
index 839793c3..c783861e 100644
--- a/man/default_surrogate.Rd
+++ b/man/default_surrogate.Rd
@@ -4,7 +4,12 @@
\alias{default_surrogate}
\title{Default Surrogate}
\usage{
-default_surrogate(instance, learner = NULL, n_learner = NULL, force_rf = FALSE)
+default_surrogate(
+ instance,
+ learner = NULL,
+ n_learner = NULL,
+ force_random_forest = FALSE
+)
}
\arguments{
\item{instance}{(\link[bbotk:OptimInstance]{bbotk::OptimInstance})\cr
@@ -14,10 +19,10 @@ An object that inherits from \link[bbotk:OptimInstance]{bbotk::OptimInstance}.}
If specified, this learner will be used instead of the defaults described above.}
\item{n_learner}{(\code{NULL} | \code{integer(1)}).
-Number of learners to be considered in the construction of the \link{SurrogateLearner} or \link{SurrogateLearnerCollection}.
+Number of learners to be considered in the construction of the \link{Surrogate}.
If not specified will be based on the number of objectives as stated by the instance.}
-\item{force_rf}{(\code{logical(1)}).
+\item{force_random_forest}{(\code{logical(1)}).
If \code{TRUE}, a random forest is constructed even if the parameter space is numeric-only.}
}
\value{
@@ -47,11 +52,8 @@ Out of range imputation makes sense for tree-based methods and is usually hard t
In the case of dependencies, the following learner is used as a fallback:
\code{lrn("regr.featureless")}.
-If the instance is of class \link[bbotk:OptimInstanceBatchSingleCrit]{bbotk::OptimInstanceBatchSingleCrit} the learner is wrapped as a
-\link{SurrogateLearner}.
-
-If the instance is of class \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit} multiple deep clones of the learner are
-wrapped as a \link{SurrogateLearnerCollection}.
+If \code{n_learner} is \code{1}, the learner is wrapped as a \link{SurrogateLearner}.
+Otherwise, if \code{n_learner} is larger than \code{1}, multiple deep clones of the learner are wrapped as a \link{SurrogateLearnerCollection}.
}
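Continuing the sketch from default_acqfunction above (assuming mlr3learners and DiceKriging are installed so that the default Gaussian process can be built); note that n_learner is normally inferred from the number of objectives, so passing it explicitly here is purely illustrative:

library(mlr3learners)

surrogate = default_surrogate(instance)  # n_learner inferred as 1
class(surrogate)[1L]                     # "SurrogateLearner"

surrogate_collection = default_surrogate(instance, n_learner = 2L)
class(surrogate_collection)[1L]          # "SurrogateLearnerCollection"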
\references{
\itemize{
diff --git a/man/mlr_acqfunctions_aei.Rd b/man/mlr_acqfunctions_aei.Rd
index dfdbee55..7c7c2c79 100644
--- a/man/mlr_acqfunctions_aei.Rd
+++ b/man/mlr_acqfunctions_aei.Rd
@@ -127,6 +127,7 @@ This corresponds to the \code{nugget} estimate when using a \link[mlr3learners:m
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
}}
diff --git a/man/mlr_acqfunctions_cb.Rd b/man/mlr_acqfunctions_cb.Rd
index 9cf4ab51..72493345 100644
--- a/man/mlr_acqfunctions_cb.Rd
+++ b/man/mlr_acqfunctions_cb.Rd
@@ -102,6 +102,7 @@ Other Acquisition Function:
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
mlr3mbo::AcqFunction$update()
diff --git a/man/mlr_acqfunctions_ehvi.Rd b/man/mlr_acqfunctions_ehvi.Rd
index 32650a7d..895e84e8 100644
--- a/man/mlr_acqfunctions_ehvi.Rd
+++ b/man/mlr_acqfunctions_ehvi.Rd
@@ -106,6 +106,7 @@ Signs are corrected with respect to assuming minimization of objectives.}
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
}}
diff --git a/man/mlr_acqfunctions_ehvigh.Rd b/man/mlr_acqfunctions_ehvigh.Rd
index f122a903..cc7e5a52 100644
--- a/man/mlr_acqfunctions_ehvigh.Rd
+++ b/man/mlr_acqfunctions_ehvigh.Rd
@@ -125,6 +125,7 @@ Nodes are scaled by a factor of \code{sqrt(2)} and weights are normalized under
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
}}
diff --git a/man/mlr_acqfunctions_ei.Rd b/man/mlr_acqfunctions_ei.Rd
index dd7bc9d9..d07624be 100644
--- a/man/mlr_acqfunctions_ei.Rd
+++ b/man/mlr_acqfunctions_ei.Rd
@@ -115,6 +115,7 @@ In the case of maximization, this already includes the necessary change of sign.
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
}}
diff --git a/man/mlr_acqfunctions_eips.Rd b/man/mlr_acqfunctions_eips.Rd
index ba79851b..fd7df6d9 100644
--- a/man/mlr_acqfunctions_eips.Rd
+++ b/man/mlr_acqfunctions_eips.Rd
@@ -121,6 +121,7 @@ In the case of maximization, this already includes the necessary change of sign.
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
}}
diff --git a/man/mlr_acqfunctions_mean.Rd b/man/mlr_acqfunctions_mean.Rd
index e201f9b2..cd6a7400 100644
--- a/man/mlr_acqfunctions_mean.Rd
+++ b/man/mlr_acqfunctions_mean.Rd
@@ -87,6 +87,7 @@ Other Acquisition Function:
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
mlr3mbo::AcqFunction$update()
diff --git a/man/mlr_acqfunctions_multi.Rd b/man/mlr_acqfunctions_multi.Rd
index d3e78c09..4cd43abd 100644
--- a/man/mlr_acqfunctions_multi.Rd
+++ b/man/mlr_acqfunctions_multi.Rd
@@ -14,7 +14,7 @@ the surrogate is the same for all acquisition functions.
If acquisition functions have not been initialized with a surrogate, the surrogate passed during construction or lazy initialization
will be used for all acquisition functions.
-For optimization, \link{AcqOptimizer} can be used as for any other \link{AcqFunction}, however, the \link[bbotk:Optimizer]{bbotk::Optimizer} wrapped within the \link{AcqOptimizer}
+For optimization, \link{AcqOptimizer} can be used as for any other \link{AcqFunction}, however, the \link[bbotk:OptimizerBatch]{bbotk::OptimizerBatch} wrapped within the \link{AcqOptimizer}
must support multi-objective optimization as indicated via the \code{multi-crit} property.
}
\section{Dictionary}{
@@ -115,6 +115,7 @@ Points to the ids of the individual acquisition functions.}
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
}}
diff --git a/man/mlr_acqfunctions_pi.Rd b/man/mlr_acqfunctions_pi.Rd
index a7b8f179..a388c9f8 100644
--- a/man/mlr_acqfunctions_pi.Rd
+++ b/man/mlr_acqfunctions_pi.Rd
@@ -104,6 +104,7 @@ In the case of maximization, this already includes the necessary change of sign.
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
}}
diff --git a/man/mlr_acqfunctions_sd.Rd b/man/mlr_acqfunctions_sd.Rd
index d80fe16b..c40e4515 100644
--- a/man/mlr_acqfunctions_sd.Rd
+++ b/man/mlr_acqfunctions_sd.Rd
@@ -87,6 +87,7 @@ Other Acquisition Function:
bbotk::Objective$print()
mlr3mbo::AcqFunction$eval_dt()
mlr3mbo::AcqFunction$eval_many()
+mlr3mbo::AcqFunction$reset()
mlr3mbo::AcqFunction$update()
diff --git a/man/mlr_acqfunctions_smsego.Rd b/man/mlr_acqfunctions_smsego.Rd
index 53de144f..4e4118b5 100644
--- a/man/mlr_acqfunctions_smsego.Rd
+++ b/man/mlr_acqfunctions_smsego.Rd
@@ -23,6 +23,15 @@ described in Horn et al. (2015).
}
}
+\section{Note}{
+
+\itemize{
+\item This acquisition function always also returns its current epsilon value in a list column (\code{acq_epsilon}).
+This value will be logged into the \link[bbotk:ArchiveBatch]{bbotk::ArchiveBatch} of the \link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch} of the \link{AcqOptimizer} and
+therefore also in the \link[bbotk:Archive]{bbotk::Archive} of the actual \link[bbotk:OptimInstance]{bbotk::OptimInstance} that is to be optimized.
+}
+}
+
\examples{
if (requireNamespace("mlr3learners") &
requireNamespace("DiceKriging") &
@@ -104,7 +113,7 @@ Epsilon used for the additive epsilon dominance.}
\item{\code{progress}}{(\code{numeric(1)})\cr
Optimization progress (typically, the number of function evaluations left).
-Note that this requires the \link[bbotk:OptimInstance]{bbotk::OptimInstance} to be terminated via a \link[bbotk:mlr_terminators_evals]{bbotk::TerminatorEvals}.}
+Note that this requires the \link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch} to be terminated via a \link[bbotk:mlr_terminators_evals]{bbotk::TerminatorEvals}.}
}
\if{html}{\out{
}}
}
@@ -113,6 +122,7 @@ Note that this requires the \link[bbotk:OptimInstance]{bbotk::OptimInstance} to
\itemize{
\item \href{#method-AcqFunctionSmsEgo-new}{\code{AcqFunctionSmsEgo$new()}}
\item \href{#method-AcqFunctionSmsEgo-update}{\code{AcqFunctionSmsEgo$update()}}
+\item \href{#method-AcqFunctionSmsEgo-reset}{\code{AcqFunctionSmsEgo$reset()}}
\item \href{#method-AcqFunctionSmsEgo-clone}{\code{AcqFunctionSmsEgo$clone()}}
}
}
@@ -158,6 +168,17 @@ Update the acquisition function and set \code{ys_front}, \code{ref_point} and \c
\if{html}{\out{
}}\preformatted{AcqFunctionSmsEgo$update()}\if{html}{\out{
}}
}
+}
+\if{html}{\out{
}}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-AcqFunctionSmsEgo-reset}{}}}
+\subsection{Method \code{reset()}}{
+Reset the acquisition function.
+Resets \code{epsilon}.
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{AcqFunctionSmsEgo$reset()}\if{html}{\out{
}}
+}
+
}
\if{html}{\out{
}}
\if{html}{\out{
}}
diff --git a/man/mlr_acqfunctions_stochastic_cb.Rd b/man/mlr_acqfunctions_stochastic_cb.Rd
index 6ab76688..19255f06 100644
--- a/man/mlr_acqfunctions_stochastic_cb.Rd
+++ b/man/mlr_acqfunctions_stochastic_cb.Rd
@@ -6,8 +6,11 @@
\title{Acquisition Function Stochastic Confidence Bound}
\description{
Lower / Upper Confidence Bound with lambda sampling and decay.
-The initial lambda value is drawn from an uniform distribution between \code{min_lambda} and \code{max_lambda} or from an exponential distribution with rate \code{1 / lambda}.
-The lambda value is updated after each evaluation by the formula \code{lambda * exp(-rate * (t \%\% period))}.
+The initial \eqn{\lambda} is drawn from a uniform distribution between \code{min_lambda} and \code{max_lambda} or from an exponential distribution with rate \code{1 / lambda}.
+\eqn{\lambda} is then decayed after each update via \code{lambda * exp(-rate * (t \%\% period))}, where \code{t} is the number of times the acquisition function has been updated.
+
+While this acquisition function usually would be used within an asynchronous optimizer, e.g., \link{OptimizerAsyncMbo},
+it can in principle also be used in synchronous optimizers, e.g., \link{OptimizerMbo}.
}
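The decay formula is easy to check numerically in plain R; the values below are illustrative, not package defaults:

lambda_0 = 1.96
rate = 0.1
period = 4L
t = 0:8  # number of completed acquisition function updates
lambda_0 * exp(-rate * (t %% period))  # lambda resets every `period` updates
# with period = NULL, t enters the formula directly and the decay is monotone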
\section{Dictionary}{
@@ -23,16 +26,16 @@ acqf("stochastic_cb")
\itemize{
\item \code{"lambda"} (\code{numeric(1)})\cr
-Lambda value for sampling from the exponential distribution.
+\eqn{\lambda} value for sampling from the exponential distribution.
Defaults to \code{1.96}.
\item \code{"min_lambda"} (\code{numeric(1)})\cr
-Minimum value of lambda for sampling from the uniform distribution.
+Minimum value of \eqn{\lambda} for sampling from the uniform distribution.
Defaults to \code{0.01}.
\item \code{"max_lambda"} (\code{numeric(1)})\cr
-Maximum value of lambda for sampling from the uniform distribution.
+Maximum value of \eqn{\lambda} for sampling from the uniform distribution.
Defaults to \code{10}.
\item \code{"distribution"} (\code{character(1)})\cr
-Distribution to sample lambda from.
+Distribution to sample \eqn{\lambda} from.
One of \code{c("uniform", "exponential")}.
Defaults to \code{uniform}.
\item \code{"rate"} (\code{numeric(1)})\cr
@@ -40,19 +43,60 @@ Rate of the exponential decay.
Defaults to \code{0} i.e. no decay.
\item \code{"period"} (\code{integer(1)})\cr
Period of the exponential decay.
-Defaults to \code{NULL} i.e. the decay has no period.
+Defaults to \code{NULL}, i.e., the decay has no period.
+}
+}
+
+\section{Note}{
+
+\itemize{
+\item This acquisition function always also returns its current (\code{acq_lambda}) and original (\code{acq_lambda_0}) \eqn{\lambda}.
+These values will be logged into the \link[bbotk:ArchiveBatch]{bbotk::ArchiveBatch} of the \link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch} of the \link{AcqOptimizer} and
+therefore also in the \link[bbotk:Archive]{bbotk::Archive} of the actual \link[bbotk:OptimInstance]{bbotk::OptimInstance} that is to be optimized.
}
}
+\examples{
+if (requireNamespace("mlr3learners") &
+ requireNamespace("DiceKriging") &
+ requireNamespace("rgenoud")) {
+ library(bbotk)
+ library(paradox)
+ library(mlr3learners)
+ library(data.table)
+
+ fun = function(xs) {
+ list(y = xs$x ^ 2)
+ }
+ domain = ps(x = p_dbl(lower = -10, upper = 10))
+ codomain = ps(y = p_dbl(tags = "minimize"))
+ objective = ObjectiveRFun$new(fun = fun, domain = domain, codomain = codomain)
+
+ instance = OptimInstanceBatchSingleCrit$new(
+ objective = objective,
+ terminator = trm("evals", n_evals = 5))
+
+ instance$eval_batch(data.table(x = c(-6, -5, 3, 9)))
+
+ learner = default_gp()
+
+ surrogate = srlrn(learner, archive = instance$archive)
+
+ acq_function = acqf("stochastic_cb", surrogate = surrogate, lambda = 3)
+
+ acq_function$surrogate$update()
+ acq_function$update()
+ acq_function$eval_dt(data.table(x = c(-1, 0, 1)))
+}
+}
\references{
\itemize{
\item Snoek, Jasper, Larochelle, Hugo, Adams, P R (2012).
\dQuote{Practical Bayesian Optimization of Machine Learning Algorithms.}
In Pereira F, Burges CJC, Bottou L, Weinberger KQ (eds.), \emph{Advances in Neural Information Processing Systems}, volume 25, 2951--2959.
-\item Egele R, Guyon I, Vishwanath V, Balaprakash P (2023).
+\item Egelé, Romain, Guyon, Isabelle, Vishwanath, Venkatram, Balaprakash, Prasanna (2023).
\dQuote{Asynchronous Decentralized Bayesian Optimization for Large Scale Hyperparameter Optimization.}
-\emph{arXiv}.
-\url{https://arxiv.org/abs/2207.00479}.
+In \emph{2023 IEEE 19th International Conference on e-Science (e-Science)}, 1--10.
}
}
\seealso{
@@ -81,6 +125,7 @@ Other Acquisition Function:
\itemize{
\item \href{#method-AcqFunctionStochasticCB-new}{\code{AcqFunctionStochasticCB$new()}}
\item \href{#method-AcqFunctionStochasticCB-update}{\code{AcqFunctionStochasticCB$update()}}
+\item \href{#method-AcqFunctionStochasticCB-reset}{\code{AcqFunctionStochasticCB$reset()}}
\item \href{#method-AcqFunctionStochasticCB-clone}{\code{AcqFunctionStochasticCB$clone()}}
}
}
@@ -128,7 +173,7 @@ Creates a new instance of this \link[R6:R6Class]{R6} class.
\item{\code{rate}}{(\code{numeric(1)}).}
-\item{\code{period}}{(\code{integer(1)}).}
+\item{\code{period}}{(\code{NULL} | \code{integer(1)}).}
}
\if{html}{\out{
}}
}
@@ -143,6 +188,17 @@ Samples and decays lambda.
\if{html}{\out{}}
+\describe{
+\item{\code{inst}}{(\link[bbotk:OptimInstanceAsyncSingleCrit]{bbotk::OptimInstanceAsyncSingleCrit}).}
+}
+\if{html}{\out{
}}
+}
+\subsection{Returns}{
+\code{\link[data.table:data.table]{data.table::data.table()}}
+}
}
diff --git a/man/mlr_optimizers_async_mbo.Rd b/man/mlr_optimizers_async_mbo.Rd
--- a/man/mlr_optimizers_async_mbo.Rd
+++ b/man/mlr_optimizers_async_mbo.Rd
\if{html}{\out{}}\preformatted{OptimizerAsyncMbo$new(
id = "async_mbo",
surrogate = NULL,
acq_function = NULL,
acq_optimizer = NULL,
+ result_assigner = NULL,
param_set = NULL,
- param_classes = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct"),
- properties = c("dependencies", "single-crit", "async"),
- packages = c("mlr3mbo", "rush"),
label = "Asynchronous Model Based Optimization",
man = "mlr3mbo::OptimizerAsyncMbo"
)}\if{html}{\out{
}}
@@ -116,19 +193,11 @@ The acquisition function.}
\item{\code{acq_optimizer}}{(\link{AcqOptimizer} | \code{NULL})\cr
The acquisition function optimizer.}
-\item{\code{param_set}}{\link[paradox:ParamSet]{paradox::ParamSet}\cr
-Set of control parameters.}
-
-\item{\code{param_classes}}{(\code{character()})\cr
-Supported parameter classes that the optimizer can optimize, as given in the \code{\link[paradox:ParamSet]{paradox::ParamSet}} \verb{$class} field.}
+\item{\code{result_assigner}}{(\link{ResultAssigner} | \code{NULL})\cr
+The result assigner.}
-\item{\code{properties}}{(\code{character()})\cr
-Set of properties of the optimizer.
-Must be a subset of \code{\link[=bbotk_reflections]{bbotk_reflections$optimizer_properties}}.}
-
-\item{\code{packages}}{(\code{character()})\cr
-Set of required packages.
-A warning is signaled by the constructor if at least one of the packages is not installed, but loaded (not attached) later on-demand via \code{\link[=requireNamespace]{requireNamespace()}}.}
+\item{\code{param_set}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr
+Set of control parameters.}
\item{\code{label}}{(\code{character(1)})\cr
Label for this object.
@@ -140,13 +209,39 @@ The referenced help package can be opened via method \verb{$help()}.}
}
\if{html}{\out{}}
}
+}
+\if{html}{\out{}}
\describe{
-\item{\code{inst}}{(\link{OptimInstanceAsyncSingleCrit} | \link{OptimInstanceAsyncMultiCrit}).}
+\item{\code{inst}}{(\link[bbotk:OptimInstanceAsyncSingleCrit]{bbotk::OptimInstanceAsyncSingleCrit}).}
}
\if{html}{\out{
}}
}
diff --git a/man/mlr_optimizers_mbo.Rd b/man/mlr_optimizers_mbo.Rd
index 680bc3f7..3f5cf30c 100644
--- a/man/mlr_optimizers_mbo.Rd
+++ b/man/mlr_optimizers_mbo.Rd
@@ -15,7 +15,7 @@ By optimizing a comparably cheap to evaluate acquisition function defined on the
Detailed descriptions of different MBO flavors are provided in the documentation of the respective \link{loop_function}.
-Termination is handled via a \link[bbotk:Terminator]{bbotk::Terminator} part of the \link[bbotk:OptimInstance]{bbotk::OptimInstance} to be optimized.
+Termination is handled via a \link[bbotk:Terminator]{bbotk::Terminator} part of the \link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch} to be optimized.
Note that in general the \link{Surrogate} is updated one final time on all available data after the optimization process has terminated.
However, in certain scenarios this is not always possible or meaningful, e.g., when using \code{\link[=bayesopt_parego]{bayesopt_parego()}} for multi-objective optimization
@@ -23,14 +23,17 @@ which uses a surrogate that relies on a scalarization of the objectives.
It is therefore recommended to manually inspect the \link{Surrogate} after optimization if it is to be used, e.g., for visualization purposes to make
sure that it has been properly updated on all available data.
If this final update of the \link{Surrogate} could not be performed successfully, a warning will be logged.
+
+By specifying a \link{ResultAssigner}, one can alter how the final result is determined after optimization, e.g.,
+simply based on the evaluations logged in the archive via \link{ResultAssignerArchive} or based on the \link{Surrogate} via \link{ResultAssignerSurrogate}.
}
\section{Archive}{
-The \link[bbotk:Archive]{bbotk::Archive} holds the following additional columns that are specific to MBO algorithms:
+The \link[bbotk:ArchiveBatch]{bbotk::ArchiveBatch} holds the following additional columns that are specific to MBO algorithms:
\itemize{
-\item \verb{[acq_function$id]} (\code{numeric(1)})\cr
+\item \code{acq_function$id} (\code{numeric(1)})\cr
The value of the acquisition function.
-\item \code{.already_evaluated} (\verb{logical(1))}\cr
+\item \code{".already_evaluated"} (\verb{logical(1))}\cr
Whether this point was already evaluated. Depends on the \code{skip_already_evaluated} parameter of the \link{AcqOptimizer}.
}
}
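For example, after optimizing with acqf("ei") these columns can be inspected as follows (a sketch assuming an already optimized instance):

# instance has been optimized by OptimizerMbo with acq_function = acqf("ei")
cols = c("acq_ei", ".already_evaluated")
instance$archive$data[, ..cols]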
@@ -130,7 +133,7 @@ This corresponds to the values given by a \link[paradox:ParamSet]{paradox::Param
Set of properties of the optimizer.
Must be a subset of \code{\link[bbotk:bbotk_reflections]{bbotk_reflections$optimizer_properties}}.
MBO in principle is very flexible and by default we assume that the optimizer has all properties.
-When fully initialized, properties are determined based on the \code{loop_function} and \code{surrogate}.}
+When fully initialized, properties are determined based on the loop, e.g., the \code{loop_function}, and \code{surrogate}.}
\item{\code{packages}}{(\code{character()})\cr
Set of required packages.
@@ -145,6 +148,7 @@ Required packages are determined based on the \code{acq_function}, \code{surroga
\item \href{#method-OptimizerMbo-new}{\code{OptimizerMbo$new()}}
\item \href{#method-OptimizerMbo-print}{\code{OptimizerMbo$print()}}
\item \href{#method-OptimizerMbo-reset}{\code{OptimizerMbo$reset()}}
+\item \href{#method-OptimizerMbo-optimize}{\code{OptimizerMbo$optimize()}}
\item \href{#method-OptimizerMbo-clone}{\code{OptimizerMbo$clone()}}
}
}
@@ -153,7 +157,6 @@ Required packages are determined based on the \code{acq_function}, \code{surroga
}}\preformatted{OptimizerMbo$new(
loop_function = NULL,
@@ -232,6 +235,27 @@ Sets the following fields to \code{NULL}:
\if{html}{\out{
}}\preformatted{OptimizerMbo$reset()}\if{html}{\out{
}}
}
+}
+\if{html}{\out{
}}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-OptimizerMbo-optimize}{}}}
+\subsection{Method \code{optimize()}}{
+Performs the optimization and writes optimization result into \link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch}.
+The optimization result is returned but the complete optimization path is stored in \link[bbotk:ArchiveBatch]{bbotk::ArchiveBatch} of \link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch}.
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{OptimizerMbo$optimize(inst)}\if{html}{\out{
}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{
}}
+\describe{
+\item{\code{inst}}{(\link[bbotk:OptimInstanceBatch]{bbotk::OptimInstanceBatch}).}
+}
+\if{html}{\out{
}}
+}
+\subsection{Returns}{
+\link[data.table:data.table]{data.table::data.table}.
+}
}
\if{html}{\out{
}}
\if{html}{\out{
}}
diff --git a/man/mlr_result_assigners_archive.Rd b/man/mlr_result_assigners_archive.Rd
index aa453a08..9bda8c60 100644
--- a/man/mlr_result_assigners_archive.Rd
+++ b/man/mlr_result_assigners_archive.Rd
@@ -68,7 +68,7 @@ Assigns the result, i.e., the final point(s) to the instance.
\subsection{Arguments}{
\if{html}{\out{
}}
\describe{
-\item{\code{instance}}{(\link[bbotk:OptimInstanceBatchSingleCrit]{bbotk::OptimInstanceBatchSingleCrit} | \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit})\cr
+\item{\code{instance}}{(\link[bbotk:OptimInstanceBatchSingleCrit]{bbotk::OptimInstanceBatchSingleCrit} | \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit} | \link[bbotk:OptimInstanceAsyncSingleCrit]{bbotk::OptimInstanceAsyncSingleCrit} | \link[bbotk:OptimInstanceAsyncMultiCrit]{bbotk::OptimInstanceAsyncMultiCrit})\cr
The \link[bbotk:OptimInstance]{bbotk::OptimInstance} the final result should be assigned to.}
}
\if{html}{\out{
}}
diff --git a/man/mlr_result_assigners_surrogate.Rd b/man/mlr_result_assigners_surrogate.Rd
index 68d57759..0d38d082 100644
--- a/man/mlr_result_assigners_surrogate.Rd
+++ b/man/mlr_result_assigners_surrogate.Rd
@@ -8,7 +8,7 @@
Result assigner that chooses the final point(s) based on a surrogate mean prediction of all evaluated points in the \link[bbotk:Archive]{bbotk::Archive}.
This is especially useful in the case of noisy objective functions.
-In the case of operating on an \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit} the \link{SurrogateLearnerCollection} must use as many learners as there are objective functions.
+In the case of operating on an \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit} or \link[bbotk:OptimInstanceAsyncMultiCrit]{bbotk::OptimInstanceAsyncMultiCrit} the \link{SurrogateLearnerCollection} must use as many learners as there are objective functions.
}
\examples{
result_assigner = ras("surrogate")
@@ -82,7 +82,7 @@ If \verb{$surrogate} is \code{NULL}, \code{default_surrogate(instance)} is used
\subsection{Arguments}{
\if{html}{\out{
}}
\describe{
-\item{\code{instance}}{(\link[bbotk:OptimInstanceBatchSingleCrit]{bbotk::OptimInstanceBatchSingleCrit} | \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit})\cr
+\item{\code{instance}}{(\link[bbotk:OptimInstanceBatchSingleCrit]{bbotk::OptimInstanceBatchSingleCrit} | \link[bbotk:OptimInstanceBatchMultiCrit]{bbotk::OptimInstanceBatchMultiCrit} | \link[bbotk:OptimInstanceAsyncSingleCrit]{bbotk::OptimInstanceAsyncSingleCrit} | \link[bbotk:OptimInstanceAsyncMultiCrit]{bbotk::OptimInstanceAsyncMultiCrit})\cr
The \link[bbotk:OptimInstance]{bbotk::OptimInstance} the final result should be assigned to.}
}
\if{html}{\out{
}}
diff --git a/man/mlr_tuners_adbo.Rd b/man/mlr_tuners_adbo.Rd
index 90fb6f0d..19369a35 100644
--- a/man/mlr_tuners_adbo.Rd
+++ b/man/mlr_tuners_adbo.Rd
@@ -3,60 +3,113 @@
\name{mlr_tuners_adbo}
\alias{mlr_tuners_adbo}
\alias{TunerADBO}
-\title{Asynchronous Decentralized Bayesian Optimization}
+\title{TunerAsync using Asynchronous Decentralized Bayesian Optimization}
\description{
\code{TunerADBO} class that implements Asynchronous Decentralized Bayesian Optimization (ADBO).
ADBO is a variant of Asynchronous Model Based Optimization (AMBO) that uses \link{AcqFunctionStochasticCB} with exponential lambda decay.
-}
-\note{
-The lambda parameter of the upper confidence bound acquisition function controls the trade-off between exploration and exploitation.
-A large lambda value leads to more exploration, while a small lambda value leads to more exploitation.
-The initial lambda value is drawn from an exponential distribution with rate \code{1 / lambda}.
-ADBO can use periodic exponential decay to reduce lambda periodically with the formula \code{lambda * exp(-rate * (t \%\% period))}.
-The surrogate model is always a random forest and die acquisition optimizer is random search with a budget of 10,000 evaluations.
+This is a minimal interface internally passing on to \link{OptimizerAsyncMbo}.
+For additional information and documentation see \link{OptimizerAsyncMbo}.
}
\section{Parameters}{
\describe{
-\item{\code{lambda}}{\code{numeric(1)}\cr
-Lambda value for sampling from the exponential distribution.}
-\item{\code{rate}}{\code{numeric(1)}\cr
-Rate of the exponential decay.}
-\item{\code{period}}{\code{integer(1)}\cr
-Period of the exponential decay.}
-\item{\code{initial_design_size}}{\code{integer(1)}\cr
-Size of the initial design.
-Defaults to \code{100}.}
-
\item{\code{initial_design}}{\code{data.table::data.table()}\cr
Initial design of the optimization.
-If \code{NULL}, a design of size \code{design_size} is generated with \code{design_function}.}
+If \code{NULL}, a design of size \code{design_size} is generated with the specified \code{design_function}.
+Default is \code{NULL}.}
\item{\code{design_size}}{\code{integer(1)}\cr
-Size of the initial design.}
+Size of the initial design if it is to be generated.
+Default is \code{100}.}
\item{\code{design_function}}{\code{character(1)}\cr
-Function to generate the initial design.
-One of \code{c("random", "sobol", "lhs")}.}
+Sampling function to generate the initial design.
+Can be \code{random} (\link[paradox:generate_design_random]{paradox::generate_design_random}), \code{lhs} (\link[paradox:generate_design_lhs]{paradox::generate_design_lhs}), or \code{sobol} (\link[paradox:generate_design_sobol]{paradox::generate_design_sobol}).
+Default is \code{sobol}.}
\item{\code{n_workers}}{\code{integer(1)}\cr
Number of parallel workers.
-If \code{NULL}, all rush workers set with \code{\link[rush:rush_plan]{rush::rush_plan()}} are used.}
+If \code{NULL}, all rush workers specified via \code{\link[rush:rush_plan]{rush::rush_plan()}} are used.
+Default is \code{NULL}.}
}
}
+\examples{
+\donttest{
+if (requireNamespace("rush") &
+ requireNamespace("mlr3learners") &
+ requireNamespace("DiceKriging") &
+ requireNamespace("rgenoud")) {
+
+ library(mlr3)
+ library(mlr3tuning)
+
+ # single-objective
+ task = tsk("wine")
+ learner = lrn("classif.rpart", cp = to_tune(lower = 1e-4, upper = 1, logscale = TRUE))
+ resampling = rsmp("cv", folds = 3)
+ measure = msr("classif.acc")
+
+ instance = TuningInstanceAsyncSingleCrit$new(
+ task = task,
+ learner = learner,
+ resampling = resampling,
+ measure = measure,
+ terminator = trm("evals", n_evals = 10))
+
+ rush::rush_plan(n_workers = 2)
+
+ tnr("adbo", design_size = 4, n_workers = 2)$optimize(instance)
+}
+}
+}
\references{
\itemize{
-\item Egele R, Guyon I, Vishwanath V, Balaprakash P (2023).
+\item Egelé, Romain, Guyon, Isabelle, Vishwanath, Venkatram, Balaprakash, Prasanna (2023).
\dQuote{Asynchronous Decentralized Bayesian Optimization for Large Scale Hyperparameter Optimization.}
-\emph{arXiv}.
-\url{https://arxiv.org/abs/2207.00479}.
+In \emph{2023 IEEE 19th International Conference on e-Science (e-Science)}, 1--10.
}
}
\section{Super classes}{
\code{\link[mlr3tuning:Tuner]{mlr3tuning::Tuner}} -> \code{\link[mlr3tuning:TunerAsync]{mlr3tuning::TunerAsync}} -> \code{\link[mlr3tuning:TunerAsyncFromOptimizerAsync]{mlr3tuning::TunerAsyncFromOptimizerAsync}} -> \code{TunerADBO}
}
+\section{Active bindings}{
+\if{html}{\out{
}}
+\describe{
+\item{\code{surrogate}}{(\link{Surrogate} | \code{NULL})\cr
+The surrogate.}
+
+\item{\code{acq_function}}{(\link{AcqFunction} | \code{NULL})\cr
+The acquisition function.}
+
+\item{\code{acq_optimizer}}{(\link{AcqOptimizer} | \code{NULL})\cr
+The acquisition function optimizer.}
+
+\item{\code{result_assigner}}{(\link{ResultAssigner} | \code{NULL})\cr
+The result assigner.}
+
+\item{\code{param_classes}}{(\code{character()})\cr
+Supported parameter classes that the optimizer can optimize.
+Determined based on the \code{surrogate} and the \code{acq_optimizer}.
+This corresponds to the values given by a \link[paradox:ParamSet]{paradox::ParamSet}'s
+\verb{$class} field.}
+
+\item{\code{properties}}{(\code{character()})\cr
+Set of properties of the optimizer.
+Must be a subset of \code{\link[bbotk:bbotk_reflections]{bbotk_reflections$optimizer_properties}}.
+MBO in principle is very flexible and by default we assume that the optimizer has all properties.
+When fully initialized, properties are determined based on the loop, e.g., the \code{loop_function}, and \code{surrogate}.}
+
+\item{\code{packages}}{(\code{character()})\cr
+Set of required packages.
+A warning is signaled prior to optimization if at least one of the packages is not installed, but loaded (not attached) later on-demand via \code{\link[=requireNamespace]{requireNamespace()}}.
+Required packages are determined based on the \code{acq_function}, \code{surrogate} and the \code{acq_optimizer}.}
+}
+\if{html}{\out{
}}
+}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-TunerADBO-new}{\code{TunerADBO$new()}}
+\item \href{#method-TunerADBO-print}{\code{TunerADBO$print()}}
+\item \href{#method-TunerADBO-reset}{\code{TunerADBO$reset()}}
\item \href{#method-TunerADBO-clone}{\code{TunerADBO$clone()}}
}
}
@@ -65,7 +118,6 @@ If \code{NULL}, all rush workers set with \code{\link[rush:rush_plan]{rush::rush
@@ -79,6 +131,32 @@ Creates a new instance of this \link[R6:R6Class]{R6} class.
\if{html}{\out{
}}\preformatted{TunerADBO$new()}\if{html}{\out{
}}
}
+}
+\if{html}{\out{
}}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-TunerADBO-print}{}}}
+\subsection{Method \code{print()}}{
+Print method.
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{TunerADBO$print()}\if{html}{\out{
}}
+}
+
+\subsection{Returns}{
+(\code{character()}).
+}
+}
+\if{html}{\out{
}}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-TunerADBO-reset}{}}}
+\subsection{Method \code{reset()}}{
+Reset the tuner.
+Sets the following fields to \code{NULL}:
+\code{surrogate}, \code{acq_function}, \code{acq_optimizer}, \code{result_assigner}.
+Resets parameter values \code{design_size} and \code{design_function} to their defaults.
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{TunerADBO$reset()}\if{html}{\out{
}}
+}
+
}
\if{html}{\out{
}}
\if{html}{\out{
}}
diff --git a/man/mlr_tuners_async_mbo.Rd b/man/mlr_tuners_async_mbo.Rd
index 3dbe203c..b7f15d1a 100644
--- a/man/mlr_tuners_async_mbo.Rd
+++ b/man/mlr_tuners_async_mbo.Rd
@@ -3,34 +3,105 @@
\name{mlr_tuners_async_mbo}
\alias{mlr_tuners_async_mbo}
\alias{TunerAsyncMbo}
-\title{Asynchronous Model Based Tuning}
+\title{TunerAsync using Asynchronous Model Based Optimization}
\description{
-\code{TunerAsyncMbo} class that implements asynchronous Model Based Tuning (MBO).
+\code{TunerAsyncMbo} class that implements Asynchronous Model Based Optimization (AMBO).
+This is a minimal interface internally passing on to \link{OptimizerAsyncMbo}.
+For additional information and documentation see \link{OptimizerAsyncMbo}.
}
\section{Parameters}{
\describe{
\item{\code{initial_design}}{\code{data.table::data.table()}\cr
Initial design of the optimization.
-If \code{NULL}, a design of size \code{design_size} is generated with \code{design_function}.}
+If \code{NULL}, a design of size \code{design_size} is generated with the specified \code{design_function}.
+Default is \code{NULL}.}
\item{\code{design_size}}{\code{integer(1)}\cr
-Size of the initial design.}
+Size of the initial design if it is to be generated.
+Default is \code{100}.}
\item{\code{design_function}}{\code{character(1)}\cr
-Function to generate the initial design.
-One of \code{c("random", "sobol", "lhs")}.}
+Sampling function to generate the initial design.
+Can be \code{random} (\link[paradox:generate_design_random]{paradox::generate_design_random}), \code{lhs} (\link[paradox:generate_design_lhs]{paradox::generate_design_lhs}), or \code{sobol} (\link[paradox:generate_design_sobol]{paradox::generate_design_sobol}).
+Default is \code{sobol}.}
\item{\code{n_workers}}{\code{integer(1)}\cr
Number of parallel workers.
-If \code{NULL}, all rush workers set with \code{\link[rush:rush_plan]{rush::rush_plan()}} are used.}
+If \code{NULL}, all rush workers specified via \code{\link[rush:rush_plan]{rush::rush_plan()}} are used.
+Default is \code{NULL}.}
}
}
+\examples{
+\donttest{
+if (requireNamespace("rush") &
+ requireNamespace("mlr3learners") &
+ requireNamespace("DiceKriging") &
+ requireNamespace("rgenoud")) {
+
+ library(mlr3)
+ library(mlr3tuning)
+
+ # single-objective
+ task = tsk("wine")
+ learner = lrn("classif.rpart", cp = to_tune(lower = 1e-4, upper = 1, logscale = TRUE))
+ resampling = rsmp("cv", folds = 3)
+ measure = msr("classif.acc")
+
+ instance = TuningInstanceAsyncSingleCrit$new(
+ task = task,
+ learner = learner,
+ resampling = resampling,
+ measure = measure,
+ terminator = trm("evals", n_evals = 10))
+
+ rush::rush_plan(n_workers = 2)
+
+ tnr("async_mbo", design_size = 4, n_workers = 2)$optimize(instance)
+}
+}
+}
\section{Super classes}{
\code{\link[mlr3tuning:Tuner]{mlr3tuning::Tuner}} -> \code{\link[mlr3tuning:TunerAsync]{mlr3tuning::TunerAsync}} -> \code{\link[mlr3tuning:TunerAsyncFromOptimizerAsync]{mlr3tuning::TunerAsyncFromOptimizerAsync}} -> \code{TunerAsyncMbo}
}
+\section{Active bindings}{
+\if{html}{\out{
}}
+\describe{
+\item{\code{surrogate}}{(\link{Surrogate} | \code{NULL})\cr
+The surrogate.}
+
+\item{\code{acq_function}}{(\link{AcqFunction} | \code{NULL})\cr
+The acquisition function.}
+
+\item{\code{acq_optimizer}}{(\link{AcqOptimizer} | \code{NULL})\cr
+The acquisition function optimizer.}
+
+\item{\code{result_assigner}}{(\link{ResultAssigner} | \code{NULL})\cr
+The result assigner.}
+
+\item{\code{param_classes}}{(\code{character()})\cr
+Supported parameter classes that the optimizer can optimize.
+Determined based on the \code{surrogate} and the \code{acq_optimizer}.
+This corresponds to the values given by a \link[paradox:ParamSet]{paradox::ParamSet}'s
+\verb{$class} field.}
+
+\item{\code{properties}}{(\code{character()})\cr
+Set of properties of the optimizer.
+Must be a subset of \code{\link[bbotk:bbotk_reflections]{bbotk_reflections$optimizer_properties}}.
+MBO in principle is very flexible and by default we assume that the optimizer has all properties.
+When fully initialized, properties are determined based on the loop, e.g., the \code{loop_function}, and \code{surrogate}.}
+
+\item{\code{packages}}{(\code{character()})\cr
+Set of required packages.
+A warning is signaled prior to optimization if at least one of the packages is not installed, but loaded (not attached) later on-demand via \code{\link[=requireNamespace]{requireNamespace()}}.
+Required packages are determined based on the \code{acq_function}, \code{surrogate} and the \code{acq_optimizer}.}
+}
+\if{html}{\out{
}}
+}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-TunerAsyncMbo-new}{\code{TunerAsyncMbo$new()}}
+\item \href{#method-TunerAsyncMbo-print}{\code{TunerAsyncMbo$print()}}
+\item \href{#method-TunerAsyncMbo-reset}{\code{TunerAsyncMbo$reset()}}
\item \href{#method-TunerAsyncMbo-clone}{\code{TunerAsyncMbo$clone()}}
}
}
@@ -39,7 +110,6 @@ If \code{NULL}, all rush workers set with \code{\link[rush:rush_plan]{rush::rush
@@ -49,6 +119,10 @@ If \code{NULL}, all rush workers set with \code{\link[rush:rush_plan]{rush::rush
\if{latex}{\out{\hypertarget{method-TunerAsyncMbo-new}{}}}
\subsection{Method \code{new()}}{
Creates a new instance of this \link[R6:R6Class]{R6} class.
+For more information on default values for \code{surrogate}, \code{acq_function}, \code{acq_optimizer}, and \code{result_assigner}, see \code{?mbo_defaults}.
+
+Note that all the parameters below are simply passed to the \link{OptimizerAsyncMbo} and
+the respective fields are simply (settable) active bindings to the fields of the \link{OptimizerAsyncMbo}.
\subsection{Usage}{
\if{html}{\out{
}}\preformatted{TunerAsyncMbo$new(
surrogate = NULL,
@@ -70,11 +144,37 @@ The acquisition function.}
\item{\code{acq_optimizer}}{(\link{AcqOptimizer} | \code{NULL})\cr
The acquisition function optimizer.}
-\item{\code{param_set}}{\link[paradox:ParamSet]{paradox::ParamSet}\cr
+\item{\code{param_set}}{(\link[paradox:ParamSet]{paradox::ParamSet})\cr
Set of control parameters.}
}
\if{html}{\out{
}}
}
+}
+\if{html}{\out{
}}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-TunerAsyncMbo-print}{}}}
+\subsection{Method \code{print()}}{
+Print method.
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{TunerAsyncMbo$print()}\if{html}{\out{
}}
+}
+
+\subsection{Returns}{
+(\code{character()}).
+}
+}
+\if{html}{\out{
}}
+\if{html}{\out{
}}
+\if{latex}{\out{\hypertarget{method-TunerAsyncMbo-reset}{}}}
+\subsection{Method \code{reset()}}{
+Reset the tuner.
+Sets the following fields to \code{NULL}:
+\code{surrogate}, \code{acq_function}, \code{acq_optimizer}, \code{result_assigner}.
+Resets parameter values \code{design_size} and \code{design_function} to their defaults.
+\subsection{Usage}{
+\if{html}{\out{
}}\preformatted{TunerAsyncMbo$reset()}\if{html}{\out{
}}
+}
+
}
\if{html}{\out{
}}
\if{html}{\out{
}}
diff --git a/man/mlr_tuners_mbo.Rd b/man/mlr_tuners_mbo.Rd
index 6e5261f8..a8249d6c 100644
--- a/man/mlr_tuners_mbo.Rd
+++ b/man/mlr_tuners_mbo.Rd
@@ -86,7 +86,7 @@ This corresponds to the values given by a \link[paradox:ParamSet]{paradox::Param
Set of properties of the optimizer.
Must be a subset of \code{\link[bbotk:bbotk_reflections]{bbotk_reflections$optimizer_properties}}.
MBO in principle is very flexible and by default we assume that the optimizer has all properties.
-When fully initialized, properties are determined based on the \code{loop_function} and \code{surrogate}.}
+When fully initialized, properties are determined based on the loop, e.g., the \code{loop_function}, and \code{surrogate}.}
\item{\code{packages}}{(\code{character()})\cr
Set of required packages.
@@ -118,7 +118,7 @@ Required packages are determined based on the \code{acq_function}, \code{surroga
\if{latex}{\out{\hypertarget{method-TunerMbo-new}{}}}
\subsection{Method \code{new()}}{
Creates a new instance of this \link[R6:R6Class]{R6} class.
-For more information on default values for \code{loop_function}, \code{surrogate}, \code{acq_function} and \code{acq_optimizer}, see \code{?mbo_defaults}.
+For more information on default values for \code{loop_function}, \code{surrogate}, \code{acq_function}, \code{acq_optimizer}, and \code{result_assigner}, see \code{?mbo_defaults}.
Note that all the parameters below are simply passed to the \link{OptimizerMbo} and
the respective fields are simply (settable) active bindings to the fields of the \link{OptimizerMbo}.
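A construction sketch mirroring that pass-through; all components are optional and default as described in ?mbo_defaults:

library(mlr3tuning)
library(mlr3mbo)
library(bbotk)

tuner = tnr("mbo",
  loop_function = bayesopt_ego,
  acq_function = acqf("ei"),
  acq_optimizer = acqo(opt("random_search", batch_size = 100L),
    terminator = trm("evals", n_evals = 1000L)))
tuner$acq_function  # (settable) active binding into the wrapped OptimizerMbo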
diff --git a/tests/testthat/test_AcqFunctionSmsEgo.R b/tests/testthat/test_AcqFunctionSmsEgo.R
index b6cdece9..e4f40728 100644
--- a/tests/testthat/test_AcqFunctionSmsEgo.R
+++ b/tests/testthat/test_AcqFunctionSmsEgo.R
@@ -25,7 +25,8 @@ test_that("AcqFunctionSmsEgo works", {
acqf$progress = 1
acqf$update()
res = acqf$eval_dt(xdt)
- expect_data_table(res, ncols = 1L, nrows = 5L, any.missing = FALSE)
- expect_named(res, acqf$id)
+ expect_data_table(res, ncols = 2L, nrows = 5L, any.missing = FALSE)
+ expect_named(res)
+ expect_setequal(colnames(res), c(acqf$id, "acq_epsilon"))
})
diff --git a/tests/testthat/test_AcqFunctionStochasticCB.R b/tests/testthat/test_AcqFunctionStochasticCB.R
index 267565bc..802e3aa6 100644
--- a/tests/testthat/test_AcqFunctionStochasticCB.R
+++ b/tests/testthat/test_AcqFunctionStochasticCB.R
@@ -3,11 +3,11 @@ test_that("AcqFunctionStochasticCB works in defaults", {
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 2)
+ rush::rush_plan(n_workers = 1L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
acq_function = acqf("stochastic_cb")
@@ -17,8 +17,8 @@ test_that("AcqFunctionStochasticCB works in defaults", {
design_size = 5,
acq_function = acq_function)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
expect_rush_reset(instance$rush)
@@ -29,22 +29,22 @@ test_that("AcqFunctionStochasticCB works with uniform sampling", {
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 2)
+ rush::rush_plan(n_workers = 2L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
acq_function = acqf("stochastic_cb", distribution = "uniform", min_lambda = 1, max_lambda = 3)
optimizer = opt("async_mbo",
design_function = "sobol",
- design_size = 5,
+ design_size = 5L,
acq_function = acq_function)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
expect_numeric(instance$archive$data$acq_lambda, lower = 1, upper = 3)
@@ -56,11 +56,11 @@ test_that("AcqFunctionStochasticCB works with exponential sampling", {
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 2)
+ rush::rush_plan(n_workers = 2L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
acq_function = acqf("stochastic_cb", distribution = "exponential", lambda = 1.96)
@@ -70,10 +70,10 @@ test_that("AcqFunctionStochasticCB works with exponential sampling", {
design_size = 5,
acq_function = acq_function)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
- expect_numeric(unique(instance$archive$data$acq_lambda), len = 3)
+ expect_numeric(unique(instance$archive$data$acq_lambda), len = 3L)
expect_rush_reset(instance$rush)
})
@@ -84,22 +84,22 @@ test_that("AcqFunctionStochasticCB works with lambda decay", {
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 1)
+ rush::rush_plan(n_workers = 1L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
acq_function = acqf("stochastic_cb", rate = 0.5)
optimizer = opt("async_mbo",
design_function = "sobol",
- design_size = 5,
+ design_size = 5L,
acq_function = acq_function)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
expect_numeric(-instance$archive$data$acq_lambda, sorted = TRUE)
@@ -112,25 +112,25 @@ test_that("AcqFunctionStochasticCB works with periodic lambda decay", {
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 1)
+ rush::rush_plan(n_workers = 1L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
acq_function = acqf("stochastic_cb", rate = 0.5, period = 2)
optimizer = opt("async_mbo",
design_function = "sobol",
- design_size = 5,
+ design_size = 5L,
acq_function = acq_function)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
- expect_numeric(unique(instance$archive$data$acq_lambda), len = 3)
+ expect_numeric(unique(instance$archive$data$acq_lambda), len = 3L)
expect_rush_reset(instance$rush)
})
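
Collected in one place, these cases exercise every lambda schedule the stochastic CB acquisition function exposes; constructor arguments exactly as used in the tests above, with comments paraphrasing the test names:

library(mlr3mbo)

acqf("stochastic_cb")                                                            # defaults
acqf("stochastic_cb", distribution = "uniform", min_lambda = 1, max_lambda = 3)  # uniform sampling
acqf("stochastic_cb", distribution = "exponential", lambda = 1.96)               # exponential sampling
acqf("stochastic_cb", rate = 0.5)                                                # lambda decay
acqf("stochastic_cb", rate = 0.5, period = 2)                                    # periodic lambda decay
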
diff --git a/tests/testthat/test_AcqFunctionStochasticEI.R b/tests/testthat/test_AcqFunctionStochasticEI.R
index 37e5e70b..8629d0b3 100644
--- a/tests/testthat/test_AcqFunctionStochasticEI.R
+++ b/tests/testthat/test_AcqFunctionStochasticEI.R
@@ -3,22 +3,22 @@ test_that("AcqFunctionStochasticEI works in defaults", {
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 1)
+ rush::rush_plan(n_workers = 1L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
acq_function = acqf("stochastic_ei")
optimizer = opt("async_mbo",
design_function = "sobol",
- design_size = 5,
+ design_size = 5L,
acq_function = acq_function)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c(".already_evaluated", "acq_ei", "acq_epsilon_0", "acq_epsilon"))
expect_numeric(-instance$archive$data$acq_epsilon, sorted = TRUE)
@@ -30,22 +30,22 @@ test_that("AcqFunctionStochasticEI works with multiple workers", {
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 2)
+ rush::rush_plan(n_workers = 2L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 20),
+ terminator = trm("evals", n_evals = 20L),
)
acq_function = acqf("stochastic_ei")
optimizer = opt("async_mbo",
design_function = "sobol",
- design_size = 5,
+ design_size = 5L,
acq_function = acq_function)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c(".already_evaluated", "acq_ei", "acq_epsilon_0", "acq_epsilon"))
expect_rush_reset(instance$rush)
@@ -57,24 +57,24 @@ test_that("AcqFunctionStochasticEI works with periodic epsilon decay", {
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 1)
+ rush::rush_plan(n_workers = 1L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
acq_function = acqf("stochastic_ei", rate = 0.5, period = 2)
optimizer = opt("async_mbo",
design_function = "sobol",
- design_size = 5,
+ design_size = 5L,
acq_function = acq_function)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c(".already_evaluated", "acq_ei", "acq_epsilon_0", "acq_epsilon"))
- expect_numeric(unique(instance$archive$data$acq_epsilon), len = 3)
+ expect_numeric(unique(instance$archive$data$acq_epsilon), len = 3L)
expect_rush_reset(instance$rush)
})
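
The expect_numeric(-x, sorted = TRUE) assertions recurring in these tests encode monotone decay: a vector is non-increasing exactly when its negation is non-decreasing, which is what checkmate's sorted check verifies. A standalone illustration with a hypothetical epsilon sequence:

library(checkmate)

eps = 0.1 * (1 - 0.5)^(0:4)         # hypothetical decay with rate 0.5
test_numeric(-eps, sorted = TRUE)   # TRUE: eps never increases
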
diff --git a/tests/testthat/test_OptimizerADBO.R b/tests/testthat/test_OptimizerADBO.R
index 746bb1b3..6952fd77 100644
--- a/tests/testthat/test_OptimizerADBO.R
+++ b/tests/testthat/test_OptimizerADBO.R
@@ -1,18 +1,18 @@
-test_that("adbo works in defaults", {
+test_that("OptimizerADBO works in defaults", {
skip_on_cran()
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 2)
+ rush::rush_plan(n_workers = 2L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 20),
+ terminator = trm("evals", n_evals = 20L),
)
- optimizer = opt("adbo", design_function = "sobol", design_size = 5)
+ optimizer = opt("adbo", design_function = "sobol", design_size = 5L)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 20)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 20L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
expect_rush_reset(instance$rush)
diff --git a/tests/testthat/test_OptimizerAsyncMbo.R b/tests/testthat/test_OptimizerAsyncMbo.R
index 2b611f9c..6f4afdc2 100644
--- a/tests/testthat/test_OptimizerAsyncMbo.R
+++ b/tests/testthat/test_OptimizerAsyncMbo.R
@@ -1,40 +1,40 @@
-test_that("async optimizer works in defaults", {
+test_that("OptimizerAsyncMbo works in defaults", {
skip_on_cran()
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 2)
+ rush::rush_plan(n_workers = 2L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
- optimizer = opt("async_mbo", design_function = "sobol", design_size = 5)
+ optimizer = opt("async_mbo", design_function = "sobol", design_size = 5L)
- expect_data_table(optimizer$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(optimizer$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
expect_rush_reset(instance$rush)
})
-test_that("async optimizer works with evaluations in archive", {
+test_that("OptimizerAsyncMbo works with evaluations in archive", {
skip_on_cran()
skip_if_not_installed("rush")
flush_redis()
- rush::rush_plan(n_workers = 2)
+ rush::rush_plan(n_workers = 2L)
instance = oi_async(
objective = OBJ_2D,
search_space = PS_2D,
- terminator = trm("evals", n_evals = 10),
+ terminator = trm("evals", n_evals = 10L),
)
optimizer = opt("async_random_search")
optimizer$optimize(instance)
- instance$terminator$param_set$values$n_evals = 40
+ instance$terminator$param_set$values$n_evals = 40L
optimizer = opt("async_mbo")
optimizer$optimize(instance)
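
The second test distills a useful pattern: async MBO can take over an instance whose archive was already filled by another optimizer, fitting its surrogate on the existing evaluations. In plain form (OBJ_2D and PS_2D are the package's test fixtures; a Redis instance reachable by rush is required):

library(bbotk)
library(mlr3mbo)

rush::rush_plan(n_workers = 2L)
instance = oi_async(
  objective = OBJ_2D,
  search_space = PS_2D,
  terminator = trm("evals", n_evals = 10L)
)
opt("async_random_search")$optimize(instance)        # cheap warm-start evaluations
instance$terminator$param_set$values$n_evals = 40L   # raise the budget
opt("async_mbo")$optimize(instance)                  # continues on the same archive
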
diff --git a/tests/testthat/test_SurrogateLearner.R b/tests/testthat/test_SurrogateLearner.R
index 08f46d7e..5dae4d03 100644
--- a/tests/testthat/test_SurrogateLearner.R
+++ b/tests/testthat/test_SurrogateLearner.R
@@ -50,11 +50,12 @@ test_that("param_set", {
inst = MAKE_INST_1D()
surrogate = SurrogateLearner$new(learner = REGR_FEATURELESS, archive = inst$archive)
expect_r6(surrogate$param_set, "ParamSet")
- expect_setequal(surrogate$param_set$ids(), c("assert_insample_perf", "perf_measure", "perf_threshold", "catch_errors"))
+ expect_setequal(surrogate$param_set$ids(), c("assert_insample_perf", "perf_measure", "perf_threshold", "catch_errors", "impute_method"))
expect_equal(surrogate$param_set$class[["assert_insample_perf"]], "ParamLgl")
expect_equal(surrogate$param_set$class[["perf_measure"]], "ParamUty")
expect_equal(surrogate$param_set$class[["perf_threshold"]], "ParamDbl")
expect_equal(surrogate$param_set$class[["catch_errors"]], "ParamLgl")
+ expect_equal(surrogate$param_set$class[["impute_method"]], "ParamFct")
expect_error({surrogate$param_set = list()}, regexp = "param_set is read-only.")
})
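
The new impute_method parameter (a ParamFct, per the assertion above) is set like any other surrogate hyperparameter. A minimal sketch, assuming "random" is among its levels, which is plausible for imputing results of in-flight asynchronous evaluations but not verified here:

library(mlr3)
library(mlr3mbo)

surrogate = srlrn(lrn("regr.featureless"))
surrogate$param_set$ids()                            # now includes "impute_method"
surrogate$param_set$values$impute_method = "random"  # assumed level
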
diff --git a/tests/testthat/test_SurrogateLearnerCollection.R b/tests/testthat/test_SurrogateLearnerCollection.R
index aa050284..b95ecf55 100644
--- a/tests/testthat/test_SurrogateLearnerCollection.R
+++ b/tests/testthat/test_SurrogateLearnerCollection.R
@@ -60,11 +60,12 @@ test_that("param_set", {
inst = MAKE_INST(OBJ_1D_2, PS_1D, trm("evals", n_evals = 5L))
surrogate = SurrogateLearnerCollection$new(learner = list(REGR_FEATURELESS, REGR_FEATURELESS$clone(deep = TRUE)), archive = inst$archive)
expect_r6(surrogate$param_set, "ParamSet")
- expect_setequal(surrogate$param_set$ids(), c("assert_insample_perf", "perf_measures", "perf_thresholds", "catch_errors"))
+ expect_setequal(surrogate$param_set$ids(), c("assert_insample_perf", "perf_measures", "perf_thresholds", "catch_errors", "impute_method"))
expect_equal(surrogate$param_set$class[["assert_insample_perf"]], "ParamLgl")
expect_equal(surrogate$param_set$class[["perf_measures"]], "ParamUty")
expect_equal(surrogate$param_set$class[["perf_thresholds"]], "ParamUty")
expect_equal(surrogate$param_set$class[["catch_errors"]], "ParamLgl")
+ expect_equal(surrogate$param_set$class[["impute_method"]], "ParamFct")
expect_error({surrogate$param_set = list()}, regexp = "param_set is read-only.")
})
diff --git a/tests/testthat/test_TunerADBO.R b/tests/testthat/test_TunerADBO.R
index 8dc51a53..53c50d3f 100644
--- a/tests/testthat/test_TunerADBO.R
+++ b/tests/testthat/test_TunerADBO.R
@@ -1,27 +1,26 @@
-
-test_that("async mbo tuner works", {
+test_that("TunerADBO works", {
skip_on_cran()
skip_if_not_installed("rush")
flush_redis()
learner = lrn("classif.rpart",
- minsplit = to_tune(2, 128),
+ minsplit = to_tune(2L, 128L),
cp = to_tune(1e-04, 1e-1))
- rush::rush_plan(n_workers = 4)
+ rush::rush_plan(n_workers = 4L)
instance = ti_async(
task = tsk("pima"),
learner = learner,
- resampling = rsmp("cv", folds = 3),
+ resampling = rsmp("cv", folds = 3L),
measure = msr("classif.ce"),
- terminator = trm("evals", n_evals = 20),
+ terminator = trm("evals", n_evals = 20L),
store_benchmark_result = FALSE
)
- tuner = tnr("adbo", design_size = 10)
+ tuner = tnr("adbo", design_size = 10L)
- expect_data_table(tuner$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(tuner$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
expect_rush_reset(instance$rush)
diff --git a/tests/testthat/test_TunerAsyncMbo.R b/tests/testthat/test_TunerAsyncMbo.R
index dce8021e..31d6da9a 100644
--- a/tests/testthat/test_TunerAsyncMbo.R
+++ b/tests/testthat/test_TunerAsyncMbo.R
@@ -1,27 +1,26 @@
-
-test_that("async mbo tuner works", {
+test_that("TunerAsyncMbo works", {
skip_on_cran()
skip_if_not_installed("rush")
flush_redis()
learner = lrn("classif.rpart",
- minsplit = to_tune(2, 128),
+ minsplit = to_tune(2L, 128L),
cp = to_tune(1e-04, 1e-1))
- rush::rush_plan(n_workers = 4)
+ rush::rush_plan(n_workers = 4L)
instance = ti_async(
task = tsk("pima"),
learner = learner,
- resampling = rsmp("cv", folds = 3),
+ resampling = rsmp("cv", folds = 3L),
measure = msr("classif.ce"),
- terminator = trm("evals", n_evals = 20),
+ terminator = trm("evals", n_evals = 20L),
store_benchmark_result = FALSE
)
- tuner = tnr("async_mbo", design_size = 4)
+ tuner = tnr("async_mbo", design_size = 4L)
- expect_data_table(tuner$optimize(instance), nrows = 1)
- expect_data_table(instance$archive$data, min.rows = 10)
+ expect_data_table(tuner$optimize(instance), nrows = 1L)
+ expect_data_table(instance$archive$data, min.rows = 10L)
expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "acq_lambda_0", "acq_lambda"))
expect_rush_reset(instance$rush)
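
A recurring change throughout these tests is 5 to 5L and the like: plain numeric literals in R are doubles, and the L suffix makes the integer type explicit rather than relying on coercion. A quick illustration:

is.integer(5)      # FALSE: a plain numeric literal is a double
is.integer(5L)     # TRUE: the L suffix produces an integer
identical(5, 5L)   # FALSE: identical() distinguishes the two types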