Commit

Update priors and examples
LukasFehring committed Jan 2, 2025
1 parent 9f2e387 commit 3232f13
Showing 2 changed files with 82 additions and 11 deletions.
78 changes: 73 additions & 5 deletions examples/1_basics/6_priors.py
@@ -20,6 +20,7 @@
ConfigurationSpace,
NormalFloatHyperparameter,
UniformIntegerHyperparameter,
UniformFloatHyperparameter,
)
from sklearn.datasets import load_digits
from sklearn.exceptions import ConvergenceWarning
@@ -38,7 +39,7 @@

class MLP:
@property
def configspace(self) -> ConfigurationSpace:
def prior_configspace(self) -> ConfigurationSpace:
# Build Configuration Space which defines all parameters and their ranges.
# To illustrate different parameter types,
# we use continuous, integer and categorical parameters.
@@ -100,7 +101,67 @@ def configspace(self) -> ConfigurationSpace:
)

# Add all hyperparameters at once:
cs.add([n_layer, n_neurons, activation, optimizer, batch_size, learning_rate_init])
cs.add(
[n_layer, n_neurons, activation, optimizer, batch_size, learning_rate_init]
)

return cs
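# For contrast with the uniform space defined below: the prior space above
# encodes beliefs as distributions, e.g. a log-normal prior centred on 1e-3
# for the learning rate. A sketch using the imported NormalFloatHyperparameter
# (the mu/sigma values are illustrative, not prescriptive):
#
#     learning_rate_init = NormalFloatHyperparameter(
#         "learning_rate_init",
#         lower=1e-5,
#         upper=1.0,
#         default_value=1e-3,
#         mu=np.log(1e-3),
#         sigma=np.log(10),
#         log=True,
#     )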

@property
def configspace(self) -> ConfigurationSpace:
# Build Configuration Space which defines all parameters and their ranges.
# To illustrate different parameter types,
# we use continuous, integer and categorical parameters.
cs = ConfigurationSpace()

# We do not have an educated belief about the number of layers beforehand
n_layer = UniformIntegerHyperparameter(
"n_layer",
lower=1,
upper=5,
)

# Define network width without a specific prior
n_neurons = UniformIntegerHyperparameter(
"n_neurons",
lower=8,
upper=256,
)

# Define activation functions without specific weights
activation = CategoricalHyperparameter(
"activation",
["logistic", "tanh", "relu"],
default_value="relu",
)

# Define optimizer without specific weights
optimizer = CategoricalHyperparameter(
"optimizer",
["sgd", "adam"],
default_value="adam",
)

# Define batch size without a specific distribution
batch_size = UniformIntegerHyperparameter(
"batch_size",
16,
512,
default_value=128,
)

# Define learning rate range without a log-normal prior
learning_rate_init = UniformFloatHyperparameter(
"learning_rate_init",
lower=1e-5,
upper=1.0,
default_value=1e-3,
)

# Add all hyperparameters at once:
cs.add(
[n_layer, n_neurons, activation, optimizer, batch_size, learning_rate_init]
)

return cs
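# Both properties yield valid spaces; sampling from the prior-free space is a
# one-liner (sample_configuration is standard ConfigSpace API):
#
#     cs = MLP().configspace
#     configs = cs.sample_configuration(5)  # five uniformly sampled configs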

@@ -119,8 +180,12 @@ def train(self, config: Configuration, seed: int = 0) -> float:
)

# Returns the 5-fold cross validation accuracy
cv = StratifiedKFold(n_splits=5, random_state=seed, shuffle=True) # to make CV splits consistent
score = cross_val_score(classifier, digits.data, digits.target, cv=cv, error_score="raise")
cv = StratifiedKFold(
n_splits=5, random_state=seed, shuffle=True
) # to make CV splits consistent
score = cross_val_score(
classifier, digits.data, digits.target, cv=cv, error_score="raise"
)

return 1 - np.mean(score)
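# A minimal usage sketch for train(): it returns 1 - mean CV accuracy, so
# lower is better (get_default_configuration is standard ConfigSpace API):
#
#     mlp = MLP()
#     cost = mlp.train(mlp.configspace.get_default_configuration(), seed=0)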

@@ -140,8 +205,11 @@

# We define the prior acquisition function, which conducts the optimization using priors over the optimum
acquisition_function = PriorAcquisitionFunction(
acquisition_function=HyperparameterOptimizationFacade.get_acquisition_function(scenario),
acquisition_function=HyperparameterOptimizationFacade.get_acquisition_function(
scenario
),
decay_beta=scenario.n_trials / 10, # Proven solid value
prior_configspace=mlp.prior_configspace,
)

# We only want one config call (use only one seed in this example)
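# A sketch of how this plugs into the facade (retrain_after=1 matches the
# single-config note above; overwrite=True is an assumption for reruns):
config_selector = HyperparameterOptimizationFacade.get_config_selector(
    scenario, retrain_after=1
)
smac = HyperparameterOptimizationFacade(
    scenario,
    mlp.train,
    acquisition_function=acquisition_function,
    config_selector=config_selector,
    overwrite=True,
)
incumbent = smac.optimize()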
15 changes: 9 additions & 6 deletions smac/acquisition/function/prior_acquisition_function.py
@@ -17,6 +17,7 @@
from smac.model.abstract_model import AbstractModel
from smac.model.random_forest.abstract_random_forest import AbstractRandomForest
from smac.utils.logging import get_logger
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import Hyperparameter

__copyright__ = "Copyright 2022, automl.org"
__license__ = "3-clause BSD"
@@ -49,6 +50,7 @@ def __init__(
self,
acquisition_function: AbstractAcquisitionFunction,
decay_beta: float,
prior_configspace: ConfigurationSpace,
prior_floor: float = 1e-12,
discretize: bool = False,
discrete_bins_factor: float = 10.0,
@@ -58,8 +60,9 @@
self._functions: list[AbstractAcquisitionFunction] = []
self._eta: float | None = None

self._hyperparameters: dict[Any, Configuration] | None = None
self._decay_beta = decay_beta
self._prior_configspace = prior_configspace
self._hyperparameters: dict[str, Hyperparameter] = dict(self._prior_configspace)
self._prior_floor = prior_floor
self._discretize = discretize
self._discrete_bins_factor = discrete_bins_factor
@@ -72,10 +75,12 @@
acquisition_type = self._acquisition_function

self._rescale = isinstance(acquisition_type, (LCB, TS))

# Variables needed to adapt the weighting of the prior
self._initial_design_size = None # The amount of datapoints in the initial design
self._iteration_number = 1 # The amount of configurations the prior was used in the selection of configurations. It starts at 1
self._initial_design_size = (
None # The number of datapoints in the initial design
)
self._iteration_number = 1 # The number of iterations in which the prior was used to select configurations; starts at 1
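# These attributes feed a πBO-style weighting: the prior's influence decays as
# evidence accrues. Sketch of the multiplier applied to the inner acquisition
# value (the exact _compute is outside this diff):
#
#     weight(x) = (prior(x) + prior_floor) ** (decay_beta / iteration_number)
#
# With decay_beta = n_trials / 10, the prior dominates early and washes out as
# iteration_number grows.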

@property
def name(self) -> str: # noqa: D102
Expand Down Expand Up @@ -103,8 +108,6 @@ def model(self) -> AbstractModel | None: # noqa: D102
@model.setter
def model(self, model: AbstractModel) -> None:
self._model = model
# TODO replace deprecated method
self._hyperparameters = model._configspace.get_hyperparameters_dict()

if isinstance(model, AbstractRandomForest):
if not self._discretize:
