Reimplement control scaling #9925

Open · wants to merge 7 commits into base: main
pyproject.toml (4 changes: 2 additions & 2 deletions)

@@ -138,8 +138,8 @@ everest = [
     "decorator",
     "resdata",
     "colorama",
-    "ropt[pandas]>=0.1,<0.11",
-    "ropt-dakota>=0.1,<0.11",
+    "ropt[pandas]>=0.11,<0.12",
+    "ropt-dakota>=0.11,<0.12",
 ]

 [tool.setuptools]
src/ert/run_models/everest_run_model.py (95 changes: 7 additions & 88 deletions)

@@ -31,6 +31,7 @@
 from everest.config import ControlConfig, ControlVariableGuessListConfig, EverestConfig
 from everest.everest_storage import EverestStorage, OptimalResult
 from everest.optimizer.everest2ropt import everest2ropt
+from everest.optimizer.transforms import get_transforms
 from everest.simulator.everest_to_ert import everest_to_ert_config
 from everest.strings import EVEREST

@@ -96,7 +97,8 @@ def __init__(
         )

         self._everest_config = everest_config
-        self._ropt_config = everest2ropt(everest_config)
+        self._transforms = get_transforms(everest_config)
+        self._ropt_config = everest2ropt(everest_config, transforms=self._transforms)

         self._sim_callback = simulation_callback
         self._opt_callback = optimization_callback

@@ -190,7 +192,8 @@ def run_experiment(
         self.ever_storage = EverestStorage(
             output_dir=Path(self._everest_config.optimization_output_dir),
         )
-        self.ever_storage.observe_optimizer(optimizer)
+        self.ever_storage.init(self._everest_config)
+        self.ever_storage.observe_optimizer(optimizer, self._transforms)

         # Run the optimization:
         optimizer_exit_code = optimizer.run().exit_code

@@ -210,92 +213,8 @@ run_experiment(
         self._exit_code = EverestExitCode.COMPLETED

     def _create_optimizer(self) -> BasicOptimizer:
-        RESULT_COLUMNS = {
-            "result_id": "ID",
-            "batch_id": "Batch",
-            "functions.weighted_objective": "Total-Objective",
-            "linear_constraints.violations": "IC-violation",
-            "nonlinear_constraints.violations": "OC-violation",
-            "functions.objectives": "Objective",
-            "functions.constraints": "Constraint",
-            "evaluations.variables": "Control",
-            "linear_constraints.values": "IC-diff",
-            "nonlinear_constraints.values": "OC-diff",
-            "functions.scaled_objectives": "Scaled-Objective",
-            "functions.scaled_constraints": "Scaled-Constraint",
-            "evaluations.scaled_variables": "Scaled-Control",
-            "nonlinear_constraints.scaled_values": "Scaled-OC-diff",
-            "nonlinear_constraints.scaled_violations": "Scaled-OC-violation",
-        }
-        GRADIENT_COLUMNS = {
-            "result_id": "ID",
-            "batch_id": "Batch",
-            "gradients.weighted_objective": "Total-Gradient",
-            "gradients.objectives": "Grad-objective",
-            "gradients.constraints": "Grad-constraint",
-        }
-        SIMULATION_COLUMNS = {
-            "result_id": "ID",
-            "batch_id": "Batch",
-            "realization": "Realization",
-            "evaluations.evaluation_ids": "Simulation",
-            "evaluations.variables": "Control",
-            "evaluations.objectives": "Objective",
-            "evaluations.constraints": "Constraint",
-            "evaluations.scaled_variables": "Scaled-Control",
-            "evaluations.scaled_objectives": "Scaled-Objective",
-            "evaluations.scaled_constraints": "Scaled-Constraint",
-        }
-        PERTURBATIONS_COLUMNS = {
-            "result_id": "ID",
-            "batch_id": "Batch",
-            "realization": "Realization",
-            "evaluations.perturbed_evaluation_ids": "Simulation",
-            "evaluations.perturbed_variables": "Control",
-            "evaluations.perturbed_objectives": "Objective",
-            "evaluations.perturbed_constraints": "Constraint",
-            "evaluations.scaled_perturbed_variables": "Scaled-Control",
-            "evaluations.scaled_perturbed_objectives": "Scaled-Objective",
-            "evaluations.scaled_perturbed_constraints": "Scaled-Constraint",
-        }
-        MIN_HEADER_LEN = 3
-
-        # Initialize the optimizer with output tables. `min_header_len` is set
-        # to ensure that all tables have the same number of header lines,
-        # simplifying code that reads them as fixed width tables. `maximize` is
-        # set because ropt reports minimization results, while everest wants
-        # maximization results, necessitating a conversion step.
-        ropt_output_folder = Path(self._everest_config.optimization_output_dir)
-        optimizer = (
-            BasicOptimizer(
-                enopt_config=self._ropt_config, evaluator=self._forward_model_evaluator
-            )
-            .add_table(
-                columns=RESULT_COLUMNS,
-                path=ropt_output_folder / "results.txt",
-                min_header_len=MIN_HEADER_LEN,
-                maximize=True,
-            )
-            .add_table(
-                columns=GRADIENT_COLUMNS,
-                path=ropt_output_folder / "gradients.txt",
-                table_type="gradients",
-                min_header_len=MIN_HEADER_LEN,
-                maximize=True,
-            )
-            .add_table(
-                columns=SIMULATION_COLUMNS,
-                path=ropt_output_folder / "simulations.txt",
-                min_header_len=MIN_HEADER_LEN,
-                maximize=True,
-            )
-            .add_table(
-                columns=PERTURBATIONS_COLUMNS,
-                path=ropt_output_folder / "perturbations.txt",
-                table_type="gradients",
-                min_header_len=MIN_HEADER_LEN,
-                maximize=True,
-            )
+        optimizer = BasicOptimizer(
+            enopt_config=self._ropt_config, evaluator=self._forward_model_evaluator
         )

         # Before each batch evaluation we check if we should abort:
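Taken together, the changes above introduce a single transforms object that is built once from the Everest config and then shared by the ropt configuration and the storage observer. A minimal sketch of that wiring, using only the calls visible in this diff (the helper function and its name are illustrative, and the internals of the transforms object are assumed rather than shown):

    from pathlib import Path

    from everest.config import EverestConfig
    from everest.everest_storage import EverestStorage
    from everest.optimizer.everest2ropt import everest2ropt
    from everest.optimizer.transforms import get_transforms


    def build_optimizer_inputs(everest_config: EverestConfig):
        # Build the scaling transforms once, from the Everest config.
        transforms = get_transforms(everest_config)
        # The same transforms are handed to the ropt config builder ...
        ropt_config = everest2ropt(everest_config, transforms=transforms)
        # ... and, once a BasicOptimizer has been created from ropt_config,
        # to the storage observer: storage.observe_optimizer(optimizer, transforms)
        storage = EverestStorage(
            output_dir=Path(everest_config.optimization_output_dir),
        )
        storage.init(everest_config)
        return ropt_config, transforms, storage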
src/everest/api/everest_data_api.py (30 changes: 0 additions & 30 deletions)

@@ -3,7 +3,6 @@

 import polars
 import polars as pl
-from ropt.enums import ConstraintType

 from ert.storage import open_storage
 from everest.config import EverestConfig

@@ -53,35 +52,6 @@ def output_constraint_names(self):
             else []
         )

-    def input_constraint(self, control):
-        # Note: This function is weird, its existence is probably not well-justified
-        # consider removing!
-        initial_values = self._ever_storage.data.controls
-        control_spec = initial_values.filter(
-            pl.col("control_name") == control
-        ).to_dicts()[0]
-        return {
-            "min": control_spec.get("lower_bounds"),
-            "max": control_spec.get("upper_bounds"),
-        }
-
-    def output_constraint(self, constraint):
-        """
-        :return: a dictionary with two keys: "type" and "right_hand_side".
-            "type" has three options:
-                ["lower_bound", "upper_bound", "target"]
-            "right_hand_side" is a constant real number that indicates
-            the constraint bound/target.
-        """
-
-        constraint_dict = self._ever_storage.data.nonlinear_constraints.filter(
-            polars.col("constraint_name") == constraint
-        ).to_dicts()[0]
-        return {
-            "type": ConstraintType(constraint_dict["constraint_type"]).name.lower(),
-            "right_hand_side": constraint_dict["constraint_rhs_value"],
-        }
-
     @property
     def realizations(self):
         return sorted(
src/everest/config/everest_config.py (14 changes: 14 additions & 0 deletions)

@@ -621,6 +621,20 @@ def control_names(self):
         controls = self.controls or []
         return [control.name for control in controls]

+    @property
+    def formatted_control_names(self) -> list[str]:
+        names = []
+        for control in self.controls:
+            for variable in control.variables:
+                if isinstance(variable, ControlVariableGuessListConfig):
+                    for index in range(1, len(variable.initial_guess) + 1):
+                        names.append(f"{control.name}_{variable.name}-{index}")
+                elif variable.index is not None:
+                    names.append(f"{control.name}_{variable.name}-{variable.index}")
+                else:
+                    names.append(f"{control.name}_{variable.name}")
+        return names
+
     @property
     def objective_names(self) -> list[str]:
         return [objective.name for objective in self.objective_functions]
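The new formatted_control_names property flattens the control/variable hierarchy into "<control>_<variable>" names, adding a "-<index>" suffix for indexed variables and one suffixed name per entry of a guess-list variable. A small, self-contained illustration of that naming scheme (the control and variable names are made up, and plain dicts stand in for the Pydantic config objects):

    # Hypothetical layout: "point" has a guess-list variable "x" and a plain "y".
    controls = {"point": {"x": [0.1, 0.2, 0.3], "y": None}}

    names: list[str] = []
    for control_name, variables in controls.items():
        for variable_name, guess_list in variables.items():
            if guess_list is not None:
                # One name per initial guess, indexed from 1.
                names += [
                    f"{control_name}_{variable_name}-{i}"
                    for i in range(1, len(guess_list) + 1)
                ]
            else:
                names.append(f"{control_name}_{variable_name}")

    print(names)  # ['point_x-1', 'point_x-2', 'point_x-3', 'point_y']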
src/everest/config/simulator_config.py (8 changes: 2 additions & 6 deletions)

@@ -79,17 +79,13 @@ class SimulatorConfig(BaseModel, extra="forbid"):  # type: ignore
         If not specified, a default value of 1 will be used.""",
     )
     enable_cache: bool = Field(
-        default=False,
+        default=True,
         description="""Enable forward model result caching.

        If enabled, objective and constraint function results are cached for
        each realization. If the optimizer requests an evaluation that has
        already been done before, these cached values will be re-used without
-        running the forward model again.
-
-        This option is disabled by default, since it will not be necessary for
-        the most common use of a standard optimization with a continuous
-        optimizer.""",
+        running the forward model again.""",
     )

     @field_validator("queue_system", mode="before")
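With enable_cache now defaulting to True, a repeated request for an evaluation that has already been run is served from the cache instead of triggering another forward model run. A conceptual sketch of that behaviour, not the actual Everest implementation (run_forward_model stands in for the real evaluator), with the cache keyed on the realization and the control vector:

    from typing import Callable

    # Cache keyed on (realization, control vector); values are the objective and
    # constraint results from a completed forward model run.
    Key = tuple[int, tuple[float, ...]]
    _cache: dict[Key, tuple[float, ...]] = {}


    def cached_evaluate(
        realization: int,
        controls: tuple[float, ...],
        run_forward_model: Callable[[int, tuple[float, ...]], tuple[float, ...]],
    ) -> tuple[float, ...]:
        key = (realization, controls)
        if key not in _cache:
            # Only run the (expensive) forward model on a cache miss.
            _cache[key] = run_forward_model(realization, controls)
        return _cache[key]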
src/everest/config/utils.py (16 changes: 1 addition & 15 deletions)

@@ -1,4 +1,4 @@
-from collections.abc import Generator, Iterator
+from collections.abc import Generator
 from typing import Any

 from .control_config import ControlConfig

@@ -129,17 +129,3 @@ def _inject_defaults(
     ]:
         if var_dict.get(key) is None:
             var_dict[key] = getattr(control, key)
-
-
-def control_tuples(
-    controls: list[ControlConfig],
-) -> Iterator[tuple[str, str, int] | tuple[str, str]]:
-    for control in controls:
-        for variable in control.variables:
-            if isinstance(variable, ControlVariableGuessListConfig):
-                for index in range(1, len(variable.initial_guess) + 1):
-                    yield (control.name, variable.name, index)
-            elif variable.index is not None:
-                yield (control.name, variable.name, variable.index)
-            else:
-                yield (control.name, variable.name)