From fe421a5dee1226df4d605ad9dc5b23d114d6c03b Mon Sep 17 00:00:00 2001 From: Bryn Pickering <17178478+brynpickering@users.noreply.github.com> Date: Wed, 30 Oct 2024 22:02:28 +0000 Subject: [PATCH 01/17] Update to using pydantic for config --- src/calliope/attrdict.py | 4 + src/calliope/config.py | 318 +++++++++++++++++++++++++ src/calliope/config/config_schema.yaml | 156 ------------ src/calliope/model.py | 107 +++------ src/calliope/preprocess/data_tables.py | 3 - 5 files changed, 361 insertions(+), 227 deletions(-) create mode 100644 src/calliope/config.py diff --git a/src/calliope/attrdict.py b/src/calliope/attrdict.py index bd94df7b..f17cf0ef 100644 --- a/src/calliope/attrdict.py +++ b/src/calliope/attrdict.py @@ -9,6 +9,7 @@ import numpy as np import ruamel.yaml as ruamel_yaml +from ruamel.yaml.scalarstring import walk_tree from typing_extensions import Self from calliope.util.tools import relative_path @@ -355,6 +356,9 @@ def to_yaml(self, path=None): result = result.as_dict() + # handle multi-line strings. + walk_tree(result) + if path is not None: with open(path, "w") as f: yaml_.dump(result, f) diff --git a/src/calliope/config.py b/src/calliope/config.py new file mode 100644 index 00000000..75a9bf79 --- /dev/null +++ b/src/calliope/config.py @@ -0,0 +1,318 @@ +# Copyright (C) since 2013 Calliope contributors listed in AUTHORS. +# Licensed under the Apache 2.0 License (see LICENSE file). 
+"""Implements the Calliope configuration class.""" + +from collections.abc import Hashable +from datetime import datetime +from pathlib import Path +from typing import Annotated, Literal, Self, TypeVar, overload + +from pydantic import AfterValidator, BaseModel, Field, model_validator +from pydantic_core import PydanticCustomError + +from calliope.attrdict import AttrDict +from calliope.util import tools + +MODES_T = Literal["plan", "operate", "spores"] +CONFIG_T = Literal["init", "build", "solve"] + +# == +# Taken from https://github.com/pydantic/pydantic-core/pull/820#issuecomment-1670475909 +T = TypeVar("T", bound=Hashable) + + +def _validate_unique_list(v: list[T]) -> list[T]: + if len(v) != len(set(v)): + raise PydanticCustomError("unique_list", "List must be unique") + return v + + +UniqueList = Annotated[ + list[T], + AfterValidator(_validate_unique_list), + Field(json_schema_extra={"uniqueItems": True}), +] +# == + + +def hide_from_schema(to_hide: list[str]): + """Hide fields from the generated schema. + + Args: + to_hide (list[str]): List of fields to hide. + """ + + def _hide_from_schema(schema: dict): + for hide in to_hide: + schema.get("properties", {}).pop(hide, None) + return schema + + return _hide_from_schema + + +class ConfigBaseModel(BaseModel): + """A base class for creating pydantic models for Calliope configuration options.""" + + _kwargs: dict = {} + + def update(self, update_dict: dict, deep: bool = False) -> Self: + """Return a new iteration of the model with updated fields. + + Updates are validated and stored in the parent class in the `_kwargs` key. + + Args: + update_dict (dict): Dictionary with which to update the base model. + deep (bool, optional): Set to True to make a deep copy of the model. Defaults to False. + + Returns: + BaseModel: New model instance. 
+ """ + updated = super().model_copy(update=update_dict, deep=deep) + updated.model_validate(updated) + self._kwargs = update_dict + return updated + + @overload + def model_yaml_schema(self, filepath: str | Path) -> None: ... + + @overload + def model_yaml_schema(self, filepath: None = None) -> str: ... + + def model_yaml_schema(self, filepath: str | Path | None = None) -> None | str: + """Generate a YAML schema for the class. + + Args: + filepath (str | Path | None, optional): If given, save schema to given path. Defaults to None. + + Returns: + None | str: If `filepath` is given, returns None. Otherwise, returns the YAML string. + """ + return AttrDict(self.model_json_schema()).to_yaml(filepath) + + +class ModeBaseModel(BaseModel): + """Mode-specific configuration, which will be hidden from the string representation of the model if that mode is not activated.""" + + _mode: str + + @model_validator(mode="after") + def update_repr(self) -> Self: + """Hide config from model string representation if mode is not activated.""" + for key, val in self.model_fields.items(): + if key.startswith(self._mode): + val.repr = self.mode == self._mode + return self + + +class Init(ConfigBaseModel): + """All configuration options used when initialising a Calliope model.""" + + model_config = { + "extra": "forbid", + "frozen": True, + "json_schema_extra": hide_from_schema(["def_path"]), + "revalidate_instances": "always", + "use_attribute_docstrings": True, + } + + def_path: Path = Field(default=".", repr=False, exclude=True) + name: str | None = Field(default=None) + """Model name""" + + calliope_version: str | None = Field(default=None) + """Calliope framework version this model is intended for""" + + time_subset: tuple[datetime, datetime] | None = Field(default=None) + """ + Subset of timesteps as an two-element list giving the **inclusive** range. + For example, ["2005-01", "2005-04"] will create a time subset from "2005-01-01 00:00:00" to "2005-04-31 23:59:59". 
+ + Strings must be ISO8601-compatible, i.e. of the form `YYYY-mm-dd HH:MM:SS` (e.g, '2005-01 ', '2005-01-01', '2005-01-01 00:00', ...) + """ + + time_resample: str | None = Field(default=None, pattern="^[0-9]+[a-zA-Z]") + """Setting to adjust time resolution, e.g. '2h' for 2-hourly""" + + time_cluster: Path | None = Field(default=None) + """ + Setting to cluster the timeseries. + Must be a path to a file where each date is linked to a representative date that also exists in the timeseries. + """ + + time_format: str = Field(default="ISO8601") + """ + Timestamp format of all time series data when read from file. + 'ISO8601' means '%Y-%m-%d %H:%M:%S'. + """ + + distance_unit: Literal["km", "m"] = Field(default="km") + """ + Unit of transmission link `distance` (m - metres, km - kilometres). + Automatically derived distances from lat/lon coordinates will be given in this unit. + """ + + @model_validator(mode="before") + @classmethod + def abs_path(cls, data): + """Add model definition path.""" + if "time_cluster" in data: + data["time_cluster"] = tools.relative_path( + data["def_path"], data["time_cluster"] + ) + return data + + +class BuildBase(BaseModel): + """Base configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" + + model_config = {"extra": "allow", "revalidate_instances": "always"} + add_math: UniqueList[str] = Field(default=[]) + """ + List of references to files which contain additional mathematical formulations to be applied on top of or instead of the base mode math. + If referring to an pre-defined Calliope math file (see documentation for available files), do not append the reference with ".yaml". + If referring to your own math file, ensure the file type is given as a suffix (".yaml" or ".yml"). 
+ Relative paths will be assumed to be relative to the model definition file given when creating a calliope Model (`calliope.Model(model_definition=...)`) + """ + + ignore_mode_math: bool = Field(default=False) + """ + If True, do not initialise the mathematical formulation with the pre-defined math for the given run `mode`. + This option can be used to completely re-define the Calliope mathematical formulation. + """ + + backend: Literal["pyomo", "gurobi"] = Field(default="pyomo") + """Module with which to build the optimisation problem.""" + + ensure_feasibility: bool = Field(default=False) + """ + Whether to include decision variables in the model which will meet unmet demand or consume unused supply in the model so that the optimisation solves successfully. + This should only be used as a debugging option (as any unmet demand/unused supply is a sign of improper model formulation). + """ + + mode: MODES_T = Field(default="plan") + """Mode in which to run the optimisation.""" + + objective: str = Field(default="min_cost_optimisation") + """Name of internal objective function to use, from those defined in the pre-defined math and any applied additional math.""" + + pre_validate_math_strings: bool = Field(default=True) + """ + If true, the Calliope math definition will be scanned for parsing errors _before_ undertaking the much more expensive operation of building the optimisation problem. + You can switch this off (e.g., if you know there are no parsing errors) to reduce overall build time. + """ + + +class BuildOperate(ModeBaseModel): + """Operate mode configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" + + _mode = "operate" + + operate_window: str = Field(default=None) + """ + Operate mode rolling `window`, given as a pandas frequency string. + See [here](https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases) for a list of frequency aliases. 
+ """ + + operate_horizon: str = Field(default=None) + """ + Operate mode rolling `horizon`, given as a pandas frequency string. + See [here](https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases) for a list of frequency aliases. + Must be ≥ `operate_window` + """ + + operate_use_cap_results: bool = Field(default=False) + """If the model already contains `plan` mode results, use those optimal capacities as input parameters to the `operate` mode run.""" + + +class Build(ConfigBaseModel, BuildOperate, BuildBase): + """All configuration options used when building a Calliope optimisation problem (`calliope.Model.build`). + + Additional configuration items will be passed onto math string parsing and can therefore be accessed in the `where` strings by `config.[item-name]`, + where "[item-name]" is the name of your own configuration item. + """ + + +class SolveBase(BaseModel): + """Base configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" + + model_config = { + "extra": "forbid", + "revalidate_instances": "always", + "json_schema_extra": hide_from_schema(["mode"]), + } + + mode: Literal["plan", "spores", "operate"] = Field(default="plan", repr=False) + + save_logs: Path | None = Field(default=None) + """If given, should be a path to a directory in which to save optimisation logs.""" + + solver_io: str | None = Field(default=None) + """ + Some solvers have different interfaces that perform differently. + For instance, setting `solver_io="python"` when using the solver `gurobi` tends to reduce the time to send the optimisation problem to the solver. + """ + + solver_options: dict = Field(default={}) + """Any solver options, as key-value pairs, to pass to the chosen solver""" + + solver: str = Field(default="cbc") + """Solver to use. Any solvers that have Pyomo interfaces can be used. 
Refer to the Pyomo documentation for the latest list.""" + + zero_threshold: float = Field(default=1e-10) + """On postprocessing the optimisation results, values smaller than this threshold will be considered as optimisation artefacts and will be set to zero.""" + + shadow_prices: UniqueList[str] = Field(default=[]) + """Names of model constraints.""" + + +class SolveSpores(ModeBaseModel): + """SPORES configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" + + _mode = "spores" + + mode: MODES_T = Field(default=None) + + spores_number: int = Field(default=3) + """SPORES mode number of iterations after the initial base run.""" + + spores_score_cost_class: str = Field(default="spores_score") + """SPORES mode cost class to vary between iterations after the initial base run.""" + + spores_slack_cost_group: str = Field(default=None) + """SPORES mode cost class to keep below the given `slack` (usually "monetary").""" + + spores_save_per_spore: bool = Field(default=False) + """ + Whether or not to save the result of each SPORES mode run between iterations. + If False, will consolidate all iterations into one dataset after completion of N iterations (defined by `spores_number`) and save that one dataset. + """ + + spores_save_per_spore_path: Path | None = Field(default=None) + """If saving per spore, the path to save to.""" + + spores_skip_cost_op: bool = Field(default=False) + """If the model already contains `plan` mode results, use those as the initial base run results and start with SPORES iterations immediately.""" + + @model_validator(mode="after") + def save_per_spore_path(self) -> Self: + """Ensure that path is given if saving per spore.""" + if self.spores_save_per_spore: + if self.spores_save_per_spore_path is None: + raise ValueError( + "Must define `spores_save_per_spore_path` if you want to save each SPORES result separately." 
+ ) + elif not self.spores_save_per_spore_path.is_dir(): + raise ValueError("`spores_save_per_spore_path` must be a directory.") + return self + + +class Solve(ConfigBaseModel, SolveSpores, SolveBase): + """All configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" + + +class CalliopeConfig(ConfigBaseModel): + """Calliope configuration class.""" + + init: Init + build: Build + solve: Solve diff --git a/src/calliope/config/config_schema.yaml b/src/calliope/config/config_schema.yaml index b9ebe627..41a8c06e 100644 --- a/src/calliope/config/config_schema.yaml +++ b/src/calliope/config/config_schema.yaml @@ -15,172 +15,16 @@ properties: init: type: object description: All configuration options used when initialising a Calliope model - additionalProperties: false - properties: - name: - type: ["null", string] - default: null - description: Model name - calliope_version: - type: ["null", string] - default: null - description: Calliope framework version this model is intended for - time_subset: - oneOf: - - type: "null" - - type: array - minItems: 2 - maxItems: 2 - items: - type: string - description: ISO8601 format datetime strings of the form `YYYY-mm-dd HH:MM:SS` (e.g, '2005-01', '2005-01-01', '2005-01-01 00:00', ...) - default: null - description: >- - Subset of timesteps as an two-element list giving the **inclusive** range. - For example, ['2005-01', '2005-04'] will create a time subset from '2005-01-01 00:00:00' to '2005-04-31 23:59:59'. - time_resample: - type: ["null", string] - default: null - description: setting to adjust time resolution, e.g. "2h" for 2-hourly - pattern: "^[0-9]+[a-zA-Z]" - time_cluster: - type: ["null", string] - default: null - description: setting to cluster the timeseries, must be a path to a file where each date is linked to a representative date that also exists in the timeseries. 
- time_format: - type: string - default: "ISO8601" - description: Timestamp format of all time series data when read from file. "ISO8601" means "%Y-%m-%d %H:%M:%S". - distance_unit: - type: string - default: km - description: >- - Unit of transmission link `distance` (m - metres, km - kilometres). - Automatically derived distances from lat/lon coordinates will be given in this unit. - enum: [m, km] build: type: object description: > All configuration options used when building a Calliope optimisation problem (`calliope.Model.build`). Additional configuration items will be passed onto math string parsing and can therefore be accessed in the `where` strings by `config.[item-name]`, where "[item-name]" is the name of your own configuration item. - additionalProperties: true - properties: - add_math: - type: array - default: [] - description: List of references to files which contain additional mathematical formulations to be applied on top of or instead of the base mode math. - uniqueItems: true - items: - type: string - description: > - If referring to an pre-defined Calliope math file (see documentation for available files), do not append the reference with ".yaml". - If referring to your own math file, ensure the file type is given as a suffix (".yaml" or ".yml"). - Relative paths will be assumed to be relative to the model definition file given when creating a calliope Model (`calliope.Model(model_definition=...)`). - ignore_mode_math: - type: boolean - default: false - description: >- - If True, do not initialise the mathematical formulation with the pre-defined math for the given run `mode`. - This option can be used to completely re-define the Calliope mathematical formulation. 
- backend: - type: string - default: pyomo - description: Module with which to build the optimisation problem - ensure_feasibility: - type: boolean - default: false - description: > - whether to include decision variables in the model which will meet unmet demand or consume unused supply in the model so that the optimisation solves successfully. - This should only be used as a debugging option (as any unmet demand/unused supply is a sign of improper model formulation). - mode: - type: string - default: plan - description: Mode in which to run the optimisation. - enum: [plan, spores, operate] - objective: - type: string - default: min_cost_optimisation - description: Name of internal objective function to use, from those defined in the pre-defined math and any applied additional math. - operate_window: - type: string - description: >- - Operate mode rolling `window`, given as a pandas frequency string. - See [here](https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases) for a list of frequency aliases. - operate_horizon: - type: string - description: >- - Operate mode rolling `horizon`, given as a pandas frequency string. - See [here](https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases) for a list of frequency aliases. - Must be ≥ `operate_window` - operate_use_cap_results: - type: boolean - default: false - description: If the model already contains `plan` mode results, use those optimal capacities as input parameters to the `operate` mode run. - pre_validate_math_strings: - type: boolean - default: true - description: >- - If true, the Calliope math definition will be scanned for parsing errors _before_ undertaking the much more expensive operation of building the optimisation problem. - You can switch this off (e.g., if you know there are no parsing errors) to reduce overall build time. solve: type: object description: All configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`). 
- additionalProperties: false - properties: - spores_number: - type: integer - default: 3 - description: SPORES mode number of iterations after the initial base run. - spores_score_cost_class: - type: string - default: spores_score - description: SPORES mode cost class to vary between iterations after the initial base run. - spores_slack_cost_group: - type: string - description: SPORES mode cost class to keep below the given `slack` (usually "monetary"). - spores_save_per_spore: - type: boolean - default: false - description: Whether or not to save the result of each SPORES mode run between iterations. If False, will consolidate all iterations into one dataset after completion of N iterations (defined by `spores_number`) and save that one dataset. - spores_save_per_spore_path: - type: string - description: If saving per spore, the path to save to. - spores_skip_cost_op: - type: boolean - default: false - description: If the model already contains `plan` mode results, use those as the initial base run results and start with SPORES iterations immediately. - save_logs: - type: ["null", string] - default: null - description: If given, should be a path to a directory in which to save optimisation logs. - solver_io: - type: ["null", string] - default: null - description: > - Some solvers have different interfaces that perform differently. - For instance, setting `solver_io="python"` when using the solver `gurobi` tends to reduce the time to send the optimisation problem to the solver. - solver_options: - type: ["null", object] - default: null - description: Any solver options, as key-value pairs, to pass to the chosen solver - solver: - type: string - default: cbc - description: Solver to use. Any solvers that have Pyomo interfaces can be used. Refer to the Pyomo documentation for the latest list. 
- zero_threshold: - type: number - default: 1e-10 - description: On postprocessing the optimisation results, values smaller than this threshold will be considered as optimisation artefacts and will be set to zero. - shadow_prices: - type: array - uniqueItems: true - items: - type: string - description: Names of model constraints. - default: [] - description: List of constraints for which to extract shadow prices. Shadow prices will be added as variables to the model results as `shadow_price_{constraintname}`. parameters: type: [object, "null"] diff --git a/src/calliope/model.py b/src/calliope/model.py index ee8c5a77..811c9676 100644 --- a/src/calliope/model.py +++ b/src/calliope/model.py @@ -12,7 +12,7 @@ import xarray as xr import calliope -from calliope import backend, exceptions, io, preprocess +from calliope import backend, config, exceptions, io, preprocess from calliope.attrdict import AttrDict from calliope.postprocess import postprocess as postprocess_results from calliope.preprocess.data_tables import DataTable @@ -43,7 +43,7 @@ class Model: """A Calliope Model.""" _TS_OFFSET = pd.Timedelta(1, unit="nanoseconds") - ATTRS_SAVED = ("_def_path", "applied_math") + ATTRS_SAVED = ("_def_path", "applied_math", "config") def __init__( self, @@ -74,7 +74,7 @@ def __init__( **kwargs: initialisation overrides. 
""" self._timings: dict = {} - self.config: AttrDict + self.config: config.CalliopeConfig self.defaults: AttrDict self.applied_math: preprocess.CalliopeMath self._def_path: str | None = None @@ -162,19 +162,22 @@ def _init_from_model_def_dict( "model_run_creation", comment="Model: preprocessing stage 1 (model_run)", ) - model_config = AttrDict(extract_from_schema(CONFIG_SCHEMA, "default")) - model_config.union(model_definition.pop("config"), allow_override=True) - init_config = update_then_validate_config("init", model_config) + model_config = config.CalliopeConfig(model_definition.pop("config")) - if init_config["time_cluster"] is not None: - init_config["time_cluster"] = relative_path( - self._def_path, init_config["time_cluster"] + if model_config.init.data["time_cluster"] is not None: + model_config.init.update( + { + "time_cluster": relative_path( + self._def_path, model_config.init.data["time_cluster"] + ) + } ) + model_config.init.validate() param_metadata = {"default": extract_from_schema(MODEL_SCHEMA, "default")} attributes = { - "calliope_version_defined": init_config["calliope_version"], + "calliope_version_defined": model_config.init.data["calliope_version"], "calliope_version_initialised": calliope.__version__, "applied_overrides": applied_overrides, "scenario": scenario, @@ -185,13 +188,15 @@ def _init_from_model_def_dict( for table_name, table_dict in model_definition.pop("data_tables", {}).items(): table_dict, _ = climb_template_tree(table_dict, templates, table_name) data_tables.append( - DataTable( - init_config, table_name, table_dict, data_table_dfs, self._def_path - ) + DataTable(table_name, table_dict, data_table_dfs, self._def_path) ) model_data_factory = ModelDataFactory( - init_config, model_definition, data_tables, attributes, param_metadata + model_config.init.data, + model_definition, + data_tables, + attributes, + param_metadata, ) model_data_factory.build() @@ -204,9 +209,10 @@ def _init_from_model_def_dict( comment="Model: 
preprocessing stage 2 (model_data)", ) - self._add_observed_dict("config", model_config) + self._model_data.attrs["name"] = model_config.init.data["name"] + + self.config = model_config - self._model_data.attrs["name"] = init_config["name"] log_time( LOGGER, self._timings, @@ -229,9 +235,10 @@ def _init_from_model_data(self, model_data: xr.Dataset) -> None: self.applied_math = preprocess.CalliopeMath.from_dict( model_data.attrs.pop("applied_math") ) + if "config" in model_data.attrs: + self.config = config.CalliopeConfig(model_data.attrs.pop("config")) self._model_data = model_data - self._add_model_data_methods() if self.results: self._is_solved = True @@ -243,47 +250,6 @@ def _init_from_model_data(self, model_data: xr.Dataset) -> None: comment="Model: loaded model_data", ) - def _add_model_data_methods(self): - """Add observed data to `model`. - - 1. Filter model dataset to produce views on the input/results data - 2. Add top-level configuration dictionaries simultaneously to the model data attributes and as attributes of this class. - - """ - self._add_observed_dict("config") - - def _add_observed_dict(self, name: str, dict_to_add: dict | None = None) -> None: - """Add the same dictionary as property of model object and an attribute of the model xarray dataset. - - Args: - name (str): - Name of dictionary which will be set as the model property name and - (if necessary) the dataset attribute name. - dict_to_add (dict | None, optional): - If given, set as both the model property and the dataset attribute, - otherwise set an existing dataset attribute as a model property of the - same name. Defaults to None. - - Raises: - exceptions.ModelError: If `dict_to_add` is not given, it must be an attribute of model data. - TypeError: `dict_to_add` must be a dictionary. 
- """ - if dict_to_add is None: - try: - dict_to_add = self._model_data.attrs[name] - except KeyError: - raise exceptions.ModelError( - f"Expected the model property `{name}` to be a dictionary attribute of the model dataset. If you are loading the model from a NetCDF file, ensure it is a valid Calliope model." - ) - if not isinstance(dict_to_add, dict): - raise TypeError( - f"Attempted to add dictionary property `{name}` to model, but received argument of type `{type(dict_to_add).__name__}`" - ) - else: - dict_to_add = AttrDict(dict_to_add) - self._model_data.attrs[name] = dict_to_add - setattr(self, name, dict_to_add) - def build( self, force: bool = False, add_math_dict: dict | None = None, **kwargs ) -> None: @@ -310,30 +276,35 @@ def build( comment="Model: backend build starting", ) - backend_config = {**self.config["build"], **kwargs} - mode = backend_config["mode"] + self.config.build.data_temp = kwargs + latest_build_config = self.config.build.data + mode = self.config.build.mode if mode == "operate": if not self._model_data.attrs["allow_operate_mode"]: raise exceptions.ModelError( "Unable to run this model in operate (i.e. dispatch) mode, probably because " "there exist non-uniform timesteps (e.g. 
from time clustering)" ) - start_window_idx = backend_config.pop("start_window_idx", 0) + start_window_idx = self.config.build.data.pop("start_window_idx", 0) backend_input = self._prepare_operate_mode_inputs( - start_window_idx, **backend_config + start_window_idx, **self.config.build.data ) else: backend_input = self._model_data - init_math_list = [] if backend_config.get("ignore_mode_math") else [mode] + init_math_list = [] if self.config.build.data["ignore_mode_math"] else [mode] end_math_list = [] if add_math_dict is None else [add_math_dict] - full_math_list = init_math_list + backend_config["add_math"] + end_math_list + full_math_list = ( + init_math_list + self.config.build.data["add_math"] + end_math_list + ) LOGGER.debug(f"Math preprocessing | Loading math: {full_math_list}") model_math = preprocess.CalliopeMath(full_math_list, self._def_path) - backend_name = backend_config.pop("backend") self.backend = backend.get_model_backend( - backend_name, backend_input, model_math, **backend_config + latest_build_config["backend"], + backend_input, + model_math, + **latest_build_config, ) self.backend.add_optimisation_components() @@ -370,7 +341,7 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: exceptions.ModelError: Some preprocessing steps will stop a run mode of "operate" from being possible. """ # Check that results exist and are non-empty - if not self._is_built: + if not self.is_built: raise exceptions.ModelError( "You must build the optimisation problem (`.build()`) " "before you can run it." 
@@ -388,7 +359,7 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: else: to_drop = [] - run_mode = self.backend.inputs.attrs["config"]["build"]["mode"] + run_mode = self.config.build.data["mode"] self._model_data.attrs["timestamp_solve_start"] = log_time( LOGGER, self._timings, diff --git a/src/calliope/preprocess/data_tables.py b/src/calliope/preprocess/data_tables.py index 4a90fbf3..83233e2b 100644 --- a/src/calliope/preprocess/data_tables.py +++ b/src/calliope/preprocess/data_tables.py @@ -51,7 +51,6 @@ class DataTable: def __init__( self, - model_config: dict, table_name: str, data_table: DataTableDict, data_table_dfs: dict[str, pd.DataFrame] | None = None, @@ -60,7 +59,6 @@ def __init__( """Load and format a data table from file / in-memory object. Args: - model_config (dict): Model initialisation configuration dictionary. table_name (str): name of the data table. data_table (DataTableDict): Data table definition dictionary. data_table_dfs (dict[str, pd.DataFrame] | None, optional): @@ -75,7 +73,6 @@ def __init__( self.input = data_table self.dfs = data_table_dfs if data_table_dfs is not None else dict() self.model_definition_path = model_definition_path - self.config = model_config self.columns = self._listify_if_defined("columns") self.index = self._listify_if_defined("rows") From 1f96c4760200ef9ba635898569a6c88a0e4a549a Mon Sep 17 00:00:00 2001 From: Bryn Pickering <17178478+brynpickering@users.noreply.github.com> Date: Wed, 6 Nov 2024 23:40:50 +0000 Subject: [PATCH 02/17] Update config to have operate and spores as subdicts; fix use of config throughout src --- docs/hooks/generate_readable_schema.py | 3 +- src/calliope/backend/__init__.py | 14 +- src/calliope/backend/backend_model.py | 33 +-- src/calliope/backend/gurobi_backend_model.py | 11 +- src/calliope/backend/parsing.py | 1 + src/calliope/backend/pyomo_backend_model.py | 11 +- src/calliope/backend/where_parser.py | 6 +- src/calliope/cli.py | 11 +- 
src/calliope/config.py | 207 +++++++++++------- .../national_scale/scenarios.yaml | 5 +- .../example_models/urban_scale/scenarios.yaml | 5 +- src/calliope/model.py | 133 +++++------ src/calliope/postprocess/postprocess.py | 4 +- src/calliope/preprocess/data_tables.py | 4 +- src/calliope/preprocess/model_data.py | 21 +- src/calliope/preprocess/scenarios.py | 7 - src/calliope/util/schema.py | 14 -- src/calliope/util/tools.py | 4 +- tests/common/util.py | 4 +- tests/conftest.py | 5 +- tests/test_core_model.py | 43 +--- tests/test_preprocess_model_data.py | 11 +- 22 files changed, 268 insertions(+), 289 deletions(-) diff --git a/docs/hooks/generate_readable_schema.py b/docs/hooks/generate_readable_schema.py index 89ae232e..8b799265 100644 --- a/docs/hooks/generate_readable_schema.py +++ b/docs/hooks/generate_readable_schema.py @@ -14,12 +14,13 @@ import jsonschema2md from mkdocs.structure.files import File +from calliope import AttrDict, config from calliope.util import schema TEMPDIR = tempfile.TemporaryDirectory() SCHEMAS = { - "config_schema": schema.CONFIG_SCHEMA, + "config_schema": AttrDict.from_yaml(config.CalliopeConfig().model_yaml_schema()), "model_schema": schema.MODEL_SCHEMA, "math_schema": schema.MATH_SCHEMA, "data_table_schema": schema.DATA_TABLE_SCHEMA, diff --git a/src/calliope/backend/__init__.py b/src/calliope/backend/__init__.py index d37395d8..84929792 100644 --- a/src/calliope/backend/__init__.py +++ b/src/calliope/backend/__init__.py @@ -15,19 +15,19 @@ from calliope.preprocess import CalliopeMath if TYPE_CHECKING: + from calliope import config from calliope.backend.backend_model import BackendModel def get_model_backend( - name: str, data: xr.Dataset, math: CalliopeMath, **kwargs + build_config: "config.Build", data: xr.Dataset, math: CalliopeMath ) -> "BackendModel": """Assign a backend using the given configuration. Args: - name (str): name of the backend to use. + build_config: Build configuration options. 
data (Dataset): model data for the backend. math (CalliopeMath): Calliope math. - **kwargs: backend keyword arguments corresponding to model.config.build. Raises: exceptions.BackendError: If invalid backend was requested. @@ -35,10 +35,10 @@ def get_model_backend( Returns: BackendModel: Initialized backend object. """ - match name: + match build_config.backend: case "pyomo": - return PyomoBackendModel(data, math, **kwargs) + return PyomoBackendModel(data, math, build_config) case "gurobi": - return GurobiBackendModel(data, math, **kwargs) + return GurobiBackendModel(data, math, build_config) case _: - raise BackendError(f"Incorrect backend '{name}' requested.") + raise BackendError(f"Incorrect backend '{build_config.backend}' requested.") diff --git a/src/calliope/backend/backend_model.py b/src/calliope/backend/backend_model.py index c52d74ab..21603864 100644 --- a/src/calliope/backend/backend_model.py +++ b/src/calliope/backend/backend_model.py @@ -26,17 +26,13 @@ import numpy as np import xarray as xr -from calliope import exceptions +from calliope import config, exceptions from calliope.attrdict import AttrDict from calliope.backend import helper_functions, parsing from calliope.exceptions import warn as model_warn from calliope.io import load_config from calliope.preprocess.model_math import ORDERED_COMPONENTS_T, CalliopeMath -from calliope.util.schema import ( - MODEL_SCHEMA, - extract_from_schema, - update_then_validate_config, -) +from calliope.util.schema import MODEL_SCHEMA, extract_from_schema if TYPE_CHECKING: from calliope.backend.parsing import T as Tp @@ -69,20 +65,20 @@ class BackendModelGenerator(ABC): _PARAM_UNITS = extract_from_schema(MODEL_SCHEMA, "x-unit") _PARAM_TYPE = extract_from_schema(MODEL_SCHEMA, "x-type") - def __init__(self, inputs: xr.Dataset, math: CalliopeMath, **kwargs): + def __init__( + self, inputs: xr.Dataset, math: CalliopeMath, build_config: config.Build + ): """Abstract base class to build a representation of the optimisation 
problem. Args: inputs (xr.Dataset): Calliope model data. math (CalliopeMath): Calliope math. - **kwargs (Any): build configuration overrides. + build_config: Build configuration options. """ self._dataset = xr.Dataset() self.inputs = inputs.copy() self.inputs.attrs = deepcopy(inputs.attrs) - self.inputs.attrs["config"]["build"] = update_then_validate_config( - "build", self.inputs.attrs["config"], **kwargs - ) + self.config = build_config self.math: CalliopeMath = deepcopy(math) self._solve_logger = logging.getLogger(__name__ + ".") @@ -200,6 +196,7 @@ def _check_inputs(self): "equation_name": "", "backend_interface": self, "input_data": self.inputs, + "build_config": self.config, "helper_functions": helper_functions._registry["where"], "apply_where": True, "references": set(), @@ -246,7 +243,7 @@ def add_optimisation_components(self) -> None: # The order of adding components matters! # 1. Variables, 2. Global Expressions, 3. Constraints, 4. Objectives self._add_all_inputs_as_parameters() - if self.inputs.attrs["config"]["build"]["pre_validate_math_strings"]: + if self.config.pre_validate_math_strings: self._validate_math_string_parsing() for components in typing.get_args(ORDERED_COMPONENTS_T): component = components.removesuffix("s") @@ -399,7 +396,7 @@ def _add_all_inputs_as_parameters(self) -> None: if param_name in self.parameters.keys(): continue elif ( - self.inputs.attrs["config"]["build"]["mode"] != "operate" + self.config.mode != "operate" and param_name in extract_from_schema(MODEL_SCHEMA, "x-operate-param").keys() ): @@ -606,7 +603,11 @@ class BackendModel(BackendModelGenerator, Generic[T]): """Calliope's backend model functionality.""" def __init__( - self, inputs: xr.Dataset, math: CalliopeMath, instance: T, **kwargs + self, + inputs: xr.Dataset, + math: CalliopeMath, + instance: T, + build_config: config.Build, ) -> None: """Abstract base class to build backend models that interface with solvers. 
@@ -614,9 +615,9 @@ def __init__( inputs (xr.Dataset): Calliope model data. math (CalliopeMath): Calliope math. instance (T): Interface model instance. - **kwargs: build configuration overrides. + build_config: Build configuration options. """ - super().__init__(inputs, math, **kwargs) + super().__init__(inputs, math, build_config) self._instance = instance self.shadow_prices: ShadowPrices self._has_verbose_strings: bool = False diff --git a/src/calliope/backend/gurobi_backend_model.py b/src/calliope/backend/gurobi_backend_model.py index 2d2e0a48..ab02d9d4 100644 --- a/src/calliope/backend/gurobi_backend_model.py +++ b/src/calliope/backend/gurobi_backend_model.py @@ -14,6 +14,7 @@ import pandas as pd import xarray as xr +from calliope import config from calliope.backend import backend_model, parsing from calliope.exceptions import BackendError, BackendWarning from calliope.exceptions import warn as model_warn @@ -41,19 +42,21 @@ class GurobiBackendModel(backend_model.BackendModel): """gurobipy-specific backend functionality.""" - def __init__(self, inputs: xr.Dataset, math: CalliopeMath, **kwargs) -> None: + def __init__( + self, inputs: xr.Dataset, math: CalliopeMath, build_config: config.Build + ) -> None: """Gurobi solver interface class. Args: inputs (xr.Dataset): Calliope model data. math (CalliopeMath): Calliope math. - **kwargs: passed directly to the solver. + build_config: Build configuration options. """ if importlib.util.find_spec("gurobipy") is None: raise ImportError( "Install the `gurobipy` package to build the optimisation problem with the Gurobi backend." 
) - super().__init__(inputs, math, gurobipy.Model(), **kwargs) + super().__init__(inputs, math, gurobipy.Model(), build_config) self._instance: gurobipy.Model self.shadow_prices = GurobiShadowPrices(self) @@ -144,7 +147,7 @@ def _objective_setter( ) -> xr.DataArray: expr = element.evaluate_expression(self, references=references) - if name == self.inputs.attrs["config"].build.objective: + if name == self.config.objective: self._instance.setObjective(expr.item(), sense=sense) self.log("objectives", name, "Objective activated.") diff --git a/src/calliope/backend/parsing.py b/src/calliope/backend/parsing.py index 33c9ea47..5cdd0808 100644 --- a/src/calliope/backend/parsing.py +++ b/src/calliope/backend/parsing.py @@ -311,6 +311,7 @@ def evaluate_where( helper_functions=helper_functions._registry["where"], input_data=backend_interface.inputs, backend_interface=backend_interface, + build_config=backend_interface.config, references=references if references is not None else set(), apply_where=True, ) diff --git a/src/calliope/backend/pyomo_backend_model.py b/src/calliope/backend/pyomo_backend_model.py index 5ba41ba0..46ea3b32 100644 --- a/src/calliope/backend/pyomo_backend_model.py +++ b/src/calliope/backend/pyomo_backend_model.py @@ -26,6 +26,7 @@ from pyomo.opt import SolverFactory # type: ignore from pyomo.util.model_size import build_model_size_report # type: ignore +from calliope import config from calliope.exceptions import BackendError, BackendWarning from calliope.exceptions import warn as model_warn from calliope.preprocess import CalliopeMath @@ -58,15 +59,17 @@ class PyomoBackendModel(backend_model.BackendModel): """Pyomo-specific backend functionality.""" - def __init__(self, inputs: xr.Dataset, math: CalliopeMath, **kwargs) -> None: + def __init__( + self, inputs: xr.Dataset, math: CalliopeMath, build_config: config.Build + ) -> None: """Pyomo solver interface class. Args: inputs (xr.Dataset): Calliope model data. math (CalliopeMath): Calliope math. 
- **kwargs: passed directly to the solver. + build_config: Build configuration options. """ - super().__init__(inputs, math, pmo.block(), **kwargs) + super().__init__(inputs, math, pmo.block(), build_config) self._instance.parameters = pmo.parameter_dict() self._instance.variables = pmo.variable_dict() @@ -185,7 +188,7 @@ def _objective_setter( ) -> xr.DataArray: expr = element.evaluate_expression(self, references=references) objective = pmo.objective(expr.item(), sense=sense) - if name == self.inputs.attrs["config"].build.objective: + if name == self.config.objective: text = "activated" objective.activate() else: diff --git a/src/calliope/backend/where_parser.py b/src/calliope/backend/where_parser.py index f434a9bf..06f782f6 100644 --- a/src/calliope/backend/where_parser.py +++ b/src/calliope/backend/where_parser.py @@ -17,6 +17,7 @@ from calliope.exceptions import BackendError if TYPE_CHECKING: + from calliope import config from calliope.backend.backend_model import BackendModel @@ -34,6 +35,7 @@ class EvalAttrs(TypedDict): helper_functions: dict[str, Callable] apply_where: NotRequired[bool] references: NotRequired[set] + build_config: config.Build class EvalWhere(expression_parser.EvalToArrayStr): @@ -118,9 +120,7 @@ def as_math_string(self) -> str: # noqa: D102, override return rf"\text{{config.{self.config_option}}}" def as_array(self) -> xr.DataArray: # noqa: D102, override - config_val = ( - self.eval_attrs["input_data"].attrs["config"].build[self.config_option] - ) + config_val = getattr(self.eval_attrs["build_config"], self.config_option) if not isinstance(config_val, int | float | str | bool | np.bool_): raise BackendError( diff --git a/src/calliope/cli.py b/src/calliope/cli.py index a9d811d2..4059de7e 100644 --- a/src/calliope/cli.py +++ b/src/calliope/cli.py @@ -278,9 +278,9 @@ def run( # Else run the model, then save outputs else: click.secho("Starting model run...") - + kwargs = {} if save_logs: - model.config.set_key("solve.save_logs", save_logs) + 
kwargs["solve.save_logs"] = save_logs if save_csv is None and save_netcdf is None: click.secho( @@ -292,14 +292,13 @@ def run( # If save_netcdf is used, override the 'save_per_spore_path' to point to a # directory of the same name as the planned netcdf - if save_netcdf and model.config.solve.spores_save_per_spore: - model.config.set_key( - "solve.spores_save_per_spore_path", + if save_netcdf and model.config.solve.spores.save_per_spore: + kwargs["solve.spores_save_per_spore_path"] = ( save_netcdf.replace(".nc", "/spore_{}.nc"), ) model.build() - model.solve() + model.solve(**kwargs) termination = model._model_data.attrs.get( "termination_condition", "unknown" ) diff --git a/src/calliope/config.py b/src/calliope/config.py index 75a9bf79..79ae5941 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -5,8 +5,9 @@ from collections.abc import Hashable from datetime import datetime from pathlib import Path -from typing import Annotated, Literal, Self, TypeVar, overload +from typing import Annotated, Literal, Self, TypeVar, get_args, overload +import jsonref from pydantic import AfterValidator, BaseModel, Field, model_validator from pydantic_core import PydanticCustomError @@ -67,7 +68,16 @@ def update(self, update_dict: dict, deep: bool = False) -> Self: Returns: BaseModel: New model instance. 
""" - updated = super().model_copy(update=update_dict, deep=deep) + new_dict: dict = {} + # Iterate through dict to be updated and convert any sub-dicts into their respective pydantic model objects + for key, val in update_dict.items(): + key_class = getattr(self, key) + if isinstance(key_class, ConfigBaseModel): + new_dict[key] = key_class.update(val) + key_class._kwargs = val + else: + new_dict[key] = val + updated = super().model_copy(update=new_dict, deep=deep) updated.model_validate(updated) self._kwargs = update_dict return updated @@ -87,20 +97,31 @@ def model_yaml_schema(self, filepath: str | Path | None = None) -> None | str: Returns: None | str: If `filepath` is given, returns None. Otherwise, returns the YAML string. """ - return AttrDict(self.model_json_schema()).to_yaml(filepath) + schema_dict = jsonref.replace_refs(self.model_json_schema()) + return AttrDict(schema_dict).to_yaml(filepath) + @property + def applied_keyword_overrides(self) -> dict: + """Most recently applied keyword overrides used to update this configuration. -class ModeBaseModel(BaseModel): + Returns: + dict: Description of applied overrides. 
+ """ + return self._kwargs + + +class ModeBaseModel(ConfigBaseModel): """Mode-specific configuration, which will be hidden from the string representation of the model if that mode is not activated.""" - _mode: str + mode: MODES_T = Field(default="plan") + """Mode in which to run the optimisation.""" @model_validator(mode="after") def update_repr(self) -> Self: """Hide config from model string representation if mode is not activated.""" for key, val in self.model_fields.items(): - if key.startswith(self._mode): - val.repr = self.mode == self._mode + if key in get_args(MODES_T): + val.repr = self.mode == key return self @@ -108,6 +129,7 @@ class Init(ConfigBaseModel): """All configuration options used when initialising a Calliope model.""" model_config = { + "title": "init", "extra": "forbid", "frozen": True, "json_schema_extra": hide_from_schema(["def_path"]), @@ -116,6 +138,8 @@ class Init(ConfigBaseModel): } def_path: Path = Field(default=".", repr=False, exclude=True) + """The path to the main model definition YAML file, if one has been used to instantiate the Calliope Model class.""" + name: str | None = Field(default=None) """Model name""" @@ -155,17 +179,52 @@ class Init(ConfigBaseModel): @classmethod def abs_path(cls, data): """Add model definition path.""" - if "time_cluster" in data: + if data.get("time_cluster", None) is not None: data["time_cluster"] = tools.relative_path( data["def_path"], data["time_cluster"] ) return data -class BuildBase(BaseModel): +class BuildOperate(ConfigBaseModel): + """Operate mode configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" + + model_config = { + "title": "operate", + "extra": "forbid", + "json_schema_extra": hide_from_schema(["start_window_idx"]), + "revalidate_instances": "always", + "use_attribute_docstrings": True, + } + + window: str = Field(default="24h") + """ + Operate mode rolling `window`, given as a pandas frequency string. 
+ See [here](https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases) for a list of frequency aliases. + """ + + horizon: str = Field(default="48h") + """ + Operate mode rolling `horizon`, given as a pandas frequency string. + See [here](https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases) for a list of frequency aliases. + Must be ≥ `window` + """ + + use_cap_results: bool = Field(default=False) + """If the model already contains `plan` mode results, use those optimal capacities as input parameters to the `operate` mode run.""" + + start_window_idx: int = Field(default=0, repr=False, exclude=True) + """Which time window to build. This is used to track the window when re-building the model part way through solving in `operate` mode.""" + + +class Build(ModeBaseModel): """Base configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" - model_config = {"extra": "allow", "revalidate_instances": "always"} + model_config = { + "title": "build", + "extra": "allow", + "revalidate_instances": "always", + } add_math: UniqueList[str] = Field(default=[]) """ List of references to files which contain additional mathematical formulations to be applied on top of or instead of the base mode math. @@ -189,9 +248,6 @@ class BuildBase(BaseModel): This should only be used as a debugging option (as any unmet demand/unused supply is a sign of improper model formulation). """ - mode: MODES_T = Field(default="plan") - """Mode in which to run the optimisation.""" - objective: str = Field(default="min_cost_optimisation") """Name of internal objective function to use, from those defined in the pre-defined math and any applied additional math.""" @@ -201,48 +257,56 @@ class BuildBase(BaseModel): You can switch this off (e.g., if you know there are no parsing errors) to reduce overall build time. 
""" + operate: BuildOperate = BuildOperate() -class BuildOperate(ModeBaseModel): - """Operate mode configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" - _mode = "operate" +class SolveSpores(ConfigBaseModel): + """SPORES configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" + + number: int = Field(default=3) + """SPORES mode number of iterations after the initial base run.""" - operate_window: str = Field(default=None) - """ - Operate mode rolling `window`, given as a pandas frequency string. - See [here](https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases) for a list of frequency aliases. - """ + score_cost_class: str = Field(default="score") + """SPORES mode cost class to vary between iterations after the initial base run.""" + + slack_cost_group: str = Field(default=None) + """SPORES mode cost class to keep below the given `slack` (usually "monetary").""" - operate_horizon: str = Field(default=None) + save_per_spore: bool = Field(default=False) """ - Operate mode rolling `horizon`, given as a pandas frequency string. - See [here](https://pandas.pydata.org/docs/user_guide/timeseries.html#offset-aliases) for a list of frequency aliases. - Must be ≥ `operate_window` + Whether or not to save the result of each SPORES mode run between iterations. + If False, will consolidate all iterations into one dataset after completion of N iterations (defined by `number`) and save that one dataset. """ - operate_use_cap_results: bool = Field(default=False) - """If the model already contains `plan` mode results, use those optimal capacities as input parameters to the `operate` mode run.""" - + save_per_spore_path: Path | None = Field(default=None) + """If saving per spore, the path to save to.""" -class Build(ConfigBaseModel, BuildOperate, BuildBase): - """All configuration options used when building a Calliope optimisation problem (`calliope.Model.build`). 
+ skip_cost_op: bool = Field(default=False) + """If the model already contains `plan` mode results, use those as the initial base run results and start with SPORES iterations immediately.""" - Additional configuration items will be passed onto math string parsing and can therefore be accessed in the `where` strings by `config.[item-name]`, - where "[item-name]" is the name of your own configuration item. - """ + @model_validator(mode="after") + def require_save_per_spore_path(self) -> Self: + """Ensure that path is given if saving per spore.""" + if self.save_per_spore: + if self.save_per_spore_path is None: + raise ValueError( + "Must define `save_per_spore_path` if you want to save each SPORES result separately." + ) + elif not self.save_per_spore_path.is_dir(): + raise ValueError("`save_per_spore_path` must be a directory.") + return self -class SolveBase(BaseModel): +class Solve(ModeBaseModel): """Base configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" model_config = { + "title": "solve", "extra": "forbid", "revalidate_instances": "always", "json_schema_extra": hide_from_schema(["mode"]), } - mode: Literal["plan", "spores", "operate"] = Field(default="plan", repr=False) - save_logs: Path | None = Field(default=None) """If given, should be a path to a directory in which to save optimisation logs.""" @@ -264,55 +328,38 @@ class SolveBase(BaseModel): shadow_prices: UniqueList[str] = Field(default=[]) """Names of model constraints.""" + spores: SolveSpores = SolveSpores() -class SolveSpores(ModeBaseModel): - """SPORES configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" - - _mode = "spores" - mode: MODES_T = Field(default=None) - - spores_number: int = Field(default=3) - """SPORES mode number of iterations after the initial base run.""" - - spores_score_cost_class: str = Field(default="spores_score") - """SPORES mode cost class to vary between iterations after the 
initial base run.""" - - spores_slack_cost_group: str = Field(default=None) - """SPORES mode cost class to keep below the given `slack` (usually "monetary").""" - - spores_save_per_spore: bool = Field(default=False) - """ - Whether or not to save the result of each SPORES mode run between iterations. - If False, will consolidate all iterations into one dataset after completion of N iterations (defined by `spores_number`) and save that one dataset. - """ - - spores_save_per_spore_path: Path | None = Field(default=None) - """If saving per spore, the path to save to.""" - - spores_skip_cost_op: bool = Field(default=False) - """If the model already contains `plan` mode results, use those as the initial base run results and start with SPORES iterations immediately.""" +class CalliopeConfig(ConfigBaseModel): + """Calliope configuration class.""" - @model_validator(mode="after") - def save_per_spore_path(self) -> Self: - """Ensure that path is given if saving per spore.""" - if self.spores_save_per_spore: - if self.spores_save_per_spore_path is None: - raise ValueError( - "Must define `spores_save_per_spore_path` if you want to save each SPORES result separately." - ) - elif not self.spores_save_per_spore_path.is_dir(): - raise ValueError("`spores_save_per_spore_path` must be a directory.") - return self + model_config = {"title": "config"} + init: Init = Init() + build: Build = Build() + solve: Solve = Solve() + @model_validator(mode="before") + @classmethod + def update_solve_mode(cls, data): + """Solve mode should match build mode.""" + data["solve"]["mode"] = data["build"]["mode"] + return data -class Solve(ConfigBaseModel, SolveSpores, SolveBase): - """All configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" + def update(self, update_dict: dict, deep: bool = False) -> Self: + """Return a new iteration of the model with updated fields. + Updates are validated and stored in the parent class in the `_kwargs` key. 
-class CalliopeConfig(ConfigBaseModel): - """Calliope configuration class.""" + Args: + update_dict (dict): Dictionary with which to update the base model. + deep (bool, optional): Set to True to make a deep copy of the model. Defaults to False. - init: Init - build: Build - solve: Solve + Returns: + BaseModel: New model instance. + """ + update_dict_temp = AttrDict(update_dict) + if update_dict_temp.get_key("build.mode", None) is not None: + update_dict_temp.set_key("solve.mode", update_dict_temp["build"]["mode"]) + updated = super().update(update_dict_temp.as_dict(), deep=deep) + return updated diff --git a/src/calliope/example_models/national_scale/scenarios.yaml b/src/calliope/example_models/national_scale/scenarios.yaml index 58a3dc81..0e34f8f9 100644 --- a/src/calliope/example_models/national_scale/scenarios.yaml +++ b/src/calliope/example_models/national_scale/scenarios.yaml @@ -70,8 +70,9 @@ overrides: init.time_subset: ["2005-01-01", "2005-01-10"] build: mode: operate - operate_window: 12h - operate_horizon: 24h + operate: + window: 12h + horizon: 24h nodes: region1.techs.ccgt.flow_cap: 30000 diff --git a/src/calliope/example_models/urban_scale/scenarios.yaml b/src/calliope/example_models/urban_scale/scenarios.yaml index 12d114cb..d754496d 100644 --- a/src/calliope/example_models/urban_scale/scenarios.yaml +++ b/src/calliope/example_models/urban_scale/scenarios.yaml @@ -51,8 +51,9 @@ overrides: init.time_subset: ["2005-07-01", "2005-07-10"] build: mode: operate - operate_window: 2h - operate_horizon: 48h + operate: + window: 2h + horizon: 48h nodes: X1: diff --git a/src/calliope/model.py b/src/calliope/model.py index 811c9676..e6088c21 100644 --- a/src/calliope/model.py +++ b/src/calliope/model.py @@ -22,10 +22,9 @@ CONFIG_SCHEMA, MODEL_SCHEMA, extract_from_schema, - update_then_validate_config, validate_dict, ) -from calliope.util.tools import climb_template_tree, relative_path +from calliope.util.tools import climb_template_tree if TYPE_CHECKING: from 
calliope.backend.backend_model import BackendModel @@ -43,7 +42,7 @@ class Model: """A Calliope Model.""" _TS_OFFSET = pd.Timedelta(1, unit="nanoseconds") - ATTRS_SAVED = ("_def_path", "applied_math", "config") + ATTRS_SAVED = ("applied_math", "config") def __init__( self, @@ -77,7 +76,6 @@ def __init__( self.config: config.CalliopeConfig self.defaults: AttrDict self.applied_math: preprocess.CalliopeMath - self._def_path: str | None = None self.backend: BackendModel self._is_built: bool = False self._is_solved: bool = False @@ -88,20 +86,24 @@ def __init__( LOGGER, self._timings, "model_creation", comment="Model: initialising" ) if isinstance(model_definition, xr.Dataset): + if kwargs: + raise exceptions.ModelError( + "Cannot apply initialisation configuration overrides when loading data from an xarray Dataset." + ) self._init_from_model_data(model_definition) else: if isinstance(model_definition, dict): model_def_dict = AttrDict(model_definition) else: - self._def_path = str(model_definition) + kwargs["def_path"] = str(model_definition) model_def_dict = AttrDict.from_yaml(model_definition) (model_def, applied_overrides) = preprocess.load_scenario_overrides( - model_def_dict, scenario, override_dict, **kwargs + model_def_dict, scenario, override_dict ) self._init_from_model_def_dict( - model_def, applied_overrides, scenario, data_table_dfs + model_def, applied_overrides, scenario, data_table_dfs, **kwargs ) self._model_data.attrs["timestamp_model_creation"] = timestamp_model_creation @@ -144,6 +146,7 @@ def _init_from_model_def_dict( applied_overrides: str, scenario: str | None, data_table_dfs: dict[str, pd.DataFrame] | None = None, + **kwargs, ) -> None: """Initialise the model using pre-processed YAML files and optional dataframes/dicts. 
@@ -152,6 +155,7 @@ def _init_from_model_def_dict( applied_overrides (str): overrides specified by users scenario (str | None): scenario specified by users data_table_dfs (dict[str, pd.DataFrame] | None, optional): files with additional model information. Defaults to None. + **kwargs: Initialisation configuration overrides. """ # First pass to check top-level keys are all good validate_dict(model_definition, CONFIG_SCHEMA, "Model definition") @@ -163,21 +167,12 @@ def _init_from_model_def_dict( comment="Model: preprocessing stage 1 (model_run)", ) - model_config = config.CalliopeConfig(model_definition.pop("config")) - - if model_config.init.data["time_cluster"] is not None: - model_config.init.update( - { - "time_cluster": relative_path( - self._def_path, model_config.init.data["time_cluster"] - ) - } - ) - model_config.init.validate() + model_config = config.CalliopeConfig(**model_definition.pop("config")) + init_config = model_config.update({"init": kwargs}).init param_metadata = {"default": extract_from_schema(MODEL_SCHEMA, "default")} attributes = { - "calliope_version_defined": model_config.init.data["calliope_version"], + "calliope_version_defined": init_config.calliope_version, "calliope_version_initialised": calliope.__version__, "applied_overrides": applied_overrides, "scenario": scenario, @@ -188,15 +183,10 @@ def _init_from_model_def_dict( for table_name, table_dict in model_definition.pop("data_tables", {}).items(): table_dict, _ = climb_template_tree(table_dict, templates, table_name) data_tables.append( - DataTable(table_name, table_dict, data_table_dfs, self._def_path) + DataTable(table_name, table_dict, data_table_dfs, init_config.def_path) ) - model_data_factory = ModelDataFactory( - model_config.init.data, - model_definition, - data_tables, - attributes, - param_metadata, + init_config, model_definition, data_tables, attributes, param_metadata ) model_data_factory.build() @@ -209,8 +199,10 @@ def _init_from_model_def_dict( comment="Model: 
preprocessing stage 2 (model_data)", ) - self._model_data.attrs["name"] = model_config.init.data["name"] + self._model_data.attrs["name"] = init_config.name + # Unlike at the build and solve phases, we store the init config overrides in the main model config. + model_config.init = init_config self.config = model_config log_time( @@ -229,14 +221,13 @@ def _init_from_model_data(self, model_data: xr.Dataset) -> None: model_data (xr.Dataset): Model dataset with input parameters as arrays and configuration stored in the dataset attributes dictionary. """ - if "_def_path" in model_data.attrs: - self._def_path = model_data.attrs.pop("_def_path") if "applied_math" in model_data.attrs: self.applied_math = preprocess.CalliopeMath.from_dict( model_data.attrs.pop("applied_math") ) if "config" in model_data.attrs: - self.config = config.CalliopeConfig(model_data.attrs.pop("config")) + self.config = config.CalliopeConfig(**model_data.attrs.pop("config")) + self.config.update(model_data.attrs.pop("config_kwarg_overrides")) self._model_data = model_data @@ -276,35 +267,26 @@ def build( comment="Model: backend build starting", ) - self.config.build.data_temp = kwargs - latest_build_config = self.config.build.data - mode = self.config.build.mode + this_build_config = self.config.update({"build": kwargs}).build + mode = this_build_config.mode if mode == "operate": if not self._model_data.attrs["allow_operate_mode"]: raise exceptions.ModelError( "Unable to run this model in operate (i.e. dispatch) mode, probably because " "there exist non-uniform timesteps (e.g. 
from time clustering)" ) - start_window_idx = self.config.build.data.pop("start_window_idx", 0) - backend_input = self._prepare_operate_mode_inputs( - start_window_idx, **self.config.build.data - ) + backend_input = self._prepare_operate_mode_inputs(this_build_config.operate) else: backend_input = self._model_data - init_math_list = [] if self.config.build.data["ignore_mode_math"] else [mode] + init_math_list = [] if this_build_config.ignore_mode_math else [mode] end_math_list = [] if add_math_dict is None else [add_math_dict] - full_math_list = ( - init_math_list + self.config.build.data["add_math"] + end_math_list - ) + full_math_list = init_math_list + this_build_config.add_math + end_math_list LOGGER.debug(f"Math preprocessing | Loading math: {full_math_list}") - model_math = preprocess.CalliopeMath(full_math_list, self._def_path) + model_math = preprocess.CalliopeMath(full_math_list, self.config.init.def_path) self.backend = backend.get_model_backend( - latest_build_config["backend"], - backend_input, - model_math, - **latest_build_config, + this_build_config, backend_input, model_math ) self.backend.add_optimisation_components() @@ -359,23 +341,27 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: else: to_drop = [] - run_mode = self.config.build.data["mode"] + kwargs["mode"] = self.config.build.applied_keyword_overrides.get( + "mode", self.config.build.mode + ) + + this_solve_config = self.config.update({"solve": kwargs}).solve self._model_data.attrs["timestamp_solve_start"] = log_time( LOGGER, self._timings, "solve_start", - comment=f"Optimisation model | starting model in {run_mode} mode.", + comment=f"Optimisation model | starting model in {this_solve_config.mode} mode.", ) - solver_config = update_then_validate_config("solve", self.config, **kwargs) - - shadow_prices = solver_config.get("shadow_prices", []) + shadow_prices = this_solve_config.shadow_prices self.backend.shadow_prices.track_constraints(shadow_prices) - if 
run_mode == "operate": - results = self._solve_operate(**solver_config) + if this_solve_config.mode == "operate": + results = self._solve_operate(**this_solve_config.model_dump()) else: - results = self.backend._solve(warmstart=warmstart, **solver_config) + results = self.backend._solve( + warmstart=warmstart, **this_solve_config.model_dump() + ) log_time( LOGGER, @@ -388,7 +374,7 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: # Add additional post-processed result variables to results if results.attrs["termination_condition"] in ["optimal", "feasible"]: results = postprocess_results.postprocess_model_results( - results, self._model_data + results, self._model_data, self.config.solve.zero_threshold ) log_time( @@ -405,7 +391,6 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: self._model_data = xr.merge( [results, self._model_data], compat="override", combine_attrs="no_conflicts" ) - self._add_model_data_methods() self._model_data.attrs["timestamp_solve_complete"] = log_time( LOGGER, @@ -440,6 +425,7 @@ def to_netcdf(self, path): saved_attrs[attr] = dict(getattr(self, attr)) else: saved_attrs[attr] = getattr(self, attr) + saved_attrs["config_kwarg_overrides"] = self.config.applied_keyword_overrides io.save_netcdf(self._model_data, path, **saved_attrs) @@ -478,28 +464,24 @@ def info(self) -> str: return "\n".join(info_strings) def _prepare_operate_mode_inputs( - self, start_window_idx: int = 0, **config_kwargs + self, operate_config: config.BuildOperate ) -> xr.Dataset: """Slice the input data to just the length of operate mode time horizon. Args: - start_window_idx (int, optional): - Set the operate `window` to start at, based on integer index. - This is used when re-initialising the backend model for shorter time horizons close to the end of the model period. - Defaults to 0. - **config_kwargs: kwargs related to operate mode configuration. 
+ operate_config (config.BuildOperate): operate mode configuration options. Returns: xr.Dataset: Slice of input data. """ - window = config_kwargs["operate_window"] - horizon = config_kwargs["operate_horizon"] self._model_data.coords["windowsteps"] = pd.date_range( self.inputs.timesteps[0].item(), self.inputs.timesteps[-1].item(), - freq=window, + freq=operate_config.window, + ) + horizonsteps = self._model_data.coords["windowsteps"] + pd.Timedelta( + operate_config.horizon ) - horizonsteps = self._model_data.coords["windowsteps"] + pd.Timedelta(horizon) # We require an offset because pandas / xarray slicing is _inclusive_ of both endpoints # where we only want it to be inclusive of the left endpoint. # Except in the last time horizon, where we want it to include the right endpoint. @@ -509,11 +491,11 @@ def _prepare_operate_mode_inputs( self._model_data.coords["horizonsteps"] = clipped_horizonsteps - self._TS_OFFSET sliced_inputs = self._model_data.sel( timesteps=slice( - self._model_data.windowsteps[start_window_idx], - self._model_data.horizonsteps[start_window_idx], + self._model_data.windowsteps[operate_config.start_window_idx], + self._model_data.horizonsteps[operate_config.start_window_idx], ) ) - if config_kwargs.get("operate_use_cap_results", False): + if operate_config.use_cap_results: to_parameterise = extract_from_schema(MODEL_SCHEMA, "x-operate-param") if not self._is_solved: raise exceptions.ModelError( @@ -536,10 +518,7 @@ def _solve_operate(self, **solver_config) -> xr.Dataset: """ if self.backend.inputs.timesteps[0] != self._model_data.timesteps[0]: LOGGER.info("Optimisation model | Resetting model to first time window.") - self.build( - force=True, - **{"mode": "operate", **self.backend.inputs.attrs["config"]["build"]}, - ) + self.build(force=True, **self.config.build.applied_keyword_overrides) LOGGER.info("Optimisation model | Running first time window.") @@ -566,11 +545,9 @@ def _solve_operate(self, **solver_config) -> xr.Dataset: "Optimisation 
model | Reaching the end of the timeseries. " "Re-building model with shorter time horizon." ) - self.build( - force=True, - start_window_idx=idx + 1, - **self.backend.inputs.attrs["config"]["build"], - ) + build_kwargs = AttrDict(self.config.build.applied_keyword_overrides) + build_kwargs.set_key("operate.start_window_idx", idx + 1) + self.build(force=True, **build_kwargs) else: self.backend._dataset.coords["timesteps"] = new_inputs.timesteps self.backend.inputs.coords["timesteps"] = new_inputs.timesteps diff --git a/src/calliope/postprocess/postprocess.py b/src/calliope/postprocess/postprocess.py index 402b928e..327b1ce2 100644 --- a/src/calliope/postprocess/postprocess.py +++ b/src/calliope/postprocess/postprocess.py @@ -11,7 +11,7 @@ def postprocess_model_results( - results: xr.Dataset, model_data: xr.Dataset + results: xr.Dataset, model_data: xr.Dataset, zero_threshold: float ) -> xr.Dataset: """Post-processing of model results. @@ -22,11 +22,11 @@ def postprocess_model_results( Args: results (xarray.Dataset): Output from the solver backend. model_data (xarray.Dataset): Calliope model data. + zero_threshold (float): Numbers below this value will be assumed to be zero Returns: xarray.Dataset: input-results dataset. """ - zero_threshold = model_data.config.solve.zero_threshold results["capacity_factor"] = capacity_factor(results, model_data) results["systemwide_capacity_factor"] = capacity_factor( results, model_data, systemwide=True diff --git a/src/calliope/preprocess/data_tables.py b/src/calliope/preprocess/data_tables.py index 83233e2b..a9e7acf2 100644 --- a/src/calliope/preprocess/data_tables.py +++ b/src/calliope/preprocess/data_tables.py @@ -54,7 +54,7 @@ def __init__( table_name: str, data_table: DataTableDict, data_table_dfs: dict[str, pd.DataFrame] | None = None, - model_definition_path: Path | None = None, + model_definition_path: Path = Path("."), ): """Load and format a data table from file / in-memory object. 
@@ -64,7 +64,7 @@ def __init__( data_table_dfs (dict[str, pd.DataFrame] | None, optional): If given, a dictionary mapping table names in `data_table` to in-memory pandas DataFrames. Defaults to None. - model_definition_path (Path | None, optional): + model_definition_path (Path, optional): If given, the path to the model definition YAML file, relative to which data table filepaths will be set. If None, relative data table filepaths will be considered relative to the current working directory. Defaults to None. diff --git a/src/calliope/preprocess/model_data.py b/src/calliope/preprocess/model_data.py index 7c6d6cc3..89b21386 100644 --- a/src/calliope/preprocess/model_data.py +++ b/src/calliope/preprocess/model_data.py @@ -15,6 +15,7 @@ from calliope import exceptions from calliope.attrdict import AttrDict +from calliope.config import Init from calliope.preprocess import data_tables, time from calliope.util.schema import MODEL_SCHEMA, validate_dict from calliope.util.tools import climb_template_tree, listify @@ -70,7 +71,7 @@ class ModelDataFactory: def __init__( self, - model_config: dict, + init_config: Init, model_definition: ModelDefinition, data_tables: list[data_tables.DataTable], attributes: dict, @@ -81,13 +82,13 @@ def __init__( This includes resampling/clustering timeseries data as necessary. Args: - model_config (dict): Model initialisation configuration (i.e., `config.init`). + init_config (Init): Model initialisation configuration (i.e., `config.init`). model_definition (ModelDefinition): Definition of model nodes and technologies, and their potential `templates`. data_tables (list[data_tables.DataTable]): Pre-loaded data tables that will be used to initialise the dataset before handling definitions given in `model_definition`. attributes (dict): Attributes to attach to the model Dataset. param_attributes (dict[str, dict]): Attributes to attach to the generated model DataArrays. 
""" - self.config: dict = model_config + self.config: Init = init_config self.model_definition: ModelDefinition = model_definition.copy() self.dataset = xr.Dataset(attrs=AttrDict(attributes)) self.tech_data_from_tables = AttrDict() @@ -244,7 +245,7 @@ def update_time_dimension_and_params(self): raise exceptions.ModelError( "Must define at least one timeseries parameter in a Calliope model." ) - time_subset = self.config.get("time_subset", None) + time_subset = self.config.time_subset if time_subset is not None: self.dataset = time.subset_timeseries(self.dataset, time_subset) self.dataset = time.add_inferred_time_params(self.dataset) @@ -252,11 +253,11 @@ def update_time_dimension_and_params(self): # By default, the model allows operate mode self.dataset.attrs["allow_operate_mode"] = 1 - if self.config["time_resample"] is not None: - self.dataset = time.resample(self.dataset, self.config["time_resample"]) - if self.config["time_cluster"] is not None: + if self.config.time_resample is not None: + self.dataset = time.resample(self.dataset, self.config.time_resample) + if self.config.time_cluster is not None: self.dataset = time.cluster( - self.dataset, self.config["time_cluster"], self.config["time_format"] + self.dataset, self.config.time_cluster, self.config.time_format ) def clean_data_from_undefined_members(self): @@ -324,7 +325,7 @@ def add_link_distances(self): self.dataset.longitude.sel(nodes=node2).item(), )["s12"] distance_array = pd.Series(distances).rename_axis(index="techs").to_xarray() - if self.config["distance_unit"] == "km": + if self.config.distance_unit == "km": distance_array /= 1000 else: LOGGER.debug( @@ -660,7 +661,7 @@ def _add_to_dataset(self, to_add: xr.Dataset, id_: str): """ to_add_numeric_dims = self._update_numeric_dims(to_add, id_) to_add_numeric_ts_dims = time.timeseries_to_datetime( - to_add_numeric_dims, self.config["time_format"], id_ + to_add_numeric_dims, self.config.time_format, id_ ) self.dataset = xr.merge( 
[to_add_numeric_ts_dims, self.dataset], diff --git a/src/calliope/preprocess/scenarios.py b/src/calliope/preprocess/scenarios.py index 473544fb..88e382a1 100644 --- a/src/calliope/preprocess/scenarios.py +++ b/src/calliope/preprocess/scenarios.py @@ -15,7 +15,6 @@ def load_scenario_overrides( model_definition: dict, scenario: str | None = None, override_dict: dict | None = None, - **kwargs, ) -> tuple[AttrDict, str]: """Apply user-defined overrides to the model definition. @@ -28,8 +27,6 @@ def load_scenario_overrides( override_dict (dict | None, optional): Overrides to apply _after_ `scenario` overrides. Defaults to None. - **kwargs: - initialisation overrides. Returns: tuple[AttrDict, str]: @@ -88,10 +85,6 @@ def load_scenario_overrides( _log_overrides(model_def_dict, model_def_with_overrides) - model_def_with_overrides.union( - AttrDict({"config.init": kwargs}), allow_override=True - ) - return (model_def_with_overrides, ";".join(applied_overrides)) diff --git a/src/calliope/util/schema.py b/src/calliope/util/schema.py index bd98cc77..361cd9a9 100644 --- a/src/calliope/util/schema.py +++ b/src/calliope/util/schema.py @@ -25,20 +25,6 @@ def reset(): importlib.reload(sys.modules[__name__]) -def update_then_validate_config( - config_key: str, config_dict: AttrDict, **update_kwargs -) -> AttrDict: - """Return an updated version of the configuration schema.""" - to_validate = deepcopy(config_dict[config_key]) - to_validate.union(AttrDict(update_kwargs), allow_override=True) - validate_dict( - {"config": {config_key: to_validate}}, - CONFIG_SCHEMA, - f"`{config_key}` configuration", - ) - return to_validate - - def update_model_schema( top_level_property: Literal["nodes", "techs", "parameters"], new_entries: dict, diff --git a/src/calliope/util/tools.py b/src/calliope/util/tools.py index dee2f6ca..3d8d4320 100644 --- a/src/calliope/util/tools.py +++ b/src/calliope/util/tools.py @@ -15,7 +15,7 @@ T = TypeVar("T") -def relative_path(base_path_file, path) -> Path: +def 
relative_path(base_path_file: str | Path, path: str | Path) -> Path: """Path standardization. If ``path`` is not absolute, it is interpreted as relative to the @@ -23,7 +23,7 @@ def relative_path(base_path_file, path) -> Path: """ # Check if base_path_file is a string because it might be an AttrDict path = Path(path) - if path.is_absolute() or base_path_file is None: + if path.is_absolute(): return path else: base_path_file = Path(base_path_file) diff --git a/tests/common/util.py b/tests/common/util.py index 8ae70da8..94f90dc2 100644 --- a/tests/common/util.py +++ b/tests/common/util.py @@ -95,9 +95,7 @@ def build_lp( math (dict | None, optional): All constraint/global expression/objective math to apply. Defaults to None. backend_name (Literal["pyomo"], optional): Backend to use to create the LP file. Defaults to "pyomo". """ - math = calliope.preprocess.CalliopeMath( - ["plan", *model.config.build.get("add_math", [])] - ) + math = calliope.preprocess.CalliopeMath(["plan", *model.config.build.add_math]) math_to_add = calliope.AttrDict() if isinstance(math_data, dict): diff --git a/tests/conftest.py b/tests/conftest.py index 3d4694c5..0334d0b4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,10 +5,11 @@ import pytest import xarray as xr +from calliope import config from calliope.attrdict import AttrDict from calliope.backend import latex_backend_model, pyomo_backend_model from calliope.preprocess import CalliopeMath -from calliope.util.schema import CONFIG_SCHEMA, MODEL_SCHEMA, extract_from_schema +from calliope.util.schema import MODEL_SCHEMA, extract_from_schema from .common.util import build_test_model as build_model @@ -33,7 +34,7 @@ def foreach(request): @pytest.fixture(scope="session") def config_defaults(): - return AttrDict(extract_from_schema(CONFIG_SCHEMA, "default")) + return AttrDict(config.CalliopeConfig().model_dump()) @pytest.fixture(scope="session") diff --git a/tests/test_core_model.py b/tests/test_core_model.py index e16ebfa4..ddd97800 
100644 --- a/tests/test_core_model.py +++ b/tests/test_core_model.py @@ -9,7 +9,6 @@ import calliope.preprocess from .common.util import build_test_model as build_model -from .common.util import check_error_or_warning LOGGER = "calliope.model" @@ -32,40 +31,6 @@ def test_info(self, national_scale_example): def test_info_simple_model(self, simple_supply): simple_supply.info() - def test_update_observed_dict(self, national_scale_example): - national_scale_example.config.build["backend"] = "foo" - assert national_scale_example._model_data.attrs["config"].build.backend == "foo" - - def test_add_observed_dict_from_model_data( - self, national_scale_example, dict_to_add - ): - national_scale_example._model_data.attrs["foo"] = dict_to_add - national_scale_example._add_observed_dict("foo") - assert national_scale_example.foo == dict_to_add - assert national_scale_example._model_data.attrs["foo"] == dict_to_add - - def test_add_observed_dict_from_dict(self, national_scale_example, dict_to_add): - national_scale_example._add_observed_dict("bar", dict_to_add) - assert national_scale_example.bar == dict_to_add - assert national_scale_example._model_data.attrs["bar"] == dict_to_add - - def test_add_observed_dict_not_available(self, national_scale_example): - with pytest.raises(calliope.exceptions.ModelError) as excinfo: - national_scale_example._add_observed_dict("baz") - assert check_error_or_warning( - excinfo, - "Expected the model property `baz` to be a dictionary attribute of the model dataset", - ) - assert not hasattr(national_scale_example, "baz") - - def test_add_observed_dict_not_dict(self, national_scale_example): - with pytest.raises(TypeError) as excinfo: - national_scale_example._add_observed_dict("baz", "bar") - assert check_error_or_warning( - excinfo, - "Attempted to add dictionary property `baz` to model, but received argument of type `str`", - ) - class TestOperateMode: @contextmanager @@ -127,9 +92,7 @@ def rerun_operate_log(self, request, 
operate_model_and_log): def test_backend_build_mode(self, operate_model_and_log): """Verify that we have run in operate mode""" operate_model, _ = operate_model_and_log - assert ( - operate_model.backend.inputs.attrs["config"]["build"]["mode"] == "operate" - ) + assert operate_model.backend.config.mode == "operate" def test_operate_mode_success(self, operate_model_and_log): """Solving in operate mode should lead to an optimal solution.""" @@ -153,8 +116,8 @@ def test_reset_model_window(self, rerun_operate_log): def test_end_of_horizon(self, operate_model_and_log): """Check that increasingly shorter time horizons are logged as model rebuilds.""" operate_model, log = operate_model_and_log - config = operate_model.backend.inputs.attrs["config"]["build"] - if config["operate_window"] != config["operate_horizon"]: + config = operate_model.backend.config.operate + if config.operate_window != config.operate_horizon: assert "Reaching the end of the timeseries." in log else: assert "Reaching the end of the timeseries." 
not in log diff --git a/tests/test_preprocess_model_data.py b/tests/test_preprocess_model_data.py index 48bc519c..e3208e1a 100644 --- a/tests/test_preprocess_model_data.py +++ b/tests/test_preprocess_model_data.py @@ -202,10 +202,14 @@ def test_add_link_distances_missing_distance( @pytest.mark.parametrize(("unit", "expected"), [("m", 343834), ("km", 343.834)]) def test_add_link_distances_no_da( - self, my_caplog, model_data_factory_w_params: ModelDataFactory, unit, expected + self, + mocker, + my_caplog, + model_data_factory_w_params: ModelDataFactory, + unit, + expected, ): - _default_distance_unit = model_data_factory_w_params.config["distance_unit"] - model_data_factory_w_params.config["distance_unit"] = unit + mocker.patch.object(ModelDataFactory, "config.distance_unit", return_value=unit) model_data_factory_w_params.clean_data_from_undefined_members() model_data_factory_w_params.dataset["latitude"] = ( pd.Series({"A": 51.507222, "B": 48.8567}) @@ -220,7 +224,6 @@ def test_add_link_distances_no_da( del model_data_factory_w_params.dataset["distance"] model_data_factory_w_params.add_link_distances() - model_data_factory_w_params.config["distance_unit"] = _default_distance_unit assert "Link distance matrix automatically computed" in my_caplog.text assert ( model_data_factory_w_params.dataset["distance"].dropna("techs") From 4f8168443e2e2638ac29742312a95362f2ed51f7 Mon Sep 17 00:00:00 2001 From: Bryn Pickering <17178478+brynpickering@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:15:36 +0000 Subject: [PATCH 03/17] Minor cleanup --- docs/hooks/generate_readable_schema.py | 4 +++- requirements/base.txt | 3 ++- src/calliope/config.py | 8 ++++++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/docs/hooks/generate_readable_schema.py b/docs/hooks/generate_readable_schema.py index 8b799265..7bcfa205 100644 --- a/docs/hooks/generate_readable_schema.py +++ b/docs/hooks/generate_readable_schema.py @@ -20,7 +20,9 @@ TEMPDIR = tempfile.TemporaryDirectory() 
SCHEMAS = { - "config_schema": AttrDict.from_yaml(config.CalliopeConfig().model_yaml_schema()), + "config_schema": AttrDict.from_yaml_string( + config.CalliopeConfig().model_yaml_schema() + ), "model_schema": schema.MODEL_SCHEMA, "math_schema": schema.MATH_SCHEMA, "data_table_schema": schema.DATA_TABLE_SCHEMA, diff --git a/requirements/base.txt b/requirements/base.txt index 2bf5f664..6305e13d 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -4,6 +4,7 @@ geographiclib >= 2, < 3 ipdb >= 0.13, < 0.14 ipykernel < 7 jinja2 >= 3, < 4 +jsonref >= 1.1, < 2 jsonschema >= 4, < 5 natsort >= 8, < 9 netcdf4 >= 1.2, < 1.7 @@ -13,4 +14,4 @@ pyomo >= 6.5, < 6.7.2 pyparsing >= 3.0, < 3.1 ruamel.yaml >= 0.18, < 0.19 typing-extensions >= 4, < 5 -xarray >= 2024.1, < 2024.4 \ No newline at end of file +xarray >= 2024.1, < 2024.4 diff --git a/src/calliope/config.py b/src/calliope/config.py index 79ae5941..e07ee2cb 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -97,8 +97,12 @@ def model_yaml_schema(self, filepath: str | Path | None = None) -> None | str: Returns: None | str: If `filepath` is given, returns None. Otherwise, returns the YAML string. """ - schema_dict = jsonref.replace_refs(self.model_json_schema()) - return AttrDict(schema_dict).to_yaml(filepath) + # By default, the schema uses $ref/$def cross-referencing for each pydantic model class, + # but this isn't very readable when rendered in our documentation. 
+ # So, we resolve references and then delete all the `$defs` + schema_dict = AttrDict(jsonref.replace_refs(self.model_json_schema())) + schema_dict.del_key("$defs") + return schema_dict.to_yaml(filepath) @property def applied_keyword_overrides(self) -> dict: From e35ee0eafde6f11ff8800ef63a0e2f65bb6cbdcc Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Wed, 20 Nov 2024 17:24:09 +0100 Subject: [PATCH 04/17] Add config obj: simplification suggestions (#711) * Removed mode redundancy to simplify the configuration * Simplify config schema extraction --- docs/hooks/generate_readable_schema.py | 6 +-- requirements/base.txt | 1 + src/calliope/attrdict.py | 20 +++---- src/calliope/backend/where_parser.py | 3 +- src/calliope/config.py | 74 +++++--------------------- src/calliope/model.py | 30 +++++------ 6 files changed, 39 insertions(+), 95 deletions(-) diff --git a/docs/hooks/generate_readable_schema.py b/docs/hooks/generate_readable_schema.py index 7bcfa205..be72c513 100644 --- a/docs/hooks/generate_readable_schema.py +++ b/docs/hooks/generate_readable_schema.py @@ -14,15 +14,13 @@ import jsonschema2md from mkdocs.structure.files import File -from calliope import AttrDict, config +from calliope import config from calliope.util import schema TEMPDIR = tempfile.TemporaryDirectory() SCHEMAS = { - "config_schema": AttrDict.from_yaml_string( - config.CalliopeConfig().model_yaml_schema() - ), + "config_schema": config.CalliopeConfig().model_no_ref_schema(), "model_schema": schema.MODEL_SCHEMA, "math_schema": schema.MATH_SCHEMA, "data_table_schema": schema.DATA_TABLE_SCHEMA, diff --git a/requirements/base.txt b/requirements/base.txt index 6305e13d..65e0713e 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -15,3 +15,4 @@ pyparsing >= 3.0, < 3.1 ruamel.yaml >= 0.18, < 0.19 typing-extensions >= 4, < 5 xarray >= 2024.1, < 2024.4 +pydantic >= 2.9.2 diff --git a/src/calliope/attrdict.py b/src/calliope/attrdict.py 
index f17cf0ef..3e12e402 100644 --- a/src/calliope/attrdict.py +++ b/src/calliope/attrdict.py @@ -329,12 +329,8 @@ def as_dict_flat(self): d[k] = self.get_key(k) return d - def to_yaml(self, path=None): - """Conversion to YAML. - - Saves the AttrDict to the ``path`` as a YAML file or returns a YAML string - if ``path`` is None. - """ + def to_yaml(self, path: str | None = None) -> str: + """Return a serialised YAML string.""" result = self.copy() yaml_ = ruamel_yaml.YAML() yaml_.indent = 2 @@ -359,13 +355,13 @@ def to_yaml(self, path=None): # handle multi-line strings. walk_tree(result) - if path is not None: + stream = io.StringIO() + yaml_.dump(result, stream) + yaml_str = stream.getvalue() + if path: with open(path, "w") as f: - yaml_.dump(result, f) - else: - stream = io.StringIO() - yaml_.dump(result, stream) - return stream.getvalue() + f.write(yaml_str) + return yaml_str def keys_nested(self, subkeys_as="list"): """Returns all keys in the AttrDict, including nested keys. diff --git a/src/calliope/backend/where_parser.py b/src/calliope/backend/where_parser.py index 06f782f6..ad020958 100644 --- a/src/calliope/backend/where_parser.py +++ b/src/calliope/backend/where_parser.py @@ -13,16 +13,15 @@ import xarray as xr from typing_extensions import NotRequired, TypedDict +from calliope import config from calliope.backend import expression_parser from calliope.exceptions import BackendError if TYPE_CHECKING: - from calliope import config from calliope.backend.backend_model import BackendModel pp.ParserElement.enablePackrat() - BOOLEANTYPE = np.bool_ | np.typing.NDArray[np.bool_] diff --git a/src/calliope/config.py b/src/calliope/config.py index e07ee2cb..68af4c11 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -5,7 +5,7 @@ from collections.abc import Hashable from datetime import datetime from pathlib import Path -from typing import Annotated, Literal, Self, TypeVar, get_args, overload +from typing import Annotated, Literal, Self, TypeVar import 
jsonref from pydantic import AfterValidator, BaseModel, Field, model_validator @@ -82,27 +82,16 @@ def update(self, update_dict: dict, deep: bool = False) -> Self: self._kwargs = update_dict return updated - @overload - def model_yaml_schema(self, filepath: str | Path) -> None: ... - - @overload - def model_yaml_schema(self, filepath: None = None) -> str: ... - - def model_yaml_schema(self, filepath: str | Path | None = None) -> None | str: - """Generate a YAML schema for the class. - - Args: - filepath (str | Path | None, optional): If given, save schema to given path. Defaults to None. + def model_no_ref_schema(self) -> AttrDict: + """Generate an AttrDict with the schema replacing $ref/$def for better readability. Returns: - None | str: If `filepath` is given, returns None. Otherwise, returns the YAML string. + AttrDict: class schema. """ - # By default, the schema uses $ref/$def cross-referencing for each pydantic model class, - # but this isn't very readable when rendered in our documentation. 
- # So, we resolve references and then delete all the `$defs` - schema_dict = AttrDict(jsonref.replace_refs(self.model_json_schema())) + schema_dict = AttrDict(super().model_json_schema()) + schema_dict = AttrDict(jsonref.replace_refs(schema_dict)) schema_dict.del_key("$defs") - return schema_dict.to_yaml(filepath) + return schema_dict @property def applied_keyword_overrides(self) -> dict: @@ -114,21 +103,6 @@ def applied_keyword_overrides(self) -> dict: return self._kwargs -class ModeBaseModel(ConfigBaseModel): - """Mode-specific configuration, which will be hidden from the string representation of the model if that mode is not activated.""" - - mode: MODES_T = Field(default="plan") - """Mode in which to run the optimisation.""" - - @model_validator(mode="after") - def update_repr(self) -> Self: - """Hide config from model string representation if mode is not activated.""" - for key, val in self.model_fields.items(): - if key in get_args(MODES_T): - val.repr = self.mode == key - return self - - class Init(ConfigBaseModel): """All configuration options used when initialising a Calliope model.""" @@ -221,7 +195,7 @@ class BuildOperate(ConfigBaseModel): """Which time window to build. This is used to track the window when re-building the model part way through solving in `operate` mode.""" -class Build(ModeBaseModel): +class Build(ConfigBaseModel): """Base configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" model_config = { @@ -229,6 +203,10 @@ class Build(ModeBaseModel): "extra": "allow", "revalidate_instances": "always", } + + mode: MODES_T = Field(default="plan") + """Mode in which to run the optimisation.""" + add_math: UniqueList[str] = Field(default=[]) """ List of references to files which contain additional mathematical formulations to be applied on top of or instead of the base mode math. 
@@ -301,14 +279,13 @@ def require_save_per_spore_path(self) -> Self: return self -class Solve(ModeBaseModel): +class Solve(ConfigBaseModel): """Base configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" model_config = { "title": "solve", "extra": "forbid", "revalidate_instances": "always", - "json_schema_extra": hide_from_schema(["mode"]), } save_logs: Path | None = Field(default=None) @@ -342,28 +319,3 @@ class CalliopeConfig(ConfigBaseModel): init: Init = Init() build: Build = Build() solve: Solve = Solve() - - @model_validator(mode="before") - @classmethod - def update_solve_mode(cls, data): - """Solve mode should match build mode.""" - data["solve"]["mode"] = data["build"]["mode"] - return data - - def update(self, update_dict: dict, deep: bool = False) -> Self: - """Return a new iteration of the model with updated fields. - - Updates are validated and stored in the parent class in the `_kwargs` key. - - Args: - update_dict (dict): Dictionary with which to update the base model. - deep (bool, optional): Set to True to make a deep copy of the model. Defaults to False. - - Returns: - BaseModel: New model instance. 
- """ - update_dict_temp = AttrDict(update_dict) - if update_dict_temp.get_key("build.mode", None) is not None: - update_dict_temp.set_key("solve.mode", update_dict_temp["build"]["mode"]) - updated = super().update(update_dict_temp.as_dict(), deep=deep) - return updated diff --git a/src/calliope/model.py b/src/calliope/model.py index e6088c21..bc4eb938 100644 --- a/src/calliope/model.py +++ b/src/calliope/model.py @@ -267,26 +267,26 @@ def build( comment="Model: backend build starting", ) - this_build_config = self.config.update({"build": kwargs}).build - mode = this_build_config.mode + build_config = self.config.update({"build": kwargs}).build + mode = build_config.mode if mode == "operate": if not self._model_data.attrs["allow_operate_mode"]: raise exceptions.ModelError( "Unable to run this model in operate (i.e. dispatch) mode, probably because " "there exist non-uniform timesteps (e.g. from time clustering)" ) - backend_input = self._prepare_operate_mode_inputs(this_build_config.operate) + backend_input = self._prepare_operate_mode_inputs(build_config.operate) else: backend_input = self._model_data - init_math_list = [] if this_build_config.ignore_mode_math else [mode] + init_math_list = [] if build_config.ignore_mode_math else [mode] end_math_list = [] if add_math_dict is None else [add_math_dict] - full_math_list = init_math_list + this_build_config.add_math + end_math_list + full_math_list = init_math_list + build_config.add_math + end_math_list LOGGER.debug(f"Math preprocessing | Loading math: {full_math_list}") model_math = preprocess.CalliopeMath(full_math_list, self.config.init.def_path) self.backend = backend.get_model_backend( - this_build_config, backend_input, model_math + build_config, backend_input, model_math ) self.backend.add_optimisation_components() @@ -341,26 +341,24 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: else: to_drop = [] - kwargs["mode"] = self.config.build.applied_keyword_overrides.get( - 
"mode", self.config.build.mode - ) - - this_solve_config = self.config.update({"solve": kwargs}).solve + solve_config = self.config.update({"solve": kwargs}).solve + # FIXME: find a way to avoid overcomplicated passing of settings between modes + mode = self.config.update(self.config.applied_keyword_overrides).build.mode self._model_data.attrs["timestamp_solve_start"] = log_time( LOGGER, self._timings, "solve_start", - comment=f"Optimisation model | starting model in {this_solve_config.mode} mode.", + comment=f"Optimisation model | starting model in {mode} mode.", ) - shadow_prices = this_solve_config.shadow_prices + shadow_prices = solve_config.shadow_prices self.backend.shadow_prices.track_constraints(shadow_prices) - if this_solve_config.mode == "operate": - results = self._solve_operate(**this_solve_config.model_dump()) + if mode == "operate": + results = self._solve_operate(**solve_config.model_dump()) else: results = self.backend._solve( - warmstart=warmstart, **this_solve_config.model_dump() + warmstart=warmstart, **solve_config.model_dump() ) log_time( From a40a03df5674ca452b420badda62797a281350d0 Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Thu, 12 Dec 2024 11:14:50 +0100 Subject: [PATCH 05/17] Fix 'where' parsing for configuration: add dot attribute tools method. 
--- src/calliope/backend/where_parser.py | 5 +++- src/calliope/config.py | 2 +- src/calliope/util/tools.py | 24 +++++++++++++++ tests/conftest.py | 26 +++++----------- tests/test_backend_where_parser.py | 15 +++++++++- tests/test_preprocess_model_data.py | 11 ++++--- tests/test_tools.py | 45 ++++++++++++++++++++++++++++ 7 files changed, 100 insertions(+), 28 deletions(-) create mode 100644 tests/test_tools.py diff --git a/src/calliope/backend/where_parser.py b/src/calliope/backend/where_parser.py index ad020958..9bb81eac 100644 --- a/src/calliope/backend/where_parser.py +++ b/src/calliope/backend/where_parser.py @@ -16,6 +16,7 @@ from calliope import config from calliope.backend import expression_parser from calliope.exceptions import BackendError +from calliope.util import tools if TYPE_CHECKING: from calliope.backend.backend_model import BackendModel @@ -119,7 +120,9 @@ def as_math_string(self) -> str: # noqa: D102, override return rf"\text{{config.{self.config_option}}}" def as_array(self) -> xr.DataArray: # noqa: D102, override - config_val = getattr(self.eval_attrs["build_config"], self.config_option) + config_val = tools.get_dot_attr( + self.eval_attrs["build_config"], self.config_option + ) if not isinstance(config_val, int | float | str | bool | np.bool_): raise BackendError( diff --git a/src/calliope/config.py b/src/calliope/config.py index 3550792d..fb77498a 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -258,7 +258,7 @@ class SolveSpores(ConfigBaseModel): score_cost_class: str = Field(default="score") """SPORES mode cost class to vary between iterations after the initial base run.""" - slack_cost_group: str = Field(default=None) + slack_cost_group: str = Field(default="monetary") """SPORES mode cost class to keep below the given `slack` (usually "monetary").""" save_per_spore: bool = Field(default=False) diff --git a/src/calliope/util/tools.py b/src/calliope/util/tools.py index f63ba627..f4c99b32 100644 --- 
a/src/calliope/util/tools.py +++ b/src/calliope/util/tools.py @@ -47,3 +47,27 @@ def listify(var: Any) -> list: else: var = [var] return var + + +def get_dot_attr(var: Any, attr: str) -> Any: + """Get nested attributes in dot notation. + + Works for nested objects (e.g., dictionaries, pydantic models). + + Args: + var (Any): Object to extract nested attributes from. + attr (str): Name of the attribute (e.g., "foo.bar"). + + Returns: + Any: Value at the given location. + """ + levels = attr.split(".", 1) + + if isinstance(var, dict): + value = var[levels[0]] + else: + value = getattr(var, levels[0]) + + if len(levels) > 1: + value = get_dot_attr(value, levels[1]) + return value diff --git a/tests/conftest.py b/tests/conftest.py index 0334d0b4..eee0935e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -33,8 +33,8 @@ def foreach(request): @pytest.fixture(scope="session") -def config_defaults(): - return AttrDict(config.CalliopeConfig().model_dump()) +def default_config(): + return config.CalliopeConfig() @pytest.fixture(scope="session") @@ -173,7 +173,7 @@ def dummy_model_math(): @pytest.fixture(scope="module") -def dummy_model_data(config_defaults, model_defaults): +def dummy_model_data(model_defaults): coords = { dim: ( ["foo", "bar"] @@ -280,20 +280,6 @@ def dummy_model_data(config_defaults, model_defaults): for param in model_data.data_vars.values(): param.attrs["is_result"] = 0 - dummy_config = AttrDict( - { - "build": { - "foo": True, - "FOO": "baz", - "foo1": np.inf, - "bar": {"foobar": "baz"}, - "a_b": 0, - "b_a": [1, 2], - } - } - ) - dummy_config.union(config_defaults) - model_data.attrs["config"] = dummy_config model_data.attrs["defaults"] = AttrDict( { @@ -345,8 +331,10 @@ def populate_backend_model(backend): @pytest.fixture(scope="module") -def dummy_pyomo_backend_model(dummy_model_data, dummy_model_math): - backend = pyomo_backend_model.PyomoBackendModel(dummy_model_data, dummy_model_math) +def dummy_pyomo_backend_model(dummy_model_data, 
dummy_model_math, default_config): + backend = pyomo_backend_model.PyomoBackendModel( + dummy_model_data, dummy_model_math, default_config.build + ) return populate_backend_model(backend) diff --git a/tests/test_backend_where_parser.py b/tests/test_backend_where_parser.py index def6f621..db33c984 100644 --- a/tests/test_backend_where_parser.py +++ b/tests/test_backend_where_parser.py @@ -83,7 +83,19 @@ def where(bool_operand, helper_function, data_var, comparison, subset): @pytest.fixture -def eval_kwargs(dummy_pyomo_backend_model): +def dummy_build_config(): + return { + "foo": True, + "FOO": "baz", + "foo1": np.inf, + "bar": {"foobar": "baz"}, + "a_b": 0, + "b_a": [1, 2], + } + + +@pytest.fixture +def eval_kwargs(dummy_pyomo_backend_model, dummy_build_config): return { "input_data": dummy_pyomo_backend_model.inputs, "backend_interface": dummy_pyomo_backend_model, @@ -91,6 +103,7 @@ def eval_kwargs(dummy_pyomo_backend_model): "equation_name": "foo", "return_type": "array", "references": set(), + "build_config": dummy_build_config, } diff --git a/tests/test_preprocess_model_data.py b/tests/test_preprocess_model_data.py index 39514f9a..5c5a9824 100644 --- a/tests/test_preprocess_model_data.py +++ b/tests/test_preprocess_model_data.py @@ -20,21 +20,20 @@ def model_def(): model_def_override, _ = prepare_model_definition( model_def_path, scenario="simple_supply,empty_tech_node" ) - return model_def_override, model_def_path + return model_def_override @pytest.fixture -def init_config(config_defaults, model_def): - model_def_dict, _ = model_def - config_defaults.union(model_def_dict.pop("config"), allow_override=True) +def init_config(default_config, model_def): + config_defaults = AttrDict(default_config.model_dump()) + config_defaults.union(model_def.pop("config"), allow_override=True) return config_defaults["init"] @pytest.fixture def model_data_factory(model_def, init_config, model_defaults): - model_def_dict, _ = model_def return ModelDataFactory( - init_config, 
model_def_dict, [], {"foo": "bar"}, {"default": model_defaults} + init_config, model_def, [], {"foo": "bar"}, {"default": model_defaults} ) diff --git a/tests/test_tools.py b/tests/test_tools.py new file mode 100644 index 00000000..6cce8191 --- /dev/null +++ b/tests/test_tools.py @@ -0,0 +1,45 @@ +import pytest + +from calliope.util import tools + + +class TestListify: + @pytest.mark.parametrize( + ("var", "expected"), [(True, [True]), (1, [1]), ("foobar", ["foobar"])] + ) + def test_non_iterable(self, var, expected): + """Listification should work for any kind of object.""" + assert tools.listify(var) == expected + + @pytest.mark.parametrize( + ("var", "expected"), + [([1, 2, 3, 4], [1, 2, 3, 4]), ({"foo": "bar", "bar": "foo"}, ["foo", "bar"])], + ) + def test_iterable(self, var, expected): + """Iterable objects should be returned as lists.""" + assert tools.listify(var) == expected + + @pytest.mark.parametrize(("var", "expected"), [([], []), (None, []), ({}, [])]) + def test_empty(self, var, expected): + """Empty iterables, None and similars should be returned as an empty list.""" + assert tools.listify(var) == expected + + +@pytest.mark.parametrize( + ("attr", "expected"), + [ + ("init.time_format", "ISO8601"), + ("build.backend", "pyomo"), + ("build.operate.window", "24h"), + ("build.pre_validate_math_strings", True), + ], +) +class TestDotAttr: + def test_pydantic_access(self, default_config, attr, expected): + """Dot access of pydantic attributes should be possible.""" + assert tools.get_dot_attr(default_config, attr) == expected + + def test_dict_access(self, default_config, attr, expected): + """Dot access of dictionary items should be possible.""" + config_dict = default_config.model_dump() + assert tools.get_dot_attr(config_dict, attr) == expected From 66e3881c21bcfc66f7ed216c1dd9653aa0d2aff1 Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Thu, 12 Dec 2024 17:44:11 +0100 Subject: [PATCH 06/17] Fix 
tests: backends, math docs, data preprocessing --- src/calliope/backend/backend_model.py | 2 +- src/calliope/backend/gurobi_backend_model.py | 2 +- src/calliope/backend/latex_backend_model.py | 7 +- src/calliope/backend/pyomo_backend_model.py | 2 +- .../postprocess/math_documentation.py | 2 +- tests/conftest.py | 10 ++- tests/test_backend_latex_backend.py | 74 +++++++++---------- tests/test_backend_parsing.py | 8 +- tests/test_backend_pyomo.py | 7 +- tests/test_preprocess_model_data.py | 18 ++--- 10 files changed, 66 insertions(+), 66 deletions(-) diff --git a/src/calliope/backend/backend_model.py b/src/calliope/backend/backend_model.py index 4d0de1c3..f556c64e 100644 --- a/src/calliope/backend/backend_model.py +++ b/src/calliope/backend/backend_model.py @@ -606,8 +606,8 @@ def __init__( self, inputs: xr.Dataset, math: CalliopeMath, - instance: T, build_config: config.Build, + instance: T, ) -> None: """Abstract base class to build backend models that interface with solvers. diff --git a/src/calliope/backend/gurobi_backend_model.py b/src/calliope/backend/gurobi_backend_model.py index ab02d9d4..6dbbde3b 100644 --- a/src/calliope/backend/gurobi_backend_model.py +++ b/src/calliope/backend/gurobi_backend_model.py @@ -56,7 +56,7 @@ def __init__( raise ImportError( "Install the `gurobipy` package to build the optimisation problem with the Gurobi backend." 
) - super().__init__(inputs, math, gurobipy.Model(), build_config) + super().__init__(inputs, math, build_config, gurobipy.Model()) self._instance: gurobipy.Model self.shadow_prices = GurobiShadowPrices(self) diff --git a/src/calliope/backend/latex_backend_model.py b/src/calliope/backend/latex_backend_model.py index c33229b0..5b901cc3 100644 --- a/src/calliope/backend/latex_backend_model.py +++ b/src/calliope/backend/latex_backend_model.py @@ -12,6 +12,7 @@ import pandas as pd import xarray as xr +from calliope import config from calliope.backend import backend_model, parsing from calliope.exceptions import ModelError from calliope.preprocess import CalliopeMath @@ -305,19 +306,19 @@ def __init__( self, inputs: xr.Dataset, math: CalliopeMath, + build_config: config.Build, include: Literal["all", "valid"] = "all", - **kwargs, ) -> None: """Interface to build a string representation of the mathematical formulation using LaTeX math notation. Args: inputs (xr.Dataset): model data. math (CalliopeMath): Calliope math. + build_config: Build configuration options. include (Literal["all", "valid"], optional): Defines whether to include all possible math equations ("all") or only those for which at least one index item in the "where" string is valid ("valid"). Defaults to "all". - **kwargs: for the backend model generator. """ - super().__init__(inputs, math, **kwargs) + super().__init__(inputs, math, build_config) self.include = include def add_parameter( # noqa: D102, override diff --git a/src/calliope/backend/pyomo_backend_model.py b/src/calliope/backend/pyomo_backend_model.py index 46ea3b32..a0caadfc 100644 --- a/src/calliope/backend/pyomo_backend_model.py +++ b/src/calliope/backend/pyomo_backend_model.py @@ -69,7 +69,7 @@ def __init__( math (CalliopeMath): Calliope math. build_config: Build configuration options. 
""" - super().__init__(inputs, math, pmo.block(), build_config) + super().__init__(inputs, math, build_config, pmo.block()) self._instance.parameters = pmo.parameter_dict() self._instance.variables = pmo.variable_dict() diff --git a/src/calliope/postprocess/math_documentation.py b/src/calliope/postprocess/math_documentation.py index ebfb3193..e1210f64 100644 --- a/src/calliope/postprocess/math_documentation.py +++ b/src/calliope/postprocess/math_documentation.py @@ -30,7 +30,7 @@ def __init__( """ self.name: str = model.name + " math" self.backend: LatexBackendModel = LatexBackendModel( - model._model_data, model.applied_math, include, **kwargs + model._model_data, model.applied_math, model.config.build, include ) self.backend.add_optimisation_components() diff --git a/tests/conftest.py b/tests/conftest.py index eee0935e..5c3a9e8b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -339,14 +339,16 @@ def dummy_pyomo_backend_model(dummy_model_data, dummy_model_math, default_config @pytest.fixture(scope="module") -def dummy_latex_backend_model(dummy_model_data, dummy_model_math): - backend = latex_backend_model.LatexBackendModel(dummy_model_data, dummy_model_math) +def dummy_latex_backend_model(dummy_model_data, dummy_model_math, default_config): + backend = latex_backend_model.LatexBackendModel( + dummy_model_data, dummy_model_math, default_config.build + ) return populate_backend_model(backend) @pytest.fixture(scope="class") -def valid_latex_backend(dummy_model_data, dummy_model_math): +def valid_latex_backend(dummy_model_data, dummy_model_math, default_config): backend = latex_backend_model.LatexBackendModel( - dummy_model_data, dummy_model_math, include="valid" + dummy_model_data, dummy_model_math, default_config.build, include="valid" ) return populate_backend_model(backend) diff --git a/tests/test_backend_latex_backend.py b/tests/test_backend_latex_backend.py index e28b0830..f55da6a3 100644 --- a/tests/test_backend_latex_backend.py +++ 
b/tests/test_backend_latex_backend.py @@ -9,6 +9,14 @@ from .common.util import check_error_or_warning +@pytest.fixture +def temp_dummy_latex_backend_model(dummy_model_data, dummy_model_math, default_config): + """Function scoped model definition to avoid cross-test contamination.""" + return latex_backend_model.LatexBackendModel( + dummy_model_data, dummy_model_math, default_config.build + ) + + class TestLatexBackendModel: def test_inputs(self, dummy_latex_backend_model, dummy_model_data): assert dummy_latex_backend_model.inputs.equals(dummy_model_data) @@ -406,14 +414,9 @@ def test_create_obj_list(self, dummy_latex_backend_model): ), ], ) - def test_generate_math_doc( - self, dummy_model_data, dummy_model_math, format, expected - ): - backend_model = latex_backend_model.LatexBackendModel( - dummy_model_data, dummy_model_math - ) - backend_model._add_all_inputs_as_parameters() - backend_model.add_global_expression( + def test_generate_math_doc(self, temp_dummy_latex_backend_model, format, expected): + temp_dummy_latex_backend_model._add_all_inputs_as_parameters() + temp_dummy_latex_backend_model.add_global_expression( "expr", { "equations": [{"expression": "no_dims + 2"}], @@ -421,14 +424,11 @@ def test_generate_math_doc( "default": 0, }, ) - doc = backend_model.generate_math_doc(format=format) + doc = temp_dummy_latex_backend_model.generate_math_doc(format=format) assert doc == expected - def test_generate_math_doc_no_params(self, dummy_model_data, dummy_model_math): - backend_model = latex_backend_model.LatexBackendModel( - dummy_model_data, dummy_model_math - ) - backend_model.add_global_expression( + def test_generate_math_doc_no_params(self, temp_dummy_latex_backend_model): + temp_dummy_latex_backend_model.add_global_expression( "expr", { "equations": [{"expression": "1 + 2"}], @@ -436,7 +436,7 @@ def test_generate_math_doc_no_params(self, dummy_model_data, dummy_model_math): "default": 0, }, ) - doc = backend_model.generate_math_doc(format="md") + doc = 
temp_dummy_latex_backend_model.generate_math_doc(format="md") assert doc == textwrap.dedent( r""" @@ -457,12 +457,9 @@ def test_generate_math_doc_no_params(self, dummy_model_data, dummy_model_math): ) def test_generate_math_doc_mkdocs_features_tabs( - self, dummy_model_data, dummy_model_math + self, temp_dummy_latex_backend_model ): - backend_model = latex_backend_model.LatexBackendModel( - dummy_model_data, dummy_model_math - ) - backend_model.add_global_expression( + temp_dummy_latex_backend_model.add_global_expression( "expr", { "equations": [{"expression": "1 + 2"}], @@ -470,7 +467,9 @@ def test_generate_math_doc_mkdocs_features_tabs( "default": 0, }, ) - doc = backend_model.generate_math_doc(format="md", mkdocs_features=True) + doc = temp_dummy_latex_backend_model.generate_math_doc( + format="md", mkdocs_features=True + ) assert doc == textwrap.dedent( r""" @@ -500,13 +499,10 @@ def test_generate_math_doc_mkdocs_features_tabs( ) def test_generate_math_doc_mkdocs_features_admonition( - self, dummy_model_data, dummy_model_math + self, temp_dummy_latex_backend_model ): - backend_model = latex_backend_model.LatexBackendModel( - dummy_model_data, dummy_model_math - ) - backend_model._add_all_inputs_as_parameters() - backend_model.add_global_expression( + temp_dummy_latex_backend_model._add_all_inputs_as_parameters() + temp_dummy_latex_backend_model.add_global_expression( "expr", { "equations": [{"expression": "no_dims + 1"}], @@ -514,7 +510,9 @@ def test_generate_math_doc_mkdocs_features_admonition( "default": 0, }, ) - doc = backend_model.generate_math_doc(format="md", mkdocs_features=True) + doc = temp_dummy_latex_backend_model.generate_math_doc( + format="md", mkdocs_features=True + ) assert doc == textwrap.dedent( r""" @@ -558,13 +556,12 @@ def test_generate_math_doc_mkdocs_features_admonition( ) def test_generate_math_doc_mkdocs_features_not_in_md( - self, dummy_model_data, dummy_model_math + self, temp_dummy_latex_backend_model ): - backend_model = 
latex_backend_model.LatexBackendModel( - dummy_model_data, dummy_model_math - ) with pytest.raises(exceptions.ModelError) as excinfo: - backend_model.generate_math_doc(format="rst", mkdocs_features=True) + temp_dummy_latex_backend_model.generate_math_doc( + format="rst", mkdocs_features=True + ) assert check_error_or_warning( excinfo, @@ -679,12 +676,9 @@ def test_get_variable_bounds_string(self, dummy_latex_backend_model): } assert refs == {"multi_dim_var"} - def test_param_type(self, dummy_model_data, dummy_model_math): - backend_model = latex_backend_model.LatexBackendModel( - dummy_model_data, dummy_model_math - ) - backend_model._add_all_inputs_as_parameters() - backend_model.add_global_expression( + def test_param_type(self, temp_dummy_latex_backend_model): + temp_dummy_latex_backend_model._add_all_inputs_as_parameters() + temp_dummy_latex_backend_model.add_global_expression( "expr", { "equations": [{"expression": "1 + flow_cap_max"}], @@ -692,7 +686,7 @@ def test_param_type(self, dummy_model_data, dummy_model_math): "default": 0, }, ) - doc = backend_model.generate_math_doc(format="md") + doc = temp_dummy_latex_backend_model.generate_math_doc(format="md") assert doc == textwrap.dedent( r""" diff --git a/tests/test_backend_parsing.py b/tests/test_backend_parsing.py index 8847738c..be4189a2 100644 --- a/tests/test_backend_parsing.py +++ b/tests/test_backend_parsing.py @@ -219,14 +219,18 @@ def _equation_slice_obj(name): @pytest.fixture -def dummy_backend_interface(dummy_model_data, dummy_model_math): +def dummy_backend_interface(dummy_model_data, dummy_model_math, default_config): # ignore the need to define the abstract methods from backend_model.BackendModel with patch.multiple(backend_model.BackendModel, __abstractmethods__=set()): class DummyBackendModel(backend_model.BackendModel): def __init__(self): backend_model.BackendModel.__init__( - self, dummy_model_data, dummy_model_math, instance=None + self, + dummy_model_data, + dummy_model_math, + 
default_config.build, + instance=None, ) self._dataset = dummy_model_data.copy(deep=True) diff --git a/tests/test_backend_pyomo.py b/tests/test_backend_pyomo.py index 710d147a..89588232 100755 --- a/tests/test_backend_pyomo.py +++ b/tests/test_backend_pyomo.py @@ -1636,7 +1636,8 @@ def test_add_run_mode_custom_math(self, caplog, mode): m = build_model({}, "simple_supply,two_hours,investment_costs") math = calliope.preprocess.CalliopeMath([mode]) - backend = PyomoBackendModel(m.inputs, math, mode=mode) + build_config = m.config.build.update({"mode": mode}) + backend = PyomoBackendModel(m.inputs, math, build_config) assert backend.math == math @@ -2247,7 +2248,9 @@ def validate_math(self): def _validate_math(math_dict: dict): m = build_model({}, "simple_supply,investment_costs") math = calliope.preprocess.CalliopeMath(["plan", math_dict]) - backend = calliope.backend.PyomoBackendModel(m._model_data, math) + backend = calliope.backend.PyomoBackendModel( + m._model_data, math, m.config.build + ) backend._add_all_inputs_as_parameters() backend._validate_math_string_parsing() diff --git a/tests/test_preprocess_model_data.py b/tests/test_preprocess_model_data.py index 5c5a9824..0a6266db 100644 --- a/tests/test_preprocess_model_data.py +++ b/tests/test_preprocess_model_data.py @@ -25,9 +25,8 @@ def model_def(): @pytest.fixture def init_config(default_config, model_def): - config_defaults = AttrDict(default_config.model_dump()) - config_defaults.union(model_def.pop("config"), allow_override=True) - return config_defaults["init"] + updated_config = default_config.update(model_def["config"]) + return updated_config.init @pytest.fixture @@ -198,14 +197,10 @@ def test_add_link_distances_missing_distance( @pytest.mark.parametrize(("unit", "expected"), [("m", 343834), ("km", 343.834)]) def test_add_link_distances_no_da( - self, - mocker, - my_caplog, - model_data_factory_w_params: ModelDataFactory, - unit, - expected, + self, my_caplog, model_data_factory_w_params: 
ModelDataFactory, unit, expected ): - mocker.patch.object(ModelDataFactory, "config.distance_unit", return_value=unit) + new_config = model_data_factory_w_params.config.update({"distance_unit": unit}) + model_data_factory_w_params.config = new_config model_data_factory_w_params.clean_data_from_undefined_members() model_data_factory_w_params.dataset["latitude"] = ( pd.Series({"A": 51.507222, "B": 48.8567}) @@ -434,7 +429,8 @@ def test_prepare_param_dict_not_lookup(self, model_data_factory: ModelDataFactor def test_prepare_param_dict_no_broadcast_allowed( self, model_data_factory, param_data ): - model_data_factory.config.broadcast_param_data = False + new_config = model_data_factory.config.update({"broadcast_param_data": False}) + model_data_factory.config = new_config param_dict = {"data": param_data, "index": [["foo"], ["bar"]], "dims": "foobar"} with pytest.raises(exceptions.ModelError) as excinfo: # noqa: PT011, false positive model_data_factory._prepare_param_dict("foo", param_dict) From 166f8db8cafcbefeb9907a61969b9c0cd09f8fc0 Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Thu, 12 Dec 2024 19:32:15 +0100 Subject: [PATCH 07/17] Fix tests: operate window / horizon access --- src/calliope/config.py | 2 +- .../test_model/energy_cap_per_storage_cap.yaml | 4 ++-- tests/common/test_model/scenarios.yaml | 4 ++-- tests/test_backend_pyomo.py | 4 ++-- tests/test_core_model.py | 12 +++++++----- 5 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/calliope/config.py b/src/calliope/config.py index fb77498a..dcb5509b 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -207,7 +207,7 @@ class Build(ConfigBaseModel): model_config = { "title": "build", - "extra": "allow", + "extra": "forbid", "revalidate_instances": "always", } diff --git a/tests/common/test_model/energy_cap_per_storage_cap.yaml b/tests/common/test_model/energy_cap_per_storage_cap.yaml index ec1a81f8..4b2d34a8 100644 --- 
a/tests/common/test_model/energy_cap_per_storage_cap.yaml +++ b/tests/common/test_model/energy_cap_per_storage_cap.yaml @@ -50,5 +50,5 @@ overrides: techs.my_storage.flow_cap_per_storage_cap_min: 1 config: build.mode: operate - solve.operate_window: 24 - solve.operate_horizon: 24 + solve.operate.window: 24 + solve.operate.horizon: 24 diff --git a/tests/common/test_model/scenarios.yaml b/tests/common/test_model/scenarios.yaml index f1531511..40993d20 100644 --- a/tests/common/test_model/scenarios.yaml +++ b/tests/common/test_model/scenarios.yaml @@ -415,8 +415,8 @@ overrides: config.build.mode: operate config.init.time_subset: ["2005-01-01", "2005-01-02"] config.build.ensure_feasibility: true - config.build.operate_window: 6h - config.build.operate_horizon: 12h + config.build.operate.window: 6h + config.build.operate.horizon: 12h investment_costs: templates: diff --git a/tests/test_backend_pyomo.py b/tests/test_backend_pyomo.py index 89588232..3e9b4333 100755 --- a/tests/test_backend_pyomo.py +++ b/tests/test_backend_pyomo.py @@ -1648,8 +1648,8 @@ def test_add_run_mode_custom_math_before_build(self, caplog): m = build_model( { - "config.build.operate_window": "12H", - "config.build.operate_horizon": "12H", + "config.build.operate.window": "12H", + "config.build.operate.horizon": "12H", }, "simple_supply,two_hours,investment_costs", ) diff --git a/tests/test_core_model.py b/tests/test_core_model.py index ddd97800..ed393c9f 100644 --- a/tests/test_core_model.py +++ b/tests/test_core_model.py @@ -69,9 +69,11 @@ def operate_model_and_log(self, request): model.build( force=True, mode="operate", - operate_use_cap_results=True, - operate_window=request.param[0], - operate_horizon=request.param[1], + operate={ + "use_cap_results": True, + "window": request.param[0], + "horizon": request.param[1], + }, ) with self.caplog_session(request) as caplog: @@ -116,8 +118,8 @@ def test_reset_model_window(self, rerun_operate_log): def test_end_of_horizon(self, operate_model_and_log): 
"""Check that increasingly shorter time horizons are logged as model rebuilds.""" operate_model, log = operate_model_and_log - config = operate_model.backend.config.operate - if config.operate_window != config.operate_horizon: + config = operate_model.backend.config + if config.operate.window != config.operate.horizon: assert "Reaching the end of the timeseries." in log else: assert "Reaching the end of the timeseries." not in log From 3f2f9be4e5b23ed51f0a2a753deac1591da11d6d Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Sat, 14 Dec 2024 16:04:05 +0100 Subject: [PATCH 08/17] Implement 'simplified' approach - Model 'build' and 'solve' no longer use a 'temporary' configuration - Simplified the pydantic configuration schema, and made it fully 'frozen' and non extensible - Moved data table loading into the Data generation portion - Fixed most tests --- src/calliope/config.py | 73 ++++++-------------------- src/calliope/model.py | 72 ++++++++++++------------- src/calliope/preprocess/data_tables.py | 2 +- src/calliope/preprocess/model_data.py | 27 +++++++--- src/calliope/util/tools.py | 4 +- tests/test_backend_module.py | 5 +- tests/test_backend_where_parser.py | 2 +- tests/test_core_preprocess.py | 21 ++++---- tests/test_io.py | 6 +-- tests/test_preprocess_data_sources.py | 71 +++++++++---------------- tests/test_preprocess_model_data.py | 22 ++++++-- 11 files changed, 131 insertions(+), 174 deletions(-) diff --git a/src/calliope/config.py b/src/calliope/config.py index dcb5509b..edbca50c 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -5,14 +5,14 @@ from collections.abc import Hashable from datetime import datetime from pathlib import Path -from typing import Annotated, Literal, Self, TypeVar +from typing import Annotated, Literal, TypeVar import jsonref from pydantic import AfterValidator, BaseModel, Field, model_validator from pydantic_core import PydanticCustomError +from typing_extensions 
import Self from calliope.attrdict import AttrDict -from calliope.util import tools MODES_T = Literal["plan", "operate", "spores"] CONFIG_T = Literal["init", "build", "solve"] @@ -54,13 +54,16 @@ def _hide_from_schema(schema: dict): class ConfigBaseModel(BaseModel): """A base class for creating pydantic models for Calliope configuration options.""" - _kwargs: dict = {} + model_config = { + "extra": "forbid", + "frozen": True, + "revalidate_instances": "always", + "use_attribute_docstrings": True, + } def update(self, update_dict: dict, deep: bool = False) -> Self: """Return a new iteration of the model with updated fields. - Updates are validated and stored in the parent class in the `_kwargs` key. - Args: update_dict (dict): Dictionary with which to update the base model. deep (bool, optional): Set to True to make a deep copy of the model. Defaults to False. @@ -74,12 +77,10 @@ def update(self, update_dict: dict, deep: bool = False) -> Self: key_class = getattr(self, key) if isinstance(key_class, ConfigBaseModel): new_dict[key] = key_class.update(val) - key_class._kwargs = val else: new_dict[key] = val updated = super().model_copy(update=new_dict, deep=deep) updated.model_validate(updated) - self._kwargs = update_dict return updated def model_no_ref_schema(self) -> AttrDict: @@ -93,31 +94,10 @@ def model_no_ref_schema(self) -> AttrDict: schema_dict.del_key("$defs") return schema_dict - @property - def applied_keyword_overrides(self) -> dict: - """Most recently applied keyword overrides used to update this configuration. - - Returns: - dict: Description of applied overrides. 
- """ - return self._kwargs - class Init(ConfigBaseModel): """All configuration options used when initialising a Calliope model.""" - model_config = { - "title": "init", - "extra": "forbid", - "frozen": True, - "json_schema_extra": hide_from_schema(["def_path"]), - "revalidate_instances": "always", - "use_attribute_docstrings": True, - } - - def_path: Path = Field(default=".", repr=False, exclude=True) - """The path to the main model definition YAML file, if one has been used to instantiate the Calliope Model class.""" - name: str | None = Field(default=None) """Model name""" @@ -142,7 +122,7 @@ class Init(ConfigBaseModel): time_resample: str | None = Field(default=None, pattern="^[0-9]+[a-zA-Z]") """Setting to adjust time resolution, e.g. '2h' for 2-hourly""" - time_cluster: Path | None = Field(default=None) + time_cluster: str | None = Field(default=None) """ Setting to cluster the timeseries. Must be a path to a file where each date is linked to a representative date that also exists in the timeseries. @@ -160,24 +140,16 @@ class Init(ConfigBaseModel): Automatically derived distances from lat/lon coordinates will be given in this unit. 
""" - @model_validator(mode="before") - @classmethod - def abs_path(cls, data): - """Add model definition path.""" - if data.get("time_cluster", None) is not None: - data["time_cluster"] = tools.relative_path( - data["def_path"], data["time_cluster"] - ) - return data - class BuildOperate(ConfigBaseModel): """Operate mode configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" model_config = { - "title": "operate", "extra": "forbid", - "json_schema_extra": hide_from_schema(["start_window_idx"]), + "frozen": True, + "json_schema_extra": hide_from_schema( + ["start_window_idx"] + ), # FIXME-remove, config should not be altered by our code "revalidate_instances": "always", "use_attribute_docstrings": True, } @@ -205,12 +177,6 @@ class BuildOperate(ConfigBaseModel): class Build(ConfigBaseModel): """Base configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" - model_config = { - "title": "build", - "extra": "forbid", - "revalidate_instances": "always", - } - mode: MODES_T = Field(default="plan") """Mode in which to run the optimisation.""" @@ -267,7 +233,7 @@ class SolveSpores(ConfigBaseModel): If False, will consolidate all iterations into one dataset after completion of N iterations (defined by `number`) and save that one dataset. """ - save_per_spore_path: Path | None = Field(default=None) + save_per_spore_path: str | None = Field(default=None) """If saving per spore, the path to save to.""" skip_cost_op: bool = Field(default=False) @@ -281,7 +247,7 @@ def require_save_per_spore_path(self) -> Self: raise ValueError( "Must define `save_per_spore_path` if you want to save each SPORES result separately." 
) - elif not self.save_per_spore_path.is_dir(): + elif not Path(self.save_per_spore_path).is_dir(): raise ValueError("`save_per_spore_path` must be a directory.") return self @@ -289,13 +255,7 @@ def require_save_per_spore_path(self) -> Self: class Solve(ConfigBaseModel): """Base configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" - model_config = { - "title": "solve", - "extra": "forbid", - "revalidate_instances": "always", - } - - save_logs: Path | None = Field(default=None) + save_logs: str | None = Field(default=None) """If given, should be a path to a directory in which to save optimisation logs.""" solver_io: str | None = Field(default=None) @@ -322,7 +282,6 @@ class Solve(ConfigBaseModel): class CalliopeConfig(ConfigBaseModel): """Calliope configuration class.""" - model_config = {"title": "config"} init: Init = Init() build: Build = Build() solve: Solve = Solve() diff --git a/src/calliope/model.py b/src/calliope/model.py index 4fd6979b..23800416 100644 --- a/src/calliope/model.py +++ b/src/calliope/model.py @@ -15,7 +15,6 @@ from calliope import backend, config, exceptions, io, preprocess from calliope.attrdict import AttrDict from calliope.postprocess import postprocess as postprocess_results -from calliope.preprocess.data_tables import DataTable from calliope.preprocess.model_data import ModelDataFactory from calliope.util.logging import log_time from calliope.util.schema import ( @@ -41,7 +40,7 @@ class Model: """A Calliope Model.""" _TS_OFFSET = pd.Timedelta(1, unit="nanoseconds") - ATTRS_SAVED = ("applied_math", "config") + ATTRS_SAVED = ("applied_math", "config", "def_path") def __init__( self, @@ -76,6 +75,7 @@ def __init__( self.defaults: AttrDict self.applied_math: preprocess.CalliopeMath self.backend: BackendModel + self.def_path: str | None = None self._is_built: bool = False self._is_solved: bool = False @@ -93,7 +93,7 @@ def __init__( else: if not isinstance(model_definition, dict): # Only file 
definitions allow relative files. - kwargs["def_path"] = str(model_definition) + self.def_path = str(model_definition) self._init_from_model_definition( model_definition, scenario, override_dict, data_table_dfs, **kwargs ) @@ -163,23 +163,23 @@ def _init_from_model_definition( comment="Model: preprocessing stage 1 (model_run)", ) model_config = config.CalliopeConfig(**model_def_full.pop("config")) - init_config = model_config.init param_metadata = {"default": extract_from_schema(MODEL_SCHEMA, "default")} attributes = { - "calliope_version_defined": init_config.calliope_version, + "calliope_version_defined": model_config.init.calliope_version, "calliope_version_initialised": calliope.__version__, "applied_overrides": applied_overrides, "scenario": scenario, "defaults": param_metadata["default"], } - data_tables: list[DataTable] = [] - for table_name, table_dict in model_def_full.pop("data_tables", {}).items(): - data_tables.append( - DataTable(table_name, table_dict, data_table_dfs, init_config.def_path) - ) + # FIXME-config: remove config input once model_def_full uses pydantic model_data_factory = ModelDataFactory( - init_config, model_def_full, data_tables, attributes, param_metadata + model_config.init, + model_def_full, + self.def_path, + data_table_dfs, + attributes, + param_metadata, ) model_data_factory.build() @@ -192,10 +192,7 @@ def _init_from_model_definition( comment="Model: preprocessing stage 2 (model_data)", ) - self._model_data.attrs["name"] = init_config.name - - # Unlike at the build and solve phases, we store the init config overrides in the main model config. - model_config.init = init_config # FIXME-config: unnecessary? 
+ self._model_data.attrs["name"] = model_config.init.name self.config = model_config log_time( @@ -220,7 +217,6 @@ def _init_from_model_data(self, model_data: xr.Dataset) -> None: ) if "config" in model_data.attrs: self.config = config.CalliopeConfig(**model_data.attrs.pop("config")) - self.config.update(model_data.attrs.pop("config_kwarg_overrides")) self._model_data = model_data @@ -260,26 +256,26 @@ def build( comment="Model: backend build starting", ) - build_config = self.config.update({"build": kwargs}).build - mode = build_config.mode + self.config = self.config.update({"build": kwargs}) + mode = self.config.build.mode if mode == "operate": if not self._model_data.attrs["allow_operate_mode"]: raise exceptions.ModelError( "Unable to run this model in operate (i.e. dispatch) mode, probably because " "there exist non-uniform timesteps (e.g. from time clustering)" ) - backend_input = self._prepare_operate_mode_inputs(build_config.operate) + backend_input = self._prepare_operate_mode_inputs(self.config.build.operate) else: backend_input = self._model_data - init_math_list = [] if build_config.ignore_mode_math else [mode] + init_math_list = [] if self.config.build.ignore_mode_math else [mode] end_math_list = [] if add_math_dict is None else [add_math_dict] - full_math_list = init_math_list + build_config.add_math + end_math_list + full_math_list = init_math_list + self.config.build.add_math + end_math_list LOGGER.debug(f"Math preprocessing | Loading math: {full_math_list}") - model_math = preprocess.CalliopeMath(full_math_list, self.config.init.def_path) + model_math = preprocess.CalliopeMath(full_math_list, self.def_path) self.backend = backend.get_model_backend( - build_config, backend_input, model_math + self.config.build, backend_input, model_math ) self.backend.add_optimisation_components() @@ -334,24 +330,23 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: else: to_drop = [] - solve_config = self.config.update({"solve": 
kwargs}).solve - # FIXME: find a way to avoid overcomplicated passing of settings between modes - mode = self.config.update(self.config.applied_keyword_overrides).build.mode + self.config = self.config.update({"solve": kwargs}) + + shadow_prices = self.config.solve.shadow_prices + self.backend.shadow_prices.track_constraints(shadow_prices) + + mode = self.config.build.mode self._model_data.attrs["timestamp_solve_start"] = log_time( LOGGER, self._timings, "solve_start", comment=f"Optimisation model | starting model in {mode} mode.", ) - - shadow_prices = solve_config.shadow_prices - self.backend.shadow_prices.track_constraints(shadow_prices) - if mode == "operate": - results = self._solve_operate(**solve_config.model_dump()) + results = self._solve_operate(**self.config.solve.model_dump()) else: results = self.backend._solve( - warmstart=warmstart, **solve_config.model_dump() + warmstart=warmstart, **self.config.solve.model_dump() ) log_time( @@ -393,12 +388,10 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: self._is_solved = True - def run(self, force_rerun=False, **kwargs): + def run(self, force_rerun=False): """Run the model. If ``force_rerun`` is True, any existing results will be overwritten. - - Additional kwargs are passed to the backend. 
""" exceptions.warn( "`run()` is deprecated and will be removed in a " @@ -412,11 +405,12 @@ def to_netcdf(self, path): """Save complete model data (inputs and, if available, results) to a NetCDF file at the given `path`.""" saved_attrs = {} for attr in set(self.ATTRS_SAVED) & set(self.__dict__.keys()): - if not isinstance(getattr(self, attr), str | list | None): + if attr == "config": + saved_attrs[attr] = self.config.model_dump() + elif not isinstance(getattr(self, attr), str | list | None): saved_attrs[attr] = dict(getattr(self, attr)) else: saved_attrs[attr] = getattr(self, attr) - saved_attrs["config_kwarg_overrides"] = self.config.applied_keyword_overrides io.save_netcdf(self._model_data, path, **saved_attrs) @@ -509,7 +503,7 @@ def _solve_operate(self, **solver_config) -> xr.Dataset: """ if self.backend.inputs.timesteps[0] != self._model_data.timesteps[0]: LOGGER.info("Optimisation model | Resetting model to first time window.") - self.build(force=True, **self.config.build.applied_keyword_overrides) + self.build(force=True) LOGGER.info("Optimisation model | Running first time window.") @@ -536,7 +530,7 @@ def _solve_operate(self, **solver_config) -> xr.Dataset: "Optimisation model | Reaching the end of the timeseries. " "Re-building model with shorter time horizon." 
) - build_kwargs = AttrDict(self.config.build.applied_keyword_overrides) + build_kwargs = AttrDict() build_kwargs.set_key("operate.start_window_idx", idx + 1) self.build(force=True, **build_kwargs) else: diff --git a/src/calliope/preprocess/data_tables.py b/src/calliope/preprocess/data_tables.py index 37d68453..5a6b8acb 100644 --- a/src/calliope/preprocess/data_tables.py +++ b/src/calliope/preprocess/data_tables.py @@ -53,7 +53,7 @@ def __init__( table_name: str, data_table: DataTableDict, data_table_dfs: dict[str, pd.DataFrame] | None = None, - model_definition_path: Path = Path("."), + model_definition_path: str | Path | None = None, ): """Load and format a data table from file / in-memory object. diff --git a/src/calliope/preprocess/model_data.py b/src/calliope/preprocess/model_data.py index 1df04224..65597798 100644 --- a/src/calliope/preprocess/model_data.py +++ b/src/calliope/preprocess/model_data.py @@ -5,6 +5,7 @@ import itertools import logging from copy import deepcopy +from pathlib import Path from typing import Literal import numpy as np @@ -18,7 +19,7 @@ from calliope.config import Init from calliope.preprocess import data_tables, time from calliope.util.schema import MODEL_SCHEMA, validate_dict -from calliope.util.tools import listify +from calliope.util.tools import listify, relative_path LOGGER = logging.getLogger(__name__) @@ -72,8 +73,9 @@ class ModelDataFactory: def __init__( self, init_config: Init, - model_definition: ModelDefinition, - data_tables: list[data_tables.DataTable], + model_definition: AttrDict, + definition_path: str | Path | None, + data_table_dfs: dict[str, pd.DataFrame] | None, attributes: dict, param_attributes: dict[str, dict], ): @@ -84,7 +86,8 @@ def __init__( Args: init_config (Init): Model initialisation configuration (i.e., `config.init`). model_definition (ModelDefinition): Definition of model nodes and technologies, and their potential `templates`. 
- data_tables (list[data_tables.DataTable]): Pre-loaded data tables that will be used to initialise the dataset before handling definitions given in `model_definition`. + definition_path (Path, None): Path to the main model definition file. Defaults to None. + data_table_dfs: (dict[str, pd.DataFrame], None): Dataframes with model data. Defaults to None. attributes (dict): Attributes to attach to the model Dataset. param_attributes (dict[str, dict]): Attributes to attach to the generated model DataArrays. """ @@ -92,7 +95,17 @@ def __init__( self.model_definition: ModelDefinition = model_definition.copy() self.dataset = xr.Dataset(attrs=AttrDict(attributes)) self.tech_data_from_tables = AttrDict() - self.init_from_data_tables(data_tables) + self.definition_path: str | Path | None = definition_path + tables = [] + for table_name, table_dict in model_definition.get_key( + "data_tables", {} + ).items(): + tables.append( + data_tables.DataTable( + table_name, table_dict, data_table_dfs, self.definition_path + ) + ) + self.init_from_data_tables(tables) flipped_attributes: dict[str, dict] = dict() for key, val in param_attributes.items(): @@ -256,7 +269,9 @@ def update_time_dimension_and_params(self): self.dataset = time.resample(self.dataset, self.config.time_resample) if self.config.time_cluster is not None: self.dataset = time.cluster( - self.dataset, self.config.time_cluster, self.config.time_format + self.dataset, + relative_path(self.definition_path, self.config.time_cluster), + self.config.time_format, ) def clean_data_from_undefined_members(self): diff --git a/src/calliope/util/tools.py b/src/calliope/util/tools.py index f4c99b32..e30c854c 100644 --- a/src/calliope/util/tools.py +++ b/src/calliope/util/tools.py @@ -11,7 +11,7 @@ T = TypeVar("T") -def relative_path(base_path_file: str | Path, path: str | Path) -> Path: +def relative_path(base_path_file: str | Path | None, path: str | Path) -> Path: """Path standardization. 
If ``path`` is not absolute, it is interpreted as relative to the @@ -19,7 +19,7 @@ def relative_path(base_path_file: str | Path, path: str | Path) -> Path: """ # Check if base_path_file is a string because it might be an AttrDict path = Path(path) - if path.is_absolute(): + if path.is_absolute() or base_path_file is None: return path else: base_path_file = Path(base_path_file) diff --git a/tests/test_backend_module.py b/tests/test_backend_module.py index f220a3b9..6451344c 100644 --- a/tests/test_backend_module.py +++ b/tests/test_backend_module.py @@ -2,7 +2,7 @@ import pytest -from calliope import backend +from calliope import AttrDict, backend from calliope.backend.backend_model import BackendModel from calliope.exceptions import BackendError @@ -19,7 +19,8 @@ def test_valid_model_backend(simple_supply, valid_backend): @pytest.mark.parametrize("spam", ["not_real", None, True, 1]) def test_invalid_model_backend(spam, simple_supply): """Backend requests should catch invalid setups.""" + invalid_config = AttrDict({"backend": spam}) with pytest.raises(BackendError): backend.get_model_backend( - spam, simple_supply._model_data, simple_supply.applied_math + invalid_config, simple_supply._model_data, simple_supply.applied_math ) diff --git a/tests/test_backend_where_parser.py b/tests/test_backend_where_parser.py index db33c984..725e8939 100644 --- a/tests/test_backend_where_parser.py +++ b/tests/test_backend_where_parser.py @@ -248,7 +248,7 @@ def test_config_missing_from_data(self, config_option, eval_kwargs, config_strin parsed_[0].eval(**eval_kwargs) @pytest.mark.parametrize( - ("config_string", "type_"), [("config.b_a", "list"), ("config.bar", "AttrDict")] + ("config_string", "type_"), [("config.b_a", "list"), ("config.bar", "dict")] ) def test_config_fail_datatype( self, config_option, eval_kwargs, config_string, type_ diff --git a/tests/test_core_preprocess.py b/tests/test_core_preprocess.py index 0ee2f38c..ba6e40de 100644 --- a/tests/test_core_preprocess.py +++ 
b/tests/test_core_preprocess.py @@ -2,6 +2,7 @@ import pandas as pd import pytest +from pydantic import ValidationError import calliope import calliope.exceptions as exceptions @@ -60,11 +61,11 @@ def override(param): return read_rich_yaml(f"config.init.time_subset: {param}") # should fail: one string in list - with pytest.raises(exceptions.ModelError): + with pytest.raises(ValidationError): build_model(override_dict=override(["2005-01"]), scenario="simple_supply") # should fail: three strings in list - with pytest.raises(exceptions.ModelError): + with pytest.raises(ValidationError): build_model( override_dict=override(["2005-01-01", "2005-01-02", "2005-01-03"]), scenario="simple_supply", @@ -81,7 +82,7 @@ def override(param): ) # should fail: must be a list, not a string - with pytest.raises(exceptions.ModelError): + with pytest.raises(ValidationError): model = build_model( override_dict=override("2005-01"), scenario="simple_supply" ) @@ -147,19 +148,15 @@ def test_single_timestep(self): class TestChecks: - @pytest.mark.parametrize("top_level_key", ["init", "solve"]) + @pytest.mark.parametrize( + "top_level_key", ["init", "build", "solve", "build.operate", "solve.spores"] + ) def test_unrecognised_config_keys(self, top_level_key): - """Check that the only keys allowed in 'model' and 'run' are those in the - model defaults - """ + """Check that no extra keys are allowed in the configuration.""" override = {f"config.{top_level_key}.nonsensical_key": "random_string"} - with pytest.raises(exceptions.ModelError) as excinfo: + with pytest.raises(ValidationError): build_model(override_dict=override, scenario="simple_supply") - assert check_error_or_warning( - excinfo, - "Additional properties are not allowed ('nonsensical_key' was unexpected)", - ) def test_model_version_mismatch(self): """Model config says config.init.calliope_version = 0.1, which is not what we diff --git a/tests/test_io.py b/tests/test_io.py index 802026a1..f839e616 100644 --- a/tests/test_io.py +++ 
b/tests/test_io.py @@ -186,10 +186,8 @@ def test_save_csv_not_optimal(self): with pytest.warns(exceptions.ModelWarning): model.to_csv(out_path, dropna=False) - @pytest.mark.parametrize("attr", ["config"]) - def test_dicts_as_model_attrs_and_property(self, model_from_file, attr): - assert attr in model_from_file._model_data.attrs.keys() - assert hasattr(model_from_file, attr) + def test_config_reload(self, model_from_file, model): + assert model_from_file.config.model_dump() == model.config.model_dump() def test_defaults_as_model_attrs_not_property(self, model_from_file): assert "defaults" in model_from_file._model_data.attrs.keys() diff --git a/tests/test_preprocess_data_sources.py b/tests/test_preprocess_data_sources.py index a250f04c..abe019a9 100644 --- a/tests/test_preprocess_data_sources.py +++ b/tests/test_preprocess_data_sources.py @@ -6,16 +6,10 @@ import calliope from calliope.preprocess import data_tables -from calliope.util.schema import CONFIG_SCHEMA, extract_from_schema from .common.util import check_error_or_warning -@pytest.fixture(scope="module") -def init_config(): - return calliope.AttrDict(extract_from_schema(CONFIG_SCHEMA, "default"))["init"] - - @pytest.fixture(scope="class") def data_dir(tmp_path_factory): filepath = tmp_path_factory.mktemp("data_tables") @@ -39,12 +33,12 @@ def _generate_data_table_dict(filename, df, rows, columns): class TestDataTableUtils: @pytest.fixture(scope="class") - def table_obj(self, init_config, generate_data_table_dict): + def table_obj(self, generate_data_table_dict): df = pd.Series({"bar": 0, "baz": 1}) table_dict = generate_data_table_dict( "foo.csv", df, rows="test_row", columns=None ) - ds = data_tables.DataTable(init_config, "ds_name", table_dict) + ds = data_tables.DataTable("ds_name", table_dict) ds.input["foo"] = ["foobar"] return ds @@ -130,9 +124,9 @@ def multi_row_multi_col_data(self, generate_data_table_dict): "multi_row_multi_col_file.csv", df, rows="test_row", columns="test_col" ) - def 
test_multi_row_no_col(self, init_config, multi_row_no_col_data): + def test_multi_row_no_col(self, multi_row_no_col_data): expected_df, table_dict = multi_row_no_col_data - ds = data_tables.DataTable(init_config, "ds_name", table_dict) + ds = data_tables.DataTable("ds_name", table_dict) test_param = ds.dataset["test_param"] assert not set(["test_row"]).symmetric_difference(test_param.dims) pd.testing.assert_series_equal( @@ -147,9 +141,9 @@ def test_multi_row_no_col(self, init_config, multi_row_no_col_data): "multi_row_multi_col_data", ], ) - def test_multi_row_one_col(self, init_config, request, data_table_ref): + def test_multi_row_one_col(self, request, data_table_ref): expected_df, table_dict = request.getfixturevalue(data_table_ref) - ds = data_tables.DataTable(init_config, "ds_name", table_dict) + ds = data_tables.DataTable("ds_name", table_dict) test_param = ds.dataset["test_param"] assert not set(["test_row", "test_col"]).symmetric_difference(test_param.dims) pd.testing.assert_series_equal( @@ -164,14 +158,11 @@ def test_multi_row_one_col(self, init_config, request, data_table_ref): "multi_row_multi_col_data", ], ) - def test_load_from_df(self, init_config, request, data_table_ref): + def test_load_from_df(self, request, data_table_ref): expected_df, table_dict = request.getfixturevalue(data_table_ref) table_dict["data"] = data_table_ref ds = data_tables.DataTable( - init_config, - "ds_name", - table_dict, - data_table_dfs={data_table_ref: expected_df}, + "ds_name", table_dict, data_table_dfs={data_table_ref: expected_df} ) test_param = ds.dataset["test_param"] assert not set(["test_row", "test_col"]).symmetric_difference(test_param.dims) @@ -179,12 +170,12 @@ def test_load_from_df(self, init_config, request, data_table_ref): test_param.to_series(), expected_df.stack(), check_names=False ) - def test_load_from_df_must_be_df(self, init_config, multi_row_no_col_data): + def test_load_from_df_must_be_df(self, multi_row_no_col_data): expected_df, table_dict = 
multi_row_no_col_data table_dict["data"] = "foo" with pytest.raises(calliope.exceptions.ModelError) as excinfo: data_tables.DataTable( - init_config, "ds_name", table_dict, data_table_dfs={"foo": expected_df} + "ds_name", table_dict, data_table_dfs={"foo": expected_df} ) assert check_error_or_warning(excinfo, "Data table must be a pandas DataFrame.") @@ -237,9 +228,9 @@ def multi_row_multi_col_data(self, generate_data_table_dict): columns=["test_col1", "test_col2"], ) - def test_multi_row_no_col(self, init_config, multi_row_no_col_data): + def test_multi_row_no_col(self, multi_row_no_col_data): expected_df, table_dict = multi_row_no_col_data - ds = data_tables.DataTable(init_config, "ds_name", table_dict) + ds = data_tables.DataTable("ds_name", table_dict) test_param = ds.dataset["test_param"] assert not set(["test_row1", "test_row2"]).symmetric_difference(test_param.dims) pd.testing.assert_series_equal( @@ -257,9 +248,9 @@ def test_multi_row_no_col(self, init_config, multi_row_no_col_data): "multi_row_multi_col_data", ], ) - def test_multi_row_one_col(self, init_config, request, data_table_ref): + def test_multi_row_one_col(self, request, data_table_ref): expected_df, table_dict = request.getfixturevalue(data_table_ref) - ds = data_tables.DataTable(init_config, "ds_name", table_dict) + ds = data_tables.DataTable("ds_name", table_dict) test_param = ds.dataset["test_param"] all_dims = table_dict["rows"] + table_dict["columns"] assert not set(all_dims).symmetric_difference(test_param.dims) @@ -273,7 +264,7 @@ def test_multi_row_one_col(self, init_config, request, data_table_ref): class TestDataTableSelectDropAdd: @pytest.fixture(scope="class") - def table_obj(self, init_config): + def table_obj(self): def _table_obj(**table_dict_kwargs): df = pd.DataFrame( { @@ -291,9 +282,7 @@ def _table_obj(**table_dict_kwargs): "columns": "parameters", **table_dict_kwargs, } - ds = data_tables.DataTable( - init_config, "ds_name", table_dict, data_table_dfs={"df": df} - ) + ds = 
data_tables.DataTable("ds_name", table_dict, data_table_dfs={"df": df}) return ds return _table_obj @@ -357,7 +346,7 @@ def test_drop_one(self, table_obj): class TestDataTableRenameDims: @pytest.fixture(scope="class") - def multi_row_one_col_data(self, data_dir, init_config, dummy_int): + def multi_row_one_col_data(self, data_dir, dummy_int): """Fixture to create the xarray dataset from the data table, including dimension name mapping.""" def _multi_row_one_col_data( @@ -377,7 +366,7 @@ def _multi_row_one_col_data( "add_dims": {"parameters": "test_param"}, "rename_dims": mapping, } - ds = data_tables.DataTable(init_config, "ds_name", table_dict) + ds = data_tables.DataTable("ds_name", table_dict) return ds.dataset return _multi_row_one_col_data @@ -416,7 +405,7 @@ def test_rename(self, dummy_int, multi_row_one_col_data, mapping, idx, col): class TestDataTableMalformed: @pytest.fixture(scope="class") - def table_obj(self, init_config): + def table_obj(self): def _table_obj(**table_dict_kwargs): df = pd.DataFrame( { @@ -433,9 +422,7 @@ def _table_obj(**table_dict_kwargs): "rows": ["test_row1", "test_row2"], **table_dict_kwargs, } - ds = data_tables.DataTable( - init_config, "ds_name", table_dict, data_table_dfs={"df": df} - ) + ds = data_tables.DataTable("ds_name", table_dict, data_table_dfs={"df": df}) return ds return _table_obj @@ -479,7 +466,7 @@ def test_check_for_protected_params(self, table_obj): class TestDataTableLookupDictFromParam: @pytest.fixture(scope="class") - def table_obj(self, init_config): + def table_obj(self): df = pd.DataFrame( { "FOO": {("foo1", "bar1"): 1, ("foo1", "bar2"): 1}, @@ -491,9 +478,7 @@ def table_obj(self, init_config): "rows": ["techs", "carriers"], "columns": "parameters", } - ds = data_tables.DataTable( - init_config, "ds_name", table_dict, data_table_dfs={"df": df} - ) + ds = data_tables.DataTable("ds_name", table_dict, data_table_dfs={"df": df}) return ds @pytest.mark.parametrize( @@ -518,13 +503,11 @@ def 
test_carrier_info_dict_from_model_data_var_missing_dim(self, table_obj): class TestDataTableTechDict: @pytest.fixture(scope="class") - def table_obj(self, init_config): + def table_obj(self): def _table_obj(df_dict, rows="techs"): df = pd.DataFrame(df_dict) table_dict = {"data": "df", "rows": rows, "columns": "parameters"} - ds = data_tables.DataTable( - init_config, "ds_name", table_dict, data_table_dfs={"df": df} - ) + ds = data_tables.DataTable("ds_name", table_dict, data_table_dfs={"df": df}) return ds return _table_obj @@ -584,13 +567,11 @@ def test_tech_dict_empty(self, table_obj): class TestDataTableNodeDict: @pytest.fixture(scope="class") - def table_obj(self, init_config): + def table_obj(self): def _table_obj(df_dict, rows=["nodes", "techs"]): df = pd.DataFrame(df_dict) table_dict = {"data": "df", "rows": rows, "columns": "parameters"} - ds = data_tables.DataTable( - init_config, "ds_name", table_dict, data_table_dfs={"df": df} - ) + ds = data_tables.DataTable("ds_name", table_dict, data_table_dfs={"df": df}) return ds return _table_obj diff --git a/tests/test_preprocess_model_data.py b/tests/test_preprocess_model_data.py index 0a6266db..9f5a0b96 100644 --- a/tests/test_preprocess_model_data.py +++ b/tests/test_preprocess_model_data.py @@ -15,11 +15,18 @@ @pytest.fixture -def model_def(): - model_def_path = Path(__file__).parent / "common" / "test_model" / "model.yaml" +def model_path(): + return Path(__file__).parent / "common" / "test_model" / "model.yaml" + + +@pytest.fixture +def model_def(model_path): model_def_override, _ = prepare_model_definition( - model_def_path, scenario="simple_supply,empty_tech_node" + model_path, scenario="simple_supply,empty_tech_node" ) + # Erase data tables for simplicity + # FIXME: previous tests omitted this. Either update tests or remove the data_table from the test model. 
+ model_def_override.del_key("data_tables") return model_def_override @@ -30,9 +37,14 @@ def init_config(default_config, model_def): @pytest.fixture -def model_data_factory(model_def, init_config, model_defaults): +def model_data_factory(model_path, model_def, init_config, model_defaults): return ModelDataFactory( - init_config, model_def, [], {"foo": "bar"}, {"default": model_defaults} + init_config, + model_def, + model_path, + [], + {"foo": "bar"}, + {"default": model_defaults}, ) From 7319ec12df8e049d7a9f064a172af4eec36dd100 Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Sun, 15 Dec 2024 13:59:50 +0100 Subject: [PATCH 09/17] Fix datetime bug to aviud time_subset without hours cutting to hour 0. --- src/calliope/config.py | 5 ++--- tests/test_backend_module.py | 3 ++- tests/test_core_model.py | 2 +- tests/test_core_preprocess.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/calliope/config.py b/src/calliope/config.py index edbca50c..0fc12e0d 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -3,7 +3,6 @@ """Implements the Calliope configuration class.""" from collections.abc import Hashable -from datetime import datetime from pathlib import Path from typing import Annotated, Literal, TypeVar @@ -111,7 +110,7 @@ class Init(ConfigBaseModel): Defaults to False to mitigate unexpected broadcasting when applying overrides. """ - time_subset: tuple[datetime, datetime] | None = Field(default=None) + time_subset: tuple[str, str] | None = Field(default=None) """ Subset of timesteps as an two-element list giving the **inclusive** range. For example, ["2005-01", "2005-04"] will create a time subset from "2005-01-01 00:00:00" to "2005-04-31 23:59:59". 
@@ -149,7 +148,7 @@ class BuildOperate(ConfigBaseModel): "frozen": True, "json_schema_extra": hide_from_schema( ["start_window_idx"] - ), # FIXME-remove, config should not be altered by our code + ), # FIXME-remove, config should not be altered within calliope our code "revalidate_instances": "always", "use_attribute_docstrings": True, } diff --git a/tests/test_backend_module.py b/tests/test_backend_module.py index 6451344c..93e3ca46 100644 --- a/tests/test_backend_module.py +++ b/tests/test_backend_module.py @@ -10,8 +10,9 @@ @pytest.mark.parametrize("valid_backend", ["pyomo", "gurobi"]) def test_valid_model_backend(simple_supply, valid_backend): """Requesting a valid model backend must result in a backend instance.""" + build_config = simple_supply.config.build.update({"backend": valid_backend}) backend_obj = backend.get_model_backend( - valid_backend, simple_supply._model_data, simple_supply.applied_math + build_config, simple_supply._model_data, simple_supply.applied_math ) assert isinstance(backend_obj, BackendModel) diff --git a/tests/test_core_model.py b/tests/test_core_model.py index ed393c9f..55aa2a4e 100644 --- a/tests/test_core_model.py +++ b/tests/test_core_model.py @@ -151,7 +151,7 @@ def test_build_operate_not_allowed_build(self): class TestBuild: - @pytest.fixture(scope="class") + @pytest.fixture def init_model(self): return build_model({}, "simple_supply,two_hours,investment_costs") diff --git a/tests/test_core_preprocess.py b/tests/test_core_preprocess.py index ba6e40de..4d3eeaf6 100644 --- a/tests/test_core_preprocess.py +++ b/tests/test_core_preprocess.py @@ -95,7 +95,7 @@ def override(param): assert check_error_or_warning( error, - "subset time range ['2005-03', '2005-04'] is outside the input data time range [2005-01-01 00:00:00, 2005-01-05 23:00:00]", + "subset time range ('2005-03', '2005-04') is outside the input data time range [2005-01-01 00:00:00, 2005-01-05 23:00:00]", ) # should fail: time subset out of range of input data From 
020aa4b9cae7316c7387947a653a5d3995854708 Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Sun, 15 Dec 2024 14:46:10 +0100 Subject: [PATCH 10/17] Move start_window_idx out of the configuration. It is now a state variable in Model. --- src/calliope/config.py | 13 ------------- src/calliope/model.py | 11 ++++++----- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/src/calliope/config.py b/src/calliope/config.py index 0fc12e0d..8bde6828 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -143,16 +143,6 @@ class Init(ConfigBaseModel): class BuildOperate(ConfigBaseModel): """Operate mode configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" - model_config = { - "extra": "forbid", - "frozen": True, - "json_schema_extra": hide_from_schema( - ["start_window_idx"] - ), # FIXME-remove, config should not be altered within calliope our code - "revalidate_instances": "always", - "use_attribute_docstrings": True, - } - window: str = Field(default="24h") """ Operate mode rolling `window`, given as a pandas frequency string. @@ -169,9 +159,6 @@ class BuildOperate(ConfigBaseModel): use_cap_results: bool = Field(default=False) """If the model already contains `plan` mode results, use those optimal capacities as input parameters to the `operate` mode run.""" - start_window_idx: int = Field(default=0, repr=False, exclude=True) - """Which time window to build. 
This is used to track the window when re-building the model part way through solving in `operate` mode.""" - class Build(ConfigBaseModel): """Base configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" diff --git a/src/calliope/model.py b/src/calliope/model.py index 23800416..b94f66ae 100644 --- a/src/calliope/model.py +++ b/src/calliope/model.py @@ -76,6 +76,7 @@ def __init__( self.applied_math: preprocess.CalliopeMath self.backend: BackendModel self.def_path: str | None = None + self._start_window_idx: int = 0 self._is_built: bool = False self._is_solved: bool = False @@ -476,8 +477,8 @@ def _prepare_operate_mode_inputs( self._model_data.coords["horizonsteps"] = clipped_horizonsteps - self._TS_OFFSET sliced_inputs = self._model_data.sel( timesteps=slice( - self._model_data.windowsteps[operate_config.start_window_idx], - self._model_data.horizonsteps[operate_config.start_window_idx], + self._model_data.windowsteps[self._start_window_idx], + self._model_data.horizonsteps[self._start_window_idx], ) ) if operate_config.use_cap_results: @@ -530,9 +531,8 @@ def _solve_operate(self, **solver_config) -> xr.Dataset: "Optimisation model | Reaching the end of the timeseries. " "Re-building model with shorter time horizon." 
) - build_kwargs = AttrDict() - build_kwargs.set_key("operate.start_window_idx", idx + 1) - self.build(force=True, **build_kwargs) + self._start_window_idx = idx + 1 + self.build(force=True) else: self.backend._dataset.coords["timesteps"] = new_inputs.timesteps self.backend.inputs.coords["timesteps"] = new_inputs.timesteps @@ -549,6 +549,7 @@ def _solve_operate(self, **solver_config) -> xr.Dataset: step_results = self.backend._solve(warmstart=False, **solver_config) + self._start_window_idx = 0 results_list.append(step_results.sel(timesteps=slice(windowstep, None))) results = xr.concat(results_list, dim="timesteps", combine_attrs="no_conflicts") results.attrs["termination_condition"] = ",".join( From 93ba743b56db540392f65c860911803f2397261b Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Sun, 15 Dec 2024 16:50:43 +0100 Subject: [PATCH 11/17] Update model documentation and CHANGELOG --- CHANGELOG.md | 4 ++++ docs/creating/config.md | 4 ++-- docs/creating/parameters.md | 2 +- docs/creating/scenarios.md | 33 +++------------------------------ docs/migrating.md | 10 +++++----- docs/running.md | 2 +- mkdocs.yml | 2 +- 7 files changed, 17 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e29e9487..fe23bf1b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ### User-facing changes +|changed| to ensure the model configuration always remains in sync with the results, `kwargs` in `model.build()` and `model.solve()` now directly affect `model.config` + |changed| `template:` can now be used anywhere within YAML definition files, not just in the `nodes`, `techs` and `data_tables` sections. |changed| "An overview of the Calliope terminology" information admonition to remove self-references and improve understandability. @@ -33,6 +35,8 @@ This change has occurred to avoid confusion between data "sources" and model ene ### Internal changes +|changed| Model configuration now uses `pydantic`. 
+ |changed| Model definition reading is now defined in a single place (preprocess/model_definition.py). |changed| Moved YAML reading/importing functionality out of `AttrDict`. It is now part of our `io` functionality. diff --git a/docs/creating/config.md b/docs/creating/config.md index 70b60f9d..63f54203 100644 --- a/docs/creating/config.md +++ b/docs/creating/config.md @@ -42,7 +42,7 @@ To test your model pipeline, `config.init.time_subset` is a good way to limit yo See our [time adjustment page](../advanced/time.md) for more details. !!! info "See also" - The full set of available configuration options is documented in the [configuration schema][model-configuration-schema]. + The full set of available configuration options is documented in the [configuration schema](../reference/config_schema.md). This provides you with a description of each configuration option and the default which will be used if you do not provide a value. ## Deep-dive into some key configuration options @@ -83,7 +83,7 @@ In `plan` mode, capacities are determined by the model, whereas in `operate` mod In `spores` mode, the model is first run in `plan` mode, then run `N` number of times to find alternative system configurations with similar monetary cost, but maximally different choice of technology capacity and location (node). In most cases, you will want to use the `plan` mode. -In fact, you can use a set of results from using `plan` model to initialise both the `operate` (`config.build.operate_use_cap_results`) and `spores` modes. +In fact, you can use a set of results from using `plan` mode to initialise both the `operate` (`config.build.operate.use_cap_results`) and `spores` modes.
### `config.solve.solver` diff --git a/docs/creating/parameters.md b/docs/creating/parameters.md index 20961937..760e3f4d 100644 --- a/docs/creating/parameters.md +++ b/docs/creating/parameters.md @@ -52,7 +52,7 @@ Which will add the new dimension `my_new_dim` to your model: `model.inputs.my_ne `foreach: [my_new_dim]`. !!! warning - The `parameter` section should not be used for large datasets (e.g., indexing over the time dimension) as it will have a high memory overhead on loading the data. + The `parameter` section should not be used for large datasets (e.g., indexing over the time dimension) as it will have a high memory overhead when loading the data. ## Broadcasting data along indexed dimensions diff --git a/docs/creating/scenarios.md b/docs/creating/scenarios.md index fb6f2069..b98b7cb2 100644 --- a/docs/creating/scenarios.md +++ b/docs/creating/scenarios.md @@ -37,33 +37,6 @@ Scenarios consist of a name and a list of override names which together form tha Scenarios and overrides can be used to generate scripts that run a single Calliope model many times, either sequentially, or in parallel on a high-performance cluster (see the section on [generating scripts to repeatedly run variations of a model](../advanced/scripts.md)). -## Importing other YAML files in overrides - -When using overrides, it is possible to have [`import` statements](yaml.md#relative-file-imports) for more flexibility. 
-This can be useful if many overrides are defined which share large parts of model configuration, such as different levels of interconnection between model zones -The following example illustrates this: - -```yaml -overrides: - some_override: - techs: - some_tech.constraints.flow_cap_max: 10 - import: [additional_definitions.yaml] -``` - -`additional_definitions.yaml`: - -```yaml -techs: - some_other_tech.constraints.flow_out_eff: 0.1 -``` - -This is equivalent to the following override: - -```yaml -overrides: - some_override: - techs: - some_tech.constraints.flow_cap_max: 10 - some_other_tech.constraints.flow_out_eff: 0.1 -``` +???+ warning + Overrides are executed _after_ `imports:` but _before_ `templates:`. + This means it is possible to override template values, but not the files imported in your model definition. diff --git a/docs/migrating.md b/docs/migrating.md index b704ba6b..2ca19821 100644 --- a/docs/migrating.md +++ b/docs/migrating.md @@ -340,9 +340,9 @@ Along with [changing the YAML hierarchy of model configuration](#model-and-run- * `model.subset_time` → `config.init.time_subset` * `model.time: {function: resample, function_options: {'resolution': '6H'}}` → `config.init.time_resample` -* `run.operation.window` → `config.build.operate_window` -* `run.operation.horizon` → `config.build.operate_horizon` -* `run.operation.use_cap_results` → `config.build.operate_use_cap_results` +* `run.operation.window` → `config.build.operate.window` +* `run.operation.horizon` → `config.build.operate.horizon` +* `run.operation.use_cap_results` → `config.build.operate.use_cap_results` We have also moved some _data_ out of the configuration and into the [top-level `parameters` section](creating/parameters.md): @@ -516,8 +516,8 @@ Therefore, `24H` is equivalent to `24` in v0.6 if you are using hourly resolutio init: time_resample: 6H build: - operate_window: 12H - operate_horizon: 24H + operate.window: 12H + operate.horizon: 24H ``` !!! 
warning diff --git a/docs/running.md b/docs/running.md index 82b90d03..fe4102bc 100644 --- a/docs/running.md +++ b/docs/running.md @@ -25,7 +25,7 @@ The `calliope run` command takes the following options: * `--scenario={scenario}` and `--override_dict={yaml_string}`: Specify a scenario, or one or several overrides, to apply to the model, or apply specific overrides from a YAML string (see below for more information). * `--help`: Show all available options. -Multiple options can be specified, for example, saving NetCDF, CSV, and HTML plots simultaneously. +Multiple options can be specified, for example, saving NetCDF and CSV simultaneously. ```shell $ calliope run testmodel/model.yaml --save_netcdf=results.nc --save_csv=outputs diff --git a/mkdocs.yml b/mkdocs.yml index 2db9eee6..7e81f126 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -156,7 +156,7 @@ nav: - reference/api/attrdict.md - reference/api/exceptions.md - reference/api/logging.md - - reference/config_schema.md + - Model configuration schema: reference/config_schema.md - reference/data_table_schema.md - reference/model_schema.md - reference/math_schema.md From 19902d96bd67f74fa9bbe7169af9c7e7df548c9d Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Sun, 15 Dec 2024 18:49:05 +0100 Subject: [PATCH 12/17] Fix broken documentation link. --- docs/creating/config.md | 2 +- mkdocs.yml | 2 +- src/calliope/config.py | 8 ++++++++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/creating/config.md b/docs/creating/config.md index 63f54203..307dc9ee 100644 --- a/docs/creating/config.md +++ b/docs/creating/config.md @@ -42,7 +42,7 @@ To test your model pipeline, `config.init.time_subset` is a good way to limit yo See our [time adjustment page](../advanced/time.md) for more details. !!! info "See also" - The full set of available configuration options is documented in the [configuration schema](../reference/config_schema.md). 
+ The full set of available configuration options is documented in the [configuration schema][model-configuration-schema]. This provides you with a description of each configuration option and the default which will be used if you do not provide a value. ## Deep-dive into some key configuration options diff --git a/mkdocs.yml b/mkdocs.yml index 7e81f126..2db9eee6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -156,7 +156,7 @@ nav: - reference/api/attrdict.md - reference/api/exceptions.md - reference/api/logging.md - - Model configuration schema: reference/config_schema.md + - reference/config_schema.md - reference/data_table_schema.md - reference/model_schema.md - reference/math_schema.md diff --git a/src/calliope/config.py b/src/calliope/config.py index 8bde6828..30895d2d 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -268,6 +268,14 @@ class Solve(ConfigBaseModel): class CalliopeConfig(ConfigBaseModel): """Calliope configuration class.""" + model_config = { + "title": "Model configuration schema", + "extra": "forbid", + "frozen": True, + "revalidate_instances": "always", + "use_attribute_docstrings": True, + } + init: Init = Init() build: Build = Build() solve: Solve = Solve() From 15d371c7d44af3550ca7180ad3f6aac9c7786ffe Mon Sep 17 00:00:00 2001 From: Bryn Pickering <17178478+brynpickering@users.noreply.github.com> Date: Wed, 18 Dec 2024 17:02:39 +0000 Subject: [PATCH 13/17] Add info level logging of config updates; remove unused method --- src/calliope/config.py | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/src/calliope/config.py b/src/calliope/config.py index 30895d2d..9f471005 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -2,6 +2,7 @@ # Licensed under the Apache 2.0 License (see LICENSE file). 
"""Implements the Calliope configuration class.""" +import logging from collections.abc import Hashable from pathlib import Path from typing import Annotated, Literal, TypeVar @@ -16,6 +17,8 @@ MODES_T = Literal["plan", "operate", "spores"] CONFIG_T = Literal["init", "build", "solve"] +LOGGER = logging.getLogger(__name__) + # == # Taken from https://github.com/pydantic/pydantic-core/pull/820#issuecomment-1670475909 T = TypeVar("T", bound=Hashable) @@ -35,21 +38,6 @@ def _validate_unique_list(v: list[T]) -> list[T]: # == -def hide_from_schema(to_hide: list[str]): - """Hide fields from the generated schema. - - Args: - to_hide (list[str]): List of fields to hide. - """ - - def _hide_from_schema(schema: dict): - for hide in to_hide: - schema.get("properties", {}).pop(hide, None) - return schema - - return _hide_from_schema - - class ConfigBaseModel(BaseModel): """A base class for creating pydantic models for Calliope configuration options.""" @@ -71,12 +59,16 @@ def update(self, update_dict: dict, deep: bool = False) -> Self: BaseModel: New model instance. """ new_dict: dict = {} - # Iterate through dict to be updated and convert any sub-dicts into their respective pydantic model objects - for key, val in update_dict.items(): + # Iterate through dict to be updated and convert any sub-dicts into their respective pydantic model objects. + # Wrapped in `AttrDict` to allow users to define dot notation nested configuration. 
+ for key, val in AttrDict(update_dict).items(): key_class = getattr(self, key) if isinstance(key_class, ConfigBaseModel): new_dict[key] = key_class.update(val) else: + LOGGER.info( + f"Updating {self.model_config["title"]} `{key}`: {key_class} -> {val}" + ) new_dict[key] = val updated = super().model_copy(update=new_dict, deep=deep) updated.model_validate(updated) @@ -97,6 +89,7 @@ def model_no_ref_schema(self) -> AttrDict: class Init(ConfigBaseModel): """All configuration options used when initialising a Calliope model.""" + model_config = {"title": "Model initialisation configuration"} name: str | None = Field(default=None) """Model name""" @@ -143,6 +136,7 @@ class Init(ConfigBaseModel): class BuildOperate(ConfigBaseModel): """Operate mode configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" + model_config = {"title": "Model build operate mode configuration"} window: str = Field(default="24h") """ Operate mode rolling `window`, given as a pandas frequency string. 
@@ -163,6 +157,7 @@ class BuildOperate(ConfigBaseModel): class Build(ConfigBaseModel): """Base configuration options used when building a Calliope optimisation problem (`calliope.Model.build`).""" + model_config = {"title": "Model build configuration"} mode: MODES_T = Field(default="plan") """Mode in which to run the optimisation.""" @@ -204,6 +199,7 @@ class Build(ConfigBaseModel): class SolveSpores(ConfigBaseModel): """SPORES configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" + model_config = {"title": "Model solve SPORES mode configuration"} number: int = Field(default=3) """SPORES mode number of iterations after the initial base run.""" @@ -241,6 +237,7 @@ def require_save_per_spore_path(self) -> Self: class Solve(ConfigBaseModel): """Base configuration options used when solving a Calliope optimisation problem (`calliope.Model.solve`).""" + model_config = {"title": "Model Solve Configuration"} save_logs: str | None = Field(default=None) """If given, should be a path to a directory in which to save optimisation logs.""" From 5ccdcd98397d886d7cdf98193a7491a0718ce36f Mon Sep 17 00:00:00 2001 From: Bryn Pickering <17178478+brynpickering@users.noreply.github.com> Date: Wed, 18 Dec 2024 17:04:56 +0000 Subject: [PATCH 14/17] Update quotes in quotes for py310 --- src/calliope/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/calliope/config.py b/src/calliope/config.py index 9f471005..3eab9993 100644 --- a/src/calliope/config.py +++ b/src/calliope/config.py @@ -67,7 +67,7 @@ def update(self, update_dict: dict, deep: bool = False) -> Self: new_dict[key] = key_class.update(val) else: LOGGER.info( - f"Updating {self.model_config["title"]} `{key}`: {key_class} -> {val}" + f"Updating {self.model_config['title']} `{key}`: {key_class} -> {val}" ) new_dict[key] = val updated = super().model_copy(update=new_dict, deep=deep) From 0308a8bde5764f18ead304ba2ea0492231cdb8ff Mon Sep 17 00:00:00 2001 From: 
Bryn Pickering <17178478+brynpickering@users.noreply.github.com> Date: Wed, 18 Dec 2024 18:32:14 +0000 Subject: [PATCH 15/17] Add explicit config tests --- tests/test_config.py | 184 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 184 insertions(+) create mode 100644 tests/test_config.py diff --git a/tests/test_config.py b/tests/test_config.py new file mode 100644 index 00000000..8866cac7 --- /dev/null +++ b/tests/test_config.py @@ -0,0 +1,184 @@ +import logging + +import numpy as np +import pydantic +import pytest +from pydantic_core import ValidationError + +from calliope import config + + +class TestUniqueList: + @pytest.fixture(scope="module") + def unique_list_model(self): + return pydantic.create_model("Model", unique_list=(config.UniqueList, ...)) + + @pytest.fixture(scope="module") + def unique_str_list_model(self): + return pydantic.create_model("Model", unique_list=(config.UniqueList[str], ...)) + + @pytest.mark.parametrize( + "valid_list", + [[1, 2, 3], [1.0, 1.1, 1.2], ["1", "2", "3"], ["1", 1, "foo"], [None, np.nan]], + ) + def test_unique_list(self, unique_list_model, valid_list): + "When there's no fixed type for list entries, they just have to be unique _within_ types" + model = unique_list_model(unique_list=valid_list) + assert model.unique_list == valid_list + + @pytest.mark.parametrize("valid_list", [[1, 2, 3], ["1", "2", "3"], ["foo", "bar"]]) + def test_unique_str_list(self, unique_list_model, valid_list): + "When there's a fixed type for list entries, they have to be unique when coerced to that type" + model = unique_list_model(unique_list=valid_list) + assert model.unique_list == valid_list + + @pytest.mark.parametrize( + "invalid_list", + [[1, 1, 2], [1, 1.0], ["1", "foo", "foo"], [None, None], [1, True], [0, False]], + ) + def test_not_unique_list(self, unique_list_model, invalid_list): + "When there's no fixed type for list entries, duplicate entries of the _same_ type is not allowed (includes int == bool)" + with 
pytest.raises(ValidationError, match="List must be unique"): + unique_list_model(unique_list=invalid_list) + + @pytest.mark.parametrize( + "invalid_list", [[1, 1, 2], ["foo", 1, "foo"], ["1", "foo", "foo"]] + ) + def test_not_unique_str_list(self, unique_list_model, invalid_list): + "When there's a fixed type for list entries, they have to be unique when coerced to that type" + with pytest.raises(ValidationError, match="List must be unique"): + unique_list_model(unique_list=invalid_list) + + +class TestUpdate: + @pytest.fixture(scope="module") + def config_model_flat(self): + return pydantic.create_model( + "Model", + __base__=config.ConfigBaseModel, + model_config={"title": "TITLE"}, + foo=(str, "bar"), + foobar=(int, 1), + ) + + @pytest.fixture(scope="module") + def config_model_nested(self, config_model_flat): + return pydantic.create_model( + "Model", + __base__=config.ConfigBaseModel, + model_config={"title": "TITLE 2"}, + nested=(config_model_flat, config_model_flat()), + top_level_foobar=(int, 10), + ) + + @pytest.fixture(scope="module") + def config_model_double_nested(self, config_model_nested): + return pydantic.create_model( + "Model", + __base__=config.ConfigBaseModel, + model_config={"title": "TITLE 3"}, + extra_nested=(config_model_nested, config_model_nested()), + ) + + @pytest.mark.parametrize( + ("to_update", "expected"), + [ + ({"foo": "baz"}, {"foo": "baz", "foobar": 1}), + ({"foobar": 2}, {"foo": "bar", "foobar": 2}), + ({"foo": "baz", "foobar": 2}, {"foo": "baz", "foobar": 2}), + ], + ) + def test_update_flat(self, config_model_flat, to_update, expected): + model = config_model_flat() + model_dict = model.model_dump() + + new_model = model.update(to_update) + + assert new_model.model_dump() == expected + assert model.model_dump() == model_dict + + @pytest.mark.parametrize( + ("to_update", "expected"), + [ + ( + {"top_level_foobar": 20}, + {"top_level_foobar": 20, "nested": {"foo": "bar", "foobar": 1}}, + ), + ( + {"nested": {"foobar": 2}}, + 
{"top_level_foobar": 10, "nested": {"foo": "bar", "foobar": 2}}, + ), + ( + {"top_level_foobar": 20, "nested": {"foobar": 2}}, + {"top_level_foobar": 20, "nested": {"foo": "bar", "foobar": 2}}, + ), + ( + {"top_level_foobar": 20, "nested.foobar": 2}, + {"top_level_foobar": 20, "nested": {"foo": "bar", "foobar": 2}}, + ), + ], + ) + def test_update_nested(self, config_model_nested, to_update, expected): + model = config_model_nested() + model_dict = model.model_dump() + + new_model = model.update(to_update) + + assert new_model.model_dump() == expected + assert model.model_dump() == model_dict + + @pytest.mark.parametrize( + "to_update", + [ + {"extra_nested.nested.foobar": 2}, + {"extra_nested": {"nested": {"foobar": 2}}}, + ], + ) + def test_update_extra_nested(self, config_model_double_nested, to_update): + model = config_model_double_nested() + model_dict = model.model_dump() + + new_model = model.update(to_update) + + assert new_model.extra_nested.nested.foobar == 2 + assert model.model_dump() == model_dict + + @pytest.mark.parametrize( + "to_update", + [ + {"extra_nested.nested.foobar": "foo"}, + {"extra_nested.top_level_foobar": "foo"}, + ], + ) + def test_update_extra_nested_validation_error( + self, config_model_double_nested, to_update + ): + model = config_model_double_nested() + + with pytest.raises(ValidationError, match="1 validation error for TITLE"): + model.update(to_update) + + @pytest.mark.parametrize( + ("to_update", "expected"), + [ + ({"extra_nested.nested.foobar": 2}, ["Updating TITLE `foobar`: 1 -> 2"]), + ( + {"extra_nested.top_level_foobar": 2}, + ["Updating TITLE 2 `top_level_foobar`: 10 -> 2"], + ), + ( + {"extra_nested.nested.foobar": 2, "extra_nested.top_level_foobar": 3}, + [ + "Updating TITLE `foobar`: 1 -> 2", + "Updating TITLE 2 `top_level_foobar`: 10 -> 3", + ], + ), + ], + ) + def test_logging(self, caplog, config_model_double_nested, to_update, expected): + caplog.set_level(logging.INFO) + + model = config_model_double_nested() + 
model.update(to_update) + + assert all(log_text in caplog.text for log_text in expected) From bf6cc0afe9dbdd6723a6f80dedb3aa7b1748fc2e Mon Sep 17 00:00:00 2001 From: Bryn Pickering <17178478+brynpickering@users.noreply.github.com> Date: Wed, 18 Dec 2024 22:23:33 +0000 Subject: [PATCH 16/17] Add model_no_ref_schema tests --- tests/test_config.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/tests/test_config.py b/tests/test_config.py index 8866cac7..ce18e0dd 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -182,3 +182,42 @@ def test_logging(self, caplog, config_model_double_nested, to_update, expected): model.update(to_update) assert all(log_text in caplog.text for log_text in expected) + + +class TestNoRefSchema: + @pytest.fixture(scope="module") + def config_model(self): + sub_model = pydantic.create_model( + "SubModel", + __base__=config.ConfigBaseModel, + model_config={"title": "TITLE"}, + foo=(str, "bar"), + foobar=(int, 1), + ) + model = pydantic.create_model( + "Model", + __base__=config.ConfigBaseModel, + model_config={"title": "TITLE 2"}, + nested=(sub_model, sub_model()), + ) + return model + + def test_config_model_no_defs(self, config_model): + model = config_model() + json_schema = model.model_json_schema() + no_defs_json_schema = model.model_no_ref_schema() + assert "$defs" in json_schema + assert "$defs" not in no_defs_json_schema + + def test_config_model_no_resolved_refs(self, config_model): + model = config_model() + json_schema = model.model_json_schema() + no_defs_json_schema = model.model_no_ref_schema() + assert json_schema["properties"]["nested"] == { + "$ref": "#/$defs/SubModel", + "default": {"foo": "bar", "foobar": 1}, + } + assert ( + no_defs_json_schema["properties"]["nested"] + == json_schema["$defs"]["SubModel"] + ) From 45ade7ddc63f341a48e000e7f3141e6ef7adf831 Mon Sep 17 00:00:00 2001 From: Ivan Ruiz Manuel <72193617+irm-codebase@users.noreply.github.com> Date: Tue, 7 Jan 2025 
14:04:47 +0100 Subject: [PATCH 17/17] Add tests for new model error messages. --- src/calliope/model.py | 6 ++---- tests/test_core_model.py | 25 +++++++++++++++++-------- tests/test_io.py | 13 +++++++++++++ 3 files changed, 32 insertions(+), 12 deletions(-) diff --git a/src/calliope/model.py b/src/calliope/model.py index b94f66ae..10831347 100644 --- a/src/calliope/model.py +++ b/src/calliope/model.py @@ -312,14 +312,14 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: exceptions.ModelError: Cannot run the model if there are already results loaded, unless `force` is True. exceptions.ModelError: Some preprocessing steps will stop a run mode of "operate" from being possible. """ - # Check that results exist and are non-empty if not self.is_built: raise exceptions.ModelError( "You must build the optimisation problem (`.build()`) " "before you can run it." ) - if hasattr(self, "results"): + to_drop = [] + if hasattr(self, "results"): # Check that results exist and are non-empty if self.results.data_vars and not force: raise exceptions.ModelError( "This model object already has results. 
" @@ -328,8 +328,6 @@ def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None: ) else: to_drop = self.results.data_vars - else: - to_drop = [] self.config = self.config.update({"solve": kwargs}) diff --git a/tests/test_core_model.py b/tests/test_core_model.py index 55aa2a4e..4aae7bfc 100644 --- a/tests/test_core_model.py +++ b/tests/test_core_model.py @@ -83,14 +83,6 @@ def operate_model_and_log(self, request): return model, log - @pytest.fixture(scope="class") - def rerun_operate_log(self, request, operate_model_and_log): - """Solve in operate mode a second time, to trigger new log messages.""" - with self.caplog_session(request) as caplog: - with caplog.at_level(logging.INFO): - operate_model_and_log[0].solve(force=True) - return caplog.text - def test_backend_build_mode(self, operate_model_and_log): """Verify that we have run in operate mode""" operate_model, _ = operate_model_and_log @@ -111,6 +103,14 @@ def test_not_reset_model_window(self, operate_model_and_log): _, log = operate_model_and_log assert "Resetting model to first time window." not in log + @pytest.fixture + def rerun_operate_log(self, request, operate_model_and_log): + """Solve in operate mode a second time, to trigger new log messages.""" + with self.caplog_session(request) as caplog: + with caplog.at_level(logging.INFO): + operate_model_and_log[0].solve(force=True) + return caplog.text + def test_reset_model_window(self, rerun_operate_log): """The backend model time window needs resetting back to the start on rerunning in operate mode.""" assert "Resetting model to first time window." 
in rerun_operate_log @@ -149,6 +149,15 @@ def test_build_operate_not_allowed_build(self): ): m.build(mode="operate") + def test_build_operate_use_cap_results_error(self): + """Requesting to use capacity results should return an error if the model is not pre-solved.""" + m = build_model({}, "simple_supply,operate,var_costs,investment_costs") + with pytest.raises( + calliope.exceptions.ModelError, + match="Cannot use plan mode capacity results in operate mode if a solution does not yet exist for the model.", + ): + m.build(mode="operate", operate={"use_cap_results": True}) + class TestBuild: @pytest.fixture diff --git a/tests/test_io.py b/tests/test_io.py index f839e616..28ca4c39 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -66,6 +66,19 @@ def model_csv_dir(self, tmpdir_factory, model): def test_save_netcdf(self, model_file): assert os.path.isfile(model_file) + @pytest.mark.parametrize( + "kwargs", + [{"name": "foobar"}, {"calliope_version": "0.7.0", "time_resample": "2h"}], + ) + def test_model_from_file_kwarg_error(self, model_file, kwargs): + """Passing kwargs when reading model dataset files should fail.""" + model_data = calliope.io.read_netcdf(model_file) + with pytest.raises( + exceptions.ModelError, + match="Cannot apply initialisation configuration overrides when loading data from an xarray Dataset.", + ): + calliope.Model(model_data, **kwargs) + @pytest.mark.parametrize( ("attr", "expected_type", "expected_val"), [