
calliope.Model(model_definition, scenario=None, override_dict=None, data_table_dfs=None, **kwargs)

A Calliope Model.

Returns a new Model from YAML model configuration files or a fully specified dictionary.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model_definition` | `str \| Path \| dict \| Dataset` | If str or Path, must be the path to a model configuration file. If dict or AttrDict, must fully specify the model. If an xarray Dataset, must be a valid Calliope model. | required |
| `scenario` | `str \| None` | Comma-delimited string of pre-defined scenarios to apply to the model. Defaults to None. | `None` |
| `override_dict` | `dict \| None` | Additional overrides to apply to `config`. These will be applied after applying any defined `scenario` overrides. Defaults to None. | `None` |
| `data_table_dfs` | `dict[str, DataFrame] \| None` | Model definition `data_table` entries can reference in-memory pandas DataFrames. The referenced data must be supplied here as a dictionary of those DataFrames. Defaults to None. | `None` |
| `**kwargs` | | Initialisation overrides. | `{}` |

Source code in src/calliope/model.py
def __init__(
    self,
    model_definition: str | Path | dict | xr.Dataset,
    scenario: str | None = None,
    override_dict: dict | None = None,
    data_table_dfs: dict[str, pd.DataFrame] | None = None,
    **kwargs,
):
    """Returns a new Model from YAML model configuration files or a fully specified dictionary.

    Args:
        model_definition (str | Path | dict | xr.Dataset):
            If str or Path, must be the path to a model configuration file.
            If dict or AttrDict, must fully specify the model.
            If an xarray dataset, must be a valid calliope model.
        scenario (str | None, optional):
            Comma delimited string of pre-defined `scenarios` to apply to the model.
            Defaults to None.
        override_dict (dict | None, optional):
            Additional overrides to apply to `config`.
            These will be applied *after* applying any defined `scenario` overrides.
            Defaults to None.
        data_table_dfs (dict[str, pd.DataFrame] | None, optional):
            Model definition `data_table` entries can reference in-memory pandas DataFrames.
            The referenced data must be supplied here as a dictionary of those DataFrames.
            Defaults to None.
        **kwargs: initialisation overrides.
    """
    self._timings: dict = {}
    self.config: config_schema.CalliopeConfig
    self.defaults: AttrDict
    self.applied_math: preprocess.CalliopeMath
    self.backend: BackendModel
    self.def_path: str | None = None
    self._start_window_idx: int = 0
    self._is_built: bool = False
    self._is_solved: bool = False

    # try to set logging output format assuming python interactive. Will
    # use CLI logging format if model called from CLI
    timestamp_model_creation = log_time(
        LOGGER, self._timings, "model_creation", comment="Model: initialising"
    )
    if isinstance(model_definition, xr.Dataset):
        if kwargs:
            raise exceptions.ModelError(
                "Cannot apply initialisation configuration overrides when loading data from an xarray Dataset."
            )
        self._init_from_model_data(model_definition)
    else:
        if not isinstance(model_definition, dict):
            # Only file definitions allow relative files.
            self.def_path = str(model_definition)
        self._init_from_model_definition(
            model_definition, scenario, override_dict, data_table_dfs, **kwargs
        )

    self._model_data.attrs["timestamp_model_creation"] = timestamp_model_creation
    version_def = self._model_data.attrs["calliope_version_defined"]
    version_init = self._model_data.attrs["calliope_version_initialised"]
    if version_def is not None and not version_init.startswith(version_def):
        exceptions.warn(
            f"Model configuration specifies calliope version {version_def}, "
            f"but you are running {version_init}. Proceed with caution!"
        )

Attributes:

- `ATTRS_SAVED = ('applied_math', 'config', 'def_path')` (class attribute)
- `applied_math` (instance attribute)
- `backend` (instance attribute)
- `config` (instance attribute)
- `def_path = None` (instance attribute)
- `defaults` (instance attribute)

Properties:

- `inputs`: Get model input data.
- `is_built`: Get built status.
- `is_solved`: Get solved status.
- `name`: Get the model name.
- `results`: Get model result data.
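
These read-only properties are the main entry points for inspecting a model interactively. A short sketch, continuing from the `model` object constructed above:

```python
# Inspect model state and data after construction.
print(model.name)       # model name from the definition
print(model.is_built)   # False until .build() has been called
print(model.is_solved)  # False until .solve() has been called
inputs = model.inputs   # xarray Dataset of model input data
```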

build(force=False, add_math_dict=None, **kwargs)

Build description of the optimisation problem in the chosen backend interface.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `force` | `bool` | If force is True, any existing results will be overwritten. Defaults to False. | `False` |
| `add_math_dict` | `dict \| None` | Additional math to apply on top of the YAML base / additional math files. Content of this dictionary will override any matching key:value pairs in the loaded math files. | `None` |
| `**kwargs` | | Build configuration overrides. | `{}` |

Source code in src/calliope/model.py
def build(
    self, force: bool = False, add_math_dict: dict | None = None, **kwargs
) -> None:
    """Build description of the optimisation problem in the chosen backend interface.

    Args:
        force (bool, optional):
            If ``force`` is True, any existing results will be overwritten.
            Defaults to False.
        add_math_dict (dict | None, optional):
            Additional math to apply on top of the YAML base / additional math files.
            Content of this dictionary will override any matching key:value pairs in the loaded math files.
        **kwargs: build configuration overrides.
    """
    if self._is_built and not force:
        raise exceptions.ModelError(
            "This model object already has a built optimisation problem. Use model.build(force=True) "
            "to force the existing optimisation problem to be overwritten with a new one."
        )
    self._model_data.attrs["timestamp_build_start"] = log_time(
        LOGGER,
        self._timings,
        "build_start",
        comment="Model: backend build starting",
    )

    self.config = self.config.update({"build": kwargs})
    mode = self.config.build.mode
    if mode == "operate":
        if not self._model_data.attrs["allow_operate_mode"]:
            raise exceptions.ModelError(
                "Unable to run this model in operate (i.e. dispatch) mode, probably because "
                "there exist non-uniform timesteps (e.g. from time clustering)"
            )
        backend_input = self._prepare_operate_mode_inputs(self.config.build.operate)
    else:
        backend_input = self._model_data

    init_math_list = [] if self.config.build.ignore_mode_math else [mode]
    end_math_list = [] if add_math_dict is None else [add_math_dict]
    full_math_list = init_math_list + self.config.build.add_math + end_math_list
    LOGGER.debug(f"Math preprocessing | Loading math: {full_math_list}")
    model_math = preprocess.CalliopeMath(full_math_list, self.def_path)

    self.backend = backend.get_model_backend(
        self.config.build, backend_input, model_math
    )
    self.backend.add_optimisation_components()

    self.applied_math = model_math

    self._model_data.attrs["timestamp_build_complete"] = log_time(
        LOGGER,
        self._timings,
        "build_complete",
        comment="Model: backend build complete",
    )
    self._is_built = True
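
As a sketch of how `force` and `add_math_dict` combine, consider the following; the constraint name and expression are hypothetical and not part of Calliope's shipped math:

```python
# Rebuild the optimisation problem, layering an in-memory math snippet on top of
# the YAML math files. The constraint below is purely illustrative.
model.build(
    force=True,  # overwrite any previously built optimisation problem
    add_math_dict={
        "constraints": {
            "limit_flow_cap": {  # hypothetical constraint name
                "foreach": ["nodes", "techs"],
                "equations": [{"expression": "flow_cap <= 100"}],
            }
        }
    },
)
```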

info()

Generate basic description of the model, combining its name and a rough indication of the model size.

Returns:

| Type | Description |
| --- | --- |
| `str` | Basic description of the model. |

Source code in src/calliope/model.py
def info(self) -> str:
    """Generate basic description of the model, combining its name and a rough indication of the model size.

    Returns:
        str: Basic description of the model.
    """
    info_strings = []
    model_name = self.name
    info_strings.append(f"Model name:   {model_name}")
    msize = dict(self._model_data.dims)
    msize_exists = self._model_data.definition_matrix.sum()
    info_strings.append(
        f"Model size:   {msize} ({msize_exists.item()} valid node:tech:carrier combinations)"
    )
    return "\n".join(info_strings)

run(force_rerun=False)

Run the model.

If force_rerun is True, any existing results will be overwritten.

Source code in src/calliope/model.py
def run(self, force_rerun=False):
    """Run the model.

    If ``force_rerun`` is True, any existing results will be overwritten.
    """
    exceptions.warn(
        "`run()` is deprecated and will be removed in a "
        "future version. Use `model.build()` followed by `model.solve()`.",
        FutureWarning,
    )
    self.build(force=force_rerun)
    self.solve(force=force_rerun)
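
The deprecation message points to the replacement pattern, shown here for a forced re-run:

```python
# Equivalent to the deprecated model.run(force_rerun=True)
model.build(force=True)
model.solve(force=True)
```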

solve(force=False, warmstart=False, **kwargs)

Solve the built optimisation problem.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `force` | `bool` | If force is True, any existing results will be overwritten. Defaults to False. | `False` |
| `warmstart` | `bool` | If True and the optimisation problem has already been run in this session (i.e., force is not True), the next optimisation will be run with decision variables initially set to their previously optimal values. If the optimisation problem is similar to the previous run, this can decrease the solution time. Warmstart will not work with some solvers (e.g., CBC, GLPK). Defaults to False. | `False` |
| `**kwargs` | | Solve configuration overrides. | `{}` |

Raises:

| Type | Description |
| --- | --- |
| `ModelError` | Optimisation problem must already be built. |
| `ModelError` | Cannot run the model if there are already results loaded, unless force is True. |
| `ModelError` | Some preprocessing steps will stop a run mode of "operate" from being possible. |

Source code in src/calliope/model.py
def solve(self, force: bool = False, warmstart: bool = False, **kwargs) -> None:
    """Solve the built optimisation problem.

    Args:
        force (bool, optional):
            If ``force`` is True, any existing results will be overwritten.
            Defaults to False.
        warmstart (bool, optional):
            If True and the optimisation problem has already been run in this session
            (i.e., `force` is not True), the next optimisation will be run with
            decision variables initially set to their previously optimal values.
            If the optimisation problem is similar to the previous run, this can
            decrease the solution time.
            Warmstart will not work with some solvers (e.g., CBC, GLPK).
            Defaults to False.
        **kwargs: solve configuration overrides.

    Raises:
        exceptions.ModelError: Optimisation problem must already be built.
        exceptions.ModelError: Cannot run the model if there are already results loaded, unless `force` is True.
        exceptions.ModelError: Some preprocessing steps will stop a run mode of "operate" from being possible.
    """
    if not self.is_built:
        raise exceptions.ModelError(
            "You must build the optimisation problem (`.build()`) "
            "before you can run it."
        )

    to_drop = []
    if hasattr(self, "results"):  # Check that results exist and are non-empty
        if self.results.data_vars and not force:
            raise exceptions.ModelError(
                "This model object already has results. "
                "Use model.solve(force=True) to force"
                "the results to be overwritten with a new run."
            )
        else:
            to_drop = self.results.data_vars

    self.config = self.config.update({"solve": kwargs})

    shadow_prices = self.config.solve.shadow_prices
    self.backend.shadow_prices.track_constraints(shadow_prices)

    mode = self.config.build.mode
    self._model_data.attrs["timestamp_solve_start"] = log_time(
        LOGGER,
        self._timings,
        "solve_start",
        comment=f"Optimisation model | starting model in {mode} mode.",
    )
    if mode == "operate":
        results = self._solve_operate(**self.config.solve.model_dump())
    else:
        results = self.backend._solve(
            warmstart=warmstart, **self.config.solve.model_dump()
        )

    log_time(
        LOGGER,
        self._timings,
        "solver_exit",
        time_since_solve_start=True,
        comment="Backend: solver finished running",
    )

    # Add additional post-processed result variables to results
    if results.attrs["termination_condition"] in ["optimal", "feasible"]:
        results = postprocess_results.postprocess_model_results(
            results, self._model_data, self.config.solve.zero_threshold
        )

    log_time(
        LOGGER,
        self._timings,
        "postprocess_complete",
        time_since_solve_start=True,
        comment="Postprocessing: ended",
    )

    self._model_data = self._model_data.drop_vars(to_drop)

    self._model_data.attrs.update(results.attrs)
    self._model_data = xr.merge(
        [results, self._model_data], compat="override", combine_attrs="no_conflicts"
    )

    self._model_data.attrs["timestamp_solve_complete"] = log_time(
        LOGGER,
        self._timings,
        "solve_complete",
        time_since_solve_start=True,
        comment="Backend: model solve completed",
    )

    self._is_solved = True
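
A sketch of typical use; the `solver` key and its value are assumptions about the solve configuration, and any `config.solve` option can be passed as a keyword override:

```python
# Solve the built problem, overriding the configured solver (assumed installed).
model.solve(solver="cbc")
results = model.results  # xarray Dataset of optimisation results

# Overwrite the existing results with a fresh run.
model.solve(force=True)
```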

to_csv(path, dropna=True, allow_overwrite=False)

Save complete model data (inputs and, if available, results) as a set of CSV files to the given path.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `path` | `str \| Path` | File path to save at. | required |
| `dropna` | `bool` | If True, NaN values are dropped when saving, resulting in significantly smaller CSV files. Defaults to True. | `True` |
| `allow_overwrite` | `bool` | If True, allow overwriting the directory contents if the directory already exists. CSV files are overwritten one at a time, so if the dataset has different arrays from the previously saved model, you will get a mix of old and new files. Defaults to False. | `False` |

Source code in src/calliope/model.py
def to_csv(
    self, path: str | Path, dropna: bool = True, allow_overwrite: bool = False
):
    """Save complete model data (inputs and, if available, results) as a set of CSV files to the given ``path``.

    Args:
        path (str | Path): file path to save at.
        dropna (bool, optional):
            If True, NaN values are dropped when saving, resulting in significantly smaller CSV files.
            Defaults to True
        allow_overwrite (bool, optional):
            If True, allow the option to overwrite the directory contents if it already exists.
            This will overwrite CSV files one at a time, so if the dataset has different arrays to the previous saved models, you will get a mix of old and new files.
            Defaults to False.

    """
    io.save_csv(self._model_data, path, dropna, allow_overwrite)
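
A usage sketch; the directory name is illustrative:

```python
# Write one CSV file per data array into a (hypothetical) "outputs" directory.
model.to_csv("outputs", dropna=True, allow_overwrite=False)
```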

to_netcdf(path)

Save complete model data (inputs and, if available, results) to a NetCDF file at the given path.

Source code in src/calliope/model.py
def to_netcdf(self, path):
    """Save complete model data (inputs and, if available, results) to a NetCDF file at the given `path`."""
    saved_attrs = {}
    for attr in set(self.ATTRS_SAVED) & set(self.__dict__.keys()):
        if attr == "config":
            saved_attrs[attr] = self.config.model_dump()
        elif not isinstance(getattr(self, attr), str | list | None):
            saved_attrs[attr] = dict(getattr(self, attr))
        else:
            saved_attrs[attr] = getattr(self, attr)

    io.save_netcdf(self._model_data, path, **saved_attrs)
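
A usage sketch; the file name is illustrative, and reloading assumes the `calliope.read_netcdf` reader is available:

```python
# Round-trip the model through NetCDF.
model.to_netcdf("model_out.nc")                  # hypothetical file name
reloaded = calliope.read_netcdf("model_out.nc")  # assumes calliope.read_netcdf exists
```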