Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: Correct parsing of TS_NMDH for plexos parser #97

Draft
wants to merge 6 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions src/r2x/defaults/config.json
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,18 @@
"fuel": "GAS",
"type": null
},
{
"fuel": "GAS",
"type": "CC"
},
{
"fuel": "GAS",
"type": "IG"
},
{
"fuel": "GAS",
"type": "IT"
},
{
"fuel": "OT",
"type": null
Expand Down
41 changes: 41 additions & 0 deletions src/r2x/defaults/reeds_us_mapping.json
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,20 @@
"optional": true,
"units": "tonne"
},
"co2_incentive": {
"column_mapping": {
"allt": "year",
"i": "tech",
"r": "region",
"v": "vintage",
"value": "incentive"
},
"description": "Capture incentive in $/ton",
"fname": "co2_captured_incentive.csv",
"input": true,
"optional": true,
"units": "$/ton"
},
"cost_vom": {
"column_mapping": {
"i": "tech",
Expand Down Expand Up @@ -105,6 +119,21 @@
"optional": true,
"units": "MW"
},
"emission_capture_rate": {
"column_mapping": {
"e": "pollutant",
"i": "tech",
"r": "region",
"t": "year",
"v": "vintage",
"value": "capture_rate"
},
"description": "Capture rate for given CCS technology.",
"fname": "capture_rate.csv",
"input": true,
"optional": true,
"units": "ton/MWh"
},
"emission_rates": {
"column_mapping": {
"eall": "emission_type",
Expand Down Expand Up @@ -438,6 +467,18 @@
"fname": "tran_out.csv",
"units": "MW"
},
"upgrade_link": {
"column_mapping": {
"*to": "to",
"delta": "delta",
"from": "from"
},
"description": "Upgrade for certain technologies",
"fname": "upgrade_link.csv",
"input": true,
"optional": true,
"units": "-"
},
"years": {
"column_mapping": {},
"fname": "modeledyears.csv",
Expand Down
3 changes: 3 additions & 0 deletions src/r2x/exporter/plexos.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from plexosdb import PlexosSQLite
from plexosdb.enums import ClassEnum, CollectionEnum
from r2x.exporter.utils import (
apply_extract_key,
apply_flatten_key,
apply_pint_deconstruction,
apply_property_map,
Expand Down Expand Up @@ -50,6 +51,7 @@
NESTED_ATTRIBUTES = ["ext", "bus", "services"]
TIME_SERIES_PROPERTIES = ["Min Provision", "Static Risk"]
DEFAULT_XML_TEMPLATE = "master_9.2R6_btu.xml"
EXT_PROPERTIES = {"UoS Charge"}


def cli_arguments(parser: ArgumentParser):
Expand Down Expand Up @@ -229,6 +231,7 @@ def insert_component_properties(
export_records = get_export_records(
records,
partial(apply_operation_cost),
partial(apply_extract_key, key="ext", keys_to_extract=EXT_PROPERTIES),
partial(apply_flatten_key, keys_to_flatten={"active_power_limits", "active_power_flow_limits"}),
partial(apply_property_map, property_map=property_map),
partial(apply_pint_deconstruction, unit_map=self.default_units),
Expand Down
36 changes: 36 additions & 0 deletions src/r2x/exporter/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -318,3 +318,39 @@ def apply_flatten_key(d: dict[str, Any], keys_to_flatten: set[str]) -> dict[str,
flattened_dict[key] = val

return flattened_dict


def apply_extract_key(d: dict[str, Any], key: str, keys_to_extract: set[str]) -> dict[str, Any]:
    """Promote selected keys from a nested dictionary to the top level.

    Parameters
    ----------
    d : dict
        The input dictionary.
    key : str
        Top-level key whose value is the nested dictionary to extract from.
    keys_to_extract : set of str
        Keys inside ``d[key]`` that will be copied to the top level.

    Returns
    -------
    dict
        A new dictionary with the selected nested keys copied to the top
        level; the nested dictionary itself is left in place. The input is
        returned unchanged when ``key`` is absent, when any of
        ``keys_to_extract`` already exists at the top level (to avoid
        clobbering), or when none of them appear in the nested dictionary.

    Examples
    --------
    >>> d = {"ext": {"rate": 1, "other": 2}, "z": 42}
    >>> apply_extract_key(d, "ext", {"rate"})
    {'ext': {'rate': 1, 'other': 2}, 'z': 42, 'rate': 1}

    >>> apply_extract_key({"z": 42}, "ext", {"rate"})
    {'z': 42}
    """
    # Nothing to do if the container key is absent, or extraction would
    # overwrite an existing top-level key.
    if key not in d or any(k in d for k in keys_to_extract):
        return d

    nested = d[key]
    # Skip when none of the requested keys exist in the nested dictionary.
    if not any(k in nested for k in keys_to_extract):
        return d

    extracted = {k: v for k, v in nested.items() if k in keys_to_extract}
    return {**d, **extracted}
4 changes: 4 additions & 0 deletions src/r2x/parser/handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,10 @@ def csv_handler(fpath: Path, csv_file_encoding="utf8", **kwargs) -> pl.DataFrame
logger.warning("File {} could not be parse due to dtype problems. See error.", fpath)
raise

if data_file.is_empty():
logger.debug("File {} is empty. Skipping it.", fpath)
return

data_file = pl_lowercase(data_file)

return data_file
Expand Down
5 changes: 3 additions & 2 deletions src/r2x/parser/plexos.py
Original file line number Diff line number Diff line change
Expand Up @@ -1225,7 +1225,7 @@ def _set_unit_capacity(self, record): # noqa: C901
return record

def _get_active_power_limits(self, record) -> MinMax:
assert record["base_power"] is not None
# assert record["base_power"] is not None
if active_power_min := record.get("min_rated_capacity"):
if isinstance(active_power_min, SingleTimeSeries):
active_power_min = np.nanmin(active_power_min.data)
Expand Down Expand Up @@ -1385,6 +1385,7 @@ def _data_file_handler(
parsed_file = parsed_file.unique(subset=columns_to_check).sort(pl.all())

# We reconcile the time series data using the hourly time stamp given by the solve year

parsed_file = reconcile_timeseries(parsed_file, hourly_time_index=self.hourly_time_index)
assert (
"value" in parsed_file.columns
Expand All @@ -1398,7 +1399,7 @@ def _create_columns_to_check(self, column_type: DATAFILE_COLUMNS):
for column in column_type.value
if column in ["name", "pattern", "year", "datetime", "month", "day", "period", "hour"]
]
if column_type == DATAFILE_COLUMNS.TS_YMDH:
if column_type == DATAFILE_COLUMNS.TS_YMDH or column_type == DATAFILE_COLUMNS.TS_NMDH:
columns_to_check.append("hour")
if column_type == DATAFILE_COLUMNS.TS_NM:
columns_to_check.append("month")
Expand Down
24 changes: 12 additions & 12 deletions src/r2x/parser/reeds.py
Original file line number Diff line number Diff line change
Expand Up @@ -691,9 +691,9 @@ def _construct_hydro_budgets(self) -> None:
hydro_cf,
month_hrs,
)
month_of_hour = np.array(
[dt.astype("datetime64[M]").astype(int) % 12 + 1 for dt in self.hourly_time_index]
)
# month_of_hour = np.array(
# [dt.astype("datetime64[M]").astype(int) % 12 + 1 for dt in self.hourly_time_index]
# )
month_of_day = np.array(
[dt.astype("datetime64[M]").astype(int) % 12 + 1 for dt in self.daily_time_index]
)
Expand All @@ -707,9 +707,9 @@ def _construct_hydro_budgets(self) -> None:
region = generator.bus.name
hydro_ratings = hydro_data.filter((pl.col("tech") == tech) & (pl.col("region") == region))

hourly_time_series = np.zeros(len(month_of_hour), dtype=float)
if self.config.feature_flags.get("daily-budgets", None):
hourly_time_series = np.zeros(len(month_of_day), dtype=float)
# hourly_time_series = np.zeros(len(month_of_hour), dtype=float)
# if self.config.feature_flags.get("daily-budgets", None):
hourly_time_series = np.zeros(len(month_of_day), dtype=float)

for row in hydro_ratings.iter_rows(named=True):
month = row["month"]
Expand All @@ -719,12 +719,12 @@ def _construct_hydro_budgets(self) -> None:
month_max_budget = (
generator.active_power * Percentage(row["hydro_cf"], "") * Time(row["hrs"], "h")
)
if self.config.feature_flags.get("daily-budgets", None):
daily_max_budget = month_max_budget / (row["hrs"] / 24)
hourly_time_series[month_of_day == month] = daily_max_budget.magnitude
else:
month_indices = month_of_hour == month
hourly_time_series[month_indices] = month_max_budget.magnitude
# if self.config.feature_flags.get("daily-budgets", None):
daily_max_budget = month_max_budget / (row["hrs"] / 24)
hourly_time_series[month_of_day == month] = daily_max_budget.magnitude
# else:
# month_indices = month_of_hour == month
# hourly_time_series[month_indices] = month_max_budget.magnitude

ts = SingleTimeSeries.from_array(
Energy(hourly_time_series / 1e3, "GWh"),
Expand Down
93 changes: 93 additions & 0 deletions src/r2x/plugins/ccs_credit.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
"""Plugin to add CCS incentive to the model.

This plugin is only applicable for ReEDS, but could work with similarly arranged data
"""

import polars as pl
from loguru import logger

from r2x.api import System
from r2x.units import ureg
from r2x.config import Scenario
from r2x.models.generators import Generator
from r2x.parser.handler import BaseParser


def update_system(
    config: Scenario,
    system: System,
    parser: BaseParser | None = None,
) -> System:
    """Apply CCS incentive to CCS eligible technologies.

    The incentive is calculated with the capture incentive ($/ton) and capture rate
    (ton/MWh), to produce a subtractor ($/MWh) implemented with PLEXOS' "Use of
    Service Charge".

    Parameters
    ----------
    config : Scenario
        The scenario configuration.
    system : System
        The system object to be updated.
    parser : BaseParser, optional
        The parser object holding the input data tables. When missing, the
        plugin is skipped and the system is returned unchanged.

    Returns
    -------
    System
        The updated system (unchanged when required inputs are missing).

    Raises
    ------
    NotImplementedError
        When neither the output model is PLEXOS nor the input model is ReEDS.

    Notes
    -----
    The names of some of the columns for the parser data are specified in the `reeds_us_mapping.json`.
    """
    # NOTE(review): this guard raises only when BOTH models are incompatible.
    # If the plugin requires a PLEXOS output AND a ReEDS input simultaneously,
    # `or` is intended here -- confirm before tightening. Behavior kept as-is.
    if config.output_model != "plexos" and config.input_model != "reeds-US":
        msg = "Plugin `ccs_credit.py` is not compatible with a model that is not Plexos or ReEDs input."
        raise NotImplementedError(msg)

    if parser is None:
        msg = "Missing parser information for ccs_credit. Skipping plugin."
        logger.debug(msg)
        return system

    # `parser` is guaranteed non-None past this point; check the tables directly.
    required_files = ["co2_incentive", "emission_capture_rate", "upgrade_link"]
    if not all(key in parser.data for key in required_files):
        logger.warning("Missing required files for ccs_credit. Skipping plugin.")
        return system

    production_rate = parser.data["emission_capture_rate"]

    # Some technologies on ReEDS are eligible for incentive but have not been upgraded yet. Since the
    # co2_incentive does not capture all the possible technologies, we get the technologies before upgrading
    # and if they exist in the system we apply the incentive.
    incentive = parser.data["co2_incentive"].join(
        parser.data["upgrade_link"], left_on="tech", right_on="to", how="left"
    )
    # `unique()` already returns unique values; extend with the pre-upgrade techs.
    ccs_techs = incentive["tech"].unique()
    ccs_techs = ccs_techs.extend(incentive["from"].unique())

    for generator in system.get_components(
        Generator, filter_func=lambda gen: gen.ext and gen.ext["reeds_tech"] in ccs_techs
    ):
        reeds_tech = generator.ext["reeds_tech"]
        reeds_vintage = generator.ext["reeds_vintage"]
        reeds_tech_mask = (
            (pl.col("tech") == reeds_tech)
            & (pl.col("region") == generator.bus.name)
            & (pl.col("vintage") == reeds_vintage)
        )
        generator_production_rate = production_rate.filter(reeds_tech_mask)

        if generator_production_rate.is_empty():
            msg = f"Generator {generator.name=} does not appear on the production rate file. Skipping it."
            logger.debug(msg)
            continue

        # Match either the upgraded tech directly or its pre-upgrade ancestor.
        upgrade_mask = (
            (pl.col("from") == reeds_tech)
            & (pl.col("region") == generator.bus.name)
            & (pl.col("vintage") == reeds_vintage)
        )
        generator_incentive = incentive.filter(reeds_tech_mask.or_(upgrade_mask))["incentive"].item()
        # Negative quantity so the incentive reduces cost in the objective function.
        generator.ext["UoS Charge"] = ureg.Quantity(
            -generator_incentive * generator_production_rate["capture_rate"].item(),
            "usd/MWh",
        )
    return system
27 changes: 27 additions & 0 deletions tests/test_exporter_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
from pint import Quantity
from r2x.exporter.utils import (
apply_default_value,
apply_extract_key,
apply_flatten_key,
apply_property_map,
apply_unnest_key,
Expand Down Expand Up @@ -176,3 +177,29 @@ def test_apply_default_value():
default_value_map = {"year": 2024, "month": "October"}
result = apply_default_value(component, default_value_map)
assert result == {"year": 2024, "month": "October"}


def test_extract_key():
    """Exercise apply_extract_key across its extraction and no-op paths."""
    # A single nested key is promoted to the top level.
    single = {"name": "example", "ext": {"TestNested": 1.0}}
    promoted = apply_extract_key(single, key="ext", keys_to_extract={"TestNested"})
    assert promoted is not None
    assert promoted.get("TestNested", None) is not None
    assert promoted["TestNested"] == 1.0

    # Every requested nested key is promoted when several are present.
    double = {"name": "example", "ext": {"TestNested": 1.0, "TestNested2": "test"}}
    promoted = apply_extract_key(double, key="ext", keys_to_extract={"TestNested", "TestNested2"})
    assert promoted is not None
    assert promoted.get("TestNested", None) is not None
    assert promoted["TestNested"] == 1.0
    assert promoted.get("TestNested2", None) is not None
    assert promoted["TestNested2"] == "test"

    # Missing container key: the input comes back unchanged.
    bare = {"name": "example"}
    untouched = apply_extract_key(bare, key="ext", keys_to_extract={"TestNested", "TestNested2"})
    assert untouched is not None
    assert untouched == bare

    # None of the requested keys exist in the nested dict: no-op as well.
    unmatched = {"name": "example", "ext": {"TestNested": 1.0, "TestNested2": "test"}}
    untouched = apply_extract_key(unmatched, key="ext", keys_to_extract={"Test"})
    assert untouched is not None
    assert untouched == unmatched
Loading