Skip to content

Commit

Permalink
Merge branch 'main' into ps/reeds
Browse files Browse the repository at this point in the history
  • Loading branch information
pesap authored Sep 18, 2024
2 parents cea5a34 + e963011 commit c8d8cf8
Show file tree
Hide file tree
Showing 4 changed files with 83 additions and 26 deletions.
9 changes: 7 additions & 2 deletions src/r2x/api.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""R2X API for data model."""

import csv
import json
from collections.abc import Callable
from os import PathLike
from pathlib import Path
Expand Down Expand Up @@ -51,7 +52,7 @@ def export_component_to_csv(
# Get desired components to offload to csv
components = map(
lambda component: component.model_dump(
exclude={"ext"}, exclude_none=True, mode="json", context={"magnitude_only": True}
exclude={}, exclude_none=True, mode="json", context={"magnitude_only": True}
),
self.get_components(component, filter_func=filter_func),
)
Expand Down Expand Up @@ -163,7 +164,11 @@ def _export_dict_to_csv(
writer.writeheader()
for row in data:
filter_row = {
key: value if not isinstance(value, dict) else value.get(unnest_key)
key: json.dumps(value)
if key == "ext" and isinstance(value, dict)
else value
if not isinstance(value, dict)
else value.get(unnest_key)
for key, value in row.items()
}
writer.writerow(filter_row)
Expand Down
19 changes: 3 additions & 16 deletions src/r2x/exporter/sienna.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,16 +147,11 @@ def process_branch_data(self, fname: str = "branch.csv") -> None:
"b",
"rate",
"branch_type",
"rating_up",
"rating_down",
"ext",
]

# NOTE: We need to decide what we do if the user provides a rate or bi-directional rate
# if "rate" in output_df.columns:
# output_df["rate"] = output_df["rate"].fillna(
# (output_df["rating_up"] + np.abs(output_df["rating_down"])) / 2
# )
# else:
# output_df["rate"] = (output_df["rating_up"] + np.abs(output_df["rating_down"])) / 2

self.system.export_component_to_csv(
ACBranch,
fpath=self.output_folder / fname,
Expand All @@ -169,7 +164,6 @@ def process_branch_data(self, fname: str = "branch.csv") -> None:
"rating": "rate",
"b": "primary_shunt",
},
# restval=0.0,
)
logger.info(f"File {fname} created.")

Expand All @@ -189,13 +183,6 @@ def process_dc_branch_data(self, fname="dc_branch.csv") -> None:
"loss",
]

# NOTE: We need to decide what we do if the user provides a rate or bi-directional rate
# if "rate" in output_df.columns:
# output_df["rate"] = output_df["rate"].fillna(
# (output_df["rating_up"] + np.abs(output_df["rating_down"])) / 2
# )
# else:
# output_df["rate"] = (output_df["rating_up"] + np.abs(output_df["rating_down"])) / 2
self.system.export_component_to_csv(
DCBranch,
fpath=self.output_folder / fname,
Expand Down
4 changes: 4 additions & 0 deletions src/r2x/parser/parser_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@ def prepare_ext_field(valid_fields, extra_fields):
"""Cleanses the extra fields by removing any timeseries data"""
if extra_fields:
# Implement any filtering of ext_data here
# logger.debug("Extra fields: {}", extra_fields)
# remove any non eligible datatypes from extra fields
eligible_datatypes = [str, int, float, bool]
extra_fields = {k: v for k, v in extra_fields.items() if type(v) in eligible_datatypes}
valid_fields["ext"] = extra_fields
else:
valid_fields["ext"] = {}
Expand Down
77 changes: 69 additions & 8 deletions src/r2x/parser/plexos.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@
"timeslice_tag": pl.String,
"timeslice": pl.String,
"timeslice_value": pl.Float32,
"data_text": pl.String,
}
COLUMNS = [
"name",
Expand Down Expand Up @@ -286,6 +287,7 @@ def _get_fuel_prices(self):
"variable",
"action",
"variable_tag",
"variable_default",
"timeslice",
"timeslice_value",
]
Expand Down Expand Up @@ -421,7 +423,7 @@ def _construct_branches(self, default_model=MonitoredLine):
)
for line in lines_pivot.iter_rows(named=True):
line_properties_mapped = {self.property_map.get(key, key): value for key, value in line.items()}
line_properties_mapped["rating"] = line_properties_mapped.pop("max_power_flow", None)
line_properties_mapped["rating"] = line_properties_mapped.get("max_power_flow", None)
line_properties_mapped["rating_up"] = line_properties_mapped.pop("max_power_flow", None)
line_properties_mapped["rating_down"] = line_properties_mapped.pop("min_power_flow", None)

Expand Down Expand Up @@ -517,7 +519,6 @@ def _construct_generators(self):
system_generators = (pl.col("child_class_name") == ClassEnum.Generator.name) & (
pl.col("parent_class_name") == ClassEnum.System.name
)

system_generators = self._get_model_data(system_generators)
if getattr(self.config.feature_flags, "plexos-csv", None):
system_generators.write_csv("generators.csv")
Expand Down Expand Up @@ -568,6 +569,7 @@ def _construct_generators(self):
"variable",
"action",
"variable_tag",
"variable_default",
"timeslice",
"timeslice_value",
]
Expand Down Expand Up @@ -609,7 +611,6 @@ def _construct_generators(self):
# TODO(pesap): Remove base_mva once it is not required field on PowerSystems.jl
# https://github.com/NREL/R2X/issues/39
mapped_records["base_mva"] = 1

valid_fields, ext_data = field_filter(mapped_records, model_map.model_fields)

ts_fields = {k: v for k, v in mapped_records.items() if isinstance(v, SingleTimeSeries)}
Expand Down Expand Up @@ -681,7 +682,6 @@ def _add_generator_reserves(self):
)
continue
reserve_map.mapping[reserve_object.name].append(generator.name)

return

def _construct_batteries(self):
Expand All @@ -708,6 +708,7 @@ def _construct_batteries(self):
"variable",
"action",
"variable_tag",
"variable_default",
"timeslice",
"timeslice_value",
]
Expand All @@ -725,7 +726,6 @@ def _construct_batteries(self):
mapped_records["prime_mover_type"] = PrimeMoversType.BA

valid_fields, ext_data = field_filter(mapped_records, GenericBattery.model_fields)

valid_fields = self._set_unit_availability(valid_fields)
if valid_fields is None:
continue
Expand Down Expand Up @@ -785,6 +785,7 @@ def _add_battery_reserves(self):
parent_class=ClassEnum.Reserve,
collection=CollectionEnum.Batteries,
)

for battery in self.system.get_components(GenericBattery):
reserves = [membership for membership in generator_memberships if membership[3] == battery.name]
if reserves:
Expand Down Expand Up @@ -1103,7 +1104,7 @@ def _get_model_data(self, data_filter) -> pl.DataFrame:
variable_filter = (
(pl.col("child_class_name") == ClassEnum.Variable.name)
& (pl.col("parent_class_name") == ClassEnum.System.name)
& (pl.col("data_file").is_not_null())
& (pl.col("property_name") != "Sampling Method")
)
variable_scenario_data = None
if scenario_specific_data is not None and scenario_filter is not None:
Expand All @@ -1115,8 +1116,65 @@ def _get_model_data(self, data_filter) -> pl.DataFrame:
variable_base_data = self.plexos_data.filter(variable_filter & base_case_filter)
if variable_base_data is not None and variable_scenario_data is not None:
variable_data = pl.concat([variable_scenario_data, variable_base_data])
system_data = self._join_variable_data(system_data, variable_data)

return self._join_variable_data(system_data, variable_data)
# Get System Data Files
# drop column named data_file and replace it with correct scenario-filtered datafile
# system_data.drop_in_place("data_file")
datafile_data = None
datafile_filter = (pl.col("child_class_name") == ClassEnum.DataFile.value) & (
pl.col("parent_class_name") == ClassEnum.System.name
)
datafile_scenario_data = None
if scenario_specific_data is not None and scenario_filter is not None:
datafile_scenario_data = self.plexos_data.filter(datafile_filter & scenario_filter)

if datafile_scenario_data is not None:
datafile_base_data = self.plexos_data.filter(datafile_filter & pl.col("scenario").is_null())
else:
datafile_base_data = self.plexos_data.filter(datafile_filter & base_case_filter)
if datafile_base_data is not None and datafile_scenario_data is not None:
datafile_data = pl.concat([datafile_scenario_data, datafile_base_data])
system_data = self._join_datafile_data(system_data, datafile_data)
return system_data

def _join_datafile_data(self, system_data, datafile_data):
    """Join system data with datafile data.

    For every Data File object in ``datafile_data`` (grouped by ``name``),
    select exactly one row — preferring a scenario-tagged row, otherwise
    the row with the lowest ``band`` — and substitute its ``data_text``
    value in as ``system_data``'s ``data_file`` column, joining on
    ``data_file_tag``. When ``datafile_data`` is empty, a null
    ``data_file_sc`` column is appended instead and the original
    ``data_file`` column is left untouched.
    """
    # Filter datafiles
    if datafile_data.height > 0:
        results = []
        grouped = datafile_data.group_by("name")
        for group_name, group_df in grouped:
            if group_df.height > 1:
                # Check if any scenario_name exists
                scenario_exists = group_df.filter(pl.col("scenario").is_not_null())

                if scenario_exists.height > 0:
                    # Select the first row with a scenario_name
                    selected_row = scenario_exists[0]
                else:
                    # If no scenario_name, select the row with the lowest band_id
                    selected_row = group_df.sort("band").head(1)[0]
            else:
                # If the group has only one row, select that row
                selected_row = group_df[0]
            results.append(
                {
                    # group_by keys come back as a tuple; take the name itself
                    "name": group_name[0],
                    # the scenario-resolved file path lives in data_text
                    "data_file_sc": selected_row["data_text"][0],
                }
            )
        datafiles_filtered = pl.DataFrame(results)
        system_data = system_data.join(
            datafiles_filtered, left_on="data_file_tag", right_on="name", how="left"
        )
        # Replace system_data["data_file"] with system_data["data_file_sc"]:
        # drop the stale column in place, then rename the joined one over it.
        system_data.drop_in_place("data_file")
        system_data = system_data.rename({"data_file_sc": "data_file"})
    else:
        # NOTE(review): consider hoisting this default-column handling to a
        # single place instead of repeating it at each call site. Also note
        # the asymmetry with the branch above: here the column is named
        # data_file_sc and data_file is NOT replaced — confirm intended.
        system_data = system_data.with_columns(pl.lit(None).alias("data_file_sc"))
    return system_data

def _join_variable_data(self, system_data, variable_data):
"""Join system data with variable data."""
Expand All @@ -1138,12 +1196,12 @@ def _join_variable_data(self, system_data, variable_data):
else:
# If the group has only one row, select that row
selected_row = group_df[0]

results.append(
{
"name": group_name[0],
"variable_name": selected_row["data_file_tag"][0],
"variable": selected_row["data_file"][0],
"variable_default": selected_row["property_value"][0],
}
)
variables_filtered = pl.DataFrame(results)
Expand Down Expand Up @@ -1415,9 +1473,12 @@ def _get_value(self, prop_value, unit, record, record_name):
if data_file is None and record.get("data_file"):
return None

var_default = record.get("variable_default")
variable = (
self._csv_file_handler(record.get("variable_tag"), record.get("variable"))
if record.get("variable")
else var_default
if var_default != 0
else None
)

Expand Down

0 comments on commit c8d8cf8

Please sign in to comment.