Example #1
    def get_dataframe(self,
                      element_class,
                      prop,
                      element_name,
                      real_only=False,
                      **kwargs):
        """Return the dataframe for an element.

        Parameters
        ----------
        element_class : str
        prop : str
        element_name : str
        real_only : bool
            If dtype of any column is complex, drop the imaginary component.
        kwargs : **kwargs
            Filter on options. Option values can be strings or regular expressions.

        Returns
        -------
        pd.DataFrame

        Raises
        ------
        InvalidParameter
            Raised if the element is not stored.

        """
        if element_name not in self._elem_props:
            raise InvalidParameter(f"element {element_name} is not stored")

        elem_group = self._group[element_class][element_name]
        dataset = elem_group[prop]
        df = DatasetBuffer.to_dataframe(dataset)

        if kwargs:
            options = self._check_options(element_class, prop, **kwargs)
            columns = ValueStorageBase.get_columns(df, element_name, options,
                                                   **kwargs)
            df = df[columns]

        if self._data_format_version == "1.0.0":
            dataset_property_type = DatasetPropertyType.ELEMENT_PROPERTY
        else:
            dataset_property_type = get_dataset_property_type(dataset)
        if dataset_property_type == DatasetPropertyType.FILTERED:
            timestamp_path = get_timestamp_path(dataset)
            timestamp_dataset = self._hdf_store[timestamp_path]
            df["Timestamp"] = DatasetBuffer.to_datetime(timestamp_dataset)
            df.set_index("Timestamp", inplace=True)
        else:
            self._add_indices_to_dataframe(df)

        if real_only:
            for column in df.columns:
                if df[column].dtype == complex:
                    df[column] = [x.real for x in df[column]]

        return df
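
A minimal usage sketch for the accessor above; the results object, element class, property, and option name are hypothetical and depend on what a scenario exported:

def example_usage(results):
    # `results` is assumed to be an object exposing get_dataframe() as above.
    # Drop imaginary components of any complex columns.
    df = results.get_dataframe("Loads", "Powers", "Load.load1", real_only=True)
    # kwargs filter on options; values may be plain strings or regular expressions
    # ("phase_terminal" is a hypothetical option name used only for illustration).
    df_a = results.get_dataframe("Loads", "Powers", "Load.load1", phase_terminal=r"A\d")
    return df, df_a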
Example #2
def test_dataset_buffer__max_num_bytes():
    filename = os.path.join(tempfile.gettempdir(), "store.h5")
    try:
        with h5py.File(filename, "w") as store:
            columns = ("1", "2", "3", "4")
            dataset = DatasetBuffer(store, "data", 100, float, columns)
            assert dataset.max_num_bytes() == 3200
    finally:
        if os.path.exists(filename):
            os.remove(filename)
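
The expected value follows directly from the buffer dimensions, assuming 8-byte floats:

rows, cols, itemsize = 100, 4, 8        # max_size, len(columns), bytes per float64
assert rows * cols * itemsize == 3200   # matches dataset.max_num_bytes()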
Example #3
def test_dataset_buffer__compute_chunk_count():
    one_year_at_5_minutes = 60 / 5 * 24 * 365
    assert DatasetBuffer.compute_chunk_count(num_columns=4,
                                             max_size=96,
                                             dtype=float) == 96
    assert DatasetBuffer.compute_chunk_count(num_columns=4,
                                             max_size=one_year_at_5_minutes,
                                             dtype=float) == 1024
    assert DatasetBuffer.compute_chunk_count(num_columns=6,
                                             max_size=one_year_at_5_minutes,
                                             dtype=complex) == 341
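
The expected counts are consistent with a chunk that fits as many rows as possible into a byte budget (apparently 32 KiB by default here) and is capped at max_size. A sketch of that arithmetic, stated as an assumption rather than the library's actual implementation:

import numpy as np

def approx_chunk_count(num_columns, max_size, dtype, max_chunk_bytes=32 * 1024):
    # Assumed rule: rows per chunk limited by the byte budget, capped at max_size.
    row_bytes = num_columns * np.dtype(dtype).itemsize
    return min(int(max_size), max_chunk_bytes // row_bytes)

assert approx_chunk_count(4, 96, float) == 96               # capped at max_size
assert approx_chunk_count(4, 60 / 5 * 24 * 365, float) == 1024
assert approx_chunk_count(6, 60 / 5 * 24 * 365, complex) == 341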
Example #4
    def __init__(self,
                 value,
                 hdf_store,
                 path,
                 max_size,
                 dataset_property_type,
                 max_chunk_bytes=None,
                 store_timestamp=False):
        group_name = os.path.dirname(path)
        basename = os.path.basename(path)
        try:
            if basename in hdf_store[group_name].keys():
                raise InvalidParameter(f"duplicate dataset name {basename}")
        except KeyError:
            # Don't bother checking each sub path.
            pass

        dtype = self._TYPE_MAPPING.get(value.value_type)
        assert dtype is not None
        scaleoffset = None
        if dtype == np.float64:
            scaleoffset = 4
        elif dtype == np.int64:
            scaleoffset = 0
        attributes = {"type": dataset_property_type.value}
        timestamp_path = None

        if store_timestamp:
            timestamp_path = self.timestamp_path(path)
            self._timestamps = DatasetBuffer(
                hdf_store,
                timestamp_path,
                max_size,
                np.float64,
                ["Timestamp"],
                scaleoffset=scaleoffset,
                max_chunk_bytes=max_chunk_bytes,
                attributes={"type": DatasetPropertyType.TIMESTAMP.value},
            )
            attributes["timestamp_path"] = timestamp_path
        else:
            self._timestamps = None

        self._dataset = DatasetBuffer(
            hdf_store,
            path,
            max_size,
            dtype,
            value.make_columns(),
            scaleoffset=scaleoffset,
            max_chunk_bytes=max_chunk_bytes,
            attributes=attributes,
        )
Example #5
def test_dataset_buffer__write_value():
    filename = os.path.join(tempfile.gettempdir(), "store.h5")
    try:
        with h5py.File(filename, "w") as store:
            columns = ("1", "2", "3", "4")
            max_size = 5000
            dataset = DatasetBuffer(store, "data", max_size, float, columns,
                                    max_chunk_bytes=128 * 1024)
            assert dataset.chunk_count == 4096
            for i in range(max_size):
                data = np.ones(4)
                dataset.write_value(data)
            assert dataset._buf_index == max_size - dataset.chunk_count
            dataset.flush_data()
            assert dataset._buf_index == 0

        with h5py.File(filename, "r") as store:
            data = store["data"][:]
            assert len(data) == max_size
            actual_columns = DatasetBuffer.get_columns(store["data"])
            assert [x for x in actual_columns] == list(columns)
            for i in range(max_size):
                for j in range(4):
                    assert data[i][j] == 1.0

            df = DatasetBuffer.to_dataframe(store["data"])
            assert isinstance(df, pd.DataFrame)
            assert len(df) == max_size
            assert df.iloc[0, 0] == 1.0
    finally:
        if os.path.exists(filename):
            os.remove(filename)
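
The intermediate assertion follows from the chunk size: after 5000 writes with a 4096-row chunk, one full chunk has already been flushed to HDF5 and the remainder is still buffered:

max_size, chunk_count = 5000, 4096
assert max_size - chunk_count == 904    # rows held in memory until flush_data()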
Example #6
    def _parse_datasets(self):
        for elem_class in self._elem_classes:
            class_group = self._group[elem_class]
            if "ElementProperties" in class_group:
                prop_group = class_group["ElementProperties"]
                for prop, dataset in prop_group.items():
                    dataset_property_type = get_dataset_property_type(dataset)
                    if dataset_property_type == DatasetPropertyType.TIME_STEP:
                        continue
                    if dataset_property_type == DatasetPropertyType.VALUE:
                        self._elem_values_by_prop[elem_class][prop] = []
                        prop_names = self._elem_values_by_prop
                    elif dataset_property_type in (
                            DatasetPropertyType.PER_TIME_POINT,
                            DatasetPropertyType.FILTERED,
                    ):
                        self._elem_data_by_prop[elem_class][prop] = []
                        prop_names = self._elem_data_by_prop
                    else:
                        continue

                    self._props_by_class[elem_class].append(prop)
                    self._elem_indices_by_prop[elem_class][prop] = {}
                    names = DatasetBuffer.get_names(dataset)
                    self._column_ranges_per_elem[elem_class][prop] = \
                        DatasetBuffer.get_column_ranges(dataset)
                    for i, name in enumerate(names):
                        self._elems_by_class[elem_class].add(name)
                        prop_names[elem_class][prop].append(name)
                        self._elem_indices_by_prop[elem_class][prop][name] = i
                        self._elem_props[name].append(prop)
            else:
                self._elems_by_class[elem_class] = set()

            summed_elem_props = self._group[elem_class].get(
                "SummedElementProperties", [])
            for prop in summed_elem_props:
                dataset = self._group[elem_class]["SummedElementProperties"][
                    prop]
                dataset_property_type = get_dataset_property_type(dataset)
                if dataset_property_type == DatasetPropertyType.VALUE:
                    df = DatasetBuffer.to_dataframe(dataset)
                    assert len(df) == 1
                    self._summed_elem_props[elem_class][prop] = {
                        x: df[x].values[0]
                        for x in df.columns
                    }
                elif dataset_property_type == DatasetPropertyType.PER_TIME_POINT:
                    self._summed_elem_timeseries_props[elem_class].append(prop)
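
A hedged illustration of the lookup tables this method populates; the class, property, and element names below are hypothetical:

props_by_class = {"Lines": ["Currents", "Powers"]}
elem_indices_by_prop = {"Lines": {"Powers": {"Line.one": 0, "Line.two": 1}}}
elem_props = {"Line.one": ["Currents", "Powers"]}

# The per-element index is what later slices one element's columns
# out of the shared per-property dataset.
assert elem_indices_by_prop["Lines"]["Powers"]["Line.two"] == 1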
Example #7
def test_dataset_buffer__compute_chunk_count():
    one_year_at_5_minutes = 60 / 5 * 24 * 365
    assert DatasetBuffer.compute_chunk_count(
        num_columns=4,
        max_size=96,
        dtype=float
    ) == 96
    assert DatasetBuffer.compute_chunk_count(
        num_columns=4,
        max_size=one_year_at_5_minutes,
        dtype=float,
        max_chunk_bytes=128 * 1024,
    ) == 4096
    assert DatasetBuffer.compute_chunk_count(
        num_columns=6,
        max_size=one_year_at_5_minutes,
        dtype=complex,
        max_chunk_bytes=128 * 1024,
    ) == 1365
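
The same assumed arithmetic explains the explicit 128 KiB budget used in this newer version of the test:

assert 128 * 1024 // (4 * 8) == 4096    # 4 float64 columns per row
assert 128 * 1024 // (6 * 16) == 1365   # 6 complex128 columns per row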
Example #8
    def InitializeDataStore(self, hdf_store, num_steps, MC_scenario_number=None):
        if MC_scenario_number is not None:
            self._scenario = self._base_scenario + f"_MC{MC_scenario_number}"
        self._hdf_store = hdf_store
        self._time_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Timestamp",
            max_size=num_steps,
            dtype=float,
            columns=("Timestamp",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._frequency_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Frequency",
            max_size=num_steps,
            dtype=float,
            columns=("Frequency",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._mode_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Mode",
            max_size=num_steps,
            dtype="S10",
            columns=("Mode",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._cur_step = 0

        base_path = "Exports/" + self._scenario
        for metric in self._iter_metrics():
            metric.initialize_data_store(hdf_store, base_path, num_steps)
Example #9
    def InitializeDataStore(self,
                            hdf_store,
                            num_steps,
                            MC_scenario_number=None):
        if MC_scenario_number is not None:
            self._scenario = self._base_scenario + f"_MC{MC_scenario_number}"
        self._hdf_store = hdf_store
        self._time_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Timestamp",
            max_size=num_steps,
            dtype=float,
            columns=("Timestamp", ),
            max_chunk_bytes=self._max_chunk_bytes)
        self._frequency_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Frequency",
            max_size=num_steps,
            dtype=float,
            columns=("Frequency", ),
            max_chunk_bytes=self._max_chunk_bytes)
        self._mode_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Mode",
            max_size=num_steps,
            dtype="S10",
            columns=("Mode", ),
            max_chunk_bytes=self._max_chunk_bytes)

        for element in self._elements:
            element.initialize_data_store(hdf_store, self._scenario, num_steps)
Example #10
    def get_summed_element_dataframe(self,
                                     element_class,
                                     prop,
                                     real_only=False,
                                     abs_val=False,
                                     group=None):
        """Return the dataframe for a summed element property.

        Parameters
        ----------
        element_class : str
        prop : str
        group : str | None
            Specify a group name if sum_groups was assigned.
        real_only : bool
            If dtype of any column is complex, drop the imaginary component.
        abs_val : bool
            If dtype of any column is complex, compute its absolute value.

        Returns
        -------
        pd.DataFrame

        Raises
        ------
        InvalidParameter
            Raised if the element class is not stored.

        """
        if group is not None:
            prop = ValueStorageBase.DELIMITER.join((prop, group))
        if element_class not in self._summed_elem_timeseries_props:
            raise InvalidParameter(f"{element_class} is not stored")
        if prop not in self._summed_elem_timeseries_props[element_class]:
            raise InvalidParameter(f"{prop} is not stored")

        elem_group = self._group[element_class]["SummedElementProperties"]
        dataset = elem_group[prop]
        df = DatasetBuffer.to_dataframe(dataset)
        self._add_indices_to_dataframe(df)

        if real_only:
            for column in df.columns:
                if df[column].dtype == complex:
                    df[column] = np.real(df[column])
        elif abs_val:
            for column in df.columns:
                if df[column].dtype == complex:
                    df[column] = df[column].apply(np.absolute)

        return df
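
A hedged usage sketch; the results object and the class, property, and group names are assumptions:

def example_summed_usage(results):
    # `results` is assumed to expose get_summed_element_dataframe() as above.
    df = results.get_summed_element_dataframe("Lines", "Powers", real_only=True)
    # If sum_groups was assigned, request one group by name (the name is hypothetical).
    df_group = results.get_summed_element_dataframe("Lines", "Powers", group="group1")
    return df, df_group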
Example #11
def test_export_overloads(mocked_func, simulation_settings):
    data1 = {
        "property": "ExportLoadingsMetric",
        "store_values_type": "all",
        "opendss_classes": ["Lines", "Transformers"],
    }
    prop1 = ExportListProperty("CktElement", data1)
    data2 = {
        "property": "ExportLoadingsMetric",
        "store_values_type": "max",
        "opendss_classes": ["Lines", "Transformers"],
    }
    prop2 = ExportListProperty("CktElement", data2)
    num_time_steps = NUM_LOADINGS_FILES
    metric = ExportLoadingsMetric(prop1, OBJS, simulation_settings)
    metric.add_property(prop2)
    with h5py.File(STORE_FILENAME, mode="w", driver="core") as hdf_store:
        metric.initialize_data_store(hdf_store, "", num_time_steps)
        global overloads_file_id
        for i in range(num_time_steps):
            metric.append_values(i)
            overloads_file_id += 1
        metric.close()

        dataset1 = hdf_store[
            "CktElement/ElementProperties/ExportLoadingsMetric"]
        assert dataset1.attrs["length"] == num_time_steps
        assert dataset1.attrs["type"] == "per_time_point"
        df = DatasetBuffer.to_dataframe(dataset1)
        assert isinstance(df, pd.DataFrame)
        assert [x for x in df["Line.one__Loading"].values] == LINE_1_VALUES
        assert [x for x in df["Line.two__Loading"].values] == LINE_2_VALUES
        assert [x for x in df["Transformer.one__Loading"].values
                ] == TRANSFORMER_1_VALUES
        assert [x for x in df["Transformer.two__Loading"].values
                ] == TRANSFORMER_2_VALUES

        dataset2 = hdf_store[
            "CktElement/ElementProperties/ExportLoadingsMetricMax"]
        assert dataset2.attrs["length"] == 1
        assert dataset2.attrs["type"] == "value"
        assert dataset2[0][0] == max(LINE_1_VALUES)
        assert dataset2[0][1] == max(LINE_2_VALUES)
        assert dataset2[0][2] == max(TRANSFORMER_1_VALUES)
        assert dataset2[0][3] == max(TRANSFORMER_2_VALUES)
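
The column names checked above join the element's full name and the value label with the storage delimiter; a small sanity check of that convention, assuming the delimiter is the double underscore seen in the expected names:

DELIMITER = "__"    # assumed value of ValueStorageBase.DELIMITER
assert DELIMITER.join(["Line.one", "Loading"]) == "Line.one__Loading"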
Example #12
    def _export_summed_element_timeseries(self, path, fmt, compress):
        for elem_class in self._summed_elem_timeseries_props:
            for prop in self._summed_elem_timeseries_props[elem_class]:
                fields = prop.split(ValueStorageBase.DELIMITER)
                if len(fields) == 1:
                    base = ValueStorageBase.DELIMITER.join([elem_class, prop])
                else:
                    assert len(fields) == 2, fields
                    # This will be <elem_class>__<prop>__<group>
                    base = ValueStorageBase.DELIMITER.join([elem_class, prop])
                filename = os.path.join(path, base + "." + fmt.replace(".", ""))
                dataset = self._group[elem_class]["SummedElementProperties"][prop]
                prop_type = get_dataset_property_type(dataset)
                if prop_type == DatasetPropertyType.PER_TIME_POINT:
                    df = DatasetBuffer.to_dataframe(dataset)
                    self._finalize_dataframe(df, dataset)
                    write_dataframe(df, filename, compress=compress)
Example #13
    def get_full_dataframe(self,
                           element_class,
                           prop,
                           real_only=False,
                           abs_val=False,
                           **kwargs):
        """Return a dataframe containing all data.  The dataframe is copied.

        Parameters
        ----------
        element_class : str
        prop : str
        real_only : bool
            If dtype of any column is complex, drop the imaginary component.
        abs_val : bool
            If dtype of any column is complex, compute its absolute value.
        kwargs
            Filter on options; values can be strings or regular expressions.

        Returns
        -------
        pd.DataFrame

        """
        if prop not in self.list_element_properties(element_class):
            raise InvalidParameter(f"property {prop} is not stored")

        dataset = self._group[f"{element_class}/ElementProperties/{prop}"]
        df = DatasetBuffer.to_dataframe(dataset)
        if kwargs:
            options = self._check_options(element_class, prop, **kwargs)
            names = self._elems_by_class.get(element_class, set())
            columns = ValueStorageBase.get_columns(df, names, options,
                                                   **kwargs)
            columns = list(columns)
            columns.sort()
            df = df[columns]
        self._finalize_dataframe(df,
                                 dataset,
                                 real_only=real_only,
                                 abs_val=abs_val)
        return df
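
A hedged usage sketch for the full-dataframe accessor; names and the option keyword are assumptions:

def example_full_usage(results):
    # `results` is assumed to expose get_full_dataframe() as above.
    # Every stored column for the property, with magnitudes of complex values.
    df = results.get_full_dataframe("Loads", "Powers", abs_val=True)
    # kwargs filter; values may be strings or regular expressions
    # ("phase_terminal" is a hypothetical option name).
    df_a = results.get_full_dataframe("Loads", "Powers", phase_terminal=r"A\d")
    return df, df_a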
Example #14
    def _finalize_dataframe(self, df, dataset, real_only=False, abs_val=False):
        if df.empty:
            return
        dataset_property_type = get_dataset_property_type(dataset)
        if dataset_property_type == DatasetPropertyType.FILTERED:
            time_step_path = get_time_step_path(dataset)
            time_step_dataset = self._hdf_store[time_step_path]
            df["TimeStep"] = DatasetBuffer.to_datetime(time_step_dataset)
            df.set_index("TimeStep", inplace=True)
        else:
            self._add_indices_to_dataframe(df)

        if real_only:
            for column in df.columns:
                if df[column].dtype == complex:
                    df[column] = np.real(df[column])
        elif abs_val:
            for column in df.columns:
                if df[column].dtype == complex:
                    df[column] = df[column].apply(np.absolute)
Example #15
    def _get_elem_prop_dataframe(self,
                                 elem_class,
                                 prop,
                                 name,
                                 dataset,
                                 real_only=False,
                                 abs_val=False,
                                 **kwargs):
        col_range = self._get_element_column_range(elem_class, prop, name)
        df = DatasetBuffer.to_dataframe(dataset, column_range=col_range)

        if kwargs:
            options = self._check_options(elem_class, prop, **kwargs)
            columns = ValueStorageBase.get_columns(df, name, options, **kwargs)
            df = df[columns]

        self._finalize_dataframe(df,
                                 dataset,
                                 real_only=real_only,
                                 abs_val=abs_val)
        return df
Example #16
    def _get_filtered_dataframe(self,
                                elem_class,
                                prop,
                                name,
                                dataset,
                                real_only=False,
                                abs_val=False,
                                **kwargs):
        indices_df = self._get_indices_df()
        elem_index = self._elem_indices_by_prop[elem_class][prop][name]
        length = dataset.attrs["length"]
        data_vals = dataset[:length]

        # The time_step_dataset has these columns:
        # 1. time step index
        # 2. element index
        # Each row describes the source data in the dataset row.
        path = dataset.attrs["time_step_path"]
        time_step_data = self._hdf_store[path][:length]

        assert length == self._hdf_store[path].attrs["length"]
        data = []
        timestamps = []
        for i in range(length):
            stored_elem_index = time_step_data[:, 1][i]
            if stored_elem_index == elem_index:
                ts_index = time_step_data[:, 0][i]
                # TODO DT: more than one column?
                val = data_vals[i, 0]
                # TODO: profile this vs a df operation at end
                if real_only:
                    val = val.real
                elif abs_val:
                    val = abs(val)
                data.append(val)
                timestamps.append(indices_df.iloc[ts_index, 0])

        columns = self._fix_columns(name, DatasetBuffer.get_columns(dataset))
        return pd.DataFrame(data, columns=columns, index=timestamps)
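
A toy illustration of the bookkeeping described in the comments above: each row of the time-step dataset holds a (time step index, element index) pair and lines up row-for-row with the filtered values:

import numpy as np

data_vals = np.array([[1.05], [0.98], [1.10]])        # filtered values, one row each
time_step_data = np.array([[0, 2],                    # time step 0, element index 2
                           [0, 5],                    # time step 0, element index 5
                           [3, 2]])                   # time step 3, element index 2

elem_index = 2
mask = time_step_data[:, 1] == elem_index
assert list(time_step_data[mask][:, 0]) == [0, 3]     # time steps for element 2
assert list(data_vals[mask][:, 0]) == [1.05, 1.10]    # its stored values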
Example #17
class ResultData:
    """Exports data to files."""

    METADATA_FILENAME = "metadata.json"
    INDICES_BASENAME = "indices"

    def __init__(self, options, system_paths, dss_objects,
                 dss_objects_by_class, dss_buses, dss_solver, dss_command,
                 dss_instance):
        if options["Logging"]["Pre-configured logging"]:
            logger_tag = __name__
        else:
            logger_tag = getLoggerTag(options)
        self._logger = logging.getLogger(logger_tag)
        self._dss_solver = dss_solver
        self._results = {}
        self._buses = dss_buses
        self._objects_by_element = dss_objects
        self._objects_by_class = dss_objects_by_class
        self.system_paths = system_paths
        self._elements = []
        self._options = options

        self._dss_command = dss_command
        self._dss_instance = dss_instance
        self._start_day = options["Project"]["Start Day"]
        self._end_day = options["Project"]["End Day"]
        self._time_dataset = None
        self._frequency_dataset = None
        self._mode_dataset = None
        self._simulation_mode = []
        self._hdf_store = None
        self._scenario = options["Project"]["Active Scenario"]
        self._base_scenario = options["Project"]["Active Scenario"]
        self._export_format = options["Exports"]["Export Format"]
        self._export_compression = options["Exports"]["Export Compression"]
        self._export_iteration_order = options["Exports"][
            "Export Iteration Order"]
        self._max_chunk_bytes = options["Exports"]["HDF Max Chunk Bytes"]
        self._export_dir = os.path.join(
            self.system_paths["Export"],
            options["Project"]["Active Scenario"],
        )
        # Use / because this is used in HDFStore
        self._export_relative_dir = f"Exports/" + options["Project"][
            "Active Scenario"]
        self._store_frequency = False
        self._store_mode = False
        self.CurrentResults = {}
        if options["Project"]["Simulation Type"] == "Dynamic" or \
                options["Frequency"]["Enable frequency sweep"]:
            self._store_frequency = True
            self._store_mode = True

        if options["Exports"]["Export Mode"] == "byElement":
            raise InvalidParameter(
                "Export Mode 'byElement' is not supported by ResultData")

        pathlib.Path(self._export_dir).mkdir(parents=True, exist_ok=True)

        export_list_filename = os.path.join(
            system_paths["ExportLists"],
            "Exports.toml",
        )
        if not os.path.exists(export_list_filename):
            export_list_filename = os.path.join(
                system_paths["ExportLists"],
                "ExportMode-byClass.toml",
            )
        self._export_list = ExportListReader(export_list_filename)
        Reports.append_required_exports(self._export_list, options)
        self._create_exports()

    def _create_exports(self):
        elements = {}  # element name to ElementData
        for elem_class in self._export_list.list_element_classes():
            if elem_class == "Buses":
                objs = self._buses
            elif elem_class in self._objects_by_class:
                objs = self._objects_by_class[elem_class]
            else:
                continue
            for name, obj in objs.items():
                if not obj.Enabled:
                    continue
                for prop in self._export_list.iter_export_properties(
                        elem_class=elem_class):
                    if prop.custom_function is None and not obj.IsValidAttribute(
                            prop.name):
                        raise InvalidParameter(
                            f"{name} / {prop.name} cannot be exported")
                    if prop.should_store_name(name):
                        if name not in elements:
                            elements[name] = ElementData(
                                name,
                                obj,
                                max_chunk_bytes=self._max_chunk_bytes,
                                options=self._options)
                        elements[name].append_property(prop)
                        self._logger.debug("Store %s %s name=%s", elem_class,
                                           prop.name, name)

        self._elements = elements.values()

    def InitializeDataStore(self,
                            hdf_store,
                            num_steps,
                            MC_scenario_number=None):
        if MC_scenario_number is not None:
            self._scenario = self._base_scenario + f"_MC{MC_scenario_number}"
        self._hdf_store = hdf_store
        self._time_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Timestamp",
            max_size=num_steps,
            dtype=float,
            columns=("Timestamp", ),
            max_chunk_bytes=self._max_chunk_bytes)
        self._frequency_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Frequency",
            max_size=num_steps,
            dtype=float,
            columns=("Frequency", ),
            max_chunk_bytes=self._max_chunk_bytes)
        self._mode_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Mode",
            max_size=num_steps,
            dtype="S10",
            columns=("Mode", ),
            max_chunk_bytes=self._max_chunk_bytes)

        for element in self._elements:
            element.initialize_data_store(hdf_store, self._scenario, num_steps)

    def UpdateResults(self):
        self.CurrentResults.clear()

        timestamp = self._dss_solver.GetDateTime().timestamp()
        self._time_dataset.write_value(timestamp)
        self._frequency_dataset.write_value(self._dss_solver.getFrequency())
        self._mode_dataset.write_value(self._dss_solver.getMode())

        for elem in self._elements:
            data = elem.append_values(timestamp)
            self.CurrentResults.update(data)
        return self.CurrentResults

    def ExportResults(self, fileprefix=""):
        self.FlushData()
        for element in self._elements:
            element.export_change_counts()
            element.export_sums()

        metadata = {
            "event_log": None,
            "element_info_files": [],
        }

        if self._options["Exports"]["Export Event Log"]:
            self._export_event_log(metadata)
        if self._options["Exports"]["Export Elements"]:
            self._export_elements(metadata)
            self._export_feeder_head_info(metadata)
        if self._options["Exports"]["Export PV Profiles"]:
            self._export_pv_profiles()

        filename = os.path.join(self._export_dir, self.METADATA_FILENAME)
        dump_data(metadata, filename, indent=4)
        self._logger.info("Exported metadata to %s", filename)
        self._hdf_store = None

    def FlushData(self):
        for dataset in (self._time_dataset, self._frequency_dataset,
                        self._mode_dataset):
            dataset.flush_data()
        for element in self._elements:
            element.flush_data()

    def _export_event_log(self, metadata):
        # TODO: move to a base class
        event_log = "event_log.csv"
        file_path = os.path.join(self._export_dir, event_log)
        if os.path.exists(file_path):
            os.remove(file_path)

        orig = os.getcwd()
        os.chdir(self._export_dir)
        try:
            cmd = "Export EventLog {}".format(event_log)
            out = self._dss_command(cmd)
            if out != event_log:
                raise Exception(f"Failed to export EventLog:  {out}")
            self._logger.info("Exported OpenDSS event log to %s", out)
            metadata["event_log"] = self._export_relative_dir + f"/{event_log}"
        finally:
            os.chdir(orig)

    def _export_dataframe(self, df, basename):
        filename = basename + "." + self._export_format
        write_dataframe(df, filename, compress=self._export_compression)
        self._logger.info("Exported %s", filename)

    def _find_feeder_head_line(self):
        dss = self._dss_instance
        feeder_head_line = None

        flag = dss.Topology.First()

        while flag > 0:

            if 'line' in dss.Topology.BranchName().lower():
                feeder_head_line = dss.Topology.BranchName()
                break

            else:
                flag = dss.Topology.Next()

        return feeder_head_line

    def _get_feeder_head_loading(self):
        dss = self._dss_instance
        head_line = self._find_feeder_head_line()
        if head_line is not None:
            flag = dss.Circuit.SetActiveElement(head_line)

            if flag > 0:
                n_phases = dss.CktElement.NumPhases()
                max_amp = dss.CktElement.NormalAmps()
                Currents = dss.CktElement.CurrentsMagAng()[:2 * n_phases]
                Current_magnitude = Currents[::2]

                max_flow = max(max(Current_magnitude), 1e-10)
                loading = max_flow / max_amp

                return loading

            else:
                return None
        else:
            return None

    def _reverse_powerflow(self):
        dss = self._dss_instance

        # Total substation power is an injection (-) or a consumption (+).
        reverse_pf = max(dss.Circuit.TotalPower()) > 0

        return reverse_pf

    def _export_feeder_head_info(self, metadata):
        """
        Gets feeder head information comprising:
        1- The name of the feeder head line
        2- The feeder head loading in per unit
        3- The feeder head load in (kW, kVar). Negative in case of power injection
        4- The reverse power flow flag. True if power is flowing back to the feeder head, False otherwise
        """

        dss = self._dss_instance
        if not "feeder_head_info_files" in metadata.keys():
            metadata["feeder_head_info_files"] = []

        df_dict = {
            "FeederHeadLine": self._find_feeder_head_line(),
            "FeederHeadLoading": self._get_feeder_head_loading(),
            "FeederHeadLoad": dss.Circuit.TotalPower(),
            "ReversePowerFlow": self._reverse_powerflow()
        }

        #df = pd.DataFrame.from_dict(df_dict)

        filename = "FeederHeadInfo"
        fname = filename + ".json"

        relpath = os.path.join(self._export_relative_dir, fname)
        filepath = os.path.join(self._export_dir, fname)
        #write_dataframe(df, filepath)
        dump_data(df_dict, filepath)
        metadata["feeder_head_info_files"].append(relpath)
        self._logger.info("Exported %s information to %s.", filename, filepath)

    def _export_elements(self, metadata):
        dss = self._dss_instance
        exports = (
            # TODO: opendssdirect does not provide a function to export Bus information.
            ("CapacitorsInfo", dss.Capacitors.Count,
             dss.utils.capacitors_to_dataframe),
            ("FusesInfo", dss.Fuses.Count, dss.utils.fuses_to_dataframe),
            ("GeneratorsInfo", dss.Generators.Count,
             dss.utils.generators_to_dataframe),
            ("IsourceInfo", dss.Isource.Count, dss.utils.isource_to_dataframe),
            ("LinesInfo", dss.Lines.Count, dss.utils.lines_to_dataframe),
            ("LoadsInfo", dss.Loads.Count, dss.utils.loads_to_dataframe),
            ("MetersInfo", dss.Meters.Count, dss.utils.meters_to_dataframe),
            ("MonitorsInfo", dss.Monitors.Count,
             dss.utils.monitors_to_dataframe),
            ("PVSystemsInfo", dss.PVsystems.Count,
             dss.utils.pvsystems_to_dataframe),
            ("ReclosersInfo", dss.Reclosers.Count,
             dss.utils.reclosers_to_dataframe),
            ("RegControlsInfo", dss.RegControls.Count,
             dss.utils.regcontrols_to_dataframe),
            ("RelaysInfo", dss.Relays.Count, dss.utils.relays_to_dataframe),
            ("SensorsInfo", dss.Sensors.Count, dss.utils.sensors_to_dataframe),
            ("TransformersInfo", dss.Transformers.Count,
             dss.utils.transformers_to_dataframe),
            ("VsourcesInfo", dss.Vsources.Count,
             dss.utils.vsources_to_dataframe),
            ("XYCurvesInfo", dss.XYCurves.Count,
             dss.utils.xycurves_to_dataframe),
            # TODO This can be very large. Consider making it configurable.
            #("LoadShapeInfo", dss.LoadShape.Count, dss.utils.loadshape_to_dataframe),
        )

        for filename, count_func, get_func in exports:
            if count_func() > 0:
                df = get_func()
                # Always record in CSV format for readability.
                # There are also warning messages from PyTables because the
                # data may contain strings.
                fname = filename + ".csv"
                relpath = os.path.join(self._export_relative_dir, fname)
                filepath = os.path.join(self._export_dir, fname)
                write_dataframe(df, filepath)
                metadata["element_info_files"].append(relpath)
                self._logger.info("Exported %s information to %s.", filename,
                                  filepath)

        self._export_transformers(metadata)

    def _export_transformers(self, metadata):
        dss = self._dss_instance
        df_dict = {
            "Transformer": [],
            "HighSideConnection": [],
            "NumPhases": []
        }

        dss.Circuit.SetActiveClass("Transformer")
        flag = dss.ActiveClass.First()
        while flag > 0:
            name = dss.CktElement.Name()
            df_dict["Transformer"].append(name)
            df_dict["HighSideConnection"].append(
                dss.Properties.Value("conns").split("[")[1].split(",")
                [0].strip(" ").lower())
            df_dict["NumPhases"].append(dss.CktElement.NumPhases())
            flag = dss.ActiveClass.Next()

        df = pd.DataFrame.from_dict(df_dict)

        relpath = os.path.join(self._export_relative_dir,
                               "TransformersPhaseInfo.csv")
        filepath = os.path.join(self._export_dir, "TransformersPhaseInfo.csv")
        write_dataframe(df, filepath)
        metadata["element_info_files"].append(relpath)
        self._logger.info("Exported transformer phase information to %s",
                          filepath)

    def _export_pv_profiles(self):
        dss = self._dss_instance
        pv_systems = self._objects_by_class.get("PVSystems")
        if pv_systems is None:
            raise InvalidConfiguration("PVSystems are not exported")

        pv_infos = []
        profiles = set()
        for full_name, obj in pv_systems.items():
            profile_name = obj.GetParameter("yearly").lower()
            if profile_name != "":
                profiles.add(profile_name)
            pv_infos.append({
                "irradiance": obj.GetParameter("irradiance"),
                "name": full_name,
                "pmpp": obj.GetParameter("pmpp"),
                "load_shape_profile": profile_name,
            })

        pmult_sums = {}
        dss.LoadShape.First()
        sim_resolution = self._options["Project"]["Step resolution (sec)"]
        while True:
            name = dss.LoadShape.Name().lower()
            if name in profiles:
                sinterval = dss.LoadShape.SInterval()
                assert sim_resolution >= sinterval
                offset = int(sim_resolution / sinterval)
                pmult_sums[name] = sum(dss.LoadShape.PMult()[::offset])
            if dss.LoadShape.Next() == 0:
                break

        for pv_info in pv_infos:
            profile = pv_info["load_shape_profile"]
            if profile == "":
                pv_info["load_shape_pmult_sum"] = 0
            else:
                pv_info["load_shape_pmult_sum"] = pmult_sums[profile]

        data = {"pv_systems": pv_infos}
        filename = os.path.join(self._export_dir, "pv_profiles.json")
        dump_data(data, filename, indent=2)
        self._logger.info("Exported PV profile information to %s", filename)

    @staticmethod
    def get_units(prop, index=None):
        units = unit_info.get(prop)
        if units is None:
            raise InvalidParameter(f"no units are stored for {prop}")

        if isinstance(units, dict):
            if index is None:
                raise InvalidParameter(f"index must be provided for {prop}")
            if index == 0:
                return units["E"]
            if index == 1:
                return units["O"]
            raise InvalidParameter("index must be 0 or 1")

        return units

    def max_num_bytes(self):
        """Return the maximum number of bytes the container could hold.

        Returns
        -------
        int

        """
        total = 0
        for element in self._elements:
            total += element.max_num_bytes()
        return total
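
A hedged illustration of the dict-versus-scalar logic in get_units above, using a stand-in unit_info mapping; the real keys and units are defined elsewhere in the package:

unit_info = {"Frequency": "Hz", "CurrentsMagAng": {"E": "Amps", "O": "Degrees"}}

def get_units(prop, index=None):
    units = unit_info[prop]
    if isinstance(units, dict):
        # A dual-entry property requires an index: 0 -> "E", 1 -> "O".
        return {0: units["E"], 1: units["O"]}[index]
    return units

assert get_units("Frequency") == "Hz"
assert get_units("CurrentsMagAng", 0) == "Amps"
assert get_units("CurrentsMagAng", 1) == "Degrees"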
Example #18
    def __init__(self,
                 values,
                 hdf_store,
                 path,
                 max_size,
                 elem_names,
                 dataset_property_type,
                 max_chunk_bytes=None,
                 store_time_step=False):
        group_name = os.path.dirname(path)
        basename = os.path.basename(path)
        try:
            if basename in hdf_store[group_name]:
                raise InvalidParameter(f"duplicate dataset name {basename}")
        except KeyError:
            # Don't bother checking each sub path.
            pass

        dtype = values[0].value_type
        scaleoffset = None
        # There is no np.float128 on Windows.
        if dtype in (float, np.float32, np.float64, np.longdouble):
            scaleoffset = 4
        time_step_path = None
        max_size = max_size * len(values) if store_time_step else max_size

        if store_time_step:
            # Store indices for time step and element.
            # Each row of this dataset corresponds to a row in the data.
            # This will be required to interpret the raw data.
            attributes = {"type": DatasetPropertyType.TIME_STEP.value}
            time_step_path = self.time_step_path(path)
            self._time_steps = DatasetBuffer(
                hdf_store,
                time_step_path,
                max_size,
                int,
                ["Time", "Name"],
                scaleoffset=0,
                max_chunk_bytes=max_chunk_bytes,
                attributes=attributes,
            )
            columns = []
            tmp_columns = values[0].make_columns()
            for column in tmp_columns:
                fields = column.split(ValueStorageBase.DELIMITER)
                fields[0] = "AllNames"
                columns.append(ValueStorageBase.DELIMITER.join(fields))
            column_ranges = [0, len(tmp_columns)]
        else:
            columns = []
            column_ranges = []
            col_index = 0
            for value in values:
                tmp_columns = value.make_columns()
                col_range = (col_index, len(tmp_columns))
                column_ranges.append(col_range)
                for column in tmp_columns:
                    columns.append(column)
                    col_index += 1
            self._time_steps = None

        attributes = {"type": dataset_property_type.value}
        if store_time_step:
            attributes["time_step_path"] = time_step_path

        self._dataset = DatasetBuffer(
            hdf_store,
            path,
            max_size,
            dtype,
            columns,
            scaleoffset=scaleoffset,
            max_chunk_bytes=max_chunk_bytes,
            attributes=attributes,
            names=elem_names,
            column_ranges_per_name=column_ranges,
        )
Example #19
def test_export_powers(mocked_func, simulation_settings):
    data1 = {
        "property": "ExportPowersMetric",
        "store_values_type": "all",
        "opendss_classes": ["Lines", "Loads", "PVSystems", "Transformers"],
    }
    prop1 = ExportListProperty("CktElement", data1)
    data2 = {
        "property": "ExportPowersMetric",
        "store_values_type": "max",
        "opendss_classes": ["Lines", "Loads", "PVSystems", "Transformers"],
    }
    prop2 = ExportListProperty("CktElement", data2)
    data3 = {
        "property": "ExportPowersMetric",
        "store_values_type": "sum",
        "opendss_classes": ["Lines", "Loads", "PVSystems", "Transformers"],
    }
    prop3 = ExportListProperty("CktElement", data3)
    num_time_steps = NUM_POWERS_FILES
    metric = ExportPowersMetric(prop1, OBJS, simulation_settings)
    metric.add_property(prop2)
    metric.add_property(prop3)
    with h5py.File(STORE_FILENAME, mode="w", driver="core") as hdf_store:
        metric.initialize_data_store(hdf_store, "", num_time_steps)
        global powers_file_id
        for i in range(num_time_steps):
            metric.append_values(i)
            powers_file_id += 1
        metric.close()

        dataset1 = hdf_store["CktElement/ElementProperties/ExportPowersMetric"]
        assert dataset1.attrs["length"] == num_time_steps
        assert dataset1.attrs["type"] == "per_time_point"
        df = DatasetBuffer.to_dataframe(dataset1)
        assert isinstance(df, pd.DataFrame)
        assert [x for x in df["Load.one__Powers"].values] == LOAD_1_VALUES
        assert [x for x in df["Load.two__Powers"].values] == LOAD_2_VALUES
        assert [x for x in df["PVSystem.one__Powers"].values
                ] == PV_SYSTEM_1_VALUES
        assert [x for x in df["PVSystem.two__Powers"].values
                ] == PV_SYSTEM_2_VALUES

        dataset2 = hdf_store[
            "CktElement/ElementProperties/ExportPowersMetricMax"]
        assert dataset2.attrs["length"] == 1
        assert dataset2.attrs["type"] == "value"
        # Loads occupy columns 2-3, PVSystems columns 4-5
        assert dataset2[0][2] == max(LOAD_1_VALUES)
        assert dataset2[0][3] == max(LOAD_2_VALUES)
        assert dataset2[0][4] == max(PV_SYSTEM_1_VALUES)
        assert dataset2[0][5] == max(PV_SYSTEM_2_VALUES)

        dataset3 = hdf_store[
            "CktElement/ElementProperties/ExportPowersMetricSum"]
        assert dataset3.attrs["length"] == 1
        assert dataset3.attrs["type"] == "value"
        assert dataset3[0][2] == sum(LOAD_1_VALUES)
        assert dataset3[0][3] == sum(LOAD_2_VALUES)
        assert dataset3[0][4] == sum(PV_SYSTEM_1_VALUES)
        assert dataset3[0][5] == sum(PV_SYSTEM_2_VALUES)
Example #20
class ValueContainer:
    """Container for a sequence of instances of ValueStorageBase."""

    # These could potentially be reduced in bit lengths. Compression probably
    # makes that unnecessary.
    _TYPE_MAPPING = {
        float: np.float64,
        int: np.int64,
        complex: np.complex128,
    }

    def __init__(self,
                 value,
                 hdf_store,
                 path,
                 max_size,
                 dataset_property_type,
                 max_chunk_bytes=None,
                 store_timestamp=False):
        group_name = os.path.dirname(path)
        basename = os.path.basename(path)
        try:
            if basename in hdf_store[group_name].keys():
                raise InvalidParameter(f"duplicate dataset name {basename}")
        except KeyError:
            # Don't bother checking each sub path.
            pass

        dtype = self._TYPE_MAPPING.get(value.value_type)
        assert dtype is not None
        scaleoffset = None
        if dtype == np.float64:
            scaleoffset = 4
        elif dtype == np.int64:
            scaleoffset = 0
        attributes = {"type": dataset_property_type.value}
        timestamp_path = None

        if store_timestamp:
            timestamp_path = self.timestamp_path(path)
            self._timestamps = DatasetBuffer(
                hdf_store,
                timestamp_path,
                max_size,
                np.float64,
                ["Timestamp"],
                scaleoffset=scaleoffset,
                max_chunk_bytes=max_chunk_bytes,
                attributes={"type": DatasetPropertyType.TIMESTAMP.value},
            )
            attributes["timestamp_path"] = timestamp_path
        else:
            self._timestamps = None

        self._dataset = DatasetBuffer(
            hdf_store,
            path,
            max_size,
            dtype,
            value.make_columns(),
            scaleoffset=scaleoffset,
            max_chunk_bytes=max_chunk_bytes,
            attributes=attributes,
        )

    @staticmethod
    def timestamp_path(path):
        return path + "Timestamp"

    def append(self, value, timestamp=None):
        """Append a value to the container.

        Parameters
        ----------
        value : ValueStorageBase
        timestamp : float | None

        """
        self._dataset.write_value(value.value)
        if self._timestamps is not None:
            assert timestamp is not None
            self._timestamps.write_value(timestamp)

    def flush_data(self):
        """Flush any outstanding data to disk."""
        self._dataset.flush_data()
        if self._timestamps is not None:
            self._timestamps.flush_data()

    def max_num_bytes(self):
        """Return the maximum number of bytes the container could hold.

        Returns
        -------
        int

        """
        return self._dataset.max_num_bytes()
Example #21
class ResultData:
    """Exports data to files."""

    METADATA_FILENAME = "metadata.json"
    INDICES_BASENAME = "indices"

    def __init__(self, settings: SimulationSettingsModel, system_paths, dss_objects,
                 dss_objects_by_class, dss_buses, dss_solver, dss_command,
                 dss_instance):
        self._logger = logger
        self._dss_solver = dss_solver
        self._results = {}
        self._buses = dss_buses
        self._objects_by_element = dss_objects
        self._objects_by_class = dss_objects_by_class
        self.system_paths = system_paths
        self._element_metrics = {}  # (elem_class, prop_name) to OpenDssPropertyMetric
        self._summed_element_metrics = {}
        self._settings = settings
        self._cur_step = 0
        self._current_results = {}

        self._dss_command = dss_command
        self._start_day = dss_solver.StartDay
        self._end_day = dss_solver.EndDay
        self._time_dataset = None
        self._frequency_dataset = None
        self._mode_dataset = None
        self._simulation_mode = []
        self._hdf_store = None
        self._scenario = settings.project.active_scenario
        self._base_scenario = settings.project.active_scenario
        self._export_format = settings.exports.export_format
        self._export_compression = settings.exports.export_compression
        self._max_chunk_bytes = settings.exports.hdf_max_chunk_bytes
        self._export_dir = os.path.join(
            self.system_paths["Export"],
            settings.project.active_scenario,
        )
        # Use / because this is used in HDFStore
        self._export_relative_dir = "Exports/" + settings.project.active_scenario
        self._store_frequency = False
        self._store_mode = False
        if settings.frequency.enable_frequency_sweep:
            self._store_frequency = True
            self._store_mode = True

        pathlib.Path(self._export_dir).mkdir(parents=True, exist_ok=True)

        export_list_filename = os.path.join(
            system_paths["ExportLists"],
            "Exports.toml",
        )
        if not os.path.exists(export_list_filename):
            export_list_filename = os.path.join(
                system_paths["ExportLists"],
                "ExportMode-byClass.toml",
            )
        self._export_list = ExportListReader(export_list_filename)
        Reports.append_required_exports(self._export_list, settings)
        dump_data(
            self._export_list.serialize(),
            os.path.join(self._export_dir, "ExportsActual.toml"),
        )
        self._circuit_metrics = {}
        self._create_exports()

    def _create_exports(self):
        for elem_class in self._export_list.list_element_classes():
            if elem_class in ("Buses", "Nodes"):
                objs = self._buses
            elif elem_class in self._objects_by_class:
                objs = self._objects_by_class[elem_class]
            elif elem_class == "FeederHead":
                objs = self._objects_by_class["Circuits"]
            elif elem_class != "CktElement":  # TODO
                continue
            for prop in self._export_list.iter_export_properties(elem_class=elem_class):
                if prop.opendss_classes:
                    dss_objs = []
                    for cls in prop.opendss_classes:
                        if cls not in self._objects_by_class:
                            logger.warning("Export class=%s is not present in the circuit", cls)
                            continue

                        for obj in self._objects_by_class[cls].values():
                            if obj.Enabled and prop.should_store_name(obj.FullName):
                                dss_objs.append(obj)
                else:
                    dss_objs = [x for x in objs.values() if x.Enabled and prop.should_store_name(x.FullName)]
                if prop.custom_metric is None:
                    self._add_opendss_metric(prop, dss_objs)
                else:
                    self._add_custom_metric(prop, dss_objs)

    def _add_opendss_metric(self, prop, dss_objs):
        obj = dss_objs[0]
        if not obj.IsValidAttribute(prop.name):
            raise InvalidParameter(f"{obj.FullName} / {prop.name} cannot be exported")
        key = (prop.elem_class, prop.name)
        if prop.sum_elements:
            metric = self._summed_element_metrics.get(key)
            if metric is None:
                if prop.sum_groups:
                    cls = SummedElementsByGroupOpenDssPropertyMetric
                else:
                    cls = SummedElementsOpenDssPropertyMetric
                metric = cls(prop, dss_objs, self._settings)
                self._summed_element_metrics[key] = metric
            else:
                metric.add_dss_obj(obj)
        else:
            metric = self._element_metrics.get(key)
            if metric is None:
                metric = OpenDssPropertyMetric(prop, dss_objs, self._settings)
                self._element_metrics[key] = metric
            else:
                metric.add_property(prop)

    def _add_custom_metric(self, prop, dss_objs):
        cls = prop.custom_metric
        if cls.is_circuit_wide():
            metric = self._circuit_metrics.get(cls)
            if metric is None:
                metric = cls(prop, dss_objs, self._settings)
                self._circuit_metrics[cls] = metric
            else:
                metric.add_property(prop)
        else:
            key = (prop.elem_class, prop.name)
            metric = self._element_metrics.get(key)
            if metric is None:
                metric = cls(prop, dss_objs, self._settings)
                self._element_metrics[key] = metric
            else:
                metric.add_property(prop)

    def InitializeDataStore(self, hdf_store, num_steps, MC_scenario_number=None):
        if MC_scenario_number is not None:
            self._scenario = self._base_scenario + f"_MC{MC_scenario_number}"
        self._hdf_store = hdf_store
        self._time_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Timestamp",
            max_size=num_steps,
            dtype=float,
            columns=("Timestamp",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._frequency_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Frequency",
            max_size=num_steps,
            dtype=float,
            columns=("Frequency",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._mode_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Mode",
            max_size=num_steps,
            dtype="S10",
            columns=("Mode",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._cur_step = 0

        base_path = "Exports/" + self._scenario
        for metric in self._iter_metrics():
            metric.initialize_data_store(hdf_store, base_path, num_steps)

    def _iter_metrics(self):
        for metric in self._element_metrics.values():
            yield metric
        for metric in self._summed_element_metrics.values():
            yield metric
        for metric in self._circuit_metrics.values():
            yield metric

    @property
    def CurrentResults(self):
        return self._current_results

    @track_timing(timer_stats_collector)
    def UpdateResults(self, store_nan=False):
        self._current_results.clear()

        # Get the number of seconds since the Epoch without any timezone conversions.
        timestamp = (self._dss_solver.GetDateTime() - datetime.utcfromtimestamp(0)).total_seconds()
        self._time_dataset.write_value([timestamp])
        self._frequency_dataset.write_value([self._dss_solver.getFrequency()])
        self._mode_dataset.write_value([self._dss_solver.getMode()])

        for metric in self._iter_metrics():
            with Timer(timer_stats_collector, metric.label()):
                data = metric.append_values(self._cur_step, store_nan=store_nan)

            if isinstance(data, dict):
                # TODO: reconsider
                # Something is only returned for OpenDSS properties
                self._current_results.update(data)

        self._cur_step += 1
        return self._current_results

    def ExportResults(self):
        metadata = {
            "event_log": None,
            "element_info_files": [],
        }

        if self._settings.exports.export_event_log:
            self._export_event_log(metadata)
        if self._settings.exports.export_elements:
            self._export_elements(metadata, set(self._settings.exports.export_element_types))
            self._export_feeder_head_info(metadata)
        if self._settings.exports.export_pv_profiles:
            self._export_pv_profiles()
        if self._settings.exports.export_node_names_by_type:
            self._export_node_names_by_type()

        filename = os.path.join(self._export_dir, self.METADATA_FILENAME)
        dump_data(metadata, filename, indent=4)
        self._logger.info("Exported metadata to %s", filename)
        self._hdf_store = None

    def Close(self):
        for dataset in (self._time_dataset, self._frequency_dataset, self._mode_dataset):
            dataset.flush_data()
        for metric in self._iter_metrics():
            metric.close()

    def _export_event_log(self, metadata):
        event_log = "event_log.csv"
        file_path = os.path.join(self._export_dir, event_log)
        if os.path.exists(file_path):
            os.remove(file_path)

        orig = os.getcwd()
        os.chdir(self._export_dir)
        try:
            cmd = "Export EventLog {}".format(event_log)
            out = self._dss_command(cmd)
            if out != event_log:
                raise Exception(f"Failed to export EventLog:  {out}")
            self._logger.info("Exported OpenDSS event log to %s", out)
            metadata["event_log"] = self._export_relative_dir + f"/{event_log}"
        finally:
            os.chdir(orig)

    def _export_dataframe(self, df, basename):
        filename = basename + "." + self._export_format
        write_dataframe(df, filename, compress=self._export_compression)
        self._logger.info("Exported %s", filename)

    def _find_feeder_head_line(self):
        feeder_head_line = None
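        # Walk the circuit topology from the source; the first branch whose
        # name contains "line" is treated as the feeder head line.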
        flag = dss.Topology.First()
        while flag > 0:
            if 'line' in dss.Topology.BranchName().lower():
                feeder_head_line = dss.Topology.BranchName()
                break
            else:
                flag = dss.Topology.Next()

        return feeder_head_line

    def _get_feeder_head_loading(self):
        head_line = self._find_feeder_head_line()
        if head_line is not None:
            flag = dss.Circuit.SetActiveElement(head_line)

            if flag > 0:
                n_phases = dss.CktElement.NumPhases()
                max_amp = dss.CktElement.NormalAmps()
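                # CurrentsMagAng() returns interleaved (magnitude, angle) pairs;
                # keep the first n_phases pairs and take only the magnitudes.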
                currents = dss.CktElement.CurrentsMagAng()[:2 * n_phases]
                current_magnitude = currents[::2]

                max_flow = max(max(current_magnitude), 1e-10)
                loading = max_flow / max_amp

                return loading
            else:
                return None
        else:
            return None

    def _reverse_powerflow(self):
        # Total substation power is an injection (-) or a consumption (+).
        reverse_pf = dss.Circuit.TotalPower()[0] > 0
        return reverse_pf

    def _export_feeder_head_info(self, metadata):
        """
        Gets feeder head information comprising:
        1- The name of the feeder head line
        2- The feeder head loading in per unit
        3- The feeder head load in (kW, kVar). Negative in case of power injection
        4- The reverse power flow flag. True if power is flowing back to the feeder head, False otherwise
        """
        if not "feeder_head_info_files" in metadata.keys():
            metadata["feeder_head_info_files"] = []

        total_power = dss.Circuit.TotalPower()
        df_dict = {"FeederHeadLine": self._find_feeder_head_line(),
                   "FeederHeadLoading": self._get_feeder_head_loading(),
                   "FeederHeadLoadKW": total_power[0],
                   "FeederHeadLoadKVar": total_power[1],
                   "ReversePowerFlow": self._reverse_powerflow()
                  }

        filename = "FeederHeadInfo"
        fname = filename + ".json"
        relpath = os.path.join(self._export_relative_dir, fname)
        filepath = os.path.join(self._export_dir, fname)

        dump_data(df_dict, filepath)
        metadata["feeder_head_info_files"].append(relpath)
        self._logger.info("Exported %s information to %s.", filename, filepath)

    def _export_elements(self, metadata, element_types):
        exports = [
            # TODO: opendssdirect does not provide a function to export Bus information.
            ("Capacitor", "CapacitorsInfo", dss.Capacitors.Count),
            ("Fuse", "FusesInfo", dss.Fuses.Count),
            ("Generator", "GeneratorsInfo", dss.Generators.Count),
            ("Isource", "IsourceInfo", dss.Isource.Count),
            ("Line", "LinesInfo", dss.Lines.Count),
            ("Load", "LoadsInfo", dss.Loads.Count),
            ("Monitor", "MonitorsInfo", dss.Monitors.Count),
            ("PVSystem", "PVSystemsInfo", dss.PVsystems.Count),
            ("Recloser", "ReclosersInfo", dss.Reclosers.Count),
            ("RegControl", "RegControlsInfo", dss.RegControls.Count),
            ("Relay", "RelaysInfo", dss.Relays.Count),
            ("Sensor", "SensorsInfo", dss.Sensors.Count),
            ("Transformer", "TransformersInfo", dss.Transformers.Count),
            ("Vsource", "VsourcesInfo", dss.Vsources.Count),
            ("XYCurve", "XYCurvesInfo", dss.XYCurves.Count),
            # TODO This can be very large. Consider making it configurable.
            #("LoadShape", "LoadShapeInfo", dss.LoadShape.Count),
        ]
        if element_types:
            types = set()
            for elem_type in element_types:
                if elem_type.endswith("s"):
                    # Maintain compatibility with the old format, which used plural names.
                    elem_type = elem_type[:-1]
                types.add(elem_type)
            exports = [x for x in exports if x[0] in types]

        for class_name, filename, count_func in exports:
            df = dss.utils.class_to_dataframe(class_name)
            # Always record in CSV format for readability.
            # There are also warning messages from PyTables because the
            # data may contain strings.
            fname = filename + ".csv"
            relpath = os.path.join(self._export_relative_dir, fname)
            filepath = os.path.join(self._export_dir, fname)
            write_dataframe(df, filepath)
            metadata["element_info_files"].append(relpath)
            self._logger.info("Exported %s information to %s.", filename, filepath)

        if not element_types or "Transformer" in element_types or "Transformers" in element_types:
            self._export_transformers(metadata)

    def _export_transformers(self, metadata):
        df_dict = {"Transformer": [], "HighSideConnection": [], "NumPhases": []}

        dss.Circuit.SetActiveClass("Transformer")
        flag = dss.ActiveClass.First()
        while flag > 0:
            name = dss.CktElement.Name()
            df_dict["Transformer"].append(name)
            df_dict["HighSideConnection"].append(dss.Properties.Value("conns").split("[")[1].split(",")[0].strip(" ").lower())
            df_dict["NumPhases"].append(dss.CktElement.NumPhases())
            flag = dss.ActiveClass.Next()

        df = pd.DataFrame.from_dict(df_dict)

        relpath = os.path.join(self._export_relative_dir, "TransformersPhaseInfo.csv")
        filepath = os.path.join(self._export_dir, "TransformersPhaseInfo.csv")
        write_dataframe(df, filepath)
        metadata["element_info_files"].append(relpath)
        self._logger.info("Exported transformer phase information to %s", filepath)

    def _export_pv_profiles(self):
        granularity = self._settings.reports.granularity
        pv_systems = self._objects_by_class.get("PVSystems")
        if pv_systems is None:
            logger.info("No PVSystems are present")
            return

        pv_infos = []
        profiles = set()
        for full_name, obj in pv_systems.items():
            profile_name = obj.GetParameter("yearly").lower()
            if profile_name != "":
                profiles.add(profile_name)
            pv_infos.append({
                "irradiance": obj.GetParameter("irradiance"),
                "name": full_name,
                "pmpp": obj.GetParameter("pmpp"),
                "load_shape_profile": profile_name,
            })

        pmult_sums = {}
        if dss.LoadShape.First() == 0:
            self._logger.warning("There are no load shapes.")
            return

        sim_resolution = self._settings.project.step_resolution_sec
        per_time_point = (
            ReportGranularity.PER_ELEMENT_PER_TIME_POINT,
            ReportGranularity.ALL_ELEMENTS_PER_TIME_POINT,
        )
        load_shape_data = {}
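        # Iterate load shapes with the opendssdirect First()/Next() pattern;
        # Next() returns 0 when the collection is exhausted.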
        while True:
            name = dss.LoadShape.Name().lower()
            if name in profiles:
                sinterval = dss.LoadShape.SInterval()
                assert sim_resolution >= sinterval, f"{sim_resolution} >= {sinterval}"
                df = create_loadshape_pmult_dataframe_for_simulation(self._settings)
                sum_values = df.iloc[:, 0].sum()
                pmult_sums[name] = sum_values
                if granularity in per_time_point:
                    load_shape_data[name] = df.iloc[:, 0].values
            if dss.LoadShape.Next() == 0:
                break

        if load_shape_data and granularity in per_time_point:
            filename = os.path.join(self._export_dir, PV_LOAD_SHAPE_FILENAME)
            index = create_datetime_index_from_settings(self._settings)
            df = pd.DataFrame(load_shape_data, index=index)
            write_dataframe(df, filename, compress=True)

        for pv_info in pv_infos:
            profile = pv_info["load_shape_profile"]
            if profile == "":
                pv_info["load_shape_pmult_sum"] = 0
            else:
                pv_info["load_shape_pmult_sum"] = pmult_sums[profile]

        data = {"pv_systems": pv_infos}
        filename = os.path.join(self._export_dir, PV_PROFILES_FILENAME)
        dump_data(data, filename, indent=2)
        self._logger.info("Exported PV profile information to %s", filename)

    def _export_node_names_by_type(self):
        data = get_node_names_by_type()
        filename = os.path.join(self._export_dir, NODE_NAMES_BY_TYPE_FILENAME)
        dump_data(data, filename, indent=2)
        self._logger.info("Exported node names by type to %s", filename)

    @staticmethod
    def get_units(prop, index=None):
        units = unit_info.get(prop)
        if units is None:
            raise InvalidParameter(f"no units are stored for {prop}")

        if isinstance(units, dict):
            if index is None:
                raise InvalidParameter(f"index must be provided for {prop}")
            if index == 0:
                return units["E"]
            if index == 1:
                return units["O"]
            raise InvalidParameter("index must be 0 or 1")

        return units
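
    # Usage note: when unit_info maps a property to a dict, get_units(prop, 0)
    # returns units["E"] and get_units(prop, 1) returns units["O"]; a property
    # with a single unit string needs no index.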

    def max_num_bytes(self):
        """Return the maximum number of bytes the container could hold.

        Returns
        -------
        int

        """
        total = 0
        for metric in self._iter_metrics():
            total += metric.max_num_bytes()
        return total
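
# A minimal driver sketch (illustrative, not part of the source): `exporter`
# stands for an instance of the class above, `hdf_store` for an open h5py.File,
# and `step_simulation` for a caller-supplied callable that advances the
# OpenDSS solution by one time step. It shows the expected call order of the
# data-store methods defined above.
def _example_export_lifecycle(exporter, hdf_store, num_steps, step_simulation):
    exporter.InitializeDataStore(hdf_store, num_steps)
    for _ in range(num_steps):
        step_simulation()
        exporter.UpdateResults()  # buffers Timestamp/Frequency/Mode and metric values
    exporter.ExportResults()      # writes the metadata file and element info files
    exporter.Close()              # flushes the HDF5 dataset buffers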
Example #22
0
    def get_filtered_dataframes(self,
                                element_class,
                                prop,
                                real_only=False,
                                abs_val=False):
        """Return the dataframes for all elements.

        Calling this is much more efficient than calling get_dataframe for each
        element.

        Parameters
        ----------
        element_class : str
        prop : str
        real_only : bool
            If dtype of any column is complex, drop the imaginary component.
        abs_val : bool
            If dtype of any column is complex, compute its absolute value.

        Returns
        -------
        dict
            key = str (name), val = pd.DataFrame
            The dict will be empty if no data was stored.

        """
        if prop not in self.list_element_properties(element_class):
            logger.debug("%s/%s is not stored", element_class, prop)
            return {}

        dataset = self._group[f"{element_class}/ElementProperties/{prop}"]
        columns = DatasetBuffer.get_columns(dataset)
        names = DatasetBuffer.get_names(dataset)
        length = dataset.attrs["length"]
        indices_df = self._get_indices_df()
        data_vals = dataset[:length]
        elem_data = defaultdict(list)
        elem_timestamps = defaultdict(list)

        # The time_step dataset has two columns:
        # 1. time step index
        # 2. element index
        # Row i of this dataset identifies the time step and element for row i
        # of the data dataset.
        path = dataset.attrs["time_step_path"]
        assert length == self._hdf_store[path].attrs["length"]
        time_step_data = self._hdf_store[path][:length]

        for i in range(length):
            ts_index = time_step_data[i, 0]
            elem_index = time_step_data[i, 1]
            # TODO DT: more than one column?
            val = data_vals[i, 0]
            if real_only:
                val = val.real
            elif abs_val:
                val = abs(val)
            elem_data[elem_index].append(val)
            elem_timestamps[elem_index].append(indices_df.iloc[ts_index, 0])

        dfs = {}
        for elem_index, vals in elem_data.items():
            elem_name = names[elem_index]
            cols = self._fix_columns(elem_name, columns)
            dfs[elem_name] = pd.DataFrame(
                vals,
                columns=cols,
                index=elem_timestamps[elem_index],
            )
        return dfs
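
# Usage sketch (illustrative; "Lines" and "CurrentsMagAng" are assumed names,
# not taken from the source): `results` is an instance of the class above.
def _example_filtered_read(results):
    dfs = results.get_filtered_dataframes("Lines", "CurrentsMagAng", abs_val=True)
    for elem_name, df in dfs.items():
        print(elem_name, df.shape)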
Example #23
0
class ValueContainer:
    """Container for a sequence of instances of ValueStorageBase."""
    def __init__(self,
                 values,
                 hdf_store,
                 path,
                 max_size,
                 elem_names,
                 dataset_property_type,
                 max_chunk_bytes=None,
                 store_time_step=False):
        group_name = os.path.dirname(path)
        basename = os.path.basename(path)
        try:
            if basename in hdf_store[group_name]:
                raise InvalidParameter(f"duplicate dataset name {basename}")
        except KeyError:
            # Don't bother checking each sub path.
            pass

        dtype = values[0].value_type
        scaleoffset = None
        # There is no np.float128 on Windows.
        if dtype in (float, np.float32, np.float64, np.longdouble):
            scaleoffset = 4
        time_step_path = None
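        # In time-step mode every element gets its own row at each time step,
        # so the dataset capacity scales with the number of elements.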
        max_size = max_size * len(values) if store_time_step else max_size

        if store_time_step:
            # Store indices for time step and element.
            # Each row of this dataset corresponds to a row in the data.
            # This will be required to interpret the raw data.
            attributes = {"type": DatasetPropertyType.TIME_STEP.value}
            time_step_path = self.time_step_path(path)
            self._time_steps = DatasetBuffer(
                hdf_store,
                time_step_path,
                max_size,
                int,
                ["Time", "Name"],
                scaleoffset=0,
                max_chunk_bytes=max_chunk_bytes,
                attributes=attributes,
            )
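            # In time-step mode all elements share one set of columns, so the
            # element-name field in each column label is replaced with "AllNames".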
            columns = []
            tmp_columns = values[0].make_columns()
            for column in tmp_columns:
                fields = column.split(ValueStorageBase.DELIMITER)
                fields[0] = "AllNames"
                columns.append(ValueStorageBase.DELIMITER.join(fields))
            column_ranges = [0, len(tmp_columns)]
        else:
            columns = []
            column_ranges = []
            col_index = 0
            for value in values:
                tmp_columns = value.make_columns()
                col_range = (col_index, len(tmp_columns))
                column_ranges.append(col_range)
                for column in tmp_columns:
                    columns.append(column)
                    col_index += 1
            self._time_steps = None

        attributes = {"type": dataset_property_type.value}
        if store_time_step:
            attributes["time_step_path"] = time_step_path

        self._dataset = DatasetBuffer(
            hdf_store,
            path,
            max_size,
            dtype,
            columns,
            scaleoffset=scaleoffset,
            max_chunk_bytes=max_chunk_bytes,
            attributes=attributes,
            names=elem_names,
            column_ranges_per_name=column_ranges,
        )

    @staticmethod
    def time_step_path(path):
        return path + "TimeStep"

    def append(self, values):
        """Append a value to the container.

        Parameters
        ----------
        value : list
            list of ValueStorageBase

        """
        if isinstance(values[0].value, list):
            vals = [x for y in values for x in y.value]
        else:
            vals = [x.value for x in values]

        self._dataset.write_value(vals)

    def append_by_time_step(self, value, time_step, elem_index):
        """Append a value to the container.

        Parameters
        ----------
        value : ValueStorageBase
        time_step : int
        elem_index : int

        """
        if isinstance(value.value, list):
            vals = list(value.value)
        else:
            vals = value.value

        self._dataset.write_value(vals)
        self._time_steps.write_value([time_step, elem_index])

    def flush_data(self):
        """Flush any outstanding data to disk."""
        self._dataset.flush_data()
        if self._time_steps is not None:
            self._time_steps.flush_data()

    def max_num_bytes(self):
        """Return the maximum number of bytes the container could hold.

        Returns
        -------
        int

        """
        return self._dataset.max_num_bytes()
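
# Usage sketch (illustrative, not part of the source): `values` is a list of
# ValueStorageBase instances for one property, `names` the matching element
# names, and `store` an open h5py.File; the path below is a made-up example.
def _example_value_container(values, names, store, num_steps):
    container = ValueContainer(
        values,
        store,
        "Exports/scenario/Lines/ElementProperties/Currents",
        max_size=num_steps,
        elem_names=names,
        dataset_property_type=DatasetPropertyType.ELEMENT_PROPERTY,
    )
    for _ in range(num_steps):
        container.append(values)  # the caller refreshes each value in place per step
    container.flush_data()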