Example 1
import os
import tempfile

import h5py
import numpy as np
import pandas as pd

# The DatasetBuffer import path is assumed from the PyDSS project layout.
from PyDSS.dataset_buffer import DatasetBuffer


def test_dataset_buffer__write_value():
    filename = os.path.join(tempfile.gettempdir(), "store.h5")
    try:
        with h5py.File(filename, "w") as store:
            columns = ("1", "2", "3", "4")
            max_size = 2000
            dataset = DatasetBuffer(store, "data", max_size, float, columns)
            assert dataset._chunk_size == 1024
            for i in range(max_size):
                data = np.ones(4)
                dataset.write_value(data)
            assert dataset._buf_index == 2000 - dataset._chunk_size
            dataset.flush_data()
            assert dataset._buf_index == 0

        with h5py.File(filename, "r") as store:
            data = store["data"][:]
            assert len(data) == max_size
            assert [x for x in store["data"].attrs["columns"]] == list(columns)
            for i in range(max_size):
                for j in range(4):
                    assert data[i][j] == 1.0

            df = DatasetBuffer.to_dataframe(store["data"])
            assert isinstance(df, pd.DataFrame)
            assert len(df) == max_size
            assert df.iloc[0, 0] == 1.0
    finally:
        if os.path.exists(filename):
            os.remove(filename)
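
The two _buf_index assertions rely on the write-behind behavior the buffer implements: whenever the in-memory buffer fills one chunk, the chunk is written to the HDF5 dataset and the index resets, so after 2000 single-row writes only 2000 - 1024 = 976 rows remain buffered until flush_data() runs. Below is a minimal sketch of that pattern using plain h5py; SimpleBuffer is a hypothetical name for illustration, not the project's implementation.

import h5py
import numpy as np

class SimpleBuffer:
    """Write-behind buffer: accumulate rows in memory, spill one full chunk at a time."""
    def __init__(self, store, path, max_size, num_columns, chunk_size=1024):
        self._dataset = store.create_dataset(path, shape=(max_size, num_columns), dtype="f8")
        self._buf = np.empty((chunk_size, num_columns))
        self._buf_index = 0      # rows currently held in memory
        self._dataset_index = 0  # rows already written to the HDF5 dataset

    def write_value(self, row):
        self._buf[self._buf_index] = row
        self._buf_index += 1
        if self._buf_index == len(self._buf):  # buffer full: spill a chunk
            self.flush_data()

    def flush_data(self):
        end = self._dataset_index + self._buf_index
        self._dataset[self._dataset_index:end] = self._buf[:self._buf_index]
        self._dataset_index = end
        self._buf_index = 0

Writing 2000 rows of width 4 through this sketch leaves _buf_index == 976 until the final flush_data(), which is exactly what the test checks.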
Example 2
class ResultData:
    """Exports data to files."""

    METADATA_FILENAME = "metadata.json"
    INDICES_BASENAME = "indices"

    def __init__(self, options, system_paths, dss_objects,
                 dss_objects_by_class, dss_buses, dss_solver, dss_command,
                 dss_instance):
        if options["Logging"]["Pre-configured logging"]:
            logger_tag = __name__
        else:
            logger_tag = getLoggerTag(options)
        self._logger = logging.getLogger(logger_tag)
        self._dss_solver = dss_solver
        self._results = {}
        self._buses = dss_buses
        self._objects_by_element = dss_objects
        self._objects_by_class = dss_objects_by_class
        self.system_paths = system_paths
        self._elements = []
        self._options = options

        self._dss_command = dss_command
        self._dss_instance = dss_instance
        self._start_day = options["Project"]["Start Day"]
        self._end_day = options["Project"]["End Day"]
        self._time_dataset = None
        self._frequency_dataset = None
        self._mode_dataset = None
        self._simulation_mode = []
        self._hdf_store = None
        self._scenario = options["Project"]["Active Scenario"]
        self._base_scenario = options["Project"]["Active Scenario"]
        self._export_format = options["Exports"]["Export Format"]
        self._export_compression = options["Exports"]["Export Compression"]
        self._export_iteration_order = options["Exports"][
            "Export Iteration Order"]
        self._max_chunk_bytes = options["Exports"]["HDF Max Chunk Bytes"]
        self._export_dir = os.path.join(
            self.system_paths["Export"],
            options["Project"]["Active Scenario"],
        )
        # Use / because this is used in HDFStore
        self._export_relative_dir = "Exports/" + options["Project"]["Active Scenario"]
        self._store_frequency = False
        self._store_mode = False
        self.CurrentResults = {}
        if options["Project"]["Simulation Type"] == "Dynamic" or \
                options["Frequency"]["Enable frequency sweep"]:
            self._store_frequency = True
            self._store_mode = True

        if options["Exports"]["Export Mode"] == "byElement":
            raise InvalidParameter(
                "Export Mode 'byElement' is not supported by ResultData")

        pathlib.Path(self._export_dir).mkdir(parents=True, exist_ok=True)

        export_list_filename = os.path.join(
            system_paths["ExportLists"],
            "Exports.toml",
        )
        if not os.path.exists(export_list_filename):
            export_list_filename = os.path.join(
                system_paths["ExportLists"],
                "ExportMode-byClass.toml",
            )
        self._export_list = ExportListReader(export_list_filename)
        Reports.append_required_exports(self._export_list, options)
        self._create_exports()

    def _create_exports(self):
        elements = {}  # element name to ElementData
        for elem_class in self._export_list.list_element_classes():
            if elem_class == "Buses":
                objs = self._buses
            elif elem_class in self._objects_by_class:
                objs = self._objects_by_class[elem_class]
            else:
                continue
            for name, obj in objs.items():
                if not obj.Enabled:
                    continue
                for prop in self._export_list.iter_export_properties(
                        elem_class=elem_class):
                    if prop.custom_function is None and not obj.IsValidAttribute(
                            prop.name):
                        raise InvalidParameter(
                            f"{name} / {prop.name} cannot be exported")
                    if prop.should_store_name(name):
                        if name not in elements:
                            elements[name] = ElementData(
                                name,
                                obj,
                                max_chunk_bytes=self._max_chunk_bytes,
                                options=self._options)
                        elements[name].append_property(prop)
                        self._logger.debug("Store %s %s name=%s", elem_class,
                                           prop.name, name)

        self._elements = elements.values()

    def InitializeDataStore(self,
                            hdf_store,
                            num_steps,
                            MC_scenario_number=None):
        if MC_scenario_number is not None:
            self._scenario = self._base_scenario + f"_MC{MC_scenario_number}"
        self._hdf_store = hdf_store
        self._time_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Timestamp",
            max_size=num_steps,
            dtype=float,
            columns=("Timestamp", ),
            max_chunk_bytes=self._max_chunk_bytes)
        self._frequency_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Frequency",
            max_size=num_steps,
            dtype=float,
            columns=("Frequency", ),
            max_chunk_bytes=self._max_chunk_bytes)
        self._mode_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Mode",
            max_size=num_steps,
            dtype="S10",
            columns=("Mode", ),
            max_chunk_bytes=self._max_chunk_bytes)

        for element in self._elements:
            element.initialize_data_store(hdf_store, self._scenario, num_steps)

    def UpdateResults(self):
        self.CurrentResults.clear()

        timestamp = self._dss_solver.GetDateTime().timestamp()
        self._time_dataset.write_value(timestamp)
        self._frequency_dataset.write_value(self._dss_solver.getFrequency())
        self._mode_dataset.write_value(self._dss_solver.getMode())

        for elem in self._elements:
            data = elem.append_values(timestamp)
            self.CurrentResults.update(data)
        return self.CurrentResults

    def ExportResults(self, fileprefix=""):
        self.FlushData()
        for element in self._elements:
            element.export_change_counts()
            element.export_sums()

        metadata = {
            "event_log": None,
            "element_info_files": [],
        }

        if self._options["Exports"]["Export Event Log"]:
            self._export_event_log(metadata)
        if self._options["Exports"]["Export Elements"]:
            self._export_elements(metadata)
            self._export_feeder_head_info(metadata)
        if self._options["Exports"]["Export PV Profiles"]:
            self._export_pv_profiles()

        filename = os.path.join(self._export_dir, self.METADATA_FILENAME)
        dump_data(metadata, filename, indent=4)
        self._logger.info("Exported metadata to %s", filename)
        self._hdf_store = None

    def FlushData(self):
        for dataset in (self._time_dataset, self._frequency_dataset,
                        self._mode_dataset):
            dataset.flush_data()
        for element in self._elements:
            element.flush_data()

    def _export_event_log(self, metadata):
        # TODO: move to a base class
        event_log = "event_log.csv"
        file_path = os.path.join(self._export_dir, event_log)
        if os.path.exists(file_path):
            os.remove(file_path)

        orig = os.getcwd()
        os.chdir(self._export_dir)
        try:
            cmd = "Export EventLog {}".format(event_log)
            out = self._dss_command(cmd)
            if out != event_log:
                raise Exception(f"Failed to export EventLog:  {out}")
            self._logger.info("Exported OpenDSS event log to %s", out)
            metadata["event_log"] = self._export_relative_dir + f"/{event_log}"
        finally:
            os.chdir(orig)

    def _export_dataframe(self, df, basename):
        filename = basename + "." + self._export_format
        write_dataframe(df, filename, compress=self._export_compression)
        self._logger.info("Exported %s", filename)

    def _find_feeder_head_line(self):
        dss = self._dss_instance
        feeder_head_line = None
        flag = dss.Topology.First()
        while flag > 0:
            if 'line' in dss.Topology.BranchName().lower():
                feeder_head_line = dss.Topology.BranchName()
                break
            else:
                flag = dss.Topology.Next()

        return feeder_head_line

    def _get_feeder_head_loading(self):
        dss = self._dss_instance
        head_line = self._find_feeder_head_line()
        if head_line is not None:
            flag = dss.Circuit.SetActiveElement(head_line)

            if flag > 0:
                n_phases = dss.CktElement.NumPhases()
                max_amp = dss.CktElement.NormalAmps()
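                # CurrentsMagAng() returns interleaved magnitude/angle values; every other entry is a magnitude.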
                Currents = dss.CktElement.CurrentsMagAng()[:2 * n_phases]
                Current_magnitude = Currents[::2]

                max_flow = max(max(Current_magnitude), 1e-10)
                loading = max_flow / max_amp

                return loading

            else:
                return None
        else:
            return None

    def _reverse_powerflow(self):
        dss = self._dss_instance
        # Total substation power is an injection (-) or a consumption (+).
        reverse_pf = max(dss.Circuit.TotalPower()) > 0
        return reverse_pf

    def _export_feeder_head_info(self, metadata):
        """
        Gets feeder head information comprising:
        1- The name of the feeder head line
        2- The feeder head loading in per unit
        3- The feeder head load in (kW, kVar). Negative in case of power injection
        4- The reverse power flow flag. True if power is flowing back to the feeder head, False otherwise
        """

        dss = self._dss_instance
        if not "feeder_head_info_files" in metadata.keys():
            metadata["feeder_head_info_files"] = []

        df_dict = {
            "FeederHeadLine": self._find_feeder_head_line(),
            "FeederHeadLoading": self._get_feeder_head_loading(),
            "FeederHeadLoad": dss.Circuit.TotalPower(),
            "ReversePowerFlow": self._reverse_powerflow()
        }

        #df = pd.DataFrame.from_dict(df_dict)

        filename = "FeederHeadInfo"
        fname = filename + ".json"

        relpath = os.path.join(self._export_relative_dir, fname)
        filepath = os.path.join(self._export_dir, fname)
        #write_dataframe(df, filepath)
        dump_data(df_dict, filepath)
        metadata["feeder_head_info_files"].append(relpath)
        self._logger.info("Exported %s information to %s.", filename, filepath)

    def _export_elements(self, metadata):
        dss = self._dss_instance
        exports = (
            # TODO: opendssdirect does not provide a function to export Bus information.
            ("CapacitorsInfo", dss.Capacitors.Count,
             dss.utils.capacitors_to_dataframe),
            ("FusesInfo", dss.Fuses.Count, dss.utils.fuses_to_dataframe),
            ("GeneratorsInfo", dss.Generators.Count,
             dss.utils.generators_to_dataframe),
            ("IsourceInfo", dss.Isource.Count, dss.utils.isource_to_dataframe),
            ("LinesInfo", dss.Lines.Count, dss.utils.lines_to_dataframe),
            ("LoadsInfo", dss.Loads.Count, dss.utils.loads_to_dataframe),
            ("MetersInfo", dss.Meters.Count, dss.utils.meters_to_dataframe),
            ("MonitorsInfo", dss.Monitors.Count,
             dss.utils.monitors_to_dataframe),
            ("PVSystemsInfo", dss.PVsystems.Count,
             dss.utils.pvsystems_to_dataframe),
            ("ReclosersInfo", dss.Reclosers.Count,
             dss.utils.reclosers_to_dataframe),
            ("RegControlsInfo", dss.RegControls.Count,
             dss.utils.regcontrols_to_dataframe),
            ("RelaysInfo", dss.Relays.Count, dss.utils.relays_to_dataframe),
            ("SensorsInfo", dss.Sensors.Count, dss.utils.sensors_to_dataframe),
            ("TransformersInfo", dss.Transformers.Count,
             dss.utils.transformers_to_dataframe),
            ("VsourcesInfo", dss.Vsources.Count,
             dss.utils.vsources_to_dataframe),
            ("XYCurvesInfo", dss.XYCurves.Count,
             dss.utils.xycurves_to_dataframe),
            # TODO This can be very large. Consider making it configurable.
            #("LoadShapeInfo", dss.LoadShape.Count, dss.utils.loadshape_to_dataframe),
        )

        for filename, count_func, get_func in exports:
            if count_func() > 0:
                df = get_func()
                # Always record in CSV format for readability.
                # There are also warning messages from PyTables because the
                # data may contain strings.
                fname = filename + ".csv"
                relpath = os.path.join(self._export_relative_dir, fname)
                filepath = os.path.join(self._export_dir, fname)
                write_dataframe(df, filepath)
                metadata["element_info_files"].append(relpath)
                self._logger.info("Exported %s information to %s.", filename,
                                  filepath)

        self._export_transformers(metadata)

    def _export_transformers(self, metadata):
        dss = self._dss_instance
        df_dict = {
            "Transformer": [],
            "HighSideConnection": [],
            "NumPhases": []
        }

        dss.Circuit.SetActiveClass("Transformer")
        flag = dss.ActiveClass.First()
        while flag > 0:
            name = dss.CktElement.Name()
            df_dict["Transformer"].append(name)
            df_dict["HighSideConnection"].append(
                dss.Properties.Value("conns").split("[")[1].split(",")
                [0].strip(" ").lower())
            df_dict["NumPhases"].append(dss.CktElement.NumPhases())
            flag = dss.ActiveClass.Next()

        df = pd.DataFrame.from_dict(df_dict)

        relpath = os.path.join(self._export_relative_dir,
                               "TransformersPhaseInfo.csv")
        filepath = os.path.join(self._export_dir, "TransformersPhaseInfo.csv")
        write_dataframe(df, filepath)
        metadata["element_info_files"].append(relpath)
        self._logger.info("Exported transformer phase information to %s",
                          filepath)

    def _export_pv_profiles(self):
        dss = self._dss_instance
        pv_systems = self._objects_by_class.get("PVSystems")
        if pv_systems is None:
            raise InvalidConfiguration("PVSystems are not exported")

        pv_infos = []
        profiles = set()
        for full_name, obj in pv_systems.items():
            profile_name = obj.GetParameter("yearly").lower()
            if profile_name != "":
                profiles.add(profile_name)
            pv_infos.append({
                "irradiance": obj.GetParameter("irradiance"),
                "name": full_name,
                "pmpp": obj.GetParameter("pmpp"),
                "load_shape_profile": profile_name,
            })

        pmult_sums = {}
        dss.LoadShape.First()
        sim_resolution = self._options["Project"]["Step resolution (sec)"]
        while True:
            name = dss.LoadShape.Name().lower()
            if name in profiles:
                sinterval = dss.LoadShape.SInterval()
                assert sim_resolution >= sinterval
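                # Downsample the profile to the simulation step, e.g. a 900 s step over a 300 s SInterval keeps every 3rd point.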
                offset = int(sim_resolution / dss.LoadShape.SInterval())
                pmult_sums[name] = sum(dss.LoadShape.PMult()[::offset])
            if dss.LoadShape.Next() == 0:
                break

        for pv_info in pv_infos:
            profile = pv_info["load_shape_profile"]
            if profile == "":
                pv_info["load_shape_pmult_sum"] = 0
            else:
                pv_info["load_shape_pmult_sum"] = pmult_sums[profile]

        data = {"pv_systems": pv_infos}
        filename = os.path.join(self._export_dir, "pv_profiles.json")
        dump_data(data, filename, indent=2)
        self._logger.info("Exported PV profile information to %s", filename)

    @staticmethod
    def get_units(prop, index=None):
        units = unit_info.get(prop)
        if units is None:
            raise InvalidParameter(f"no units are stored for {prop}")

        if isinstance(units, dict):
            if index is None:
                raise InvalidParameter(f"index must be provided for {prop}")
            if index == 0:
                return units["E"]
            if index == 1:
                return units["O"]
            raise InvalidParameter("index must be 0 or 1")

        return units

    def max_num_bytes(self):
        """Return the maximum number of bytes the container could hold.

        Returns
        -------
        int

        """
        total = 0
        for element in self._elements:
            total += element.max_num_bytes()
        return total
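
Taken as a whole, the class is driven in three phases: InitializeDataStore allocates the per-scenario datasets, UpdateResults is called once per simulation step, and ExportResults flushes the buffers and writes the metadata and element-info files. Below is a hedged sketch of that driving loop; result_data, solver, solver.Step(), and the num_steps value are placeholders supplied by the surrounding simulation framework, not defined in this excerpt.

import h5py

num_steps = 96  # e.g. one day at a 15-minute step (illustrative value)

with h5py.File("store.h5", mode="w") as hdf_store:
    result_data.InitializeDataStore(hdf_store, num_steps)
    for _ in range(num_steps):
        solver.Step()  # advance OpenDSS one time step (placeholder call)
        current_values = result_data.UpdateResults()  # buffers values, returns the latest readings
    result_data.ExportResults()  # FlushData() plus event log / element info / metadata.json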
Example 3
class ValueContainer:
    """Container for a sequence of instances of ValueStorageBase."""

    # These could potentially be reduced in bit lengths. Compression probably
    # makes that unnecessary.
    _TYPE_MAPPING = {
        float: np.float64,
        int: np.int64,
        complex: np.complex128,
    }

    def __init__(self,
                 value,
                 hdf_store,
                 path,
                 max_size,
                 dataset_property_type,
                 max_chunk_bytes=None,
                 store_timestamp=False):
        group_name = os.path.dirname(path)
        basename = os.path.basename(path)
        try:
            if basename in hdf_store[group_name]:
                raise InvalidParameter(f"duplicate dataset name {basename}")
        except KeyError:
            # Don't bother checking each sub path.
            pass

        dtype = self._TYPE_MAPPING.get(value.value_type)
        assert dtype is not None
        scaleoffset = None
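        # h5py scale-offset filter: for floats, keep 4 decimal digits; for ints, 0 lets HDF5 pick the minimum bit width.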
        if dtype == np.float64:
            scaleoffset = 4
        elif dtype == np.int64:
            scaleoffset = 0
        attributes = {"type": dataset_property_type.value}
        timestamp_path = None

        if store_timestamp:
            timestamp_path = self.timestamp_path(path)
            self._timestamps = DatasetBuffer(
                hdf_store,
                timestamp_path,
                max_size,
                float,
                ["Timestamp"],
                scaleoffset=scaleoffset,
                max_chunk_bytes=max_chunk_bytes,
                attributes={"type": DatasetPropertyType.TIMESTAMP.value},
            )
            attributes["timestamp_path"] = timestamp_path
        else:
            self._timestamps = None

        self._dataset = DatasetBuffer(
            hdf_store,
            path,
            max_size,
            dtype,
            value.make_columns(),
            scaleoffset=scaleoffset,
            max_chunk_bytes=max_chunk_bytes,
            attributes=attributes,
        )

    @staticmethod
    def timestamp_path(path):
        return path + "Timestamp"

    def append(self, value, timestamp=None):
        """Append a value to the container.

        Parameters
        ----------
        value : ValueStorageBase
        timestamp : float | None

        """
        self._dataset.write_value(value.value)
        if self._timestamps is not None:
            assert timestamp is not None
            self._timestamps.write_value(timestamp)

    def flush_data(self):
        """Flush any outstanding data to disk."""
        self._dataset.flush_data()
        if self._timestamps is not None:
            self._timestamps.flush_data()

    def max_num_bytes(self):
        """Return the maximum number of bytes the container could hold.

        Returns
        -------
        int

        """
        return self._dataset.max_num_bytes()
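
The container only requires a value object that exposes value_type, make_columns(), and value; dtype selection, the optional timestamp dataset, and the buffering are handled internally. Below is a minimal sketch of the expected call pattern; FakeValue stands in for a real ValueStorageBase subclass, the dataset path is illustrative, and the DatasetPropertyType member name is an assumption.

import h5py

class FakeValue:
    """Stand-in for ValueStorageBase: a single float column."""
    value_type = float
    def __init__(self, value):
        self.value = value
    def make_columns(self):
        return ["Line.line1__Currents"]

with h5py.File("store.h5", mode="w") as hdf_store:
    container = ValueContainer(
        FakeValue(0.0),
        hdf_store,
        "Exports/scenario/Line.line1/Currents",
        max_size=96,
        dataset_property_type=DatasetPropertyType.ELEMENT_PROPERTY,  # member name assumed
        store_timestamp=True,
    )
    for step in range(96):
        container.append(FakeValue(float(step)), timestamp=float(step))
    container.flush_data()

With store_timestamp=True a companion dataset is created at timestamp_path(path), here ".../CurrentsTimestamp", holding one timestamp per appended value.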
Example 4
class ResultData:
    """Exports data to files."""

    METADATA_FILENAME = "metadata.json"
    INDICES_BASENAME = "indices"

    def __init__(self, settings: SimulationSettingsModel, system_paths, dss_objects,
                 dss_objects_by_class, dss_buses, dss_solver, dss_command,
                 dss_instance):
        self._logger = logger
        self._dss_solver = dss_solver
        self._results = {}
        self._buses = dss_buses
        self._objects_by_element = dss_objects
        self._objects_by_class = dss_objects_by_class
        self.system_paths = system_paths
        self._element_metrics = {}  # (elem_class, prop_name) to OpenDssPropertyMetric
        self._summed_element_metrics = {}
        self._settings = settings
        self._cur_step = 0
        self._current_results = {}

        self._dss_command = dss_command
        self._start_day = dss_solver.StartDay
        self._end_day = dss_solver.EndDay
        self._time_dataset = None
        self._frequency_dataset = None
        self._mode_dataset = None
        self._simulation_mode = []
        self._hdf_store = None
        self._scenario = settings.project.active_scenario
        self._base_scenario = settings.project.active_scenario
        self._export_format = settings.exports.export_format
        self._export_compression = settings.exports.export_compression
        self._max_chunk_bytes = settings.exports.hdf_max_chunk_bytes
        self._export_dir = os.path.join(
            self.system_paths["Export"],
            settings.project.active_scenario,
        )
        # Use / because this is used in HDFStore
        self._export_relative_dir = "Exports/" + settings.project.active_scenario
        self._store_frequency = False
        self._store_mode = False
        if settings.frequency.enable_frequency_sweep:
            self._store_frequency = True
            self._store_mode = True

        pathlib.Path(self._export_dir).mkdir(parents=True, exist_ok=True)

        export_list_filename = os.path.join(
            system_paths["ExportLists"],
            "Exports.toml",
        )
        if not os.path.exists(export_list_filename):
            export_list_filename = os.path.join(
                system_paths["ExportLists"],
                "ExportMode-byClass.toml",
            )
        self._export_list = ExportListReader(export_list_filename)
        Reports.append_required_exports(self._export_list, settings)
        dump_data(
            self._export_list.serialize(),
            os.path.join(self._export_dir, "ExportsActual.toml"),
        )
        self._circuit_metrics = {}
        self._create_exports()

    def _create_exports(self):
        for elem_class in self._export_list.list_element_classes():
            if elem_class in ("Buses", "Nodes"):
                objs = self._buses
            elif elem_class in self._objects_by_class:
                objs = self._objects_by_class[elem_class]
            elif elem_class == "FeederHead":
                objs = self._objects_by_class["Circuits"]
            elif elem_class != "CktElement":  # TODO
                continue
            for prop in self._export_list.iter_export_properties(elem_class=elem_class):
                if prop.opendss_classes:
                    dss_objs = []
                    for cls in prop.opendss_classes:
                        if cls not in self._objects_by_class:
                            logger.warning("Export class=%s is not present in the circuit", cls)
                            continue

                        for obj in self._objects_by_class[cls].values():
                            if obj.Enabled and prop.should_store_name(obj.FullName):
                                dss_objs.append(obj)
                else:
                    dss_objs = [x for x in objs.values() if x.Enabled and prop.should_store_name(x.FullName)]
                if prop.custom_metric is None:
                    self._add_opendss_metric(prop, dss_objs)
                else:
                    self._add_custom_metric(prop, dss_objs)

    def _add_opendss_metric(self, prop, dss_objs):
        obj = dss_objs[0]
        if not obj.IsValidAttribute(prop.name):
            raise InvalidParameter(f"{obj.FullName} / {prop.name} cannot be exported")
        key = (prop.elem_class, prop.name)
        if prop.sum_elements:
            metric = self._summed_element_metrics.get(key)
            if metric is None:
                if prop.sum_groups:
                    cls = SummedElementsByGroupOpenDssPropertyMetric
                else:
                    cls = SummedElementsOpenDssPropertyMetric
                metric = cls(prop, dss_objs, self._settings)
                self._summed_element_metrics[key] = metric
            else:
                metric.add_dss_obj(obj)
        else:
            metric = self._element_metrics.get(key)
            if metric is None:
                metric = OpenDssPropertyMetric(prop, dss_objs, self._settings)
                self._element_metrics[key] = metric
            else:
                metric.add_property(prop)

    def _add_custom_metric(self, prop, dss_objs):
        cls = prop.custom_metric
        if cls.is_circuit_wide():
            metric = self._circuit_metrics.get(cls)
            if metric is None:
                metric = cls(prop, dss_objs, self._settings)
                self._circuit_metrics[cls] = metric
            else:
                metric.add_property(prop)
        else:
            key = (prop.elem_class, prop.name)
            metric = self._element_metrics.get(key)
            if metric is None:
                metric = cls(prop, dss_objs, self._settings)
                self._element_metrics[key] = metric
            else:
                metric.add_property(prop)

    def InitializeDataStore(self, hdf_store, num_steps, MC_scenario_number=None):
        if MC_scenario_number is not None:
            self._scenario = self._base_scenario + f"_MC{MC_scenario_number}"
        self._hdf_store = hdf_store
        self._time_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Timestamp",
            max_size=num_steps,
            dtype=float,
            columns=("Timestamp",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._frequency_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Frequency",
            max_size=num_steps,
            dtype=float,
            columns=("Frequency",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._mode_dataset = DatasetBuffer(
            hdf_store=hdf_store,
            path=f"Exports/{self._scenario}/Mode",
            max_size=num_steps,
            dtype="S10",
            columns=("Mode",),
            max_chunk_bytes=self._max_chunk_bytes
        )
        self._cur_step = 0

        base_path = "Exports/" + self._scenario
        for metric in self._iter_metrics():
            metric.initialize_data_store(hdf_store, base_path, num_steps)

    def _iter_metrics(self):
        for metric in self._element_metrics.values():
            yield metric
        for metric in self._summed_element_metrics.values():
            yield metric
        for metric in self._circuit_metrics.values():
            yield metric

    @property
    def CurrentResults(self):
        return self._current_results

    @track_timing(timer_stats_collector)
    def UpdateResults(self, store_nan=False):
        self._current_results.clear()

        # Get the number of seconds since the Epoch without any timezone conversions.
        timestamp = (self._dss_solver.GetDateTime() - datetime.utcfromtimestamp(0)).total_seconds()
        self._time_dataset.write_value([timestamp])
        self._frequency_dataset.write_value([self._dss_solver.getFrequency()])
        self._mode_dataset.write_value([self._dss_solver.getMode()])

        for metric in self._iter_metrics():
            with Timer(timer_stats_collector, metric.label()):
                data = metric.append_values(self._cur_step, store_nan=store_nan)

            if isinstance(data, dict):
                # TODO: reconsider
                # Something is only returned for OpenDSS properties
                self._current_results.update(data)

        self._cur_step += 1
        return self._current_results

    def ExportResults(self):
        metadata = {
            "event_log": None,
            "element_info_files": [],
        }

        if self._settings.exports.export_event_log:
            self._export_event_log(metadata)
        if self._settings.exports.export_elements:
            self._export_elements(metadata, set(self._settings.exports.export_element_types))
            self._export_feeder_head_info(metadata)
        if self._settings.exports.export_pv_profiles:
            self._export_pv_profiles()
        if self._settings.exports.export_node_names_by_type:
            self._export_node_names_by_type()

        filename = os.path.join(self._export_dir, self.METADATA_FILENAME)
        dump_data(metadata, filename, indent=4)
        self._logger.info("Exported metadata to %s", filename)
        self._hdf_store = None

    def Close(self):
        for dataset in (self._time_dataset, self._frequency_dataset, self._mode_dataset):
            dataset.flush_data()
        for metric in self._iter_metrics():
            metric.close()

    def _export_event_log(self, metadata):
        event_log = "event_log.csv"
        file_path = os.path.join(self._export_dir, event_log)
        if os.path.exists(file_path):
            os.remove(file_path)

        orig = os.getcwd()
        os.chdir(self._export_dir)
        try:
            cmd = "Export EventLog {}".format(event_log)
            out = self._dss_command(cmd)
            if out != event_log:
                raise Exception(f"Failed to export EventLog:  {out}")
            self._logger.info("Exported OpenDSS event log to %s", out)
            metadata["event_log"] = self._export_relative_dir + f"/{event_log}"
        finally:
            os.chdir(orig)

    def _export_dataframe(self, df, basename):
        filename = basename + "." + self._export_format
        write_dataframe(df, filename, compress=self._export_compression)
        self._logger.info("Exported %s", filename)

    def _find_feeder_head_line(self):
        feeder_head_line = None
        flag = dss.Topology.First()
        while flag > 0:
            if 'line' in dss.Topology.BranchName().lower():
                feeder_head_line = dss.Topology.BranchName()
                break
            else:
                flag = dss.Topology.Next()

        return feeder_head_line

    def _get_feeder_head_loading(self):
        head_line = self._find_feeder_head_line()
        if head_line is not None:
            flag = dss.Circuit.SetActiveElement(head_line)

            if flag > 0:
                n_phases = dss.CktElement.NumPhases()
                max_amp = dss.CktElement.NormalAmps()
                Currents = dss.CktElement.CurrentsMagAng()[:2 * n_phases]
                Current_magnitude = Currents[::2]

                max_flow = max(max(Current_magnitude), 1e-10)
                loading = max_flow / max_amp

                return loading
            else:
                return None
        else:
            return None

    def _reverse_powerflow(self):
        # Total substation power is an injection (-) or a consumption (+).
        reverse_pf = dss.Circuit.TotalPower()[0] > 0
        return reverse_pf

    def _export_feeder_head_info(self, metadata):
        """
        Gets feeder head information comprising:
        1- The name of the feeder head line
        2- The feeder head loading in per unit
        3- The feeder head load in (kW, kVar). Negative in case of power injection
        4- The reverse power flow flag. True if power is flowing back to the feeder head, False otherwise
        """
        if not "feeder_head_info_files" in metadata.keys():
            metadata["feeder_head_info_files"] = []

        total_power = dss.Circuit.TotalPower()
        df_dict = {"FeederHeadLine": self._find_feeder_head_line(),
                   "FeederHeadLoading": self._get_feeder_head_loading(),
                   "FeederHeadLoadKW": total_power[0],
                   "FeederHeadLoadKVar": total_power[1],
                   "ReversePowerFlow": self._reverse_powerflow()
                  }

        filename = "FeederHeadInfo"
        fname = filename + ".json"
        relpath = os.path.join(self._export_relative_dir, fname)
        filepath = os.path.join(self._export_dir, fname)

        #write_dataframe(df, filepath)
        dump_data(df_dict, filepath)
        metadata["feeder_head_info_files"].append(relpath)
        self._logger.info("Exported %s information to %s.", filename, filepath)

    def _export_elements(self, metadata, element_types):
        exports = [
            # TODO: opendssdirect does not provide a function to export Bus information.
            ("Capacitor", "CapacitorsInfo", dss.Capacitors.Count),
            ("Fuse", "FusesInfo", dss.Fuses.Count),
            ("Generator", "GeneratorsInfo", dss.Generators.Count),
            ("Isource", "IsourceInfo", dss.Isource.Count),
            ("Line", "LinesInfo", dss.Lines.Count),
            ("Load", "LoadsInfo", dss.Loads.Count),
            ("Monitor", "MonitorsInfo", dss.Monitors.Count),
            ("PVSystem", "PVSystemsInfo", dss.PVsystems.Count),
            ("Recloser", "ReclosersInfo", dss.Reclosers.Count),
            ("RegControl", "RegControlsInfo", dss.RegControls.Count),
            ("Relay", "RelaysInfo", dss.Relays.Count),
            ("Sensor", "SensorsInfo", dss.Sensors.Count),
            ("Transformer", "TransformersInfo", dss.Transformers.Count),
            ("Vsource", "VsourcesInfo", dss.Vsources.Count),
            ("XYCurve", "XYCurvesInfo", dss.XYCurves.Count),
            # TODO This can be very large. Consider making it configurable.
            #("LoadShape", "LoadShapeInfo", dss.LoadShape.Count),
        ]
        if element_types:
            types = set()
            for elem_type in element_types:
                if elem_type.endswith("s"):
                    # Maintain compatibility with the old format, which used plural names.
                    elem_type = elem_type[:-1]
                types.add(elem_type)
            exports = [x for x in exports if x[0] in types]

        for class_name, filename, count_func in exports:
            df = dss.utils.class_to_dataframe(class_name)
            # Always record in CSV format for readability.
            # There are also warning messages from PyTables because the
            # data may contain strings.
            fname = filename + ".csv"
            relpath = os.path.join(self._export_relative_dir, fname)
            filepath = os.path.join(self._export_dir, fname)
            write_dataframe(df, filepath)
            metadata["element_info_files"].append(relpath)
            self._logger.info("Exported %s information to %s.", filename, filepath)

        if not element_types or "Transformer" in element_types or "Transformers" in element_types:
            self._export_transformers(metadata)

    def _export_transformers(self, metadata):
        df_dict = {"Transformer": [], "HighSideConnection": [], "NumPhases": []}

        dss.Circuit.SetActiveClass("Transformer")
        flag = dss.ActiveClass.First()
        while flag > 0:
            name = dss.CktElement.Name()
            df_dict["Transformer"].append(name)
            df_dict["HighSideConnection"].append(dss.Properties.Value("conns").split("[")[1].split(",")[0].strip(" ").lower())
            df_dict["NumPhases"].append(dss.CktElement.NumPhases())
            flag = dss.ActiveClass.Next()

        df = pd.DataFrame.from_dict(df_dict)

        relpath = os.path.join(self._export_relative_dir, "TransformersPhaseInfo.csv")
        filepath = os.path.join(self._export_dir, "TransformersPhaseInfo.csv")
        write_dataframe(df, filepath)
        metadata["element_info_files"].append(relpath)
        self._logger.info("Exported transformer phase information to %s", filepath)

    def _export_pv_profiles(self):
        granularity = self._settings.reports.granularity
        pv_systems = self._objects_by_class.get("PVSystems")
        if pv_systems is None:
            logger.info("No PVSystems are present")
            return

        pv_infos = []
        profiles = set()
        for full_name, obj in pv_systems.items():
            profile_name = obj.GetParameter("yearly").lower()
            if profile_name != "":
                profiles.add(profile_name)
            pv_infos.append({
                "irradiance": obj.GetParameter("irradiance"),
                "name": full_name,
                "pmpp": obj.GetParameter("pmpp"),
                "load_shape_profile": profile_name,
            })

        pmult_sums = {}
        if dss.LoadShape.First() == 0:
            self._logger.warning("There are no load shapes.")
            return

        sim_resolution = self._settings.project.step_resolution_sec
        per_time_point = (
            ReportGranularity.PER_ELEMENT_PER_TIME_POINT,
            ReportGranularity.ALL_ELEMENTS_PER_TIME_POINT,
        )
        load_shape_data = {}
        while True:
            name = dss.LoadShape.Name().lower()
            if name in profiles:
                sinterval = dss.LoadShape.SInterval()
                assert sim_resolution >= sinterval, f"{sim_resolution} >= {sinterval}"
                df = create_loadshape_pmult_dataframe_for_simulation(self._settings)
                sum_values = df.iloc[:, 0].sum()
                if granularity in per_time_point:
                    load_shape_data[name] = df.iloc[:, 0].values
                    pmult_sums[name] = sum_values
                else:
                    pmult_sums[name] = sum_values
            if dss.LoadShape.Next() == 0:
                break

        if load_shape_data and granularity in per_time_point:
            filename = os.path.join(self._export_dir, PV_LOAD_SHAPE_FILENAME)
            index = create_datetime_index_from_settings(self._settings)
            df = pd.DataFrame(load_shape_data, index=index)
            write_dataframe(df, filename, compress=True)

        for pv_info in pv_infos:
            profile = pv_info["load_shape_profile"]
            if profile == "":
                pv_info["load_shape_pmult_sum"] = 0
            else:
                pv_info["load_shape_pmult_sum"] = pmult_sums[profile]

        data = {"pv_systems": pv_infos}
        filename = os.path.join(self._export_dir, PV_PROFILES_FILENAME)
        dump_data(data, filename, indent=2)
        self._logger.info("Exported PV profile information to %s", filename)

    def _export_node_names_by_type(self):
        data = get_node_names_by_type()
        filename = os.path.join(self._export_dir, NODE_NAMES_BY_TYPE_FILENAME)
        dump_data(data, filename, indent=2)
        self._logger.info("Exported node names by type to %s", filename)

    @staticmethod
    def get_units(prop, index=None):
        units = unit_info.get(prop)
        if units is None:
            raise InvalidParameter(f"no units are stored for {prop}")

        if isinstance(units, dict):
            if index is None:
                raise InvalidParameter(f"index must be provided for {prop}")
            if index == 0:
                return units["E"]
            if index == 1:
                return units["O"]
            raise InvalidParameter("index must be 0 or 1")

        return units

    def max_num_bytes(self):
        """Return the maximum number of bytes the container could hold.

        Returns
        -------
        int

        """
        total = 0
        for metric in self._iter_metrics():
            total += metric.max_num_bytes()
        return total
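
export_element_types accepts both the old plural spellings and the singular OpenDSS class names used in the exports table; the filter in _export_elements simply strips a trailing "s" before matching. A quick illustration of the effect, with input values picked for illustration only:

element_types = {"Lines", "Transformer", "PVSystems"}
types = {t[:-1] if t.endswith("s") else t for t in element_types}
# types == {"Line", "Transformer", "PVSystem"}
# so only the ("Line", ...), ("Transformer", ...), and ("PVSystem", ...) rows of `exports` are kept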
Example 5
class ValueContainer:
    """Container for a sequence of instances of ValueStorageBase."""
    def __init__(self,
                 values,
                 hdf_store,
                 path,
                 max_size,
                 elem_names,
                 dataset_property_type,
                 max_chunk_bytes=None,
                 store_time_step=False):
        group_name = os.path.dirname(path)
        basename = os.path.basename(path)
        try:
            if basename in hdf_store[group_name]:
                raise InvalidParameter(f"duplicate dataset name {basename}")
        except KeyError:
            # Don't bother checking each sub path.
            pass

        dtype = values[0].value_type
        scaleoffset = None
        # There is no np.float128 on Windows.
        if dtype in (float, np.float32, np.float64, np.longdouble):
            scaleoffset = 4
        time_step_path = None
        max_size = max_size * len(values) if store_time_step else max_size

        if store_time_step:
            # Store indices for time step and element.
            # Each row of this dataset corresponds to a row in the data.
            # This will be required to interpret the raw data.
            attributes = {"type": DatasetPropertyType.TIME_STEP.value}
            time_step_path = self.time_step_path(path)
            self._time_steps = DatasetBuffer(
                hdf_store,
                time_step_path,
                max_size,
                int,
                ["Time", "Name"],
                scaleoffset=0,
                max_chunk_bytes=max_chunk_bytes,
                attributes=attributes,
            )
            columns = []
            tmp_columns = values[0].make_columns()
            for column in tmp_columns:
                fields = column.split(ValueStorageBase.DELIMITER)
                fields[0] = "AllNames"
                columns.append(ValueStorageBase.DELIMITER.join(fields))
            column_ranges = [0, len(tmp_columns)]
        else:
            columns = []
            column_ranges = []
            col_index = 0
            for value in values:
                tmp_columns = value.make_columns()
                col_range = (col_index, len(tmp_columns))
                column_ranges.append(col_range)
                for column in tmp_columns:
                    columns.append(column)
                    col_index += 1
            self._time_steps = None

        attributes = {"type": dataset_property_type.value}
        if store_time_step:
            attributes["time_step_path"] = time_step_path

        self._dataset = DatasetBuffer(
            hdf_store,
            path,
            max_size,
            dtype,
            columns,
            scaleoffset=scaleoffset,
            max_chunk_bytes=max_chunk_bytes,
            attributes=attributes,
            names=elem_names,
            column_ranges_per_name=column_ranges,
        )

    @staticmethod
    def time_step_path(path):
        return path + "TimeStep"

    def append(self, values):
        """Append a value to the container.

        Parameters
        ----------
        value : list
            list of ValueStorageBase

        """
        if isinstance(values[0].value, list):
            vals = [x for y in values for x in y.value]
        else:
            vals = [x.value for x in values]

        self._dataset.write_value(vals)

    def append_by_time_step(self, value, time_step, elem_index):
        """Append a value to the container.

        Parameters
        ----------
        value : ValueStorageBase
        time_step : int
        elem_index : int

        """
        if isinstance(value.value, list):
            vals = list(value.value)
        else:
            vals = value.value

        self._dataset.write_value(vals)
        self._time_steps.write_value([time_step, elem_index])

    def flush_data(self):
        """Flush any outstanding data to disk."""
        self._dataset.flush_data()
        if self._time_steps is not None:
            self._time_steps.flush_data()

    def max_num_bytes(self):
        """Return the maximum number of bytes the container could hold.

        Returns
        -------
        int

        """
        return self._dataset.max_num_bytes()
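
In the branch without time-step indexing, columns is the concatenation of every value's column names and column_ranges records each element's (start, length) slice into that flat list, which is how the DatasetBuffer can map columns back to elem_names. A worked illustration with two hypothetical elements:

# Hypothetical outputs of value.make_columns() for two elements:
value_columns = [
    ["Line.line1__CurrentA", "Line.line1__CurrentB"],                          # 2 columns
    ["Line.line2__CurrentA", "Line.line2__CurrentB", "Line.line2__CurrentC"],  # 3 columns
]

columns, column_ranges, col_index = [], [], 0
for tmp_columns in value_columns:
    column_ranges.append((col_index, len(tmp_columns)))
    for column in tmp_columns:
        columns.append(column)
        col_index += 1

# columns       -> 5 flat column names
# column_ranges -> [(0, 2), (2, 3)]: element 0 owns columns 0-1, element 1 owns columns 2-4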