Example #1
    def _export_transformers(self, metadata):
        dss = self._dss_instance
        df_dict = {
            "Transformer": [],
            "HighSideConnection": [],
            "NumPhases": []
        }

        dss.Circuit.SetActiveClass("Transformer")
        flag = dss.ActiveClass.First()
        while flag > 0:
            name = dss.CktElement.Name()
            df_dict["Transformer"].append(name)
            df_dict["HighSideConnection"].append(
                dss.Properties.Value("conns").split("[")[1].split(",")
                [0].strip(" ").lower())
            df_dict["NumPhases"].append(dss.CktElement.NumPhases())
            flag = dss.ActiveClass.Next()

        df = pd.DataFrame.from_dict(df_dict)

        relpath = os.path.join(self._export_relative_dir,
                               "TransformersPhaseInfo.csv")
        filepath = os.path.join(self._export_dir, "TransformersPhaseInfo.csv")
        write_dataframe(df, filepath)
        metadata["element_info_files"].append(relpath)
        self._logger.info("Exported transformer phase information to %s",
                          filepath)
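
The First()/Next() pair above is the standard opendssdirect iteration idiom: First() activates the first element of the active class and returns a positive index (0 if the class is empty), and Next() advances until it returns 0. A minimal sketch of wrapping that idiom in a generator; the helper name iter_element_names is hypothetical, not part of PyDSS or opendssdirect.

    def iter_element_names(api):
        """Yield each element name of an opendssdirect API section (hypothetical helper)."""
        flag = api.First()
        while flag > 0:
            yield api.Name()
            flag = api.Next()

    # Hypothetical usage:
    # for name in iter_element_names(dss.Transformers):
    #     print(name)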
Example #2
 def _export_dataframe_report(self, df, output_dir, basename):
     """Export report to a dataframe."""
     fmt = self._report_global_settings.format
     filename = os.path.join(output_dir, basename + "." + fmt.value)
     # fmt is an enum; compare its string value, not the enum itself.
     compress = fmt.value == "h5"
     write_dataframe(df, filename, compress=compress)
     logger.info("Generated %s", filename)
     return filename
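
Every example on this page delegates the actual file I/O to the shared write_dataframe helper. A minimal sketch of what such a helper might look like, assuming it dispatches on the file extension; the real PyDSS implementation may support more formats and differ in details.

    import os

    def write_dataframe(df, filename, compress=False, **kwargs):
        """Write df to filename, choosing the writer from the extension (sketch)."""
        ext = os.path.splitext(filename)[1]
        if ext == ".csv":
            df.to_csv(filename, **kwargs)
        elif ext == ".h5":
            complevel = 9 if compress else 0
            df.to_hdf(filename, key="data", complevel=complevel, **kwargs)
        elif ext == ".json":
            df.to_json(filename, **kwargs)
        else:
            raise ValueError(f"unsupported file extension {ext}")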
Example #3
File: reports.py Project: yuanzy97/PyDSS
    def generate(self, output_dir):
        df = self.calculate_pv_curtailment()
        filename = os.path.join(
            output_dir, self.FILENAME) + "." + self._report_options["Format"]
        write_dataframe(df, filename, compress=True)

        logger.info("Generated PV Clipping report %s", filename)
        return filename
Example #4
 def _export_filtered_dataframes(self, elem_class, prop, path, fmt,
                                 compress):
     for name, df in self.get_filtered_dataframes(elem_class, prop).items():
         if df.empty:
             logger.debug("Skip empty dataframe %s %s %s", elem_class, prop,
                          name)
             continue
         base = "__".join([elem_class, prop, name])
         filename = os.path.join(path, base + "." + fmt.replace(".", ""))
         write_dataframe(df, filename, compress=compress)
Example #5
    def export_data(self, path=None, fmt="csv", compress=False):
        """Export data to path.

        Parameters
        ----------
        path : str
            Output directory; defaults to scenario exports path
        fmt : str
            File format type (csv, h5)
        compress : bool
            Compress data

        """
        if path is None:
            path = os.path.join(self._project_path, "Exports", self._name)
        os.makedirs(path, exist_ok=True)

        for elem_class in self.list_element_classes():
            for prop in self.list_element_properties(elem_class):
                try:
                    df = self.get_full_dataframe(elem_class, prop)
                except InvalidParameter:
                    logger.info(f"cannot create full dataframe for %s %s",
                                elem_class, prop)
                    self._export_filtered_dataframes(elem_class, prop, path,
                                                     fmt, compress)
                    continue
                base = "__".join([elem_class, prop])
                filename = os.path.join(path,
                                        base + "." + fmt.replace(".", ""))
                write_dataframe(df, filename, compress=compress)

        if self._elem_prop_nums:
            data = copy.deepcopy(self._elem_prop_nums)
            for elem_class, prop, name, val in self.iterate_element_property_numbers():
                # JSON lib cannot serialize complex numbers.
                if isinstance(val, np.ndarray):
                    new_val = []
                    convert_str = val.dtype == "complex"
                    for item in val:
                        if convert_str:
                            item = str(item)
                        new_val.append(item)
                    data[elem_class][prop][name] = new_val
                elif isinstance(val, complex):
                    data[elem_class][prop][name] = str(val)

            filename = os.path.join(path, "element_property_numbers.json")
            dump_data(data, filename, indent=2)

        logger.info("Exported data to %s", path)
Example #6
 def _export_element_timeseries(self, path, fmt, compress):
     for elem_class in self.list_element_classes():
         for prop in self.list_element_properties(elem_class):
             dataset = self._group[f"{elem_class}/ElementProperties/{prop}"]
             prop_type = get_dataset_property_type(dataset)
             if prop_type == DatasetPropertyType.FILTERED:
                 self._export_filtered_dataframes(elem_class, prop, path,
                                                  fmt, compress)
             else:
                 df = self.get_full_dataframe(elem_class, prop)
                 base = "__".join([elem_class, prop])
                 filename = os.path.join(path,
                                         base + "." + fmt.replace(".", ""))
                 write_dataframe(df, filename, compress=compress)
Example #7
    def _export_elements(self, metadata):
        dss = self._dss_instance
        exports = (
            # TODO: opendssdirect does not provide a function to export Bus information.
            ("CapacitorsInfo", dss.Capacitors.Count,
             dss.utils.capacitors_to_dataframe),
            ("FusesInfo", dss.Fuses.Count, dss.utils.fuses_to_dataframe),
            ("GeneratorsInfo", dss.Generators.Count,
             dss.utils.generators_to_dataframe),
            ("IsourceInfo", dss.Isource.Count, dss.utils.isource_to_dataframe),
            ("LinesInfo", dss.Lines.Count, dss.utils.lines_to_dataframe),
            ("LoadsInfo", dss.Loads.Count, dss.utils.loads_to_dataframe),
            ("MetersInfo", dss.Meters.Count, dss.utils.meters_to_dataframe),
            ("MonitorsInfo", dss.Monitors.Count,
             dss.utils.monitors_to_dataframe),
            ("PVSystemsInfo", dss.PVsystems.Count,
             dss.utils.pvsystems_to_dataframe),
            ("ReclosersInfo", dss.Reclosers.Count,
             dss.utils.reclosers_to_dataframe),
            ("RegControlsInfo", dss.RegControls.Count,
             dss.utils.regcontrols_to_dataframe),
            ("RelaysInfo", dss.Relays.Count, dss.utils.relays_to_dataframe),
            ("SensorsInfo", dss.Sensors.Count, dss.utils.sensors_to_dataframe),
            ("TransformersInfo", dss.Transformers.Count,
             dss.utils.transformers_to_dataframe),
            ("VsourcesInfo", dss.Vsources.Count,
             dss.utils.vsources_to_dataframe),
            ("XYCurvesInfo", dss.XYCurves.Count,
             dss.utils.xycurves_to_dataframe),
            # TODO This can be very large. Consider making it configurable.
            #("LoadShapeInfo", dss.LoadShape.Count, dss.utils.loadshape_to_dataframe),
        )

        for filename, count_func, get_func in exports:
            if count_func() > 0:
                df = get_func()
                # Always record in CSV format for readability.
                # There are also warning messages from PyTables because the
                # data may contain strings.
                fname = filename + ".csv"
                relpath = os.path.join(self._export_relative_dir, fname)
                filepath = os.path.join(self._export_dir, fname)
                write_dataframe(df, filepath)
                metadata["element_info_files"].append(relpath)
                self._logger.info("Exported %s information to %s.", filename,
                                  filepath)

        self._export_transformers(metadata)
Example #8
    def _export_elements(self, metadata, element_types):
        exports = [
            # TODO: opendssdirect does not provide a function to export Bus information.
            ("Capacitor", "CapacitorsInfo", dss.Capacitors.Count),
            ("Fuse", "FusesInfo", dss.Fuses.Count),
            ("Generator", "GeneratorsInfo", dss.Generators.Count),
            ("Isource", "IsourceInfo", dss.Isource.Count),
            ("Line", "LinesInfo", dss.Lines.Count),
            ("Load", "LoadsInfo", dss.Loads.Count),
            ("Monitor", "MonitorsInfo", dss.Monitors.Count),
            ("PVSystem", "PVSystemsInfo", dss.PVsystems.Count),
            ("Recloser", "ReclosersInfo", dss.Reclosers.Count),
            ("RegControl", "RegControlsInfo", dss.RegControls.Count),
            ("Relay", "RelaysInfo", dss.Relays.Count),
            ("Sensor", "SensorsInfo", dss.Sensors.Count),
            ("Transformer", "TransformersInfo", dss.Transformers.Count),
            ("Vsource", "VsourcesInfo", dss.Vsources.Count),
            ("XYCurve", "XYCurvesInfo", dss.XYCurves.Count),
            # TODO This can be very large. Consider making it configurable.
            #("LoadShape", "LoadShapeInfo", dss.LoadShape.Count),
        ]
        if element_types:
            types = set()
            for elem_type in element_types:
                if elem_type.endswith("s"):
                    # Maintain compatibility with the old format, which used plural names.
                    elem_type = elem_type[:-1]
                types.add(elem_type)
            exports = [x for x in exports if x[0] in types]

        for class_name, filename, count_func in exports:
            df = dss.utils.class_to_dataframe(class_name)
            # Always record in CSV format for readability.
            # There are also warning messages from PyTables because the
            # data may contain strings.
            fname = filename + ".csv"
            relpath = os.path.join(self._export_relative_dir, fname)
            filepath = os.path.join(self._export_dir, fname)
            write_dataframe(df, filepath)
            metadata["element_info_files"].append(relpath)
            self._logger.info("Exported %s information to %s.", filename, filepath)

        if not element_types or "Transformer" in element_types or "Transformers" in element_types:
            self._export_transformers(metadata)
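
Example #8 replaces the per-class *_to_dataframe helpers of Example #7 with the generic dss.utils.class_to_dataframe. A minimal standalone sketch of that call, assuming a circuit has already been compiled; the master-file name is illustrative.

    import opendssdirect as dss

    # Assumes a circuit was compiled first, e.g.:
    # dss.Text.Command("Redirect master.dss")
    df = dss.utils.class_to_dataframe("Load")  # one row per Load element
    print(df.head())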
Example #9
 def _export_summed_element_timeseries(self, path, fmt, compress):
     for elem_class in self._summed_elem_timeseries_props:
         for prop in self._summed_elem_timeseries_props[elem_class]:
             fields = prop.split(ValueStorageBase.DELIMITER)
             assert len(fields) in (1, 2), fields
             # When prop includes a group, the joined name becomes
             # <elem_class>__<prop>__<group> because prop already
             # contains the delimiter.
             base = ValueStorageBase.DELIMITER.join([elem_class, prop])
             filename = os.path.join(path,
                                     base + "." + fmt.replace(".", ""))
             dataset = self._group[elem_class]["SummedElementProperties"][prop]
             prop_type = get_dataset_property_type(dataset)
             if prop_type == DatasetPropertyType.PER_TIME_POINT:
                 df = DatasetBuffer.to_dataframe(dataset)
                 self._finalize_dataframe(df, dataset)
                 write_dataframe(df, filename, compress=compress)
Example #10
 def __ExportDataFrame(self, df, basename):
     filename = basename + "." + self.__ExportFormat
     write_dataframe(df, filename, compress=self.__ExportCompression)
     self.pyLogger.info("Exported %s", filename)
Example #11
 def _export_dataframe(self, df, basename):
     filename = basename + "." + self._export_format
     write_dataframe(df, filename, compress=self._export_compression)
     self._logger.info("Exported %s", filename)
Example #12
    def _export_pv_profiles(self):
        granularity = self._settings.reports.granularity
        pv_systems = self._objects_by_class.get("PVSystems")
        if pv_systems is None:
            logger.info("No PVSystems are present")
            return

        pv_infos = []
        profiles = set()
        for full_name, obj in pv_systems.items():
            profile_name = obj.GetParameter("yearly").lower()
            if profile_name != "":
                profiles.add(profile_name)
            pv_infos.append({
                "irradiance": obj.GetParameter("irradiance"),
                "name": full_name,
                "pmpp": obj.GetParameter("pmpp"),
                "load_shape_profile": profile_name,
            })

        pmult_sums = {}
        if dss.LoadShape.First() == 0:
            self._logger.warning("There are no load shapes.")
            return

        sim_resolution = self._settings.project.step_resolution_sec
        per_time_point = (
            ReportGranularity.PER_ELEMENT_PER_TIME_POINT,
            ReportGranularity.ALL_ELEMENTS_PER_TIME_POINT,
        )
        load_shape_data = {}
        while True:
            name = dss.LoadShape.Name().lower()
            if name in profiles:
                sinterval = dss.LoadShape.SInterval()
                assert sim_resolution >= sinterval, f"{sim_resolution} >= {sinterval}"
                df = create_loadshape_pmult_dataframe_for_simulation(self._settings)
                sum_values = df.iloc[:, 0].sum()
                pmult_sums[name] = sum_values
                if granularity in per_time_point:
                    load_shape_data[name] = df.iloc[:, 0].values
            if dss.LoadShape.Next() == 0:
                break

        if load_shape_data and granularity in per_time_point:
            filename = os.path.join(self._export_dir, PV_LOAD_SHAPE_FILENAME)
            index = create_datetime_index_from_settings(self._settings)
            df = pd.DataFrame(load_shape_data, index=index)
            write_dataframe(df, filename, compress=True)

        for pv_info in pv_infos:
            profile = pv_info["load_shape_profile"]
            if profile == "":
                pv_info["load_shape_pmult_sum"] = 0
            else:
                pv_info["load_shape_pmult_sum"] = pmult_sums[profile]

        data = {"pv_systems": pv_infos}
        filename = os.path.join(self._export_dir, PV_PROFILES_FILENAME)
        dump_data(data, filename, indent=2)
        self._logger.info("Exported PV profile information to %s", filename)
Example #13
 def _export_filtered_dataframes(self, elem_class, prop, path, fmt,
                                 compress):
     for name, df in self.iterate_dataframes(elem_class, prop):
         base = "__".join([elem_class, prop, name])
         filename = os.path.join(path, base + "." + fmt.replace(".", ""))
         write_dataframe(df, filename, compress=compress)