Example #1
    def _check_sum_groups(self, sum_groups_file):
        if sum_groups_file is not None:
            if self._sum_groups:
                raise InvalidConfiguration(
                    f"Cannot set both sum_groups and sum_groups_file")

            # This path needs to be relative to the current directory, not the Exports.toml.
            # This might need to be changed.
            if not Path(sum_groups_file).exists():
                raise InvalidConfiguration(
                    f"{sum_groups_file} does not exist. The path must be relative to the current directory."
                )
            self._sum_groups = load_data(sum_groups_file)["sum_groups"]

        self._sum_elements = True  # Ignore the user setting. This must be true.
        # Ensure that there are no duplicate names.
        orig_length = 0
        all_names = set()
        for group in self._sum_groups:
            orig_length += len(group["elements"])
            group["elements"] = set(group["elements"])
            all_names = all_names.union(group["elements"])
        if orig_length != len(all_names):
            tag = f"{self.elem_class}/{self.name}"
            raise InvalidConfiguration(
                f"{tag} has duplicate element names in sum_groups")
Example #2
    def append_required_exports(exports, settings: SimulationSettingsModel):
        """Append export properties required by the configured reports.

        Parameters
        ----------
        exports : ExportListReader
        settings : SimulationSettingsModel

        """
        all_reports = Reports.get_all_reports()
        report_settings = settings.reports
        if not report_settings:
            return

        existing_scenarios = {x.name for x in settings.project.scenarios}
        for report in report_settings.types:
            if not report.enabled:
                continue
            name = report.name
            if name not in all_reports:
                raise InvalidConfiguration(f"{name} is not a valid report")

            required_scenarios = all_reports[name].get_required_scenario_names()
            missing = required_scenarios.difference(existing_scenarios)
            if missing:
                text = " ".join(missing)
                raise InvalidConfiguration(
                    f"{name} requires these scenarios: {text}")

            scenarios = report.scenarios
            active_scenario = settings.project.active_scenario
            if scenarios and active_scenario not in scenarios:
                logger.debug("report %s is not enabled for scenario %s", name,
                             active_scenario)
                continue

            required = all_reports[name].get_required_exports(settings)
            for elem_class, required_properties in required.items():
                for req_prop in required_properties:
                    found = False
                    store_type = req_prop.get("store_values_type", "all")
                    for prop in exports.list_element_properties(elem_class):
                        if prop.name == req_prop["property"] and \
                                prop.store_values_type.value == store_type:
                            if prop.opendss_classes or req_prop.get(
                                    "opendss_classes"):
                                assert prop.sum_elements == req_prop.get(
                                    "sum_elements", False)
                                assert prop.data_conversion == \
                                    req_prop.get("data_conversion", DataConversion.NONE)
                                prop.append_opendss_classes(
                                    req_prop["opendss_classes"])
                            found = True
                    if not found:
                        exports.append_property(elem_class, req_prop)
                        logger.debug("Add required property: %s %s",
                                     elem_class, req_prop)

            all_reports[name].set_required_project_settings(settings)
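
For reference, a hedged sketch of one required-property entry as the loop above consumes it (key names come from the lookups in the code; the values are hypothetical):

    req_prop = {
        "property": "Currents",        # compared against prop.name
        "store_values_type": "all",    # default used by req_prop.get()
        "opendss_classes": ["Lines"],  # merged into a matching property
        "sum_elements": False,
    }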
Example #3
    def _get_window_size_by_name_index(self, prop):
        """Returns a list of window sizes per element name corresponding to self._names."""
        if not prop.opendss_classes:
            raise InvalidConfiguration(
                f"window_sizes requires opendss_classes: {prop.name}"
            )

        window_sizes = [None] * len(self._names)
        for opendss_class, window_size in prop.window_sizes.items():
            if opendss_class not in prop.opendss_classes:
                raise InvalidConfiguration(
                    f"{opendss_class} is not defined in opendss_classes: {prop.name}"
                )

        # Note: names have singular class names, such as Line.line1,
        # while opendss_classes are plural, such as Lines.
        mapping = {}
        for i, name in enumerate(self._names):
            opendss_class_singular = name.split(".")[0]
            size = mapping.get(opendss_class_singular)
            if size is None:
                for opendss_class in prop.opendss_classes:
                    if opendss_class.startswith(opendss_class_singular):
                        size = prop.window_sizes[opendss_class]
                        mapping[opendss_class_singular] = size
            if size is None:
                raise InvalidConfiguration(f"Failed to find window_size for {name}")
            window_sizes[i] = size

        return window_sizes
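
A hedged illustration of the singular-to-plural resolution (all values hypothetical):

    # names carry singular class prefixes, while window_sizes is keyed by
    # the plural opendss_classes; "Line" resolves to "Lines" via startswith.
    names = ["Line.line1", "Line.line2", "Transformer.t1"]
    window_sizes = {"Lines": 4, "Transformers": 96}
    # For these inputs the method would return [4, 4, 96].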
Example #4
 def _get_window_sizes(inputs, resolution):
     line_window_size = timedelta(hours=inputs["line_window_size_hours"])
     if line_window_size % resolution != timedelta(0):
         raise InvalidConfiguration(
             f"line_window_size={line_window_size} must be a multiple of {resolution}"
         )
     transformer_window_size = timedelta(hours=inputs["transformer_window_size_hours"])
     if transformer_window_size % resolution != timedelta(0):
         raise InvalidConfiguration(
             f"transformer_window_size={transformer_window_size} must be a multiple of {resolution}"
         )
     return line_window_size // resolution, transformer_window_size // resolution
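
A worked example of the divisibility check (inputs hypothetical):

    from datetime import timedelta

    inputs = {"line_window_size_hours": 4, "transformer_window_size_hours": 2}
    resolution = timedelta(minutes=15)
    # 4 h % 15 min == 0 and 2 h % 15 min == 0, so no exception is raised
    # and the function returns (16, 8) window steps. A resolution of
    # timedelta(minutes=25) would raise, since 4 h is not a multiple of 25 min.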
Example #5
    def run(self, logging_configured=True, tar_project=False, zip_project=False, dry_run=False):
        """Run all scenarios in the project."""
        if isinstance(self._fs_intf, PyDssArchiveFileInterfaceBase):
            raise InvalidConfiguration("cannot run from an archived project")
        if tar_project and zip_project:
            raise InvalidParameter("tar_project and zip_project cannot both be True")
        if self._simulation_config['Project']['DSS File'] == "":
            raise InvalidConfiguration("a valid opendss file needs to be passed")

        inst = instance()
        self._simulation_config["Logging"]["Pre-configured logging"] = logging_configured

        if dry_run:
            store_filename = os.path.join(tempfile.gettempdir(), STORE_FILENAME)
        else:
            store_filename = os.path.join(self._project_dir, STORE_FILENAME)

        driver = None
        if self._simulation_config["Exports"].get("Export Data In Memory", True):
            driver = "core"
        with h5py.File(store_filename, mode="w", driver=driver) as hdf_store:
            self._hdf_store = hdf_store
            self._hdf_store.attrs["version"] = DATA_FORMAT_VERSION
            for scenario in self._scenarios:
                self._simulation_config["Project"]["Active Scenario"] = scenario.name
                inst.run(self._simulation_config, self, scenario, dry_run=dry_run)
                self._estimated_space[scenario.name] = inst.get_estimated_space()

        if not dry_run:
            results = None
            export_tables = self._simulation_config["Exports"].get(
                "Export Data Tables", False
            )
            generate_reports = self._simulation_config.get("Reports", False)
            if export_tables or generate_reports:
                # Hack. Have to import here. Need to re-organize to fix.
                from PyDSS.pydss_results import PyDssResults
                results = PyDssResults(self._project_dir)
                if export_tables:
                    for scenario in results.scenarios:
                        scenario.export_data()

                if generate_reports:
                    results.generate_reports()

        if tar_project:
            self._tar_project_files()
        elif zip_project:
            self._zip_project_files()

        if dry_run and os.path.exists(store_filename):
            os.remove(store_filename)
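
The driver="core" branch buffers the HDF5 file in memory; a minimal standalone sketch (the filename is hypothetical):

    import h5py

    # With driver="core", h5py keeps the file image in memory and, by
    # default, still flushes it to disk when the file is closed.
    with h5py.File("store.h5", mode="w", driver="core") as hdf_store:
        hdf_store.attrs["version"] = "1.0"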
Example #6
    def __init__(self, elem_class, data):
        self.elem_class = elem_class
        self._opendss_classes = data.get("opendss_classes", [])
        self.name = data["property"]
        self.publish = data.get("publish", False)
        self._data_conversion = DataConversion(
            data.get("data_conversion", "none"))
        self._sum_elements = data.get("sum_elements", False)
        self._sum_groups = data.get("sum_groups", [])
        sum_groups_file = data.get("sum_groups_file")
        self._limits = self._parse_limits(data, "limits")
        self._limits_filter = LimitsFilter(data.get("limits_filter",
                                                    "outside"))
        self._limits_b = self._parse_limits(data, "limits_b")
        self._limits_filter_b = LimitsFilter(
            data.get("limits_filter_b", "outside"))
        self._store_values_type = StoreValuesType(
            data.get("store_values_type", "all"))
        self._names, self._are_names_regex, self._are_names_filtered = self._parse_names(
            data)
        self._sample_interval = data.get("sample_interval", 1)
        self._window_size = data.get("window_size", 100)
        self._window_sizes = data.get("window_sizes", {})
        custom_prop = f"{elem_class}.{self.name}"
        self._custom_metric = CUSTOM_METRICS.get(custom_prop)

        if self._sum_groups or sum_groups_file:
            self._check_sum_groups(sum_groups_file)

        # Note to devs: any field added here needs to be handled in serialize()

        if self._sum_elements and self._store_values_type not in \
                (StoreValuesType.ALL, StoreValuesType.SUM):
            raise InvalidConfiguration(
                "sum_elements requires store_values_types = all or sum")

        if self._is_max() and self._limits is not None:
            raise InvalidConfiguration("limits are not allowed with max types")

        requires_opendss_classes = (
            "ExportLoadingsMetric",
            "OverloadsMetricInMemory",
            "ExportPowersMetric",
        )
        if elem_class == "CktElement" and self.name in requires_opendss_classes and not self._opendss_classes:
            raise InvalidConfiguration(
                f"Exporting {elem_class}.{self.name} requires that opendss_classes be specifed"
            )
Example #7
    def make_storage_container(
        self, path, prop, num_steps, max_chunk_bytes, values, **kwargs
    ):
        """Make a storage container.

        Returns
        -------
        StorageFilterBase

        """
        if prop.store_values_type not in STORAGE_TYPE_MAP:
            raise InvalidConfiguration(f"unsupported {prop.store_values_type}")
        elem_names = self._make_elem_names()
        cls = STORAGE_TYPE_MAP[prop.store_values_type]
        container = cls(
            self._hdf_store,
            path,
            prop,
            num_steps,
            max_chunk_bytes,
            values,
            elem_names,
            **kwargs,
        )
        return container
Example #8
    def create(cls, controllers: list, settings: ProjectModel):
        """Create controllers. The circuit must be loaded in OpenDSS."""
        solver = get_solver_from_simulation_type(settings)
        buses = OpenDSS.CreateBusObjects()
        elements, elements_by_class = OpenDSS.CreateDssObjects(buses)
        controllers_by_class = defaultdict(dict)
        for circuit_element_controller in controllers:
            controller_class = circuit_element_controller.get_controller_class()
            element_class = circuit_element_controller.get_element_class()
            for name in circuit_element_controller.element_names:
                element = elements.get(name)
                if element is None:
                    raise InvalidConfiguration(f"{name} is not in the circuit")
                controller = controller_class(
                    element,
                    circuit_element_controller.controller_model.dict(
                        by_alias=True),
                    dss,
                    elements,
                    solver,
                )
                controllers_by_class[element_class][f"Controller.{name}"] = controller

        return cls(
            controllers_by_class,
            solver,
            settings.max_control_iterations,
            settings.error_tolerance,
        )
Example #9
    def serialize(self):
        """Serialize object to a dictionary."""
        if self._are_names_regex:
            #raise InvalidConfiguration("cannot serialize when names are regex")
            logger.warning("cannot serialize when names are regex")
            names = None
        else:
            names = self._names
        data = {
            "property": self.name,
            "data_conversion": self._data_conversion.value,
            "opendss_classes": self._opendss_classes,
            "sample_interval": self._sample_interval,
            "names": names,
            "publish": self.publish,
            "store_values_type": self.store_values_type.value,
            "sum_elements": self.sum_elements,
            "sum_groups": self.sum_groups,
        }
        if self._limits is not None:
            data["limits"] = [self._limits.min, self._limits.max]
            data["limits_filter"] = self._limits_filter.value
        if self._limits_b is not None:
            data["limits_b"] = [self._limits_b.min, self._limits_b.max]
            data["limits_filter_b"] = self._limits_filter_b.value
        if self.is_moving_average():
            if self.window_sizes:
                data["window_sizes"] = self._window_sizes
                if not self._opendss_classes:
                    raise InvalidConfiguration(
                        f"window_sizes requires opendss_classes: {self.name}")
            else:
                data["window_size"] = self._window_size

        return data
Example #10
    def run_scenario(self,
                     project,
                     scenario,
                     settings: SimulationSettingsModel,
                     dry_run=False):
        if dry_run:
            dss = OpenDSS(settings)
            self._dump_scenario_simulation_settings(settings)
            #dss.init(dss_args)
            logger.info('Dry run scenario: %s',
                        settings.project.active_scenario)
            if settings.monte_carlo.num_scenarios > 0:
                raise InvalidConfiguration(
                    "Dry run does not support MonteCarlo simulation.")
            else:
                self._estimated_space = dss.DryRunSimulation(project, scenario)
            return None, None

        opendss = OpenDSS(settings)
        self._dump_scenario_simulation_settings(settings)
        logger.info('Running scenario: %s', settings.project.active_scenario)
        if settings.monte_carlo.num_scenarios > 0:
            opendss.RunMCsimulation(project,
                                    scenario,
                                    samples=settings.monte_carlo.num_scenarios)
        else:
            for is_complete, _, _, _ in opendss.RunSimulation(
                    project, scenario):
                if is_complete:
                    break
Example #11
File: pyDSS.py Project: jgu2/PyDSS
    def run_scenario(self,
                     project,
                     scenario,
                     simulation_config,
                     dry_run=False):
        dss_args = self.update_scenario_settings(simulation_config)
        self._dump_scenario_simulation_settings(dss_args)

        if dry_run:
            dss = dssInstance.OpenDSS(dss_args)
            logger.info('Dry run scenario: %s',
                        dss_args["Project"]["Active Scenario"])
            if dss_args["MonteCarlo"]["Number of Monte Carlo scenarios"] > 0:
                raise InvalidConfiguration(
                    "Dry run does not support MonteCarlo simulation.")
            else:
                self._estimated_space = dss.DryRunSimulation(project, scenario)
            return None, None

        dss = dssInstance.OpenDSS(dss_args)
        logger.info('Running scenario: %s',
                    dss_args["Project"]["Active Scenario"])
        if dss_args["MonteCarlo"]["Number of Monte Carlo scenarios"] > 0:
            dss.RunMCsimulation(
                project,
                scenario,
                samples=dss_args["MonteCarlo"]["Number of Monte Carlo scenarios"],
            )
        else:
            dss.RunSimulation(project, scenario)
        return dss_args
Example #12
    def append_required_exports(exports, options):
        """Append export properties required by the configured reports.

        Parameters
        ----------
        exports : ExportListReader
        options : dict
            Simulation options

        """
        report_options = options.get("Reports")
        if report_options is None:
            return

        for report in report_options["Types"]:
            if not report["enabled"]:
                continue
            name = report["name"]
            if name not in REPORTS:
                raise InvalidConfiguration(f"{name} is not a valid report")

            required = REPORTS[name].get_required_reports()
            for elem_class, required_properties in required.items():
                for req_prop in required_properties:
                    found = False
                    store_type = req_prop["store_values_type"]
                    for prop in exports.list_element_properties(elem_class):
                        if prop.name == req_prop["property"] and \
                                prop.store_values_type.value == store_type:
                            found = True
                            break
                    if not found:
                        exports.append_property(elem_class, req_prop)
                        logger.debug("Add required property: %s %s",
                                     elem_class, req_prop)
Example #13
 def _GetActiveScenario(self):
     active_scenario = self._settings.project.active_scenario
     for scenario in self._settings.project.scenarios:
         if scenario.name == active_scenario:
             return scenario
     raise InvalidConfiguration(
         f"Active Scenario {active_scenario} is not present")
Example #14
    def _parse_limits(data):
        limits = data.get("limits")
        if limits is None:
            return None

        if not isinstance(limits, list) or len(limits) != 2:
            raise InvalidConfiguration(f"invalid limits format: {limits}")

        return MinMax(limits[0], limits[1])
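
Hedged examples of accepted and rejected inputs, calling it as a plain function for illustration:

    limits = _parse_limits({"limits": [0.95, 1.05]})  # MinMax(0.95, 1.05)
    assert _parse_limits({}) is None                  # key absent
    # A scalar or a wrong-length list raises InvalidConfiguration:
    #   _parse_limits({"limits": 1.05})
    #   _parse_limits({"limits": [1, 2, 3]})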
Example #15
 def add_property(self, prop):
     """Add an instance of ExportListProperty for tracking."""
     if prop.are_names_filtered != self._are_names_filtered:
         raise InvalidConfiguration(f"All properties for shared elements must have the same filters: "
             f"{self._elem_class.__name__} / {prop.name}.")
     existing = self._properties.get(prop.store_values_type)
     if existing is None:
         self._properties[prop.store_values_type] = prop
     elif prop != existing:
         raise InvalidParameter(f"{prop.store_values_type} is already stored")
Example #16
    def _parse_names(data):
        names = data.get("names")
        name_regexes = data.get("name_regexes")

        if names and name_regexes:
            raise InvalidConfiguration(
                f"names and name_regexes cannot both be set")
        for obj in (names, name_regexes):
            if obj is None:
                continue
            if not isinstance(obj, list) or not isinstance(obj[0], str):
                raise InvalidConfiguration(f"invalid name format: {obj}")

        if names:
            return set(names), False
        if name_regexes:
            return [re.compile(r"{}".format(x)) for x in name_regexes], True

        return None, False
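
Hedged examples of the three return paths, calling it as a plain function for illustration:

    names, are_regex = _parse_names({"names": ["Line.line1"]})
    assert names == {"Line.line1"} and are_regex is False
    regexes, are_regex = _parse_names({"name_regexes": [r"Line\.line\d+"]})
    assert are_regex is True
    # With neither key set the result is (None, False); setting both
    # raises InvalidConfiguration.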
Example #17
    def hdf_store(self):
        """Return the HDFStore

        Returns
        -------
        pd.HDFStore

        """
        if self._hdf_store is None:
            raise InvalidConfiguration("hdf_store is not defined")
        return self._hdf_store
Example #18
 def make_empty_storage_container(self, path, prop):
     """Make an empty storage container."""
     if prop.store_values_type not in STORAGE_TYPE_MAP:
         raise InvalidConfiguration(f"unsupported {prop.store_values_type}")
     elem_names = self._make_elem_names()
     cls = STORAGE_TYPE_MAP[prop.store_values_type]
     values = [ValueByNumber(x.FullName, self.label(), 0.0) for x in self._dss_objs]
     container = cls(
         self._hdf_store, path, prop, 1, self._max_chunk_bytes, values, elem_names
     )
     return container
Example #19
 def __init__(self, name, prop, value):
     super().__init__()
     assert not isinstance(value, list), str(value)
     self._name = name
     self._prop = prop
     self._value_type = type(value)
     if self._value_type == str:
         raise InvalidConfiguration(
             f"Data export feature does not support strings: name={name} prop={prop} value={value}"
         )
     self._value = value
Example #20
    def __init__(self, registry_filename=None):
        if registry_filename is None:
            self._registry_filename = Path.home() / self._REGISTRY_FILENAME
        else:
            self._registry_filename = Path(registry_filename)

        self._controllers = {x: {} for x in CONTROLLER_TYPES}
        data = copy.deepcopy(DEFAULT_REGISTRY)
        for controller_type, controllers in DEFAULT_REGISTRY["Controllers"].items():
            for controller in controllers:
                path = Path(controller["filename"])
                if not path.exists():
                    raise InvalidConfiguration(
                        f"Default controller file={path} does not exist")

        # This is written to work with legacy versions where default controllers were
        # written to the registry.
        if self._registry_filename.exists():
            registered = load_data(self._registry_filename)
            to_delete = []
            for controller_type, controllers in registered["Controllers"].items():
                for i, controller in enumerate(controllers):
                    path = Path(controller["filename"])
                    if not path.exists():
                        name = controller["name"]
                        msg = f"The registry contains a controller with an invalid file. " \
                        f"Type={controller_type} name={name} file={path}.\nWould you like to " \
                        "delete it? (y/n) -> "
                        response = input(msg).lower()
                        if response == "y":
                            to_delete.append((controller_type, i))
                            continue
                        else:
                            logger.error(
                                "Exiting because the registry %s is invalid",
                                self._registry_filename)
                            sys.exit(1)
                    if not self._is_default_controller(controller_type,
                                                       controller["name"]):
                        data["Controllers"][controller_type].append(controller)
            if to_delete:
                for ref in reversed(to_delete):
                    registered["Controllers"][ref[0]].pop(ref[1])
                backup = str(self._registry_filename) + ".bk"
                self._registry_filename.rename(backup)
                dump_data(registered, self._registry_filename, indent=2)
                logger.info("Fixed the registry and moved the original to %s",
                            backup)

        for controller_type, controllers in data["Controllers"].items():
            for controller in controllers:
                self._add_controller(controller_type, controller)
Example #21
    def fs_interface(self):
        """Return the interface object used to read files.

        Returns
        -------
        PyDssFileSystemInterface

        """
        if self._fs_intf is None:
            raise InvalidConfiguration("fs interface is not defined")
        return self._fs_intf
Example #22
def get_load_shape_resolution_secs():
    def func():
        if dss.LoadShape.Name() == "default":
            return None
        return dss.LoadShape.SInterval()

    res = [x for x in iter_elements(dss.LoadShape, func) if x is not None]
    if len(set(res)) != 1:
        raise InvalidConfiguration(
            f"SInterval for all LoadShapes must be the same: {res}")
    return res[0]
Example #23
    def _check_scenarios(self):
        scenarios = self._list_scenario_names()

        if scenarios is None:
            return

        exp_scenarios = self.scenario_names
        exp_scenarios.sort()

        for scenario in exp_scenarios:
            if scenario not in scenarios:
                raise InvalidConfiguration(
                    f"{scenario} is not a valid scenario. Valid scenarios: {scenarios}"
                )
Example #24
    def _SetSnapshotTimePoint(self, scenario):
        """Adjusts the time parameters based on the mode."""
        p_settings = self._settings.project
        config = scenario.snapshot_time_point_selection_config
        mode = config.mode
        assert mode != SnapshotTimePointSelectionMode.NONE, mode

        if mode != SnapshotTimePointSelectionMode.NONE:
            if p_settings.simulation_type != SimulationType.QSTS:
                raise InvalidConfiguration(
                    f"{mode} is only supported with QSTS simulations")

            # These settings have to be temporarily overridden because the
            # underlying implementation creates load shape dataframes.
            orig_start = p_settings.start_time
            orig_duration = p_settings.simulation_duration_min
            if orig_duration != p_settings.step_resolution_sec / 60:
                raise InvalidConfiguration(
                    "Simulation duration must be the same as resolution")
            try:
                p_settings.start_time = config.start_time
                p_settings.simulation_duration_min = config.search_duration_min
                new_start = get_snapshot_timepoint(self._settings,
                                                   mode).strftime(DATE_FORMAT)
                p_settings.start_time = new_start
                self._Logger.info(
                    "Changed simulation start time from %s to %s",
                    orig_start,
                    new_start,
                )
            except Exception:
                p_settings.start_time = orig_start
                raise
            finally:
                p_settings.simulation_duration_min = orig_duration
        else:
            assert False, f"unsupported mode {mode}"
Example #25
    def DryRunSimulation(self, project, scenario):
        """Run one time point for getting estimated space."""
        if not self._Options['Exports']['Log Results']:
            raise InvalidConfiguration("Log Reults must set to be True.")

        Steps, _, _ = self._dssSolver.SimulationSteps()
        self._Logger.info('Dry run simulation...')
        self.ResultContainer.InitializeDataStore(project.hdf_store, Steps)

        try:
            self.RunStep(0)
        finally:
            self.ResultContainer.FlushData()

        return self.ResultContainer.max_num_bytes()
Example #26
    def compute_chunk_count(num_columns,
                            max_size,
                            dtype,
                            max_chunk_bytes=DEFAULT_MAX_CHUNK_BYTES):
        assert max_size > 0, f"max_size={max_size}"
        tmp = np.empty((1, num_columns), dtype=dtype)
        size_row = tmp.size * tmp.itemsize
        chunk_count = min(int(max_chunk_bytes / size_row), max_size)
        if chunk_count == 0:
            raise InvalidConfiguration(
                "HDF Max Chunk Bytes is smaller than the size of a row. Please increase it. "
                f"max_chunk_bytes={max_chunk_bytes} num_columns={num_columns} "
                f"size_row={size_row}"
            )

        return chunk_count
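
A worked example of the row-size arithmetic (sizes hypothetical):

    import numpy as np

    row = np.empty((1, 1000), dtype=np.float64)
    size_row = row.size * row.itemsize        # 1000 * 8 = 8000 bytes
    max_chunk_bytes = 32 * 1024 * 1024        # 32 MiB
    # int(33554432 / 8000) == 4194 rows per chunk, capped at max_size.
    assert min(int(max_chunk_bytes / size_row), 10_000) == 4194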
Example #27
    def serialize(self):
        """Serialize object to a dictionary."""
        if self._are_names_regex:
            raise InvalidConfiguration("cannot serialize when names are regex")
        data = {
            "property": self.name,
            "sample_interval": self._sample_interval,
            "names": self._names,
            "publish": self.publish,
            "store_values_type": self.store_values_type.value,
        }
        if self._limits is not None:
            data["limits"] = [self._limits.min, self._limits.max]
            data["limits_filter"] = self._limits_filter.value
        if self._store_values_type == StoreValuesType.MOVING_AVERAGE:
            data["moving_average_store_interval"] = self._ma_store_interval
            data["window_size"] = self._window_size

        return data
Example #28
    def _export_pv_profiles(self):
        dss = self._dss_instance
        pv_systems = self._objects_by_class.get("PVSystems")
        if pv_systems is None:
            raise InvalidConfiguration("PVSystems are not exported")

        pv_infos = []
        profiles = set()
        for full_name, obj in pv_systems.items():
            profile_name = obj.GetParameter("yearly").lower()
            if profile_name != "":
                profiles.add(profile_name)
            pv_infos.append({
                "irradiance": obj.GetParameter("irradiance"),
                "name": full_name,
                "pmpp": obj.GetParameter("pmpp"),
                "load_shape_profile": profile_name,
            })

        pmult_sums = {}
        dss.LoadShape.First()
        sim_resolution = self._options["Project"]["Step resolution (sec)"]
        while True:
            name = dss.LoadShape.Name().lower()
            if name in profiles:
                sinterval = dss.LoadShape.SInterval()
                assert sim_resolution >= sinterval
                offset = int(sim_resolution / dss.LoadShape.SInterval())
                pmult_sums[name] = sum(dss.LoadShape.PMult()[::offset])
            if dss.LoadShape.Next() == 0:
                break

        for pv_info in pv_infos:
            profile = pv_info["load_shape_profile"]
            if profile == "":
                pv_info["load_shape_pmult_sum"] = 0
            else:
                pv_info["load_shape_pmult_sum"] = pmult_sums[profile]

        data = {"pv_systems": pv_infos}
        filename = os.path.join(self._export_dir, "pv_profiles.json")
        dump_data(data, filename, indent=2)
        self._logger.info("Exported PV profile information to %s", filename)
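
The offset arithmetic keeps every Nth PMult point when the simulation step is coarser than the load shape interval; a worked sketch (values hypothetical):

    sim_resolution = 900              # seconds per simulation step
    sinterval = 300                   # seconds per load shape point
    offset = int(sim_resolution / sinterval)   # 3
    pmult = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
    assert pmult[::offset] == [0.1, 0.4]       # every third point is kept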
Example #29
    def get_hdf_store_filename(self):
        """Return the HDFStore filename.

        Returns
        -------
        str
            Path to the HDFStore.

        Raises
        ------
        InvalidConfiguration
            Raised if no store exists.

        """
        filename = os.path.join(self._project_dir, STORE_FILENAME)
        if not os.path.exists(filename):
            raise InvalidConfiguration(f"HDFStore does not exist")

        return filename
Example #30
    def read_scenario_export_metadata(self, scenario_name):
        """Return the metadata for a scenario's exported data.

        Parameters
        ----------
        scenario_name : str

        Returns
        -------
        dict

        """
        if self._fs_intf is None:
            raise InvalidConfiguration("pydss fs interface is not defined")

        if scenario_name not in self.list_scenario_names():
            raise InvalidParameter(f"invalid scenario: {scenario_name}")

        return self._fs_intf.read_scenario_export_metadata(scenario_name)