Beispiel #1
0
def tabula_system(code_country, code_boundarycond="SUH", code_variantnumber=1):
    """Return system level information from TABULA archetypes.

    Args:
        code_country (str): the alpha-2 code of the country. eg. "FR"
        code_boundarycond (str): choices are "SUH" and "MUH".
        code_variantnumber (int): the variant number; zero-padded to two
            digits before being sent to the TABULA webtool.

    Returns:
        pandas.DataFrame: a single-column frame holding the system data.

    Raises:
        ValueError: if `code_boundarycond` is invalid or the query returned
            no data.
    """
    # Check code country
    code_country = _resolve_codecountry(code_country)

    # Check code_boundarycond
    if code_boundarycond.upper() not in ["SUH", "MUH"]:
        # Bug fix: the message template contained a "{}" placeholder but
        # .format() was never called; format with the offending value.
        raise ValueError(
            'specified code_boundarycond "{}" not valid. Available values are '
            '"SUH" (Single Unit Houses) '
            'and "MUH" (Multi-unit Houses)'.format(code_boundarycond))

    # Check code variant number; non-str values are zero-padded to 2 digits
    if not isinstance(code_variantnumber, str):
        code_variantnumber = str(code_variantnumber).zfill(2)

    # prepare data
    data = {"systype": [code_country, code_boundarycond, code_variantnumber]}
    json_response = tabula_system_request(data)

    if json_response is not None:
        # load data
        df = pd.DataFrame(json_response)
        return df.data.to_frame()
    else:
        raise ValueError('No data found in TABULA matrix with query:"{}"\nRun '
                         "archetypal.dataportal.tabula_available_buildings() "
                         'with country code "{}" to get list of possible '
                         "building types"
                         "".format(".".join(data["systype"]), code_country))
Beispiel #2
0
def tabula_system_request(data):
    """Query the TABULA webtool for system-type details.

    Args:
        data (dict): prepared data for html query; `data["systype"]` is a
            list of [country, boundary condition, variant number].

    Returns:
        dict: the json response, or None when the server response could not
        be parsed as JSON.

    Examples:
        'http://webtool.building-typology.eu/data/matrix/system/detail/IT.SUH.01/dc/1546889637169'
    """
    system = ".".join(data["systype"])
    # The "dc" url token is the first 13 hex digits of the md5 of the query.
    hexint = hashlib.md5(system.encode("utf-8")).hexdigest()[0:13]

    log("quering system type {}".format(system))
    prepared_url = ("http://webtool.building-typology.eu/data/matrix/system"
                    "/detail/{0}/dc/{1}".format(system, hexint))

    cached_response_json = get_from_cache(prepared_url)

    if cached_response_json is not None:
        # found this request in the cache, just return it instead of making a
        # new HTTP call
        return cached_response_json

    else:
        # if this URL is not already in the cache, pause, then request it
        response = requests.get(prepared_url)

        try:
            response_json = response.json()
            if "remark" in response_json:
                # Bug fix: `level` was previously passed to str.format()
                # instead of log().
                log('Server remark: "{}"'.format(response_json["remark"]),
                    level=lg.WARNING)
            save_to_cache(prepared_url, response_json)
        except Exception:
            # Handle some server errors (best-effort: falls through to None)
            pass
        else:
            return response_json
Beispiel #3
0
 def _do_partition(surf):
     """Return an OpaqueConstruction for *surf*, tagged as a Partition.

     Args:
         surf (EpBunch): a surface whose Construction_Name is looked up in
             the surface's own idf.

     Returns:
         OpaqueConstruction or None: the construction with its area and
         Surface_Type set, or None when the construction does not exist in
         the file (e.g. "Air Wall").
     """
     # Look up the CONSTRUCTION object referenced by this surface.
     the_construction = surf.theidf.getobject(
         "Construction".upper(), surf.Construction_Name
     )
     if the_construction:
         oc = OpaqueConstruction.from_epbunch(the_construction)
         # Carry over the surface area and mark the construction's role.
         oc.area = surf.area
         oc.Surface_Type = "Partition"
         log(
             'surface "%s" assigned as a Partition' % surf.Name,
             lg.DEBUG,
             name=surf.theidf.name,
         )
         return oc
     else:
         # we might be in a situation where the construction does not exist in the
         # file. For example, this can happen when the construction is defined as
         # "Air Wall", which is a construction type internal to EnergyPlus.
         return None
Beispiel #4
0
    def from_zone_epbunch(cls, zone_ep, sql):
        """Create a Zone object from an eppy 'ZONE' epbunch.

        Results are memoized per (zone name, idf): if a Zone with the same
        name was already built for this idf, it is returned as-is.

        Args:
            zone_ep (eppy.bunch_subclass.EpBunch): The Zone EpBunch.
            sql (dict): The sql dict for this IDF object.

        Returns:
            Zone: the constructed Zone with all its sub-settings populated.
        """
        # Return the cached Zone if this (name, idf) pair was built before.
        cached = cls.get_cached(zone_ep.Name, zone_ep.theidf)
        if cached:
            return cached
        start_time = time.time()
        log('\nConstructing :class:`Zone` for zone "{}"'.format(zone_ep.Name))
        name = zone_ep.Name
        zone = cls(
            Name=name,
            idf=zone_ep.theidf,
            sql=sql,
            Category=zone_ep.theidf.building_name(use_idfname=True),
        )

        # Keep handles to the raw EpBunch and its surfaces for later use by
        # the sub-setting constructors below.
        zone._epbunch = zone_ep
        zone._zonesurfaces = zone_ep.zonesurfaces

        # Build each sub-setting from the zone. NOTE(review): the order may
        # matter (each `from_zone` reads state off `zone`) — preserve it.
        zone.Constructions = ZoneConstructionSet.from_zone(zone)
        zone.Conditioning = ZoneConditioning.from_zone(zone)
        zone.Ventilation = VentilationSetting.from_zone(zone)
        zone.DomesticHotWater = DomesticHotWaterSetting.from_zone(zone)
        zone.Loads = ZoneLoad.from_zone(zone)
        zone.InternalMassConstruction = zone._internalmassconstruction()
        zone.Windows = WindowSetting.from_zone(zone)

        log(
            'completed Zone "{}" constructor in {:,.2f} seconds'.format(
                zone_ep.Name, time.time() - start_time
            )
        )
        return zone
Beispiel #5
0
def get_from_cache(url):
    """Return the cached JSON response for *url*, or None when absent.

    Args:
        url (str): the URL whose cached response should be looked up.

    Returns:
        dict or None: the parsed JSON previously saved for this URL, or
        None when caching is disabled or no cache file exists.
    """
    # Caching may be switched off globally.
    if not settings.use_cache:
        return None
    # The cache filename is the md5 hash of the url.
    digest = hashlib.md5(url.encode('utf-8')).hexdigest()
    cache_path = os.path.join(settings.cache_folder,
                              os.extsep.join([digest, 'json']))
    # No cache file for this url hash -> cache miss.
    if not os.path.isfile(cache_path):
        return None
    with io.open(cache_path, encoding='utf-8') as cache_file:
        cached = json.load(cache_file)
    log('Retrieved response from cache file "{}" for URL "{}"'.format(
        cache_path, url))
    return cached
Beispiel #6
0
def _resolve_illuminance_target(zone):
    """Resolves the illuminance target for the Zone object"""
    ep_obj = zone._epbunch
    # All "Daylighting" group objects that refer to this zone, found via
    # eppy's `getreferingobjs`.
    referring = ep_obj.getreferingobjs(iddgroups=["Daylighting"],
                                       fields=["Zone_Name"])
    if not referring:
        # No dimming controls for this zone: fall back to 500 lux.
        log(
            "No illuminance target found for zone {}. Setting to default 500 "
            "lux".format(zone.Name),
            lg.DEBUG,
        )
        return 500
    # Keep only "Daylighting:Controls" objects and collect their setpoints.
    setpoints = {
        ctrl["Illuminance_Setpoint_at_Reference_Point_1"]
        for ctrl in referring
        if ctrl.key.upper() == "Daylighting:Controls".upper()
    }
    # There should only be one control per zone.
    if len(setpoints) != 1:
        raise ValueError(
            "Could not resolve more than one illuminance targets for Zone {}. "
            "Make sure there is only one".format(zone.Name))
    dimming_type = next(iter(setpoints))
    log(f"Illuminance target for zone '{zone.Name}' set to '{dimming_type}'")
    return float(dimming_type)
Beispiel #7
0
def nrel_api_cbr_request(data):
    """Query the NREL Commercial Building Resource Database.

    Args:
        data (dict): the GET parameters for the request (e.g. "s",
            "api_key").

    Returns:
        dict: the json response, or None when the server returned no
        parsable JSON.
    """
    # define the API URL, then construct a GET-style URL as a string to
    # hash to look up/save to cache
    url = 'https://developer.nrel.gov/api/commercial-building-resources/v1' \
          '/resources.json'
    prepared_url = requests.Request('GET', url, params=data).prepare().url
    cached_response_json = get_from_cache(prepared_url)

    if cached_response_json is not None:
        # found this request in the cache, just return it instead of making a
        # new HTTP call
        return cached_response_json

    else:
        start_time = time.time()
        log('Getting from {}, "{}"'.format(url, data))
        response = requests.get(prepared_url)
        # get the response size and the domain, log result
        size_kb = len(response.content) / 1000.
        # Bug fix: a global inline flag like (?s) must appear at the start of
        # the pattern (an error since Python 3.11).
        domain = re.findall(r'(?s)//(.*?)/', url)[0]
        log('Downloaded {:,.1f}KB from {}'
            ' in {:,.2f} seconds'.format(size_kb, domain,
                                         time.time() - start_time))

        try:
            response_json = response.json()
            if 'remark' in response_json:
                # Bug fix: `level` was previously passed to str.format()
                # instead of log().
                log('Server remark: "{}"'.format(response_json['remark']),
                    level=lg.WARNING)
            save_to_cache(prepared_url, response_json)
        except Exception:
            # deal with response status_code here
            log('Server at {} returned status code {} and no JSON data.'.
                format(domain, response.status_code),
                level=lg.ERROR)
        else:
            return response_json
Beispiel #8
0
    def _graph_reduce(self, G):
        """Using the depth first search algorithm, iterate over the zone
        adjacency graph and compute the equivalent zone yielded by the
        'addition' of two consecutive zones.

        'Adding' two zones together means both zones properties are
        weighted-averaged by zone area. All dependent objects implement the
        :func:`operator.add` method.

        Args:
            G (ZoneGraph): the zone adjacency graph to reduce.

        Returns:
            Zone: The reduced zone, or None when the graph is empty.
        """
        if len(G) < 1:
            log("No zones for building graph %s" % G.name)
            return None
        log("starting reduce process for building %s" % self.Name)
        start_time = time.time()

        # Work on the connected components, largest component first.
        components = sorted(
            (G.subgraph(c) for c in networkx.connected_components(G)),
            key=len,
            reverse=True,
        )
        from functools import reduce
        from operator import add

        # Flatten the zones of every component and fold them together.
        all_zones = [
            zone
            for component in components
            for _, zone in component.nodes(data="zone")
        ]
        bundle_zone = reduce(add, all_zones)

        log(
            'completed zone reduction for zone "{}" in building "{}" in {:,.2f} seconds'.format(
                bundle_zone.Name, self.Name, time.time() - start_time
            )
        )
        return bundle_zone
Beispiel #9
0
    def reduce(self, cores, perims):
        """Reduce the building to its simplest core and perimeter zones.

        Args:
            cores (list of Zone): core zones, folded into ``self.Core``.
            perims (list of Zone): perimeter zones, folded into
                ``self.Perimeter``. At least one is required.

        Raises:
            ValueError: when `perims` is empty.
        """
        start_time = time.time()

        # Fold all core zones into one equivalent zone, if any were given.
        if cores:
            self.Core = reduce(Zone.combine, cores)
        # At least one perimeter zone is mandatory.
        if not perims:
            raise ValueError(
                "Building complexity reduction must have at least one perimeter zone"
            )
        self.Perimeter = reduce(Zone.combine, perims)

        # Fall back to a generic window when none was resolved.
        if self.Perimeter.Windows is None:
            self.Perimeter.Windows = WindowSetting.generic(idf=self.idf)

        # A building without core zones reuses the perimeter as its core.
        if not self.Core:
            self.Core = self.Perimeter

        log(
            "Equivalent core zone has an area of {:,.0f} m2".format(self.Core.area),
            level=lg.DEBUG,
        )
        log(
            "Equivalent perimeter zone has an area of {:,.0f} m2".format(
                self.Perimeter.area
            ),
            level=lg.DEBUG,
        )
        log(
            'Completed model complexity reduction for BuildingTemplate "{}" in {:,.2f} seconds'.format(
                self.Name, time.time() - start_time
            )
        )
Beispiel #10
0
def nrel_api_cbr_request(data):
    """Query the NREL Commercial Building Resource Database

    Examples:
        >>> import archetypal as ar
        >>> ar.dataportal.nrel_api_cbr_request({'s': 'Commercial'
        >>> 'Reference', 'api_key': 'oGZdX1nhars1cTJYTm7M9T12T1ZOvikX9pH0Zudq'})

    Args:
        data (dict): the GET parameters for the request.

    Returns:
        dict: the json response, or None when the server returned no
        parsable JSON.

    Hint:
        For a detailed description of data arguments, visit
        `Commercial Building Resource API <https://developer.nrel.gov/docs/buildings
        /commercial-building-resource-database-v1/resources/>`_
    """
    # define the API URL, then construct a GET-style URL as a string to
    # hash to look up/save to cache
    url = ("https://developer.nrel.gov/api/commercial-building-resources/v1"
           "/resources.json")
    prepared_url = requests.Request("GET", url, params=data).prepare().url
    cached_response_json = get_from_cache(prepared_url)

    if cached_response_json is not None:
        # found this request in the cache, just return it instead of making a
        # new HTTP call
        return cached_response_json

    else:
        start_time = time.time()
        log('Getting from {}, "{}"'.format(url, data))
        response = requests.get(prepared_url)
        # get the response size and the domain, log result
        size_kb = len(response.content) / 1000.0
        # Bug fix: a global inline flag like (?s) must appear at the start of
        # the pattern (an error since Python 3.11).
        domain = re.findall(r"(?s)//(.*?)/", url)[0]
        log("Downloaded {:,.1f}KB from {}"
            " in {:,.2f} seconds".format(size_kb, domain,
                                         time.time() - start_time))

        try:
            response_json = response.json()
            if "remark" in response_json:
                # Bug fix: `level` was previously passed to str.format()
                # instead of log().
                log('Server remark: "{}"'.format(response_json["remark"]),
                    level=lg.WARNING)
            elif "error" in response_json:
                log(
                    "Server at {} returned status code {} meaning {}.".format(
                        domain, response.status_code,
                        response_json["error"]["code"]),
                    level=lg.ERROR,
                )
            save_to_cache(prepared_url, response_json)
        except Exception:
            # deal with response status_code here
            log(
                "Server at {} returned status code {} and no JSON data.".
                format(domain, response.status_code),
                level=lg.ERROR,
            )
        else:
            return response_json
Beispiel #11
0
    def combine(self, other, weights=None):
        """Append other to self. Return self + other as a new object.

        Args:
            other (WindowSetting): The other WindowSetting object to combine
                with self.
            weights (list-like, optional): A list-like object of len 2. If None,
                equal weights are used.

        Returns:
            WindowSetting: A new combined object made of self + other.

        Raises:
            NotImplementedError: if `other` is not of the same class as self.
        """
        # Support reduce()-style folding where either operand may be None.
        if self is None:
            return other
        if other is None:
            return self
        if not isinstance(other, self.__class__):
            msg = "Cannot combine %s with %s" % (
                self.__class__.__name__,
                other.__class__.__name__,
            )
            raise NotImplementedError(msg)

        # Check if other is not the same as self
        if self == other:
            return self

        # Equal weighting when the caller did not supply weights.
        if not weights:
            log('using 1 as weighting factor in "{}" '
                "combine.".format(self.__class__.__name__))
            weights = [1.0, 1.0]
        meta = self._get_predecessors_meta(other)
        # Numeric fields are weighted-averaged, boolean flags are OR-ed and
        # schedules/constructions are combined recursively.
        new_attr = dict(
            Construction=self.Construction.combine(other.Construction,
                                                   weights),
            AfnDischargeC=self._float_mean(other, "AfnDischargeC", weights),
            AfnTempSetpoint=self._float_mean(other, "AfnTempSetpoint",
                                             weights),
            AfnWindowAvailability=self.AfnWindowAvailability.combine(
                other.AfnWindowAvailability, weights),
            IsShadingSystemOn=any(
                [self.IsShadingSystemOn, other.IsShadingSystemOn]),
            IsVirtualPartition=any(
                [self.IsVirtualPartition, other.IsVirtualPartition]),
            IsZoneMixingOn=any([self.IsZoneMixingOn, other.IsZoneMixingOn]),
            OperableArea=self._float_mean(other, "OperableArea", weights),
            ShadingSystemSetpoint=self._float_mean(other,
                                                   "ShadingSystemSetpoint",
                                                   weights),
            ShadingSystemTransmittance=self._float_mean(
                other, "ShadingSystemTransmittance", weights),
            ShadingSystemType=self.ShadingSystemType
            if self.IsShadingSystemOn else other.ShadingSystemType,
            ZoneMixingDeltaTemperature=self._float_mean(
                other, "ZoneMixingDeltaTemperature", weights),
            ZoneMixingFlowRate=self._float_mean(other, "ZoneMixingFlowRate",
                                                weights),
            ZoneMixingAvailabilitySchedule=self.ZoneMixingAvailabilitySchedule.
            combine(other.ZoneMixingAvailabilitySchedule, weights),
            ShadingSystemAvailabilitySchedule=self.
            ShadingSystemAvailabilitySchedule.combine(
                other.ShadingSystemAvailabilitySchedule, weights),
        )
        new_obj = self.__class__(**meta, **new_attr)
        # The combined object remembers which objects it was made from.
        new_obj._predecessors.extend(self._predecessors + other._predecessors)
        return new_obj
Beispiel #12
0
    def _field_interpreter(self, field):
        """dealing with a Field-Set (Through, For, Interpolate, # Until, Value)
        and return the parsed string

        Args:
            field:
        """

        values_sets = [
            "weekdays",
            "weekends",
            "alldays",
            "allotherdays",
            "sunday",
            "monday",
            "tuesday",
            "wednesday",
            "thursday",
            "friday",
            "saturday",
            "summerdesignday",
            "winterdesignday",
            "holiday",
        ]
        keywords = None

        if "through" in field.lower():
            # deal with through
            if ":" in field.lower():
                # parse colon
                f_set, statement = field.split(":")
                hour = None
                minute = None
                value = statement.strip()
            else:
                msg = ('The schedule "{sch}" contains a Field '
                       'that is not understood: "{field}"'.format(
                           sch=self.Name, field=field))
                raise NotImplementedError(msg)
        elif "for" in field.lower():
            keywords = [word for word in values_sets if word in field.lower()]
            if ":" in field.lower():
                # parse colon
                f_set, statement = field.split(":")
                value = statement.strip()
                hour = None
                minute = None
            elif keywords:
                # get epBunch of the sizing period
                statement = " ".join(keywords)
                f_set = [s for s in field.split() if "for" in s.lower()][0]
                value = statement.strip()
                hour = None
                minute = None
            else:
                # parse without a colon
                msg = ('The schedule "{sch}" contains a Field '
                       'that is not understood: "{field}"'.format(
                           sch=self.Name, field=field))
                raise NotImplementedError(msg)
        elif "interpolate" in field.lower():
            msg = ('The schedule "{sch}" contains sub-hourly values ('
                   'Field-Set="{field}"). The average over the hour is '
                   "taken".format(sch=self.Name, field=field))
            log(msg, lg.WARNING)
            f_set, value = field.split(":")
            hour = None
            minute = None
        elif "until" in field.lower():
            if ":" in field.lower():
                # parse colon
                try:
                    f_set, hour, minute = field.split(":")
                    hour = hour.strip()  # remove trailing spaces
                    minute = minute.strip()  # remove trailing spaces
                    value = None
                except:
                    f_set = "until"
                    hour, minute = field.split(":")
                    hour = hour[-2:].strip()
                    minute = minute.strip()
                    value = None
            else:
                msg = ('The schedule "{sch}" contains a Field '
                       'that is not understood: "{field}"'.format(
                           sch=self.Name, field=field))
                raise NotImplementedError(msg)
        elif "value" in field.lower():
            if ":" in field.lower():
                # parse colon
                f_set, statement = field.split(":")
                value = statement.strip()
                hour = None
                minute = None
            else:
                msg = ('The schedule "{sch}" contains a Field '
                       'that is not understood: "{field}"'.format(
                           sch=self.Name, field=field))
                raise NotImplementedError(msg)
        else:
            # deal with the data value
            f_set = field
            hour = None
            minute = None
            value = field[len(field) + 1:].strip()

        return f_set, hour, minute, value
Beispiel #13
0
    def from_surface(cls, surface):
        """Build a WindowSetting object from a FenestrationSurface:Detailed_
        object. This constructor will detect common window constructions and
        shading devices. Supported Shading and Natural Air flow EnergyPlus
        objects are: WindowProperty:ShadingControl_,
        AirflowNetwork:MultiZone:Surface_.

        Important:
            If an EnergyPlus object is not supported, eg.:
            AirflowNetwork:MultiZone:Component:DetailedOpening_, only a warning
            will be issued in the console for the related object instance and
            default values will be automatically used.

        .. _FenestrationSurface:Detailed:
           https://bigladdersoftware.com/epx/docs/8-9/input-output-reference
           /group-thermal-zone-description-geometry.html
           #fenestrationsurfacedetailed
        .. _WindowProperty:ShadingControl:
           https://bigladdersoftware.com/epx/docs/8-9/input-output-reference
           /group-thermal-zone-description-geometry.html
           #windowpropertyshadingcontrol
        .. _AirflowNetwork:MultiZone:Surface:
           https://bigladdersoftware.com/epx/docs/8-9/input-output-reference
           /group-airflow-network.html#airflownetworkmultizonesurface
        .. _AirflowNetwork:MultiZone:Component:DetailedOpening:
           https://bigladdersoftware.com/epx/docs/8-9/input-output-reference
           /group-airflow-network.html
           #airflownetworkmultizonecomponentdetailedopening

        Args:
            surface (EpBunch): The FenestrationSurface:Detailed_ object.

        Returns:
            (WindowSetting): The window setting object; implicitly None for
            doors and non-EpBunch inputs.
        """
        if isinstance(surface,
                      EpBunch) and not surface.Surface_Type.upper() == "DOOR":
            construction = surface.get_referenced_object("Construction_Name")
            construction = WindowConstruction.from_epbunch(construction)
            # `name` is the window's own name; it must survive untouched to
            # the constructor call at the bottom of this method.
            name = surface.Name
            shading_control = surface.get_referenced_object(
                "Shading_Control_Name")
            attr = {}
            if shading_control:
                # a WindowProperty:ShadingControl_ object can be attached to
                # this window
                attr["IsShadingSystemOn"] = True
                if shading_control["Setpoint"] != "":
                    attr["ShadingSystemSetpoint"] = shading_control["Setpoint"]
                shade_mat = shading_control.get_referenced_object(
                    "Shading_Device_Material_Name")
                # get shading transmittance
                if shade_mat:
                    attr["ShadingSystemTransmittance"] = shade_mat[
                        "Visible_Transmittance"]
                # get shading control schedule
                if shading_control["Shading_Control_Is_Scheduled"].upper(
                ) == "YES":
                    # Bug fix: use a dedicated variable for the schedule name
                    # instead of clobbering `name` (previously the window was
                    # constructed with the schedule's name as its Name).
                    sched_name = shading_control["Schedule_Name"]
                    attr["ShadingSystemAvailabilitySchedule"] = UmiSchedule(
                        Name=sched_name, idf=surface.theidf)
                else:
                    # Determine which behavior of control
                    shade_ctrl_type = shading_control["Shading_Control_Type"]
                    if shade_ctrl_type.lower() == "alwaysoff":
                        attr[
                            "ShadingSystemAvailabilitySchedule"] = UmiSchedule.constant_schedule(
                                idf=surface.theidf,
                                name="AlwaysOff",
                                hourly_value=0)
                    elif shade_ctrl_type.lower() == "alwayson":
                        attr[
                            "ShadingSystemAvailabilitySchedule"] = UmiSchedule.constant_schedule(
                                idf=surface.theidf)
                    else:
                        log(
                            'Window "{}" uses a  window control type that '
                            'is not supported: "{}". Reverting to '
                            '"AlwaysOn"'.format(name, shade_ctrl_type),
                            lg.WARN,
                        )
                        attr[
                            "ShadingSystemAvailabilitySchedule"] = UmiSchedule.constant_schedule(
                                idf=surface.theidf)
                # get shading type
                if shading_control["Shading_Type"] != "":
                    # map the EnergyPlus shading type to interior/exterior
                    mapping = {
                        "InteriorShade": WindowType(1),
                        "ExteriorShade": WindowType(0),
                        "ExteriorScreen": WindowType(0),
                        "InteriorBlind": WindowType(1),
                        "ExteriorBlind": WindowType(0),
                        "BetweenGlassShade": WindowType(0),
                        "BetweenGlassBlind": WindowType(0),
                        "SwitchableGlazing": WindowType(0),
                    }
                    attr["ShadingSystemType"] = mapping[
                        shading_control["Shading_Type"]]
            else:
                # Set default schedules
                attr[
                    "ShadingSystemAvailabilitySchedule"] = UmiSchedule.constant_schedule(
                        idf=surface.theidf)

            # get airflow network
            afn = next(
                iter(
                    surface.getreferingobjs(
                        iddgroups=["Natural Ventilation and Duct Leakage"],
                        fields=["Surface_Name"],
                    )),
                None,
            )
            if afn:
                attr[
                    "OperableArea"] = afn.WindowDoor_Opening_Factor_or_Crack_Factor
                leak = afn.get_referenced_object("Leakage_Component_Name")
                # Bug fix: dedicated variables for the AFN schedule names —
                # previously these assignments overwrote `name` and the
                # window was constructed with the wrong Name.
                venting_sched_name = afn["Venting_Availability_Schedule_Name"]
                if venting_sched_name != "":
                    attr["AfnWindowAvailability"] = UmiSchedule(
                        Name=venting_sched_name, idf=surface.theidf)
                else:
                    attr[
                        "AfnWindowAvailability"] = UmiSchedule.constant_schedule(
                            idf=surface.theidf)
                setpoint_sched_name = afn[
                    "Ventilation_Control_Zone_Temperature_Setpoint_Schedule_Name"]
                if setpoint_sched_name != "":
                    attr["AfnTempSetpoint"] = UmiSchedule(
                        Name=setpoint_sched_name, idf=surface.theidf).mean
                else:
                    pass  # uses default

                if (leak.key.upper(
                ) == "AIRFLOWNETWORK:MULTIZONE:SURFACE:EFFECTIVELEAKAGEAREA"):
                    attr["AfnDischargeC"] = leak["Discharge_Coefficient"]
                elif leak.key.upper() in (
                        "AIRFLOWNETWORK:MULTIZONE:COMPONENT:HORIZONTALOPENING",
                        "AIRFLOWNETWORK:MULTIZONE:SURFACE:CRACK",
                        "AIRFLOWNETWORK:MULTIZONE:COMPONENT:DETAILEDOPENING",
                        "AIRFLOWNETWORK:MULTIZONE:COMPONENT:ZONEEXHAUSTFAN",
                        "AIRFLOWNETWORK:MULTIZONE:COMPONENT:SIMPLEOPENING",
                ):
                    # These leakage components are not supported; warn once
                    # and keep the defaults (deduplicated from five identical
                    # elif branches).
                    log(
                        '"{}" is not fully supported. Rerverting to '
                        'defaults for object "{}"'.format(
                            leak.key,
                            cls.mro()[0].__name__),
                        lg.WARNING,
                    )
            else:
                attr["AfnWindowAvailability"] = UmiSchedule.constant_schedule(
                    idf=surface.theidf)
            # Zone Mixing
            attr[
                "ZoneMixingAvailabilitySchedule"] = UmiSchedule.constant_schedule(
                    idf=surface.theidf)
            w = cls(Name=name,
                    Construction=construction,
                    idf=surface.theidf,
                    Category=surface.theidf.building_name(use_idfname=True),
                    **attr)
            return w
Beispiel #14
0
    def combine(self, other, weights=None):
        """Combine two ZoneConditioning objects together.

        Args:
            other (ZoneConditioning): The other ZoneConditioning object to
                combine with.
            weights (list-like, optional): A list-like object of len 2. If None,
                the volume of the zones for which self and other belongs is
                used.

        Returns:
            (ZoneConditioning): the combined ZoneConditioning object.
        """
        # Combining is only defined between two objects of the same class.
        if not isinstance(other, self.__class__):
            raise NotImplementedError(
                "Cannot combine %s with %s"
                % (self.__class__.__name__, other.__class__.__name__)
            )

        # Combining an object with itself is a no-op.
        if self == other:
            return self

        meta = self._get_predecessors_meta(other)

        if not weights:
            # Default weighting factor: the zone property named by
            # settings.zone_weight (e.g. volume) of each parent zone.
            zone_weight = settings.zone_weight
            weights = [
                getattr(self._belongs_to_zone, str(zone_weight)),
                getattr(other._belongs_to_zone, str(zone_weight)),
            ]
            log(
                'using zone {} "{}" as weighting factor in "{}" '
                "combine.".format(
                    zone_weight,
                    " & ".join(list(map(str, map(int, weights)))),
                    self.__class__.__name__,
                )
            )

        # Numeric attributes are weight-averaged, categorical attributes use
        # the "string mean", boolean flags are OR-ed and schedules are
        # combined pairwise. The dict literal preserves the original
        # evaluation order of the individual combinations.
        combined_attrs = dict(
            CoolingCoeffOfPerf=self._float_mean(
                other, "CoolingCoeffOfPerf", weights),
            CoolingLimitType=self._str_mean(other, "CoolingLimitType"),
            CoolingSetpoint=self._float_mean(other, "CoolingSetpoint", weights),
            EconomizerType=self._str_mean(other, "EconomizerType"),
            HeatRecoveryEfficiencyLatent=self._float_mean(
                other, "HeatRecoveryEfficiencyLatent", weights),
            HeatRecoveryEfficiencySensible=self._float_mean(
                other, "HeatRecoveryEfficiencySensible", weights),
            HeatRecoveryType=self._str_mean(other, "HeatRecoveryType"),
            HeatingCoeffOfPerf=self._float_mean(
                other, "HeatingCoeffOfPerf", weights),
            HeatingLimitType=self._str_mean(other, "HeatingLimitType"),
            HeatingSetpoint=self._float_mean(other, "HeatingSetpoint", weights),
            IsCoolingOn=any((self.IsCoolingOn, other.IsCoolingOn)),
            IsHeatingOn=any((self.IsHeatingOn, other.IsHeatingOn)),
            IsMechVentOn=any((self.IsMechVentOn, other.IsMechVentOn)),
            MaxCoolFlow=self._float_mean(other, "MaxCoolFlow", weights),
            MaxCoolingCapacity=self._float_mean(
                other, "MaxCoolingCapacity", weights),
            MaxHeatFlow=self._float_mean(other, "MaxHeatFlow", weights),
            MaxHeatingCapacity=self._float_mean(
                other, "MaxHeatingCapacity", weights),
            MinFreshAirPerArea=self._float_mean(
                other, "MinFreshAirPerArea", weights),
            MinFreshAirPerPerson=self._float_mean(
                other, "MinFreshAirPerPerson", weights),
            HeatingSchedule=self.HeatingSchedule.combine(
                other.HeatingSchedule, weights),
            CoolingSchedule=self.CoolingSchedule.combine(
                other.CoolingSchedule, weights),
            MechVentSchedule=self.MechVentSchedule.combine(
                other.MechVentSchedule, weights),
        )

        # create a new object carrying over the lineage of both parents
        new_obj = self.__class__(**meta, **combined_attrs)
        new_obj._predecessors.extend(self.predecessors + other.predecessors)
        return new_obj
Beispiel #15
0
    def field_set(self, field, slicer_=None):
        """helper function to return the proper slicer depending on the
        field_set value.

        Available values are: Weekdays, Weekends, Holidays, Alldays,
        SummerDesignDay, WinterDesignDay, Sunday, Monday, Tuesday, Wednesday,
        Thursday, Friday, Saturday, CustomDay1, CustomDay2, AllOtherDays

        Args:
            field (str): The EnergyPlus field set value.
            slicer_: the indexer accumulated so far; only required for the
                "AllOtherDays" field-set.

        Returns:
            (indexer-like): Returns the appropriate indexer for the series.
        """
        key = field.lower()

        # Individual days map directly onto pandas' dayofweek codes
        # (Monday == 0 ... Sunday == 6).
        day_codes = {
            "monday": 0,
            "tuesday": 1,
            "wednesday": 2,
            "thursday": 3,
            "friday": 4,
            "saturday": 5,
            "sunday": 6,
        }
        if key in day_codes:
            code = day_codes[key]
            return lambda x: x.index.dayofweek == code
        if key == "weekdays":
            # Monday through Friday
            return lambda x: x.index.dayofweek < 5
        if key == "weekends":
            # Saturday and Sunday
            return lambda x: x.index.dayofweek >= 5
        if key == "alldays":
            log(
                'For schedule "{}", the field-set "AllDays" may be overridden '
                'by the "AllOtherDays" field-set'.format(self.Name),
                lg.WARNING,
            )
            # every day; equivalent to .loc[:]
            return pd.IndexSlice[:]
        if key == "allotherdays":
            # days not claimed by any other field-set (including special
            # days); needs the slicer built so far.
            import operator

            if slicer_ is None:
                raise NotImplementedError
            return _conjunction(
                *[self.special_day(field, slicer_), ~slicer_],
                logical=operator.or_)
        if key in ("summerdesignday", "winterdesignday"):
            # design days are not sliced here
            return None
        if key in ("holiday", "holidays"):
            return self.special_day("holiday", slicer_)
        if not self.strict:
            # If not strict, ignore missing field-sets such as CustomDay1
            return None
        raise NotImplementedError("Archetypal does not yet support The "
                                  'Field_set "{}"'.format(field))
Beispiel #16
0
    def filter_tabular_data(
        self,
        archetype=None,
        tabulardataindex=None,
        value=None,
        reportname=None,
        reportforstring=None,
        tablename=None,
        rowname=None,
        columnname=None,
        units=None,
        inplace=False,
    ):
        """filter TabularData using specific keywords. Each keyword can be a
        tuple of strings (str1, str2, str3) which will return the logical_or on
        the specific column.

        Args:
            archetype (str or tuple):
            tabulardataindex:
            value (str or tuple):
            reportname:
            reportforstring:
            tablename:
            rowname:
            columnname:
            units (str or tuple):
            inplace (bool): if True, filter in place.

        Returns:
            pandas.DataFrame
        """
        start_time = time.time()

        # Map each filter argument onto the column it constrains. The
        # previous implementation repeated the same conjunction pattern for
        # every keyword and accidentally applied the `value` filter twice
        # (harmless under logical_and, but redundant); a single loop keeps
        # them consistent.
        criteria = [
            (archetype, self.ARCHETYPE),
            (tabulardataindex, self.TABULARDATAINDEX),
            (value, self.VALUE),
            (reportname, self.REPORTNAME),
            (reportforstring, self.REPORTFORSTRING),
            (tablename, self.TABLENAME),
            (rowname, self.ROWNAME),
            (columnname, self.COLUMNNAME),
            (units, self.UNITS),
        ]
        c_n = []
        for criterion, column in criteria:
            if not criterion:
                continue
            if isinstance(criterion, tuple):
                # a tuple means "match any of these values" (logical OR)
                cond = conjunction(
                    *[self[column] == v for v in criterion],
                    logical=np.logical_or)
            else:
                cond = self[column] == criterion
            c_n.append(cond)

        # all active filters must hold simultaneously (logical AND)
        filtered_df = self.loc[conjunction(*c_n, logical=np.logical_and)]
        log("filtered TabularData in {:,.2f} seconds".format(time.time() -
                                                             start_time))
        if inplace:
            return filtered_df._update_inplace(filtered_df)
        else:
            return filtered_df._constructor(filtered_df).__finalize__(self)
Beispiel #17
0
def stat_can_geo_request(type="json", lang="E", geos="PR", cpt="00"):
    """Request 2016 Census geography data from the Statistics Canada REST API.

    Responses are cached on disk; a cached response is returned without a new
    HTTP call.

    Args:
        type (str): "json" or "xml". json = json response format and xml = xml
            response format.
        lang (str): "E" or "F". where: E = English F = French.
        geos (str): one geographic level code (default = PR). where: CD = Census
            divisions CMACA = Census metropolitan areas and census
            agglomerations CSD = Census subdivisions (municipalities) CT =
            Census tracts DA = Dissemination areas DPL = Designated places ER =
            Economic regions FED = Federal electoral districts (2013
            Representation Order) FSA = Forward sortation areas HR = Health
            regions (including LHINs and PHUs) POPCNTR = Population centres PR =
            Canada, provinces and territories.
        cpt (str): one province or territory code (default = 00). where: 00 =
            All provinces and territories 10 = Newfoundland and Labrador 11 =
            Prince Edward Island 12 = Nova Scotia 13 = New Brunswick 24 = Quebec
            35 = Ontario 46 = Manitoba 47 = Saskatchewan 48 = Alberta 59 =
            British Columbia 60 = Yukon 61 = Northwest Territories 62 = Nunavut.

    Returns:
        (dict or None): the decoded JSON response, or None when the server
            returned no parsable JSON data.
    """
    prepared_url = (
        "https://www12.statcan.gc.ca/rest/census-recensement"
        "/CR2016Geo.{type}?lang={lang}&geos={geos}&cpt={cpt}".format(type=type,
                                                                     lang=lang,
                                                                     geos=geos,
                                                                     cpt=cpt))

    cached_response_json = get_from_cache(prepared_url)

    if cached_response_json is not None:
        # found this request in the cache, just return it instead of making a
        # new HTTP call
        return cached_response_json

    # if this URL is not already in the cache, request it
    start_time = time.time()
    log("Getting from {}".format(prepared_url))
    response = requests.get(prepared_url)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.0
    # BUG FIX: the inline (?s) flag must be at the start of the pattern; a
    # mid-pattern position (as previously written) is an error on Python 3.11+.
    domain = re.findall(r"(?s)//(.*?)/", prepared_url)[0]
    log("Downloaded {:,.1f}KB from {}"
        " in {:,.2f} seconds".format(size_kb, domain,
                                     time.time() - start_time))

    try:
        response_json = response.json()
    except Exception:
        # There seems to be a double backslash in the response. We try
        # removing it here.
        try:
            response_json = json.loads(
                response.content.decode("UTF-8").replace("//", ""))
        except Exception:
            # deal with response status_code here; logged once (the previous
            # implementation logged this same error twice)
            log(
                "Server at {} returned status code {} and no JSON "
                "data.".format(domain, response.status_code),
                level=lg.ERROR,
            )
            return None

    if "remark" in response_json:
        # BUG FIX: `level=lg.WARNING` was previously passed to str.format()
        # (which silently ignores extra keyword arguments) instead of log().
        log('Server remark: "{}"'.format(response_json["remark"]),
            level=lg.WARNING)
    save_to_cache(prepared_url, response_json)
    return response_json
Beispiel #18
0
    def discretize(self, n_bins=3, inplace=False):
        """Returns a discretized pandas.Series

        Approximates the profile with a piecewise-constant function of
        `n_bins` steps by minimizing the RMSE between the original values and
        the piecewise function (scipy L-BFGS-B). Fitted bin edges and
        amplitudes are stored on `bin_edges_` and `bin_scaling_factors_`.

        Args:
            n_bins (int): Number of bins or steps to discretize the function
            inplace (bool): if True, perform operation in-place

        Returns:
            (EnergySeries): the discretized series, or None when inplace=True.
        """
        try:
            from scipy.optimize import minimize
        except ImportError:
            # BUG FIX: the import that fails here is scipy, not sklearn as
            # the previous message claimed.
            raise ImportError("The scipy package must be installed to "
                              "use this optional feature.")
        if self.archetypes:
            # if multiindex, group and apply operation on each group.
            # combine at the end
            results = {}
            edges = {}
            ampls = {}
            for name, sub in self.groupby(level=0):
                hour_of_min = sub.time_at_min[1]

                # initial guess: decreasing scaling factors ending at the
                # series minimum, hours clustered around the minimum
                sf = [1 / (i * 1.01) for i in range(1, n_bins + 1)]
                sf.extend([sub.min()])
                sf_bounds = [(0, sub.max()) for i in range(0, n_bins + 1)]
                hours = [
                    hour_of_min - hour_of_min * 1 / (i * 1.01)
                    for i in range(1, n_bins + 1)
                ]
                # Todo hours need to work for datetime index
                hours.extend([len(sub)])
                hours_bounds = [(0, len(sub)) for i in range(0, n_bins + 1)]

                start_time = time.time()
                log("discretizing EnergySeries {}".format(name), lg.DEBUG)
                res = minimize(
                    rmse,
                    np.array(hours + sf),
                    # explicit 1-tuple; a bare array only worked because scipy
                    # wraps non-tuple `args` itself
                    args=(sub.values,),
                    method="L-BFGS-B",
                    bounds=hours_bounds + sf_bounds,
                    options=dict(disp=True),
                )
                log(
                    "Completed discretization in {:,.2f} seconds".format(
                        time.time() - start_time),
                    lg.DEBUG,
                )
                # first n_bins+1 parameters are the bin edges (hours); the
                # remainder are the bin amplitudes (scaling factors)
                edges[name] = res.x[0:n_bins + 1]
                ampls[name] = res.x[n_bins + 1:]
                results[name] = Series(piecewise(res.x))
            self.bin_edges_ = Series(edges).apply(Series)
            self.bin_scaling_factors_ = DataFrame(ampls)

            result = concat(results)
        else:
            hour_of_min = self.time_at_min

            sf = [1 / (i * 1.01) for i in range(1, n_bins + 1)]
            sf.extend([self.min()])
            sf_bounds = [(0, self.max()) for i in range(0, n_bins + 1)]
            hours = [
                hour_of_min - hour_of_min * 1 / (i * 1.01)
                for i in range(1, n_bins + 1)
            ]
            hours.extend([len(self)])
            hours_bounds = [(0, len(self)) for i in range(0, n_bins + 1)]

            start_time = time.time()
            res = minimize(
                rmse,
                np.array(hours + sf),
                args=(self.values,),
                method="L-BFGS-B",
                bounds=hours_bounds + sf_bounds,
                options=dict(disp=True),
            )
            log(
                "Completed discretization in {:,.2f} seconds".format(
                    time.time() - start_time),
                lg.DEBUG,
            )
            edges = res.x[0:n_bins + 1]
            ampls = res.x[n_bins + 1:]
            result = Series(piecewise(res.x))
            # derive per-bin durations from the cumulative edge positions
            bin_edges = Series(edges).apply(Series)
            self.bin_edges_ = bin_edges
            bin_edges.loc[-1, 0] = 0
            bin_edges.sort_index(inplace=True)
            bin_edges = bin_edges.diff().dropna()
            bin_edges = bin_edges.round()
            self.bin_scaling_factors_ = DataFrame({
                "duration": bin_edges[0],
                "scaling factor": ampls
            })
            self.bin_scaling_factors_.index = np.round(edges).astype(int)

        if inplace:
            self.update(result)
            self.__class__ = EnergySeries
            self.__finalize__(result)
        else:
            result.__class__ = EnergySeries
            return result.__finalize__(self)
Beispiel #19
0
    def from_idf(cls, idf, sql, log_adj_report=True, skeleton=False, force=False):
        """Create a graph representation of all the building zones. An edge
        between two zones represents the adjacency of the two zones.

        If skeleton is False, this method will create all the building
        objects iteratively over the building zones.

        Args:
            idf (IDF): the IDF model whose zones are graphed.
            sql (dict): SQL results passed to Zone.from_zone_epbunch when
                building full zone objects.
            log_adj_report (bool, optional): If True, prints an adjacency report
                in the log.
            skeleton (bool, optional): If True, create a zone graph without
                creating hierarchical objects, eg. zones > zoneloads > ect.
            force (bool): If True, will recalculate the graph.

        Returns:
            ZoneGraph: The building's zone graph object
        """

        start_time = time.time()

        G = cls(name=idf.name)

        counter = 0
        for zone in tqdm(idf.idfobjects["ZONE"], desc="zone_loop"):
            # initialize the adjacency report dictionary. default list.
            adj_report = defaultdict(list)
            zone_obj = None
            if not skeleton:
                zone_obj = Zone.from_zone_epbunch(zone, sql=sql)
                zonesurfaces = zone.zonesurfaces
                zone_obj._zonesurfaces = zonesurfaces
                _is_core = zone_obj.is_core
            else:
                zonesurfaces = zone.zonesurfaces
                _is_core = is_core(zone)
            G.add_node(zone.Name, epbunch=zone, core=_is_core, zone=zone_obj)

            for surface in zonesurfaces:
                if surface.key.upper() in ["INTERNALMASS", "WINDOWSHADINGCONTROL"]:
                    # Todo deal with internal mass surfaces
                    pass
                else:
                    adj_zone: EpBunch
                    adj_surf: EpBunch
                    adj_surf, adj_zone = resolve_obco(surface)

                    if adj_zone and adj_surf:
                        counter += 1

                        if skeleton:
                            zone_obj = None
                            # BUG FIX: core-ness of the *adjacent* zone was
                            # previously computed from the current zone.
                            _is_core = is_core(adj_zone)
                        else:
                            zone_obj = Zone.from_zone_epbunch(adj_zone, sql=sql)
                            _is_core = zone_obj.is_core

                        # create node for adjacent zone
                        # BUG FIX: the node was previously added under
                        # `zone.Name`, which clobbered the current zone's
                        # attributes and never created the adjacent node.
                        G.add_node(
                            adj_zone.Name, epbunch=adj_zone, core=_is_core, zone=zone_obj
                        )
                        try:
                            this_cstr = surface["Construction_Name"]
                            their_cstr = adj_surf["Construction_Name"]
                            is_diff_cstr = this_cstr != their_cstr
                        except Exception:
                            # surfaces without a Construction_Name field
                            this_cstr, their_cstr, is_diff_cstr = None, None, None
                        # create edge from this zone to the adjacent zone
                        G.add_edge(
                            zone.Name,
                            adj_zone.Name,
                            this_cstr=this_cstr,
                            their_cstr=their_cstr,
                            is_diff_cstr=is_diff_cstr,
                        )

                        add_to_report(
                            adj_report, zone, surface, adj_zone, adj_surf, counter
                        )
            if log_adj_report:
                msg = "Printing Adjacency Report for zone %s\n" % zone.Name
                msg += tabulate.tabulate(adj_report, headers="keys")
                log(msg)

        log("Created zone graph in {:,.2f} seconds".format(time.time() - start_time))
        log(networkx.info(G), lg.DEBUG)
        return G
Beispiel #20
0
    def combine(self, other, weights=None):
        """Combine two DomesticHotWaterSetting objects together.

        Args:
            other (DomesticHotWaterSetting):
            weights (list-like, optional): A list-like object of len 2. If None,
                the volume of the zones for which self and other belongs is
                used.

        Returns:
            (DomesticHotWaterSetting): a new combined object
        """
        # Combining with None is the identity operation.
        if self is None:
            return other
        if other is None:
            return self
        # Combining is only defined between two objects of the same class.
        if not isinstance(other, self.__class__):
            raise NotImplementedError(
                "Cannot combine %s with %s"
                % (self.__class__.__name__, other.__class__.__name__)
            )
        # Equal objects combine to themselves.
        if self == other:
            return self

        meta = self._get_predecessors_meta(other)

        if not weights:
            # Default weighting factor: the zone property named by
            # settings.zone_weight (e.g. volume) of each parent zone.
            zone_weight = settings.zone_weight
            weights = [
                getattr(self._belongs_to_zone, str(zone_weight)),
                getattr(other._belongs_to_zone, str(zone_weight)),
            ]
            log(
                'using zone {} "{}" as weighting factor in "{}" '
                "combine.".format(
                    zone_weight,
                    " & ".join(list(map(str, map(int, weights)))),
                    self.__class__.__name__,
                )
            )

        # Schedules are combined weighted additionally by flow rate; scalar
        # attributes are weight-averaged; IsOn is the logical OR.
        water_schedule = self.WaterSchedule.combine(
            other.WaterSchedule,
            weights,
            [self.FlowRatePerFloorArea, other.FlowRatePerFloorArea],
        )
        combined = DomesticHotWaterSetting(
            **meta,
            IsOn=any((self.IsOn, other.IsOn)),
            WaterSchedule=water_schedule,
            FlowRatePerFloorArea=self._float_mean(
                other, "FlowRatePerFloorArea", weights),
            WaterSupplyTemperature=self._float_mean(
                other, "WaterSupplyTemperature", weights),
            WaterTemperatureInlet=self._float_mean(
                other, "WaterTemperatureInlet", weights),
        )
        combined._predecessors.extend(self.predecessors + other.predecessors)
        return combined
Beispiel #21
0
    def filter_report_data(
        self,
        archetype=None,
        reportdataindex=None,
        timeindex=None,
        reportdatadictionaryindex=None,
        value=None,
        ismeter=None,
        type=None,
        indexgroup=None,
        timesteptype=None,
        keyvalue=None,
        name=None,
        reportingfrequency=None,
        schedulename=None,
        units=None,
        inplace=False,
    ):
        """filter ReportData using specific keywords. Each keyword can be a
        tuple of strings (str1, str2, str3) which will return the logical_or
        on the specific column.

        Args:
            archetype (str or tuple):
            reportdataindex (str or tuple):
            timeindex (str or tuple):
            reportdatadictionaryindex (str or tuple):
            value (str or tuple):
            ismeter (str or tuple):
            type (str or tuple):
            indexgroup (str or tuple):
            timesteptype (str or tuple):
            keyvalue (str or tuple):
            name (str or tuple):
            reportingfrequency (str or tuple):
            schedulename (str or tuple):
            units (str or tuple):
            inplace (bool): if True, filter in place.

        Returns:
            pandas.DataFrame
        """
        start_time = time.time()

        # Map each keyword argument onto the column it filters on. One loop
        # replaces fourteen copies of the same conjunction pattern (and
        # avoids shadowing each argument with its own loop variable).
        criteria = [
            (archetype, self.ARCHETYPE),
            (reportdataindex, self.REPORTDATAINDEX),
            (timeindex, self.TIMEINDEX),
            (reportdatadictionaryindex, self.REPORTDATADICTIONARYINDEX),
            (value, self.VALUE),
            (ismeter, self.ISMETER),
            (type, self.TYPE),
            (indexgroup, self.INDEXGROUP),
            (timesteptype, self.TIMESTEPTYPE),
            (keyvalue, self.KEYVALUE),
            (name, self.NAME),
            (reportingfrequency, self.REPORTINGFREQUENCY),
            (schedulename, self.SCHEDULENAME),
            (units, self.UNITS),
        ]
        c_n = []
        for criterion, column in criteria:
            if not criterion:
                continue
            if isinstance(criterion, tuple):
                # a tuple means "match any of these values" (logical OR)
                cond = conjunction(
                    *[self[column] == v for v in criterion],
                    logical=np.logical_or)
            else:
                cond = self[column] == criterion
            c_n.append(cond)

        # all active filters must hold simultaneously (logical AND)
        filtered_df = self.loc[conjunction(*c_n, logical=np.logical_and)]
        log("filtered ReportData in {:,.2f} seconds".format(time.time() -
                                                            start_time))
        if inplace:
            return filtered_df._update_inplace(filtered_df)
        else:
            return filtered_df.__finalize__(self)
Beispiel #22
0
def download_bld_window(
    u_factor,
    shgc,
    vis_trans,
    oauth_key,
    tolerance=0.05,
    extension="idf",
    output_folder=None,
):
    """Find window constructions corresponding to a combination of a u_factor,
    shgc and visible transmittance and download their idf file to disk. it is
    necessary to have an authentication key (see Info below).

    .. _Building_Component_Library: https://bcl.nrel.gov/user/register

    Args:
        u_factor (float or tuple): The center of glass u-factor. Pass a range of
            values by passing a tuple (from, to). If a tuple is passed,
            *tolerance* is ignored.
        shgc (float or tuple): The Solar Heat Gain Coefficient. Pass a range of
            values by passing a tuple (from, to). If a tuple is passed,
            *tolerance* is ignored.
        vis_trans (float or tuple): The Visible Transmittance. Pass a range of
            values by passing a tuple (from, to). If a tuple is passed,
            *tolerance* is ignored.
        oauth_key (str): the Building_Component_Library_ authentication key.
        tolerance (float): relative tolerance for the input values. Default is
            0.05 (5%).
        extension (str): specify the extension of the file to download.
            (default: 'idf')
        output_folder (str, optional): specify folder to save response data to.
            Defaults to settings.data_folder.

    Returns:
        (list of archetypal.IDF): a list of IDF files containing window objects
            matching the parameters.

    Raises:
        ValueError: if the BCL API request fails.

    Note:
        An authentication key from NREL is required to download building
        components. Register at Building_Component_Library_
    """

    def _range_query(value):
        # The BCL expects a Solr-style "[from TO to]" range filter. A tuple is
        # used verbatim; a scalar is expanded by +/- *tolerance*.
        if isinstance(value, tuple):
            return "[{} TO {}]".format(value[0], value[1])
        return "[{} TO {}]".format(value * (1 - tolerance),
                                   value * (1 + tolerance))

    data = {
        "keyword": "Window",
        "format": "json",
        "f[]": [
            "fs_a_Overall_U-factor:{}".format(_range_query(u_factor)),
            "fs_a_VLT:{}".format(_range_query(vis_trans)),
            "fs_a_SHGC:{}".format(_range_query(shgc)),
            'sm_component_type:"Window"',
        ],
        "oauth_consumer_key": oauth_key,
    }
    response = nrel_bcl_api_request(data)

    if not response:
        raise ValueError(
            "Could not download window from NREL Building Components "
            "Library. An error occurred with the nrel_api_request")

    log("found {} possible window component(s) matching "
        "the range {}".format(len(response["result"]), str(data["f[]"])))

    # download components
    uids = [component["component"]["uid"] for component in response["result"]]
    url = "https://bcl.nrel.gov/api/component/download?uids={}".format(
        ",".join(uids))
    # actual download with get()
    d_response = requests.get(url)

    if not d_response.ok:
        return response["result"]

    # loop through files and extract the ones that match the extension
    # parameter
    if output_folder is None:
        output_folder = settings.data_folder
    results = []
    with zipfile.ZipFile(io.BytesIO(d_response.content)) as z:
        for info in z.infolist():
            if info.filename.endswith(extension):
                z.extract(info, path=output_folder)
                # bug fix: the result path previously joined
                # settings.data_folder even when a custom output_folder
                # was given, pointing at files that do not exist there
                results.append(os.path.join(output_folder, info.filename))
    return results
Beispiel #23
0
    def combine(self, other, weights=None):
        """Combine two ZoneLoad objects together.

        Args:
            other (ZoneLoad): The other ZoneLoad object to combine with self.
            weights (list-like, optional): A list-like object of len 2. If None,
                the zone weight (``settings.zone_weight``) of the zones to
                which self and other belong is used.

        Returns:
            (ZoneLoad): the combined ZoneLoad object.

        Raises:
            NotImplementedError: if *other* is not a ZoneLoad.
        """
        # Check if other is the same type as self
        if not isinstance(other, self.__class__):
            msg = "Cannot combine %s with %s" % (
                self.__class__.__name__,
                other.__class__.__name__,
            )
            raise NotImplementedError(msg)

        # Combining an object with itself is a no-op.
        if self == other:
            return self

        # note: a dead `incoming_load_data = self.__dict__.copy()` snapshot
        # was removed here; it was never read.

        meta = self._get_predecessors_meta(other)

        if not weights:
            zone_weight = settings.zone_weight
            weights = [
                getattr(self._belongs_to_zone, str(zone_weight)),
                getattr(other._belongs_to_zone, str(zone_weight)),
            ]
            log('using zone {} "{}" as weighting factor in "{}" '
                "combine.".format(
                    zone_weight,
                    " & ".join(list(map(str, map(int, weights)))),
                    self.__class__.__name__,
                ))

        # Schedules are combined weighted by their associated quantity (power
        # or people density) so that the integrated loads are preserved.
        attr = dict(
            DimmingType=self._str_mean(other, "DimmingType"),
            EquipmentAvailabilitySchedule=self.EquipmentAvailabilitySchedule.
            combine(
                other.EquipmentAvailabilitySchedule,
                weights=weights,
                quantity=[
                    self.EquipmentPowerDensity, other.EquipmentPowerDensity
                ],
            ),
            EquipmentPowerDensity=self._float_mean(other,
                                                   "EquipmentPowerDensity",
                                                   weights),
            IlluminanceTarget=self._float_mean(other, "IlluminanceTarget",
                                               weights),
            LightingPowerDensity=self._float_mean(other,
                                                  "LightingPowerDensity",
                                                  weights),
            LightsAvailabilitySchedule=self.LightsAvailabilitySchedule.combine(
                other.LightsAvailabilitySchedule,
                weights=weights,
                quantity=[
                    self.LightingPowerDensity, other.LightingPowerDensity
                ],
            ),
            OccupancySchedule=self.OccupancySchedule.combine(
                other.OccupancySchedule,
                weights=weights,
                quantity=[self.PeopleDensity, other.PeopleDensity],
            ),
            # boolean flags are on if either of the combined zones is on
            IsEquipmentOn=any([self.IsEquipmentOn, other.IsEquipmentOn]),
            IsLightingOn=any([self.IsLightingOn, other.IsLightingOn]),
            IsPeopleOn=any([self.IsPeopleOn, other.IsPeopleOn]),
            PeopleDensity=self._float_mean(other, "PeopleDensity", weights),
        )

        new_obj = self.__class__(**meta, **attr)
        new_obj._belongs_to_zone = self._belongs_to_zone
        new_obj._predecessors.extend(self.predecessors + other.predecessors)
        return new_obj
Beispiel #24
0
def reduce(idf, output, weather, parallel, all_zones):
    """Perform the model reduction and translate to an UMI template file.

    IDF is one or multiple idf files to process.
    OUTPUT is the output file name (or path) to write to. Optional.

    Args:
        idf (list): one or more IDF file paths to process.
        output (str): output file name (or path). The extension is forced to
            ".json".
        weather (str): path to the EPW weather file used for the simulations.
        parallel (bool): if True, run the EnergyPlus simulations in parallel.
        all_zones (bool): forwarded to :meth:`UmiTemplate.to_json`.
    """
    if parallel:
        # if parallel is True, run eplus in parallel; rundict maps each file
        # to its run_eplus keyword arguments
        rundict = {
            file: dict(
                eplus_file=file,
                weather_file=weather,
                annual=True,
                prep_outputs=True,
                expandobjects=True,
                verbose="v",
                output_report="sql",
                return_idf=False,
                ep_version=settings.ep_version,
            )
            for file in idf
        }
        res = parallel_process(rundict, run_eplus)
        res = _write_invalid(res)

        # return_idf=False above, so the IDF objects must be loaded separately
        loaded_idf = {}
        for key, sql in res.items():
            loaded_idf[key] = {0: sql, 1: load_idf(key)}
        res = loaded_idf
    else:
        # else, run sequentially, collecting failures instead of aborting
        res = defaultdict(dict)
        invalid = []
        for i, fn in enumerate(idf):
            try:
                res[fn][0], res[fn][1] = run_eplus(
                    fn,
                    weather,
                    ep_version=settings.ep_version,
                    output_report="sql",
                    prep_outputs=True,
                    annual=True,
                    design_day=False,
                    verbose="v",
                    return_idf=True,
                )
            except EnergyPlusProcessError as e:
                invalid.append({"#": i, "Filename": fn.basename(), "Error": e})
        if invalid:
            filename = Path("failed_reduce.txt")
            with open(filename, "w") as failures:
                failures.writelines(tabulate(invalid, headers="keys"))
            log('Invalid run listed in "%s"' % filename)

    from archetypal import BuildingTemplate

    bts = []
    for fn in res.values():
        # each entry holds the sql summary (a dict) and the IDF object
        sql = next(
            iter([
                value for key, value in fn.items() if isinstance(value, dict)
            ]))
        # bug fix: this local was previously named `idf`, rebinding the
        # function parameter of the same name
        idf_obj = next(
            iter([
                value for key, value in fn.items() if isinstance(value, IDF)
            ]))
        bts.append(
            BuildingTemplate.from_idf(idf_obj, sql=sql,
                                      DataSource=idf_obj.name))

    # normalize the output path and force a ".json" extension
    output = Path(output)
    name = output.namebase
    ext = output.ext if output.ext == ".json" else ".json"
    dir_ = output.dirname()
    template = UmiTemplate(name=name, BuildingTemplates=bts)
    final_path: Path = dir_ / name + ext
    template.to_json(path_or_buf=final_path, all_zones=all_zones)
    log("Successfully created template file at {}".format(
        final_path.abspath()))
    def combine(self, other, weights=None):
        """Combine two OpaqueMaterial objects.

        Args:
            other (OpaqueMaterial): The other OpaqueMaterial object to
                combine with.
            weights (list-like, optional): A list-like object of len 2. If None,
                the density of the OpaqueMaterial of each objects is used as
                a weighting factor.

        Returns:
            OpaqueMaterial: A new combined object made of self + other.

        Raises:
            NotImplementedError: if *other* is not an OpaqueMaterial.
        """
        # Check if other is the same type as self
        if not isinstance(other, self.__class__):
            msg = "Cannot combine %s with %s" % (
                self.__class__.__name__,
                other.__class__.__name__,
            )
            raise NotImplementedError(msg)

        # Combining an object with itself is a no-op.
        if self == other:
            return self

        if not weights:
            log('using OpaqueMaterial density as weighting factor in "{}" '
                "combine.".format(self.__class__.__name__))
            weights = [self.Density, other.Density]

        meta = self._get_predecessors_meta(other)
        new_obj = OpaqueMaterial(
            **meta,
            Conductivity=self._float_mean(other, "Conductivity", weights),
            Roughness=self._str_mean(other, attr="Roughness", append=False),
            SolarAbsorptance=self._float_mean(other, "SolarAbsorptance",
                                              weights),
            # bug fix: SpecificHeat previously omitted the weights, unlike
            # every other weighted property in this call
            SpecificHeat=self._float_mean(other, "SpecificHeat", weights),
            ThermalEmittance=self._float_mean(other, "ThermalEmittance",
                                              weights),
            VisibleAbsorptance=self._float_mean(other, "VisibleAbsorptance",
                                                weights),
            TransportCarbon=self._float_mean(other, "TransportCarbon",
                                             weights),
            TransportDistance=self._float_mean(other, "TransportDistance",
                                               weights),
            TransportEnergy=self._float_mean(other, "TransportEnergy",
                                             weights),
            # NOTE(review): weights=None was deliberate in the original —
            # presumably SubstitutionRatePattern is list-valued and cannot
            # take scalar weights; confirm before changing.
            SubstitutionRatePattern=self._float_mean(other,
                                                     "SubstitutionRatePattern",
                                                     weights=None),
            SubstitutionTimestep=self._float_mean(other,
                                                  "SubstitutionTimestep",
                                                  weights),
            Cost=self._float_mean(other, "Cost", weights),
            Density=self._float_mean(other, "Density", weights),
            EmbodiedCarbon=self._float_mean(other, "EmbodiedCarbon", weights),
            EmbodiedEnergy=self._float_mean(other, "EmbodiedEnergy", weights),
            MoistureDiffusionResistance=self._float_mean(
                other, "MoistureDiffusionResistance", weights))
        new_obj._predecessors.extend(self.predecessors + other.predecessors)
        return new_obj
Beispiel #26
0
    def combine(self, other, weights=None, quantity=None):
        """Combine two UmiSchedule objects together.

        Args:
            other (UmiSchedule): The other Schedule object to combine with.
            weights (list or str): Weight factors for the average. Either a
                list-like of len 2, or the name of an attribute of self and
                other containing the weight factor.
            quantity (list or dict): Scalar value that will be multiplied by self before
                the averaging occurs. This ensures that the resulting schedule
                returns the correct integrated value. If a dict is passed, keys are
                schedules Names and values are quantities.

        Returns:
            (UmiSchedule): the combined UmiSchedule object.

        Raises:
            NotImplementedError: if *other* is not an UmiSchedule.
        """
        if not isinstance(other, self.__class__):
            msg = "Cannot combine %s with %s" % (
                self.__class__.__name__,
                other.__class__.__name__,
            )
            raise NotImplementedError(msg)

        # check if the schedule is the same
        if all(self.all_values == other.all_values):
            return self

        # check if self is only zeros. Should not affect other.
        if all(self.all_values == 0):
            return other
        # check if other is only zeros. Should not affect self.
        if all(other.all_values == 0):
            return self

        if not weights:
            log('using 1 as weighting factor in "{}" '
                "combine.".format(self.__class__.__name__))
            weights = [1, 1]
        elif isinstance(weights, str):
            # interpret *weights* as an attribute name on self and other
            weights = [getattr(self, weights), getattr(other, weights)]

        if quantity is None:
            # plain weighted average of the hourly values
            new_values = np.average([self.all_values, other.all_values],
                                    axis=0,
                                    weights=weights)
        elif isinstance(quantity, dict):
            # scale each schedule by its quantity (looked up by schedule Name)
            # so the integrated value is preserved, then re-normalize so the
            # combined schedule peaks at 1
            new_values = np.average(
                [
                    self.all_values * quantity[self.Name],
                    other.all_values * quantity[other.Name]
                ],
                axis=0,
                weights=weights,
            )
            new_values /= new_values.max()
        else:
            # quantity is list-like of len 2: [quantity_self, quantity_other]
            new_values = np.average(
                [
                    self.all_values * quantity[0],
                    other.all_values * quantity[1]
                ],
                axis=0,
                weights=weights,
            )
            new_values /= new_values.max()

        # the new object's name
        meta = self._get_predecessors_meta(other)

        # rebuild a schedule from the averaged values, carrying over self's
        # remaining attributes
        attr = self.__dict__.copy()
        attr.update(dict(values=new_values))
        attr["Name"] = meta["Name"]
        new_obj = super().from_values(**attr)
        new_name = ("Combined Schedule {{{}}} with mean daily min:{:.2f} "
                    "mean:{:.2f} max:{:.2f}".format(uuid.uuid1(), new_obj.min,
                                                    new_obj.mean, new_obj.max))
        new_obj.rename(new_name)
        new_obj._predecessors.extend(self.predecessors + other.predecessors)
        # the combined schedule's weight is the sum of its parts' weights
        new_obj.weights = sum(weights)
        return new_obj
Beispiel #27
0
def save_and_show(fig, ax, save, show, close, filename, file_format, dpi,
                  axis_off, extent):
    """Save a figure to disk and show it, as specified.

    Args:
        fig (matplotlib.figure.Figure): the figure
        ax (matplotlib.axes.Axes or list(matplotlib.axes.Axes)): the axes
        save (bool): whether to save the figure to disk or not
        show (bool): whether to display the figure or not
        close (bool): close the figure (only if show equals False) to prevent
            display
        filename (string): the name of the file to save
        file_format (string): the format of the file to save (e.g., 'jpg',
            'png', 'svg')
        dpi (int): the resolution of the image file if saving (Dots per inch)
        axis_off (bool): if True matplotlib axis was turned off by plot_graph so
            constrain the saved figure's extent to the interior of the axis
        extent: the bbox_inches value passed to fig.savefig, or None to
            compute it from the axis

    Returns:
        (tuple) fig, ax
    """
    # save the figure if specified
    if save:
        start_time = time.time()

        # create the save folder if it doesn't already exist
        if not os.path.exists(settings.imgs_folder):
            os.makedirs(settings.imgs_folder)
        path_filename = os.path.join(settings.imgs_folder,
                                     os.extsep.join([filename, file_format]))

        # bug fix: the original looped `for ax in ax`, rebinding `ax` to the
        # last axis (or to a list), which changed the returned value. Use a
        # distinct variable for the normalized list of axes instead.
        axes = ax if isinstance(ax, (np.ndarray, list)) else [ax]
        if file_format == "svg":
            for axis in axes:
                # if the file_format is svg, prep the fig/ax a bit for saving
                axis.axis("off")
                axis.set_position([0, 0, 1, 1])
                axis.patch.set_alpha(0.0)
            fig.patch.set_alpha(0.0)
            fig.savefig(
                path_filename,
                bbox_inches=0,
                format=file_format,
                facecolor=fig.get_facecolor(),
                transparent=True,
            )
        else:
            if extent is None:
                if len(axes) == 1:
                    if axis_off:
                        for axis in axes:
                            # if axis is turned off, constrain the saved
                            # figure's extent to the interior of the axis
                            extent = axis.get_window_extent().transformed(
                                fig.dpi_scale_trans.inverted())
                else:
                    extent = "tight"
            fig.savefig(
                path_filename,
                dpi=dpi,
                bbox_inches=extent,
                format=file_format,
                facecolor=fig.get_facecolor(),
                transparent=True,
            )
        log("Saved the figure to disk in {:,.2f} seconds".format(time.time() -
                                                                 start_time))

    # show the figure if specified
    if show:
        start_time = time.time()
        plt.show()
        log("Showed the plot in {:,.2f} seconds".format(time.time() -
                                                        start_time))
    # if show=False, close the figure if close=True to prevent display
    elif close:
        plt.close()

    return fig, ax
Beispiel #28
0
def stat_can_request(type,
                     lang="E",
                     dguid="2016A000011124",
                     topic=0,
                     notes=0,
                     stat=0):
    """Send a request to the StatCan API via HTTP GET and return the JSON
    response. Responses are cached; a cached response is returned without
    making a new HTTP call.

    Args:
        type (str): "json" or "xml". json = json response format and xml = xml
            response format.
        lang (str): "E" or "F". E = English and F = French.
        dguid (str): Dissemination Geography Unique Identifier - DGUID. It is an
            alphanumeric code, composed of four components. It varies from 10 to
            21 characters in length. The first 9 characters are fixed in
            composition and length. Vintage (4) + Type (1) + Schema (4) +
            Geographic Unique Identifier (1-12). To find dguid, use any GEO_UID
            ( i.e., DGUID) returned by the 2016 Census geography web data
            service. For more information on the DGUID definition and structure,
            please refer to the `Dissemination Geography Unique Identifier,
            Definition and Structure
            <https://www150.statcan.gc.ca/n1/pub/92f0138m/92f0138m2019001-eng.htm>`_ ,
            Statistics Canada catalogue no. 92F0138M-2019001.
        topic (str): Integer 0-14 (default=0) where: 1. All topics 2. Aboriginal
            peoples 3. Education 4. Ethnic origin 5. Families, households and
            marital status 6. Housing 7. Immigration and citizenship 8. Income
            9. Journey to work 10. Labour 11. Language 12. Language of work 13.
            Mobility 14. Population 15. Visible minority.
        notes (int): 0 or 1. 0 = do not include footnotes. 1 = include
            footnotes.
        stat (int): 0 or 1. 0 = counts. 1 = rates.

    Returns:
        dict: the decoded JSON response, or None when the server returned no
            parsable JSON data.
    """
    prepared_url = ("https://www12.statcan.gc.ca/rest/census-recensement"
                    "/CPR2016.{type}?lang={lang}&dguid={dguid}&topic="
                    "{topic}&notes={notes}&stat={stat}".format(type=type,
                                                               lang=lang,
                                                               dguid=dguid,
                                                               topic=topic,
                                                               notes=notes,
                                                               stat=stat))

    cached_response_json = get_from_cache(prepared_url)

    if cached_response_json is not None:
        # found this request in the cache, just return it instead of making a
        # new HTTP call
        return cached_response_json

    # if this URL is not already in the cache, request it
    start_time = time.time()
    log("Getting from {}".format(prepared_url))
    response = requests.get(prepared_url)
    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.0
    domain = re.findall(r"//(?s)(.*?)/", prepared_url)[0]
    log("Downloaded {:,.1f}KB from {}"
        " in {:,.2f} seconds".format(size_kb, domain,
                                     time.time() - start_time))

    try:
        response_json = response.json()
        if "remark" in response_json:
            # bug fix: `level=lg.WARNING` was previously passed to
            # str.format() (where it is silently ignored) instead of log()
            log('Server remark: "{}"'.format(response_json["remark"]),
                level=lg.WARNING)
        save_to_cache(prepared_url, response_json)
    except Exception:
        # There seems to be a double backlash in the response. We try
        # removing it here.
        try:
            response_str = response.content.decode("UTF-8").replace(
                "//", "")
            response_json = json.loads(response_str)
        except Exception:
            pass
        else:
            save_to_cache(prepared_url, response_json)
            return response_json
        # deal with response status_code here
        log(
            "Server at {} returned status code {} and no JSON "
            "data.".format(domain, response.status_code),
            level=lg.ERROR,
        )
    else:
        return response_json
Beispiel #29
0
    def combine(self, other, weights=None):
        """Combine two Zone objects together.

        Args:
            other (Zone): The other Zone object to combine with self.
            weights (list-like, optional): A list-like object of len 2. If None,
                the volume of the zones for which self and other belongs is
                used.

        Returns:
            (Zone): the combined Zone object.

        Raises:
            NotImplementedError: if *other* is not a Zone.
        """
        # Check if other is the same type as self
        if not isinstance(other, self.__class__):
            msg = "Cannot combine %s with %s" % (
                self.__class__.__name__,
                other.__class__.__name__,
            )
            raise NotImplementedError(msg)

        # Check if other is not the same as self
        if self == other:
            return self

        # NOTE(review): this snapshot is never read afterwards — presumably
        # dead code; confirm before removing.
        incoming_zone_data = self.__dict__.copy()
        incoming_zone_data.pop("Name")

        meta = self._get_predecessors_meta(other)

        if not weights:
            # default weighting: the zone attribute named by
            # settings.zone_weight (e.g. volume) of each zone
            zone_weight = settings.zone_weight
            weights = [
                getattr(self, str(zone_weight)),
                getattr(other, str(zone_weight)),
            ]
            log(
                'using zone {} "{}" as weighting factor in "{}" '
                "combine.".format(
                    zone_weight,
                    " & ".join(list(map(str, map(int, weights)))),
                    self.__class__.__name__,
                )
            )

        # combine each sub-component pairwise with the same weights
        attr = dict(
            Conditioning=self.Conditioning.combine(other.Conditioning, weights),
            Constructions=self.Constructions.combine(other.Constructions, weights),
            Ventilation=self.Ventilation.combine(other.Ventilation, weights),
            # Windows are only combined when both zones define them
            Windows=None
            if self.Windows is None or other.Windows is None
            else self.Windows.combine(other.Windows, weights),
            DaylightMeshResolution=self._float_mean(
                other, "DaylightMeshResolution", weights=weights
            ),
            DaylightWorkplaneHeight=self._float_mean(
                other, "DaylightWorkplaneHeight", weights
            ),
            DomesticHotWater=self.DomesticHotWater.combine(
                other.DomesticHotWater, weights
            ),
            InternalMassConstruction=self.InternalMassConstruction.combine(
                other.InternalMassConstruction, weights
            ),
            InternalMassExposedPerFloorArea=self._float_mean(
                other, "InternalMassExposedPerFloorArea", weights
            ),
            Loads=self.Loads.combine(other.Loads, weights),
        )
        new_obj = self.__class__(**meta, **attr)
        # volumes and areas are additive when zones are merged
        new_obj._volume = self.volume + other.volume
        new_obj._area = self.area + other.area
        # re-point the combined sub-components at the new zone
        attr["Conditioning"]._belongs_to_zone = new_obj
        attr["Constructions"]._belongs_to_zone = new_obj
        attr["Ventilation"]._belongs_to_zone = new_obj
        attr["DomesticHotWater"]._belongs_to_zone = new_obj
        if attr["Windows"]:
            attr["Windows"]._belongs_to_zone = new_obj
        new_obj._predecessors.extend(self.predecessors + other.predecessors)
        return new_obj
Beispiel #30
0
def download_bld_window(u_factor,
                        shgc,
                        vis_trans,
                        oauth_key,
                        tolerance=0.05,
                        extension='idf'):
    """Download window components from the NREL BCL matching the given
    performance values and extract them to the cache folder.

    Args:
        u_factor (float or tuple): center-of-glass u-factor, or a (from, to)
            range.
        shgc (float or tuple): Solar Heat Gain Coefficient, or a (from, to)
            range.
        vis_trans (float or tuple): Visible Transmittance, or a (from, to)
            range.
        tolerance (float): relative tolerance applied to scalar values.
        oauth_key (str): the BCL authentication key.
        extension (str): specify the extension of the file to download

    Returns:
        eppy.IDF

    Raises:
        ValueError: if the BCL API request fails.
    """
    # check if one or multiple values
    if isinstance(u_factor, tuple):
        u_factor_dict = '[{} TO {}]'.format(u_factor[0], u_factor[1])
    else:
        # apply tolerance
        u_factor_dict = '[{} TO {}]'.format(u_factor * (1 - tolerance),
                                            u_factor * (1 + tolerance))
    if isinstance(shgc, tuple):
        shgc_dict = '[{} TO {}]'.format(shgc[0], shgc[1])
    else:
        # apply tolerance
        shgc_dict = '[{} TO {}]'.format(shgc * (1 - tolerance),
                                        shgc * (1 + tolerance))
    if isinstance(vis_trans, tuple):
        vis_trans_dict = '[{} TO {}]'.format(vis_trans[0], vis_trans[1])
    else:
        # apply tolerance
        vis_trans_dict = '[{} TO {}]'.format(vis_trans * (1 - tolerance),
                                             vis_trans * (1 + tolerance))

    data = {
        'keyword':
        'Window',
        'format':
        'json',
        'f[]': [
            'fs_a_Overall_U-factor:{}'.format(u_factor_dict),
            'fs_a_VLT:{}'.format(vis_trans_dict),
            'fs_a_SHGC:{}'.format(shgc_dict), 'sm_component_type:"Window"'
        ],
        'oauth_consumer_key':
        oauth_key
    }
    response = nrel_bcl_api_request(data)

    # bug fix: a None response previously raised a TypeError on the
    # subscript below; fail with a clear message instead
    if response is None:
        raise ValueError('Could not download window from NREL Building '
                         'Components Library. An error occurred with the '
                         'nrel_api_request')

    if response['result']:
        log('found {} possible window component(s) matching '
            'the range {}'.format(len(response['result']), str(data['f[]'])))

    # download components
    uids = [component['component']['uid'] for component in response['result']]
    url = 'https://bcl.nrel.gov/api/component/download?uids={}'.format(
        ','.join(uids))
    # actual download with get()
    d_response = requests.get(url)

    if d_response.ok:
        # loop through files and extract the ones that match the extension
        # parameter
        with zipfile.ZipFile(io.BytesIO(d_response.content)) as z:
            for info in z.infolist():
                if info.filename.endswith(extension):
                    z.extract(info, path=settings.cache_folder)

    # todo: read the idf somehow

    return response['result']