Example #1
from ladybug.dt import Date


def test_date_from_doy():
    """Test the from_doy method for Date and basic properties."""
    dt1 = Date.from_doy(172)
    assert dt1 == Date(6, 21)
    dt2 = Date.from_doy(172, leap_year=True)
    assert dt2 == Date(6, 20, leap_year=True)
    dt3 = Date.from_doy(181)
    assert dt3 == Date(6, 30)
    dt4 = Date.from_doy(182)
    assert dt4 == Date(7, 1)
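For context, the mapping that from_doy performs can be sketched with plain
month-length arithmetic. The helper month_day_from_doy below is hypothetical,
not ladybug's actual implementation:

MONTH_DAYS = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)


def month_day_from_doy(doy, leap_year=False):
    """Convert a day of the year (1-366) to a (month, day) tuple."""
    month_lengths = list(MONTH_DAYS)
    if leap_year:
        month_lengths[1] = 29  # February gains a day in leap years
    for month, length in enumerate(month_lengths, start=1):
        if doy <= length:
            return month, doy
        doy -= length
    raise ValueError('doy must be between 1 and 365 (366 in a leap year)')


assert month_day_from_doy(172) == (6, 21)
assert month_day_from_doy(172, leap_year=True) == (6, 20)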
Example #2
from ladybug.dt import Date
from ladybug_geometry.geometry2d.pointvector import Point2D
from ladybug_rhino.fromgeometry import \
    from_polyline2d, from_polyline3d, from_arc3d


# NOTE: daily_, projection_ and solar_time_ are module-level inputs supplied
# by the Grasshopper component from which this function is taken.
def draw_analemma_and_arcs(sp, datetimes, radius, center_pt3d):
    """Draw analemma and day arc Rhino geometry.

    Args:
        sp: Sunpath object for which geometry will be drawn.
        datetimes: A list of datetimes, which will be used to get days
            if daily_ is True.
        radius: Number for the radius of the sun path.
        center_pt3d: Point3D for the center of the sun path.

    Returns:
        analemma: List of Rhino curves for the analemmas
        daily: List of Rhino curves for the daily arcs.
    """
    sp.daylight_saving_period = None  # set so DST does not distort the analemmas

    center_pt, z = Point2D(center_pt3d.x, center_pt3d.y), center_pt3d.z
    if not daily_:
        if projection_ is None:
            analemma = [
                from_polyline3d(pline)
                for pline in sp.hourly_analemma_polyline3d(
                    center_pt3d, radius, True, solar_time_)
            ]
            daily = [
                from_arc3d(arc)
                for arc in sp.monthly_day_arc3d(center_pt3d, radius)
            ]
        else:
            analemma = [
                from_polyline2d(pline, z)
                for pline in sp.hourly_analemma_polyline2d(
                    projection_, center_pt, radius, True, solar_time_)
            ]
            daily = [
                from_polyline2d(arc, z) for arc in sp.monthly_day_polyline2d(
                    projection_, center_pt, radius)
            ]
    else:
        analemma = []  # No Analemmas for a daily sun path
        doys = set(dt.doy for dt in datetimes)
        dates = [Date.from_doy(doy) for doy in doys]
        if projection_ is None:
            daily = [
                from_arc3d(
                    sp.day_arc3d(dat.month, dat.day, center_pt3d, radius))
                for dat in dates
            ]
        else:
            daily = []
            for dat in dates:
                pline = sp.day_polyline2d(dat.month, dat.day, projection_,
                                          center_pt, radius)
                daily.append(from_polyline2d(pline, z))
    return analemma, daily
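A hedged usage sketch for the ladybug Sunpath methods that the function wraps,
skipping the Rhino conversion helpers; the Boston coordinates are illustrative:

from ladybug.sunpath import Sunpath
from ladybug_geometry.geometry3d.pointvector import Point3D

sp = Sunpath(latitude=42.37, longitude=-71.03)  # illustrative location (Boston)
center = Point3D(0, 0, 0)
analemmas = sp.hourly_analemma_polyline3d(center, 100)  # list of Polyline3D
day_arcs = sp.monthly_day_arc3d(center, 100)  # list of Arc3D, one per month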
Example #3
    # the collection classes referenced below come from ladybug.datacollection;
    # AnalysisPeriod, Date and DateTime come from ladybug.analysisperiod and ladybug.dt
    def time_interval_text(self, simulation_step):
        """Get text for a specific time step of the data collections.

        Args:
            simulation_step: An integer for the step of simulation for which
                text should be generated.
        """
        hourly_colls = (HourlyContinuousCollection,
                        HourlyDiscontinuousCollection)
        if isinstance(self._base_collection, hourly_colls):
            return str(self._base_collection.datetimes[simulation_step])
        elif isinstance(self._base_collection, MonthlyCollection):
            month_names = AnalysisPeriod.MONTHNAMES
            return month_names[
                self._base_collection.datetimes[simulation_step]]
        elif isinstance(self._base_collection, DailyCollection):
            return str(
                Date.from_doy(
                    self._base_collection.datetimes[simulation_step]))
        elif isinstance(self._base_collection, MonthlyPerHourCollection):
            dt_tuple = self._base_collection.datetimes[simulation_step]
            date_time = DateTime(month=dt_tuple[0], hour=dt_tuple[1])
            return date_time.strftime('%b %H:%M')
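To illustrate the final branch, ladybug's DateTime accepts keyword month and
hour arguments and supports standard strftime formatting:

from ladybug.dt import DateTime

date_time = DateTime(month=7, hour=14)
print(date_time.strftime('%b %H:%M'))  # prints 'Jul 14:00'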
Example #4
import os
import sys
import json
import logging

from ladybug.dt import Date
from ladybug.datacollection import HourlyContinuousCollection, \
    DailyCollection, MonthlyCollection
from honeybee.model import Model
from honeybee.face import Face
from honeybee_energy.result.sql import SQLiteResult
from honeybee_energy.result.match import match_rooms_to_data, match_faces_to_data

_logger = logging.getLogger(__name__)


# NOTE: this appears to be a click CLI command; the decorators defining its
# arguments and options are not shown in this snippet.
def output_csv_queryable(result_sql, model_json, run_period_name, output_names,
                         si, normalize, folder, log_file):
    """Get CSV of outputs resembling a SQLite table that is easily queryable.

    \b
    Args:
        result_sql: Full path to an SQLite file that was generated by EnergyPlus.
        model_json: Full path to a Model JSON that will be matched with the results.
        run_period_name: The name of the run period from which the CSV data will
            be selected (e.g. "BOSTON LOGAN INTL ARPT ANN CLG .4% CONDNS DB=>MWB").
        output_names: The name of an EnergyPlus output to be retrieved from
            the SQLite result file. This can also be several output names
            for which all data collections should be retrieved.
    """
    try:
        # figure out the index of the run period
        sql_obj = SQLiteResult(result_sql)
        per_names, per_indices = sql_obj.run_period_names, sql_obj.run_period_indices
        per_i = per_indices[per_names.index(run_period_name)]

        # get the data collections for each output
        data_colls = []
        for output_name in output_names:
            output_name = str(output_name)
            if output_name.startswith('['):  # a list of several output names
                sub_names = tuple(outp.replace('"', '').strip()
                                  for outp in output_name.strip('[]').split(','))
                for outp in sub_names:
                    col = sql_obj.data_collections_by_output_name_run_period(outp, per_i)
                    data_colls.append(col)
            else:
                col = sql_obj.data_collections_by_output_name_run_period(
                    output_name, per_i)
                data_colls.append(col)

        # convert the data to IP if it was requested
        if not si:
            for colls in data_colls:
                for data in colls:
                    data.convert_to_ip()

        # re-serialize the Model to Python and ensure it's in correct SI/IP units
        with open(model_json) as json_file:
            data = json.load(json_file)
        model = Model.from_dict(data)
        if si:
            model.convert_to_units('Meters')
        else:
            model.convert_to_units('Feet')

        # match the objects in the Model to the data collections
        room_csv_data = []
        face_csv_data = []
        faces = None
        for colls in data_colls:
            if len(colls) == 0:
                continue
            if 'Surface' in colls[0].header.metadata:
                if faces is None:
                    faces = []
                    for room in model.rooms:
                        faces.extend(room.faces)
                match_data = match_faces_to_data(colls, faces)
                if len(match_data) != 0:
                    face_csv_data.append(match_data)
            elif 'Zone' in colls[0].header.metadata \
                    or 'System' in colls[0].header.metadata:
                match_data = match_rooms_to_data(colls, model.rooms)
                if len(match_data) != 0:
                    room_csv_data.append(match_data)
        assert len(room_csv_data) != 0 or len(face_csv_data) != 0, \
            'None of the requested outputs could be matched to the model_json.'

        # normalize the data if this was requested
        if normalize:
            for matched_data in face_csv_data:  # normalize face data
                if matched_data[0][1].header.data_type.normalized_type is not None:
                    for matched_tup in matched_data:
                        area = matched_tup[0].area \
                            if not isinstance(matched_tup[0], Face) \
                            else matched_tup[0].punched_geometry.area
                        matched_tup[1].values = \
                            [val / area for val in matched_tup[1].values]
        for matched_data in room_csv_data:  # normalize room data
            if normalize and matched_data[0][1].header.data_type.normalized_type \
                    is not None:
                for matched_tup in matched_data:
                    area = matched_tup[0].floor_area
                    try:
                        matched_tup[1].values = [val / (area * matched_tup[2])
                                                 for val in matched_tup[1].values]
                    except ZeroDivisionError:  # no floor area for room
                        matched_tup[1].values = [0] * len(matched_tup[1])
            else:  # we should still account for room multipliers
                for matched_tup in matched_data:
                    matched_tup[1].values = \
                        [val / matched_tup[2] for val in matched_tup[1].values]

        # create the datetime columns
        base_coll = room_csv_data[0][0][1] if len(room_csv_data) != 0 else \
            face_csv_data[0][0][1]
        year = '2016' if base_coll.header.analysis_period.is_leap_year else '2017'
        date_times = []
        if isinstance(base_coll, HourlyContinuousCollection):
            for dat_t in base_coll.datetimes:
                date_times.append(
                    [year, str(dat_t.month), str(dat_t.day), str(dat_t.hour),
                     str(dat_t.minute)])
        elif isinstance(base_coll, DailyCollection):
            for dat_t in base_coll.datetimes:
                date_obj = Date.from_doy(dat_t)
                date_times.append(
                    [year, str(date_obj.month), str(date_obj.day), '0', '0'])
        elif isinstance(base_coll, MonthlyCollection):
            for dat_t in base_coll.datetimes:
                date_times.append([year, str(dat_t), '1', '0', '0'])

        # determine the output folder location
        if folder is None:
            folder = os.path.dirname(result_sql)

        # write everything into the output CSVs
        def write_rows(csv_file, datas, identifier):
            data_rows = [row[:] for row in date_times]  # copy datetimes
            for row in data_rows:
                row.append(identifier)
            for data in datas:
                for i, val in enumerate(data.values):
                    data_rows[i].append(str(val))
            for row in data_rows:
                csv_file.write(','.join(row) + '\n')

        col_names_dict = {}
        if len(room_csv_data) != 0:
            room_file = os.path.join(folder, 'eplusout_room.csv')
            col_names_dict['eplusout_room'] = \
                ['year', 'month', 'day', 'hour', 'minute', 'identifier'] + \
                [data[0][1].header.metadata['type'].replace(' ', '_').lower()
                 for data in room_csv_data]
            with open(room_file, 'w') as rm_file:
                rm_file.write(','.join(col_names_dict['eplusout_room']) + '\n')
                for outp_tups in zip(*room_csv_data):
                    datas = [tup[1] for tup in outp_tups]
                    identifier = outp_tups[0][0].identifier
                    write_rows(rm_file, datas, identifier)
        if len(face_csv_data) != 0:
            face_file = os.path.join(folder, 'eplusout_face.csv')
            col_names_dict['eplusout_face'] = \
                ['year', 'month', 'day', 'hour', 'minute', 'identifier'] + \
                [data[0][1].header.metadata['type'].replace(' ', '_').lower()
                 for data in face_csv_data]
            with open(face_file, 'w') as f_file:
                f_file.write(','.join(col_names_dict['eplusout_face']) + '\n')
                for outp_tups in zip(*face_csv_data):
                    datas = [tup[1] for tup in outp_tups]
                    identifier = outp_tups[0][0].identifier
                    write_rows(f_file, datas, identifier)

        # write the column names into the output file
        log_file.write(json.dumps(col_names_dict))
    except Exception as e:
        _logger.exception('Failed to write queryable csv from sql file.\n{}'.format(e))
        sys.exit(1)
    else:
        sys.exit(0)
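For reference, a minimal sketch of querying an EnergyPlus SQLite file directly
through the SQLiteResult class that the command builds on; the file path and
output name are illustrative:

from honeybee_energy.result.sql import SQLiteResult

sql_obj = SQLiteResult('./eplusout.sql')  # illustrative path
print(sql_obj.run_period_names)  # names usable as run_period_name above
colls = sql_obj.data_collections_by_output_name('Zone Lights Electric Energy')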