Example No. 1
def _run_reopt_unix(feature_geojson, scenario_csv, developer_key):
    """Run a feature and scenario file through REopt on a Unix-based os.

    This includes both Mac OS and Linux since a shell will be used to run
    the simulation.

    Args:
        feature_geojson: The full path to a .geojson file containing the
            footprints of buildings to be simulated.
        scenario_csv: The full path to a .csv file for the URBANopt scenario.
        developer_key: Text string for the NREL developer key.

    Returns:
        Path to the folder in which results should be contained.
    """
    # check the input file
    directory = _check_urbanopt_file(feature_geojson, scenario_csv)
    # Write the shell script to call OpenStudio CLI
    shell = '#!/usr/bin/env bash\nsource {}\nGEM_DEVELOPER_KEY={}\n' \
        'uo process --reopt-scenario -f {} -s {}'.format(
            folders.urbanopt_env_path, developer_key, feature_geojson, scenario_csv)
    shell_file = os.path.join(directory, 'run_reopt.sh')
    write_to_file(shell_file, shell, True)
    # make the shell script executable using subprocess.check_call
    # this is more reliable than native Python chmod on Mac
    subprocess.check_call(['chmod', 'u+x', shell_file])
    # run the shell script
    subprocess.call(shell_file)
    result_folder = os.path.basename(scenario_csv).lower().replace('.csv', '')
    return os.path.join(directory, 'run', result_folder)
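
A minimal dispatch sketch for context (hedged: it assumes both platform-specific helpers in this listing are importable from the same module, and `run_reopt` is a hypothetical wrapper name):

import os

def run_reopt(feature_geojson, scenario_csv, developer_key):
    """Hypothetical wrapper that picks the platform-specific REopt helper."""
    if os.name == 'nt':  # Windows
        return _run_reopt_windows(feature_geojson, scenario_csv, developer_key)
    return _run_reopt_unix(feature_geojson, scenario_csv, developer_key)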
Example No. 2
def _run_urbanopt_windows(feature_geojson, scenario_csv):
    """Run a feature and scenario file through URBANopt on a Windows-based os.

    A batch file will be used to run the simulation.

    Args:
        feature_geojson: The full path to a .geojson file containing the
            footprints of buildings to be simulated.
        scenario_csv: The full path to a .csv file for the URBANopt scenario.

    Returns:
        Path to the folder out of which the simulation was run.
    """
    # check the input file
    directory = _check_urbanopt_file(feature_geojson, scenario_csv)
    # Write the batch file to call URBANopt CLI
    working_drive = directory[:2]
    batch = '{}\ncd {}\ncall {}\nuo run -f {} -s {}'.format(
        working_drive, working_drive, folders.urbanopt_env_path,
        feature_geojson, scenario_csv)
    batch_file = os.path.join(directory, 'run_simulation.bat')
    write_to_file(batch_file, batch, True)
    # run the batch file
    os.system(batch_file)
    return directory
Example No. 3
def _run_reopt_windows(feature_geojson, scenario_csv, developer_key):
    """Run a feature and scenario file through REopt on a Windows-based os.

    A batch file will be used to run the simulation.

    Args:
        feature_geojson: The full path to a .geojson file containing the
            footprints of buildings to be simulated.
        scenario_csv: The full path to a .csv file for the URBANopt scenario.
        developer_key: Text string for the NREL developer key.

    Returns:
        Path to the folder in which results should be contained.
    """
    # check the input file
    directory = _check_urbanopt_file(feature_geojson, scenario_csv)
    # Write the batch file to call URBANopt CLI
    working_drive = directory[:2]
    batch = '{}\ncd {}\ncall {}\nSET GEM_DEVELOPER_KEY={}\n' \
        'uo process --reopt-scenario -f {} -s {}'.format(
            working_drive, working_drive, folders.urbanopt_env_path, developer_key,
            feature_geojson, scenario_csv)
    batch_file = os.path.join(directory, 'run_reopt.bat')
    write_to_file(batch_file, batch, True)
    # run the batch file
    os.system(batch_file)
    result_folder = os.path.basename(scenario_csv).lower().replace('.csv', '')
    return os.path.join(directory, 'run', result_folder)
Example No. 4
def _run_default_report_windows(feature_geojson, scenario_csv):
    """Generate default reports on a Windows-based os.

    Args:
        feature_geojson: The full path to a .geojson file containing the
            footprints of buildings to be simulated.
        scenario_csv: The full path to a .csv file for the URBANopt scenario.

    Returns:
        Paths to the scenario CSV and JSON reports.
    """
    # check the input file
    directory = _check_urbanopt_file(feature_geojson, scenario_csv)
    # Write the batch file to call URBANopt CLI
    working_drive = directory[:2]
    batch = '{}\ncd {}\ncall {}\nuo process --default -f {} -s {}'.format(
        working_drive, working_drive, folders.urbanopt_env_path,
        feature_geojson, scenario_csv)
    batch_file = os.path.join(directory, 'run_default_report.bat')
    write_to_file(batch_file, batch, True)
    # run the batch file and return output files
    os.system(batch_file)
    result_folder = os.path.basename(scenario_csv).lower().replace('.csv', '')
    run_folder = os.path.join(directory, 'run', result_folder)
    return os.path.join(run_folder, 'default_scenario_report.csv'), \
        os.path.join(run_folder, 'default_scenario_report.json')
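
A short, hedged sketch of consuming the two report paths returned above (the .geojson and .csv inputs are hypothetical placeholders):

import json
import os

csv_report, json_report = _run_default_report_windows(
    'C:\\project\\feature.geojson', 'C:\\project\\scenario.csv')
if os.path.isfile(json_report):
    with open(json_report) as jf:
        report = json.load(jf)  # parsed default scenario report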
Example No. 5
def _run_default_report_unix(feature_geojson, scenario_csv):
    """Generate default reports on a Unix-based os.

    Args:
        feature_geojson: The full path to a .geojson file containing the
            footprints of buildings to be simulated.
        scenario_csv: The full path to a .csv file for the URBANopt scenario.

    Returns:
        Paths to the scenario CSV and JSON reports.
    """
    # check the input file
    directory = _check_urbanopt_file(feature_geojson, scenario_csv)
    # Write the shell script to call OpenStudio CLI
    shell = '#!/usr/bin/env bash\nsource {}\n' \
        'uo process --default -f {} -s {}'.format(
            folders.urbanopt_env_path, feature_geojson, scenario_csv)
    shell_file = os.path.join(directory, 'run_default_report.sh')
    write_to_file(shell_file, shell, True)
    # make the shell script executable using subprocess.check_call
    # this is more reliable than native Python chmod on Mac
    subprocess.check_call(['chmod', 'u+x', shell_file])
    # run the shell script
    subprocess.call(shell_file)
    result_folder = os.path.basename(scenario_csv).lower().replace('.csv', '')
    run_folder = os.path.join(directory, 'run', result_folder)
    return os.path.join(run_folder, 'default_scenario_report.csv'), \
        os.path.join(run_folder, 'default_scenario_report.json')
Example No. 6
def test_run_idf():
    """Test the prepare_idf_for_simulation and run_idf methods."""
    # Get input Model
    room = Room.from_box('TinyHouseZone', 5, 10, 3)
    room.properties.energy.program_type = office_program
    room.properties.energy.add_default_ideal_air()
    model = Model('TinyHouse', [room])

    # Get the input SimulationParameter
    sim_par = SimulationParameter()
    sim_par.output.add_zone_energy_use()
    ddy_file = './tests/ddy/chicago.ddy'
    sim_par.sizing_parameter.add_from_ddy_996_004(ddy_file)
    sim_par.run_period.end_date = Date(1, 7)

    # create the IDF string for simulation parameters and model
    idf_str = '\n\n'.join((sim_par.to_idf(), model.to.idf(model)))

    # write the final string into an IDF
    idf = os.path.join(folders.default_simulation_folder, 'test_file',
                       'in.idf')
    write_to_file(idf, idf_str, True)

    # prepare the IDF for simulation
    epw_file = './tests/simulation/chicago.epw'
    prepare_idf_for_simulation(idf, epw_file)

    # run the IDF through EnergyPlus
    sql, zsz, rdd, html, err = run_idf(idf, epw_file)

    assert os.path.isfile(sql)
    assert os.path.isfile(err)
    err_obj = Err(err)
    assert 'EnergyPlus Completed Successfully' in err_obj.file_contents
Example No. 7
def _run_urbanopt_unix(feature_geojson, scenario_csv):
    """Run a feature and scenario file through URBANopt on a Unix-based os.

    This includes both Mac OS and Linux since a shell will be used to run
    the simulation.

    Args:
        feature_geojson: The full path to a .geojson file containing the
            footprints of buildings to be simulated.
        scenario_csv: The full path to a .csv file for the URBANopt scenario.

    Returns:
        Path to the folder out of which the simulation was run.
    """
    # check the input file
    directory = _check_urbanopt_file(feature_geojson, scenario_csv)
    # Write the shell script to call OpenStudio CLI
    shell = '#!/usr/bin/env bash\nsource {}\nuo -r -f {} -s {}'.format(
        folders.urbanopt_env_path, feature_geojson, scenario_csv)
    shell_file = os.path.join(directory, 'run_simulation.sh')
    write_to_file(shell_file, shell, True)
    # make the shell script executable using subprocess.check_call
    # this is more reliable than native Python chmod on Mac
    subprocess.check_call(['chmod', 'u+x', shell_file])
    # run the shell script
    subprocess.call(shell_file)
    return directory
Example No. 8
def _run_osw_unix(osw_json, measures_only=True):
    """Run a .osw file using the OpenStudio CLI on a Unix-based operating system.

    This includes both Mac OS and Linux since a shell will be used to run
    the simulation.

    Args:
        osw_json: File path to an OSW file to be run using OpenStudio CLI.
        measures_only: Boolean to note whether only the measures should be applied
            in the running of the OSW (True) or the resulting model should be run
            through EnergyPlus after the measures are applied to it (False).
            Default: True.

    Returns:
        Path to the folder out of which the OSW was run.
    """
    # check the input file
    directory = _check_osw(osw_json)

    # Write the shell script to call OpenStudio CLI
    measure_str = '-m ' if measures_only else ''
    shell = '#!/usr/bin/env bash\n"{}" -I "{}" run {}-w "{}"'.format(
        folders.openstudio_exe, folders.honeybee_openstudio_gem_path,
        measure_str, osw_json)
    shell_file = os.path.join(directory, 'run_workflow.sh')
    write_to_file(shell_file, shell, True)

    # make the shell script executable using subprocess.check_call
    # this is more reliable than native Python chmod on Mac
    subprocess.check_call(['chmod', 'u+x', shell_file])

    # run the shell script
    subprocess.call(shell_file)

    return directory
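
A hedged usage sketch (hypothetical OSW path; it assumes the OpenStudio CLI writes its results into a 'run' subfolder next to the OSW, as is typical):

import os

osw_dir = _run_osw_unix('/path/to/workflow.osw', measures_only=False)
osm_file = os.path.join(osw_dir, 'run', 'in.osm')  # expected location of the resulting OSM
if not os.path.isfile(osm_file):
    print('OSW run did not produce an OSM; check the CLI output in', osw_dir)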
Example No. 9
def _run_uwg_unix(uwg_json_path, epw_file_path, epw_name):
    """Run a JSON file through the UWG on a Unix-based operating system.

    This includes both Mac OS and Linux since a shell will be used to run
    the simulation.

    Args:
        uwg_json_path: The full path to a UWG JSON file.
        epw_file_path: The full path to an EPW file.
        epw_name: Text for the name of the EPW file.

    Returns:
        File path to the morphed EPW. Will be None if the UWG failed to run.
    """
    directory = os.path.dirname(uwg_json_path)
    # write a shell file
    shell = '#!/usr/bin/env bash\n\ncd "{}"\n {} -m uwg simulate model "{}" "{}" ' \
        '--new-epw-dir "{}" --new-epw-name "{}"'.format(
            directory, hb_folders.python_exe_path, uwg_json_path,
            epw_file_path, directory, epw_name)
    shell_file = os.path.join(directory, 'in.sh')
    write_to_file(shell_file, shell, True)

    # make the shell script executable using subprocess.check_call
    # this is more reliable than native Python chmod on Mac
    subprocess.check_call(['chmod', 'u+x', shell_file])

    # run the shell script
    subprocess.call(shell_file)

    epw_file = os.path.join(directory, epw_name)
    return epw_file if os.path.isfile(epw_file) else None
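
Because the helper returns None when the UWG fails, a caller would typically guard on the result (hypothetical paths):

morphed_epw = _run_uwg_unix(
    '/project/uwg.json', '/weather/boston.epw', 'boston_urban.epw')
if morphed_epw is None:
    raise RuntimeError('UWG simulation failed to produce a morphed EPW.')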
Example No. 10
def _run_osw_windows(osw_json, measures_only=True):
    """Run a .osw file using the OpenStudio CLI on a Windows-based operating system.

    A batch file will be used to run the simulation.

    Args:
        osw_json: File path to an OSW file to be run using OpenStudio CLI.
        measures_only: Boolean to note whether only the measures should be applied
            in the running of the OSW (True) or the resulting model should be run
            through EnergyPlus after the measures are applied to it (False).
            Default: True.

    Returns:
        Path to the folder out of which the OSW was run.
    """
    # check the input file
    directory = _check_osw(osw_json)

    # Write the batch file to call OpenStudio CLI
    working_drive = directory[:2]
    measure_str = '-m ' if measures_only else ''
    batch = '{}\n"{}" -I {} run {}-w {}'.format(
        working_drive, folders.openstudio_exe, folders.honeybee_openstudio_gem_path,
        measure_str, osw_json)
    batch_file = os.path.join(directory, 'run_workflow.bat')
    write_to_file(batch_file, batch, True)

    # run the batch file
    os.system(batch_file)

    return directory
Example No. 11
def test_run_idf_5vertex():
    """Test the run_idf methods with 5-vertex apertures."""
    # Get input Model
    input_hb_model = './tests/json/model_colinear_verts.json'
    with open(input_hb_model) as json_file:
        data = json.load(json_file)
    model = Model.from_dict(data)

    # Get the input SimulationParameter
    sim_par = SimulationParameter()
    sim_par.output.add_zone_energy_use()
    ddy_file = './tests/ddy/chicago.ddy'
    sim_par.sizing_parameter.add_from_ddy_996_004(ddy_file)
    sim_par.run_period.end_date = Date(1, 7)

    # create the IDF string for simulation parameters and model
    idf_str = '\n\n'.join((sim_par.to_idf(), model.to.idf(model)))

    # write the final string into an IDF
    idf = os.path.join(folders.default_simulation_folder, 'test_file_5verts',
                       'in.idf')
    write_to_file(idf, idf_str, True)

    # prepare the IDF for simulation
    epw_file = './tests/simulation/chicago.epw'
    prepare_idf_for_simulation(idf, epw_file)

    # run the IDF through EnergyPlus
    sql, zsz, rdd, html, err = run_idf(idf, epw_file)

    assert os.path.isfile(sql)
    assert os.path.isfile(err)
    err_obj = Err(err)
    assert 'EnergyPlus Completed Successfully' in err_obj.file_contents
Example No. 12
def _run_idf_windows(idf_file_path,
                     epw_file_path=None,
                     expand_objects=True,
                     silent=False):
    """Run an IDF file through energyplus on a Windows-based operating system.

    A batch file will be used to run the simulation.

    Args:
        idf_file_path: The full path to an IDF file.
        epw_file_path: The full path to an EPW file. Note that inputting None here
            is only appropriate when the simulation is just for design days and has
            no weather file run period. (Default: None).
        expand_objects: If True, the IDF run will include the expansion of any
            HVAC Template objects in the file before beginning the simulation.
            This is a necessary step whenever there are HVAC Template objects in
            the IDF but it is unnecessary extra time when they are not
            present. (Default: True).
        silent: Boolean to note whether the simulation should be run silently
            (without the batch window). If so, the simulation will be run using
            subprocess with shell set to True. (Default: False).

    Returns:
        Path to the folder out of which the simulation was run.
    """
    # check and prepare the input files
    directory = prepare_idf_for_simulation(idf_file_path, epw_file_path)

    if not silent:  # run the simulations using a batch file
        # generate various arguments to pass to the energyplus command
        epw_str = '-w "{}"'.format(os.path.abspath(epw_file_path)) \
            if epw_file_path is not None else ''
        idd_str = '-i "{}"'.format(folders.energyplus_idd_path)
        expand_str = ' -x' if expand_objects else ''
        working_drive = directory[:2]
        # write the batch file
        batch = '{}\ncd "{}"\n"{}" {} {}{}'.format(working_drive, directory,
                                                   folders.energyplus_exe,
                                                   epw_str, idd_str,
                                                   expand_str)
        batch_file = os.path.join(directory, 'in.bat')
        write_to_file(batch_file, batch, True)
        os.system('"{}"'.format(batch_file))  # run the batch file
    else:  # run the simulation using subprocess
        cmds = [folders.energyplus_exe, '-i', folders.energyplus_idd_path]
        if epw_file_path is not None:
            cmds.append('-w')
            cmds.append(os.path.abspath(epw_file_path))
        if expand_objects:
            cmds.append('-x')
        process = subprocess.Popen(cmds,
                                   cwd=directory,
                                   stdout=subprocess.PIPE,
                                   shell=True)
        # prevents the script from running before command is done
        process.communicate()

    return directory
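
A brief usage sketch (hypothetical Windows paths; 'eplusout.sql' is the standard EnergyPlus output name written into the run folder):

import os

run_dir = _run_idf_windows('C:\\sim\\in.idf', 'C:\\weather\\chicago.epw', silent=True)
sql_path = os.path.join(run_dir, 'eplusout.sql')
if not os.path.isfile(sql_path):
    print('Simulation produced no SQL output; check the .err file in', run_dir)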
Example No. 13
def test_run_idf_window_ventilation():
    """Test the Model.to.idf and run_idf method with a model possessing operable windows."""
    room = Room.from_box('TinyHouseZone', 5, 10, 3)
    room.properties.energy.add_default_ideal_air()
    south_face = room[3]
    north_face = room[1]
    south_face.apertures_by_ratio(0.4, 0.01)
    north_face.apertures_by_ratio(0.4, 0.01)
    south_face.apertures[0].is_operable = True
    north_face.apertures[0].is_operable = True

    heat_setpt = ScheduleRuleset.from_constant_value(
        'House Heating', 20, schedule_types.temperature)
    cool_setpt = ScheduleRuleset.from_constant_value(
        'House Cooling', 28, schedule_types.temperature)
    setpoint = Setpoint('House Setpoint', heat_setpt, cool_setpt)
    room.properties.energy.setpoint = setpoint

    vent_control = VentilationControl(22, 27, 12, 30)
    room.properties.energy.window_vent_control = vent_control
    ventilation = VentilationOpening(wind_cross_vent=True)
    op_aps = room.properties.energy.assign_ventilation_opening(ventilation)
    assert len(op_aps) == 2

    model = Model('TinyHouse', [room])

    # Get the input SimulationParameter
    sim_par = SimulationParameter()
    sim_par.output.add_zone_energy_use()
    ddy_file = './tests/ddy/chicago.ddy'
    sim_par.sizing_parameter.add_from_ddy_996_004(ddy_file)
    sim_par.run_period.end_date = Date(6, 7)
    sim_par.run_period.start_date = Date(6, 1)

    # create the IDF string for simulation parameters and model
    idf_str = '\n\n'.join((sim_par.to_idf(), model.to.idf(model)))

    # write the final string into an IDF
    idf = os.path.join(folders.default_simulation_folder,
                       'test_file_window_vent', 'in.idf')
    write_to_file(idf, idf_str, True)

    # prepare the IDF for simulation
    epw_file = './tests/simulation/chicago.epw'
    prepare_idf_for_simulation(idf, epw_file)

    # run the IDF through EnergyPlus
    sql, zsz, rdd, html, err = run_idf(idf, epw_file)

    assert os.path.isfile(sql)
    assert os.path.isfile(err)
    err_obj = Err(err)
    assert 'EnergyPlus Completed Successfully' in err_obj.file_contents
Example No. 14
def test_run_idf_window_shade():
    """Test the Model.to.idf and run_idf method with a model that has a window shade."""
    # Get input Model
    room = Room.from_box('TinyHouseZone', 5, 10, 3)
    room.properties.energy.program_type = office_program
    room.properties.energy.add_default_ideal_air()

    double_clear = WindowConstruction('Double Pane Clear',
                                      [clear_glass, air_gap, clear_glass])
    shade_mat = EnergyWindowMaterialShade('Low-e Diffusing Shade', 0.005, 0.15,
                                          0.5, 0.25, 0.5, 0, 0.4, 0.2, 0.1,
                                          0.75, 0.25)
    sched = ScheduleRuleset.from_daily_values('NighSched', [
        1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1
    ])
    double_ec = WindowConstructionShade('Double Low-E Inside EC', double_clear,
                                        shade_mat, 'Interior',
                                        'OnIfHighSolarOnWindow', 200, sched)

    south_face = room[3]
    south_face.apertures_by_ratio(0.4, 0.01)
    south_face.apertures[0].properties.energy.construction = double_ec
    north_face = room[1]
    north_face.apertures_by_ratio(0.4, 0.01)

    model = Model('TinyHouse', [room])

    # Get the input SimulationParameter
    sim_par = SimulationParameter()
    sim_par.output.add_zone_energy_use()
    ddy_file = './tests/ddy/chicago.ddy'
    sim_par.sizing_parameter.add_from_ddy_996_004(ddy_file)
    sim_par.run_period.end_date = Date(1, 7)

    # create the IDF string for simulation parameters and model
    idf_str = '\n\n'.join((sim_par.to_idf(), model.to.idf(model)))

    # write the final string into an IDF
    idf = os.path.join(folders.default_simulation_folder, 'test_file_shd',
                       'in.idf')
    write_to_file(idf, idf_str, True)

    # prepare the IDF for simulation
    epw_file = './tests/simulation/chicago.epw'
    prepare_idf_for_simulation(idf, epw_file)

    # run the IDF through EnergyPlus
    sql, zsz, rdd, html, err = run_idf(idf, epw_file)

    assert os.path.isfile(sql)
    assert os.path.isfile(err)
    err_obj = Err(err)
    assert 'EnergyPlus Completed Successfully' in err_obj.file_contents
Example No. 15
    def to_idf(self, schedule_directory, include_datetimes=False):
        """IDF string representation of the schedule.

        Note that this method does both the production of the IDF string
        representation of the Schedule:File as well as the actual writing of
        the schedule to a CSV format that can be read in by EnergyPlus.

        Args:
            schedule_directory: [Required] Text string of a path to a folder on this
                machine to which the CSV version of the file will be written.
            include_datetimes: Boolean to note whether a column of datetime objects
                should be written into the CSV alongside the data. Default is False,
            which keeps the resulting CSV smaller, but you may want to include
            the datetimes in order to verify that values align with the expected
            timestep. Note that the included datetimes follow the EnergyPlus
            convention for aligning values to timesteps, meaning that each value
            is matched to the timestep at the end of the period over which that
            value applies.

        Returns:
            schedule_file --
            Text string representation of the Schedule:File describing this schedule.
        """
        # gather all of the data to be written into the CSV
        sched_data = [
            str(val) for val in self.values_at_timestep(self.timestep)
        ]
        if include_datetimes:
            sched_a_per = AnalysisPeriod(timestep=self.timestep,
                                         is_leap_year=self.is_leap_year)
            sched_data = ('{},{}'.format(
                dt, val) for dt, val in zip(sched_a_per.datetimes, sched_data))
        file_path = os.path.join(
            schedule_directory,
            '{}.csv'.format(self.identifier.replace(' ', '_')))

        # write the data into the file
        write_to_file(file_path, ',\n'.join(sched_data), True)

        # generate the IDF strings
        shc_typ = self._schedule_type_limit.identifier if \
            self._schedule_type_limit is not None else ''
        col_num = 1 if not include_datetimes else 2
        num_hrs = 8760 if not self.is_leap_year else 8784
        interp = 'No' if not self.interpolate else 'Yes'
        min_per_step = int(60 / self.timestep)
        fields = (self.identifier, shc_typ, file_path, col_num, 0, num_hrs,
                  'Comma', interp, min_per_step)
        schedule_file = generate_idf_string('Schedule:File', fields,
                                            self._schedule_file_comments)
        return schedule_file
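
A hedged call sketch (it assumes `schedule` is an existing ScheduleFixedInterval and the target folder exists; the path is a placeholder):

sched_file_str = schedule.to_idf('/path/to/sim/schedules')
# sched_file_str holds the Schedule:File IDF text; the CSV of values has
# already been written into the folder by the call above.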
Example No. 16
def test_run_idf_daylight_control():
    """Test Model.to.idf and run_idf with a model possessing daylight controls."""
    room = Room.from_box('TinyHouseZone', 5, 10, 3)
    room.properties.energy.program_type = office_program
    room.properties.energy.add_default_ideal_air()
    south_face = room[3]
    south_face.apertures_by_ratio(0.4, 0.01)

    room.properties.energy.add_daylight_control_to_center(0.8, 500, 0.5)
    sens_pt = room.properties.energy.daylighting_control.sensor_position
    assert sens_pt.x == pytest.approx(2.5, rel=1e-3)
    assert sens_pt.y == pytest.approx(5, rel=1e-3)
    assert sens_pt.z == pytest.approx(0.8, rel=1e-3)
    room.move(Vector3D(5, 5, 0))
    assert room.properties.energy.daylighting_control.sensor_position == \
        sens_pt.move(Vector3D(5, 5, 0))

    model = Model('TinyHouse', [room])

    # Get the input SimulationParameter
    sim_par = SimulationParameter()
    sim_par.output.add_zone_energy_use()
    ddy_file = './tests/ddy/chicago.ddy'
    sim_par.sizing_parameter.add_from_ddy_996_004(ddy_file)
    sim_par.run_period.end_date = Date(6, 7)
    sim_par.run_period.start_date = Date(6, 1)

    # create the IDF string for simulation parameters and model
    idf_str = '\n\n'.join((sim_par.to_idf(), model.to.idf(model)))

    # write the final string into an IDF
    idf = os.path.join(folders.default_simulation_folder, 'test_file_daylight',
                       'in.idf')
    write_to_file(idf, idf_str, True)

    # prepare the IDF for simulation
    epw_file = './tests/simulation/chicago.epw'
    prepare_idf_for_simulation(idf, epw_file)

    # run the IDF through EnergyPlus
    sql, zsz, rdd, html, err = run_idf(idf, epw_file)

    assert os.path.isfile(sql)
    assert os.path.isfile(err)
    err_obj = Err(err)
    assert 'EnergyPlus Completed Successfully' in err_obj.file_contents
Example No. 17
def _run_idf_unix(idf_file_path, epw_file_path=None, expand_objects=True):
    """Run an IDF file through energyplus on a Unix-based operating system.

    This includes both Mac OS and Linux since a shell will be used to run
    the simulation.

    Args:
        idf_file_path: The full path to an IDF file.
        epw_file_path: The full path to an EPW file. Note that inputting None here
            is only appropriate when the simulation is just for design days and has
            no weather file run period. (Default: None).
        expand_objects: If True, the IDF run will include the expansion of any
            HVAC Template objects in the file before beginning the simulation.
            This is a necessary step whenever there are HVAC Template objects in
            the IDF but it is unnecessary extra time when they are not present.
            Default: True.

    Returns:
        Path to the folder out of which the simulation was run.
    """
    # check and prepare the input files
    directory = prepare_idf_for_simulation(idf_file_path, epw_file_path)

    # generate various arguments to pass to the energyplus command
    epw_str = '-w "{}"'.format(os.path.abspath(epw_file_path))\
        if epw_file_path is not None else ''
    idd_str = '-i "{}"'.format(folders.energyplus_idd_path)
    expand_str = ' -x' if expand_objects else ''

    # write a shell file
    shell = '#!/usr/bin/env bash\n\ncd "{}"\n"{}" {} {}{}'.format(
        directory, folders.energyplus_exe, epw_str, idd_str, expand_str)
    shell_file = os.path.join(directory, 'in.sh')
    write_to_file(shell_file, shell, True)

    # make the shell script executable using subprocess.check_call
    # this is more reliable than native Python chmod on Mac
    subprocess.check_call(['chmod', 'u+x', shell_file])

    # run the shell script
    subprocess.call(shell_file)

    return directory
Example No. 18
def _run_osw_windows(osw_json, measures_only=True, silent=False):
    """Run a .osw file using the OpenStudio CLI on a Windows-based operating system.

    A batch file will be used to run the simulation.

    Args:
        osw_json: File path to an OSW file to be run using OpenStudio CLI.
        measures_only: Boolean to note whether only the measures should be applied
            in the running of the OSW (True) or the resulting model should be run
            through EnergyPlus after the measures are applied to it (False).
            Default: True.
        silent: Boolean to note whether the OSW should be run silently (without
            the batch window). If so, the simulation will be run using subprocess
            with shell set to True. (Default: False).

    Returns:
        Path to the folder out of which the OSW was run.
    """
    # check the input file
    directory = _check_osw(osw_json)

    if not silent:  # write the batch file to call OpenStudio CLI
        working_drive = directory[:2]
        measure_str = '-m ' if measures_only else ''
        batch = '{}\n"{}" -I "{}" run {}-w "{}"'.format(
            working_drive, folders.openstudio_exe,
            folders.honeybee_openstudio_gem_path, measure_str, osw_json)
        batch_file = os.path.join(directory, 'run_workflow.bat')
        write_to_file(batch_file, batch, True)
        os.system('"{}"'.format(batch_file))  # run the batch file
    else:  # run it all using subprocess
        cmds = [
            folders.openstudio_exe, '-I', folders.honeybee_openstudio_gem_path,
            'run', '-w', osw_json
        ]
        if measures_only:
            cmds.append('-m')
        process = subprocess.Popen(cmds, stdout=subprocess.PIPE, shell=True)
        # prevents the script from running before command is done
        process.communicate()

    return directory
Example No. 19
def _run_idf_unix(idf_file_path, epw_file_path, expand_objects=True):
    """Run an IDF file through energyplus on a Unix-based operating system.

    This includes both Mac OS and Linux since a shell will be used to run
    the simulation.

    Args:
        idf_file_path: The full path to an IDF file.
        epw_file_path: The full path to an EPW file.
        expand_objects: If True, the IDF run will include the expansion of any
            HVAC Template objects in the file before beginning the simulation.
            This is a necessary step whenever there are HVAC Template objects in
            the IDF but it is unnecessary extra time when they are not present.
            Default: True.

    Returns:
        Path to the folder out of which the simulation was run.
    """
    # check and prepare the input files
    directory = prepare_idf_for_simulation(idf_file_path, epw_file_path)

    # generate the object expansion string if requested
    if expand_objects:
        exp_path = os.path.join(folders.energyplus_path, 'ExpandObjects')
        exp_str = '{}\ntest -f expanded.idf && mv expanded.idf in.idf\n'.format(exp_path)
    else:
        exp_str = ''

    # write a shell file
    run_path = os.path.join(folders.energyplus_path, 'energyplus')
    shell = '#!/usr/bin/env bash\n\ncd {}\n{}{}'.format(directory, exp_str, run_path)
    shell_file = os.path.join(directory, 'in.sh')
    write_to_file(shell_file, shell, True)

    # make the shell script executable using subprocess.check_call
    # this is more reliable than native Python chmod on Mac
    subprocess.check_call(['chmod', 'u+x', shell_file])

    # run the shell script
    subprocess.call(shell_file)

    return directory
Example No. 20
def test_run_idf_hot_water():
    """Test the Model.to.idf and run_idf method with a model possessing hot water."""
    # Get input Model
    room = Room.from_box('TinyHouseZone', 5, 10, 3)
    room.properties.energy.program_type = office_program
    simple_office = ScheduleDay(
        'Simple Weekday', [0, 1, 0],
        [Time(0, 0), Time(9, 0), Time(17, 0)])
    schedule = ScheduleRuleset('Office Water Use', simple_office, None,
                               schedule_types.fractional)
    shw = ServiceHotWater('Office Hot Water', 0.1, schedule)
    room.properties.energy.service_hot_water = shw
    room.properties.energy.add_default_ideal_air()
    model = Model('TinyHouse', [room])

    # Get the input SimulationParameter
    sim_par = SimulationParameter()
    sim_par.output.add_zone_energy_use()
    ddy_file = './tests/ddy/chicago.ddy'
    sim_par.sizing_parameter.add_from_ddy_996_004(ddy_file)
    sim_par.run_period.end_date = Date(1, 7)

    # create the IDF string for simulation parameters and model
    idf_str = '\n\n'.join((sim_par.to_idf(), model.to.idf(model)))

    # write the final string into an IDF
    idf = os.path.join(folders.default_simulation_folder, 'test_file_shw',
                       'in.idf')
    write_to_file(idf, idf_str, True)

    # prepare the IDF for simulation
    epw_file = './tests/simulation/chicago.epw'
    prepare_idf_for_simulation(idf, epw_file)

    # run the IDF through EnergyPlus
    sql, zsz, rdd, html, err = run_idf(idf, epw_file)

    assert os.path.isfile(sql)
    assert os.path.isfile(err)
    err_obj = Err(err)
    assert 'EnergyPlus Completed Successfully' in err_obj.file_contents
Example No. 21
def _run_uwg_windows(uwg_json_path, epw_file_path, epw_name, silent=False):
    """Run a JSON file through the UWG on a Windows-based operating system.

    A batch file will be used to run the simulation unless silent is True.

    Args:
        uwg_json_path: The full path to a UWG JSON file.
        epw_file_path: The full path to an EPW file.
        epw_name: Text for the name of the EPW file.
        silent: Boolean to note whether the simulation should be run silently
            (without the batch window). If so, the simulation will be run using
            subprocess with shell set to True. (Default: False).

    Returns:
        File path to the morphed EPW. Will be None if the UWG failed to run.
    """
    directory = os.path.dirname(uwg_json_path)
    if not silent:  # run the simulations using a batch file
        working_drive = directory[:2]
        # write the batch file
        batch = '{}\ncd "{}"\n"{}" -m uwg simulate model "{}" "{}" --new-epw-dir "{}" ' \
            '--new-epw-name "{}"'.format(
                working_drive, directory, hb_folders.python_exe_path, uwg_json_path,
                epw_file_path, directory, epw_name)
        batch_file = os.path.join(directory, 'in.bat')
        write_to_file(batch_file, batch, True)
        os.system('"{}"'.format(batch_file))  # run the batch file
    else:  # run the simulation using subprocess
        cmds = [
            hb_folders.python_exe_path, '-m', 'uwg', 'simulate', 'model',
            uwg_json_path, epw_file_path, '--new-epw-dir', directory,
            '--new-epw-name', epw_name
        ]
        process = subprocess.Popen(cmds, stdout=subprocess.PIPE, shell=True)
        # prevents the script from running before command is done
        process.communicate()

    epw_file = os.path.join(directory, epw_name)
    return epw_file if os.path.isfile(epw_file) else None
Example No. 22
def _run_idf_windows(idf_file_path, epw_file_path, expand_objects=True):
    """Run an IDF file through energyplus on a Windows-based operating system.

    A batch file will be used to run the simulation.

    Args:
        idf_file_path: The full path to an IDF file.
        epw_file_path: The full path to an EPW file.
        expand_objects: If True, the IDF run will include the expansion of any
            HVAC Template objects in the file before beginning the simulation.
            This is a necessary step whenever there are HVAC Template objects in
            the IDF but it is unnecessary extra time when they are not present.
            Default: True.

    Returns:
        Path to the folder out of which the simulation was run.
    """
    # check and prepare the input files
    directory = prepare_idf_for_simulation(idf_file_path, epw_file_path)

    # generate the object expansion string if requested
    if expand_objects:
        exp_path = os.path.join(folders.energyplus_path, 'ExpandObjects')
        exp_str = '{}\nif exist expanded.idf MOVE expanded.idf in.idf\n'.format(exp_path)
    else:
        exp_str = ''

    # write a batch file
    run_path = os.path.join(folders.energyplus_path, 'EnergyPlus')
    working_drive = directory[:2]
    batch = '{}\ncd {}\n{}{}'.format(working_drive, directory, exp_str, run_path)
    batch_file = os.path.join(directory, 'in.bat')
    write_to_file(batch_file, batch, True)

    # run the batch file
    os.system(batch_file)

    return directory
Example No. 23
    def to_idf_collective_csv(schedules,
                              schedule_directory,
                              file_name,
                              include_datetimes=False):
        """Write several ScheduleFixedIntervals into the same CSV file and get IDF text.

        This method is useful when several ScheduleFixedInterval objects are serving a
        similar purpose and the data would be more easily managed if they all were in
        the same file.

        Args:
            schedules: A list of ScheduleFixedInterval objects to be written into
                the same CSV.
            schedule_directory: [Required] Text string of a full path to a folder on
                this machine to which the CSV version of the file will be written.
            file_name: Text string for the name to be used for the CSV file that
                houses all of the schedule data.
            include_datetimes: Boolean to note whether a column of datetime objects
                should be written into the CSV alongside the data. Default is False,
                which keeps the resulting CSV smaller, but you may want to include
                the datetimes in order to verify that values align with the
                expected timestep.

        Returns:
            schedule_files --
            A list of IDF text string representations of the Schedule:File describing
            this schedule.
        """
        # ensure that all is_leap_year values are the same
        init_lp_yr = schedules[0].is_leap_year
        for sch in schedules:
            assert sch.is_leap_year is init_lp_yr, 'All is_leap_year properties must ' \
                'match for several ScheduleFixedIntervals to be in the same CSV.'
        # find the greatest timestep of all the schedules
        max_timestep = max([sched.timestep for sched in schedules])
        # gather all of the data to be written into the CSV
        sch_ids = [sched.identifier for sched in schedules]
        sched_vals = [
            sched.values_at_timestep(max_timestep) for sched in schedules
        ]
        sched_data = [
            ','.join([str(x) for x in row]) for row in zip(*sched_vals)
        ]
        if include_datetimes:
            sched_a_per = AnalysisPeriod(timestep=max_timestep,
                                         is_leap_year=init_lp_yr)
            sched_data = ('{},{}'.format(
                dt, val) for dt, val in zip(sched_a_per.datetimes, sched_data))
            sch_ids = [''] + sch_ids
        sched_data = [','.join(sch_ids)] + sched_data
        file_path = os.path.join(schedule_directory,
                                 '{}.csv'.format(file_name.replace(' ', '_')))

        # write the data into the file
        write_to_file(file_path, ',\n'.join(sched_data), True)

        # generate the IDF strings
        schedule_files = []
        for i, sched in enumerate(schedules):
            shc_typ = sched._schedule_type_limit.identifier if \
                sched._schedule_type_limit is not None else ''
            col_num = 1 + i if not include_datetimes else 2 + i
            num_hrs = 8760 if not sched.is_leap_year else 8784
            interp = 'No' if not sched.interpolate else 'Yes'
            min_per_step = int(60 / max_timestep)
            fields = (sched.identifier, shc_typ, file_path, col_num, 1,
                      num_hrs, 'Comma', interp, min_per_step)
            schedule_files.append(
                generate_idf_string(
                    'Schedule:File', fields,
                    ScheduleFixedInterval._schedule_file_comments))
        return schedule_files
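
A hedged sketch of calling the collective variant (it assumes `occ_sched` and `light_sched` are ScheduleFixedInterval objects with matching is_leap_year values; the path is a placeholder):

sched_file_strs = ScheduleFixedInterval.to_idf_collective_csv(
    [occ_sched, light_sched], '/path/to/sim/schedules', 'shared_schedules')
# one Schedule:File IDF string per input schedule, all pointing
# at the single shared_schedules.csv written by the call above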