Example #1
def test_run_olderv(fresh_start):
    """Will run eplus on a file that needs to be upgraded"""

    file = './input_data/problematic/nat_ventilation_SAMPLE0.idf'
    wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'
    ar.run_eplus(file, wf, ep_version='8.9', annual=True,
                 expandobjects=True)
Example #2
def test_run_olderv_problematic(clean_config):
    """Will run eplus on a file that needs to be upgraded and that should
    fail. Will be ignored in the test suite"""

    file = "tests/input_data/problematic/RefBldgLargeOfficeNew2004_v1.4_7.2_5A_USA_IL_CHICAGO-OHARE.idf"
    wf = "tests/input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw"
    ar.run_eplus(file,
                 wf,
                 prep_outputs=True,
                 annual=True,
                 expandobjects=True,
                 verbose="q")
Example #3
def test_old_than_change_args(clean_config):
    """Should upgrade file only once even if run_eplus args are changed afterwards"""
    from archetypal import run_eplus

    file = (get_eplus_dirs(settings.ep_version) / "ExampleFiles" /
            "RefBldgQuickServiceRestaurantNew2004_Chicago.idf")
    epw = "tests/input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw"

    idf = run_eplus(file, epw, prep_outputs=True, output_report="sql_file")

    idf = run_eplus(file, epw, prep_outputs=True, output_report="sql_file")

    idf = run_eplus(file, epw, prep_outputs=True, output_report="sql")
Example #4
def test_run_olderv(clean_config, ep_version):
    """Will run eplus on a file that needs to be upgraded with one that does
    not"""
    ar.settings.use_cache = False
    files = [
        "tests/input_data/problematic/nat_ventilation_SAMPLE0.idf",
        get_eplus_dirs(settings.ep_version) / "ExampleFiles" /
        "5ZoneNightVent1.idf",
    ]
    wf = "tests/input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw"
    files = ar.copy_file(files)
    rundict = {
        file: dict(
            eplus_file=file,
            weather_file=wf,
            ep_version=ep_version,
            annual=True,
            prep_outputs=True,
            expandobjects=True,
            verbose="q",
            output_report="sql",
        )
        for file in files
    }
    result = {file: ar.run_eplus(**rundict[file]) for file in files}
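
The same rundict mapping also works with parallel_process, used in Example #12 below, assuming it is importable from the package root:

from archetypal import parallel_process, run_eplus

result = parallel_process(rundict, run_eplus)  # {file: simulation result}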
Example #5
def test_example_idf(processors, expandobjects, annual, fresh_start,
                     idf_source):
    """Will run all combinations of parameters defined above"""

    wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'
    idf = ar.copy_file(idf_source)
    return ar.run_eplus(idf, wf, processors=processors,
                        annual=annual, expandobjects=expandobjects)
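
The processors, expandobjects, annual and idf_source arguments are pytest fixtures defined elsewhere; a hypothetical parametrization that would produce "all combinations of parameters" might look like this (the values are illustrative):

import pytest

@pytest.mark.parametrize("processors", [-1, 1])
@pytest.mark.parametrize("expandobjects", [True, False])
@pytest.mark.parametrize("annual", [True, False])
def test_example_idf(processors, expandobjects, annual, fresh_start, idf_source):
    ...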
Example #6
def test_necb(config, fresh_start):
    import glob
    files = glob.glob("/Users/samuelduchesne/Dropbox/Polytechnique/Doc"
                      "/software/archetypal-dev/data/necb"
                      "/NECB_2011_Montreal_idf/*idf")
    files = copy_file(files)
    wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'
    return ar.run_eplus(files, wf, expandobjects=True, annual=True)
Example #7
def test_small_home_data(clean_config):
    file = (get_eplus_dirs(settings.ep_version) / "ExampleFiles" /
            "BasicsFiles" / "AdultEducationCenter.idf")
    file = ar.copy_file(file)
    wf = "tests/input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw"
    return ar.run_eplus(file,
                        wf,
                        prep_outputs=True,
                        design_day=True,
                        expandobjects=True,
                        verbose="q")
Example #8
def test_run(scratch_then_cache):
    f1 = './input_data/umi_samples/nat_ventilation_SAMPLE0.idf'
    f2 = './input_data/umi_samples' \
         '/no_shed_ventilation_and_no_mech_ventilation_SAMPLE0.idf'
    f3 = './input_data/umi_samples/no_shed_ventilation_SAMPLE0.idf'
    f4 = './input_data/umi_samples/shed_ventilation_SAMPLE0.idf'

    files = copy_file([f1, f2, f3, f4])
    wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'
    sql = ar.run_eplus(files, wf,
                       expandobjects=True, annual=True, processors=-1)
    np = ar.nominal_people(sql)
Example #9
    def test_add_object_and_run_ep(self, config, converttesteasy):
        # Get file paths and the IDF object used in this test from the fixture
        (
            idf,
            idf_file,
            weather_file,
            window_lib,
            trnsidf_exe,
            template,
            output_folder,
            kwargs,
        ) = converttesteasy

        ep_version = None
        # Adds Output variable in IDF
        outputs = [
            {
                "ep_object": "Output:Variable".upper(),
                "kwargs": dict(
                    Variable_Name="Zone Thermostat Heating Setpoint Temperature",
                    Reporting_Frequency="hourly",
                    save=True,
                ),
            },
            {
                "ep_object": "Output:Variable".upper(),
                "kwargs": dict(
                    Variable_Name="Zone Thermostat Cooling Setpoint Temperature",
                    Reporting_Frequency="hourly",
                    save=True,
                ),
            },
        ]

        # Runs EnergyPlus Simulation
        _, idf = run_eplus(
            idf_file,
            weather_file,
            output_directory=None,
            ep_version=ep_version,
            output_report=None,
            prep_outputs=outputs,
            design_day=False,
            annual=True,
            expandobjects=True,
            return_idf=True,
        )

        # Make sure the idf variable is an IDF object
        assert isinstance(idf, ar.idfclass.IDF)
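
Each prep_outputs entry pairs an EnergyPlus object class name with the fields to set on it. A sketch of requesting a meter instead of a variable, following the same structure (the field names follow EnergyPlus's Output:Meter object; treat the exact kwargs as an assumption):

extra_output = {
    "ep_object": "Output:Meter".upper(),
    "kwargs": dict(
        Key_Name="Electricity:Facility",
        Reporting_Frequency="hourly",
        save=True,
    ),
}
outputs.append(extra_output)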
Example #10
def run_schedules_idf(config):
    files = run_eplus(
        idf_file,
        weather_file="tests/input_data/CAN_PQ_Montreal.Intl.AP"
        ".716270_CWEC.epw",
        annual=True,
        readvars=True,
        include=[
            get_eplus_dirs(settings.ep_version) / "DataSets" / "TDV" /
            "TDV_2008_kBtu_CTZ06.csv"
        ],
        return_files=True,
    )
    cache_dir = files[1][0].dirname()
    csv = next(iter(cache_dir.glob("*out.csv")))
    yield csv
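
Since run_schedules_idf yields a csv path, a consuming test might look like this (hypothetical, assuming the function is registered as a pytest fixture):

import pandas as pd

def test_schedules_csv(run_schedules_idf):
    df = pd.read_csv(run_schedules_idf)
    assert not df.empty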
Example #11
def energy_series(config, request):
    from archetypal import ReportData

    outputs = {
        "ep_object": "Output:Variable".upper(),
        "kwargs": {
            "Key_Value": "OCCUPY-1",
            "Variable_Name": "Schedule Value",
            "Reporting_Frequency": "Hourly",
        },
    }
    wf = "tests/input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw"
    sql = ar.run_eplus(
        request.param,
        weather_file=wf,
        output_report="sql_file",
        prep_outputs=[outputs],
        annual=True,
        expandobjects=True,
    )
    report = ReportData.from_sqlite(
        sql,
        table_name=("Heating:Electricity", "Heating:Gas",
                    "Heating:DistrictHeating"),
    )

    hl = EnergySeries.from_sqlite(
        report,
        name="Heating",
        normalize=False,
        sort_values=False,
        concurrent_sort=False,
        to_units="kWh",
    )

    yield hl
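
request.param implies this fixture is parametrized; a sketch of how it might be declared, reusing an idf path from these examples (the actual parameter list is not shown):

import pytest

@pytest.fixture(params=["tests/input_data/umi_samples/nat_ventilation_SAMPLE0.idf"])
def energy_series(config, request):
    ...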
Example #12
def reduce(idf, output, weather, parallel, all_zones):
    """Perform the model reduction and translate to an UMI template file.

    IDF is one or multiple idf files to process.
    OUTPUT is the output file name (or path) to write to. Optional.
    """
    if parallel:
        # if parallel is True, run eplus in parallel
        rundict = {
            file: dict(
                eplus_file=file,
                weather_file=weather,
                annual=True,
                prep_outputs=True,
                expandobjects=True,
                verbose="v",
                output_report="sql",
                return_idf=False,
                ep_version=settings.ep_version,
            )
            for file in idf
        }
        res = parallel_process(rundict, run_eplus)
        res = _write_invalid(res)

        loaded_idf = {}
        for key, sql in res.items():
            loaded_idf[key] = {}
            loaded_idf[key][0] = sql
            loaded_idf[key][1] = load_idf(key)
        res = loaded_idf
    else:
        # else, run sequentially
        res = defaultdict(dict)
        invalid = []
        for i, fn in enumerate(idf):
            try:
                res[fn][0], res[fn][1] = run_eplus(
                    fn,
                    weather,
                    ep_version=settings.ep_version,
                    output_report="sql",
                    prep_outputs=True,
                    annual=True,
                    design_day=False,
                    verbose="v",
                    return_idf=True,
                )
            except EnergyPlusProcessError as e:
                invalid.append({"#": i, "Filename": fn.basename(), "Error": e})
        if invalid:
            filename = Path("failed_reduce.txt")
            with open(filename, "w") as failures:
                failures.writelines(tabulate(invalid, headers="keys"))
                log('Invalid run listed in "%s"' % filename)

    from archetypal import BuildingTemplate

    bts = []
    for fn in res.values():
        sql = next(
            iter([
                value for key, value in fn.items() if isinstance(value, dict)
            ]))
        idf = next(
            iter([
                value for key, value in fn.items() if isinstance(value, IDF)
            ]))
        bts.append(BuildingTemplate.from_idf(idf, sql=sql,
                                             DataSource=idf.name))

    output = Path(output)
    name = output.namebase
    ext = ".json"  # the template is always written as JSON
    dir_ = output.dirname()
    template = UmiTemplate(name=name, BuildingTemplates=bts)
    final_path: Path = dir_ / name + ext
    template.to_json(path_or_buf=final_path, all_zones=all_zones)
    log("Successfully created template file at {}".format(
        final_path.abspath()))
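
A minimal sketch of the contract parallel_process is assumed to fulfil above: map a callable over a {key: kwargs} dict and collect {key: result}. This is an illustration, not archetypal's implementation:

from concurrent.futures import ThreadPoolExecutor

def parallel_process_sketch(in_dict, function):
    # Submit one call per entry, unpacking the stored kwargs into the function.
    with ThreadPoolExecutor() as pool:
        futures = {key: pool.submit(function, **kwargs)
                   for key, kwargs in in_dict.items()}
        return {key: fut.result() for key, fut in futures.items()}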
Example #13
    def test_write_to_b18(self, config, converttesteasy):
        # Get file paths and the IDF object used in this test from the fixture
        (
            idf,
            idf_file,
            weather_file,
            window_lib,
            trnsidf_exe,
            template,
            output_folder,
            kwargs,
        ) = converttesteasy

        # Runs EnergyPlus Simulation
        res = run_eplus(
            idf_file,
            weather_file,
            output_directory=None,
            ep_version=None,
            output_report="htm",
            prep_outputs=True,
            design_day=True,
        )

        # Copy IDF object, making sure we don't change/overwrite original IDF file
        idf_2 = deepcopy(idf)

        # Clean names of idf objects (e.g. 'MATERIAL')
        log_clear_names = False
        clear_name_idf_objects(idf_2, log_clear_names)

        # Get old:new names equivalence
        old_new_names = pd.read_csv(
            os.path.join(
                settings.data_folder,
                Path(idf_file).basename().stripext() + "_old_new_names_equivalence.csv",
            )
        ).to_dict()

        # Get objects from IDF
        (
            buildingSurfs,
            buildings,
            constructions,
            equipments,
            fenestrationSurfs,
            globGeomRules,
            lights,
            locations,
            materialAirGap,
            materialNoMass,
            materials,
            peoples,
            versions,
            zones,
            zonelists,
        ) = get_idf_objects(idf_2)

        # Read a b18 file and write lines in variable (b18_lines)
        b18_path = "tests/input_data/trnsys/T3D_simple_2_zone.b18"
        with open(b18_path) as b18_file:
            b18_lines = b18_file.readlines()

        # Initialize the list of schedules that could not be written
        schedules_not_written = []

        # Gets conditioning (heating and cooling) info from simulation results
        heat_name = {}
        for i in range(0, len(res["Zone Sensible Heating"])):
            key = res["Zone Sensible Heating"].iloc[i, 0]
            name = "HEAT_z" + str(res["Zone Sensible Heating"].iloc[i].name)
            heat_name[key] = name
        cool_name = {}
        for i in range(0, len(res["Zone Sensible Cooling"])):
            key = res["Zone Sensible Cooling"].iloc[i, 0]
            name = "HEAT_z" + str(res["Zone Sensible Cooling"].iloc[i].name)
            cool_name[key] = name

        # Select only the first 2 zones
        zones = zones[0:2]
        peoples = peoples[0:2]
        equipments = equipments[0:2]
        lights = lights[0:2]

        # Writes infiltration in b18_lines (b18 file)
        infilt_to_b18(b18_lines, zones, res)

        # Tests both cases, whether schedules are taken as inputs or written in b18_lines
        for schedule_as_input in [True, False]:
            gains_to_b18(
                b18_lines,
                zones,
                zonelists,
                peoples,
                lights,
                equipments,
                schedules_not_written,
                res,
                old_new_names,
                schedule_as_input,
            )

        # Writes conditioning (heating and cooling) in b18_lines (b18 file)
        conditioning_to_b18(b18_lines, heat_name, cool_name, zones, old_new_names)

        # Asserts infiltration, internal gains and conditioning are written in b18_lines
        assert "INFILTRATION Constant" + "\n" in b18_lines
        assert " INFILTRATION = Constant" + "\n" in b18_lines
        assert any(peoples[0].Name in mystring for mystring in b18_lines[200:])
        assert any(lights[0].Name in mystring for mystring in b18_lines[200:])
        assert any(equipments[0].Name in mystring for mystring in b18_lines[200:])
        assert any(
            heat_name[old_new_names[zones[0].Name.upper()][0]] in mystring
            for mystring in b18_lines[200:]
        )
Example #14
    def test_write_gains_conditioning(self, config, converttest):
        # Get file paths and the IDF object used in this test from the fixture
        (
            idf,
            idf_file,
            weather_file,
            window_lib,
            trnsidf_exe,
            template,
            output_folder,
            _,
        ) = converttest

        # Gets EnergyPlus version
        ep_version = settings.ep_version

        # Adds Output variable in IDF
        outputs = [
            {
                "ep_object": "Output:Variable".upper(),
                "kwargs": dict(
                    Variable_Name="Zone Thermostat Heating Setpoint Temperature",
                    Reporting_Frequency="hourly",
                    save=True,
                ),
            },
            {
                "ep_object": "Output:Variable".upper(),
                "kwargs": dict(
                    Variable_Name="Zone Thermostat Cooling Setpoint Temperature",
                    Reporting_Frequency="hourly",
                    save=True,
                ),
            },
        ]

        # Run EnergyPlus Simulation
        _, idf = run_eplus(
            idf_file,
            weather_file,
            output_directory=None,
            ep_version=ep_version,
            output_report=None,
            prep_outputs=outputs,
            design_day=False,
            annual=True,
            expandobjects=True,
            return_idf=True,
        )

        # Output reports
        htm = idf.htm
        sql = idf.sql
        sql_file = idf.sql_file

        # Whether to log the name-clearing operations below
        log_clear_names = False

        # Clean names of idf objects (e.g. 'MATERIAL')
        idf_2 = deepcopy(idf)
        clear_name_idf_objects(idf_2, log_clear_names)

        # Get old:new names equivalence
        old_new_names = pd.read_csv(
            os.path.join(
                settings.data_folder,
                Path(idf_file).basename().stripext() + "_old_new_names_equivalence.csv",
            )
        ).to_dict()

        # Read IDF_T3D template and write lines in variable
        lines = io.TextIOWrapper(io.BytesIO(settings.template_BUI)).readlines()

        # Get objects from IDF file
        (
            buildingSurfs,
            buildings,
            constructions,
            equipments,
            fenestrationSurfs,
            globGeomRules,
            lights,
            locations,
            materialAirGap,
            materialNoMass,
            materials,
            peoples,
            versions,
            zones,
            zonelists,
        ) = get_idf_objects(idf_2)

        # Write GAINS (People, Lights, Equipment) from IDF to lines (T3D)
        _write_gains(equipments, lights, lines, peoples, htm, old_new_names)

        # Gets schedules from IDF
        schedule_names, schedules = _get_schedules(idf_2)

        # Adds ground temperature to schedules
        adds_sch_ground(htm, schedule_names, schedules)

        # Adds "sch_setpoint_ZONES" to schedules
        df_heating_setpoint = ReportData.from_sqlite(
            sql_file, table_name="Zone Thermostat Heating Setpoint Temperature"
        )
        df_cooling_setpoint = ReportData.from_sqlite(
            sql_file, table_name="Zone Thermostat Cooling Setpoint Temperature"
        )
        # Heating
        adds_sch_setpoint(
            zones, df_heating_setpoint, old_new_names, schedule_names, schedules, "h"
        )
        # Cooling
        adds_sch_setpoint(
            zones, df_cooling_setpoint, old_new_names, schedule_names, schedules, "c"
        )

        # Writes conditioning in lines
        schedule_as_input = True
        heat_dict, cool_dict = _write_conditioning(
            htm, lines, schedules, old_new_names, schedule_as_input
        )
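
The template is read with io.TextIOWrapper(io.BytesIO(...)), which exposes in-memory bytes as text lines. A self-contained illustration of that pattern (the byte string stands in for settings.template_BUI):

import io

template_bytes = b"TYPE 56\nZONE Z1\n"  # stand-in for settings.template_BUI
lines = io.TextIOWrapper(io.BytesIO(template_bytes), encoding="utf-8").readlines()
assert lines[0] == "TYPE 56\n"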
Example #15
def test_std(config, fresh_start):
    import glob
    files = glob.glob("./input_data/STD/*idf")
    files = copy_file(files)
    wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'
    return ar.run_eplus(files, wf, expandobjects=True, annual=True)
Example #16
def test_small_home_data(fresh_start):
    file = './input_data/regular/AdultEducationCenter.idf'
    wf = './input_data/CAN_PQ_Montreal.Intl.AP.716270_CWEC.epw'
    return ar.run_eplus(file, wf, expandobjects=True, annual=True)