Example #1
def test_reproduce_run_issue():
    """This is for use as a debugging tool.

    Add the files used in the run as reported/provided by a user.
    Make any additional changes required for reproducing/diagnosing the issue.
    """
    # update the following four lines if necessary
    ep_version = "8-9-0"
    idffile = "V8_9/smallfile.idf"
    iddfile = "Energy+V8_9_0.idd"
    epwfile = "USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw"

    _, eplus_home = paths_from_version(ep_version)
    idfname = os.path.join(IDF_FILES, idffile)
    iddfile = os.path.join(IDD_FILES, iddfile)
    epwfile = os.path.join(eplus_home, "WeatherData", epwfile)
    modeleditor.IDF.setiddname(iddfile, testing=True)
    idf = IDF(idfname, epwfile)
    # make any changes to the IDF here
    try:
        # Add any additional `run` kwargs here
        # `ep_version` kwarg is required due to buggy test isolation
        idf.run(output_directory="test_dir", ep_version=ep_version)
        # Add any tests for expected/unexpected outputs here
    except Exception:
        # Add any tests for expected/unexpected exceptions here
        raise
    finally:
        shutil.rmtree("test_dir", ignore_errors=True)
Example #3
def run_single(idf_name_in, epw_name):
    dir_name = os.path.dirname(os.path.realpath(__file__))
    idd_file = "C:/EnergyPlusV9-0-0/Energy+.idd"
    IDF.setiddname(idd_file)

    idf_path = dir_name + '/' + idf_name_in
    epw_path = dir_name + '/' + epw_name

    out = dir_name + '/out_' + idf_name_in.split('.idf')[0]

    idf = IDF(idf_path, epw_path)

    idf.run(output_directory=out,
            readvars=True,
            output_prefix=idf_name_in.split('.idf')[0],
            output_suffix='D')
Example #4
def run_single(idf_name, epw_name, n=None):
    print('Simulation ' + str(n) + ' starts.')
    dir_name = os.path.dirname(os.path.realpath(__file__))
    idd_file = "C:/EnergyPlusV9-0-0/Energy+.idd"
    IDF.setiddname(idd_file)

    idf_path = dir_name + '/' + idf_name
    epw_path = dir_name + '/' + epw_name

    out = dir_name + '/out_' + idf_name.split('.idf')[0]

    idf = IDF(idf_path, epw_path)

    idf.run(output_directory=out,
            readvars=True,
            output_prefix=idf_name.split('.idf')[0],
            output_suffix='D')

    print('Simulation ' + str(n) + ' ends.')
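# A hedged sketch, not part of the original example: the `n` argument and the
# start/end prints above suggest run_single is meant to be driven from several
# worker processes. One way that might look with the standard library is shown
# below; the idf/epw file names are hypothetical.
from multiprocessing import Pool

def run_all():
    cases = [
        ('model_a.idf', 'weather.epw', 1),
        ('model_b.idf', 'weather.epw', 2),
    ]
    # maxtasksperchild=1 gives each simulation a fresh process, so the
    # IDF.setiddname() call inside run_single is only made once per process.
    with Pool(processes=2, maxtasksperchild=1) as pool:
        pool.starmap(run_single, cases)

if __name__ == '__main__':
    run_all()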
Example #5
def get_idf(idf_file: str = config.files.get('idf'),
            idd_file: str = config.files.get('idd'),
            output_directory=config.out_dir) -> IDF:
    """Uses eppy to read an idf file and generate the corresponding idf object"""
    # Calling IDF.setiddname() a second time in the same process raises an error (IDDAlreadySetError)
    IDF.setiddname(idd_file)
    # TODO: Fix this rather than hiding it.
    # calling IDF causes a warning to appear, currently redirect_stdout hides this.
    with contextlib.redirect_stdout(None):
        idf = IDF(idf_file)
        # override the output location so I stop messing up
        idf.run = partial(idf.run, output_directory=output_directory)
        return idf
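# A minimal sketch, not part of the original get_idf: because IDF.setiddname()
# raises if it is called again in the same process (the reason for the comment
# above), a guard like the one below (the same try/except pattern used in a
# later example in this collection) lets get_idf be called repeatedly.
from eppy import modeleditor
from eppy.modeleditor import IDF

def set_idd_once(idd_file: str) -> None:
    """Set the IDD only if it has not already been set in this process."""
    try:
        IDF.setiddname(idd_file)
    except modeleditor.IDDAlreadySetError:
        # The first IDD stays in effect; a repeat call is safe to ignore.
        pass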
'''JSON parameters'''
with open('jsonOUTPUT_PMVOpt10.txt') as jsonParams:
    paramSet = json.load(jsonParams)
'''files used for energy plus simulation'''
iddfile = "C:\EnergyPlusV8-5-0\Energy+.idd"
fname = "SmallOffice.idf"
weatherfile = "USA_UT_Salt.Lake.City.Intl.AP.725720_TMY3.epw"
'''initialize idf file'''
IDF.setiddname(iddfile)
idfdevice = IDF(fname, weatherfile)
'''declare simulation run period'''
Begin_Month = '1'
Begin_Day_of_Month = '14'
End_Month = '1'
End_Day_of_Month = '24'
'''configure the idf file that will be used for simulations'''
configuresmalloffice(idfdevice, Begin_Month, Begin_Day_of_Month, End_Month,
                     End_Day_of_Month)
'''run parametric simulations'''
for i in range(N):
    # for i in range(50):
    '''update JSON file and input parameter array for training meta model'''
    runJSON = {}

    for obj in paramSet['input']:
        runJSON[obj['eppy json string']] = obj['Sample Values'][i]

    json_functions.updateidf(idfdevice, runJSON)
    '''run IDF and the associated batch file to export the custom csv output'''
    idfdevice.run(verbose='q')
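# A hedged variant, not in the original: the loop above runs every parameter
# set into the working directory, so each simulation overwrites the previous
# raw outputs unless the custom batch file copies them elsewhere. If the raw
# EnergyPlus outputs of each run need to be kept, per-run output directories
# and prefixes (hypothetical names below) can be passed to run():
for i in range(N):
    runJSON = {obj['eppy json string']: obj['Sample Values'][i]
               for obj in paramSet['input']}
    json_functions.updateidf(idfdevice, runJSON)
    idfdevice.run(verbose='q',
                  output_directory='run_{:03d}'.format(i),
                  output_prefix='case{}'.format(i))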
    def energy_signature(cls, iddfile, idf_path, epw_path, name):
        """
        Do ES and plot the results:
        1) Do Energy+ simulation
        2) read output simulation (eplusout.csv file)
        3) take consumption and temperature features
        4) compute Energy Signature by finding the best lines using The Ordinary Least Square algorithm.
        5) Plot the results.

        :param iddfile: Energy+.idd file path
        :param idf_path: idf file path
        :param epw_path: weather file path
        :param name: flag name indicating which type of idf is used
        :return: None

        """

        try:
            IDF.setiddname(iddfile)
        except eppy.modeleditor.IDDAlreadySetError:
            pass
        idf = IDF(idf_path, epw_path)
        idf.run(readvars=True)  # Eplus Simulation

        fname = 'eplusout_' + str(name) + '.csv'
        os.system(f'cp eplusout.csv ../eplus_simulation/eplus/{fname}')

        df1 = pd.read_csv(f'../eplus_simulation/eplus/{fname}')

        df = pd.DataFrame()
        # Temperature
        for i in df1.columns:
            if 'BLOCCO1:ZONA1' in i:
                if 'Zone Operative Temperature [C](Hourly)' in i:
                    df["Temp_in_1"] = df1[i]
            elif 'BLOCCO1:ZONA2' in i:
                if 'Zone Operative Temperature [C](Hourly)' in i:
                    df["Temp_in_2"] = df1[i]
            elif 'BLOCCO1:ZONA3' in i:
                if 'Zone Operative Temperature [C](Hourly)' in i:
                    df["Temp_in_3"] = df1[i]
        df['Temp_out'] = df1[
            'Environment:Site Outdoor Air Drybulb Temperature [C](Hourly)']
        # Power
        df['Cooling'] = df1['DistrictCooling:Facility [J](Hourly)']
        df['Heating'] = df1['DistrictHeating:Facility [J](Hourly)']
        df['Electricity'] = df1['Electricity:Facility [J](Hourly)']

        df['date'] = df1['Date/Time'].astype('O')
        df['date'] = df['date'].map(
            lambda x: x if '24:00' not in x else x.replace('24:00', '00:00'))
        df = df.set_index('date')
        # idx = df['date'].map(lambda x: x if '24:00' not in x else x.replace('24:00', '00:00'))
        # df.set_index(idx)
        df = df.set_index(pd.to_datetime('2018/' + df.index))
        df['Temp_in'] = df[['Temp_in_1', 'Temp_in_2',
                            'Temp_in_3']].astype(float).mean(1)
        df['deltaT'] = df['Temp_in'] - df['Temp_out']
        df.to_csv(f'../../files/outputs/en_sig_{name}.csv')

        # ============================================================
        # Energy Signature: HOURLY
        # ============================================================
        # HEATING
        heating_df = df.where(df['Heating'] / 3.6e6 > 0.2).dropna()
        heating_df = heating_df.resample('H').mean()
        heating_df = heating_df.dropna()
        model_H = sm.OLS(heating_df['Heating'] / (3.6e6),
                         sm.add_constant(heating_df['Temp_out']))
        results_h = model_H.fit()
        # COOLING
        cool_df = df.where(df['Cooling'] / 3.6e6 > 0.5).dropna()
        cool_df = cool_df.resample('H').mean()
        cool_df = cool_df.dropna()
        model_C = sm.OLS(cool_df['Cooling'] / (3.6e6),
                         sm.add_constant(cool_df['Temp_out']))
        results_c = model_C.fit()
        # Plots
        fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(10, 10))
        fig.suptitle("Energy Signature")
        ax1.plot(heating_df['Temp_out'], results_h.predict(), label='Heating')
        ax1.plot(cool_df['Temp_out'], results_c.predict(), label='Cooling')
        ax1.scatter(heating_df['Temp_out'], heating_df['Heating'] / (3.6e6))
        ax1.scatter(cool_df['Temp_out'], cool_df['Cooling'] / (3.6e6))

        ax1.set_xlabel('T_out [°C]')
        ax1.set_ylabel('Energy Consumption [kWh]')
        ax1.set_title('Hourly resample')
        ax1.legend()
        ax1.grid(linestyle='--', linewidth=.4, which='both')

        # ============================================================
        # Energy Signature: DAILY
        # ============================================================
        # HEATING
        heating_df = df.where(df['Heating'] / 3.6e6 > 0.2).dropna()
        heating_df = heating_df.resample('D').mean()
        heating_df = heating_df.dropna()
        model_h = sm.OLS(heating_df['Heating'] / 3.6e6,
                         sm.add_constant(heating_df['Temp_out']))
        results_d_h = model_h.fit()

        # COOLING
        cool_df = df.where(df['Cooling'] / 3.6e6 > 0.5).dropna()
        cool_df = cool_df.resample('D').mean()
        cool_df = cool_df.dropna()
        model_c = sm.OLS(cool_df['Cooling'] / (3.6e6),
                         sm.add_constant(cool_df['Temp_out']))
        results_d_c = model_c.fit()

        ax2.plot(heating_df['Temp_out'],
                 results_d_h.predict(),
                 label='Heating')
        ax2.plot(cool_df['Temp_out'], results_d_c.predict(), label='Cooling')
        ax2.scatter(heating_df['Temp_out'], heating_df['Heating'] / (3.6e6))
        ax2.scatter(cool_df['Temp_out'], cool_df['Cooling'] / (3.6e6))
        ax2.set_xlabel('T_out [°C]')
        ax2.set_ylabel('Energy Consumption [kWh]')
        ax2.set_title('Daily resample')
        ax2.legend()
        ax2.grid(linestyle='--', linewidth=.4, which='both')

        # ============================================================
        # Energy Signature: WEEK
        # ============================================================
        # HEATING
        heating_df = df.where(df['Heating'] / 3.6e6 > 0.2).dropna()
        heating_df = heating_df.resample('W').mean()
        heating_df = heating_df.dropna()
        model_h = sm.OLS(heating_df['Heating'] / (3.6e6),
                         sm.add_constant(heating_df['Temp_out']))
        results_w_h = model_h.fit()

        # COOLING
        cool_df = df.where(df['Cooling'] / 3.6e6 > 0.5).dropna()
        cool_df = cool_df.resample('W').mean()
        cool_df = cool_df.dropna()
        model_c = sm.OLS(cool_df['Cooling'] / (3.6e6),
                         sm.add_constant(cool_df['Temp_out']))
        results_w_c = model_c.fit()

        ax3.plot(heating_df['Temp_out'],
                 results_w_h.predict(),
                 label='Heating')
        ax3.plot(cool_df['Temp_out'], results_w_c.predict(), label='Cooling')
        ax3.scatter(heating_df['Temp_out'], heating_df['Heating'] / (3.6e6))
        ax3.scatter(cool_df['Temp_out'], cool_df['Cooling'] / (3.6e6))
        ax3.set_xlabel('T_out [°C]')
        ax3.set_ylabel('Energy Consumption [kWh]')
        ax3.set_title('Weekly resample')
        ax3.legend()
        ax3.grid(linestyle='--', linewidth=.4, which='both')
        plt.subplots_adjust(bottom=0.3, right=0.8, top=0.9, hspace=1)
        plt.savefig(fname=f'../../plots/energy_signature_{name}_tout.png',
                    dpi=400)
        plt.close()
energyoutputs = []
for file in dirs:
    idfrun = file[10:-4]
    runnumber = idfrun[-2:]

    idfname = folderpath + '/' + file
    epwfile = "C:/EnergyPlusV8-9-0/WeatherData/GBR_London.Gatwick.037760_IWEC.epw"
    idf = IDF(idfname, epwfile)

    #% Run the idf file
    idf.run(output_directory=newpath,
            output_prefix=rundate + idfrun + '_',
            expandobjects=True,
            readvars=True)  # eppy's run() takes `readvars`, not `readvarsESO`

    # results are in this folder:
    # file:///C:/Users/mkj32/eplustbl.htm

    #% Read outputs
    csvfname = newpath + '/' + rundate + idfrun + '_tbl.csv'
    with open(csvfname, 'r') as table:
        # TotalSiteEnergy = myreader[14]
        data = list(csv.reader(table))
        TotalSiteEnergy = data[14][2]
        EnergyPerArea = data[14][3]
        BuildingArea = data[41][2]
        ElectricityUse = data[64][2]
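# A hedged alternative, not from the original: hard-coded row/column indices
# such as data[14][2] break whenever the tabular report layout changes. A more
# robust pattern is to search the rows for the label of interest; the label
# text below ('Total Site Energy') is an assumption, so check it against your
# own *_tbl.csv before relying on it.
def find_table_value(rows, label, column=2):
    """Return the cell in `column` of the first row whose leading cells contain `label`."""
    for row in rows:
        if any(label in cell for cell in row[:2]):
            return row[column]
    raise KeyError(label)

# total_site_energy = find_table_value(data, 'Total Site Energy')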
Example #9
class EnergyPlusHelper:
    """ A class for the EnergyPlus communicator
    """

    def __init__(self,
                 idf_path,
                 idd_path=None,
                 weather_path=None,
                 output_path=None,
                 ):
        """ New instance of the `EnergyPlusHelper` class

        Parameters
        ----------
        idf_path : string
            the file name, with extension (.idf), of the input file to simulate;
            include a relative path if it is not in the working directory.
        idd_path : string
            the Input Data Dictionary (.idd) file path
        weather_path : string
            the weather file path (.epw)
        output_path : string
            the directory to write the result files to. Defaults to the current directory.
        Examples
        ----------
        >>> from EPinterface.EPHelper import EnergyPlusHelper
        >>> ep = EnergyPlusHelper(idf_path="D:/F/uniopt/EP/singleZonePurchAir_template.idf",
        ...                       idd_path="D:/F/uniopt/EP/E+.idd",
        ...                       weather_path="D:/F/uniopt/EP/in.epw")
        """
        self.idf_path = idf_path
        self.idd_path = idd_path
        # TODO: handle the weather file path
        # self.weather_path = "/mnt/c/EnergyPlusV9-1-0/WeatherData/USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"
        self.weather_path = "C:/EnergyPlusV9-1-0/WeatherData/USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"
        self.output_path = output_path
        self.run_filename = "in.idf"
        # IDF.setiddname(idd_path) if self.idf_file else 1
        # TODO : get the path of E+ install as the default .idd is needed.
        # IDF.setiddname("/mnt/d/F/uniopt/EP/Energy+.idd")
        IDF.setiddname("D:/F/uniopt/EP/Energy+.idd")
        self.idf = IDF(self.idf_path, self.weather_path)

    def get_all_objects(self):
        """ returns all the idf file objects

        Returns
        -------
        Iterable
            list of idf objects as dictionaries

        """
        objects = []
        for obj in getidfobjectlist(self.idf):
            one_obj = list(zip(obj.fieldnames, obj.fieldvalues))
            # TODO: prior to Python 3.6 Dicts are not ordered and cannot be forced to be
            #  alt solution :   one_obj = OrderedDict(one_obj)
            #  but we'll need to find a way to dump it into JSON for the ui to be ordered
            dict_obj = {}
            for (k, v) in one_obj:
                dict_obj[k] = v
            objects.append(dict_obj)
        return objects

    def get_object_fields(self, obj_name):
        """ returns the list of all object fields

        Parameters
        ----------
        obj_name : string
            name of the object to get the fields for

        Returns
        -------
        Iterable
            list of the fields of the object.
            TODO : Handle the return of multiple objects, they will have the same name in the UI.
            https://eppy.readthedocs.io/en/latest/Main_Tutorial.html#working-with-e-objects
        """
        objects = self.idf.idfobjects[obj_name]
        fields = []
        for obj in objects:
            for field in obj.fieldnames:
                fields.append({field: getattr(obj, field)})
        return fields

    def get_field_val(self, obj_name, fld_name):
        """ get multiple fields of multiple objects at once

        Parameters
        ----------
        obj_name : list
            list of the objects names
        fld_name : list
            list of fields names

        Returns
        -------
        Iterable
            list of values


        """
        fields = []
        for obj_name, fld_name in zip(obj_name, fld_name):
            # TODO: multiple objects same name?
            obj = self.idf.idfobjects[obj_name]
            for Oneobj in obj:
                fields.append(getattr(Oneobj, fld_name))

        return fields

    def set_field_val(self, obj_name, fld_name, val):
        """ set multiple fields of multiple objects at once

        Parameters
        ----------
        obj_name : list
            list of the objects names
        fld_name : list
            list of fields names
        val : list
            list of values.

        Examples
        ----------
        >>> ep = EnergyPlusHelper(idf_path="D:/F/uniopt/EP/singleZonePurchAir_template.idf",
        ...                       idd_path="D:/F/uniopt/EP/E+.idd",
        ...                       weather_path="D:/F/uniopt/EP/in.epw")
        >>> ep.set_field_val(obj_name=['BUILDING', 'MATERIAL'],
        ...                  fld_name=['North_Axis', 'Thickness'],
        ...                  val=[32., 0.02])

        """
        for obj_name, fld_name, val in zip(obj_name, fld_name, val):
            objects = self.idf.idfobjects[obj_name]
            # Loop to handle multiple objects of the same object
            # TODO: multiple objects same name?
            for obj in objects:
                setattr(obj, fld_name, val)

    def set_output_path(self, output_path):
        """

        Parameters
        ----------
        output_path : str
            the desired path of the output files

        """
        self.output_path = output_path

    def get_output_path(self):
        """ get the output path

        Returns
        -------
        str

        """
        return self.output_path

    def run_idf(self):
        """ starts the simulations for the given idf parameters

        """
        # self.idf.saveas(self.run_filename)
        #
        # cmd = "energyplus "
        # if self.output_path:
        #     cmd += " -d " + self.output_path
        # cmd += " " + self.run_filename
        # os.system(cmd)
        # self.idf.run(weather="/mnt/c/EnergyPlusV9-1-0/WeatherData/USA_"
        # TODO: weather file path will be required per run or at least list them to choose
        #  , of course people won't simulate with any weather file :D
        self.idf.run(weather="C:/EnergyPlusV9-1-0/WeatherData/USA_"
                             "CA_San.Francisco.Intl.AP.724940_TMY3.epw", output_directory=self.output_path)

    def get_results(self):
        """ returns the output:variable data with the simulation values in a dictionary

        Returns
        -------
        dict

        """
        path_to_eso = self.output_path + '/eplusout.eso'
        dd = eso.read_from_path(path_to_eso)

        return dd.get_vars()
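# A hedged usage sketch, not part of the original class; the paths mirror the
# docstring examples above and are hypothetical. It tweaks one field, runs the
# simulation, and reads the results back from the .eso file.
ep = EnergyPlusHelper(idf_path="D:/F/uniopt/EP/singleZonePurchAir_template.idf",
                      idd_path="D:/F/uniopt/EP/E+.idd",
                      output_path="D:/F/uniopt/EP/out")
ep.set_field_val(obj_name=['BUILDING'], fld_name=['North_Axis'], val=[90.0])
print(ep.get_field_val(obj_name=['BUILDING'], fld_name=['North_Axis']))
ep.run_idf()
results = ep.get_results()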
Example #10
class TestIDFRunner(object):

    """Tests for running EnergyPlus from an IDF object.
    """

    def setup(self):
        """Tidy up anything left from previous runs. Get an IDF object to run.
        """
        outdir = os.path.join(THIS_DIR, 'run_outputs')
        if os.path.isdir(outdir):
            shutil.rmtree(outdir)
        iddfile = os.path.join(IDD_FILES, TEST_IDD)
        fname1 = os.path.join(IDF_FILES, TEST_IDF)
        IDF.setiddname(iddfile, testing=True)
        self.idf = IDF(fname1, TEST_EPW)

        self.expected_files = [
            u'eplusout.audit', u'eplusout.bnd', u'eplusout.eio',
            u'eplusout.end', u'eplusout.err', u'eplusout.eso',
            u'eplusout.mdd', u'eplusout.mtd', u'eplusout.rdd',
            u'eplusout.shd', u'eplustbl.htm', u'sqlite.err', ]
        self.expected_files_suffix_C = [
            u'eplus.audit', u'eplus.mdd', u'eplus.err',
            u'eplusSqlite.err', u'eplus.eio', u'eplusTable.htm', u'eplus.shd',
            u'eplus.mtd', u'eplus.bnd', u'eplus.eso', u'eplus.rdd',
            u'eplus.end']
        self.expected_files_suffix_D = [
            u'eplus.audit', u'eplus.mdd', u'eplus-sqlite.err',
            u'eplus-table.htm', u'eplus.err', u'eplus.eio', u'eplus.bnd',
            u'eplus.shd', u'eplus.mtd', u'eplus.end', u'eplus.eso',
            u'eplus.rdd']

    def teardown(self):
        """Destroy temp dir, reset working directory, destroy outputs.
        """
        os.chdir(THIS_DIR)
        shutil.rmtree('run_outputs', ignore_errors=True)
        shutil.rmtree('other_run_outputs', ignore_errors=True)
        shutil.rmtree("test_results", ignore_errors=True)

    def num_rows_in_csv(self, results='./run_outputs'):
        """Check readvars outputs the expected number of rows.
        """
        with open(os.path.join(results, 'eplusout.csv'), 'r') as csv_file:
            return len(csv_file.readlines())

    def test_run(self):
        """
        End to end test of idf.run function.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.run(output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        assert set(files) == set(self.expected_files)

    def test_run_readvars(self):
        """
        End to end test of idf.run function with readvars set True.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.run(readvars=True, output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        self.expected_files.extend([u'eplusout.rvaudit', u'eplusout.csv'])
        assert set(files) == set(self.expected_files)

    def test_run_annual(self):
        """
        End to end test of idf.run function with annual set True.
        Fails on incorrect size of CSV output, severe errors or
        unexpected/missing output files.

        """
        self.idf.idfobjects['RUNPERIOD'][0].End_Month = 1
        self.idf.run(
            annual=True, readvars=True, output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        self.expected_files.extend([u'eplusout.rvaudit', u'eplusout.csv'])
        assert set(files) == set(self.expected_files)
        assert self.num_rows_in_csv() == 35041  # 24 * 365 * 4 + 1 header row

    def test_run_output_directory(self):
        """
        End to end test of idf.run function with a specific output dir set.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.run(output_directory='other_run_outputs')
        assert not has_severe_errors('other_run_outputs')
        files = os.listdir('other_run_outputs')
        self.expected_files.extend([])
        assert set(files) == set(self.expected_files)

    def test_run_design_day(self):
        """
        End to end test of idf.run function with design_day flag set True.
        Fails on incorrect size of CSV output, severe errors or
        unexpected/missing output files.

        """
        self.idf.run(
            design_day=True, readvars=True, output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        self.expected_files.extend([u'eplusout.rvaudit', u'eplusout.csv'])
        assert set(files) == set(self.expected_files)
        assert self.num_rows_in_csv() == 193  # 24 * 8 + 1 header row

    def test_run_epmacro(self):
        """
        End to end test of idf.run function with epmacro flag set True.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.run(epmacro=True, output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        self.expected_files.extend([u'eplusout.epmdet', u'eplusout.epmidf'])
        assert set(files) == set(self.expected_files)

    def test_run_expandobjects(self):
        """
        End to end test of idf.run function with expandobjects flag set to
        True.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.newidfobject(
            'HVACTEMPLATE:THERMOSTAT',
            Name="TestThermostat",
            Cooling_Setpoint_Schedule_Name="",
            Heating_Setpoint_Schedule_Name="",
            Constant_Cooling_Setpoint=25,
            Constant_Heating_Setpoint=21,
        )
        self.idf.run(expandobjects=True, output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        self.expected_files.extend([u'eplusout.expidf'])
        assert set(files) == set(self.expected_files)

    def test_run_output_prefix(self):
        """
        End to end test of idf.run function with output prefix set.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.run(output_prefix='test', output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        prefixed_files = [f.replace('eplus', 'test')
                          for f in self.expected_files]
        assert set(files) == set(prefixed_files)

    def test_run_output_suffix_L(self):
        """
        End to end test of idf.run function with output suffix set.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.run(output_suffix='L', output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        assert set(files) == set(self.expected_files)

    def test_run_output_suffix_C(self):
        """
        End to end test of idf.run function with output suffix set.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.run(output_suffix='C', output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        assert set(files) == set(self.expected_files_suffix_C)

    def test_run_output_suffix_D(self):
        """
        End to end test of idf.run function with output suffix set.
        Fails on severe errors or unexpected/missing output files.

        """
        self.idf.run(output_suffix='D', output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        assert set(files) == set(self.expected_files_suffix_D)

    def test_run_IDD(self):
        """
        End to end test of idf.run function with a different IDD set.
        We use an old IDD here since it throws an error we can see. Real uses
        of this option would include using an IDD with extra fields added to
        its extensible fields to allow larger objects.
        Fails if the expected IDD version is not listed in the error file.

        """
        other_idd = os.path.join(IDD_FILES, TEST_OLD_IDD)
        self.idf.run(idd=other_idd, output_directory='run_outputs')
        with open('run_outputs/eplusout.err', 'r') as errors:
            assert "IDD_Version 8.1.0.009" in errors.readline()

    def test_version(self, capfd):
        """
        End to end test of idf.run function with the version flag set True.
        Fails if the expected EnergyPlus version number is not returned.

        """
        self.idf.run(version=True)
        out, _err = capfd.readouterr()

        expected_version = VERSION.replace('-', '.')
        version_string = "EnergyPlus, Version {}".format(expected_version)

        assert out.strip().startswith(version_string)

    def test_help(self, capfd):
        """
        Test of calling the `help` built-in function on an IDF object.
        Fails if the expected help output is not returned.

        """
        help(self.idf.run)
        out, _err = capfd.readouterr()
        expected = "Help on method run in module eppy.modeleditor:"

        assert out.strip().startswith(expected)

    def test_verbose(self, capfd):
        """
        End to end test of idf.run function with verbose set to 'v'.
        Fails on severe errors or unexpected/missing output files.
        Fails if no output received from EnergyPlus.

        """
        self.idf.run(verbose='v', output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        self.expected_files.extend([])
        assert set(files) == set(self.expected_files)
        out, _err = capfd.readouterr()
        assert len(out) > 0

    def test_quiet(self, capfd):
        """
        End to end test of idf.run function with verbose set to 'q' (quiet).
        Fails on severe errors or unexpected/missing output files.
        Fails if output received from EnergyPlus.

        """
        self.idf.run(verbose='q', output_directory='run_outputs')
        assert not has_severe_errors()
        files = os.listdir('run_outputs')
        self.expected_files.extend([])
        assert set(files) == set(self.expected_files)
        out, _err = capfd.readouterr()
        assert len(out) == 0
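# The tests above call a has_severe_errors() helper that is not shown in this
# excerpt. A minimal sketch of what such a check might look like is below,
# assuming the eplusout.end summary line contains text like
# '... 0 Severe Errors; ...'; the helper in eppy's own test suite may differ.
import os
from glob import glob

def has_severe_errors(results='run_outputs'):
    """Return True if the .end file in `results` reports any severe errors."""
    end_file = glob(os.path.join(results, '*.end'))[0]
    with open(end_file) as f:
        summary = f.read()
    num_severe = int(summary.split('Severe Errors')[0].split()[-1])
    return num_severe > 0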
Example #11
CSVDir = DirName+'/runtrial/WeatherFileNameList.csv'
WriteEPWNameToCSV(epwDir, CSVDir, 8)
weatherfilename_list = ReadFileNameInCsv(CSVDir)

print(weatherfilename_list)

##run with different locations
for i in weatherfilename_list:
    epwname = epwDir + i + '.epw'  ## Before writing the path, put the weather files in the EnergyPlus WeatherData folder
    ddyname = ddyDir + i +'.ddy'
    fname1 = DirName + '/runtrial/TrialKA2_Unsized.idf'
    idf1 = IDF(fname1, epwname)
    UpdateLocationInfinIDF(idf1,ddyname)
    ##idf1.printidf()
    building = idf1.idfobjects['BUILDING'][0]
    building.Name = "KA2 A Flatroof Sample Building"
    objectlist = idf1.idfobjects
    rundirname = u'../eppy_energy-/runtrial/'
    resultsdir = rundirname+'results'+i
    ##os.makedirs(resultsdir)
    idf1.saveas(DirName + "/idfs/"+i+'.idf')
    idf1.run(output_directory = resultsdir)

Axis = 0
while Axis in range(0,360):
    building.North_Axis = Axis
    ##idf1.saveas(DirName+'/idfs/Axises/'+str(Axis)+".idf")
    resultsdirAxis= rundirname+'results'+i+'Axis'+str(Axis)
    ##os.makedirs(resultsdirAxis)
    ##idf1.run(output_directory = resultsdirAxis)
    Axis += 45
Example #12
def get_energy_usage(idf: IDF, obj_list=None) -> float:
    """	get_energy_usage(IDF, list)

		Runs an idf, and extracts the building's energy usage
		from that run.

		Args:
			idf(IDF): The idf to be run.
			obj_list(list): ONLY USED IF VERBOSE IS TRUE FOR
					DEBUGGING PURPOSES. This variable
					prints the final state of each of
					the idf's objects that are being
					changed.
	"""

    #: Verbose is used to help debug, when true it prints
    #: additional information about each run.
    verbose = False

    #: Try to run the idf. If an error is caught during this execution
    #: then the idf is unrunable, and we will return an energy usage of inf
    try:

        #: Run the idf. If verbose is False, we log that we're running the
        #: idf, and when it's finished, but we suppress the output from the
        #: idf's actual run. If verbose is True, we instead run the idf
        #: without suppressing its output.
        if not verbose:
            print("Running idf...")
            idf.run(verbose='q')  #: verbose='q' suppresses EnergyPlus output
            printSuccess("Done.")
        else:
            idf.run()
    except Exception:
        #: If an exception is raised, print that we failed to run the idf
        #: and return inf. NOTE: if verbose is True, we also print all of
        #: the objects being looked at, so that we can analyze why the idf
        #: is unrunnable.
        printErr("Failed to run idf. Is it valid?")
        if verbose and obj_list is not None:
            for obj in obj_list:
                print(get_idfobject_from_name(idf, obj[0]))

        #: return inf
        return float("inf")

    #: Create a variable to hold the sum of the energy used.
    energy_used = 0.0

    #: Open the meter file to read what the energy usages were.
    with open("eplusout.mtr", 'r') as filestring:

        #: Read all the lines in the file and split them into separate
        #: strings in a list, so each line can be analyzed separately.
        strings = filestring.read().split('\n')
        for line in strings[35:]:

            #: If the line has the id 13 (i.e. the first entry is 13) then
            #: this is one of the meters we want, so take the value in the
            #: second CSV field, read it as a float, and add it to the
            #: total energy used.
            if line[:3] == "13,":
                energy_used += float(line.split(',')[1])

            #: Likewise for lines with the id 274: take the second CSV
            #: field as a float and add it to the total energy used.
            elif line[:4] == "274,":
                energy_used += float(line.split(',')[1])

    #: Return the total amount of energy used.
    return energy_used
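# A hedged sketch, not part of the original: rather than hard-coding meter
# report IDs 13 and 274, the data dictionary at the top of eplusout.mtr can be
# used to look the IDs up by meter name. The meter names below are examples
# only; check your own eplusout.mtr for the names actually reported.
def sum_meters(mtr_path="eplusout.mtr", wanted=("Electricity:Facility",)):
    ids = set()
    total = 0.0
    in_dictionary = True
    with open(mtr_path) as mtr:
        for raw in mtr:
            parts = raw.strip().split(',')
            if parts[0] == "End of Data Dictionary":
                in_dictionary = False
                continue
            if in_dictionary:
                # Dictionary rows look like: 13,1,Electricity:Facility [J] !Hourly
                if len(parts) >= 3 and any(name in parts[2] for name in wanted):
                    ids.add(parts[0])
            elif parts[0] in ids and len(parts) >= 2:
                total += float(parts[1])
    return total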
Example #13
        #Create folder for each model
        TypeDir = GeneratedIDFdir + LargeIDFLs[j].split('.')[0] + '\\'
        createFolder(TypeDir)

        building = idf1.idfobjects['BUILDING'][0]
        #Loop for different orientations
        for axis in [0, 90, 180, 270]:
            building.North_Axis = axis
            LocationIDF = TypeDir + 'Orientation_' + str(axis) + '.idf'
            idf1.saveas(LocationIDF)

            #Define the folder path to store the results
            resultsdir = rundirname + 'Results\\' + i + '\\' + LargeIDFLs[
                j].split('.')[0] + '\\' + str(axis)
            createFolder(resultsdir)
            idf1.run(output_directory=resultsdir, expandobjects=True)

            #Parse the peak load results
            d = ReadHVACdesignLoads(resultsdir + '\\eplustbl.htm')
            print(d)

            #Store the peak load data to the lists
            for v in range(15):
                CoolingLoads.append(d.get('LoadClg_' + d.get('Zonename1')))
                ClgSensible.append(
                    d.get('LoadClg' + d.get('Zonename1') + '_Sensible'))
                ClgLatent.append(
                    d.get('LoadClg' + d.get('Zonename1') + '_Latent'))
                HeatingLoads.append(d.get('LoadHtg_' + d.get('Zonename1')))
                HtgSensible.append(
                    d.get('LoadHtg' + d.get('Zonename1') + '_Sensible'))
Example #14
import sys
import os
from eppy import modeleditor
from eppy.modeleditor import IDF

archivoIDD = sys.argv[1]
archivoIDF = sys.argv[2]
archivoEPW = sys.argv[3]

IDF.setiddname(archivoIDD)
idf = IDF(archivoIDF, archivoEPW)
nuevaCarpeta = "optimizacion_" + \
    os.path.splitext(os.path.basename(archivoIDF))[0]
idf.run(
    output_directory=os.path.join(os.path.dirname(archivoIDF), nuevaCarpeta))
from eppy.modeleditor import IDF

iddfile = "/Applications/EnergyPlus-8-8-0/Energy+.idd"
IDF.setiddname(iddfile)

idfname = "/Users/bhabalaj/Repositories/COML/pyEp/pyEp/pyEp/example_buildings/LargeOffice/LargeOfficeFUN.idf"
epwfile = "/Users/bhabalaj/Repositories/COML/pyEp/pyEp/SPtMasterTable_587017_2012_amy.epw"

idf = IDF(idfname, epwfile)
idf.run()