Example #1
import pytest

from eppy3000 import epschema

import schemafortesting  # test-helper module shown in Example #2; import path may differ


def test_read_epschema_asmunch():
    """py.test for read_epschema_asmunch"""
    SCHEMA_FILE = schemafortesting.schema_file
    schemahandle = open(SCHEMA_FILE, "r")
    result = epschema.read_epschema_asmunch(schemahandle)
    assert isinstance(result, epschema.EPSchemaMunch)

    result = epschema.read_epschema_asmunch(SCHEMA_FILE)
    assert isinstance(result, epschema.EPSchemaMunch)

    schemahandle = schemafortesting.schema
    result = epschema.read_epschema_asmunch(schemahandle)
    assert isinstance(result, epschema.EPSchemaMunch)

    schemahandle = list()
    with pytest.raises(TypeError):
        result = epschema.read_epschema_asmunch(schemahandle)
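The test above exercises all three accepted inputs. A minimal usage sketch, assuming only what the test asserts (the schema path below is hypothetical; any Energy+.schema.epJSON file will do):

from eppy3000 import epschema

SCHEMA_PATH = "Energy+.schema.epJSON"  # hypothetical path to an epJSON schema

# 1. pass a file path
schema = epschema.read_epschema_asmunch(SCHEMA_PATH)

# 2. pass an open file handle
with open(SCHEMA_PATH, "r") as handle:
    schema = epschema.read_epschema_asmunch(handle)

# 3. pass an already-parsed schema (returned as an EPSchemaMunch, per the test)
schema = epschema.read_epschema_asmunch(schema)

assert isinstance(schema, epschema.EPSchemaMunch)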
Example #2
    def read_epschema_asmunch(self):
        """Read the epschema file - will become a frozen singleton"""
        return read_epschema_asmunch(self.epschemaname)


"""Sets up the schema for the tests.
It will be loaded only once, since that is how imports work"""

import os

from eppy3000 import epschema

THIS_DIR = os.path.dirname(os.path.abspath(__file__))

RESOURCES_DIR = os.path.join(THIS_DIR, os.pardir, "eppy3000", "resources")
VERSION = "9-0-1"  # current default for integration tests on local system

SCHEMA_FILES = os.path.join(RESOURCES_DIR, "schema")
TEST_SCHEMA = f"V{VERSION[:3].replace('-', '_')}/Energy+.schema.epJSON"
schema_file = os.path.join(SCHEMA_FILES, TEST_SCHEMA)

schema = epschema.read_epschema_asmunch(schema_file)

# IDD_FILES = os.path.join(RESOURCES_DIR, 'iddfiles')
# IDF_FILES = os.path.join(RESOURCES_DIR, 'idffiles')
# try:
#     VERSION = os.environ["ENERGYPLUS_INSTALL_VERSION"]  # used in CI files
# except KeyError:
#     VERSION = '8-9-0'
#        # current default for integration tests on local system
# TEST_IDF = "V{}/smallfile.idf".format(VERSION[:3].replace('-', '_'))
# TEST_EPW = 'USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw'
# TEST_IDD = "Energy+V{}.idd".format(VERSION.replace('-', '_'))
# TEST_OLD_IDD = 'Energy+V7_2_0.idd'
#
#
# @pytest.fixture()
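Because the module above loads the schema at import time, every test that imports it shares one parsed schema object. A short sketch of how a test consumes it, assuming the import path matches the test layout (it may differ):

import schemafortesting  # the import triggers the one-time schema load


def test_uses_shared_schema():
    schema = schemafortesting.schema            # already-parsed EPSchemaMunch
    schema_file = schemafortesting.schema_file  # path to the epJSON schema file
    assert schema.properties                    # the epJSON schema exposes a "properties" mapping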
Example #4
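The function below is an excerpt from the middle of a converter module, so several names (json, rawidf, read_epschema_asmunch, keymapping, num, grouper) come from its surrounding imports and helpers. An assumed import header for reading it in isolation; the exact module paths are not confirmed by the snippet:

# assumed context for the excerpt below (not part of the original snippet)
import json

from eppy3000 import rawidf
from eppy3000.epschema import read_epschema_asmunch

# keymapping, num and grouper are helper functions assumed to be defined
# alongside idf2json in the same converter module.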
def idf2json(idfhandle, epschemahandle):
    """converts the E+ file in the old IDF format to the new JSON format

    Parameters
    ----------
    idfhandle: io.TextIOWrapper, io.StringIO
        This is the E+ file in the old IDF format
    epschemahandle: io.TextIOWrapper, io.StringIO
        This is the epjson file (eqv. of the IDD file in the old format)

    Returns
    -------
    str
        E+ file in the new JSON format
    """
    raw_idf = rawidf.readrawidf(idfhandle)
    js = read_epschema_asmunch(epschemahandle)
    idfobjcount = {}
    idfjson = {}
    keys = raw_idf.keys()
    order = 0
    mapping = keymapping(raw_idf.keys(), js.properties.keys())
    # keymapping matches keys whose upper/lower case differs between the IDF file and the schema
    for akey in keys:
        key = mapping[akey]
        idfobjcount.setdefault(key, 0)
        dct = idfjson.setdefault(key, dict())
        fieldnames = js.properties[key].legacy_idd.fields
        idfobjects = raw_idf[akey]
        for idfobject in idfobjects:
            idfobjcount[key] = idfobjcount[key] + 1
            order += 1
            try:
                if fieldnames[0] == "name":
                    alst = {
                        fieldname: idfvalue
                        for idfvalue, fieldname in zip(idfobject[2:],
                                                       fieldnames[1:])
                    }
                    idfobjectname = idfobject[1]
                else:
                    alst = {
                        fieldname: idfvalue
                        for idfvalue, fieldname in zip(idfobject[1:],
                                                       fieldnames)
                    }
                    idfobjectname = f"{key} {idfobjcount[key]}"
            except IndexError as e:
                # catches "if fieldnames[0] == 'name':" when fieldnames = []
                alst = {
                    fieldname: idfvalue
                    for idfvalue, fieldname in zip(idfobject[1:], fieldnames)
                }
                idfobjectname = f"{key} {idfobjcount[key]}"
            alst["idf_order"] = order
            numericfields = js.properties[key].legacy_idd.numerics.fields
            for fieldkey in alst.keys():
                if fieldkey in numericfields:
                    alst[fieldkey] = num(alst[fieldkey])

            try:
                extension = js.properties[key].legacy_idd.extension
                extensibles = js.properties[key].legacy_idd.extensibles
                endvalues = idfobject[len(fieldnames) + 1:]
                g_endvalues = grouper(endvalues, len(extensibles))
                extvalues = [{f: t
                              for f, t in zip(extensibles, tup)}
                             for tup in g_endvalues]

                try:
                    legacyidd = js.properties[key].legacy_idd
                    e_numericfields = legacyidd.numerics.extensions
                    for e_dct in extvalues:
                        for e_key in e_dct:
                            if e_key in e_numericfields:
                                if e_dct[e_key]:
                                    # will skip None and '' -> maybe make sure of this
                                    e_dct[e_key] = num(e_dct[e_key])
                except AttributeError as e:
                    pass

                alst[extension] = extvalues
            except AttributeError as e:
                pass
            dct.update({idfobjectname: alst})
    return json.dumps(idfjson, indent=2)
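A hedged usage sketch for idf2json: both arguments are open text handles (io.TextIOWrapper or io.StringIO, per the docstring), and the return value is the epJSON text ready to be written out. The file names are hypothetical:

# hypothetical file names; any IDF file plus the matching epJSON schema will do
with open("smallfile.idf") as idfhandle, open("Energy+.schema.epJSON") as schemahandle:
    json_text = idf2json(idfhandle, schemahandle)

with open("smallfile.epJSON", "w") as outhandle:
    outhandle.write(json_text)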
Example #5
def json2idf(jsonhandle, epschemahandle):
    """converts the E+ file new JSON format to the old IDF format

    Parameters
    ----------
    jsonhandle: io.TextIOWrapper, io.StringIO
        This is the E+ file in the new JSON format
    epschemahandle: io.TextIOWrapper, io.StringIO
        This is the epjson file (eqv. of the IDD file in the old format)

    Returns
    -------
    str
        E+ file in the old IDF format
    """
    lines = []
    js = read_epschema_asmunch(epschemahandle)
    idfjs = read_epschema_asmunch(jsonhandle)

    for key in idfjs.keys():
        for name in idfjs[key].keys():
            fieldval = []
            fieldnames = js.properties[key].legacy_idd.fields
            lastfield = len(fieldnames) - 1
            comma = ","
            semicolon = ";"
            sep = comma
            for i, fieldname in enumerate(fieldnames):
                if i == lastfield:
                    sep = semicolon
                try:
                    value = idfjs[key][name][fieldname]
                    fieldval.append((fieldname, value))
                except KeyError as e:
                    if fieldname == "name":
                        fieldval.append((fieldname, name))
                    else:
                        fieldval.append((fieldname, ""))
            try:
                extension = js.properties[key].legacy_idd.extension
                extensibles = js.properties[key].legacy_idd.extensibles
                for i, tup in enumerate(idfjs[key][name][extension]):
                    for fld in extensibles:
                        try:
                            fieldval.append((f"{fld} {i + 1}", tup[fld]))
                        except KeyError as e:
                            fieldval.append((f"{fld} {i + 1}", ""))
            except AttributeError as e:
                pass
            fieldval = [(fld, val) for fld, val in fieldval if val is not None]
            # remove trailing blanks
            fieldval = removetrailingblanks(fieldval)

            lastfield = len(fieldval) - 1
            sep = comma
            lines.append(f"{key},")
            for i, (fld, val) in enumerate(fieldval):
                if i == lastfield:
                    sep = semicolon
                valsep = f"{val}{sep}"
                lines.append(f"    {valsep:<25} !- {fld}")
            lines.append("")

    return "\n".join(lines)