Example #1
def generateInputFile(projectDir, projectSetDir, projectName, setNum, setupFile, controlNameRoot):
    # make changes to the control input file, only if initializing the setup dir for this set. This avoids
    # overwriting information
    # get the control inputs file
    controlName = readXmlTag(setupFile, controlNameRoot, 'value')[0]
    controlInputsFile = os.path.join(projectDir, 'InputData', 'Setup',
                                        projectName + controlName[0].upper() + controlName[1:] + 'Inputs.xml')

    # get the control inputs for this set of simulations
    controlInputsTag = readXmlTag(projectName + 'Set' + str(setNum) + 'Attributes.xml',
                                     [controlNameRoot+'InputAttributeValues', controlNameRoot+'InputTag'], 'value')
    controlInputAttr = readXmlTag(projectName + 'Set' + str(setNum) + 'Attributes.xml',
                                     [controlNameRoot+'InputAttributeValues', controlNameRoot+'InputAttr'], 'value')
    controlInputValue = readXmlTag(projectName + 'Set' + str(setNum) + 'Attributes.xml',
                                      [controlNameRoot+'InputAttributeValues', controlNameRoot+'InputValue'],
                                      'value')

    # copy the control input xml file to this simulation set directory and make the specified changes
    setControlInputFile = os.path.join(projectSetDir, 'Setup',
                                       projectName + 'Set' + str(setNum) + controlName[0].upper() +
                                       controlName[1:] + 'Inputs.xml')
    # copy controlInput file
    copyfile(controlInputsFile, setControlInputFile)
    # make the changes to it defined in projectSetAttributes
    for idx, val in enumerate(controlInputValue):  # iterate through all control attribute values
        tag = controlInputsTag[idx].split('.')
        attr = controlInputAttr[idx]
        value = val
        writeXmlTag(setControlInputFile, tag, attr, value)
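
A minimal usage sketch, assuming the function's own module imports (readXmlTag, copyfile) are available; the project name, paths, set number, and the 'reDispatch' controlNameRoot are all hypothetical:

import os

projectDir = '/path/to/GBSProjects/MyProject'                   # hypothetical
projectSetDir = os.path.join(projectDir, 'OutputData', 'Set0')  # hypothetical
setupFile = os.path.join(projectDir, 'InputData', 'Setup', 'MyProjectSetup.xml')
generateInputFile(projectDir, projectSetDir, 'MyProject', 0, setupFile, 'reDispatch')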
Example #2
def getUnits(projectName, projectDir):
    '''
    Reads a series of xml tags in component xml files and returns their values as lists.
    :param projectName: [String] the name of a project
    :param projectDir: [String] the path where project files are saved
    :return: [ListOfString],[ListOfString],[ListOfString],[ListOfString],[ListOfString]
    '''
    from MiGRIDS.Analyzer.DataRetrievers.readXmlTag import readXmlTag
    import numpy as np

    fileName = projectName + 'Setup.xml'
    # get the header names of the time series data to be used in the simulations

    headerTag = ['componentChannels', 'headerName']
    headerAttr = 'value'
    headerNames = readXmlTag(fileName, headerTag, headerAttr, projectDir)
    # get the units corresponding to header names
    attrTag = ['componentChannels', 'componentAttribute']
    attrAttr = 'unit'
    componentUnits = readXmlTag(fileName, attrTag, attrAttr, projectDir)
    # get the attributes (ie Power, wind speed, temperature, ...)
    attrTag = ['componentChannels', 'componentAttribute']
    attrAttr = 'value'
    componentAttributes = readXmlTag(fileName, attrTag, attrAttr, projectDir)
    # get the component name (ie gen1, wtg2,...)
    nameTag = ['componentChannels', 'componentName']
    nameAttr = 'value'
    componentNames = readXmlTag(fileName, nameTag, nameAttr, projectDir)
    # create new header names by combining the component name with the attribute (eg gen1P, wtg2WS, ...)
    newHeaderNames = np.core.defchararray.add(
        componentNames,
        componentAttributes)  # create column names to replace existing headers

    return headerNames, componentUnits, componentAttributes, componentNames, newHeaderNames
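
A hedged usage sketch of getUnits; the setup directory is hypothetical, and the example values in the trailing comments follow the naming scheme described above (gen1P, wtg2WS, ...):

headerNames, componentUnits, componentAttributes, componentNames, newHeaderNames = \
    getUnits('MyProject', '/path/to/GBSProjects/MyProject/InputData/Setup')
# e.g. componentNames = ['gen1', 'wtg2'] with componentAttributes = ['P', 'WS']
# would yield newHeaderNames = ['gen1P', 'wtg2WS']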
Example #3
def readDataFile(inputDict):
    # inputDict is a dictionary describing the input data, with keys including:
    # 'inputSpecification' points to a script to accept data from a certain input data format *type string*
    # 'fileLocation' is the dir where the data files are stored. It is either absolute or relative to the GBS project InputData dir *type string*
    # 'fileType' is the file type. default is csv. All files of this type will be read from the dir *type string*
    # 'columnNames', if specified, is a list of column names from the data file that will be returned in the dataframe.
    # otherwise all columns will be returned. Note indexing starts from 0. *type list(string)*
    
    ####### general imports #######
    import pandas as pd
    import os
    import importlib.util
    import numpy as np
    from MiGRIDS.InputHandler.readAllTimeSeries import readAllTimeSeries
    from MiGRIDS.InputHandler.readWindData import readWindData
    from MiGRIDS.Analyzer.DataRetrievers.readXmlTag import readXmlTag
    from MiGRIDS.InputHandler.Component import Component

    
    ### convert inputs to list, if not already
    if not isinstance(inputDict['columnNames'],(list,tuple,np.ndarray)):
        inputDict['columnNames'] = [inputDict['columnNames']]
    if not isinstance(inputDict['useNames'], (list, tuple, np.ndarray)):
        inputDict['useNames'] = [inputDict['useNames']]
    if not isinstance(inputDict['componentUnits'], (list, tuple, np.ndarray)):
        inputDict['componentUnits'] = [inputDict['componentUnits']]
    if not isinstance(inputDict['componentAttributes'], (list, tuple, np.ndarray)):
        inputDict['componentAttributes'] = [inputDict['componentAttributes']]
    
    
    # get just the filenames ending with fileType. check for both upper and lower case
    # met files are text.
    if inputDict['fileType'].lower() == 'met':
        inputDict['fileNames'] = [f for f in os.listdir(inputDict['fileLocation']) if
                     os.path.isfile(os.path.join(inputDict['fileLocation'],f)) & (f.endswith('TXT') or f.endswith('txt'))]
    else:
        inputDict['fileNames'] = [f for f in os.listdir(inputDict['fileLocation']) if
                     os.path.isfile(os.path.join(inputDict['fileLocation'],f)) & (f.endswith(inputDict['fileType'].upper()) or f.endswith(inputDict['fileType'].lower()))]
       
    df = pd.DataFrame()
    ####### Parse the time series data files ############
    # depending on input specification, different procedure
    if inputDict['fileType'].lower() =='csv':
        
        df = readAllTimeSeries(inputDict)
    elif inputDict['fileType'].lower() == 'met':
        
        fileDict, df = readWindData(inputDict)

    # convert units
    listOfComponents = []  # initiate the component list here so the return below is always defined
    if np.all(inputDict['componentUnits'] != None):
        if inputDict['componentAttributes'] != None:
            for i in range(len(inputDict['componentUnits'])): #for each channel make a component object
       
                # cd to unit conventions file
                dir_path = os.path.dirname(os.path.realpath(__file__))                
                unitConventionDir = os.path.join(dir_path, *['..','Analyzer','UnitConverters'])
                # get the default unit for the data type
                units = readXmlTag('internalUnitDefault.xml', ['unitDefaults', inputDict['componentAttributes'][i]], 'units', unitConventionDir)[0]
                # if the units don't match, convert
                if units.lower() != inputDict['componentUnits'][i].lower():
                    unitConvertDir = os.path.join( dir_path,*['..','Analyzer','UnitConverters','unitConverters.py'])
                    funcName = inputDict['componentUnits'][i].lower() + '2' + units.lower()
                    # load the conversion
                    spec = importlib.util.spec_from_file_location(funcName, unitConvertDir)
                    uc = importlib.util.module_from_spec(spec)
                    spec.loader.exec_module(uc)
                    x = getattr(uc, funcName)
                    # update data
                    df[inputDict['useNames'][i]] = x(df[inputDict['useNames'][i]])
                # get the scale and offset
                scale = readXmlTag('internalUnitDefault.xml', ['unitDefaults', inputDict['componentAttributes'][i]], 'scale',
                                   unitConventionDir)[0]
                offset = readXmlTag('internalUnitDefault.xml', ['unitDefaults', inputDict['componentAttributes'][i]], 'offset',
                                    unitConventionDir)[0]
               
                
                df[inputDict['useNames'][i]] = df[inputDict['useNames'][i]]*int(scale) + int(offset)
                # get the desired data type and convert
                datatype = readXmlTag('internalUnitDefault.xml', ['unitDefaults', inputDict['componentAttributes'][i]], 'datatype',
                                      unitConventionDir)[0]
                
                listOfComponents.append(Component(
                        column_name=inputDict['useNames'][i],
                        units=units, scale=scale,
                        offset=offset, datatype=datatype,
                        attribute=inputDict['componentAttributes'][i]))

   
    #drop unused columns
    df = df[inputDict['useNames'] + ['DATE']]
    return df, listOfComponents
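
A sketch of the inputDict readDataFile expects, assembled from the keys the function reads directly; all values are hypothetical, and the readers it delegates to (readAllTimeSeries, readWindData) may require additional keys not shown here:

inputDict = {
    'fileLocation': '/path/to/InputData/TimeSeriesData/RawData',  # hypothetical dir of csv files
    'fileType': 'csv',             # all .csv/.CSV files in fileLocation will be read
    'columnNames': ['gen1P'],      # columns to pull from the raw files
    'useNames': ['gen1P'],         # column names used in the returned dataframe
    'componentUnits': ['kW'],      # units of each channel
    'componentAttributes': ['P'],  # attribute of each channel (Power, wind speed, ...)
}
df, listOfComponents = readDataFile(inputDict)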
Example #4
def runSimulation(projectSetDir=''):

    if projectSetDir == '':
        print('Choose the project directory')
        root = tk.Tk()
        root.withdraw()
        projectSetDir = filedialog.askdirectory()

    # get set number
    dir_path = os.path.basename(projectSetDir)
    setNum = str(dir_path[3:])
    # get the project name
    os.chdir(projectSetDir)
    os.chdir('../..')
    projectDir = os.getcwd()
    projectName = os.path.basename(projectDir)
    # timeseries directory
    timeSeriesDir = os.path.join(projectDir, 'InputData', 'TimeSeriesData',
                                 'ProcessedData')

    # get the setup file for this set
    projectSetupFile = os.path.join(
        projectSetDir, 'Setup',
        projectName + 'Set' + str(setNum) + 'Setup.xml')

    # get the time step
    timeStep = readXmlTag(projectSetupFile,
                          'timeStep',
                          'value',
                          returnDtype='int')[0]

    # get the time steps to run
    runTimeSteps = readXmlTag(projectSetupFile, 'runTimeSteps', 'value')
    if len(
            runTimeSteps
    ) == 1:  # if only one value, take out of list. this prevents failures further down.
        runTimeSteps = runTimeSteps[0]
        if not runTimeSteps == 'all':
            runTimeSteps = int(runTimeSteps)
    else:  # convert to int
        runTimeSteps = [int(x) for x in runTimeSteps]

    # get the load predicting function
    predictLoadFile = readXmlTag(projectSetupFile, 'predictLoad', 'value')[0]
    predictLoadInputsFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        predictLoadFile[0].upper() + predictLoadFile[1:] + 'Inputs.xml')

    # get the wind predicting function
    predictWindFile = readXmlTag(projectSetupFile, 'predictWind', 'value')[0]
    predictWindInputsFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        predictWindFile[0].upper() + predictWindFile[1:] + 'Inputs.xml')

    # get the ees dispatch
    eesDispatchFile = readXmlTag(projectSetupFile, 'eesDispatch', 'value')[0]
    eesDispatchInputFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        eesDispatchFile[0].upper() + eesDispatchFile[1:] + 'Inputs.xml')

    # get the tes dispatch
    tesDispatchFile = readXmlTag(projectSetupFile, 'tesDispatch', 'value')[0]
    tesDispatchInputFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        tesDispatchFile[0].upper() + tesDispatchFile[1:] + 'Inputs.xml')

    # get the minimum required SRC calculation
    getMinSrcFile = readXmlTag(projectSetupFile, 'getMinSrc', 'value')[0]

    getMinSrcInputFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        getMinSrcFile[0].upper() + getMinSrcFile[1:] + 'Inputs.xml')

    # get the components to run
    componentNames = readXmlTag(projectSetupFile, 'componentNames', 'value')

    # get the load profile to run
    loadProfileFile = readXmlTag(projectSetupFile, 'loadProfileFile',
                                 'value')[0]
    loadProfileFile = os.path.join(timeSeriesDir, loadProfileFile)

    # get the RE dispatch
    reDispatchFile = readXmlTag(projectSetupFile, 'reDispatch', 'value')[0]

    reDispatchInputFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        reDispatchFile[0].upper() + reDispatchFile[1:] + 'Inputs.xml')

    # get the gen dispatch
    genDispatchFile = readXmlTag(projectSetupFile, 'genDispatch', 'value')[0]

    genDispatchInputFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        genDispatchFile[0].upper() + genDispatchFile[1:] + 'Inputs.xml')
    # get the gen schedule
    genScheduleFile = readXmlTag(projectSetupFile, 'genSchedule', 'value')[0]

    genScheduleInputFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        genScheduleFile[0].upper() + genScheduleFile[1:] + 'Inputs.xml')

    # get the wtg dispatch
    wtgDispatchFile = readXmlTag(projectSetupFile, 'wtgDispatch', 'value')[0]

    wtgDispatchInputFile = os.path.join(
        projectSetDir, 'Setup', projectName + 'Set' + str(setNum) +
        wtgDispatchFile[0].upper() + wtgDispatchFile[1:] + 'Inputs.xml')

    while 1:
        # read the SQL table of runs in this set and look for the next run that has not been started yet.
        conn = sqlite3.connect(
            os.path.join(projectSetDir, 'set' + str(setNum) +
                         'ComponentAttributes.db'))  # create sql database
        df = pd.read_sql_query('select * from compAttributes', conn)
        # try to find the first 0 value in started column
        try:
            runNum = list(df['started']).index(0)
        except ValueError:  # there are no more simulations left to run
            break
        # set started value to 1 to indicate starting the simulations
        df.at[runNum, 'started'] = 1
        df.to_sql('compAttributes', conn, if_exists="replace",
                  index=False)  # write to table compAttributes in db
        conn.close()
        # Go to run directory and run
        runDir = os.path.join(projectSetDir, 'Run' + str(runNum))
        runCompDir = os.path.join(
            runDir, 'Components')  # component directory for this run
        # output data dir
        outputDataDir = os.path.join(runDir, 'OutputData')
        if not os.path.exists(outputDataDir):  # if it doesn't exist, create it
            os.mkdir(outputDataDir)

        eesIDs = []
        eesSOC = []
        eesStates = []
        eesSRC = []
        eesDescriptors = []
        tesIDs = []
        tesT = []
        tesStates = []
        tesDescriptors = []
        wtgIDs = []
        wtgStates = []
        wtgDescriptors = []
        genIDs = []
        genStates = []
        genDescriptors = []
        loadDescriptors = []

        for cpt in componentNames:  # for each component
            # check if component is a generator
            if 'gen' in cpt.lower():
                genDescriptors += [
                    os.path.join(
                        runCompDir,
                        cpt.lower() + 'Set' + str(setNum) + 'Run' +
                        str(runNum) + 'Descriptor.xml')
                ]
                genIDs += [cpt[3:]]
                genStates += [2]
            elif 'ees' in cpt.lower():  # or if energy storage
                eesDescriptors += [
                    os.path.join(
                        runCompDir,
                        cpt.lower() + 'Set' + str(setNum) + 'Run' +
                        str(runNum) + 'Descriptor.xml')
                ]
                eesIDs += [cpt[3:]]
                eesStates += [2]
                eesSRC += [0]
                eesSOC += [0]
            elif 'tes' in cpt.lower():  # or if thermal energy storage
                tesDescriptors += [
                    os.path.join(
                        runCompDir,
                        cpt.lower() + 'Set' + str(setNum) + 'Run' +
                        str(runNum) + 'Descriptor.xml')
                ]
                tesIDs += [cpt[3:]]
                tesT += [295]
                tesStates += [2]
            elif 'wtg' in cpt.lower():  # or if wind turbine
                wtgDescriptors += [
                    os.path.join(
                        runCompDir,
                        cpt.lower() + 'Set' + str(setNum) + 'Run' +
                        str(runNum) + 'Descriptor.xml')
                ]
                wtgIDs += [cpt[3:]]
                wtgStates += [2]
            elif 'load' in cpt.lower():  # or if load
                loadDescriptors += [
                    os.path.join(
                        runCompDir,
                        cpt.lower() + 'Set' + str(setNum) + 'Run' +
                        str(runNum) + 'Descriptor.xml')
                ]

        # initiate the system operations
        # code profiler
        # pr0 = cProfile.Profile()
        # pr0.enable()
        SO = SystemOperations(outputDataDir,
                              timeStep=timeStep,
                              runTimeSteps=runTimeSteps,
                              loadRealFiles=loadProfileFile,
                              loadReactiveFiles=[],
                              predictLoadFile=predictLoadFile,
                              predictLoadInputsFile=predictLoadInputsFile,
                              loadDescriptor=loadDescriptors,
                              predictWindFile=predictWindFile,
                              predictWindInputsFile=predictWindInputsFile,
                              getMinSrcFile=getMinSrcFile,
                              getMinSrcInputFile=getMinSrcInputFile,
                              reDispatchFile=reDispatchFile,
                              reDispatchInputsFile=reDispatchInputFile,
                              genIDs=genIDs,
                              genStates=genStates,
                              genDescriptors=genDescriptors,
                              genDispatchFile=genDispatchFile,
                              genScheduleFile=genScheduleFile,
                              genDispatchInputsFile=genDispatchInputFile,
                              genScheduleInputsFile=genScheduleInputFile,
                              wtgIDs=wtgIDs,
                              wtgStates=wtgStates,
                              wtgDescriptors=wtgDescriptors,
                              windSpeedDir=timeSeriesDir,
                              wtgDispatchFile=wtgDispatchFile,
                              wtgDispatchInputsFile=wtgDispatchInputFile,
                              eesIDs=eesIDs,
                              eesStates=eesStates,
                              eesSOCs=eesSOC,
                              eesDescriptors=eesDescriptors,
                              eesDispatchFile=eesDispatchFile,
                              eesDispatchInputsFile=eesDispatchInputFile,
                              tesIDs=tesIDs,
                              tesTs=tesT,
                              tesStates=tesStates,
                              tesDescriptors=tesDescriptors,
                              tesDispatchFile=tesDispatchFile,
                              tesDispatchInputsFile=tesDispatchInputFile)
        # stop profiler
        # pr0.disable()
        #pr0.print_stats(sort="calls")

        # run the simulation
        # code profiler
        # pr1 = cProfile.Profile()
        # pr1.enable()
        # run sim
        SO.runSimulation()
        # stop profiler
        # pr1.disable()
        # pr1.print_stats(sort="calls")

        # save data
        os.chdir(outputDataDir)

        start_file_write = time.time()

        # Stitch powerhouseP
        powerhouseP = SO.stitchVariable('powerhouseP')
        writeNCFile(SO.DM.realTime, powerhouseP, 1, 0, 'kW', 'powerhousePSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # gen P
        powerhouseP = None

        # Stitch powerhousePch
        powerhousePch = SO.stitchVariable('powerhousePch')
        writeNCFile(SO.DM.realTime, powerhousePch, 1, 0, 'kW',
                    'powerhousePchSet' + str(setNum) + 'Run' + str(runNum) +
                    '.nc')  # gen Pch
        powerhousePch = None

        # Stitch rePlimit
        rePlimit = SO.stitchVariable('rePlimit')
        writeNCFile(SO.DM.realTime, rePlimit, 1, 0, 'kW', 'rePlimitSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # rePlimit
        rePlimit = None

        # Stitch wfPAvail
        wfPAvail = SO.stitchVariable('wfPAvail')
        writeNCFile(SO.DM.realTime, wfPAvail, 1, 0, 'kW', 'wtgPAvailSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # wfPAvail
        wfPAvail = None

        # Stitch wfPImport
        wfPImport = SO.stitchVariable('wfPImport')
        writeNCFile(SO.DM.realTime, wfPImport, 1, 0, 'kW', 'wtgPImportSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # wtgPImport
        wfPImport = None

        # Stitch wfPch
        wfPch = SO.stitchVariable('wfPch')
        writeNCFile(SO.DM.realTime, wfPch, 1, 0, 'kW', 'wtgPchSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # wtgPch
        wfPch = None

        # Stitch wfPTot
        wfPTot = SO.stitchVariable('wfPTot')
        writeNCFile(SO.DM.realTime, wfPTot, 1, 0, 'kW', 'wtgPTotSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # wtgPTot
        wfPTot = None

        # Stitch srcMin
        srcMin = SO.stitchVariable('srcMin')
        writeNCFile(SO.DM.realTime, srcMin, 1, 0, 'kW', 'srcMinSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # srcMin
        srcMin = None

        # Stitch eessDis and write to disk
        eessDis = SO.stitchVariable('eessDis')
        writeNCFile(SO.DM.realTime, eessDis, 1, 0, 'kW', 'eessDisSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # eesDis
        eessDis = None

        # Stitch eessP  and write to disk
        eessP = SO.stitchVariable('eessP')
        writeNCFile(SO.DM.realTime, eessP, 1, 0, 'kW',
                    'eessPSet' + str(setNum) + 'Run' + str(runNum) + '.nc')
        eessP = None

        # Stitch tesP and write to disk
        tesP = SO.stitchVariable('tesP')
        writeNCFile(SO.DM.realTime, tesP, 1, 0, 'kW', 'tessP' + str(setNum) +
                    'Run' + str(runNum) + '.nc')  # tessP
        tesP = None

        # Stitch genPAvail and write to disk
        genPAvail = SO.stitchVariable('genPAvail')
        writeNCFile(SO.DM.realTime, genPAvail, 1, 0, 'kW', 'genPAvailSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # genPAvail
        genPAvail = None

        # Stitch onlineCombinationID and write to disk
        onlineCombinationID = SO.stitchVariable('onlineCombinationID')
        writeNCFile(SO.DM.realTime, onlineCombinationID, 1, 0, 'NA',
                    'onlineCombinationIDSet' + str(setNum) + 'Run' +
                    str(runNum) + '.nc')  # onlineCombinationID
        onlineCombinationID = None

        # Stitch underSRC and write to disk
        underSRC = SO.stitchVariable('underSRC')
        writeNCFile(SO.DM.realTime, underSRC, 1, 0, 'kW', 'underSRCSet' +
                    str(setNum) + 'Run' + str(runNum) + '.nc')  # underSRC
        underSRC = None

        # Stitch outOfNormalBounds and write to disk
        outOfNormalBounds = SO.stitchVariable('outOfNormalBounds')
        writeNCFile(SO.DM.realTime, outOfNormalBounds, 1, 0, 'bool',
                    'outOfNormalBoundsSet' + str(setNum) + 'Run' +
                    str(runNum) + '.nc')  # outOfNormalBounds
        outOfNormalBounds = None

        # Stitch outOfEfficientBounds and write to disk
        outOfEfficientBounds = SO.stitchVariable('outOfEfficientBounds')
        writeNCFile(SO.DM.realTime, outOfEfficientBounds, 1, 0, 'bool',
                    'outOfEfficientBoundsSet' + str(setNum) + 'Run' +
                    str(runNum) + '.nc')  # outOfEfficientBounds
        outOfEfficientBounds = None

        # Stitch wfSpilledWindFlag and write to disk
        wfSpilledWindFlag = SO.stitchVariable('wfSpilledWindFlag')
        writeNCFile(SO.DM.realTime, wfSpilledWindFlag, 1, 0, 'bool',
                    'wfSpilledWindFlagSet' + str(setNum) + 'Run' +
                    str(runNum) + '.nc')  # wfSpilledWindFlag

        # Stitch futureLoadList and write to disk
        futureLoadList = SO.stitchVariable('futureLoadList')
        writeNCFile(SO.DM.realTime, futureLoadList, 1, 0, 'kW',
                    'futureLoad' + str(setNum) + 'Run' + str(runNum) +
                    '.nc')  # future Load predicted
        futureLoadList = None

        # Stitch futureSRC and write to disk
        futureSRC = SO.stitchVariable('futureSRC')
        writeNCFile(SO.DM.realTime, futureSRC, 1, 0, 'kW',
                    'futureSRC' + str(setNum) + 'Run' + str(runNum) +
                    '.nc')  # future SRC predicted
        futureSRC = None

        # power for each generator
        genP = SO.stitchVariable('genP')
        for idx, genP in enumerate(
                zip(*genP)):  # for each generator in the powerhouse
            writeNCFile(
                SO.DM.realTime, genP, 1, 0, 'kW',
                'gen' + str(SO.PH.genIDS[idx]) + 'PSet' + str(setNum) + 'Run' +
                str(runNum) + '.nc')
        genP = None

        # fuel consumption for each generator
        genFuelCons = SO.stitchVariable('genFuelCons')
        for idx, genFuelCons in enumerate(
                zip(*genFuelCons)):  # for each generator in the powerhouse
            writeNCFile(
                SO.DM.realTime, genFuelCons, 1, 0, 'kg/s',
                'gen' + str(SO.PH.genIDS[idx]) + 'FuelConsSet' + str(setNum) +
                'Run' + str(runNum) + '.nc')
        genFuelCons = None

        # start times for each generator
        genStartTime = SO.stitchVariable('genStartTime')
        for idx, genST in enumerate(
                zip(*genStartTime)):  # for each generator in the powerhouse
            writeNCFile(SO.DM.realTime, genST, 1, 0, 's',
                        'gen' + str(SO.PH.genIDS[idx]) + 'StartTimeSet' +
                        str(setNum) + 'Run' + str(runNum) + '.nc')  # genStartTime
        genStartTime = None

        # run times for each generator
        genRunTime = SO.stitchVariable('genRunTime')
        for idx, genRT in enumerate(
                zip(*genRunTime)):  # for each generator in the powerhouse
            writeNCFile(SO.DM.realTime, genRT, 1, 0, 's',
                        'gen' + str(SO.PH.genIDS[idx]) + 'RunTimeSet' +
                        str(setNum) + 'Run' + str(runNum) + '.nc')  #
        genRunTime = None

        # SRC for each ees
        eessSrc = SO.stitchVariable('eessSrc')
        for idx, eesSRC in enumerate(zip(*eessSrc)):  # for each eess
            writeNCFile(SO.DM.realTime, eesSRC, 1, 0, 'kW',
                        'ees' + str(SO.EESS.eesIDs[idx]) + 'SRCSet' +
                        str(setNum) + 'Run' + str(runNum) + '.nc')  #
        eessSrc = None

        # SOC for each ees
        eessSoc = SO.stitchVariable('eessSoc')
        for idx, eesSOC in enumerate(zip(*eessSoc)):  # for each eess
            writeNCFile(SO.DM.realTime, eesSOC, 1, 0, 'PU',
                        'ees' + str(SO.EESS.eesIDs[idx]) + 'SOCSet' +
                        str(setNum) + 'Run' + str(runNum) + '.nc')  # eessSoc
        eessSoc = None

        # ees loss
        eesPLoss = SO.stitchVariable('eesPLoss')
        for idx, eesLoss in enumerate(zip(*eesPLoss)):  # for each eess
            writeNCFile(
                SO.DM.realTime, eesLoss, 1, 0, 'kW',
                'ees' + str(SO.EESS.eesIDs[idx]) + 'LossSet' + str(setNum) +
                'Run' + str(runNum) + '.nc')
        eesPLoss = None

        # wtg P avail
        wtgPAvail = SO.stitchVariable('wtgPAvail')
        for idx, wtgPAvail in enumerate(zip(*wtgPAvail)):  # for wtg
            writeNCFile(
                SO.DM.realTime, wtgPAvail, 1, 0, 'kW',
                'wtg' + str(SO.WF.wtgIDS[idx]) + 'PAvailSet' + str(setNum) +
                'Run' + str(runNum) + '.nc')
        wtgPAvail = None

        # wtg P
        wtgP = SO.stitchVariable('wtgP')
        for idx, wtgP in enumerate(zip(*wtgP)):  # for each wtg
            writeNCFile(
                SO.DM.realTime, wtgP, 1, 0, 'kW',
                'wtg' + str(SO.WF.wtgIDS[idx]) + 'PSet' + str(setNum) + 'Run' +
                str(runNum) + '.nc')
        wtgP = None

        # future wind predicted
        futureWindList = SO.stitchVariable('futureWindList')
        for idx, fw in enumerate(zip(*futureWindList)):  # for each wtg
            writeNCFile(
                SO.DM.realTime, fw, 1, 0, 'kW',
                'wtg' + str(SO.WF.wtgIDS[idx]) + 'FutureWind' + str(setNum) +
                'Run' + str(runNum) + '.nc')
        futureWindList = None

        #print('File write operation elapsed time: ' + str(time.time() - start_file_write))

        # set the value in the 'finished' for this run to 1 to indicate it is finished.
        conn = sqlite3.connect(
            os.path.join(projectSetDir, 'set' + str(setNum) +
                         'ComponentAttributes.db'))  # create sql database
        df = pd.read_sql_query('select * from compAttributes', conn)
        # set finished value to 1 to indicate this run is finished
        df.loc[runNum, 'finished'] = 1
        df.to_sql('compAttributes', conn, if_exists="replace",
                  index=False)  # write to table compAttributes in db
        conn.close()
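
The while loop above claims the next unstarted run through a read-modify-write on the SQLite table. A standalone sketch of that idiom (database path hypothetical; table and column names as used above). Note the read-modify-write is not atomic, so two workers polling the same database could in principle claim the same run:

import sqlite3
import pandas as pd

conn = sqlite3.connect('set0ComponentAttributes.db')  # hypothetical path
df = pd.read_sql_query('select * from compAttributes', conn)
try:
    runNum = list(df['started']).index(0)  # first run nobody has started
    df.at[runNum, 'started'] = 1           # claim it
    df.to_sql('compAttributes', conn, if_exists='replace', index=False)
except ValueError:                         # no zeros left: all runs claimed
    runNum = None
conn.close()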
Example #5
def readSetupFile(fileName):
    ''' Reads a setup.xml file and creates a dictionary of attributes used during the data import process.
    :param fileName: [String] a file path to a setup.xml file.
    :return: [Dictionary] containing attributes needed for loading data.'''
    try:
        # project name
        projectName = readXmlTag(fileName, 'project', 'name')[0]
        setupDir = os.path.dirname(fileName)
        # input specification
        inputDictionary = {}
        #filelocation is the raw timeseries file.
        #if multiple files specified look for raw_wind directory
        # input a list of subdirectories under the GBSProjects directory

        inputDictionary['setupDir'] = setupDir

        fileDirs = readXmlTag(fileName, 'inputFileDir', 'value')
        inputDictionary['fileLocation'] = [
            os.path.join(setupDir, '..', '..', '..', *d)
            if d[0] == projectName else d for d in fileDirs
        ]

        # file type
        inputDictionary['fileType'] = readXmlTag(fileName, 'inputFileType',
                                                 'value')

        inputDictionary['outputInterval'] = readXmlTag(fileName, 'timeStep',
                                                       'value')
        inputDictionary['outputIntervalUnit'] = readXmlTag(
            fileName, 'timeStep', 'unit')
        inputDictionary['runTimeSteps'] = readXmlTag(fileName, 'runTimeSteps',
                                                     'value')

        # get date and time values
        inputDictionary['dateColumnName'] = readXmlTag(fileName, 'dateChannel',
                                                       'value')
        inputDictionary['dateColumnFormat'] = readXmlTag(
            fileName, 'dateChannel', 'format')
        inputDictionary['timeColumnName'] = readXmlTag(fileName, 'timeChannel',
                                                       'value')
        inputDictionary['timeColumnFormat'] = readXmlTag(
            fileName, 'timeChannel', 'format')
        inputDictionary['utcOffsetValue'] = readXmlTag(fileName,
                                                       'inputUTCOffset',
                                                       'value')
        inputDictionary['utcOffsetUnit'] = readXmlTag(fileName,
                                                      'inputUTCOffset', 'unit')
        inputDictionary['dst'] = readXmlTag(fileName, 'inputDST', 'value')
        inputDictionary['timeZone'] = readXmlTag(fileName, 'timeZone', 'value')
        inputDictionary['componentName'] = readXmlTag(fileName,
                                                      'componentName', 'value')
        flexibleYear = readXmlTag(fileName, 'flexibleYear', 'value')

        # convert string to bool
        inputDictionary['flexibleYear'] = [
            (x.lower() == 'true') | (x.lower() == 't') for x in flexibleYear
        ]

        for idx in range(
                len(inputDictionary['outputInterval']
                    )):  # there should only be one output interval specified
            if len(inputDictionary['outputInterval']) > 1:
                inputDictionary['outputInterval'][idx] += inputDictionary[
                    'outputIntervalUnit'][idx]
            else:
                inputDictionary['outputInterval'][idx] += inputDictionary[
                    'outputIntervalUnit'][0]

        # get data units and header names
        inputDictionary['columnNames'], inputDictionary['componentUnits'], \
        inputDictionary['componentAttributes'],inputDictionary['componentNames'], inputDictionary['useNames'] = getUnits(projectName,setupDir)
    except Exception as e:
        print(e)
        return
    return inputDictionary
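
A hedged usage sketch (the path is hypothetical; readSetupFile returns None if any tag fails to read, so check before use):

inputDictionary = readSetupFile(
    '/path/to/GBSProjects/MyProject/InputData/Setup/MyProjectSetup.xml')
if inputDictionary is not None:
    print(inputDictionary['outputInterval'])  # e.g. ['1s'] after the unit is appended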
Example #6
    def eesDescriptorParser(self, eesDescriptor):
        """
        Reads the data from a given eesDescriptor file and uses the information given to populate the
        respective internal variables.

        :param eesDescriptor: relative path and file name of eesDescriptor.xml-file that is used to populate static
        information

        :return:
        """
        # read xml file
        eesDescriptorFile = open(eesDescriptor, "r")
        eesDescriptorXml = eesDescriptorFile.read()
        eesDescriptorFile.close()
        eesSoup = Soup(eesDescriptorXml, "xml")

        # Dig through the tree for the required data
        self.eesName = eesSoup.component.get('name')
        self.eesPOutMax = float(
            eesSoup.POutMaxPa.get('value'))  # max discharging power
        self.eesPInMax = float(
            eesSoup.PInMaxPa.get('value'))  # max charging power
        self.eesQOutMax = float(
            eesSoup.QOutMaxPa.get('value'))  # max discharging power reactive
        self.eesQInMax = float(
            eesSoup.QInMaxPa.get('value'))  # max charging power reactive
        # FUTUREFEATURE: add the effect of charge/discharge rate on capacity. Possibly add something similar to the LossMap
        self.eesEMax = float(
            eesSoup.ratedDuration.get('value')
        ) * self.eesPOutMax  # the maximum energy capacity of the EES in kWs
        # check if EMax is zero, this is likely because it is a zero EES condition. Set it to 1 kWs in order not to crash the
        # SOC calculations
        if self.eesEMax == 0:
            self.eesEMax = 1
        # the amount of time in seconds that the EES must be able to discharge for at current level of SRC being provided
        self.eesSrcTime = float(eesSoup.eesSrcTime.get('value'))

        # the amount of time over which under-SRC operation is recorded, to see if it goes over the limit eesUnderSrcLimit
        self.eesUnderSrcTime = float(eesSoup.eesUnderSrcTime.get('value'))
        # the limit in kW*s over eesUnderSrcTime before the underSRC flag is set.
        self.eesUnderSrcLimit = float(eesSoup.eesUnderSrcLimit.get('value'))

        # 'eesDispatchTime' is the minimum amount of time that the ESS must be able to supply the load for in order to
        # be considered as an active discharge option in the diesel schedule.
        self.eesDispatchTime = float(eesSoup.eesDispatchTime.get('value'))
        # 'eesDispatchMinSoc' is the minimum SOC of the ESS in order to be considered as an active discharge option in
        # the diesel schedule. Units are in pu of full energy capacity.
        self.eesDispatchMinSoc = float(eesSoup.eesDispatchMinSoc.get('value'))
        # In order to consider the equivalent fuel efficiency of discharging the ESS to allow running a smaller
        # diesel generator, an equivalent fuel consumption of the ESS must be calculated in kg/kWh. This is done by calculating
        # how much diesel fuel went into charging the ESS to its current level. Divide the number of kg by the state of
        # charge to get the fuel consumption of using the energy storage.
        # 'prevEesTime' is how far back that is used to assess what percentage of the current ESS charge came from
        # the diesel generator. This is used in the dispatch schedule to determine the cost of discharging the ESS to supply
        # the load for peak shaving or load leveling purposes.
        self.prevEesTime = float(eesSoup.prevEesTime.get('value'))
        # 'eesCost' is the cost of discharging the ESS that is above the fuel cost that went into charging it. It is
        # stated as a fuel consumption per kWh, kg/kWh. It is added to the effective fuel consumption of discharging the
        # ESS resulting from charging it with the diesel generators. The cost is used to account for non-fuel costs of
        # discharging the ESS including maintenance and lifetime costs. Units are kg/kWh.
        self.eesCost = float(eesSoup.eesCost.get('value'))
        # 'eesChargeRate' is the fraction of power that it would take to fully charge or discharge the ESS that is the
        # maximum charge or discharge power. This creates charging and discharging curves that exponentially approach full
        # and zero charge.
        self.eesChargeRate = float(eesSoup.chargeRate.get('value'))
        # 'lossMapEstep' is the step interval that ePu will be interpolated along in the lossmap
        self.lossMapEstep = float(eesSoup.lossMapEstep.get('value'))
        # 'lossMapPstep' is the step interval that pPu will be interpolated along in the lossmap
        self.lossMapPstep = float(eesSoup.lossMapPstep.get('value'))
        # 'useLossMap' is a bool value that indicates whether or not to use the lossMap in the simulation.
        self.useLossMap = eesSoup.useLossMap.get('value').lower() in [
            'true', '1'
        ]

        if self.useLossMap:
            # handle the loss map interpolation
            # 'lossMap' describes the loss experienced by the energy storage system for each state of power and energy.
            # they are described by the tuples 'pPu' for power, 'ePu' for the state of charge, 'tempAmb' for the ambient
            # (outside) temperature and 'lossRate' for the loss. Units for power are P.U. of nameplate power capacity. Positive values
            # of power are used for discharging and negative values for charging. Units for 'ePu' are P.U. nameplate energy
            # capacity. It should be between 0 and 1. 'loss' should include all losses including secondary systems. Units for
            # 'loss' are kW.
            # initiate loss map class
            eesLM = esLossMap()
            pPu = np.array(
                readXmlTag(eesDescriptor, ['lossMap', 'pPu'], 'value', '',
                           'float'))
            ePu = readXmlTag(eesDescriptor, ['lossMap', 'ePu'], 'value', '',
                             'float')
            lossPu = readXmlTag(eesDescriptor, ['lossMap', 'loss'], 'value',
                                '', 'float')
            tempAmb = readXmlTag(eesDescriptor, ['lossMap', 'tempAmb'],
                                 'value', '', 'float')

            # convert per unit power to power
            P = np.array(pPu)
            P[P > 0] = P[P > 0] * self.eesPOutMax
            P[P < 0] = P[P < 0] * self.eesPInMax
            #convert per unit energy to energy
            E = np.array(ePu) * self.eesEMax
            # convert pu loss to power
            L = np.abs(np.array(lossPu) * P)

            lossMapDataPoints = []
            for idx, item in enumerate(pPu):
                lossMapDataPoints.append((float(P[idx]), float(E[idx]),
                                          float(L[idx]), float(tempAmb[idx])))

            eesLM.lossMapDataPoints = lossMapDataPoints
            eesLM.pInMax = self.eesPInMax
            eesLM.pOutMax = self.eesPOutMax
            eesLM.eMax = self.eesEMax
            # check inputs
            eesLM.checkInputs()
            # perform the linear interpolation between points, using the configured energy and power steps
            eesLM.linearInterpolation(self.eesChargeRate,
                                      eStep=self.lossMapEstep,
                                      pStep=self.lossMapPstep)

            self.eesLossMapP = eesLM.P
            # save the index of where the power vector is zero. This is used to speed up run time calculations
            self.eesLossMapPZeroInd = (np.abs(self.eesLossMapP)).argmin()
            self.eesLossMapE = eesLM.E
            self.eesLossMapTemp = eesLM.Temp
            self.eesLossMapLoss = eesLM.loss
            self.eesmaxDischTime = eesLM.maxDischTime
            self.eesNextBinTime = eesLM.nextBinTime
        else:
            self.eesLossMapP = []
            # save the index of where the power vector is zero. This is used to speed up run time calculations
            self.eesLossMapPZeroInd = 0
            self.eesLossMapE = []
            self.eesLossMapTemp = []
            self.eesLossMapLoss = []
            self.eesmaxDischTime = []
            self.eesNextBinTime = []
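
A small standalone sketch of the per-unit conversions the loss-map branch performs, with made-up ratings (the real values come from the descriptor xml):

import numpy as np

pOutMax, pInMax, eMax = 500.0, 400.0, 3600.0  # kW, kW, kWs (hypothetical ratings)
pPu = np.array([-1.0, 0.0, 1.0])              # charging, idle, discharging
P = pPu.copy()
P[P > 0] *= pOutMax     # discharge side scales by max discharge power
P[P < 0] *= pInMax      # charge side scales by max charge power
ePu = np.array([0.0, 0.5, 1.0])
E = ePu * eMax          # -> [0., 1800., 3600.] kWs
lossPu = np.array([0.05, 0.0, 0.05])
L = np.abs(lossPu * P)  # -> [20., 0., 25.] kW of loss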
Example #7
def getRunMetaData(projectSetDir, runs):
    # get the set number
    dir_path = os.path.basename(projectSetDir)
    setNum = str(dir_path[3:])

    # read the input parameter sql database
    os.chdir(projectSetDir)
    conn = sqlite3.connect('set' + str(setNum) + 'ComponentAttributes.db')
    # get the attributes
    dfAttr = pd.read_sql_query('select * from compAttributes', conn)
    conn.close()

    # add columns for results
    df = pd.DataFrame(columns=[
        'Generator Import kWh', 'Generator Charging kWh',
        'Generator Switching', 'Generator Loading',
        'Generator Online Capacity', 'Generator Fuel Consumption kg',
        'Diesel-off time h', 'Generator Cumulative Run time h',
        'Generator Cumulative Capacity Run Time kWh',
        'Generator Overloading Time h', 'Generator Overloading kWh',
        'Wind Power Import kWh', 'Wind Power Spill kWh',
        'Wind Power Charging kWh', 'Energy Storage Discharge kWh',
        'Energy Storage Charge kWh', 'Energy Storage SRC kWh',
        'Energy Storage Overloading Time h', 'Energy Storage Overloading kWh',
        'Thermal Energy Storage Throughput kWh'
    ])

    genOverLoading = []
    eessOverLoading = []

    # check which runs to analyze
    if runs == 'all':
        os.chdir(projectSetDir)
        runDirs = glob.glob('Run*/')
        runs = [int(x[3:-1]) for x in runDirs]

    for runNum in runs:
        # get run dir
        projectRunDir = os.path.join(projectSetDir, 'Run' + str(runNum))

        # go to dir where output files are saved
        os.chdir(os.path.join(projectRunDir, 'OutputData'))

        # load the total powerhouse output
        genPStats, genP, ts = loadResults('powerhousePSet' + str(setNum) +
                                          'Run' + str(runNum) + '.nc')
        # load the total powerhouse charging of eess
        genPchStats, genPch, ts = loadResults('powerhousePchSet' +
                                              str(setNum) + 'Run' +
                                              str(runNum) + '.nc')
        # get generator power available stats
        genPAvailStats, genPAvail, tsGenPAvail = loadResults('genPAvailSet' +
                                                             str(setNum) +
                                                             'Run' +
                                                             str(runNum) +
                                                             '.nc')
        # check to see if fuel consumption has been calculated
        genFuelConsFileNames = glob.glob('gen*FuelConsSet*Run*.nc')
        # if the fuel cons has not been calculated, calculate
        if len(genFuelConsFileNames) == 0:
            getRunFuelUse(projectSetDir, [runNum])
            genFuelConsFileNames = glob.glob('gen*FuelConsSet*Run*.nc')
        # iterate through all generators and sum their fuel consumption.
        genFuelCons = 0
        for genFuelConsFileName in genFuelConsFileNames:
            genFuelConsStats0, genFuelCons0, ts = loadResults(
                genFuelConsFileName)
            # genFuelCons
            genFuelCons = genFuelCons + genFuelConsStats0[4]

        # calculate the average loading while online
        idxOnline = [idx for idx, x in enumerate(genPAvail)
                     if x > 0]  # the indices of when online
        # the loading profile of when online
        genLoading = [
            x / genPAvail[idxOnline[idx]]
            for idx, x in enumerate(genP[idxOnline])
        ]
        genLoadingMean = np.mean(genLoading)
        genLoadingStd = np.std(genLoading)
        genLoadingMax = np.max(genLoading)
        genLoadingMin = np.min(genLoading)
        # the online capacity of diesel generators
        genCapacity = genPAvail[idxOnline]
        genCapacityMean = np.mean(genCapacity)

        # get overloading of diesel
        # get indicies of when diesel generators online
        idxGenOnline = genPAvail > 0
        genOverLoadingTime = np.count_nonzero(
            genP[idxGenOnline] > genPAvail[idxGenOnline]) * ts / 3600
        genLoadingDiff = genP[idxGenOnline] - genPAvail[idxGenOnline]
        genOverLoading = genOverLoading + [[
            x for x in genLoadingDiff if x > 0
        ]]
        genOverLoadingkWh = sum(genLoadingDiff[genLoadingDiff > 0]) * ts / 3600

        # get overloading of the ESS. this is the power requested from the diesel generators when none are online.
        # to avoid counting instances where there is genP due to rounding error, only count if greater than 1
        eessOverLoadingTime = sum(
            [1 for x in genP[~idxGenOnline] if abs(x) > 1]) * ts / 3600
        eessOverLoadingkWh = sum(
            [abs(x) for x in genP[~idxGenOnline] if abs(x) > 1]) * ts / 3600
        eessOverLoading = eessOverLoading + [[
            x for x in genP[~idxGenOnline] if abs(x) > 1
        ]]

        # get the total time spend in diesel-off
        genTimeOff = np.count_nonzero(genPAvail == 0) * tsGenPAvail / 3600

        # get the total diesel run time
        genTimeRunTot = 0.
        genRunTimeRunTotkWh = 0.
        for genRunTimeFile in glob.glob('gen*RunTime*.nc'):
            genRunTimeStats, genRunTime, ts = loadResults(genRunTimeFile)
            genTimeRunTot += np.count_nonzero(genRunTime != 0) * ts / 3600
            # get the capacity of this generator
            # first get the gen ID
            genID = re.search('gen(.*)RunTime', genRunTimeFile).group(1)
            genPMax = readXmlTag("gen" + genID + "Set" + str(setNum) + "Run" +
                                 str(runNum) + "Descriptor.xml",
                                 "POutMaxPa",
                                 "value",
                                 fileDir=projectRunDir + "/Components",
                                 returnDtype='float')
            genRunTimeRunTotkWh += (np.count_nonzero(genRunTime != 0) * ts /
                                    3600) * genPMax[0]

        # calculate total generator energy delivered in kWh
        genPTot = (genPStats[4] - genPchStats[4]) / 3600

        # calculate total generator energy used to charge the ESS in kWh
        genPch = (genPchStats[4]) / 3600

        # calculate generator switching
        genSw = np.count_nonzero(np.diff(genPAvail))

        # load the wind data
        wtgPImportStats, wtgPImport, ts = loadResults('wtgPImportSet' +
                                                      str(setNum) + 'Run' +
                                                      str(runNum) + '.nc')
        wtgPAvailStats, wtgPAvail, ts = loadResults('wtgPAvailSet' +
                                                    str(setNum) + 'Run' +
                                                    str(runNum) + '.nc')
        wtgPchStats, wtgPch, ts = loadResults('wtgPchSet' + str(setNum) +
                                              'Run' + str(runNum) + '.nc')

        # tes
        # get tess power, if included in the simulations (check for the tessP
        # output file itself rather than the ees SRC files)
        if len(glob.glob('tessP*.nc')) > 0:
            tessPStats, tessP, ts = loadResults('tessP' + str(setNum) + 'Run' +
                                                str(runNum) + '.nc')
            tessPTot = tessPStats[4] / 3600
        else:
            tessPStats = [0, 0, 0, 0, 0]
            tessPTot = 0

        # spilled wind power in kWh
        wtgPspillTot = (wtgPAvailStats[4] - wtgPImportStats[4] -
                        wtgPchStats[4] - tessPStats[4]) / 3600

        # imported wind power in kWh
        wtgPImportTot = wtgPImportStats[4] / 3600

        # windpower used to charge EESS in kWh
        wtgPchTot = wtgPchStats[4] / 3600

        # eess
        # get eess power
        eessPStats, eessP, ts = loadResults('eessPSet' + str(setNum) + 'Run' +
                                            str(runNum) + '.nc')
        # get the charging power
        eessPch = [x for x in eessP if x < 0]
        eessPchTot = -sum(eessPch) * ts / 3600  # total kWh charging of eess
        # get the discharging power
        eessPdis = [x for x in eessP if x > 0]
        eessPdisTot = (sum(eessPdis) *
                       ts) / 3600  # total kWh discharging of eess
        # get eess SRC
        # get all ees used in kWh
        eessSRCTot = 0
        for eesFile in glob.glob('ees*SRC*.nc'):
            eesSRCStats, eesSRC, ts = loadResults(eesFile)
            eessSRCTot += eesSRCStats[4] / 3600

        # TODO: add gen fuel consumption
        # create df from generator nc files
        # get all gen power files
        '''
        dfGenP = 0
        for idx, genPFile in enumerate(glob.glob('gen*PSet*.nc')):
            genPStats, genP, genTime = loadResults(genPFile,returnTimeSeries=True) # load the file
            if idx == 0: # if the first, initiate df
                dfGenP = pd.DataFrame([genTime,genP],columns=['time',str(idx)])
            else:
                dfGenP[str(idx)] = genP # assign new column 
            

        # save into SQL db
        os.chdir(projectSetDir)
        conn = sqlite3.connect('set' + str(setNum) + 'Results.db')
        # check if table exists, tableName will be empty if not
        tableName = pd.read_sql_query("SELECT name FROM sqlite_master WHERE type='table' AND name='Results';", conn)
        # if not initialized
        if tableName.empty:
            # create
            df = pd.DataFrame(columns = ['Generator Import kWh','Generator Charging kWh','Generator Switching','Generator Loading','Generator Fuel Consumption kg','Wind Power Import kWh','Wind Power Spill kWh','Wind Power Charging kWh','Energy Storage Discharge kWh','Energy Storage Charge kWh','Energy Storage SRC kWh'])
        else:
            df = pd.read_sql_query('select * from Results',conn)
        '''

        # add row for this run
        df.loc[runNum] = [
            genPTot, genPch, genSw, genLoadingMean, genCapacityMean,
            genFuelCons, genTimeOff, genTimeRunTot, genRunTimeRunTotkWh,
            genOverLoadingTime, genOverLoadingkWh, wtgPImportTot, wtgPspillTot,
            wtgPchTot, eessPdisTot, eessPchTot, eessSRCTot,
            eessOverLoadingTime, eessOverLoadingkWh, tessPTot
        ]

    dfResult = pd.concat([dfAttr, df], axis=1, join='inner')

    os.chdir(projectSetDir)
    conn = sqlite3.connect('set' + str(setNum) + 'Results.db')
    dfResult.to_sql('Results', conn, if_exists="replace",
                    index=False)  # write to table compAttributes in db
    conn.close()
    dfResult.to_csv('Set' + str(setNum) + 'Results.csv')  # save a csv version

    # make pdfs
    # generator overloading
    # get all simulations that had some generator overloading
    genOverloadingSims = [x for x in genOverLoading if len(x) > 0]
    if len(genOverloadingSims) > 0:
        maxbin = max(max(genOverloadingSims))
        minbin = min(min(genOverloadingSims))
        genOverLoadingPdf = [[]] * len(genOverLoading)
        for idx, gol in enumerate(genOverLoading):
            genOverLoadingPdf[idx] = np.histogram(gol,
                                                  10,
                                                  range=(minbin, maxbin))
    else:
        genOverLoadingPdf = []
    outfile = open('genOverLoadingPdf.pkl', 'wb')
    pickle.dump(genOverLoadingPdf, outfile)
    outfile.close()

    # eess overloading
    eessOverLoadingSims = [x for x in eessOverLoading if len(x) > 0]
    if len(eessOverLoadingSims) > 0:
        maxbin = max(max(eessOverLoadingSims))
        minbin = min(min(eessOverLoadingSims))
        eessOverLoadingPdf = [[]] * len(eessOverLoading)
        for idx, eol in enumerate(eessOverLoading):
            eessOverLoadingPdf[idx] = np.histogram(eol,
                                                   10,
                                                   range=(minbin, maxbin))
    else:
        eessOverLoadingPdf = []
    outfile = open('eessOverLoadingPdf.pkl', 'wb')
    pickle.dump(eessOverLoadingPdf, outfile)
    outfile.close()
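
A sketch of reading the pickled overloading histograms back in; the file name matches what getRunMetaData writes, and each list entry is the (counts, bin_edges) pair np.histogram returns for one run:

import pickle

with open('genOverLoadingPdf.pkl', 'rb') as infile:
    genOverLoadingPdf = pickle.load(infile)
if genOverLoadingPdf:  # the list is empty when no run overloaded
    counts, binEdges = genOverLoadingPdf[0]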
Example #8
import copy

fileName = os.path.join(
    here, "../../GBSProjects/StMary/InputData/Setup/StMarySetup.xml")

#print('Select the xml project setup file')
#root = tk.Tk()
#root.withdraw()
#root.attributes('-topmost',1)
#fileName = filedialog.askopenfilename()
# get the setup Directory

setupDir = os.path.dirname(fileName)
# Village name

Village = readXmlTag(fileName, 'project', 'name')[0]
# input specification
#input specification can be for multiple input files or a single file in AVEC format.
inputSpecification = readXmlTag(fileName, 'inputFileFormat', 'value')
inputDictionary = {}
#filelocation is the raw timeseries file.
#if multiple files specified look for raw_wind directory
# input a list of subdirectories under the GBSProjects directory
fileLocation = readXmlTag(fileName, 'inputFileDir', 'value')
inputDictionary['fileLocation'] = [
    os.path.join('../../GBSProjects', *x) for x in fileLocation
]
# file type
inputDictionary['fileType'] = readXmlTag(fileName, 'inputFileType', 'value')

inputDictionary['outputInterval'] = readXmlTag(fileName, 'timeStep', 'value')
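
A quick illustration of what the inputFileDir path join above produces (hypothetical tag value; POSIX separators shown):

import os

fileLocation = [['StMary', 'InputData', 'TimeSeriesData', 'RawData']]  # hypothetical
paths = [os.path.join('../../GBSProjects', *x) for x in fileLocation]
# -> ['../../GBSProjects/StMary/InputData/TimeSeriesData/RawData']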
Example #9
    def loadFixData(self, setupFile):

        from GBSInputHandler.getUnits import getUnits
        from GBSInputHandler.fixBadData import fixBadData
        from GBSInputHandler.fixDataInterval import fixDataInterval

        inputDictionary = {}
        Village = readXmlTag(setupFile, 'project', 'name')[0]
        # input specification


        # input a list of subdirectories under the GBSProjects directory
        fileLocation = readXmlTag(setupFile, 'inputFileDir', 'value')
        #fileLocation = os.path.join(*fileLocation)
        #fileLocation = os.path.join('/', fileLocation)
        #inputDictionary['fileLocation'] = [os.path.join('../../GBSProjects', *x) for x in fileLocation]
        inputDictionary['fileLocation'] = fileLocation
        # file type
        fileType = readXmlTag(setupFile, 'inputFileType', 'value')
        outputInterval = readXmlTag(setupFile, 'timeStep', 'value') + \
                         readXmlTag(setupFile, 'timeStep', 'unit')
        inputInterval = readXmlTag(setupFile, 'inputTimeStep', 'value') + \
                        readXmlTag(setupFile, 'inputTimeStep', 'unit')
        inputDictionary['timeZone'] = readXmlTag(setupFile,'timeZone','value')
        inputDictionary['fileType'] = readXmlTag(setupFile, 'inputFileType', 'value')
        inputDictionary['outputInterval'] = readXmlTag(setupFile, 'timeStep', 'value')
        inputDictionary['outputIntervalUnit'] = readXmlTag(setupFile, 'timeStep', 'unit')
        inputDictionary['inputInterval'] = readXmlTag(setupFile, 'inputTimeStep', 'value')
        inputDictionary['inputIntervalUnit'] = readXmlTag(setupFile, 'inputTimeStep', 'unit')
        inputDictionary['runTimeSteps'] = readXmlTag(setupFile,'runTimeSteps','value')
        # get date and time values
        inputDictionary['dateColumnName'] = readXmlTag(setupFile, 'dateChannel', 'value')
        inputDictionary['dateColumnFormat'] = readXmlTag(setupFile, 'dateChannel', 'format')
        inputDictionary['timeColumnName'] = readXmlTag(setupFile, 'timeChannel', 'value')
        inputDictionary['timeColumnFormat'] = readXmlTag(setupFile, 'timeChannel', 'format')
        inputDictionary['utcOffsetValue'] = readXmlTag(setupFile, 'inputUTCOffset', 'value')
        inputDictionary['utcOffsetUnit'] = readXmlTag(setupFile, 'inputUTCOffset', 'unit')
        inputDictionary['dst'] = readXmlTag(setupFile, 'inputDST', 'value')
        flexibleYear = readXmlTag(setupFile, 'flexibleYear', 'value')
        inputDictionary['flexibleYear'] = [x.lower() in ('true', 't') for x in flexibleYear]

        # combine values with their units into single strings (e.g. '15' + 'S' -> '15S')
        for idx in range(len(inputDictionary['outputInterval'])):  # there should only be one output interval specified
            if len(inputDictionary['outputIntervalUnit']) > 1:
                inputDictionary['outputInterval'][idx] += inputDictionary['outputIntervalUnit'][idx]
            else:
                inputDictionary['outputInterval'][idx] += inputDictionary['outputIntervalUnit'][0]

        for idx in range(len(inputDictionary['inputInterval'])):  # for each group of input files
            if len(inputDictionary['inputIntervalUnit']) > 1:
                inputDictionary['inputInterval'][idx] += inputDictionary['inputIntervalUnit'][idx]
            else:
                inputDictionary['inputInterval'][idx] += inputDictionary['inputIntervalUnit'][0]

        # get data units and header names
        inputDictionary['columnNames'], inputDictionary['componentUnits'], \
        inputDictionary['componentAttributes'], inputDictionary['componentNames'], inputDictionary['useNames'] = getUnits(Village, os.path.dirname(setupFile))

        # read time series data; combine with wind data if the files are separate
        df, listOfComponents = mergeInputs(inputDictionary)

        # check the timespan of the dataset; if it exceeds 1 year, ask for limiting dates
        minDate = min(df.index)
        maxDate = max(df.index)
        limiters = inputDictionary['runTimeSteps']

        if ((maxDate - minDate) > pd.Timedelta(days=365)) and (limiters == ['all']):
            newdates = self.DatesDialog(minDate, maxDate)
            m = newdates.exec_()
            if m == 1:
                inputDictionary['runTimeSteps'] = [pd.to_datetime(newdates.startDate.text()),
                                                   pd.to_datetime(newdates.endDate.text())]
                # TODO: write to the setup file so it can be archived

        # now fix the bad data
        df_fixed = fixBadData(df, os.path.dirname(setupFile), listOfComponents,
                              inputDictionary['inputInterval'], inputDictionary['runTimeSteps'])

        # fix the timestamp intervals
        print('fixing data timestamp intervals to %s' % inputDictionary['outputInterval'])
        df_fixed_interval = fixDataInterval(df_fixed, inputDictionary['outputInterval'])
        df_fixed_interval.preserve(os.path.dirname(setupFile))
        return df_fixed_interval, listOfComponents
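
The value/unit concatenation in loadFixData produces strings such as '15' + 'S' -> '15S'. The same pattern in isolation (pairUnits is a hypothetical helper; as in the loops above, a single unit entry applies to every value):

def pairUnits(values, units):
    # pair each value with its unit; reuse the lone unit if only one is given
    if len(units) > 1:
        return [v + u for v, u in zip(values, units)]
    return [v + units[0] for v in values]

print(pairUnits(['15'], ['S']))       # ['15S']
print(pairUnits(['1', '60'], ['S']))  # ['1S', '60S']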
Example #10
import itertools
import os
import sqlite3
from shutil import copyfile, rmtree

import pandas as pd

# assumption: the helper imports are not shown in the original snippet; the
# module paths follow the layout used in the other examples, and
# generateInputFile is the project helper shown earlier
from GBSAnalyzer.DataRetrievers.readXmlTag import readXmlTag
from GBSAnalyzer.DataWriters.writeXmlTag import writeXmlTag


def generateRuns(projectSetDir):
    here = os.getcwd()
    os.chdir(projectSetDir)  # change to the directory for this set of simulations
    # get the set number from the directory name, e.g. 'Set4' -> '4'
    dir_path = os.path.basename(projectSetDir)
    setNum = str(dir_path[3:])
    # get the project name from the directory two levels up
    os.chdir('../..')
    projectDir = os.getcwd()
    projectName = os.path.basename(projectDir)

    os.chdir(projectSetDir)

    # load the file with the list of different component attributes
    compName = readXmlTag(projectName + 'Set'+str(setNum) + 'Attributes.xml', ['compAttributeValues', 'compName'], 'value')
    compTag = readXmlTag(projectName + 'Set' + str(setNum) + 'Attributes.xml', ['compAttributeValues', 'compTag'], 'value')
    compAttr = readXmlTag(projectName + 'Set' + str(setNum) + 'Attributes.xml', ['compAttributeValues', 'compAttr'], 'value')
    compValue = readXmlTag(projectName + 'Set' + str(setNum) + 'Attributes.xml', ['compAttributeValues', 'compValue'], 'value')

    # check if wind turbine values were varied from base case. If so, will set the 'recalculateWtgPAvail' tag to 1
    # for each wind turbine
    #isWtg = any(['wtg' in x for x in compName])

    valSplit = []  # list of lists of attribute values
    for val in compValue:  # iterate through all component attributes
        if not isinstance(val, list):  # readXmlTag returns a string or a list, depending on commas; we need lists
            val = [val]
        valSplit.append(val)

    # get all possible combinations of the attribute values
    runValues = list(itertools.product(*valSplit))

    # get headings
    heading = [x + '.' + compTag[idx] + '.' + compAttr[idx] for idx, x in enumerate(compName)]

    # get the setup information for this set of simulations
    setupTag = readXmlTag(projectName + 'Set'+str(setNum) + 'Attributes.xml', ['setupAttributeValues', 'setupTag'], 'value')
    setupAttr = readXmlTag(projectName + 'Set'+str(setNum) + 'Attributes.xml', ['setupAttributeValues', 'setupAttr'], 'value')
    setupValue = readXmlTag(projectName + 'Set'+str(setNum) + 'Attributes.xml', ['setupAttributeValues', 'setupValue'],
                           'value')

    # copy the setup xml file to this simulation set directory and make the specified changes
    # if Setup dir does not exist, create
    setupFile = os.path.join(projectSetDir, 'Setup', projectName + 'Set' + str(setNum) + 'Setup.xml')
    if os.path.exists(os.path.join(projectSetDir,'Setup')):
        inpt = input("This simulation set already has runs generated, overwrite? y/n")
        if inpt.lower() == 'y':
            generateFiles = 1
        else:
            generateFiles = 0
    else:
        generateFiles = 1
    if generateFiles == 1:
        if os.path.exists(os.path.join(projectSetDir,'Setup')):
            rmtree(os.path.join(projectSetDir,'Setup'))
        os.mkdir(os.path.join(projectSetDir,'Setup'))
        # copy setup file
        copyfile(os.path.join(projectDir,'InputData','Setup',projectName+'Setup.xml'), setupFile)
        # make the changes to it defined in projectSetAttributes
        for idx, val in enumerate(setupValue):  # iterate through all setup attribute values
            tag = setupTag[idx].split('.')
            attr = setupAttr[idx]
            value = val
            writeXmlTag(setupFile, tag, attr, value)

        # make changes to each controller input file
        for controlName in ['predictLoad', 'predictWind', 'reDispatch', 'getMinSrc',
                            'genDispatch', 'genSchedule', 'wtgDispatch', 'eesDispatch',
                            'tesDispatch']:
            generateInputFile(projectDir, projectSetDir, projectName, setNum, setupFile, controlName)


    # get the components to be run
    components = readXmlTag(setupFile, 'componentNames', 'value')

    # generate the run directories
    runValuesUpdated = list(runValues)  # copy; entries that name another tag are resolved below
    for run, val in enumerate(runValues): # for each simulation run
        # check if there already is a directory for this run number.
        runDir = os.path.join(projectSetDir,'Run'+str(run))
        compDir = os.path.join(runDir, 'Components')
        if not os.path.isdir(runDir):
            os.mkdir(runDir)   # make run directory
            os.mkdir(compDir)  # make component directory
        # copy component descriptors  and fillSetInfo
        for cpt in components: # for each component
            # copy from main input data
            copyfile(os.path.join(projectDir, 'InputData', 'Components', cpt + 'Descriptor.xml'),
                     os.path.join(compDir, cpt + 'Set' + str(setNum) + 'Run' + str(run) + 'Descriptor.xml'))


        # make changes
        for idx, value in enumerate(val):
            compFile = os.path.join(compDir, compName[idx] + 'Set' + str(setNum) + 'Run' + str(run) + 'Descriptor.xml')
            tag = compTag[idx].split('.')
            attr = compAttr[idx]
            # check if value is a tag in the xml document
            tryTagAttr = value.split('.')  # split into tag and attribute
            if len(tryTagAttr) > 1:
                # separate into component, tags and attribute; there may be multiple tags
                tryComp = tryTagAttr[0]
                tryTag = tryTagAttr[1]
                for i in tryTagAttr[2:-1]: # if there are any other tag values
                    tryTag = tryTag + '.' + i
                tryAttr = tryTagAttr[-1]  # the attribute
                if tryComp in compName:
                    idxComp = [i for i, x in enumerate(compName) if x == tryComp]
                    idxTag = [i for i, x in enumerate(compTag) if x == tryTag]
                    idxAttr = [i for i, x in enumerate(compAttr) if x == tryAttr]
                    idxVal = list(set(idxTag).intersection(idxAttr).intersection(idxComp))
                    value = val[idxVal[0]] # choose the first match, if there are multiple
                    a = list(runValuesUpdated[run]) # change values from tuple
                    a[idx] = value
                    runValuesUpdated[run] = tuple(a)
                else:
                    # check if it refers to a tag in the same component
                    # separate into tags and attribute; there may be multiple tags
                    tryTag = tryTagAttr[0]
                    for i in tryTagAttr[1:-1]:  # if there are any other tag values
                        tryTag = tryTag + '.' + i
                    if tryTag in compTag:
                        tryAttr = tryTagAttr[-1]  # the attribute
                        idxTag = [i for i, x in enumerate(compTag) if x == tryTag]
                        idxAttr = [i for i, x in enumerate(compAttr) if x == tryAttr]
                        idxVal = list(set(idxTag).intersection(idxAttr))
                        value = val[idxVal[0]]  # choose the first match, if there are multiple
                        a = list(runValuesUpdated[run])  # change values from tuple
                        a[idx] = value
                        runValuesUpdated[run] = tuple(a)
            writeXmlTag(compFile, tag, attr, value)

            # if this is a wind turbine, its values are being altered and the wind
            # power time series will need to be recalculated; note that tag was
            # split on '.' above, so compare its last element
            if 'wtg' in compName[idx]:
                if tag[-1] in ('powerCurveDataPoints', 'cutInWindSpeed', 'cutOutWindSpeedMax',
                               'cutOutWindSpeedMin', 'POutMaxPa'):
                    writeXmlTag(compFile, 'recalculateWtgPAvail', 'value', 'True')

    # create a dataframe and save it as SQL
    df = pd.DataFrame(data=runValuesUpdated, columns=heading)
    # add columns indicating whether each run has started or finished (0 = no,
    # 1 = yes); useful when multiple processors share the runs, so simulations
    # are not rerun
    df = df.assign(started=[0] * len(runValues))
    df = df.assign(finished=[0] * len(runValues))
    conn = sqlite3.connect('set' + str(setNum) + 'ComponentAttributes.db')  # create sql database

    try:
        df.to_sql('compAttributes', conn, if_exists="replace", index=False)  # write to table compAttributes in db
    except sqlite3.Error as er:
        print(er)
        print('You need to delete the existing set ComponentAttributes.db before creating a new components attribute table')

    conn.close()
    os.chdir(here)
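
To see the run enumeration in generateRuns in isolation (a sketch with made-up attribute values and headings): itertools.product expands the per-attribute value lists into one tuple per run, and the started/finished columns let multiple processes claim runs without repeating work.

import itertools

import pandas as pd

valSplit = [['100', '200'], ['0.5'], ['10', '20']]  # hypothetical attribute values
runValues = list(itertools.product(*valSplit))      # 2 * 1 * 2 = 4 runs
heading = ['gen1.POutMaxPa.value',                  # hypothetical headings
           'ees0.ratedDuration.value',
           'wtg1.POutMaxPa.value']

df = pd.DataFrame(data=runValues, columns=heading)
df = df.assign(started=[0] * len(runValues), finished=[0] * len(runValues))
print(df)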