Example #1
def updateAllinOneXML(treePath, simulationYear, saveToCloud=False):

    xmlRoot = treePath.getroot()

    # Update global parameters (e.g. climate and construction data) that are independent of building-specific data
    updateGlobalXMLparameters(xmlRoot, simulationYear)

    # Update individual building parameters
    for buildingRoot in xmlRoot.iter('Building'):
        buildingID = buildingRoot.get('key')
        # retrieve building parameter values from input space by buildingID
        buildingInfo = queryBuildingInfo(buildingID)

        # update only buildings whose data exists in the DB and that are selected for simulation
        if len(buildingInfo) == 1:
            if buildingInfo['post_Tmin'].any():
                Tmin = float(buildingInfo['post_Tmin'])
                Ninf = float(buildingInfo['post_Ninf'])
            else:
                Tmin = float(buildingInfo['c_Tmin'])
                Ninf = float(buildingInfo['c_Ninf'])
            # get building construction type id according to year range
            constructionIDs = getBuildingConstructionType(xmlRoot, buildingInfo)
            updateBuildingData(buildingRoot, buildingInfo, constructionIDs, Tmin, Ninf)

    if saveToCloud:
        outputPath = globalParameters().cloudDirectory + calibratedXML
    else:
        outputPath = globalParameters().localCalibratedDirectory + calibratedXML

    treePath.write(outputPath)
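
For reference, despite its name, treePath is an xml.etree.ElementTree.ElementTree instance (the function calls .getroot() and .write() on it). A minimal usage sketch, assuming a hypothetical scene file scene.xml and an illustrative simulation year:

import xml.etree.ElementTree as ET

# Parse the scene into an ElementTree, update it for one simulation year,
# and write the calibrated XML to the local output directory.
tree = ET.parse('scene.xml')  # 'scene.xml' is an illustrative placeholder
updateAllinOneXML(tree, 2017, saveToCloud=False)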
Example #2
def main():
    print "Start inserting model inputs indo DB..."
    conn = globalParameters().conn
    gridSize = globalParameters().gridSize

    newDB(conn)
    insertInputs(conn, gridSize)

    print "Model inputs successfully inserted into DB"
Example #3
def trainingPhase():
    gridSize = globalParameters().gridSize
    # record how many training years each postcode (PC6) has gone through
    trainingPC6counts = dict()
    # map each PC6 to its probability grid, refined from prior to posterior over the training years
    pc6_prob = dict()
    pc6_archetype = dict()

    for year in trainingYrs:
        # pre-processing input data
        path = gParameter.fileDirectory + gParameter.filePrefix + str(year) + '.csv'
        df = pd.read_csv(path, delimiter=',', index_col=False)
        cleaned_df = detectOutlier(df, threshold, remove=True)

        # record how many years of training each PC6 has accumulated so far
        pc6TrainedCounts = trainingCounts(cleaned_df, trainingPC6counts, year)

        for pc6, archetype, pc6_metered in zip(cleaned_df.iloc[:, 0],
                                               cleaned_df.iloc[:, 1],
                                               cleaned_df.iloc[:, 2]):
            # first time this PC6 is trained: initialize a uniform prior of
            # 1/gridSize**2 over the gridSize x gridSize parameter grid
            if pc6_prob.get(pc6) is None:
                pc6_prob[pc6] = [(1.0 / gridSize)**2] * (gridSize * gridSize)
                pc6_archetype[pc6] = archetype

            # compute this year's posterior for the postcode and carry it forward as next year's prior
            prior = pc6_prob[pc6]
            posterior = computePosterior(cleaned_df, pc6, pc6_metered,
                                         archetype, prior, gridSize)
            pc6_prob[pc6] = posterior

    return pc6_prob, pc6TrainedCounts, pc6_archetype
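
The prior-to-posterior step above is a plain Bayes update over the gridSize x gridSize parameter grid. The project's computePosterior is defined elsewhere; as a minimal sketch of the arithmetic, assuming a Gaussian likelihood of the metered value around each grid cell's simulated consumption:

import numpy as np
from scipy.stats import norm

def bayesUpdate(prior, simulated, metered, sigma):
    # posterior is proportional to likelihood * prior, renormalized to sum to 1
    # prior: per-cell probabilities; simulated: per-cell simulated values;
    # metered: observed PC6 consumption; sigma: assumed noise (illustrative)
    likelihood = norm.pdf(metered, loc=np.asarray(simulated), scale=sigma)
    posterior = likelihood * np.asarray(prior)
    return (posterior / posterior.sum()).tolist()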
Example #4

import os
from shutil import copy
import csv
import time
from globalSetting import globalParameters

globalVariable = globalParameters()
conn = globalVariable.conn
caseNums = globalVariable.caseNum


def createDB(conn, simulationYear):
    # Table names cannot be bound as query parameters, so the year is
    # interpolated into the identifier; simulationYear must come from
    # trusted configuration.
    table = f'public."Calibration_{simulationYear}results"'
    cur = conn.cursor()
    cur.execute(f"""
        DROP TABLE IF EXISTS {table};
        CREATE TABLE {table}
        (
            "buildingID" character varying,
            "case1_annual_h_wh" DOUBLE PRECISION
        )
        WITH (OIDS = FALSE)
        TABLESPACE pg_default;
        ALTER TABLE {table}
            OWNER to postgres;
        """)
    cur.close()
    conn.commit()
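
A minimal usage sketch; in this project the connection comes from globalParameters().conn, so the psycopg2 DSN below is only an illustrative placeholder:

import psycopg2

# Connect (placeholder credentials) and create the per-year results table.
conn = psycopg2.connect(dbname='postgres', user='postgres',
                        password='secret', host='localhost')
createDB(conn, 2017)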

Example #5
import pandas as pd
import numpy as np
from scipy.stats import norm
from globalSetting import globalParameters

# import global parameter setting
gParameter = globalParameters()
trainingYrs = gParameter.trainingYears
threshold = gParameter.threshold
conn = gParameter.conn
p_Tmin = gParameter.p_Tmin
p_Ninf = gParameter.p_Ninf


# Check whether each PC6's metered energy value falls within its simulated range (plus/minus threshold)
def detectOutlier(df, threshold, remove=True):
    outlierIDs = []
    for i in range(len(df)):
        pc6consumption = df.iloc[i, 3]
        simMax = max(df.iloc[i, 4:]) + threshold
        simMin = min(df.iloc[i, 4:]) - threshold
        if not simMin <= pc6consumption <= simMax:
            outlierIDs.append(i)
    if remove:
        df = df.drop(outlierIDs).reset_index(drop=True)
    # drop the leading index column so the caller gets the same layout
    # whether or not outliers were removed
    return df.iloc[:, 1:]
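
A small illustrative check of detectOutlier; the column names below are made up, only the positional layout matters (column 3 holds the metered value, columns 4 and up the simulated cases):

import pandas as pd

df = pd.DataFrame({
    'id': [0, 1], 'pc6': ['1011AB', '1012CD'], 'archetype': [1, 2],
    'metered': [100.0, 500.0],                      # column index 3
    'sim1': [90.0, 110.0], 'sim2': [105.0, 120.0],  # columns 4 and up
})
# With threshold=10, row 1 (metered=500) falls outside [100, 130] and is
# dropped; the returned frame also omits the leading 'id' column.
print(detectOutlier(df, threshold=10))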


# Count and collect the training years of each postcode
def trainingCounts(cleaned_df, trainingPC6counts, simulationYear):