Example #1
File: calibrate.py  Project: bwintcig/omf
	def runPowerflowIter(tree,scadaSubPower, iterationTimes):
		'''Runs powerflow once, then iterates.'''
		print "Running calibration powerflow #1."
		output = gridlabd.runInFilesystem(tree, keepFiles=True, workDir=gridlabdDir)
		outRealPow = output["caliSub.csv"]["measured_real_power"]
		outImagPower = output["caliSub.csv"]["measured_reactive_power"]
		outAppPowerKw = [(x[0]**2 + x[1]**2)**0.5/1000 for x in zip(outRealPow, outImagPower)]
		lastFile = "subScada.player"
		nextFile = "subScadaCalibrated.player"
		nextPower = outAppPowerKw
		for i in range(1, iterationTimes+1):
			SCAL_CONST = sum(scadaSubPower[1:simLength])/sum(nextPower[1:simLength])
			print "Running calibration powerflow (iteration", str(i+1), "of", iterationTimes+1,") (SCAL_CONST: ", SCAL_CONST,")"
			newPlayData = []
			with open(pJoin(gridlabdDir, lastFile), "r") as playerFile:
				for line in playerFile:
					(key,val) = line.split(',')
					newPlayData.append(str(key) + ',' + str(float(val)*SCAL_CONST) + "\n")
			with open(pJoin(gridlabdDir, nextFile), "w") as playerFile:
				for row in newPlayData:
					playerFile.write(row)
			tree[playerKey]["file"] = nextFile
			tree[outputRecorderKey]["file"] = "caliSubCheck.csv"
			nextOutput = gridlabd.runInFilesystem(tree, keepFiles=True, workDir=gridlabdDir)
			outRealPow2nd = nextOutput["caliSubCheck.csv"]["measured_real_power"]
			outImagPower2nd = nextOutput["caliSubCheck.csv"]["measured_reactive_power"]
			nextAppKw = [(x[0]**2 + x[1]**2)**0.5/1000
				for x in zip(outRealPow2nd, outImagPower2nd)]
			lastFile = nextFile
			nextFile = "subScadaCalibrated"+str(i)+".player"
			nextPower = nextAppKw # Use the latest run's apparent power when computing the next SCAL_CONST.
		return outRealPow, outRealPow2nd, lastFile
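In short: each pass computes SCAL_CONST as the ratio of summed SCADA power to summed simulated apparent power over the simulation window, rescales every value in the previous .player file by that constant, and re-runs GridLAB-D against the rescaled player, so successive runs are nudged toward the measured substation load.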
Example #2
File: geo.py  Project: dpinney/omf
	def tiles(zoom, x, y):
		filename = pJoin(pathToTiles, zoom, x, y + '.png')
		default = pJoin(pathToTiles,'default.png')
		if os.path.isfile(filename):
			return send_file(filename)
		else:
			return send_file(default)
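A hedged sketch of how this handler is typically exposed as a web endpoint (the Flask app, the route decorator, and the pathToTiles value are assumptions for illustration; only send_file and the fallback logic come from the snippet above):

	import os
	from os.path import join as pJoin
	from flask import Flask, send_file

	app = Flask(__name__)
	pathToTiles = "static/tiles"  # assumed tile-cache location

	@app.route("/tiles/<zoom>/<x>/<y>.png")
	def tiles(zoom, x, y):
		# Serve the requested tile, or a default image when the tile is missing.
		filename = pJoin(pathToTiles, zoom, x, y + '.png')
		default = pJoin(pathToTiles, 'default.png')
		return send_file(filename if os.path.isfile(filename) else default)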
Example #3
def _tests():
    # Variables
    from .. import filesystem
    fs = filesystem.Filesystem().fs
    workDir = pJoin(__metaModel__._omfDir, "data", "Model")
    inData = {
        "batteryEfficiency": "92",
        "cellCapacity": "100",
        "discountRate": "2.5",
        "created": "2015-06-12 17:20:39.308239",
        "dischargeRate": "50",
        "modelType": "energyStorage",
        "chargeRate": "50",
        "demandCurve": fs.open(pJoin(__metaModel__._omfDir, "scratch", "batteryModel", "OlinBeckenhamScada.csv")).read(),
        "cellCost": "25000",
        "cellQuantity": "3",
        "runTime": "0:00:03",
        "projYears": "10",
        "demandCharge": "50"}
    modelLoc = pJoin(workDir, "admin", "Automated energyStorage Testing")
    # Blow away old test results if necessary.
    try:
        shutil.rmtree(modelLoc)
    except:
        # No previous test results.
        pass
    # No-input template.
    renderAndShow(template, fs)
    # Run the model.
    run(modelLoc, inData, fs)
    # Show the output.
    renderAndShow(template, fs, modelDir=modelLoc)
Example #4
def renderTemplate(template, modelDir="", absolutePaths=False, datastoreNames={}):
	''' Render the model template to an HTML string.
	By default render a blank one for new input.
	If modelDir is valid, render results post-model-run.
	If absolutePaths, the HTML can be opened without a server. '''
	try:
		allInputData = open(pJoin(modelDir,"allInputData.json")).read()
	except IOError:
		allInputData = None
	try:
		allOutputData = open(pJoin(modelDir,"allOutputData.json")).read()
	except IOError:
		allOutputData = None
	if absolutePaths:
		# Parent of current folder.
		pathPrefix = __metaModel__._omfDir
	else:
		pathPrefix = ""
	feederList = []
	feederIDs = []
	try:
		inputDict = json.load(open(pJoin(modelDir, "allInputData.json")))
		for key in inputDict:
			if key.startswith("feederName"):
				feederIDs.append(key) 
				feederList.append(inputDict[key])
	except IOError:
		pass
	return template.render(allInputData=allInputData,
		allOutputData=allOutputData, modelStatus=getStatus(modelDir), pathPrefix=pathPrefix,
		datastoreNames=datastoreNames, feederIDs = feederIDs, feederList = feederList)
Example #5
def run(modelDir, inputDict):
	''' Run the model in a separate process. web.py calls this to run the model.
	This function will return fast, but results take a while to hit the file system.'''
	with open(pJoin(modelDir,'allInputData.json')) as inputFile:
		inJson = json.load(inputFile)
	feederList = []
	for key in inJson.keys():
		if 'feederName' in key: 
			inputDict[key] = inJson[key]
			feederList.append(inJson[key])
	print "feeders read:",feederList
	# Check whether the model directory exists.
	if not os.path.isdir(modelDir):
		os.makedirs(modelDir)
		inputDict["created"] = str(datetime.datetime.now())
	# MAYBEFIX: remove this data dump. Check showModel in web.py and renderTemplate()
	with open(pJoin(modelDir, "allInputData.json"),"w") as inputFile:
		json.dump(inputDict, inputFile, indent = 4)
	# If we are re-running, remove output:
	try:
		os.remove(pJoin(modelDir,"allOutputData.json"))
	except:
		pass
	backProc = multiprocessing.Process(target = runForeground, args = (modelDir, inputDict,))
	backProc.start()
	print "SENT TO BACKGROUND", modelDir
	with open(pJoin(modelDir, "PPID.txt"),"w+") as pPidFile:
		pPidFile.write(str(backProc.pid))
Example #6
def cancel(modelDir):
    ''' Try to cancel a currently running model. '''
    # Kill the GLD process if it has been created.
    logger.info('Canceling running model... modelDir: %s', modelDir)
    try:
        with open(pJoin(modelDir, "PID.txt"), "r") as pidFile:
            pid = int(pidFile.read())
            # print "pid " + str(pid)
            os.kill(pid, 15)
            logger.info("PID KILLED")
    except:
        pass
    # Kill runForeground process
    try:
        with open(pJoin(modelDir, "PPID.txt"), "r") as pPidFile:
            pPid = int(pPidFile.read())
            os.kill(pPid, 15)
            logger.info("PPID KILLED")
    except:
        pass
    # Remove PID, PPID, and allOutputData files if they exist.
    try:
        for fName in os.listdir(modelDir):
            if fName in ["PID.txt", "PPID.txt", "allOutputData.json"]:
                os.remove(pJoin(modelDir, fName))
        logger.info("CANCELED %s", modelDir)
    except:
        pass
Example #7
def _tests():
	# Variables
	inData = {"simStartDate": "2010-07-01",
		"simLengthUnits": "hours",
		"feederName1": "superModel Tomorrow",
		"modelType": "dsoSimSuite",
		"weather": "historical",
		"zipCode": "90001",
		"simLength": "10",
		"runTime": ""}
	modelLoc = pJoin(__metaModel__._omfDir,"scratch","dsoSimSuite","Output","Automated dsoSimSuite Test")
	# Blow away old test results if necessary.
	try:
		shutil.rmtree(modelLoc)
	except:
		# No previous test results.
		print 'Failed to delete old model during tests.'
	try:
		os.makedirs(modelLoc)
	except: pass
	shutil.copyfile(pJoin(__metaModel__._omfDir,"scratch","dsoSimSuite", inData["feederName1"]+'.omd'),pJoin(modelLoc,inData["feederName1"]+'.omd'))	# No-input template.
	renderAndShow(template)
	# Run the model.
	runForeground(modelLoc, inData)
	## Cancel the model.
	# time.sleep(2)
	# cancel(modelLoc)
	# Show the output.
	renderAndShow(template, modelDir=modelLoc)
Example #8
def _tests():
	print "Beginning to test calibrate.py"
	workDir = tempfile.mkdtemp()
	try: os.mkdir(pJoin(workDir,"gridlabD"))
	except: pass	
	print "Currently working in: ", workDir
	scadaPath = pJoin("scratch","uploads", "FrankScada.csv")
	feederPath = pJoin("scratch", "publicFeeders","ABEC Frank pre calib.omd")
	simDate = dt.datetime.strptime("4/13/2011 09:00:00", "%m/%d/%Y %H:%M:%S") # Spring peak.
	simStartDate = {"Date":simDate,"timeZone":"PST"}
	simLength = 24
	simLengthUnits = 'hours'
	error, trim = (0.05, 5), 1
	print "Simulation Date:", simStartDate['Date'], "for", str(simLength), "hours."
	voltVectorA = [random.uniform(7380,7620) for x in range(0,8760)]
	voltVectorC = [-random.uniform(3699,3780) for x in range(0, 8760)]
	voltVectorB = [-random.uniform(3699,3795) for x in range(0, 8760)]
	print "Running gridlabD with voltage players."
	voltFeederPath, outcome = attachVolts(workDir, feederPath, voltVectorA, voltVectorB, voltVectorC, simStartDate, simLength, simLengthUnits)
	try: 
		assert None == omfCalibrate(workDir, voltFeederPath, scadaPath, simStartDate, simLength, simLengthUnits, "FBS", error, trim), "feeder calibration failed"
		print "\n  Success! Ran calibrate with voltage players!"
	except: 
		print "Failed to run calibrate with voltage players. Running only calibrate now."
		assert None == omfCalibrate(workDir, feederPath, scadaPath, simStartDate, simLength, simLengthUnits, "FBS", error, trim), "feeder calibration failed"
		print "\n  Success! Ran calibrate!"
Example #9
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		"batteryEfficiency": "92",
		"inverterEfficiency": "97.5",
		"cellCapacity": "7",
		"discountRate": "2.5",
		"created": "2015-06-12 17:20:39.308239",
		"dischargeRate": "5",
		"modelType": modelName,
		"chargeRate": "5",
		"demandCurve": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","FrankScadaValidCSV_Copy.csv")).read(),
		"fileName": "FrankScadaValidCSV_Copy.csv",
		"priceCurve": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","priceCurve_Copy.csv")).read(),
		"fileNamed":"priceCurve_Copy.csv",
		"cellCost": "7140",
		"cellQuantity": "10",
		"runTime": "0:00:03",
		"projYears": "15",
		"chargePriceThreshold": "0.07",
		"dischargePriceThreshold":"0.15",
		"dodFactor":"100",
		"batteryCycleLife": "5000"
	}
	return __neoMetaModel__.new(modelDir, defaultInputs)
Example #10
    def _GetRemoteFile(self, localFolder, remoteFolder, collectedFileName, copyProtocol):
        '''
        Gets: localFolder, remoteFolder, collectedFileName.
        Returns: N/A
        '''
        remoteFilePath = pJoin(remoteFolder, collectedFileName)
        localFilePath = pJoin(localFolder, collectedFileName)
        if copyProtocol == 'sftp':
            try:
                self.client.get(remoteFilePath, localFilePath)
                self.logger.info('Copied file from "%s:%s" to "%s" successfully' % (self.remoteMachine, remoteFilePath, localFilePath))
            except:
                self.logger.error('Could not get file %s from [%s] using SFTP' % (remoteFilePath, self.remoteMachine))
        elif copyProtocol == 'scp':
            scpCopy=getstatusoutput('scp -o ConnectTimeout=5 -p -P %s %s@%s:%s %s'%(self.port, self.username, self.remoteMachine, \
                    remoteFilePath, localFilePath))
            if scpCopy[0] != 0:
                self.logger.error (scpCopy[1])
                self.logger.error('Could not get file %s from [%s] using SCP'%(remoteFilePath,self.remoteMachine))
                self.logger.error(self.Errors)
                print ('%s'%self.Errors[0])

            else:
                self.logger.info('Copied file from "%s:%s" to "%s" successfuly' % (self.remoteMachine, \
                        remoteFilePath, localFilePath))
        else:
            self.logger.error('Unknown copy protocol %s; only scp and sftp are supported' % copyProtocol)
Example #11
def _tests():
    print "Full path to Gridlab executable we're using:", _addGldToPath()
    print "Testing string cleaning."
    strTestCases = [("+954.877", 954.877),
                    ("+2.18351e+006", 2183510.0),
                    ("+7244.99+1.20333e-005d", 7244.99),
                    # ("+7244.99+120d", 7245.98372204), # Fails due to float rounding but should pass.
                    ("+3.76184", 3.76184),
                    ("1", 1.0),
                    ("-32.4", -32.4),
                    ("+7200+0d", 7200.0),
                    ("+175020+003133", 0.0)]
    for (string, result) in strTestCases:
        assert _strClean(
            string) == result, "A _strClean operation failed on: " + string
    # Get a test feeder and test climate.
    print "Testing GridlabD solver."
    with open(pJoin(_omfDir, "data", "Feeder", "public", "Simple Market System.json"), "r") as feederFile:
        feederJson = json.load(feederFile)
    with open(pJoin(_omfDir, "data", "Climate", "AL-HUNTSVILLE.tmy2"), "r") as climateFile:
        tmyStr = climateFile.read()
    # Add climate in.
    feederJson["attachments"]["climate.tmy2"] = tmyStr
    testStudy = runInFilesystem(feederJson["tree"], feederJson["attachments"])
    assert testStudy != {}, "Gridlab run failed and we got blank output."
    print "GridlabD standard error:", testStudy['stderr']
    print "GridlabD standard output:", testStudy['stdout']
Example #12
def _tests():
	# Variables
	inData = {"simStartDate": "2012-04-01",
		"simLengthUnits": "hours",
		"feederName1": "superModel Tomorrow",
		"modelType": modelName,
		"zipCode": "59001",
		"simLength": "10",
		"runTime": ""}
	modelLoc = pJoin(__metaModel__._omfDir,"data","Model","admin","Automated solarEngineering Test")
	# Blow away old test results if necessary.
	try:
		shutil.rmtree(modelLoc)
	except:
		# No previous test results.
		pass
	try:
		os.makedirs(modelLoc)
	except: pass
	shutil.copyfile(pJoin(__metaModel__._omfDir,"scratch","publicFeeders", inData["feederName1"]+'.omd'),pJoin(modelLoc,inData["feederName1"]+'.omd'))
	# No-input template.
	renderAndShow(template, modelName)
	# Run the model.
	runForeground(modelLoc, inData)
	## Cancel the model.
	# time.sleep(2)
	# cancel(modelLoc)
	# Show the output.
	renderAndShow(template, modelName, modelDir=modelLoc)
Example #13
File: network.py  Project: dpinney/omf
def _secretTests():
    # Parse mat to dictionary.
    networkName = "case9"
    networkJson = parse(
        pJoin(os.getcwd(), "scratch", "transmission", "inData", "matpower6.0b1", networkName + ".m"), filePath=True
    )
    keyLen = len(networkJson.keys())
    print "Parsed MAT file with %s buses, %s generators, and %s branches." % (
        len(networkJson["bus"]),
        len(networkJson["gen"]),
        len(networkJson["branch"]),
    )
    # Use python nxgraph to add lat/lon to .omt.json.
    nxG = netToNxGraph(networkJson)
    networkJson = latlonToNet(nxG, networkJson)
    with open(pJoin(os.getcwd(), "scratch", "transmission", "outData", networkName + ".omt"), "w") as inFile:
        json.dump(networkJson, inFile, indent=4)
    print "Wrote network to: %s" % (pJoin(os.getcwd(), "scratch", "transmission", "outData", networkName + ".omt"))
    # Convert back to .mat and run matpower.
    matStr = netToMat(networkJson, networkName)
    with open(pJoin(os.getcwd(), "scratch", "transmission", "outData", networkName + ".m"), "w") as outMat:
        for row in matStr:
            outMat.write(row)
    print "Converted .omt back to .m at: %s" % (
        pJoin(os.getcwd(), "scratch", "transmission", "outData", networkName + ".m")
    )
Example #14
def _tests():
    # # First just test the charting.
    # tree = json.load(open("../data/Feeder/public/Olin Barre Geo.json")).get("tree",{})
    # chart = voltPlot(tree)
    # chart.savefig("/Users/dwp0/Desktop/testChart.png")
    # plt.show()
    # Variables
    workDir = pJoin(__metaModel__._omfDir, "data", "Model")
    inData = {"feederName1": "Olin Barre Geo", "modelType": modelName, "runTime": "", "layoutAlgorithm": "geospatial"}
    modelLoc = pJoin(workDir, "admin", "Automated voltageDrop Testing")
    # Blow away old test results if necessary.
    try:
        shutil.rmtree(modelLoc)
    except:
        # No previous test results.
        pass
    try:
        os.makedirs(modelLoc)
    except:
        pass
    with open(pJoin(modelLoc, "allInputData.json"), "w") as inputFile:
        json.dump(inData, inputFile, indent=4)
    shutil.copyfile(
        pJoin(__metaModel__._omfDir, "scratch", "publicFeeders", inData["feederName1"] + ".omd"),
        pJoin(modelLoc, inData["feederName1"] + ".omd"),
    )
    # No-input template.
    renderAndShow(template, modelName)
    # Run the model.
    run(modelLoc, inData)
    # Show the output.
    renderAndShow(template, modelName, modelDir=modelLoc)
Example #15
def renderTemplate(template, modelDir="", absolutePaths=False, datastoreNames={}):
	''' Render the model template to an HTML string.
	By default render a blank one for new input.
	If modelDir is valid, render results post-model-run.
	If absolutePaths, the HTML can be opened without a server. '''
	try:
		inJson = json.load(open(pJoin(modelDir,"allInputData.json")))
		modelPath, modelName = pSplit(modelDir)
		deepPath, user = pSplit(modelPath)
		inJson["modelName"] = modelName
		inJson["user"] = user
		allInputData = json.dumps(inJson)
	except IOError:
		allInputData = None
	try:
		allOutputData = open(pJoin(modelDir,"allOutputData.json")).read()
	except IOError:
		allOutputData = None
	if absolutePaths:
		# Parent of current folder.
		pathPrefix = __metaModel__._omfDir
	else:
		pathPrefix = ""
	try:
		inputDict = json.load(open(pJoin(modelDir, "allInputData.json")))
	except IOError:
		pass
	from web import app
	# from jinja2 import Template
	return template.render(allInputData=allInputData,
		allOutputData=allOutputData, modelStatus=getStatus(modelDir), pathPrefix=pathPrefix,
		datastoreNames=datastoreNames, csrf_token=app.jinja_env.globals["csrf_token"])
Example #16
def _tests():
	"runs local tests for dynamic CVR model"
	#creating a work directory and initializing data
	inData = { "modelName": "Automated DynamicCVR Testing",
		"modelType": "_cvrDynamic",
		"user": "******",
		"feederName": "public___ABEC Frank pre calib",
		"scadaFile": "FrankScada",
		"runTime": "",
		"capitalCost": 30000,
		"omCost": 1000,
		"wholesaleEnergyCostPerKwh": 0.06,
		"retailEnergyCostPerKwh": 0.10,
		"peakDemandCostSpringPerKw": 5.0,
		"peakDemandCostSummerPerKw": 10.0,
		"peakDemandCostFallPerKw": 6.0,
		"peakDemandCostWinterPerKw": 8.0,
		"simStart": "2011-01-01",
		"simLengthHours": 100}
	workDir = pJoin(__metaModel__._omfDir,"data","Model")
	modelDir = pJoin(workDir, inData["user"], inData["modelName"])
	# Clean up previous run.
	try:
		shutil.rmtree(modelDir)
	except:
		pass
	run(modelDir, inData)
Example #17
def _tests():
	# Variables
	workDir = pJoin(__metaModel__._omfDir,"data","Model")
	inData = {
		"batteryEfficiency": "92",
		"inverterEfficiency": "97.5",
		"cellCapacity": "7",
		"discountRate": "2.5",
		"created": "2015-06-12 17:20:39.308239",
		"dischargeRate": "5",
		"modelType": modelName,
		"chargeRate": "5",
		"demandCurve": open(pJoin(__metaModel__._omfDir,"scratch","uploads","FrankScadaValidCSV.csv")).read(),
		"fileName": "FrankScadaValidCSV.csv",
		"cellCost": "7140",
		"cellQuantity": "10",
		"dodFactor":"100",
		"avoidedCost":"2000000",
		"transformerThreshold":"6.6",
		"batteryCycleLife": "5000"
		}
	modelLoc = pJoin(workDir,"admin","Automated storageDeferral Testing")
	# Blow away old test results if necessary.
	try:
		shutil.rmtree(modelLoc)
	except:
		# No previous test results.
		pass
	# No-input template.
	renderAndShow(template, modelName)
	# Run the model.
	runForeground(modelLoc, inData)
	# Show the output.
	renderAndShow(template,modelName, modelDir = modelLoc)
Example #18
def run(modelDir, inputDict):
	''' Run the model in a separate process. web.py calls this to run the model.
	This function will return fast, but results take a while to hit the file system.'''
	# Check whether the model directory exists.
	if not os.path.isdir(modelDir):
		os.makedirs(modelDir)
		inputDict["created"] = str(datetime.datetime.now())
	with open(pJoin(modelDir,'allInputData.json')) as inputFile:
		feederName = json.load(inputFile).get('feederName1','feeder1')
	inputDict["feederName1"] = feederName
	# MAYBEFIX: remove this data dump. Check showModel in web.py and renderTemplate()
	with open(pJoin(modelDir, "allInputData.json"),"w") as inputFile:
		json.dump(inputDict, inputFile, indent = 4)
	# If we are re-running, remove output and old GLD run:
	try:
		os.remove(pJoin(modelDir,"allOutputData.json"))
		shutil.rmtree(pJoin(modelDir,"gldContainer"))
	except:
		pass
	# Start background process.
	backProc = multiprocessing.Process(target = heavyProcessing, args = (modelDir, inputDict,))
	backProc.start()
	print "SENT TO BACKGROUND", modelDir
	with open(pJoin(modelDir, "PPID.txt"),"w+") as pPidFile:
		pPidFile.write(str(backProc.pid))
Example #19
def _tests():
	# Variables
	workDir = pJoin(__metaModel__._omfDir,"data","Model")
	inData = {"simStartDate": "2012-04-01",
		"simLengthUnits": "hours",
		"modelType": "pvWatts",
		"zipCode": "64735",
		"simLength": "100",
		"systemSize":"10",
		"nonInverterEfficiency":"77",
		"trackingMode":"0",
		"azimuth":"180",
		"runTime": "",
		"rotlim":"45.0",
		"gamma":"0.45",
		"inverterEfficiency":"92",
		"tilt":"45.0",
		"w_stow":"0",
		"inverterSize":"8"}
	modelLoc = pJoin(workDir,"admin","Automated pvWatts Testing")
	# Blow away old test results if necessary.
	try:
		shutil.rmtree(modelLoc)
	except:
		# No previous test results.
		pass
	# No-input template.
	renderAndShow(template)
	# Run the model.
	run(modelLoc, inData)
	# Show the output.
	renderAndShow(template, modelDir = modelLoc)
Example #20
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		"user": "******",
		"load_type": "1",
		"number_devices": "2000",
		"power": "5.6",
		"capacitance": "2",
		"resistance": "2",
		"cop": "2.5",
		"setpoint": "22.5",
		"deadband": "0.625",
		"demandChargeCost":"25",
		"electricityCost":"0.06",
		"projectionLength":"15",
		"discountRate":"2",
		"unitDeviceCost":"150",
		"unitUpkeepCost":"5",
		"demandCurve": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","FrankScadaValidVBAT.csv")).read(),
		"tempCurve": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","weatherNoaaTemp.csv")).read(),
		"fileName": "FrankScadaValidVBAT.csv",
		"tempFileName": "weatherNoaaTemp.csv",
		'histFileName': 'Texas_17yr_TempAndLoad.csv',
		"modelType": modelName,
		## FORECAST ##
		'dispatch_type': 'prediction', # 'optimal'
		'confidence': '90',
		"histCurve": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","Texas_17yr_TempAndLoad.csv"), 'rU').read(),
		}
	return __neoMetaModel__.new(modelDir, defaultInputs)
Example #21
def renderTemplate(template, fs, modelDir="", absolutePaths=False, datastoreNames={}):
    ''' Render the model template to an HTML string.
    By default render a blank one for new input.
    If modelDir is valid, render results post-model-run.
    If absolutePaths, the HTML can be opened without a server. '''

    # Our HTML template for the interface:
    with fs.open("models/solarEngineering.html") as tempFile:
        template = Template(tempFile.read())

    try:
        inJson = json.load(fs.open(pJoin(modelDir, "allInputData.json")))
        modelPath, modelName = pSplit(modelDir)
        deepPath, user = pSplit(modelPath)
        inJson["modelName"] = modelName
        inJson["user"] = user
        allInputData = json.dumps(inJson)
    except (IOError, HdfsFileNotFoundException):
        allInputData = None
    try:
        allOutputData = fs.open(pJoin(modelDir, "allOutputData.json")).read()
    except (HdfsFileNotFoundException, IOError):
        allOutputData = None
    if absolutePaths:
        # Parent of current folder.
        pathPrefix = __metaModel__._omfDir
    else:
        pathPrefix = ""
    try:
        inputDict = json.load(fs.open(pJoin(modelDir, "allInputData.json")))
    except (IOError, HdfsFileNotFoundException):
        pass
    return template.render(allInputData=allInputData,
                           allOutputData=allOutputData, modelStatus=getStatus(modelDir, fs), pathPrefix=pathPrefix,
                           datastoreNames=datastoreNames)
Example #22
File: calibrate.py  Project: acmbc68/omf
def _tests():
	print "Beginning to test calibrate.py"
	workDir = tempfile.mkdtemp()
	print "Currently working in: ", workDir
	scadaPath = pJoin("uploads", "FrankScada.tsv")
	feederPath = pJoin("data", "Feeder", "public","ABEC Frank LO.json")
	assert None == omfCalibrate(workDir, feederPath, scadaPath), "feeder calibration failed"
Example #23
def cancel(modelDir):
	''' Try to cancel a currently running model. '''
	# Kill the GLD process if it has been created.
	try:
		with open(pJoin(modelDir,"PID.txt"),"r") as pidFile:
			pid = int(pidFile.read())
			# print "pid " + str(pid)
			os.kill(pid, 15)
			print "PID KILLED"
	except:
		pass
	# Kill runForeground process
	try:
		with open(pJoin(modelDir, "PPID.txt"), "r") as pPidFile:
			pPid = int(pPidFile.read())
			os.kill(pPid, 15)
			print "PPID KILLED"
	except:
		pass
	# Remove PID, PPID, and allOutputData files if they exist.
	for fName in ["PID.txt","PPID.txt","allOutputData.json"]:
		try: 
			os.remove(pJoin(modelDir,fName))
		except:
			pass
	print "CANCELED", modelDir
Example #24
def _tests():
	# # First just test the charting.
	# tree = json.load(open("../data/Feeder/public/Olin Barre Geo.json")).get("tree",{})
	# chart = voltPlot(tree)
	# chart.savefig("/Users/dwp0/Desktop/testChart.png")
	# plt.show()
	# Variables
	workDir = pJoin(__metaModel__._omfDir,"data","Model")
	inData = {"feederName": "public___Olin Barre Geo",
		"modelType": "voltageDrop",
		"runTime": "",
		"layoutAlgorithm": "geospatial"}
	modelLoc = pJoin(workDir,"admin","Automated voltageDrop Testing")
	# Blow away old test results if necessary.
	try:
		shutil.rmtree(modelLoc)
	except:
		# No previous test results.
		pass
	# No-input template.
	renderAndShow(template)
	# Run the model.
	run(modelLoc, inData)
	# Show the output.
	renderAndShow(template, modelDir=modelLoc)
Example #25
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		'batteryEfficiency': '92',
		'inverterEfficiency': '97.5',
		'cellCapacity': '7',
		'discountRate': '2.5',
		'created': '2015-06-12 17:20:39.308239',
		'dischargeRate': '5',
		'modelType': modelName,
		'chargeRate': '5',
		'demandCurve': open(pJoin(__neoMetaModel__._omfDir,'static','testFiles','FrankScadaValidCSV_Copy.csv')).read(),
		'fileName': 'FrankScadaValidCSV_Copy.csv',
		'dispatchStrategy': 'prediction', #'optimal',
		'cellCost': '7140',
		'cellQuantity': '100',
		'runTime': '0:00:03',
		'projYears': '15',
		'demandCharge': '20',
		'dodFactor':'100',
		'retailCost': '0.06',
		'startPeakHour': '18',
		'endPeakHour': '22',
		'batteryCycleLife': '5000',
		# required if dispatch strategy is custom
		'customDispatchStrategy': open(pJoin(__neoMetaModel__._omfDir,'static','testFiles','dispatchStrategy.csv')).read(),
		# forecast
		'confidence': '0',
		'histFileName': 'Texas_17yr_TempAndLoad.csv',
		"histCurve": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","Texas_17yr_TempAndLoad.csv"), 'rU').read(),
	}
	return __neoMetaModel__.new(modelDir, defaultInputs)
Example #26
def _tests():
	# Variables
	workDir = pJoin(__metaModel__._omfDir,"data","Model")
	inData = {
		"batteryEfficiency": "92",
		"inverterEfficiency": "97.5",
		"cellCapacity": "7",
		"discountRate": "2.5",
		"created": "2015-06-12 17:20:39.308239",
		"dischargeRate": "5",
		"modelType": "energyStorage",
		"chargeRate": "5",
		"demandCurve": open(pJoin(__metaModel__._omfDir,"uploads","OlinBeckenhamScada.csv")).read(),
		"fileName": "OlinBeckenhamScada.csv",
		"cellCost": "7140",
		"cellQuantity": "10",
		"runTime": "0:00:03",
		"projYears": "15",
		"demandCharge": "20",
		"dodFactor":"100",
		"retailCost": "0.06"}
	modelLoc = pJoin(workDir,"admin","Automated energyStorage Testing")
	# Blow away old test results if necessary.
	try:
		shutil.rmtree(modelLoc)
	except:
		# No previous test results.
		pass
	# No-input template.
	renderAndShow(template)
	# Run the model.
	runForeground(modelLoc, inData)
	# Show the output.
	renderAndShow(template, modelDir = modelLoc)
Example #27
def renderTemplate(modelDir, absolutePaths=False, datastoreNames={}):
	''' Render the model template to an HTML string.
	By default render a blank one for new input.
	If modelDir is valid, render results post-model-run.
	If absolutePaths, the HTML can be opened without a server. '''
	try:
		inJson = json.load(open(pJoin(modelDir,"allInputData.json")))
		modelPath, modelName = pSplit(modelDir)
		deepPath, user = pSplit(modelPath)
		inJson["modelName"] = modelName
		inJson["user"] = user
		modelType = inJson["modelType"]
		template = getattr(omf.models, modelType).template
		allInputData = json.dumps(inJson)
	except IOError:
		allInputData = None
	try:
		allOutputData = open(pJoin(modelDir,"allOutputData.json")).read()
	except IOError:
		allOutputData = None
	if absolutePaths:
		# Parent of current folder.
		pathPrefix = _omfDir
	else:
		pathPrefix = ""
	return template.render(allInputData=allInputData,
		allOutputData=allOutputData, modelStatus=getStatus(modelDir), pathPrefix=pathPrefix,
		datastoreNames=datastoreNames, modelName=modelType)
Example #28
File: pvWatts.py  Project: acmbc68/omf
def _tests():
	# Variables
	workDir = pJoin(__metaModel__._omfDir,"data","Model")
	inData = {"simStartDate": "2012-04-01",
		"simLengthUnits": "hours",
		"modelType": "pvWatts",
		"climateName": "AL-HUNTSVILLE",
		"simLength": "100",
		"systemSize":"10",
		"derate":"0.97",
		"trackingMode":"0",
		"azimuth":"180",
		"runTime": "",
		"rotlim":"45.0",
		"t_noct":"45.0",
		"t_ref":"25.0",
		"gamma":"-0.5",
		"inv_eff":"0.92",
		"fd":"1.0",
		"i_ref":"1000",
		"poa_cutin":"0",
		"w_stow":"0"}
	modelLoc = pJoin(workDir,"admin","Automated pvWatts Testing")
	# Blow away old test results if necessary.
	try:
		shutil.rmtree(modelLoc)
	except:
		# No previous test results.
		pass
	# No-input template.
	renderAndShow(template)
	# Run the model.
	run(modelLoc, inData)
	# Show the output.
	renderAndShow(template, modelDir = modelLoc)
Example #29
def run(modelDir, inputDict, fs):
    ''' Run the model in a separate process. web.py calls this to run the model.
    This function will return fast, but results take a while to hit the file system.'''
    logger.info("Running cvrStatic model... modelDir: %s; inputDict: %s", modelDir, inputDict)
    if not os.path.isdir(modelDir):
        os.makedirs(modelDir)
        inputDict["created"] = str(datetime.datetime.now())
    # MAYBEFIX: remove this data dump. Check showModel in web.py and
    # renderTemplate()
    fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
    feederDir, feederName = inputDict["feederName"].split("___")
    fs.export_from_fs_to_local(pJoin("data", "Feeder", feederDir, feederName + ".json"),
                pJoin(modelDir, "feeder.json"))
    # If we are re-running, remove output:
    try:
        fs.remove(pJoin(modelDir, "allOutputData.json"))
    except:
        pass
    # Start the computation.
    backProc = multiprocessing.Process(
        target=runForeground, args=(modelDir, inputDict, fs))
    backProc.start()
    print "SENT TO BACKGROUND", modelDir
    with open(pJoin(modelDir, "PPID.txt"), "w") as pPidFile:
        pPidFile.write(str(backProc.pid))
Example #30
def renderTemplate(template, fs, modelDir="", absolutePaths=False, datastoreNames={}, quickRender=False):
    ''' Render the model template to an HTML string.
    By default render a blank one for new input.
    If modelDir is valid, render results post-model-run.
    If absolutePaths, the HTML can be opened without a server. 
    If quickRender, pass this to template so we can render for non-logged-in users. '''
    logger.debug('Rendering model template... modelDir: %s; absolutePaths: %s; datastoreNames: %s; quickRender: %s',
                 modelDir, absolutePaths, datastoreNames, quickRender)
    try:
        inJson = json.load(fs.open(pJoin(modelDir, "allInputData.json")))
        modelPath, modelName = pSplit(modelDir)
        deepPath, user = pSplit(modelPath)
        inJson["modelName"] = modelName
        inJson["user"] = user
        allInputData = json.dumps(inJson)
    except (HdfsFileNotFoundException, IOError):
        allInputData = None
    try:
        allOutputData = fs.open(pJoin(modelDir, "allOutputData.json")).read()
    except (HdfsFileNotFoundException, IOError):
        allOutputData = None
    if absolutePaths:
        # Parent of current folder.
        pathPrefix = _omfDir
    else:
        pathPrefix = ""
    with open('templates/footer.html', 'r') as footer_file:
        footer = footer_file.read()
    with open('templates/nrelsObligation.html') as nrels_file:
        nrels_text = nrels_file.read()
    return template.render(allInputData=allInputData,
                           allOutputData=allOutputData, modelStatus=getStatus(modelDir, fs), pathPrefix=pathPrefix,
                           datastoreNames=datastoreNames, quickRender=quickRender, footer=footer, nrels_text=nrels_text)
Example #31
File: voltageDrop.py  Project: mw23/omf
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		"feederName1": "Olin Barre Geo",
		"modelType": modelName,
		"runTime": "",
		"layoutAlgorithm": "geospatial",
		"edgeCol" : "None",
		"nodeCol" : "perUnit120Voltage",
		"nodeLabs" : "None",
		"edgeLabs" : "None",
		"customColormap" : "False",
		"rezSqIn" : "225"
	}
	creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
	try:
		shutil.copyfile(pJoin(__neoMetaModel__._omfDir, "static", "publicFeeders", defaultInputs["feederName1"]+'.omd'), pJoin(modelDir, defaultInputs["feederName1"]+'.omd'))
	except:
		return False
	return creationCode
Example #32
def getWeather(tree, maxKey, weatherStart, weatherEnd, airport, workDir):
	'''Get and save weather data to a directory.'''
	try:
		# Read wunderground.com weather data.
		weatherFile = "weather"+airport+".csv"
		assert None==makeClimateCsv(weatherStart, weatherEnd, airport, pJoin(workDir,"gridlabD",weatherFile), cleanup=True)
		for key in tree:
			if tree[key].get('object','').lower() == 'climate':
				tree[key]['tmyfile'] = str("\"weather"+airport+".csv\"")
				tree[key]['reader'] = "weatherReader"
				tree[key].pop('quadratic',None)
		tree[maxKey+1] = {"object": "csv_reader",
			"name":"\"weatherReader\"",
			"filename": str("\"weather"+airport+".csv\"")
		}
		return True
	except:
		# Server errored, use climate .tmy2 instead.
		print "ERROR: Using .tmy2 data because I couldn't connect to one of the weather servers."
		return False
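Net effect on the feeder tree: every climate object is pointed at the freshly written weather<airport>.csv through a csv_reader object named weatherReader, so GridLAB-D reads weather from the CSV rather than the stock .tmy2 file; on any failure the function returns False and the caller keeps the .tmy2 climate.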
Example #33
def _tests():
    # Variables
    modelLoc = pJoin(_omfDir, "data", "Model", "admin",
                     "Automated Testing of " + modelName)
    # Blow away old test results if necessary.
    try:
        shutil.rmtree(modelLoc)
    except:
        pass  # No previous test results.
    # Create new model.
    new(modelLoc)
    # No-input template.
    renderAndShow(modelLoc)
    # Run the model.
    runForeground(modelLoc, test_mode=True)
    ## Cancel the model.
    # time.sleep(2)
    # cancel(modelLoc)
    # Show the output.
    renderAndShow(modelLoc)
Example #34
def _runModel():
    # Testing the hazard class.
    _testHazards()
    # Location
    modelLoc = pJoin(__neoMetaModel__._omfDir, "data", "Model", "admin",
                     "Automated Testing of " + modelName)
    # Blow away old test results if necessary.
    try:
        shutil.rmtree(modelLoc)
    except:
        # No previous test results.
        pass
    # Create New.
    new(modelLoc)
    # Pre-run.
    # renderAndShow(modelLoc)
    # Run the model.
    __neoMetaModel__.runForeground(modelLoc)
    # Show the output.
    __neoMetaModel__.renderAndShow(modelLoc)
Example #35
def new(modelDir):
    ''' Create a new instance of this model. Returns true on success, false on failure. '''
    defaultInputs = {
        'created': '2015-06-12 17:20:39.308239',
        'modelType': modelName,
        'runTime': '0:01:03',
        'epochs': '1',
        'max_c': '0.1',
        'histFileName': 'd_Texas_17yr_TempAndLoad.csv',
        "histCurve": open(pJoin(__neoMetaModel__._omfDir, "static", "testFiles", "d_Texas_17yr_TempAndLoad.csv"), 'rU').read(),
    }
    return __neoMetaModel__.new(modelDir, defaultInputs)
Example #36
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		"modelType": modelName,
		"zipCode": "59001",
		"feederName1": "Olin Barre GH EOL Solar GridBallast",
		# "feederName1": "UCS Egan Housed Solar",
		# "feederName1": "Connexus West End Final Fixed Solar",
		"simStartDate": "2012-01-01 12:00:00",
		"simLength": "180",
		"simLengthUnits": "minutes", #hours
		"eventType": "ramping", #unramping, overfrequency, underfrequency
		"eventTime": "2012-01-01 14:00",
		"eventLength": "00:11"
	}
	creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
	try:
		shutil.copyfile(pJoin(__neoMetaModel__._omfDir, "static", "publicFeeders", defaultInputs["feederName1"]+'.omd'), pJoin(modelDir, defaultInputs["feederName1"]+'.omd'))
	except:
		return False
	return creationCode
Example #37
def vbat24hr(ind, temp):
	vbType = ind['load_type']
	variables = [ind['capacitance'], ind['resistance'], ind['power'], ind['cop'], 
		ind['deadband'], float(ind['setpoint']), ind['number_devices']]
	variables = [float(v) for v in variables]
	variables.insert(0, temp)

	if vbType == '1':
		return [list(i) for i in VB.AC(*variables).generate()] # air conditioning
	elif vbType == '2':
		return [list(i) for i in VB.HP(*variables).generate()] # heat pump
	elif vbType == '3':
		return [list(i) for i in VB.RG(*variables).generate()] # refrigerator
	elif vbType == '4':
		temp = np.array([[i]*60 for i in list(variables[0])]).reshape(365*24*60, 1)
		variables[0] = temp
		variables.append(temp)
		file = pJoin(__neoMetaModel__._omfDir,'static','testFiles',"Flow_raw_1minute_BPA.csv")
		water = np.genfromtxt(file, delimiter=',')
		variables.append(water)
		return [list(i) for i in VB.WH(*variables).generate()] # water heater
Example #38
def _tests():
    print "Beginning to test weather.py"
    workDir = tempfile.mkdtemp()
    print "IAD lat/lon =", _airportCodeToLatLon("IAD")
    assert (38.947444, -77.459944) == _airportCodeToLatLon("IAD"), "airportCode lookup failed."
    print "Weather downloading to", workDir
    assert None == _downloadWeather("2010-03-01", "2010-04-01", "PDX", workDir)
    print "Peak solar extraction in", workDir
    assert None == _getPeakSolar("PDX",
                                 workDir,
                                 dniScale=1.0,
                                 dhiScale=1.0,
                                 ghiScale=1.0)
    print "Pull weather and solar data together in", workDir
    assert None == _processWeather("2010-03-01", "2010-04-01", "PDX", workDir)
    print "Testing the full process together."
    assert None == makeClimateCsv("2010-07-01",
                                  "2010-08-01",
                                  "IAD",
                                  pJoin(tempfile.mkdtemp(), "weatherDCA.csv"),
                                  cleanup=True)
Example #39
def new(modelDir):
    ''' Create a new instance of this model. Returns true on success, false on failure. '''
    f1Name = "load_solar_data.csv"
    with open(pJoin(omf.omfDir, "static", "testFiles", "pyCIGAR",
                    f1Name)) as f1:
        load_PV = f1.read()

    f2Name = "breakpoints.csv"
    with open(pJoin(omf.omfDir, "static", "testFiles", "pyCIGAR",
                    f2Name)) as f2:
        breakpoints_inputs = f2.read()

    f3Name = "ieee37.dss"
    with open(pJoin(omf.omfDir, "static", "testFiles", "pyCIGAR",
                    f3Name)) as f3:
        dssFile = f3.read()

    f4Name = "misc_inputs.csv"
    with open(pJoin(omf.omfDir, "static", "testFiles", "pyCIGAR",
                    f4Name)) as f4:
        miscFile = f4.read()

    defaultInputs = {
        "simStartDate": "2019-07-01T00:00:00Z",
        "simLengthUnits": "seconds",
        # "feederName1": "ieee37fixed",
        "feederName1": "Olin Barre GH EOL Solar AVolts CapReg",
        "modelType": modelName,
        "zipCode": "59001",
        "loadPV": load_PV,
        "breakpoints": breakpoints_inputs,
        "dssFile": dssFile,
        "miscFile": miscFile,
        "trainAgent": "False",
        "attackVariable": "None",
        "defenseVariable": "None"
    }
    creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
    try:
        shutil.copyfile(
            pJoin(__neoMetaModel__._omfDir, "static", "publicFeeders",
                  defaultInputs["feederName1"] + '.omd'),
            pJoin(modelDir, defaultInputs["feederName1"] + '.omd'))
    except:
        return False
    return creationCode
Example #40
File: __init__.py  Project: sammatuba/omf
	def getFloatPercentage(workDir, endDate, simLength, simLengthUnits):	
		try:
			gridlabDTime = ''
			with open(pJoin(workDir, 'stderr.txt'),'r') as stderrFile:
				gridlabDTime = stderrFile.read().strip()
			gridlabDTest = gridlabDTime.split('\r')[-1]
			gridlabDTimeFormatted = gridlabDTest.split('Processing ')[1].split('PST...')[0].strip()
			gridlabDTimeFormatted = datetime.datetime.strptime(gridlabDTimeFormatted, '%Y-%m-%d %H:%M:%S')
			print("\n   gridlabDTime=", gridlabDTimeFormatted)
			difference = (endDate - gridlabDTimeFormatted)
			print("\n   difference=", difference)
			if simLengthUnits == 'hours':
				floatPercentageStatus = -1 * difference.total_seconds()/3600/simLength + 1.0
			elif simLengthUnits == 'days':
				floatPercentageStatus = -1 * difference.total_seconds()/86400/simLength + 1.0
			elif simLengthUnits == 'minutes':
				floatPercentageStatus = -1 * difference.total_seconds()/60/simLength + 1.0
		except:
			print("\n   No std error file, passing.")
			floatPercentageStatus = 0.0
			pass
		return floatPercentageStatus
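A worked example of the progress math, assuming hourly units: if the timestep GridLAB-D reports in stderr is 6 hours before endDate and simLength is 24, then floatPercentageStatus = -1 * (6*3600)/3600/24 + 1.0 = 0.75, i.e. the run is treated as 75% complete.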
Example #41
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		'modelType': modelName,
		'created': '', 
		'runTime': '', 
		'disaggAlgo': 'combOpt', 
		'testFileName': '', 
		'trainFileName': '', 
		'trainingData': '', 
		'testingData': '', 
		'testSet': 'REDD', 
		'trainSet': 'REDD', 
		'testBuilding': '1', 
		'trainBuilding': '1'
	}
	
	creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
	try:
		shutil.copyfile(pJoin(__neoMetaModel__._omfDir, "static", "publicFeeders", defaultInputs["feederName1"]+'.omd'), pJoin(modelDir, defaultInputs["feederName1"]+'.omd'))
	except:
		return False
	return creationCode
Example #42
def new(modelDir):
    """ Create a new instance of this model. Returns true on success, false on failure. """
    fName = "ERCOT_south_shortened.csv"
    with open(pJoin(__neoMetaModel__._omfDir, "static", "testFiles",
                    fName)) as f:
        file_ = f.read()
    defaultInputs = {
        "created": "2015-06-12 17:20:39.308239",
        "modelType": modelName,
        "file": file_,
        "fileName": fName,
        "confidence": "0.99",
        "norm_confidence": "0.90",
        "startDate": "2002-01-01",
        "contaminationLof": "0",
        "contaminationIso": "0",
        "neighbors": "20",
        "estimators": "100",
        "samples": "0.1",
        "alphabetSize": "9",
        "windowSize": "12"
    }
    return __neoMetaModel__.new(modelDir, defaultInputs)
Example #43
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		'modelType': modelName,
		'user': '******',
		'feederName1': 'ABEC Frank pre calib',
		'runTime': '',
		'capitalCost': 30000,
		'omCost': 1000,
		'wholesaleEnergyCostPerKwh': 0.06,
		'retailEnergyCostPerKwh': 0.10,
		'peakDemandCostSpringPerKw': 5.0,
		'peakDemandCostSummerPerKw': 10.0,
		'peakDemandCostFallPerKw': 6.0,
		'peakDemandCostWinterPerKw': 8.0,
		'simStart': '2011-01-01',
		'simLengthHours': 100}
	creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
	try:
		shutil.copyfile(pJoin(__neoMetaModel__._omfDir, 'static', 'publicFeeders', defaultInputs['feederName1']+'.omd'), pJoin(modelDir, defaultInputs['feederName1']+'.omd'))
	except:
		return False
	return creationCode
Example #44
File: cvrdynamic.py  Project: cdkkim/omf
def _tests():
    "runs local tests for dynamic CVR model"
    #creating a work directory and initializing data
    inData = {
        "modelName": "Automated DynamicCVR Testing",
        "modelType": "cvrDynamic",
        "user": "******",
        "runTime": "",
        "capitalCost": 30000,
        "omCost": 1000,
        "wholesaleEnergyCostPerKwh": 0.06,
        "retailEnergyCostPerKwh": 0.10,
        "peakDemandCostSpringPerKw": 5.0,
        "peakDemandCostSummerPerKw": 10.0,
        "peakDemandCostFallPerKw": 6.0,
        "peakDemandCostWinterPerKw": 8.0
    }
    # "baselineTap": 3.0,
    # "z_percent": 0.5,
    # "i_percent": 0.0,
    # "p_percent": 0.5,
    # "power_factor": 0.9}
    workDir = pJoin(_omfDir, "data", "Model")
    modelDir = pJoin(workDir, inData["user"], inData["modelName"])
    if not os.path.isdir(modelDir):
        os.makedirs(modelDir)
    #calibrate and run cvrdynamic
    feederPath = pJoin(_omfDir, "data", "Feeder", "admin",
                       "ABEC Frank new.json")
    scadaPath = pJoin(_omfDir, "uploads", "FrankScada.tsv")
    calibrate.omfCalibrate(modelDir, feederPath, scadaPath)
    try:
        os.remove(pJoin(modelDir, "stderr.txt"))
        os.remove(pJoin(modelDir, "stdout.txt"))
    except:
        pass
    with open(pJoin(modelDir, "calibratedFeeder.json"), "r") as jsonIn:
        feederJson = json.load(jsonIn)
        localTree = feederJson.get("tree", {})
    runModel(modelDir, localTree, inData)
Example #45
def _processScadaData(workDir, scadaPath, simStartDate):
    '''generate a SCADA player file from raw SCADA data'''
    with open(scadaPath, "r") as scadaFile:
        scadaReader = csv.DictReader(scadaFile, delimiter='\t')
        allData = [row for row in scadaReader]
    scadaSubPower = [float(row["power"]) for row in allData]
    firstDateTime = dt.datetime.strptime(allData[1]["timestamp"],
                                         "%m/%d/%Y %H:%M:%S")
    # Write the player.
    maxPower = max(scadaSubPower)
    positionFound = 0
    with open(pJoin(workDir, "subScada.player"), "w") as playFile:
        for i, row in enumerate(allData):
            timestamp = dt.datetime.strptime(row["timestamp"],
                                             "%m/%d/%Y %H:%M:%S")
            if timestamp >= simStartDate['Date']:
                if positionFound == 0: positionFound = i
                power = float(row["power"]) / maxPower
                line = timestamp.strftime(
                    "%Y-%m-%d %H:%M:%S"
                ) + " " + simStartDate['timeZone'] + "," + str(power) + "\n"
                playFile.write(line)
    return scadaSubPower[positionFound:]
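Each line written to subScada.player uses GridLAB-D's player format, "timestamp timezone,value", with power normalized against the SCADA peak; an illustrative line (value hypothetical): 2011-04-13 09:00:00 PST,0.83145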
Example #46
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		"batteryEfficiency": "92",
		"retailCost": "0.06",
		"deferralType": "subTransformer",
		"inverterEfficiency": "97.5",
		"cellCapacity": "7",
		"created": "2015-06-12 17:20:39.308239",
		"dischargeRate": "5",
		"modelType": modelName,
		"chargeRate": "5",
		"demandCurve": open(pJoin(__neoMetaModel__._omfDir,"static","testFiles","FrankScadaValidCSV_Copy.csv")).read(),
		"fileName": "FrankScadaValidCSV_Copy.csv",
		"cellCost": "7140",
		"dodFactor":"100",
		"avoidedCost":"2000000",
		"transformerThreshold":"6.6",
		"batteryCycleLife": "5000",
		"carryingCost":"7",
		"yearsToReplace":"2"
	}
	return __neoMetaModel__.new(modelDir, defaultInputs)
Example #47
def plotLine(workDir, powerVec, chartData, startTime, simLengthUnits):
	''' Plot vector data with the given chart settings to fileName.png.
		chartData accepts: Title, timeZone, yAxis label, legend labels & colors, boundaries, fileName.'''
	plt.style.use('fivethirtyeight')
	plt.figure("Power")
	plt.title(chartData['Title'], fontsize=12)
	plt.xlabel("Time "+"("+chartData.get('timeZone','')+")")
	plt.ylabel(chartData.get('yAxis','Real Power (kW)'))
	if isinstance(chartData['labels'], list): timeLength = len(powerVec[0])
	else: timeLength = len(powerVec)
	if simLengthUnits == 'hours': x = np.array([(startTime+dt.timedelta(hours=i)) for i in range(timeLength)])
	elif simLengthUnits == 'minutes': x = np.array([(startTime+dt.timedelta(minutes=i)) for i in range(timeLength)])
	elif simLengthUnits == 'days': x = np.array([(startTime+dt.timedelta(days=i)) for i in range(timeLength)])
	if isinstance(chartData['labels'], list):
		colors = chartData.get("colors", ['red','black','green'])
		for i in range(len(powerVec)):
			pws = plt.plot(x, powerVec[i], colors[i], label=str(chartData["labels"][i]))
		plt.legend(loc=1,prop={'size':6})
	else:
		if chartData.get('labels','') != "":
			pws = plt.plot(x, powerVec, label=str(chartData["labels"]))
			plt.legend(loc=1,prop={'size':6})
		else: pws = plt.plot(x, powerVec)
	# Add boundaries.
	if chartData.get('boundaries','') != "":
		for i in range(len(chartData['boundaries'])):
			boundary = np.array([float(chartData['boundaries'][i]) for times in range(timeLength)])
			pws = plt.plot(x, boundary,'r--')
	# Set axis text sizes.
	plt.tick_params(axis='both', which='major', labelsize=8)
	plt.tick_params(axis='x', which='minor', labelsize=5)
	plt.gcf().autofmt_xdate()
	plt.tight_layout()
	plt.margins(x=0.1,y=0.2)
	# Save and close plot.
	plt.savefig(pJoin(workDir,chartData['fileName']+".png"))
	plt.close()
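A hedged usage sketch (the vectors, output directory, and start time are illustrative; the chartData keys match the ones plotLine reads):

	import datetime as dt
	scadaVec = [1000.0 + i for i in range(24)]
	simVec = [990.0 + i for i in range(24)]
	chartData = {
		'Title': 'Substation Power',
		'fileName': 'subPower',
		'labels': ['SCADA', 'simulated'],  # a list here means powerVec must be a list of vectors
		'colors': ['red', 'black'],
		'timeZone': 'PST',
	}
	plotLine('/tmp', [scadaVec, simVec], chartData, dt.datetime(2011, 4, 13, 9), 'hours')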
Example #48
def renderTemplateToFile(modelDir, datastoreNames={}):
	''' Render and open a template (blank or with output) in a local browser. '''
	with tempfile.NamedTemporaryFile('w', suffix=".html", delete=False) as baseTemplate:
		baseTemplate.write(renderTemplate(modelDir, absolutePaths=False))
		baseTemplate.flush()
		baseTemplate.seek(0)
		with web.locked_open(pJoin(modelDir,'inlineTemplate.html'), 'w', encoding='utf-8') as inlineTemplate:
			for line in baseTemplate:
				#add backslash to regex between single and double quote
				matchObj = re.match( r"(.*)/static(.+?)(['\"])(.+?)", line, re.M|re.I)
				scriptTags = re.match( r"(.*)<script(.*)static/(.*)</script>", line, re.M|re.I)
				styleTags = re.match( r"(.*)<link(.*)stylesheet", line, re.M|re.I)
				if scriptTags:
					with open(_omfDir + "/static" + matchObj.group(2), 'r', encoding='utf-8') as yFile:
						ttempfile = yFile.readlines()
					inlineTemplate.write('<script>')
					for i in ttempfile:
						try:
							inlineTemplate.write(i)
						except (UnicodeEncodeError):
							print(i)
					inlineTemplate.write('</script>')
				elif styleTags:
					with open(_omfDir + "/static"+ matchObj.group(2), 'r', encoding='utf-8') as yFile:
						ttempfile = yFile.readlines()
					inlineTemplate.write('<style>')
					for i in ttempfile:
						try:
							inlineTemplate.write(i)
						except (UnicodeEncodeError):
							print(i)
					inlineTemplate.write('</style>')
				else:
					inlineTemplate.write(str(line))
Example #49
File: Anomaly.py  Project: sammatuba/omf
def run(modelDir, inputDict):
    ''' Run the model in its directory. '''
    # Delete output file every run if it exists
    try:
        os.remove(pJoin(modelDir, "allOutputData.json"))
    except Exception as e:
        pass
    # Check whether the model directory exists.
    try:
        if not os.path.isdir(modelDir):
            os.makedirs(modelDir)
            inputDict["created"] = str(datetime.datetime.now())
        with open(pJoin(modelDir, "allInputData.json"), "w") as inputFile:
            json.dump(inputDict, inputFile, indent=4)
        startTime = datetime.datetime.now()
        # Inputs.
        MinDetRunTime = int(inputDict.get('MinDetectionRunTime', 4))
        devFromAve = 1 - float(inputDict.get('MinDeviationFromAverage', 95)) / 100
        workDir = os.getcwd()
        # Run.
        outData = {}
        computeAMIResults(pJoin(workDir, inputDict.get('fileName', '')),
                          devFromAve, MinDetRunTime, outData)
        # Save output.
        with open(pJoin(modelDir, "allOutputData.json"), "w") as outFile:
            json.dump(outData, outFile, indent=4)
        # Update the runTime in the input file.
        endTime = datetime.datetime.now()
        inputDict["runTime"] = str(
            datetime.timedelta(seconds=int((endTime -
                                            startTime).total_seconds())))
        with open(pJoin(modelDir, "allInputData.json"), "w") as inFile:
            json.dump(inputDict, inFile, indent=4)
    except:
        # If the input wasn't valid, delete the output and write the error to disk.
        cancel(modelDir)
        thisErr = traceback.format_exc()
        print('ERROR IN MODEL', modelDir, thisErr)
        inputDict['stderr'] = thisErr
        with open(os.path.join(modelDir, 'stderr.txt'), 'w') as errorFile:
            errorFile.write(thisErr)
        with open(pJoin(modelDir, "allInputData.json"), "w") as inFile:
            json.dump(inputDict, inFile, indent=4)
Example #50
File: vbatStacked.py  Project: mw23/omf
def pyVbat(tempCurve, modelDir, i):
	vbType = i['load_type']
	# with open(pJoin(modelDir, 'temp.csv')) as f:
	# 	ambient = np.array([float(r[0]) for r in csv.reader(f)])
	ambient = np.array(tempCurve)
	variables = [i['capacitance'], i['resistance'], i['power'], i['cop'], 
		i['deadband'], float(i['setpoint']), i['number_devices']]
	variables = [float(v) for v in variables]
	variables.insert(0, ambient)

	if vbType == '1':
		return VB.AC(*variables).generate() # air conditioning
	elif vbType == '2':
		return VB.HP(*variables).generate() # heat pump
	elif vbType == '3':
		return VB.RG(*variables).generate() # refrigerator
	elif vbType == '4':
		ambient = np.array([[i]*60 for i in list(variables[0])]).reshape(365*24*60, 1)
		variables[0] = ambient
		variables.append(ambient)
		file = pJoin(__neoMetaModel__._omfDir,'static','testFiles',"Flow_raw_1minute_BPA.csv")
		water = np.genfromtxt(file, delimiter=',')
		variables.append(water)
		return VB.WH(*variables).generate() # water heater
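The numeric load_type codes map to the virtual-battery device classes: '1' air conditioner (VB.AC), '2' heat pump (VB.HP), '3' refrigerator (VB.RG), and '4' water heater (VB.WH), which additionally needs the ambient series expanded to per-minute resolution plus a water-draw profile read from Flow_raw_1minute_BPA.csv.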
Example #51
def new(modelDir):
	''' Create a new instance of this model. Returns true on success, false on failure. '''
	defaultInputs = {
		"feederName1": "phase_balance_test",
		"criticalNode": 'R1-12-47-1_node_17',
		# "feederName1": "phase_balance_test_2",
		# "criticalNode": 'R1-12-47-2_node_28',
		"modelType": modelName,
		"runTime": "",
		"layoutAlgorithm": "geospatial", #forceDirected
		"zipCode": "64735",
		"retailCost": "0.05",
		"productionCost": "0.03",
		"pf_penalty": "50000",
		"pf_threshold": "0.95",
		"motor_threshold": "2.5",
		"motor_penalty": "3000000",
		"discountRate": "7",
		"edgeCol" : "None",
		"nodeCol" : "perUnitVoltage",
		"nodeLabs" : "None",
		"edgeLabs" : "None",
		"customColormap" : "False",
		"rezSqIn" : "225",
		"colorMin": "0.92",
		"colorMax": "1.08",
		"objectiveFunction": 'VUF', #'I0'
		"pvConnection": 'Delta',
		"iterations": "5"
	}
	creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
	try:
		shutil.copyfile(pJoin(__neoMetaModel__._omfDir, "static", "testFiles", defaultInputs["feederName1"]+'.omd'), pJoin(modelDir, defaultInputs["feederName1"]+'.omd'))
	except:
		return False
	return creationCode
Example #52
from jinja2 import Template
import __neoMetaModel__
from __neoMetaModel__ import *

# OMF imports
sys.path.append(__neoMetaModel__._omfDir)
import feeder
from solvers import nrelsam2013
from weather import zipCodeToClimateName

# Model metadata:
fileName = os.path.basename(__file__)
modelName = fileName[0:fileName.rfind('.')]

# Our HTML template for the interface:
with open(pJoin(__neoMetaModel__._myDir,modelName+".html"),"r") as tempFile:
	template = Template(tempFile.read())

def work(modelDir, inputDict):
	''' Run the model in its directory. '''
	# Delete output file every run if it exists
	outData = {}		
	# Model operations goes here.
	inputOne = inputDict.get("input1", 123)
	inputTwo = inputDict.get("input2", 867)
	output = inputOne + inputTwo
	outData["output"] = output
	# Model operations typically ends here.
	# Stdout/stderr.
	outData["stdout"] = "Success"
	outData["stderr"] = ""
Example #53
def work(modelDir, inputDict):

    sourceQth = [
        "Source Name",
        str(inputDict['towerLatitude']),
        str(inputDict['towerLongitude']),
        str(inputDict['towerHeight']) + 'm'
    ]

    with open(pJoin(modelDir, "siteLocation.qth"), 'w') as qthFile:
        for i in sourceQth:
            qthFile.write(i)
            qthFile.write("\n")

    sourceLrp = [
        "15.000 ; Earth Dielectric Constant (Relative permittivity)",
        "0.005 ; Earth Conductivity (Siemens per meter)",
        "301.000 ; Atmospheric Bending Constant (N-units)",
        str(inputDict['frequency']), "5 ; Radio Climate",
        str(inputDict['polarization']),
        "0.50 ; Fraction of situations (50 % of locations)",
        "0.90 ; Fraction of time (90% of the time)"
    ]

    with open(pJoin(modelDir, "siteLocation.lrp"), 'w') as lrpFile:
        for i in sourceLrp:
            lrpFile.write(i)
            lrpFile.write("\n")

    # US-based lat/lon bounds: longitude 043W to 180W, latitude 10N to 60N.
    # Get the terrain (.hgt) files as zips and convert them to .sdf on an
    # as-needed basis; this would be unnecessary if all were stored in static.
    # Use a temp directory for the .sdf files SPLAT! consumes, but don't save
    # them in the output due to size -- pull from dds.cr.usgs.gov each run.
    sdfDir = tempfile.mkdtemp()
    if inputDict["elevation"] == "digitalElevationModel":
        #Set ranges for hgt files to pull - may change on further analysis
        latitudeInt = int(round(float(inputDict['towerLatitude'])))
        latitudeMax = min(latitudeInt + 3, 61)
        latitudeMin = max(latitudeInt - 2, 10)
        longitudeInt = int(round(float(inputDict['towerLongitude'])))
        longitudeMax = min(longitudeInt + 4, 181)
        longitudeMin = max(longitudeInt - 3, 43)
        hgtDir = tempfile.mkdtemp()
        for lat in range(latitudeMin, latitudeMax):
            strLat = str(lat)
            for lon in range(longitudeMin, longitudeMax):
                if len(str(lon)) < 3:
                    strLon = '0' + str(lon)
                else:
                    strLon = str(lon)
                #Zip files on the site are missing the period before "hgt" in the filename starting at latitude 55
                if lat >= 55:
                    currentUrl = "https://dds.cr.usgs.gov/srtm/version2_1/SRTM3/North_America/N" + strLat + "W" + strLon + "hgt.zip"
                else:
                    currentUrl = "https://dds.cr.usgs.gov/srtm/version2_1/SRTM3/North_America/N" + strLat + "W" + strLon + ".hgt.zip"
                response = requests.get(currentUrl)
                if response.status_code == 200:
                    #print('valid lat lon zip')
                    with tempfile.TemporaryFile() as tmp:
                        tmp.write(response.content)
                        with ZipFile(tmp, 'r') as zipper:
                            zipper.extractall(hgtDir)
                            hgtFile = "N" + strLat + "W" + strLon + ".hgt"
                            args = ["srtm2sdf", pJoin(hgtDir, hgtFile)]
                            subprocess.Popen(args, cwd=sdfDir).wait()
    #Can add in -R switch for area to cover
    args = [
        "splat", "-t",
        pJoin(modelDir, "siteLocation.qth"), "-o",
        pJoin(modelDir, "coverageMap.ppm"), "-kml", "-ngs", "-d", sdfDir
    ]

    #change inputs based on analysis type
    if inputDict["analysisType"] == "lineOfSight":
        args += ["-c", "10", "-metric"]
    else:
        args += ["-L", "10", "-metric"]
        if inputDict["analysisType"] != "pathLoss":
            args += ["-erp", inputDict["erp"]]
            if inputDict["analysisType"] == "recievedPower":
                args += ["-dbm"]

    subprocess.Popen(args, cwd=modelDir).wait()

    outData = {}

    #Convert the PPM coverage map to a transparent PNG for use in Leaflet
    im = Image.open(pJoin(modelDir, "coverageMap.ppm"))
    im = im.convert("RGBA")
    datas = im.getdata()

    newData = []
    for item in datas:
        if item[0] == 255 and item[1] == 255 and item[2] == 255:
            newData.append((255, 255, 255, 0))
        else:
            newData.append(item)

    im.putdata(newData)
    im.save(pJoin(modelDir, "coverageMap.png"))
    if inputDict["analysisType"] != "lineOfSight":
        scale = Image.open(pJoin(modelDir, "coverageMap-ck.ppm"))
        scale.save(pJoin(modelDir, "rfScale.png"))
        with open(pJoin(modelDir, "rfScale.png"), "rb") as inFile:
            outData["rfScale"] = base64.standard_b64encode(
                inFile.read()).decode()

    #Parse the KML for the image bounding box
    kml = pJoin(modelDir, "coverageMap.kml")
    tree = ET.parse(kml)
    north = south = east = west = None  # guard against missing tags
    all_descendants = list(tree.iter())
    for i in all_descendants:
        if i.tag == '{http://earth.google.com/kml/2.1}south':
            south = i.text
        elif i.tag == '{http://earth.google.com/kml/2.1}north':
            north = i.text
        elif i.tag == '{http://earth.google.com/kml/2.1}east':
            east = i.text
        elif i.tag == '{http://earth.google.com/kml/2.1}west':
            west = i.text

    # Stdout/stderr.
    outData['towerLatitude'] = inputDict['towerLatitude']
    outData['towerLongitude'] = inputDict['towerLongitude']
    with open(pJoin(modelDir, "coverageMap.png"), "rb") as inFile:
        outData["coverageMap"] = base64.standard_b64encode(
            inFile.read()).decode()
    outData['north'] = north
    outData['south'] = south
    outData['west'] = west
    outData['east'] = east
    outData["stdout"] = "Success"
    outData["stderr"] = ""
    return outData
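The bounding-box parse above compares fully qualified tag strings; ElementTree can also resolve the namespace directly. A minimal alternative sketch, assuming the same kml/2.1 namespace SPLAT! writes:

import xml.etree.ElementTree as ET

KML_NS = {'kml': 'http://earth.google.com/kml/2.1'}

def kmlBounds(kmlPath):
    '''Return (north, south, east, west) text values from a SPLAT! KML file.'''
    root = ET.parse(kmlPath).getroot()
    return tuple(root.find('.//kml:' + tag, KML_NS).text
                 for tag in ('north', 'south', 'east', 'west'))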
Example #54
def work(modelDir, inputDict):
    ''' Run the model in its directory and return the output data dictionary. '''
    # Delete output file every run if it exists
    outData = {}
    # Get variables.
    (cellCapacity, dischargeRate, chargeRate, cellQuantity, cellCost) = \
     [float(inputDict[x]) for x in ('cellCapacity', 'dischargeRate', 'chargeRate', 'cellQuantity', 'cellCost')]
    # Inverter efficiency is applied twice: once charging, once discharging.
    battEff = float(inputDict.get("batteryEfficiency", 92)) / 100.0 * float(
        inputDict.get("inverterEfficiency", 92)) / 100.0 * float(
            inputDict.get("inverterEfficiency", 92)) / 100.0
    discountRate = float(inputDict.get('discountRate', 2.5)) / 100.0
    dodFactor = float(inputDict.get('dodFactor', 85)) / 100.0
    projYears = int(inputDict.get('projYears', 10))
    dischargePriceThreshold = float(
        inputDict.get('dischargePriceThreshold', 0.15))
    chargePriceThreshold = float(inputDict.get('chargePriceThreshold', 0.07))
    batteryCycleLife = int(inputDict.get('batteryCycleLife', 5000))
    # Put demand data in to a file for safe keeping.
    with open(pJoin(modelDir, "demand.csv"), "w") as demandFile:
        demandFile.write(inputDict['demandCurve'])
    with open(pJoin(modelDir, "priceCurve.csv"), "w") as priceCurve:
        priceCurve.write(inputDict['priceCurve'])
    # Start running battery simulation.
    battCapacity = cellQuantity * cellCapacity * dodFactor
    battDischarge = cellQuantity * dischargeRate
    battCharge = cellQuantity * chargeRate
    # Most of our data goes inside the dc "table"
    dates = [(datetime.datetime(2011, 1, 1, 0, 0) +
              datetime.timedelta(hours=1) * x).strftime("%m/%d/%Y %H:%M:%S")
             for x in range(8760)]
    try:
        dc = []
        with open(pJoin(modelDir, "demand.csv")) as inFile:
            reader = csv.reader(inFile)
            x = 0
            for row in reader:
                dc.append({
                    'datetime': parse(dates[x]),
                    'power': float(row[0])
                })
                x += 1
            if len(dc) != 8760: raise Exception
    except:
        e = sys.exc_info()[0]
        if str(e) == "<type 'exceptions.SystemExit'>":
            pass
        else:
            errorMessage = "Demand CSV file is incorrect format."
            raise Exception(errorMessage)
    #Add price to dc table
    try:
        with open(pJoin(modelDir, 'priceCurve.csv')) as priceFile:
            reader = csv.reader(priceFile)
            i = 0
            for row in reader:
                dc[i]['price'] = float(row[0])
                i += 1
            if i != 8760: raise Exception
    except:
        e = sys.exc_info()[0]
        if str(e) == "<type 'exceptions.SystemExit'>":
            pass
        else:
            errorMessage = "Price Curve File is in an incorrect format."
            raise Exception(errorMessage)
    for row in dc:
        row['month'] = row['datetime'].month - 1
        row['hour'] = row['datetime'].hour
        # row['weekday'] = row['datetime'].weekday() # TODO: figure out why we care about this.
    battSoC = battCapacity
    outData['startDate'] = '2011-01-01'  # dc[0]['datetime'].isoformat()
    for row in dc:
        discharge = min(battDischarge, battSoC)
        charge = min(battCharge, battCapacity - battSoC)
        #If price of energy is above price threshold and battery has charge, discharge battery
        if row['price'] >= dischargePriceThreshold and battSoC > 0:
            row['netpower'] = row['power'] - discharge
            battSoC -= discharge
        #If battery has no charge but price is still above charge threshold, don't charge it
        elif row['price'] > chargePriceThreshold and battSoC == 0:
            row['netpower'] = row['power']
        elif row['price'] <= chargePriceThreshold and battSoC < battCapacity:
            row['netpower'] = row['power'] + charge / battEff
            battSoC += charge
        else:
            row['netpower'] = row['power']
        row['battSoC'] = battSoC
    dischargeGroupByMonth = [[
        t['netpower'] - t['power'] for t in dc if t['datetime'].month - 1 == x
    ] for x in range(12)]
    dcGroupByMonth = [[t for t in dc if t['datetime'].month - 1 == x]
                      for x in range(12)]
    monthlyCharge = []
    monthlyDischarge = []
    #Calculate the monthly energy discharged/charged
    for row in dischargeGroupByMonth:
        chargePower = 0
        dischargePower = 0
        for n in row:
            if n > 0:
                chargePower += n
            else:
                dischargePower += n * -1
        monthlyCharge.append(chargePower)
        monthlyDischarge.append(dischargePower)
    monthlyDischargeSavings = []
    monthlyChargeCost = []
    #Calculate the monthly cost to charge and savings by discharging
    for row in dcGroupByMonth:
        chargeCost = 0
        dischargeSavings = 0
        for n in row:
            if n['netpower'] - n['power'] > 0:
                chargeCost += (n['netpower'] - n['power']) * n['price']
            if n['netpower'] - n['power'] < 0:
                dischargeSavings += (n['netpower'] -
                                     n['power']) * n['price'] * -1
        monthlyDischargeSavings.append(dischargeSavings)
        monthlyChargeCost.append(chargeCost)
    yearlyDischargeSavings = sum(monthlyDischargeSavings)
    yearlyChargeCost = sum(monthlyChargeCost)
    cashFlowCurve = [
        yearlyDischargeSavings - yearlyChargeCost for year in range(projYears)
    ]
    outData['demand'] = [t['power'] * 1000.0 for t in dc]
    outData['demandAfterBattery'] = [t['netpower'] * 1000.0 for t in dc]
    demandAfterBattery = outData['demandAfterBattery']
    demand = outData['demand']
    outData['batteryDischargekW'] = [
        demand - demandAfterBattery
        for demand, demandAfterBattery in zip(demand, demandAfterBattery)
    ]
    batteryDischargekWMax = max(outData['batteryDischargekW'])
    outData['batteryDischargekWMax'] = batteryDischargekWMax
    outData['energyOffset'] = monthlyDischarge
    outData['kWhtoRecharge'] = monthlyCharge
    outData['costToRecharge'] = monthlyChargeCost
    outData['dischargeSavings'] = monthlyDischargeSavings
    outData['benefitNet'] = [
        monthlyDischargeSavings - monthlyChargeCost
        for monthlyChargeCost, monthlyDischargeSavings in zip(
            monthlyChargeCost, monthlyDischargeSavings)
    ]
    outData['batterySoc'] = [
        t['battSoC'] / battCapacity * 100.0 * dodFactor +
        (100 - 100 * dodFactor) for t in dc
    ]
    SoC = outData['batterySoc']
    cycleEquivalents = sum([
        SoC[i] - SoC[i + 1]
        for i, x in enumerate(SoC[0:-1]) if SoC[i + 1] < SoC[i]
    ]) / 100.0
    outData['cycleEquivalents'] = cycleEquivalents
    outData['batteryLife'] = batteryCycleLife / cycleEquivalents
    cashFlowCurve[0] -= (cellCost * cellQuantity)
    outData['netCashflow'] = cashFlowCurve
    outData['cumulativeCashflow'] = [
        sum(cashFlowCurve[0:i + 1]) for i, d in enumerate(cashFlowCurve)
    ]
    outData['NPV'] = npv(discountRate, cashFlowCurve)
    outData['SPP'] = (cellCost * cellQuantity) / (yearlyDischargeSavings -
                                                  yearlyChargeCost)
    battCostPerCycle = cellQuantity * cellCapacity * cellCost / batteryCycleLife
    lcoeTotCost = (cycleEquivalents * cellQuantity * cellCapacity *
                   chargePriceThreshold) + (battCostPerCycle *
                                            cycleEquivalents)
    lcoeTotEnergy = cycleEquivalents * cellCapacity * cellQuantity
    LCOE = lcoeTotCost / lcoeTotEnergy
    outData['LCOE'] = LCOE
    # Stdout/stderr.
    outData["stdout"] = "Success"
    outData["stderr"] = ""
    return outData
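The core dispatch rule above is a two-threshold policy: discharge when the price is at or above the discharge threshold, charge when it is at or below the charge threshold, otherwise idle. A toy restatement with hypothetical numbers:

def stepBattery(price, soc, capacity, rate, eff, chargeThresh, dischargeThresh):
    '''Return (new state of charge, grid power delta) for one hour.'''
    if price >= dischargeThresh and soc > 0:
        d = min(rate, soc)
        return soc - d, -d  # discharging lowers net load
    if price <= chargeThresh and soc < capacity:
        c = min(rate, capacity - soc)
        return soc + c, c / eff  # losses make the grid draw exceed c
    return soc, 0.0

soc = 10.0
for price in [0.05, 0.06, 0.20, 0.18, 0.08]:
    soc, delta = stepBattery(price, soc, capacity=10.0, rate=5.0,
                             eff=0.85, chargeThresh=0.07, dischargeThresh=0.15)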
Example #55
File: vbatStacked.py Project: mw23/omf
def work(modelDir, ind):
	out = {}
	
	tempCurve = [float(x) for x in ind["tempCurve"].split('\n') if x != '']
	gt_demand = [float(x) for x in ind["gt_demandCurve"].split('\n')] if ind["payment_structure"] == "gt" else []
	with open(pJoin(modelDir, 'inputCsv.csv'), 'w') as f:
		f.write(ind["inputCsv"].replace('\r', ''))

	input_df = pd.read_csv(pJoin(modelDir, 'inputCsv.csv'), index_col=['Hour'])
	P_lower, P_upper, E_UL = pyVbat(tempCurve, modelDir, ind)

	input_df['VB Power upper (kW)'] = P_upper
	input_df['VB Power lower (kW)'] = [-x for x in P_lower]
	input_df['VB Energy upper (kWh)'] = E_UL
	input_df['VB Energy lower (kWh)'] = [-x for x in E_UL]
	
	if ind["payment_structure"] == "gt":
		output_df = VB.run_fhec(ind, gt_demand, input_df)

	else:
		output_df = VB.run_okec(ind, input_df)
		
	out["show_gt"] = "none" if len(gt_demand) == 0 else "";
		
	# ------------------------------- CASH FLOW --------------------------- #
	number_devices = float(ind["number_devices"])
	upkeep_cost = float(ind["unitUpkeepCost"])
	device_cost = float(ind["unitDeviceCost"])
	projYears = int(ind["projectionLength"])

	if ind["payment_structure"] == "gt":
		np_gt_demand = gt_demand  # np.percentile accepts a plain list directly
		threshold = np.percentile(np_gt_demand, float(ind["peakPercentile"])*100)
		indexes = [i for i, l in enumerate(gt_demand) if l >= threshold]
		
		mult = float(ind["peakMultiplier"])
		rate = float(ind["electricityCost"])
		
		price_structure_before = sum([l*rate*mult if i in indexes else l*rate for i, l in enumerate(output_df["Load (kW)"])])
		price_structure_after = sum([l*rate*mult if i in indexes else l*rate for i, l in enumerate(output_df["Net load (kW)"])])
	if ind["payment_structure"] == "ppm":
		price_structure_before = (output_df['Load (kW)'].max()*float(ind["annual_peak_charge"]) + 
			output_df['Load (kW)'].mean()*float(ind["avg_demand_charge"]))
		price_structure_after = (output_df['Net load (kW)'].max()*float(ind["annual_peak_charge"]) + 
			output_df['Net load (kW)'].mean()*float(ind["avg_demand_charge"]))
	
	if ind["use_regulation"] == "on":
		regulation_after = (output_df['Regulation (kW)']*input_df['Reg-up Price ($/MW)']/1000).sum()
	else:
		regulation_after = 0

	if ind['use_deferral'] == "on":
		not_surpassed = all([x <= float(ind['transformerThreshold']) for x in output_df['Net load (kW)'].tolist()])
		deferral_before = -1*(float(ind['carryingCost'])/100)*float(ind['yearsToReplace'])*float(ind['avoidedCost'])
		deferral_after = 0 if not_surpassed else deferral_before
	else:
		deferral_before = 0
		deferral_after = 0

	total_upkeep_costs = upkeep_cost*number_devices
	cost_before = price_structure_before - deferral_before
	cost_after = price_structure_after - regulation_after - deferral_after
	
	gross_savings = cost_before - cost_after
	money_saved = gross_savings - total_upkeep_costs

	styling = "style='border-bottom: 3px solid black;'"

	out['cost_table'] = (
			"<tr><td style='font-weight: bold;'>Price Structure</td><td>{0}</td><td>{1}</td><td>{2}</td></tr>"
			"<tr><td style='font-weight: bold;'>Deferral</td><td>{3}</td><td>{4}</td><td>{5}</td></tr>"
			"<tr {6}><td style='font-weight: bold; border-bottom: 3px solid black;'>Regulation</td><td {9}>{6}</td><td {9}>{7}</td><td {9}>{8}</td></tr>"
		).format(n(price_structure_before), n(price_structure_after), n(price_structure_before - price_structure_after),
				n(deferral_before), n(deferral_after), n(deferral_after - deferral_before),
				n(0), n(regulation_after), n(regulation_after), 
				styling) + (
				"<tr><td colspan='2'>&nbsp;</td><td style='font-weight: bold;'>Gross Savings</td><td>{0}</td></tr>"
			"<tr><td colspan='2'>&nbsp;</td><td style='font-weight: bold;'>Upkeep Cost</td><td>-{1}</td></tr>"
			"<tr><td colspan='2'>&nbsp;</td><td style='font-weight: bold;'>Total Savings</td><td>{2}</td></tr>"
		).format(n(gross_savings), n(total_upkeep_costs), n(money_saved))

	cashFlowCurve = [money_saved for year in range(projYears)]
	cashFlowCurve.insert(0, -1 * number_devices * device_cost)  # insert initial investment
	out['SPP'] = (number_devices*device_cost)/(money_saved)
	out['netCashflow'] = cashFlowCurve
	out['cumulativeCashflow'] = [sum(cashFlowCurve[:i+1]) for i, d in enumerate(cashFlowCurve)]
	out['NPV'] = npv(float(ind["discountRate"])/100, cashFlowCurve)

	out["gt_demand"] = gt_demand
	out["demand"] = output_df['Load (kW)'].tolist()
	out["VBpower"] = output_df['VB power (kW)'].tolist()
	out["VBenergy"] = [-x for x in output_df['VB energy (kWh)'].tolist()]
	out["demandAdjusted"] = output_df['Net load (kW)'].tolist()
	out["regulation"] = output_df['Regulation (kW)'].tolist() if 'Regulation (kW)' in output_df else 0

	out['vbpu'] = input_df['VB Power upper (kW)'].tolist()
	out['vbpl'] = input_df['VB Power lower (kW)'].tolist()
	out['vbeu'] = input_df['VB Energy upper (kWh)'].tolist()
	out['vbel'] = input_df['VB Energy lower (kWh)'].tolist()

	out['startDate'] = str(dt(2001, 1, 1))
	days_dispatched_arbitrage = [not all([x == 0 for x in out["VBpower"][i:i+24]]) for i in range(0, len(out["VBpower"]), 24)]
	days_dispatched_regulation = ([not all([x == 0 for x in out["regulation"][i:i+24]]) for i in range(0, len(out["regulation"]), 24)] 
		if 'Regulation (kW)' in output_df else [False]*len(days_dispatched_arbitrage))
	
	days_dispatched = [a or b for a, b in zip(days_dispatched_regulation, days_dispatched_arbitrage)]

	out['dispatchDates'] = [[str(dt(2001, 1, 1, 12, 0, 0) + timedelta(days=i)), 0] for i in range(365) if days_dispatched[i]]
	out['totalDispatches'] = len(out['dispatchDates'])

	out['transformerThreshold'] = float(ind['transformerThreshold']) if ind['use_deferral'] == 'on' else None

	out["stdout"] = "Success"
	return out
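The dispatch-day flags above test each 24-hour chunk of a series for any nonzero value; the same idea as a reusable helper (a sketch, not part of the original module):

def daysActive(series, chunk=24):
	'''True for each chunk-sized window that contains any nonzero dispatch.'''
	return [any(x != 0 for x in series[i:i + chunk])
		for i in range(0, len(series), chunk)]

# e.g. daysActive(out["VBpower"]) reproduces days_dispatched_arbitrage above.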
Example #56
def new(modelDir):
    ''' Create a new instance of this model. Returns true on success, false on failure. '''
    colomaMonths = {
        "janAvg": 914000.0,
        "janPeak": 1290000.0,
        "febAvg": 897000.00,
        "febPeak": 1110000.0,
        "marAvg": 731000.00,
        "marPeak": 1030000.0,
        "aprAvg": 864000.00,
        "aprPeak": 2170000.0,
        "mayAvg": 1620000.0,
        "mayPeak": 4580000.0,
        "junAvg": 2210000.0,
        "junPeak": 5550000.0,
        "julAvg": 3570000.0,
        "julPeak": 6260000.0,
        "augAvg": 3380000.0,
        "augPeak": 5610000.0,
        "sepAvg": 1370000.0,
        "sepPeak": 3740000.0,
        "octAvg": 1030000.0,
        "octPeak": 1940000.0,
        "novAvg": 1020000.0,
        "novPeak": 1340000.0,
        "decAvg": 1030000.0,
        "decPeak": 1280000.0
    }
    # friendshipMonths = {"janAvg": 2740000.0, "janPeak": 4240000.0,
    # 	"febAvg": 2480000.0, "febPeak": 3310000.0,
    # 	"marAvg": 2030000.0, "marPeak": 2960000.0,
    # 	"aprAvg": 2110000.0, "aprPeak": 3030000.0,
    # 	"mayAvg": 2340000.0, "mayPeak": 4080000.0,
    # 	"junAvg": 2770000.0, "junPeak": 5810000.0,
    # 	"julAvg": 3970000.0, "julPeak": 6750000.0,
    # 	"augAvg": 3270000.0, "augPeak": 5200000.0,
    # 	"sepAvg": 2130000.0, "sepPeak": 4900000.0,
    # 	"octAvg": 1750000.0, "octPeak": 2340000.0,
    # 	"novAvg": 2210000.0, "novPeak": 3550000.0,
    # 	"decAvg": 2480000.0, "decPeak": 3370000.0}
    defaultInputs = {
        "modelType": modelName,
        "feederName1": "ABEC Columbia",
        "runTime": "",
        "capitalCost": 30000,
        "omCost": 1000,
        "wholesaleEnergyCostPerKwh": 0.06,
        "retailEnergyCostPerKwh": 0.10,
        "peakDemandCostSpringPerKw": 5.0,
        "peakDemandCostSummerPerKw": 10.0,
        "peakDemandCostFallPerKw": 6.0,
        "peakDemandCostWinterPerKw": 8.0,
        "baselineTap": 3.0,
        "z_percent": 0.5,
        "i_percent": 0.0,
        "p_percent": 0.5,
        "power_factor": 0.9
    }
    for key in colomaMonths:
        defaultInputs[key] = colomaMonths[key]
    creationCode = __neoMetaModel__.new(modelDir, defaultInputs)
    try:
        shutil.copyfile(
            pJoin(__neoMetaModel__._omfDir, "static", "publicFeeders",
                  defaultInputs["feederName1"] + '.omd'),
            pJoin(modelDir, defaultInputs["feederName1"] + '.omd'))
    except:
        return False
    return creationCode
Example #57
def work(modelDir, inputDict):
    ''' Run the model in the foreground. WARNING: can take about a minute. '''
    # Global vars, and load data from the model directory.
    feederName = [x for x in os.listdir(modelDir)
                  if x.endswith('.omd')][0][:-4]
    inputDict["feederName1"] = feederName
    feederPath = pJoin(modelDir, feederName + '.omd')
    feederJson = json.load(open(feederPath))
    tree = feederJson.get("tree", {})
    attachments = feederJson.get("attachments", {})
    outData = {}
    # Run CVR analysis.
    # Reformat monthData and rates.
    rates = {
        k: float(inputDict[k])
        for k in [
            "capitalCost", "omCost", "wholesaleEnergyCostPerKwh",
            "retailEnergyCostPerKwh", "peakDemandCostSpringPerKw",
            "peakDemandCostSummerPerKw", "peakDemandCostFallPerKw",
            "peakDemandCostWinterPerKw"
        ]
    }
    monthNames = [
        "January", "February", "March", "April", "May", "June", "July",
        "August", "September", "October", "November", "December"
    ]
    monthToSeason = {
        'January': 'Winter',
        'February': 'Winter',
        'March': 'Spring',
        'April': 'Spring',
        'May': 'Spring',
        'June': 'Summer',
        'July': 'Summer',
        'August': 'Summer',
        'September': 'Fall',
        'October': 'Fall',
        'November': 'Fall',
        'December': 'Winter'
    }
    monthData = []
    for i, x in enumerate(monthNames):
        monShort = x[0:3].lower()
        season = monthToSeason[x]
        histAvg = float(inputDict.get(monShort + "Avg", 0))
        histPeak = float(inputDict.get(monShort + "Peak", 0))
        monthData.append({
            "monthId": i,
            "monthName": x,
            "histAverage": histAvg,
            "histPeak": histPeak,
            "season": season
        })
    # Graph the SCADA data.
    fig = plt.figure(figsize=(10, 6))
    indices = [r['monthName'] for r in monthData]
    d1 = [r['histPeak'] / (10**3) for r in monthData]
    d2 = [r['histAverage'] / (10**3) for r in monthData]
    ticks = range(len(d1))
    bar_peak = plt.bar(ticks, d1, color='gray')
    bar_avg = plt.bar(ticks, d2, color='dimgray')
    plt.legend([bar_peak[0], bar_avg[0]], ['histPeak', 'histAverage'],
               bbox_to_anchor=(0., 1.015, 1., .102),
               loc=3,
               ncol=2,
               mode="expand",
               borderaxespad=0.1)
    plt.xticks([t + 0.5 for t in ticks], indices)
    plt.ylabel('Mean and peak historical power consumptions (kW)')
    fig.autofmt_xdate()
    plt.savefig(pJoin(modelDir, "scadaChart.png"))
    outData["histPeak"] = d1
    outData["histAverage"] = d2
    outData["monthName"] = [name[0:3] for name in monthNames]
    # Graph feeder.
    fig = plt.figure(figsize=(10, 10))
    myGraph = feeder.treeToNxGraph(tree)
    feeder.latLonNxGraph(myGraph, neatoLayout=False)
    plt.savefig(pJoin(modelDir, "feederChart.png"))
    with open(pJoin(modelDir, "feederChart.png"), "rb") as inFile:
        outData["feederChart"] = inFile.read().encode("base64")
    # Get the load levels we need to test.
    allLoadLevels = [x.get('histPeak', 0) for x in monthData
                     ] + [y.get('histAverage', 0) for y in monthData]
    maxLev = _roundOne(max(allLoadLevels), 'up')
    minLev = _roundOne(min(allLoadLevels), 'down')
    tenLoadLevels = range(int(minLev), int(maxLev), int(
        (maxLev - minLev) / 10))
    # Gather variables from the feeder.
    for key in list(tree.keys()):  # snapshot the keys so entries can be deleted below
        # Set clock to single timestep.
        if tree[key].get('clock', '') == 'clock':
            tree[key] = {
                "timezone": "PST+8PDT",
                "stoptime": "'2013-01-01 00:00:00'",
                "starttime": "'2013-01-01 00:00:00'",
                "clock": "clock"
            }
        # Save swing node index.
        if tree[key].get('bustype', '').lower() == 'swing':
            swingIndex = key
            swingName = tree[key].get('name')
        # Remove all includes.
        if tree[key].get('omftype', '') == '#include':
            del tree[key]  # delete the tree entry, not just the loop variable
    # Find the substation regulator and config.
    for key in tree:
        if tree[key].get('object', '') == 'regulator' and tree[key].get(
                'from', '') == swingName:
            regIndex = key
            regConfName = tree[key]['configuration']
    if not regConfName: regConfName = False
    for key in tree:
        if tree[key].get('name', '') == regConfName:
            regConfIndex = key
    # Set substation regulator to manual operation.
    baselineTap = int(inputDict.get(
        "baselineTap"))  # GLOBAL VARIABLE FOR DEFAULT TAP POSITION
    tree[regConfIndex] = {
        'name': tree[regConfIndex]['name'],
        'object': 'regulator_configuration',
        'connect_type': '1',
        'raise_taps': '10',
        'lower_taps': '10',
        'CT_phase': 'ABC',
        'PT_phase': 'ABC',
        'regulation':
        '0.10',  #Yo, 0.10 means at tap_pos 10 we're 10% above 120V.
        'Control': 'MANUAL',
        'control_level': 'INDIVIDUAL',
        'Type': 'A',
        'tap_pos_A': str(baselineTap),
        'tap_pos_B': str(baselineTap),
        'tap_pos_C': str(baselineTap)
    }
    # Attach recorders relevant to CVR.
    recorders = [{
        'object': 'collector',
        'file': 'ZlossesTransformer.csv',
        'group': 'class=transformer',
        'limit': '0',
        'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'
    }, {
        'object': 'collector',
        'file': 'ZlossesUnderground.csv',
        'group': 'class=underground_line',
        'limit': '0',
        'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'
    }, {
        'object': 'collector',
        'file': 'ZlossesOverhead.csv',
        'group': 'class=overhead_line',
        'limit': '0',
        'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'
    }, {
        'object': 'recorder',
        'file': 'Zregulator.csv',
        'limit': '0',
        'parent': tree[regIndex]['name'],
        'property': 'tap_A,tap_B,tap_C,power_in.real,power_in.imag'
    }, {
        'object': 'collector',
        'file': 'ZvoltageJiggle.csv',
        'group': 'class=triplex_meter',
        'limit': '0',
        'property': 'min(voltage_12.mag),mean(voltage_12.mag),max(voltage_12.mag),std(voltage_12.mag)'
    }, {
        'object': 'recorder',
        'file': 'ZsubstationTop.csv',
        'limit': '0',
        'parent': tree[swingIndex]['name'],
        'property': 'voltage_A,voltage_B,voltage_C'
    }, {
        'object': 'recorder',
        'file': 'ZsubstationBottom.csv',
        'limit': '0',
        'parent': tree[regIndex]['to'],
        'property': 'voltage_A,voltage_B,voltage_C'
    }]
    biggest = 1 + max([int(k) for k in tree.keys()])
    for index, rec in enumerate(recorders):
        tree[biggest + index] = rec
    # Change constant PF loads to ZIP loads. (See evernote for rationale about 50/50 power/impedance mix.)
    blankZipModel = {
        'object': 'triplex_load',
        'name': 'NAMEVARIABLE',
        'base_power_12': 'POWERVARIABLE',
        'power_fraction_12': str(inputDict.get("p_percent")),
        'impedance_fraction_12': str(inputDict.get("z_percent")),
        'current_fraction_12': str(inputDict.get("i_percent")),
        'power_pf_12': str(
            inputDict.get("power_factor")
        ),  #MAYBEFIX: we can probably get this PF data from the Milsoft loads.
        'impedance_pf_12': str(inputDict.get("power_factor")),
        'current_pf_12': str(inputDict.get("power_factor")),
        'nominal_voltage': '120',
        'phases': 'PHASESVARIABLE',
        'parent': 'PARENTVARIABLE'
    }

    def powerClean(powerStr):
        ''' take 3339.39+1052.29j to 3339.39 '''
        return powerStr[0:powerStr.find('+')]

    for key in tree:
        if tree[key].get('object', '') == 'triplex_node':
            # Get existing variables.
            name = tree[key].get('name', '')
            power = tree[key].get('power_12', '')
            parent = tree[key].get('parent', '')
            phases = tree[key].get('phases', '')
            # Replace object and reintroduce variables.
            tree[key] = copy(blankZipModel)
            tree[key]['name'] = name
            tree[key]['base_power_12'] = powerClean(power)
            tree[key]['parent'] = parent
            tree[key]['phases'] = phases
    # Function to determine how low we can tap down in the CVR case:
    def loweringPotential(baseLine):
        ''' Given a baseline end of line voltage, how many more percent can we shave off the substation voltage? '''
        ''' testsWePass = [122.0,118.0,200.0,110.0] '''
        lower = int(math.floor((baseLine / 114.0 - 1) * 100)) - 1
        # If lower is negative, we can't return it because we'd be undervolting beyond what baseline already was!
        if lower < 0:
            return baselineTap
        else:
            return baselineTap - lower

    # Run all the powerflows.
    powerflows = []
    for doingCvr in [False, True]:
        # For each load level in the tenLoadLevels, run a powerflow with the load objects scaled to the level.
        for desiredLoad in tenLoadLevels:
            # Find the total load that was defined in Milsoft:
            loadList = []
            for key in tree:
                if tree[key].get('object', '') == 'triplex_load':
                    loadList.append(tree[key].get('base_power_12', ''))
            totalLoad = sum([float(x) for x in loadList])
            # Rescale each triplex load:
            for key in tree:
                if tree[key].get('object', '') == 'triplex_load':
                    currentPow = float(tree[key]['base_power_12'])
                    ratio = desiredLoad / totalLoad
                    tree[key]['base_power_12'] = str(currentPow * ratio)
            # If we're doing CVR then lower the voltage.
            if doingCvr:
                # Find the minimum voltage we can tap down to:
                newTapPos = baselineTap
                for row in powerflows:
                    if row.get('loadLevel', '') == desiredLoad:
                        newTapPos = loweringPotential(
                            row.get('lowVoltage', 114))
                # Tap it down to there.
                # MAYBEFIX: do each phase separately because that's how it's done in the field... Oof.
                tree[regConfIndex]['tap_pos_A'] = str(newTapPos)
                tree[regConfIndex]['tap_pos_B'] = str(newTapPos)
                tree[regConfIndex]['tap_pos_C'] = str(newTapPos)
            # Run the model through gridlab and put outputs in the table.
            output = gridlabd.runInFilesystem(tree,
                                              attachments=attachments,
                                              keepFiles=True,
                                              workDir=modelDir)
            os.remove(pJoin(modelDir, "PID.txt"))
            p = output['Zregulator.csv']['power_in.real'][0]
            q = output['Zregulator.csv']['power_in.imag'][0]
            s = math.sqrt(p**2 + q**2)
            lossTotal = 0.0
            for device in [
                    'ZlossesOverhead.csv', 'ZlossesTransformer.csv',
                    'ZlossesUnderground.csv'
            ]:
                for letter in ['A', 'B', 'C']:
                    r = output[device]['sum(power_losses_' + letter +
                                       '.real)'][0]
                    i = output[device]['sum(power_losses_' + letter +
                                       '.imag)'][0]
                    lossTotal += math.sqrt(r**2 + i**2)
            ## Entire output:
            powerflows.append({
                'doingCvr': doingCvr,
                'loadLevel': desiredLoad,
                'realPower': p,
                'powerFactor': p / s,
                'losses': lossTotal,
                'subVoltage': (output['ZsubstationBottom.csv']['voltage_A'][0] +
                    output['ZsubstationBottom.csv']['voltage_B'][0] +
                    output['ZsubstationBottom.csv']['voltage_C'][0]) / 3 / 60,
                'lowVoltage': output['ZvoltageJiggle.csv']['min(voltage_12.mag)'][0] / 2,
                'highVoltage': output['ZvoltageJiggle.csv']['max(voltage_12.mag)'][0] / 2
            })
    # For a given load level, find two points to interpolate on.
    def getInterpPoints(t):
        ''' Find the two points we can interpolate from. '''
        ''' tests pass on [tenLoadLevels[0],tenLoadLevels[5]+499,tenLoadLevels[-1]-988] '''
        loc = sorted(tenLoadLevels + [t]).index(t)
        if loc == 0:
            return (tenLoadLevels[0], tenLoadLevels[1])
        elif loc > len(tenLoadLevels) - 2:
            return (tenLoadLevels[-2], tenLoadLevels[-1])
        else:
            return (tenLoadLevels[loc - 1], tenLoadLevels[loc + 1])

    # Calculate peak reduction.
    for row in monthData:
        peak = row['histPeak']
        peakPoints = getInterpPoints(peak)
        peakTopBase = [
            x for x in powerflows if x.get('loadLevel', '') == peakPoints[-1]
            and x.get('doingCvr', '') == False
        ][0]
        peakTopCvr = [
            x for x in powerflows if x.get('loadLevel', '') == peakPoints[-1]
            and x.get('doingCvr', '') == True
        ][0]
        peakBottomBase = [
            x for x in powerflows if x.get('loadLevel', '') == peakPoints[0]
            and x.get('doingCvr', '') == False
        ][0]
        peakBottomCvr = [
            x for x in powerflows if x.get('loadLevel', '') == peakPoints[0]
            and x.get('doingCvr', '') == True
        ][0]
        # Linear interpolation so we aren't running umpteen million loadflows.
        x = (peakPoints[0], peakPoints[1])
        # Pair the y-values with their x-points: bottom reduction at x[0], top at x[1].
        y = (peakBottomBase['realPower'] - peakBottomCvr['realPower'],
             peakTopBase['realPower'] - peakTopCvr['realPower'])
        peakRed = y[0] + (y[1] - y[0]) * (peak - x[0]) / (x[1] - x[0])
        row['peakReduction'] = peakRed
    # Calculate energy reduction and loss reduction based on average load.
    for row in monthData:
        avgEnergy = row['histAverage']
        energyPoints = getInterpPoints(avgEnergy)
        avgTopBase = [
            x for x in powerflows if x.get('loadLevel', '') == energyPoints[-1]
            and x.get('doingCvr', '') == False
        ][0]
        avgTopCvr = [
            x for x in powerflows if x.get('loadLevel', '') == energyPoints[-1]
            and x.get('doingCvr', '') == True
        ][0]
        avgBottomBase = [
            x for x in powerflows if x.get('loadLevel', '') == energyPoints[0]
            and x.get('doingCvr', '') == False
        ][0]
        avgBottomCvr = [
            x for x in powerflows if x.get('loadLevel', '') == energyPoints[0]
            and x.get('doingCvr', '') == True
        ][0]
        # Linear interpolation so we aren't running umpteen million loadflows.
        x = (energyPoints[0], energyPoints[1])
        # Pair the y-values with their x-points: bottom reduction at x[0], top at x[1].
        y = (avgBottomBase['realPower'] - avgBottomCvr['realPower'],
             avgTopBase['realPower'] - avgTopCvr['realPower'])
        energyRed = y[0] + (y[1] - y[0]) * (avgEnergy - x[0]) / (x[1] - x[0])
        row['energyReduction'] = energyRed
        lossY = (avgBottomBase['losses'] - avgBottomCvr['losses'],
                 avgTopBase['losses'] - avgTopCvr['losses'])
        lossRed = lossY[0] + (lossY[1] - lossY[0]) * (avgEnergy - x[0]) / (x[1] - x[0])
        row['lossReduction'] = lossRed
    # Multiply by dollars.
    for row in monthData:
        row['energyReductionDollars'] = row['energyReduction'] / 1000 * (
            rates['wholesaleEnergyCostPerKwh'] -
            rates['retailEnergyCostPerKwh'])
        row['peakReductionDollars'] = row['peakReduction'] / 1000 * rates[
            'peakDemandCost' + row['season'] + 'PerKw']
        row['lossReductionDollars'] = row['lossReduction'] / 1000 * rates[
            'wholesaleEnergyCostPerKwh']
    # Pretty output
    def plotTable(inData):
        fig = plt.figure(figsize=(10, 5))
        plt.axis('off')
        plt.tight_layout()
        plt.table(cellText=[row for row in inData[1:]],
                  loc='center',
                  rowLabels=range(len(inData) - 1),
                  colLabels=inData[0])

    def dictalToMatrix(dictList):
        ''' Take our dictal format to a matrix. '''
        matrix = [dictList[0].keys()]
        for row in dictList:
            matrix.append(row.values())
        return matrix

    # Powerflow results.
    plotTable(dictalToMatrix(powerflows))
    plt.savefig(pJoin(modelDir, "powerflowTable.png"))
    # Monetary results.
    ## To print partial money table
    monthDataMat = dictalToMatrix(monthData)
    dimX = len(monthDataMat)
    dimY = len(monthDataMat[0])
    monthDataPart = []
    for k in range(0, dimX):
        monthDatatemp = []
        for m in range(4, dimY):
            monthDatatemp.append(monthDataMat[k][m])
        monthDataPart.append(monthDatatemp)
    plotTable(monthDataPart)
    plt.savefig(pJoin(modelDir, "moneyTable.png"))
    outData["monthDataMat"] = dictalToMatrix(monthData)
    outData["monthDataPart"] = monthDataPart
    # Graph the money data.
    fig = plt.figure(figsize=(10, 8))
    indices = [r['monthName'] for r in monthData]
    d1 = [r['energyReductionDollars'] for r in monthData]
    d2 = [r['lossReductionDollars'] for r in monthData]
    d3 = [r['peakReductionDollars'] for r in monthData]
    ticks = range(len(d1))
    bar_erd = plt.bar(ticks, d1, color='red')
    bar_lrd = plt.bar(ticks, d2, color='green')
    bar_prd = plt.bar(ticks, d3, color='blue', yerr=d2)
    plt.legend([bar_prd[0], bar_lrd[0], bar_erd[0]], [
        'peakReductionDollars', 'lossReductionDollars',
        'energyReductionDollars'
    ],
               bbox_to_anchor=(0., 1.015, 1., .102),
               loc=3,
               ncol=2,
               mode="expand",
               borderaxespad=0.1)
    plt.xticks([t + 0.5 for t in ticks], indices)
    plt.ylabel('Utility Savings ($)')
    plt.tight_layout(5.5, 1.3, 1.2)
    fig.autofmt_xdate()
    plt.savefig(pJoin(modelDir, "spendChart.png"))
    outData["energyReductionDollars"] = d1
    outData["lossReductionDollars"] = d2
    outData["peakReductionDollars"] = d3
    # Graph the cumulative savings.
    fig = plt.figure(figsize=(10, 5))
    annualSavings = sum(d1) + sum(d2) + sum(d3)
    annualSave = lambda x: (annualSavings - rates['omCost']) * x - rates['capitalCost']
    simplePayback = rates['capitalCost'] / (annualSavings - rates['omCost'])
    plt.xlabel('Year After Installation')
    plt.xlim(0, 30)
    plt.ylabel('Cumulative Savings ($)')
    plt.plot([0 for x in range(31)], c='gray')
    plt.axvline(x=simplePayback, ymin=0, ymax=1, c='gray', linestyle='--')
    plt.plot([annualSave(x) for x in range(31)], c='green')
    plt.savefig(pJoin(modelDir, "savingsChart.png"))
    outData["annualSave"] = [annualSave(x) for x in range(31)]
    # For autotest, there won't be such a file.
    return outData
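The peak, energy, and loss reductions above all use the same two-point linear interpolation; as a standalone helper (a restatement for clarity, not new behavior) it reads:

def interpolate(x0, x1, y0, y1, x):
    '''Linearly interpolate y at x between the points (x0, y0) and (x1, y1).'''
    return y0 + (y1 - y0) * (x - x0) / float(x1 - x0)

# e.g. peakRed = interpolate(peakPoints[0], peakPoints[1], y[0], y[1], peak)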
Example #58
from jinja2 import Template
from matplotlib import pyplot as plt
from omf.models import __neoMetaModel__
from __neoMetaModel__ import *

# OMF imports
import omf.feeder as feeder
from omf.solvers import gridlabd

# Model metadata:
fileName = os.path.basename(__file__)
modelName = fileName[0:fileName.rfind('.')]
tooltip = "The cvrStatic model calculates the expected costs and benefits (including energy, loss, and peak reductions) for implementing conservation voltage reduction on a given feeder circuit."

# Our HTML template for the interface:
with open(pJoin(__neoMetaModel__._myDir, modelName + ".html"),
          "r") as tempFile:
    template = Template(tempFile.read())


def work(modelDir, inputDict):
    ''' Run the model in the foreground. WARNING: can take about a minute. '''
    # Global vars, and load data from the model directory.
    feederName = [x for x in os.listdir(modelDir)
                  if x.endswith('.omd')][0][:-4]
    inputDict["feederName1"] = feederName
    feederPath = pJoin(modelDir, feederName + '.omd')
    feederJson = json.load(open(feederPath))
    tree = feederJson.get("tree", {})
    attachments = feederJson.get("attachments", {})
    outData = {}
Example #59
def runForeground(modelDir):
	''' Run the model in its directory. WARNING: GRIDLAB CAN TAKE HOURS TO COMPLETE. '''
	inputDict = json.load(open(pJoin(modelDir, 'allInputData.json')))
	print "STARTING TO RUN", modelDir
	beginTime = datetime.datetime.now()
	# Prepare data and clean the workspace: on a re-run, remove all data in the subfolders.
	for dirs in os.listdir(modelDir):
		if os.path.isdir(pJoin(modelDir, dirs)):
			shutil.rmtree(pJoin(modelDir, dirs))
	# Get the names of the feeders from the .omd files:
	feederNames = [x[0:-4] for x in os.listdir(modelDir) if x.endswith(".omd")]
	for i, key in enumerate(feederNames):
		inputDict['feederName' + str(i + 1)] = feederNames[i]
	# Run GridLAB-D once for each feeder:
	for feederName in feederNames:
		try:
			os.remove(pJoin(modelDir, feederName, "allOutputData.json"))
		except Exception, e:
			pass
		if not os.path.isdir(pJoin(modelDir, feederName)):
			os.makedirs(pJoin(modelDir, feederName)) # create subfolders for feeders
		shutil.copy(pJoin(modelDir, feederName + ".omd"),
			pJoin(modelDir, feederName, "feeder.omd"))
		inputDict["climateName"] = zipCodeToClimateName(inputDict["zipCode"])
		shutil.copy(pJoin(_omfDir, "data", "Climate", inputDict["climateName"] + ".tmy2"),
			pJoin(modelDir, feederName, "climate.tmy2"))
		try:
			startTime = datetime.datetime.now()
			feederJson = json.load(open(pJoin(modelDir, feederName, "feeder.omd")))
			tree = feederJson["tree"]
			# Set up GLM with correct time and recorders:
			feeder.attachRecorders(tree, "Regulator", "object", "regulator")
			feeder.attachRecorders(tree, "Capacitor", "object", "capacitor")
			feeder.attachRecorders(tree, "Inverter", "object", "inverter")
			feeder.attachRecorders(tree, "Windmill", "object", "windturb_dg")
			feeder.attachRecorders(tree, "CollectorVoltage", None, None)
			feeder.attachRecorders(tree, "Climate", "object", "climate")
			feeder.attachRecorders(tree, "OverheadLosses", None, None)
			feeder.attachRecorders(tree, "UndergroundLosses", None, None)
			feeder.attachRecorders(tree, "TriplexLosses", None, None)
			feeder.attachRecorders(tree, "TransformerLosses", None, None)
			feeder.groupSwingKids(tree)
			feeder.adjustTime(tree=tree, simLength=float(inputDict["simLength"]),
				simLengthUnits=inputDict["simLengthUnits"], simStartDate=inputDict["simStartDate"])
			# RUN GRIDLABD IN FILESYSTEM (EXPENSIVE!)
			rawOut = gridlabd.runInFilesystem(tree, attachments=feederJson["attachments"],
				keepFiles=True, workDir=pJoin(modelDir, feederName))
			cleanOut = {}
			# Std Err and Std Out
			cleanOut['stderr'] = rawOut['stderr']
			cleanOut['stdout'] = rawOut['stdout']
			# Time Stamps: prefer '# timestamp', fall back to '# property.. timestamp'.
			cleanOut['timeStamps'] = []
			for key in rawOut:
				if '# timestamp' in rawOut[key]:
					cleanOut['timeStamps'] = rawOut[key]['# timestamp']
					break
				elif '# property.. timestamp' in rawOut[key]:
					cleanOut['timeStamps'] = rawOut[key]['# property.. timestamp']
			# Day/Month Aggregation Setup:
			stamps = cleanOut.get('timeStamps',[])
			level = inputDict.get('simLengthUnits','hours')
			# Climate
			for key in rawOut:
				if key.startswith('Climate_') and key.endswith('.csv'):
					cleanOut['climate'] = {}
					cleanOut['climate']['Rain Fall (in/h)'] = hdmAgg(rawOut[key].get('rainfall'), sum, level)
					cleanOut['climate']['Wind Speed (m/s)'] = hdmAgg(rawOut[key].get('wind_speed'), avg, level)
					cleanOut['climate']['Temperature (F)'] = hdmAgg(rawOut[key].get('temperature'), max, level)
					cleanOut['climate']['Snow Depth (in)'] = hdmAgg(rawOut[key].get('snowdepth'), max, level)
					cleanOut['climate']['Direct Insolation (W/m^2)'] = hdmAgg(rawOut[key].get('solar_direct'), sum, level)
			# Voltage Band
			if 'VoltageJiggle.csv' in rawOut:
				cleanOut['allMeterVoltages'] = {}
				cleanOut['allMeterVoltages']['Min'] = hdmAgg([(i / 2) for i in rawOut['VoltageJiggle.csv']['min(voltage_12.mag)']], min, level)
				cleanOut['allMeterVoltages']['Mean'] = hdmAgg([(i / 2) for i in rawOut['VoltageJiggle.csv']['mean(voltage_12.mag)']], avg, level)
				cleanOut['allMeterVoltages']['StdDev'] = hdmAgg([(i / 2) for i in rawOut['VoltageJiggle.csv']['std(voltage_12.mag)']], avg, level)
				cleanOut['allMeterVoltages']['Max'] = hdmAgg([(i / 2) for i in rawOut['VoltageJiggle.csv']['max(voltage_12.mag)']], max, level)
				cleanOut['allMeterVoltages']['stdDevPos'] = [(x+y/2) for x,y in zip(cleanOut['allMeterVoltages']['Mean'], cleanOut['allMeterVoltages']['StdDev'])]
				cleanOut['allMeterVoltages']['stdDevNeg'] = [(x-y/2) for x,y in zip(cleanOut['allMeterVoltages']['Mean'], cleanOut['allMeterVoltages']['StdDev'])]
			# Total # of meters
			count = 0
			with open(pJoin(modelDir, feederName, "feeder.omd")) as f:
				for line in f:
					if "\"objectType\": \"triplex_meter\"" in line:
						count+=1
			# print "count=", count
			cleanOut['allMeterVoltages']['triplexMeterCount'] = float(count)
			# Power Consumption
			cleanOut['Consumption'] = {}
			# Set default value to be 0, avoiding missing value when computing Loads
			cleanOut['Consumption']['Power'] = [0] * int(inputDict["simLength"])
			cleanOut['Consumption']['Losses'] = [0] * int(inputDict["simLength"])
			cleanOut['Consumption']['DG'] = [0] * int(inputDict["simLength"])
			for key in rawOut:
				if key.startswith('SwingKids_') and key.endswith('.csv'):
					oneSwingPower = hdmAgg(vecPyth(rawOut[key]['sum(power_in.real)'],rawOut[key]['sum(power_in.imag)']), avg, level)
					if 'Power' not in cleanOut['Consumption']:
						cleanOut['Consumption']['Power'] = oneSwingPower
					else:
						cleanOut['Consumption']['Power'] = vecSum(oneSwingPower,cleanOut['Consumption']['Power'])
				elif key.startswith('Inverter_') and key.endswith('.csv'):
					realA = rawOut[key]['power_A.real']
					realB = rawOut[key]['power_B.real']
					realC = rawOut[key]['power_C.real']
					imagA = rawOut[key]['power_A.imag']
					imagB = rawOut[key]['power_B.imag']
					imagC = rawOut[key]['power_C.imag']
					oneDgPower = hdmAgg(vecSum(vecPyth(realA,imagA),vecPyth(realB,imagB),vecPyth(realC,imagC)), avg, level)
					if 'DG' not in cleanOut['Consumption']:
						cleanOut['Consumption']['DG'] = oneDgPower
					else:
						cleanOut['Consumption']['DG'] = vecSum(oneDgPower,cleanOut['Consumption']['DG'])
				elif key.startswith('Windmill_') and key.endswith('.csv'):
					vrA = rawOut[key]['voltage_A.real']
					vrB = rawOut[key]['voltage_B.real']
					vrC = rawOut[key]['voltage_C.real']
					viA = rawOut[key]['voltage_A.imag']
					viB = rawOut[key]['voltage_B.imag']
					viC = rawOut[key]['voltage_C.imag']
					crB = rawOut[key]['current_B.real']
					crA = rawOut[key]['current_A.real']
					crC = rawOut[key]['current_C.real']
					ciA = rawOut[key]['current_A.imag']
					ciB = rawOut[key]['current_B.imag']
					ciC = rawOut[key]['current_C.imag']
					powerA = vecProd(vecPyth(vrA,viA),vecPyth(crA,ciA))
					powerB = vecProd(vecPyth(vrB,viB),vecPyth(crB,ciB))
					powerC = vecProd(vecPyth(vrC,viC),vecPyth(crC,ciC))
					# HACK: multiply by negative one because turbine power sign is opposite all other DG:
					oneDgPower = [-1.0 * x for x in hdmAgg(vecSum(powerA,powerB,powerC), avg, level)]
					if 'DG' not in cleanOut['Consumption']:
						cleanOut['Consumption']['DG'] = oneDgPower
					else:
						cleanOut['Consumption']['DG'] = vecSum(oneDgPower,cleanOut['Consumption']['DG'])
				elif key in ['OverheadLosses.csv', 'UndergroundLosses.csv', 'TriplexLosses.csv', 'TransformerLosses.csv']:
					realA = rawOut[key]['sum(power_losses_A.real)']
					imagA = rawOut[key]['sum(power_losses_A.imag)']
					realB = rawOut[key]['sum(power_losses_B.real)']
					imagB = rawOut[key]['sum(power_losses_B.imag)']
					realC = rawOut[key]['sum(power_losses_C.real)']
					imagC = rawOut[key]['sum(power_losses_C.imag)']
					oneLoss = hdmAgg(vecSum(vecPyth(realA,imagA),vecPyth(realB,imagB),vecPyth(realC,imagC)), avg, level)
					if 'Losses' not in cleanOut['Consumption']:
						cleanOut['Consumption']['Losses'] = oneLoss
					else:
						cleanOut['Consumption']['Losses'] = vecSum(oneLoss,cleanOut['Consumption']['Losses'])
			# Aggregate up the timestamps:
			if level=='days':
				cleanOut['timeStamps'] = aggSeries(stamps, stamps, lambda x:x[0][0:10], 'days')
			elif level=='months':
				cleanOut['timeStamps'] = aggSeries(stamps, stamps, lambda x:x[0][0:7], 'months')
			# Write the output.
			with open(pJoin(modelDir, feederName, "allOutputData.json"),"w") as outFile:
				json.dump(cleanOut, outFile, indent=4)
			# Update the runTime in the input file.
			endTime = datetime.datetime.now()
			inputDict["runTime"] = str(datetime.timedelta(seconds=int((endTime - startTime).total_seconds())))
			with open(pJoin(modelDir, feederName, "allInputData.json"),"w") as inFile:
				json.dump(inputDict, inFile, indent=4)
			# Clean up the PID file.
			os.remove(pJoin(modelDir, feederName,"PID.txt"))
			print "DONE RUNNING GRIDLABMULTI", modelDir, feederName
		except Exception as e:
			print "MODEL CRASHED GRIDLABMULTI", e, modelDir, feederName
			cancel(pJoin(modelDir, feederName))
			with open(pJoin(modelDir, feederName, "stderr.txt"), "a+") as stderrFile:
				traceback.print_exc(file = stderrFile)
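Several helpers used above (hdmAgg, aggSeries, avg, and the vec* functions) are not included in this excerpt. A plausible minimal sketch of the vec* trio, inferred from how they are called (elementwise complex magnitude, sum, and product over equal-length lists), not the project's actual definitions:

def vecPyth(realList, imagList):
	'''Elementwise magnitude of a complex series given real and imaginary parts.'''
	return [(r ** 2 + im ** 2) ** 0.5 for r, im in zip(realList, imagList)]

def vecSum(*vectors):
	'''Elementwise sum of any number of equal-length vectors.'''
	return [sum(vals) for vals in zip(*vectors)]

def vecProd(a, b):
	'''Elementwise product of two equal-length vectors.'''
	return [x * y for x, y in zip(a, b)]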
Example #60
from omf.solvers import gridlabd
from omf.weather import zipCodeToClimateName
from flask import session
import web

# Model metadata:
fileName = os.path.basename(__file__)
modelName = fileName[0:fileName.rfind('.')]
tooltip = "The gridlabMulti model allows you to run multiple instances of GridLAB-D and compare their output visually."

# Locational variables so we don't have to rely on OMF being in the system path.
_myDir = os.path.dirname(os.path.abspath(__file__))
_omfDir = os.path.dirname(_myDir)

# Our HTML template for the interface:
with open(pJoin(_myDir,modelName+".html"),"r") as tempFile:
	template = Template(tempFile.read())

def renderTemplate(modelDir, absolutePaths=False, datastoreNames={}):
	''' Render the model template to an HTML string.
	By default render a blank one for new input.
	If modelDir is valid, render results post-model-run.
	If absolutePaths, the HTML can be opened without a server. '''
	try:
		inJson = json.load(open(pJoin(modelDir,"allInputData.json")))
		modelPath, modelName = pSplit(modelDir)
		deepPath, user = pSplit(modelPath)
		inJson["modelName"] = modelName
		inJson["user"] = user
		modelType = inJson["modelType"]
		template = getattr(omf.models, modelType).template