Example #1
def runGLMS(glms, batch):
	# Run those .glms by executing a batch file.
	# (os, dir, SCADA, days, gld_job_handler, and gleanMetrics come from the
	# enclosing module scope in the original source.)
	print ('Beginning simulations in GridLAB-D.')
	try:
		if os.name == 'nt':
			gld_job_handler.run_batch_file(dir, batch)
		else:
			gld_job_handler.run_shell_script(dir)
	except KeyboardInterrupt:
		print ('GridLAB-D simulations interrupted during initial round.\n')
		return None
	# Get comparison metrics from simulation outputs.
	print ('Beginning comparison of initial simulation output with SCADA.')
	raw_metrics = gleanMetrics.funcRawMetsDict(dir, glms, SCADA, days)
	return raw_metrics
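A minimal usage sketch, assuming hypothetical .glm filenames and batch filename (the surrounding module supplies dir, SCADA, and days):

# Hypothetical call; the .glm names and batch filename are placeholders.
glms = ['Calib1_Config_0_summer.glm', 'Calib1_Config_0_winter.glm']
raw_metrics = runGLMS(glms, 'run_all_glms.bat')
if raw_metrics is None:
	print ('Simulations were interrupted; nothing to compare.')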
Example #2
def calibrateLoop(glm_name, main_mets, scada, days, eval, counter, baseGLM, case_flag, feeder_config, dir, batch_file):
	'''This recursive function loops the calibration process until complete.
	
	Arguments:
	glm_name -- ID of "best" .glm so far (CalibX_Config_Y)
	main_mets (list) -- Summer Peak Value diff, Summer Total Energy diff, Winter Peak Value diff, Winter Total Energy diff for glm_name. Used to determine action.
	scada (list of lists) -- SCADA values for comparing closeness to .glm simulation output.
	days (list) -- list of dates for summer, winter, and spring
	eval (int) -- result of evaluating the WSM score against the acceptable WSM and the previous WSM. 0 = continue with first choice action, 2 = try "next choice" action
	counter (int) -- advances each time this function is called.
	baseGLM (dictionary) -- original base dictionary for use in Milsoft_GridLAB_D_Feeder_Generation.py
	case_flag (int) -- also for use in Milsoft_GridLAB_D_Feeder_Generation.py
	feeder_config (string) -- feeder configuration file (weather, sizing, etc.) (TODO: this is future work; leave as 'None')
	dir (string) -- directory where files for this feeder are stored and run
	batch_file (string) -- filename of the batch file that was created to run .glms in the directory
	
	Given the above input, this function takes the following steps:
	1. Advance counter.
	2. Use main metrics to determine which action will further calibrate the feeder model. 
	3. Create a calibration file for each set of possible calibration values.
	4. For each calibration file that was generated, create three .glms (one for each day).
	5. Run all the .glm models in GridLAB-D.
	6. For each simulation, compare output to SCADA values. 
	7. Calculate the WSM score for each .glm.
	8. Determine which .glm produced the best WSM score. 
	9. Evaluate whether or not this WSM score indicates calibration must finish.
	10. If finished, return the final .glm. If not finished, send "best" .glm, its main metrics, the SCADA values, and the current counter back to this function. 
	
	'''

	# Advance counter. 
	counter += 1
	print ('\n****************************** Calibration Round # '+str(counter)+':')
	
	# Get the last action
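	# calib_record maps round number -> [glm name, WSM score, action ID, eval/fail code]
	# (inferred from the assignments below); an entry for the previous round must
	# already exist when this is read.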
	last_action = calib_record[counter-1][2]
	failed = calib_record[counter-1][3]
	print ("The last action ID was "+str(last_action)+". (Fail code = "+str(failed)+")")
	if last_action == 9:
		# we want the last action tried before a schedule skew test round
		last_action = calib_record[counter-2][2]
		failed = calib_record[counter-2][3]
		print ("We're going to consider the action before the schedule skew test, which was "+ str(last_action)+". (Fail code = "+str(failed)+")")

	# Delete all .glms from the directory. The winning ones from last round should have already been moved to a subdirectory.
	# The batch file runs every *.glm in the directory, so these unnecessary ones have got to go.
	print ("Removing .glm files from the last calibration round...")
	cleanUP(dir)
	
	if case_flag == -1:
		action = -1
		desc = 'scale normalized load shape'
	else:
		if last_action in action_count.keys():
			if action_count[last_action] == 1 and failed != 2:
				# wipe failed counter
				print ("\t( New action was tried and succesful. Wiping action fail counts. )")
				for i in action_failed_count.keys():
					action_failed_count[i] = 0
				
		print ("\nBegin choosing calibration action...")
		# Based on the main metrics (pv_s,te_s,pv_w,te_w), choose the desired action.
		desired, desc = chooseAction.chooseAction(main_mets)
		action = desired
		print ("\tFirst choice: Action ID "+str(action)+" ("+desc+").")
	
		# Use the 'big knobs' as long as total energy differences are 'really big' (get into ballpark)
		c = 0
		print ("\tAre we in ballpark yet?...")
		if abs(main_mets[1]) > 0.25 or abs(main_mets[3]) > 0.25:
			print ("\tSummer total energy difference is "+str(round(main_mets[1]*100,2))+"% and winter total energy is "+str(round(main_mets[3]*100,2))+"%...")
			c = 1
		else:
			print ("\tYes, summer total energy difference is "+str(round(main_mets[1]*100,2))+"% and winter total energy is "+str(round(main_mets[3]*100,2))+"%.")
		if c == 1:
			if main_mets[1] < 0 and main_mets[3] < 0: # summer & winter low
				print ("\tTrying to raise load overall (Action ID 1 or 7)...")
				if not failedLim(1):
					action = 1
				elif not failedLim(7):
					action = 7
			elif main_mets[1] > 0 and main_mets[3] > 0: # summer & winter high
				print ("\tTrying to lower load overall (Action ID -1 or -7)...")
				if not failedLim(-1):
					action = -1
				elif not failedLim(-7):
					action = -7
			elif abs(main_mets[1]) > abs(main_mets[3]):
				if main_mets[1] > 0:
					print ("\tTry to lower load overall (Action ID -1 or -7)...")
					if not failedLim(-1):
						action = -1
					elif not failedLim(-7):
						action = -7
				else:
					print ("\tTry to raise load overall (Action ID 1 or 7)...")
					if not failedLim(1):
						action = 1
					elif not failedLim(7):
						action = 7
			elif abs(main_mets[3]) > abs(main_mets[1]):
				if main_mets[3] > 0:
					print ("\tTry to lower load overall, or lower winter only (Action ID -1, -7, -2, -3)...")
					if not failedLim(-1):
						action = -1
					elif not failedLim(-7):
						action = -7
					elif not failedLim(-2):
						action = -2
					elif not failedLim(-3):
						action = -3
				else:
					print ("\tTry to raise load overall, or raise winter only (Action ID 1, 7, 2, 3)...")
					if not failedLim(1):
						action = 1
					elif not failedLim(7):
						action = 7
					elif not failedLim(2):
						action = 2
					elif not failedLim(3):
						action = 3
			desc = next_choice_action.action_desc[action]
			print ("\tSet Action ID to "+str(action)+" ( "+desc+" ).")
		if action == desired:
			print ("\tOk, let's go with first choice Action ID "+str(desired))

		if failedLim(action):
			# reached fail limit, take next choice
			action = takeNext(action,action)
			desc = next_choice_action.action_desc[action]
			print ("\tTrying action "+str(action)+" ("+desc+")")
			if abs(action) == 0:
				print ("\tWe're all out of calibration options...")
				cleanUP(dir)
				return glm_name

		# Once every few rounds, make sure we check some schedule skew options
		if counter in [3,7,11,15] and not failedLim(9):
			action, desc = 9, "test several residential schedule skew options"
			
	# Update action counters
	if action in action_count.keys():
		action_count[action] += 1
	else:
		action_count[action] = 1
		
	print ("\tFINAL DECISION: We're going to use Action ID "+str(action)+" ( "+desc+" ). \n\tThis action will have been tried " + str(action_count[action]) + " times.")
	
	c = 0
	calibration_config_files = []
	if case_flag == -1:
		# Scaling normalized load shape
		if abs(main_mets[0]) + abs(main_mets[2]) != abs(main_mets[0] + main_mets[2]):
			print ('*** Warning: One peak is high and one peak is low... this shouldn\'t happen with load shape scaling...')
		last_scalar = getCalibVals(glm_name,dir)[-1]
		avg_peak_diff = (main_mets[0] + main_mets[2])/2
		ideal_scalar = round(last_scalar * (1/(avg_peak_diff + 1)),4)
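		# If the simulated peak is (1 + avg_peak_diff) times SCADA and peak load
		# scales roughly linearly with the scalar, dividing the last scalar by
		# that factor should hit the target; the 25/50/75% steps below bracket
		# the ideal value in case that linearity assumption is off.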
		a = round(last_scalar + (ideal_scalar - last_scalar) * 0.25,4)
		b = round(last_scalar + (ideal_scalar - last_scalar) * 0.50,4)
		d = round(last_scalar + (ideal_scalar - last_scalar) * 0.75,4)
		load_shape_scalars = [a, b, d, ideal_scalar]
		for i in load_shape_scalars:
			calibration_config_files.append(writeLIB(c, counter, default_params, dir, i))
			c += 1
	else:
		# Take the decided-upon action and produce a list of lists, each with different calibration parameters to try
		calibrations = takeAction.takeAction(action,action_count[action],getCalibVals(glm_name,dir),main_mets)
		print ("That's " + str(len(calibrations)) + " calibrations to test.")
		# For each list of calibration values in list calibrations, make a .py file.
		print("Begin writing calibration files...")
		for i in calibrations:
			calibration_config_files.append(writeLIB(c, counter, i, dir))
			c += 1
	
		
	# Populate feeder .glms for each file listed in calibration_config_files
	glms_ran = []
	for i in calibration_config_files:
		# need everything necessary to run Milsoft_GridLAB_D_Feeder_Generation.py
		glms_ran.extend(makeGLM.makeGLM(clockDates(days), i, baseGLM, case_flag, feeder_config, dir))
		
	# Run all the .glms by executing the batch file.
	print ('Beginning simulations in GridLAB-D.')
	if os.name == 'nt':
		gld_job_handler.run_batch_file(dir,batch_file)
	else:
		gld_job_handler.run_shell_script(dir)
		
	# Get comparison metrics between simulation outputs and SCADA.
	raw_metrics = gleanMetrics.funcRawMetsDict(dir, glms_ran, scada, days)
	
	if len(raw_metrics) == 0:
		if case_flag == -1:
			print ("All runs failed.")
			return glm_name
		else:
			print ("It appears that none of the .glms in the last round ran successfully. Let's try a different action.")
			if action in action_failed_count.keys():
				action_failed_count[action] += 1
			else:
				action_failed_count[action] = 1
			calib_record[counter] = ["*all runs failed",calib_record[counter-1][1],action,2]
			#log.write(calib_record[counter][0]+"\t"+str(calib_record[counter][1])+"\t"+str(calib_record[counter][2])+"\t\t"+str(calib_record[counter][3])+"\t"+"N/A"+"\t"+"N/A"+"\n")
			log.write(calib_record[counter][0]+",\t"+str(calib_record[counter][1])+",\t"+str(calib_record[counter][2])+",\t"+str(calib_record[counter][3])+",\t"+"N/A"+",\t,,,,,,,,,,,,,"+"N/A"+"\n")
			cleanUP(dir)
			return calibrateLoop(glm_name, main_mets, scada, days, 2, counter, baseGLM, case_flag, feeder_config, dir, batch_file)
	else:
		# Choose the glm with the best WSM score.
		glm_best, wsm_score_best = chooseBest(raw_metrics)
		print ('** The winning WSM score this round is ' + str(wsm_score_best) + '.')
		
		print ('** Last round\'s WSM score was '+str(calib_record[counter-1][1])+'.')
		
		# Evaluate WSM score
		wsm_eval = WSMevaluate(wsm_score_best, counter)
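		# From the branches below: wsm_eval == 1 means the score is acceptable
		# (finish), wsm_eval == 2 means no improvement over last round (retry
		# with a "next choice" action), anything else means another normal round.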
		
		# Update calibration record dictionary for this round.
		calib_record[counter] = [glm_best,wsm_score_best,action,wsm_eval]
		
		Roundbestsofar,WSMbestsofar = bestSoFar(counter)
		print ('** Score to beat is '+str(WSMbestsofar)+' from round '+str(Roundbestsofar)+'.')
		
		if printAllStats == 1:
			for i in raw_metrics.keys():
				stats_log.write(i + ",\t" + stats_log_dict[i] + ",\t" + re.sub(r'\[|\]', '', str(getCalibVals(i, dir))) + ",\t")
				warnOutliers(raw_metrics[i],1)
		parameter_values = getCalibVals(glm_best, dir)
		print ('Winning calibration parameters:\n\tAvg. House: '+str(parameter_values[0])+' VA\tAvg. Comm: '+str(parameter_values[1])+' VA\n\tBase Load: +'+str(round(parameter_values[2]*100,2))+'%\tOffsets: '+str(parameter_values[3])+' F\n\tCOP values: +'+str(round(parameter_values[5]*100,2))+'%\tGas Heat Pen.: -'+str(round(parameter_values[8]*100,2))+'%\n\tSched. Skew Std: '+str(parameter_values[9])+' s\tWindow-Wall Ratio: '+str(round(parameter_values[10]*100,2))+'%\n\tAddtl Heat Deg: '+str(parameter_values[11])+' F\tSchedule Skew: '+str(parameter_values[7])+' s\t')
		
		# Print warnings about any outlier metrics. 
		warnOutliers(raw_metrics[glm_best],0)
		
		# Get values of our four main metrics. 
		main_mets_glm_best = getMainMetrics(glm_best, raw_metrics)
		
		# print to log
		log.write(calib_record[counter][0]+",\t"+str(calib_record[counter][1])+",\t"+str(calib_record[counter][2])+",\t"+str(calib_record[counter][3])+",\t"+re.sub(r'\[|\]','',str(getCalibVals(glm_best,dir)))+",\t"+re.sub(r'\[|\]','',str(main_mets_glm_best))+"\n")

		if wsm_eval == 1: 
			print ("This WSM score has been deemed acceptable.")
			movetoWinners(glm_best,dir)
			cleanUP(dir)
			return glm_best
		else:
			# Not looping load scaling, assuming that our second time through will be OK. 
			if case_flag == -1:
				movetoWinners(glm_best,dir)
				cleanUP(dir)
				return glm_best
			else:
				if wsm_eval == 2:
					# glm_best is not better than the previous. Run loop again but take "next choice" action. 
					if action in action_failed_count.keys():
						action_failed_count[action] += 1
					else:
						action_failed_count[action] = 1
					print ("\nThat last action did not improve the WSM score from the last round. Let's go back and try something different.")
					return calibrateLoop(glm_name, main_mets, scada, days, wsm_eval, counter, baseGLM, case_flag, feeder_config, dir, batch_file)
						   
				else:
					print ('\nTime for the next round.')
					movetoWinners(glm_best,dir)
					return calibrateLoop(glm_best, main_mets_glm_best, scada, days, wsm_eval, counter, baseGLM, case_flag, feeder_config, dir, batch_file)
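A hedged sketch of how this recursion might be kicked off. The initial values, working directory, and batch filename below are illustrative assumptions, not from the original source; calib_record needs a round-0 entry before the first call, and feeder_config is left as 'None' per the docstring:

# Hypothetical kickoff (all names and values here are assumptions).
calib_record[0] = ['Calib0_Config_0', initial_wsm_score, 0, 0]
final_glm = calibrateLoop('Calib0_Config_0', initial_main_mets, scada_vals,
                          [summer_day, winter_day, spring_day], 0, 0, baseGLM,
                          0, 'None', working_dir, 'run_all_glms.bat')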