def _main():
    """Entry point: query ACLineSegment measurement mRIDs for the feeder and
    subscribe a SwitchingActions handler to the simulation output topic.

    Reads two positional CLI args (``simulation_id``, ``request``) plus an
    optional ``--message_period``.  Blocks forever once subscribed.
    """
    _log.debug("Starting application")
    print("Application starting-------------------------------------------------------")
    # message_period is shared with code outside this function via a module global.
    global message_period
    parser = argparse.ArgumentParser()
    parser.add_argument("simulation_id",
                        help="Simulation id to use for responses on the message bus.")
    parser.add_argument("request", help="Simulation Request")
    parser.add_argument("--message_period",
                        help="How often the sample app will send open/close capacitor message.",
                        default=DEFAULT_MESSAGE_PERIOD)
    # These are now set through the docker container interface via env variables or defaulted to
    # proper values.
    #
    # parser.add_argument("-u", "--user", default="system",
    #                     help="The username to authenticate with the message bus.")
    # parser.add_argument("-p", "--password", default="manager",
    #                     help="The password to authenticate with the message bus.")
    # parser.add_argument("-a", "--address", default="127.0.0.1",
    #                     help="tcp address of the mesage bus.")
    # parser.add_argument("--port", default=61613, type=int,
    #                     help="the stomp port on the message bus.")
    #
    opts = parser.parse_args()
    listening_to_topic = simulation_output_topic(opts.simulation_id)
    print(listening_to_topic)
    message_period = int(opts.message_period)
    # NOTE(review): this strips ALL single quotes from the request string before
    # JSON parsing, not just delimiters -- assumes none occur inside values.
    sim_request = json.loads(opts.request.replace("\'", ""))
    model_mrid = sim_request["power_system_config"]["Line_name"]
    print("\n \n The model running is IEEE 123 node with MRID:")
    print(model_mrid)
    _log.debug("Model mrid is: {}".format(model_mrid))
    gapps = GridAPPSD(opts.simulation_id, address=utils.get_gridappsd_address(),
                      username=utils.get_gridappsd_user(),
                      password=utils.get_gridappsd_pass())
    # Get measurement MRIDS for primary nodes in the feeder
    topic = "goss.gridappsd.process.request.data.powergridmodel"
    message = {
        "modelId": model_mrid,
        "requestType": "QUERY_OBJECT_MEASUREMENTS",
        "resultFormat": "JSON",
        "objectType": "ACLineSegment"
    }
    obj_msr_loads = gapps.get_response(topic, message, timeout=90)
    # print(obj_msr_loads)
    # Persist the raw query response locally for offline inspection.
    with open('measid.json', 'w') as json_file:
        json.dump(obj_msr_loads, json_file)
    toggler = SwitchingActions(opts.simulation_id, gapps, obj_msr_loads)
    print("Now subscribing")
    gapps.subscribe(listening_to_topic, toggler)
    # Keep the process alive; the subscription callback does the real work.
    while True:
        time.sleep(0.1)
# Demonstration/debug queries against the platform model database.
# NOTE(review): relies on `gapps` and `model_mrid` already being in scope from
# surrounding code; the hard-coded mRID looks like a specific test object --
# confirm it exists in the target model.
object_type = 'ConnectivityNode'  # NOTE(review): assigned but never used below
object_search = gapps.query_object('_08175e8f-b762-4c9b-92c4-07f369f69bd4')
print("Query Object")
print(object_search)
x = gapps.query_object_dictionary(model_mrid, '_08175e8f-b762-4c9b-92c4-07f369f69bd4')
print("Object Dictionary")
print(x)
y = gapps.query_object('_08175e8f-b762-4c9b-92c4-07f369f69bd4', model_mrid)
print("Measurement Query")
print(y)
# Pull every measurement for the model and pick out the single test mRID.
topic = "goss.gridappsd.process.request.data.powergridmodel"
message = {
    "modelId": model_mrid,
    "requestType": "QUERY_OBJECT_MEASUREMENTS",
    "resultFormat": "JSON",
}
object_meas = gapps.get_response(topic, message)
object_meas = object_meas['data']
# next() raises StopIteration if the mRID is absent -- acceptable for a debug script.
testvar = next(item for item in object_meas
               if item['measid'] == '_08175e8f-b762-4c9b-92c4-07f369f69bd4')
print(testvar)
name = testvar['name']
print(name)
def run(log, config, sim_id, model_id, gridappsd_address, sim_in_topic,
        sim_out_topic=None):
    """Run one pyvvo genetic-algorithm optimization cycle for a feeder model.

    Pulls model data, weather, and historic meter data; fetches and patches the
    GridLAB-D base model; runs the GA population; and finally commands
    capacitors and regulators via the simulation input topic.

    :param log: logger used for progress/warning messages.
    :param config: configuration dict (GLD-DB, INTERVALS, COSTS, GA, ... keys).
    :param sim_id: platform simulation id.
    :param model_id: mRID of the power system model to optimize.
    :param gridappsd_address: address of the GridAPPS-D message bus.
    :param sim_in_topic: simulation input topic for commands; if None, no
        commands are sent.
    :param sim_out_topic: optional simulation output topic (currently unused).
    """
    # Initialize GridAPPSD object from within the platform.
    gridappsd_object = GridAPPSD(simulation_id=sim_id,
                                 address=gridappsd_address,
                                 username=utils.get_gridappsd_user(),
                                 password=utils.get_gridappsd_pass())
    # if sim_out_topic is not None:
    #     dump_output = DumpOutput()
    #     gridappsd_object.subscribe(topic=sim_out_topic, callback=dump_output)

    # Get all relevant model data from blazegraph/CIM.
    model_data = get_all_model_data(gridappsd_object, model_id, log=log)

    # Hard-code timezone for weather data.
    # TODO: when to "un-hard code?"
    tz = dateutil.tz.gettz('America/Denver')
    # Hard-code starting date in 2013, since that's what we have weather
    # data for.
    # TODO: "un-hard code" when possible
    # TODO: When UTC conversion bug is fixed with weather data, change
    # hour from 7 to 0 below.
    start_dt = datetime.datetime(year=2013, month=1, day=1, hour=7, minute=0,
                                 second=0, microsecond=0, tzinfo=tz)
    # NOTE: while this probably is slower than adding to the Unix
    # timestamp, this gives us handy dates for logging. This probably
    # isn't the way to go long term.
    end_dt = start_dt + dateutil.relativedelta.relativedelta(days=14)

    # Convert to Unix timestamps (which are in UTC).
    start_ts = datetime.datetime.timestamp(start_dt)
    end_ts = datetime.datetime.timestamp(end_dt)
    # The platform uses microseconds since the epoch, rather than
    # seconds, so be sure to convert. Also, it's taking strings, which
    # is annoying.
    start_time = '{:.0f}'.format(start_ts * 1e6)
    end_time = '{:.0f}'.format(end_ts * 1e6)

    # Pull weather data for the specified interval from the time series
    # database, and average it over 15 minute intervals.
    interval = 15
    interval_unit = 'min'
    # NOTE(review): `weather` is not used further in this function -- presumably
    # kept for its side effects or future use; confirm before removing.
    weather = get_weather(gridappsd_object, start_time, end_time,
                          interval=interval, interval_unit=interval_unit)

    # Get strings for dates (logging only).
    # TODO: hard-coding date formatting... yay!
    # TODO: should probably only do this if the log level is INFO.
    start_str = start_dt.strftime('%Y-%m-%d %H:%M:%S%z')
    end_str = end_dt.strftime('%Y-%m-%d %H:%M:%S%z')
    log_str = \
        ('Weather data for {} through {} '.format(start_str, end_str)
         + 'pulled and averaged over {} {} '.format(interval, interval_unit)
         + 'intervals.')
    log.info(log_str)

    # Loop over the load measurements and pull historic data.
    # Grab a single MRID for experimentation.
    meter_name = 'sx2673305b'
    data = get_data_for_meter(gridappsd_object, sim_id,
                              model_data['load_measurements'][meter_name])
    log.info('Data for meter {} pulled and parsed.'.format(meter_name))
    print('Measurements for meter {}:'.format(meter_name))
    print(data)

    # Get the GridLAB-D model.
    # TODO: add to the Python API.
    payload = {'configurationType': 'GridLAB-D Base GLM',
               'parameters': {'model_id': model_id}}
    gld_model = gridappsd_object.get_response(topic=topics.CONFIG,
                                              message=payload, timeout=30)
    log.info('GridLAB-D model for GA use received.')

    # Remove the json remnants from the message via regular expressions.
    # FIX: patterns are raw strings now -- '\s'/'\{' in plain strings are
    # invalid escape sequences (DeprecationWarning, SyntaxError in future
    # Python versions).
    gld_model['message'] = re.sub(r'^\s*\{\s*"data"\s*:\s*', '',
                                  gld_model['message'])
    gld_model['message'] = re.sub(r'\s*,\s*"responseComplete".+$', '',
                                  gld_model['message'])
    # FIX: log.warn is deprecated; use log.warning (already used below).
    log.warning('Bad json for GridLAB-D model fixed via regular expressions.')

    # Get a modGLM model to modify the base model.
    model_obj = modGLM.modGLM(strModel=gld_model['message'],
                              pathModelOut='test.glm',
                              pathModelIn='pyvvo.glm')
    model_obj.writeModel()
    log.info('modGLM object instantiated.')

    # Set up the model to run.
    st = '2016-01-01 00:00:00'
    et = '2016-01-01 00:15:00'
    tz = 'UTC0'
    swing_meter_name = model_obj.setupModel(
        starttime=st, stoptime=et, timezone=tz,
        database=config['GLD-DB'],
        powerflowFlag=True,
        vSource=model_data['swing_voltage'],
        triplexGroup=CONST.LOADS['triplex']['group'],
        triplexList=model_data['load_nominal_voltage']['triplex']['meters'])
    log.info('GridLAB-D model prepped for GA use.')

    # Write the base model.
    # model_obj.writeModel()
    # log.info('Base GridLAB-D model configured and written to file.')

    # Build dictionary of recorder definitions which individuals in the
    # population will add to their model. We'll use the append record mode.
    # This can be risky! If you're not careful about clearing the database out
    # between subsequent test runs, you can write duplicate rows.
    recorders = buildRecorderDicts(
        energyInterval=config['INTERVALS']['OPTIMIZATION'],
        powerInterval=config['INTERVALS']['SAMPLE'],
        voltageInterval=config['INTERVALS']['SAMPLE'],
        energyPowerMeter=swing_meter_name,
        triplexGroup=CONST.LOADS['triplex']['group'],
        recordMode='a',
        query_buffer_limit=config['GLD-DB-OTHER']['QUERY_BUFFER_LIMIT'])
    log.info('Recorder dictionaries created.')

    # Convert costs from fraction of nominal voltage to actual voltage.
    # Get pointer to costs dict (mutates config['COSTS'] in place).
    costs = config['COSTS']
    costs['undervoltage']['limit'] = \
        (costs['undervoltage']['limit']
         * model_data['load_nominal_voltage']['triplex']['v'])
    costs['overvoltage']['limit'] = \
        (costs['overvoltage']['limit']
         * model_data['load_nominal_voltage']['triplex']['v'])
    log.info('Voltage fractions converted to actual voltage for costs.')

    # Initialize a clock object for datetimes.
    clockObj = helper.clock(startStr=st, finalStr=et,
                            interval=config['INTERVALS']['OPTIMIZATION'],
                            tzStr=tz)
    log.info('Clock object initialized')

    # Connect to the MySQL database for GridLAB-D simulations.
    db_obj = db.db(**config['GLD-DB'],
                   pool_size=config['GLD-DB-OTHER']['NUM-CONNECTIONS'])
    log.info('Connected to MySQL for GA GridLAB-D output.')

    # Clear out the database while testing.
    # TODO: take this out?
    db_obj.dropAllTables()
    log.warning('All tables dropped in {}'.format(
        config['GLD-DB']['database']))

    # Initialize a population.
    # TODO - let's get the 'inPath' outta here. It's really just being used for
    # model naming, and we may as well be more explicit about that.
    sdt, edt = clockObj.getStartStop()
    pop_obj = population.population(
        strModel=model_obj.strModel,
        numInd=config['GA']['INDIVIDUALS'],
        numGen=config['GA']['GENERATIONS'],
        numModelThreads=config['GA']['THREADS'],
        recorders=recorders,
        dbObj=db_obj,
        starttime=sdt, stoptime=edt, timezone=tz,
        inPath=model_obj.pathModelIn,
        outDir='/pyvvo/pyvvo/models',
        reg=model_data['voltage_regulator'],
        cap=model_data['capacitor'],
        costs=costs,
        probabilities=config['PROBABILITIES'],
        gldInstall=config['GLD-INSTALLATION'],
        randomSeed=config['RANDOM-SEED'],
        log=log,
        baseControlFlag=0)
    log.info('Population object initialized.')

    log.info('Starting genetic algorithm...')
    best_ind = pop_obj.ga()
    log.info('Shutting down genetic algorithm threads...')
    pop_obj.stopThreads()
    log.info('Baseline costs:\n{}'.format(
        json.dumps(pop_obj.baselineData['costs'], indent=4)))
    log.info('Best individual:\n{}'.format(best_ind))

    # Send commands.
    if sim_in_topic is not None:
        command_capacitors(log=log, sim_id=sim_id, cap_dict=best_ind.cap,
                           gridappsd_object=gridappsd_object,
                           sim_in_topic=sim_in_topic)
        # Pause between device classes so the platform processes the capacitor
        # commands before regulator commands arrive.
        log.warning('Sleeping 5 seconds before commanding regulators.')
        time.sleep(5)
        command_regulators(log=log, sim_id=sim_id, reg_dict=best_ind.reg,
                           gridappsd_object=gridappsd_object,
                           sim_in_topic=sim_in_topic)
def _main():
    """Parse CLI args, index capacitors/switches and their measurements by
    mRID, then subscribe a SimulationSubscriber to the simulation input and
    output topics.  Blocks forever once subscribed.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("simulation_id",
                            help="Simulation id to use for responses on the message bus.")
    arg_parser.add_argument("request", help="Simulation Request")
    # Bus credentials/address/port come from the docker environment (or
    # defaults), so no user/password/address/port arguments are exposed here.
    opts = arg_parser.parse_args()

    sim_output_topic = simulation_output_topic(opts.simulation_id)
    sim_input_topic = simulation_input_topic(opts.simulation_id)

    # Strip single quotes before parsing the request JSON.
    sim_request = json.loads(opts.request.replace("\'", ""))
    model_mrid = sim_request["power_system_config"]["Line_name"]

    gapps = GridAPPSD(opts.simulation_id,
                      address=utils.get_gridappsd_address(),
                      username=utils.get_gridappsd_user(),
                      password=utils.get_gridappsd_pass())

    pgm_topic = "goss.gridappsd.process.request.data.powergridmodel"

    def fetch_indexed(request_type, object_type, id_field):
        # One power-grid-model query; rows from the "data" list get keyed
        # by `id_field` ("id" for object dicts, "measid" for measurements).
        reply = gapps.get_response(pgm_topic,
                                   {"modelId": model_mrid,
                                    "requestType": request_type,
                                    "resultFormat": "JSON",
                                    "objectType": object_type})
        return {row[id_field]: row for row in reply["data"]}

    capacitors_dict = fetch_indexed("QUERY_OBJECT_DICT",
                                    "LinearShuntCompensator", "id")
    switches_dict = fetch_indexed("QUERY_OBJECT_DICT",
                                  "LoadBreakSwitch", "id")
    capacitors_meas_dict = fetch_indexed("QUERY_OBJECT_MEASUREMENTS",
                                         "LinearShuntCompensator", "measid")
    switches_meas_dict = fetch_indexed("QUERY_OBJECT_MEASUREMENTS",
                                       "LoadBreakSwitch", "measid")

    subscriber = SimulationSubscriber(opts.simulation_id, gapps,
                                      capacitors_dict, switches_dict,
                                      capacitors_meas_dict,
                                      switches_meas_dict)
    gapps.subscribe(sim_input_topic, subscriber)
    gapps.subscribe(sim_output_topic, subscriber)

    # Keep the process alive; subscription callbacks do the actual work.
    while True:
        time.sleep(0.1)
def _main():
    """Entry point for the IEEE 9500-node variant: query switch and load
    measurement mRIDs, load local demand/line-parameter JSON files, and
    subscribe a SwitchingActions handler to the simulation output topic.

    Blocks forever once subscribed.
    """
    _log.debug("Starting application")
    print("Application starting-------------------------------------------------------")
    # message_period is shared with code outside this function via a module global.
    global message_period
    parser = argparse.ArgumentParser()
    parser.add_argument("simulation_id",
                        help="Simulation id to use for responses on the message bus.")
    parser.add_argument("request", help="Simulation Request")
    parser.add_argument("--message_period",
                        help="How often the sample app will send open/close capacitor message.",
                        default=DEFAULT_MESSAGE_PERIOD)
    opts = parser.parse_args()
    listening_to_topic = simulation_output_topic(opts.simulation_id)
    message_period = int(opts.message_period)
    # NOTE(review): strips ALL single quotes from the request string before
    # JSON parsing -- assumes none occur inside values.
    sim_request = json.loads(opts.request.replace("\'", ""))
    model_mrid = sim_request['power_system_config']['Line_name']
    print("\n \n The model running is IEEE 9500-node with MRID:", model_mrid)
    _log.debug("Model mrid is: {}".format(model_mrid))
    gapps = GridAPPSD(opts.simulation_id, address=utils.get_gridappsd_address(),
                      username=utils.get_gridappsd_user(),
                      password=utils.get_gridappsd_pass())
    # Get measurement MRIDS for Loadbreakswitches in the feeder
    print('Get Measurement MRIDS for Loadbreakswitches.....')
    topic = "goss.gridappsd.process.request.data.powergridmodel"
    message = {"modelId": model_mrid,
               "requestType": "QUERY_OBJECT_MEASUREMENTS",
               "resultFormat": "JSON",
               "objectType": "LoadBreakSwitch"}
    obj_msr_loadsw = gapps.get_response(topic, message, timeout=180)
    # Persist the raw query response locally for offline inspection.
    with open('measid_LoadbreakSwitch.json', 'w') as json_file:
        json.dump(obj_msr_loadsw, json_file)
    # Get measurement MRIDS for kW consumptions at each node
    print('Get Measurement MRIDS for EnergyConsumers.....')
    message = {"modelId": model_mrid,
               "requestType": "QUERY_OBJECT_MEASUREMENTS",
               "resultFormat": "JSON",
               "objectType": "EnergyConsumer"}
    obj_msr_demand = gapps.get_response(topic, message, timeout=180)
    # Get Eq. MRIDs of Loadbreakswitches
    print('Get Switches Information.....')
    switches = get_switches_mrids(gapps, model_mrid)
    # Load demand and lineparameters
    # NOTE(review): these JSON files must exist in the working directory;
    # a missing file raises FileNotFoundError here.
    with open('Demand9500.json', 'r') as read_file:
        demand = json.load(read_file)
    with open('LineData.json', 'r') as read_file:
        line = json.load(read_file)
    print("Initialize.....")
    toggler = SwitchingActions(opts.simulation_id, gapps, switches, \
                               obj_msr_loadsw, obj_msr_demand, demand, line)
    print("Now subscribing....")
    gapps.subscribe(listening_to_topic, toggler)
    # Keep the process alive; the subscription callback does the real work.
    while True:
        time.sleep(0.1)
def start(log_file, feeder_mrid, model_api_topic, simulation_id):
    """Build a system admittance matrix (Ysys) from the model validators,
    compute nominal candidate voltages, drive regulator taps to zero via the
    simulation, collect power injections, and run a fixed-point-iteration
    (FPI) power flow, comparing the result against simulated PNV magnitudes.

    :param log_file: open file object used as the module-level logfile.
    :param feeder_mrid: mRID of the feeder model.
    :param model_api_topic: topic for power-grid-model API requests.
    :param simulation_id: id of the running simulation to subscribe to.
    """
    global logfile
    logfile = log_file

    # Resolve project helpers dynamically so this module has no hard import
    # dependency on them at load time.
    SPARQLManager = getattr(importlib.import_module('shared.sparql'), 'SPARQLManager')

    gapps = GridAPPSD()
    sparql_mgr = SPARQLManager(gapps, feeder_mrid, model_api_topic)

    #ysparse,nodelist = sparql_mgr.ybus_export()
    #idx = 1
    #nodes = {}
    #for obj in nodelist:
    #    nodes[idx] = obj.strip('\"')
    #    idx += 1
    ##print(nodes)
    #Ybus = {}
    #for obj in ysparse:
    #    items = obj.split(',')
    #    if items[0] == 'Row':
    #        continue
    #    if nodes[int(items[0])] not in Ybus:
    #        Ybus[nodes[int(items[0])]] = {}
    #    Ybus[nodes[int(items[0])]][nodes[int(items[1])]] = complex(float(items[2]), float(items[3]))
    ##print(Ybus)

    # Ysys is populated in place by each validator's start() below;
    # Unsupported collects whatever the validators cannot model.
    Ysys = {}
    Unsupported = {}

    mod_import = importlib.import_module('line_model_validator.line_model_validator')
    start_func = getattr(mod_import, 'start')
    start_func(log_file, feeder_mrid, model_api_topic, False, Ysys, Unsupported)
    #print('line_model_validator Ysys...')
    #print(Ysys)
    #line_count = 0
    #for bus1 in Ysys:
    #    line_count += len(Ysys[bus1])
    #print('\nLine_model # entries: ' + str(line_count) + '\n', flush=True)
    #print('\nLine_model # entries: ' + str(line_count) + '\n', file=logfile)

    mod_import = importlib.import_module('power_transformer_validator.power_transformer_validator')
    start_func = getattr(mod_import, 'start')
    start_func(log_file, feeder_mrid, model_api_topic, False, Ysys, Unsupported)
    #print('power_transformer_validator Ysys...')
    #print(Ysys)
    #count = 0
    #for bus1 in Ysys:
    #    count += len(Ysys[bus1])
    #xfmr_count = count - line_count
    #print('Power_transformer # entries: ' + str(xfmr_count) + '\n', flush=True)
    #print('Power_transformer # entries: ' + str(xfmr_count) + '\n', file=logfile)

    mod_import = importlib.import_module('switching_equipment_validator.switching_equipment_validator')
    start_func = getattr(mod_import, 'start')
    start_func(log_file, feeder_mrid, model_api_topic, False, Ysys, Unsupported)
    #print('switching_equipment_validator (final) Ysys...')
    #print(Ysys)
    #count = 0
    #for bus1 in Ysys:
    #    count += len(Ysys[bus1])
    #switch_count = count - line_count - xfmr_count
    #print('Switching_equipment # entries: ' + str(switch_count) + '\n', flush=True)
    #print('Switching_equipment # entries: ' + str(switch_count) + '\n', file=logfile)

    #print('\n*** Full Ysys:\n')
    #for bus1 in Ysys:
    #    for bus2 in Ysys[bus1]:
    #        print(bus1 + ',' + bus2 + ',' + str(Ysys[bus1][bus2].real) + ',' + str(Ysys[bus1][bus2].imag))

    # NOTE(review): ysysCount is computed but only used by the commented-out
    # prints below.
    ysysCount = 0
    for bus1 in Ysys:
        ysysCount += len(Ysys[bus1])
    #print('Total computed # entries: ' + str(ysysCount) + '\n', flush=True)
    #print('Total computed # entries: ' + str(ysysCount) + '\n', file=logfile)

    # build the Numpy matrix from the full Ysys before we start deleting
    # entries to check Ysys vs. Ybus
    # first, create a node index dictionary
    Node2idx = {}
    N = 0
    for bus1 in list(Ysys):
        if bus1 not in Node2idx:
            Node2idx[bus1] = N
            N += 1
        for bus2 in list(Ysys[bus1]):
            if bus2 not in Node2idx:
                Node2idx[bus2] = N
                N += 1
    print('Node2idx size: ' + str(N))
    print('Node2idx dictionary:')
    print(Node2idx)

    sourcebus, sourcevang = sparql_mgr.sourcebus_query()
    sourcebus = sourcebus.upper()
    #print('\nquery results sourcebus: ' + sourcebus)
    #print('query results sourcevang: ' + str(sourcevang))

    bindings = sparql_mgr.nomv_query()
    #print('\nnomv query results:')
    #print(bindings)

    # Nominal line-to-neutral voltage magnitude per bus (nomv is assumed
    # line-to-line, hence the divide by sqrt(3)).
    sqrt3 = math.sqrt(3.0)
    Vmag = {}
    for obj in bindings:
        busname = obj['busname']['value'].upper()
        nomv = float(obj['nomv']['value'])
        Vmag[busname] = nomv/sqrt3

    # Balanced three-phase nominal angles keyed by phase digit.
    Vang = {}
    Vang['1'] = math.radians(0.0)
    Vang['2'] = math.radians(-120.0)
    Vang['3'] = math.radians(120.0)

    # calculate CandidateVnom
    # Node names are "<bus>.<phase>"; split on the first '.'.
    CandidateVnom = {}
    CandidateVnomPolar = {}
    for node in Node2idx:
        bus = node[:node.find('.')]
        phase = node[node.find('.')+1:]
        # source bus is a special case for the angle
        if node.startswith(sourcebus+'.'):
            CandidateVnom[node] = pol2cart(Vmag[bus], sourcevang+Vang[phase])
            CandidateVnomPolar[node] = (Vmag[bus], math.degrees(sourcevang+Vang[phase]))
        else:
            if bus in Vmag:
                CandidateVnom[node] = pol2cart(Vmag[bus], Vang[phase])
                CandidateVnomPolar[node] = (Vmag[bus], math.degrees(Vang[phase]))
            else:
                print('*** WARNING: no nomv value for bus: ' + bus + ' for node: ' + node)
    #print('\nCandidateVnom dictionary:')
    #print(CandidateVnom)

    # Matrix indices of the source-bus phase nodes.
    src_idxs = []
    if sourcebus+'.1' in Node2idx:
        src_idxs.append(Node2idx[sourcebus+'.1'])
    if sourcebus+'.2' in Node2idx:
        src_idxs.append(Node2idx[sourcebus+'.2'])
    if sourcebus+'.3' in Node2idx:
        src_idxs.append(Node2idx[sourcebus+'.3'])
    print('\nsrc_idxs: ' + str(src_idxs))

    YsysMatrix = np.zeros((N,N), dtype=complex)
    # next, remap into a numpy array (assigned symmetrically: Ysys holds one
    # triangle, the matrix gets both).
    for bus1 in list(Ysys):
        for bus2 in list(Ysys[bus1]):
            YsysMatrix[Node2idx[bus2],Node2idx[bus1]] = YsysMatrix[Node2idx[bus1],Node2idx[bus2]] = Ysys[bus1][bus2]

    # dump YsysMatrix for MATLAB comparison
    #print('\nYsysMatrix for MATLAB:')
    #for row in range(N):
    #    for col in range(N):
    #        print(str(row+1) + ',' + str(col+1) + ',' + str(YsysMatrix[row,col].real) + ',' + str(YsysMatrix[row,col].imag))

    # Print full arrays instead of numpy's truncated form.
    np.set_printoptions(threshold=sys.maxsize)
    #print('\nYsys numpy array:')
    #print(YsysMatrix)

    # create the CandidateVnom numpy vector for computations below
    CandidateVnomVec = np.zeros((N), dtype=complex)
    for node in Node2idx:
        if node in CandidateVnom:
            print('CandidateVnomVec node: ' + node + ', index: ' + str(Node2idx[node]) + ', cartesian value: ' + str(CandidateVnom[node]) + ', polar value: ' + str(CandidateVnomPolar[node]))
            CandidateVnomVec[Node2idx[node]] = CandidateVnom[node]
        else:
            print('*** WARNING: no CandidateVnom value for populating node: ' + node + ', index: ' + str(Node2idx[node]))
    #print('\nCandidateVnom:')
    #print(CandidateVnomVec)

    # dump CandidateVnomVec to CSV file for MATLAB comparison
    #print('\nCandidateVnom for MATLAB:')
    #for row in range(N):
    #    print(str(CandidateVnomVec[row].real) + ',' + str(CandidateVnomVec[row].imag))

    # time to get the source injection terms
    # first, get the dictionary of regulator ids
    bindings = sparql_mgr.regid_query()
    Rids = []
    for obj in bindings:
        Rids.append(obj['rid']['value'])
    print('\nRegulator IDs: ' + str(Rids))

    # second, subscribe to simulation output so we can start setting tap
    # positions to 0
    simSetRap = SimSetWrapper(gapps, simulation_id, Rids)
    conn_id = gapps.subscribe(simulation_output_topic(simulation_id), simSetRap)

    # Spin until the wrapper signals it has finished setting taps.
    while simSetRap.keepLooping():
        #print('Sleeping....', flush=True)
        time.sleep(0.1)

    gapps.unsubscribe(conn_id)

    bindings = sparql_mgr.query_energyconsumer_lf()
    #print(bindings)

    # Map CIM phase labels (including secondary s1/s2) to node suffixes.
    phaseIdx = {'A': '.1', 'B': '.2', 'C': '.3', 's1': '.1', 's2': '.2'}

    # Collect the nodes belonging to delta-connected loads; an empty phases
    # string means all three phases.
    DeltaList = []
    #print("\nDelta connected load EnergyConsumer query:")
    for obj in bindings:
        #name = obj['name']['value'].upper()
        bus = obj['bus']['value'].upper()
        conn = obj['conn']['value']
        phases = obj['phases']['value']
        #print('bus: ' + bus + ', conn: ' + conn + ', phases: ' + phases)
        if conn == 'D':
            if phases == '':
                DeltaList.append(bus+'.1')
                DeltaList.append(bus+'.2')
                DeltaList.append(bus+'.3')
            else:
                DeltaList.append(bus+phaseIdx[phases])

    # Simulated per-node voltage magnitudes, filled in by SimCheckWrapper below.
    PNVmag = np.zeros((N), dtype=float)

    # third, verify all tap positions are 0
    config_api_topic = 'goss.gridappsd.process.request.config'
    message = {
        'configurationType': 'CIM Dictionary',
        'parameters': {'model_id': feeder_mrid}
    }
    cim_dict = gapps.get_response(config_api_topic, message, timeout=10)
    #print('\nCIM Dictionary:')
    #print(cim_dict)

    # get list of regulator mRIDs
    RegMRIDs = []
    CondMRIDs = []
    PNVmRIDs = []
    PNVdict = {}
    condTypes = set(['EnergyConsumer', 'LinearShuntCompensator', 'PowerElectronicsConnection', 'SynchronousMachine'])
    # For delta loads, the "other" node of the phase pair (A->B, B->C, C->A).
    phaseIdx2 = {'A': '.2', 'B': '.3', 'C': '.1'}
    for feeder in cim_dict['data']['feeders']:
        for measurement in feeder['measurements']:
            if measurement['name'].startswith('RatioTapChanger') and measurement['measurementType']=='Pos':
                RegMRIDs.append(measurement['mRID'])
            elif measurement['measurementType']=='VA' and (measurement['ConductingEquipment_type'] in condTypes):
                node = measurement['ConnectivityNode'].upper() + phaseIdx[measurement['phases']]
                if node in DeltaList:
                    node2 = measurement['ConnectivityNode'].upper() + phaseIdx2[measurement['phases']]
                    #print('Appending CondMRID tuple: (' + measurement['mRID'] + ', ' + measurement['ConductingEquipment_type'] + ', ' + str(Node2idx[node]) + ', ' + str(Node2idx[node2]) + ') for node: ' + node, flush=True)
                    CondMRIDs.append((measurement['mRID'], measurement['ConductingEquipment_type'], Node2idx[node], Node2idx[node2]))
                else:
                    #print('Appending CondMRID tuple: (' + measurement['mRID'] + ', ' + measurement['ConductingEquipment_type'] + ', ' + str(Node2idx[node]) + ', None) for node: ' + node, flush=True)
                    CondMRIDs.append((measurement['mRID'], measurement['ConductingEquipment_type'], Node2idx[node], None))
            elif measurement['measurementType'] == 'PNV':
                # save PNV measurements in Andy's mixing bowl for later
                node = measurement['ConnectivityNode'].upper() + phaseIdx[measurement['phases']]
                #print('Appending PNVmRID tuple: (' + measurement['mRID'] + ', ' + measurement['ConductingEquipment_type'] + ', ' + str(Node2idx[node]) + ') for node: ' + node, flush=True)
                PNVmRIDs.append((measurement['mRID'], Node2idx[node]))
                PNVdict[Node2idx[node]] = measurement['mRID']

    print('Found RatioTapChanger mRIDs: ' + str(RegMRIDs), flush=True)
    print('Found ConductingEquipment mRIDs: ' + str(CondMRIDs), flush=True)
    print('Found PNV dictionary: ' + str(PNVdict), flush=True)
    print('PNV dictionary size: ' + str(len(PNVdict)), flush=True)

    # fourth, verify tap ratios are all 0 and then set Sinj values for the
    # conducting equipment mRIDs by listening to simulation output
    # start with Sinj as zero vector and we will come back to this later
    Sinj = np.zeros((N), dtype=complex)
    Sinj[src_idxs] = complex(0.0,1.0)
    print('\nInitial Sinj:')
    print(Sinj)

    # subscribe to simulation output so we can start checking tap positions
    # and then setting Sinj
    simCheckRap = SimCheckWrapper(Sinj, PNVmag, RegMRIDs, CondMRIDs, PNVmRIDs, PNVdict)
    conn_id = gapps.subscribe(simulation_output_topic(simulation_id), simCheckRap)

    # Spin until the wrapper signals Sinj/PNVmag are populated.
    while simCheckRap.keepLooping():
        #print('Sleeping....', flush=True)
        time.sleep(0.1)

    gapps.unsubscribe(conn_id)

    print('\nFinal Sinj:')
    #print(Sinj)
    for key,value in Node2idx.items():
        print(key + ': ' + str(Sinj[value]))

    # NOTE(review): this zeros allocation is immediately overwritten by the
    # slice on the next line -- dead assignment.
    vsrc = np.zeros((3), dtype=complex)
    vsrc = CandidateVnomVec[src_idxs]
    #print('\nvsrc:')
    #print(vsrc)

    # Nominal injection currents and the equivalent shunt admittances used to
    # augment Ysys (S = V * conj(I) => I = conj(S/V)).
    Iinj_nom = np.conj(Sinj/CandidateVnomVec)
    #print('\nIinj_nom:')
    #print(Iinj_nom)

    Yinj_nom = -Iinj_nom/CandidateVnomVec
    #print('\nYinj_nom:')
    #print(Yinj_nom)

    Yaug = YsysMatrix + np.diag(Yinj_nom)
    #print('\nYaug:')
    #print(Yaug)

    Zaug = np.linalg.inv(Yaug)
    #print('\nZaug:')
    #print(Zaug)

    # Fixed-point iteration setup.
    tolerance = 0.01
    # NOTE(review): the first assignment is dead -- Nfpi is immediately
    # re-set to 15.
    Nfpi = 10
    Nfpi = 15
    Isrc_vec = np.zeros((N), dtype=complex)
    Vfpi = np.zeros((N,Nfpi), dtype=complex)

    # start with the CandidateVnom for Vfpi
    Vfpi[:,0] = CandidateVnomVec
    #print('\nVfpi:')
    #print(Vfpi)

    k = 1
    maxdiff = 1.0
    while k<Nfpi and maxdiff>tolerance:
        # Total load current from the previous voltage estimate, minus the
        # part already represented by the nominal shunt admittance.
        Iload_tot = np.conj(Sinj / Vfpi[:,k-1])
        Iload_z = -Yinj_nom * Vfpi[:,k-1]
        Iload_comp = Iload_tot - Iload_z
        #print('\nIload_comp numpy matrix:')
        #print(Iload_comp)

        # Solve for the source currents that hold the source voltages at vsrc.
        term1 = np.linalg.inv(Zaug[np.ix_(src_idxs,src_idxs)])
        term2 = vsrc - np.matmul(Zaug[np.ix_(src_idxs,list(range(N)))], Iload_comp)
        Isrc_vec[src_idxs] = np.matmul(term1, term2)
        #print("\nIsrc_vec:")
        #print(Isrc_vec)

        Icomp = Isrc_vec + Iload_comp
        Vfpi[:,k] = np.matmul(Zaug, Icomp)
        #print("\nVfpi:")
        #print(Vfpi)
        #print(Vfpi[:,k])

        # NOTE(review): debug dump hard-codes 41 entries -- assumes N >= 41;
        # should presumably be range(N). Confirm before changing.
        maxlist = abs(abs(Vfpi[:,k]) - abs(Vfpi[:,k-1]))
        print("\nmaxlist:")
        for i in range(41):
            print(str(i) + ": " + str(maxlist[i]))

        maxdiff = max(abs(abs(Vfpi[:,k]) - abs(Vfpi[:,k-1])))
        print("\nk: " + str(k) + ", maxdiff: " + str(maxdiff))
        k += 1
        if k == Nfpi:
            print("\nDid not converge with k: " + str(k))
            return

    # set the final Vpfi index
    k -= 1
    print("\nconverged k: " + str(k))
    print("\nVfpi:")
    for key, value in Node2idx.items():
        rho, phi = cart2pol(Vfpi[value,k])
        print(key + ': rho: ' + str(rho) + ', phi: ' + str(math.degrees(phi)))
        print('index: ' + str(value) + ', sim mag: ' + str(PNVmag[value]))

    # CSV comparison of FPI voltage magnitudes against simulated PNV values
    # (rows with no simulated magnitude are skipped).
    print("\nVfpi rho to sim magnitude CSV:")
    for key, value in Node2idx.items():
        mag = PNVmag[value]
        if mag != 0.0:
            rho, phi = cart2pol(Vfpi[value,k])
            print(str(value) + ',' + key + ',' + str(rho) + ',' + str(mag))

    return