Example #1
    @classmethod
    def setUpClass(cls):
        """Attempt to connect to the platform."""
        try:
            cls.sparql = sparql.SPARQLManager(
                feeder_mrid=_df.FEEDER_MRID_9500)
        except ConnectFailedException:
            # We cannot connect to the platform.
            raise unittest.SkipTest('Failed to connect to GridAPPS-D.')
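For context, a minimal sketch of how a setUpClass like this typically sits inside a unittest.TestCase is shown below. The class name, the trivial test method, and the import paths for sparql, _df, and ConnectFailedException are illustrative assumptions, not part of the original source.

import unittest

# Assumed imports; the actual module paths depend on the project layout.
from pyvvo import sparql
from pyvvo.gridappsd_platform import ConnectFailedException
from tests import data_files as _df


class SPARQLManagerTestCase(unittest.TestCase):
    """Hypothetical test class wrapping the snippet above."""

    @classmethod
    def setUpClass(cls):
        """Attempt to connect to the platform."""
        try:
            cls.sparql = sparql.SPARQLManager(
                feeder_mrid=_df.FEEDER_MRID_9500)
        except ConnectFailedException:
            # Skipping here skips every test in the class.
            raise unittest.SkipTest('Failed to connect to GridAPPS-D.')

    def test_manager_created(self):
        # Illustrative assertion only.
        self.assertIsNotNone(self.sparql)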
Example #2
    @classmethod
    def setUpClass(cls):
        cls.s = sparql.SPARQLManager(feeder_mrid=_df.FEEDER_MRID_123)

        cls.a = [
            (cls.s.query_capacitors, _df.CAPACITORS_123),
            (cls.s.query_regulators, _df.REGULATORS_123),
            (cls.s.query_rtc_measurements, _df.REG_MEAS_123),
            (cls.s.query_capacitor_measurements, _df.CAP_MEAS_123),
            (cls.s.query_load_nominal_voltage, _df.LOAD_NOM_V_123),
            # Node naming is screwing up dtypes here.
            (cls.s.query_load_measurements, _df.LOAD_MEAS_123),
            (cls.s.query_substation_source, _df.SUBSTATION_123),
            (cls.s.query_switches, _df.SWITCHES_123),
            (cls.s.query_switch_measurements, _df.SWITCH_MEAS_123)
        ]
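One plausible way to consume the (query method, expected-results file) pairs stored in cls.a is to loop over them in a single test and compare each returned DataFrame against its pickled expectation. The sketch below is an assumed usage, reusing the read_pickle and ensure_frame_equal_except_mrid helper names that appear in Example #4.

    def test_queries_against_expected_files(self):
        """Hypothetical test: run each query and compare to its file."""
        for query_fn, expected_file in self.a:
            with self.subTest(query=query_fn.__name__):
                actual = query_fn()
                expected = read_pickle(expected_file)
                # Assumed to compare frames while ignoring MRID columns,
                # which change between platform runs.
                ensure_frame_equal_except_mrid(actual, expected)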
Example #3
    @classmethod
    def setUpClass(cls):
        cls.s = sparql.SPARQLManager(feeder_mrid=_df.FEEDER_MRID_9500)

        cls.a = [
            (cls.s.query_capacitors, _df.CAPACITORS_9500),
            (cls.s.query_regulators, _df.REGULATORS_9500),
            (cls.s.query_rtc_measurements, _df.REG_MEAS_9500),
            (cls.s.query_capacitor_measurements, _df.CAP_MEAS_9500),
            (cls.s.query_load_nominal_voltage, _df.LOAD_NOM_V_9500),
            # Node naming is screwing up dtypes here.
            (cls.s.query_load_measurements, _df.LOAD_MEAS_9500),
            (cls.s.query_substation_source, _df.SUBSTATION_9500),
            # For some wonderful reason, things come back in a different
            # order for switches, causing a test failure.
            # TODO: We may want to dig further to find out why.
            # For now, reluctantly commenting out this line.
            # (cls.s.query_switches, _df.SWITCHES_9500),
            # (cls.s.query_switch_measurements, _df.SWITCH_MEAS_9500)
            (cls.s.query_inverters, _df.INVERTERS_9500),
            (cls.s.query_inverter_measurements, _df.INVERTER_MEAS_9500),
            (cls.s.query_synchronous_machines, _df.SYNCH_MACH_9500),
            (cls.s.query_synchronous_machine_measurements,
             _df.SYNCH_MACH_MEAS_9500)
        ]
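The switch entries above are commented out only because the 9500-node model returns switch rows in a different order. If row order genuinely does not matter, one possible workaround (an assumption, not something the original tests do) is to sort both frames on a stable column before comparing:

    def assert_frames_equal_any_order(self, actual, expected,
                                      sort_col='name'):
        """Hypothetical order-insensitive comparison.

        Assumes 'name' is a stable, unique column in both frames.
        """
        actual_sorted = actual.sort_values(sort_col).reset_index(drop=True)
        expected_sorted = expected.sort_values(sort_col).reset_index(
            drop=True)
        ensure_frame_equal_except_mrid(actual_sorted, expected_sorted)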
Example #4
def gen_expected_sparql_results():
    """Helper to generate expected results. Uncomment in the "main"
    section. This function is a bit gross and not particularly
    maintainable, but it'll do.
    """
    s1 = sparql.SPARQLManager(feeder_mrid=FEEDER_MRID_8500)

    # Create a list of (query method, output file) tuples to run
    # everything. An optional third tuple element can be used to
    # truncate DataFrames (no reason to save 4000 entries to file
    # for testing).
    a1 = [(s1.query_capacitors, CAPACITORS_8500),
          (s1.query_regulators, REGULATORS_8500),
          (s1.query_load_nominal_voltage, LOAD_NOM_V_8500),
          (s1.query_rtc_measurements, REG_MEAS_8500),
          (s1.query_capacitor_measurements, CAP_MEAS_8500),
          (s1.query_load_measurements, LOAD_MEAS_8500),
          (s1.query_substation_source, SUBSTATION_8500),
          (s1.query_switches, SWITCHES_8500),
          (s1.query_switch_measurements, SWITCH_MEAS_8500)]

    s2 = sparql.SPARQLManager(feeder_mrid=FEEDER_MRID_13)
    a2 = [(s2.query_capacitors, CAPACITORS_13),
          (s2.query_regulators, REGULATORS_13),
          (s2.query_rtc_measurements, REG_MEAS_13),
          (s2.query_capacitor_measurements, CAP_MEAS_13),
          (s2.query_load_nominal_voltage, LOAD_NOM_V_13),
          (s2.query_load_measurements, LOAD_MEAS_13),
          (s2.query_substation_source, SUBSTATION_13),
          (s2.query_switches, SWITCHES_13),
          (s2.query_switch_measurements, SWITCH_MEAS_13)]

    s3 = sparql.SPARQLManager(feeder_mrid=FEEDER_MRID_123)
    a3 = [(s3.query_capacitors, CAPACITORS_123),
          (s3.query_regulators, REGULATORS_123),
          (s3.query_rtc_measurements, REG_MEAS_123),
          (s3.query_capacitor_measurements, CAP_MEAS_123),
          (s3.query_load_nominal_voltage, LOAD_NOM_V_123),
          (s3.query_load_measurements, LOAD_MEAS_123),
          (s3.query_substation_source, SUBSTATION_123),
          (s3.query_switches, SWITCHES_123),
          (s3.query_switch_measurements, SWITCH_MEAS_123)]

    s4 = sparql.SPARQLManager(feeder_mrid=FEEDER_MRID_9500)
    a4 = [(s4.query_capacitors, CAPACITORS_9500),
          (s4.query_regulators, REGULATORS_9500),
          (s4.query_rtc_measurements, REG_MEAS_9500),
          (s4.query_capacitor_measurements, CAP_MEAS_9500),
          (s4.query_load_nominal_voltage, LOAD_NOM_V_9500),
          (s4.query_load_measurements, LOAD_MEAS_9500),
          (s4.query_substation_source, SUBSTATION_9500),
          (s4.query_switches, SWITCHES_9500),
          (s4.query_switch_measurements, SWITCH_MEAS_9500),
          (s4.query_inverters, INVERTERS_9500),
          (s4.query_inverter_measurements, INVERTER_MEAS_9500),
          (s4.query_synchronous_machines, SYNCH_MACH_9500),
          (s4.query_synchronous_machine_measurements, SYNCH_MACH_MEAS_9500)]

    for a in [a1, a2, a3, a4]:
        for b in a:
            # Run function.
            actual_full = b[0]()

            # Truncate if necessary.
            try:
                actual = actual_full.iloc[0:b[2]]
            except IndexError:
                actual = actual_full

            # If changing column names, creating a new file, etc.,
            # you'll need to write to file here. Otherwise, to just
            # update MRIDs, the file gets written at the end of the loop.
            # to_file(actual, b[1])

            # Read file.
            expected = read_pickle(b[1])

            # Ensure frames match except for MRIDs.
            ensure_frame_equal_except_mrid(actual, expected)

            # Write new file.
            to_file(actual, b[1])
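The ensure_frame_equal_except_mrid helper is not shown in these examples. A minimal sketch of what such a helper could look like, assuming MRID-bearing columns can be identified by name and using pandas.testing.assert_frame_equal, is:

import pandas as pd


def ensure_frame_equal_except_mrid(actual, expected):
    """Hypothetical sketch: compare two DataFrames, ignoring MRID columns.

    Assumes columns holding MRIDs contain 'mrid' in their names; the
    real helper may use different rules.
    """
    actual_trimmed = actual.drop(
        columns=[c for c in actual.columns if 'mrid' in c.lower()])
    expected_trimmed = expected.drop(
        columns=[c for c in expected.columns if 'mrid' in c.lower()])
    pd.testing.assert_frame_equal(actual_trimmed, expected_trimmed)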
Example #5
def main(sim_id: str, sim_request: dict):
    """Function for running the PyVVO application.

    :param sim_id: ID of simulation PyVVO will interact with.
    :param sim_request: Simulation request details that were used to
        start the simulation.
    """
    LOG.debug("Simulation ID: {}".format(sim_id))
    LOG.debug("Simulation Request:\n{}".format(
        json.dumps(sim_request, indent=2)))

    # Extract the feeder_mrid from the simulation request.
    feeder_mrid = sim_request["power_system_config"]["Line_name"]
    LOG.debug("Feeder MRID extracted from simulation request.")

    # Get a SPARQL manager.
    sparql_mgr = sparql.SPARQLManager(feeder_mrid=feeder_mrid)

    # Get a platform manager
    platform = PlatformManager()

    # Extract dates from the simulation request.
    start_seconds = int(sim_request["simulation_config"]["start_time"])
    # duration = int(sim_request["simulation_config"]["duration"])
    LOG.debug("Simulation start time and duration extracted from simulation "
              "request.")

    # Initialize a simulation clock.
    clock = SimulationClock(
        gad=platform.gad,
        sim_id=sim_id,
        sim_start_ts=start_seconds,
        log_interval=ga.CONFIG['misc']['clock_log_interval'])

    # ####################################################################
    # # GET PREREQUISITE DATA
    # ####################################################################
    #
    # TODO: Dispatch these jobs to threads.
    # Get regulator information.
    reg_df = sparql_mgr.query_regulators()
    reg_objects = equipment.initialize_regulators(reg_df)
    reg_meas = sparql_mgr.query_rtc_measurements()
    reg_meas_mrid = list(reg_meas[sparql.REG_MEAS_MEAS_MRID_COL])
    reg_mgr = equipment.EquipmentManager(
        eq_dict=reg_objects,
        eq_meas=reg_meas,
        meas_mrid_col=sparql.REG_MEAS_MEAS_MRID_COL,
        eq_mrid_col=sparql.REG_MEAS_REG_MRID_COL,
        eq_type='Regulator')

    # Get capacitor information.
    cap_df = sparql_mgr.query_capacitors()
    cap_objects = equipment.initialize_capacitors(cap_df)
    cap_meas = sparql_mgr.query_capacitor_measurements()
    cap_meas_mrid = list(cap_meas[sparql.CAP_MEAS_MEAS_MRID_COL])
    cap_mgr = equipment.EquipmentManager(
        eq_dict=cap_objects,
        eq_meas=cap_meas,
        meas_mrid_col=sparql.CAP_MEAS_MEAS_MRID_COL,
        eq_mrid_col=sparql.CAP_MEAS_CAP_MRID_COL,
        eq_type='Capacitor')

    # Get switch information.
    switch_df = sparql_mgr.query_switches()
    switch_objects = equipment.initialize_switches(switch_df)
    switch_meas = sparql_mgr.query_switch_measurements()
    switch_meas_mrid = list(switch_meas[sparql.SWITCH_MEAS_MEAS_MRID_COL])
    switch_mgr = equipment.EquipmentManager(
        eq_dict=switch_objects,
        eq_meas=switch_meas,
        meas_mrid_col=sparql.SWITCH_MEAS_MEAS_MRID_COL,
        eq_mrid_col=sparql.SWITCH_MEAS_SWITCH_MRID_COL,
        eq_type='Switch')

    # Get inverter information.
    inverter_df = sparql_mgr.query_inverters()
    inverter_meas = sparql_mgr.query_inverter_measurements()
    inverter_meas_mrid = \
        list(inverter_meas[sparql.INVERTER_MEAS_MEAS_MRID_COL])
    inverter_objects = equipment.initialize_inverters(inverter_df)
    inverter_mgr = equipment.PQEquipmentManager(
        eq_dict=inverter_objects,
        eq_meas=inverter_meas,
        meas_mrid_col=sparql.INVERTER_MEAS_MEAS_MRID_COL,
        eq_mrid_col=sparql.INVERTER_MEAS_INV_MRID_COL,
        eq_type='Inverter')

    # Get synchronous machine information.
    machine_df = sparql_mgr.query_synchronous_machines()
    machine_meas = sparql_mgr.query_synchronous_machine_measurements()
    machine_meas_mrid = \
        list(machine_meas[sparql.SYNCH_MACH_MEAS_MEAS_COL])
    machine_objects = equipment.initialize_synchronous_machines(machine_df)
    machine_mgr = equipment.PQEquipmentManager(
        eq_dict=machine_objects,
        eq_meas=machine_meas,
        meas_mrid_col=sparql.SYNCH_MACH_MEAS_MEAS_COL,
        eq_mrid_col=sparql.SYNCH_MACH_MEAS_MACH_COL,
        eq_type='SynchronousMachine')

    # Get list of dictionaries for routing output.
    fn_mrid_list = [{
        'function': reg_mgr.update_state,
        'mrids': reg_meas_mrid
    }, {
        'function': cap_mgr.update_state,
        'mrids': cap_meas_mrid
    }, {
        'function': switch_mgr.update_state,
        'mrids': switch_meas_mrid
    }, {
        'function': inverter_mgr.update_state,
        'mrids': inverter_meas_mrid
    }, {
        'function': machine_mgr.update_state,
        'mrids': machine_meas_mrid
    }]

    # Create a SimOutRouter to listen to simulation outputs.
    # noinspection PyUnusedLocal
    router = SimOutRouter(platform_manager=platform,
                          sim_id=sim_id,
                          fn_mrid_list=fn_mrid_list)

    # Get EnergyConsumer (load) data.
    # noinspection PyUnusedLocal
    load_nom_v = sparql_mgr.query_load_nominal_voltage()
    load_meas = sparql_mgr.query_load_measurements()

    # noinspection PyUnusedLocal
    meas_id = load_meas.iloc[0]['id']

    # Get substation data.
    substation = sparql_mgr.query_substation_source()
    # noinspection PyUnusedLocal
    substation_bus_meas = sparql_mgr.query_measurements_for_bus(
        bus_mrid=substation.iloc[0]['bus_mrid'])

    # Get model, instantiate GLMManager.
    model = platform.get_glm(model_id=feeder_mrid)
    glm_mgr = GLMManager(model=model, model_is_path=False)

    # Tweak the model (one time setup).
    _prep_glm(glm_mgr)

    # TODO:
    #   Initialize a load_model.LoadModelManager object.
    #   Kick off the parallel load modeling process by calling the
    #   "fit_for_all" method. This could perhaps be started earlier
    #   in this function so that it's running in the background while
    #   other work is performed.

    # Extract the duration for which GridLAB-D models will be run in the
    # genetic algorithm.
    model_run_time = ga.CONFIG["ga"]["intervals"]["model_run"]

    # Turn down inverter logging.
    log_level = 'WARNING'
    inverter_mgr.log.setLevel(log_level)
    LOG.info(
        f'InverterManager log level changed to {log_level} to reduce output.')
    log_level = 'ERROR'
    inverter_mgr.update_equipment_log_level(level=log_level)
    LOG.info(
        f'All individual inverter log levels changed to {log_level} to reduce '
        'output.')

    # Run the genetic algorithm.
    # TODO: Manage loop exit, etc. Should exit when simulation is
    #   complete.
    iterations = 0
    while True:
        LOG.info('*' * 200)
        # Update the inverter, switches, and machines in the GridLAB-D
        # model with the current states from the platform.
        _update_glm_inverters_switches_machines(glm_mgr, inverter_objects,
                                                switch_objects,
                                                machine_objects)

        # Get the most recent simulation time from the clock. The
        # platform operates in UTC.
        starttime = datetime.fromtimestamp(clock.sim_time,
                                           tz=dateutil.tz.tzutc())

        # TODO: Layer the most recent ZIP load models onto the
        #   GridLAB-D model via the GLMManager's
        #   update_all_triplex_loads method.
        # TODO: Periodically update the ZIP load models via the
        #   LoadModelManager's fit_for_all method.

        # Compute stop time.
        stoptime = starttime + timedelta(seconds=model_run_time)

        LOG.info('Starting genetic algorithm to compute set points for '
                 f'{starttime} through {stoptime}.')

        # Initialize manager for genetic algorithm.
        ga_mgr = ga.GA(regulators=reg_objects,
                       capacitors=cap_objects,
                       starttime=starttime,
                       stoptime=stoptime)

        # Create a GAStopper to ensure that the GA stops if a switch
        # opens.
        # noinspection PyUnusedLocal
        ga_stopper = GAStopper(ga_obj=ga_mgr,
                               eq_mgr=switch_mgr,
                               eq_type='switch')

        # Start the genetic algorithm.
        ga_mgr.run(glm_mgr=glm_mgr)

        # Wait for the genetic algorithm to complete.
        ga_mgr.wait()

        # Extract equipment settings.
        reg_forward = ga_mgr.regulators
        cap_forward = ga_mgr.capacitors

        # Get the commands.
        reg_cmd = reg_mgr.build_equipment_commands(reg_forward)
        cap_cmd = cap_mgr.build_equipment_commands(cap_forward)

        # Send 'em!
        reg_msg = platform.send_command(sim_id=sim_id, **reg_cmd)
        if reg_msg is not None:
            LOG.info('Regulator commands sent in.')

        cap_msg = platform.send_command(sim_id=sim_id, **cap_cmd)
        if cap_msg is not None:
            LOG.info('Capacitor commands sent in.')

        # Verify that the equipment was properly updated. At present,
        # the simulator emits messages every 3 simulation seconds. So,
        # using a wait_duration of 12 will wait 3 time steps. Using a
        # timeout of 5 essentially gives a 2 second grace period for
        # all the processing in between simulation time steps.
        # TODO: regulator and capacitor command verification should be
        #   done concurrently, rather than in series like this.
        # TODO: Attempt to command inoperable equipment to bring it
        #   back into the fold.
        if reg_msg is not None:
            inoperable_regs = _verify_commands(mgr=reg_mgr,
                                               eq_type='regulator',
                                               wait_duration=12,
                                               timeout=5)

        if cap_msg is not None:
            inoperable_caps = _verify_commands(mgr=cap_mgr,
                                               eq_type='capacitor',
                                               wait_duration=12,
                                               timeout=5)

        iterations += 1

        if (iterations % 5) == 0:
            LOG.warning("I'm tired! I've ran the genetic algorithm "
                        f"{iterations} times! When does it end?")