Пример #1
0
def run_simulation(config_path):
    """Load a JSON simulation config (relative to the package root) and run it.

    Arguments:
        config_path     Path of the JSON config file, relative to the parent
                        directory of this module.

    Returns whatever ``Simulator.simulate`` produces.
    """
    full_path = os.path.join(os.path.dirname(__file__), '..', config_path)
    with open(full_path) as config_file:
        config_data = json.load(config_file)

    return simulation.Simulator(simulation.Config(config_data)).simulate()
Пример #2
0
def run_sims(switch_time, nruns, setup, model, params):
    """Run simulations and extract objective values."""

    def controller(time):
        # Bang-bang policy: full allocation to the first channel before the
        # switch time, full allocation to the second channel afterwards.
        return [1.0, 0.0] if time < switch_time else [0.0, 1.0]

    # NOTE: mutates the caller's params dict, as the original did.
    params["controller_args"] = {"oc_model": model, "policy": controller}

    sim = simulation.Simulator(setup['nodes'], setup['risk_coupling'],
                               setup['dist_coupling'], params,
                               control_tools.RiskPolicyController)
    results = sim.run_simulation(nruns=nruns, verbose=False)

    return [np.sum(run.objective) for run in results]
Пример #3
0
def create_simulator():
    """Build a demo simulator containing two moving polygons.

    Fix: the original approximated pi as 3.14 in the degree-to-radian
    conversions and angular velocities; math.radians / math.pi are exact.

    Returns the populated simulation.Simulator instance.
    """
    import math

    sim = simulation.Simulator()

    # 2x2 square at (-3, 2), rotated 30 degrees, moving right at 4 units/s
    # with angular velocity 2*pi rad/s.
    # NOTE(review): the meaning of the scalar args (1, 2.0/3.0 and 3, 10
    # below) is not visible here — presumably mass/material properties;
    # confirm against simulation.Polygon.
    sim.add_polygon(
        simulation.Polygon(geo.Point(-3, 2), math.radians(30), [
            geo.Point(1, 1),
            geo.Point(-1, 1),
            geo.Point(-1, -1),
            geo.Point(1, -1)
        ], 1, 2.0 / 3.0, geo.Vector(4, 0), geo.Vector(0, 0, 2 * math.pi)))

    # 6x2 rectangle at (5, 0), rotated 90 degrees, initially at rest,
    # spinning at pi rad/s.
    sim.add_polygon(
        simulation.Polygon(geo.Point(5, 0), math.radians(90), [
            geo.Point(3, 1),
            geo.Point(-3, 1),
            geo.Point(-3, -1),
            geo.Point(3, -1)
        ], 3, 10, geo.Vector(0, 0), geo.Vector(0, 0, math.pi)))

    return sim
Пример #4
0
def main(_):
    """Build a toy dataset from the simulator, initialise and run a
    PflanzModel once inside a fresh TF1 graph, save it, then print the
    resulting action parameters.

    The ignored argument matches the tf.app.run entry-point signature.
    """
    config = cf.CustomConfig()
    sim = simulation.Simulator()
    # Populate the simulator with dummy data and pull it all back out.
    # NOTE(review): fillStupid/getAllData semantics not visible here — confirm.
    sim.fillStupid()
    raw_data = sim.getAllData()

    metagraphs = []  # NOTE(review): never used after this point
    current_time = 6

    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        with tf.name_scope("Input"):
            train_input = mod.PflanzInput(config=config,
                                          data=raw_data,
                                          name="input")
            train_input.make_batches()
            # Restrict the input pipeline to batch 0 only.
            train_input.input_is_one_batch(0)

        with tf.variable_scope("Model", reuse=None, initializer=initializer):
            m = mod.PflanzModel(current_time,
                                is_training=True,
                                config=config,
                                input_=train_input)

        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            # Fetch the initial state, then run the graph through to the
            # final state before checkpointing.
            state = session.run(m.initial_state)
            session.run([m.final_state])

            # Save at step 6 (same value as current_time above).
            m.save(session, config, 6)

    actions = get_params(current_time, config, sim)
    # actions is nested three levels deep; the markers printed below suggest
    # the structure is batches -> plants -> per-plant values.
    for i in actions:
        for j in i:
            for k in j:
                print(k)
            print("oneplant")
        print("onebatch")
Пример #5
0
def get_objective_set(fitter, setup, state_init, high_prop, nruns):
    """Get objective values from a number of simulation runs using given control proportion."""

    # Epidemiological rates shared by the simulation and the risk model.
    rate_keys = ["birth_rate", "death_rate", "removal_rate", "recov_rate"]

    params = {key: setup[key] for key in rate_keys}
    params["end_time"] = setup['end_time']
    params["update_control_on_all_events"] = True
    params["control_update_freq"] = np.inf
    params["return_event_data"] = False

    model_params = {key: setup[key] for key in rate_keys}
    model_params['state_init'] = state_init
    model_params['times'] = np.linspace(0, setup['end_time'], 101)
    model_params['max_control_rate'] = setup['max_control_rate']
    model_params['high_alloc_cost'] = 0.0
    model_params['low_alloc_cost'] = 0.0

    model = risk_model.RiskModel(model_params, fitter.get_fit())

    def controller(time):
        # Constant policy: fixed split between high- and low-risk allocation.
        return [high_prop, 1.0 - high_prop]

    params["controller_args"] = {"oc_model": model, "policy": controller}

    sim = simulation.Simulator(setup['nodes'], setup['risk_coupling'],
                               setup['dist_coupling'], params,
                               control_tools.RiskPolicyController)
    runs = sim.run_simulation(nruns=nruns, verbose=False)
    print("Done high proportion {}".format(high_prop))
    return [np.sum(run.objective) for run in runs]
Пример #6
0
    def get_data(self, ninits=20, nrepeats=10, initial_nodes=None):
        """Run simulations and extract data for fitting.

        Arguments:
            ninits          Number of distinct initialisations (potentially changing initial conds.
            nrepeats        Number of simulation realisations for each initialisation.
            initial_nodes   Starting conditions for simulation realisations. If None then conditions
                            are randomised, otherwise must be a list of simulation.Node objects.
        """

        print("Generating training set...")

        npoints = 51
        self.data['dpc_times'] = np.linspace(0, self.sim_params['end_time'], npoints)
        self.data['dpc_sus'] = np.empty((3, 2, npoints, 0))
        self.data['dpc_inf'] = np.empty((3, 2, npoints, 0))
        self.data['init_state'] = np.empty((0, 18))

        event_records = []

        for _ in range(ninits):
            if initial_nodes is not None:
                start_nodes = copy.deepcopy(initial_nodes)
            else:
                # Randomly split two initial infections between risk classes.
                nlow = np.random.randint(0, 3)
                start_nodes = randomise_infection(self.nodes, nhigh=2 - nlow, nlow=nlow)

            sim = simulation.Simulator(
                start_nodes, self.risk_coupling, self.dist_coupling, self.sim_params)
            runs = sim.run_simulation(nruns=nrepeats, verbose=False)
            # A single run is returned bare rather than in a list; normalise.
            if nrepeats == 1:
                runs = [runs]
            self._store_dpc_data(runs)

            event_records += self._extract_event_data(runs)

        self.data['input_events'] = event_records
Пример #7
0
def main():
    """Parse CLI arguments, build the network from a JSON file, then run the
    simulation (static routing pass, flow initialisation, event loop).

    Python 2 script (print statements throughout).

    Fixes: the JSON file handle was opened and never closed (now a context
    manager); removed an unused local (counter).
    """
    parser = argparse.ArgumentParser(
        description='Run simulation on JSON file.')

    parser.add_argument('--json',
                        '-j',
                        action='store',
                        dest='json_file_name',
                        help='Use network stored in json file',
                        required=True)

    # option for tcp reno or tcp fast (exactly one must be chosen)
    tcp_type = parser.add_mutually_exclusive_group(required=True)
    tcp_type.add_argument('--Reno',
                          dest='tcp_type',
                          action='store_const',
                          const='Reno',
                          help='Use the TCP-Reno congestion control algorithm')

    tcp_type.add_argument("--FAST",
                          dest='tcp_type',
                          action='store_const',
                          const='FAST',
                          help='Use the TCP-FAST congestion control algorithm')

    # options for graphing metrics
    metrics = parser.add_argument_group()
    metrics.add_argument('-m',
                         dest='metrics',
                         action='store_true',
                         help='Print graphs for metrics.\
                    Requires the following subarguments:')

    metricType = metrics.add_mutually_exclusive_group()

    metricType.add_argument('--more',
                            dest='log',
                            action='store_const',
                            const='more',
                            help='Prints a timetrace from collecting\
            all data.\
            Requires the -m argument.')

    metricType.add_argument('--less',
                            dest='log',
                            action='store_const',
                            const='less',
                            help='Prints a timetrace from collecting\
            a single datum per discrete time interval. \
            Subargument for the -m argument.')

    metricType.add_argument('--avg',
                            dest='log',
                            action='store_const',
                            const='avg',
                            help='Prints an approximate (average) timetrace\
            by collecting data over a discrete time interval. \
            Subargument for the -m argument.')

    metrics.add_argument('-l',
                         '--links',
                         nargs='+',
                         type=str,
                         action='store',
                         dest='links',
                         metavar='LinkID',
                         help='Specify which\
            links are to be logged. LinkID must given in the form\
            \'L1\', \'L2\', etc. Subargument for the -m argument.')

    metrics.add_argument('-f',
                         '--flows',
                         nargs='+',
                         type=str,
                         action='store',
                         dest='flows',
                         metavar='FlowID',
                         help='Specify which\
            flows are to be logged. FlowID must given in the form\
            \'F1\', \'F2\', etc. Subargument for the -m argument.')

    parser.add_argument('-v',
                        action='store_true',
                        dest='verbose',
                        help='verbose: prints out information about events,\
            event time, and number of elements in priority queue')

    args = parser.parse_args()
    # All subargs must be present if --m is invoked
    if not args.metrics and (args.log is not None or args.links is not None
                             or args.flows is not None):
        parser.print_usage()
        print "Error: -m argument is required."
        return
    # All subargs must be present if --m is invoked
    elif args.metrics and (args.log is None or args.links is None
                           or args.flows is None):
        parser.print_usage()
        print "Error: All of --m's subargments required."
        return

    # Context manager so the JSON file is closed promptly (the original
    # leaked the file handle).
    with open(args.json_file_name) as f:
        parsed_data = json.loads(f.read())
    if args.verbose:
        print "JSON DATA:"
        pprint.pprint(parsed_data)

    devices = {}
    links = {}
    flows = {}

    print "\n\n"

    # Parse json data into data structures
    print "Iterating over hosts:"
    for host_name in parsed_data['hosts']:
        print "Host ", host_name, "has data: ", parsed_data['hosts'][host_name]
        host = classes.Host(str(host_name))
        devices[str(host_name)] = host

    print "Iterating over routers:"
    for router_name in parsed_data['routers']:
        print "Router ", router_name, "has data: ", parsed_data['routers'][
            router_name]
        router = classes.Router(str(router_name))
        devices[str(router_name)] = router
    print "Hosts and routers instantiated. ", "\n\n"

    print "Iterating over links and adding to hosts/routers:"
    for link_name in parsed_data['links']:
        link_data = parsed_data['links'][link_name]
        print "Link ", link_name, "has data: ", link_data

        link = classes.Link(str(link_name), link_data['link_rate'],
                            link_data['link_delay'], link_data['link_buffer'],
                            devices[link_data['devices'][0]],
                            devices[link_data['devices'][1]])
        links[str(link_name)] = link
    print "Links instantiated.", "\n\n"

    print "Iterating over flows:"
    for flow_name in parsed_data['flows']:
        flow_data = parsed_data['flows'][flow_name]
        print "Flow ", flow_name, "has data: ", flow_data

        flow = classes.Flow(str(flow_name), devices[flow_data['flow_src']],
                            devices[flow_data['flow_dest']],
                            flow_data['data_amt'], flow_data['flow_start'],
                            flow_data['theoRTT'])
        flows[str(flow_name)] = flow
    print "Flows instantiated.", "\n\n"

    # Verifying metric inputs from command line are correct
    if args.metrics:
        for flowID in args.flows:
            if flowID not in flows.keys():
                print "Bad flowID in argument list."
                return
        for linkID in args.links:
            if linkID not in links.keys():
                print "Bad linkID in argument list."
                return

    network = classes.Network(devices, links, flows)
    met = None
    if args.metrics:
        met = m.Metrics(args.log, args.flows, args.links)
    simulator = simulation.Simulator(network, args.tcp_type, met)

    # Generate initial routing table
    print "Running..."
    if args.verbose:
        print "Static routing:"

    simulator.staticRouting()
    # Drain the static-routing events before the main simulation starts.
    while not simulator.q.empty():
        result = simulator.processEvent()
        if args.verbose:
            print "processing one event\n" + str(result)

    if args.verbose:
        print "------------NETWORK------------"
        print "----------DEVICE DETAILS----------"
        for device_name in devices:
            print devices[device_name]

        print "----------LINK DETAILS----------"
        for link_name in links:
            print links[link_name]

        print "----------FLOW DETAILS----------"
        for flow_name in flows:
            print flows[flow_name]

        print "----------STARTING SIMULATION------------"

    # Flows begin: schedule one initialisation event per flow at its start time
    for flow_name in flows:
        flow = flows[flow_name]
        timer = flow.flow_start

        newGenEvent = simulation.Event(None, None, "INITIALIZEFLOW", timer,
                                       flow)
        simulator.insertEvent(newGenEvent)

    # Periodic dynamic re-routing event.
    newDynamicRoutingEvent = simulation.Event(None, None, "REROUT",
                                              constants.REROUT_TIME, None)
    simulator.insertEvent(newDynamicRoutingEvent)

    # Main event loop.
    while not simulator.q.empty():
        result = simulator.processEvent()
        if args.verbose:
            print "QUEUE SIZE: " + str(
                simulator.q.qsize()) + "\n" + str(result)

    for flow_name in flows:
        flow = flows[flow_name]
        print "DATA ACKNOWLEDGED: " + str(flow.data_acknowledged)
        print "DATA MADE: " + str(flow.data_amt)

    # NOTE(review): [:-4] strips only four characters; for a '.json' name this
    # leaves a trailing dot in the output — confirm intended display format.
    print "Simulation for ", args.json_file_name[:
                                                 -4], args.tcp_type, args.log, " done!"
    simulator.done()
Пример #8
0
    def _get_sim_dpcs(self, model, opt_control, initial_nodes=None):
        """Run simulations with and without control for assessment DPCs.

        Fix: the uncontrolled and controlled passes duplicated the node
        initialisation, run loop and risk-format conversion; those are now
        shared local helpers (the second pass also no longer shadows its loop
        variable inside the conversion loop).
        """

        sim_params = copy.deepcopy(self.parent_fitter.sim_params)

        def initialise_nodes():
            # Randomise starting infections unless explicit nodes were given.
            if initial_nodes is None:
                return fitting.randomise_infection(
                    self.parent_fitter.nodes, nhigh=self.data['init_conds'][1],
                    nlow=self.data['init_conds'][4])
            return copy.deepcopy(initial_nodes)

        def to_risk_format(event_runs):
            # Collapse the 12 spatial state columns into 4 risk classes and
            # carry the 3 trailing event columns across unchanged.
            risk_events = []
            for run in event_runs:
                risk_run = np.zeros((len(run), 7))
                for col in range(4):
                    risk_run[:, col] = np.sum(run[:, col:12:4], axis=1)
                risk_run[:, 4:] = run[:, 12:15]
                risk_events.append(risk_run)
            return risk_events

        def gather_runs(controller, include_vac):
            # 10 batches of 10 realisations; collect DPC data and event logs.
            stored = {}
            events = []
            for _ in range(10):
                simulator = simulation.Simulator(
                    initialise_nodes(), self.parent_fitter.risk_coupling,
                    self.parent_fitter.dist_coupling, sim_params,
                    controller=controller)
                all_runs = simulator.run_simulation(nruns=10, verbose=False)
                self.parent_fitter._store_dpc_data(
                    all_runs, data_store=stored, include_vac=include_vac)
                events += self.parent_fitter._extract_event_data(all_runs)
            return stored, events

        # Run no control sims
        print("Generating risk testing set...")
        stored_data, input_events = gather_runs(None, include_vac=False)
        for key, val in stored_data.items():
            self.data[key] = val
        self.data["assessment_events"] = to_risk_format(input_events)

        # Enable control for the second pass.
        sim_params['update_control_on_all_events'] = True
        sim_params['vac_rate'] = 1.0
        sim_params["controller_args"] = {
            "oc_model": model,
            "policy": opt_control
        }

        # Run controlled simulations
        print("Generating controlled risk testing set...")
        stored_data, input_events = gather_runs(
            control_tools.RiskPolicyController, include_vac=True)
        for key, val in stored_data.items():
            self.data["controlled_" + key] = val
        self.data["controlled_assessment_events"] = to_risk_format(input_events)
Пример #9
0
def main():
    """Construct a default simulator and run it."""
    simulation.Simulator().simulate()

    """
Пример #10
0
# Script: wire sensors onto two simulated robots, run the simulation, then
# read back the debug sensor's position and the obstacle map.
# NOTE(review): the wildcard import supplies the sensor classes used below
# (Debug_Sensor, IMU_Sensor, Stereo_Camera).
from Sensors import *
import simulation

# simulation object
sim = simulation.Simulator()

# get robots from sim
robot1 = sim.robot1
robot2 = sim.robot2

# add sensors to robot 1
sensor1 = Debug_Sensor()  # debug sensor is ground truth, can't be used in official run
robot1.add_sensor(sensor1)

# add sensors to robot 2
sensor2 = IMU_Sensor()
sensor3 = Stereo_Camera()
robot2.add_sensor(sensor2)
robot2.add_sensor(sensor3)

# start simulation (ensure to add sensors before calling this)
sim.start_simulation()

# ground-truth position reported by the debug sensor after the run
x1 = sensor1.x
y1 = sensor1.y

# round float to int for path planning
grid_x1, grid_y1 = simulation.to_grid(x1, y1)

# get obstacle map
ob_map = sim.obstacle_map
Пример #11
0
    def __init__(self, config, transform=None):
        """Set up data sources and the speech-simulation pipeline from config.

        Arguments:
            config      Nested dict; reads "data_config" (segment/frame
                        geometry, reverb/noise switches, SNR and T60 ranges),
                        "source_paths", "data_path", "sweep_size", and the
                        optional "dir_noise_paths" / "rir_paths".
            transform   Optional transform; stored for later use.
        """
        self.transform = transform
        # Minimum waveform length required to cut one full segment.
        self.min_len_wav = (config["data_config"]["seg_len"] - 1) * config[
            "data_config"]["frame_shift"] + config["data_config"]["frame_len"]
        self.sequence_mode = config["data_config"]["sequence_mode"]

        # IO layer: set up the data sources

        # load the three types of source data
        dir_noise_streams = None
        rir_streams = None
        source_streams = None
        if "dir_noise_paths" in config:
            dir_noise_streams = self.load_streams(config["dir_noise_paths"],
                                                  config['data_path'],
                                                  is_speech=False)

        if "rir_paths" in config:
            rir_streams = self.load_streams(config["rir_paths"],
                                            config['data_path'],
                                            is_speech=False,
                                            is_rir=True)

        source_streams = self.load_streams(config["source_paths"],
                                           config['data_path'],
                                           is_speech=True)

        # Number of items available in each source stream.
        self.source_stream_sizes = [
            i.get_number_of_data() for i in source_streams
        ]

        if self.sequence_mode:
            # Cumulative sizes, presumably so a flat index can be mapped to a
            # (stream, item) pair elsewhere — confirm against the consumer.
            self.source_stream_cum_sizes = [self.source_stream_sizes[0]]
            for i in range(1, len(self.source_stream_sizes)):
                self.source_stream_cum_sizes.append(
                    self.source_stream_cum_sizes[-1] +
                    self.source_stream_sizes[i])

        # Simulation layer: set up the simulator
        # get the single channel single source simulation configuration
        use_reverb = config["data_config"]["use_reverb"]
        use_noise = config["data_config"]["use_dir_noise"]
        snr_range = [
            config["data_config"]["snr_min"], config["data_config"]["snr_max"]
        ]
        t60_range = [
            config["data_config"]["t60_min"], config["data_config"]["t60_max"]
        ]
        cfg_simu = simu.config.single_channel_single_source_config(
            use_reverb=use_reverb,
            use_noise=use_noise,
            snr_range=snr_range,
            t60_range=t60_range)

        #print("data simulation config{}".format(json.dumps(cfg_simu, sort_keys=True, indent=4)))

        # single source simulation shares the input streams with multi-source simulation, but uses different configurations
        simulator_ss = simu.Simulator(cfg_simu,
                                      source_streams,
                                      noise_streams=dir_noise_streams,
                                      rir_streams=rir_streams,
                                      iso_noise_streams=None)

        generator_config = DataGeneratorSequenceConfig(
            n_hour_per_epoch=config["sweep_size"],
            sequence_mode=self.sequence_mode,
            load_label=config["data_config"]["load_label"],
            seglen=config["data_config"]["seg_len"],
            segshift=config["data_config"]["seg_shift"],
        )

        data_generator = DataGeneratorTrain(simulator_ss,
                                            generator_config,
                                            DEBUG=False)
        if self.sequence_mode:
            # Sequence mode consumes the generator directly.
            self.data_buffer = data_generator
        else:
            # Otherwise samples go through a randomising buffer.
            self.data_buffer = feature.data_generation.DataBuffer(
                data_generator,
                buffer_size=200,
                preload_size=200,
                randomize=True)
#        self.data_buffer = feature.data_generation.DataBuffer(data_generator)
        self.use_cmn = config["data_config"]["use_cmn"]
        self.sample_len_seconds = config["data_config"][
            "seg_len"] * 0.01  # default sampling rate: 100Hz
        self.stream_idx_for_transform = [0]
Пример #12
0
	# Map allocator names to their strategy functions.
	allocators = {'best_fit': best_fit, 'first_fit': first_fit}

	if not os.path.exists('output'):
		os.mkdir('output')

	# One recorded simulation per (allocator, defragmentation threshold) pair.
	for name, allocator in allocators.items():
		print(f'Allocator: {name}')
		for t in thresholds:
			print(f'Threshold: {t}')
			# e.g. output\best_fit-05.txt for threshold 0.5.
			# NOTE(review): Windows-style separator hard-coded in the path.
			file = f"output\\{name}-{str(t).replace('.', '')}.txt"
			recorder = recording.Recorder(memory, file=file)
			defragmentor = storage.ThresholdDefragmentor(memory, threshold=t)

			simulator = simulation.Simulator(
				stream=stream,
				allocator=allocator,
				defragmentor=defragmentor,
				recorder=recorder,
				std_out=std_out)

			simulator.run(num_steps)

		# Reset memory state before the next allocator's sweep.
		memory.reset()

	groups = get_allocator_files()
	for group in groups:
		fig, ax = plt.subplots()
		# NOTE(review): 'f' is not defined in this visible scope — this filter
		# likely intended to test the marker against each file name in the
		# group (e.g. 'summary' in s); confirm and fix upstream.
		summaries = ((a, s) for a, s in group if 'summary' in f)
		allocator_name = None
		str_thresh = None
		for allocator, summary in summaries:
			allocator_name = allocator
Пример #13
0
    def _get_sim_dpcs(self, model, opt_control, initial_nodes=None):
        """Run simulations with and without control for assessment DPCs.

        Fix: the uncontrolled and controlled passes duplicated the region
        initialisation and the batch-run loop; those are now shared local
        helpers.
        """

        sim_params = copy.deepcopy(self.parent_fitter.sim_params)

        def initialise_nodes():
            # Randomise starting infections per region unless explicit nodes
            # were supplied.
            if initial_nodes is not None:
                return copy.deepcopy(initial_nodes)
            region_init = {
                "A":
                (self.data['init_conds'][1], self.data['init_conds'][4]),
                "B":
                (self.data['init_conds'][7], self.data['init_conds'][10]),
                "C":
                (self.data['init_conds'][13], self.data['init_conds'][16])
            }
            return fitting.randomise_init_infs(
                self.parent_fitter.nodes, region_init)

        def gather_runs(controller, include_vac):
            # 10 batches of 10 realisations; collect DPC data and event logs.
            stored = {}
            events = []
            for _ in range(10):
                simulator = simulation.Simulator(initialise_nodes(),
                                                 self.parent_fitter.risk_coupling,
                                                 self.parent_fitter.dist_coupling,
                                                 sim_params,
                                                 controller=controller)
                all_runs = simulator.run_simulation(nruns=10, verbose=False)
                self.parent_fitter._store_dpc_data(all_runs,
                                                   data_store=stored,
                                                   include_vac=include_vac)
                events += self.parent_fitter._extract_event_data(all_runs)
            return stored, events

        # Run no control sims
        print("Generating space testing set...")
        stored_data, input_events = gather_runs(None, include_vac=False)
        for key, val in stored_data.items():
            self.data[key] = val
        self.data["assessment_events"] = input_events

        # Enable control for the second pass.
        sim_params['update_control_on_all_events'] = True
        sim_params['vac_rate'] = 1.0
        sim_params["controller_args"] = {
            "oc_model": model,
            "policy": opt_control
        }

        # Run simulations with control
        stored_data, input_events = gather_runs(
            control_tools.SpacePolicyController, include_vac=True)
        for key, val in stored_data.items():
            self.data["controlled_" + key] = val
        self.data["controlled_assessment_events"] = input_events
Пример #14
0
def make_data(nruns=10,
              reuse_fitter=False,
              skip_risk_mpc=False,
              skip_space_mpc=False,
              check_data=None):
    """Generate all data for figures.

    Runs no-control, risk-prioritised, constant-split, open-loop optimal and
    (optionally) MPC-controlled simulations, plus the corresponding
    deterministic approximate-model runs, and collects everything into one
    dictionary.

    Arguments:
        nruns           Number of simulation runs for each control strategy
        reuse_fitter    Whether to reuse previous fits. If so take from Data/Fit/FitData.pickle
        skip_risk_mpc   If True do not run risk based MPC simulations
        skip_space_mpc  If True do not run space based MPC simulations
        check_data      If not None this is filepath for existing data that this new data will be
                        appended to. Setup in each will be checked to ensure parameters have not
                        been changed.

    Returns:
        dict keyed by dataset name ('setup', 'sim_no_control', 'sim_high', ...).

    Raises:
        RuntimeError if any BOCOP optimisation fails to converge.
        ValueError if check_data setup parameters do not match the current setup.
    """

    # NOTE(review): "seaborn-whitegrid" was renamed to "seaborn-v0_8-whitegrid"
    # in matplotlib 3.6 — confirm the pinned matplotlib version still accepts it.
    plt.style.use("seaborn-whitegrid")

    if check_data is not None:
        with open(check_data, "rb") as infile:
            setup_check = pickle.load(infile)['setup']

    all_data = {}

    # Setup initial network structure and state from node file
    node_file = "node_file.txt"
    nodes = simulation.initialise_nodes(node_file)
    nodes = fitting.randomise_infection(nodes, nlow=3, nhigh=0, node_choice=0)

    # Setup spatial and risk coupling
    beta = 2.5
    scale = 0.2
    risk_coupling = np.array([[1, 0.008], [0.008, 0.016]])
    dist_coupling = np.zeros((len(nodes), len(nodes)))
    np.fill_diagonal(dist_coupling, 1.0)
    for i, node1 in enumerate(nodes):
        for j, node2 in enumerate(nodes):
            if node1.region == node2.region:
                # within region connections: exponential decay with distance
                dist = np.linalg.norm(node1.position - node2.position)
                dist_coupling[i, j] = np.exp(-dist / scale)

    for k, l in [(17, 20), (18, 21), (19, 22), (32, 52), (33, 53), (34, 54)]:
        # between region connections
        dist_coupling[l, k] = 0.1
        dist_coupling[k, l] = 0.1
    # Scale every coupling entry by the overall transmission rate. (The
    # original column-by-column loop scaled all columns, i.e. the whole matrix.)
    dist_coupling *= beta

    # Setup required parameters for simulations and fitting
    setup = {
        'nodes': nodes,
        'dist_coupling': dist_coupling,
        'risk_coupling': risk_coupling,
        'birth_rate': 0.01,
        'death_rate': 0.01,
        'removal_rate': 0.5,
        'recov_rate': 0.25,
        'end_time': 5,
        'max_control_rate': 200,
        'mpc_update_freq': 0.5,
        'bounds': {
            'beta':
            np.array([[(0, 1), (0, 1)], [(0, 1), (0, 1)]]),
            'coef':
            # np.float was removed in NumPy 1.24; the builtin float is the
            # equivalent (and originally intended) dtype.
            np.full((4, 2, 2), (0, 3), dtype=float),
            'sigma':
            np.array([[(0, 1), (0, 1), (0, 1)], [(0, 1), (0, 1), (0, 1)],
                      [(0, 1), (0, 1), (0, 1)]]),
            'rho':
            np.array([[(0, 1), (0, 1)], [(0, 1), (0, 1)]])
        },
        'start': {
            'beta':
            np.array([[8.8e-3, 5.8e-5], [7.7e-4, 6.2e-4]]),
            'coef':
            np.array([[1.9, 1.0], [1.5, 1.0], [2.4, 1.0], [1.9, 1.1]]),
            'sigma':
            np.array([[5.4e-2, 4.9e-5, 0], [7.7e-5, 5.3e-2, 1.5e-4],
                      [0, 1.5e-4, 5.5e-2]]),
            'rho':
            np.array([[1, 0.03], [0.03, 0.05]])
        },
    }

    all_data['setup'] = setup

    params = {
        "birth_rate": setup['birth_rate'],
        "death_rate": setup['death_rate'],
        "removal_rate": setup['removal_rate'],
        "recov_rate": setup['recov_rate'],
        "end_time": setup['end_time'],
        "update_control_on_all_events": True,
        "control_update_freq": None,
        "return_event_data": True
    }

    os.makedirs(os.path.join("Data", "Fit"), exist_ok=True)

    # Test run and show sample no control simulations
    simulator = simulation.Simulator(setup['nodes'], setup['risk_coupling'],
                                     setup['dist_coupling'], params, None)
    no_control_run_data = simulator.run_simulation(nruns=5)
    visualisation.plot_dpc_data(nodes, no_control_run_data, {}, nruns=5)
    plt.show()

    # Run fitting of approximate models
    if reuse_fitter:
        fitter = fitting.Fitter.from_file(
            os.path.join("Data", "Fit", "FitData.pickle"))
    else:
        fitter = fitting.Fitter(nodes, risk_coupling, dist_coupling, params)
        fitter.get_data(initial_nodes=nodes)
        fitter.fit_risk(setup['bounds'], setup['start'])
        fitter.fit_space(setup['bounds'], setup['start'])

        # Save before and after assessment so a failed assess() does not lose the fit
        fitter.save(filename=os.path.join("Data", "Fit", "FitData.pickle"))
        print(fitter)
        fitter.assess(save_folder=os.path.join("Data", "Fit"),
                      initial_nodes=nodes,
                      max_control_rate=setup['max_control_rate'])
        fitter.save(filename=os.path.join("Data", "Fit", "FitData.pickle"))

    params['return_event_data'] = False

    # No control simulation results
    print("No control simulation runs")
    simulator = simulation.Simulator(setup['nodes'], setup['risk_coupling'],
                                     setup['dist_coupling'], params, None)

    no_control_run_data = simulator.run_simulation(nruns=nruns)
    all_data['sim_no_control'] = no_control_run_data

    # Scenario testing runs. state_init takes S/I/R-style compartments
    # (indices 1,2,3,5,6,7) from the first no-control run's initial state.
    model_params = {
        'birth_rate':
        setup['birth_rate'],
        'death_rate':
        setup['death_rate'],
        'removal_rate':
        setup['removal_rate'],
        'recov_rate':
        setup['recov_rate'],
        'state_init':
        np.array(no_control_run_data[0][0]['Global'][0])[[1, 2, 3, 5, 6, 7]],
        'times':
        np.linspace(0, setup['end_time'], 101),
        'max_control_rate':
        setup['max_control_rate'],
        'high_alloc_cost':
        0.0,
        'low_alloc_cost':
        0.0
    }
    # Space model: same parameters but an 18-long state (6 per region A/B/C)
    space_model_params = copy.deepcopy(model_params)
    state = np.zeros(18)
    state[0:6] = np.array(
        no_control_run_data[0][0]["RegionA"][0])[[1, 2, 3, 5, 6, 7]]
    state[6:12] = np.array(
        no_control_run_data[0][0]["RegionB"][0])[[1, 2, 3, 5, 6, 7]]
    state[12:18] = np.array(
        no_control_run_data[0][0]["RegionC"][0])[[1, 2, 3, 5, 6, 7]]
    space_model_params['state_init'] = state

    if check_data is None:
        # Get optimal constant split strategy balancing high and low control
        print("Optimising constant risk split strategy")
        min_prop = risk_split_scan.make_data(fitter,
                                             setup,
                                             model_params['state_init'],
                                             num_props=101,
                                             nruns=1000)
        setup['risk_min_prop'] = min_prop
    else:
        setup['risk_min_prop'] = setup_check['risk_min_prop']

    # Check setup is same as any previous data
    if check_data is not None:
        for key, val in setup.items():
            if isinstance(val, dict):
                for key2 in val.keys():
                    if np.any(val[key2] != setup_check[key][key2]):
                        raise ValueError(
                            "Setup parameters do not match in {0}!".format(
                                key))
            else:
                if np.any(val != setup_check[key]):
                    raise ValueError(
                        "Setup parameters do not match in {0}!".format(key))

    # Approximate models
    model = risk_model.RiskModel(model_params, fitter.get_fit())
    model_space = space_model.SpaceModel(space_model_params,
                                         fitter.get_fit(space=True))

    def high_prio_controller(time):
        # All control allocated to the high-risk population
        return [1.0, 0.0]

    def split_prio_controller(time):
        # Constant split found by the risk_split_scan optimisation
        return [setup['risk_min_prop'], 1.0 - setup['risk_min_prop']]

    params["controller_args"] = {
        "oc_model": model,
        "policy": high_prio_controller
    }
    # Prioritise high risk population
    print("High risk prioritisation simulation runs")
    simulator = simulation.Simulator(setup['nodes'], setup['risk_coupling'],
                                     setup['dist_coupling'], params,
                                     control_tools.RiskPolicyController)
    high_run_data = simulator.run_simulation(nruns=nruns)
    all_data['sim_high'] = high_run_data

    # Constant optimal risk split population
    print("Constant risk split simulation runs")
    params["controller_args"]["policy"] = split_prio_controller
    simulator = simulation.Simulator(setup['nodes'], setup['risk_coupling'],
                                     setup['dist_coupling'], params,
                                     control_tools.RiskPolicyController)
    split_run_data = simulator.run_simulation(nruns=nruns)
    all_data['sim_split'] = split_run_data

    # Risk based model - no control DPC
    no_control_risk_run = model.run_policy(risk_model.no_control_policy)
    all_data['risk_model_no_control'] = no_control_risk_run

    # Risk based model - optimal control (open loop via BOCOP)
    print("Risk model open loop simulation runs")
    bocop_run = model.run_bocop(verbose=True,
                                init_policy=risk_model.even_control_policy)
    if bocop_run.exit_code != "Optimal Solution Found.":
        raise RuntimeError("Convergence Failure!")
    params["controller_args"]["policy"] = bocop_run.control
    simulator = simulation.Simulator(setup['nodes'], setup['risk_coupling'],
                                     setup['dist_coupling'], params,
                                     control_tools.RiskPolicyController)
    risk_opt_run_data = simulator.run_simulation(nruns=nruns)
    all_data['sim_risk_opt'] = risk_opt_run_data
    all_data['risk_model_opt'] = bocop_run

    # Space based model - no control DPC
    no_control_space_run = model_space.run_policy(
        space_model.no_control_policy)
    all_data['space_model_no_control'] = no_control_space_run

    # Space based model - optimal control (open loop via BOCOP)
    print("Space model open loop simulation runs")
    params['vac_rate'] = 1.0
    bocop_run = model_space.run_bocop(verbose=True, sol_file="InitSol0.sol")
    if bocop_run.exit_code != "Optimal Solution Found.":
        raise RuntimeError("Convergence Failure!")
    params["controller_args"]["policy"] = bocop_run.control
    params["controller_args"]["oc_model"] = model_space
    simulator = simulation.Simulator(setup['nodes'], setup['risk_coupling'],
                                     setup['dist_coupling'], params,
                                     control_tools.SpacePolicyController)
    space_opt_run_data = simulator.run_simulation(nruns=nruns)
    all_data['sim_space_opt'] = space_opt_run_data
    all_data['space_model_opt'] = bocop_run

    if not skip_risk_mpc:
        # Risk based MPC runs: re-optimise every mpc_update_freq time units,
        # warm-started from the open-loop optimal control
        print("Risk model MPC simulation runs")
        params['controller_args'] = {
            'oc_model': model,
            'mpc_params': {
                'update_freq': setup['mpc_update_freq'],
                'model_horizon': model_params['times'][-1],
                'rolling_horizon': False,
                'init_policy': all_data['risk_model_opt'].control,
                'initial_control_func': None
            },
            'verbose': True
        }
        params['control_update_freq'] = setup['mpc_update_freq']
        simulator = simulation.Simulator(setup['nodes'],
                                         setup['risk_coupling'],
                                         setup['dist_coupling'], params,
                                         control_tools.MPCRiskController)
        mpc_run_data = simulator.run_simulation(nruns=nruns)
        all_data['sim_risk_mpc'] = mpc_run_data

    if not skip_space_mpc:
        # Setup cold start initialisation files, one per MPC update time
        update_times = np.arange(0, setup['end_time'],
                                 setup['mpc_update_freq'])
        sol_file_names = [
            "InitSol" + str(i) + ".sol" for i in range(len(update_times))
        ]
        if not reuse_fitter:
            for i, (sol_file, start_time) in enumerate(
                    zip(sol_file_names[1:], update_times[1:])):
                model_space.params['state_init'] = all_data[
                    'space_model_opt'].state(start_time)[:-1]
                # model_space.params['times'] = np.linspace(start_time, setup['end_time'], 101)
                model_space.params['times'] = np.arange(
                    start_time, setup['end_time'], setup['end_time'] / 100)
                bocop_run = model_space.run_bocop(
                    verbose=True,
                    sol_file=sol_file,
                    init_policy=space_model.even_control_policy)
                if bocop_run.exit_code != "Optimal Solution Found.":
                    raise RuntimeError("Convergence Failure!")

        # Space based MPC runs
        print("Space model MPC simulation runs")
        params['controller_args'] = {
            'oc_model': model_space,
            'mpc_params': {
                'update_freq': setup['mpc_update_freq'],
                'model_horizon': space_model_params['times'][-1],
                'rolling_horizon': False,
                'cold_start_files': sol_file_names,
                'init_policy': all_data['space_model_opt'].control,
                'initial_control_func': None
            },
            'verbose': True
        }
        params['control_update_freq'] = setup['mpc_update_freq']
        simulator = simulation.Simulator(setup['nodes'],
                                         setup['risk_coupling'],
                                         setup['dist_coupling'], params,
                                         control_tools.MPCSpaceController)
        mpc_run_data = simulator.run_simulation(nruns=nruns)
        all_data['sim_space_mpc'] = mpc_run_data

    # controller_args holds unpicklable objects (models, closures) — drop it
    params["controller_args"] = None
    return all_data
Пример #15
0
    def __init__(self, config):
        """Build the training data pipeline from a configuration dict.

        Loads speech source streams (and optional directional-noise / RIR
        streams), wires them into a single-channel single-source simulator,
        and wraps the resulting generator in a shuffling buffer unless
        running in sequence mode.

        config: dict with "data_config", "source_paths", "data_path",
                "sweep_size" and optionally "dir_noise_paths" / "rir_paths".
        """
        data_cfg = config["data_config"]
        data_path = config['data_path']

        self.transform = None
        self.sequence_mode = data_cfg["sequence_mode"]

        # Optional directional-noise streams
        noise_streams = None
        if "dir_noise_paths" in config:
            noise_streams = self.load_streams(config["dir_noise_paths"],
                                              data_path,
                                              is_speech=False)

        # Optional room-impulse-response streams
        rir_streams = None
        if "rir_paths" in config:
            rir_streams = self.load_streams(config["rir_paths"],
                                            data_path,
                                            is_speech=False,
                                            is_rir=True)

        # Speech source streams are mandatory
        source_streams = self.load_streams(config["source_paths"],
                                           data_path,
                                           is_speech=True)
        self.source_stream_sizes = [
            stream.get_number_of_data() for stream in source_streams
        ]
        if self.sequence_mode:
            # Running totals, used to map a flat utterance index to a stream
            cum_sizes = [self.source_stream_sizes[0]]
            for extra in self.source_stream_sizes[1:]:
                cum_sizes.append(cum_sizes[-1] + extra)
            self.source_stream_cum_sizes = cum_sizes

        # Simulation layer: single channel, single source configuration
        cfg_simu = simu.config.single_channel_single_source_config(
            use_reverb=data_cfg["use_reverb"],
            use_noise=data_cfg["use_dir_noise"],
            snr_range=[data_cfg["snr_min"], data_cfg["snr_max"]])

        simulator = simu.Simulator(cfg_simu,
                                   source_streams,
                                   noise_streams=noise_streams,
                                   rir_streams=rir_streams)

        generator_config = DataGeneratorSequenceConfig(
            n_hour_per_epoch=config["sweep_size"],
            sequence_mode=self.sequence_mode,
            load_label=data_cfg["load_label"],
            seglen=data_cfg["seg_len"],
            segshift=data_cfg["seg_shift"],
            use_cmn=data_cfg["use_cmn"])

        generator = DataGeneratorTrain(simulator,
                                       generator_config,
                                       DEBUG=False)
        # Sequence mode consumes the generator directly; otherwise wrap it in
        # a randomising pre-load buffer.
        if self.sequence_mode:
            self.data_buffer = generator
        else:
            self.data_buffer = DataBuffer(generator,
                                          buffer_size=20000,
                                          preload_size=200,
                                          randomize=True)

        # seg_len is in frames; default frame rate is 100 Hz
        self.sample_len_seconds = data_cfg["seg_len"] * 0.01
        self.stream_idx_for_transform = [0]
Пример #16
0
def get_lag_times(coupling, nruns=1000, keep_sims=False):
    """Setup stochastic and deterministic analogues and evaluate lag times to regions B and C.

    keep_sims: If True then all simulation data and deterministic run data is returned also.
    """

    init_state_a = [495, 5, 0, 0, 0, 0, 0, 0]
    init_state_b = [500, 0, 0, 0, 0, 0, 0, 0]
    init_state_c = [500, 0, 0, 0, 0, 0, 0, 0]

    ######################
    ## STOCHASTIC MODEL ##
    ######################

    # One node per region; only region A starts with infection
    nodes = [
        simulation.Node((0, 0), "A", init_state_a, 0),
        simulation.Node((0, 0), "B", init_state_b, 1),
        simulation.Node((0, 0), "C", init_state_c, 2),
    ]

    beta = 0.03

    # Chain topology A - B - C: regions only couple to their neighbours
    risk_coupling = np.ones((2, 2))
    dist_coupling = np.array([[beta, coupling, 0], [coupling, beta, coupling],
                              [0, coupling, beta]])

    # Setup required parameters
    params = {
        "birth_rate": 0.0,
        "death_rate": 0.0,
        "removal_rate": 1.0,
        "recov_rate": 0.0,
        "end_time": 50.0,
        "update_control_on_all_events": True,
        "control_update_freq": np.inf,
        "return_event_data": False
    }

    simulator = simulation.Simulator(nodes, risk_coupling, dist_coupling,
                                     params, None)
    stoch_run_data = simulator.run_simulation(nruns=nruns, verbose=False)

    def peak_time(series, min_size=None):
        # Time of the maximum of column 2 (infected count); NaN when the
        # peak never exceeds min_size (epidemic did not take off there).
        idx = np.argmax(np.array(series)[:, 2])
        if min_size is not None and not series[idx][2] > min_size:
            return np.nan
        return series[idx][0]

    # Region A always has a peak; B and C require more than 5 infected
    stoch_time_max = np.zeros((nruns, 3))
    for row, run in enumerate(stoch_run_data):
        stoch_time_max[row, 0] = peak_time(run.run_data["RegionA"])
        stoch_time_max[row, 1] = peak_time(run.run_data["RegionB"], min_size=5)
        stoch_time_max[row, 2] = peak_time(run.run_data["RegionC"], min_size=5)

    #########################
    ## DETERMINISTIC MODEL ##
    #########################

    model_params = {
        "birth_rate": 0.0,
        "death_rate": 0.0,
        "removal_rate": 1.0,
        "recov_rate": 0.0,
        "state_init": np.array(
            [495, 5, 0, 0, 0, 0, 500, 0, 0, 0, 0, 0, 500, 0, 0, 0, 0, 0],
            dtype=float),
        'times': np.linspace(0.0, params['end_time'], 2001),
        'max_control_rate': 0.0,
        'high_alloc_cost': 0.0,
        'low_alloc_cost': 0.0
    }

    # Inject the same couplings into a likelihood fitter shell so the
    # deterministic model matches the stochastic parameterisation
    space_fitter = fit_space_model.SpaceFitterLikelihood(None)
    space_fitter.data['sigma'] = dist_coupling
    space_fitter.data['rho'] = risk_coupling
    determ_model = space_model.SpaceModel(model_params, space_fitter)
    determ_run_data = determ_model.run_policy(space_model.no_control_policy)

    # Deterministic peak time per region: state index 6*region + 1 is infected
    times = model_params['times']
    determ_time_max = np.empty(3)
    for region in range(3):
        infected = [determ_run_data.state(t)[6 * region + 1] for t in times]
        determ_time_max[region] = times[np.argmax(infected)]

    # Lag = stochastic peak time minus deterministic peak time (per region)
    lag_times = stoch_time_max - determ_time_max

    if keep_sims:
        return (lag_times, [run.run_data for run in stoch_run_data],
                determ_run_data)
    return (lag_times, )