Example #1
    def __init__(self,
                 host="localhost",
                 port=6379,
                 db=0,
                 freq_sim=1000,
                 freq_control=100):
        self.links = ()
        self.rbdl = Rbdl()
        self.simulator = Simulator(self.rbdl,
                                   freq_sim=freq_sim,
                                   freq_control=freq_control,
                                   asynchronous=False)
        self.redis_db = redis.Redis(host=host,
                                    port=port,
                                    db=db,
                                    decode_responses=True)

        self.publish_cv = threading.Condition()
        self.publish_queue = None
        self.publisher = threading.Thread(target=self.publisher_thread)
        self.publisher.daemon = True
        self.publisher.start()

        self.set_gravity()
        self.set_ee_offset()
        self.set_joint_limits()
Example #2
    def simulate(self):
        '''
        This method is used to solve the resulting initial value problem
        after the computation of a solution for the input trajectories.
        '''

        logging.debug("Solving Initial Value Problem")

        # calculate simulation time
        T = self.dyn_sys.b - self.dyn_sys.a

        # get list of start values
        start = []

        if self.constraints is not None:
            sys = self._dyn_sys_orig
        else:
            sys = self.dyn_sys

        x_vars = sys.states
        start_dict = dict([(k, v[0]) for k, v in sys.boundary_values.items()
                           if k in x_vars])
        ff = sys.f_num

        for x in x_vars:
            start.append(start_dict[x])

        # create simulation object
        S = Simulator(ff, T, start, self.eqs.trajectories.u)

        logging.debug("start: %s" % str(start))

        # start forward simulation
        self.sim_data = S.simulate()
Example #3
	def btnStartPauseClicked(self):
		"""
		Start/Pause button is clicked
		"""

		if self.data['Geno'] is None:
			# data not loaded yet
			print('Data not loaded yet')
			self.changeStatus('Please load the data first.')
		else:
			# start simulation

			if self.simulator is None:
				# if it is the first time to start the simulation

				self.simulator = Simulator(self.data, self.gridLayout, self.canvas, self.listStatus, self.listSimStatus)
				self.simulator.start() 
				self.btnStartPause.setText('Pause')
			else:
				if self.simulator.isSimStopped():
					self.simulator.restart()
				else:
					if self.simulator.isSimPaused():
						self.simulator.resume()
						self.btnStartPause.setText('Pause')
					else:
						self.simulator.pause()
						self.btnStartPause.setText('Resume')
Example #4
def run_n_simulations(designpoint,
                      dp_name,
                      iterations,
                      outputs,
                      all_samples=False):
    """ Evaluate the given design point by calculating the MTTF for a given amount of sample_budget.

    This function is used for parallelising the Monte Carlo Simulation evaluation.

    :param designpoint: DesignPoint object - this DesignPoint is evauluated.
    :param dp_name: any - unique inidcator of this designpoint (e.g. integer)
    :param iterations: number of sample_budget to run the MCS.
    :param outputs: dictionary to write the MTTF output.
    :return: None
    """
    TTFs = []
    consumptions = []

    sim = Simulator(designpoint)

    for i in range(iterations):
        ttf, consum, size = sim.run()
        TTFs.append(ttf)
        consumptions.append(consum)

    if not all_samples:
        outputs[dp_name] = sum(TTFs) / len(TTFs), sum(consumptions) / len(
            consumptions), size
    else:
        outputs[dp_name] = list(
            zip(TTFs, consumptions, [size for _ in range(len(TTFs))]))
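The docstring above notes that run_n_simulations is meant for parallelising the Monte Carlo evaluation. A minimal driver sketch using the standard multiprocessing module, assuming a list of picklable DesignPoint objects is already available; the helper name and the iteration count are illustrative and not part of the original code:

import multiprocessing

def run_all_design_points(designpoints, iterations=1000):
    # Shared dict so every worker process can write its result under its own key.
    manager = multiprocessing.Manager()
    outputs = manager.dict()

    workers = [multiprocessing.Process(target=run_n_simulations,
                                       args=(dp, i, iterations, outputs))
               for i, dp in enumerate(designpoints)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()

    # {dp_name: (mean TTF, mean consumption, size)} as written by the workers.
    return dict(outputs)

On platforms that spawn rather than fork worker processes, such a driver should be invoked from under an if __name__ == "__main__": guard.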
Example #5
    def simulate(self):
        '''
        This method is used to solve the resulting initial value problem
        after the computation of a solution for the input trajectories.
        '''

        logging.debug("Solving Initial Value Problem")

        # calculate simulation time
        T = self.dyn_sys.b - self.dyn_sys.a
        
        # get list of start values
        start = []

        if self.constraints is not None:
            sys = self._dyn_sys_orig
        else:
            sys = self.dyn_sys
            
        x_vars = sys.states
        start_dict = dict([(k, v[0]) for k, v in sys.boundary_values.items() if k in x_vars])
        ff = sys.f_num
        
        for x in x_vars:
            start.append(start_dict[x])
        
        # create simulation object
        S = Simulator(ff, T, start, self.eqs.trajectories.u)
        
        logging.debug("start: %s"%str(start))
        
        # start forward simulation
        self.sim_data = S.simulate()
Example #6
def compareChurnInjection():
    s = Simulator()
    random.seed(125)
    s.setupSimulation(strategy="churn",
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0.01)
    loads1, medians1, means1, maxs1, devs1 = s.simulateLoad()
    random.seed(125)
    s = Simulator()
    s.setupSimulation(strategy='randomInjection',
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0)
    loads2 = s.simulateLoad()[0]
    for i in range(0, len(loads1), 5):
        x1 = loads1[i]
        x2 = loads2[i]
        colors = ["r", "b"]
        labels = ["Churn", "Random Injection"]
        plt.hist([x1, x2], 25, density=True, color=colors, label=labels)

        plt.legend(loc=0)
        plt.title('Churn vs Random Injection at Tick ' + str(i))

        plt.xlabel('Tasks Per Node')
        plt.ylabel('Fraction of the Network')
        #plt.ylim(0, 0.05)
        plt.show()
Example #7
def compareInjectionStable():
    s = Simulator()
    random.seed(125)
    s.setupSimulation(strategy="churn",
                      homogeneity="equal",
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0)
    loads1, medians1, means1, maxs1, devs1 = s.simulateLoad()
    random.seed(125)
    s = Simulator()
    s.setupSimulation(strategy="randomInjection",
                      homogeneity="perStrength",
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0)
    loads2 = s.simulateLoad()[0]
    for i in range(0, len(loads1), 5):
        x1 = loads1[i]
        x2 = loads2[i]
        colors = ["k", "w"]
        labels = ["No Strategy", "Random Injection"]
        plt.hist([x1, x2], 25, density=True, color=colors, label=labels)

        plt.legend(loc=0)
        plt.title('Random Injection in a Heterogeneous Network at Tick ' +
                  str(i))

        plt.xlabel('Tasks Per Node')
        plt.ylabel('Fraction of the Network')
        #plt.ylim(0, 0.05)
        plt.show()
Example #8
def compareInviteNeighbor():
    s = Simulator()
    random.seed(125)
    s.setupSimulation(strategy="neighbors",
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0)
    loads1, medians1, means1, maxs1, devs1 = s.simulateLoad()
    random.seed(125)
    s = Simulator()
    s.setupSimulation(strategy="invite",
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0)
    loads2 = s.simulateLoad()[0]
    for i in range(0, len(loads1), 5):
        x1 = loads1[i]
        x2 = loads2[i]
        colors = ["k", "w"]
        labels = ["Neighbors", "Invitation"]
        plt.hist([x1, x2], 25, density=True, color=colors, label=labels)

        plt.legend(loc=0)
        plt.title('Invitation vs Smart Neighbors at Tick ' + str(i))

        plt.xlabel('Tasks Per Node')
        plt.ylabel('Fraction of the Network')
        #plt.ylim(0, 0.05)
        plt.show()
Example #9
def validate_MDP_policy(root_path, flag_with_obs=True, flag_plan=True):
    s_g = np.array([4.0, 3.0, 0.0])
    modality = "haptic"

    obs_list = [(1.5, 2.00, 1.0, 1.25),
                (2.0, 1.0, 1.25, 0.5)]

    if flag_with_obs:
        file_name = root_path + "/mdp_planner_obs_" + modality + ".pkl"
    else:
        file_name = root_path + "/mdp_planner_" + modality + ".pkl"

    if flag_plan:
        human_model = MovementModel()
        human_model.load_model(root_path)
        human_model.set_default_param()

        planner = MDPFixedTimePolicy(tmodel=human_model)

        if flag_with_obs:
            planner.gen_env(obs_list)

        planner.compute_policy(s_g, modality, max_iter=30)
    else:
        with open(file_name, "rb") as f:
            planner = pickle.load(f)

    fig, axes = plt.subplots()
    planner.visualize_policy(axes)
    plt.show()

    if flag_plan:
        with open(file_name, "w") as f:
            pickle.dump(planner, f)

    sim = Simulator(planner)
    n_trials = 30

    traj_list = []
    start_time = time.time()
    for i in range(n_trials):
        traj_list.append(sim.run_trial((0.5, 0.5, 0.0), s_g, modality, 30.0, tol=0.5))

    print "--- %s seconds ---" % (time.time() - start_time)

    fig, axes = plt.subplots()
    for i in range(n_trials):
        t, traj = traj_list[i]
        axes.plot(traj[:, 0], traj[:, 1])
    axes.axis("equal")

    axes.scatter(s_g[0], s_g[1])

    if flag_with_obs:
        for x, y, w, h in obs_list:
            rect = Rectangle((x, y), w, h)
            axes.add_patch(rect)

    plt.show()
Example #10
def validate_MDP_policy(root_path, flag_with_obs=True, flag_plan=True):
    s_g = np.array([4.0, 3.0, 0.0])
    modality = "haptic"

    obs_list = [(1.5, 2.00, 1.0, 1.25), (2.0, 1.0, 1.25, 0.5)]

    if flag_with_obs:
        file_name = root_path + "/mdp_planner_obs_" + modality + ".pkl"
    else:
        file_name = root_path + "/mdp_planner_" + modality + ".pkl"

    if flag_plan:
        human_model = MovementModel()
        human_model.load_model(root_path)
        human_model.set_default_param()

        planner = MDPFixedTimePolicy(tmodel=human_model)

        if flag_with_obs:
            planner.gen_env(obs_list)

        planner.compute_policy(s_g, modality, max_iter=30)
    else:
        with open(file_name, "rb") as f:
            planner = pickle.load(f)

    fig, axes = plt.subplots()
    planner.visualize_policy(axes)
    plt.show()

    if flag_plan:
        with open(file_name, "w") as f:
            pickle.dump(planner, f)

    sim = Simulator(planner)
    n_trials = 30

    traj_list = []
    start_time = time.time()
    for i in range(n_trials):
        traj_list.append(
            sim.run_trial((0.5, 0.5, 0.0), s_g, modality, 30.0, tol=0.5))

    print "--- %s seconds ---" % (time.time() - start_time)

    fig, axes = plt.subplots()
    for i in range(n_trials):
        t, traj = traj_list[i]
        axes.plot(traj[:, 0], traj[:, 1])
    axes.axis("equal")

    axes.scatter(s_g[0], s_g[1])

    if flag_with_obs:
        for x, y, w, h in obs_list:
            rect = Rectangle((x, y), w, h)
            axes.add_patch(rect)

    plt.show()
Example #11
    def __init__(self):
        self.dt = 0.01
        self.max_force = 125
        self.step_n = 450  # number of steps
        self.simulator = Simulator()
        self.Ctheta = -1
        self.Ktheta = 0.05
        self.Cx = 0.5
        self.Kx = 10
Example #12
def testChurnSteps():
    s = Simulator()
    s.setupSimulation(strategy='churn',
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0.001)
    loads, medians, means, maxs, devs = s.simulateLoad()
    print(medians)
    print(devs)
Example #13
def runSim(trials):
    sim = Simulator(requests)
    outFile = open("data/results/{0}_{1}_{2}_delays.results".format(week, day, trials), 'w')

    # Report the results to the open file.
    def reportResults(report):
        report.printTSV(outFile)
        if trials > 1:
            runSim(trials - 1)

    # Run the simulation.
    tieBreakingPolicy = crisisThresholdsAndFlip(10 * 60, 40 * 60, 1.5)
    sim.run(reportResults, tieBreakingPolicy, regular, senior)
Example #14
def testInjectionSteps():
    s = Simulator()
    s.setupSimulation(strategy='randomInjections', workMeasurement="one", numNodes=1000, numTasks=100000)
    loads, medians, means, maxs, devs = s.simulateLoad()
    for i in range(0, len(loads), 5):
        x = loads[i]
        plt.hist(x, 100, density=True)
        plt.xlabel('Tasks Per Node')
        plt.ylabel('Probability')
        plt.axvline(medians[i], color='r', linestyle='--')
        plt.axvline(means[i], color='k', linestyle='--')
        #plt.ylim(0, 0.05)
        plt.show()
Example #15
def save_timing_performance(timing, filename):
    full_timings = [3] * 8
    full_timings[0] = timing[0]
    full_timings[4] = timing[1]
    traffic_light = PhaseModifier("node1")
    controller = StaticTrafficLightController(controller=traffic_light,
                                              sequence=list(range(8)),
                                              timings=full_timings)
    sim = Simulator()
    sim.add_simulation_component(SimulationOutputParser)
    sim.add_tickable(controller)
    sim.run(sumocfg1, gui=False)
    sim.save_results(filename)
Example #16
def plotLoads():
    s = Simulator()
    seed = 500
    loads = []
    for _ in range(20):
        random.seed(seed)
        s.setupSimulation(numNodes=1000, numTasks=1000000)
        loads = loads + [len(x.tasks) for x in s.nodes.values()]
        seed += 1
    n, bins, patches = plt.hist(loads, 25, density=True)
    plt.xlabel('Tasks Per Node')
    plt.ylabel('Probability')
    plt.axvline(statistics.median_low(loads), color='r', linestyle='--')
    plt.show()
Example #17
def plotLoads():
    s = Simulator()
    seed = 500
    loads = []
    for _ in range(20):
        random.seed(seed)
        s.setupSimulation(numNodes=1000, numTasks=1000000)
        loads = loads + [len(x.tasks) for x in s.nodes.values()]
        seed += 1
    n, bins, patches = plt.hist(loads, 25, density=True)
    plt.xlabel('Tasks Per Node')
    plt.ylabel('Probability')
    plt.axvline(statistics.median_low(loads), color='r', linestyle='--')
    plt.show()
Example #18
def monte_carlo_iterative(designpoints, sample_budget, all_samples=False):
    """ Iterative implementation of the MCS to rank the given design points.

    :param designpoints: [DesignPoint object] - List of designpoint objects (the candidates).
    :param sample_budget: int - total number of MC samples to run (split evenly over the design points).
    :return: {index: (MTTF, power consumption, size)} - results per design point, keyed by index.
    """
    TTFs = {i: [] for i in range(len(designpoints))}
    consumptions = {i: [] for i in range(len(designpoints))}

    sims = [Simulator(d) for d in designpoints]

    i_per_dp = sample_budget // len(designpoints)

    for i in range(len(designpoints)):
        for _ in range(i_per_dp):
            ttf, consum, size = sims[i].run()
            TTFs[i].append(ttf)
            consumptions[i].append(consum)

    for i in range(len(TTFs)):
        TTFs[i] = sum(TTFs[i]) / len(TTFs[i])
        consumptions[i] = sum(consumptions[i]) / len(consumptions[i])

    output = {i: [] for i in range(len(designpoints))}

    for i in TTFs:
        output[i] = (TTFs[i], consumptions[i], designpoints[i].evaluate_size())

    return output
Example #19
def testInjectionSteps():
    s = Simulator()
    s.setupSimulation(strategy='randomInjections',
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000)
    loads, medians, means, maxs, devs = s.simulateLoad()
    for i in range(0, len(loads), 5):
        x = loads[i]
        plt.hist(x, 100, density=True)
        plt.xlabel('Tasks Per Node')
        plt.ylabel('Probability')
        plt.axvline(medians[i], color='r', linestyle='--')
        plt.axvline(means[i], color='k', linestyle='--')
        #plt.ylim(0, 0.05)
        plt.show()
Example #20
def siumlate_with_input(tp, inputseq, n_parts):
    """

    :param tp:          TransitionProblem
    :param inputseq:    Sequence of input values (will be spline-interpolated)
    :param n_parts:     number of spline parts for the input
    :return:
    """

    tt = np.linspace(tp.a, tp.b, len(inputseq))
    # currently only for single input systems
    su1 = new_spline(tp.b, n_parts, (tt, inputseq), 'u1')
    sim = Simulator(tp.dyn_sys.f_num_simulation, tp.b, tp.dyn_sys.xa, x_col_fnc=None,
                    u_col_fnc=su1.f)
    tt, xx, uu = sim.simulate()

    return tt, xx, uu
Example #21
def mab_so_gradient(designpoints,
                    step_size,
                    nr_samples=10000,
                    idx=0,
                    func=max):
    """ Single objective gradient bandits algorithm.

    Will first explore all design points once and then only take Upper-Confidence-Bound actions.
    Since the output of the simulator will have multiple values (multi-objective), the idx
    variable can be used to specify which value should be used by the MAB (default=TTF).

    :param designpoints: [DesignPoint object] - List of DesignPoint objects (the candidates).
    :param step_size: step size of the algorithm
    :param nr_samples: number of samples
    :param idx: index of simulator return value to use as objective
    :param func: function to select the best DesignPoint (should be max or min).
    :return: [(mean of samples, nr_samples)] - will return the mean of the sampled values
                                               and the amount of samples taken for this dp.
    """
    simulators = [Simulator(dp) for dp in designpoints]

    k = len(designpoints)  # total number of bandits
    H = np.zeros(k)  # preference value per bandit
    P = np.ones(k) / k  # selection probability per bandit (softmax of H)
    N = np.zeros(k)  # number of samples taken per bandit
    qt = np.zeros(k)  # empirical mean reward per bandit

    avg_reward = 0

    for t in range(1, nr_samples + 1):
        A = np.random.choice(k, 1, p=P)[0]
        N[A] += 1
        R = simulators[A].run()[idx]
        qt[A] += (R - qt[A]) / N[A]
        R /= 100000

        avg_reward += (R - avg_reward) / t
        baseline = avg_reward

        H[A] += step_size * (R - baseline) * (1 - P[A])

        for a in range(k):
            if a != A:
                H[a] -= step_size * (R - baseline) * P[a]

        aux_exp = np.exp(H)
        P = aux_exp / np.sum(aux_exp)

    print(
        "Best Gradient candidates:",
        sorted(
            set([
                i for (i, val) in sorted(zip(np.arange(qt.size), qt),
                                         key=lambda x: x[1],
                                         reverse=True)
            ][:TOP_CANDIDATES])))

    return list(zip(qt, N))
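For reference, the update implemented above is the standard gradient-bandit rule (as in Sutton & Barto), with the running average reward as baseline and step_size playing the role of the step-size parameter alpha:

$$H_{t+1}(a) = H_t(a) + \alpha\,(R_t - \bar R_t)\,\big(\mathbb{1}[a = A_t] - \pi_t(a)\big), \qquad \pi_t(a) = \frac{e^{H_t(a)}}{\sum_b e^{H_t(b)}}$$

H holds the preferences and P is recomputed as the softmax of H after every update.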
Example #22
    def simulate(self):
        """
        This method is used to solve the resulting initial value problem
        after the computation of a solution for the input trajectories.
        """

        self.log_debug("Solving Initial Value Problem")

        # calculate simulation time
        T = self.dyn_sys.b - self.dyn_sys.a

        ##:ck: obsolete comment?
        # Todo T = par[0] * T

        # get list of start values
        start = self.dyn_sys.xa

        ff = self.dyn_sys.f_num_simulation

        par = self.get_par_values()
        # create simulation object
        x_fncs, xdot_fncs, u_fnc = self.get_constrained_spline_fncs()

        mpc_flag = self.nIt >= self.mpc_sim_threshold
        self.simulator = Simulator(ff,
                                   T,
                                   start,
                                   x_col_fnc=x_fncs,
                                   u_col_fnc=u_fnc,
                                   z_par=par,
                                   dt=self._parameters['dt_sim'],
                                   mpc_flag=mpc_flag)

        self.log_debug("start: %s" % str(start))

        # forward simulation
        self.sim_data = self.simulator.simulate()

        ##:: simulate() returns a list
        # [np.array(self.t), np.array(self.xt), np.array(self.ut)];
        # self.sim_data is an instance attribute (initialized with None in __init__).

        # convenient access
        self.sim_data_tt, self.sim_data_xx, self.sim_data_uu = self.sim_data
Example #23
def mab_so_gape_v(designpoints, a, b, m, nr_samples=1000, idx=0):
    """ Single-objective MAB Gap-based Exploration with Variance (GapE-V).

    :param designpoints: [DesignPoint object] - List of DesignPoint objects (the candidates).
    :param a: degree of exploration
    :param b: float - maximum expected value from samples
    :param m: amount of designs to select
    :param nr_samples: number of samples
    :param idx: index of simulator return value to use as objective
    :return: [(mean of samples, nr_samples)] - will return the mean of the sampled values
                                               and the amount of samples taken for this dp.
    """
    simulators = [Simulator(dp) for dp in designpoints]
    ui = [(i, simulators[i].run()[idx])
          for i in range(len(simulators))]  # empirical means
    oi = [0 for _ in range(len(simulators))]
    T = [1 for _ in range(len(designpoints))]

    gap_d = [0 for _ in range(len(designpoints))]
    indices = [0 for _ in range(len(designpoints))]

    for t in range(len(simulators), nr_samples):
        sorted_indices = [
            i for (i, val) in sorted(ui, key=lambda x: x[1], reverse=True)
        ]

        i_star_up = sorted_indices[m]
        i_star_down = sorted_indices[m + 1]

        for i in sorted_indices[:m]:
            gap_d[i] = ui[i][1] - ui[i_star_down][1]

        for i in sorted_indices[m:]:
            gap_d[i] = ui[i_star_up][1] - ui[i][1]

        for i in sorted_indices:
            indices[i] = -gap_d[i] + math.sqrt(
                (2 * a * oi[i]) / T[i]) + (7 * a * b) / (3 * T[i])

        j = indices.index(max(indices))
        new_sample = simulators[j].run()[idx]
        prev_ui = ui[j][1]  # stores the current empiric mean
        ui[j] = (j, ui[j][1] + (new_sample - ui[j][1]) / T[j])
        # iterative variance calculation for o_i
        oi[j] += ((new_sample - prev_ui) *
                  (new_sample - ui[j][1]) - oi[j]) / T[j]
        T[j] += 1

    print(
        "Best GapE-V candidates:",
        sorted(
            set([
                i for (i, val) in sorted(ui, key=lambda x: x[1], reverse=True)
            ][:TOP_CANDIDATES])))

    return list(zip([x[1] for x in ui], T))
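The index built in the inner loop is the GapE-V criterion: for each arm with empirical gap $\hat{\Delta}_i$ (gap_d), empirical variance $\hat{\sigma}_i^2$ (oi) and sample count $T_i$,

$$B_i = -\hat{\Delta}_i + \sqrt{\frac{2\,a\,\hat{\sigma}_i^2}{T_i}} + \frac{7\,a\,b}{3\,T_i}$$

and the arm with the largest $B_i$ is sampled next, so the exploration parameter $a$ and the reward bound $b$ directly control how aggressively uncertain arms are revisited.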
Example #24
def simulate_naive_policy(n_trials, s_g, modality, usr):
    planner = NaivePolicy()

    model_path = "/home/yuhang/Documents/proactive_guidance/training_data/user" + str(usr)
    sim = Simulator(planner, model_path)

    traj_list = []
    for i in range(n_trials):
        traj_list.append(sim.run_trial((-1.0, 2.0, 0.0), s_g, modality, 20.0, tol=0.5))

    fig, axes = plt.subplots()
    for i in range(n_trials):
        t, traj = traj_list[i]
        axes.plot(traj[:, 0], traj[:, 1])
    axes.axis("equal")

    axes.scatter(s_g[0], s_g[1])

    plt.show()
Example #25
def sSAR(individuals, p, S, n):
    """ Scalarized SAR implementation as presented in algorithm 3 of [Drugan&Nowe2014]

    :param individuals: list of design points
    :param p: number of individuals to select
    :param S: list of scalarized functions
    :param n: total number of samples
    :return: Set of selected candidates
    """
    accepted_arms = [set() for _ in range(len(S))]

    K = len(individuals)  # Number of arms (K - 1 elimination rounds are run)
    A_all = [[i for i in range(K)] for _ in range(len(S))
             ]  # Contains bandits for each scalarization function
    A = [i for i in range(K)]  # Total active arms
    P_i = [p for _ in range(len(S))]
    sims = [Simulator(i) for i in individuals]  # Simulators per individual

    N = [0 for _ in range(K)]  # Number of samples per individual
    ui = [[0 for _ in range(NR_OBJECTIVES)]
          for _ in range(K)]  # empirical reward vector

    n_k = 0
    LOG_K = 1 / 2 + sum([1 / i for i in range(2, K + 1)])

    for k in range(1, K):
        n_k_prev = n_k
        n_k = math.ceil((1 / LOG_K) * ((n - K) / (K + 1 - k)))
        samples = int(
            n_k - n_k_prev)  # Number of samples per individual in this phase

        for i in A:  # for all active bandits
            for _ in range(
                    samples):  # Sample each bandit and update empirical vector
                N[i] += 1
                reward_vector = sims[i].run()
                ui[i] = update_empirical_mean(normalize(reward_vector), ui[i],
                                              N[i])

        for i in range(len(S)):
            max_gap_idx, accepted = delta_pk_ij(ui, A_all[i], S[i],
                                                P_i[i] - len(accepted_arms[i]))
            A_all[i].remove(max_gap_idx)

            if accepted:  # Store the arms that are accepted by a function for this round
                accepted_arms[i].add(max_gap_idx)

        # Updates A to only include any arm that has not yet been removed by an F_j
        A = set().union(*A_all)

    # invert normalization of mean values
    ui = [normalize(i, invert=True) for i in ui]

    return set.union(*accepted_arms), ui, N
Example #26
def validate_free_space_policy(planner, s_g, modality, path, model_path):
    fig, axes = plt.subplots()
    planner.visualize_policy(axes)
    fig.savefig(path + "/value_func.png")

    sim = Simulator(planner, model_path)
    n_trials = 30

    traj_list = []
    for i in range(n_trials):
        traj_list.append(sim.run_trial((-1.0, 2.0, 0.0), s_g, modality, 30.0, tol=0.5))

    fig, axes = plt.subplots()
    for i in range(n_trials):
        t, traj = traj_list[i]
        axes.plot(traj[:, 0], traj[:, 1])
    axes.axis("equal")

    axes.scatter(s_g[0], s_g[1])
    fig.savefig(path + "/simulation.png")
Example #27
def simulate_naive_policy(n_trials, s_g, modality, usr):
    planner = NaivePolicy()

    model_path = "/home/yuhang/Documents/proactive_guidance/training_data/user" + str(
        usr)
    sim = Simulator(planner, model_path)

    traj_list = []
    for i in range(n_trials):
        traj_list.append(
            sim.run_trial((-1.0, 2.0, 0.0), s_g, modality, 20.0, tol=0.5))

    fig, axes = plt.subplots()
    for i in range(n_trials):
        t, traj = traj_list[i]
        axes.plot(traj[:, 0], traj[:, 1])
    axes.axis("equal")

    axes.scatter(s_g[0], s_g[1])

    plt.show()
Example #28
def validate_free_space_policy(planner, s_g, modality, path, model_path):
    fig, axes = plt.subplots()
    planner.visualize_policy(axes)
    fig.savefig(path + "/value_func.png")

    sim = Simulator(planner, model_path)
    n_trials = 30

    traj_list = []
    for i in range(n_trials):
        traj_list.append(
            sim.run_trial((-1.0, 2.0, 0.0), s_g, modality, 30.0, tol=0.5))

    fig, axes = plt.subplots()
    for i in range(n_trials):
        t, traj = traj_list[i]
        axes.plot(traj[:, 0], traj[:, 1])
    axes.axis("equal")

    axes.scatter(s_g[0], s_g[1])
    fig.savefig(path + "/simulation.png")
Example #29
def esSR(individuals, S, n):
    """ Efficient Scalarized Succesive Reject multi-armed bandit algorithm.
    Algorithm2 as presented in [Drugan & Nowe (2014)].

    Samples a list of individual design points in phases and will reject a design point
    per phase with multiple scalarization functions (S).
    At the end only one design point will remain, which is considered the best arm.

    :param individuals: list of design points
    :param S: list of scalarized functions
    :param n: total number of samples
    :return: list of (empirical mean vector, number of samples) tuples per arm
    """
    K = len(individuals)
    A = [i for i in range(K)]  # Contains all active bandits
    A_all = [[i for i in range(K)] for _ in range(len(S))
             ]  # Contains bandits for each scalarization function
    N = [0 for _ in range(K)]  # Number of samples per bandit
    ui = [[0 for _ in range(NR_OBJECTIVES)]
          for _ in range(K)]  # empirical reward vector
    sims = [Simulator(i) for i in individuals]

    n_k = 0
    LOG_K = 1 / 2 + sum([1 / i for i in range(2, K + 1)])

    for k in range(1, K):
        n_k_prev = n_k
        n_k = math.ceil(1 / LOG_K * (n - K) / (K + 1 - k))

        samples = int(n_k - n_k_prev)  # Number of samples per phase

        for i in A:  # for all active bandits
            for _ in range(
                    samples):  # Sample each bandit and update empirical vector
                N[i] += 1
                ui[i] = update_empirical_mean(sims[i].run(), ui[i], N[i])

        for i in range(
                len(S)):  # for each scalarization function dismiss a bandit
            A_j = A_all[i]
            scalarized_rewards = [(z, S[i](ui[z])) for z in A_j if z in A_j]

            idx = min(scalarized_rewards, key=lambda t: t[1])[0]
            A_j.remove(
                idx)  # deletes the worst individual of this scalarization func

        A = set().union(
            *A_all
        )  # Updates A to only include any arm that has not yet been removed by an F_j

    print("Accepted:", A)
    return list(zip(ui, N))
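The phase lengths used above are the Successive Rejects budget split: with $K$ arms and a total budget of $n$ samples,

$$n_k = \left\lceil \frac{1}{\overline{\log}(K)}\,\frac{n-K}{K+1-k} \right\rceil, \qquad \overline{\log}(K) = \frac{1}{2} + \sum_{i=2}^{K} \frac{1}{i}$$

which is what LOG_K and n_k compute; each of the K - 1 phases then draws n_k - n_{k-1} fresh samples per active arm before every scalarization function rejects its worst arm.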
Example #30
def SAR(individuals, m, nr_samples=1000):
    """ Scalarized multi-objective MAB evaluation via Successive Accept Reject (SAR).

    :param individuals: individuals provided by the ga (must have fitness attributes)
    :param m: int - length of best-arm set
    :param nr_samples: int - amount of samples
    :return: [(idx, sampled_value)]
    """
    simulators = [Simulator(d) for d in individuals]
    D = len(individuals)
    A = [i for i in range(D)]
    N = [0 for _ in range(D)]
    ui = [(i, 0) for i in range(len(simulators))]  # empirical means
    S = set()

    LOG_D = 1 / 2 + sum([1 / i for i in range(2, D + 1)])
    m_o = m

    for k in range(1, D):
        samples = int(f_n_k(k, nr_samples, D) - f_n_k(k - 1, nr_samples, D))
        for i in A:
            for _ in range(samples):
                new_sample = l_scale(list(simulators[i].run()) +
                                     [individuals[i].evaluate_size()],
                                     weights=(1, -1, -1))
                N[i] += 1
                ui[i] = (i, ui[i][1] + (new_sample - ui[i][1]) / N[i])

        sorted_indices = [
            i for (i, val) in sorted(ui, key=lambda x: x[1], reverse=True)
            if i in A
        ]

        i_star_up = sorted_indices[m_o]
        i_star_down = sorted_indices[m_o + 1]
        gap_d = [0 for _ in range(D)]

        for i in sorted_indices[:m_o]:
            gap_d[i] = ui[i][1] - ui[i_star_down][1]

        for i in sorted_indices[m_o:]:
            gap_d[i] = ui[i_star_up][1] - ui[i][1]

        j = gap_d.index(max(gap_d))

        if j == sorted_indices[0]:
            S.add(j)
            m_o -= 1

        A = [i for i in A if i != j]

    return ui
Example #31
def pareto_ucb1(individuals, k, nr_samples=500):
    """ Implementation of the pareto Upper-Confidence-Bound1 (pareto UCB1) pseudocode [Drugan&Nowe(2013)]

    :param individuals: individuals provided by the ga (must have fitness attributes)
    :param k: The number of individuals to select.
    :param nr_samples: int - total number of samples to take.
    :return: ([empirical mean vector per individual], [number of samples per individual])
    """
    n = len(individuals)
    simulators = {individuals[i]: Simulator(individuals[i]) for i in range(n)}

    # Samples per individual
    N = {individuals[i]: 1 for i in range(n)}

    # Empirical mean vector per individual
    ui = {
        individuals[i]: list(normalize(simulators[individuals[i]].run()))
        for i in range(n)
    }

    # individual fitness values are empirical means
    for i in individuals:
        mttf, pow_usage, size = ui[i]
        i.fitness.values = (mttf, pow_usage, size)

    samples = len(individuals)

    while samples < nr_samples:
        A_star = sortNondominated(individuals, k, first_front_only=True)[0]
        # Adds confidence interval
        add_confidence_interval(individuals, list(N.values()), len(A_star))

        A_p = sortNondominated(individuals, k, first_front_only=True)[0]
        # Removes confidence interval
        add_confidence_interval(individuals,
                                list(N.values()),
                                len(A_star),
                                subtract=True)

        a = np.random.choice(A_p)

        N[a] += 1
        samples += 1

        ui[a] = update_empirical_mean(normalize(simulators[a].run()), ui[a],
                                      N[a])
        a.fitness.values = ui[a]

    return [normalize(ui[individuals[i]], invert=True)
            for i in range(n)], [N[individuals[i]] for i in range(n)]
Example #32
    def get_simulator_example(cap1=100,
                              cap2=100,
                              loc1=(0, 0),
                              loc2=(1, 1),
                              app1=50,
                              app2=50):
        c1 = Component(cap1, loc1)
        c2 = Component(cap2, loc2)

        a1 = Application(app1)
        a2 = Application(app2)

        dp = DesignPoint([c1, c2], [a1, a2], [(c1, a1), (c2, a2)])

        return Simulator(dp)
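A possible way to exercise this helper in a test, assuming it is reachable as shown (e.g. as a staticmethod) and that Simulator.run() returns a (TTF, consumption, size) tuple as it is unpacked in the other examples here; the test name and thresholds are illustrative only:

def test_two_component_simulator_runs():
    sim = get_simulator_example(cap1=80, cap2=120, app1=40, app2=60)
    ttf, consumption, size = sim.run()  # unpacked the same way as in run_n_simulations above
    assert ttf >= 0 and consumption >= 0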
Example #33
def evaluate_timing(timing):
    traffic_light = PhaseModifier("node1")
    full_timings = [3] * 8
    full_timings[0] = timing[0]
    full_timings[4] = timing[1]
    controller = StaticTrafficLightController(controller=traffic_light,
                                              sequence=list(range(8)),
                                              timings=full_timings)
    sim = Simulator()
    sim.add_simulation_component(SimulationOutputParser)
    sim.add_tickable(controller)
    if not sim.run(sumocfg1, time_steps=2000, gui=False):
        return sim.results
    return False
Example #34
def mab_so_epsilon_greedy(designpoints, e, nr_samples=10000, idx=0, func=max):
    """ Single objective epsilon-greedy evaluation of the given list of design points.

    Will first explore all design points once and then only take epsilon-greedy actions.
    Since the output of the simulator will have multiple values (multi-objective), the idx
    variable can be used to specify which value should be used by the MAB (default=TTF).

    :param designpoints: [DesignPoint object] - List of DesignPoint objects (the candidates).
    :param e: epsilon value in [0.0, 1.0] indicating the probability to explore a random dp.
    :param nr_samples: number of samples
    :param idx: index of simulator return value to use as objective
    :param func: function to select the best DesignPoint (should be max or min).
    :return: [(mean of samples, nr_samples)] - will return the mean of the sampled values
                                               and the amount of samples taken for this dp.
    """
    simulators = [Simulator(dp) for dp in designpoints]
    samples = [1 for _ in simulators]
    qt = [sim.run()[idx]
          for sim in simulators]  # Q_t = est value of action a at timestep t

    for _ in range(len(simulators), nr_samples):
        if random.random() < e:  # epsilon exploration
            a = random.randint(0, len(simulators) - 1)
        else:  # greedy exploitation
            a = random.choice(
                [i for i, val in enumerate(qt) if val == func(qt)])

        new_sample = simulators[a].run()[idx]
        samples[a] += 1
        # Incremental implementation of MAB [Sutton & Barto(2011)].
        qt[a] += (new_sample - qt[a]) / samples[a]

    print(
        "Best e-greedy candidates:",
        sorted(
            set([
                i for (i, val) in sorted(zip(np.arange(len(qt)), qt),
                                         key=lambda x: x[1],
                                         reverse=True)
            ][:TOP_CANDIDATES])))

    return list(zip(qt, samples))
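The in-loop update follows the incremental sample-average rule referenced in the comment (Sutton & Barto):

$$Q_{n+1} = Q_n + \frac{1}{n}\,(R_n - Q_n)$$

which is exactly what qt[a] += (new_sample - qt[a]) / samples[a] computes, so no per-arm reward history has to be stored.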
Example #35
def simulator(request):
    from main import AutotestingApp
    application = AutotestingApp()
    simulator = Simulator(application)

    def fin():

        simulator.clean_queue()

        from kivy.lang import Builder

        files = list(Builder.files)
        for filename in files:
            Builder.unload_file(filename)
        kivy_style_filename = os.path.join(kivy_data_dir, 'style.kv')
        if kivy_style_filename not in Builder.files:
            Builder.load_file(kivy_style_filename, rulesonly=True)

    request.addfinalizer(fin)
    return simulator
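A minimal usage sketch, assuming the function above is registered as a pytest fixture (decorated with @pytest.fixture, e.g. in a conftest.py): pytest injects it by parameter name and runs the registered finalizer after the test. The test name and assertion are illustrative only:

def test_simulator_fixture(simulator):
    # The fixture provides the Simulator wrapping AutotestingApp; the finalizer
    # later cleans its queue and restores the Kivy Builder rules.
    assert simulator is not None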
Example #36
def mab_so_ucb(designpoints, c, nr_samples=10000, idx=0, func=max):
    """ Single objective Upper-Confidence-Bound (UCB) action selection

    Will first explore all design points once and then only take Upper-Confidence-Bound actions.
    Since the output of the simulator will have multiple values (multi-objective), the idx
    variable can be used to specify which value should be used by the MAB (default=TTF).

    :param designpoints: [DesignPoint object] - List of DesignPoint objects (the candidates).
    :param c: degree of exploration
    :param nr_samples: number of samples
    :param idx: index of simulator return value to use as objective
    :param func: function to select the best DesignPoint (should be max or min).
    :return: [(mean of samples, nr_samples)] - will return the mean of the sampled values
                                               and the amount of samples taken for this dp.
    """
    simulators = [Simulator(dp) for dp in designpoints]
    samples = [1 for _ in simulators]
    qt = [sim.run()[idx]
          for sim in simulators]  # Q_t = est value of action a at timestep t

    for t in range(len(simulators), nr_samples):
        actions = [
            qt[i] + c * math.sqrt(math.log(t) / samples[i])
            for i in range(len(simulators))
        ]
        a = random.choice(
            [i for i, val in enumerate(actions) if val == func(actions)])
        samples[a] += 1
        new_sample = simulators[a].run()[idx]
        qt[a] += (new_sample - qt[a]) / samples[a]

    print(
        "Best UCB candidates:",
        sorted(
            set([
                i for (i, val) in sorted(zip(np.arange(len(qt)), qt),
                                         key=lambda x: x[1],
                                         reverse=True)
            ][:TOP_CANDIDATES])))

    return list(zip(qt, samples))
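With func=max, the scores assembled in the loop implement the familiar UCB1 action selection

$$A_t = \arg\max_a \left[ Q_t(a) + c\,\sqrt{\frac{\ln t}{N_t(a)}} \right]$$

with ties broken uniformly at random by random.choice.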
Example #37
def compareChurnStable():
    s = Simulator()
    random.seed(125)
    s.setupSimulation(strategy="churn",
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0)
    loads1, medians1, means1, maxs1, devs1 = s.simulateLoad()
    random.seed(125)
    s = Simulator()
    s.setupSimulation(strategy="churn",
                      workMeasurement="one",
                      numNodes=1000,
                      numTasks=100000,
                      churnRate=0.1)
    loads2 = s.simulateLoad()[0]
    for i in range(0, len(loads1), 5):
        x1 = loads1[i]
        x2 = loads2[i]
        colors = ["k", "w"]
        labels = ["No Strategy", "Churn"]
        plt.hist([x1, x2], 25, density=True, color=colors, label=labels)

        plt.legend(loc=0)
        plt.title('Churn vs No Strategy at Tick ' + str(i))
        
        plt.xlabel('Tasks Per Node')
        plt.ylabel('Fraction of the Network')
        #plt.ylim(0, 0.05)
        plt.show()
Example #38
from network import Network
from process import Process
from simulation import Simulator
from numpy import inf
import oracle
import random
import action
import time

fault_rates = [100]
start = time.perf_counter()
sim = Simulator()
sim.run_fault_experiment(fault_rates, 1, 100)
end = time.perf_counter()

print("Experiment finished! (", end - start, "s)")
sim.record_results("Linear-results-faults_1-10.txt", fault_rates)
Example #39
def testChurnSteps():
    s = Simulator()
    s.setupSimulation(strategy="churn", workMeasurement="one", numNodes=1000, numTask=100000, churnRate=0.001)
    loads, medians, means, maxs, devs = s.simulateLoad()
    print(medians)
    print(devs)