Example #1
    def _start_simulation(self):
        """Start simulation in pygame."""
        self.number_of_atoms = int(self.AtomNumInput.get())
        self.radius = int(self.AtomRadInput.get())
        self.velocity = int(self.AtomVelocityInput.get())
        self.time_coefficient = int(self.TimeInput.get())

        if self.number_of_atoms <= 100:
            self._scaling()
            # Create frame for pygame
            self.simulation_window = tk.Frame(self.master,
                                              height=CONTAINER_SIZE[0],
                                              width=CONTAINER_SIZE[1])
            self.simulation_window.grid(row=5, columnspan=2, padx=10, pady=10)

            # Embed pygame into frame
            # FIXME: Broken on Linux
            if os.name == "nt":
                os.environ["SDL_WINDOWID"] = str(
                    self.simulation_window.winfo_id())
            # Start simulation
            self.simulation = Simulation(
                self.radius,
                self.velocity,
                self.number_of_atoms,
                self.time_coefficient,
                self.simulation_window,
            )
            self.simulation._start()
        else:
            messagebox.showerror(
                "Error", "The number of atoms cannot exceed 100.")
Example #2
def pretrain():
    """Fill the replay memory with transitions collected from random actions."""
    sim = Simulation(nodes)
    state = sim.get_state()
    for i in range(pretrain_length):
        a = np.random.randint(0, len(actions)) # Random action
        new_state, reward, done = sim.step(actions[a])

        if done:
            # We finished the episode
            new_state = np.zeros(state.shape)
            memory.add((state, a, reward, new_state, done)) # Add experience to memory
            sim = Simulation(nodes) # Start a new episode
            state = sim.get_state() # First we need a state

        else:
            memory.add((state, a, reward, new_state, done)) # Add experience to memory
            state = new_state # Our state is now the next_state
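
`memory`, `nodes`, `pretrain_length`, and `actions` come from the surrounding script and are not shown here. A minimal sketch of a deque-backed replay buffer matching the `add` call above (the class name and the `sample` method are assumptions, not the original implementation):

import random
from collections import deque

class Memory:
    """Fixed-size experience replay buffer (assumed interface)."""

    def __init__(self, max_size=10000):
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        # experience = (state, action, reward, next_state, done)
        self.buffer.append(experience)

    def sample(self, batch_size):
        # Uniform random minibatch for training.
        return random.sample(self.buffer, batch_size)

memory = Memory(max_size=10000)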
Example #3
    def simulator(self, event):
        """ control start/stop simulation mode """
        target_id = event.target.text

        if target_id == 'reset':
            if self.simulation:
                self.simulation.reset()
            self.callback = self.on_select
            self.move_enabled = True
            self.ctx.clear(txt='>>> ')
        else:
            self.move_enabled = False
            oid = self.ctx.time()
            self.simulation = Simulation(oid, self)
            self.ctx.create(self.schema, oid)
            # FIXME add subscribe after websocket functions
            #self.ctx.subscribe(str(self.schema), str(oid))
            self.ctx.log(self.schema, oid, 'NEW')
            self.callback = self.on_trigger
Example #4
            config['simulation']['save_data'] = False
        else:
            config['simulation']['save_data'] = True
    if args.repeat:
        config['simulation']['repeat'] = args.repeat
    if args.save_figs is not None:
        if args.save_figs == 0:
            config['analysis']['save_to_file'] = False
        else:
            config['analysis']['save_to_file'] = True
    if args.logging:
        config['logging_level'] = args.logging
    if args.market_type:
        config['market_type'] = args.market_type

    print "Going to run with the following configuration:"
    pprint.pprint(config)

    if not args.no_confirm:
        answer = input('Do you want to continue? [Y/n]\r\n')

        if answer == 'n':
            exit(0)
    else:
        print("No confirm detected, continuing.")

    logging.basicConfig(level=config["logging_level"])

    sim = Simulation(config, args.aggregate_id, args.name)
    sim.run()
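
The `args` namespace is parsed earlier in the script. A sketch of an argparse setup that would supply the attributes used above; the flag names are assumptions inferred from the attribute names:

import argparse

parser = argparse.ArgumentParser(description="Run a market simulation.")
parser.add_argument('--repeat', type=int, help='number of simulation repeats')
parser.add_argument('--save-figs', dest='save_figs', type=int,
                    help='1 to save analysis figures, 0 to disable')
parser.add_argument('--logging', help='logging level, e.g. INFO or DEBUG')
parser.add_argument('--market-type', dest='market_type',
                    help='market model to simulate')
parser.add_argument('--no-confirm', dest='no_confirm', action='store_true',
                    help='skip the confirmation prompt')
parser.add_argument('--aggregate-id', dest='aggregate_id',
                    help='identifier shared by aggregated runs')
parser.add_argument('--name', help='name for this simulation run')
args = parser.parse_args()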
Example #5
    'delta_aileron': 0,
    'delta_rudder': 0,
    'delta_t': 0.5
}

trimmed_state, trimmed_controls = steady_state_trim(aircraft, environment, pos,
                                                    psi, TAS, controls0)

system = EulerFlatEarth(time0=0, tot_state=trimmed_state)

de0 = trimmed_controls['delta_elevator']

controls = {
    'delta_elevator': Doublet(t_init=2, T=1, A=0.1, offset=de0),
    'delta_aileron': Constant(trimmed_controls['delta_aileron']),
    'delta_rudder': Constant(trimmed_controls['delta_rudder']),
    'delta_t': Constant(trimmed_controls['delta_t'])
}

sim = Simulation(aircraft, system, environment, controls, dt=0.3)
results_03 = sim.propagate(25)

#sim = Simulation(aircraft, system, environment, controls, dt=0.05)
#results_005 = sim.propagate(25)

kwargs = {'subplots': True, 'sharex': True, 'figsize': (12, 100)}

#ax = results_005.plot(marker='.', color='r', **kwargs)
#ax = results_03.plot(ax=ax, marker='x', color='k', ls='', **kwargs)
results_03.to_excel("output.xlsx")
#plt.show()
Example #6
    results = np.zeros((5, 5))

    irs_thrs = [2, 4, 6, 8, 10]
    irs_max = [12, 14, 16, 18, 20]

    if False:  # toggle to enable the parameter sweep
        print("Going to sweep over maximum and threshold IRS values")

        for i, irs_ths in enumerate(irs_thrs):
            for j, irs_mx in enumerate(irs_max):
                config['model']['irs_threshold'] = irs_ths
                config['model']['max_irs_value'] = irs_mx

                aggregate_id = str(uuid.uuid4())
                sim = Simulation(config, aggregate_id)
                sim.run()
                a = Aggregate(config['file_root'], aggregate_id, True)

                results[i, j] = a.default_distribution()

        print(results)
        path = orig_file_root + "%s.png"
        cbmax = int(np.max(results) + 0.5)
        #cbmax = int(5.2+0.5)
        heat_map(results, "Exponent heatmap", 'IrsThreshold', 'MaxIrsValue',
                 irs_thrs, irs_max, range(0, cbmax), path, "IrsThs_IrsMax")

        min_alpha = np.min(results[np.where(results > 0)])

        (xr, yr) = np.where(results == min_alpha)
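
`heat_map` is a project helper whose implementation is not shown. A matplotlib-based stand-in with the call signature inferred from the call above (the signature and parameter meanings are assumptions):

import numpy as np
import matplotlib.pyplot as plt

def heat_map(data, title, xlabel, ylabel, xticks, yticks,
             cbar_ticks, path, name):
    """Render a labelled heat map and save it to `path % name` (assumed helper)."""
    fig, ax = plt.subplots()
    # Transpose so the row index (threshold sweep) runs along the x axis.
    im = ax.imshow(data.T, origin='lower', aspect='auto')
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xticks(np.arange(len(xticks)))
    ax.set_xticklabels(xticks)
    ax.set_yticks(np.arange(len(yticks)))
    ax.set_yticklabels(yticks)
    fig.colorbar(im, ticks=list(cbar_ticks))
    fig.savefig(path % name)
    plt.close(fig)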
Example #7
    alpha = float(input("Alpha: "))
    discount = float(input("Discount: "))
    series = int(input("Series: "))
    #layers = [int(x) for x in input("Layers: ").split()]
    print()
    print("Configuration:")
    print("\tEpsilon:  %f" % eps)
    print("\tAlpha:    %f" % alpha)
    print("\tDiscount: %f" % discount)
    print("\tSeries:   %d" % series)
    #print("\tLayers:   %s" % str(layers))

    sarsa_agent = get_fourier_sarsa_agent(eps, alpha, discount, series)
    random_agent = RandomAgent()

    sarsa_sim = Simulation(sarsa_agent, discount)
    random_sim = Simulation(random_agent, discount)

    sarsa_vals = []
    random_vals = []

    print("====================================")
    print("          AGENT COMPARISON          ")
    print("====================================")

    print()
    print("SARSA agent with Fourier series vs Random Action agent")
    print()
    print("Simulating...")
    for i in range(1, ep_count+1):
        print("Episode %d" % i)
Example #8
#!/usr/bin/env python
#-*- coding:utf-8 -*-

from simulator import Simulation
import matplotlib.pyplot as plt
import numpy as np


if __name__ == "__main__":
    # both stand
    sim = Simulation(width=20, height_floor=20, length_escalator=10,
                     x_exit_stand=[9, 10], x_exit_walk=[],
                     speed_stander=[1, 1], speed_walker=[],
                     mu=0.0, beta=10)
    N_iter = 10
    duration_both_stand = np.zeros((N_iter,))
    for n in range(N_iter):
        sim.initialize(n_stander=100, n_walker=0)
        duration_both_stand[n] = sim.run_all_pass(1000)
    baseline = np.median(duration_both_stand)
    
    # stand and walk
    sim = Simulation(width=20, height_floor=20, length_escalator=10,
                     x_exit_stand=[9], x_exit_walk=[10],
                     speed_stander=[1], speed_walker=[2],
                     mu=0.0, beta=10)

    ns_s = np.linspace(1, 100, 20).astype(int)  # np.int was removed in NumPy 1.24
    
    duration = np.zeros((len(ns_s), N_iter))
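
The excerpt stops just before the sweep itself. A hypothetical continuation, reusing the `initialize`/`run_all_pass` interface from the baseline loop above and assuming a fixed total of 100 riders:

    for k, n_s in enumerate(ns_s):
        for n in range(N_iter):
            sim.initialize(n_stander=n_s, n_walker=100 - n_s)  # total of 100 assumed
            duration[k, n] = sim.run_all_pass(1000)

    # Compare the stand-and-walk medians against the all-standers baseline.
    plt.plot(ns_s, np.median(duration, axis=1), label='stand + walk')
    plt.axhline(baseline, color='k', ls='--', label='all stand (baseline)')
    plt.xlabel('number of standers')
    plt.ylabel('median time for all to pass')
    plt.legend()
    plt.show()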
    
Example #9
# Imports and model construction implied by the excerpt:
import math

import numpy as np
from keras.layers import Dense, InputLayer
from keras.models import Sequential

model = Sequential()
model.add(InputLayer(batch_input_shape=(1, len(nodes))))
model.add(Dense(5, activation='sigmoid'))
model.add(Dense(len(actions), activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# Q-learning

num_episodes = 1000
y = 0.95
eps = 0.5  # initial exploration rate (overwritten by the decay schedule in the loop)
r_avg_list = []
for i in range(num_episodes):
    if i % 100 == 0:
        print("Episode {} of {}".format(i + 1, num_episodes))
    done = False
    r_sum = 0
    sim = Simulation(nodes)
    state = sim.get_state() # Initial state

    iteration = 0

    while not done:

        eps = 1/math.sqrt(iteration + 1)                # Gradually decrease exploration rate

        if np.random.random() < eps:
            a = np.random.randint(0, len(actions))      # Explore by picking a random action
        else:
            a = np.argmax(model.predict(state))             # Use network to predict which action to take

        action = actions[a]
        #print(action)
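
The excerpt ends before the environment step and learning update. A sketch of the standard one-step Q-learning update such a loop typically applies with this Keras setup; `sim.step` returning `(new_state, reward, done)` follows Example #2, but the rest is the generic update, not necessarily the original code:

        new_state, reward, done = sim.step(action)

        # One-step Q-learning target: r + y * max_a' Q(s', a')
        target = reward
        if not done:
            target += y * np.max(model.predict(new_state))
        target_vec = model.predict(state)[0]
        target_vec[a] = target
        model.fit(state, target_vec.reshape(1, -1), epochs=1, verbose=0)

        state = new_state
        r_sum += reward
        iteration += 1

    r_avg_list.append(r_sum / iteration)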