import sys
import time

import matplotlib.pyplot as plt
import numpy as np

# The project-specific modules used below (simengine, RGB, StandardModel,
# CloudChamber, ColorAutomataSimulations, IntentAgent, RNNAgent) are assumed
# to be importable from the surrounding project.


def main():
    if 'demo' in sys.argv:
        initial_config = {
            'width': 250,
            'height': 250,
            'n_particles': 10,
            'timescale': 250,
            'nRed': 300,
            'nGreen': 300,
            'nBlue': 400,
            'nYellow': 0,
            'nMagenta': 0,
            'nCyan': 0,
            'verbose': True
        }

        rgb = RGB()
        sim = simengine.Engine(initial_config, rgb)
        collisions = sim.run(RGB=rgb, animate=True, save=True)
        casm = StandardModel()
        sys.exit(0)
    elif 'npVsz' in sys.argv:
        rgb = RGB()
        const_time = 100
        dims = [50, 100, 150, 250, 350, 450, 550]
        n_particle_counts = [10, 20, 30, 40, 50, 100, 500]
        ncs = [['nRed', 'nBlue'], ['nRed', 'nGreen'],
               ['nRed', 'nBlue', 'nGreen'], ['nCyan', 'nMagenta', 'nYellow'],
               ['nRed', 'nBlue', 'nGreen', 'nCyan'],
               ['nRed', 'nBlue', 'nGreen', 'nCyan', 'nMagenta'],
               ['nRed', 'nBlue', 'nGreen', 'nCyan', 'nMagenta', 'nYellow']]
        opts = ['nRed', 'nBlue', 'nGreen', 'nCyan', 'nMagenta', 'nYellow']
        collision_data = []
        runtime_data = []
        for shape in dims:
            width = shape
            height = shape
            for n_particles in n_particle_counts:
                for particle_combos in ncs:
                    config = {
                        'width': width,
                        'height': height,
                        'n_particles': n_particles,
                        'timescale': const_time
                    }
                    # TODO: Automate nColor selections
                    config['verbose'] = False
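                    # Sketch for the TODO above (assumption: split the particle
                    # budget evenly over the colors in this combination and zero
                    # out the colors that are not selected).
                    per_color = n_particles // len(particle_combos)
                    for color in opts:
                        config[color] = per_color if color in particle_combos else 0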
                    tic = time.time()
                    collisions = simengine.Engine(config,
                                                  rgb).run(RGB=rgb,
                                                           animate=False,
                                                           save=False)
                    toc = time.time()
                    runtime_data.append(float(toc - tic))
                    collision_data.append(float(collisions))
        plt.plot(np.array(collision_data))
        plt.plot(np.array(runtime_data))
        plt.show()
def main():
    np.random.seed(0)
    engine = simengine.Engine()
    if engine.connect():
        engine.start()

        # Initialization
        agents = [IntentAgent(i, handler) for i, handler in enumerate(engine.get_agents())]
        agent_num = len(agents)
        IntentAgent.set_static_goals(engine.get_goals())

        iteration = 0
        total_steps = 0

        goals, state, reward = engine.step([-1 for _ in range(agent_num)])
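        # Poll with no-op actions until the engine reports an initial state
        # (assumption: -1 is treated as "do nothing" by the engine).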
        while not state:
            goals, state, reward = engine.step([-1 for _ in range(agent_num)])
        IntentAgent.compute_intent_probability_matrix(goals, state)
        actions = [agents[i].act(goals, state, reward[i], iteration) for i in range(agent_num)]

        while True:
            performance = engine.get_performance()
            if state:
                IntentAgent.compute_intent_probability_matrix(goals, state)
                actions = [agents[i].act(goals, state, reward[i], iteration) for i in range(agent_num)]

                total_steps += 1

                if iteration % 500 == 0 or not all(r == 0 for r in reward):
                    print 'iteration:', iteration, 'steps:', total_steps, 'reward:', reward, 'performance', performance, 'state:', state
                    print 'actions', actions

            iteration += 1

            goals, state, reward = engine.step(actions)
            actions = None

    engine.disconnect()


def multivariable_experiment():
    experiment_data = []
    # First Test Three Particle Collisions [R, G, B], even mixes
    for shape in Ws:
        state = np.zeros((shape, shape))
        for ts in Ts:
            for n_particles in SZs:
                config = {'width': state.shape[0],
                          'height': state.shape[1],
                          'n_particles': n_particles,
                          'timescale': ts,
                          'nRed': n_particles / 3,
                          'nGreen': n_particles / 3,
                          'nBlue': n_particles / 3,
                          'nYellow': 0,
                          'nCyan': 0,
                          'nMagenta': 0,
                          'verbose': True}
                t0 = time.time()
                sim = simengine.Engine(config, rgb)
                collisions = sim.run(rgb, False, False)
                cc = CloudChamber(config, collisions, time.time() - t0)
                experiment_data.append(cc)
    '''
    Compare N Collisions to state size

    Compare N Collisions to N Particles / state size
    '''
    collision_detections = {}
    for k in Ws:
        collision_detections[k] = []
    for test in experiment_data:
        size = test.configuration['width']
        dt = test.computation_cost
        collision_detections[size].append(test.n_collisions)

    plt.plot(collision_detections.keys(), collision_detections.values())
    plt.show()
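
    # Sketch (assumption) for the second comparison noted in the block above:
    # collisions against particle density, i.e. n_particles / state size.
    densities = [t.configuration['n_particles'] /
                 float(t.configuration['width'] * t.configuration['height'])
                 for t in experiment_data]
    plt.scatter(densities, [t.n_collisions for t in experiment_data])
    plt.xlabel('n_particles / state size')
    plt.ylabel('collisions detected')
    plt.show()
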
rgb = ColorAutomataSimulations.RGB()
Ws = [50, 100, 150, 200, 250]
Ts = [10, 50, 100, 150, 250]
SZs = [20, 50, 100, 200, 500]

nhits = []
timer = []
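# Benchmark sweep: run one simulation per grid size in Ws and particle count
# in SZs, recording the number of collisions and the wall-clock runtime.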
for size in Ws:
    for n_particles in SZs:
        config = {'width': size,
                  'height': size,
                  'n_particles': 100,
                  'timescale': 100,
                  'nRed': n_particles / 3,
                  'nGreen': n_particles / 3,
                  'nBlue': n_particles / 3,
                  'nYellow': 0,
                  'nCyan': 0,
                  'nMagenta': 0,
                  'verbose': True}
        sim = simengine.Engine(config, rgb)
        tic = time.time()
        collisions = sim.run(rgb, False, False)
        toc = time.time()
        timer.append(toc - tic)
        nhits.append(len(collisions))

plt.plot(nhits)
plt.plot(timer)
plt.show()
Example #5
def main():
    # # Testing RNN agent
    # tf.set_random_seed(0)
    # np.random.seed(0)
    # static_goals = [{i: i for i in range(4)}, {i+4: i+4 for i in range(4)}, {i+8: i+8 for i in range(1)}]
    # RNNAgent.set_static_goals(static_goals)
    # agent = RNNAgent(0, 20)
    # RNNAgent.init_lstm()
    #
    # reward = 0
    # for _ in range(10000):
    #     print 'iteration', _
    #     # a = agent.act(static_goals, [[20, 100], [10, 10, 10, 10]], reward)
    #     a = agent.act(static_goals, [[20, 100], [0, 0, 0, 0]], reward)
    #     reward = 1 if a == 3 else -1

    # Simulation for learning
    engine = simengine.Engine()
    if engine.connect():
        engine.start()

        # Initialization
        RNNAgent.set_static_goals(engine.get_goals())
        agents = [
            RNNAgent(i, handler)
            for i, handler in enumerate(engine.get_agents())
        ]
        agent_num = len(agents)
        RNNAgent.init_lstm()

        iteration = 0
        i_episode = 0
        episode_done = False

        goals, state, reward = engine.step([-1 for _ in range(agent_num)])
        while not state:
            goals, state, reward = engine.step([-1 for _ in range(agent_num)])
        actions = [
            agents[i].act(goals, state, reward[i]) for i in range(agent_num)
        ]

        while True:
            performance = engine.get_performance()
            if state and performance:
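                # Episode bookkeeping (assumption): performance[1] looks like a
                # cumulative score, so every 10 points marks the end of an episode.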
                if performance[1] == 10 * (i_episode + 1):
                    i_episode += 1
                    episode_done = True
                else:
                    episode_done = False

                actions = [
                    agents[i].act(goals, state, reward[i], episode_done)
                    for i in range(agent_num)
                ]

                if iteration % 500 == 0 or not all(r == 0 for r in reward):
                    print 'iteration:', iteration, 'reward:', reward, 'performance', performance, 'state:', state
                    print 'actions', actions

            if iteration % 5000 == 0:
                RNNAgent.save_model(iteration, performance)
            iteration += 1

            goals, state, reward = engine.step(actions)
            actions = None

    engine.disconnect()
Example #6
def main():
    np.random.seed(0)
    engine = simengine.Engine()
    if engine.connect():
        engine.start()

        # Initialization
        agents = [
            IntentAgent(i, handler)
            for i, handler in enumerate(engine.get_agents())
        ]
        agent_num = len(agents)
        IntentAgent.set_static_goals(engine.get_goals())

        iteration = 0
        total_steps = 0

        goals, state, reward = engine.step([-1 for _ in range(agent_num)])
        while not state:
            goals, state, reward = engine.step([-1 for _ in range(agent_num)])
        IntentAgent.compute_intent_probability_matrix(goals, state)
        actions = [
            agents[i].act(goals, state, reward[i]) for i in range(agent_num)
        ]

        # # Static strategy testing
        # actions = [-1 for _ in range(agent_num)]
        # actions[0] = agents[0].act(goals, state, reward[0])

        goal_num = sum(len(g) for g in goals)
        last_goal_num = goal_num

        while True:
            performance = engine.get_performance()
            if state:
                IntentAgent.compute_intent_probability_matrix(goals, state)
                # print IntentAgent.goal_list, IntentAgent.goal_types
                # print IntentAgent.intent_probability_matrix
                actions = [
                    agents[i].act(goals, state, reward[i])
                    for i in range(agent_num)
                ]

                # # Static strategy testing
                # actions = [-1 for _ in range(agent_num)]
                # actions[0] = agents[0].act(goals, state, reward[0])

                total_steps += 1

                if iteration % 500 == 0 or not all(r == 0 for r in reward):
                    print 'iteration:', iteration, 'steps:', total_steps, 'reward:', reward, 'performance', performance, 'state:', state
                    print 'actions', actions

                if total_steps % 1000 == 0:
                    IntentAgent.decrease_learning_rate()

            if not all(r == 0 for r in reward) or iteration % 5000 == 0:
                IntentAgent.save_model(iteration, performance)
            iteration += 1

            last_goal_num = goal_num
            goals, state, reward = engine.step(actions)
            goal_num = sum(len(g) for g in goals)
            actions = None

    engine.disconnect()