Code example #1
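# Assumed context (not shown in this excerpt): the module is expected to import
# pygame and numpy as np, and to pull AgentManager, init, and the WHITE colour
# constant from the project's own modules.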
def run_simulation(DRAW, SCREEN_HEIGHT, SCREEN_WIDTH, MODE, EXTRACT_DATA,
                   SIM_SPEED, save_first_winner, level_to_save,
                   generation_limit, end_on_lap, levels, attempts):

    print('MODEL = *** RANDOM SEARCH ***')
    print()
    agents = AgentManager(mode=MODE,
                          yPos_range=None,
                          starting_xPos=100,
                          starting_yPos=500,
                          xPos_range=None,
                          population_size=300,
                          screen_height=SCREEN_HEIGHT,
                          vertical_fuel_depletion_rate=0.0005,
                          horizontal_fuel_depletion_rate=0.0005,
                          screen_width=SCREEN_WIDTH,
                          rand_init_population_size=300,
                          rand_init_mutation_rate=0.0,
                          med_sim_init_population_size=300,
                          med_sim_init_rand_agent_percenatge=0.0,
                          med_sim_init_med_memory_agent_percenatge=1.0,
                          med_sim_init_rand_mutation_rate=0.0,
                          med_sim_init_med_mutation_rate=0.2,
                          load_agent_filepath='sim_data/sim_test_main_agent',
                          save_path='sim_test_main_agent')

    screen, clock, current_level, current_level_no = init(
        SCREEN_WIDTH, SCREEN_HEIGHT, levels)

    internal_generation_count = 0
    level_generation_count = 0
    generation_record = np.zeros(len(levels)) - 1
    generation_record_count = 0
    starting_attempts = attempts
    lap_no = 1
    change_level_flag = False
    winners = []

    done = False
    frame_count = 0
    while not done:

        # --- Event Processing ---
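        # Keys: Up/Down adjust SIM_SPEED, S saves the best agent (train mode only),
        # D toggles drawing on/off, Space pauses/resumes.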

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_DOWN:
                    SIM_SPEED -= 10

                elif event.key == pygame.K_UP:
                    SIM_SPEED += 10

                elif event.key == pygame.K_s and MODE == 'train':
                    agents.save_best_agent()

                elif event.key == pygame.K_d:
                    DRAW = not DRAW

                elif event.key == pygame.K_SPACE:
                    pause = True
                    while pause:
                        for event in pygame.event.get():
                            if event.type == pygame.KEYDOWN:
                                if event.key == pygame.K_SPACE:
                                    pause = False

        for _ in range(SIM_SPEED):

            # --- Update Agents ---
            i = 0
            while i < len(agents.not_sprites) and i > -1:
                agent = agents.not_sprites[i]

                agent.think(current_level, SCREEN_WIDTH, SCREEN_HEIGHT)
                agent.update(SCREEN_HEIGHT)

                if agent.current_closest_block.hit(agent) or agent.off_screen(
                        SCREEN_HEIGHT, SCREEN_WIDTH) or agent.fuel_depleted():

                    # agent.computeFitness()
                    agents.splice(i)
                    i -= 1

                # Winner handling: the first agent to pass within 10 px of the right
                # edge of the final block triggers the level change; after that, any
                # agent reaching the final block's span is also kept as a winner.
                if len(winners) == 0:
                    if agent.rect.right > current_level[
                            len(current_level) - 1].top_block.rect.right - 10:
                        if save_first_winner == True and level_to_save == current_level_no and MODE == 'train':
                            agents.save_best_agent()
                            save_first_winner = False
                        change_level_flag = True
                        agent.fuel = 1.0
                        agent.rect.x = agents.starting_xPos
                        agent.rect.y = agents.starting_yPos
                        agent.velocity_x = 0
                        agent.velocity_y = 0
                        winners.append(agent)
                        agents.splice(i, mode='delete')
                        i -= 1

                else:
                    if agent.rect.right > current_level[len(
                            current_level
                    ) - 1].top_block.rect.left and agent.rect.right < current_level[
                            len(current_level) - 1].top_block.rect.right:

                        change_level_flag = True
                        agent.fuel = 1.0
                        agent.rect.x = agents.starting_xPos
                        agent.rect.y = agents.starting_yPos
                        agent.velocity_x = 0
                        agent.velocity_y = 0
                        winners.append(agent)
                        agents.splice(i, mode='delete')
                        i -= 1

                i += 1

            if change_level_flag:

                print('LEVEL ' + str(current_level_no) +
                      ' COMPLETE: RANDOM VANILLA')
                attempts = starting_attempts

                agents.splice(index=None, mode='all')
                agents.not_sprites = winners
                change_level_flag = False
                winners = []

                last_level = False
                if current_level_no == len(levels) - 1:
                    last_level = True

                if current_level_no < len(levels) - 1:
                    current_level_no += 1
                else:
                    current_level_no = 0

                current_level = levels[current_level_no]

                level_generation_count += internal_generation_count
                generation_record[
                    generation_record_count] = level_generation_count
                generation_record_count += 1
                level_generation_count = 0
                internal_generation_count = 0

                if last_level:
                    if current_level_no == 0 and end_on_lap == lap_no:
                        done = True
                        break
                    else:
                        lap_no += 1

            # check if all active agents are dead, then perform the GA and reset the game level and epochs
            if len(agents.not_sprites) == 0:

                skip_ga = False
                if internal_generation_count >= generation_limit:

                    agents.dead_agents = []
                    agents = AgentManager(
                        mode=MODE,
                        yPos_range=None,
                        starting_xPos=100,
                        starting_yPos=500,
                        xPos_range=None,
                        population_size=300,
                        screen_height=SCREEN_HEIGHT,
                        vertical_fuel_depletion_rate=0.0005,
                        horizontal_fuel_depletion_rate=0.0005,
                        screen_width=SCREEN_WIDTH,
                        rand_init_population_size=300,
                        rand_init_mutation_rate=0.0,
                        med_sim_init_population_size=300,
                        med_sim_init_rand_agent_percenatge=1.0,
                        med_sim_init_med_memory_agent_percenatge=0.0,
                        med_sim_init_rand_mutation_rate=0.0,
                        med_sim_init_med_mutation_rate=0.2,
                        load_agent_filepath='sim_data/sim_test_main_agent',
                        save_path='sim_test_main_agent'
                    )  # leave one model (the current one) unchanged
                    skip_ga = True

                    level_generation_count += internal_generation_count
                    internal_generation_count = 0

                    if EXTRACT_DATA == 'gen_test' and attempts == 0:
                        generation_record[
                            generation_record_count] = level_generation_count
                        done = True
                        print('~FAIL')
                        break
                    else:
                        attempts -= 1

                    print(
                        'MODEL LOST IN BAD THOUGHT POOL: ATTEMPTS = %s out of %s RE-INIT GA PROCESSING'
                        % (attempts, starting_attempts))

                if skip_ga == False:

                    agents.dead_agents = []
                    agents = AgentManager(
                        mode=MODE,
                        yPos_range=None,
                        starting_xPos=100,
                        starting_yPos=500,
                        xPos_range=None,
                        population_size=300,
                        screen_height=SCREEN_HEIGHT,
                        vertical_fuel_depletion_rate=0.0005,
                        horizontal_fuel_depletion_rate=0.0005,
                        screen_width=SCREEN_WIDTH,
                        rand_init_population_size=300,
                        rand_init_mutation_rate=0.0,
                        med_sim_init_population_size=300,
                        med_sim_init_rand_agent_percenatge=1.0,
                        med_sim_init_med_memory_agent_percenatge=0.0,
                        med_sim_init_rand_mutation_rate=0.0,
                        med_sim_init_med_mutation_rate=0.2,
                        load_agent_filepath='sim_data/sim_test_main_agent',
                        save_path='sim_test_main_agent')

                    internal_generation_count += 1
                    print(
                        'generation = %s level_generation = %s population size = %s level no = %s / %s'
                        % (internal_generation_count, level_generation_count,
                           len(agents.not_sprites), current_level_no,
                           len(levels)))

                    # if MODE != 'adapt':
                    #     current_level_no = 0
                    #     current_level = levels[current_level_no]

            # --- Drawing ---
            screen.fill(WHITE)

            if DRAW:
                for block in current_level:
                    block.draw(screen)

                agents.draw(screen, mode=MODE)

            pygame.display.flip()

            clock.tick()
            frame_count += 1
            if change_level_flag:
                change_level_flag = False

    pygame.quit()
    print('SIMULATION LEVEL COMPLETED BY AGENT')
    return generation_record
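A hypothetical driver for the random-search loop above is sketched below. It assumes the same SCREEN_WIDTH/SCREEN_HEIGHT constants and build_blueprint/Block helpers used in the other examples; the level layout and argument values are illustrative only, not taken from the original source.

if __name__ == '__main__':
    # Illustrative level, built the same way as in code example #2.
    level1 = build_blueprint([400, 450, 580, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    levels = Block.generate(SCREEN_WIDTH, SCREEN_HEIGHT, 100, level1)

    generation_record = run_simulation(DRAW=True,
                                       SCREEN_HEIGHT=SCREEN_HEIGHT,
                                       SCREEN_WIDTH=SCREEN_WIDTH,
                                       MODE='train',
                                       EXTRACT_DATA=None,
                                       SIM_SPEED=1,
                                       save_first_winner=False,
                                       level_to_save=0,
                                       generation_limit=10,
                                       end_on_lap=1,
                                       levels=levels,
                                       attempts=3)
    print(generation_record)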
Code example #2
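# Assumed context (not shown in this excerpt): pygame, the SCREEN_WIDTH/SCREEN_HEIGHT
# and colour constants, and the project's build_blueprint, Block, AgentManager,
# VisualSystem, MemorySystem, GenticAlgorithm, get_best_winner, capture and init
# helpers are expected to be imported at module level.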
def main():
    """ Main Program """
    MODE = 'test'
    SIM_SPEED = 1
    save_first_winner = False
    level_to_save = 0

    level1 = build_blueprint([400, 450, 580, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level2 = build_blueprint([400, 400, 520, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level3 = build_blueprint([400, 350, 480, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level4 = build_blueprint([400, 250, 380, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level5 = build_blueprint([400, 150, 280, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level6 = build_blueprint([400, 50, 180, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)

    levels = Block.generate(SCREEN_WIDTH, SCREEN_HEIGHT, 100, level1, level3,
                            level1, level4, level6)
    #levels = Block.generate(SCREEN_WIDTH, SCREEN_HEIGHT, 100, level1)

    agents = AgentManager(mode=MODE,
                          yPos_range=None,
                          starting_xPos=100,
                          starting_yPos=500,
                          xPos_range=None,
                          population_size=300,
                          screen_height=SCREEN_HEIGHT,
                          vertical_fuel_depletion_rate=0.0005,
                          horizontal_fuel_depletion_rate=0.0005,
                          screen_width=SCREEN_WIDTH,
                          load_agent_filepath='sim_data/default_best')

    visual_system = VisualSystem.init(img_shape=(40, 40, 1),
                                      latent_dims=3,
                                      RE_delta=0.0,
                                      model_folder='CNND',
                                      start=6,
                                      MODE=MODE,
                                      preview_output=False)

    memory_system = MemorySystem.init(MODE=MODE,
                                      high_simularity_threshold=1.0e-08,
                                      low_simularity_threshold=0.2,
                                      forget_usage_threshold=0,
                                      forget_age_threshold=50,
                                      max_memory_size=50)

    screen, clock, current_level, current_level_no = init(
        SCREEN_WIDTH, SCREEN_HEIGHT, levels)

    generation_count = 0
    change_level_flag = False
    adaption_complete_flag = False
    winners = []
    latent_representation = []
    is_familiar = None

    done = False
    frame_count = 0
    while not done:

        # --- Event Processing ---

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_DOWN:
                    SIM_SPEED -= 10

                elif event.key == pygame.K_UP:
                    SIM_SPEED += 10

                elif event.key == pygame.K_s and MODE == 'train':
                    agents.save_best_agent()

        for _ in range(SIM_SPEED):

            if MODE in ('train', 'test', 'adapt'):
                # --- Update Agents ---
                i = 0
                while i < len(agents.not_sprites) and i > -1:
                    agent = agents.not_sprites[i]

                    if MODE == 'test' and visual_system is not None:
                        # ignore blackout screen at beginning
                        visual_system.process_image(screen,
                                                    agents,
                                                    frame_count,
                                                    preview_images=False)
                        is_familiar = visual_system.is_familiar()

                        if is_familiar == False and memory_system is not None:
                            if visual_system.supress == False:
                                # get latent representation from vs
                                latent_representation, _ = visual_system.generate_latent_representation(
                                )
                                memory, action = memory_system.query(
                                    latent_representation)

                                if memory is not None and action == 'memory_to_fs_system_switch':
                                    agents.temp_agent_store.append(agent)
                                    agents.reset_temp_agent_store(
                                        memory.get('solution'))
                                    visual_system.supress = True
                                    print(action)
                                elif memory is None and action == 'adaption_using_medium_memory_as_init_foundation':
                                    pass

                                elif memory is None and action == 'adaption_using_low_memory_and_random_init_foundation':
                                    MODE = 'adapt'
                                    agents.adaptive_rand_population_init(
                                        mode=MODE,
                                        rand_init_population_size=400,
                                        rand_to_current_model_split=(1.0, 0.0),
                                        rand_init_mutation_rate=0.0,
                                        current_model_mutation_rate=0.99
                                    )  # leave one model the current one unchanges
                                    print(action)
                        else:

                            agents.restore_original_test_agent()

                    #   print('visual system is frame familiar: %s' % is_familiar)

                    agent.think(current_level, SCREEN_WIDTH, SCREEN_HEIGHT)
                    agent.update(SCREEN_HEIGHT)

                    if agent.current_closest_block.hit(
                            agent) or agent.off_screen(
                                SCREEN_HEIGHT,
                                SCREEN_WIDTH) or agent.fuel_depleted():

                        agent.computeFitness()
                        agents.splice(i)
                        i -= 1

                    if len(winners) == 0:
                        if agent.rect.right > current_level[len(
                                current_level) - 1].top_block.rect.right - 10:
                            if save_first_winner == True and level_to_save == current_level_no and MODE == 'train':
                                agents.save_best_agent()
                                save_first_winner = False
                            change_level_flag = True
                            agent.fuel = 1.0
                            agent.rect.x = agents.starting_xPos
                            agent.rect.y = agents.starting_yPos
                            agent.velocity_x = 0
                            agent.velocity_y = 0
                            agent.name = 'winner' + str(i)
                            winners.append(agent)
                            agents.splice(i, mode='delete')
                            i -= 1

                    else:
                        if agent.rect.right > current_level[len(
                                current_level
                        ) - 1].top_block.rect.left and agent.rect.right < current_level[
                                len(current_level) - 1].top_block.rect.right:

                            change_level_flag = True
                            agent.fuel = 1.0
                            agent.rect.x = agents.starting_xPos
                            agent.rect.y = agents.starting_yPos
                            agent.velocity_x = 0
                            agent.velocity_y = 0
                            agent.name = 'winner' + str(i)
                            winners.append(agent)
                            agents.splice(i, mode='delete')
                            i -= 1

                    i += 1

                if change_level_flag:

                    if MODE != 'adapt':
                        agents.splice(index=None, mode='all')
                        agents.not_sprites = winners
                        change_level_flag = False
                        winners = []

                        if current_level_no < len(levels) - 1:
                            current_level_no += 1
                        else:
                            current_level_no = 0

                        current_level = levels[current_level_no]

                        if visual_system is not None and visual_system.supress == True:
                            visual_system.supress = False
                        break

                    else:
                        solution = get_best_winner(winners)
                        memory_system.create_memory(
                            latent_representation=latent_representation,
                            solution=solution.functional_system,
                            tag='mem_level' + str(current_level_no))
                        agents.reset_temp_agent_store(
                            solution.functional_system)
                        visual_system.supress = True
                        winners = []
                        agents.dead_agents = []
                        MODE = 'test'
                        print('ADAPTION COMPLETE: USING NEW SOLUTION')

                # check if all active agents are dead, then perform the GA and reset the game level and epochs
                if len(agents.not_sprites) == 0:

                    if MODE == 'test':
                        dead_agent = agents.dead_agents[0]
                        dead_agent.reset()
                        new_population = [dead_agent]
                        agents.update_arrays(input=new_population)

                    else:
                        new_population = GenticAlgorithm.produceNextGeneration(
                            population=agents.dead_agents,
                            screen_width=SCREEN_WIDTH,
                            screen_height=SCREEN_HEIGHT,
                            agent_meta_data=agents.__dict__)

                        agents.update_arrays(new_population)

                    generation_count += 1
                    print(
                        'generation = %s population size = %s level no = %s / %s'
                        % (generation_count, len(new_population),
                           current_level_no, len(levels)))

                    if MODE != 'adapt':
                        current_level_no = 0
                        current_level = levels[current_level_no]

        # --- Drawing ---
        screen.fill(WHITE)

        for block in current_level:
            block.draw(screen)

        agents.draw(screen, mode=MODE)

        capture_success = capture(surface=screen,
                                  mode=MODE,
                                  remove_blank_frames=True,
                                  level_number=current_level_no,
                                  save_folder_path='test',
                                  preview=True)

        if MODE == 'capture' and capture_success:
            if current_level_no < len(levels) - 1:
                current_level_no += 1
                current_level = levels[current_level_no]
            else:
                done = True

        pygame.display.flip()

        clock.tick(60)
        frame_count += 1
        if change_level_flag:
            change_level_flag = False

        if MODE == 'adapt':
            print('Mode: %s is familiar: %s model in use: %s ' %
                  (MODE, is_familiar, 'Finding solution'))

        elif MODE == 'test':
            print('Mode: %s is familiar: %s model in use: %s ' %
                  (MODE, is_familiar,
                   agents.not_sprites[0].functional_system.name))

    pygame.quit()
    print('SIMULATION LEVEL COMPLETED BY AGENT')
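A minimal entry point for the program above, assuming the module is executed directly; main() takes no arguments:

if __name__ == '__main__':
    main()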
Code example #3
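# Assumed context (not shown in this excerpt): pygame, SCREEN_WIDTH/SCREEN_HEIGHT,
# the colour constants, and the project's build_blueprint, build_blueprint_range,
# Block, AgentManager, VisualSystem, MemorySystem, GenticAlgorithm,
# get_best_winner, capture and init helpers are expected to be imported at
# module level.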
def run_model(MODE=None,
              data_extract=None,
              SIM_SPEED=None,
              generation_limit=None,
              run_time=None,
              target_levels=None,
              print_data=True,
              gentic_algorithm_active=True,
              target_generation=1,
              med_sim_init_rand_agent_percenatge=1.0,
              visual_system=None,
              med_sim_init_med_memory_agent_percenatge=0.0,
              med_sim_init_rand_mutation_rate=0.0,
              med_sim_init_med_mutation_rate=0.5):
    """ Main Program """
    if data_extract is None:
        MODE = 'capture'
        gentic_algorithm_active = True
        data_extract = False
        SIM_SPEED = 1
        generation_limit = 10
        target_generation = 3
        run_time = 600
        target_levels = [1]
        print_data = True

        med_sim_init_rand_agent_percenatge = 0.0
        med_sim_init_med_memory_agent_percenatge = 1.0
        med_sim_init_rand_mutation_rate = 0.0
        med_sim_init_med_mutation_rate = 0.1

    save_first_winner = False
    level_to_save = 0
    save_folder_path = 'test_report'

    test_levels = build_blueprint_range(x=400,
                                        start_bottom_y=600,
                                        start_top_y=500,
                                        width=100,
                                        stop=0,
                                        multipliers=4,
                                        y_interval=1,
                                        x_interval=50)
    #390, 490
    level0 = build_blueprint([400, 460, 560, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level1 = build_blueprint([400, 390, 490, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level2 = build_blueprint([400, 350, 450, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level3 = build_blueprint([400, 330, 430, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    level4 = build_blueprint([400, 250, 350, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)

    level0 = build_blueprint([300, 500, 600, 100, BLACK],
                             multipliers=1,
                             xPos_interval=100)
    level1 = build_blueprint([300, 480, 580, 100, BLACK],
                             multipliers=2,
                             xPos_interval=90)
    level2 = build_blueprint([300, 460, 560, 100, BLACK],
                             multipliers=3,
                             xPos_interval=80)
    level3 = build_blueprint([300, 440, 540, 100, BLACK],
                             multipliers=4,
                             xPos_interval=70)
    level4 = build_blueprint([300, 420, 520, 100, BLACK],
                             multipliers=5,
                             xPos_interval=60)
    level5 = build_blueprint([300, 400, 500, 100, BLACK],
                             multipliers=6,
                             xPos_interval=50)
    level6 = build_blueprint([300, 380, 480, 100, BLACK],
                             multipliers=7,
                             xPos_interval=40)
    level7 = build_blueprint([300, 360, 460, 100, BLACK],
                             multipliers=8,
                             xPos_interval=30)
    level8 = build_blueprint([300, 340, 440, 100, BLACK],
                             multipliers=9,
                             xPos_interval=20)
    level9 = build_blueprint([300, 320, 420, 100, BLACK],
                             multipliers=10,
                             xPos_interval=10)
    level10 = build_blueprint([300, 300, 400, 100, BLACK],
                              multipliers=11,
                              xPos_interval=0)

    #level1
    levels = Block.generate(SCREEN_WIDTH, SCREEN_HEIGHT, 100, False, level0,
                            level1, level2, level3, level4, level5, level6,
                            level7, level8, level9, level10)
    #levels = Block.generate(SCREEN_WIDTH, SCREEN_HEIGHT, 100, level1)

    agents = AgentManager(
        mode=MODE,
        yPos_range=None,
        starting_xPos=100,
        starting_yPos=500,
        xPos_range=None,
        population_size=300,
        screen_height=SCREEN_HEIGHT,
        vertical_fuel_depletion_rate=0.001,
        horizontal_fuel_depletion_rate=0.001,
        screen_width=SCREEN_WIDTH,
        rand_init_population_size=300,
        rand_init_mutation_rate=0.0,
        med_sim_init_population_size=300,
        med_sim_init_rand_agent_percenatge=med_sim_init_rand_agent_percenatge,
        med_sim_init_med_memory_agent_percenatge=
        med_sim_init_med_memory_agent_percenatge,
        med_sim_init_rand_mutation_rate=med_sim_init_rand_mutation_rate,
        med_sim_init_med_mutation_rate=med_sim_init_med_mutation_rate,
        load_agent_filepath='sim_data/default_best')

    if visual_system is None:
        visual_system = VisualSystem.init(img_shape=(40, 40, 1),
                                          latent_dims=3,
                                          RE_delta=0.0,
                                          model_folder='CNND_sim_test',
                                          start=1,
                                          MODE=MODE,
                                          preview_output=False)

    memory_system = MemorySystem.init(MODE=MODE,
                                      high_simularity_threshold=0.05,
                                      low_simularity_threshold=0.09,
                                      forget_usage_threshold=0,
                                      forget_age_threshold=50,
                                      max_memory_size=50)

    screen, clock, current_level, current_level_no = init(
        SCREEN_WIDTH, SCREEN_HEIGHT, levels)

    avg_fitness = None

    generation_count = 0
    global_generation_count = 0
    change_level_flag = False
    winners = []
    latent_representation = []
    is_familiar = None
    init_first_original_memory = True
    memory = None

    event_flag = False
    done = False
    frame_count = 0
    start_time = pygame.time.get_ticks()
    if print_data:
        print('MODEL IS ACTIVE: GA ACTIVE = %s' % gentic_algorithm_active)
    while not done:

        is_target_level = False
        if data_extract:
            for targets in target_levels:
                if current_level_no == targets:
                    is_target_level = True
                    break

        # --- Event Processing ---

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_DOWN:
                    SIM_SPEED -= 10

                elif event.key == pygame.K_UP:
                    SIM_SPEED += 10

                elif event.key == pygame.K_s and MODE == 'train':
                    agents.save_best_agent()

                elif event.key == pygame.K_SPACE:
                    pause = True
                    while pause:
                        for event in pygame.event.get():
                            if event.type == pygame.KEYDOWN:
                                if event.key == pygame.K_SPACE:
                                    pause = False

        for _ in range(SIM_SPEED):

            if MODE == 'train' or MODE == 'test':
                # --- Update Agents ---
                i = 0
                while i < len(agents.not_sprites) and i > -1:
                    agent = agents.not_sprites[i]

                    if MODE == 'test' and visual_system is not None:
                        # ignore blackout screen at beginning
                        visual_system.process_image(screen,
                                                    agents,
                                                    frame_count,
                                                    mask_agent=True,
                                                    preview_images=False)
                        is_familiar = visual_system.is_familiar()

                        if is_familiar and init_first_original_memory:
                            latent_representation, _ = visual_system.generate_latent_representation(
                            )
                            memory_system.create_memory(
                                latent_representation=latent_representation,
                                solution=agent.functional_system,
                                tag='mem_level' + str(current_level_no))
                            init_first_original_memory = False
                            visual_system.supress = True
                            event_flag = True

                        if visual_system.supress == False and is_familiar is not None:
                            # get latent representation from vs
                            latent_representation, _ = visual_system.generate_latent_representation(
                            )
                            memory, action = memory_system.query(
                                latent_representation)

                            if memory is not None and action == 'memory_to_fs_system_switch':
                                agents.temp_agent_store.append(agent)
                                agent = agents.reset_temp_agent_store(
                                    memory.get('solution'))
                                visual_system.supress = True
                                if print_data:
                                    print(action)

                            elif memory is not None and action == 'adaption_using_medium_memory_as_init_foundation':
                                MODE = 'adapt'
                                agents.adaptive_med_sim_population_init(
                                    MODE, memory)
                                if print_data:
                                    print(action)
                                break

                            elif action == 'adaption_using_low_memory_and_random_init_foundation':
                                MODE = 'adapt'
                                agents.adaptive_rand_population_init(
                                    MODE
                                )  # leave one model (the current one) unchanged
                                if print_data:
                                    print(action)
                                break

                            event_flag = True

                    #   print('visual system is frame familiar: %s' % is_familiar)

                    agent.think(current_level, SCREEN_WIDTH, SCREEN_HEIGHT)
                    agent.update(SCREEN_HEIGHT)

                    if agent.current_closest_block.hit(
                            agent) or agent.off_screen(
                                SCREEN_HEIGHT,
                                SCREEN_WIDTH) or agent.fuel_depleted():

                        # agent.computeFitness()
                        agents.splice(i)
                        i -= 1

                    if len(winners) == 0:
                        if agent.rect.right > current_level[len(
                                current_level) - 1].top_block.rect.right - 10:
                            if save_first_winner == True and level_to_save == current_level_no and MODE == 'train':
                                agents.save_best_agent()
                                save_first_winner = False
                            change_level_flag = True
                            agent.fuel = 1.0
                            agent.rect.x = agents.starting_xPos
                            agent.rect.y = agents.starting_yPos
                            agent.velocity_x = 0
                            agent.velocity_y = 0
                            winners.append(agent)
                            agents.splice(i, mode='delete')
                            i -= 1

                    else:
                        if agent.rect.right > current_level[len(
                                current_level
                        ) - 1].top_block.rect.left and agent.rect.right < current_level[
                                len(current_level) - 1].top_block.rect.right:

                            change_level_flag = True
                            agent.fuel = 1.0
                            agent.rect.x = agents.starting_xPos
                            agent.rect.y = agents.starting_yPos
                            agent.velocity_x = 0
                            agent.velocity_y = 0
                            winners.append(agent)
                            agents.splice(i, mode='delete')
                            i -= 1

                    i += 1

                if change_level_flag:

                    if MODE != 'adapt':
                        agents.splice(index=None, mode='all')
                        agents.not_sprites = winners
                        change_level_flag = False
                        winners = []

                        if current_level_no < len(levels) - 1:
                            current_level_no += 1
                        else:
                            current_level_no = 0

                        current_level = levels[current_level_no]

                        if visual_system is not None and visual_system.supress == True:
                            visual_system.supress = False
                        break

                    else:

                        if is_target_level:
                            sum_fitness = 0

                            for ag in agents.dead_agents:
                                ag.computeFitness()
                                sum_fitness += ag.fitness

                            for ag in winners:
                                ag.computeFitness()
                                sum_fitness += ag.fitness

                            avg_fitness = sum_fitness / (
                                len(agents.dead_agents) + len(winners))
                            done = True
                            break

                        solution = get_best_winner(winners)
                        memory_system.create_memory(
                            latent_representation=latent_representation,
                            solution=solution.functional_system,
                            tag='mem_level' + str(current_level_no))
                        agents.reset_temp_agent_store(
                            solution.functional_system)
                        visual_system.supress = True
                        generation_count = 0
                        winners = []
                        agents.dead_agents = []
                        MODE = 'test'

                        if print_data:
                            print('ADAPTION COMPLETE: SOLUTION: %s' %
                                  (solution.functional_system.name))

                # check if all active agents are dead, then perform the GA and reset the game level and epochs
                if len(agents.not_sprites) == 0:

                    skip_ga = False
                    if MODE == 'test':

                        dead_agent = agents.dead_agents[0]
                        dead_agent.reset()
                        new_population = [dead_agent]
                        agents.update_arrays(input=new_population)
                        return 'ERROR_AGENT_DEATH'
                        #raise ValueError('AGENT DEATH DURING TEST MODE: MEMORY WAS INEFFECTIVE')

                    else:

                        if MODE == 'adapt' and generation_count >= generation_limit:

                            if memory_system.current_action == 'adaption_using_medium_memory_as_init_foundation':
                                MODE = 'adapt'
                                agents.dead_agents = []
                                agents.adaptive_med_sim_population_init(
                                    MODE, memory)

                            elif memory_system.current_action == 'adaption_using_low_memory_and_random_init_foundation':
                                MODE = 'adapt'
                                agents.dead_agents = []
                                agents.adaptive_rand_population_init(
                                    MODE
                                )  # leave one model (the current one) unchanged
                            skip_ga = True
                            generation_count = 0
                            if print_data:
                                print(
                                    'MODEL LOST IN BAD THOUGHT POOL: RE-INIT GA PROCESSING'
                                )

                    if skip_ga == False:

                        new_population_len = []
                        generation_count += 1
                        global_generation_count += 1
                        if gentic_algorithm_active:

                            new_population = GenticAlgorithm.produceNextGeneration(
                                population=agents.dead_agents,
                                agent_meta_data=agents.__dict__)

                            if is_target_level and generation_count == target_generation:
                                sum_fitness = 0

                                for ag in agents.dead_agents:
                                    # ag.computeFitness()
                                    sum_fitness += ag.fitness

                                avg_fitness = sum_fitness / (len(
                                    agents.dead_agents))
                                global_generation_count = -1
                                done = True
                                break

                            agents.update_arrays(new_population)
                            new_population_len = len(new_population)

                        else:
                            agents = AgentManager(
                                mode=MODE,
                                yPos_range=None,
                                starting_xPos=100,
                                starting_yPos=500,
                                xPos_range=None,
                                population_size=300,
                                screen_height=SCREEN_HEIGHT,
                                vertical_fuel_depletion_rate=0.001,
                                horizontal_fuel_depletion_rate=0.001,
                                screen_width=SCREEN_WIDTH,
                                rand_init_population_size=300,
                                rand_init_mutation_rate=0.0,
                                med_sim_init_population_size=300,
                                med_sim_init_rand_agent_percenatge=
                                med_sim_init_rand_agent_percenatge,
                                med_sim_init_med_memory_agent_percenatge=
                                med_sim_init_med_memory_agent_percenatge,
                                med_sim_init_rand_mutation_rate=
                                med_sim_init_rand_mutation_rate,
                                med_sim_init_med_mutation_rate=
                                med_sim_init_med_mutation_rate,
                                load_agent_filepath='sim_data/default_best')
                            new_population_len = len(agents.dead_agents)

                        if print_data:
                            print(
                                'generation = %s population size = %s level no = %s / %s'
                                % (generation_count, new_population_len,
                                   current_level_no, len(levels)))

                        if MODE != 'adapt':
                            current_level_no = 0
                            current_level = levels[current_level_no]

        # --- Drawing ---
        screen.fill(WHITE)

        for block in current_level:
            block.draw(screen)

        agents.draw(screen, mode=MODE)

        capture_success = capture(surface=screen,
                                  mode=MODE,
                                  remove_blank_frames=True,
                                  level_number=current_level_no,
                                  save_folder_path=save_folder_path,
                                  preview=True)

        if MODE == 'capture' and capture_success:
            if current_level_no < len(levels) - 1:
                current_level_no += 1
                current_level = levels[current_level_no]
            else:
                done = True

        pygame.display.flip()

        end_time = pygame.time.get_ticks()
        time = (end_time - start_time) / 1000
        clock.tick(60)

        if time >= run_time:
            done = True

        frame_count += 1
        if change_level_flag:
            change_level_flag = False

        if event_flag:

            if memory is None:
                sim_score = 'N/A'
            else:

                if isinstance(memory, list):
                    min_sim = memory[0].get('similarity_score')

                    for item in memory:
                        sim = item.get('similarity_score')
                        if sim < min_sim:
                            min_sim = sim
                    sim_score = 'minimum = ' + str(min_sim)

                else:
                    sim_score = memory.get('similarity_score')

            if print_data:
                if MODE == 'adapt':
                    print(
                        'Mode: %s is familiar: %s model in use: %s similarity: %s'
                        % (MODE, is_familiar, 'Finding solution', sim_score))

                elif MODE == 'test':
                    print(
                        'Mode: %s is familiar: %s model in use: %s Similarity: %s'
                        % (MODE, is_familiar,
                           agents.not_sprites[0].functional_system.name,
                           sim_score))

            event_flag = False

    if print_data:
        print('Simulation Terminated')
        print('Simulation Time Length = %s seconds' % time)
    pygame.quit()
    return avg_fitness, global_generation_count
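A hypothetical call into run_model is sketched below. With no arguments, data_extract stays None and the function falls back to the capture-mode defaults defined at its top; the guard reflects that run_model may also return the string 'ERROR_AGENT_DEATH' if the test agent dies.

if __name__ == '__main__':
    # With no arguments, run_model uses its built-in capture-mode defaults.
    result = run_model()
    if result == 'ERROR_AGENT_DEATH':
        print('test agent died: memory was ineffective')
    else:
        avg_fitness, global_generation_count = result
        print(avg_fitness, global_generation_count)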
Code example #4
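# Assumed context (not shown in this excerpt): pygame, numpy as np, the colour
# constants, and the project's AgentManager, MemorySystem, GenticAlgorithm,
# get_best_winner, capture, normalise and init helpers are expected to be
# imported at module level.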
def run_simulation(DRAW, SCREEN_HEIGHT, SCREEN_WIDTH, MODE, EXTRACT_DATA,
                   SIM_SPEED, save_first_winner, level_to_save,
                   generation_limit, end_on_lap, capture_filename, levels,
                   attempts, save_trained_agent, load_agent_filepath,
                   high_simularity_threshold, low_simularity_threshold,
                   min_lts, max_lts, visual_system):

    print()
    print('MODEL = *** STM SEARCH ***')
    print()

    agents = AgentManager(mode=MODE,
                          yPos_range=None,
                          starting_xPos=100,
                          starting_yPos=500,
                          xPos_range=None,
                          population_size=300,
                          screen_height=SCREEN_HEIGHT,
                          vertical_fuel_depletion_rate=0.0005,
                          horizontal_fuel_depletion_rate=0.0005,
                          screen_width=SCREEN_WIDTH,
                          rand_init_population_size=300,
                          rand_init_mutation_rate=0.0,
                          med_sim_init_population_size=300,
                          med_sim_init_rand_agent_percenatge=0.1,
                          med_sim_init_med_memory_agent_percenatge=0.9,
                          med_sim_init_rand_mutation_rate=0.0,
                          med_sim_init_med_mutation_rate=0.3,
                          load_agent_filepath=load_agent_filepath,
                          save_path=save_trained_agent)

    # visual_system = VisualSystem.init(img_shape=(40, 40, 1),
    #                                   latent_dims=3,
    #                                   RE_delta=0.0,
    #                                   model_folder='CNND_main',
    #                                   start=1,
    #                                   MODE=MODE,
    #                                   preview_output=False
    #                                  )

    memory_system = MemorySystem.init(
        MODE=MODE,
        high_simularity_threshold=high_simularity_threshold,
        low_simularity_threshold=low_simularity_threshold,
        forget_usage_threshold=0,
        forget_age_threshold=50,
        max_memory_size=50)

    screen, clock, current_level, current_level_no = init(
        SCREEN_WIDTH, SCREEN_HEIGHT, levels)
    # np.vectorize wraps the project's 'normalise' helper (assumed to rescale a
    # latent value into a fixed range given the min_lts/max_lts bounds) so it can
    # be applied element-wise to the latent representation.
    norm = np.vectorize(normalise)

    internal_generation_count = 0
    level_generation_count = 0
    starting_attempts = attempts
    generation_record = np.zeros(len(levels)) - 1
    generation_record_count = 0
    lap_no = 1
    change_level_flag = False
    winners = []
    norm_latent_rep = []
    is_familiar = None
    init_first_original_memory = True
    memory = None

    event_flag = False
    done = False
    frame_count = 0
    while not done:

        # --- Event Processing ---

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True

            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_DOWN:
                    SIM_SPEED -= 10

                elif event.key == pygame.K_UP:
                    SIM_SPEED += 10

                elif event.key == pygame.K_s and MODE == 'train':
                    agents.save_best_agent()

                elif event.key == pygame.K_d:
                    DRAW = not DRAW

                elif event.key == pygame.K_SPACE:
                    pause = True
                    while pause:
                        for event in pygame.event.get():
                            if event.type == pygame.KEYDOWN:
                                if event.key == pygame.K_SPACE:
                                    pause = False

        for j in range(SIM_SPEED):

            if MODE in ('train', 'test', 'adapt'):
                # --- Update Agents ---
                i = 0
                while i < len(agents.not_sprites) and i > -1:
                    agent = agents.not_sprites[i]

                    if MODE == 'test' and visual_system is not None:
                        # ignore blackout screen at beginning
                        visual_system.process_image(screen,
                                                    agents,
                                                    frame_count,
                                                    mask_agent=True,
                                                    preview_images=False)
                        is_familiar = visual_system.is_familiar()

                        if is_familiar and init_first_original_memory:
                            latent_representation, _ = visual_system.generate_latent_representation(
                            )
                            norm_latent_rep = norm(min_lts, max_lts,
                                                   latent_representation)
                            memory_system.create_memory(
                                latent_representation=norm_latent_rep,
                                solution=agent.functional_system,
                                tag='mem_level' + str(current_level_no))
                            init_first_original_memory = False
                            visual_system.supress = True
                            event_flag = True

                        if visual_system.supress == False and is_familiar is not None:
                            # get latent representation from vs
                            latent_representation, _ = visual_system.generate_latent_representation(
                            )
                            norm_latent_rep = norm(min_lts, max_lts,
                                                   latent_representation)
                            memory, action = memory_system.query(
                                norm_latent_rep)
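                            # memory_system.query is assumed to map the similarity of
                            # the normalised latent code to one of the three actions
                            # handled below: reuse a stored solution, adapt from a
                            # medium-similarity memory, or adapt from scratch.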

                            if memory is not None and action == 'memory_to_fs_system_switch':
                                agents.temp_agent_store.append(agent)
                                agent = agents.reset_temp_agent_store(
                                    memory.get('solution'))
                                visual_system.supress = True
                                print(action)

                            elif memory is not None and action == 'adaption_using_medium_memory_as_init_foundation':
                                MODE = 'adapt'
                                agents.adaptive_med_sim_population_init(
                                    MODE, memory)
                                print(action)

                            elif action == 'adaption_using_low_memory_and_random_init_foundation':
                                MODE = 'adapt'
                                agents.adaptive_rand_population_init(
                                    MODE
                                )  # leave one model (the current one) unchanged
                                print(action)

                            event_flag = True

                    #   print('visual system is frame familiar: %s' % is_familiar)

                    agent.think(current_level, SCREEN_WIDTH, SCREEN_HEIGHT)
                    agent.update(SCREEN_HEIGHT)

                    if agent.current_closest_block.hit(
                            agent) or agent.off_screen(
                                SCREEN_HEIGHT,
                                SCREEN_WIDTH) or agent.fuel_depleted():

                        # agent.computeFitness()
                        agents.splice(i)
                        i -= 1

                    if len(winners) == 0:
                        if agent.rect.right > current_level[len(
                                current_level) - 1].top_block.rect.right - 10:
                            if save_first_winner == True and level_to_save == current_level_no and MODE == 'train':
                                agents.save_best_agent()
                                save_first_winner = False
                            change_level_flag = True
                            agent.fuel = 1.0
                            agent.rect.x = agents.starting_xPos
                            agent.rect.y = agents.starting_yPos
                            agent.velocity_x = 0
                            agent.velocity_y = 0
                            winners.append(agent)
                            agents.splice(i, mode='delete')
                            i -= 1

                    else:
                        if agent.rect.right > current_level[len(
                                current_level
                        ) - 1].top_block.rect.left and agent.rect.right < current_level[
                                len(current_level) - 1].top_block.rect.right:

                            change_level_flag = True
                            agent.fuel = 1.0
                            agent.rect.x = agents.starting_xPos
                            agent.rect.y = agents.starting_yPos
                            agent.velocity_x = 0
                            agent.velocity_y = 0
                            winners.append(agent)
                            agents.splice(i, mode='delete')
                            i -= 1

                    i += 1

                if change_level_flag:

                    if MODE != 'adapt':
                        agents.splice(index=None, mode='all')
                        agents.not_sprites = winners
                        change_level_flag = False
                        winners = []

                        last_level = False
                        if current_level_no == len(levels) - 1:
                            last_level = True

                        if current_level_no < len(levels) - 1:
                            current_level_no += 1
                        else:
                            current_level_no = 0

                        current_level = levels[current_level_no]

                        level_generation_count += internal_generation_count
                        generation_record[
                            generation_record_count] = level_generation_count
                        generation_record_count += 1
                        level_generation_count = 0

                        if last_level:
                            if current_level_no == 0 and end_on_lap == lap_no:
                                done = True
                                break
                            else:
                                lap_no += 1

                        if visual_system is not None and visual_system.supress == True:
                            visual_system.supress = False

                    else:
                        solution = get_best_winner(winners)
                        memory_system.create_memory(
                            latent_representation=norm_latent_rep,
                            solution=solution.functional_system,
                            tag='mem_level' + str(current_level_no))
                        agents.reset_temp_agent_store(
                            solution.functional_system)
                        agents.not_sprites[0].reset()
                        visual_system.supress = True
                        level_generation_count += internal_generation_count
                        internal_generation_count = 0
                        winners = []
                        agents.dead_agents = []
                        print('LEN' + str(len(agents.not_sprites)))
                        MODE = 'test'
                        print('LEVEL ' + str(current_level_no) +
                              ' ADAPT COMPLETE STM: SOLUTION: %s' %
                              (solution.functional_system.name))

                # check if all active agents are dead, then perform the GA and reset the game level and epochs
                if len(agents.not_sprites) == 0:

                    skip_ga = False
                    if MODE == 'test':
                        done = True
                        break

                        # dead_agent = agents.dead_agents[0]
                        # dead_agent.reset()
                        # new_population = [dead_agent]
                        # agents.update_arrays(input=new_population)
                        # MODE = 'adapt'
                        # agents.dead_agents = []
                        # agents.adaptive_med_sim_population_init(MODE, memory)
                        # skip_ga = True
                    # raise ValueError('AGENT DEATH DURING TEST MODE: MEMORY WAS INEFFECTIVE')

                    else:

                        if MODE == 'adapt' and internal_generation_count >= generation_limit:

                            if memory_system.current_action == 'adaption_using_medium_memory_as_init_foundation':
                                MODE = 'adapt'
                                agents.dead_agents = []
                                agents.adaptive_med_sim_population_init(
                                    MODE, memory)

                            elif memory_system.current_action == 'adaption_using_low_memory_and_random_init_foundation':
                                MODE = 'adapt'
                                agents.dead_agents = []
                                agents.adaptive_rand_population_init(
                                    MODE
                                )  # leave one model (the current one) unchanged
                            skip_ga = True

                            level_generation_count += internal_generation_count
                            internal_generation_count = 0

                            if EXTRACT_DATA == 'gen_test' and attempts == 0:
                                generation_record[
                                    generation_record_count] = level_generation_count
                                done = True
                                print('~FAIL')
                                break

                            else:
                                attempts -= 1

                            print(
                                'MODEL LOST IN BAD THOUGHT POOL: ATTEMPTS = %s out of %s RE-INIT GA PROCESSING'
                                % (attempts, starting_attempts))

                    if skip_ga == False:

                        new_population = GenticAlgorithm.produceNextGeneration(
                            population=agents.dead_agents,
                            agent_meta_data=agents.__dict__)

                        agents.update_arrays(new_population)

                        internal_generation_count += 1
                        print(
                            'generation = %s population size = %s level no = %s / %s'
                            % (internal_generation_count, len(new_population),
                               current_level_no, len(levels)))

                        # if MODE != 'adapt':
                        #     current_level_no = 0
                        #     current_level = levels[current_level_no]

            # --- Drawing ---

            screen.fill(WHITE)

            if DRAW:
                for block in current_level:
                    block.draw(screen)

                agents.draw(screen, mode=MODE)

            capture_success = capture(surface=screen,
                                      mode=MODE,
                                      remove_blank_frames=True,
                                      level_number=current_level_no,
                                      save_folder_path=capture_filename,
                                      preview=True)

            if MODE == 'capture' and capture_success:
                if current_level_no < len(levels) - 1:
                    current_level_no += 1
                    current_level = levels[current_level_no]
                else:
                    done = True

            pygame.display.flip()
            clock.tick()
            frame_count += 1

            if change_level_flag:
                change_level_flag = False

            if event_flag:

                if memory is None:
                    sim_score = 'N/A'
                else:

                    if isinstance(memory, list):
                        min_sim = memory[0].get('similarity_score')

                        for item in memory:
                            sim = item.get('similarity_score')
                            if sim < min_sim:
                                min_sim = sim
                        sim_score = 'minimum = ' + str(min_sim)

                    else:
                        sim_score = memory.get('similarity_score')

                if MODE == 'adapt':
                    print(
                        'Mode: %s is familiar: %s model in use: %s similarity: %s'
                        % (MODE, is_familiar, 'Finding solution', sim_score))

                elif MODE == 'test':
                    print(
                        'Mode: %s is familiar: %s model in use: %s Similarity: %s'
                        % (MODE, is_familiar,
                           agents.not_sprites[0].functional_system.name,
                           sim_score))

                event_flag = False

    pygame.quit()
    print('SIMULATION LEVEL COMPLETED BY AGENT')
    return generation_record
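A hypothetical driver for the STM variant above. The VisualSystem.init arguments mirror the commented-out block inside run_simulation, the similarity thresholds mirror code example #3, and the remaining values (including min_lts/max_lts and the level layout) are illustrative only. It additionally assumes the build_blueprint, Block, and VisualSystem helpers used in the earlier examples.

if __name__ == '__main__':
    level1 = build_blueprint([400, 450, 580, 100, BLACK],
                             multipliers=4,
                             xPos_interval=50)
    levels = Block.generate(SCREEN_WIDTH, SCREEN_HEIGHT, 100, level1)

    visual_system = VisualSystem.init(img_shape=(40, 40, 1),
                                      latent_dims=3,
                                      RE_delta=0.0,
                                      model_folder='CNND_main',
                                      start=1,
                                      MODE='test',
                                      preview_output=False)

    generation_record = run_simulation(DRAW=True,
                                       SCREEN_HEIGHT=SCREEN_HEIGHT,
                                       SCREEN_WIDTH=SCREEN_WIDTH,
                                       MODE='test',
                                       EXTRACT_DATA=None,
                                       SIM_SPEED=1,
                                       save_first_winner=False,
                                       level_to_save=0,
                                       generation_limit=10,
                                       end_on_lap=1,
                                       capture_filename='test_report',
                                       levels=levels,
                                       attempts=3,
                                       save_trained_agent='sim_test_main_agent',
                                       load_agent_filepath='sim_data/default_best',
                                       high_simularity_threshold=0.05,
                                       low_simularity_threshold=0.09,
                                       min_lts=0.0,  # illustrative bounds
                                       max_lts=1.0,  # for 'normalise'
                                       visual_system=visual_system)
    print(generation_record)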