Code Example #1
def SAHC_sideways():
    """
    Implement Steepest Ascent Hill Climbing with Sideways Steps Here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """

    initial_actions_vector = get_random_action_list()
    to_print = ""
    for i in range(50):
        print("round num: " + str(i))
        curr_action = initial_actions_vector[i]
        for act in GameAction:
            if act != curr_action:
                test_actions_vector = initial_actions_vector.copy()
                test_actions_vector[i] = act  # put the new action here
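                # '<=' also accepts equal-fitness candidates, so sideways steps are taken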
                if get_fitness(initial_actions_vector) <= get_fitness(
                        test_actions_vector):
                    initial_actions_vector[i] = test_actions_vector[i]
    for action in initial_actions_vector:
        to_print = to_print + action.name + " "
    print(to_print)
Code Example #2
def local_search():
    """
    Implement your own local search algorithm here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state/states
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    k = 15
    n = 50
    list_new_beam = [[np.random.choice(list(GameAction), p=[0.05, 0.9, 0.05]) for _ in range(n)] for _ in range(k)]
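    # the beam is stored as {fitness: state}; note that states with equal fitness
    # collide on the same key and overwrite one another, shrinking the effective beam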
    new_beam = dict()
    for item in list_new_beam:
        new_beam[get_fitness(tuple(item))] = item
    for i in range(n):
        beam = new_beam.copy()
        new_beam = dict()
        for s in beam.values():
            for action in list(GameAction):
                new = s.copy()
                new[i] = action
                if len(new_beam) < k:
                    new_beam[get_fitness(tuple(new))] = new
                else:
                    h_new = get_fitness(tuple(new))
                    if h_new > min(new_beam.keys()):
                        new_beam.pop(min(new_beam.keys()))
                        new_beam[h_new] = new
    print(new_beam[max(new_beam.keys())])
    print(get_fitness(tuple(new_beam[max(new_beam.keys())])))
Code Example #3
File: submission.py  Project: dekool/intro_ai
def local_search():
    """
    Implement your own local search algorithm here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state/states
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    game_duration = 50
    initial_state = [GameAction.STRAIGHT]
    current_state = LocalSearchState(initial_state, game_duration)
    sideways = 0
    limit_sidesteps = 50
    last_best_val = -np.inf
    last_best_actions = initial_state
    random_steps_allowed = 3
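    # random-restart wrapper: after each climbing pass, restart from a fresh random vector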
    for r in range(random_steps_allowed):
        for i in range(game_duration):
            best_val = -np.inf
            best_states = []
            for new_state in current_state.get_legal_actions_double_move(i):
                new_val = get_fitness(tuple(new_state.actions))
                if new_val > best_val:
                    best_val = new_val
                    best_states = [new_state]
                elif new_val == best_val:
                    best_states.append(new_state)
            current_val = get_fitness(tuple(current_state.actions))
            if best_val > current_val:
                index = np.random.choice(len(best_states))
                current_state = best_states[index]
                print("best val so far:" + str(best_val))
                sideways = 0
            elif best_val == current_val and sideways < limit_sidesteps:
                best_states.append(current_state)
                index = np.random.choice(len(best_states))
                current_state = best_states[index]
                sideways += 1
            else:  # no more improving moves or no more sidesteps allowed
                pass
        current_val = get_fitness(tuple(current_state.actions))
        print("last loop over. his score is: " + str(current_val))
        print("Last loop vector: " + str(current_state.actions))
        if current_val > last_best_val:  # this run improved on the best found so far
            last_best_val = current_val
            # deep-copy the state so the random reset below cannot clobber the saved best
            last_best_actions = copy.deepcopy(current_state)
        # new random initial state
        for j in range(game_duration):
            current_state.actions[j] = np.random.choice(list(GameAction))
    print("best move vector found: ")
    print(last_best_actions.actions)
    print("best score found:")
    print(last_best_val)
Code Example #4
def reproduce(steps1, steps2):
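    # single-point crossover at a random cut c (n is the module-level vector length):
    # splice the parents both ways and return the fitter of the two children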
    c = np.random.choice(n)
    steps3 = copy.deepcopy(steps1[0:c])
    steps3.extend(steps2[c:n])
    steps4 = copy.deepcopy(steps2[0:c])
    steps4.extend(steps1[c:n])
    if get_fitness(steps3) >= get_fitness(steps4):
        return steps3
    return steps4
Code Example #5
def local_search():
    """
    Implement your own local search algorithm here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state/states
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    # size of state
    l = 50
    # size of population
    n = 100
    # probability for crossover
    pc = 0.5
    # probability for mutate
    pm = 0.05
    num_of_generations = 30
    # creating first generation with random actions
    generation = np.random.choice(list(GameAction), (n, l))
    for i in range(num_of_generations):
        # finds fitness for all snakes in the current generation
        fitness_list = [get_fitness(tuple(actions)) for actions in generation]
        total_fitness = np.sum(fitness_list)
        # fitness-proportional selection: probability for each snake according to its score
        # (assumes all fitness values are positive)
        probability_list = fitness_list / total_fitness
        next_gen = []
        # building new generation, picking 2 parents -> crossover -> mutate
        for _ in range(n // 2):
            parents = np.random.choice(n, 2, p=probability_list)
            par1 = generation[parents[0]]
            par2 = generation[parents[1]]
            kid1 = par1.copy()  # copy: otherwise the kids alias the parent rows and mutation corrupts the population
            kid2 = par2.copy()
            # crossover with probability pc
            if np.random.uniform() < pc:
                crossover_index = np.random.choice(l)
                kid1[:crossover_index] = par1[:crossover_index]
                kid1[crossover_index:] = par2[crossover_index:]
                kid2[:crossover_index] = par2[:crossover_index]
                kid2[crossover_index:] = par1[crossover_index:]
            # mutate
            for j in range(l):
                if np.random.uniform() < pm:
                    kid1[j] = np.random.choice(list(GameAction))
                if np.random.uniform() < pm:
                    kid2[j] = np.random.choice(list(GameAction))
            # adding kids to new generation
            next_gen.extend([kid1, kid2])
        generation = next_gen
    fitness_list = [get_fitness(tuple(actions)) for actions in generation]
    print(generation[np.argmax(np.array(fitness_list))])
Code Example #6
def mutate(steps):
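    # one greedy pass over all 50 positions: adopt any action swap that strictly improves fitness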
    for i in range(50):
        steps_score = get_fitness(tuple(steps))
        neighbour = copy.deepcopy(steps)
        for action in GameAction:
            if action == steps[i]:
                continue
            neighbour[i] = action
            score = get_fitness(tuple(neighbour))
            if score > steps_score:
                steps_score = score
                steps = copy.deepcopy(neighbour)
    return steps
Code Example #7
def SAHC_sideways():
    """
    Implement Steepest Ascent Hill Climbing with Sideways Steps Here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    # size of our state
    n = 50
    # starting state (STRAIGHT, STRAIGHT ,..., STRAIGHT)
    actions = [GameAction.STRAIGHT for _ in range(n)]
    # going over each action in current state and checking which action gives the best results
    for i in range(n):
        best_action = []
        best_value = -np.inf
        # checking all actions possible
        for action in list(GameAction):
            actions[i] = action
            # getting fitness of current state with current action changed
            fitness = get_fitness(tuple(actions))
            if fitness > best_value:
                best_value = fitness
                best_action = [action]
            elif fitness == best_value:
                best_action.append(action)
        # picking best action
        actions[i] = np.random.choice(best_action)

    print(actions)
Code Example #8
def SAHC_sideways():
    """
    Implement Steepest Ascent Hill Climbing with Sideways Steps Here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    # 1) pick an initial state
    # np.random.choice(max_actions)
    n = 50
    random_vec = [GameAction(np.random.choice([0, 1, 2])) for _ in range(n)]
    for i in range(n):
        best_res = -np.inf  # so the first candidate always registers, even for non-positive fitness
        best_choice = []
        for new_action in [0, 1, 2]:
            random_vec[i] = GameAction(new_action)
            tuple_vec = tuple(random_vec)
            res = get_fitness(tuple_vec)
            if res > best_res:
                best_choice = [new_action]
                best_res = res
            elif res == best_res:
                best_choice.append(new_action)

        random_vec[i] = GameAction(np.random.choice(best_choice))
    print(random_vec)
Code Example #9
def local_search():
    k = 4
    N = 50
    NewBeam = [[
        np.random.choice(list(GameAction), p=[0.1, 0.8, 0.1]) for _ in range(N)
    ] for _ in range(k)]
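    # each beam entry is a triple: [action_vector, fitness, next turn to expand]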
    NewBeam = [[state, get_fitness(state), 0] for state in NewBeam]
    NewBeam = sorted(NewBeam, key=lambda x: x[1])
    Beam = []
    while True:
        if Beam and Beam[-1][1] == NewBeam[-1][1]:
            break
        Beam = NewBeam.copy()
        NewBeam = []
        for item in Beam:
            cur_turn = item[2]
            if cur_turn == N:
                check_insert_to_newbeam(NewBeam, item.copy(), k)
                NewBeam = sorted(NewBeam, key=lambda x: x[1])
                continue  # all moves fixed; without this, item[0][cur_turn] below indexes past the end
            for action in (GameAction.RIGHT, GameAction.STRAIGHT, GameAction.LEFT):
                item[0][cur_turn] = action
                check_insert_to_newbeam(NewBeam, item.copy(), k)
                NewBeam = sorted(NewBeam, key=lambda x: x[1])
    print("my moves were {}".format(Beam[-1][0]))
    print("and I got {}".format(Beam[-1][1]))
Code Example #10
def SAHC_sideways():
    """
    Implement Steepest Ascent Hill Climbing with Sideways Steps Here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    N = 50
    init_state = [GameAction.STRAIGHT] * N
    sideways = 0
    limit = N / 5
    M = get_fitness(init_state)  # best fitness seen so far; set here so the final print cannot hit an unbound name
    for i in range(N):
        best_val = np.NINF
        best_states = []
        for action in (GameAction.RIGHT, GameAction.STRAIGHT, GameAction.LEFT):
            tmp = init_state.copy()
            tmp[i] = action
            new_val = get_fitness(tmp)
            if new_val > best_val:
                best_val = new_val
                best_states = [tmp]
            elif new_val == best_val:
                best_states.append(tmp)
        state_fitness = get_fitness(init_state)
        if best_val > state_fitness:
            chosen_state = np.random.choice(len(best_states))
            init_state = best_states[chosen_state]
            sideways = 0
            M = best_val
        elif best_val == state_fitness and sideways <= limit:
            chosen_state = np.random.choice(len(best_states))
            init_state = best_states[chosen_state]
            sideways = sideways + 1
    print("the best combination I found was {} ".format(init_state))
    print("and I got " + str(M))
Code Example #11
File: submission.py  Project: dekool/intro_ai
def SAHC_sideways():
    """
    Implement Steepest Ascent Hill Climbing with Sideways Steps Here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    game_duration = 50
    initial_state = [GameAction.STRAIGHT]
    current_state = LocalSearchState(initial_state, game_duration)
    sideways = 0
    limit_sidesteps = 30
    for i in range(game_duration):
        best_val = -np.inf
        best_states = []
        for new_state in current_state.get_legal_actions_current_move(i):
            new_val = get_fitness(tuple(new_state.actions))
            if new_val > best_val:
                best_val = new_val
                best_states = [new_state]
            elif new_val == best_val:
                best_states.append(new_state)
        current_val = get_fitness(tuple(current_state.actions))
        if best_val > current_val:
            index = np.random.choice(len(best_states))
            current_state = best_states[index]
            print("best val so far:" + str(best_val))
            sideways = 0
        elif best_val == current_val and sideways < limit_sidesteps:
            # replace in random to one of the new best states, or stay with the current one
            best_states.append(current_state)
            index = np.random.choice(len(best_states))
            current_state = best_states[index]
            sideways += 1
        else:  # no more improving moves or no more sidesteps allowed
            pass
    best_val_found = get_fitness(tuple(current_state.actions))
    print("best move vector found: ")
    print(current_state.actions)
    print("best score found:")
    print(best_val_found)
Code Example #12
def check_insert_to_newbeam(NewBeam, item, k):
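    # advance the item one turn and refresh its fitness; insert it if the beam has
    # room, else replace the current worst (NewBeam is sorted ascending, so index 0 is the worst)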
    item[2] += 1
    item[1] = get_fitness(item[0])
    if len(NewBeam) < k:
        NewBeam.append([item[0].copy(), item[1], item[2]])
    elif NewBeam[0][1] < item[1]:
        NewBeam[0] = [item[0].copy(), item[1], item[2]]
Code Example #13
def local_search():
    """
    Implement your own local search algorithm here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state/states
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    to_print = ""
    max_fitness = -np.inf  # so even a non-positive first result is recorded
    max_actions_vector = []
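    # random-restart hill climbing: 10 independent runs, keeping the best vector found overall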
    for rounds in range(10):
        initial_actions_vector = get_random_action_list()
        print("round num:" + str(rounds))
        for i in range(50):
            print("itr num:" + str(i))
            curr_action = initial_actions_vector[i]
            for act in GameAction:
                if act != curr_action:
                    test_actions_vector = initial_actions_vector.copy()
                    test_actions_vector[i] = act  # put the new action here
                    if get_fitness(initial_actions_vector) <= get_fitness(
                            test_actions_vector):
                        initial_actions_vector[i] = test_actions_vector[i]
        if get_fitness(initial_actions_vector) > max_fitness:
            max_fitness = get_fitness(initial_actions_vector)
            max_actions_vector = initial_actions_vector.copy()
    print("max fitness: " + str(max_fitness))
    for action in max_actions_vector:
        to_print = to_print + action.name + " "
    print(to_print)
Code Example #14
def SAHC_sideways():
    """
    Implement Steepest Ascent Hill Climbing with Sideways Steps Here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    action_vector = tuple(int2GA(np.random.choice(2)) for i in range(50))
    max_value = get_fitness(action_vector)
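    # phase 1: random restarts - sample 100 random vectors and keep the fittest as the start state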
    for i in range(100):
        curr_vector = tuple(int2GA(np.random.choice(2)) for i in range(50))
        curr_value = get_fitness(curr_vector)
        if curr_value > max_value:
            action_vector = curr_vector
            max_value = curr_value
    print(action_vector)
    print(max_value)
    print("-----------------------------------------------------------")
    #action_vector = tuple(int2GA(i) for i in np.random.choice(3, 50))
    #print(action_vector)
    #print(get_fitness(action_vector))
    side_steps = 0
    action_set = {GameAction.LEFT, GameAction.STRAIGHT, GameAction.RIGHT}
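    # phase 2: steepest-ascent hill climbing with sideways steps from the vector found above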
    for i in range(50):
        curr_value = get_fitness(action_vector)
        operator = list(action_set.difference({action_vector[i]}))
        help_list = list(action_vector)
        max_val = -np.inf
        best_states = []
        for k in operator:
            help_list[i] = k
            new_state = tuple(help_list)
            new_state_value = get_fitness(new_state)
            if new_state_value > max_val:
                max_val = new_state_value
                best_states = [new_state]
            elif new_state_value == max_val:
                best_states.append(new_state)
        index = np.random.choice(len(best_states))
        if max_val > curr_value:
            action_vector = tuple(best_states[index])
            side_steps = 0
        elif max_val == curr_value and side_steps < 50:
            action_vector = tuple(best_states[index])
            print("side step")
            side_steps += 1
        else:
            print(action_vector)
            print(curr_value)
            break
Code Example #15
def SAHC_sideways_internal(steps):
    sideways_limit = 5
    sideways_count = 0
    best_score = get_fitness(tuple(steps))
    visited_neighbours = []

    while sideways_count <= sideways_limit:
        current_score = best_score
        sideways_neighbours = []
        best_neighbour = []
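        # sweep all 50 positions, tracking the best strict improvement and any
        # not-yet-visited equal-fitness (sideways) neighbours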
        for i in range(50):
            neighbour = copy.deepcopy(steps)
            for action in GameAction:
                if action == steps[i]:
                    continue
                neighbour[i] = action
                score = get_fitness(tuple(neighbour))
                if score > best_score:
                    best_score = score
                    best_neighbour = copy.deepcopy(neighbour)
                if score == current_score and neighbour not in visited_neighbours:
                    sideways_neighbours.append(neighbour)
        if len(best_neighbour) == 0 and len(sideways_neighbours) == 0:
            break
        elif best_score > current_score:
            print("changing")
            print(get_fitness(tuple(steps)))
            steps = copy.deepcopy(best_neighbour)
            sideways_count = 0
            visited_neighbours = [steps]
        elif len(sideways_neighbours) > 0:
            steps = sideways_neighbours[0]
            visited_neighbours.append(sideways_neighbours[0])
            sideways_count += 1
        print(get_fitness(tuple(steps)))

    print(get_fitness(tuple(steps)))
    print(steps)
Code Example #16
def SAHC_sideways():
    """
    Implement Steepest Ascent Hill Climbing with Sideways Steps Here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    n = 50

    "initial state derived from several experiments as described in the PDF"
    current_state = [GameAction.LEFT, GameAction.STRAIGHT, GameAction.RIGHT, GameAction.RIGHT, GameAction.STRAIGHT,
                     GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.RIGHT, GameAction.STRAIGHT, GameAction.LEFT,
                     GameAction.RIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.RIGHT, GameAction.STRAIGHT,
                     GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT,
                     GameAction.RIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT,
                     GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT,
                     GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.LEFT, GameAction.RIGHT,
                     GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT,
                     GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT,
                     GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT,
                     GameAction.STRAIGHT, GameAction.STRAIGHT, GameAction.STRAIGHT,  GameAction.RIGHT, GameAction.LEFT,
                     GameAction.LEFT]

    best_val = np.NINF
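    # note: best_val is not reset per position, so a change at a later position is
    # only adopted if it matches or beats the best fitness seen so far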
    for i in range(n):
        best_states = []
        for action in list(GameAction):
            new_state = current_state.copy()
            new_state[i] = action
            new_value = get_fitness(tuple(new_state))
            if new_value > best_val:
                best_val = new_value
                best_states = [new_state]
            elif new_value == best_val:
                best_states += [new_state]
        random_index = np.random.choice(range(len(best_states)))
        current_state = best_states[random_index]
    print(current_state)
Code Example #17
def local_search():
    """
    Implement your own local search algorithm here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state/states
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    population_number = 2
    population = []
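    # hybrid scheme: seed a small population with hill-climbed random vectors, then
    # repeatedly halve it via crossover (reproduce) and greedy mutation (mutate)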

    for j in range(population_number):
        steps = []
        for i in range(50):
            steps.append(np.random.choice(list(GameAction)))
        SAHC_sideways_internal(steps)
        population.append(steps)


    while population_number > 1:
        # sort in descending fitness so the fittest individuals come first
        population.sort(key=get_fitness, reverse=True)
        population_number //= 2
        if population_number == 1:
            break
        population = population[0:population_number]


        new_population = []
        for i in range(len(population) // 2):
            child = reproduce(population[2 * i], population[2 * i + 1])
            child = mutate(child)
            new_population.append(child)
        population = copy.deepcopy(new_population)
        population_number = len(new_population)


    print(population[0])
    print(get_fitness(population[0]))
Code Example #18
def local_search():
    """
    Implement your own local search algorithm here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state/states
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """
    T_factor = 0.66
    for j in range(1):
        best_value_ever = -np.inf
        best_action_ever = []
        # the local search algo for 10 times:
        #-----------------------------------------------------------------------------------------------
        for k in range(10):
            action_vector = tuple(int2GA(i) for i in np.random.choice(3, 50))
            side_steps = 0
            action_set = {
                GameAction.LEFT, GameAction.STRAIGHT, GameAction.RIGHT
            }
            T = 10
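            # T is an annealing temperature: a worse move is accepted with probability exp(-|delta| / T)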
            best_value = -np.inf
            best_action = []
            # one iteration
            # ____________________________________________________________________________________
            for i in range(50):
                curr_value = get_fitness(action_vector)
                operator = list(action_set.difference({action_vector[i]}))
                help_list = list(action_vector)
                max_ = -np.inf
                best_states = []
                for cand in operator:
                    help_list[i] = cand
                    new_state = tuple(help_list)
                    new_state_value = get_fitness(new_state)
                    if new_state_value > max_:
                        max_ = new_state_value
                        best_states = [new_state]
                    elif new_state_value == max_:
                        best_states.append(new_state)
                index = np.random.choice(len(best_states))
                if max_ > curr_value:
                    action_vector = tuple(best_states[index])
                    curr_value = max_
                    side_steps = 0
                elif max_ == curr_value and side_steps < 50:
                    action_vector = tuple(best_states[index])
                    side_steps += 1
                    curr_value = max_
                elif max_ < curr_value and flip_coin(
                        np.exp(-abs(max_ - curr_value) / T)):
                    action_vector = tuple(best_states[index])
                    side_steps = 0
                else:
                    break
                if best_value < curr_value:
                    best_action = action_vector
                    best_value = curr_value
                T = T * T_factor


            # ____________________________________________________________________________________
            if best_value > best_value_ever:
                best_value_ever = best_value
                best_action_ever = best_action
        print(best_action_ever)
        print(best_value_ever)
        print(get_fitness(best_action_ever))
        T_factor *= 0.95

Code Example #19
def local_search():
    """
    Implement your own local search algorithm here.
    We give you the freedom to choose an initial state as you wish. You may start with a deterministic state (think of
    examples, what interesting options do you have?), or you may randomly sample one (you may use any distribution you
    like). In any case, write it in your report and describe your choice.

    an outline of the algorithm can be
    1) pick an initial state/states
    2) perform the search according to the algorithm
    3) print the best moves vector you found.
    :return:
    """

    # Creating the initial population.
    new_population = []
    n = 50
    for j in range(3):
        random_vec = [
            GameAction(np.random.choice([0, 1, 2])) for _ in range(n)
        ]
        for i in range(n):
            best_res = -np.inf  # so the first candidate always registers, even for non-positive fitness
            best_choice = []
            for new_action in [0, 1, 2]:
                random_vec[i] = GameAction(new_action)
                tuple_vec = tuple(random_vec)
                res = get_fitness(tuple_vec)
                if res > best_res:
                    best_choice = [new_action]
                    best_res = res
                elif res == best_res:
                    best_choice.append(new_action)

            random_vec[i] = GameAction(np.random.choice(best_choice))
        # append once per restart, outside the position loop
        new_population.append(random_vec)

    num_generations = 24
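    # each generation: pick the two fittest vectors as parents, cross them over,
    # and carry only the parents and their two children forward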

    parent1_f = -1
    parent2_f = -1
    parent1 = []
    parent2 = []
    for generation in range(num_generations):
        for moves in new_population:
            fitness = get_fitness(tuple(moves))
            if fitness > parent1_f:
                # demote the previous best to second place before replacing it
                parent2_f, parent2 = parent1_f, parent1
                parent1_f, parent1 = fitness, moves
            elif fitness > parent2_f:
                parent2_f, parent2 = fitness, moves
        temp_res = crossover(parent1, parent2, 50)
        child1 = temp_res[0]
        child2 = temp_res[1]
        new_population = [parent1, parent2, child1, child2]
    max_f = -np.inf
    max_vec = []
    for sample in new_population:
        fitness = get_fitness(tuple(sample))
        if fitness > max_f:
            max_f = fitness
            max_vec = sample

#   print(max_f)
    print(max_vec)