Example #1
# window_size = (0,0,1920,1017)
# station_size = (230, 230, 1670, 930)
# WIDTH = 400
# HEIGHT = 200

# hp_station = cv2.cvtColor(cv2.resize(grab_screen(station_size),(WIDTH,HEIGHT)),cv2.COLOR_BGR2GRAY)
# # boss_blood = boss_hp(hp_station, 570)
# # last_hp = boss_blood
# # next_self_blood  = player_hp(hp_station)

# min_hp = 9

# check_point = (612, 187)
# # start_time = time.time()

import time

# Hp_getter reads HP and positions from game memory; the import path below is an
# assumption and may need adjusting to the project's layout
from Tool.GetHP import Hp_getter

h = Hp_getter()
last_hy = 0
while True:
    # take_action(6)
    px, py = h.get_play_location()    # player (x, y) position
    hx, hy = h.get_hornet_location()  # Hornet's (x, y) position
    # two consecutive samples of Hornet's y inside the (32, 32.5) band are
    # treated as a cue that she is about to use a skill
    if 32 < last_hy < 32.5 and 32 < hy < 32.5:
        print("skill")
    last_hy = hy
    time.sleep(0.25)
    # print(direction_reward(0, px, hx), "   ",distance_reward(0, px, hx), " ", px - hx)
    # print(h.get_play_location(), "   ",h.get_hornet_location())
    # hp_station = cv2.cvtColor(cv2.resize(grab_screen(station_size),(WIDTH,HEIGHT)),cv2.COLOR_RGBA2RGB)
    # fn = "./test_img/" + str(i) + ".png"
    # cv2.imwrite(fn, hp_station)
    # time.sleep(0.02)
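
The interesting part of this loop is the height check: when both the previous and the current Hornet y-coordinate fall inside the narrow band (32, 32.5), the example treats it as a cue that a skill is coming. A minimal sketch that factors this check out into helpers (in_band and skill_cue are illustrative names, not part of the project):

def in_band(y, low=32.0, high=32.5):
    """True if a y-coordinate lies strictly inside the (low, high) band."""
    return low < y < high

def skill_cue(prev_y, cur_y, low=32.0, high=32.5):
    """Two consecutive samples inside the band are read as 'skill incoming'."""
    return in_band(prev_y, low, high) and in_band(cur_y, low, high)

# usage inside the polling loop above:
#     if skill_cue(last_hy, hy):
#         print("skill")
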
Example #2
    total_remind_hp = 0

    act_rmp_correct = ReplayMemory(
        MEMORY_SIZE, file_name='./act_correct_memory')    # experience pool for correct attack actions
    act_rmp_wrong = ReplayMemory(
        MEMORY_SIZE, file_name='./act_wrong_memory')      # experience pool for wrong attack actions
    move_rmp_correct = ReplayMemory(
        MEMORY_SIZE, file_name='./move_correct_memory')   # experience pool for correct move actions
    move_rmp_wrong = ReplayMemory(
        MEMORY_SIZE, file_name='./move_wrong_memory')     # experience pool for wrong move actions

    # create a new model; if a save file exists, load it
    model = Model(INPUT_SHAPE, ACTION_DIM)

    # Hp counter
    hp = Hp_getter()

    model.load_model()
    algorithm = DQN(model, gamma=GAMMA, learnging_rate=LEARNING_RATE)
    agent = Agent(ACTION_DIM, algorithm, e_greed=0, e_greed_decrement=1e-6)

    # get user input, no longer needed
    # user = User()

    # paused at the beginning
    paused = True
    paused = Tool.Helper.pause_game(paused)

    max_episode = 30000
    # start training
    episode = 0
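
The agent above is constructed with e_greed=0 and e_greed_decrement=1e-6: e_greed is the probability of taking a random (exploratory) action, and e_greed_decrement is presumably how much that probability shrinks per step. A minimal, self-contained sketch of the epsilon-greedy selection rule itself (choose_action is an illustrative helper, not the project's Agent API):

import random

def choose_action(q_values, e_greed):
    """Epsilon-greedy: with probability e_greed pick a random action,
    otherwise pick the action with the highest Q-value."""
    if random.random() < e_greed:
        return random.randrange(len(q_values))
    return max(range(len(q_values)), key=lambda a: q_values[a])

# with e_greed == 0, as in the snippet above, the choice is purely greedy:
print(choose_action([0.1, 0.7, 0.3], e_greed=0.0))   # -> 1
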