def main():
    """Run the self-driving loop: grab frames, classify them, press keys.

    Toggled in/out of pause with the T key; releases all driving keys on
    pause so the car does not keep moving.
    """
    # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)

    # Train model
    model = load()

    prev_tick = time.time()
    # On-screen countdown so the player can focus the game window in time.
    for remaining in (4, 3, 2, 1):
        print(remaining)
        time.sleep(1)

    paused = False
    while True:
        if not paused:
            # 800x600 windowed mode
            # screen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
            frame = grab_screen(region=(0, 40, 800, 640))
            print('loop took {} seconds'.format(time.time() - prev_tick))
            prev_tick = time.time()

            # Resize to the network's expected 299x299x3 input.
            frame = cv2.resize(frame, (299, 299))
            probs = model.predict([frame.reshape(1, 299, 299, 3)])
            print(probs)

            turn_thresh = .75
            fwd_thresh = 0.70

            # Class order assumed [left, forward, right, stop] — TODO confirm
            # against the training labels. Falls back to driving straight.
            if probs[0][1] > fwd_thresh:
                straight()
            elif probs[0][0] > turn_thresh:
                left()
            elif probs[0][2] > turn_thresh:
                right()
            elif probs[0][3] > turn_thresh:
                stop()
            else:
                straight()

        pressed = key_check()
        # p pauses game and can get annoying; T toggles this script instead.
        if 'T' in pressed:
            if paused:
                paused = False
                time.sleep(1)
            else:
                paused = True
                # Release every driving key so nothing stays held while paused.
                ReleaseKey(A)
                ReleaseKey(W)
                ReleaseKey(D)
                ReleaseKey(S)
                time.sleep(1)
pyautogui.click()

# Create some setup variables
sct = mss.mss()
goingRight = True
count = 0
kernel = np.ones((3, 3), np.uint8)

# Used to hold images until game is over then we can
# write them to disk.
image_holder = []

# Start game loop
# holding h will break loop
while True:
    keys = key_check()
    # BUG FIX: key_check() reports the currently-held keys; a membership
    # test ('H' in keys) matches how it is used elsewhere ('T' in keys),
    # whereas `keys == "H"` only matched when H was the sole reported key.
    if 'H' in keys:
        break
    count += 1
    # Define our area and grab screen.
    scr = sct.grab({
        'left': 0,
        'top': 390,
        'width': 440,
        'height': 50
    })
    # Turn screen grab into numpy array.
    img = np.array(scr)
def main():
    """Drive using the model's 9-way prediction, with motion-based unstuck logic.

    Tracks a 3-frame window (t_minus / t_now / t_plus) for motion detection;
    when average motion drops below `motion_req`, performs a random reverse
    maneuver to escape. T toggles pause.
    """
    last_time = time.time()
    # Countdown so the player can focus the game window.
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

    paused = False
    mode_choice = 0

    # Seed the motion-detection frame window with the first capture.
    screen = grabscreen(region=(0, 40, GAME_WIDTH, GAME_HEIGHT))
    screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
    t_minus = screen
    t_now = screen
    t_plus = screen

    # mode_choice index -> (action, label); order matches the model's classes.
    actions = {
        0: (straight, 'straight'),
        1: (reverse, 'reverse'),
        2: (left, 'left'),
        3: (right, 'right'),
        4: (forward_left, 'forward+left'),
        5: (forward_right, 'forward+right'),
        6: (reverse_left, 'reverse+left'),
        7: (reverse_right, 'reverse+right'),
        8: (no_keys, 'nokeys'),
    }
    # Unstuck maneuvers: (back out, pull out) pairs.
    # 0 = reverse straight, turn left out
    # 1 = reverse straight, turn right out
    # 2 = reverse left, turn right out
    # 3 = reverse right, turn left out
    escapes = (
        (reverse, forward_left),
        (reverse, forward_right),
        (reverse_left, forward_right),
        (reverse_right, forward_left),
    )

    while True:
        if not paused:
            screen = grabscreen(region=(0, 40, GAME_WIDTH, GAME_HEIGHT + 40))
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
            print(screen.shape)
            last_time = time.time()

            # Slide the 3-frame motion window; blur the newest frame to
            # suppress pixel noise in the delta.
            delta_count = motion_detection(t_minus, t_now, t_plus)
            t_minus = t_now
            t_now = t_plus
            t_plus = screen
            t_plus = cv2.blur(t_plus, (4, 4))

            # NOTE(review): assumes the capture is HEIGHT x WIDTH x 3 —
            # reshape raises otherwise; confirm GAME_* vs HEIGHT/WIDTH.
            prediction = model.predict(screen.reshape(1, HEIGHT, WIDTH, 3))[0]
            prediction = np.array(prediction)# * np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2])

            # BUG FIX: original read `np.argpartition(a, -2)[1]` — `a` was
            # undefined (NameError) and index [1] is not the runner-up.
            # argpartition(prediction, -2)[-2] is the second-largest index.
            second = np.argpartition(prediction, -2)[-2]
            mode_choice = np.argmax(prediction)
            print('Choice: {}'.format(mode_choice))
            print('Second: {}'.format(second))
            print(prediction)

            action, choice_picked = actions[int(mode_choice)]
            action()

            motion_log.append(delta_count)
            motion_avg = round(mean(motion_log), 3)
            print('loop took {} seconds. Motion: {}. Choice: {}'.format(
                round(time.time() - last_time, 3), motion_avg, choice_picked))

            # Low average motion over a full log window => probably stuck.
            if motion_avg < motion_req and len(motion_log) >= log_len:
                print('WERE PROBABLY STUCK FFS, initiating some evasive maneuvers.')
                back_out, pull_out = escapes[random.randrange(0, 4)]
                back_out()
                time.sleep(random.uniform(1, 2))
                pull_out()
                time.sleep(random.uniform(1, 2))
                # Drop all but the 2 newest motion samples so the stuck
                # check doesn't immediately re-trigger.
                del motion_log[:log_len - 2]

        keys = key_check()
        # p pauses game and can get annoying; T toggles this script.
        if 'T' in keys:
            if paused:
                paused = False
                time.sleep(1)
            else:
                paused = True
                # NOTE(review): the reverse key (S) is not released here,
                # unlike the other driving keys — confirm intended.
                ReleaseKey(A)
                ReleaseKey(W)
                ReleaseKey(D)
                time.sleep(1)
def main(self):
    """Record fishing mini-game samples: (fish offset, floor offset, key pressed).

    Loads any existing training data / frame counts, template-matches the
    fishing region for the configured zoom level, and appends one sample per
    captured frame while a fishing session is active. When a session ends,
    saves everything, emits the score, and optionally uploads the data.
    """
    print("Running on: {}x{}".format(self.res[0], self.res[1]))
    print("Using {} key".format(self.key))
    print("Autosend: {}".format(self.autosend))
    print("Zoom: {}".format(self.zoom))

    # NOTE(review): a per-zoom (width, height) calibration table used to be
    # applied here via cv2.resize; template files per zoom replaced it.

    # Load (or start fresh) the accumulated training samples.
    file_name = 'Data\\training_data.npy'
    if os.path.isfile(file_name):
        print("Training file exists, loading previos data!")
        training_data = list(np.load(file_name))
    else:
        print("Training file does not exist, starting fresh!")
        training_data = []

    # Per-session frame counts (one entry per finished fishing session).
    frame_file = 'Data\\frames.npy'
    if os.path.isfile(frame_file):
        print("Frames file exists, loading previos data!")
        frames = list(np.load(frame_file))
    else:
        print("Frames file does not exist, starting fresh!")
        frames = []

    # Template of the fishing region for the configured zoom level.
    fishing_region_file = 'media\\Images\\fr {}.png'.format(self.zoom)
    if os.path.exists(fishing_region_file):
        region_template = cv2.imread(fishing_region_file)
        print(fishing_region_file)
    else:
        # No template for this zoom level: nothing sensible to match against.
        quit()

    region_template_gray = cv2.cvtColor(region_template, cv2.COLOR_BGR2GRAY)
    wr, hr = region_template_gray.shape[::-1]
    # BUG FIX: the original hard-coded "w: 121 h: 474" with no format
    # placeholders, so the actual template size was never printed.
    print("w: {} h: {}".format(wr, hr), end='\t')

    was_fishing = False
    counter = 1
    while self.run:
        res_x, res_y = self.res
        screen = np.array(ImageGrab.grab(bbox=(0, 40, res_x, res_y + 40)))
        fishing, green_bar_window, floor_height = fishing_region(
            screen, region_template_gray, wr, hr)

        if fishing:
            if counter == 1:
                # Timestamp of the first fishing frame (for yield stats).
                initial_time = datetime.datetime.now()
                counter = 2
            # process every frame (would be nice if it could process every 5
            # or so frames, so the process becomes faster).
            contour, green_bar_height, lowest_point = process_img(screen, green_bar_window)
            fish_detected, fish_height, searching_nemo = fish(green_bar_window)
            # +: fish is below the green bar, -: fish is above the green bar
            d_rect_fish = fish_height - green_bar_height
            d_rect_floor = floor_height - lowest_point  # always +
            key_pressed = key_check(self.key)
            data = [d_rect_fish, d_rect_floor, key_pressed]  # e.g. [231, 456, 1]
            training_data.append(data)
            print(data)
            was_fishing = True

        if not fishing and was_fishing:
            # Session just ended. The original duplicated this whole block for
            # the "frames is empty" case, but when frames == [] we have
            # sum(frames) == 0, so both cases reduce to the same formula.
            frames.append(len(training_data) - sum(frames))
            print("Frames analysed:\t", frames[-1])
            np.save(frame_file, frames)
            print("Saving...")
            np.save(file_name, training_data)
            was_fishing = False
            self.score.emit(sum(frames))

            if self.autosend:
                with open("config.json", 'r') as f:
                    output = json.loads(f.read())
                BASE_URL = 'http://192.168.1.102'
                response_code = SendFiles.send_data(
                    BASE_URL, output['User'], output['Password'])
                self.data_response_code.emit(response_code)

            # Debug measurements: session yield relative to an ideal 30 FPS.
            final_time = datetime.datetime.now()
            time_delta = final_time - initial_time
            print(time_delta.total_seconds())
            frame_yield = 100 * frames[-1] / (time_delta.total_seconds() * 30)
            print("Rendimento: {}%".format(frame_yield))
def main(self):
    """Capture fishing mini-game frames and key presses into per-session .npy files.

    Runs until `self.run` is cleared by the GUI. Each detected fishing session
    accumulates (region image, key state) rows; on session end the data is
    validated and saved under a fresh UUID file name, and `send_data` is
    emitted so the main thread can upload it.
    """
    # Unique file name
    file_name = 'Data\\Training Data\\%s.npy' % uuid4()
    logger.info("Training data file created")
    # Rows of [window, key_pressed]; shape [0, 2] so np.vstack can append.
    training_data = np.empty(shape=[0, 2])
    region_template, wr, hr = self.load_template(
        zoom_level=-4
    )  # Default value is -4, but it cycles trough. could be from -5 to +5

    # Control variables:
    # was_fishing: flags whether the previous frame belonged to a fishing
    # session. When the mini-game is detected, this is set True at the end of
    # the loop. On the next frame, if the region is NOT detected while
    # was_fishing is True, the session has just ended and the data-capture
    # finalization must run.
    # coords: coordinates of the reduced region where the mini-game was found.
    was_fishing = False
    coords = None
    logger.info("Data started")

    while self.run:
        # res_x, res_y = self.res
        screen = grabscreen.grab_screen()  # Return BGR screen
        if screen is not None:
            # Finds the thin area the fish stays at; once coords are known,
            # search only that sub-rectangle to speed up detection.
            if coords is not None:
                region = fishing_region(
                    screen[coords[0]:coords[1], coords[2]:coords[3]],
                    region_template, wr, hr)
            else:
                region = fishing_region(screen, region_template, wr, hr)

            zoom_dict = self.find_zoom(screen)
            if zoom_dict['Found']:
                # In subsequent fishing sessions, it will start by trying this zoom level
                logger.info(f"Zoom used: {zoom_dict['Zoom']}")
                region_template, wr, hr = self.load_template(
                    zoom_dict['Zoom'])

            if region["Detected"]:
                # Region detected: record this frame and the player's key press.
                window = region["Region"]
                key_pressed = key_check(self.key)  # return 1 or 0
                data = [
                    window, key_pressed
                ]  # or data = np.array([key_pressed, window], dtype=object)
                training_data = np.vstack((training_data, data))
                # Contants for the next loop
                was_fishing = True
                bgr_screen_last = region["BGR Region"]
                # For the first frame of the detected region, get its coordinates to reduce the area to look for it again
                if coords is None:
                    print("Found")
                    bgr_screen_first = region["BGR Region"]
                    coords = region["Coords"]
                    logger.info("Coordinates found: %s" % coords)
                    initial_time = datetime.datetime.now()

            # If area not detected this frame, but was on the last one, this means fishing is over.
            if not region["Detected"] and was_fishing:
                logger.info("Fishing finished")
                final_time = datetime.datetime.now()
                new_frames = np.float64(len(training_data))
                print("Frames analysed: %s" % new_frames)
                # Only save the session if it produced at least 75 frames.
                if new_frames >= 75:
                    validated = self.validate(bgr_screen_first, bgr_screen_last)
                    verified = verify_too_similar_frames(training_data)
                    if validated and verified:
                        np.save(file_name, training_data)
                        # Signal the main thread to send the collected data.
                        self.send_data.emit()
                        print("Session saved!")
                # Necessary to reset the region coordinates after every fishing session.
                training_data = np.empty(shape=[0, 2])
                file_name = 'Data\\Training Data\\%s.npy' % uuid4()
                coords = None
                was_fishing = False
                # Measurements (debug):
                time_delta = (final_time - initial_time).total_seconds()
                median_fps = round(new_frames / time_delta, 2)
                print(f"FPS: {median_fps}\n")
                method = 'np.vstack'
                w_img, h_img = window.shape[::-1]
                with open("Data\\log.txt", 'a') as f:
                    f.write(
                        f"Method: {method}\nMedian FPS: {median_fps}\ndTime: {time_delta}s\n"
                        f"Frames: {new_frames}\nSize: ({w_img}, {h_img})\n\n"
                    )
    # Reached when the user clicks "Stop" in the GUI.
    self.finished.emit()