def scoreGame(view: GameViewSource, showDisplay: bool = False, showFullDisplay: bool = False, slowDown: int = 1):
    """Run the main game-scoring loop over frames pulled from *view*.

    Parameters:
        view: frame source; must expose ``read()``, ``fps`` and ``netX``.
        showDisplay: if True, pace playback via ``cv2.waitKey`` and let the
            user quit with 'q'.
        showFullDisplay: if True, render the scoreboard in a fullscreen
            OpenCV window, refreshed from the game monitor.
        slowDown: playback slow-down factor; -1 means step frame-by-frame,
            waiting for Enter between frames.
    """
    prevFrame = None

    # Create the fullscreen scoreboard window only when requested.
    if showFullDisplay:
        cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

    servingSide = getSideSignal(view, [0, 0], showFullDisplay)
    if servingSide is None:
        # Fixed typo in user-facing message (was "Quiting...").
        print('Quitting...')
        exit(-1)

    ball = Ball(view.netX, servingSide=servingSide)
    game = GameState(view)
    game.begin(view.netX, servingSide)
    currentDisplay = getDisplay(game.score, servingSide)
    print('Got serving side')
    gameMonitor = GameMonitor(game)

    while True:
        frame = view.read()
        # A None frame signals the end of the video/stream.
        if frame is None:
            print("Stream ended.")
            break

        # Only advance game state when a ball position could be extracted
        # from the current/previous frame pair.
        if ball.updatePosFromFrame(prevFrame, frame, showProcessedFrame=False, showMaskFrame=False):
            ball.updateProcessedData(output=False)
            game.updateState(ball, output=False)
            gameMonitor.printNewEvents()

        if showDisplay:
            # Delay scaled by slowDown; with slowDown == -1 we fall through
            # to interactive single-stepping below.
            key = cv2.waitKey(int(1000 / view.fps * (slowDown if slowDown != -1 else 1.0)))
            if key == ord('q'):
                # Fixed typo in user-facing message (was "Quiting.").
                print('Quitting.')
                exit(0)
            if slowDown == -1:
                input('>')

        if showFullDisplay:
            # Keep showing the last known display until the monitor
            # produces a new one.
            newDisplay = gameMonitor.getGameDisplay()
            if newDisplay is not None:
                currentDisplay = newDisplay
            cv2.imshow('window', currentDisplay)
            if cv2.waitKey(1) == ord('q'):
                break

        prevFrame = frame
def setup(self):
    """Initialise all game subsystems, the HUD, and the initial state.

    Order matters: subsystem setup (audio, textures, sprites, animations,
    meshes) must complete before the HUD and the initial game state are
    installed.  Sets ``self.is_setup`` so callers can check readiness.
    """
    # State
    self._state = GameState()
    # Setup Functions
    self.setup_audio()
    self.setup_textures()
    self.setup_sprites()
    self.setup_animations()
    self.setup_meshes()
    # Setup Hud
    GameHud.get_singleton().setup()
    # Initial State
    self.set_state(GameState_Game())
    # Mark setup as complete.
    self.is_setup = True
def value_iteration(self, epsilon=0.1):
    """Run value iteration to convergence and derive a greedy policy.

    V is initialised to 1 at goal states and 0 elsewhere.  Each sweep
    then updates every state: goal states keep their terminal reward,
    while all other states receive a Q-update for every action, with the
    policy set to the arg-max action and V to the corresponding Q-value.
    Sweeps stop once the largest difference between V and the previous
    V is below *epsilon*.

    Parameters:
        epsilon: convergence threshold.  Previously hard-coded to 0.1,
            which is kept as the default for backward compatibility.

    Returns:
        tuple: ``(policy, update_cycle)`` — the greedy policy mapping
        state tuples to actions (None at goal states), and the number of
        completed update sweeps.
    """
    # Initialise V: terminal reward 1 at goal states, 0 elsewhere.
    for state in self.game.get_valid_states():
        self.v[state.value()] = 1 if self.game.is_goal(state) else 0

    v_last = copy.deepcopy(self.v)
    self.q = self.init_q(self.v, self.possible_actions)
    policy = self.init_policy(self.v)

    # is_first_round forces at least one sweep before the convergence
    # test can terminate the loop.
    is_first_round = True
    update_cycle = 0
    while not self.v_s_comp_v_s_last_less_than_epsilon(
            v_last, self.v, epsilon, is_first_round):
        is_first_round = False
        v_last = copy.deepcopy(self.v)
        for tuple_state in self.v:
            if self.game.is_goal(
                    GameState((tuple_state[0], tuple_state[1]),
                              (tuple_state[2], tuple_state[3]))):
                # Terminal state: no action to take, fixed reward value.
                policy[tuple_state] = None
                self.v[tuple_state] = 1
            else:
                # Bellman backup over every action, then act greedily.
                for action in self.possible_actions:
                    self.update_q_iteration(self.game, tuple_state, action,
                                            self.q, self.gamma, v_last)
                policy[tuple_state] = self.arg_max_iteration_version(
                    tuple_state, self.q)
                self.v[tuple_state] = self.q[tuple_state][
                    policy[tuple_state]]
        print(update_cycle)  # progress logging, one line per sweep
        update_cycle += 1

    return policy, update_cycle