def make_move(self, time_limit, players_score):
    """Make move with this Player.

    input:
        - time_limit: float, time limit for a single turn.
    output:
        - direction: tuple, specifying the Player's movement, chosen from
          self.directions
    """
    search_depth = 2  # fixed depth; tuned experimentally
    try:
        root = utils.State(np.copy(self.board), True, players_score,
                           self.num_of_turns, self.fruits_on_board.copy(),
                           self.pos_players)
        move_direction = self.strategy.search(root, search_depth, True)[1]
    except RuntimeError:
        # Not enough time for even a one-step minimax: take the first
        # legal move instead.
        move_direction = utils.State.firstLegal(self.board,
                                                self.pos_players[0])
    if move_direction is None:
        move_direction = utils.State.firstLegal(self.board,
                                                self.pos_players[0])
    # Apply the chosen move to our own player.
    row, col = self.pos_players[0]
    new_pos = (row + move_direction[0], col + move_direction[1])
    self.perform_move_on_selfPlayer(True, new_pos)
    return move_direction
def make_move(self, time_limit, players_score):
    """Make move with this Player.

    input:
        - time_limit: float, time limit for a single turn.
    output:
        - direction: tuple, specifying the Player's movement, chosen from
          self.directions
    """
    budget = self.get_turn_time()
    if budget is None:
        # Only a single move remains — reuse the precomputed one and
        # save the search time.
        move_direction = self.next_move
    else:
        root = utils.State(np.copy(self.board), True, players_score,
                           self.num_of_turns, self.fruits_on_board.copy(),
                           self.pos_players)
        move_direction = self.strategy.interruptible(root, budget)[1]
    if move_direction is None:
        move_direction = utils.State.firstLegal(self.board,
                                                self.pos_players[0])
    # Apply the chosen move to our own player.
    row, col = self.pos_players[0]
    new_pos = (row + move_direction[0], col + move_direction[1])
    self.perform_move_on_selfPlayer(True, new_pos)
    return move_direction
def make_move(self, time_limit, players_score):
    """Make move with this Player.

    input:
        - time_limit: float, time limit for a single turn.
    output:
        - direction: tuple, specifying the Player's movement, chosen from
          self.directions
    """
    player1_turn = True
    try:
        move_direction = self.strategy.interruptible(
            utils.State(np.copy(self.board), player1_turn, players_score,
                        self.num_of_turns, self.fruits_on_board.copy(),
                        self.pos_players),
            time_limit)[1]
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. If we don't have time even for a one-step
        # minimax, fall back to the first legal move.
        move_direction = utils.State.firstLegal(self.board,
                                                self.pos_players[0])
    if move_direction is None:
        move_direction = utils.State.firstLegal(self.board,
                                                self.pos_players[0])
    # Perform the chosen move on our own player.
    my_player_pos = self.pos_players[0]
    my_player_new_pos = (my_player_pos[0] + move_direction[0],
                         my_player_pos[1] + move_direction[1])
    self.perform_move_on_selfPlayer(True, my_player_new_pos)
    return move_direction
def make_move(self, time_limit, players_score):
    """Make move with this Player.

    input:
        - time_limit: float, time limit for a single turn.
    output:
        - direction: tuple, specifying the Player's movement, chosen from
          self.directions
    """
    deadline = time.time() + self.turn_time
    best_result = (-np.inf, (-1, 0))  # (score, direction) fallback
    depth = 1
    # Iterative deepening: try every direction at increasing depth until
    # the search itself runs out of time.
    while True:
        for direction in self.directions:
            root = utils.State(self.board, direction, self.pos,
                               self.current_turn, self.fruits_on_board_dict,
                               deadline)
            try:
                result = self.minimax_algo.search(root, depth, True)
                if result[0] > best_result[0]:
                    best_result = result
            except TimeoutError:
                # Out of time: commit the best move found so far.
                self.board[self.pos[0]][self.pos[1]] = -1
                self.pos = (self.pos[0] + best_result[1][0],
                            self.pos[1] + best_result[1][1])
                self.board[self.pos[0]][self.pos[1]] = 1
                return best_result[1]
        depth += 1
def add_succesor_to_list(self, changed_son_pos, direction, i,
                         is_father_max_player, j, my_son_player_pos,
                         opponent_son_pos, state, successors):
    """Build the child state reached by `direction` and append it to
    `successors` (mutated in place).

    input:
        - changed_son_pos: tuple, the board cell entered by the move.
        - direction: tuple, the move that produced this child.
        - i, j: ints, row/column forwarded to update_graph_and_board.
        - is_father_max_player: bool, whether the parent node was the
          maximizing player.
        - my_son_player_pos / opponent_son_pos: tuples, player positions
          in the child state.
        - state: utils.State, the parent state.
        - successors: list receiving the new child state.
    """
    new_player_score = state.current_player_score
    new_opponent_player_score = state.opponent_player_score
    # Credit a fruit on the entered cell to whichever player just moved.
    # NOTE(review): the parity (`not is_father_max_player` == "our move")
    # follows the original comment — confirm against the caller's turn
    # convention.
    if changed_son_pos in state.fruits_on_board_dictionary:
        if not is_father_max_player:  # Our move
            new_player_score += state.fruits_on_board_dictionary[
                changed_son_pos]
        else:
            new_opponent_player_score += state.fruits_on_board_dictionary[
                changed_son_pos]
    new_board, new_graph, fruits_on_board_real_dict = self.update_graph_and_board(
        changed_son_pos, i, is_father_max_player, j, state)
    new_son_state = utils.State(new_board, new_graph, direction,
                                my_son_player_pos, opponent_son_pos,
                                state.turn + 1, fruits_on_board_real_dict,
                                state.finish_time, state.pos,
                                new_player_score, new_opponent_player_score)
    successors.append(new_son_state)
def make_move(self, time_limit, players_score): """Make move with this Player. input: - time_limit: float, time limit for a single turn. output: - direction: tuple, specifing the Player's movement, chosen from self.directions """ # print("start computing Global AB move") # printing for self test start_time = time.time() allowed_time = min(self.time_for_curr_iter, time_limit) if self.first_player == -1: self.first_player = True state = utils.State(copy.deepcopy(self.board), self.pos, self.rival_pos, players_score, self.penalty_score, self.turns_till_fruit_gone, self.fruits_on_board_dict, self.first_player, time.time() + allowed_time - self.extra_safe_time) search_algo = SearchAlgos.AlphaBeta(utils.utility, utils.succ, utils.perform_move, utils.goal) depth = 1 best_move = None, None while depth <= self.max_turns: try: best_move = search_algo.search(state, depth, True) except TimeoutError: break depth += 1 if best_move[1] is None: # print("something went wrong,.. im out... probably not enough time for at least depth=1") # printing for self test exit(0) # print("depth is : ", depth - 1) # printing for self test if self.time_for_each_iter is not None: # print("my turn is: ", self.my_turn) # printing for self test self.time_for_curr_iter += self.time_for_each_iter[ self.my_turn] - (time.time() - start_time) self.my_turn -= 1 else: self.time_for_curr_iter += (self.time_for_curr_iter / self.risk_factor1) - (time.time() - start_time) # print("current iter took: ", time.time()-start_time) # printing for self test # print("next iter will take: ", self.time_for_curr_iter) # print("Global AB choose the move: ", best_move) self.max_turns -= 1 self.board[self.pos] = -1 tmp1 = best_move[1] self.pos = (self.pos[0] + tmp1[0], self.pos[1] + tmp1[1]) self.board[self.pos] = 1 self.turns_till_fruit_gone -= 1 return best_move[1]
def __init__(self, num_grid_points, dt, domain_size, c, f, df_dx):
    """Two-variable grid state on [0, domain_size).

    :param num_grid_points: number of spatial samples.
    :param dt: time-step size.
    :param domain_size: length of the spatial domain.
    :param c: wave speed.
    :param f: initial-condition function.
    :param df_dx: spatial derivative of f.
    """
    super().__init__(0, dt)
    self.num_grid_points = num_grid_points
    self.domain_size = domain_size
    self.c = c
    self.f = f
    self.df_dx = df_dx
    # One identical axis per variable, spanning [0, domain_size) without
    # the right endpoint.
    spatial_axis = np.linspace(0, domain_size, num_grid_points,
                               endpoint=False)
    self.axes = np.tile(spatial_axis, (2, 1))
    self.state = utils.State(num_vars=2, dim_vars=num_grid_points,
                             axes=self.axes)
def __init__(self, num_grid_points, dt, domain_size, c, f):
    """Two-variable grid state with named axes (u and v).

    :param num_grid_points: number of spatial samples.
    :param dt: time-step size.
    :param domain_size: length of the spatial domain.
    :param c: wave speed.
    :param f: initial-condition function.
    """
    super().__init__(0, dt)
    self.num_grid_points = num_grid_points
    self.domain_size = domain_size
    self.c = c
    self.f = f
    # One axis per variable, spanning [0, domain_size) without the right
    # endpoint.
    spatial_axis = np.linspace(0, domain_size, num_grid_points,
                               endpoint=False)
    self.axes = np.tile(spatial_axis, (2, 1))
    self.state = utils.State(num_vars=2,
                             dim_vars=num_grid_points,
                             axes=self.axes,
                             names=[("x", "u"), ("x", "v")])
def step(self, t, current_state, action):
    """Advance the inventory environment one period.

    input:
        - t: int, 1-based period index; must satisfy
          current_state.period == t - 1 and action.period == t.
        - current_state: utils.State for period t - 1.
        - action: utils.Action for period t.
    output:
        - for t == T + 1 (terminal): the terminal reward only;
        - otherwise: a (next_state, reward, done) tuple.
    """
    try:
        # Validate period bookkeeping; any mismatch aborts via the
        # handler below.
        if (current_state.period != t - 1) or (action.period != t) or (t > T + 1):
            raise Exception
        if t == T + 1:
            next_state = utils.State(T + 1, np.zeros(N))
            return self.rf.rewardfunction(t, self.parameterDP,
                                          current_state, action, next_state)
        # Post-demand inventory, clipped at zero (no negative stock).
        inventory = current_state.inventory + action.order - self.demand.demand[
            t - 1]
        inventory = utils.maxElementwise(inventory,
                                         np.zeros(len(inventory)))
        next_state = utils.State(t, inventory)
        reward = self.rf.rewardfunction(t, self.parameterDP, current_state,
                                        action, next_state)
        # t == T + 1 already returned above, so `done` is always False on
        # this path; kept for the (state, reward, done) interface.
        done = True if t == T + 1 else False
        return next_state, reward, done
    except Exception:
        # Fix: was a bare `except:`; narrowed so SystemExit and
        # KeyboardInterrupt still propagate.
        utils.printErrorAndExit('step')
def statespace(t, parameterDP):
    """The set of states: all feasible inventory vectors at time t."""
    inventory = []
    # Project helper enumerates every inventory vector up to MaxInventory
    # into `inventory` in place.
    utils.enumerate(inventory, parameterDP.MaxInventory,
                    np.zeros(parameterDP.N), 0)
    return [utils.State(t, vec) for vec in inventory]
def __init__(self, num_grid_points, dt, domain_size):
    """Two-variable grid with the first variable staggered by half a cell.

    :param num_grid_points: number of spatial samples.
    :param dt: time-step size.
    :param domain_size: length of the spatial domain.
    """
    super().__init__(0, dt)
    self.num_grid_points = num_grid_points
    self.domain_size = domain_size
    base_axis = np.linspace(0, domain_size, num_grid_points,
                            endpoint=False)
    self.axes = np.tile(base_axis, (2, 1))
    # Shift the first variable's axis half a cell to the right.
    self.axes[0] += 0.5 * domain_size / num_grid_points
    self.state = utils.State(num_vars=2, dim_vars=num_grid_points,
                             axes=self.axes)
    self.g = const.g  # gravitational constant from the project's `const`
def DPAlgorithmCore(parameterDP, pdf, rf, demand):
    """Backward-induction dynamic program over the inventory state space.

    input:
        - parameterDP: problem parameters (horizon T, dimension N, ...).
        - pdf: demand distribution descriptor.
        - rf: reward-function object.
        - demand: demand trajectory used by actionvalue().
    output:
        - optimal value of the initial state (zero inventory at t = 0).
    """
    # the optimal state value function, keyed by state.hash()
    value_pre = {}  # values of the later period (already computed)
    value_suc = {}  # values of the period currently being computed
    # initialization: terminal values at period T
    print('initialization')
    for state in statespace(parameterDP.T, parameterDP):
        value_pre[state.hash()] = rf.rewardfunction(parameterDP.T + 1, parameterDP, \
            state, utils.Action(parameterDP.T + 1, np.zeros(parameterDP.N)), \
            utils.State(parameterDP.T + 1, np.zeros(parameterDP.N)))
    # backward induction: each state takes the best action value over its
    # feasible actions
    for t in range(parameterDP.T, 0, -1):
        print('t = {}'.format(t))
        for current_state in statespace(t - 1, parameterDP):
            value_suc[current_state.hash()] = 0
            for action in setofaction(t, parameterDP, current_state):
                value_suc[current_state.hash()] = max(value_suc[current_state.hash()], \
                    actionvalue(t, parameterDP, pdf, rf, current_state, action, value_pre, demand))
        # Roll the window: current period becomes the "later" period.
        value_pre = value_suc
        value_suc = {}
    return value_pre[utils.State(0, np.zeros(parameterDP.N)).hash()]
def __init__(self, num_grid_points, dt, domain_size, c, coefficients):
    """Analytic standing-wave reference on the unit interval.

    :param num_grid_points: number of samples in the space dimension.
    :param dt: size of a time-step.
    :param domain_size: currently ignored; the domain is assumed to be
        [0, 1).
    :param c: wave speed from the wave equation.
    :param coefficients: list of (k, b_k) tuples, where k is the number of
        minimums/maximums of the corresponding sine wave and b_k is its
        coefficient.
    """
    super().__init__(0, dt)
    self._coefficients = coefficients
    self._c = c
    self._num_grid_points = num_grid_points
    # Unit-interval axis without the right endpoint, one row per variable.
    unit_axis = np.linspace(0, 1, num_grid_points, endpoint=False)
    self._axes = np.tile(unit_axis, (2, 1))
    self._state = utils.State(num_vars=2, dim_vars=num_grid_points,
                              axes=self._axes)
def __init__(self, num_grid_points, dt, domain_size, num_vars,
             ref_solution_generator: ReferenceSolutionCalculator,
             initial_cond, ref_dt=None, ref_t=10):
    """Grid state backed by a cached high-resolution reference solution.

    :param num_grid_points: number of spatial samples of this solver.
    :param dt: time-step size of this solver.
    :param domain_size: length of the spatial domain.
    :param num_vars: number of state variables.
    :param ref_solution_generator: generator that (re)builds the cached
        reference solution on disk and returns its file name.
    :param initial_cond: initial condition handed to the generator.
    :param ref_dt: time-step for the reference run; defaults to dt / 1000.
    :param ref_t: final time of the reference run.
    """
    super().__init__(0, dt)
    self.num_grid_points = num_grid_points
    self.num_vars = num_vars
    base_axis = np.linspace(0, domain_size, num_grid_points,
                            endpoint=False)
    self.axes = np.tile(base_axis, (num_vars, 1))
    self.state = utils.State(num_vars=num_vars, dim_vars=num_grid_points,
                             axes=self.axes)
    if ref_dt is None:
        ref_dt = dt / 1000
    file_name = ref_solution_generator.generate(ref_dt, ref_t, initial_cond)
    # Load the cached reference trajectory, its axes and its time vector.
    self.sol_data = np.load(path + file_name + ".npy")
    self.sol_axes = np.load(path + file_name + "_axes.npy")
    self.sol_time = np.load(path + file_name + "_time_vector.npy")
def __init__(self, num_grid_points, dt, domain_size, c, f):
    """Two-variable grid with the second variable staggered by half a cell.

    :param num_grid_points: number of spatial samples.
    :param dt: time-step size.
    :param domain_size: length of the spatial domain.
    :param c: wave speed.
    :param f: initial-condition function.
    """
    super().__init__(0, dt)
    self.num_grid_points = num_grid_points
    self.domain_size = domain_size
    self.c = c
    self.f = f
    base_axis = np.linspace(0, domain_size, num_grid_points,
                            endpoint=False)
    self.axes = np.tile(base_axis, (2, 1))
    # NOTE(review): the half-cell stagger uses 1.0 / num_grid_points
    # rather than domain_size / num_grid_points — correct only for a unit
    # domain. A sibling __init__ documents domain_size as "assumed 1";
    # confirm before changing.
    delta_x = 1.0 / num_grid_points
    self.axes[1] += delta_x * 0.5
    self.state = utils.State(num_vars=2, dim_vars=num_grid_points,
                             axes=self.axes)
def solution(self, t, new_object=False):
    """Return the (time-independent) reference state.

    :param t: time (unused; the profile is stationary).
    :param new_object: when True, allocate a fresh State instead of
        reusing self.state.
    """
    if new_object:
        state = utils.State(num_vars=2, dim_vars=self.num_grid_points,
                            axes=self.axes)
    else:
        state = self.state
    state_vars = state.get_state_vars()
    state_vars.fill(0.0)
    # First variable: log of the exponential profile; second stays zero.
    state_vars[0] = np.log(exp_curve(self.axes[0], self.g))
    state_vars[1] *= 0
    return state
def make_move(self, time_limit, players_score):
    """Make move with this Player.

    input:
        - time_limit: float, time limit for a single turn.
    output:
        - direction: tuple, specifying the Player's movement, chosen from
          self.directions
    """
    finish_time = time.time() + self.turn_time
    # NOTE(review): hard-coded debug position that extends the deadline by
    # 500 s — looks like leftover self-test code; confirm before shipping.
    if self.pos == (0, 4):
        finish_time = time.time() + 500
    depth = 1
    best_move = (-np.inf, (-1, 0))  # (score, direction) fallback
    initial_state = utils.State(self.board, self.graph, (0, 0), self.pos,
                                self.opponent_pos, self.current_turn,
                                self.fruits_on_board_dict, finish_time, None,
                                self.current_player_score,
                                self.opponent_player_score)
    # Iterative deepening until the deadline raises TimeoutError.
    while True:
        try:
            # NOTE(review): this rebinds the LOCAL finish_time only;
            # initial_state keeps the original deadline, so the extension
            # appears ineffective — verify intent.
            if depth > 40:
                finish_time += 500
            best_move = self.minimax_algo.search(initial_state, depth, True)
            if best_move[1] == (0, 0):
                # Search produced a "stay" move: fall back to the first
                # legal neighboring cell.
                initial_state.board[self.pos[0]][self.pos[1]] = -1
                poses = [(utils.tup_add(direction, self.pos), direction)
                         for direction in self.directions]
                valid_poses = list(
                    filter(
                        lambda tup: self.is_move_legal(
                            initial_state, tup[0][0], tup[0][1]), poses))
                if len(valid_poses) == 0:
                    raise ValueError("No valid moves")
                return valid_poses[0][1]
            elif (best_move[0] in [-1, 1]):
                # Decisive score reached; commit the move now.
                self.finish_turn(best_move, depth)
                return best_move[1]
        except TimeoutError:
            # TODO: Add reference here for score.
            self.finish_turn(best_move, depth)
            return best_move[1]
        depth += 1
def setofstate(t, pdf, current_state, action, demand):
    """Given the current state and the action, return the set of next states.

    input:
        - t: int, 1-based period index.
        - pdf: demand distribution descriptor; only 'general' is supported.
        - current_state: utils.State for period t - 1.
        - action: utils.Action for period t.
        - demand: object holding the demand trajectory.
    output:
        - list containing the single successor utils.State.
    """
    try:
        if pdf.name == 'general':
            # Post-demand inventory, clipped at zero (no negative stock).
            inventory = current_state.inventory + action.order - demand.demand[
                t - 1]
            inventory = utils.maxElementwise(inventory,
                                             np.zeros(len(inventory)))
            result = []
            result.append(utils.State(t, inventory))
            return result
        else:
            # Unsupported distribution: abort via the shared handler.
            raise Exception
    except Exception:
        # Fix: was a bare `except:`; narrowed so SystemExit and
        # KeyboardInterrupt still propagate.
        utils.printErrorAndExit('setofstate')
def test(env, nets, episodes, epsilon):
    """Run `episodes` evaluation episodes and return the mean profit."""
    profits = []
    for _ in range(episodes):
        profit = 0
        state = utils.State(0, np.zeros(N))
        for t in range(1, T + 2):
            if t == T + 1:
                # The terminal step returns only the final reward.
                profit += env.step(t, state,
                                   utils.Action(T + 1, np.zeros(N)))
            else:
                chosen = act(env, nets, t, state, epsilon)
                state, reward, done = env.step(t, state, chosen)
                profit += reward
        profits.append(profit)
    return np.mean(profits)
def solution(self, t, new_object=False):
    """Evaluate the cached reference solution at time t on this grid.

    :param t: time at which to evaluate.
    :param new_object: when True, allocate a fresh State instead of
        reusing self.state.
    """
    if new_object:
        state = utils.State(num_vars=self.num_vars,
                            dim_vars=self.num_grid_points, axes=self.axes)
    else:
        state = self.state
    state_vars = state.get_state_vars()
    state_vars.fill(0.0)
    # Time-interpolate the high-resolution reference, then down-sample
    # each variable onto this solver's spatial axes.
    hd_solution = interpolate(t, self.sol_time, self.sol_data)
    for var in range(self.num_vars):
        state_vars[var] = np.interp(self.axes[var], self.sol_axes[var],
                                    hd_solution[var])
    return state
def solution(self, t, new_object=False):
    """Analytic standing-wave solution at time t.

    Sums the configured (k, b_k) modes for both u and its time
    derivative v.

    :param t: time at which to evaluate.
    :param new_object: when True, allocate a fresh State instead of
        reusing self._state.
    """
    if new_object:
        state = utils.State(num_vars=2, dim_vars=self._num_grid_points,
                            axes=self._axes)
    else:
        state = self._state
    state_vars = state.get_state_vars()
    state_vars.fill(0.0)
    # Accumulate each configured mode into both variables.
    for k, b_k in self._coefficients:
        # u(x, t) of a standing wave
        state_vars[0] += b_k * math.cos(np.pi * k * self._c * t) * np.sin(
            np.pi * k * self._axes[0])
        # v = du(x, t)/dt of the standing wave
        state_vars[1] += - b_k * np.pi * k * self._c * math.sin(
            np.pi * k * self._c * t) * np.sin(np.pi * k * self._axes[1])
    return state
def solution(self, t, new_object=False):
    """Analytic wave-equation solution at time t via D'Alembert's formula.

    Superimposes a right-travelling and a left-travelling copy of the
    initial profile self.f on a periodic domain of length
    self.domain_size.

    :param t: time at which to evaluate.
    :param new_object: when True, allocate a fresh State instead of
        reusing self.state.
    """
    # according to D'Alembert
    if new_object:
        state = utils.State(num_vars=2, dim_vars=self.num_grid_points,
                            axes=self.axes)
        state_vars = state.get_state_vars()
    else:
        state = self.state
        state_vars = state.get_state_vars()
    state_vars.fill(0.0)
    travelled_dist = t * self.c
    travelled_dist = travelled_dist % self.domain_size  # get rid of wraparound
    # initialize with wave travelling right
    cond = travelled_dist <= self.axes[0]
    state_vars[0][cond] = self.f(self.axes[0][cond] - travelled_dist)
    state_vars[1][cond] = -self.f(self.axes[1][cond] - travelled_dist)
    # part of wave travelling right that has wrapped around
    cond = np.invert(cond)
    state_vars[0][cond] = self.f(self.axes[0][cond] + self.domain_size - travelled_dist)
    state_vars[1][cond] = -self.f(self.axes[1][cond] + self.domain_size - travelled_dist)
    # add wave travelling left
    cond = self.axes[0] <= self.domain_size - travelled_dist
    state_vars[0][cond] += self.f(self.axes[0][cond] + travelled_dist)
    state_vars[1][cond] += self.f(self.axes[1][cond] + travelled_dist)
    # part of wave travelling left that has wrapped around
    cond = np.invert(cond)
    state_vars[0][cond] += self.f(self.axes[0][cond] - self.domain_size + travelled_dist)
    state_vars[1][cond] += self.f(self.axes[1][cond] - self.domain_size + travelled_dist)
    # scale the second variable by the wave speed
    state_vars[1] *= self.c
    return state
def succ(self, state, max_player):
    """Expand `state`: return the list of child states, one per legal move.

    input:
        - state: utils.State to expand.
        - max_player: bool, True when the maximizing player moves (marks
          the new cell with 1, otherwise 2).
    output:
        - list of utils.State children.
    """
    # Expecting board, returns list of boards.
    lst = []
    # NOTE(review): this mutates the INPUT state's board in place (marks
    # the current cell as blocked) and never restores it — confirm
    # callers rely on / tolerate this.
    state.board[state.pos[0]][state.pos[1]] = -1
    for d in self.directions:
        new_pos = (state.pos[0] + d[0], state.pos[1] + d[1])
        i = new_pos[0]
        j = new_pos[1]
        # NOTE(review): legality is checked against self.board while the
        # child is copied from state.board — verify the two stay in sync.
        if 0 <= i < len(self.board) and 0 <= j < len(self.board[0]) and (
                self.board[i][j] not in [-1, 1, 2]):
            # then move is legal
            new_board = np.copy(state.board)
            new_board[i][j] = 1 if max_player else 2
            # Once the fruit deadline is reached, clear any fruit cells
            # that were not collected back to empty.
            if state.turn + 1 == self.max_fruit_turn:
                for pos in self.fruits_on_board_dict.keys():
                    if new_board[pos[0]][pos[1]] not in [-1, 1, 2]:
                        new_board[pos[0]][pos[1]] = 0
            lst.append(
                utils.State(new_board, d, new_pos, state.turn + 1,
                            self.fruits_on_board_dict, state.finish_time))
    return lst
def solution(self, t, new_object=False):
    """Evaluate the analytic scalar solution for the configured case.

    :param t: time at which to evaluate.
    :param new_object: when True, allocate a fresh State instead of
        reusing self.state.
    :raises KeyError: if self.case_num is not one of the known cases.
    """
    if new_object:
        state = utils.State(num_vars=1, dim_vars=1, axes=self.axes)
    else:
        state = self.state
    state_vars = state.get_state_vars()
    state_vars.fill(0.0)
    case = self.case_num
    if case == 0:
        value = self.start_value * math.exp(
            0.5 * (t - math.sin(t - self.t0) * math.cos(t - self.t0)))
    elif case == 1:
        value = self.start_value * math.exp(-3 * (t - self.t0))
    elif case == 2:
        value = self.start_value + 0.5 * math.log(t * t + 1)
    elif case == 3:
        value = self.start_value * math.e * math.exp(
            -math.cos(t - self.t0))
    else:
        # Match the KeyError the original dict lookup raised.
        raise KeyError(case)
    state_vars[0] = value
    return state
def make_move(self, time_limit, players_score): """Make move with this Player. input: - time_limit: float, time limit for a single turn. output: - direction: tuple, specifing the Player's movement, chosen from self.directions """ # print("start computing minimax move") # printing for self test if self.first_player == -1: self.first_player = True state = utils.State(copy.deepcopy(self.board), self.pos, self.rival_pos, players_score, self.penalty_score, self.turns_till_fruit_gone, self.fruits_on_board_dict, self.first_player, time.time() + time_limit - .015) search_algo = SearchAlgos.MiniMax(utils.utility, utils.succ, utils.perform_move, utils.goal) depth = 1 best_move = None, None while depth <= self.max_turns: try: best_move = search_algo.search(state, depth, True) except TimeoutError: break depth += 1 if best_move[1] is None: # print("something went wrong,.. im out") # printing for self test exit(0) # print("depth is : ", depth-1) # printing for self test # print("minmax choose the move: ", best_move) # printing for self test self.board[self.pos] = -1 tmp1 = best_move[1] self.pos = (self.pos[0] + tmp1[0], self.pos[1] + tmp1[1]) self.board[self.pos] = 1 self.turns_till_fruit_gone -= 1 return best_move[1]
def generate(self, dt, end_time, initial_cond):
    """Generate (or reuse) a cached reference solution on disk.

    Integrates the model with explicit Runge-Kutta from t=0 to end_time
    and saves the down-sampled trajectory, the axes and the time vector
    as .npy files next to `path`.

    :param dt: integration time-step.
    :param end_time: final simulation time.
    :param initial_cond: per-variable callables evaluated on self.axes.
    :return: base file name (without extension) of the cached solution.
    """
    state = utils.State(self.num_vars, self.num_grid_points, self.axes)
    integrator = integrators.RungeKutta.Explicit(state,
                                                 self.time_derivative, 0,
                                                 dt)
    file_name = "{}_#{}_dt{}_t{}_w{}".format(str(self.time_derivative),
                                             self.num_grid_points,
                                             int(1 / dt), end_time,
                                             self.domain_length)
    import os
    if not os.path.isfile(path + file_name + ".npy"):
        init_data = state.get_state_vars()
        num_iterations = int(end_time / dt)
        # first index: go through time, second index: variables,
        # third index: go through space
        reference_solution = np.zeros(
            (int(num_iterations / self.down_sampling_rate) + 1,
             self.num_vars, self.num_grid_points))
        times = np.zeros(((int(num_iterations / self.down_sampling_rate) + 1)))
        for i in range(self.num_vars):
            reference_solution[0, i] = init_data[i] = initial_cond[i](self.axes[i])
        timer = error_tracking_tools.TimeIterator(0, dt)
        # Fix: the loop variables were named `time` and `state`, shadowing
        # the `time` module and the State object created above.
        for i, (sim_time, current_state) in enumerate(zip(timer, integrator), 1):
            if i % self.down_sampling_rate == 0:
                print(i / num_iterations)  # progress indicator
                sample = int(i / self.down_sampling_rate)
                times[sample] = sim_time
                reference_solution[sample] = current_state.get_state_vars()
            if i == num_iterations:
                break
        np.save(path + file_name, reference_solution)
        np.save(path + file_name + "_axes", self.axes)
        np.save(path + file_name + "_time_vector", times)
    return file_name
def __init__(self, num_grid_points, dt, domain_size, t0, start_value,
             case_num):
    """Zero-dimensional (single-value) analytic test state.

    :param num_grid_points: unused here; kept for interface uniformity.
    :param dt: time-step size.
    :param domain_size: unused here; kept for interface uniformity.
    :param t0: initial time passed to the base class.
    :param start_value: initial value of the scalar solution.
    :param case_num: selects which analytic case solution() evaluates.
    """
    super().__init__(t0, dt)
    self.start_value = start_value
    # A single variable on a single (dummy) axis point.
    self.axes = np.zeros((1, 1))
    self.state = utils.State(num_vars=1, dim_vars=1, axes=self.axes)
    self.case_num = case_num
def set_game_params(self, board):
    """Set the game parameters needed for this player.

    This function is called before the game starts.
    (See GameWrapper.py for more info where it is called)

    input:
        - board: np.array, a 2D matrix of the board.
    No output is expected.
    """
    self.board = board
    # need to set my pos, the rival pos, all the grey area and all fruits
    self.max_turns = len(board) * len(board[0]) - 2
    self.turns_till_fruit_gone = min(len(board), len(board[0])) * 2
    for r, row in enumerate(board):
        for c, num in enumerate(row):
            if num == -1:
                self.max_turns -= 1
            if num == 1:
                self.pos = (r, c)  # this my pos
            elif num == 2:
                self.rival_pos = (r, c)
            elif num > 2:
                self.fruits_on_board_dict[(
                    r, c
                )] = num  # need to do this manually only for this player
    # Geometric first-iteration budget derived from the total game time.
    self.time_for_curr_iter = (-2 / 3) * self.game_time / ((
        (1 / 3)**self.max_turns) - 1)
    # Measure how long a minimal (depth=2) search takes, padded by 25%.
    min_iter_time = time.time()
    state = utils.State(copy.deepcopy(self.board), self.pos, self.rival_pos,
                        [0, 0], self.penalty_score,
                        self.turns_till_fruit_gone,
                        self.fruits_on_board_dict, True)
    search_algo = SearchAlgos.AlphaBeta(utils.utility, utils.succ,
                                        utils.perform_move, utils.goal)
    search_algo.search(state, 2, True)
    min_iter_time = (time.time() - min_iter_time) * 1.25
    if min_iter_time == 0:
        min_iter_time = 0.0022  # fallback when the clock resolution is too coarse
    self.my_turn = int((1 + self.max_turns) / 2)
    tmp_time = self.time_for_curr_iter
    tmp_depth = self.my_turn
    # check every iter is possible for at least depth=1
    while tmp_depth and tmp_time > min_iter_time:
        tmp_time = tmp_time / self.risk_factor1
        tmp_depth -= 1
    if tmp_time < min_iter_time:
        # not every iter is possible for at least depth=1.
        # plan B for time sharing:
        avg_time = self.game_time / self.my_turn
        self.time_for_each_iter = {}
        index_left = self.my_turn
        index_right = 0
        exchange_tmp = avg_time - min_iter_time
        # exchange time between the latest iter and the first
        while index_left >= index_right and exchange_tmp > 0:
            self.time_for_each_iter[index_left] = avg_time + exchange_tmp
            self.time_for_each_iter[
                index_right] = min_iter_time + self.extra_safe_time
            index_right += 1
            index_left -= 1
            min_iter_time *= self.risk_factor2
            exchange_tmp = avg_time - (min_iter_time + self.extra_safe_time)
        # Remaining middle turns all get the plain average budget.
        while index_left >= index_right:
            self.time_for_each_iter[index_left] = avg_time
            self.time_for_each_iter[index_right] = avg_time
            index_right += 1
            index_left -= 1
        # NOTE(review): indentation reconstructed from mangled source —
        # these two statements only make sense inside the plan-B branch
        # (time_for_each_iter exists only here); confirm against the
        # original file.
        self.time_for_curr_iter = self.time_for_each_iter[self.my_turn]
        self.my_turn -= 1
time_factor = 1.0 dt = 16.0 / (1 * num_grid_points) params = { 'num_grid_points': num_grid_points, 'domain_size': 1000.0, 'dt': dt, 'sampling_rate': time_factor * 2 } # setup the state axes = np.tile( np.linspace(0, params['domain_size'], num_grid_points + 1)[:-1], (2, 1)) # setup the axes axes[0] = axes[0] + 0.5 * dx # offset the lnrho-axis state = utils.State(2, num_grid_points, axes, [("x", "rho"), ("x", "w")]) # create the state # choose starting condition data = state.get_state_vars() # get the underlying numpy-array # specify rho-values data[0] = starting_conditions.GaussianBump( 0.5 * params['domain_size'], 0.001).get_start_condition(axes[0]) * 0.1 + 1 # data[0] = sinc_start_cond(axes[0]) # data[0] = const_start_cond(axes[0]) # data[0] = exp_curve(axes[0]) data[0] = np.log(data[0]) # apply logarithm in order to have ln(rho)-axis # choose inputs for the time_derivative time_derivative_input = [
import socket
import sys
import time

import numpy as np

import utils

# Create a TCP/IP socket.
# Fix: `socket` was used below but never imported (NameError at runtime).
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Bind the socket to the port
server_address = ('192.168.0.3', 6665)
print('starting up on %s port %s' % server_address)
sock.bind(server_address)

# Listen for incoming connections
sock.listen(1)
connection, client_address = sock.accept()

s = utils.State()
presses = []
utils.find_pattern(s, presses)

# Receive whitespace-separated integer key presses and feed them to the
# pattern finder forever.
while True:
    data = connection.recv(16)
    data = data.decode().split()
    for x in data:
        presses.append(int(x))
    presses = utils.find_pattern(s, presses)
    time.sleep(0.01)