def _wait_data_worker(self, latest, proto_to_wait_for, timestamp):
    """Check whether data for `proto_to_wait_for` arrived after `timestamp`.

    :param latest: mapping of proto/method id -> {'timestamp': ..., 'values': ...}
                   maintained by the mitm mapper (None if nothing received yet)
    :param proto_to_wait_for: numeric proto/method id we are waiting for
    :param timestamp: reference time; only data at least this new counts
    :return: a LatestReceivedType / FortSearchResultTypes member describing
             what was received, None on missing/partial data, or
             LatestReceivedType.UNDEFINED when nothing usable arrived yet
    """
    if latest is None:
        # mitm mapper has not stored anything for this device yet
        logger.debug("Nothing received since MAD started")
        time.sleep(0.5)
    elif 156 in latest and latest[156].get('timestamp', 0) >= timestamp:
        # 156: gym-info proto already newer than our reference time
        return LatestReceivedType.GYM
    elif 102 in latest and latest[102].get('timestamp', 0) >= timestamp:
        # 102: encounter proto already newer than our reference time
        return LatestReceivedType.MON
    elif proto_to_wait_for not in latest:
        logger.debug(
            "No data linked to the requested proto since MAD started.")
        time.sleep(0.5)
    else:
        # proto has previously been received, let's check the timestamp...
        # TODO: int vs str-key?
        latest_proto = latest.get(proto_to_wait_for, None)
        # Grace offset because PoGo sometimes delivers data later than
        # expected. NOTE(review): +5000 assumes the stored timestamps are in
        # milliseconds — confirm against the mitm mapper; `timestamp` itself
        # looks second-based elsewhere in this file.
        latest_timestamp = latest_proto.get("timestamp", 0) + 5000
        if latest_timestamp >= timestamp:
            # TODO: consider reseting timestamp here since we clearly received SOMETHING
            latest_data = latest_proto.get("values", None)
            logger.debug4("Latest data received: {}".format(str(latest_data)))
            if latest_data is None:
                time.sleep(0.5)
                return None
            elif proto_to_wait_for == 101:
                # 101: FortSearch (pokestop spin) response
                payload: dict = latest_data.get("payload", None)
                if payload is None:
                    return None
                # map the FortSearch result code to our result enum
                result: int = payload.get("result", 0)
                if result == 1 and len(payload.get('items_awarded', [])) > 0:
                    return FortSearchResultTypes.QUEST
                elif (result == 1
                        and len(payload.get('items_awarded', [])) == 0):
                    return FortSearchResultTypes.TIME
                elif result == 2:
                    return FortSearchResultTypes.OUT_OF_RANGE
                elif result == 3:
                    return FortSearchResultTypes.COOLDOWN
                elif result == 4:
                    return FortSearchResultTypes.INVENTORY
                elif result == 5:
                    return FortSearchResultTypes.LIMIT
                # any other result code falls through to UNDEFINED below
            elif proto_to_wait_for == 104:
                # 104: FortDetails; type 0 is a gym, anything else a stop
                fort_type: int = latest_data.get("payload").get("type", 0)
                if fort_type == 0:
                    return LatestReceivedType.GYM
                else:
                    return LatestReceivedType.STOP
            # 4: inventory proto; a non-empty inventory_delta confirms the
            # requested bag clear actually happened
            if proto_to_wait_for == 4 and 'inventory_delta' in latest_data['payload'] and \
                    len(latest_data['payload']['inventory_delta']['inventory_items']) > 0:
                return LatestReceivedType.CLEAR
        else:
            logger.debug("latest timestamp of proto {} ({}) is older than {}", str(
                proto_to_wait_for), str(latest_timestamp), str(timestamp))
    # TODO: timeoutopen error instead of data_error_counter? Differentiate timeout vs missing data (the
    # TODO: latter indicates too high speeds for example
    time.sleep(0.5)
    return LatestReceivedType.UNDEFINED
def process_data(self, received_timestamp, data, origin):
    """Dispatch one received proto (by its numeric type) to the matching
    database submitter / stats handler.

    :param received_timestamp: epoch time the data was received
    :param data: dict holding 'type', an optional 'raw' flag and the 'payload'
    :param origin: name of the device the data came from
    """
    data_type = data.get("type", None)
    raw = data.get("raw", False)
    logger.debug2("Processing data of {}".format(origin))
    if raw:
        logger.debug5("Received raw payload: {}", data["payload"])
    # Raw payloads and typeless packets are not processed any further.
    if not data_type or raw:
        return
    logger.debug2("Running stats collector of {}".format(origin))
    if self.__application_args.game_stats:
        self.__mitm_mapper.run_stats_collector(origin)
    logger.debug4("Received data of {}: {}", origin, data)
    if data_type == 106:
        # GetMapObjects: feed every contained map entity to the DB submitter.
        logger.success("Processing GMO received from {}. Received at {}", str(
            origin), str(datetime.fromtimestamp(received_timestamp)))
        if self.__application_args.weather:
            self.__db_submit.weather(origin, data["payload"], received_timestamp)
        self.__db_submit.stops(origin, data["payload"])
        self.__db_submit.gyms(origin, data["payload"])
        self.__db_submit.raids(origin, data["payload"], self.__mitm_mapper)
        self.__db_submit.spawnpoints(origin, data["payload"])
        ids_iv = self.__mitm_mapper.get_mon_ids_iv(origin)
        self.__db_submit.mons(origin, data["payload"], ids_iv, self.__mitm_mapper)
        self.__db_submit.cells(origin, data["payload"])
        self.__mitm_mapper.submit_gmo_for_location(origin, data["payload"])
        logger.debug2("Done processing GMO of {}".format(origin))
    elif data_type == 102:
        # Encounter: IV data is only trustworthy from a level 30+ account.
        trainer_level = self.__mitm_mapper.get_playerlevel(origin)
        if trainer_level < 30:
            logger.debug('Playerlevel lower than 30 - not processing encounter Data')
        else:
            logger.info("Processing Encounter received from {} at {}", str(origin), str(received_timestamp))
            self.__db_submit.mon_iv(origin, received_timestamp, data["payload"], self.__mitm_mapper)
            logger.debug2("Done processing encounter of {}".format(origin))
    elif data_type == 101:
        # FortSearch: quest data.
        logger.debug2("Processing proto 101 of {}".format(origin))
        self.__db_submit.quest(origin, data["payload"], self.__mitm_mapper)
        logger.debug2("Done processing proto 101 of {}".format(origin))
    elif data_type == 104:
        # FortDetails: stop details.
        logger.debug2("Processing proto 104 of {}".format(origin))
        self.__db_submit.stop_details(data["payload"])
        logger.debug2("Done processing proto 104 of {}".format(origin))
    elif data_type == 4:
        # Inventory: derive player stats.
        logger.debug2("Processing proto 4 of {}".format(origin))
        self.__mitm_mapper.generate_player_stats(origin, data["payload"])
        logger.debug2("Done processing proto 4 of {}".format(origin))
    elif data_type == 156:
        # GymGetInfo.
        logger.debug2("Processing proto 156 of {}".format(origin))
        self.__db_submit.gym(origin, data["payload"])
        logger.debug2("Done processing proto 156 of {}".format(origin))
def execute(self, sql, args=(), commit=False, **kwargs):
    """
    Execute a SQL statement, optionally with args; usage is similar to
    pymysql's execute().

    :param sql: SQL clause; if it contains more than one ';' it is treated as
        a multi-statement script (args are ignored in that case)
    :param args: parameters for the SQL clause; a non-tuple is wrapped into a
        1-tuple
    :param commit: whether to commit after executing
    :keyword get_id: on commit, return cursor.lastrowid instead of the
        affected row count
    :keyword get_dict: on fetch, convert result rows to dicts keyed by column
    :keyword raise_exc: re-raise mysql.connector errors instead of returning None
    :keyword suppress_log: do not log query failures
    :return: on commit: lastrowid or affected row count (None for
        multi-statement scripts); otherwise the fetched rows (possibly as
        dicts); None on error
    """
    # Bound the number of concurrently used pool connections, then borrow one.
    self._connection_semaphore.acquire()
    conn = self._pool.get_connection()
    cursor = self.setup_cursor(conn, **kwargs)
    get_id = kwargs.get('get_id', False)
    get_dict = kwargs.get('get_dict', False)
    raise_exc = kwargs.get('raise_exc', False)
    suppress_log = kwargs.get('suppress_log', False)
    try:
        multi = False
        if type(args) != tuple and args is not None:
            # normalize a single scalar argument into a 1-tuple
            args = (args,)
        if sql.count(';') > 1:
            # Crude multi-statement detection. NOTE(review): a single
            # statement containing two ';' also trips this, and `args` are
            # silently ignored on this path — confirm callers never pass
            # parameters with multi-statement SQL.
            multi = True
            for res in conn.cmd_query_iter(sql):
                # drain every result set so the connection stays reusable
                pass
        else:
            cursor.execute(sql, args)
            logger.debug4(cursor.statement)
        if commit is True:
            conn.commit()
            if not multi:
                affected_rows = cursor.rowcount
                if get_id:
                    return cursor.lastrowid
                else:
                    return affected_rows
        else:
            if not multi:
                res = cursor.fetchall()
                if get_dict:
                    return self.__convert_to_dict(cursor.column_names, res)
                return res
    except mysql.connector.Error as err:
        if not suppress_log:
            logger.error("Failed executing query: {}, error: {}", str(sql), str(err))
            logger.debug(sql)
            logger.debug(args)
        if raise_exc:
            raise err
        return None
    except Exception as e:
        # last-resort guard so a borrowed connection is never lost to an
        # unexpected error type
        logger.error("Unspecified exception in dbWrapper: {}", str(e))
        return None
    finally:
        # always return the connection/cursor and free the semaphore slot
        self.close(conn, cursor)
        self._connection_semaphore.release()
def proto_endpoint(self, origin: str, data: Union[dict, list]):
    """Accept proto data for `origin` — either one proto dict or a list of
    them — hand each to the internal per-proto handler, and mark the device
    as injected afterwards."""
    logger.debug2("Receiving proto from {}".format(origin))
    logger.debug4("Proto data received from {}: {}".format(origin, str(data)))
    if isinstance(data, dict):
        # a single proto, parse it directly
        logger.debug2("Receiving single proto")
        self.__handle_proto_data_dict(origin, data)
    elif isinstance(data, list):
        # hopefully a list of protos; handle them one by one
        logger.debug2("Receiving list of protos")
        for single_proto in data:
            self.__handle_proto_data_dict(origin, single_proto)
    self.__mitm_mapper.set_injection_status(origin)
def submit_gmo_for_location(self, origin, payload):
    """Record the sorted S2 cell ids of a GMO for `origin` and, whenever the
    set of cells changed (or the origin is new), remember "now" as the moment
    the device possibly moved."""
    logger.debug4("submit_gmo_for_location of {}", origin)
    cells = payload.get("cells", None)
    if cells is None:
        return
    # sorted so the comparison is independent of cell ordering in the proto
    cell_ids = sorted(cell['id'] for cell in cells)
    previous_ids = self.__last_cellsid.get(origin)
    self.__last_cellsid[origin] = cell_ids
    if previous_ids is None or previous_ids != cell_ids:
        self.__last_possibly_moved[origin] = time.time()
    logger.debug4("Done submit_gmo_for_location of {} with {}", origin, cell_ids)
def proto_endpoint(self, origin, data):
    """Accept a single raw proto dict from `origin`, register it with the
    mitm mapper and queue it for asynchronous processing.

    :param origin: name of the device the proto came from
    :param data: dict holding at least 'type' (method id) and optionally
                 'timestamp'
    :return: None (also on rejected/typeless protos)
    """
    logger.debug2("Receiving proto from {}".format(origin))
    logger.debug4("Proto data received from {}: {}".format(
        origin, str(data)))
    # FIX: renamed local from `type` — it shadowed the builtin.
    method_id = data.get("type", None)
    if method_id is None or method_id == 0:
        logger.warning(
            "Could not read method ID. Stopping processing of proto")
        return None
    # First proto seen from this device means injection succeeded.
    if not self.__mitm_mapper.get_injection_status(origin):
        logger.info("Worker {} is injected now", str(origin))
        self.__mitm_mapper.set_injection_status(origin)
    # extract timestamp from data, falling back to "now"
    timestamp: float = data.get("timestamp", int(time.time()))
    self.__mitm_mapper.update_latest(
        origin, timestamp_received_raw=timestamp,
        timestamp_received_receiver=time.time(), key=method_id,
        values_dict=data)
    logger.debug3(
        "Placing data received by {} to data_queue".format(origin))
    self._data_queue.put((timestamp, data, origin))
    return None
def __send_webhook(self, payload):
    """POST the accumulated webhook payload to every configured URL.

    Each URL in the comma-separated ``webhook_url`` config may carry an
    optional ``[type1 type2]`` prefix restricting which payload types it
    receives. Payloads are split into chunks of at most
    ``webhook_max_payload_size`` entries before sending.

    :param payload: list of payload dicts, each holding a "type" key
    """
    if not payload:
        logger.debug("Payload empty. Skip sending to webhook.")
        return
    # get list of urls
    webhooks = self.__args.webhook_url.replace(" ", "").split(",")
    webhook_count = len(webhooks)
    current_wh_num = 1
    for webhook in webhooks:
        payload_to_send = []
        sub_types = "all"
        url = webhook.strip()
        if url.startswith("["):
            # BUGFIX: index into the stripped `url` instead of the raw
            # `webhook` — a leading tab/newline in the config would have
            # misaligned the filter prefix and the sliced URL.
            end_index = url.rindex("]") + 1
            sub_types = url[:end_index]
            url = url[end_index:]
            for payload_data in payload:
                # NOTE: substring match against the raw "[...]" prefix text
                if payload_data["type"] in sub_types:
                    payload_to_send.append(payload_data)
        else:
            payload_to_send = payload
        if len(payload_to_send) == 0:
            logger.debug("Payload empty. Skip sending to: {} (Filter: {})",
                         url, sub_types)
            continue
        else:
            logger.debug("Sending to webhook url: {} (Filter: {})", url, sub_types)
        payload_list = self.__payload_chunk(
            payload_to_send, self.__args.webhook_max_payload_size)
        current_pl_num = 1
        for payload_chunk in payload_list:
            logger.debug4("Python data for payload: {}", str(payload_chunk))
            logger.debug3("Payload: {}", str(json.dumps(payload_chunk)))
            try:
                response = requests.post(
                    url,
                    data=json.dumps(payload_chunk),
                    headers={"Content-Type": "application/json"},
                    timeout=5,
                )
                if response.status_code != 200:
                    logger.warning(
                        "Got status code other than 200 OK from webhook destination: {}",
                        str(response.status_code),
                    )
                else:
                    # Only annotate "[wh i/n]" / "[pl i/n]" when there is
                    # more than one webhook / chunk to distinguish.
                    if webhook_count > 1:
                        whcount_text = " [wh {}/{}]".format(
                            current_wh_num, webhook_count)
                    else:
                        whcount_text = ""
                    if len(payload_list) > 1:
                        whchunk_text = " [pl {}/{}]".format(
                            current_pl_num, len(payload_list))
                    else:
                        whchunk_text = ""
                    logger.success(
                        "Successfully sent payload to webhook{}{}. Stats: {}",
                        whchunk_text, whcount_text,
                        json.dumps(self.__payload_type_count(payload_chunk)),
                    )
            except Exception as e:
                # best-effort delivery: log and continue with the next chunk
                logger.warning(
                    "Exception occured while sending webhook: {}", str(e))
            current_pl_num += 1
        current_wh_num += 1
def _wait_data_worker(self, latest, proto_to_wait_for, timestamp):
    """Check whether data for `proto_to_wait_for` arrived after `timestamp`.

    :param latest: mapping of proto/method id -> {'timestamp': ..., 'values': ...}
                   maintained by the mitm mapper (None if nothing received yet)
    :param proto_to_wait_for: numeric proto/method id we are waiting for
    :param timestamp: reference time; only data at least this new counts
    :return: a LatestReceivedType / FortSearchResultTypes member describing
             what was received, None on missing/partial data, or
             LatestReceivedType.UNDEFINED when nothing usable arrived yet
    """
    if latest is None:
        # mitm mapper has not stored anything for this device yet
        logger.debug("Nothing received since MAD started")
        time.sleep(0.5)
    elif 156 in latest and latest[156].get('timestamp', 0) >= timestamp:
        # 156: gym-info proto already newer than our reference time
        return LatestReceivedType.GYM
    elif 102 in latest and latest[102].get('timestamp', 0) >= timestamp:
        # 102: encounter proto already newer than our reference time
        return LatestReceivedType.MON
    elif proto_to_wait_for not in latest:
        logger.debug(
            "No data linked to the requested proto since MAD started.")
        time.sleep(0.5)
    else:
        # when waiting for stop or spin data, it is enough to make sure
        # our data is newer than the latest of last quest received, last
        # successful bag clear or last successful quest clear. This eliminates
        # the need to add arbitrary timedeltas for possible small delays,
        # which we don't do in other workers either
        if proto_to_wait_for in [101, 104]:
            # NOTE(review): max() raises ValueError if none of the candidates
            # is numeric — presumably _latest_quest is always int/float by the
            # time we wait for 101/104; confirm.
            replacement = max(x for x in [self._latest_quest,
                                          self.get_devicesettings_value('last_cleanup_time', 0),
                                          self.get_devicesettings_value('last_questclear_time', 0)]
                              if isinstance(x, int) or isinstance(x, float))
            logger.debug("timestamp {} being replaced with {} because "
                         "we're waiting for proto {}",
                         datetime.fromtimestamp(timestamp).strftime('%H:%M:%S'),
                         datetime.fromtimestamp(replacement).strftime('%H:%M:%S'),
                         proto_to_wait_for)
            timestamp = replacement
        # proto has previously been received, let's check the timestamp...
        # TODO: int vs str-key?
        latest_proto = latest.get(proto_to_wait_for, None)
        latest_timestamp = latest_proto.get("timestamp", 0)
        if latest_timestamp >= timestamp:
            # TODO: consider reseting timestamp here since we clearly received SOMETHING
            latest_data = latest_proto.get("values", None)
            logger.debug4("Latest data received: {}".format(str(latest_data)))
            if latest_data is None:
                time.sleep(0.5)
                return None
            elif proto_to_wait_for == 101:
                # 101: FortSearch (pokestop spin) response
                payload: dict = latest_data.get("payload", None)
                if payload is None:
                    return None
                # quest_type defaults to False when no quest is attached
                quest_type: int = payload.get('challenge_quest', {}) \
                    .get('quest', {}) \
                    .get('quest_type', False)
                result: int = payload.get("result", 0)
                if (result == 1
                        and len(payload.get('items_awarded', [])) == 0):
                    return FortSearchResultTypes.TIME
                elif result == 1 and quest_type == 0:
                    # success but no quest data -> quest log presumably full
                    return FortSearchResultTypes.FULL
                elif result == 1 and len(payload.get('items_awarded', [])) > 0:
                    return FortSearchResultTypes.QUEST
                elif result == 2:
                    return FortSearchResultTypes.OUT_OF_RANGE
                elif result == 3:
                    return FortSearchResultTypes.COOLDOWN
                elif result == 4:
                    return FortSearchResultTypes.INVENTORY
                elif result == 5:
                    return FortSearchResultTypes.LIMIT
            elif proto_to_wait_for == 104:
                # 104: FortDetails; type 0 is a gym, anything else a stop
                fort_type: int = latest_data.get("payload").get("type", 0)
                if fort_type == 0:
                    return LatestReceivedType.GYM
                else:
                    return LatestReceivedType.STOP
            elif proto_to_wait_for == 106:
                # 106: GMO; any fort with an id counts as usable map data
                for data_extract in latest_data['payload']['cells']:
                    for forts in data_extract['forts']:
                        if forts['id']:
                            return LatestReceivedType.GMO
            # 4: inventory proto; a non-empty inventory_delta confirms the
            # requested bag clear actually happened
            if proto_to_wait_for == 4 and 'inventory_delta' in latest_data['payload'] and \
                    len(latest_data['payload']['inventory_delta']['inventory_items']) > 0:
                return LatestReceivedType.CLEAR
        else:
            logger.debug("latest timestamp of proto {} ({}) is older than {}", str(
                proto_to_wait_for), str(latest_timestamp), str(timestamp))
    # TODO: timeoutopen error instead of data_error_counter? Differentiate timeout vs missing data (the
    # TODO: latter indicates too high speeds for example
    time.sleep(0.5)
    return LatestReceivedType.UNDEFINED