Example #1
 def __update_mad(self):
     if self._madver < self._installed_ver:
         logger.error('Mis-matched version number detected.  Not applying any updates')
     else:
         logger.warning('Performing updates from version {} to {} now',
                        self._installed_ver, self._madver)
         all_patches = list(MAD_UPDATES.keys())
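          # MAD_UPDATES is an ordered mapping of patch version -> patch module, so the index of the
          # installed version tells us which patches still need to be applied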
         try:
             last_ver = all_patches.index(self._installed_ver)
             first_patch = last_ver + 1
         except ValueError:
              # The installed version was most likely removed from MAD_UPDATES because its patch is no longer
              # needed.  Determine where to start by finding the first patch newer than the installed version
             next_patch = None
             for patch_ver in all_patches:
                 if self._installed_ver > patch_ver:
                     continue
                 next_patch = patch_ver
                 break
             try:
                 first_patch = all_patches.index(next_patch)
             except ValueError:
                  logger.critical('Unable to find the next patch to apply')
                  # Without a valid starting patch we cannot continue; bail out instead of using an undefined first_patch
                  return
         updates_to_apply = all_patches[first_patch:]
         logger.info('Patches to apply: {}', updates_to_apply)
         for patch_ver in updates_to_apply:
             self.__apply_update(patch_ver)
         logger.success('Updates to version {} finished', self._installed_ver)
Example #2
    def __validate_versions_schema(self):
        """ Verify status of the versions table

            Validate that the PK exists for the versions table.  If it does not, attempt to create it.  If we run
            into duplicate keys, de-dupe the table, then apply the PK.
        """
        try:
            sql = "SHOW FIELDS FROM `versions`"
            columns = self.dbwrapper.autofetch_all(sql, suppress_log=True)
        except mysql.connector.Error:
            # Version table does not exist.  This is installed with the base install so we can assume the required
            # tables have not been created
            self.__install_schema()
        else:
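            # Table exists; check whether the `key` column is already the primary key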
            for column in columns:
                if column['Field'] != 'key':
                    continue
                if column['Key'] != 'PRI':
                    logger.info('Primary key not configured on the versions table.  Applying fix')
                    try:
                        self.__update_versions_table()
                    except mysql.connector.Error as err:
                        if err.errno == 1062:
                            logger.info('Multiple versions detected in the table.  Performing maintenance on the table')
                            sql = "SELECT `key`, MAX(`val`) AS 'val' FROM `versions`"
                            max_vers = self.dbwrapper.autofetch_all(sql)
                            logger.info('Versions: {}', max_vers)
                            sql = "DELETE FROM `versions`"
                            self.dbwrapper.execute(sql, commit=True)
                            for elem in max_vers:
                                self.dbwrapper.autoexec_insert('versions', elem)
                            logger.success('Successfully de-duplicated versions table and set each key to the '
                                           'maximum value from the table')
                            self.__update_versions_table()
Example #3
 def __get_installed_version(self):
     try:
         self._installed_ver = self.dbwrapper.get_mad_version()
         if self._installed_ver:
             logger.info("Internal MAD version in DB is {}",
                         self._installed_ver)
         else:
             logger.info(
                 'Partial schema detected.  Additional steps required')
             self.__install_instance_table()
             # Attempt to read the old version.json file to get the latest install version in the database
             try:
                 with open('version.json') as f:
                     self._installed_ver = json.load(f)['version']
                 logger.success("Moving internal MAD version to database")
                 self.__set_installed_ver(self._installed_ver)
             except FileNotFoundError:
                 logger.info(
                     'New installation with a partial schema detected.  Updates will be attempted'
                 )
                 self._installed_ver = 0
                 self.__set_installed_ver(self._installed_ver)
             self.__reload_instance_id()
             logger.success(
                 "Moved internal MAD version to database as version {}",
                 self._installed_ver)
     except Exception:
         logger.opt(exception=True).critical(
             'Unknown exception occurred while getting the MAD DB version.'
             '  Exiting')
         sys.exit(1)
Example #4
 def __apply_update(self, patch_ver):
     filename = MAD_UPDATES[patch_ver]
     patch_name = 'mapadroid.patcher.%s' % filename
     try:
         patch_base = importlib.import_module(patch_name)
     except ImportError:
         logger.opt(exception=True).error(
             'Unable to import patch {}.  Exiting', patch_name)
         sys.exit(1)
     else:
         # Execute the patch and catch any errors for logging
         try:
             patch = patch_base.Patch(logger, self.dbwrapper,
                                      self.data_manager,
                                      self._application_args)
             if patch.completed and not patch.issues:
                 self.__set_installed_ver(patch_ver)
                 logger.success('Successfully applied patch')
             else:
                 logger.error('Patch was unsuccessful.  Exiting')
                 sys.exit(1)
         except Exception:
             logger.opt(
                 exception=True).error('Patch was unsuccessful.  Exiting')
             sys.exit(1)
Example #5
    def get_version(self):
        # checking mappings.json
        convert_mappings()
        dbVersion = self.dbwrapper.get_mad_version()
        if not dbVersion:
            logger.warning("Moving internal MAD version to database")
            try:
                with open('version.json') as f:
                    version = json.load(f)
                self._version = int(version['version'])
                self.dbwrapper.update_mad_version(self._version)
            except FileNotFoundError:
                logger.warning("Could not find version.json during move to DB"
                               ", will use version 0")
                self._version = 0
                self.dbwrapper.update_mad_version(self._version)
                self.start_update()
            dbVersion = self.dbwrapper.get_mad_version()
            if dbVersion:
                logger.success(
                    "Moved internal MAD version to database "
                    "as version {}", dbVersion)
            else:
                logger.error("Moving internal MAD version to DB failed!")
        else:
            logger.info("Internal MAD version in DB is {}", dbVersion)
            self._version = int(dbVersion)

        if int(self._version) < int(current_version):
            logger.warning('Performing updates from version {} to {} now',
                           self._version, current_version)
            self.start_update()
            logger.success('Updates to version {} finished', self._version)
Example #6
 def __get_installed_version(self):
     # checking mappings.json
     self.__convert_mappings()
     try:
         self._installed_ver = self.dbwrapper.get_mad_version()
         if self._installed_ver:
             logger.info("Internal MAD version in DB is {}",
                         self._installed_ver)
         else:
             logger.info(
                 'Partial schema detected.  Additional steps required')
             sql = "ALTER TABLE versions ADD PRIMARY KEY(`key`)"
             self.dbwrapper.execute(sql, commit=True, suppress_log=True)
             self.__install_instance_table()
             # Attempt to read the old version.json file to get the latest install version in the database
             try:
                 with open('version.json') as f:
                     self._installed_ver = json.load(f)['version']
                 logger.success("Moving internal MAD version to database")
                 self.__set_installed_ver(self._installed_ver)
             except FileNotFoundError:
                 logger.info(
                     'New installation with a partial schema detected.  Updates will be attempted'
                 )
                 self._installed_ver = 0
                 self.__set_installed_ver(self._installed_ver)
             self.__reload_instance_id()
             logger.success(
                 "Moved internal MAD version to database as version {}",
                 self._installed_ver)
     except mysql.connector.Error:
         # Version table does not exist.  This is installed with the base install so we can assume the required
         # tables have not been created
         self.__install_schema()
Example #7
    def process_data(self, received_timestamp, data, origin):
        data_type = data.get("type", None)
        raw = data.get("raw", False)
        logger.debug2("Processing data of {}".format(origin))
        if raw:
            logger.debug5("Received raw payload: {}", data["payload"])

        if data_type and not raw:
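            # Only parsed (non-raw) payloads with a known proto type are processed any further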
            logger.debug2("Running stats collector of {}".format(origin))
            if self.__application_args.game_stats:
                self.__mitm_mapper.run_stats_collector(origin)

            logger.debug4("Received data of {}: {}", origin, data)
            if data_type == 106:
                # process GetMapObject
                logger.success("Processing GMO received from {}. Received at {}", str(
                    origin), str(datetime.fromtimestamp(received_timestamp)))

                if self.__application_args.weather:
                    self.__db_submit.weather(origin, data["payload"], received_timestamp)

                self.__db_submit.stops(origin, data["payload"])
                self.__db_submit.gyms(origin, data["payload"])
                self.__db_submit.raids(origin, data["payload"], self.__mitm_mapper)

                self.__db_submit.spawnpoints(origin, data["payload"])
                mon_ids_iv = self.__mitm_mapper.get_mon_ids_iv(origin)
                self.__db_submit.mons(origin, data["payload"], mon_ids_iv, self.__mitm_mapper)
                self.__db_submit.cells(origin, data["payload"])
                self.__mitm_mapper.submit_gmo_for_location(origin, data["payload"])
                logger.debug2("Done processing GMO of {}".format(origin))
            elif data_type == 102:
                playerlevel = self.__mitm_mapper.get_playerlevel(origin)
                if playerlevel >= 30:
                    logger.info("Processing Encounter received from {} at {}", str(origin),
                                str(received_timestamp))
                    self.__db_submit.mon_iv(origin, received_timestamp, data["payload"], self.__mitm_mapper)
                    logger.debug2("Done processing encounter of {}".format(origin))
                else:
                    logger.debug('Playerlevel lower than 30 - not processing encounter data')
            elif data_type == 101:
                logger.debug2("Processing proto 101 of {}".format(origin))
                self.__db_submit.quest(origin, data["payload"], self.__mitm_mapper)
                logger.debug2("Done processing proto 101 of {}".format(origin))
            elif data_type == 104:
                logger.debug2("Processing proto 104 of {}".format(origin))
                self.__db_submit.stop_details(data["payload"])
                logger.debug2("Done processing proto 104 of {}".format(origin))
            elif data_type == 4:
                logger.debug2("Processing proto 4 of {}".format(origin))
                self.__mitm_mapper.generate_player_stats(origin, data["payload"])
                logger.debug2("Done processing proto 4 of {}".format(origin))
            elif data_type == 156:
                logger.debug2("Processing proto 156 of {}".format(origin))
                self.__db_submit.gym(origin, data["payload"])
                logger.debug2("Done processing proto 156 of {}".format(origin))
Example #8
 def __install_schema(self):
     try:
         with open('scripts/SQL/rocketmap.sql') as fh:
             tables = "".join(fh.readlines()).split(";")
             for table in tables:
                 install_cmd = '%s;%s;%s'
                 args = ('SET FOREIGN_KEY_CHECKS=0', 'SET NAMES utf8mb4', table)
                 self.dbwrapper.execute(install_cmd % args, commit=True, suppress_log=True)
         self.__set_installed_ver(self._madver)
         logger.success('Successfully installed MAD version {} to the database', self._installed_ver)
         self.__reload_instance_id()
     except Exception:
         logger.critical('Unable to install default MAD schema.  Please install the schema from '
                         'scripts/SQL/rocketmap.sql')
         sys.exit(1)
Example #9
 def __update_required(self):
     if self._installed_ver == 0:
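         # Version 0 means nothing was recorded yet; if the v_trs_status view already exists the schema
         # was installed previously, so just record the current version instead of patching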
         sql = "SELECT COUNT(*) FROM `information_schema`.`views` WHERE `TABLE_NAME` = 'v_trs_status'"
         count = self.dbwrapper.autofetch_value(sql)
         if count:
             logger.success(
                 'It looks like the database has been successfully installed.  Setting version to {}',
                 self._madver)
             self.__set_installed_ver(self._madver)
             return False
         else:
             return True
     else:
         # TODO - When should these be executed?  Can they be added somewhere and ignored?
         # Execute these weird unversioned elements.  Seriously don't know a good time for them
         self._schema_updater.ensure_unversioned_tables_exist()
         self._schema_updater.ensure_unversioned_columns_exist()
         self._schema_updater.create_madmin_databases_if_not_exists()
         self._schema_updater.ensure_unversioned_madmin_columns_exist()
         return self._installed_ver < self._madver
Example #10
 def __update_mad(self):
     if self._madver < self._installed_ver:
         logger.error(
             'Mis-matched version number detected.  Not applying any updates'
         )
     else:
         logger.warning('Performing updates from version {} to {} now',
                        self._installed_ver, self._madver)
         all_patches = list(MAD_UPDATES.keys())
         try:
             last_ver = all_patches.index(self._installed_ver)
             first_patch = last_ver + 1
         except ValueError:
             first_patch = 0
         updates_to_apply = all_patches[first_patch:]
         logger.info('Patches to apply: {}', updates_to_apply)
         for patch_ver in updates_to_apply:
             self.__apply_update(patch_ver)
         logger.success('Updates to version {} finished',
                        self._installed_ver)
Example #11
 def __init__(self, args, data_manager):
     self._application_args = args
     self.data_manager = data_manager
     self.dbwrapper = self.data_manager.dbc
     self._schema_updater: DbSchemaUpdater = self.dbwrapper.schema_updater
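     # MAD_UPDATES maps patch versions to patch modules; its last key is the newest available version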
     self._madver = list(MAD_UPDATES.keys())[-1]
     self._installed_ver = None
     self.__validate_versions_schema()
     # If the versions table does not exist we have to install the schema.  When the schema has just been installed
     # we are already on the latest version, so none of these checks are required
     if self._installed_ver is None:
         self.__convert_mappings()
         self.__get_installed_version()
         if self._installed_ver in [23, 24]:
             self.__validate_trs_schema()
         self._schema_updater.ensure_unversioned_tables_exist()
         self._schema_updater.ensure_unversioned_columns_exist()
         self._schema_updater.create_madmin_databases_if_not_exists()
         self._schema_updater.ensure_unversioned_madmin_columns_exist()
         if self.__update_required():
             self.__update_mad()
         else:
             logger.success('MAD DB is running latest version')
Example #12
def main():
    args = Args()
    initLogging(args)

    if len(sys.argv) != 2:
        logger.error("usage: remove_all_spawns_within_geofence.py GEOFENCE_FILENAME")
        sys.exit(1)

    LocationWithID = collections.namedtuple('Location', ['lat', 'lng', 'spawnpoint'])

    geofence_filename = sys.argv[1]
    # print("Argument: '%s'" % (geofence_filename))
    # no .txt, add it
    if ".txt" not in geofence_filename:
        geofence_filename = geofence_filename + ".txt"
    # no / in filename, probably not an absolute path, append standard MAD path
    if "/" not in geofence_filename:
        geofence_filename = "../configs/geofences/" + geofence_filename
    logger.info("Trying to use file: {}", geofence_filename)
    if not os.path.isfile(geofence_filename):
        logger.error("Geofence file {} not found, exit", geofence_filename)
        sys.exit(1)

    geofence_helper = GeofenceHelper(geofence_filename, None)
    minLat, minLon, maxLat, maxLon = geofence_helper.get_polygon_from_fence()
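    # Pre-filter spawnpoints by the bounding box of the fence; exact polygon filtering happens further down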
    query = (
        "SELECT latitude, longitude, spawnpoint "
        "FROM trs_spawn "
        "WHERE (latitude >= {} AND longitude >= {} "
        "AND latitude <= {} AND longitude <= {}) "
    ).format(minLat, minLon, maxLat, maxLon)

    delete_query = (
        "DELETE FROM trs_spawn "
        "WHERE spawnpoint = {} "
    )

    list_of_coords: List[LocationWithID] = []

    dbip = get_value_for(r'\s+dbip:\s+([^\s]+)')
    dbport = get_value_for(r'\s+dbport:\s+([^.\s]*)', False)
    if dbport is None:  # if dbport is not set, use default
        dbport = '3306'
    dbusername = get_value_for(r'\s+dbusername:\s+([^.\s]*)')
    dbpassword = get_value_for(r'\s+dbpassword:\s+([^.\s]*)')
    dbname = get_value_for(r'\s+dbname:\s+([^.\s]*)')

    # print("Successfully parsed config.ini, using values:")
    # print("dbport: %s" % dbport)
    # print("dbusername: %s" % dbusername)
    # print("dbname: %s" % dbname)
    # print("dbip: %s" % dbip)

    connection = mysql.connector.connect(
        host=dbip,
        port=dbport,
        user=dbusername,
        passwd=dbpassword,
        database=dbname)
    cursor = connection.cursor()

    cursor.execute(query)
    res = cursor.fetchall()
    for (latitude, longitude, spawnpoint) in res:
        list_of_coords.append(LocationWithID(latitude, longitude, spawnpoint))

    geofenced_coords = geofence_helper.get_geofenced_coordinates(list_of_coords)
    spawnpointcount = len(geofenced_coords)
    for coords in geofenced_coords:
        sql = delete_query.format(coords.spawnpoint)
        cursor.execute(sql)
        # print(sql)

    connection.commit()

    cursor.close()
    connection.close()
    logger.success("Done, deleted {} spawnpoints", spawnpointcount)
Example #13
File: start.py Project: Terrycy/MAD
         loader = unittest.TestLoader()
         start_dir = 'mapadroid/tests/'
         suite = loader.discover(start_dir)
         runner = unittest.TextTestRunner()
         result = runner.run(suite)
         exit_code = 0 if result.wasSuccessful() else 1
         raise KeyboardInterrupt
     else:
         while True:
             time.sleep(10)
 except (KeyboardInterrupt, Exception):
     logger.info("Shutdown signal received")
 finally:
     try:
         db_wrapper = None
         logger.success("Stop called")
         terminate_mad.set()
         # now cleanup all threads...
         # TODO: check against args or init variables to None...
         if mitm_receiver_process is not None:
             # mitm_receiver_thread.kill()
             logger.info("Trying to stop receiver")
             mitm_receiver_process.shutdown()
             logger.debug(
                 "MITM child threads successfully shutdown.  Terminating parent thread"
             )
             mitm_receiver_process.terminate()
             logger.debug("Trying to join MITMReceiver")
             mitm_receiver_process.join()
             logger.debug("MITMReceiver joined")
         if device_Updater is not None:
Example #14
    mapping_manager_manager = MappingManagerManager()
    mapping_manager_manager.start()
    mapping_manager_stop_event = mapping_manager_manager.Event()
    mapping_manager: MappingManager = MappingManager(db_wrapper, args,
                                                     data_manager, True)

    ws_server = WebsocketServer(args,
                                None,
                                db_wrapper,
                                mapping_manager,
                                None,
                                data_manager,
                                configmode=True)
    t_ws = Thread(name='scanner', target=ws_server.start_server)
    t_ws.daemon = False
    t_ws.start()

    jobstatus: dict = {}

    device_Updater = deviceUpdater(ws_server, args, jobstatus, db_wrapper)

    logger.success(
        'Starting MADmin on port {} - Open a browser, visit MADmin and go to "Settings"',
        int(args.madmin_port))
    t_flask = Thread(name='madmin',
                     target=start_madmin,
                     args=(args, db_wrapper, ws_server, mapping_manager,
                           data_manager, device_Updater, jobstatus))
    t_flask.daemon = False
    t_flask.start()
Example #15
    def __send_webhook(self, payload):
        if len(payload) == 0:
            logger.debug("Payload empty. Skip sending to webhook.")
            return

        # get list of urls
        webhooks = self.__args.webhook_url.replace(" ", "").split(",")
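        # Each entry may be prefixed with a bracketed list of payload types it should receive; parsed below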

        webhook_count = len(webhooks)
        current_wh_num = 1

        for webhook in webhooks:
            payloadToSend = []
            subTypes = "all"
            url = webhook.strip()

            if url.startswith("["):
                endIndex = webhook.rindex("]")
                endIndex += 1
                subTypes = webhook[:endIndex]
                url = url[endIndex:]

                for payloadData in payload:
                    if payloadData["type"] in subTypes:
                        payloadToSend.append(payloadData)
            else:
                payloadToSend = payload

            if len(payloadToSend) == 0:
                logger.debug("Payload empty. Skip sending to: {} (Filter: {})",
                             url, subTypes)
                continue
            else:
                logger.debug("Sending to webhook url: {} (Filter: {})", url,
                             subTypes)

            payload_list = self.__payload_chunk(
                payloadToSend, self.__args.webhook_max_payload_size)

            current_pl_num = 1
            for payload_chunk in payload_list:
                logger.debug4("Python data for payload: {}",
                              str(payload_chunk))
                logger.debug3("Payload: {}", str(json.dumps(payload_chunk)))

                try:
                    response = requests.post(
                        url,
                        data=json.dumps(payload_chunk),
                        headers={"Content-Type": "application/json"},
                        timeout=5,
                    )

                    if response.status_code != 200:
                        logger.warning(
                            "Got status code other than 200 OK from webhook destination: {}",
                            str(response.status_code),
                        )
                    else:
                        if webhook_count > 1:
                            whcount_text = " [wh {}/{}]".format(
                                current_wh_num, webhook_count)
                        else:
                            whcount_text = ""

                        if len(payload_list) > 1:
                            whchunk_text = " [pl {}/{}]".format(
                                current_pl_num, len(payload_list))
                        else:
                            whchunk_text = ""

                        logger.success(
                            "Successfully sent payload to webhook{}{}. Stats: {}",
                            whchunk_text,
                            whcount_text,
                            json.dumps(
                                self.__payload_type_count(payload_chunk)),
                        )
                except Exception as e:
                    logger.warning(
                        "Exception occured while sending webhook: {}", str(e))

                current_pl_num += 1
            current_wh_num += 1
Example #16
    def _wait_for_data(self,
                       timestamp: float = None,
                       proto_to_wait_for=106,
                       timeout=None):
        if timestamp is None:
            timestamp = time.time()

        if timeout is None:
            timeout = self.get_devicesettings_value("mitm_wait_timeout", 45)

        # GMOs may only contain mons once we have waited a while at the location (usually not before the 2nd GMO),
        # which is especially the case when teleporting, so we double the timeout for mon modes
        mode = self._mapping_manager.routemanager_get_mode(
            self._routemanager_name)
        if mode in ["mon_mitm", "iv_mitm"
                    ] or self._mapping_manager.routemanager_get_init(
                        self._routemanager_name):
            timeout *= 2
        # let's fetch the latest data to add the offset to timeout (in case device and server times are off...)
        latest = self._mitm_mapper.request_latest(self._origin)
        timestamp_last_data = latest.get("timestamp_last_data", 0)
        timestamp_last_received = latest.get("timestamp_receiver", 0)

        # we can now construct the rough estimate of the diff of time of mobile vs time of server, subtract our
        # timestamp by the diff
        # TODO: discuss, probably wiser to add to timeout or get the diff of how long it takes for RGC to issue a cmd
        timestamp = timestamp - (timestamp_last_received - timestamp_last_data)

        logger.info('Waiting for data after {}',
                    datetime.fromtimestamp(timestamp))
        data_requested = LatestReceivedType.UNDEFINED

        while data_requested == LatestReceivedType.UNDEFINED and timestamp + timeout >= int(time.time()) \
                and not self._stop_worker_event.is_set():
            latest = self._mitm_mapper.request_latest(self._origin)
            latest_location: Optional[Location] = latest.get("location", None)
            check_data = True
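            # For GMOs (proto 106), only accept data that was recorded close to the worker's current position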
            if (proto_to_wait_for == 106 and latest_location is not None
                    and latest_location.lat != 0.0
                    and latest_location.lng != 0.0):
                logger.debug(
                    "Checking worker location {} against real data location {}",
                    self.current_location, latest_location)
                distance_to_data = get_distance_of_two_points_in_meters(
                    float(latest_location.lat), float(latest_location.lng),
                    float(self.current_location.lat),
                    float(self.current_location.lng))
                max_distance_of_mode = self._mapping_manager.routemanager_get_max_radius(
                    self._routemanager_name)
                max_distance_for_worker: int = self._applicationArgs.maximum_valid_distance \
                    if max_distance_of_mode < self._applicationArgs.maximum_valid_distance else max_distance_of_mode
                logger.debug("Distance of worker {} to data location: {}",
                             str(self._origin), str(distance_to_data))
                if max_distance_for_worker and distance_to_data > max_distance_for_worker:
                    logger.debug(
                        "Real data too far from worker position, waiting...")
                    check_data = False

            if check_data:
                data_requested = self._wait_data_worker(
                    latest, proto_to_wait_for, timestamp)
            if not self._mapping_manager.routemanager_present(self._routemanager_name) \
                    or self._stop_worker_event.is_set():
                logger.error("Worker {} get killed while sleeping",
                             str(self._origin))
                raise InternalStopWorkerException

            time.sleep(1)

        position_type = self._mapping_manager.routemanager_get_position_type(
            self._routemanager_name, self._origin)
        if position_type is None:
            logger.warning(
                "Mappings/Routemanagers have changed, stopping worker to be created again"
            )
            raise InternalStopWorkerException
        if data_requested != LatestReceivedType.UNDEFINED:
            logger.success('Got the data requested')
            self._reboot_count = 0
            self._restart_count = 0
            self._rec_data_time = datetime.now()

            self._mitm_mapper.collect_location_stats(
                self._origin, self.current_location, 1,
                self._waittime_without_delays, position_type, time.time(),
                self._mapping_manager.routemanager_get_mode(
                    self._routemanager_name), self._transporttype)

        else:
            # TODO: timeout also happens if there is no useful data such as mons nearby in mon_mitm mode, we need to
            # TODO: be more precise (timeout vs empty data)
            logger.warning("Timeout waiting for data")

            self._mitm_mapper.collect_location_stats(
                self._origin, self.current_location, 0,
                self._waittime_without_delays, position_type, 0,
                self._mapping_manager.routemanager_get_mode(
                    self._routemanager_name), self._transporttype)

            self._restart_count += 1

            restart_thresh = self.get_devicesettings_value("restart_thresh", 5)
            reboot_thresh = self.get_devicesettings_value("reboot_thresh", 3)
            if self._mapping_manager.routemanager_get_route_stats(
                    self._routemanager_name, self._origin) is not None:
                if self._init:
                    restart_thresh = self.get_devicesettings_value(
                        "restart_thresh", 5) * 2
                    reboot_thresh = self.get_devicesettings_value(
                        "reboot_thresh", 3) * 2

            if self._restart_count > restart_thresh:
                self._reboot_count += 1
                if self._reboot_count > reboot_thresh \
                        and self.get_devicesettings_value("reboot", False):
                    logger.error("Too many timeouts - Rebooting device {}",
                                 str(self._origin))
                    self._reboot(mitm_mapper=self._mitm_mapper)
                    raise InternalStopWorkerException

                # self._mitm_mapper.
                self._restart_count = 0
                logger.error("Too many timeouts - Restarting game on {}",
                             str(self._origin))
                self._restart_pogo(True, self._mitm_mapper)

        self.worker_stats()
        return data_requested