def __screendetection_get_type_internal(self, image,
                                        identifier) -> Optional[Tuple[ScreenType, Optional[dict], int, int, int]]:
    """OCR the given screenshot and classify which screen it shows.

    Runs pytesseract over a grayscale ('LA') copy first and the original
    image second, matching detected words against self._ScreenType.

    :param image: path of the screenshot to analyse
    :param identifier: device/origin identifier used only for logging
    :return: (screen type, last OCR dict, original width, original height,
              scale factor) or None if the image could not be opened.
              The OCR dict may be None if tesseract failed on the last pass.
    """
    returntype: ScreenType = ScreenType.UNDEFINED
    globaldict: Optional[dict] = {}
    # diff is the upscale factor applied below; callers presumably divide
    # OCR coordinates by it -- TODO confirm against callers
    diff: int = 1
    logger.debug(
        "__screendetection_get_type_internal: Detecting screen type - identifier {}", identifier)
    texts = []
    try:
        with Image.open(image) as frame_org:
            width, height = frame_org.size
            logger.debug("Screensize of origin {}: W:{} x H:{}".format(
                str(identifier), str(width), str(height)))
            if width < 1080:
                # upscale small screens 2x to improve OCR accuracy
                # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10 -- confirm pinned version
                logger.info('Resize screen ...')
                frame_org = frame_org.resize(
                    [int(2 * s) for s in frame_org.size], Image.ANTIALIAS)
                diff: int = 2
            frame = frame_org.convert('LA')
            # try the grayscale conversion first, fall back to the original
            texts = [frame, frame_org]
            for text in texts:
                try:
                    globaldict = pytesseract.image_to_data(
                        text, output_type=Output.DICT, timeout=40, config='--dpi 70')
                except Exception as e:
                    logger.error(
                        "Tesseract Error for device {}: {}. Exception: {}".
                        format(str(identifier), str(globaldict), e))
                    globaldict = None
                logger.debug("Screentext: {}".format(str(globaldict)))
                if globaldict is None or 'text' not in globaldict:
                    continue
                n_boxes = len(globaldict['level'])
                for i in range(n_boxes):
                    if returntype != ScreenType.UNDEFINED:
                        break
                    # only consider words with more than 3 characters
                    if len(globaldict['text'][i]) > 3:
                        for z in self._ScreenType:
                            # NOTE(review): after a resize, 'top' values are in the
                            # upscaled coordinate space while height is the original
                            # size -- confirm this comparison is intended
                            if globaldict['top'][i] > height / 4 and globaldict['text'][i] in \
                                    self._ScreenType[z]:
                                returntype = ScreenType(z)
                if returntype != ScreenType.UNDEFINED:
                    break
            del texts
            frame.close()
    except (FileNotFoundError, ValueError) as e:
        logger.error("Failed opening image {} with exception {}", image, e)
        return None
    return returntype, globaldict, width, height, diff
def parse_permission(self, xml) -> bool:
    """Search a UI-automator XML dump for a permission button and click it.

    Looks for a node whose text is one of ZULASSEN/ALLOW/AUTORISER
    (German/English/French), computes the center of its bounds and clicks it.

    :param xml: XML string from the device (may be None on websocket issues)
    :return: True if a button was found and clicked, False otherwise
    """
    if xml is None:
        logger.warning(
            'Something wrong with processing - getting None Type from Websocket...')
        return False
    click_text = ('ZULASSEN', 'ALLOW', 'AUTORISER')
    try:
        parser = ET.XMLParser(encoding="utf-8")
        xmlroot = ET.fromstring(xml, parser=parser)
        bounds: str = ""
        for item in xmlroot.iter('node'):
            if str(item.attrib['text']).upper() in click_text:
                logger.debug("Found text {}", str(item.attrib['text']))
                bounds = item.attrib['bounds']
                logger.debug("Bounds {}", str(item.attrib['bounds']))
                # bounds look like "[x1,y1][x2,y2]"; a malformed value makes
                # match None and the AttributeError below is swallowed by the
                # broad except -- NOTE(review): consider checking match explicitly
                match = re.search(r'^\[(\d+),(\d+)\]\[(\d+),(\d+)\]$', bounds)
                # click the center of the button's bounding box
                click_x = int(match.group(1)) + ((int(match.group(3)) - int(match.group(1))) / 2)
                click_y = int(match.group(2)) + ((int(match.group(4)) - int(match.group(2))) / 2)
                logger.debug('Click ' + str(click_x) + ' / ' + str(click_y))
                self._communicator.click(click_x, click_y)
                time.sleep(2)
                return True
    except Exception as e:
        logger.error('Something wrong while parsing xml: {}'.format(str(e)))
        return False
    time.sleep(2)
    logger.warning('Dont find any button...')
    return False
def __update_mad(self):
    """Apply all outstanding patches to move the install from
    self._installed_ver up to self._madver.

    Patches run in the order of MAD_UPDATES' keys, starting with the first
    patch after the currently installed version. If the installed version is
    newer than the code version, nothing is applied.
    """
    if self._madver < self._installed_ver:
        logger.error('Mis-matched version number detected. Not applying any updates')
        return
    logger.warning('Performing updates from version {} to {} now',
                   self._installed_ver, self._madver)
    all_patches = list(MAD_UPDATES.keys())
    try:
        last_ver = all_patches.index(self._installed_ver)
        first_patch = last_ver + 1
    except ValueError:
        # The current version of the patch was most likely removed as it was no longer
        # needed. Determine where to start by finding the last executed
        next_patch = None
        for patch_ver in all_patches:
            if self._installed_ver > patch_ver:
                continue
            next_patch = patch_ver
            break
        try:
            first_patch = all_patches.index(next_patch)
        except ValueError:
            # BUGFIX: previously execution fell through here with first_patch
            # unbound, raising NameError on the slice below. Bail out instead.
            logger.critical('Unable to find the next patch to apply')
            return
    updates_to_apply = all_patches[first_patch:]
    logger.info('Patches to apply: {}', updates_to_apply)
    for patch_ver in updates_to_apply:
        self.__apply_update(patch_ver)
    logger.success('Updates to version {} finished', self._installed_ver)
def _clear_thread(self):
    """Background loop that clears the item box or quest log on demand.

    Polls self.clear_thread_task; when it leaves IDLE, takes the work mutex,
    performs the requested cleanup and resets the task to IDLE. Exits when
    the stop event is set or the websocket worker goes away.
    """
    logger.info('Starting clear Quest Thread')
    while not self._stop_worker_event.is_set():
        if self.clear_thread_task == ClearThreadTasks.IDLE:
            time.sleep(1)
            continue
        try:
            # serialize against the main worker loop
            self._work_mutex.acquire()
            time.sleep(1)
            if self.clear_thread_task == ClearThreadTasks.BOX:
                logger.info("Clearing box")
                self.clear_box(self._delay_add)
                self.clear_thread_task = ClearThreadTasks.IDLE
            elif self.clear_thread_task == ClearThreadTasks.QUEST and not self._level_mode:
                # quests are not cleared while leveling
                logger.info("Clearing quest")
                self._clear_quests(self._delay_add)
                self.clear_thread_task = ClearThreadTasks.IDLE
            time.sleep(1)
        except (InternalStopWorkerException, WebsocketWorkerRemovedException,
                WebsocketWorkerTimeoutException,
                WebsocketWorkerConnectionClosedException) as e:
            logger.error("Worker removed while clearing quest/box")
            self._stop_worker_event.set()
            return
        finally:
            # always reset the task and release the mutex, even on stop
            self.clear_thread_task = ClearThreadTasks.IDLE
            self._work_mutex.release()
def __fetch_coords(self, mode: str, geofence_helper: GeofenceHelper,
                   coords_spawns_known: bool = False, init: bool = False,
                   range_init: int = 630, including_stops: bool = False) -> List[Location]:
    """Collect the coordinates a route should cover.

    For init scans, coords are generated geometrically via S2 cells; otherwise
    they are read from the DB depending on ``mode``.

    :param mode: one of "raids_mitm", "mon_mitm", "pokestops"
    :param geofence_helper: fence used to filter/generate the coords
    :param coords_spawns_known: mon_mitm only - read detected spawnpoints
    :param init: generate S2-based locations instead of reading the DB
    :param range_init: radius used for S2 location generation
    :param including_stops: raids_mitm only - also include pokestops
    :raises SystemExit: if ``mode`` is not implemented
    """
    if init:
        # calculate all level N cells (mapping back from mapping above linked to mode)
        return S2Helper._generate_locations(range_init, geofence_helper)
    # grab data from DB depending on mode
    # TODO: move routemanagers to factory
    coords: List[Location] = []
    if mode == "raids_mitm":
        coords = self.__db_wrapper.gyms_from_db(geofence_helper)
        if including_stops:
            coords.extend(self.__db_wrapper.stops_from_db(geofence_helper))
    elif mode == "mon_mitm":
        if coords_spawns_known:
            logger.debug("Reading known Spawnpoints from DB")
            coords = self.__db_wrapper.get_detected_spawns(geofence_helper)
        else:
            logger.debug("Reading unknown Spawnpoints from DB")
            coords = self.__db_wrapper.get_undetected_spawns(geofence_helper)
    elif mode == "pokestops":
        coords = self.__db_wrapper.stops_from_db(geofence_helper)
    else:
        logger.error("Mode not implemented yet: {}", str(mode))
        # BUGFIX: exit() is a site-module convenience builtin that may not be
        # available; raise SystemExit(1) directly (same observable behavior).
        raise SystemExit(1)
    return coords
def executemany(self, sql, args, commit=False, **kwargs):
    """
    Execute with many args. Similar with executemany() function in pymysql.
    args should be a sequence.
    :param sql: sql clause
    :param args: args
    :param commit: commit or not.
    :return: if commit, return None, else, return result
    """
    # get connection form connection pool instead of create one.
    self._connection_semaphore.acquire()
    try:
        # BUGFIX: previously the semaphore was acquired before get_connection()
        # outside of any try/finally, so a failure to obtain a connection or
        # cursor leaked a semaphore slot permanently.
        conn = self._pool.get_connection()
        cursor = conn.cursor()
        try:
            cursor.executemany(sql, args, **kwargs)
            if commit is True:
                conn.commit()
                return None
            else:
                res = cursor.fetchall()
                return res
        except mysql.connector.Error as err:
            logger.error("Failed executing query: {}", str(err))
            return None
        except Exception as e:
            logger.error("Unspecified exception in dbWrapper: {}", str(e))
            return None
        finally:
            # return the connection to the pool in every case
            self.close(conn, cursor)
    finally:
        self._connection_semaphore.release()
def __init__(self, websocket, args, returning, db):
    """Job updater: restores the persisted job log, initialises/requeues
    jobs and spawns the configured number of update-queue worker threads.

    :param websocket: websocket server used to talk to devices
    :param args: parsed application arguments (job_thread_count is read here)
    :param returning: presumably jobs/devices to resume -- TODO confirm caller
    :param db: database wrapper
    """
    self._websocket = websocket
    self._update_queue = Queue()
    self._update_mutex = RLock()
    self._db = db
    self._log = {}
    self._args = args
    self._commands: dict = {}
    self._globaljoblog: dict = {}
    self._current_job_id = []
    self._current_job_device = []
    self._returning = returning
    try:
        # restore job history from disk if present
        if os.path.exists('update_log.json'):
            with open('update_log.json') as logfile:
                self._log = json.load(logfile)
    except json.decoder.JSONDecodeError:
        # a half-written/corrupt log is discarded rather than crashing startup
        logger.error(
            'Corrupted update_log.json file found. Deleting the '
            'file. Please check remaining disk space or disk health.')
        os.remove('update_log.json')
    self.init_jobs()
    self.kill_old_jobs()
    self.load_automatic_jobs()
    self._stop_updater_threads: Event = Event()
    self.t_updater = []
    # one daemon worker per configured job thread, all consuming the queue
    for i in range(self._args.job_thread_count):
        t = Thread(name='apk_updater-{}'.format(str(i)),
                   target=self.process_update_queue, args=(i, ))
        t.daemon = True
        self.t_updater.append(t)
        t.start()
def get_screen_text(self, screenpath: str, identifier) -> Optional[dict]:
    """Run OCR on the screenshot at ``screenpath`` via the worker pool.

    :param screenpath: path to the screenshot; None is rejected
    :param identifier: device identifier, used for logging only
    :return: the tesseract result dict, or None on invalid input / failure
    """
    if screenpath is None:
        logger.error("get_screen_text: image does not exist")
        return None
    # hand the CPU-heavy OCR work off to the thread pool and block on it
    pending = self.__thread_pool.apply_async(self.__internal_get_screen_text,
                                             (screenpath, identifier))
    return pending.get()
def __internal_get_screen_text(self, screenpath: str, identifier) -> Optional[dict]:
    """Open ``screenpath``, convert it to grayscale and OCR it with tesseract.

    :param screenpath: image file to read
    :param identifier: device identifier, used for logging only
    :return: tesseract data dict, or None if the image could not be opened
             or tesseract failed
    """
    logger.debug("get_screen_text: Reading screen text - identifier {}", identifier)
    ocr_result: Optional[dict] = {}
    try:
        with Image.open(screenpath) as frame:
            grayscale = frame.convert('LA')
            try:
                ocr_result = pytesseract.image_to_data(grayscale,
                                                       output_type=Output.DICT,
                                                       timeout=40,
                                                       config='--dpi 70')
            except Exception as e:
                logger.error(
                    "Tesseract Error for device {}: {}. Exception: {}".
                    format(str(identifier), str(ocr_result), e))
                ocr_result = None
    except (FileNotFoundError, ValueError) as e:
        logger.error("Failed opening image {} with exception {}", screenpath, e)
        return None
    if not isinstance(ocr_result, dict):
        logger.warning("Could not read text in image: {}", ocr_result)
        return None
    return ocr_result
def __internal_get_inventory_text(self, filename, identifier, x1, x2, y1, y2) -> Optional[str]:
    """Crop an inventory region out of a screenshot, upscale it and OCR it.

    :param filename: path of the full screenshot
    :param identifier: device identifier used for the temp file name
    :param x1/x2/y1/y2: crop region; x1 > x2 and y1 > y2 are expected so the
        differences below are positive -- TODO confirm against callers
    :return: the OCR'd string, or None on any error
    """
    screenshot_read = cv2.imread(filename)
    temp_path_item = self.temp_dir_path + "/" + str(
        identifier) + "_inventory.png"
    # NOTE(review): h is derived from x-coords (crop width) and w from
    # y-coords (crop height) -- names are swapped but used consistently below
    h = x1 - x2
    w = y1 - y2
    gray = cv2.cvtColor(screenshot_read, cv2.COLOR_BGR2GRAY)
    # rows are y, columns are x
    gray = gray[int(y2):(int(y2) + int(w)), int(x2):(int(x2) + int(h))]
    scale_percent = 200  # percent of original size
    width = int(gray.shape[1] * scale_percent / 100)
    height = int(gray.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    gray = cv2.resize(gray, dim, interpolation=cv2.INTER_AREA)
    cv2.imwrite(temp_path_item, gray)
    try:
        with Image.open(temp_path_item) as im:
            try:
                text = pytesseract.image_to_string(im)
            except Exception as e:
                logger.error(
                    "Error running tesseract on inventory text: {}", e)
                return None
    except (FileNotFoundError, ValueError) as e:
        logger.error("Failed opening image {} with exception {}",
                     temp_path_item, e)
        return None
    return text
def __check_close_present(self, filename, identifier, communicator,
                          radiusratio=12, Xcord=True):
    """Detect (and click, via __read_circle_count) a close/"X" button circle
    in the given screenshot.

    :param filename: screenshot path
    :param identifier: device identifier used for the temp file name
    :param communicator: device communicator forwarded to the circle check
    :param radiusratio: ratio controlling the expected circle radius
    :param Xcord: NOTE(review): unused here; xcord=False is always passed to
        __read_circle_count -- confirm whether this parameter is dead
    :return: True if a close-button circle was found, False otherwise
    """
    if not os.path.isfile(filename):
        logger.warning("__check_close_present: {} does not exist", str(filename))
        return False
    try:
        image = cv2.imread(filename)
        height, width, _ = image.shape
    except Exception as e:
        logger.error("Screenshot corrupted: {}", e)
        return False
    cv2.imwrite(
        os.path.join(self.temp_dir_path, str(identifier) + '_exitcircle.jpg'),
        image)
    if self.__read_circle_count(os.path.join(
            self.temp_dir_path, str(identifier) + '_exitcircle.jpg'),
            identifier, float(radiusratio), communicator, xcord=False,
            crop=True, click=True, canny=True) > 0:
        return True
    # BUGFIX: the original fell off the end and implicitly returned None;
    # return an explicit False so the function consistently yields a bool.
    return False
def __internal_check_nearby(self, filename, identifier, communicator):
    """Check whether the nearby/raid screen is open; open it if possible.

    :param filename: screenshot path to analyse
    :param identifier: device identifier forwarded to the raid-line check
    :param communicator: used to click the nearby button if needed
    :return: True only if the raid screen is already running with nearby open
    """
    screen = None
    try:
        screen = cv2.imread(filename)
    except Exception:
        pass
    if screen is None:
        # unreadable or missing screenshot
        logger.error("Screenshot corrupted :(")
        return False
    if self.__check_raid_line(filename, identifier, communicator):
        logger.info('Nearby already open')
        return True
    if self.__check_raid_line(filename, identifier, communicator,
                              leftSide=True, clickinvers=True):
        logger.info('Raidscreen not running but nearby open')
        return False
    # neither raid screen nor nearby open: click where the nearby button sits
    height, width, _ = screen.shape
    logger.info('Raidscreen not running...')
    communicator.click(int(width - (width / 7.2)), int(height - (height / 12.19)))
    time.sleep(4)
    return False
def _check_unprocessed_stops(self):
    """Return the stops of the current stoplist that still need processing.

    Increments a per-stop failure counter; stops failing too often or no
    longer in the stoplist are added to self._coords_to_be_ignored.

    :return: list of Locations to retry (empty if nothing is pending)
    """
    self._manager_mutex.acquire()
    try:
        list_of_stops_to_return: List[Location] = []
        if len(self._stoplist) == 0:
            return list_of_stops_to_return
        else:
            # we only want to add stops that we haven't spun yet
            # hoisted out of the loop: the worker coords don't change here
            pending_with_worker = self._get_unprocessed_coords_from_worker()
            for stop in self._stoplist:
                if stop in self._stops_not_processed:
                    self._stops_not_processed[stop] += 1
                elif stop not in pending_with_worker:
                    # first time this stop shows up as unprocessed
                    # BUGFIX: previously a stop not yet counted but still
                    # pending with a worker hit `+= 1` on a missing key
                    # and raised KeyError.
                    self._stops_not_processed[stop] = 1
        for stop, error_count in self._stops_not_processed.items():
            if stop not in self._stoplist:
                logger.info(
                    "Location {} is no longer in our stoplist and will be ignored".format(str(stop)))
                self._coords_to_be_ignored.add(stop)
            elif error_count < 4:
                logger.warning("Found stop not processed yet: {}".format(str(stop)))
                list_of_stops_to_return.append(stop)
            else:
                # NOTE(review): threshold is 4 attempts although the message
                # says "thrice" -- confirm which is intended
                logger.error("Stop {} has not been processed thrice in a row, "
                             "please check your DB".format(str(stop)))
                self._coords_to_be_ignored.add(stop)
        if len(list_of_stops_to_return) > 0:
            logger.info("Found stops not yet processed, retrying those in the next round")
        return list_of_stops_to_return
    finally:
        self._manager_mutex.release()
def send_webhook(self, id_, status):
    """Post a Discord webhook with the status of an automatic job.

    Only fires for jobs flagged 'auto' whose status is in the configured
    job_dt_send_type filter and when a webhook URL is configured.

    :param id_: job id (key into self._log)
    :param status: raw job status, converted via jobReturn()
    """
    # BUGFIX: initialise before the try so the error log below cannot raise
    # a secondary NameError when the failure happens before the assignments.
    origin = None
    file_ = None
    if not self._log[str(id_)]['auto']:
        return
    try:
        if jobReturn(status).name not in self._args.job_dt_send_type.split(
                '|') or not self._args.job_dt_wh:
            return
        from discord_webhook import DiscordWebhook, DiscordEmbed
        _webhook = DiscordWebhook(url=self._args.job_dt_wh_url)
        origin = self._log[str(id_)]['origin']
        file_ = self._log[str(id_)]['file']
        processtime = self._log[str(id_)].get('processingdate', None)
        returning = self._log[str(id_)].get('returning', '-')
        logger.info("Send discord status for device {} (Job: {})".format(str(origin), str(file_)))
        embed = DiscordEmbed(title='MAD Job Status',
                             description='Automatic Job processed', color=242424)
        embed.set_author(name='MADBOT')
        embed.add_embed_field(name='Origin', value=origin)
        embed.add_embed_field(name='Jobname', value=file_)
        # BUGFIX: user-facing field was misspelled 'Retuning'
        embed.add_embed_field(name='Returning', value=returning)
        embed.add_embed_field(name='Status', value=jobReturn(status).name)
        embed.add_embed_field(name='Next run', value=str(datetime.fromtimestamp(
            processtime) if processtime is not None else "-"))
        _webhook.add_embed(embed)
        _webhook.execute()
        embed = None
    except Exception as e:
        logger.error('Cannot send discord webhook for origin {} - Job {} - Reason: {}'.format(
            str(origin), str(file_), str(e)))
def _clear_thread(self):
    """Background loop clearing the item box or quest log on demand.

    Task codes (per the log messages below): 0 = idle, 1 = clear box,
    2 = clear quests. Exits when the stop event is set or the websocket
    worker goes away.
    """
    logger.info('Starting clear Quest Thread')
    while not self._stop_worker_event.is_set():
        if self.clear_thread_task == 0:
            time.sleep(1)
            continue
        try:
            # serialize against the main worker loop
            self._work_mutex.acquire()
            # TODO: less magic numbers?
            time.sleep(1)
            if self.clear_thread_task == 1:
                logger.info("Clearing box")
                self.clear_box(self._delay_add)
                self.clear_thread_task = 0
                self.set_devicesettings_value('last_cleanup_time', time.time())
            elif self.clear_thread_task == 2 and not self._level_mode:
                # quests are not cleared while leveling
                logger.info("Clearing quest")
                self._clear_quests(self._delay_add)
                self.clear_thread_task = 0
            time.sleep(1)
        except (WebsocketWorkerRemovedException, WebsocketWorkerTimeoutException) as e:
            logger.error("Worker removed while clearing quest/box")
            self._stop_worker_event.set()
            return
        finally:
            # always reset the task and release the mutex, even on stop
            self.clear_thread_task = 0
            self._work_mutex.release()
def parse_ggl(self, xml, mail: str) -> bool:
    """Find the Google account entry matching ``mail`` in a UI dump and click it.

    :param xml: UI-automator XML string from the device (may be None)
    :param mail: mail address to select (case-insensitive substring match)
    :return: True if the account entry was found and clicked, False otherwise
    """
    if xml is None:
        logger.warning(
            'Something wrong with processing - getting None Type from Websocket...'
        )
        return False
    try:
        parser = ET.XMLParser(encoding="utf-8")
        xmlroot = ET.fromstring(xml, parser=parser)
        for item in xmlroot.iter('node'):
            if mail.lower() in str(item.attrib['text']).lower():
                logger.info("Found mail {}",
                            self.censor_account(str(item.attrib['text'])))
                bounds = item.attrib['bounds']
                logger.debug("Bounds {}", str(item.attrib['bounds']))
                # bounds look like "[x1,y1][x2,y2]"; a malformed value makes
                # match None and the AttributeError is swallowed by the broad
                # except below -- NOTE(review): consider checking match
                match = re.search(r'^\[(\d+),(\d+)\]\[(\d+),(\d+)\]$', bounds)
                # click the center of the entry's bounding box
                click_x = int(match.group(1)) + (
                    (int(match.group(3)) - int(match.group(1))) / 2)
                click_y = int(match.group(2)) + (
                    (int(match.group(4)) - int(match.group(2))) / 2)
                logger.debug('Click ' + str(click_x) + ' / ' + str(click_y))
                self._communicator.click(click_x, click_y)
                time.sleep(2)
                return True
    except Exception as e:
        logger.error('Something wrong while parsing xml: {}'.format(
            str(e)))
        return False
    time.sleep(2)
    logger.warning('Dont find any mailaddress...')
    return False
def _takeScreenshot(self, delayAfter=0.0, delayBefore=0.0, errorscreen: bool = False):
    """Capture a screenshot on the device via the communicator.

    :param delayAfter: seconds to sleep after a successful capture
    :param delayBefore: seconds to sleep before capturing
    :param errorscreen: forwarded as fileaddon to get_screenshot_path
    :return: True on success (also updates _lastScreenshotTaken), else False
    """
    logger.debug("Taking screenshot...")
    time.sleep(delayBefore)
    # TODO: area settings for jpg/png and quality?
    if self.get_devicesettings_value("screenshot_type", "jpeg") == "png":
        screenshot_type = ScreenshotType.PNG
    else:
        screenshot_type = ScreenshotType.JPEG
    screenshot_quality: int = 80
    target_path = self.get_screenshot_path(fileaddon=errorscreen)
    captured = self._communicator.get_screenshot(target_path,
                                                 screenshot_quality,
                                                 screenshot_type)
    if not captured:
        logger.error("takeScreenshot: Failed retrieving screenshot")
        logger.debug("Failed retrieving screenshot")
        return False
    logger.debug("Success retrieving screenshot")
    self._lastScreenshotTaken = time.time()
    time.sleep(delayAfter)
    return True
def gyms_from_db(self, geofence_helper):
    """
    Retrieve all the gyms valid within the area set by geofence_helper
    :return: numpy array with coords
    """
    logger.debug("DbWrapper::gyms_from_db called")
    if geofence_helper is None:
        logger.error("No geofence_helper! Not fetching gyms.")
        return []
    logger.debug("Filtering with rectangle")
    rectangle = geofence_helper.get_polygon_from_fence()
    query = ("SELECT latitude, longitude "
             "FROM gym "
             "WHERE "
             "latitude >= %s AND longitude >= %s AND "
             "latitude <= %s AND longitude <= %s")
    res = self.execute(query, rectangle)
    # wrap every row in a Location before geofencing
    list_of_coords: List[Location] = [Location(latitude, longitude)
                                      for (latitude, longitude) in res]
    logger.debug(
        "Got {} coordinates in this rect (minLat, minLon, "
        "maxLat, maxLon): {}", len(list_of_coords), str(rectangle))
    return geofence_helper.get_geofenced_coordinates(list_of_coords)
def checkQuest(self, screenpath: str) -> ScreenType:
    """Determine whether the research (quest) menu is on screen.

    OCRs the screenshot and looks for the research-menu keywords in any
    detected word; clicks to dismiss and reports QUEST if found, otherwise
    presses back and reports UNDEFINED.

    :param screenpath: path of the screenshot to analyse
    :return: ScreenType.QUEST / ScreenType.UNDEFINED / ScreenType.ERROR
    """
    if screenpath is None or len(screenpath) == 0:
        logger.error("Invalid screen path: {}", screenpath)
        return ScreenType.ERROR
    globaldict = self._pogoWindowManager.get_screen_text(screenpath, self._id)
    # research-menu keywords in several languages (substring match)
    keywords = 'FIELD,SPECIAL,FELD,SPEZIAL,SPECIALES,TERRAIN'.split(",")
    if not globaldict:
        # dict is empty
        return ScreenType.ERROR
    for box_index in range(len(globaldict['level'])):
        detected_word = globaldict['text'][box_index]
        if any(keyword in detected_word for keyword in keywords):
            logger.info('Found research menu')
            self._communicator.click(100, 100)
            return ScreenType.QUEST
    logger.info('Listening to Dr. blabla - please wait')
    self._communicator.back_button()
    time.sleep(3)
    return ScreenType.UNDEFINED
def _wait_for_injection(self):
    """Block until PogoDroid reports a successful injection for this origin.

    Polls in ~20s rounds; periodically re-runs the window check and reboots
    the device once the configured threshold of rounds is reached.

    :return: True once injected, False on reboot or worker stop
    """
    self._not_injected_count = 0
    injection_thresh_reboot = int(self.get_devicesettings_value("injection_thresh_reboot", 20))
    while not self._mitm_mapper.get_injection_status(self._origin):
        self._check_for_mad_job()
        if self._not_injected_count >= injection_thresh_reboot:
            logger.error("Worker {} not injected in time - reboot", str(self._origin))
            self._reboot(self._mitm_mapper)
            return False
        logger.info("PogoDroid on worker {} didn't connect yet. Probably not injected? (Count: {}/{})",
                    str(self._origin), str(self._not_injected_count), str(injection_thresh_reboot))
        # every few rounds, make sure pogo is actually in the foreground
        # NOTE(review): 12 is missing from this sequence -- confirm intentional
        if self._not_injected_count in [3, 6, 9, 15, 18] and not self._stop_worker_event.is_set():
            logger.info("Worker {} will retry check_windows while waiting for injection at count {}",
                        str(self._origin), str(self._not_injected_count))
            self._ensure_pogo_topmost()
        self._not_injected_count += 1
        # wait up to 20s, but react to a stop request every second
        wait_time = 0
        while wait_time < 20:
            wait_time += 1
            if self._stop_worker_event.is_set():
                logger.error("Worker {} killed while waiting for injection", str(self._origin))
                return False
            time.sleep(1)
    return True
def generate_token_list(self, args) -> List[str]:
    """Read token-dispenser host lists from the configured files.

    Reads args.token_dispenser_user and args.token_dispenser (if set),
    skipping blank lines and duplicates.

    :param args: parsed application arguments
    :return: de-duplicated list of dispenser hosts (strings)
    """
    token_list: List[str] = []

    def _append_hosts(path) -> None:
        # One helper for both files: read as text (the original opened the
        # user file in 'rb', producing bytes despite the List[str] contract).
        try:
            with open(path, 'r') as fh:
                for line in fh:
                    host = line.strip()
                    # BUGFIX: the original tested `host.strip() not in fh`,
                    # i.e. membership against the live file handle. That
                    # consumed the remaining lines mid-iteration, so at most
                    # the first host per file was ever collected. Dedupe
                    # against the result list instead.
                    if host and host not in token_list:
                        token_list.append(host)
        except FileNotFoundError:
            logger.error('Unable to find token file {}', path)

    if args.token_dispenser_user:
        _append_hosts(args.token_dispenser_user)
    if args.token_dispenser:
        _append_hosts(args.token_dispenser)
    logger.debug('Token Dispensers: {}', token_list)
    return token_list
async def get_worker_using_settings(self, origin: str, enable_configmode: bool,
                                    communicator: AbstractCommunicator) \
        -> Optional[AbstractWorker]:
    """Create the appropriate worker for ``origin`` based on its walker settings.

    :param origin: device origin name
    :param enable_configmode: short-circuit to a configmode worker
    :param communicator: communicator bound to the device
    :return: a worker instance, or None if settings are missing/invalid
    """
    if enable_configmode:
        # NOTE(review): returned without await -- confirm
        # get_configmode_worker is synchronous
        return self.get_configmode_worker(origin, communicator)
    # not a configmore worker, move on adjusting devicesettings etc
    # TODO: get worker
    walker_configuration: Optional[
        WalkerConfiguration] = await self.__prep_settings(origin)
    if walker_configuration is None:
        logger.error("Failed to find a walker configuration for {}", origin)
        return None
    logger.debug("Setting up worker for {}", str(origin))
    await self.__update_settings_of_origin(origin, walker_configuration)
    dev_id = self.__mapping_manager.get_all_devicemappings(
    )[origin]['device_id']
    area_id = walker_configuration.walker_settings['walkerarea']
    walker_routemanager_mode: WorkerType = self.__mapping_manager.routemanager_get_mode(
        walker_configuration.walker_area_name)
    if dev_id is None or area_id is None or walker_routemanager_mode == WorkerType.UNDEFINED:
        logger.error(
            "Failed to instantiate worker for {} due to invalid settings found",
            origin)
        return None
    # we can finally create an instance of the worker, bloody hell...
    # TODO: last_known_state has never been used and got kinda deprecated due to devicesettings...
    return self.get_worker(origin, walker_routemanager_mode, communicator,
                           dev_id, {}, area_id,
                           walker_configuration.walker_settings,
                           walker_configuration.walker_area_name)
def _clear_quests(self, delayadd, openmenu=True):
    """Delete all quests currently shown in the quest menu.

    :param delayadd: extra seconds added to every click delay
    :param openmenu: open the quest menu first via its calculated coords
    """
    logger.debug('{_clear_quests} called')
    if openmenu:
        x, y = self._resocalc.get_coords_quest_menu(self)[0], \
               self._resocalc.get_coords_quest_menu(self)[1]
        self._communicator.click(int(x), int(y))
        time.sleep(6 + int(delayadd))
    trashcancheck = self._get_trash_positions(full_screen=True)
    if trashcancheck is None:
        logger.error('Could not find any trashcan - abort')
        return
    logger.info("Found {} trashcan(s) on screen", len(trashcancheck))
    # get confirm box coords
    x, y = self._resocalc.get_confirm_delete_quest_coords(self)[0], \
           self._resocalc.get_confirm_delete_quest_coords(self)[1]
    for trash in range(len(trashcancheck)):
        logger.info("Delete old quest {}", int(trash) + 1)
        # NOTE(review): always clicks trashcancheck[0]; presumably the list
        # shifts up after each deletion so the first entry is always the next
        # one -- confirm this assumption
        self._communicator.click(int(trashcancheck[0].x), int(trashcancheck[0].y))
        time.sleep(1 + int(delayadd))
        # confirm the deletion dialog
        self._communicator.click(int(x), int(y))
        time.sleep(1 + int(delayadd))
    # close the quest menu again
    x, y = self._resocalc.get_close_main_button_coords(self)[0], \
           self._resocalc.get_close_main_button_coords(self)[1]
    self._communicator.click(int(x), int(y))
    time.sleep(1.5)
    logger.debug('{_clear_quests} finished')
    return
def __apply_update(self, patch_ver):
    """Import and execute a single patch module; exit the process on failure.

    :param patch_ver: key into MAD_UPDATES identifying the patch module
    """
    filename = MAD_UPDATES[patch_ver]
    patch_name = 'mapadroid.patcher.%s' % filename
    try:
        patch_base = importlib.import_module(patch_name)
    except ImportError:
        logger.opt(exception=True).error(
            'Unable to import patch {}. Exiting', patch_name)
        sys.exit(1)
    # Execute the patch and catch any errors for logging
    try:
        patch = patch_base.Patch(logger, self.dbwrapper, self.data_manager,
                                 self._application_args)
        if patch.completed and not patch.issues:
            self.__set_installed_ver(patch_ver)
            logger.success('Successfully applied patch')
            return
        logger.error('Patch was unsuccessful. Exiting')
        sys.exit(1)
    except Exception:
        # sys.exit raises SystemExit, which is not caught by this handler
        logger.opt(
            exception=True).error('Patch was unsuccessful. Exiting')
        sys.exit(1)
def send_and_wait(self, id, worker_instance, message, timeout, byte_command: int = None):
    """Send a command (text or binary) to a device and wait for the reply.

    Schedules the coroutine on the websocket event loop from this (non-loop)
    thread and blocks on its result.

    :param id: origin/device identifier (NOTE(review): shadows builtin `id`)
    :param worker_instance: worker the message belongs to
    :param message: str command or bytes payload
    :param timeout: seconds to wait for a response
    :param byte_command: optional command id for binary payloads
    :return: the device's response
    :raises WebsocketWorkerRemovedException: worker vanished while waiting
    :raises WebsocketWorkerTimeoutException: no response within timeout
    """
    if isinstance(message, bytes):
        logger.debug("{} sending binary: {}", str(id), str(message[:10]))
    else:
        logger.debug("{} sending command: {}", str(id), message.strip())
    try:
        # future: Handle = self._add_task_to_loop(self.__send_and_wait_internal(id, worker_instance, message,
        #                                                                       timeout))
        logger.debug("Appending send_and_wait to {}".format(
            str(self.__loop)))
        with self.__loop_mutex:
            future = asyncio.run_coroutine_threadsafe(
                self.__send_and_wait_internal(id, worker_instance, message,
                                              timeout, byte_command=byte_command),
                self.__loop)
        result = future.result()
    except WebsocketWorkerRemovedException:
        # NOTE(review): re-raising the class creates a fresh instance and
        # drops the original context -- a bare `raise` would preserve it
        logger.error(
            "Worker {} was removed, propagating exception".format(id))
        raise WebsocketWorkerRemovedException
    except WebsocketWorkerTimeoutException:
        logger.error(
            "Sending message failed due to timeout ({})".format(id))
        raise WebsocketWorkerTimeoutException
    return result
def get_version(self):
    """Determine the installed MAD version, migrating it into the DB if needed,
    and trigger the update process when the code version is newer."""
    # checking mappings.json
    convert_mappings()
    dbVersion = self.dbwrapper.get_mad_version()
    if not dbVersion:
        # legacy installs kept the version in version.json; move it to the DB
        logger.warning("Moving internal MAD version to database")
        try:
            with open('version.json') as f:
                version = json.load(f)
                self._version = int(version['version'])
                self.dbwrapper.update_mad_version(self._version)
        except FileNotFoundError:
            # no file at all: assume a fresh install at version 0 and update
            # NOTE(review): self._version is not set on this path before the
            # comparison below -- confirm start_update() establishes it
            logger.warning("Could not find version.json during move to DB"
                           ", will use version 0")
            self.dbwrapper.update_mad_version(0)
            self.start_update()
        dbVersion = self.dbwrapper.get_mad_version()
        if dbVersion:
            logger.success(
                "Moved internal MAD version to database "
                "as version {}", dbVersion)
        else:
            logger.error("Moving internal MAD version to DB failed!")
    else:
        logger.info("Internal MAD version in DB is {}", dbVersion)
        self._version = int(dbVersion)
    if int(self._version) < int(current_version):
        logger.warning('Performing updates from version {} to {} now',
                       self._version, current_version)
        self.start_update()
        logger.success('Updates to version {} finished', self._version)
def get_worker(self, origin: str, worker_type: WorkerType,
               communicator: AbstractCommunicator, dev_id: str,
               last_known_state: dict, area_id: int, walker_settings: dict,
               walker_area_name: str) -> Optional[AbstractWorker]:
    """Instantiate the worker implementation matching ``worker_type``.

    MITM modes map to WorkerMITM, STOPS to WorkerQuests, IDLE to
    WorkerConfigmode. CONFIGMODE must go through get_configmode_worker.

    :return: a worker instance, or None for invalid/unhandled types
    """
    if origin is None or worker_type is None or worker_type == WorkerType.UNDEFINED:
        return None
    elif worker_type in [
            WorkerType.CONFIGMODE, WorkerType.CONFIGMODE.value
    ]:
        logger.error(
            "WorkerFactory::get_worker called with configmode arg, use get_configmode_worker instead"
        )
        return None
    # TODO: validate all values
    elif worker_type in [
            WorkerType.IV_MITM, WorkerType.IV_MITM.value, WorkerType.MON_MITM,
            WorkerType.MON_MITM.value, WorkerType.RAID_MITM,
            WorkerType.RAID_MITM.value
    ]:
        # all MITM scan modes share the same worker implementation
        return WorkerMITM(self.__args,
                          dev_id,
                          origin,
                          last_known_state,
                          communicator,
                          area_id=area_id,
                          routemanager_name=walker_area_name,
                          mitm_mapper=self.__mitm_mapper,
                          mapping_manager=self.__mapping_manager,
                          db_wrapper=self.__db_wrapper,
                          pogo_window_manager=self.__pogo_windows,
                          walker=walker_settings)
    elif worker_type in [WorkerType.STOPS, WorkerType.STOPS.value]:
        return WorkerQuests(self.__args,
                            dev_id,
                            origin,
                            last_known_state,
                            communicator,
                            area_id=area_id,
                            routemanager_name=walker_area_name,
                            mitm_mapper=self.__mitm_mapper,
                            mapping_manager=self.__mapping_manager,
                            db_wrapper=self.__db_wrapper,
                            pogo_window_manager=self.__pogo_windows,
                            walker=walker_settings)
    elif worker_type in [WorkerType.IDLE, WorkerType.IDLE.value]:
        return WorkerConfigmode(self.__args,
                                dev_id,
                                origin,
                                communicator,
                                walker=walker_settings,
                                mapping_manager=self.__mapping_manager,
                                mitm_mapper=self.__mitm_mapper,
                                db_wrapper=self.__db_wrapper,
                                area_id=area_id,
                                routemanager_name=walker_area_name)
    else:
        logger.error(
            "WorkerFactor::get_worker failed to create a worker...")
        return None
def is_gps_signal_lost(self, filename, identifier) -> Optional[bool]:
    """Check whether the screenshot shows a lost-GPS-signal indicator.

    :param filename: screenshot path; missing files yield None
    :param identifier: device identifier forwarded to the internal check
    :return: result of the internal check, or None if the file is missing
    """
    # run the check for the file here once before having the subprocess check it (as well)
    if not os.path.isfile(filename):
        logger.error("isGpsSignalLost: {} does not exist", str(filename))
        return None
    pending = self.__thread_pool.apply_async(self.__internal_is_gps_signal_lost,
                                             (filename, identifier))
    return pending.get()
def add_endpoint(self, endpoint=None, endpoint_name=None, handler=None, options=None,
                 methods_passed=None):
    """Register a REST route on the Flask app.

    :param endpoint: URL rule
    :param endpoint_name: name for the rule
    :param handler: callable wrapped in an EndpointAction
    :param options: unused here -- kept for interface compatibility
    :param methods_passed: HTTP methods; required, exits the process if missing
    """
    if methods_passed is None:
        logger.error("Invalid REST method specified")
        sys.exit(1)
    action = EndpointAction(handler, self.__application_args, self.__mapping_manager)
    self.app.add_url_rule(endpoint, endpoint_name, action, methods=methods_passed)
def check_nearby(self, filename, identifier, communicator):
    """Check (via the worker pool) whether the nearby screen is open.

    :param filename: screenshot path; missing files yield False
    :param identifier: device identifier forwarded to the internal check
    :param communicator: device communicator forwarded to the internal check
    :return: result of __internal_check_nearby, or False if the file is missing
    """
    if not os.path.isfile(filename):
        logger.error("check_nearby: {} does not exist", str(filename))
        return False
    pending = self.__thread_pool.apply_async(self.__internal_check_nearby,
                                             (filename, identifier, communicator))
    return pending.get()