Example #1
 def save(self,
          force_insert: Optional[bool] = False,
          ignore_issues: Optional[List[str]] = None) -> int:
     # avoid a mutable default argument; treat None as "nothing to ignore"
     if ignore_issues is None:
         ignore_issues = []
     self.presave_validation(ignore_issues=ignore_issues)
     core_data = {'monlist': self._data['fields']['monlist']}
     super().save(core_data,
                  force_insert=force_insert,
                  ignore_issues=ignore_issues)
     del_data = {'monlist_id': self.identifier}
     self._dbc.autoexec_delete('settings_monivlist_to_mon', del_data)
     for ind, mon in enumerate(self._data['fields']['mon_ids_iv']):
         mon_data = {
             'monlist_id': self.identifier,
             'mon_id': mon,
             'mon_order': ind
         }
         try:
             self._dbc.autoexec_insert('settings_monivlist_to_mon',
                                       mon_data)
         except mysql.connector.Error:
             logger.info('Duplicate pokemon %s detected in list %s' % (
                 mon,
                 self.identifier,
             ))
     return self.identifier
Example #2
 def _wait_for_injection(self):
     self._not_injected_count = 0
     while not self._mitm_mapper.get_injection_status(self._origin):
         if self._not_injected_count >= 20:
             logger.error("Worker {} not injected in time - reboot",
                          str(self._origin))
             self._reboot()
             return False
         logger.info(
             "PogoDroid on worker {} didn't connect yet. Probably not injected? (Count: {})",
             str(self._origin), str(self._not_injected_count))
         if self._stop_worker_event.is_set():
             logger.error(
                 "Worker {} killed while waiting for injection",
                 str(self._origin))
             return False
         self._not_injected_count += 1
         wait_time = 0
         while wait_time < 20:
             wait_time += 1
             if self._stop_worker_event.is_set():
                 logger.error(
                     "Worker {} killed while waiting for injection",
                     str(self._origin))
                 return False
             time.sleep(1)
     return True
Example #3
 def stop_worker(self):
     if self._stop_worker_event.is_set():
         logger.info('Worker {} already stopped - waiting for it',
                     str(self._origin))
     else:
         self._stop_worker_event.set()
         logger.warning("Worker {} stop called", str(self._origin))
Example #4
    def preadd_job(self, origin, job, id_, type, globalid=None):
        logger.info(
            'Adding Job {} for Device {} - File/Job: {} (ID: {})'.format(
                str(type), str(origin), str(job), str(id_)))

        globalid = globalid if globalid is not None else id_

        if globalid not in self._globaljoblog:
            self._globaljoblog[globalid] = {}

        self._globaljoblog[globalid]['laststatus'] = None
        self._globaljoblog[globalid]['lastjobend'] = None

        if jobType[type.split('.')[1]] == jobType.CHAIN:

            for subjob in self._commands[job]:
                logger.debug(subjob)
                self.add_job(globalid=globalid,
                             origin=origin,
                             file=subjob['SYNTAX'],
                             id_=int(time.time()),
                             type=subjob['TYPE'],
                             waittime=subjob.get('WAITTIME', 0),
                             redo=self._globaljoblog[globalid].get(
                                 'redo', False),
                             fieldname=subjob.get('FIELDNAME', 'unknown'),
                             jobname=job)
                time.sleep(1)
        else:
            self.add_job(globalid=globalid,
                         origin=origin,
                         file=job,
                         id_=int(id_),
                         type=type)
Example #5
File: screenPath.py Project: nepixl/MAD
    def parse_ggl(self, xml, mail: str) -> bool:
        if xml is None:
            logger.warning(
                'Something went wrong with processing - got None from the websocket...'
            )
            return False
        try:
            parser = ET.XMLParser(encoding="utf-8")
            xmlroot = ET.fromstring(xml, parser=parser)
            for item in xmlroot.iter('node'):
                if mail.lower() in str(item.attrib['text']).lower():
                    logger.info("Found mail {}",
                                self.censor_account(str(item.attrib['text'])))
                    bounds = item.attrib['bounds']
                    logger.debug("Bounds {}", str(item.attrib['bounds']))
                    match = re.search(r'^\[(\d+),(\d+)\]\[(\d+),(\d+)\]$',
                                      bounds)
                    click_x = int(match.group(1)) + (
                        (int(match.group(3)) - int(match.group(1))) / 2)
                    click_y = int(match.group(2)) + (
                        (int(match.group(4)) - int(match.group(2))) / 2)
                    logger.debug('Click ' + str(click_x) + ' / ' +
                                 str(click_y))
                    self._communicator.click(click_x, click_y)
                    time.sleep(2)
                    return True
        except Exception as e:
            logger.error('Something went wrong while parsing xml: {}'.format(
                str(e)))
            return False

        time.sleep(2)
        logger.warning('Did not find any mail address...')
        return False
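
The bounds attribute parsed above uses the Android UI Automator format [left,top][right,bottom]; the click target is the center of that rectangle. A minimal standalone sketch of that step (the helper name and sample value are illustrative, not part of MAD):

    import re
    from typing import Tuple

    def center_of_bounds(bounds: str) -> Tuple[float, float]:
        # Parse a UI Automator bounds string like "[48,1020][1032,1119]" and
        # return the center point of that rectangle.
        match = re.search(r'^\[(\d+),(\d+)\]\[(\d+),(\d+)\]$', bounds)
        if match is None:
            raise ValueError('Unexpected bounds format: %s' % bounds)
        left, top, right, bottom = (int(match.group(i)) for i in range(1, 5))
        return left + (right - left) / 2, top + (bottom - top) / 2

    # center_of_bounds("[48,1020][1032,1119]") -> (540.0, 1069.5)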
Example #6
    def _clear_quests(self, delayadd, openmenu=True):
        logger.debug('{_clear_quests} called')
        if openmenu:
            x, y = self._resocalc.get_coords_quest_menu(self)[0], \
                   self._resocalc.get_coords_quest_menu(self)[1]
            self._communicator.click(int(x), int(y))
            time.sleep(6 + int(delayadd))

        trashcancheck = self._get_trash_positions(full_screen=True)
        if trashcancheck is None:
            logger.error('Could not find any trashcan - abort')
            return
        logger.info("Found {} trashcan(s) on screen", len(trashcancheck))
        # get confirm box coords
        x, y = self._resocalc.get_confirm_delete_quest_coords(self)[0], \
               self._resocalc.get_confirm_delete_quest_coords(self)[1]

        for trash in range(len(trashcancheck)):
            logger.info("Delete old quest {}", int(trash) + 1)
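            # presumably intentional: always click the first detected trashcan,
            # since the remaining ones shift up after each deletion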
            self._communicator.click(int(trashcancheck[0].x),
                                     int(trashcancheck[0].y))
            time.sleep(1 + int(delayadd))
            self._communicator.click(int(x), int(y))
            time.sleep(1 + int(delayadd))

        x, y = self._resocalc.get_close_main_button_coords(self)[0], \
               self._resocalc.get_close_main_button_coords(self)[1]
        self._communicator.click(int(x), int(y))

        time.sleep(1.5)

        logger.debug('{_clear_quests} finished')
        return
Example #7
    def _wait_for_injection(self):
        self._not_injected_count = 0
        injection_thresh_reboot = int(self.get_devicesettings_value("injection_thresh_reboot", 20))
        while not self._mitm_mapper.get_injection_status(self._origin):

            self._check_for_mad_job()

            if self._not_injected_count >= injection_thresh_reboot:
                logger.error("Worker {} not injected in time - reboot", str(self._origin))
                self._reboot(self._mitm_mapper)
                return False
            logger.info("PogoDroid on worker {} didn't connect yet. Probably not injected? (Count: {}/{})",
                        str(self._origin), str(self._not_injected_count), str(injection_thresh_reboot))
            if self._not_injected_count in [3, 6, 9, 15, 18] and not self._stop_worker_event.is_set():
                logger.info("Worker {} will retry check_windows while waiting for injection at count {}",
                            str(self._origin), str(self._not_injected_count))
                self._ensure_pogo_topmost()
            self._not_injected_count += 1
            wait_time = 0
            while wait_time < 20:
                wait_time += 1
                if self._stop_worker_event.is_set():
                    logger.error("Worker {} killed while waiting for injection", str(self._origin))
                    return False
                time.sleep(1)
        return True
Example #8
    def generate_stop_list(self):
        time.sleep(5)
        stops = self.db_wrapper.stop_from_db_without_quests(self.geofence_helper)

        logger.info('Detected stops without quests: {}', str(len(stops)))
        logger.debug('Detected stops without quests: {}', str(stops))
        self._stoplist: List[Location] = stops
Example #9
    def _get_coords_after_finish_route(self) -> bool:
        self._manager_mutex.acquire()
        try:

            if self._shutdown_route:
                logger.info('Another worker shut down route {} - leaving it', str(self.name))
                return False

            if self._start_calc:
                logger.info("Another process is already calculating the new route")
                return True
            self._start_calc = True
            self.generate_stop_list()
            if len(self._stoplist) == 0:
                logger.info("Not getting any new stops - leaving now.")
                self._shutdown_route = True
                self._restore_original_route()
                self._start_calc = False
                return False
            coords: List[Location] = self._check_unprocessed_stops()
            # remove coords to be ignored from coords
            coords = [coord for coord in coords if coord not in self._coords_to_be_ignored]
            if len(coords) > 0:
                logger.info("Getting new coords - recalc quick route")
                self._recalc_stop_route(coords)
                self._start_calc = False
            else:
                logger.info("Not getting any new stops - leaving now.")
                self._shutdown_route = True
                self._start_calc = False
                self._restore_original_route()
                return False
            return True
        finally:
            self._manager_mutex.release()
Example #10
    def __find_latest_head(self, package, architecture, url) -> None:
        """ Determine if there is a newer version by checking the size of the package from the HEAD response

        Args:
            package (APK_Type): Package to download
            architecture (APK_Arch): Architecture of the package to download
            url (str): URL to perform the HEAD against
        """
        (curr_info, status) = lookup_package_info(self.storage, package)
        installed_size = None
        if curr_info:
            installed_size = curr_info.get('size', None)
        head = requests.head(url,
                             verify=False,
                             headers=APK_HEADERS,
                             allow_redirects=True)
        mirror_size = int(head.headers['Content-Length'])
        if not curr_info or (installed_size and installed_size != mirror_size):
            logger.info('Newer version found on the mirror of size {}',
                        mirror_size)
        else:
            logger.info('No newer version found')
        self.set_last_searched(package,
                               architecture,
                               version=mirror_size,
                               url=url)
Example #11
    def _check_unprocessed_stops(self):
        self._manager_mutex.acquire()

        try:
            list_of_stops_to_return: List[Location] = []

            if len(self._stoplist) == 0:
                return list_of_stops_to_return
            else:
                # we only want to add stops that we haven't spun yet
                for stop in self._stoplist:
                    if stop not in self._stops_not_processed and stop not in self._get_unprocessed_coords_from_worker():
                        self._stops_not_processed[stop] = 1
                    else:
                        self._stops_not_processed[stop] += 1

            for stop, error_count in self._stops_not_processed.items():
                if stop not in self._stoplist:
                    logger.info(
                        "Location {} is no longer in our stoplist and will be ignored".format(str(stop)))
                    self._coords_to_be_ignored.add(stop)
                elif error_count < 4:
                    logger.warning("Found stop not processed yet: {}".format(str(stop)))
                    list_of_stops_to_return.append(stop)
                else:
                    logger.error("Stop {} has not been processed thrice in a row, "
                                 "please check your DB".format(str(stop)))
                    self._coords_to_be_ignored.add(stop)

            if len(list_of_stops_to_return) > 0:
                logger.info("Found stops not yet processed, retrying those in the next round")
            return list_of_stops_to_return
        finally:
            self._manager_mutex.release()
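
The bookkeeping above is essentially a per-stop failure counter with a give-up threshold. A minimal sketch of that pattern in isolation (function name, types and the threshold are illustrative, not MAD's API):

    from typing import Dict, List, Set, Tuple

    def split_retries(stops: List[str], attempts: Dict[str, int],
                      max_attempts: int = 3) -> Tuple[List[str], Set[str]]:
        # Record one more missed attempt per stop, retry stops below the
        # threshold and collect the rest so they can be ignored from now on.
        retry: List[str] = []
        ignore: Set[str] = set()
        for stop in stops:
            attempts[stop] = attempts.get(stop, 0) + 1
            if attempts[stop] <= max_attempts:
                retry.append(stop)
            else:
                ignore.add(stop)
        return retry, ignore

    # With attempts = {'stopA': 3}: split_retries(['stopA', 'stopB'], attempts)
    # -> (['stopB'], {'stopA'})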
Example #12
    def __init__(self, db_wrapper: DbWrapper, args, data_manager, ws_server, configmode: bool = False):
        self.__db_wrapper: DbWrapper = db_wrapper
        self.__args = args
        self.__configmode: bool = configmode
        self.__data_manager = data_manager
        self.__ws_server = ws_server

        self._devicemappings: Optional[dict] = None
        self._areas: Optional[dict] = None
        self._routemanagers: Optional[Dict[str, dict]] = None
        self._auths: Optional[dict] = None
        self._monlists: Optional[dict] = None
        self.__stop_file_watcher_event: Event = Event()
        self.join_routes_queue = JoinQueue(self.__stop_file_watcher_event, self)
        self.__raw_json: Optional[dict] = None
        self.__mappings_mutex: Lock = Lock()

        self.update(full_lock=True)

        if self.__args.auto_reload_config:
            logger.info("Starting file watcher for mappings.json changes.")
            self.__t_file_watcher = Thread(name='file_watcher', target=self.__file_watcher, )
            self.__t_file_watcher.daemon = False
            self.__t_file_watcher.start()
        self.__devicesettings_setter_queue: Queue = Queue()
        self.__devicesettings_setter_consumer_thread: Thread = Thread(name='devicesettings_setter_consumer',
                                                                      target=self.__devicesettings_setter_consumer, )
        self.__devicesettings_setter_consumer_thread.daemon = True
        self.__devicesettings_setter_consumer_thread.start()
Example #13
 def __apply_update(self, patch_ver):
     filename = MAD_UPDATES[patch_ver]
     patch_name = 'mapadroid.patcher.%s' % filename
     try:
         patch_base = importlib.import_module(patch_name)
     except ImportError:
         logger.opt(exception=True).error(
             'Unable to import patch {}.  Exiting', patch_name)
         sys.exit(1)
     else:
         # Execute the patch and catch any errors for logging
         try:
             patch = patch_base.Patch(logger, self.dbwrapper,
                                      self.data_manager,
                                      self._application_args)
             if patch.completed and not patch.issues:
                 self.__set_installed_ver(patch_ver)
                 logger.info('Successfully applied patch')
             else:
                 logger.error('Patch was unsuccessful.  Exiting')
                 sys.exit(1)
         except Exception:
             logger.opt(
                 exception=True).error('Patch was unsuccessful.  Exiting')
             sys.exit(1)
Example #14
    def _post_move_location_routine(self, timestamp: float):
        if self._stop_worker_event.is_set():
            raise InternalStopWorkerException
        position_type = self._mapping_manager.routemanager_get_position_type(self._routemanager_name,
                                                                             self._origin)
        if position_type is None:
            logger.warning("Mappings/Routemanagers have changed, stopping worker to be created again")
            raise InternalStopWorkerException

        if self.get_devicesettings_value('rotate_on_lvl_30', False) and \
                self._mitm_mapper.get_playerlevel(self._origin) >= 30 and self._level_mode:
            # switch if player lvl >= 30
            self.switch_account()

        try:
            self._work_mutex.acquire()
            if not self._mapping_manager.routemanager_get_init(self._routemanager_name):
                logger.info("Processing Stop / Quest...")

                reachedMainMenu = self._check_pogo_main_screen(10, False)
                if not reachedMainMenu:
                    self._restart_pogo(mitm_mapper=self._mitm_mapper)

                logger.info('Open Stop')
                self._stop_process_time = math.floor(time.time())
                data_received = self._open_pokestop(self._stop_process_time)
                if data_received is not None and data_received == LatestReceivedType.STOP:
                    self._handle_stop(self._stop_process_time)

            else:
                logger.debug('Currently in INIT Mode - no Stop processing')
                time.sleep(5)
        finally:
            logger.debug("Releasing lock")
            self._work_mutex.release()
Example #15
    async def __client_message_receiver(
            self, origin: str,
            client_entry: WebsocketConnectedClientEntry) -> None:
        if client_entry is None:
            return
        logger.info("Consumer handler of {} starting", origin)
        while client_entry.websocket_client_connection.open:
            message = None
            try:
                message = await asyncio.wait_for(
                    client_entry.websocket_client_connection.recv(),
                    timeout=4.0)
            except asyncio.TimeoutError:
                await asyncio.sleep(0.02)
            except websockets.exceptions.ConnectionClosed as cc:
                # TODO: cleanup needed here? better suited for the handler
                logger.warning(
                    "Connection to {} was closed, stopping receiver. Exception: ",
                    origin, cc)
                return

            if message is not None:
                await self.__on_message(client_entry, message)
        logger.warning("Connection of {} closed in __client_message_receiver",
                       str(origin))
Example #16
    def get_login_accounts(self):
        self._logintype = LoginType[self.get_devicesettings_value('logintype', 'google')]
        logger.info("Set logintype: {}".format(self._logintype))
        if self._logintype == LoginType.ptc:
            temp_accounts = self.get_devicesettings_value('ptc_login', False)
            if not temp_accounts:
                logger.warning('No PTC Accounts are set - hope we are logged in and never log out!')
                self._accountcount = 0
                return

            temp_accounts = temp_accounts.replace(' ', '').split('|')
            for account in temp_accounts:
                ptc_temp = account.split(',')
                if len(ptc_temp) != 2:
                    logger.warning('Cannot use this account (Wrong format!): {}'.format(str(account)))
                    continue
                username = ptc_temp[0]
                password = ptc_temp[1]
                self._PTC_accounts.append(Login_PTC(username, password))
            self._accountcount = len(self._PTC_accounts)
        else:
            temp_accounts = self.get_devicesettings_value('ggl_login_mail', '@gmail.com')
            if not temp_accounts:
                logger.warning('No GGL Accounts are set - using first @gmail.com Account')
            temp_accounts = temp_accounts.replace(' ', '').split('|')

            for account in temp_accounts:
                self._GGL_accounts.append(Login_GGL(account))
            self._accountcount = len(self._GGL_accounts)

        logger.info('Added {} account(s) to memory'.format(str(self._accountcount)))
        return
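
The ptc_login devicesetting read above is a single string of the form "user1,pass1|user2,pass2". A minimal sketch of that parsing step in isolation (function name and sample value are illustrative, not MAD's API):

    from typing import List, Tuple

    def parse_ptc_logins(raw: str) -> List[Tuple[str, str]]:
        # Split a 'user1,pass1|user2,pass2' style setting into
        # (username, password) pairs, skipping malformed entries.
        accounts: List[Tuple[str, str]] = []
        for entry in raw.replace(' ', '').split('|'):
            parts = entry.split(',')
            if len(parts) != 2:
                continue
            accounts.append((parts[0], parts[1]))
        return accounts

    # parse_ptc_logins('userA,passA | userB,passB')
    # -> [('userA', 'passA'), ('userB', 'passB')]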
Example #17
    def run(self):
        # build a private DbWrapper instance...
        logger.info("Starting MITMDataProcessor")
        while True:
            try:
                item = self.__queue.get()

                try:
                    items_left = self.__queue.qsize()
                except NotImplementedError:
                    items_left = 0

                logger.debug(
                    "MITM data processing worker retrieved data. Queue length left afterwards: {}",
                    str(items_left))
                if items_left > 50:
                    logger.warning(
                        "MITM data processing workers are falling behind! Queue length: {}", str(items_left))

                if item is None:
                    logger.warning("Received none from queue of data")
                    break
                self.process_data(item[0], item[1], item[2])
                self.__queue.task_done()
            except KeyboardInterrupt as e:
                logger.info("MITMDataProcessor received keyboard interrupt, stopping")
                break
Example #18
File: updater.py Project: sairon/MAD
    def send_webhook(self, id_, status):
        if not self._log[str(id_)]['auto']:
            return

        try:
            if jobReturn(status).name not in self._args.job_dt_send_type.split(
                    '|') or not self._args.job_dt_wh:
                return

            from discord_webhook import DiscordWebhook, DiscordEmbed
            _webhook = DiscordWebhook(url=self._args.job_dt_wh_url)

            origin = self._log[str(id_)]['origin']
            file_ = self._log[str(id_)]['file']
            processtime = self._log[str(id_)].get('processingdate', None)
            returning = self._log[str(id_)].get('returning', '-')

            logger.info("Send discord status for device {} (Job: {})".format(str(origin), str(file_)))

            embed = DiscordEmbed(title='MAD Job Status', description='Automatic Job processed', color=242424)
            embed.set_author(name='MADBOT')
            embed.add_embed_field(name='Origin', value=origin)
            embed.add_embed_field(name='Jobname', value=file_)
            embed.add_embed_field(name='Returning', value=returning)
            embed.add_embed_field(name='Status', value=jobReturn(status).name)
            embed.add_embed_field(name='Next run',
                                  value=str(datetime.fromtimestamp(
                                      processtime) if processtime is not None else "-"))
            _webhook.add_embed(embed)
            _webhook.execute()
            embed = None
        except Exception as e:
            logger.error('Cannot send discord webhook for origin {} - Job {} - Reason: {}'.format(
                str(origin), str(file_), str(e)))
Example #19
    def __process_stats(self, stats, client_id: int,
                        last_processed_timestamp: float):
        logger.info('Submitting stats for origin {}', str(client_id))
        data_send_stats = []
        data_send_location = []

        data_send_stats.append(
            PlayerStats.stats_complete_parser(client_id, stats,
                                              last_processed_timestamp))
        data_send_location.append(
            PlayerStats.stats_location_parser(client_id, stats,
                                              last_processed_timestamp))

        self._db_stats_submit.submit_stats_complete(data_send_stats)
        self._db_stats_submit.submit_stats_locations(data_send_location)
        if self.__application_args.game_stats_raw:
            data_send_location_raw = PlayerStats.stats_location_raw_parser(
                client_id, stats, last_processed_timestamp)
            data_send_detection_raw = PlayerStats.stats_detection_raw_parser(
                client_id, stats, last_processed_timestamp)
            self._db_stats_submit.submit_stats_locations_raw(
                data_send_location_raw)
            self._db_stats_submit.submit_stats_detections_raw(
                data_send_detection_raw)

        data_send_stats.clear()
        data_send_location.clear()

        self._db_stats_submit.cleanup_statistics()
Example #20
File: version.py Project: Akhrameev/MAD
    def get_version(self):
        # checking mappings.json
        convert_mappings()
        dbVersion = self.dbwrapper.get_mad_version()
        if not dbVersion:
            logger.warning("Moving internal MAD version to database")
            try:
                with open('version.json') as f:
                    version = json.load(f)
                self._version = int(version['version'])
                self.dbwrapper.update_mad_version(self._version)
            except FileNotFoundError:
                logger.warning("Could not find version.json during move to DB"
                               ", will use version 0")
                self.dbwrapper.update_mad_version(0)
                self.start_update()
            dbVersion = self.dbwrapper.get_mad_version()
            if dbVersion:
                logger.success(
                    "Moved internal MAD version to database "
                    "as version {}", dbVersion)
            else:
                logger.error("Moving internal MAD version to DB failed!")
        else:
            logger.info("Internal MAD version in DB is {}", dbVersion)
            self._version = int(dbVersion)

        if int(self._version) < int(current_version):
            logger.warning('Performing updates from version {} to {} now',
                           self._version, current_version)
            self.start_update()
            logger.success('Updates to version {} finished', self._version)
Example #21
 def _worker_specific_setup_start(self):
     logger.info("Starting pogodroid")
     start_result = self._communicator.start_app("com.mad.pogodroid")
     time.sleep(5)
     # won't work if PogoDroid is repackaged!
     self._communicator.passthrough("am startservice com.mad.pogodroid/.services.HookReceiverService")
     return start_result
Example #22
    def __build_excluded_areas(self, mapping_manager: MappingManager):
        self.__excluded_areas: List[GeofenceHelper] = []

        if self.__args.webhook_excluded_areas == "":
            return

        tmp_excluded_areas = {}
        for rm in mapping_manager.get_all_routemanager_names():
            name = mapping_manager.routemanager_get_name(rm)
            gfh = mapping_manager.routemanager_get_geofence_helper(rm)
            tmp_excluded_areas[name] = gfh

        area_names = self.__args.webhook_excluded_areas.split(",")
        for area_name in area_names:
            area_name = area_name.strip()
            for name, gf in tmp_excluded_areas.items():
                if (area_name.endswith("*") and name.startswith(
                        area_name[:-1])) or area_name == name:
                    self.__excluded_areas.append(gf)

        tmp_excluded_areas = None

        if len(self.__excluded_areas) > 0:
            logger.info("Excluding {} areas from webhooks",
                        len(self.__excluded_areas))
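
A trailing * in webhook_excluded_areas acts as a prefix wildcard in the matching above. A standalone sketch of just that rule (function names and sample data are illustrative):

    from typing import List

    def area_matches(pattern: str, area_name: str) -> bool:
        # A trailing '*' turns the pattern into a prefix match,
        # otherwise the names must be identical.
        pattern = pattern.strip()
        if pattern.endswith('*'):
            return area_name.startswith(pattern[:-1])
        return pattern == area_name

    def excluded(patterns: str, area_names: List[str]) -> List[str]:
        return [name for name in area_names
                if any(area_matches(p, name) for p in patterns.split(','))]

    # excluded('Raids_*,Quests_City', ['Raids_North', 'Quests_City', 'Mon_South'])
    # -> ['Raids_North', 'Quests_City']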
Example #23
File: screenPath.py Project: nepixl/MAD
    def checkQuest(self, screenpath: str) -> ScreenType:
        if screenpath is None or len(screenpath) == 0:
            logger.error("Invalid screen path: {}", screenpath)
            return ScreenType.ERROR
        globaldict = self._pogoWindowManager.get_screen_text(
            screenpath, self._id)
        frame = None

        click_text = 'FIELD,SPECIAL,FELD,SPEZIAL,SPECIALES,TERRAIN'
        if not globaldict:
            # dict is empty
            return ScreenType.ERROR
        n_boxes = len(globaldict['level'])
        for i in range(n_boxes):
            if any(elem in (globaldict['text'][i])
                   for elem in click_text.split(",")):
                logger.info('Found research menu')
                self._communicator.click(100, 100)
                return ScreenType.QUEST

        logger.info('Listening to Dr. blabla - please wait')

        self._communicator.back_button()
        time.sleep(3)
        return ScreenType.UNDEFINED
Example #24
    async def __consumer_handler(
            self,
            websocket_client_connection: websockets.WebSocketClientProtocol):
        if websocket_client_connection is None:
            return
        worker_id = str(
            websocket_client_connection.request_headers.get_all("Origin")[0])
        logger.info("Consumer handler of {} starting", str(worker_id))
        while websocket_client_connection.open:
            message = None
            try:
                message = await asyncio.wait_for(
                    websocket_client_connection.recv(), timeout=4.0)
            except asyncio.TimeoutError as te:
                await asyncio.sleep(0.02)
            except websockets.exceptions.ConnectionClosed as cc:
                logger.warning("Connection to {} was closed, stopping worker",
                               str(worker_id))
                async with self.__users_mutex:
                    worker = self.__current_users.get(worker_id, None)
                if worker is not None:
                    # TODO: do it abruptly in the worker, maybe set a flag to be checked for in send_and_wait to
                    # TODO: throw an exception
                    worker[1].stop_worker()
                await self.__internal_clean_up_user(worker_id, None)
                return

            if message is not None:
                await self.__on_message(message)
        logger.warning("Connection of {} closed in consumer_handler",
                       str(worker_id))
Example #25
File: screenPath.py Project: nepixl/MAD
    def get_next_account(self):
        if self._accountcount == 0:
            logger.info('Cannot return new account - no one is set')
            return None
        if self._accountindex <= self._accountcount - 1:
            logger.info('Request next Account - Using Nr. {}'.format(
                self._accountindex + 1))
            self._accountindex += 1
        elif self._accountindex > self._accountcount - 1:
            logger.info('Request next Account - Restarting with Nr. 1')
            self._accountindex = 0

        self.set_devicesettings_value('accountindex', self._accountindex)

        if self._logintype == LoginType.ptc:
            logger.info('Using PTC Account: {}'.format(
                self.censor_account(self._PTC_accounts[self._accountindex -
                                                       1].username,
                                    isPTC=True)))
            return self._PTC_accounts[self._accountindex - 1]
        else:
            logger.info('Using GGL Account: {}'.format(
                self.censor_account(self._GGL_accounts[self._accountindex -
                                                       1].username)))
            return self._GGL_accounts[self._accountindex - 1]
Example #26
    async def __prep_settings(self,
                              origin: str) -> Optional[WalkerConfiguration]:
        last_known_state = {}
        client_mapping = self.__mapping_manager.get_devicemappings_of(origin)
        devicesettings = self.__mapping_manager.get_devicesettings_of(origin)
        logger.info("Setting up routemanagers for {}", str(origin))

        walker_configuration: Optional[
            WalkerConfiguration] = await self.__get_walker_settings(
                origin, client_mapping, devicesettings)
        if walker_configuration is None:
            # logging is done in __get_walker_settings...
            return None

        if walker_configuration.walker_area_name not in \
                self.__mapping_manager.get_all_routemanager_names():
            raise WrongAreaInWalker()

        logger.debug('Devicesettings {}: {}', str(origin), devicesettings)
        logger.info(
            '{} using walker area {} [{}/{}]', str(origin),
            str(
                self.__mapping_manager.routemanager_get_name(
                    walker_configuration.walker_area_name)),
            str(walker_configuration.walker_index + 1),
            str(walker_configuration.total_walkers_allowed_for_assigned_area))
        return walker_configuration
Example #27
 def start_worker(self):
     logger.info("Worker {} started in configmode", str(self._origin))
     self._mapping_manager.register_worker_to_routemanager(
         self._routemanager_name, self._origin)
     logger.debug("Setting device to idle for routemanager")
     self._db_wrapper.save_idle_status(self._dev_id, True)
     logger.debug("Device set to idle for routemanager {}",
                  str(self._origin))
     while self.check_walker() and not self._stop_worker_event.is_set():
         if self._args.config_mode:
             time.sleep(10)
         else:
             position_type = self._mapping_manager.routemanager_get_position_type(
                 self._routemanager_name, self._origin)
             if position_type is None:
                 logger.warning(
                     "Mappings/Routemanagers have changed, stopping worker to be created again"
                 )
                 self._stop_worker_event.set()
                 time.sleep(1)
             else:
                 time.sleep(10)
     self.set_devicesettings_value('finished', True)
     self._mapping_manager.unregister_worker_from_routemanager(
         self._routemanager_name, self._origin)
     try:
         self._communicator.cleanup()
     finally:
         logger.info("Internal cleanup of {} finished", str(self._origin))
     return
Example #28
 def __internal_worker_join(self):
     while not self.__stop_server.is_set() \
             or (self.__stop_server.is_set() and not self.__worker_shutdown_queue.empty()):
         try:
             next_item: Optional[
                 Thread] = self.__worker_shutdown_queue.get_nowait()
         except queue.Empty:
             time.sleep(1)
             continue
         if next_item is not None:
             logger.info("Trying to join worker thread")
             try:
                 next_item.join(10)
             except RuntimeError as e:
                 logger.warning(
                     "Caught runtime error trying to join thread, the thread likely did not start "
                     "at all. Exact message: {}", e)
             if next_item.is_alive():
                 logger.debug(
                     "Error while joining worker thread - requeue it")
                 self.__worker_shutdown_queue.put(next_item)
             else:
                 logger.debug("Done with worker thread, moving on")
         self.__worker_shutdown_queue.task_done()
     logger.info("Worker join-thread done")
Example #29
 def __update_mad(self):
     if self._madver < self._installed_ver:
         logger.error('Mis-matched version number detected.  Not applying any updates')
     else:
         logger.warning('Performing updates from version {} to {} now',
                        self._installed_ver, self._madver)
         all_patches = list(MAD_UPDATES.keys())
         try:
             last_ver = all_patches.index(self._installed_ver)
             first_patch = last_ver + 1
         except ValueError:
             # The current version of the patch was most likely removed as it was no longer needed.  Determine
             # where to start by finding the last executed
             next_patch = None
             for patch_ver in all_patches:
                 if self._installed_ver > patch_ver:
                     continue
                 next_patch = patch_ver
                 break
             try:
                 first_patch = all_patches.index(next_patch)
             except ValueError:
                 logger.critical('Unable to find the next patch to apply')
         updates_to_apply = all_patches[first_patch:]
         logger.info('Patches to apply: {}', updates_to_apply)
         for patch_ver in updates_to_apply:
             self.__apply_update(patch_ver)
         logger.success('Updates to version {} finished', self._installed_ver)
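
The except ValueError branch above covers the case where the installed version was removed from MAD_UPDATES: patching then resumes at the first listed version newer than the installed one. A small sketch of that selection, assuming patch versions are sortable integers (names are illustrative):

    from typing import List

    def outstanding_patches(all_patches: List[int], installed_ver: int) -> List[int]:
        # If the installed version is still listed, apply everything after it;
        # otherwise start at the first patch newer than the installed version.
        if installed_ver in all_patches:
            first_patch = all_patches.index(installed_ver) + 1
        else:
            newer = [ver for ver in all_patches if ver > installed_ver]
            if not newer:
                return []
            first_patch = all_patches.index(newer[0])
        return all_patches[first_patch:]

    # outstanding_patches([15, 16, 18, 19], 17) -> [18, 19]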
Example #30
    def __screendetection_get_type_internal(
        self, image, identifier
    ) -> Optional[Tuple[ScreenType, Optional[dict], int, int, int]]:
        returntype: ScreenType = ScreenType.UNDEFINED
        globaldict: Optional[dict] = {}
        diff: int = 1
        logger.debug(
            "__screendetection_get_type_internal: Detecting screen type - identifier {}",
            identifier)

        texts = []
        try:
            with Image.open(image) as frame_org:
                width, height = frame_org.size

                logger.debug("Screensize of origin {}: W:{} x H:{}".format(
                    str(identifier), str(width), str(height)))

                if width < 1080:
                    logger.info('Resize screen ...')
                    frame_org = frame_org.resize(
                        [int(2 * s) for s in frame_org.size], Image.ANTIALIAS)
                    diff: int = 2

                frame = frame_org.convert('LA')
                texts = [frame, frame_org]
                for text in texts:
                    try:
                        globaldict = pytesseract.image_to_data(
                            text,
                            output_type=Output.DICT,
                            timeout=40,
                            config='--dpi 70')
                    except Exception as e:
                        logger.error(
                            "Tesseract Error for device {}: {}. Exception: {}".
                            format(str(identifier), str(globaldict), e))
                        globaldict = None
                    logger.debug("Screentext: {}".format(str(globaldict)))
                    if globaldict is None or 'text' not in globaldict:
                        continue
                    n_boxes = len(globaldict['level'])
                    for i in range(n_boxes):
                        if returntype != ScreenType.UNDEFINED:
                            break
                        if len(globaldict['text'][i]) > 3:
                            for z in self._ScreenType:
                                if globaldict['top'][i] > height / 4 and globaldict['text'][i] in \
                                        self._ScreenType[z]:
                                    returntype = ScreenType(z)
                    if returntype != ScreenType.UNDEFINED:
                        break

                del texts
                frame.close()
        except (FileNotFoundError, ValueError) as e:
            logger.error("Failed opening image {} with exception {}", image, e)
            return None

        return returntype, globaldict, width, height, diff