Example #1
    async def on_ready(self):
        logger.info(
            'Globibot ({}) is online'
                .format(self.user.id)
        )

        self.plugin_collection.load_plugins()
    def load_plugin(self):
        logger.info('Loading plugin: {}...'.format(self.name))

        self.module = import_module(*self.import_args)
        self.module_imported = True
        self.plugin = self.module.plugin_cls(*self.args)
        self.plugin.do_load()
    def reload_plugin(self):
        logger.info('Unloading plugin: {}'.format(self.name))
        self.plugin.do_unload()

        logger.info('Reloading module: {}...'.format(self.module))
        self.reload_module(self.module)

        logger.info('Loading plugin: {}'.format(self.name))
        self.plugin = self.module.plugin_cls(*self.args)
        self.plugin.do_load()
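
The `reload_module` call above is not shown in the excerpt. A minimal sketch of what such a helper might look like, assuming it simply delegates to the standard library's `importlib.reload`:

from importlib import reload as importlib_reload

def reload_module(self, module):
    # Hypothetical helper: re-execute the already-imported plugin module so that
    # module.plugin_cls reflects code changes the next time it is instantiated.
    return importlib_reload(module)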
Example #4
def _format_to_bert(params):
    json_file, args, save_file = params
    # print(params[2])
    if os.path.exists(save_file):
        logger.info('Ignore %s' % save_file)
        return

    bert = BertData(args)

    logger.info('Processing %s' % json_file)
    print('Processing %s' % json_file)
    jobs = json.load(open(json_file, 'r', encoding='UTF-8'))
    # print(jobs)
    print('Load json file success')
    datasets = []
    for d in jobs:
        source, tgt = d['src'], d['tgt']
        # print(source)
        if args.oracle_mode == 'greedy':
            oracle_ids = greedy_selection(source, tgt, 3)
        elif args.oracle_mode == 'combination':
            oracle_ids = combination_selection(source, tgt, 3)
        else:
            raise ValueError
        if not oracle_ids:
            continue
        b_data = bert.pre_process(source, tgt, oracle_ids)
        if b_data is None:
            continue
        indexed_tokens, labels, segments_ids, cls_ids, src_txt, tgt_txt = b_data
        b_data_dict = {
            "src": indexed_tokens,
            "labels": labels,
            "segs": segments_ids,
            'clss': cls_ids,
            'src_txt': src_txt,
            "tgt_txt": tgt_txt
        }
        datasets.append(b_data_dict)
    logger.info('Saving to %s' % save_file)
    torch.save(datasets, save_file)
    datasets = []
    gc.collect()
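
`_format_to_bert` unpacks a `(json_file, args, save_file)` tuple, which suggests it is driven by a process pool over many input shards. A minimal driver sketch under that assumption; `raw_path`, `save_path`, and `n_cpus` are hypothetical attributes of `args`:

import glob
import os
from multiprocessing import Pool

def format_all_to_bert(args):
    # Build one (json_file, args, save_file) job per input shard.
    jobs = []
    for json_file in glob.glob(os.path.join(args.raw_path, '*.json')):
        save_file = os.path.join(
            args.save_path,
            os.path.basename(json_file).replace('.json', '.bert.pt'))
        jobs.append((json_file, args, save_file))
    # Fan the jobs out across worker processes; each call writes its own save_file.
    with Pool(args.n_cpus) as pool:
        for _ in pool.imap_unordered(_format_to_bert, jobs):
            pass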
Example #5
 def unregister_worker(self, worker_name):
     self._workers_registered_mutex.acquire()
     try:
         if worker_name in self._workers_registered:
             logger.info("Worker {} unregistering from routemanager {}",
                         str(worker_name), str(self.name))
             self._workers_registered.remove(worker_name)
             del self._rounds[worker_name]
         else:
             # TODO: handle differently?
             logger.info(
                 "Worker {} failed unregistering from routemanager {} since subscription was previously lifted",
                 str(worker_name), str(self.name))
         if len(self._workers_registered) == 0 and self._is_started:
             logger.info(
                 "Routemanager {} does not have any subscribing workers anymore, calling stop",
                 str(self.name))
             self._quit_route()
     finally:
         self._workers_registered_mutex.release()
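
The explicit acquire/try/finally/release pattern above can be written more compactly with a `with` block. A condensed sketch, assuming `_workers_registered_mutex` is a standard `threading.Lock` (logging omitted for brevity):

def unregister_worker(self, worker_name):
    with self._workers_registered_mutex:  # released automatically, even on exceptions
        if worker_name in self._workers_registered:
            self._workers_registered.remove(worker_name)
            del self._rounds[worker_name]
        if len(self._workers_registered) == 0 and self._is_started:
            self._quit_route()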
Example #6
    def _post_move_location_routine(self, timestamp: float):
        if self._stop_worker_event.is_set():
            raise InternalStopWorkerException
        self._work_mutex.acquire()
        if not self._walker_routemanager.init:
            logger.info("Processing Stop / Quest...")

            reachedMainMenu = self._check_pogo_main_screen(10, False)
            if not reachedMainMenu:
                self._restart_pogo(mitm_mapper=self._mitm_mapper)

            logger.info('Open Stop')

            data_received = self._open_pokestop(timestamp)
            if data_received is not None and data_received == LatestReceivedType.STOP:
                self._handle_stop(timestamp)
        else:
            logger.info('Currently in INIT Mode - no Stop processing')
        logger.debug("Releasing lock")
        self._work_mutex.release()
Example #7
File: control.py Project: madBeavis/MAD
    def research_trigger(self):
        logger.info("Starting research trigger thread")
        while True:
            try:
                try:
                    origin = self.research_trigger_queue.get()
                except Empty:
                    time.sleep(2)
                    continue

                logger.info("Trigger research menu for device {}".format(str(origin)))
                self._ws_server.trigger_worker_check_research(origin)
                self.generate_screenshot(origin)
                time.sleep(3)

            except KeyboardInterrupt as e:
                logger.info("research_trigger received keyboard interrupt, stopping")
                if self.trigger_thread is not None:
                    self.trigger_thread.join()
                break
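
Note that a plain `queue.Queue.get()` blocks until an item arrives and never raises `Empty`, so the `except Empty` branch above is only reachable with a non-blocking or timed get. A small sketch of the timed variant that branch appears to expect, assuming `research_trigger_queue` is a standard `queue.Queue`:

from queue import Empty, Queue
import time

def next_origin(research_trigger_queue: Queue, poll_seconds: float = 2.0):
    # get(timeout=...) raises Empty after poll_seconds; back off briefly and retry.
    while True:
        try:
            return research_trigger_queue.get(timeout=poll_seconds)
        except Empty:
            time.sleep(poll_seconds)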
Example #8
 def _start_priority_queue(self):
     logger.info("Try to activate PrioQ thread for route {}".format(
         str(self.name)))
     if (self.delay_after_timestamp_prio is not None
             or self.mode == "iv_mitm") and not self.mode == "pokestops":
         logger.info("PrioQ thread for route {} could be activate".format(
             str(self.name)))
         self._prio_queue = []
         if self.mode not in ["iv_mitm", "pokestops"]:
             self.clustering_helper = ClusteringHelper(
                 self._max_radius, self._max_coords_within_radius,
                 self._cluster_priority_queue_criteria())
         self._update_prio_queue_thread = Thread(
             name="prio_queue_update_" + self.name,
             target=self._update_priority_queue_loop)
         self._update_prio_queue_thread.daemon = True
         self._update_prio_queue_thread.start()
     else:
         logger.info(
             "Cannot activate Prio Q - maybe wrong mode or delay_after_prio_event is null"
         )
Example #9
    def _handle_stop(self):
        to = 0
        data_received = '-'
        while 'Quest' not in data_received and int(to) < 3:
            logger.info('Spin Stop')
            data_received = self._wait_for_data(
                timestamp=self._stop_process_time, proto_to_wait_for=101, timeout=25)
            if data_received is not None:

                if 'Box' in data_received:
                    logger.error('Box is full ... Next round!')
                    self.clear_thread_task = 1
                    break

                if 'Quest' in data_received:
                    logger.info('Getting new Quest')
                    self.clear_thread_task = 2
                    break

                if 'SB' in data_received or 'Time' in data_received:
                    logger.error('Softban - waiting...')
                    time.sleep(10)
                    self._stop_process_time = time.time()
                    self._open_pokestop()
                else:
                    logger.error('Other Return: {}', str(data_received))
                to += 1
            else:
                data_received = '-'
                logger.info(
                    'Did not get any data ... Maybe already turned or softban.')
                self._close_gym(self._delay_add)
                self._turn_map(self._delay_add)
                time.sleep(3)
                self._stop_process_time = time.time()
                self._open_pokestop()
                to += 1

        if data_received == 'Quest':
            self._devicesettings['last_action_time'] = time.time()
Example #10
    async def handler(self, websocket_client_connection, path):
        if self.__stop_server.is_set():
            await websocket_client_connection.close()
            return

        logger.info("Waiting for connection...")
        # wait for a connection...
        continue_work = await self.__register(websocket_client_connection)

        if not continue_work:
            logger.error("Failed registering client, closing connection")
            await websocket_client_connection.close()
            return

        consumer_task = asyncio.ensure_future(
            self.__consumer_handler(websocket_client_connection))
        producer_task = asyncio.ensure_future(
            self.__producer_handler(websocket_client_connection))
        done, pending = await asyncio.wait(
            [producer_task, consumer_task],
            return_when=asyncio.FIRST_COMPLETED,
        )
        logger.debug(
            "consumer or producer of {} stopped, cancelling pending tasks",
            str(
                websocket_client_connection.request_headers.get_all("Origin")
                [0]))
        for task in pending:
            task.cancel()
        logger.info(
            "Awaiting unregister of {}",
            str(
                websocket_client_connection.request_headers.get_all("Origin")
                [0]))
        await self.__unregister(websocket_client_connection)
        logger.info(
            "All done with {}",
            str(
                websocket_client_connection.request_headers.get_all("Origin")
                [0]))
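
The `(websocket_client_connection, path)` signature matches the legacy callback style of the `websockets` library. A minimal sketch of serving such a handler; the host and port values are placeholders:

import asyncio
import websockets

async def serve_forever(ws_server, host='0.0.0.0', port=8080):
    # ws_server is assumed to be the object exposing the handler coroutine above.
    server = await websockets.serve(ws_server.handler, host, port)
    await server.wait_closed()

# Example wiring (assuming an existing ws_server instance):
# asyncio.get_event_loop().run_until_complete(serve_forever(ws_server))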
Example #11
    def _post_move_location_routine(self, timestamp):
        if self._stop_worker_event.is_set():
            raise InternalStopWorkerException
        self._work_mutex.acquire()
        if not self._walker_routemanager.init:
            logger.info("Processing Stop / Quest...")

            data_received = '-'

            reachedMainMenu = self._check_pogo_main_screen(10, False)
            if not reachedMainMenu:
                self._restart_pogo()

            logger.info('Open Stop')
            self._stop_process_time = time.time()
            data_received = self._open_pokestop()
            if data_received == 'Stop':
                self._handle_stop()
        else:
            logger.info('Currently in INIT Mode - no Stop processing')
        logger.debug("Releasing lock")
        self._work_mutex.release()
Example #12
 def __internal_playerstats_db_update_consumer(self):
     try:
         while not self.__playerstats_db_update_stop.is_set():
             if not self.__application_args.game_stats:
                 logger.info("Playerstats are disabled")
                 break
             try:
                 with self.__playerstats_db_update_mutex:
                     next_item = self.__playerstats_db_update_queue.get_nowait()
             except Empty:
                 time.sleep(0.5)
                 continue
             if next_item is not None:
                 client_id, stats, last_processed_timestamp = next_item
                 logger.info("Running stats processing on {}".format(
                     str(client_id)))
                 self.__process_stats(stats, client_id,
                                      last_processed_timestamp)
     except Exception as e:
         logger.error("Playerstats consumer stopping because of {}".format(
             str(e)))
     logger.info("Shutting down Playerstats update consumer")
Example #13
    def _check_ggl_login(self):
        topmostapp = self._communicator.topmostApp()
        if not topmostapp: return False

        if "AccountPickerActivity" in topmostapp or 'SignInActivity' in topmostapp:

            if not self._takeScreenshot(
                    delayBefore=self.get_devicesettings_value(
                        "post_screenshot_delay", 1),
                    delayAfter=10):
                logger.error("_check_ggl_login: Failed getting screenshot")
                return False

            logger.info('GGL Login Window found on {} - processing',
                        str(self._id))
            if not self._pogoWindowManager.look_for_ggl_login(
                    self.get_screenshot_path(), self._communicator):
                logger.error("_check_ggl_login: Failed reading screenshot")
                return False

            buttontimeout = 0
            logger.info('Waiting for News Popup ...')

            buttoncheck = self._checkPogoButton()
            while (not buttoncheck and not self._stop_worker_event.isSet()
                   and buttontimeout < 6):
                time.sleep(5)
                buttoncheck = self._checkPogoButton()
                buttontimeout += 1
                if buttontimeout == 5:
                    logger.info('Timeout while waiting for after-login Button')

            return True

        logger.debug('No GGL Login Window found on {}', str(self._id))
        return False
Example #14
 def before_push(self, metadata):
     if 'python-lint' in self.plugin_actions:
         controller = PythonLintController(self.current_workspace)
         controller.run(**self.plugin_actions['python-lint'])
         try:
             self.current_repo.git.add('-u')
             self.current_repo.git.commit('-m',
                                          'autopep8 by automation tool')
         except git.exc.GitCommandError:
             logger.info('No lint has been done for branch {}'.format(
                 self.current_branch))
     if 'vue-lint' in self.plugin_actions:
         controller = VueLintController(
             os.path.join(self.current_workspace,
                          self.plugin_actions['vue-lint']['path']))
         controller.run()
         try:
             self.current_repo.git.add('-u')
             self.current_repo.git.commit(
                 '-m', 'npm run lint by automation tool')
         except git.exc.GitCommandError:
             logger.info('No lint has been done for branch {}'.format(
                 self.current_branch))
     logger.info('Push branch {}'.format(self.current_branch))
Example #15
 def check_walker(self):
     mode = self._walker['walkertype']
     if mode == "countdown":
         logger.info("Checking walker mode 'countdown'")
         countdown = self._walker['walkervalue']
         if not countdown:
             logger.error(
                 "No Value for Mode - check your settings! Killing worker")
             return False
         if self.workerstart is None:
             self.workerstart = math.floor(time.time())
         else:
             if math.floor(
                     time.time()) >= int(self.workerstart) + int(countdown):
                 return False
         return True
     elif mode == "timer":
         logger.debug("Checking walker mode 'timer'")
         exittime = self._walker['walkervalue']
         if not exittime or ':' not in exittime:
             logger.error(
                 "No or wrong Value for Mode - check your settings! Killing worker"
             )
             return False
         return check_walker_value_type(exittime)
     elif mode == "round":
         logger.debug("Checking walker mode 'round'")
         rounds = self._walker['walkervalue']
         if len(rounds) == 0:
             logger.error(
                 "No Value for Mode - check your settings! Killing worker")
             return False
         processed_rounds = self._mapping_manager.routemanager_get_rounds(
             self._routemanager_name, self._id)
         if int(processed_rounds) >= int(rounds):
             return False
         return True
     elif mode == "period":
         logger.debug("Checking walker mode 'period'")
         period = self._walker['walkervalue']
         if len(period) == 0:
             logger.error(
                 "No Value for Mode - check your settings! Killing worker")
             return False
         return check_walker_value_type(period)
     elif mode == "coords":
         exittime = self._walker['walkervalue']
         if len(exittime) > 0:
             return check_walker_value_type(exittime)
         return True
     elif mode == "idle":
         logger.debug("Checking walker mode 'idle'")
         if len(self._walker['walkervalue']) == 0:
             logger.error(
                 "Wrong Value for mode - check your settings! Killing worker"
             )
             return False
         sleeptime = self._walker['walkervalue']
         logger.info('{} going to sleep', str(self._id))
         killpogo = False
         if check_walker_value_type(sleeptime):
             self._stop_pogo()
             killpogo = True
         while (not self._stop_worker_event.isSet()
                and check_walker_value_type(sleeptime)):
             time.sleep(1)
         logger.info('{} just woke up', str(self._id))
         if killpogo:
             self._start_pogo()
         return False
     else:
         logger.error("Unknown walker mode! Killing worker")
         return False
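
For context, the method only reads the `walkertype` and `walkervalue` keys of `self._walker`. The entries below are purely illustrative; the value formats are assumptions inferred from the checks above, not taken from the project:

# Hypothetical walker entries exercising the modes handled above:
walker_countdown = {'walkertype': 'countdown', 'walkervalue': '3600'}   # seconds to keep working
walker_timer     = {'walkertype': 'timer',     'walkervalue': '21:00'}  # must contain ':'
walker_round     = {'walkertype': 'round',     'walkervalue': '5'}      # compared against processed rounds
walker_idle      = {'walkertype': 'idle',      'walkervalue': '23:00-06:00'}  # assumed sleep window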
Example #16
    def _main_work_thread(self):
        # TODO: signal websocketserver the removal
        try:
            self._internal_pre_work()
        except (InternalStopWorkerException, WebsocketWorkerRemovedException,
                WebsocketWorkerTimeoutException):
            logger.error(
                "Failed initializing worker {}, connection terminated exceptionally",
                str(self._id))
            self._internal_cleanup()
            return

        if not self.check_max_walkers_reached():
            logger.warning('Max. Walkers in Area {} - closing connections',
                           str(self._routemanager_name))
            self.set_devicesettings_value('finished', True)
            self._internal_cleanup()
            return

        while not self._stop_worker_event.isSet():
            # check for ggl login
            self._check_ggl_login()
            try:
                # TODO: consider getting results of health checks and aborting the entire worker?
                walkercheck = self.check_walker()
                if not walkercheck:
                    self.set_devicesettings_value('finished', True)
                    break
            except (InternalStopWorkerException,
                    WebsocketWorkerRemovedException,
                    WebsocketWorkerTimeoutException):
                logger.warning("Worker {} killed by walker settings",
                               str(self._id))
                break

            try:
                # TODO: consider getting results of health checks and aborting the entire worker?
                self._internal_health_check()
                self._health_check()
            except (InternalStopWorkerException,
                    WebsocketWorkerRemovedException,
                    WebsocketWorkerTimeoutException):
                logger.error(
                    "Websocket connection to {} lost while running healthchecks, connection terminated "
                    "exceptionally", str(self._id))
                break

            try:
                settings = self._internal_grab_next_location()
                if settings is None:
                    continue
            except (InternalStopWorkerException,
                    WebsocketWorkerRemovedException,
                    WebsocketWorkerTimeoutException):
                logger.warning(
                    "Worker of {} does not support mode that's to be run, connection terminated exceptionally",
                    str(self._id))
                break

            try:
                logger.debug('Checking if new location is valid')
                valid = self._check_location_is_valid()
                if not valid:
                    break
            except (InternalStopWorkerException,
                    WebsocketWorkerRemovedException,
                    WebsocketWorkerTimeoutException):
                logger.warning("Worker {} get non valid coords!",
                               str(self._id))
                break

            try:
                self._pre_location_update()
            except (InternalStopWorkerException,
                    WebsocketWorkerRemovedException,
                    WebsocketWorkerTimeoutException):
                logger.warning(
                    "Worker of {} stopping because of stop signal in pre_location_update, connection terminated "
                    "exceptionally", str(self._id))
                break

            try:
                logger.debug(
                    'main worker {}: LastLat: {}, LastLng: {}, CurLat: {}, CurLng: {}',
                    str(self._id),
                    self.get_devicesettings_value("last_location",
                                                  Location(0, 0)).lat,
                    self.get_devicesettings_value("last_location",
                                                  Location(0, 0)).lng,
                    self.current_location.lat, self.current_location.lng)
                time_snapshot, process_location = self._move_to_location()
            except (InternalStopWorkerException,
                    WebsocketWorkerRemovedException,
                    WebsocketWorkerTimeoutException):
                logger.warning(
                    "Worker {} failed moving to new location, stopping worker, connection terminated exceptionally",
                    str(self._id))
                break

            if process_location:
                self._add_task_to_loop(self._update_position_file())
                self._location_count += 1
                if self._applicationArgs.last_scanned:
                    logger.debug("Seting new 'scannedlocation' in Database")
                    # self.update_scanned_location(currentLocation.lat, currentLocation.lng, curTime)
                    self._add_task_to_loop(
                        self.update_scanned_location(self.current_location.lat,
                                                     self.current_location.lng,
                                                     time_snapshot))

                try:
                    self._post_move_location_routine(time_snapshot)
                except (InternalStopWorkerException,
                        WebsocketWorkerRemovedException,
                        WebsocketWorkerTimeoutException):
                    logger.warning(
                        "Worker {} failed running post_move_location_routine, stopping worker",
                        str(self._id))
                    break
                logger.info("Worker {} finished iteration, continuing work",
                            str(self._id))

        self._internal_cleanup()
Example #17
    def __init__(self,
                 args,
                 id,
                 last_known_state,
                 websocket_handler,
                 mapping_manager: MappingManager,
                 routemanager_name: str,
                 db_wrapper: DbWrapperBase,
                 pogoWindowManager: PogoWindows,
                 NoOcr: bool = True,
                 walker=None):
        # self.thread_pool = ThreadPool(processes=2)
        self._mapping_manager: MappingManager = mapping_manager
        self._routemanager_name: str = routemanager_name
        self._websocket_handler = websocket_handler
        self._communicator: Communicator = Communicator(
            websocket_handler, id, self, args.websocket_command_timeout)
        self._id: str = id
        self._applicationArgs = args
        self._last_known_state = last_known_state
        self._work_mutex = Lock()
        self.loop = None
        self.loop_started = Event()
        self.loop_tid = None
        self._async_io_looper_thread = None
        self._location_count = 0
        self._init: bool = self._mapping_manager.routemanager_get_init(
            self._routemanager_name)
        self._walker = walker

        self._lastScreenshotTaken = 0
        self._stop_worker_event = Event()
        self._db_wrapper = db_wrapper
        self._redErrorCount = 0
        self._lastScreenHash = None
        self._lastScreenHashCount = 0
        self._resocalc = Resocalculator
        self._screen_x = 0
        self._screen_y = 0
        self._lastStart = ""
        self._geofix_sleeptime = 0
        self._pogoWindowManager = pogoWindowManager
        self._waittime_without_delays = 0
        self._transporttype = 0
        self._not_injected_count = 0

        self.current_location = Location(0.0, 0.0)
        self.last_location = self.get_devicesettings_value(
            "last_location", None)

        if self.last_location is None:
            self.last_location = Location(0.0, 0.0)

        if self.get_devicesettings_value('last_mode', None) is not None and \
                self.get_devicesettings_value('last_mode') in ("raids_mitm", "mon_mitm", "iv_mitm", "raids_ocr"):
            # Reset last_location - no useless waiting delays (otherwise stop mode)
            logger.info('{}: last Mode not pokestop - reset saved location',
                        str(self._id))
            self.last_location = Location(0.0, 0.0)

        self.set_devicesettings_value(
            "last_mode",
            self._mapping_manager.routemanager_get_mode(
                self._routemanager_name))
        self.last_processed_location = Location(0.0, 0.0)
        self.workerstart = None
Example #18
File: version.py Project: bbdoc/MAD
    def start_update(self):
        # BACKUP ALL THE THINGS! if we need to update
        if self._version != current_version:
            target = '%s.%s.bk' % (self._application_args.mappings,
                                   self._version)
            try:
                shutil.copy(self._application_args.mappings, target)
            except IOError:
                logger.exception('Unable to clone configuration. Exiting')
                sys.exit(1)

        if self._version < 1:
            logger.info('Execute Update for Version 1')
            # Adding quest_reward for PMSF ALT
            if self.dbwrapper.check_column_exists('trs_quest',
                                                  'quest_reward') == 0:
                alter_query = (
                    "ALTER TABLE trs_quest "
                    "ADD quest_reward VARCHAR(500) NULL AFTER quest_condition")
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            # Adding quest_task = ingame quest conditions
            if self.dbwrapper.check_column_exists('trs_quest',
                                                  'quest_task') == 0:
                alter_query = (
                    "ALTER TABLE trs_quest "
                    "ADD quest_task VARCHAR(150) NULL AFTER quest_reward")
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            # Adding form column if it doesn't exist
            if self._application_args.db_method == "rm":
                alter_query = ("ALTER TABLE raid "
                               "ADD form smallint(6) DEFAULT NULL")
                column_exist = self.dbwrapper.check_column_exists(
                    'raid', 'form')
            else:
                logger.error("Invalid db_method in config. Exiting")
                sys.exit(1)

            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

        if self._version < 2:
            alter_query = ("ALTER TABLE trs_quest "
                           "CHANGE quest_reward "
                           "quest_reward VARCHAR(1000) NULL DEFAULT NULL")
            try:
                self.dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)
        if self._version < 7:
            alter_query = ("ALTER TABLE trs_status "
                           "ADD lastPogoReboot varchar(50) NULL DEFAULT NULL")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_status', 'lastPogoReboot')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status "
                           "ADD globalrebootcount int(11) NULL DEFAULT '0'")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_status', 'globalrebootcount')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status "
                           "ADD globalrestartcount int(11) NULL DEFAULT '0'")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_status', 'globalrestartcount')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.info("Unexpected error: {}", e)

            alter_query = ("ALTER TABLE trs_status CHANGE lastPogoRestart "
                           "lastPogoRestart VARCHAR(50) NULL DEFAULT NULL")
            try:
                self.dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)

            alter_query = (
                "ALTER TABLE trs_status "
                "CHANGE currentPos currentPos VARCHAR(50) NULL DEFAULT NULL, "
                "CHANGE lastPos lastPos VARCHAR(50) NULL DEFAULT NULL, "
                "CHANGE routePos routePos INT(11) NULL DEFAULT NULL, "
                "CHANGE routeMax routeMax INT(11) NULL DEFAULT NULL, "
                "CHANGE rebootingOption rebootingOption TEXT NULL, "
                "CHANGE rebootCounter rebootCounter INT(11) NULL DEFAULT NULL, "
                "CHANGE routemanager routemanager VARCHAR(255) NULL DEFAULT NULL, "
                "CHANGE lastProtoDateTime lastProtoDateTime VARCHAR(50), "
                "CHANGE lastPogoRestart lastPogoRestart VARCHAR(50), "
                "CHANGE init init TEXT NULL, "
                "CHANGE restartCounter restartCounter TEXT NULL")
            try:
                self.dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.info("Unexpected error: {}", e)

        if self._version < 8:
            alter_query = ("ALTER TABLE trs_quest "
                           "ADD quest_template VARCHAR(100) NULL DEFAULT NULL "
                           "AFTER quest_reward")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_quest', 'quest_template')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(alter_query, commit=True)
                except Exception as e:
                    logger.exception("Unexpected error: {}", e)

        if self._version < 9:
            alter_query = (
                "UPDATE trs_quest "
                "SET quest_condition=REPLACE(quest_condition,'\\\"','\"'),"
                " quest_reward=REPLACE(quest_reward,'\\\"','\"')")
            try:
                self.dbwrapper.execute(alter_query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

        if self._version < 10:
            query = ("CREATE TABLE IF NOT EXISTS trs_s2cells ( "
                     "id bigint(20) unsigned NOT NULL, "
                     "level int(11) NOT NULL, "
                     "center_latitude double NOT NULL, "
                     "center_longitude double NOT NULL, "
                     "updated int(11) NOT NULL, "
                     "PRIMARY KEY (id)) ")
            try:
                self.dbwrapper.execute(query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

        if self._version < 11:
            query = ("ALTER TABLE trs_stats_detect_raw "
                     "ADD is_shiny TINYINT(1) NOT NULL DEFAULT '0' "
                     "AFTER count")
            column_exist = self.dbwrapper.check_column_exists(
                'trs_stats_detect_raw', 'is_shiny')
            if column_exist == 0:
                try:
                    self.dbwrapper.execute(query, commit=True)
                except Exception as e:
                    logger.exception("Unexpected error: {}", e)

        if self._version < 12:
            query = ("ALTER TABLE trs_stats_detect_raw "
                     "ADD INDEX typeworker (worker, type_id)")
            index_exist = self.dbwrapper.check_index_exists(
                'trs_stats_detect_raw', 'typeworker')

            if index_exist >= 1:
                query = (
                    "ALTER TABLE trs_stats_detect_raw DROP INDEX typeworker, ADD INDEX typeworker (worker, type_id)"
                )
            try:
                self.dbwrapper.execute(query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

            query = ("ALTER TABLE trs_stats_detect_raw "
                     "ADD INDEX shiny (is_shiny)")
            index_exist = self.dbwrapper.check_index_exists(
                'trs_stats_detect_raw', 'shiny')

            if index_exist >= 1:
                query = (
                    "ALTER TABLE trs_stats_detect_raw DROP INDEX shiny, ADD INDEX shiny (is_shiny)"
                )
            try:
                self.dbwrapper.execute(query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

        if self._version < 13:
            # Adding current_sleep for worker status
            if self.dbwrapper.check_column_exists('trs_status',
                                                  'currentSleepTime') == 0:
                query = ("ALTER TABLE trs_status "
                         "ADD currentSleepTime INT(11) NOT NULL DEFAULT 0")
                try:
                    self.dbwrapper.execute(query, commit=True)
                except Exception as e:
                    logger.exception("Unexpected error: {}", e)

        if self._version < 14:
            update_order = [
                'monivlist', 'auth', 'devicesettings', 'areas', 'walker',
                'devices'
            ]
            old_data = {}
            new_data = {}
            cache = {}
            target = '%s.bk' % (self._application_args.mappings, )
            try:
                shutil.copy(self._application_args.mappings, target)
            except IOError:
                logger.exception('Unable to clone configuration. Exiting')
                sys.exit(1)
            with open(self._application_args.mappings, 'rb') as fh:
                old_data = json.load(fh)

            if "migrated" in old_data and old_data["migrated"] is True:
                with open(self._application_args.mappings, 'w') as outfile:
                    json.dump(old_data, outfile, indent=4, sort_keys=True)
            else:
                walkerarea = 'walkerarea'
                walkerarea_ind = 0
                for key in update_order:
                    try:
                        entries = old_data[key]
                    except Exception:
                        entries = []
                    cache[key] = {}
                    index = 0
                    new_data[key] = {'index': index, 'entries': {}}
                    if key == 'walker':
                        new_data[walkerarea] = {'index': index, 'entries': {}}

                    for entry in entries:
                        if key == 'monivlist':
                            cache[key][entry['monlist']] = index
                        if key == 'devicesettings':
                            cache[key][entry['devicepool']] = index
                        elif key == 'areas':
                            cache[key][entry['name']] = index
                            try:
                                mon_list = entry['settings']['mon_ids_iv']
                                if type(mon_list) is list:
                                    monlist_ind = new_data['monivlist'][
                                        'index']
                                    new_data['monivlist']['entries'][index] = {
                                        'monlist': 'Update List',
                                        'mon_ids_iv': mon_list
                                    }
                                    entry['settings'][
                                        'mon_ids_iv'] = '/api/monivlist/%s' % (
                                            monlist_ind)
                                    new_data['monivlist']['index'] += 1
                                else:
                                    try:
                                        name = mon_list
                                        uri = '/api/monivlist/%s' % (
                                            cache['monivlist'][name])
                                        entry['settings']['mon_ids_iv'] = uri
                                    except Exception:
                                        # No name match. Maybe an old record, so let's toss it
                                        del entry['settings']['mon_ids_iv']
                            except KeyError:
                                # Monlist is not defined for the area
                                pass
                            except Exception:
                                # No monlist specified
                                pass
                        elif key == 'walker':
                            cache[key][entry['walkername']] = index
                            valid_areas = []
                            if 'setup' in entry:
                                for ind, area in enumerate(entry['setup']):
                                    try:
                                        area['walkerarea'] = '/api/area/%s' % (
                                            cache['areas'][area['walkerarea']],
                                        )
                                    except KeyError:
                                        # The area no longer exists.  Remove from the path
                                        pass
                                    else:
                                        new_data[walkerarea]['entries'][
                                            walkerarea_ind] = area
                                        valid_areas.append(
                                            '/api/walkerarea/%s' %
                                            walkerarea_ind)
                                        walkerarea_ind += 1
                                entry['setup'] = valid_areas
                                new_data[walkerarea]['index'] = walkerarea_ind
                            else:
                                entry['setup'] = []
                        elif key == 'devices':
                            if 'pool' in entry:
                                try:
                                    entry['pool'] = '/api/devicesetting/%s' % (
                                        cache['devicesettings'][entry['pool']],
                                    )
                                except Exception:
                                    if entry['pool'] is not None:
                                        logger.error(
                                            'DeviceSettings {} is not valid',
                                            entry['pool'])
                                    del entry['pool']
                            try:
                                entry['walker'] = '/api/walker/%s' % (
                                    cache['walker'][entry['walker']], )
                            except Exception:
                                # The walker no longer exists.  Skip the device
                                continue
                        new_data[key]['entries'][index] = entry
                        index += 1
                    new_data[key]['index'] = index

                new_data['migrated'] = True

                with open(self._application_args.mappings, 'w') as outfile:
                    json.dump(new_data, outfile, indent=4, sort_keys=True)
        if self._version < 15:
            with open(self._application_args.mappings, 'rb') as fh:
                settings = json.load(fh)
            self.__convert_to_id(settings)
            with open(self._application_args.mappings, 'w') as outfile:
                json.dump(settings, outfile, indent=4, sort_keys=True)

        if self._version < 15:
            query = (
                "CREATE TABLE IF NOT EXISTS `trs_visited` ("
                "`pokestop_id` varchar(50) NOT NULL collate utf8mb4_unicode_ci,"
                "`origin` varchar(50) NOT NULL collate utf8mb4_unicode_ci,"
                "PRIMARY KEY (`pokestop_id`,`origin`)"
                ")")
            try:
                self.dbwrapper.execute(query, commit=True)
            except Exception as e:
                logger.exception("Unexpected error: {}", e)

        self.set_version(current_version)
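
The migration repeats one pattern many times: check whether a column exists, then run a guarded `ALTER TABLE`. A hypothetical helper could factor that out; this sketch uses only the `check_column_exists` and `execute` calls already seen above:

def add_column_if_missing(dbwrapper, table, column, ddl):
    # e.g. add_column_if_missing(dbwrapper, 'trs_quest', 'quest_reward',
    #                            "ADD quest_reward VARCHAR(500) NULL AFTER quest_condition")
    if dbwrapper.check_column_exists(table, column) == 0:
        query = "ALTER TABLE %s %s" % (table, ddl)
        try:
            dbwrapper.execute(query, commit=True)
        except Exception as e:
            logger.exception("Unexpected error: {}", e)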
Example #19
    def predict(self, test_iter, step, cal_lead=False, cal_oracle=False):
        """ Validate models.
            valid_iter: validate data iterator
        Returns:
            :obj:`nmt.Statistics`: validation loss statistics
        """

        # Set models in validating mode.
        def _get_ngrams(n, text):
            ngram_set = set()
            text_length = len(text)
            max_index_ngram_start = text_length - n
            for i in range(max_index_ngram_start + 1):
                ngram_set.add(tuple(text[i:i + n]))
            return ngram_set

        def _block_tri(c, p):
            tri_c = _get_ngrams(3, c.split())
            for s in p:
                tri_s = _get_ngrams(3, s.split())
                if len(tri_c.intersection(tri_s)) > 0:
                    return True
            return False

        if not cal_lead and not cal_oracle:
            self.model.eval()
        stats = Statistics()

        can_path = '%s_step%d.candidate' % (self.args.result_path +
                                            self.args.data_name, step)
        gold_path = '%s_step%d.gold' % (self.args.result_path +
                                        self.args.data_name, step)
        origin_path = '%s_step%d.origin' % (self.args.result_path +
                                            self.args.data_name, step)
        with open(can_path, 'w', encoding='utf-8') as save_pred:
            with open(gold_path, 'w', encoding='utf-8') as save_gold:
                with torch.no_grad():
                    origin = []
                    for batch in test_iter:

                        src = batch.src  # 7 sentences
                        # logger.info('origin sent: %s' % len(batch.src_str))  # 7 sentences

                        labels = batch.labels
                        segs = batch.segs
                        clss = batch.clss
                        mask = batch.mask
                        mask_cls = batch.mask_cls

                        gold = []
                        pred = []

                        if cal_lead:
                            selected_ids = [list(range(batch.clss.size(1)))
                                            ] * batch.batch_size
                        elif cal_oracle:
                            selected_ids = [[
                                j for j in range(batch.clss.size(1))
                                if labels[i][j] == 1
                            ] for i in range(batch.batch_size)]
                        else:
                            sent_scores, mask = self.model(
                                src, segs, clss, mask, mask_cls)

                            loss = self.loss(sent_scores, labels.float())
                            loss = (loss * mask.float()).sum()
                            batch_stats = Statistics(
                                float(loss.cpu().data.numpy()), len(labels))
                            stats.update(batch_stats)

                            sent_scores = sent_scores + mask.float()
                            sent_scores = sent_scores.cpu().data.numpy()
                            selected_ids = np.argsort(-sent_scores, 1)

                        # selected_ids = np.sort(selected_ids,1)

                        for i, idx in enumerate(selected_ids):
                            _pred = []
                            if len(batch.src_str[i]) == 0:
                                continue
                            for j in selected_ids[i][:len(batch.src_str[i])]:
                                if j >= len(batch.src_str[i]):
                                    continue
                                candidate = batch.src_str[i][j].strip()
                                if self.args.block_trigram:
                                    if not _block_tri(candidate, _pred):
                                        _pred.append(candidate)
                                        # print(candidate)
                                else:
                                    _pred.append(candidate)

                                if (not cal_oracle) and (
                                        not self.args.recall_eval
                                ) and len(_pred) == 3:
                                    break
                            # exit()
                            _pred = '<q>'.join(_pred)
                            # logger.info('pred sent: %s' % (_pred))
                            if self.args.recall_eval:
                                _pred = ' '.join(
                                    _pred.split()
                                    [:len(batch.tgt_str[i].split())])
                                # _src = ' '.join()
                            # logger.info('origin sent: %s' % (batch.src_str[i]))
                            # logger.info('pred sent: %s' % (_pred))
                            pred.append(_pred)
                            gold.append(batch.tgt_str[i])
                            _origin = ' '.join(batch.src_str[i])
                            if self.args.vy_predict:
                                doc_id = batch.doc_id
                                _origin = str(doc_id[i]) + '\t' + _origin
                            origin.append(_origin)
                        for i in range(len(gold)):
                            save_gold.write(gold[i].strip() + '\n')
                        for i in range(len(pred)):
                            save_pred.write(pred[i].strip() + '\n')
                    save_txt_file(origin, origin_path)

        if step != -1 and self.args.report_rouge:
            rouges = test_rouge(self.args.temp_dir, can_path, gold_path)
            logger.info('Rouges at step %d \n%s' %
                        (step, rouge_results_to_str(rouges)))
        self._report_step(0, step, valid_stats=stats)

        return stats
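
The trigram blocking used above can be illustrated in isolation. A standalone toy example with made-up sentences:

def get_trigrams(text):
    # Same idea as _get_ngrams(3, text) above, written as a set comprehension.
    return {tuple(text[i:i + 3]) for i in range(len(text) - 2)}

candidate = 'the cat sat on the mat'
already_selected = ['a dog sat on the mat yesterday']
blocked = any(
    get_trigrams(candidate.split()) & get_trigrams(s.split())
    for s in already_selected)
# blocked is True: the candidate shares the trigram ('on', 'the', 'mat') with an
# already selected sentence, so _block_tri would cause it to be skipped.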
Example #20
    def train(self,
              train_iter_fct,
              train_steps,
              valid_iter_fct=None,
              valid_steps=-1):
        """
        The main training loop: iterates over the training data
        (i.e. `train_iter_fct`) and periodically runs validation
        (i.e. iterates over `valid_iter_fct`).

        Args:
            train_iter_fct(function): a function that returns the train
                iterator. e.g. something like
                train_iter_fct = lambda: generator(*args, **kwargs)
            valid_iter_fct(function): same as train_iter_fct, for valid data
            train_steps(int):
            valid_steps(int):
            save_checkpoint_steps(int):

        Return:
            None
        """
        logger.info('Start training...')

        # step =  self.optim._step + 1
        step = self.optimizer._step + 1
        true_batchs = []
        accum = 0
        normalization = 0
        train_iter = train_iter_fct()

        total_stats = Statistics()
        report_stats = Statistics()
        self._start_report_manager(start_time=total_stats.start_time)

        while step <= train_steps:

            reduce_counter = 0
            for i, batch in enumerate(train_iter):
                # print(batch.src)
                # print(len(batch))
                if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):

                    true_batchs.append(batch)
                    normalization += batch.batch_size
                    accum += 1
                    if accum == self.grad_accum_count:
                        reduce_counter += 1
                        if self.n_gpu > 1:
                            normalization = sum(
                                distributed.all_gather_list(normalization))

                        self._gradient_accumulation(true_batchs, normalization,
                                                    total_stats, report_stats)

                        report_stats = self._maybe_report_training(
                            step, train_steps, self.optimizer.learning_rate,
                            report_stats)
                        true_batchs = []
                        accum = 0
                        normalization = 0
                        if step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0:
                            self._save(step)

                        step += 1
                        if step > train_steps:
                            break

            train_iter = train_iter_fct()

        return total_stats
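
Per the docstring, `train_iter_fct` is a zero-argument callable that rebuilds the training iterator each time it is exhausted. A hypothetical call site; `trainer`, `build_dataset_iter`, and its arguments are assumptions:

# Rebuild the data iterator from scratch whenever the loop exhausts it.
train_iter_fct = lambda: build_dataset_iter('train', fields, opt, shuffle=True)
total_stats = trainer.train(train_iter_fct, train_steps=50000)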
Example #21
    def __get_latest_routemanagers(self) -> Optional[Dict[str, dict]]:
        global mode_mapping
        areas: Optional[Dict[str, dict]] = {}

        if self.__configmode:
            return areas

        area_arr = self.__raw_json["areas"]

        thread_pool = ThreadPool(processes=4)

        areas_procs = {}
        for area in area_arr:
            if area["geofence_included"] is None:
                raise RuntimeError("Cannot work without geofence_included")

            geofence_included = Path(area["geofence_included"])
            if not geofence_included.is_file():
                raise RuntimeError(
                        "geofence_included for area '{}' is specified but file does not exist ('{}').".format(
                                area["name"], geofence_included.resolve()
                        )
                )

            geofence_excluded_raw_path = area.get("geofence_excluded", None)
            if geofence_excluded_raw_path is not None:
                geofence_excluded = Path(geofence_excluded_raw_path)
                if not geofence_excluded.is_file():
                    raise RuntimeError(
                            "geofence_excluded for area '{}' is specified but file does not exist ('{}').".format(
                                    area["name"], geofence_excluded.resolve()
                            )
                    )

            area_dict = {"mode":              area["mode"],
                         "geofence_included": area["geofence_included"],
                         "geofence_excluded": area.get("geofence_excluded", None),
                         "routecalc":         area["routecalc"]}
            # also build a routemanager for each area...

            # grab coords
            # first check if init is false, if so, grab the coords from DB
            # coords = np.loadtxt(area["coords"], delimiter=',')
            geofence_helper = GeofenceHelper(
                    area["geofence_included"], area.get("geofence_excluded", None))
            mode = area["mode"]
            # build routemanagers

            # map IV list to IDs
            if area.get('settings', None) is not None and 'mon_ids_iv' in area['settings']:
                # replace list name
                area['settings']['mon_ids_iv_raw'] = \
                    self.get_monlist(area['settings'].get('mon_ids_iv', None), area.get("name", "unknown"))

            route_manager = RouteManagerFactory.get_routemanager(self.__db_wrapper, None,
                                                                 mode_mapping.get(mode, {}).get("range", 0),
                                                                 mode_mapping.get(mode, {}).get("max_count", 99999999),
                                                                 area["geofence_included"],
                                                                 area.get("geofence_excluded", None),
                                                                 mode=mode, settings=area.get("settings", None),
                                                                 init=area.get("init", False),
                                                                 name=area.get("name", "unknown"),
                                                                 level=area.get("level", False),
                                                                 coords_spawns_known=area.get(
                                                                         "coords_spawns_known", False),
                                                                 routefile=area["routecalc"],
                                                                 calctype=area.get("route_calc_algorithm", "optimized"),
                                                                 joinqueue=self.join_routes_queue
                                                                 )

            if mode not in ("iv_mitm", "idle"):
                coords = self.__fetch_coords(mode, geofence_helper,
                                             coords_spawns_known=area.get("coords_spawns_known", False),
                                             init=area.get("init", False),
                                             range_init=mode_mapping.get(area["mode"], {}).get("range_init", 630),
                                             including_stops=area.get("including_stops", False))
                route_manager.add_coords_list(coords)
                max_radius = mode_mapping[area["mode"]]["range"]
                max_count_in_radius = mode_mapping[area["mode"]]["max_count"]
                if not area.get("init", False):
                    logger.info("Initializing area {}", area["name"])
                    proc = thread_pool.apply_async(route_manager.recalc_route, args=(max_radius, max_count_in_radius,
                                                                                     0, False))
                    areas_procs[area["name"]] = proc
                else:
                    logger.info(
                            "Init mode enabled. Going row-based for {}", str(area.get("name", "unknown")))
                    # we are in init, let's write the init route to file to make it visible in madmin
                    if area["routecalc"] is not None:
                        routefile = os.path.join(
                                self.__args.file_path, area["routecalc"])
                        if os.path.isfile(routefile + '.calc'):
                            os.remove(routefile + '.calc')
                        with open(routefile + '.calc', 'a') as f:
                            for loc in coords:
                                f.write(str(loc.lat) + ', ' +
                                        str(loc.lng) + '\n')
                    # gotta feed the route to routemanager... TODO: without recalc...
                    proc = thread_pool.apply_async(route_manager.recalc_route, args=(1, 99999999,
                                                                                     0, False))
                    areas_procs[area["name"]] = proc

            area_dict["routemanager"] = route_manager
            areas[area["name"]] = area_dict

        for area in areas_procs.keys():
            to_be_checked = areas_procs[area]
            to_be_checked.get()

        thread_pool.close()
        thread_pool.join()
        return areas
Example #22
File: start.py Project: Romfrosk/MAD
def create_folder(folder):
    if not os.path.exists(folder):
        logger.info(str(folder) + ' created')
        os.makedirs(folder)
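
The exists-check followed by `makedirs` can still raise if another process creates the folder in between; `os.makedirs(..., exist_ok=True)` (Python 3.2+) tolerates that race. A minimal variant that keeps the original log message:

import os

def create_folder(folder):
    if not os.path.exists(folder):
        logger.info(str(folder) + ' created')
    # exist_ok tolerates the directory appearing between the check and the call.
    os.makedirs(folder, exist_ok=True)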
Example #23
 async def run(self):
     try:
         self.listen(self.port)
         logger.info("Web server listening on port {}".format(self.port))
     except Exception as e:
         logger.error("Could not start web server: {}".format(e))
Example #24
 def _quit_route(self):
     logger.info("Shutdown Route {}", str(self.name))
     self._is_started = False
     self._round_started_time = None
Example #25
    def __init__(self, communicator, pogoWindowManager, id, resocalc,
                 mapping_manager: MappingManager, args):
        self._ScreenType: dict = {}
        self._id = id
        self._applicationArgs = args
        self._mapping_manager = mapping_manager
        detect_ReturningScreen: list = ('ZURUCKKEHRENDER', 'ZURÜCKKEHRENDER',
                                        'GAME', 'FREAK', 'SPIELER')
        detect_LoginScreen: list = ('KIDS', 'Google', 'Facebook')
        detect_PTC: list = ('Benutzername', 'Passwort', 'Username', 'Password',
                            'DRESSEURS')
        detect_FailureRetryScreen: list = ('TRY', 'DIFFERENT', 'ACCOUNT',
                                           'Anmeldung', 'Konto', 'anderes',
                                           'connexion.', 'connexion')
        detect_FailureLoginScreen: list = ('Authentifizierung',
                                           'fehlgeschlagen', 'Unable',
                                           'authenticate', 'Authentification',
                                           'Essaye')
        detect_WrongPassword: list = ('incorrect.', 'attempts', 'falsch.',
                                      'gesperrt')
        detect_Birthday: list = ('Geburtdatum', 'birth.', 'naissance.', 'date')
        detect_Marketing: list = ('Events,', 'Benachrichtigungen',
                                  'Einstellungen', 'events,', 'offers,',
                                  'notifications', 'évenements,',
                                  'evenements,', 'offres')
        detect_Gamedata: list = ('Spieldaten', 'abgerufen', 'lecture',
                                 'depuis', 'server', 'data')
        detect_SN: list = ('kompatibel', 'compatible', 'OS', 'software',
                           'device', 'Gerät', 'Betriebssystem', 'logiciel')
        detect_Forceupdate: list = ('continuer...', 'aktualisieren?', 'now?',
                                    'Aktualisieren', 'Aktualisieren,',
                                    'aktualisieren', 'update', 'continue...',
                                    'Veux-tu', 'Fais', 'continuer')

        self._ScreenType[2] = detect_ReturningScreen
        self._ScreenType[3] = detect_LoginScreen
        self._ScreenType[4] = detect_PTC
        self._ScreenType[5] = detect_FailureLoginScreen
        self._ScreenType[6] = detect_FailureRetryScreen
        self._ScreenType[8] = detect_Gamedata
        self._ScreenType[1] = detect_Birthday
        self._ScreenType[12] = detect_Marketing
        self._ScreenType[14] = detect_SN
        self._ScreenType[7] = detect_WrongPassword
        self._ScreenType[15] = detect_Forceupdate
        self._globaldict: dict = {}
        self._ratio: float = 0.0

        self._logintype: LoginType = -1
        self._PTC_accounts: List[Login_PTC] = []
        self._GGL_accounts: List[Login_GGL] = []
        self._accountcount: int = 0
        self._accountindex: int = self.get_devicesettings_value(
            'accountindex', 0)
        self._screenshot_y_offset: int = self.get_devicesettings_value(
            'screenshot_y_offset', 0)
        self._nextscreen: ScreenType = ScreenType.UNDEFINED

        self._pogoWindowManager = pogoWindowManager
        self._communicator = communicator
        self._resocalc = resocalc
        logger.info("Starting Screendetector")
        self._width: int = 0
        self._height: int = 0
        self.get_login_accounts()
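The keyword tuples registered above are matched word by word against OCR output in matchScreen further below. A compact sketch of that lookup; the names are illustrative and the dict layout mirrors self._ScreenType, without the top-of-screen filtering the real method applies.

def classify_words(words, screen_keywords):
    # return the first screen id whose keyword list contains one of the detected words
    for word in words:
        if len(word) <= 3:
            continue
        for screen_id, keywords in screen_keywords.items():
            if word in keywords:
                return screen_id
    return -1

# usage sketch
screen_keywords = {3: ('KIDS', 'Google', 'Facebook'), 4: ('Benutzername', 'Passwort')}
print(classify_words(['tap', 'Google', 'here'], screen_keywords))  # 3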
Example #26
0
def build_model(model_opt, opt, fields, checkpoint):
    """ Build the Model """
    logger.info('Building model...')
    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
    logger.info(model)
    return model
Example #27
0
    async def boot(self):
        logger.info('Globibot is booting up...')
        await self.start(self.token)
Example #28
0
    async def __register(self, websocket_client_connection):
        logger.info("Client {} registering", str(
            websocket_client_connection.request_headers.get_all("Origin")[0]))
        if self.__stop_server.is_set():
            logger.info(
                "MAD is set to shut down, not accepting new connection")
            return False

        try:
            origin = str(
                websocket_client_connection.request_headers.get_all("Origin")[0])
        except IndexError:
            # the Origin header is exactly what is missing here, so don't try to read it again
            logger.warning("Client tried to connect without Origin header")
            return False

        if origin not in self.__mapping_manager.get_all_devicemappings().keys():
            logger.warning("Register attempt of unknown Origin: {}".format(origin))
            return False

        if origin in self.__users_connecting:
            logger.info("Client {} is already connecting".format(origin))
            return False

        auths = self.__mapping_manager.get_auths()
        if auths:
            try:
                authBase64 = str(
                    websocket_client_connection.request_headers.get_all("Authorization")[0])
            except IndexError:
                logger.warning("Client from {} tried to connect without auth header", str(
                    websocket_client_connection.request_headers.get_all("Origin")[0]))
                return False

        async with self.__users_mutex:
            logger.debug("Checking if {} is already present", str(origin))
            if origin in self.__current_users:
                logger.warning(
                    "Worker with origin {} is already running, killing the running one and have client reconnect",
                    str(origin))
                self.__current_users.get(origin)[1].stop_worker()
                return

            self.__users_connecting.append(origin)

        # reset the previous error counter if it exists
        await self.__reset_fail_counter(origin)
        try:
            if auths and authBase64 and not check_auth(authBase64, self.args, auths):
                logger.warning("Invalid auth details received from {}", str(
                    websocket_client_connection.request_headers.get_all("Origin")[0]))
                return False
            logger.info("Starting worker {}".format(origin))
            if self._configmode:
                worker = WorkerConfigmode(self.args, origin, self, walker = None,
                                          mapping_manager = self.__mapping_manager, mitm_mapper = self.__mitm_mapper,
                                          db_wrapper = self.__db_wrapper, routemanager_name=None)
                logger.debug("Starting worker for {}", str(origin))
                new_worker_thread = Thread(
                    name='worker_%s' % origin, target=worker.start_worker)
                async with self.__users_mutex:
                    self.__current_users[origin] = [
                        new_worker_thread, worker, websocket_client_connection, 0]
                return True

            last_known_state = {}
            client_mapping = self.__mapping_manager.get_devicemappings_of(origin)
            devicesettings = self.__mapping_manager.get_devicesettings_of(origin)
            logger.info("Setting up routemanagers for {}", str(origin))

            if client_mapping.get("walker", None) is not None:
                if devicesettings is not None and "walker_area_index" not in devicesettings:
                    logger.debug("Initializing devicesettings")
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'walker_area_index', 0)
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'finished', False)
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'last_action_time', None)
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'last_cleanup_time', None)
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'job', False)
                    await asyncio.sleep(1) # give the settings a moment... (dirty "workaround" against race condition)
                walker_index = devicesettings.get('walker_area_index', 0)

                if walker_index > 0:
                    # check status of last area
                    if not devicesettings.get('finished', False):
                        logger.info(
                            'Something wrong with last round - get back to old area')
                        walker_index -= 1
                        self.__mapping_manager.set_devicesetting_value_of(origin, 'walker_area_index', walker_index)
                        # devicesettings['walker_area_index'] = walker_index

                walker_area_array = client_mapping["walker"]
                walker_settings = walker_area_array[walker_index]

                # pre-check the walker settings
                while not pre_check_value(walker_settings) and walker_index-1 <= len(walker_area_array):
                    walker_area_name = walker_area_array[walker_index]['walkerarea']
                    logger.info(
                        '{} not using area {} - Walkervalue out of range', str(origin), str(walker_area_name))
                    if walker_index >= len(walker_area_array) - 1:
                        logger.error(
                            'Could not find any working area at this time - check your mappings for device: {}',
                             str(origin))
                        walker_index = 0
                        self.__mapping_manager.set_devicesetting_value_of(origin, 'walker_area_index', walker_index)
                        walker_settings = walker_area_array[walker_index]
                        await websocket_client_connection.close()
                        return
                    walker_index += 1
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'walker_area_index', walker_index)
                    walker_settings = walker_area_array[walker_index]

                devicesettings = self.__mapping_manager.get_devicesettings_of(origin)
                logger.debug("Checking walker_area_index length")
                if (devicesettings.get("walker_area_index", None) is None
                        or devicesettings['walker_area_index'] >= len(walker_area_array)):
                    # check if array is smaller than expected - f.e. on the fly changes in mappings.json
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'walker_area_index', 0)
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'finished', False)
                    walker_index = 0

                walker_area_name = walker_area_array[walker_index]['walkerarea']

                if walker_area_name not in self.__mapping_manager.get_all_routemanager_names():
                    await websocket_client_connection.close()
                    raise WrongAreaInWalker()

                logger.debug('Devicesettings {}: {}', str(origin), devicesettings)
                logger.info('{} using walker area {} [{}/{}]', str(origin), str(
                    walker_area_name), str(walker_index+1), str(len(walker_area_array)))
                walker_routemanager_mode = self.__mapping_manager.routemanager_get_mode(walker_area_name)
                self.__mapping_manager.set_devicesetting_value_of(origin, 'walker_area_index', walker_index+1)
                self.__mapping_manager.set_devicesetting_value_of(origin, 'finished', False)
                if walker_index >= len(walker_area_array) - 1:
                    self.__mapping_manager.set_devicesetting_value_of(origin, 'walker_area_index', 0)

                # set global mon_iv
                routemanager_settings = self.__mapping_manager.routemanager_get_settings(walker_area_name)
                if routemanager_settings is not None:
                    client_mapping['mon_ids_iv'] =\
                        self.__mapping_manager.get_monlist(routemanager_settings.get("mon_ids_iv", None),
                                                           walker_area_name)
            else:
                walker_routemanager_mode = None

            if "last_location" not in devicesettings:
                devicesettings['last_location'] = Location(0.0, 0.0)

            logger.debug("Setting up worker for {}", str(origin))
            worker = None
            if walker_routemanager_mode is None:
                pass
            elif walker_routemanager_mode in ["raids_mitm", "mon_mitm", "iv_mitm"]:
                worker = WorkerMITM(self.args, origin, last_known_state, self, routemanager_name=walker_area_name,
                                    mitm_mapper=self.__mitm_mapper, mapping_manager=self.__mapping_manager,
                                    db_wrapper=self.__db_wrapper,
                                    pogo_window_manager=self.__pogoWindowManager, walker=walker_settings)
            elif walker_routemanager_mode in ["pokestops"]:
                worker = WorkerQuests(self.args, origin, last_known_state, self, routemanager_name=walker_area_name,
                                      mitm_mapper=self.__mitm_mapper, mapping_manager=self.__mapping_manager,
                                      db_wrapper=self.__db_wrapper, pogo_window_manager=self.__pogoWindowManager,
                                      walker=walker_settings)
            elif walker_routemanager_mode in ["idle"]:
                worker = WorkerConfigmode(self.args, origin, self, walker=walker_settings,
                                          mapping_manager=self.__mapping_manager, mitm_mapper=self.__mitm_mapper,
                                          db_wrapper=self.__db_wrapper, routemanager_name=walker_area_name)
            else:
                logger.error("Mode not implemented")
                sys.exit(1)

            if worker is None:
                logger.error("Invalid walker mode for {}. Closing connection".format(str(origin)))
                await websocket_client_connection.close()
            else:
                logger.debug("Starting worker for {}", str(origin))
                new_worker_thread = Thread(
                    name='worker_%s' % origin, target=worker.start_worker)

                new_worker_thread.daemon = True
                async with self.__users_mutex:
                    self.__current_users[origin] = [new_worker_thread,
                                                worker, websocket_client_connection, 0]
                new_worker_thread.start()
        except WrongAreaInWalker:
            logger.error('Unknown Area in Walker settings - check config')
            await websocket_client_connection.close()
        except Exception as e:
            exc_type, exc_value, exc_trace = sys.exc_info()
            logger.error("Other unhandled exception during register: {}\n{}, {}".format(e.with_traceback(None),
                                                                                        exc_value, str(e)))
            await websocket_client_connection.close()
        finally:
            async with self.__users_mutex:
                self.__users_connecting.remove(origin)
        return True
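check_auth is not shown in this snippet; a minimal sketch of what validating an HTTP Basic "Authorization" header against username/password pairs could look like. The helper name and the layout of the allowed credentials are assumptions, not MAD's actual implementation.

import base64

def basic_auth_ok(auth_header, allowed):
    # auth_header: value of the "Authorization" header, e.g. "Basic dXNlcjpwYXNz"
    # allowed: dict mapping username -> password (an assumption about the auths structure)
    try:
        scheme, encoded = auth_header.split(" ", 1)
        if scheme.lower() != "basic":
            return False
        username, password = base64.b64decode(encoded).decode("utf-8").split(":", 1)
    except ValueError:
        # malformed header, bad base64, or missing ":" separator
        return False
    return allowed.get(username) == password

# usage sketch
print(basic_auth_ok("Basic " + base64.b64encode(b"origin1:secret").decode(), {"origin1": "secret"}))  # True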
Example #29
0
File: start.py  Project: Romfrosk/MAD
        filename = args.mappings
        if not os.path.exists(filename):
            if not args.with_madmin:
                logger.error(
                    "No mappings.json found - start madmin with with_madmin in config or copy example"
                )
                sys.exit(1)

            logger.error(
                "No mappings.json found - starting setup mode with madmin.")
            logger.error("Open Madmin (ServerIP with Port " +
                         str(args.madmin_port) +
                         ") - 'Mapping Editor' and restart.")
            generate_mappingjson()
        else:
            if args.only_routes:
                logger.info("Done calculating routes!")
                # TODO: shutdown managers properly...
                sys.exit(0)

            pogoWindowManager = None
            jobstatus: dict = {}
            MitmMapperManager.register('MitmMapper', MitmMapper)
            mitm_mapper_manager = MitmMapperManager()
            mitm_mapper_manager.start()
            mitm_mapper: MitmMapper = mitm_mapper_manager.MitmMapper(
                mapping_manager, db_wrapper)

            from ocr.pogoWindows import PogoWindows
            pogoWindowManager = PogoWindows(args.temp_path,
                                            args.ocr_thread_count)
Example #30
0
    def matchScreen(self, quickcheck=False):
        pogoTopmost = self._communicator.isPogoTopmost()
        screenpath = self.get_screenshot_path()
        topmostapp = self._communicator.topmostApp()
        if not topmostapp:
            return ScreenType.ERROR

        returntype: ScreenType = -1

        if "AccountPickerActivity" in topmostapp or 'SignInActivity' in topmostapp:
            returntype = 10
        elif "GrantPermissionsActivity" in topmostapp:
            returntype = 11
        elif "ConsentActivity" in topmostapp:
            returntype = 13
        elif not pogoTopmost:
            return ScreenType.CLOSE
        elif self._nextscreen != ScreenType.UNDEFINED:
            returntype = ScreenType(self._nextscreen)
        elif not self.get_devicesettings_value('screendetection',
                                               False) or quickcheck:
            logger.info('No more screen detection - disabled or quickcheck...')
            return ScreenType.DISABLED
        else:
            if not self._takeScreenshot(
                    delayBefore=self.get_devicesettings_value(
                        "post_screenshot_delay", 1),
                    delayAfter=2):
                logger.error("_check_windows: Failed getting screenshot")
                return ScreenType.ERROR
            try:
                frame_org = cv2.imread(screenpath)
            except Exception:
                logger.error("Screenshot corrupted :(")
                return ScreenType.ERROR

            if frame_org is None:
                logger.error("Screenshot corrupted :(")
                return ScreenType.ERROR

            self._height, self._width, _ = frame_org.shape
            frame_color = frame_org
            diff: int = 1
            if self._width < 1080:
                logger.info('Resize screen ...')
                frame_color = cv2.resize(frame_org, None, fx=2, fy=2)
                diff = 2
            frame = cv2.cvtColor(frame_color, cv2.COLOR_BGR2GRAY)
            self._ratio = self._height / self._width
            self._globaldict = self._pogoWindowManager.get_screen_text(
                frame, self._id)
            if 'text' not in self._globaldict:
                logger.error('Error while text detection')
                return ScreenType.ERROR
            n_boxes = len(self._globaldict['level'])
            for i in range(n_boxes):
                if returntype != -1: break
                if len(self._globaldict['text'][i]) > 3:
                    for z in self._ScreenType:
                        if self._globaldict['top'][i] > self._height / 4 and \
                                self._globaldict['text'][i] in self._ScreenType[z]:
                            returntype = z

        if ScreenType(returntype) != ScreenType.UNDEFINED:
            logger.info("Processing Screen: {}", str(ScreenType(returntype)))

        if ScreenType(returntype) == ScreenType.GGL:
            self._nextscreen = ScreenType.UNDEFINED

            if self._logintype == LoginType.ptc:
                logger.warning(
                    "Really don't know how we got here ... using first @ggl address ... :)"
                )
                username = self.get_devicesettings_value(
                    'ggl_login_mail', '@gmail.com')
            else:
                ggl_login = self.get_next_account()
                username = ggl_login.username

            if self.parse_ggl(self._communicator.uiautomator(), username):
                time.sleep(40)
                return ScreenType.GGL
            return ScreenType.ERROR

        elif ScreenType(returntype) == ScreenType.PERMISSION:
            self._nextscreen = ScreenType.UNDEFINED
            if self.parse_permission(self._communicator.uiautomator()):
                time.sleep(2)
                return ScreenType.PERMISSION
            time.sleep(2)
            return ScreenType.ERROR

        elif ScreenType(returntype) == ScreenType.CONSENT:
            self._nextscreen = ScreenType.UNDEFINED
            return ScreenType.CONSENT

        elif ScreenType(returntype) == ScreenType.UPDATE:
            self._nextscreen = ScreenType.UNDEFINED
            return ScreenType.UPDATE

        elif ScreenType(returntype) == ScreenType.SN:
            self._nextscreen = ScreenType.UNDEFINED
            return ScreenType.SN

        elif ScreenType(returntype) == ScreenType.GAMEDATA:
            self._nextscreen = ScreenType.UNDEFINED
            return ScreenType.GAMEDATA

        elif ScreenType(returntype) == ScreenType.MARKETING:
            self._nextscreen = ScreenType.POGO
            click_text = 'ERLAUBEN,ALLOW,AUTORISER'
            n_boxes = len(self._globaldict['level'])
            for i in range(n_boxes):
                if any(elem.lower() in (self._globaldict['text'][i].lower())
                       for elem in click_text.split(",")):
                    (x, y, w, h) = (self._globaldict['left'][i],
                                    self._globaldict['top'][i],
                                    self._globaldict['width'][i],
                                    self._globaldict['height'][i])
                    click_x, click_y = (x + w / 2) / diff, (y + h / 2) / diff
                    logger.debug('Click ' + str(click_x) + ' / ' +
                                 str(click_y))
                    self._communicator.click(click_x, click_y)
                    time.sleep(2)

            return ScreenType.MARKETING

        elif ScreenType(returntype) == ScreenType.BIRTHDATE:
            self._nextscreen = ScreenType.UNDEFINED
            click_x = (self._width / 2) + (self._width / 4)
            click_y = (self._height / 1.69) + self._screenshot_y_offset
            logger.debug('Click ' + str(click_x) + ' / ' + str(click_y))
            self._communicator.click(click_x, click_y)
            self._communicator.touchandhold(click_x, click_y, click_x,
                                            click_y - (self._height / 2), 200)
            time.sleep(1)
            self._communicator.click(click_x, click_y)
            time.sleep(1)
            click_x = self._width / 2
            click_y = click_y + (self._height / 8.53)
            self._communicator.click(click_x, click_y)
            time.sleep(1)
            return ScreenType.BIRTHDATE

        elif ScreenType(returntype) == ScreenType.RETURNING:
            self._nextscreen = ScreenType.UNDEFINED
            self._pogoWindowManager.look_for_button(screenpath,
                                                    2.20,
                                                    3.01,
                                                    self._communicator,
                                                    upper=True)
            time.sleep(2)
            return ScreenType.RETURNING

        elif ScreenType(returntype) == ScreenType.WRONG:
            self._nextscreen = ScreenType.UNDEFINED
            self._pogoWindowManager.look_for_button(screenpath,
                                                    2.20,
                                                    3.01,
                                                    self._communicator,
                                                    upper=True)
            time.sleep(2)
            return ScreenType.ERROR

        elif ScreenType(returntype) == ScreenType.LOGINSELECT:
            temp_dict: dict = {}
            n_boxes = len(self._globaldict['level'])
            for i in range(n_boxes):
                if 'Facebook' in (self._globaldict['text'][i]):
                    temp_dict['Facebook'] = self._globaldict['top'][i] / diff
                if 'CLUB' in (self._globaldict['text'][i]):
                    temp_dict['CLUB'] = self._globaldict['top'][i] / diff
                # french ...
                if 'DRESSEURS' in (self._globaldict['text'][i]):
                    temp_dict['CLUB'] = self._globaldict['top'][i] / diff

                if self.get_devicesettings_value('logintype',
                                                 'google') == 'ptc':
                    self._nextscreen = ScreenType.PTC
                    if 'CLUB' in (self._globaldict['text'][i]):
                        (x, y, w, h) = (self._globaldict['left'][i],
                                        self._globaldict['top'][i],
                                        self._globaldict['width'][i],
                                        self._globaldict['height'][i])
                        click_x, click_y = (x + w / 2) / diff, (y +
                                                                h / 2) / diff
                        logger.debug('Click ' + str(click_x) + ' / ' +
                                     str(click_y))
                        self._communicator.click(click_x, click_y)
                        time.sleep(5)
                        return ScreenType.LOGINSELECT

                else:
                    self._nextscreen = ScreenType.UNDEFINED
                    if 'Google' in (self._globaldict['text'][i]):
                        (x, y, w, h) = (self._globaldict['left'][i],
                                        self._globaldict['top'][i],
                                        self._globaldict['width'][i],
                                        self._globaldict['height'][i])
                        click_x, click_y = (x + w / 2) / diff, (y +
                                                                h / 2) / diff
                        logger.debug('Click ' + str(click_x) + ' / ' +
                                     str(click_y))
                        self._communicator.click(click_x, click_y)
                        time.sleep(5)
                        return ScreenType.LOGINSELECT

                    # alternative select
                    if 'Facebook' in temp_dict and 'TRAINER' in temp_dict:
                        click_x = self._width / 2
                        click_y = (temp_dict['Facebook'] + (
                            (temp_dict['TRAINER'] - temp_dict['Facebook']) /
                            2))
                        logger.debug('Click ' + str(click_x) + ' / ' +
                                     str(click_y))
                        self._communicator.click(click_x, click_y)
                        time.sleep(5)
                        return ScreenType.LOGINSELECT

                    # alternative select
                    if 'Facebook' in temp_dict:
                        click_x = self._width / 2
                        click_y = (temp_dict['Facebook'] +
                                   self._height / 10.11)
                        logger.debug('Click ' + str(click_x) + ' / ' +
                                     str(click_y))
                        self._communicator.click(click_x, click_y)
                        time.sleep(5)
                        return ScreenType.LOGINSELECT

                    # alternative select
                    if 'CLUB' in temp_dict:
                        click_x = self._width / 2
                        click_y = (temp_dict['CLUB'] - self._height / 10.11)
                        logger.debug('Click ' + str(click_x) + ' / ' +
                                     str(click_y))
                        self._communicator.click(click_x, click_y)
                        time.sleep(5)
                        return ScreenType.LOGINSELECT

        elif ScreenType(returntype) == ScreenType.PTC:
            self._nextscreen = ScreenType.UNDEFINED
            ptc = self.get_next_account()
            if not ptc:
                logger.error('No PTC Username and Password is set')
                return ScreenType.ERROR

            if float(self._ratio) >= 2:
                username_y = self._height / 2.5 + self._screenshot_y_offset
                password_y = self._height / 2.105 + self._screenshot_y_offset
                button_y = self._height / 1.7777 + self._screenshot_y_offset
            elif float(self._ratio) >= 1.7:
                username_y = self._height / 2.224797219003476 + self._screenshot_y_offset
                password_y = self._height / 1.875 + self._screenshot_y_offset
                button_y = self._height / 1.58285243198681 + self._screenshot_y_offset
            elif float(self._ratio) < 1.7:
                username_y = self._height / 2.224797219003476 + self._screenshot_y_offset
                password_y = self._height / 1.875 + self._screenshot_y_offset
                button_y = self._height / 1.58285243198681 + self._screenshot_y_offset

            # username
            self._communicator.click(self._width / 2, username_y)
            time.sleep(.5)
            self._communicator.sendText(ptc.username)
            self._communicator.click(100, 100)
            time.sleep(2)

            # password
            self._communicator.click(self._width / 2, password_y)
            time.sleep(.5)
            self._communicator.sendText(ptc.password)
            self._communicator.click(100, 100)
            time.sleep(2)

            # button
            self._communicator.click(self._width / 2, button_y)
            time.sleep(40)
            return ScreenType.PTC

        elif ScreenType(returntype) == ScreenType.FAILURE:
            self._nextscreen = ScreenType.UNDEFINED
            self._pogoWindowManager.look_for_button(screenpath, 2.20, 3.01,
                                                    self._communicator)
            time.sleep(2)
            return ScreenType.ERROR

        elif ScreenType(returntype) == ScreenType.RETRY:
            self._nextscreen = ScreenType.UNDEFINED
            click_text = 'DIFFERENT,AUTRE,AUTORISER,ANDERES,KONTO,ACCOUNT'
            n_boxes = len(self._globaldict['level'])
            for i in range(n_boxes):
                if any(elem in (self._globaldict['text'][i])
                       for elem in click_text.split(",")):
                    (x, y, w, h) = (self._globaldict['left'][i],
                                    self._globaldict['top'][i],
                                    self._globaldict['width'][i],
                                    self._globaldict['height'][i])
                    click_x, click_y = (x + w / 2) / diff, (y + h / 2) / diff
                    logger.debug('Click ' + str(click_x) + ' / ' +
                                 str(click_y))
                    self._communicator.click(click_x, click_y)
                    time.sleep(2)
            return ScreenType.RETRY

        else:
            return ScreenType.POGO
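The marketing, retry, and login branches above all share the same coordinate math: take an OCR bounding box, find its centre, and divide by diff to map from the (possibly upscaled) detection frame back to device pixels. A small sketch of that helper; the name is illustrative.

def box_center_on_device(box, diff=1):
    # box: dict with 'left', 'top', 'width', 'height' as returned per OCR entry
    # diff: upscale factor applied before OCR (2 when the frame was resized, else 1)
    x, y, w, h = box['left'], box['top'], box['width'], box['height']
    return (x + w / 2) / diff, (y + h / 2) / diff

# usage sketch: a 100x40 box at (200, 600) detected on a 2x upscaled frame
print(box_center_on_device({'left': 200, 'top': 600, 'width': 100, 'height': 40}, diff=2))
# -> (125.0, 310.0)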
Example #31
0
def main():
    opt = parse_args()
    if opt.shuffle > 0:
        raise AssertionError("-shuffle is not implemented; please make sure "
                             "you shuffle your data before pre-processing.")
    init_logger(opt.log_file)
    logger.info("Input args: %r", opt)
    logger.info("Extracting features...")

    logger.info("Building `Fields` object...")
    fields = get_fields()
    task1_fields = get_task_fields()
    task2_fields = get_task2_fields()

    logger.info("Building & saving task training data...")
    train_dataset_files = build_save_dataset('train', 'task', fields, opt)
    logger.info("Building & saving task2 training data...")
    train_dataset_files2 = build_save_dataset('train', 'task2', fields, opt)

    logger.info("Building & saving task validation data...")
    build_save_dataset('valid', 'task', fields, opt)
    logger.info("Building & saving task2 validation data...")
    build_save_dataset('valid', 'task2', fields, opt)

    logger.info("Building & saving vocabulary...")

    build_save_vocab(train_dataset_files + train_dataset_files2, fields, opt)
Example #32
0
    def train(self, train_iter_fct, valid_iter_fct, train_steps, valid_steps):
        """
    The main training loops.
    by iterating over training data (i.e. `train_iter_fct`)
    and running validation (i.e. iterating over `valid_iter_fct`

    Args:
        train_iter_fct(function): a function that returns the train
            iterator. e.g. something like
            train_iter_fct = lambda: generator(*args, **kwargs)
        valid_iter_fct(function): same as train_iter_fct, for valid data
        train_steps(int):
        valid_steps(int):
        save_checkpoint_steps(int):

    Return:
        None
    """
        logger.info('Start training...')

        step = self.optim._step + 1
        true_batchs = []
        accum = 0
        normalization = 0
        train_iter = train_iter_fct()

        total_stats = Statistics()
        report_stats = Statistics()
        self._start_report_manager(start_time=total_stats.start_time)

        while step <= train_steps:

            reduce_counter = 0
            for i, batch in enumerate(train_iter):
                if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):
                    if self.gpu_verbose_level > 1:
                        logger.info("GpuRank %d: index: %d accum: %d" %
                                    (self.gpu_rank, i, accum))

                    true_batchs.append(batch)

                    if self.norm_method == "tokens":
                        num_tokens = batch.tgt[1:].ne(
                            self.train_loss.padding_idx).sum()
                        normalization += num_tokens.item()
                    else:
                        normalization += batch.batch_size
                    accum += 1
                    if accum == self.grad_accum_count:
                        reduce_counter += 1
                        if self.gpu_verbose_level > 0:
                            logger.info("GpuRank %d: reduce_counter: %d \
                          n_minibatch %d" % (self.gpu_rank, reduce_counter,
                                             len(true_batchs)))
                        if self.n_gpu > 1:
                            normalization = sum(all_gather_list(normalization))

                        self._gradient_accumulation(true_batchs, normalization,
                                                    total_stats, report_stats)

                        report_stats = self._maybe_report_training(
                            step, train_steps, self.optim.learning_rate,
                            report_stats)

                        true_batchs = []
                        accum = 0
                        normalization = 0
                        if (step % valid_steps == 0):
                            if self.gpu_verbose_level > 0:
                                logger.info('GpuRank %d: validate step %d' %
                                            (self.gpu_rank, step))
                            valid_iter = valid_iter_fct()
                            valid_stats = self.validate(valid_iter)
                            if self.gpu_verbose_level > 0:
                                logger.info('GpuRank %d: gather valid stat \
                              step %d' % (self.gpu_rank, step))
                            valid_stats = self._maybe_gather_stats(valid_stats)
                            if self.gpu_verbose_level > 0:
                                logger.info('GpuRank %d: report stat step %d' %
                                            (self.gpu_rank, step))
                            self._report_step(self.optim.learning_rate,
                                              step,
                                              valid_stats=valid_stats)

                        if self.gpu_rank == 0:
                            self._maybe_save(step)
                        step += 1
                        if step > train_steps:
                            break
            if self.gpu_verbose_level > 0:
                logger.info('GpuRank %d: we completed an epoch \
                    at step %d' % (self.gpu_rank, step))
            train_iter = train_iter_fct()

        return total_stats
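When self.norm_method == "tokens", the loop above normalises by the number of non-padding target tokens accumulated across the micro-batches (batch.tgt[1:].ne(padding_idx).sum()). A standalone sketch of that count; the padding index value here is illustrative.

import torch

def token_normalization(tgt, padding_idx=1):
    # tgt: LongTensor of shape [tgt_len, batch]; skip the first row (BOS) like batch.tgt[1:]
    return tgt[1:].ne(padding_idx).sum().item()

# usage sketch: 2 sequences, the second one padded after 3 tokens
tgt = torch.tensor([[2, 2], [5, 7], [6, 8], [9, 1], [3, 1]])
print(token_normalization(tgt))  # 6 non-pad tokens after removing the BOS row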
Example #33
0
def build_vocab(train_dataset_files, fields, share_vocab, src_vocab_size,
                src_words_min_frequency, tgt_vocab_size,
                tgt_words_min_frequency, tgt2_vocab_size,
                tgt2_words_min_frequency, vocab_file):
    counter = {}

    for k in fields:
        counter[k] = Counter()

    # Load vocabulary
    for _, path in enumerate(train_dataset_files):
        dataset = torch.load(path)
        logger.info(" * reloading %s." % path)
        for ex in dataset.examples:
            for k in fields:
                val = getattr(ex, k, None)
                if not fields[k].sequential:
                    continue
                counter[k].update(val)

        dataset.examples = None
        gc.collect()
        del dataset.examples
        gc.collect()
        del dataset
        gc.collect()

    build_field_vocab(fields["src"],
                      counter["src"],
                      max_size=src_vocab_size,
                      min_freq=src_words_min_frequency)
    logger.info(" * src vocab size: %d." % len(fields["src"].vocab))

    if vocab_file:
        logger.info('updating src vocab')
        logger.info('original src counter size: {}'.format(len(counter["src"])))
        with open(vocab_file, mode='r', encoding='utf-8') as vf:
            for line in vf:
                vocab = line.strip().split()
                counter["src"].update(vocab)
        logger.info('src counter size with vocab file: {}'.format(len(counter["src"])))

    build_field_vocab(fields["tgt"],
                      counter["tgt"],
                      max_size=tgt_vocab_size,
                      min_freq=tgt_words_min_frequency)

    build_field_vocab(fields["tgt2"],
                      counter["tgt2"],
                      max_size=tgt2_vocab_size,
                      min_freq=tgt2_words_min_frequency)

    merged_vocab = merge_vocabs([fields["tgt"].vocab, fields["tgt2"].vocab],
                                vocab_size=tgt_vocab_size,
                                min_frequency=tgt_words_min_frequency)
    fields["tgt"].vocab = merged_vocab
    fields["tgt2"].vocab = merged_vocab
    logger.info(" * tgt & tgt2 vocab size: %d." % len(fields["tgt"].vocab))

    # Merge the input and output vocabularies.
    if share_vocab:
        # `tgt_vocab_size` is ignored when sharing vocabularies
        logger.info(" * merging src, tgt and tgt2 vocab...")
        merged_vocab = merge_vocabs([fields["src"].vocab, fields["tgt"].vocab],
                                    vocab_size=src_vocab_size,
                                    min_frequency=src_words_min_frequency)

        fields["src"].vocab = merged_vocab
        fields["tgt"].vocab = merged_vocab
        fields["tgt2"].vocab = merged_vocab
        logger.info(" * src vocab size: %d." % len(fields["src"].vocab))
        logger.info(" * tgt vocab size: %d." % len(fields["tgt"].vocab))
        logger.info(" * tgt2 vocab size: %d." % len(fields["tgt2"].vocab))

    return fields
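build_field_vocab is not shown here; a minimal pure-Python sketch of the max_size / min_freq filtering it presumably applies to each field's Counter. The helper name is illustrative and the torchtext Vocab machinery is left out.

from collections import Counter

def filtered_tokens(counter, max_size=None, min_freq=1):
    # keep tokens meeting the frequency threshold, most frequent first,
    # truncated to max_size if given
    items = [(tok, c) for tok, c in counter.most_common() if c >= min_freq]
    if max_size is not None:
        items = items[:max_size]
    return [tok for tok, _ in items]

# usage sketch
src_counter = Counter("the cat sat on the mat the end".split())
print(filtered_tokens(src_counter, max_size=3, min_freq=1))  # ['the', ...]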
Example #34
0
def main(opt, device_id):
    opt = training_opt_postprocessing(opt, device_id)
    init_logger(opt.log_file)
    logger.info("Input args: %r", opt)
    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)

        # Load default opt values, then overwrite them with the opts from
        # the checkpoint. This is useful when re-training a model
        # after adding a new option (not set in the checkpoint).
        dummy_parser = configargparse.ArgumentParser()
        opts.model_opts(dummy_parser)
        default_opt = dummy_parser.parse_known_args([])[0]

        model_opt = default_opt
        model_opt.__dict__.update(checkpoint['opt'].__dict__)
    else:
        checkpoint = None
        model_opt = opt

    # Load fields generated from preprocess phase.
    fields = load_fields(opt, checkpoint)

    # Build model.
    model = build_model(model_opt, opt, fields, checkpoint)
    n_params, enc, dec = _tally_parameters(model)
    logger.info('encoder: %d' % enc)
    logger.info('decoder: %d' % dec)
    logger.info('* number of parameters: %d' % n_params)
    _check_save_model_path(opt)

    # Build optimizer.
    optim = build_optim(model, opt, checkpoint)

    # Build model saver
    model_saver = build_model_saver(model_opt, opt, model, fields, optim)

    trainer = build_trainer(opt,
                            device_id,
                            model,
                            fields,
                            optim,
                            model_saver=model_saver)

    def train_iter_fct():
        return build_dataset_iter(load_dataset("train", opt), fields, opt)

    def valid_iter_fct():
        return build_dataset_iter(load_dataset("valid", opt),
                                  fields,
                                  opt,
                                  is_train=False)

    # Do training.
    if len(opt.gpu_ranks):
        logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
    else:
        logger.info('Starting training on CPU, could be very slow')
    trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,
                  opt.valid_steps)

    if opt.tensorboard:
        trainer.report_manager.tensorboard_writer.close()
示例#35
0
def main(_):
    """ start training
    """
    ## assert ##
    logger.info('Asserting parameters')
    assert FLAGS.f_log_step is None or FLAGS.f_log_step > 0
    assert FLAGS.f_save_step is None or FLAGS.f_save_step > 0
    assert FLAGS.f_summary_step is None or FLAGS.f_summary_step > 0

    ## build graph ##
    logger.info('Building graph, using %s...' % (FLAGS.model_name))
    config_dict = {
        'multiscale_feats': FLAGS.multiscale_feats,
        'backbone': FLAGS.backbone_name
    }
    det_loss, clf_loss = build_graph(model_name=FLAGS.model_name,
                                     attention_module=FLAGS.attention_module,
                                     is_training=True,
                                     config_dict=config_dict)
    ## build optimizer ##
    train_ops = build_optimizer(det_loss, clf_loss)

    ## summary ops ##
    merge_ops = tf.summary.merge_all()
    logger.info('Build graph success...')
    logger.info('Total trainable parameters:%s' % str(
        np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ])))

    ## saver
    saver = tf.train.Saver(tf.global_variables())
    init = tf.global_variables_initializer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        ## create a summary writer ##
        summary_dir = os.path.join(FLAGS.summary_dir)
        writer = tf.summary.FileWriter(summary_dir, sess.graph)

        if FLAGS.checkpoint_dir is None:
            sess.run(init)
            logger.info('TF variables init success...')
        else:
            model_name = os.path.join(FLAGS.checkpoint_dir,
                                      FLAGS.model_name + '.model')
            tf.train.Saver().restore(sess, model_name)
            logger.info('Load checkpoint success...')

        pd = provider(batch_size=FLAGS.batch_size,
                      for_what='train',
                      whether_aug=True)
        avg_det_loss = 0.
        avg_clf_loss = 0.
        avg_time = 0.
        while (True):
            start = time()
            imgs, labels, t_bboex = pd.load_batch()
            imgs = np.array(imgs)
            labels = np.reshape(np.array(labels),
                                newshape=[FLAGS.batch_size, -1, 1])
            t_bboex = np.reshape(np.array(t_bboex),
                                 newshape=[FLAGS.batch_size, -1, 4])
            t_ops, m_ops, current_step, d_loss, c_loss \
                =sess.run([train_ops, merge_ops, global_step, det_loss, clf_loss],
                            feed_dict={inputs: imgs, label_gt: labels, bboxes_gt:t_bboex})
            t = round(time() - start, 3)

            if FLAGS.f_log_step is not None:
                ## calculate average loss ##
                step = current_step % FLAGS.f_log_step
                avg_det_loss = (avg_det_loss * step + d_loss) / (step + 1.)
                avg_clf_loss = (avg_clf_loss * step + c_loss) / (step + 1.)
                avg_time = (avg_time * step + t) / (step + 1.)
                if current_step % FLAGS.f_log_step == FLAGS.f_log_step - 1:
                    ## print info ##
                    logger.info('Step%s det_loss:%s clf_loss:%s time:%s' %
                                (str(current_step), str(avg_det_loss),
                                 str(avg_clf_loss), str(avg_time)))
                    avg_det_loss = 0.
                    avg_clf_loss = 0.

            if FLAGS.f_summary_step is not None:
                if current_step % FLAGS.f_summary_step == FLAGS.f_summary_step - 1:
                    ## summary ##
                    writer.add_summary(m_ops, current_step)

            if FLAGS.f_save_step is not None:
                if current_step % FLAGS.f_save_step == FLAGS.f_save_step - 1:
                    ## save model ##
                    logger.info('Saving model...')
                    model_name = os.path.join(FLAGS.train_dir,
                                              FLAGS.model_name + '.model')
                    saver.save(sess, model_name)
                    logger.info('Save model success...')

            if FLAGS.training_step is not None:
                if current_step >= FLAGS.training_step:
                    logger.info('Exit training...')
                    break
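The logging block above maintains running averages over each f_log_step window with the incremental formula avg_new = (avg * k + x) / (k + 1). A standalone sketch of that update:

def update_running_average(avg, value, step_in_window):
    # incremental mean: after k samples, fold in sample k+1
    return (avg * step_in_window + value) / (step_in_window + 1.0)

# usage sketch: the average of 1.0, 2.0, 3.0 computed incrementally
avg = 0.0
for k, v in enumerate([1.0, 2.0, 3.0]):
    avg = update_running_average(avg, v, k)
print(avg)  # 2.0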