Example #1
 def _restartPogoDroid(self):
     successfulStop = self._stopPogoDroid()
     time.sleep(1)
     logger.debug("restartPogoDroid: stop pogodroid resulted in {}",
                  str(successfulStop))
     if successfulStop:
         return self._start_pogodroid()
     else:
         return False
Example #2
 def _turn_map(self, delayadd):
     logger.debug('{_turn_map} called')
     logger.info('Turning map')
     x1, x2, y = self._resocalc.get_gym_spin_coords(self)[0], self._resocalc.get_gym_spin_coords(self)[1], \
         self._resocalc.get_gym_spin_coords(self)[2]
     self._communicator.swipe(int(x1), int(y), int(x2), int(y))
     time.sleep(int(delayadd))
     logger.debug('{_turn_map} finished')
     return
Example #3
 async def _update_position_file(self):
     logger.debug("Updating .position file")
     if self.current_location is not None:
         with open(
                 os.path.join(self._applicationArgs.file_path,
                              self._id + '.position'), 'w') as outfile:
             outfile.write(
                 str(self.current_location.lat) + ", " +
                 str(self.current_location.lng))
Example #4
 def debug_message(self, message, event_type):
     logger.debug(
         '{:8} {:15.15} {:20} {}'.format(
             event_type,
             message.author.name,
             message.author.id,
             message.content
         )
     )
Example #5
 def _open_gym(self, delayadd):
     logger.debug('{_open_gym} called')
     time.sleep(.5)
     x, y = self._resocalc.get_gym_click_coords(
         self)[0], self._resocalc.get_gym_click_coords(self)[1]
     self._communicator.click(int(x), int(y))
     time.sleep(.5 + int(delayadd))
     logger.debug('{_open_gym} finished')
     return
Example #6
File: routecalc.py  Project: mossmap/MAD
    def getJsonRoute(self, coords, maxRadius, maxCoordsInRadius, in_memory, num_processes=1, algorithm='optimized',
                     useS2: bool = False, S2level: int = 15, route_name: str = 'Unknown'):
        export_data = []
        if useS2:
            logger.debug("Using S2 method for calculation with S2 level: {}", S2level)
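        # a previously calculated route stored in the DB ('routefile') is reused unless calculating in memory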
        if not in_memory and \
           (self._data['fields']['routefile'] is not None and len(self._data['fields']['routefile']) > 0):
            logger.debug('Using routefile from DB')
            for line in self._data['fields']['routefile']:
                # skip empty lines
                if not line.strip():
                    continue
                lineSplit = line.split(',')
                export_data.append({'lat': float(lineSplit[0].strip()),
                                    'lng': float(lineSplit[1].strip())})
            return export_data

        lessCoordinates = coords
        if len(coords) > 0 and maxRadius and maxCoordsInRadius:
            logger.info("Calculating route for {}", route_name)
            newCoords = self.getLessCoords(coords, maxRadius, maxCoordsInRadius, useS2, S2level)
            lessCoordinates = np.zeros(shape=(len(newCoords), 2))
            for i in range(len(lessCoordinates)):
                lessCoordinates[i][0] = newCoords[i][0]
                lessCoordinates[i][1] = newCoords[i][1]
            logger.debug("Coords summed up: {}, that's just {} coords",
                         str(lessCoordinates), str(len(lessCoordinates)))
        logger.debug("Got {} coordinates", len(lessCoordinates))
        if len(lessCoordinates) < 3:
            logger.debug("less than 3 coordinates... not gonna take a shortest route on that")
            export_data = []
            for i in range(len(lessCoordinates)):
                export_data.append({'lat': lessCoordinates[i][0].item(),
                                    'lng': lessCoordinates[i][1].item()})
        else:
            logger.info("Calculating a short route through all those coords. Might take a while")
            from timeit import default_timer as timer
            start = timer()
            if algorithm == 'quick':
                from route.routecalc.calculate_route_quick import route_calc_impl
            else:
                from route.routecalc.calculate_route_optimized import route_calc_impl
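            # route_calc_impl returns the visiting order as indices into lessCoordinates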
            sol_best = route_calc_impl(lessCoordinates, route_name, num_processes)
            end = timer()
            logger.info("Calculated route in {} minutes", str((end - start) / 60))
            for i in range(len(sol_best)):
                export_data.append({'lat': lessCoordinates[int(sol_best[i])][0].item(),
                                    'lng': lessCoordinates[int(sol_best[i])][1].item()})
        if not in_memory:
            calc_coords = []
            for coord in export_data:
                calc_coord = '%s,%s' % (coord['lat'], coord['lng'])
                calc_coords.append(calc_coord)
            # Only save if we aren't calculating in memory
            self._data['fields']['routefile'] = calc_coords
            self.save()
        return export_data
Example #7
 def get_gym_count(self):
     logger.debug('Fetching gym count from db')
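     # Gym counts per controlling team, with team_id mapped to a color name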
     query = (
         "SELECT If(team_id=0, 'WHITE', if(team_id=1, 'BLUE', if (team_id=2, 'RED', 'YELLOW'))) "
         "AS Color, count(team_id) AS Count "
         "FROM `gym` "
         "GROUP BY team_id"
     )
     res = self._db_exec.execute(query)
     return res
Example #8
 def get_shiny_stats_hour(self):
     logger.debug('Fetching shiny pokemon stats from db')
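     # Distinct (hour, type_id) pairs of shiny detections, ordered by hour of scan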
     query = (
         "SELECT hour(FROM_UNIXTIME(timestamp_scan)) AS hour, type_id "
         "FROM trs_stats_detect_raw "
         "WHERE is_shiny = 1 "
         "GROUP BY type_id, hour ORDER BY hour ASC"
     )
     res = self._db_exec.execute(query)
     return res
Example #9
    def insert_usage(self, instance, cpu, mem, garbage, timestamp):
        logger.debug("dbWrapper::insert_usage")

        query = (
            "INSERT into trs_usage (instance, cpu, memory, garbage, timestamp) VALUES "
            "(%s, %s, %s, %s, %s)")
        vals = (instance, cpu, mem, garbage, timestamp)
        self.execute(query, vals, commit=True)

        return
Example #10
 async def read(self) -> Optional[Any]:
     try:
         dataBytes = await self.pipeReader.read(1024)
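         # the first 8 bytes are the op/length header (cf. write() below); decode the JSON payload that follows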
         data = json.loads(dataBytes[8:].decode("utf-8"))
         logger.debug("[READ] %s", data)
         return data
     except Exception:
         logger.exception(
             "An unexpected error occurred during a RPC read operation")
         self.connected = False
Example #11
 def get_location_info(self):
     logger.debug('Fetching location statistics from db')
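     # Per-worker totals of requested, successful and failed locations plus the failure rate in percent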
     query = (
         "SELECT worker, sum(location_count), sum(location_ok), sum(location_nok), "
         "sum(location_nok) / sum(location_count) * 100 as Loc_fail_rate "
         "FROM trs_stats_location "
         "GROUP BY worker"
     )
     res = self._db_exec.execute(query)
     return res
Example #12
 def get_all_empty_scans(self):
     logger.debug('Fetching all empty locations from db')
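     # Coordinates with more than five failed scans and no successful scan at all, grouped by lat/lng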
     query = ("SELECT count(b.id) as Count, b.lat, b.lng, GROUP_CONCAT(DISTINCT b.worker order by worker asc "
              "SEPARATOR ', '), if(b.type=0,'Normal','PrioQ'), max(b.period), (select count(c.id) "
              "from trs_stats_location_raw c where c.lat=b.lat and c.lng=b.lng and c.success=1) as successcount from "
              "trs_stats_location_raw b where success=0 group by lat, lng HAVING Count > 5 and successcount=0 "
              "ORDER BY count(id) DESC"
              )
     res = self._db_exec.execute(query)
     return res
Example #13
File: WorkerBase.py  Project: snooter/MAD
 def _get_screen_size(self):
     screen = self._communicator.getscreensize().split(' ')
     self._screen_x = screen[0]
     self._screen_y = screen[1]
     x_offset = self._devicesettings.get("screenshot_x_offset", 0)
     y_offset = self._devicesettings.get("screenshot_y_offset", 0)
     logger.debug('Screen size of {}: X: {}, Y: {}, X-Offset: {}, Y-Offset: {}',
                  str(self._id), str(self._screen_x), str(self._screen_y), str(x_offset), str(y_offset))
     self._resocalc.get_x_y_ratio(
             self, self._screen_x, self._screen_y, x_offset, y_offset)
Example #14
 def process(filename, args, db_wrapper, hash, raidno, captureTime,
             captureLat, captureLng, src_path, radius):
     logger.debug("Cropscanning started")
     scanner = Scanner(args, db_wrapper, hash)
     logger.info("Initialized scanner, starting analysis of {}",
                 str(filename))
     checkcrop = scanner.start_detect(filename, hash, raidno, captureTime,
                                      captureLat, captureLng, src_path,
                                      radius)
     return checkcrop
Example #15
 def _merge_priority_queue(self, new_queue):
     if new_queue is not None:
         with self._manager_mutex:
             merged = list(new_queue)
             logger.info("New raw priority queue with {} entries", len(merged))
             merged = self._filter_priority_queue_internal(merged)
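             # heapify the clustered entries so the item with the smallest priority value sits at the front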
             heapq.heapify(merged)
             self._prio_queue = merged
         logger.info("New clustered priority queue with {} entries", len(merged))
         logger.debug("Priority queue entries: {}", str(merged))
Example #16
 def write(self, op: int, payload: Any) -> None:
     try:
         logger.debug("[WRITE] %s", payload)
         payload = json.dumps(payload)
         self.pipeWriter.write(
             struct.pack("<ii", op, len(payload)) + payload.encode("utf-8"))
     except Exception:
         logger.exception(
             "An unexpected error occurred during a RPC write operation")
         self.connected = False
Example #17
    def retrieve_next_spawns(self, geofence_helper):
        """
        Retrieve the spawnpoints with their respective unixtimestamp that are due in the next 300 seconds
        :return:
        """

        logger.debug("DbWrapper::retrieve_next_spawns called")

        current_time_of_day = datetime.now().replace(microsecond=0)
        minLat, minLon, maxLat, maxLon = geofence_helper.get_polygon_from_fence()

        query = (
            "SELECT latitude, longitude, spawndef, calc_endminsec "
            "FROM trs_spawn "
            "WHERE calc_endminsec IS NOT NULL "
            "AND (latitude >= {} AND longitude >= {} AND latitude <= {} AND longitude <= {}) "
            "AND DATE_FORMAT(STR_TO_DATE(calc_endminsec,'%i:%s'),'%i:%s') BETWEEN DATE_FORMAT(DATE_ADD(NOW(), "
            " INTERVAL if(spawndef=15,60,30) MINUTE),'%i:%s') "
            "AND DATE_FORMAT(DATE_ADD(NOW(), INTERVAL if(spawndef=15,70,40) MINUTE),'%i:%s')"
        ).format(minLat, minLon, maxLat, maxLon)

        res = self.execute(query)
        next_up = []
        current_time = time.time()
        for (latitude, longitude, spawndef, calc_endminsec) in res:
            if geofence_helper and not geofence_helper.is_coord_inside_include_geofence(
                [latitude, longitude]):
                continue
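            # calc_endminsec holds the despawn time as minute:second within the current hour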
            endminsec_split = calc_endminsec.split(":")
            minutes = int(endminsec_split[0])
            seconds = int(endminsec_split[1])
            temp_date = current_time_of_day.replace(minute=minutes,
                                                    second=seconds)
            if minutes < datetime.now().minute:
                temp_date = temp_date + timedelta(hours=1)

            if temp_date < current_time_of_day:
                # spawn has already happened, we should've added it in the past, let's move on
                # TODO: consider crosschecking against current mons...
                continue

            spawn_duration_minutes = 60 if spawndef == 15 else 30

            timestamp = time.mktime(temp_date.timetuple()) - \
                spawn_duration_minutes * 60
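            # timestamp now marks the spawn start (despawn time minus the spawn duration)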
            # check if we calculated a time in the past, if so, add an hour to it...
            timestamp = timestamp + 60 * 60 if timestamp < current_time else timestamp
            # TODO: consider the following since I am not sure if the prio Q clustering handles stuff properly yet
            # if timestamp >= current_time + 600:
            #     # let's skip monspawns that are more than 10minutes in the future
            #     continue
            next_up.append((timestamp, Location(latitude, longitude)))
        return next_up
Example #18
File: updater.py  Project: madBeavis/MAD
 def kill_old_jobs(self):
     logger.info("Checking for outdated jobs")
     for job in self._log.copy():
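         # non-auto jobs stuck in a non-final state are marked cancelled; jobs flagged 'auto' are removed from the status log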
         if self._log[job]['status'] in ('pending', 'starting', 'processing', 'not connected', 'future') \
                 and not self._log[job].get('auto', False):
             logger.debug("Cancel job {} - it is outdated".format(str(job)))
             self.write_status_log(str(job),
                                   field='status',
                                   value='cancelled')
         elif self._log[job].get('auto', False):
             self.write_status_log(str(job), delete=True)
Example #19
    def _wait_data_worker(self, latest, proto_to_wait_for, timestamp):
        data_requested = LatestReceivedType.UNDEFINED
        if latest is None:
            logger.debug(
                "Nothing received from {} since MAD started", str(self._id))
            time.sleep(0.5)
        elif proto_to_wait_for not in latest:
            logger.debug(
                "No data linked to the requested proto since MAD started.")
            time.sleep(0.5)
        else:
            # proto has previously been received, let's check the timestamp...
            # TODO: int vs str-key?
            latest_proto = latest.get(proto_to_wait_for, None)

            mode = self._mapping_manager.routemanager_get_mode(self._routemanager_name)
            latest_timestamp = latest_proto.get("timestamp", 0)
            if latest_timestamp >= timestamp:
                # TODO: consider resetting timestamp here since we clearly received SOMETHING
                latest_data = latest_proto.get("values", None)
                if latest_data is None:
                    time.sleep(0.5)
                    return LatestReceivedType.UNDEFINED
                elif mode in ["mon_mitm", "iv_mitm"]:
                    # check if the GMO contains mons
                    for data_extract in latest_data['payload']['cells']:
                        for WP in data_extract['wild_pokemon']:
                            # TODO: teach Prio Q / Clusterer to hold additional data such as mon/encounter IDs
                            if WP['spawnpoint_id']:
                                data_requested = latest_data
                                break
                    if data_requested is LatestReceivedType.UNDEFINED:
                        logger.debug("No spawnpoints in data requested")
                        time.sleep(1)
                elif mode in ["raids_mitm"]:
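                    # same pattern for raid scans: accept the data as soon as any fort is present in the GMO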
                    for data_extract in latest_data['payload']['cells']:
                        for forts in data_extract['forts']:
                            if forts['id']:
                                data_requested = latest_data
                                break
                    if data_requested is LatestReceivedType.UNDEFINED:
                        logger.debug("No forts in data received")
                        time.sleep(0.5)
                else:
                    logger.warning(
                        "No mode specified to wait for - this should not even happen...")
                    time.sleep(0.5)
            else:
                logger.debug("latest timestamp of proto {} ({}) is older than {}",
                             str(proto_to_wait_for), str(latest_timestamp), str(timestamp))
                # TODO: timeout error instead of data_error_counter? Differentiate timeout vs missing data
                # TODO: (the latter indicates too high speeds, for example)
                time.sleep(0.5)
        return data_requested
Example #20
 def del_from_route(self):
     logger.debug(
         "{}: Location available, acquiring lock and trying to return location",
         str(self.name))
     with self._manager_mutex:
         logger.info('Removing coords from Route')
         self._route.pop(int(self._current_index_of_route) - 1)
         self._current_index_of_route -= 1
         if len(self._route) == 0:
             logger.info('No more coords are available... Sleeping.')
Example #21
 def _init_route_queue(self):
     self._manager_mutex.acquire()
     try:
         if len(self._route) > 0:
             self._route_queue.queue.clear()
             logger.debug("Creating queue for coords")
             for latlng in self._route:
                 self._route_queue.put((latlng.lat, latlng.lng))
             logger.debug("Finished creating queue")
     finally:
         self._manager_mutex.release()
Example #22
 async def __retrieve_next_send(self, websocket_client_connection):
     found = None
     while found is None and websocket_client_connection.open:
         try:
             found = self.__send_queue.get_nowait()
         except Exception as e:
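             # nothing queued yet: back off briefly and poll again while the connection is open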
             await asyncio.sleep(0.02)
     if not websocket_client_connection.open:
         logger.debug(
             "retrieve_next_send: connection closed, returning None")
     return found
Example #23
 def _check_coords_before_returning(self, lat, lng):
     if self.init:
         logger.debug('Init Mode - coord is valid')
         return True
     stop = Location(lat, lng)
     logger.info('Checking Stop with ID {}', str(stop))
     if stop not in self._stoplist and not self._level:
         logger.info('Already got this Stop')
         return False
     logger.info('Getting new Stop')
     return True
Example #24
    def generate_stop_list(self):
        time.sleep(5)
        stops, stops_with_visits = self.db_wrapper.stop_from_db_without_quests(
            self.geofence_helper, True)

        logger.info('Detected stops without quests: {}',
                    str(len(stops_with_visits)))
        logger.debug('Detected stops without quests: {}',
                     str(stops_with_visits))
        self._stoplist: List[Location] = stops
        self._stops_with_visits: List[LocationWithVisits] = stops_with_visits
Example #25
    def get_all_hash(self, type):
        logger.debug("DbWrapperBase::get_all_hash called")
        query = ("SELECT id, hash, type, count, modify "
                 "FROM trshash "
                 "HAVING type = %s")
        vals = (str(type), )
        logger.debug(query)

        res = self.execute(query, vals)

        return res
Example #26
 def _check_coords_before_returning(self, lat, lng, origin):
     if self.init:
         logger.debug('Init Mode - coord is valid')
         return True
     stop = Location(lat, lng)
     logger.info('Checking Stop with ID {}', str(stop))
     if stop in self._coords_to_be_ignored:
         logger.info('Already tried this Stop and failed it')
         return False
     logger.info("DB knows nothing of this stop for {}, let's try and go there", origin)
     return True
Example #27
 def get_best_pokemon_spawns(self):
     logger.debug('Fetching best pokemon spawns from db')
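     # Perfect-IV (15/15/15) pokemon, newest first, limited to 300 rows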
     query = (
         "SELECT encounter_id, pokemon_id, unix_timestamp(last_modified), "
         "individual_attack, individual_defense, individual_stamina, cp_multiplier, cp "
         "FROM pokemon "
         "WHERE individual_attack = 15 and individual_defense = 15 and individual_stamina = 15 "
         "ORDER BY last_modified DESC LIMIT 300"
     )
     res = self._db_exec.execute(query)
     return res
Example #28
 async def __producer_handler(self, websocket_client_connection):
     while websocket_client_connection.open:
         # logger.debug("Connection still open, trying to send next message")
         # retrieve next message from queue to be sent, block if empty
         next = None
         while next is None and websocket_client_connection.open:
             logger.debug("Fetching next message to send")
             next = await self.__retrieve_next_send(websocket_client_connection)
             if next is None:
                 # logger.debug("next is None, stopping connection...")
                 return
             await self.__send_specific(websocket_client_connection, next.id, next.message)
Example #29
 def get_stop_quest(self):
     logger.debug('Fetching pokestop quest counts from db')
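     # Pokestop counts grouped by quest date; stops without a quest are counted as 'NO QUEST'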
     query = (
         "SELECT "
         "If(FROM_UNIXTIME(trs_quest.quest_timestamp, '%y-%m-%d') IS NULL, 'NO QUEST', "
         "FROM_UNIXTIME(trs_quest.quest_timestamp, '%y-%m-%d')) AS Quest, "
         "count(pokestop.pokestop_id) AS Count "
         "FROM pokestop LEFT JOIN trs_quest ON pokestop.pokestop_id = trs_quest.GUID "
         "GROUP BY FROM_UNIXTIME(trs_quest.quest_timestamp, '%y-%m-%d')"
     )
     res = self._db_exec.execute(query)
     return res
Example #30
    def stats_location_raw_parser(client_id: int, data, period):

        data_location_raw = []

        if 'location' in data:
            for loc_raw in data['location']:
                data_location_raw.append(loc_raw)

        logger.debug('Submit raw location stats for {} - Period: {} - Count: {}', str(client_id), str(period),
                     str(len(data_location_raw)))

        return data_location_raw
Example #31
 def get_shiny_stats(self):
     logger.debug('Fetching shiny pokemon stats from db')
     query = (
         "SELECT (select count(DISTINCT encounter_id) from pokemon inner join trs_stats_detect_raw on "
         "CAST(trs_stats_detect_raw.type_id as unsigned int)=pokemon.encounter_id where pokemon.pokemon_id=a.pokemon_id and "
         "trs_stats_detect_raw.worker=b.worker and pokemon.form=a.form), count(DISTINCT encounter_id), a.pokemon_id,"
         "b.worker, GROUP_CONCAT(DISTINCT encounter_id ORDER BY encounter_id DESC SEPARATOR '<br>'), a.form, b.timestamp_scan "
         "FROM pokemon a left join trs_stats_detect_raw b on a.encounter_id=CAST(b.type_id as unsigned int) where b.is_shiny=1 group by "
         "b.is_shiny, a.pokemon_id, a.form, b.worker order by b.timestamp_scan DESC "
     )
     res = self._db_exec.execute(query)
     return res
Example #32
 async def on_channel_delete(self, channel):
     logger.debug(
         'The "{}" channel has been removed on the server "{}"'
             .format(channel.name, channel.server.name)
     )
Example #33
 async def on_channel_update(self, before, after):
     logger.debug(
         'The "{}" channel has been modified on the server "{}"'
             .format(before.name, before.server.name)
     )
Example #34
 async def on_member_join(self, member):
     logger.debug(
         '{} ({}) has joined the server "{}"'
             .format(member.name, member.id, member.server.name)
     )
Example #35
 async def on_member_remove(self, member):
     logger.debug(
         '{} ({}) has left the server "{}"'
             .format(member.name, member.id, member.server.name)
     )