def __handle_proto_data_dict(self, origin: str, data: dict) -> None:
    """Filter a received proto payload and hand it to the mapper and the worker queue.

    Drops payloads without a usable method id, proto types that are not of
    interest, and (optionally) data captured before the receiver booted.
    """
    origin_logger = get_origin_logger(logger, origin=origin)
    proto_type = data.get("type", None)
    if not proto_type:
        # covers both a missing "type" key and an explicit 0
        origin_logger.warning(
            "Could not read method ID. Stopping processing of proto")
        return
    if proto_type not in {106, 102, 101, 104, 4, 156}:
        # trash protos - ignoring
        return
    timestamp: float = data.get("timestamp", int(time.time()))
    if self.__application_args.mitm_ignore_pre_boot is True \
            and timestamp < self.__mitmreceiver_startup_time:
        # data captured before this receiver started - skip
        return
    location_of_data: Location = Location(data.get("lat", 0.0),
                                          data.get("lng", 0.0))
    out_of_bounds = (location_of_data.lat > 90 or location_of_data.lat < -90
                     or location_of_data.lng > 180 or location_of_data.lng < -180)
    if out_of_bounds:
        # invalid coordinates -> fall back to the 0/0 marker location
        location_of_data = Location(0, 0)
    self.__mitm_mapper.update_latest(origin,
                                     timestamp_received_raw=timestamp,
                                     timestamp_received_receiver=time.time(),
                                     key=proto_type,
                                     values_dict=data,
                                     location=location_of_data)
    origin_logger.debug2("Placing data received to data_queue")
    self._add_to_queue((timestamp, data, origin))
def __handle_proto_data_dict(self, origin: str, data: dict) -> None:
    """Sanitise a received proto payload and push it onto the data queue.

    Payloads without a usable method id are dropped; coordinates outside
    the valid lat/lng range are replaced with the 0/0 marker location.
    """
    origin_logger = get_origin_logger(logger, origin=origin)
    proto_type = data.get("type", None)
    if not proto_type:
        # covers both a missing "type" key and an explicit 0
        origin_logger.warning(
            "Could not read method ID. Stopping processing of proto")
        return
    timestamp: float = data.get("timestamp", int(time.time()))
    location_of_data: Location = Location(data.get("lat", 0.0),
                                          data.get("lng", 0.0))
    if (location_of_data.lat > 90 or location_of_data.lat < -90
            or location_of_data.lng > 180 or location_of_data.lng < -180):
        origin_logger.warning("Received invalid location in data: {}",
                              location_of_data)
        location_of_data = Location(0, 0)
    self.__mitm_mapper.update_latest(origin,
                                     timestamp_received_raw=timestamp,
                                     timestamp_received_receiver=time.time(),
                                     key=proto_type,
                                     values_dict=data,
                                     location=location_of_data)
    origin_logger.debug2("Placing data received to data_queue")
    self._data_queue.put((timestamp, data, origin))
def _worker_changed_update_routepools(self):
    """Rebuild each origin's subroute of unvisited stops (level mode).

    Returns False when no workers are registered; otherwise True if at
    least one origin received a non-empty subroute.
    """
    # BUGFIX: the original used "with self._manager_mutex and self._workers_registered_mutex:".
    # "a and b" evaluates to "b" when "a" is truthy, so only the second lock was
    # ever acquired. Use the comma form to enter both context managers.
    with self._manager_mutex, self._workers_registered_mutex:
        self.logger.info(
            "Updating all routepools in level mode for {} origins",
            len(self._routepool))
        if len(self._workers_registered) == 0:
            self.logger.info(
                "No registered workers, aborting __worker_changed_update_routepools..."
            )
            return False
        any_at_all = False
        for origin in self._routepool:
            origin_local_list = []
            entry: RoutePoolEntry = self._routepool[origin]
            if len(entry.queue) > 0:
                self.logger.debug(
                    "origin {} already has a queue, do not touch...", origin)
                continue
            unvisited_stops = self.db_wrapper.stops_from_db_unvisited(
                self.geofence_helper, origin)
            if len(unvisited_stops) == 0:
                self.logger.info(
                    "There are no unvisited stops left in DB for {} - nothing more to do!",
                    origin)
                continue
            if len(self._route) > 0:
                self.logger.info("Making a subroute of unvisited stops..")
                for coord in self._route:
                    coord_location = Location(coord.lat, coord.lng)
                    if coord_location in self._coords_to_be_ignored:
                        self.logger.info(
                            'Already tried this Stop but it failed spinnable test, skip it'
                        )
                        continue
                    if coord_location in unvisited_stops:
                        origin_local_list.append(coord_location)
            if len(origin_local_list) == 0:
                self.logger.info(
                    "None of the stops in original route was unvisited, recalc a route"
                )
                new_route = self._local_recalc_subroute(unvisited_stops)
                for coord in new_route:
                    origin_local_list.append(
                        Location(coord["lat"], coord["lng"]))
            # subroute is all stops unvisited
            self.logger.info(
                "Origin {} has {} unvisited stops for this route", origin,
                len(origin_local_list))
            entry.subroute = origin_local_list
            # let's clean the queue just to make sure
            entry.queue.clear()
            # plain loop instead of a side-effect list comprehension
            for location in origin_local_list:
                entry.queue.append(location)
            any_at_all = len(origin_local_list) > 0 or any_at_all
        return any_at_all
def _worker_changed_update_routepools(self):
    """Recompute each origin's subroute from the nearest unvisited stops.

    Returns False when no workers are registered; otherwise True if at
    least one origin received a non-empty subroute.
    """
    with self._manager_mutex, self._workers_registered_mutex:
        self.logger.info("Updating all routepools in level mode for {} origins",
                         len(self._routepool))
        if len(self._workers_registered) == 0:
            self.logger.info("No registered workers, aborting __worker_changed_update_routepools...")
            return False
        any_at_all = False
        for origin in self._routepool:
            origin_local_list = []
            entry: RoutePoolEntry = self._routepool[origin]
            if len(entry.queue) > 0:
                self.logger.debug("origin {} already has a queue, do not touch...", origin)
                continue
            current_worker_pos = entry.current_pos
            unvisited_stops = self.db_wrapper.get_nearest_stops_from_position(
                geofence_helper=self.geofence_helper,
                origin=origin,
                lat=current_worker_pos.lat,
                lon=current_worker_pos.lng,
                limit=30,
                ignore_spinned=self.settings.get("ignore_spinned_stops", True),
                maxdistance=5)
            if len(unvisited_stops) == 0:
                self.logger.info("There are no unvisited stops left in DB for {} - nothing more to do!",
                                 origin)
                continue
            for coord in unvisited_stops:
                coord_location = Location(coord.lat, coord.lng)
                if coord_location in self._coords_to_be_ignored:
                    self.logger.info('Already tried this Stop but it failed spinnable test, skip it')
                    continue
                origin_local_list.append(coord_location)
            if len(unvisited_stops) > 0:
                # NOTE(review): this always triggers (we continue above when empty),
                # discarding the filtered list built just before - kept as-is.
                self.logger.info("Recalc a route")
                new_route = self._local_recalc_subroute(unvisited_stops)
                origin_local_list.clear()
                for coord in new_route:
                    origin_local_list.append(Location(coord["lat"], coord["lng"]))
            # subroute is all stops unvisited
            self.logger.info("Origin {} has {} unvisited stops for this route",
                             origin, len(origin_local_list))
            entry.subroute = origin_local_list
            # let's clean the queue just to make sure
            entry.queue.clear()
            for location in origin_local_list:
                entry.queue.append(location)
            any_at_all = len(origin_local_list) > 0 or any_at_all
            # saving new startposition of walker in db
            if len(entry.queue) > 0:
                # BUGFIX: guard against an empty queue - entry.queue[0] raised
                # IndexError when the recalculated route came back empty
                newstartposition: Location = entry.queue[0]
                self.db_wrapper.save_last_walker_position(origin=origin,
                                                          lat=newstartposition.lat,
                                                          lng=newstartposition.lng)
        # BUGFIX: return the computed flag (was dead code - the function always
        # returned True), matching the sibling level-mode implementation
        return any_at_all
def get_route(self):
    """Collect the current route of every routemanager (plus per-worker
    subroutes and any not-yet-applied DB route) and return them as JSON.

    Returns a jsonify'd list of routepool route dicts, one per routecalc id.
    """
    routeinfo_by_id = {}
    routemanager_names = self._mapping_manager.get_all_routemanager_names()
    for routemanager in routemanager_names:
        (memory_route,
         workers) = self._mapping_manager.routemanager_get_current_route(
             routemanager)
        if memory_route is None:
            # routemanager has no route in memory - nothing to show
            continue
        mode = self._mapping_manager.routemanager_get_mode(routemanager)
        name = self._mapping_manager.routemanager_get_name(routemanager)
        routecalc_id = self._mapping_manager.routemanager_get_routecalc_id(
            routemanager)
        # keep a reference ("routeinfo") so subroutes can be appended below
        routeinfo_by_id[routecalc_id] = routeinfo = {
            "id": routecalc_id,
            "route": memory_route,
            "name": name,
            "mode": mode,
            "subroutes": []
        }
        if len(workers) > 1:
            # with multiple workers, expose each worker's slice as a subroute
            for worker, worker_route in workers.items():
                routeinfo["subroutes"].append({
                    "id": "%d_sub_%s" % (routecalc_id, worker),
                    "route": worker_route,
                    "name": "%s - %s" % (routeinfo["name"], worker),
                    "tag": "subroute"
                })
    if len(routeinfo_by_id) > 0:
        # compare each in-memory route with what is stored in the DB; a
        # differing DB route is shown as an "(unapplied)" subroute
        routecalcs = self._data_manager.get_root_resource("routecalc")
        for routecalc_id, routecalc in routecalcs.items():
            if routecalc_id in routeinfo_by_id:
                routeinfo = routeinfo_by_id[routecalc_id]
                db_route = list(
                    map(lambda coord: Location(coord["lat"], coord["lng"]),
                        routecalc.get_saved_json_route()))
                if db_route != routeinfo["route"]:
                    routeinfo["subroutes"].append({
                        "id": "%d_unapplied" % routeinfo["id"],
                        "route": db_route,
                        "name": "%s (unapplied)" % routeinfo["name"],
                        "tag": "unapplied"
                    })
    return jsonify(
        list(
            map(lambda route: get_routepool_route(route),
                routeinfo_by_id.values())))
def send_gps(self):
    """MADmin endpoint: push GPS coordinates to a device via the websocket
    (optionally with an extra geofix sleeptime) and return a screenshot.

    Expects request args: origin, coords ("lat,lng"), optional adb, sleeptime.
    Returns 'Wrong Format!' when coords are missing or malformed.
    """
    origin = request.args.get('origin')
    devicemappings = self._mapping_manager.get_all_devicemappings()
    useadb = request.args.get('adb')
    if useadb is None:
        useadb = devicemappings.get(origin, {}).get('adb', False)
    coords_raw = request.args.get('coords')
    if coords_raw is None:
        # BUGFIX: a missing "coords" parameter previously raised
        # AttributeError (None.replace) -> HTTP 500 instead of this message
        return 'Wrong Format!'
    coords = coords_raw.replace(' ', '').split(',')
    sleeptime = request.args.get('sleeptime', "0")
    if len(coords) < 2:
        return 'Wrong Format!'
    self._logger.info('MADmin: Set GPS Coords {}, {} - WS Mode only! ({})',
                      str(coords[0]), str(coords[1]), str(origin))
    try:
        temp_comm = self._ws_server.get_origin_communicator(origin)
        temp_comm.set_location(Location(coords[0], coords[1]), 0)
        if int(sleeptime) > 0:
            self._logger.info("MADmin: Set additional sleeptime: {} ({})",
                              str(sleeptime), str(origin))
            self._ws_server.set_geofix_sleeptime_worker(origin, sleeptime)
    except Exception as e:
        # boundary handler: log and fall through to the screenshot anyway
        self._logger.exception(
            'MADmin: Exception occurred while set gps coords: {}.', e)
    # give the device a moment to apply the new location
    time.sleep(2)
    return self.take_screenshot(origin, useadb)
def get_middle_of_coord_list(list_of_coords):
    """Return the geographic midpoint of the given coordinates.

    Averages the coordinates as unit vectors in 3D cartesian space and
    converts the mean vector back to latitude/longitude.
    """
    if len(list_of_coords) == 1:
        return list_of_coords[0]
    sum_x = 0.0
    sum_y = 0.0
    sum_z = 0.0
    for coord in list_of_coords:
        # transform to radians...
        lat_rad = math.radians(coord.lat)
        lng_rad = math.radians(coord.lng)
        sum_x += math.cos(lat_rad) * math.cos(lng_rad)
        sum_y += math.cos(lat_rad) * math.sin(lng_rad)
        sum_z += math.sin(lat_rad)
    count = len(list_of_coords)
    avg_x = sum_x / count
    avg_y = sum_y / count
    avg_z = sum_z / count
    central_lng = math.atan2(avg_y, avg_x)
    hyp = math.sqrt(avg_x * avg_x + avg_y * avg_y)
    central_lat = math.atan2(avg_z, hyp)
    return Location(math.degrees(central_lat), math.degrees(central_lng))
def get_next_raid_hatches(self, delay_after_hatch, geofence_helper=None):
    """
    In order to build a priority queue, we need to be able to check for the next hatches of raid eggs
    The result may not be sorted by priority, to be done at a higher level!
    :param delay_after_hatch: seconds added to each hatch time to form the priority timestamp
    :param geofence_helper: optional include-fence filter for the returned coordinates
    :return: unsorted list of (hatch_timestamp + delay_after_hatch, Location) tuples
    """
    logger.debug("DbWrapper::get_next_raid_hatches called")
    # current UTC time formatted the way the DB stores raid times
    db_time_to_check = datetime.utcfromtimestamp(
        time.time()).strftime("%Y-%m-%d %H:%M:%S")
    # unhatched eggs: raid still running but no pokemon assigned yet
    query = (
        "SELECT start, latitude, longitude "
        "FROM raid "
        "LEFT JOIN gym ON raid.gym_id = gym.gym_id WHERE raid.end > %s AND raid.pokemon_id IS NULL"
    )
    vals = (db_time_to_check, )
    res = self.execute(query, vals)
    data = []
    for (start, latitude, longitude) in res:
        if latitude is None or longitude is None:
            logger.warning("lat or lng is none")
            continue
        elif geofence_helper and not geofence_helper.is_coord_inside_include_geofence(
                [latitude, longitude]):
            logger.debug(
                "Excluded hatch at {}, {} since the coordinate is not inside the given include fences",
                str(latitude), str(longitude))
            continue
        timestamp = self.__db_timestring_to_unix_timestamp(str(start))
        # priority moment = hatch time plus the configured delay
        data.append(
            (timestamp + delay_after_hatch, Location(latitude, longitude)))
    logger.debug("Latest Q: {}", str(data))
    return data
def stops_from_db(self, geofence_helper):
    """
    Retrieve all the pokestops valid within the area set by geofence_helper
    :return: list of Location objects (geofenced when a helper is given)
    """
    logger.debug("DbWrapper::stops_from_db called")
    min_lat, min_lon, max_lat, max_lon = geofence_helper.get_polygon_from_fence()
    # BUGFIX: query was built via str.format - use driver-side parameters
    # like the other DbWrapper queries to avoid SQL injection/quoting issues
    query = ("SELECT latitude, longitude "
             "FROM pokestop "
             "WHERE (latitude >= %s AND longitude >= %s "
             "AND latitude <= %s AND longitude <= %s) ")
    res = self.execute(query, (min_lat, min_lon, max_lat, max_lon))
    list_of_coords: List[Location] = [
        Location(latitude, longitude) for (latitude, longitude) in res
    ]
    if geofence_helper is not None:
        return geofence_helper.get_geofenced_coordinates(list_of_coords)
    return list_of_coords
def gyms_from_db(self, geofence_helper):
    """
    Retrieve all the gyms valid within the area set by geofence_helper
    :return: numpy array with coords
    """
    logger.debug("DbWrapper::gyms_from_db called")
    if geofence_helper is None:
        logger.error("No geofence_helper! Not fetching gyms.")
        return []
    logger.debug("Filtering with rectangle")
    rectangle = geofence_helper.get_polygon_from_fence()
    query = ("SELECT latitude, longitude "
             "FROM gym "
             "WHERE "
             "latitude >= %s AND longitude >= %s AND "
             "latitude <= %s AND longitude <= %s")
    rows = self.execute(query, rectangle)
    list_of_coords: List[Location] = [
        Location(latitude, longitude) for (latitude, longitude) in rows
    ]
    logger.debug(
        "Got {} coordinates in this rect (minLat, minLon, "
        "maxLat, maxLon): {}", len(list_of_coords), str(rectangle))
    return geofence_helper.get_geofenced_coordinates(list_of_coords)
def stops_from_db_unvisited(self, geofence_helper: GeofenceHelper, origin: str):
    """Retrieve pokestops in the fence rectangle not yet visited by origin.

    :return: list of Location objects (geofenced when a helper is given)
    """
    logger.debug("DbWrapper::stops_from_db_unvisited called")
    min_lat, min_lon, max_lat, max_lon = geofence_helper.get_polygon_from_fence()
    # BUGFIX: the origin string (and the bbox) were interpolated via
    # str.format - a classic SQL-injection vector. Use parameters instead.
    query = (
        "SELECT pokestop.latitude, pokestop.longitude "
        "FROM pokestop "
        "LEFT JOIN trs_visited ON (pokestop.pokestop_id = trs_visited.pokestop_id AND trs_visited.origin=%s) "
        "WHERE pokestop.latitude >= %s AND pokestop.longitude >= %s "
        "AND pokestop.latitude <= %s AND pokestop.longitude <= %s "
        "AND trs_visited.origin IS NULL")
    res = self.execute(query, (origin, min_lat, min_lon, max_lat, max_lon))
    unvisited: List[Location] = [
        Location(latitude, longitude) for (latitude, longitude) in res
    ]
    if geofence_helper is not None:
        return geofence_helper.get_geofenced_coordinates(unvisited)
    return unvisited
def stop_from_db_without_quests(self, geofence_helper):
    """Retrieve pokestops in the fence rectangle without a quest from today.

    :return: list of Location objects (geofenced when a helper is given)
    """
    logger.debug("DbWrapper::stop_from_db_without_quests called")
    min_lat, min_lon, max_lat, max_lon = geofence_helper.get_polygon_from_fence()
    # BUGFIX: query was built via str.format - use driver-side parameters.
    # Literal '%' in from_unixtime's format must be doubled ('%%') when
    # the query is executed with parameters.
    query = (
        "SELECT pokestop.latitude, pokestop.longitude "
        "FROM pokestop "
        "LEFT JOIN trs_quest ON pokestop.pokestop_id = trs_quest.GUID "
        "WHERE (pokestop.latitude >= %s AND pokestop.longitude >= %s "
        "AND pokestop.latitude <= %s AND pokestop.longitude <= %s) "
        "AND (DATE(from_unixtime(trs_quest.quest_timestamp,'%%Y-%%m-%%d')) <> CURDATE() "
        "OR trs_quest.GUID IS NULL)")
    res = self.execute(query, (min_lat, min_lon, max_lat, max_lon))
    list_of_coords: List[Location] = [
        Location(latitude, longitude) for (latitude, longitude) in res
    ]
    if geofence_helper is not None:
        return geofence_helper.get_geofenced_coordinates(list_of_coords)
    return list_of_coords
def get_detected_spawns(self, geofence_helper) -> List[Location]:
    """Retrieve all detected spawnpoints within the fence rectangle.

    :return: list of Location objects (geofenced when a helper is given)
    """
    logger.debug("DbWrapper::get_detected_spawns called")
    min_lat, min_lon, max_lat, max_lon = geofence_helper.get_polygon_from_fence()
    # BUGFIX: query was built via str.format - use driver-side parameters
    query = ("SELECT latitude, longitude "
             "FROM trs_spawn "
             "WHERE (latitude >= %s AND longitude >= %s "
             "AND latitude <= %s AND longitude <= %s) ")
    logger.debug("DbWrapper::get_detected_spawns executing select query")
    res = self.execute(query, (min_lat, min_lon, max_lat, max_lon))
    logger.debug("DbWrapper::get_detected_spawns result of query: {}",
                 str(res))
    list_of_coords: List[Location] = [
        Location(latitude, longitude) for (latitude, longitude) in res
    ]
    if geofence_helper is not None:
        logger.debug("DbWrapper::get_detected_spawns applying geofence")
        geofenced_coords = geofence_helper.get_geofenced_coordinates(
            list_of_coords)
        logger.debug(geofenced_coords)
        return geofenced_coords
    return list_of_coords
def get_undetected_spawns(self, geofence_helper):
    """Retrieve spawnpoints whose despawn time has not been calculated yet.

    :return: list of Location objects (geofenced when a helper is given)
    """
    logger.debug("DbWrapper::get_undetected_spawns called")
    query = ("SELECT latitude, longitude "
             "FROM trs_spawn "
             "WHERE calc_endminsec is NULL")
    logger.debug("DbWrapper::get_undetected_spawns executing select query")
    res = self.execute(query)
    logger.debug("DbWrapper::get_undetected_spawns result of query: {}",
                 str(res))
    list_of_coords: List[Location] = [
        Location(latitude, longitude) for (latitude, longitude) in res
    ]
    if geofence_helper is None:
        logger.debug(
            "DbWrapper::get_undetected_spawns converting to numpy")
        return list_of_coords
    logger.debug("DbWrapper::get_undetected_spawns applying geofence")
    geofenced_coords = geofence_helper.get_geofenced_coordinates(
        list_of_coords)
    logger.debug(geofenced_coords)
    return geofenced_coords
def get_middle_of_coord_list(list_of_coords):
    """Compute the 3D-averaged center of a list of lat/lng coordinates."""
    if len(list_of_coords) == 1:
        return list_of_coords[0]
    count = len(list_of_coords)
    # convert each coordinate to radians once
    radians_list = [(math.radians(coord.lat), math.radians(coord.lng))
                    for coord in list_of_coords]
    x = sum(math.cos(lat) * math.cos(lng) for lat, lng in radians_list) / count
    y = sum(math.cos(lat) * math.sin(lng) for lat, lng in radians_list) / count
    z = sum(math.sin(lat) for lat, _ in radians_list) / count
    central_lng = math.atan2(y, x)
    central_lat = math.atan2(z, math.sqrt(x * x + y * y))
    return Location(math.degrees(central_lat), math.degrees(central_lng))
def get_position(self):
    """Return the last known position of every mapped device as JSON.

    Devices without a stored last_location default to 0.0/0.0.
    """
    positions = []
    devicemappings = self._mapping_manager.get_all_devicemappings()
    for name, values in devicemappings.items():
        # BUGFIX: guard against a missing/None "settings" entry - the old
        # unguarded .get("settings").get(...) chain raised AttributeError;
        # also look the location up only once instead of twice
        settings = values.get("settings") or {}
        last_location = settings.get("last_location", Location(0.0, 0.0))
        worker = {
            "name": str(name),
            "lat": getCoordFloat(last_location.lat),
            "lon": getCoordFloat(last_location.lng)
        }
        positions.append(worker)
    return jsonify(positions)
def _generate_locations(distance: float, geofence_helper: GeofenceHelper):
    """Generate a hex-star grid of scan positions covering the geofence.

    Rings of positions spaced `distance` apart are computed (in parallel,
    one ring per pool task) around the fence's center until the farthest
    fence corner is covered, then filtered to the fence and row-ordered.
    """
    south, east, north, west = geofence_helper.get_polygon_from_fence()
    corners = [
        Location(south, east),
        Location(south, west),
        Location(north, east),
        Location(north, west)
    ]
    # get the center
    center = get_middle_of_coord_list(corners)
    # get the farthest to the center...
    farthest_dist = 0
    for corner in corners:
        dist_temp = get_distance_of_two_points_in_meters(
            center.lat, center.lng, corner.lat, corner.lng)
        if dist_temp > farthest_dist:
            farthest_dist = dist_temp
    # calculate step_limit, round up to reduce risk of losing stuff
    step_limit = math.ceil(farthest_dist / distance)
    # This will loop thorugh all the rings in the hex from the centre
    # moving outwards
    logger.info("Calculating positions for init scan")
    num_cores = multiprocessing.cpu_count()
    with multiprocessing.Pool(processes=num_cores) as pool:
        # pool.apply blocks per ring; one task per ring index
        temp = [pool.apply(S2Helper._generate_star_locs, args=(
            center, distance, i)) for i in range(1, step_limit)]
    # flatten the per-ring lists and add the center itself
    results = [item for sublist in temp for item in sublist]
    results.append(Location(center.lat, center.lng))
    logger.info("Filtering positions for init scan")
    # Geofence results.
    if geofence_helper is not None and geofence_helper.is_enabled():
        results = geofence_helper.get_geofenced_coordinates(results)
        if not results:
            logger.error('No cells regarded as valid for desired scan area. Check your provided geofences. '
                         'Aborting.')
        else:
            logger.info("Ordering location")
            results = S2Helper.order_location_list_rows(results)
    return results
def get_new_coords(init_loc, distance, bearing):
    """
    Given an initial lat/lng, a distance(in kms), and a bearing (degrees),
    this will calculate the resulting lat/lng coordinates.
    """
    # TODO: check for implementation with gpxdata
    origin_point = gpxdata.TrackPoint(init_loc.lat, init_loc.lng)
    target = origin_point + gpxdata.CourseDistance(bearing, distance)
    return Location(target.lat, target.lon)
def _check_coords_before_returning(self, lat, lng, origin):
    """Decide whether a stop coordinate may be handed out to a worker.

    In init mode every coordinate is valid; otherwise coordinates that
    previously failed the spinnable test are rejected.
    """
    if self.init:
        self.logger.debug('Init Mode - coord is valid')
        return True
    stop = Location(lat, lng)
    self.logger.info('Checking Stop with ID {}', str(stop))
    if stop not in self._coords_to_be_ignored:
        self.logger.info('DB knows nothing of this stop for {} lets try and go there', origin)
        return True
    self.logger.info('Already tried this Stop and failed it')
    return False
def retrieve_next_spawns(self, geofence_helper):
    """
    Retrieve the spawnpoints with their respective unixtimestamp that are due
    within the current hour window.
    :return: list of (timestamp, Location) tuples
    """
    logger.debug("DbWrapper::retrieve_next_spawns called")
    current_time_of_day = datetime.now().replace(microsecond=0)
    min_lat, min_lon, max_lat, max_lon = geofence_helper.get_polygon_from_fence()
    # BUGFIX: query was built via str.format - use driver-side parameters
    query = (
        "SELECT latitude, longitude, spawndef, calc_endminsec "
        "FROM trs_spawn "
        "WHERE calc_endminsec IS NOT NULL "
        "AND (latitude >= %s AND longitude >= %s AND latitude <= %s AND longitude <= %s) "
    )
    res = self.execute(query, (min_lat, min_lon, max_lat, max_lon))
    next_up = []
    current_time = time.time()
    for (latitude, longitude, spawndef, calc_endminsec) in res:
        if geofence_helper and not geofence_helper.is_coord_inside_include_geofence(
                [latitude, longitude]):
            continue
        # calc_endminsec is stored as "MM:SS" of the despawn moment
        endminsec_split = calc_endminsec.split(":")
        minutes = int(endminsec_split[0])
        seconds = int(endminsec_split[1])
        temp_date = current_time_of_day.replace(minute=minutes,
                                                second=seconds)
        if minutes < datetime.now().minute:
            # despawn minute already passed this hour -> next occurrence
            temp_date = temp_date + timedelta(hours=1)
        if temp_date < current_time_of_day:
            # spawn has already happened, we should've added it in the past, let's move on
            # TODO: consider crosschecking against current mons...
            continue
        # spawndef 15 means a 60 minute spawn, everything else 30 minutes
        spawn_duration_minutes = 60 if spawndef == 15 else 30
        timestamp = time.mktime(temp_date.timetuple()) - \
            spawn_duration_minutes * 60
        # check if we calculated a time in the past, if so, add an hour to it...
        timestamp = timestamp + 60 * 60 if timestamp < current_time else timestamp
        next_up.append((timestamp, Location(latitude, longitude)))
    return next_up
def _move_to_location(self):
    """Move the device to self.current_location, either by teleport or walk.

    Teleports when speed is 0, the distance exceeds max_distance, or there
    is no previous location yet; otherwise walks at the configured speed.
    Applies the configured post-move delay, persists the new location and
    returns (timestamp_to_use, True) where the timestamp marks the moment
    from which fresh data is awaited.
    """
    distance, routemanager_settings = self._get_route_manager_settings_and_distance_to_current_location(
    )
    if not self._mapping_manager.routemanager_get_init(
            self._routemanager_name):
        speed = routemanager_settings.get("speed", 0)
        max_distance = routemanager_settings.get("max_distance", None)
    else:
        # init mode uses fixed movement parameters
        speed = int(25)
        max_distance = int(200)
    if (speed == 0 or (max_distance and 0 < max_distance < distance)
            or (self.last_location.lat == 0.0
                and self.last_location.lng == 0.0)):
        self.logger.debug("main: Teleporting...")
        self._transporttype = 0
        self._communicator.set_location(
            Location(self.current_location.lat, self.current_location.lng),
            0)
        # the time we will take as a starting point to wait for data...
        timestamp_to_use = math.floor(time.time())
        delay_used = self.get_devicesettings_value('post_teleport_delay', 0)
        # Test for cooldown / teleported distance TODO: check this block...
        if self.get_devicesettings_value('cool_down_sleep', False):
            # longer teleports need a longer cooldown
            if distance > 10000:
                delay_used = 15
            elif distance > 5000:
                delay_used = 10
            elif distance > 2500:
                delay_used = 8
            self.logger.debug(
                "Need more sleep after Teleport: {} seconds!", delay_used)
        walk_distance_post_teleport = self.get_devicesettings_value(
            'walk_after_teleport_distance', 0)
        if 0 < walk_distance_post_teleport < distance:
            # short walk after a teleport (presumably to mask the jump)
            self._walk_after_teleport(walk_distance_post_teleport)
    else:
        self.logger.info("main: Walking...")
        timestamp_to_use = self._walk_to_location(speed)
        delay_used = self.get_devicesettings_value('post_walk_delay', 0)
    self.logger.debug2("Sleeping for {}s", delay_used)
    time.sleep(float(delay_used))
    # persist and adopt the new position
    self.set_devicesettings_value("last_location", self.current_location)
    self.last_location = self.current_location
    self._waittime_without_delays = time.time()
    return timestamp_to_use, True
def get_less_coords(self, np_coords: List[Tuple[str, str]], max_radius: int,
                    max_coords_within_radius: int, use_s2: bool = False,
                    s2_level: int = 15):
    """Cluster the given coordinates and return one representative per cluster."""
    # attach a dummy timestamp of 0 to each coordinate for the clusterer
    timestamped_coords = [(0, Location(pair[0].item(), pair[1].item()))
                          for pair in np_coords]
    clustering_helper = ClusteringHelper(
        max_radius=max_radius,
        max_count_per_circle=max_coords_within_radius,
        max_timedelta_seconds=0,
        use_s2=use_s2,
        s2_level=s2_level)
    clustered_events = clustering_helper.get_clustered(timestamped_coords)
    # keep only the Location of each clustered event
    return [event[1] for event in clustered_events]
async def __update_settings_of_origin(self, origin: str,
                                      walker_configuration: WalkerConfiguration):
    """Advance the walker area index for origin and reset per-walk device settings."""
    next_index = walker_configuration.walker_index + 1
    self.__mapping_manager.set_devicesetting_value_of(origin,
                                                      'walker_area_index',
                                                      next_index)
    self.__mapping_manager.set_devicesetting_value_of(origin, 'finished',
                                                      False)
    last_allowed_index = walker_configuration.total_walkers_allowed_for_assigned_area - 1
    if walker_configuration.walker_index >= last_allowed_index:
        # wrap around once the last walker of the assigned area was used
        self.__mapping_manager.set_devicesetting_value_of(origin,
                                                          'walker_area_index',
                                                          0)
    if "last_location" not in self.__mapping_manager.get_devicesettings_of(origin):
        # TODO: I hope this does not cause issues...
        self.__mapping_manager.set_devicesetting_value_of(origin,
                                                          "last_location",
                                                          Location(0.0, 0.0))
def get_to_be_encountered(self, geofence_helper, min_time_left_seconds,
                          eligible_mon_ids: Optional[List[int]]):
    """Fetch mons without IVs that stay visible long enough to encounter.

    The result is ordered by the priority given by eligible_mon_ids and
    consists of (index, Location, encounter_id) tuples.
    """
    if min_time_left_seconds is None or eligible_mon_ids is None:
        logger.warning(
            "DbWrapper::get_to_be_encountered: Not returning any encounters since no time left or "
            "eligible mon IDs specified")
        return []
    logger.debug("Getting mons to be encountered")
    query = (
        "SELECT latitude, longitude, encounter_id, spawnpoint_id, pokemon_id, "
        "TIMESTAMPDIFF(SECOND, UTC_TIMESTAMP(), disappear_time) AS expire "
        "FROM pokemon "
        "WHERE individual_attack IS NULL AND individual_defense IS NULL AND individual_stamina IS NULL "
        "AND encounter_id != 0 "
        "and (disappear_time BETWEEN DATE_ADD(UTC_TIMESTAMP(), INTERVAL %s SECOND) "
        "and DATE_ADD(UTC_TIMESTAMP(), INTERVAL 60 MINUTE))"
        "ORDER BY expire ASC")
    results = self.execute(query, (int(min_time_left_seconds), ), commit=False)
    next_to_encounter = []
    for latitude, longitude, encounter_id, spawnpoint_id, pokemon_id, expire in results:
        # guard-style filtering instead of the original elif chain
        if pokemon_id not in eligible_mon_ids:
            continue
        if latitude is None or longitude is None:
            logger.warning("lat or lng is none")
            continue
        if geofence_helper and not geofence_helper.is_coord_inside_include_geofence(
                [latitude, longitude]):
            logger.debug(
                "Excluded encounter at {}, {} since the coordinate is not inside the given include fences",
                str(latitude), str(longitude))
            continue
        next_to_encounter.append(
            (pokemon_id, Location(latitude, longitude), encounter_id))
    # now filter by the order of eligible_mon_ids
    to_be_encountered = []
    prio_index = 0
    for mon_prio in eligible_mon_ids:
        for candidate in next_to_encounter:
            if candidate[0] == mon_prio:
                to_be_encountered.append(
                    (prio_index, candidate[1], candidate[2]))
                prio_index += 1
    return to_be_encountered
def __handle_proto_data_dict(self, origin: str, data: dict) -> None:
    """Validate an incoming proto dict and forward it to the mapper and queue.

    Payloads without a usable method id are dropped.
    """
    # renamed from "type" - do not shadow the builtin
    proto_type = data.get("type", None)
    if proto_type is None or proto_type == 0:
        logger.warning(
            "Could not read method ID. Stopping processing of proto")
        return
    timestamp: float = data.get("timestamp", int(time.time()))
    location_of_data: Location = Location(data.get("lat", 0.0),
                                          data.get("lng", 0.0))
    self.__mitm_mapper.update_latest(
        origin,
        timestamp_received_raw=timestamp,
        timestamp_received_receiver=time.time(),
        key=proto_type,
        values_dict=data,
        location=location_of_data)
    # lazy formatting: let the logger interpolate, matching the style used
    # by the sibling handlers in this file
    logger.debug3("Placing data received by {} to data_queue", origin)
    self._data_queue.put((timestamp, data, origin))
def getLessCoords(self, npCoordinates, maxRadius, maxCountPerCircle,
                  useS2: bool = False, S2level: int = 15):
    """Reduce the given coordinates to one representative per cluster."""
    # pair every coordinate with a dummy timestamp of 0 for the clusterer
    timestamped = [(0, Location(pair[0].item(), pair[1].item()))
                   for pair in npCoordinates]
    clustering_helper = ClusteringHelper(max_radius=maxRadius,
                                         max_count_per_circle=maxCountPerCircle,
                                         max_timedelta_seconds=0,
                                         useS2=useS2,
                                         S2level=S2level)
    clustered_events = clustering_helper.get_clustered(timestamped)
    # keep only the Location part of each clustered event
    return [event[1] for event in clustered_events]
def _move_to_location(self):
    """Move the device to self.current_location by teleport or walking.

    Teleports when speed is 0, the distance exceeds max_distance, or no
    previous location exists; otherwise walks via the communicator.
    Sleeps the configured post-move delay, persists the new location and
    returns (cur_time, True), where cur_time is the moment from which
    fresh data is awaited.
    Raises InternalStopWorkerException when the routemanager vanished or
    the worker was asked to stop.
    """
    if not self._mapping_manager.routemanager_present(self._routemanager_name) \
            or self._stop_worker_event.is_set():
        raise InternalStopWorkerException
    routemanager_settings = self._mapping_manager.routemanager_get_settings(
        self._routemanager_name)
    # get the distance from our current position (last) to the next gym (cur)
    distance = get_distance_of_two_points_in_meters(
        float(self.last_location.lat), float(self.last_location.lng),
        float(self.current_location.lat), float(self.current_location.lng))
    logger.debug('Moving {} meters to the next position',
                 round(distance, 2))
    if not self._mapping_manager.routemanager_get_init(
            self._routemanager_name):
        speed = routemanager_settings.get("speed", 0)
        max_distance = routemanager_settings.get("max_distance", None)
    else:
        # init mode uses fixed movement parameters
        speed = int(25)
        max_distance = int(200)
    if (speed == 0 or (max_distance and 0 < max_distance < distance)
            or (self.last_location.lat == 0.0
                and self.last_location.lng == 0.0)):
        logger.debug("main: Teleporting...")
        self._transporttype = 0
        self._communicator.set_location(
            Location(self.current_location.lat, self.current_location.lng),
            0)
        # the time we will take as a starting point to wait for data...
        cur_time = math.floor(time.time())
        delay_used = self.get_devicesettings_value('post_teleport_delay', 7)
        # Test for cooldown / teleported distance TODO: check this block...
        if self.get_devicesettings_value('cool_down_sleep', False):
            # longer teleports need a longer cooldown
            if distance > 10000:
                delay_used = 15
            elif distance > 5000:
                delay_used = 10
            elif distance > 2500:
                delay_used = 8
            logger.debug("Need more sleep after Teleport: {} seconds!",
                         str(delay_used))
        walk_distance_post_teleport = self.get_devicesettings_value(
            'walk_after_teleport_distance', 0)
        if 0 < walk_distance_post_teleport < distance:
            # short walk out and back after a teleport
            # TODO: actually use to_walk for distance
            lat_offset, lng_offset = get_lat_lng_offsets_by_distance(
                walk_distance_post_teleport)
            to_walk = get_distance_of_two_points_in_meters(
                float(self.current_location.lat),
                float(self.current_location.lng),
                float(self.current_location.lat) + lat_offset,
                float(self.current_location.lng) + lng_offset)
            logger.info("Walking roughly: {}", str(to_walk))
            time.sleep(0.3)
            self._communicator.walk_from_to(
                self.current_location,
                Location(self.current_location.lat + lat_offset,
                         self.current_location.lng + lng_offset), 11)
            logger.debug("Walking back")
            time.sleep(0.3)
            self._communicator.walk_from_to(
                Location(self.current_location.lat + lat_offset,
                         self.current_location.lng + lng_offset),
                self.current_location, 11)
            logger.debug("Done walking")
            time.sleep(1)
    else:
        logger.info("main: Walking...")
        self._transporttype = 1
        self._communicator.walk_from_to(self.last_location,
                                        self.current_location, speed)
        # the time we will take as a starting point to wait for data...
        cur_time = math.floor(time.time())
        logger.debug2("Done walking, fetching time to sleep")
        delay_used = self.get_devicesettings_value('post_walk_delay', 7)
    logger.debug2("Sleeping for {}s".format(str(delay_used)))
    time.sleep(float(delay_used))
    # persist and adopt the new position
    self.set_devicesettings_value("last_location", self.current_location)
    self.last_location = self.current_location
    self._waittime_without_delays = time.time()
    return cur_time, True
def __init__(self, args, dev_id, origin, last_known_state,
             communicator: AbstractCommunicator,
             mapping_manager: MappingManager, area_id: int,
             routemanager_name: str, db_wrapper: DbWrapper,
             pogo_window_manager: PogoWindows, walker=None, event=None):
    """Initialize the worker for a single device/origin.

    Wires up the communicator, route manager lookups, device settings and
    screen matching; restores the last known location from device settings
    unless the previous run used a MITM mode (where waiting based on the
    stale position would be useless).

    Args:
        args: parsed application arguments.
        dev_id: numeric device id.
        origin: origin (device) name used for logging and mappings.
        last_known_state: state carried over from a previous worker.
        communicator: channel to the device.
        mapping_manager: access to route manager / device settings.
        area_id: id of the area this worker scans.
        routemanager_name: name of the route manager to pull locations from.
        db_wrapper: database access wrapper.
        pogo_window_manager: screen/window analysis helper.
        walker: optional walker (schedule) configuration.
        event: optional event configuration.
    """
    AbstractWorker.__init__(self, origin=origin, communicator=communicator)
    self._mapping_manager: MappingManager = mapping_manager
    self._routemanager_name: str = routemanager_name
    self._area_id = area_id
    self._dev_id: int = dev_id
    self._event = event
    self._origin: str = origin
    self._applicationArgs = args
    self._last_known_state = last_known_state
    self._work_mutex = Lock()
    # Asyncio loop bookkeeping (loop runs in a dedicated thread).
    self.loop = None
    self.loop_started = Event()
    self.loop_tid = None
    self._async_io_looper_thread = None
    self._location_count = 0
    self._init: bool = self._mapping_manager.routemanager_get_init(
        self._routemanager_name)
    self._walker = walker
    self._lastScreenshotTaken = 0
    self._stop_worker_event = Event()
    self._db_wrapper = db_wrapper
    self._resocalc = Resocalculator
    self._screen_x = 0
    self._screen_y = 0
    self._geofix_sleeptime = 0
    self._pogoWindowManager = pogo_window_manager
    self._waittime_without_delays = 0
    self._transporttype = 0
    # Counters used by health checks / login handling.
    self._not_injected_count: int = 0
    self._same_screen_count: int = 0
    self._last_screen_type: ScreenType = ScreenType.UNDEFINED
    self._loginerrorcounter: int = 0
    self._mode = self._mapping_manager.routemanager_get_mode(
        self._routemanager_name)
    self._levelmode = self._mapping_manager.routemanager_get_level(
        self._routemanager_name)
    self._geofencehelper = self._mapping_manager.routemanager_get_geofence_helper(
        self._routemanager_name)
    self.current_location = Location(0.0, 0.0)
    # Restore the last known position from device settings, if any.
    self.last_location = self.get_devicesettings_value(
        "last_location", None)
    if self.last_location is None:
        self.last_location = Location(0.0, 0.0)
    if self.get_devicesettings_value('last_mode', None) is not None and \
            self.get_devicesettings_value('last_mode') in ("raids_mitm", "mon_mitm", "iv_mitm"):
        # Reset last_location - no useless waiting delays (otherwise stop mode)
        self.last_location = Location(0.0, 0.0)
    self.set_devicesettings_value(
        "last_mode",
        self._mapping_manager.routemanager_get_mode(
            self._routemanager_name))
    self.workerstart = None
    self._WordToScreenMatching = WordToScreenMatching(
        self._communicator, self._pogoWindowManager, self._origin,
        self._resocalc, mapping_manager, self._applicationArgs)
def _main_work_thread(self):
    """Main loop of the worker thread.

    Runs pre-work initialization, then repeatedly: checks the walker
    schedule and device health, grabs the next location from the route
    manager, validates and moves to it, and runs the post-move routine.
    Any of the websocket/stop exceptions ends the loop; cleanup always
    runs before returning.
    """
    # TODO: signal websocketserver the removal
    try:
        self._internal_pre_work()
    except (InternalStopWorkerException, WebsocketWorkerRemovedException,
            WebsocketWorkerTimeoutException,
            WebsocketWorkerConnectionClosedException):
        self.logger.error(
            "Failed initializing worker, connection terminated exceptionally"
        )
        self._internal_cleanup()
        return
    if not self.check_max_walkers_reached():
        self.logger.warning(
            'Max. Walkers in Area {} - closing connections',
            self._mapping_manager.routemanager_get_name(
                self._routemanager_name))
        self.set_devicesettings_value('finished', True)
        self._internal_cleanup()
        return
    # Note: is_set() instead of the deprecated camelCase isSet().
    while not self._stop_worker_event.is_set():
        try:
            # TODO: consider getting results of health checks and aborting the entire worker?
            walkercheck = self.check_walker()
            if not walkercheck:
                self.set_devicesettings_value('finished', True)
                break
        except (InternalStopWorkerException, WebsocketWorkerRemovedException,
                WebsocketWorkerTimeoutException,
                WebsocketWorkerConnectionClosedException):
            self.logger.warning("Worker killed by walker settings")
            break
        try:
            # TODO: consider getting results of health checks and aborting the entire worker?
            self._internal_health_check()
            self._health_check()
        except (InternalStopWorkerException, WebsocketWorkerRemovedException,
                WebsocketWorkerTimeoutException,
                WebsocketWorkerConnectionClosedException):
            self.logger.error(
                "Websocket connection lost while running healthchecks, connection terminated "
                "exceptionally")
            break
        try:
            settings = self._internal_grab_next_location()
            if settings is None:
                continue
        except (InternalStopWorkerException, WebsocketWorkerRemovedException,
                WebsocketWorkerTimeoutException,
                WebsocketWorkerConnectionClosedException):
            self.logger.warning(
                "Worker does not support mode that's to be run, connection terminated "
                "exceptionally")
            break
        try:
            self.logger.debug('Checking if new location is valid')
            valid = self._check_location_is_valid()
            if not valid:
                break
        except (InternalStopWorkerException, WebsocketWorkerRemovedException,
                WebsocketWorkerTimeoutException,
                WebsocketWorkerConnectionClosedException):
            self.logger.warning("Worker received non valid coords!")
            break
        try:
            self._pre_location_update()
        except (InternalStopWorkerException, WebsocketWorkerRemovedException,
                WebsocketWorkerTimeoutException,
                WebsocketWorkerConnectionClosedException):
            self.logger.warning(
                "Worker stopping because of stop signal in pre_location_update, connection "
                "terminated exceptionally")
            break
        try:
            self.logger.debug2(
                'LastLat: {}, LastLng: {}, CurLat: {}, CurLng: {}',
                self.get_devicesettings_value("last_location",
                                              Location(0, 0)).lat,
                self.get_devicesettings_value("last_location",
                                              Location(0, 0)).lng,
                self.current_location.lat, self.current_location.lng)
            time_snapshot, process_location = self._move_to_location()
        except (InternalStopWorkerException, WebsocketWorkerRemovedException,
                WebsocketWorkerTimeoutException,
                WebsocketWorkerConnectionClosedException):
            self.logger.warning(
                "Worker failed moving to new location, stopping worker, connection terminated "
                "exceptionally")
            break
        if process_location:
            self._add_task_to_loop(self._update_position_file())
            self._location_count += 1
            if self._applicationArgs.last_scanned:
                self.logger.debug(
                    "Setting new 'scannedlocation' in Database")
                self._add_task_to_loop(
                    self.update_scanned_location(self.current_location.lat,
                                                 self.current_location.lng,
                                                 time_snapshot))
            try:
                self._post_move_location_routine(time_snapshot)
            except (InternalStopWorkerException,
                    WebsocketWorkerRemovedException,
                    WebsocketWorkerTimeoutException,
                    WebsocketWorkerConnectionClosedException):
                self.logger.warning(
                    "Worker failed running post_move_location_routine, stopping worker"
                )
                break
            self.logger.info("Worker finished iteration, continuing work")
    self._internal_cleanup()
def _move_to_location(self):
    """Move the device to ``self.current_location`` and apply cooldown.

    Teleports or walks depending on route-manager settings, then computes
    the delay to wait (teleport cooldown, post-walk delay, time already
    elapsed since the last action, account rotation) and sleeps it off in
    1-second steps so a stop signal is noticed promptly.

    Returns:
        tuple: ``(cur_time, True)`` where ``cur_time`` is the epoch second
        taken right after the move.

    Raises:
        InternalStopWorkerException: if the route manager is gone or the
            worker is stopped while sleeping.
    """
    distance, routemanager_settings = self._get_route_manager_settings_and_distance_to_current_location()
    self.logger.debug("Getting time")
    speed = routemanager_settings.get("speed", 0)
    max_distance = routemanager_settings.get("max_distance", None)
    if (speed == 0 or (max_distance and 0 < max_distance < distance)
            or (self.last_location.lat == 0.0 and self.last_location.lng == 0.0)):
        # Teleport: walking disabled, leg too long, or no previous position.
        self.logger.debug("main: Teleporting...")
        self._transporttype = 0
        self._communicator.set_location(
            Location(self.current_location.lat, self.current_location.lng), 0)
        # the time we will take as a starting point to wait for data...
        cur_time = math.floor(time.time())
        delay_used = self.get_devicesettings_value('post_teleport_delay', 0)
        speed = 16.67  # Speed can be 60 km/h up to distances of 3km
        if self.last_location.lat == 0.0 and self.last_location.lng == 0.0:
            self.logger.info('Starting fresh round - using lower delay')
        else:
            delay_used = calculate_cooldown(distance, speed)
        self.logger.debug(
            "Need more sleep after Teleport: {} seconds!", int(delay_used))
    else:
        delay_used = distance / (speed / 3.6)  # speed is in kmph , delay_used need mps
        self.logger.info("main: Walking {} m, this will take {} seconds",
                         distance, delay_used)
        cur_time = self._walk_to_location(speed)
        delay_used = self.get_devicesettings_value('post_walk_delay', 0)
    walk_distance_post_teleport = self.get_devicesettings_value('walk_after_teleport_distance', 0)
    if 0 < walk_distance_post_teleport < distance:
        # TODO: actually use to_walk for distance
        to_walk = self._walk_after_teleport(walk_distance_post_teleport)
        # We already waited for a bit because of this walking part
        delay_used -= (to_walk / 3.05) - 1.
        if delay_used < 0:
            delay_used = 0
    if self._init:
        # Init scans use a short fixed delay.
        delay_used = 5
    if self.get_devicesettings_value('last_action_time', None) is not None:
        # Credit the time already elapsed since the last in-game action.
        timediff = time.time() - self.get_devicesettings_value('last_action_time', 0)
        self.logger.info("Timediff between now and last action time: {}",
                         int(timediff))
        delay_used = delay_used - timediff
    elif self.get_devicesettings_value('last_action_time', None) is None and not self._level_mode:
        self.logger.info('Starting first time - we wait because of some default pogo delays ...')
        delay_used = 20
    else:
        self.logger.debug("No last action time found - no calculation")
        delay_used = -1
    if self.get_devicesettings_value('screendetection', True) and \
            self._WordToScreenMatching.return_memory_account_count() > 1 and delay_used >= self._rotation_waittime \
            and self.get_devicesettings_value('account_rotation', False) and not self._level_mode:
        # Waiting time too long and more than one account available -
        # switch accounts instead of sleeping (never in level mode).
        self.logger.info('Could use more then 1 account - switch & no cooldown')
        self.switch_account()
        delay_used = -1
    if delay_used < 0:
        self._current_sleep_time = 0
        self.logger.info('No need to wait before spinning, continuing...')
    else:
        delay_used = math.floor(delay_used)
        self.logger.info("Real sleep time: {} seconds: next action {}",
                         delay_used,
                         datetime.now() + timedelta(seconds=delay_used))
        cleanupbox: bool = False
        lastcleanupbox = self.get_devicesettings_value('last_cleanup_time', None)
        self._current_sleep_time = delay_used
        self.worker_stats()
        if lastcleanupbox is not None:
            if time.time() - lastcleanupbox > 900:
                # just cleanup if last cleanup time > 15 minutes ago
                cleanupbox = True
        else:
            cleanupbox = True
        self._mapping_manager.routemanager_set_worker_sleeping(
            self._routemanager_name, self._origin, delay_used)
        # Sleep in 1-second steps so a stop signal is detected quickly.
        while time.time() <= int(cur_time) + int(delay_used):
            if delay_used > 200 and cleanupbox and not self._enhanced_mode:
                self.clear_thread_task = ClearThreadTasks.BOX
            cleanupbox = False
            if not self._mapping_manager.routemanager_present(self._routemanager_name) \
                    or self._stop_worker_event.is_set():
                self.logger.error("Worker was killed while sleeping")
                self._current_sleep_time = 0
                raise InternalStopWorkerException
            time.sleep(1)
        self._current_sleep_time = 0
    self.set_devicesettings_value("last_location", self.current_location)
    self.last_location = self.current_location
    return cur_time, True