def hidden(self, hidden):
    if isinstance(hidden, str):
        hidden = parse_date(hidden)
    elif not isinstance(hidden, datetime.date):
        raise errors.ValueError(
            "Passed object is not datetime.date instance nor string containing a date.")
    self._hidden = hidden
def load_quick(self):
    """Load basic cache details.

    Use information from geocaching map tooltips. Therefore loading is very quick, but
    the only loaded properties are: `name`, `type`, `state`, `size`, `difficulty`,
    `terrain`, `hidden`, `author`, `favorites` and `pm_only`.

    :raise .LoadError: If cache loading fails (probably because the cache does not exist).
    """
    res = self.geocaching._request("http://tiles01.geocaching.com/map.details",
                                   params={"i": self.wp},
                                   expect="json")

    if res["status"] == "failed" or len(res["data"]) != 1:
        msg = res["msg"] if "msg" in res else "Unknown error (probably non-existing cache)"
        raise errors.LoadError("Cache {} cannot be loaded: {}".format(self, msg))

    data = res["data"][0]

    # prettify data
    self.name = data["name"]
    self.type = Type.from_string(data["type"]["text"])
    self.state = data["available"]
    self.size = Size.from_string(data["container"]["text"])
    self.difficulty = data["difficulty"]["text"]
    self.terrain = data["terrain"]["text"]
    self.hidden = parse_date(data["hidden"])
    self.author = data["owner"]["text"]
    self.favorites = int(data["fp"])
    self.pm_only = data["subrOnly"]

    logging.debug("Cache loaded: {}".format(self))
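# A minimal usage sketch for load_quick() above. The login helper and
# get_cache() are the surrounding pycaching API, assumed here as context;
# the credentials and cache code are placeholders, not verified values.
import pycaching

geocaching = pycaching.login("user", "password")  # hypothetical credentials
cache = geocaching.get_cache("GC2WXPN")
cache.load_quick()  # one lightweight map-tooltip request
print(cache.name, cache.type, cache.favorites)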
def visited(self, visited):
    if isinstance(visited, str):
        visited = parse_date(visited)
    elif not isinstance(visited, datetime.date):
        raise errors.ValueError(
            "Passed object is not datetime.date instance nor string containing a date.")
    self._visited = visited
def hidden(self, hidden):
    if _type(hidden) is str:
        hidden = parse_date(hidden)
    elif _type(hidden) is not datetime.date:
        raise errors.ValueError(
            "Passed object is not datetime.date instance nor string containing a date.")
    self._hidden = hidden
def test_load_by_guid(self, mock_load_quick, mock_load):
    with self.subTest("normal"):
        cache = Cache(self.gc, "GC2WXPN", guid="5f45114d-1d79-4fdb-93ae-8f49f1d27188")
        with self.recorder.use_cassette('cache_guidload_normal'):
            cache.load_by_guid()
        self.assertEqual(cache.name, "Der Schatz vom Luftschloss")
        self.assertEqual(cache.location, Point("N 49° 57.895' E 008° 12.988'"))
        self.assertEqual(cache.type, Type.mystery)
        self.assertEqual(cache.size, Size.large)
        self.assertEqual(cache.difficulty, 2.5)
        self.assertEqual(cache.terrain, 1.5)
        self.assertEqual(cache.author, "engelmz & Punxsutawney Phil")
        self.assertEqual(cache.hidden, parse_date("23/06/2011"))
        self.assertDictEqual(
            cache.attributes, {
                "bicycles": True,
                "available": True,
                "parking": True,
                "onehour": True,
                "kids": True,
                "s-tool": True,
            })
        self.assertEqual(cache.summary, "Gibt es das Luftschloss wirklich?")
        self.assertIn("Seit dem 16.", cache.description)
        self.assertEqual(cache.hint, "Das ist nicht nötig")
        self.assertGreater(cache.favorites, 350)
        self.assertEqual(len(cache.waypoints), 2)
        self.assertDictEqual(
            cache.log_counts, {
                LogType.found_it: 800,
                LogType.note: 35,
                LogType.archive: 1,
                LogType.needs_archive: 1,
                LogType.temp_disable_listing: 5,
                LogType.enable_listing: 4,
                LogType.publish_listing: 1,
                LogType.needs_maintenance: 5,
                LogType.owner_maintenance: 3,
                LogType.post_reviewer_note: 2,
            })

    with self.subTest("PM-only"):
        cache = Cache(self.gc, "GC6MKEF", guid="53d34c4d-12b5-4771-86d3-89318f71efb1")
        with self.recorder.use_cassette('cache_guidload_PMO'):
            with self.assertRaises(PMOnlyException):
                cache.load_by_guid()

    with self.subTest("calls load_quick if no guid"):
        cache = Cache(self.gc, "GC2WXPN")
        with self.recorder.use_cassette('cache_guidload_fallback'):
            with self.assertRaises(Exception):
                cache.load_by_guid()  # raises an error since we mocked load_quick()
        self.assertTrue(mock_load_quick.called)
def visited(self, visited):
    if _type(visited) is str:
        visited = parse_date(visited)
    elif _type(visited) is not datetime.date:
        raise errors.ValueError(
            "Passed object is not datetime.date instance nor string containing a date.")
    self._visited = visited
def releaseDate(self, releaseDate):
    if releaseDate is not None and "," in releaseDate:
        self._releaseDate = parse_date(
            str(releaseDate.strip().rsplit(', ', 1)[1]))
    else:
        self._releaseDate = ""
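# The setter above expects the site's human-readable release date, e.g.
# "Tuesday, 05 April 2016" (the exact page wording is an assumption);
# rsplit(', ', 1) drops the weekday so parse_date() only sees the date part:
raw = "Tuesday, 05 April 2016"  # hypothetical page text
assert raw.strip().rsplit(", ", 1)[1] == "05 April 2016"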
def test_parse_date(self):
    dates = datetime.date(2014, 1, 30), datetime.date(2000, 1, 1), datetime.date(2020, 12, 13)
    patterns = ("%Y-%m-%d", "%Y/%m/%d", "%m/%d/%Y", "%d/%m/%Y", "%d.%m.%Y",
                "%d/%b/%Y", "%d.%b.%Y", "%b/%d/%Y", "%d %b %y")

    # generate all possible formats for all dates and test equality
    for date, pattern in itertools.product(dates, patterns):
        formatted_date = datetime.datetime.strftime(date, pattern)
        self.assertEqual(date, parse_date(formatted_date))
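# The test above implies parse_date() tries a set of known patterns until one
# matches. A minimal sketch of such a helper, reusing the pattern list from the
# test; the real pycaching.util.parse_date may differ in its details:
import datetime

_PATTERNS = ("%Y-%m-%d", "%Y/%m/%d", "%m/%d/%Y", "%d/%m/%Y", "%d.%m.%Y",
             "%d/%b/%Y", "%d.%b.%Y", "%b/%d/%Y", "%d %b %y")

def parse_date_sketch(raw):
    """Return the datetime.date parsed from raw, trying each known pattern."""
    raw = raw.strip()
    for pattern in _PATTERNS:
        try:
            return datetime.datetime.strptime(raw, pattern).date()
        except ValueError:
            continue
    raise ValueError("Unknown date format: {}".format(raw))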
def search(self, point, limit=float("inf")):
    """Return a generator of caches around some point.

    Search for caches around some point by loading search pages and parsing the data
    from these pages. Yield :class:`.Cache` objects filled with data from the search
    page. You can provide a limit as a convenient way to stop the generator after a
    certain number of caches.

    :param .geo.Point point: Search center point.
    :param int limit: Maximum number of caches to generate.
    """
    logging.info("Searching at {}".format(point))

    start_index = 0
    while True:
        # get one page
        page = self._search_get_page(point, start_index)

        if not page:
            # result is empty - no more caches
            return  # `raise StopIteration` inside a generator is an error since PEP 479

        # parse caches in result
        for start_index, row in enumerate(page.find_all("tr"), start_index):

            limit -= 1  # handle limit
            if limit < 0:
                return

            # parse raw data
            cache_details = row.find("span", "cache-details").text.split("|")
            wp = cache_details[1].strip()

            # create and fill cache object
            c = Cache(self, wp)
            c.type = Type.from_string(cache_details[0].strip())
            c.name = row.find("span", "cache-name").text
            c.found = row.find("img", title="Found It!") is not None
            c.favorites = int(row.find(attrs={"data-column": "FavoritePoint"}).text)
            c.state = not (row.get("class") and "disabled" in row.get("class"))
            c.pm_only = row.find("td", "pm-upsell") is not None

            if c.pm_only:
                # PM only caches don't have the other attributes filled in
                yield c
                continue

            c.size = Size.from_string(row.find(attrs={"data-column": "ContainerSize"}).text)
            c.difficulty = float(row.find(attrs={"data-column": "Difficulty"}).text)
            c.terrain = float(row.find(attrs={"data-column": "Terrain"}).text)
            c.hidden = parse_date(row.find(attrs={"data-column": "PlaceDate"}).text)
            c.author = row.find("span", "owner").text[3:]  # delete "by "

            logging.debug("Cache parsed: {}".format(c))
            yield c

        start_index += 1
def _from_print_page(cls, geocaching, guid, soup):
    """Create a cache instance from a souped print-page and a GUID."""
    if soup.find("p", "Warning") is not None:
        raise errors.PMOnlyException()

    cache_info = dict()
    cache_info["guid"] = guid
    cache_info["wp"] = soup.find(class_="HalfRight").find("h1").text.strip()

    content = soup.find(id="Content")
    cache_info["name"] = content.find("h2").text.strip()
    cache_info["type"] = Type.from_filename(
        content.h2.img["src"].split("/")[-1].partition(".")[0])
    cache_info["author"] = content.find(class_="Meta").text.partition(":")[2].strip()

    diff_terr = content.find(class_="DiffTerr").find_all("img")
    assert len(diff_terr) == 2
    cache_info["difficulty"] = float(diff_terr[0]["alt"].split()[0])
    cache_info["terrain"] = float(diff_terr[1]["alt"].split()[0])

    cache_info["size"] = Size.from_string(
        content.find(class_="Third AlignCenter").p.img["alt"].partition(":")[2])

    fav_text = content.find(class_="Third AlignRight").p.contents[2]
    try:
        cache_info["favorites"] = int(fav_text)
    except ValueError:  # element not present when 0 favorites
        cache_info["favorites"] = 0

    cache_info["hidden"] = parse_date(
        content.find(class_="HalfRight AlignRight").p.text.strip().partition(":")[2].strip())
    cache_info["location"] = Point.from_string(
        content.find(class_="LatLong").text.strip())
    cache_info["state"] = None  # not on the page

    attributes = [
        img["src"].split("/")[-1].partition(".")[0].rpartition("-")
        for img in content.find(class_="sortables").find_all("img")
        if img.get("src") and img["src"].startswith("/images/attributes/")
    ]
    cache_info["attributes"] = {
        attr_name: attr_setting == "yes"
        for attr_name, _, attr_setting in attributes
    }
    if "attribute" in cache_info["attributes"]:  # 'blank' attribute
        del cache_info["attributes"]["attribute"]

    cache_info["summary"] = content.find("h2", text="Short Description").find_next("div").text
    cache_info["description"] = content.find("h2", text="Long Description").find_next("div").text

    hint = content.find(id="uxEncryptedHint")
    cache_info["hint"] = hint.text.strip() if hint else None

    cache_info["waypoints"] = Waypoint.from_html(content, table_id="Waypoints")
    cache_info["log_counts"] = Cache._get_log_counts_from_print_page(soup)

    return Cache(geocaching, **cache_info)
def _from_print_page(cls, geocaching, guid, soup):
    """Create a cache instance from a souped print-page and a GUID."""
    if soup.find("p", "Warning") is not None:
        raise errors.PMOnlyException()

    cache_info = dict()
    cache_info['guid'] = guid
    cache_info['wp'] = soup.find(class_='HalfRight').find('h1').text.strip()

    content = soup.find(id="Content")
    cache_info['name'] = content.find("h2").text.strip()
    cache_info['type'] = Type.from_filename(
        content.h2.img['src'].split('/')[-1].partition('.')[0])
    cache_info['author'] = content.find(class_='Meta').text.partition(':')[2].strip()

    diff_terr = content.find(class_='DiffTerr').find_all('img')
    assert len(diff_terr) == 2
    cache_info['difficulty'] = float(diff_terr[0]['alt'].split()[0])
    cache_info['terrain'] = float(diff_terr[1]['alt'].split()[0])

    cache_info['size'] = Size.from_string(
        content.find(class_='Third AlignCenter').p.img['alt'].partition(':')[2])

    fav_text = content.find(class_='Third AlignRight').p.contents[2]
    try:
        cache_info['favorites'] = int(fav_text)
    except ValueError:  # element not present when 0 favorites
        cache_info['favorites'] = 0

    cache_info['hidden'] = parse_date(
        content.find(class_='HalfRight AlignRight').p.text.strip().partition(':')[2].strip())
    cache_info['location'] = Point.from_string(
        content.find(class_='LatLong').text.strip())
    cache_info['state'] = None  # not on the page

    attributes = [
        img['src'].split('/')[-1].partition('.')[0].rpartition('-')
        for img in content.find(class_='sortables').find_all('img')
        if img.get('src') and img['src'].startswith('/images/attributes/')
    ]
    cache_info['attributes'] = {
        attr_name: attr_setting == 'yes'
        for attr_name, _, attr_setting in attributes
    }
    if 'attribute' in cache_info['attributes']:  # 'blank' attribute
        del cache_info['attributes']['attribute']

    cache_info['summary'] = content.find("h2", text="Short Description").find_next("div").text
    cache_info['description'] = content.find("h2", text="Long Description").find_next("div").text

    hint = content.find(id='uxEncryptedHint')
    cache_info['hint'] = hint.text.strip() if hint else None

    cache_info['waypoints'] = Waypoint.from_html(content, table_id="Waypoints")

    return Cache(geocaching, **cache_info)
def test_load_by_guid(self, mock_load_quick, mock_load):
    with self.subTest("normal"):
        cache = Cache(self.gc, "GC2WXPN", guid="5f45114d-1d79-4fdb-93ae-8f49f1d27188")
        cache.load_by_guid()
        self.assertEqual(cache.name, "Der Schatz vom Luftschloss")
        self.assertEqual(cache.location, Point("N 49° 57.895' E 008° 12.988'"))
        self.assertEqual(cache.type, Type.mystery)
        self.assertEqual(cache.size, Size.large)
        self.assertEqual(cache.difficulty, 2.5)
        self.assertEqual(cache.terrain, 1.5)
        self.assertEqual(cache.author, "engelmz & Punxsutawney Phil")
        self.assertEqual(cache.hidden, parse_date("23/06/2011"))
        self.assertDictEqual(
            cache.attributes, {
                "bicycles": True,
                "available": True,
                "firstaid": True,
                "parking": True,
                "onehour": True,
                "kids": True,
                "s-tool": True,
            })
        self.assertEqual(cache.summary, "Gibt es das Luftschloss wirklich?")
        self.assertIn("Seit dem 16.", cache.description)
        self.assertEqual(cache.hint, "Das ist nicht nötig")
        self.assertGreater(cache.favorites, 380)
        self.assertEqual(len(cache.waypoints), 2)

    with self.subTest("PM-only"):
        cache = Cache(self.gc, "GC6MKEF", guid="53d34c4d-12b5-4771-86d3-89318f71efb1")
        with self.assertRaises(PMOnlyException):
            cache.load_by_guid()

    with self.subTest("calls load_quick if no guid"):
        cache = Cache(self.gc, "GC2WXPN")
        with self.assertRaises(Exception):
            cache.load_by_guid()  # raises an error since we mocked load_quick()
        self.assertTrue(mock_load_quick.called)
def test_load_by_guid(self, mock_load_quick, mock_load):
    with self.subTest("normal"):
        cache = Cache(self.gc, "GC2WXPN", guid="5f45114d-1d79-4fdb-93ae-8f49f1d27188")
        with self.recorder.use_cassette('cache_guidload_normal'):
            cache.load_by_guid()
        self.assertEqual(cache.name, "Der Schatz vom Luftschloss")
        self.assertEqual(cache.location, Point("N 49° 57.895' E 008° 12.988'"))
        self.assertEqual(cache.type, Type.mystery)
        self.assertEqual(cache.size, Size.large)
        self.assertEqual(cache.difficulty, 2.5)
        self.assertEqual(cache.terrain, 1.5)
        self.assertEqual(cache.author, "engelmz & Punxsutawney Phil")
        self.assertEqual(cache.hidden, parse_date("23/06/2011"))
        self.assertDictEqual(cache.attributes, {
            "bicycles": True,
            "available": True,
            "parking": True,
            "onehour": True,
            "kids": True,
            "s-tool": True,
        })
        self.assertEqual(cache.summary, "Gibt es das Luftschloss wirklich?")
        self.assertIn("Seit dem 16.", cache.description)
        self.assertEqual(cache.hint, "Das ist nicht nötig")
        self.assertGreater(cache.favorites, 350)
        self.assertEqual(len(cache.waypoints), 2)

    with self.subTest("PM-only"):
        cache = Cache(self.gc, "GC6MKEF", guid="53d34c4d-12b5-4771-86d3-89318f71efb1")
        with self.recorder.use_cassette('cache_guidload_PMO'):
            with self.assertRaises(PMOnlyException):
                cache.load_by_guid()

    with self.subTest("calls load_quick if no guid"):
        cache = Cache(self.gc, "GC2WXPN")
        with self.recorder.use_cassette('cache_guidload_fallback'):
            with self.assertRaises(Exception):
                cache.load_by_guid()  # raises an error since we mocked load_quick()
        self.assertTrue(mock_load_quick.called)
def load(self):
    """Load all possible details about the trackable.

    .. note::
       This method is called automatically when you access a property which isn't yet
       filled in (so-called "lazy loading"). You don't have to call it explicitly.

    :raise .LoadError: If trackable loading fails (probably because the trackable
        does not exist).
    """
    # pick url based on what info we have right now
    if hasattr(self, "url"):
        url = self.url
    elif hasattr(self, "_tid"):
        url = "track/details.aspx?tracker={}".format(self._tid)
    else:
        raise errors.LoadError("Trackable lacks info for loading")

    # make request
    root = self.geocaching._request(url)

    # parse data
    self.tid = root.find("span", "CoordInfoCode").text
    self.name = root.find(id="ctl00_ContentBody_lbHeading").text
    self.type = root.find(id="ctl00_ContentBody_BugTypeImage").get("alt")

    bugDetails = root.find(id="ctl00_ContentBody_BugDetails_BugOwner")
    self.owner = bugDetails.text if bugDetails is not None else ""

    tbGoal = root.find(id="TrackableGoal")
    self.goal = tbGoal.text if tbGoal is not None else ""

    tbDescription = root.find(id="TrackableDetails")
    self.description = tbDescription.text if tbDescription is not None else ""

    tbKml = root.find(id="ctl00_ContentBody_lnkGoogleKML")
    if tbKml is not None:
        self._kml_url = tbKml.get("href")

    bugOrigin = root.find(id="ctl00_ContentBody_BugDetails_BugOrigin")
    self.origin = bugOrigin.text if bugOrigin is not None else ""

    tbReleaseDate = root.find(id="ctl00_ContentBody_BugDetails_BugReleaseDate")
    self.releaseDate = tbReleaseDate.text if tbReleaseDate is not None else ""

    # another Groundspeak trick... inconsistent relative / absolute URL on one page
    logLink = root.find(id="ctl00_ContentBody_LogLink")
    if logLink is not None:
        self._log_page_url = "/track/" + logLink["href"]

    location_raw = root.find(id="ctl00_ContentBody_BugDetails_BugLocation")
    location_url = location_raw.get("href", "") if location_raw is not None else ""
    if "cache_details" in location_url:
        self.location = location_url
    elif location_raw is not None:
        self.location = location_raw.text
    else:
        self.location = ""

    # load the logs which have already been fetched by this request into log objects
    lastTBLogsTmp = []
    soup = BeautifulSoup(str(root), "html.parser")  # parse the HTML as a string
    table = soup.find("table", {"class": "TrackableItemLogTable Table"})  # grab log table
    if table is not None:  # handle no logs, e.g. when TB is not active
        for row in table.find_all("tr"):
            if "BorderTop" in row["class"]:
                header = row.find("th")  # there should only be one
                tbLogType = header.img["title"]
                tbLogDate = parse_date(header.get_text().replace(" ", "").strip())
                tbLogOwnerRow = row.find("td")  # we need the first one
                tbLogOwner = tbLogOwnerRow.a.get_text().strip()
                tbLogGUIDRow = row.findAll("td")[2]  # we need the third one
                tbLogGUID = tbLogGUIDRow.a["href"].strip().replace(
                    "https://www.geocaching.com/track/log.aspx?LUID=", "")
            if "BorderBottom" in row["class"]:
                logRow = row.find("td")  # there should only be one
                tbLogText = logRow.div.get_text().strip()
                # create and fill log object
                lastTBLogsTmp.append(
                    Log(
                        uuid=tbLogGUID,
                        type=tbLogType,
                        text=tbLogText,
                        visited=tbLogDate,
                        author=tbLogOwner,
                    ))
    self.lastTBLogs = lastTBLogsTmp
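# A hedged usage sketch for Trackable.load() above; get_trackable() and the
# login call are the surrounding pycaching API, and the TB code is a placeholder:
import pycaching

geocaching = pycaching.login("user", "password")
trackable = geocaching.get_trackable("TB1KEZ9")
trackable.load()
for log in trackable.lastTBLogs:
    print(log.visited, log.type, log.author)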
def load(self):
    """Load all possible cache details.

    Use full cache details page. Therefore all possible properties are filled in, but
    the loading is a bit slow. If you want to load basic details about a PM only cache,
    the :class:`.PMOnlyException` is still thrown, but available details are filled in.
    If you know that the cache you are loading is PM only, please consider using
    :meth:`load_quick` as it will load the same details, but quicker.

    .. note::
       This method is called automatically when you access a property which isn't yet
       filled in (so-called "lazy loading"). You don't have to call it explicitly.

    :raise .PMOnlyException: If cache is PM only and current user is basic member.
    :raise .LoadError: If cache loading fails (probably because the cache does not exist).
    """
    try:
        # pick url based on what info we have right now
        if hasattr(self, "url"):
            root = self.geocaching._request(self.url)
        elif hasattr(self, "_wp"):
            root = self.geocaching._request("seek/cache_details.aspx", params={"wp": self._wp})
        else:
            raise errors.LoadError("Cache lacks info for loading")
    except errors.Error as e:
        # probably 404 during cache loading - cache does not exist
        raise errors.LoadError("Error in loading cache") from e

    # check for PM only caches if using free account
    self.pm_only = root.find("section", "pmo-banner") is not None
    cache_details = root.find(id="ctl00_divContentMain") if self.pm_only else root.find(id="cacheDetails")

    # details also available to basic members for PM only caches -------------------------------

    if self.pm_only:
        self.wp = cache_details.find("li", "li__gccode").text.strip()
        self.name = cache_details.find("h1").text.strip()

        author = cache_details.find(id="ctl00_ContentBody_uxCacheBy").text
        self.author = author[len("A cache by "):]

        # parse cache detail list into a python list
        details = cache_details.find("ul", "ul__hide-details").text.split("\n")
        self.difficulty = float(details[2])
        self.terrain = float(details[5])
        self.size = Size.from_string(details[8])
        self.favorites = int(details[11])
    else:
        # parse from <title> - get first word
        try:
            self.wp = root.title.string.split(" ")[0]
        except Exception:
            raise errors.LoadError

        self.name = cache_details.find("h2").text
        self.author = cache_details("a")[1].text

        size = root.find("div", "CacheSize")
        D_and_T_img = root.find("div", "CacheStarLabels").find_all("img")

        size = size.find("img").get("src")  # size img src
        size = size.split("/")[-1].rsplit(".", 1)[0]  # filename w/o extension
        self.size = Size.from_filename(size)

        self.difficulty, self.terrain = [float(img.get("alt").split()[0]) for img in D_and_T_img]

    type = cache_details.find("img").get("src")  # type img src
    type = type.split("/")[-1].rsplit(".", 1)[0]  # filename w/o extension
    self.type = Type.from_filename(type)

    if self.pm_only:
        raise errors.PMOnlyException()

    # details not available to basic members for PM only caches --------------------------------

    pm_only_warning = root.find("p", "Warning NoBottomSpacing")
    self.pm_only = pm_only_warning and ("Premium Member Only" in pm_only_warning.text) or False

    attributes_widget, inventory_widget, *_ = root.find_all("div", "CacheDetailNavigationWidget")

    hidden = cache_details.find("div", "minorCacheDetails").find_all("div")[1].text
    self.hidden = parse_date(hidden.split(":")[-1])

    self.location = Point.from_string(root.find(id="uxLatLon").text)

    self.state = root.find("ul", "OldWarning") is None

    found = root.find("div", "FoundStatus")
    self.found = found and ("Found It!" in found.text or "Attended" in found.text) or False

    attributes_raw = attributes_widget.find_all("img")
    attributes_raw = [_.get("src").split("/")[-1].rsplit("-", 1) for _ in attributes_raw]
    self.attributes = {attribute_name: appendix.startswith("yes")
                       for attribute_name, appendix in attributes_raw
                       if not appendix.startswith("blank")}

    user_content = root.find_all("div", "UserSuppliedContent")
    self.summary = user_content[0].text
    self.description = str(user_content[1])

    self.hint = rot13(root.find(id="div_hint").text.strip())

    favorites = root.find("span", "favorite-value")
    self.favorites = 0 if favorites is None else int(favorites.text)

    self._log_page_url = root.find(id="ctl00_ContentBody_GeoNav_logButton")["href"]

    js_content = "\n".join(map(lambda i: i.text, root.find_all("script")))
    self._logbook_token = re.findall("userToken\\s*=\\s*'([^']+)'", js_content)[0]

    # find original location if any
    if "oldLatLng\":" in js_content:
        old_lat_long = js_content.split("oldLatLng\":")[1].split("]")[0].split("[")[1]
        self.original_location = Point(old_lat_long)
    else:
        self.original_location = None

    # if there are some trackables
    if len(inventory_widget.find_all("a")) >= 3:
        trackable_page_url = inventory_widget.find(id="ctl00_ContentBody_uxTravelBugList_uxViewAllTrackableItems")
        self._trackable_page_url = trackable_page_url.get("href")[3:]  # has "../" on start
    else:
        self._trackable_page_url = None

    logging.debug("Cache loaded: {}".format(self))
def search(self, point, limit=float("inf")):
    """Return a generator of caches around some point.

    Search for caches around some point by loading search pages and parsing the data
    from these pages. Yield :class:`.Cache` objects filled with data from the search
    page. You can provide a limit as a convenient way to stop the generator after a
    certain number of caches.

    :param .geo.Point point: Search center point.
    :param int limit: Maximum number of caches to generate.
    """
    logging.info("Searching at {}".format(point))

    start_index = 0
    while True:
        # get one page
        page = self._search_get_page(point, start_index)

        if not page:
            # result is empty - no more caches
            return  # `raise StopIteration` inside a generator is an error since PEP 479

        # parse caches in result
        for start_index, row in enumerate(page.find_all("tr"), start_index):

            limit -= 1  # handle limit
            if limit < 0:
                return

            # parse raw data
            cache_details = row.find("span", "cache-details").text.split("|")
            wp = cache_details[1].strip()

            # create and fill cache object
            c = Cache(self, wp)
            c.type = Type.from_string(cache_details[0].strip())
            c.name = row.find("span", "cache-name").text
            badge = row.find("svg", class_="badge")
            c.found = "found" in str(badge) if badge is not None else False
            c.favorites = int(row.find(attrs={"data-column": "FavoritePoint"}).text)
            c.state = not (row.get("class") and "disabled" in row.get("class"))
            c.pm_only = row.find("td", "pm-upsell") is not None

            if c.pm_only:
                # PM only caches don't have the other attributes filled in
                yield c
                continue

            c.size = Size.from_string(row.find(attrs={"data-column": "ContainerSize"}).text)
            c.difficulty = float(row.find(attrs={"data-column": "Difficulty"}).text)
            c.terrain = float(row.find(attrs={"data-column": "Terrain"}).text)
            c.hidden = parse_date(row.find(attrs={"data-column": "PlaceDate"}).text)
            c.author = row.find("span", "owner").text[3:]  # delete "by "

            logging.debug("Cache parsed: {}".format(c))
            yield c

        start_index += 1
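# A hedged usage sketch for search() above; pycaching.login() and
# pycaching.geo.Point come from the surrounding library, and the
# coordinates are placeholder values:
import pycaching
from pycaching.geo import Point

geocaching = pycaching.login("user", "password")
for cache in geocaching.search(Point(49.73, 13.38), limit=20):
    print(cache.wp, cache.name)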