def test_post_log(self, mock_request, mock_load_log_page):
    """Check cache log posting: input validation and the POST payload sent."""
    # Mocked _load_log_page result; "found it" is deliberately left out
    # so that posting it triggers the invalid-log-type error.
    allowed_types = {
        "4",  # write note
        "3",  # didn't find it
    }
    mock_load_log_page.return_value = (allowed_types, {})
    sample_text = "Test log."

    with self.subTest("empty log text"):
        log = Log(text="", visited=date.today(), type=LogType.note)
        with self.assertRaises(PycachingValueError):
            self.c.post_log(log)

    with self.subTest("invalid log type"):
        log = Log(text=sample_text, visited=date.today(), type=LogType.found_it)
        with self.assertRaises(PycachingValueError):
            self.c.post_log(log)

    with self.subTest("valid log"):
        log = Log(text=sample_text, visited=date.today(), type=LogType.didnt_find_it)
        self.c.post_log(log)

        # verify the exact payload handed to the _request mock
        expected_post_data = {
            "LogTypeId": "3",  # DNF - see allowed_types
            "LogDate": date.today().strftime("%Y-%m-%d"),
            "LogText": sample_text,
        }
        mock_request.assert_called_with(self.c._get_log_page_url(), method="POST", data=expected_post_data)
def test_post_log(self, mock_request, mock_load_log_page):
    """Check trackable log posting: input validation and the form data submitted."""
    # Mocked _load_log_page result; "grabbed it" is deliberately left out
    # so that posting it triggers the invalid-log-type error.
    allowed_types = {
        "4",   # write note
        "48",  # discovered it
    }
    mock_load_log_page.return_value = (allowed_types, {}, "mm/dd/YYYY")
    sample_text = "Test log."
    sample_date = date.today()
    tracking_code = "ABCDEF"

    with self.subTest("empty log text"):
        log = Log(text="", visited=sample_date, type=LogType.note)
        with self.assertRaises(PycachingValueError):
            self.t.post_log(log, tracking_code)

    with self.subTest("invalid log type"):
        log = Log(text=sample_text, visited=sample_date, type=LogType.grabbed_it)
        with self.assertRaises(PycachingValueError):
            self.t.post_log(log, tracking_code)

    with self.subTest("valid log"):
        log = Log(text=sample_text, visited=sample_date, type=LogType.discovered_it)
        self.t.post_log(log, tracking_code)

        # verify the exact form fields handed to the _request mock
        expected_post_data = {
            "ctl00$ContentBody$LogBookPanel1$btnSubmitLog": "Submit Log Entry",
            "ctl00$ContentBody$LogBookPanel1$ddLogType": "48",  # discovered it - see allowed_types
            "ctl00$ContentBody$LogBookPanel1$uxDateVisited": sample_date.strftime("%m/%d/%Y"),
            "ctl00$ContentBody$LogBookPanel1$tbCode": tracking_code,
            "ctl00$ContentBody$LogBookPanel1$uxLogInfo": sample_text,
        }
        mock_request.assert_called_with(self.t._log_page_url, method="POST", data=expected_post_data)
def test_post_log(self, mock_request, mock_load_log_page):
    """Check cache log posting via the legacy form: validation and POST payload."""
    # Mocked _load_log_page result; "found it" is deliberately left out
    # so that posting it triggers the invalid-log-type error.
    allowed_types = {
        "4",  # write note
        "3",  # didn't find it
    }
    mock_load_log_page.return_value = (allowed_types, {}, "mm/dd/YYYY")
    sample_text = "Test log."

    with self.subTest("empty log text"):
        log = Log(text="", visited=date.today(), type=LogType.note)
        with self.assertRaises(PycachingValueError):
            self.c.post_log(log)

    with self.subTest("invalid log type"):
        log = Log(text=sample_text, visited=date.today(), type=LogType.found_it)
        with self.assertRaises(PycachingValueError):
            self.c.post_log(log)

    with self.subTest("valid log"):
        log = Log(text=sample_text, visited=date.today(), type=LogType.didnt_find_it)
        self.c.post_log(log)

        # verify the exact form fields handed to the _request mock
        expected_post_data = {
            "ctl00$ContentBody$LogBookPanel1$btnSubmitLog": "Submit Log Entry",
            "ctl00$ContentBody$LogBookPanel1$ddLogType": "3",  # DNF - see allowed_types
            "ctl00$ContentBody$LogBookPanel1$uxDateVisited": date.today().strftime("%m/%d/%Y"),
            "ctl00$ContentBody$LogBookPanel1$uxLogInfo": sample_text,
        }
        mock_request.assert_called_with(self.c._log_page_url, method="POST", data=expected_post_data)
def load_logbook(self, limit=float("inf")):
    """Return a generator of logs for this cache.

    Yield instances of :class:`.Log` filled with log data.

    :param int limit: Maximum number of logs to generate.
    """
    logging.info("Loading logbook for {}...".format(self))

    page = 0
    per_page = min(limit, 100)  # max number to fetch in one request is 100 items

    while True:
        # get one page
        logbook_page = self._logbook_get_page(page, per_page)
        page += 1

        if not logbook_page:
            # result is empty - no more logs. Use a plain ``return`` to end
            # the generator: raising StopIteration inside a generator is
            # converted to RuntimeError since Python 3.7 (PEP 479).
            return

        for log_data in logbook_page:
            limit -= 1  # handle limit
            if limit < 0:
                return  # limit reached - stop the generator (PEP 479, see above)

            # create and fill log object
            l = Log()
            l.type = log_data["LogType"]
            l.text = log_data["LogText"]
            l.visited = log_data["Visited"]
            l.author = log_data["UserName"]
            yield l
def post_log(self, wp, text, type=LogType.found_it, date=None):
    """Post a log for cache.

    :param str wp: Cache waypoint.
    :param str text: Log text.
    :param .log.Type type: Type of log.
    :param datetime.date date: Log date. If set to :code:`None`,
        :meth:`datetime.date.today` is used instead.
    """
    # default the visit date to today when the caller gave none
    visited = date if date else datetime.date.today()
    log = Log(type=type, text=text, visited=visited)
    self.get_cache(wp).post_log(log)
def load_logbook(self, limit=float("inf")):
    """Return a generator of logs for this cache.

    Yield instances of :class:`.Log` filled with log data.

    :param int limit: Maximum number of logs to generate.
    """
    logging.info("Loading logbook for {}...".format(self))

    page_index = 0
    per_page = min(limit, 100)  # at most 100 items can be fetched per request

    while True:
        # fetch the next page of raw log records
        current_page = self._logbook_get_page(page_index, per_page)
        page_index += 1

        if not current_page:
            return  # empty result - no more logs

        for record in current_page:
            limit -= 1
            if limit < 0:
                return  # requested number of logs reached

            # the log-type image filename (without extension) encodes the type
            icon_name = record["LogTypeImage"].rsplit(".", 1)[0]

            # build a fully-populated log object
            yield Log(
                uuid=record["LogGuid"],
                type=LogType.from_filename(icon_name),
                text=record["LogText"],
                visited=record["Visited"],
                author=record["UserName"],
            )
def load(self):
    """Load all possible cache details.

    Use full cache details page. Therefore all possible properties are filled in, but the
    loading is a bit slow. If you want to load basic details about a PM only cache, the
    :class:`.PMOnlyException` is still thrown, but avaliable details are filled in. If you
    know, that the cache you are loading is PM only, please consider using
    :meth:`load_quick` as it will load the same details, but quicker.

    .. note::
       This method is called automatically when you access a property which isn't yet
       filled in (so-called "lazy loading"). You don't have to call it explicitly.

    :raise .PMOnlyException: If cache is PM only and current user is basic member.
    :raise .LoadError: If cache loading fails (probably because of not existing cache).
    """
    try:
        # pick url based on what info we have right now
        if hasattr(self, "url"):
            root = self.geocaching._request(self.url)
        elif hasattr(self, "_wp"):
            root = self.geocaching._request(self._urls["cache_details"], params={"wp": self._wp})
        else:
            raise errors.LoadError("Cache lacks info for loading")
    except errors.Error as e:
        # probably 404 during cache loading - cache not exists
        raise errors.LoadError("Error in loading cache") from e

    # check for PM only caches if using free account
    self.pm_only = root.find("section", "pmo-banner") is not None
    cache_details = root.find(id="ctl00_divContentMain") if self.pm_only else root.find(id="cacheDetails")

    # details also avaliable for basic members for PM only caches -----------------------------
    if self.pm_only:
        self.wp = cache_details.find("li", "li__gccode").text.strip()
        self.name = cache_details.find("h1").text.strip()

        type_src = cache_details.find("img").get("src")  # "/play/Content/images/cache-types/3.png"
        type_src = type_src.split("/")[-1].rsplit(".", 1)[0]  # "3"
        self.type = Type.from_filename(type_src)

        author = cache_details.find(id="ctl00_ContentBody_uxCacheBy").text
        self.author = author[len("A cache by "):]

        # parse cache detail list into a python list
        details = cache_details.find("ul", "ul__hide-details").text.split("\n")
        self.difficulty = float(details[2])
        self.terrain = float(details[5])
        self.size = Size.from_string(details[8])
        self.favorites = int(details[11])
    else:
        # parse from <title> - get first word
        try:
            self.wp = root.title.string.split(" ")[0]
        except AttributeError as e:
            # missing <title> (or empty title text) means we got an unexpected page;
            # narrowed from a bare ``except:`` so KeyboardInterrupt etc. pass through
            raise errors.LoadError() from e

        self.name = cache_details.find("h2").text

        type_src = cache_details.select_one("svg.cache-icon use").get("xlink:href")  # "cache-types.svg#icon-3-disabled"
        type_src = type_src.split("#")[-1].replace("_", "-").split("-")[1]  # "3"
        self.type = Type.from_filename(type_src)

        self.author = cache_details("a")[1].text

        D_and_T_img = root.find("div", "CacheStarLabels").find_all("img")
        self.difficulty, self.terrain = [float(img.get("alt").split()[0]) for img in D_and_T_img]

        size = root.find("div", "CacheSize")
        size = size.find("img").get("src")  # size img src
        size = size.split("/")[-1].rsplit(".", 1)[0]  # filename w/o extension
        self.size = Size.from_filename(size)

    if self.pm_only:
        raise errors.PMOnlyException()

    # details not avaliable for basic members for PM only caches ------------------------------
    pm_only_warning = root.find("p", "Warning NoBottomSpacing")
    self.pm_only = bool(pm_only_warning and ("Premium Member Only" in pm_only_warning.text))

    attributes_widget, inventory_widget, *_ = root.find_all("div", "CacheDetailNavigationWidget")

    hidden = cache_details.find("div", "minorCacheDetails").find_all("div")[1].text
    self.hidden = parse_date(hidden.split(":")[-1])

    self.location = Point.from_string(root.find(id="uxLatLon").text)

    self.state = root.find("ul", "OldWarning") is None

    # the nav bar shows an image for our own found/attended log, if any
    log_image = root.find(id="ctl00_ContentBody_GeoNav_logTypeImage")
    if log_image:
        log_image_filename = log_image.get("src").split("/")[-1].rsplit(".", 1)[0]  # filename w/o extension
        self._found_status = Log(type=LogType.from_filename(log_image_filename))
    else:
        self._found_status = None

    # attribute icons: "<name>-<yes|no|blank>.png"; blank icons are placeholders
    attributes_raw = attributes_widget.find_all("img")
    attributes_raw = [img.get("src").split("/")[-1].rsplit("-", 1) for img in attributes_raw]
    self.attributes = {
        attribute_name: appendix.startswith("yes")
        for attribute_name, appendix in attributes_raw
        if not appendix.startswith("blank")
    }

    self.summary = root.find(id="ctl00_ContentBody_ShortDescription").text
    self.description = root.find(id="ctl00_ContentBody_LongDescription").text
    self.hint = rot13(root.find(id="div_hint").text.strip())

    favorites = root.find("span", "favorite-value")
    self.favorites = int(favorites.text) if favorites else 0

    # the logbook token and the original coordinates live in inline javascript
    js_content = "\n".join(map(lambda i: i.text, root.find_all("script")))
    self._logbook_token = re.findall("userToken\\s*=\\s*'([^']+)'", js_content)[0]

    # find original location if any
    if "oldLatLng\":" in js_content:
        old_lat_long = js_content.split("oldLatLng\":")[1].split(']')[0].split('[')[1]
        self.original_location = Point(old_lat_long)
    else:
        self.original_location = None

    # if there are some trackables
    if len(inventory_widget.find_all("a")) >= 3:
        trackable_page_url = inventory_widget.find(id="ctl00_ContentBody_uxTravelBugList_uxViewAllTrackableItems")
        self._trackable_page_url = trackable_page_url.get("href")[3:]  # has "../" on start
    else:
        self._trackable_page_url = None

    # Additional Waypoints
    self.waypoints = Waypoint.from_html(root, "ctl00_ContentBody_Waypoints")

    logging.debug("Cache loaded: {}".format(self))
def found(self, found_status):
    """Record whether the current user has found this cache.

    A truthy value stores a "found it" log stub; a falsy value clears it.
    """
    # TODO set found_it / attended based on cache type
    self._found_status = Log(type=LogType.found_it) if found_status else None
def setUp(self):
    """Build a fresh Log fixture before each test."""
    self.log = Log(
        type=Type.found_it,
        text="text",
        visited="2012-02-02",
        author="human",
    )
def load(self):
    """Load all possible details about the trackable.

    .. note::
       This method is called automatically when you access a property which isn't yet
       filled in (so-called "lazy loading"). You don't have to call it explicitly.

    :raise .LoadError: If trackable loading fails (probably because of not existing cache).
    """
    # pick url based on what info we have right now
    if hasattr(self, "url"):
        url = self.url
    elif hasattr(self, "_tid"):
        url = "track/details.aspx?tracker={}".format(self._tid)
    else:
        raise errors.LoadError("Trackable lacks info for loading")

    # make request
    root = self.geocaching._request(url)

    # parse data
    self.tid = root.find("span", "CoordInfoCode").text
    self.name = root.find(id="ctl00_ContentBody_lbHeading").text
    self.type = root.find(id="ctl00_ContentBody_BugTypeImage").get("alt")

    # each optional section may be missing; reuse the element found once
    # instead of issuing a second identical root.find() for every field
    bugDetails = root.find(id="ctl00_ContentBody_BugDetails_BugOwner")
    self.owner = bugDetails.text if bugDetails is not None else ""

    tbGoal = root.find(id="TrackableGoal")
    self.goal = tbGoal.text if tbGoal is not None else ""

    tbDescription = root.find(id="TrackableDetails")
    self.description = tbDescription.text if tbDescription is not None else ""

    tbKml = root.find(id="ctl00_ContentBody_lnkGoogleKML")
    if tbKml is not None:
        self._kml_url = tbKml.get("href")

    bugOrigin = root.find(id="ctl00_ContentBody_BugDetails_BugOrigin")
    self.origin = bugOrigin.text if bugOrigin is not None else ""

    tbReleaseDate = root.find(id="ctl00_ContentBody_BugDetails_BugReleaseDate")
    self.releaseDate = tbReleaseDate.text if tbReleaseDate is not None else ""

    # another Groundspeak trick... inconsistent relative / absolute URL on one page
    logLink = root.find(id="ctl00_ContentBody_LogLink")
    if logLink is not None:
        self._log_page_url = "/track/" + logLink["href"]

    location_raw = root.find(id="ctl00_ContentBody_BugDetails_BugLocation")
    location_url = location_raw.get("href", "") if location_raw is not None else ""
    if "cache_details" in location_url:
        # trackable sits in a cache - keep the cache details URL
        self.location = location_url
    else:
        # otherwise the element text holds a plain location description
        self.location = location_raw.text if location_raw is not None else ""

    # Load logs which have been already loaded by that request into log object
    lastTBLogsTmp = []
    # NOTE(review): re-parsing looks redundant since root is already parsed;
    # kept as-is in case the original parser handles the class filter differently
    soup = BeautifulSoup(str(root), 'html.parser')  # Parse the HTML as a string
    table = soup.find("table", {"class": "TrackableItemLogTable Table"})  # Grab log table
    if table is not None:  # handle no logs eg when TB is not active
        for row in table.find_all('tr'):
            # each log is rendered as a "BorderTop" row (header: type, date,
            # author, guid) followed by a "BorderBottom" row (the log text)
            if "BorderTop" in row["class"]:
                header = row.find('th')  # there should only be one
                tbLogType = header.img["title"]
                tbLogDate = parse_date(header.get_text().replace(" ", "").strip())
                tbLogOwnerRow = row.find('td')  # we need to first
                tbLogOwner = tbLogOwnerRow.a.get_text().strip()
                tbLogGUIDRow = row.findAll('td')[2]  # we the third one
                tbLogGUID = tbLogGUIDRow.a["href"].strip().replace(
                    "https://www.geocaching.com/track/log.aspx?LUID=", "")
            if "BorderBottom" in row["class"]:
                logRow = row.find('td')  # there should only be one
                tbLogText = logRow.div.get_text().strip()

                # create and fill log object
                lastTBLogsTmp.append(
                    Log(
                        uuid=tbLogGUID,
                        type=tbLogType,
                        text=tbLogText,
                        visited=tbLogDate,
                        author=tbLogOwner,
                    ))

    self.lastTBLogs = lastTBLogsTmp
def setUp(self):
    """Build a fresh Log fixture (string-typed) before each test."""
    self.l = Log(
        type="found it",
        text="text",
        visited="2012-02-02",
        author="human",
    )