def api_response(r: requests.Response) -> dict:
    # Propagate HTTP errors (4xx/5xx) as exceptions
    if r.status_code != 200:
        r.raise_for_status()

    # Parse the XML body and return either the device-reported error
    # or the response payload converted to a dictionary
    xmldoc = parse_xml_string(r.text)
    err = check_error(xmldoc.documentElement)
    if err:
        return err

    return {
        "type": "response",
        "response": get_dictionary_from_children(xmldoc.documentElement),
    }
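
# A minimal usage sketch: api_response() only needs a requests.Response, so a
# plain requests.get against one of the modem's XML endpoints is enough to
# exercise it. fetch_endpoint and its 10-second timeout are illustrative
# additions, not helpers defined elsewhere in this code.
import requests

def fetch_endpoint(host: str, end_point: str) -> dict:
    # api_response() raises for 4xx/5xx responses via raise_for_status();
    # otherwise it returns either the device-reported error dict or the
    # parsed "response" dictionary.
    resp = requests.get(
        "http://{host}{end_point}".format(host=host, end_point=end_point),
        timeout=10,
    )
    return api_response(resp)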
def run(self):
    try:
        # If we have detected an error, wait a bit before running again
        if self._error_countdown > 0:
            self._error_countdown -= 1
            return

        if self._ctx is None:
            # Log in required - This is all hard-coded
            logger.debug(
                "Attempting to login with {} / {} / {}".format(
                    settings.USERNAME, settings.PASSWORD, settings.MODEM_HOST
                )
            )
            self._ctx = quick_login(
                settings.USERNAME, settings.PASSWORD, settings.MODEM_HOST
            )

        # Begin the scrape...
        for end_point in END_POINTS:
            result = get_from_url_raw(
                url="http://{host}{end_point}".format(
                    host=settings.MODEM_HOST, end_point=end_point
                ),
                ctx=self._ctx,
            )
            if result.status_code == 200:
                text = result.text
                xmldoc = parse_xml_string(text)
                err = check_error(xmldoc.documentElement)
                if err:
                    self._ctx = None
                    self._error_countdown = settings.ERROR_COUNTDOWN
                    logger.error("request error: {}".format(err))
                    break

                # Everything is OK - store the result
                self._modem_data.set_data(key=end_point, value=text)
    except Exception:
        # Don't blow up or it will break APScheduler
        logger.error("Error in scraper:\n{}".format(traceback.format_exc()))
        self._ctx = None
        self._error_countdown = settings.ERROR_COUNTDOWN
    finally:
        self._modem_data.log_contents()
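
# A hedged scheduling sketch: the except-clause above mentions APScheduler, so
# this shows one way run() could be driven by it. "Scraper" (the owning class),
# its construction and the 30-second interval are assumptions, not defined in
# the code above.
from apscheduler.schedulers.background import BackgroundScheduler

def schedule_scraper(scraper: "Scraper", seconds: int = 30) -> BackgroundScheduler:
    # run() swallows its own exceptions, so a failing scrape never kills the
    # scheduler's worker thread; it just backs off via _error_countdown.
    scheduler = BackgroundScheduler()
    scheduler.add_job(scraper.run, "interval", seconds=seconds)
    scheduler.start()
    return scheduler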