def handle_response(cls, response: requests.models.Response) -> Generator[str, None, None]:
    """Handle a worker response.

    Read all streamed status lines and check that the last one is OK,
    then fetch the worker's result file, stream its contents to the
    caller, and finally delete the result file from the server.

    :param response: The response from the request made.
    :raises requests.exceptions.RequestException: if the worker reported
        a failure, the status stream ended prematurely (including an
        empty body), or a follow-up request to the worker API failed.
    """
    worker_id = response.headers.get(cls._WORKER_ID_RESPONSE)
    current_request_id = response.headers.get(cls._REQUEST_ID)
    logger.info(
        f"Worker response {worker_id} (request {current_request_id}) started."
    )
    # Drain the status stream, keeping only the final line.
    last_line = None
    for line in response.iter_lines():
        last_line = line
    # Fix: an empty body used to crash with AttributeError on
    # None.decode(); leave it as None so it falls into the
    # "ended prematurely" branch below.
    if last_line is not None:
        last_line = last_line.decode()
    if last_line == cls._WORKER_RESULT_FAILURE:
        logger.info(
            f"Worker response {worker_id} (request {current_request_id}) failed"
        )
        raise requests.exceptions.RequestException("Worker response failed")
    elif last_line != cls._WORKER_RESULT_OK:
        logger.info(
            f"Worker response {worker_id} (request {current_request_id}) ended prematurely"
        )
        raise requests.exceptions.RequestException(
            "Worker response ended prematurely")
    else:
        logger.info(
            f"Worker result {worker_id} (request {current_request_id}) OK")
    try:
        # Request worker result
        url = f"{cls._WORKER_API}/{worker_id}"
        response = requests.get(url=url, stream=True)
        response.raise_for_status()
        for line in response.iter_lines():
            yield line
    except Exception:
        logger.error(f"Worker result {worker_id} failed", exc_info=True)
        # Re-raise as-is to preserve the original traceback.
        raise
    finally:
        # Always try to cleanup worker files (even if an exception has occurred)
        logger.info(
            f"Worker result {worker_id} (request {current_request_id}) clear..."
        )
        url = f"{cls._WORKER_API}/end/{worker_id}"
        response = requests.delete(url=url)
        response.raise_for_status()
def parse_tsp_csv(response: requests.models.Response) -> OrderedDict:
    """Parses a Thrift Savings Plan output CSV file.

    Function takes in a requests response and returns an OrderedDict
    with newest closing cost at front of OrderedDict.

    :param response: Streaming response whose body is the TSP share-price CSV.
    :return: OrderedDict mapping each (timezone-aware, 16:00) date to a list
        of Decimal fund prices, sorted newest-first.
    """
    # Fund columns in the order their prices are stored per date.
    # Hoisted out of the row loop: this list is loop-invariant.
    names = [
        'L Income', 'L 2025', 'L 2030', 'L 2035', 'L 2040', 'L 2045',
        'L 2050', 'L 2055', 'L 2060', 'L 2065', 'G Fund', 'F Fund',
        'C Fund', 'S Fund', 'I Fund'
    ]
    data = OrderedDict()
    text = response.iter_lines(decode_unicode=True)
    reader = csv.DictReader(text, dialect='tsp')
    for row in reader:
        # Date from TSP looks like "July 30. 2020"
        # There is indeed a period after the day of month.
        # NOTE(review): "%b" matches abbreviated month names; the full-name
        # example above would need "%B" — verify the actual feed format.
        date = datetime.datetime.strptime(row['Date'], "%b %d. %Y")
        # Prices are as-of market close, 4 PM in the feed's timezone.
        date = date.replace(hour=16, tzinfo=TIMEZONE)
        # Empty cells (fund did not exist yet) become Decimal(0).
        data[date] = [
            Decimal(row[name]) if row[name] else Decimal()
            for name in map(str.strip, names)
        ]
    return OrderedDict(sorted(data.items(), key=lambda t: t[0], reverse=True))
def iter_events(r: requests.models.Response) -> Generator[Event, None, None]:
    """Iterates over the source data, one Event at a time.

    When stream=True is set on the request, this avoids reading the
    content at once into memory for large responses.

    .. note:: This method is not reentrant safe.

    :param r: Streaming response carrying a Server-Sent-Events body.
    :return: Generator of parsed :class:`Event` objects. An event not
        terminated by a blank line before EOF is discarded.
    """
    event = None
    # Fall back to UTF-8 when the server did not declare an encoding.
    r.encoding = r.encoding if r.encoding else "utf-8"
    for line in r.iter_lines(chunk_size=128, decode_unicode=True):
        if not line:
            # Blank line terminates an event; dispatch only if it has data.
            if event is not None and event.data is not None:
                yield event
            event = None
        elif line.startswith(":"):
            # Ignore comments
            pass
        else:
            if event is None:
                event = Event()
            # "field: value"; a line without a colon is a field with an
            # empty value.
            k, v = line.split(":", maxsplit=1) if ":" in line else (line, "")
            # Only fields the Event type knows about are kept.
            if hasattr(event, k):
                # Strip at most one leading space from the value.
                setattr(event, k, v[1:] if v.startswith(" ") else v)