def process_data(self, data):
    """Process one yielded chunk of a WSGI response body, inserting the
    RUM (browser timing) HTML snippet when possible.

    Returns a list of byte strings to forward downstream, or None when
    the chunk has been buffered and nothing should be sent yet.
    Maintains self.response_data / self.response_length buffering state
    and adjusts self.content_length when bytes are inserted.
    """

    # If this is the first data block, then immediately try
    # for an insertion using full set of criteria. If this
    # works then we are done, else we move to next phase of
    # buffering up content until we find the body element.

    def html_to_be_inserted():
        # Deferred so the header/footer are only generated when an
        # insertion point is actually found. An empty header means
        # browser monitoring is disabled/unavailable for this
        # transaction, so nothing is inserted.
        header = self.transaction.browser_timing_header()

        if not header:
            return b''

        footer = self.transaction.browser_timing_footer()

        return six.b(header) + six.b(footer)

    if not self.response_data:
        # NOTE(review): insert_html_snippet appears to return the
        # modified content, or None when no insertion point was found
        # in this chunk — inferred from the is-not-None checks below.
        modified = insert_html_snippet(data, html_to_be_inserted)

        if modified is not None:
            if self.debug:
                _logger.debug('RUM insertion from WSGI middleware '
                        'triggered on first yielded string from '
                        'response. Bytes added was %r.',
                        len(modified) - len(data))

            if self.content_length is not None:
                # Keep the Content-Length response header consistent
                # with the extra bytes we just inserted.
                length = len(modified) - len(data)
                self.content_length += length

            return [modified]

    # Buffer up the data. If we haven't found the start of
    # the body element, that is all we do. If we have reached
    # the limit of buffering allowed, then give up and return
    # the buffered data.

    if not self.response_data or not verify_body_exists(data):
        self.response_length += len(data)
        self.response_data.append(data)

        if self.response_length >= self.search_maximum:
            # Buffering limit hit: give up on insertion and flush
            # everything accumulated so far, resetting the buffer.
            buffered_data = self.response_data
            self.response_data = []

            return buffered_data

        # Chunk retained in the buffer; nothing to send yet.
        return

    # Now join back together any buffered data into a single
    # string. This makes it easier to process, but there is a
    # risk that we could temporarily double memory use for
    # the response content if had small data blocks followed
    # by very large data block. Expect that the risk of this
    # occurring is very small.

    if self.response_data:
        self.response_data.append(data)
        data = b''.join(self.response_data)
        self.response_data = []

    # Perform the insertion of the HTML. This should always
    # succeed as we would only have got here if we had found
    # the body element, which is the fallback point for
    # insertion.

    modified = insert_html_snippet(data, html_to_be_inserted)

    if modified is not None:
        if self.debug:
            _logger.debug('RUM insertion from WSGI middleware '
                    'triggered on subsequent string yielded from '
                    'response. Bytes added was %r.',
                    len(modified) - len(data))

        if self.content_length is not None:
            length = len(modified) - len(data)
            self.content_length += length

        return [modified]

    # Something went very wrong as we should never get here.

    return [data]
async def send_inject_browser_agent(self, message):
    """ASGI send wrapper that injects the browser agent (RUM) snippet
    into an HTML response body.

    Buffers ``http.response.start`` / ``http.response.body`` messages
    until the body element is found, inserts the browser timing
    header+footer, fixes up the Content-Length header to match, then
    flushes. Aborts (passes messages through untouched) on anything
    that rules out injection.

    Fix: previously a response with no Content-Length header reached
    ``int(None)`` and raised an uncaught TypeError (only ValueError was
    caught); a missing header now simply skips the header fix-up, as
    the later ``content_length is not None`` guard always intended.
    """
    if self.pass_through:
        return await self.send(message)

    # Store messages in case of an abort
    self.messages.append(message)

    message_type = message["type"]
    if message_type == "http.response.start" and not self.initial_message:
        # Copy the headers so we can safely mutate them later when
        # fixing up Content-Length.
        headers = list(message.get("headers", ()))
        if not self.should_insert_html(headers):
            await self.abort()
            return
        message["headers"] = headers
        self.initial_message = message
    elif message_type == "http.response.body" and self.initial_message:
        body = message.get("body", b"")
        self.more_body = message.get("more_body", False)

        # Add this message to the current body
        self.body += body

        # if there's a valid body string, attempt to insert the HTML
        if verify_body_exists(self.body):
            header = self.transaction.browser_timing_header()
            if not header:
                # If there's no header, abort browser monitoring injection
                await self.send_buffered()
                return

            footer = self.transaction.browser_timing_footer()
            browser_agent_data = six.b(header) + six.b(footer)

            body = insert_html_snippet(self.body, lambda: browser_agent_data, self.search_maximum)

            # If we have inserted the browser agent
            if len(body) != len(self.body):
                # check to see if we have to modify the content-length
                # header
                headers = self.initial_message["headers"]
                header_index = None
                header_value = None
                for index, (name, value) in enumerate(headers):
                    if name.lower() == b"content-length":
                        header_index = index
                        header_value = value
                        break

                if header_value is None:
                    # No Content-Length header present (e.g. chunked
                    # transfer encoding): nothing to fix up.
                    content_length = None
                else:
                    try:
                        content_length = int(header_value)
                    except ValueError:
                        # Invalid content length results in an abort
                        await self.send_buffered()
                        return

                if content_length is not None:
                    delta = len(body) - len(self.body)
                    headers[header_index] = (
                        b"content-length",
                        str(content_length + delta).encode("utf-8"),
                    )

                # Body is found and modified so we can now send the
                # modified data and stop searching
                self.body = body
                await self.send_buffered()
                return

        # 1. Body is found but not modified
        # 2. Body is not found

        # No more body
        if not self.more_body:
            await self.send_buffered()

        # We have hit our search limit
        elif len(self.body) >= self.search_maximum:
            await self.send_buffered()

    # Protocol error, unexpected message: abort
    else:
        await self.abort()