  def test_detects_cycles(self):
    r_1 = request_graph.Request(100, 200)
    r_2 = request_graph.Request(200, 300, {r_1})
    # Manually create a cycle: r_1 now also depends on r_2.
    r_1.happens_after = frozenset({r_2})
    graph = request_graph.RequestGraph({r_1, r_2})
    with self.assertRaises(simulation.GraphHasCyclesError):
      simulation.total_time_for_request_graph(graph, self.net_model)
  def test_total_request_bytes(self):
    r_1 = request_graph.Request(1, 2)
    r_2 = request_graph.Request(3, 4, {r_1})
    r_3 = request_graph.Request(5, 6, {r_1})
    r_4 = request_graph.Request(7, 8, {r_1, r_2})
    graph = request_graph.RequestGraph({r_1, r_2, r_3, r_4})

    self.assertEqual(graph.total_request_bytes(), 1 + 3 + 5 + 7)
    self.assertEqual(graph.total_response_bytes(), 2 + 4 + 6 + 8)
  def test_graph_has_independent_requests_not_independent(self):
    r_1 = request_graph.Request(1, 2)
    r_2 = request_graph.Request(3, 4, {r_1})
    r_3 = request_graph.Request(5, 6)
    graph = request_graph.RequestGraph({r_1, r_2, r_3})

    self.assertFalse(
        request_graph.graph_has_independent_requests(graph, [(3, 4), (1, 2),
                                                             (5, 6)]))
  def test_requests_that_can_run(self):
    r_1 = request_graph.Request(1, 2)
    r_2 = request_graph.Request(2, 3, {r_1})
    r_3 = request_graph.Request(4, 5, {r_1})
    r_4 = request_graph.Request(6, 7, {r_1, r_2})
    graph = request_graph.RequestGraph({r_1, r_2, r_3, r_4})

    self.assertEqual(graph.requests_that_can_run({}), {r_1})
    self.assertEqual(graph.requests_that_can_run({r_1}), {r_2, r_3})
    self.assertEqual(graph.requests_that_can_run({r_1, r_2, r_3}), {r_4})
    def setUp(self):
        self.net_model = simulation.NetworkModel(name="NetModel",
                                                 rtt=50,
                                                 bandwidth_up=100,
                                                 bandwidth_down=200,
                                                 category="New",
                                                 weight=1)

        self.graph_1 = request_graph.RequestGraph({
            request_graph.Request(1000, 1000),
        })
        self.mock_pfe_method = MockPfeMethod()
        self.mock_pfe_session = MockPfeSession()
        self.mock_pfe_method.name = mock.MagicMock(return_value="Mock_PFE_1")
        self.mock_pfe_method.start_session = mock.MagicMock(
            return_value=self.mock_pfe_session)
        self.mock_pfe_session.page_view = mock.MagicMock(
            side_effect=mock_pfe_session_page_view)
        self.mock_pfe_session.get_request_graphs = mock.MagicMock(
            return_value=[self.graph_1])

        graph_2 = request_graph.RequestGraph({
            request_graph.Request(1000, 1000),
        })
        self.mock_pfe_method_2 = MockPfeMethod()
        self.mock_pfe_session_2 = MockPfeSession()
        self.mock_pfe_method_2.name = mock.MagicMock(return_value="Mock_PFE_2")
        self.mock_pfe_method_2.start_session = mock.MagicMock(
            return_value=self.mock_pfe_session_2)
        self.mock_pfe_session_2.page_view = mock.MagicMock()
        self.mock_pfe_session_2.get_request_graphs = mock.MagicMock(
            return_value=[graph_2] * 2)

        self.mock_logged_pfe_method = MockPfeMethod()
        self.mock_logged_pfe_session = MockLoggedPfeSession()
        self.mock_logged_pfe_method.name = mock.MagicMock(
            return_value="Logged_PFE")
        self.mock_logged_pfe_method.start_session = mock.MagicMock(
            return_value=self.mock_logged_pfe_session)
        self.mock_logged_pfe_session.page_view_proto = mock.MagicMock()
        self.mock_logged_pfe_session.get_request_graphs = mock.MagicMock(
            return_value=[self.graph_1])

        self.page_view_sequence = sequence([
            {
                "roboto": [1, 2, 3],
                "open_sans": [4, 5, 6]
            },
            {
                "roboto": [7, 8, 9]
            },
            {
                "open_sans": [10, 11, 12]
            },
        ])
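
    # Hedged note: the test doubles referenced above (MockPfeMethod,
    # MockPfeSession, MockLoggedPfeSession) and the sequence() helper are
    # defined elsewhere and not shown in this excerpt. Since setUp() replaces
    # every attribute it needs with mock.MagicMock, minimal stubs along these
    # (hypothetical) lines would be enough for these tests:
    #
    #   class MockPfeMethod:
    #     pass
    #
    #   class MockPfeSession:
    #     pass
    #
    #   class MockLoggedPfeSession:
    #     pass
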
    def test_total_time_for_request_graph(self):
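        # Dependency structure: r_1 and r_2 can start immediately; r_3 waits
        # on r_2, r_4 on r_1 and r_2, and r_5 on both r_3 and r_4.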
        r_1 = request_graph.Request(100, 200)
        r_2 = request_graph.Request(200, 300)
        r_3 = request_graph.Request(300, 400, {r_2})
        r_4 = request_graph.Request(400, 500, {r_1, r_2})
        r_5 = request_graph.Request(500, 600, {r_3, r_4})
        graph = request_graph.RequestGraph({r_1, r_2, r_3, r_4, r_5})

        self.assertEqual(
            simulation.total_time_for_request_graph(graph, self.net_model),
            175)
  def test_all_requests_completed(self):
    r_1 = request_graph.Request(1, 2)
    r_2 = request_graph.Request(2, 3, {r_1})
    r_3 = request_graph.Request(4, 5, {r_1})
    r_4 = request_graph.Request(6, 7, {r_1, r_2})
    r_5 = request_graph.Request(6, 7, {r_3})
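    # r_5 is not added to the graph below; the final assertion checks that
    # extra requests in the completed set do not affect the result.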
    graph = request_graph.RequestGraph({r_1, r_2, r_3, r_4})

    self.assertFalse(graph.all_requests_completed({}))
    self.assertFalse(graph.all_requests_completed({r_1, r_2, r_3}))
    self.assertTrue(graph.all_requests_completed({r_1, r_2, r_3, r_4}))
    self.assertTrue(graph.all_requests_completed({r_1, r_2, r_3, r_4, r_5}))
    def page_view_for_font(self, font_id, codepoints):
        """Processes a page for for a single font.

        Returns the set of requests needed to load all unicode range subsets
        for the given codepoints.
        """
        font_bytes = self.font_loader.load_font(font_id)

        strategy_name, strategy = slicing_strategy_for_font(
            font_id, font_bytes)

        subset_sizes = {
            "%s:%s:%s" % (font_id, strategy_name, index):
            self.subset_sizer.subset_size(
                "%s:%s:%s" % (font_id, strategy_name, index), subset,
                font_bytes)
            for index, subset in enumerate(strategy)
            if subset.intersection(codepoints)
        }

        # Unicode range requests can happen in parallel, so there's
        # no deps between individual requests.
        requests = {
            request_graph.Request(
                network_models.ESTIMATED_HTTP_REQUEST_HEADER_SIZE,
                network_models.ESTIMATED_HTTP_RESPONSE_HEADER_SIZE + size)
            for key, size in subset_sizes.items()
            if key not in self.already_loaded_subsets
        }

        self.already_loaded_subsets.update(subset_sizes.keys())
        return requests
  def test_can_run(self):
    r_1 = request_graph.Request(1, 2)
    r_2 = request_graph.Request(2, 3, {r_1})
    r_3 = request_graph.Request(2, 3, {r_1, r_2})

    self.assertTrue(r_1.can_run({}))
    self.assertTrue(r_1.can_run({r_2}))

    self.assertFalse(r_2.can_run({}))
    self.assertFalse(r_2.can_run({r_3}))
    self.assertTrue(r_2.can_run({r_1}))

    self.assertFalse(r_3.can_run({}))
    self.assertFalse(r_3.can_run({r_2}))
    self.assertFalse(r_3.can_run({r_1}))
    self.assertTrue(r_3.can_run({r_1, r_2}))
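
  # Hedged sketch (an assumption, not necessarily the actual implementation):
  # the behaviour exercised above is consistent with can_run() returning True
  # once every request in happens_after is already in the completed set:
  #
  #   def can_run(self, completed_requests):
  #     return self.happens_after.issubset(completed_requests)
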
    def get_request_graphs(self):
        """Returns a graph of requests that would have resulted from the page views.

    Returns a list of request graphs, one per page view.
    """
        request = request_graph.Request(1000, 1000)
        return [request_graph.RequestGraph({request})] * self.page_view_count
def to_request_graph(records):
  """Convert a list of records into a request graph.

  In this graph each request depends on the previous request.
  """
  result = set()
  last_request = None
  for record in records:
    request = request_graph.Request(
        network_models.ESTIMATED_HTTP_REQUEST_HEADER_SIZE + record.request_size,
        network_models.ESTIMATED_HTTP_RESPONSE_HEADER_SIZE +
        record.response_size,
        happens_after={last_request} if last_request else None)
    last_request = request
    result.add(request)
  return result
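

# Hedged, hypothetical usage sketch (not part of the original module):
def example_request_chain():
  """Builds a chained request graph from fake records.

  LoggedRecord is a hypothetical stand-in for the record type consumed by
  to_request_graph(); only the request_size and response_size attributes read
  above are assumed.
  """
  import collections
  LoggedRecord = collections.namedtuple("LoggedRecord",
                                        ["request_size", "response_size"])
  requests = to_request_graph(
      [LoggedRecord(100, 20000), LoggedRecord(50, 500)])
  # Each request depends on the previous one, forming a sequential chain.
  return request_graph.RequestGraph(requests)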
    def get_request_graphs(self):
        """Get a list of request graphs, one per page view."""
        request_graphs = [
            request_graph.RequestGraph(set())
            for i in range(self.page_view_count - 1)
        ]

        requests = set()
        for font_id, codepoints in self.codepoints_by_font.items():
            font_bytes = self.font_loader.load_font(font_id)
            size = self.subset_sizer.subset_size(
                "%s:%s" % (font_id, len(codepoints)), codepoints, font_bytes)
            if size:
                requests.add(request_graph.Request(0, size))

        request_graphs.insert(0, request_graph.RequestGraph(requests))
        return request_graphs
    def page_view_proto(self, page_view):
        """Processes a page view."""
        requests = set()

        for content in page_view.contents:
            previous_request = None
            for logged_request in content.logged_requests:
                happens_after = None if previous_request is None else {
                    previous_request
                }
                next_request = request_graph.Request(
                    logged_request.request_size,
                    logged_request.response_size,
                    happens_after=happens_after)
                requests.add(next_request)
                previous_request = next_request

        self.request_graphs.append(request_graph.RequestGraph(requests))
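
    # Hedged usage sketch: the only proto structure page_view_proto() relies
    # on is what it reads above (page_view.contents, each content carrying
    # logged_requests with request_size and response_size). A hypothetical
    # stand-in built with types.SimpleNamespace would exercise it:
    #
    #   fake_page_view = types.SimpleNamespace(contents=[
    #       types.SimpleNamespace(logged_requests=[
    #           types.SimpleNamespace(request_size=75, response_size=15000),
    #           types.SimpleNamespace(request_size=75, response_size=3000),
    #       ]),
    #   ])
    #   session.page_view_proto(fake_page_view)  # session: the owning object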
  def page_view_for_font(self, font_id, codepoints):
    """Processes a page for for a single font."""
    font_bytes = self.font_loader.load_font(font_id)

    existing_codepoints = self.codepoints_by_font.get(font_id, set())
    existing_codepoints.update(codepoints)
    self.codepoints_by_font[font_id] = existing_codepoints

    size = self.subset_sizer.subset_size(
        "%s:%s" % (font_id, len(existing_codepoints)), existing_codepoints,
        font_bytes)

    delta = size - self.subset_size_by_font.get(font_id, 0)
    self.subset_size_by_font[font_id] = size

    if delta > 0:
      return {request_graph.Request(0, delta)}

    return set()
  def page_view(self, usage_by_font):
    """Processes a page view.

    For each font referenced in the page view, record a request to
    load it if it has not been encountered yet.
    """
    requests = set()
    for font_id, usage in usage_by_font.items():
      if font_id in self.loaded_fonts or not usage or not usage.codepoints:
        continue

      self.loaded_fonts.add(font_id)
      requests.add(
          request_graph.Request(
              network_models.ESTIMATED_HTTP_REQUEST_HEADER_SIZE,
              network_models.ESTIMATED_HTTP_RESPONSE_HEADER_SIZE +
              self.get_font_size(font_id)))

    graph = request_graph.RequestGraph(set())
    if requests:
      graph = request_graph.RequestGraph(requests)
    self.request_graphs.append(graph)
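
  # Hedged usage sketch: "usage" objects are assumed to expose only the
  # .codepoints attribute read above (SimpleNamespace is a hypothetical
  # stand-in). A second page view of an already-loaded font appends an
  # empty request graph:
  #
  #   fake_usage = types.SimpleNamespace(codepoints={0x41, 0x42})
  #   session.page_view({"roboto": fake_usage})  # one request recorded
  #   session.page_view({"roboto": fake_usage})  # font already loaded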
    def page_view(self, usage_by_font):
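        """Processes a page view using per-glyph-range requests.

        For each font, only the glyph ranges not yet loaded are requested;
        the first request for a font also carries the non-glyph ("base")
        portion of the font.
        """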
        requests = set()
        for font_id, usage in usage_by_font.items():
            if font_id not in GLYPH_DATA_CACHE:
                GLYPH_DATA_CACHE[font_id] = self.compute_glyph_data(font_id)
            font_data, glyph_data = GLYPH_DATA_CACHE[font_id]

            needs_base_request = font_id not in self.loaded_glyphs
            glyphs = codepoints_to_glyphs(self.font_loader.load_font(font_id),
                                          usage.codepoints)
            present_glyphs = self.loaded_glyphs[font_id]
            glyphs_to_download = {
                glyph for glyph in glyphs if glyph not in present_glyphs
            }

            if not glyphs_to_download:
                continue

            self.loaded_glyphs[font_id].update(glyphs_to_download)

            necessary_glyph_ranges, unnecessary_glyph_ranges = self.compute_range_parallel_arrays(
                [len(data) for data in glyph_data], glyphs_to_download)
            extra_glyphs_to_download = self.coalesce_runs(
                necessary_glyph_ranges, unnecessary_glyph_ranges)
            self.loaded_glyphs[font_id].update(extra_glyphs_to_download)

            # FIXME: Figure out if it's cheaper to just download the base and
            # all the necessary glyphs in a single range request. This will
            # improve performance on small fonts.

            starting_index = 0
            if needs_base_request:
                payload_start, payload_end, extra_start, extra_end, starting_index = self.compute_initial_state(
                    necessary_glyph_ranges, unnecessary_glyph_ranges)

                # Assume the font has been optimized correctly, and glyph data is placed at the end
                base_size = len(font_data) - sum(
                    [len(data) for data in glyph_data])
                payload = font_data[:base_size] + b"".join(
                    glyph_data[payload_start:payload_end])

                compressed_payload = zlib.compress(payload)
                self.loaded_glyphs[font_id].update(
                    range(extra_start, extra_end))
                base_request = request_graph.Request(
                    network_models.ESTIMATED_HTTP_REQUEST_HEADER_SIZE,
                    network_models.ESTIMATED_HTTP_RESPONSE_HEADER_SIZE +
                    len(compressed_payload))
                requests.add(base_request)

            happens_after = None
            if needs_base_request:
                happens_after = {base_request}
            for i in range(starting_index, len(necessary_glyph_ranges)):
                if necessary_glyph_ranges[i].byte_length == 0:
                    continue
                payload = b"".join(
                    glyph_data[necessary_glyph_ranges[i].begin_glyph:
                               necessary_glyph_ranges[i].end_glyph])
                compressed_payload = zlib.compress(payload)
                request = request_graph.Request(
                    network_models.ESTIMATED_HTTP_REQUEST_HEADER_SIZE,
                    network_models.ESTIMATED_HTTP_RESPONSE_HEADER_SIZE +
                    len(compressed_payload),
                    happens_after=happens_after)
                requests.add(request)

        graph = request_graph.RequestGraph(requests)
        self.request_graphs.append(graph)