def test_TimingSplit(self):
    """Timing-based splitting re-parents nodes according to request timing."""
    # Timing adds node 1 as a parent to 2 but not 3.
    requests = [
        self.MakeParserRequest(0, 'null', 100, 110, magic_content_type=True),
        self.MakeParserRequest(1, 0, 115, 120, magic_content_type=True),
        self.MakeParserRequest(2, 0, 121, 122, magic_content_type=True),
        self.MakeParserRequest(3, 0, 112, 119),
        self.MakeParserRequest(4, 2, 122, 126),
        self.MakeParserRequest(5, 2, 122, 126)
    ]

    def assert_structure(graph, successors_by_node, sorted_order):
        # Verify each node's successor list, then the topological order.
        for node_index, expected in enumerate(successors_by_node):
            self.assertEqual(
                self.SuccessorIndicies(graph._nodes[node_index]), expected)
        self.assertEqual(self.SortedIndicies(graph), sorted_order)

    assert_structure(loading_model.ResourceGraph(requests),
                     [[1, 3], [2], [4, 5], [], [], []],
                     [0, 1, 3, 2, 4, 5])

    # Change node 1 so it is a parent of 3, which become parent of 2.
    requests[1] = self.MakeParserRequest(1, 0, 110, 111,
                                         magic_content_type=True)
    assert_structure(loading_model.ResourceGraph(requests),
                     [[1], [3], [4, 5], [2], [], []],
                     [0, 1, 3, 2, 4, 5])

    # Add an initiator dependence to 1 that will become the parent of 3.
    requests[1] = self.MakeParserRequest(1, 0, 110, 111)
    requests.append(self.MakeParserRequest(6, 1, 111, 112))
    graph = loading_model.ResourceGraph(requests)
    # Check it doesn't change until we change the content type of 1.
    self.assertEqual(self.SuccessorIndicies(graph._nodes[1]), [3, 6])
    requests[1] = self.MakeParserRequest(1, 0, 110, 111,
                                         magic_content_type=True)
    assert_structure(loading_model.ResourceGraph(requests),
                     [[1], [6], [4, 5], [2], [], [], [3]],
                     [0, 1, 6, 3, 2, 4, 5])
def _FullFetch(url, json_output, prefetch):
    """Do a full fetch with optional prefetching.

    Does a cold fetch of |url|, and, when |prefetch| is set, additionally
    replays a generated prefetch page on the device and does a warm fetch.

    Args:
        url: URL to fetch; 'http://' is prepended if no scheme is present.
        json_output: path where the (warm, if prefetching) request log is
            written; with prefetch the cold log goes to json_output + '.cold'.
        prefetch: whether to generate and run the prefetch pass.
    """
    if not url.startswith(('http', 'file')):
        url = 'http://' + url
    logging.warning('Cold fetch')
    cold_data = _LogRequests(url)
    assert cold_data, 'Cold fetch failed to produce data. Check your phone.'
    if prefetch:
        assert not OPTIONS.local
        logging.warning('Generating prefetch')
        prefetch_html = _GetPrefetchHtml(
            loading_model.ResourceGraph(cold_data), name=url)
        # Keep |tmp| alive until the push completes: NamedTemporaryFile is
        # deleted on close.
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(prefetch_html)
        tmp.flush()
        # We hope that the tmpfile name is unique enough for the device.
        target = os.path.join('/sdcard/Download', os.path.basename(tmp.name))
        device = device_setup.GetFirstDevice()
        device.adb.Push(tmp.name, target)
        # Lazy %-args: let logging do the interpolation.
        logging.warning('Pushed prefetch %s to device at %s', tmp.name, target)
        _LoadPage(device, 'file://' + target)
        time.sleep(OPTIONS.prefetch_delay_seconds)
        logging.warning('Warm fetch')
        warm_data = _LogRequests(url, clear_cache_override=False)
        _WriteJsonOutput(json_output, warm_data)
        _WriteJsonOutput(json_output + '.cold', cold_data)
    else:
        _WriteJsonOutput(json_output, cold_data)


def _WriteJsonOutput(path, data):
    """Serialize |data| to |path| via _WriteJson and log the destination."""
    with open(path, 'w') as f:
        _WriteJson(f, data)
    logging.warning('Wrote %s', path)
 def test_EndToEnd(self):
     """Smoke test building and rendering a graph from a recorded trace."""
     # Test that we don't crash. This also runs through frame_load_lens.
     output = tempfile.NamedTemporaryFile()
     with gzip.GzipFile(self._ROLLING_STONE) as trace_file:
         trace = loading_trace.LoadingTrace.FromJsonDict(json.load(trace_file))
     lens = frame_load_lens.FrameLoadLens(trace)
     graph = loading_model.ResourceGraph(trace=trace, frame_lens=lens)
     model_graph.GraphVisualization(graph).OutputDot(output)
def _ProcessRequests(filename):
    """Build a ResourceGraph from the loading trace stored in |filename|.

    Applies content-classification, frame-load and activity lenses, and
    filters out ad nodes when OPTIONS.noads is set.

    Args:
        filename: path to a JSON-serialized LoadingTrace.

    Returns:
        A loading_model.ResourceGraph.
    """
    # Only json.load needs the file; close it before the heavier lens work.
    with open(filename) as f:
        trace = loading_trace.LoadingTrace.FromJsonDict(json.load(f))
    content_lens = (content_classification_lens.ContentClassificationLens.
                    WithRulesFiles(trace, OPTIONS.ad_rules,
                                   OPTIONS.tracking_rules))
    frame_lens = frame_load_lens.FrameLoadLens(trace)
    activity = activity_lens.ActivityLens(trace)
    graph = loading_model.ResourceGraph(trace, content_lens, frame_lens,
                                        activity)
    if OPTIONS.noads:
        graph.Set(node_filter=graph.FilterAds)
    return graph
 def test_MaxPath(self):
     """Cost() reports the critical path through its out-parameter."""
     requests = [
         self.MakeParserRequest(0, 'null', 100, 110),
         self.MakeParserRequest(1, 0, 115, 120),
         self.MakeParserRequest(2, 0, 112, 120),
         self.MakeParserRequest(3, 1, 122, 126),
         self.MakeParserRequest(4, 3, 127, 128),
         self.MakeParserRequest(5, 'null', 100, 105),
         self.MakeParserRequest(6, 5, 105, 110),
     ]
     graph = loading_model.ResourceGraph(requests)
     critical_path = []
     self.assertEqual(28, graph.Cost(critical_path))
     self.assertEqual([0, 1, 3, 4], [node.Index() for node in critical_path])
# Example #6
def ColdGraph(datadir, site):
  """Return a loading model graph for the cold pull of site.

  Based on ./analyze.py fetch file name conventions.

  Args:
    datadir: the directory containing site JSON data.
    site: a site string.

  Returns:
    A loading model object.
  """
  # file() is a Python 2 builtin removed in Python 3; use open() like the
  # rest of this file does.
  with open(os.path.join(datadir, site + '.json.cold')) as f:
    return loading_model.ResourceGraph(loading_trace.LoadingTrace.FromJsonDict(
        json.load(f)))
# Example #7
def WarmGraph(datadir, site):
    """Return a loading model graph for the warm pull of site.

    Based on ./analyze.py fetch file name conventions.

    Args:
        datadir: the directory containing site JSON data.
        site: a site string.

    Returns:
        A loading model object.
    """
    json_path = os.path.join(datadir, site + '.json')
    requests = log_parser.FilterRequests(log_parser.ParseJsonFile(json_path))
    return loading_model.ResourceGraph(requests)
def _Main():
  """Build a graph from each trace file given on argv and dot the sack."""
  import json
  import logging
  import sys

  import loading_model
  import loading_trace
  import resource_sack

  sack = resource_sack.GraphSack()
  for fname in sys.argv[1:]:
    # Use a context manager so each trace file is closed promptly instead of
    # leaking the handle from json.load(open(fname)).
    with open(fname) as trace_file:
      trace = loading_trace.LoadingTrace.FromJsonDict(json.load(trace_file))
    logging.info('Making graph from %s', fname)
    model = loading_model.ResourceGraph(trace, content_lens=None)
    sack.ConsumeGraph(model)
    logging.info('Finished %s', fname)
  ToDot(sack, sys.stdout, prune=.1)
 def test_TimingSplitImage(self):
     """Image-only requests keep their parser parents."""
     # If we're all image types, then we shouldn't split by timing.
     requests = [
         self.MakeParserRequest(0, 'null', 100, 110),
         self.MakeParserRequest(1, 0, 115, 120),
         self.MakeParserRequest(2, 0, 121, 122),
         self.MakeParserRequest(3, 0, 112, 119),
         self.MakeParserRequest(4, 2, 122, 126),
         self.MakeParserRequest(5, 2, 122, 126),
     ]
     for request in requests:
         request.headers['Content-Type'] = 'image/gif'
     graph = loading_model.ResourceGraph(requests)
     expected_successors = [[1, 2, 3], [], [4, 5], [], [], []]
     for node_index, successors in enumerate(expected_successors):
         self.assertEqual(
             self.SuccessorIndicies(graph._nodes[node_index]), successors)
     self.assertEqual(self.SortedIndicies(graph), [0, 1, 2, 3, 4, 5])
 def test_Costing(self):
     """Graph cost follows request timing; caching everything shrinks it."""
     requests = [
         self.MakeParserRequest(0, 'null', 100, 110),
         self.MakeParserRequest(1, 0, 115, 120),
         self.MakeParserRequest(2, 0, 112, 120),
         self.MakeParserRequest(3, 1, 122, 126),
         self.MakeParserRequest(4, 3, 127, 128),
         self.MakeParserRequest(5, 'null', 100, 105),
         self.MakeParserRequest(6, 5, 105, 110),
     ]
     graph = loading_model.ResourceGraph(requests)
     expected_successors = [[1, 2], [3], [], [4], [], [6], []]
     for node_index, successors in enumerate(expected_successors):
         self.assertEqual(
             self.SuccessorIndicies(graph._nodes[node_index]), successors)
     self.assertEqual(self.SortedIndicies(graph), [0, 5, 1, 2, 6, 3, 4])
     self.assertEqual(28, graph.Cost())
     graph.Set(cache_all=True)
     self.assertEqual(8, graph.Cost())
 def testUserSatisfiedLens(self):
     """Only the firstContentfulPaint in 'blink.user_timing' should count;
     nodes in a frame painted too late are filtered out by the lens."""
     # We track all times in milliseconds, but raw trace events are in
     # microseconds.
     MILLI_TO_MICRO = 1000
     # Named |trace| rather than |loading_trace| to avoid shadowing the
     # loading_trace module used elsewhere in this file.
     trace = test_utils.LoadingTraceFromEvents(
         [self._RequestAt(1),
          self._RequestAt(10),
          self._RequestAt(20)],
         trace_events=[{
             'ts': 0,
             'ph': 'I',
             'cat': 'blink.some_other_user_timing',
             'name': 'firstContentfulPaint'
         }, {
             'ts': 9 * MILLI_TO_MICRO,
             'ph': 'I',
             'cat': 'blink.user_timing',
             'name': 'firstDiscontentPaint'
         }, {
             'ts': 12 * MILLI_TO_MICRO,
             'ph': 'I',
             'cat': 'blink.user_timing',
             'name': 'firstContentfulPaint'
         }, {
             'ts': 22 * MILLI_TO_MICRO,
             'ph': 'I',
             'cat': 'blink.user_timing',
             'name': 'firstContentfulPaint'
         }])
     graph = loading_model.ResourceGraph(trace)
     lens = user_satisfied_lens.UserSatisfiedLens(trace, graph)
     for n in graph.Nodes():
         if n.Request().frame_id == '123.20':
             self.assertFalse(lens.Filter(n))
         else:
             self.assertTrue(lens.Filter(n))
# Example #12
def _ProcessJson(json_data):
  assert json_data
  return loading_model.ResourceGraph(log_parser.FilterRequests(
      [log_parser.RequestData.FromDict(r) for r in json_data]))
# Example #13
def _ProcessRequests(filename):
  """Parse |filename| and return a ResourceGraph of its filtered requests."""
  parsed = log_parser.ParseJsonFile(filename)
  return loading_model.ResourceGraph(log_parser.FilterRequests(parsed))