Example #1
  def Post(self, *args):
    """Returns timeseries data in response to API requests.

    Argument:
      test_path: Full path of test timeseries

    Outputs:
      JSON timeseries data for the test_path, see README.md.
    """
    try:
      days = int(self.request.get('num_days', 30))
    except ValueError:
      raise api_request_handler.BadRequestError(
          'Invalid num_days parameter %s' % self.request.get('num_days'))
    if days <= 0:
      raise api_request_handler.BadRequestError(
          'num_days must be positive (%s)' % days)
    before = datetime.datetime.now() - datetime.timedelta(days=days)

    test_path = args[0]
    test_key = utils.TestKey(test_path)
    test = test_key.get()
    if not test:
      raise api_request_handler.BadRequestError(
          'Invalid test_path %s' % test_path)

    assert(
        datastore_hooks.IsUnalteredQueryPermitted() or not test.internal_only)
    datastore_hooks.SetSinglePrivilegedRequest()

    q = graph_data.Row.query()
    q = q.filter(graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
    q = q.filter(graph_data.Row.timestamp > before)

    rows = q.fetch()
    if not rows:
      return []
    revisions = [rev for rev in rows[0].to_dict() if rev.startswith('r_')]
    header = ['revision', 'value', 'timestamp'] + revisions
    timeseries = [header]
    for row in sorted(rows, key=lambda r: r.revision):
      timeseries.append([self._GetValue(row, a) for a in header])

    return {
        'timeseries': timeseries,
        'test_path': test_path,
        'revision_logs': namespaced_stored_object.Get('revision_info'),
        'improvement_direction': test.improvement_direction,
    }
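  # Illustrative sketch, not from the original handler: the response shape
  # reconstructed from the return statement above. All concrete values and
  # the 'r_commit_pos' column name are hypothetical.
  _EXAMPLE_RESPONSE = {
      'timeseries': [
          ['revision', 'value', 'timestamp', 'r_commit_pos'],
          [424242, 17.5, '2018-01-01T00:00:00', '424242'],
      ],
      'test_path': 'ChromiumPerf/linux/sunspider/Total',
      'revision_logs': {},  # namespaced_stored_object.Get('revision_info')
      'improvement_direction': 1,  # enum stored on the test entity
  }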
Example #2
def ComputeRevision(histograms):
    _CheckRequest(len(histograms) > 0, 'Must upload at least one histogram')
    rev = _GetDiagnosticValue(reserved_infos.POINT_ID.name,
                              histograms.GetFirstHistogram(),
                              optional=True)

    if rev is None:
        rev = _GetDiagnosticValue(
            reserved_infos.CHROMIUM_COMMIT_POSITIONS.name,
            histograms.GetFirstHistogram(),
            optional=True)

    if rev is None:
        revision_timestamps = histograms.GetFirstHistogram().diagnostics.get(
            reserved_infos.REVISION_TIMESTAMPS.name)
        _CheckRequest(
            revision_timestamps is not None,
            'Must specify REVISION_TIMESTAMPS, CHROMIUM_COMMIT_POSITIONS,'
            ' or POINT_ID')
        rev = revision_timestamps.max_timestamp

    if not isinstance(rev, int):
        raise api_request_handler.BadRequestError(
            'Point ID must be an integer.')

    return rev
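# Sketch of how an uploader could attach the POINT_ID diagnostic that
# ComputeRevision checks first. This is an assumption for illustration; the
# import paths follow the catapult tracing library and may differ.
from tracing.value import histogram as histogram_module
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos

hist = histogram_module.Histogram('my_metric', 'count')
hist.diagnostics[reserved_infos.POINT_ID.name] = generic_set.GenericSet([12345])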
Example #3
def ProcessHistogramSet(histogram_dicts):
    if not isinstance(histogram_dicts, list):
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must be a list of dicts')
    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(histogram_dicts)
    histograms.ResolveRelatedHistograms()
    histograms.DeduplicateDiagnostics()

    _LogDebugInfo(histograms)

    InlineDenseSharedDiagnostics(histograms)
    revision = ComputeRevision(histograms)
    suite_key = GetSuiteKey(histograms)

    # We'll skip the histogram-level sparse diagnostics because we need to
    # handle those with the histograms, below, so that we can properly assign
    # test paths.
    suite_level_sparse_diagnostic_entities = FindSuiteLevelSparseDiagnostics(
        histograms, suite_key, revision)

    # TODO(eakuefner): Refactor master/bot computation to happen above this line
    # so that we can replace with a DiagnosticRef rather than a full diagnostic.
    new_guids_to_old_diagnostics = DeduplicateAndPut(
        suite_level_sparse_diagnostic_entities, suite_key, revision)
    for new_guid, old_diagnostic in new_guids_to_old_diagnostics.iteritems():
        histograms.ReplaceSharedDiagnostic(
            new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

    tasks = _BatchHistogramsIntoTasks(histograms, revision)

    _QueueHistogramTasks(tasks)
Example #4
def ProcessHistogramSet(histogram_dicts):
    if not isinstance(histogram_dicts, list):
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must be a list of dicts')
    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(histogram_dicts)
    histograms.ResolveRelatedHistograms()
    InlineDenseSharedDiagnostics(histograms)

    revision = ComputeRevision(histograms)

    task_list = []

    suite_key = GetSuiteKey(histograms)

    suite_level_sparse_diagnostic_entities = []
    diagnostic_names_added = {}

    # We'll skip the histogram-level sparse diagnostics because we need to
    # handle those with the histograms, below, so that we can properly assign
    # test paths.
    for hist in histograms:
        for name, diag in hist.diagnostics.iteritems():
            if name in SUITE_LEVEL_SPARSE_DIAGNOSTIC_NAMES:
                if diagnostic_names_added.get(name) is None:
                    diagnostic_names_added[name] = diag.guid

                if diagnostic_names_added.get(name) != diag.guid:
                    raise ValueError(
                        name +
                        ' diagnostics must be the same for all histograms')

                suite_level_sparse_diagnostic_entities.append(
                    histogram.SparseDiagnostic(id=diag.guid,
                                               data=diag.AsDict(),
                                               test=suite_key,
                                               start_revision=revision,
                                               end_revision=sys.maxint,
                                               name=name))

    # TODO(eakuefner): Refactor master/bot computation to happen above this line
    # so that we can replace with a DiagnosticRef rather than a full diagnostic.
    new_guids_to_old_diagnostics = DeduplicateAndPut(
        suite_level_sparse_diagnostic_entities, suite_key, revision)
    for new_guid, old_diagnostic in new_guids_to_old_diagnostics.iteritems():
        histograms.ReplaceSharedDiagnostic(
            new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

    for hist in histograms:
        guid = hist.guid
        diagnostics = FindHistogramLevelSparseDiagnostics(guid, histograms)
        # TODO(eakuefner): Don't compute full diagnostics, because we need to
        # call GetOrCreate here and in the queue anyway.
        test_path = ComputeTestPath(guid, histograms)
        # TODO(eakuefner): Batch these better than one per task.
        task_list.append(_MakeTask(hist, test_path, revision, diagnostics))

    queue = taskqueue.Queue(TASK_QUEUE_NAME)
    queue.add(task_list)
Example #5
    def AuthorizedPost(self):
        datastore_hooks.SetPrivilegedRequest()

        with timing.WallTimeLogger('decompress'):
            try:
                data_str = self.request.body
                # Decompress only to detect whether the payload is already
                # compressed; the compressed bytes themselves are what get
                # written to cloud storage below.
                zlib.decompress(data_str)
                logging.info('Received compressed data.')
            except zlib.error:
                data_str = self.request.get('data')
                if not data_str:
                    raise api_request_handler.BadRequestError(
                        'Missing "data" parameter')
                # Store the payload compressed, matching the branch above.
                data_str = zlib.compress(data_str)
                logging.info('Received uncompressed data.')

        if not data_str:
            raise api_request_handler.BadRequestError(
                'Missing "data" parameter')

        filename = uuid.uuid4()
        params = {'gcs_file_path': '/add-histograms-cache/%s' % filename}

        gcs_file = cloudstorage.open(params['gcs_file_path'],
                                     'w',
                                     content_type='application/octet-stream',
                                     retry_params=_RETRY_PARAMS)
        gcs_file.write(data_str)
        gcs_file.close()

        retry_options = taskqueue.TaskRetryOptions(
            task_retry_limit=_TASK_RETRY_LIMIT)
        queue = taskqueue.Queue('default')
        queue.add(
            taskqueue.Task(url='/add_histograms/process',
                           payload=json.dumps(params),
                           retry_options=retry_options))
Example #6
  def Post(self):
    # Pull out the Job ID and reason in the request.
    args = self.request.params.mixed()
    job_id = args.get('job_id')
    reason = args.get('reason')
    if not job_id or not reason:
      raise api_request_handler.BadRequestError()

    job = job_module.JobFromId(job_id)
    if not job:
      raise api_request_handler.NotFoundError()

    # Enforce first that only the users that started the job and administrators
    # can cancel jobs.
    email = utils.GetEmail()
    if not utils.IsAdministrator() and email != job.user:
      raise api_request_handler.ForbiddenError()

    # Truncate the reason to at most 255 characters, ellipsis included
    # (252 characters of the reason plus a 3-character '...').
    try:
      job.Cancel(email, reason[:252] + '...' if len(reason) > 255 else reason)
      return {'job_id': job.job_id, 'state': 'Cancelled'}
    except errors.CancelError as e:
      self.response.set_status(400)
      return {'job_id': job.job_id, 'message': e.message}
Example #7
    def AuthorizedPost(self):
        datastore_hooks.SetPrivilegedRequest()

        try:
            data_str = self.request.get('data')
            if not data_str:
                raise api_request_handler.BadRequestError(
                    'Missing "data" parameter')

            logging.info('Received data: %s', data_str)

            histogram_dicts = json.loads(data_str)
            ProcessHistogramSet(histogram_dicts)
        except api_request_handler.BadRequestError:
            # TODO(simonhatch, eakuefner): Remove this later.
            # When this has all stabilized a bit, remove this and let the 400
            # propagate to clients, but for now, to prevent the waterfall from
            # re-uploading over and over while we fix bugs, just log the error.
            # https://github.com/catapult-project/catapult/issues/4019
            logging.error(traceback.format_exc())
        except Exception:  # pylint: disable=broad-except
            # TODO(simonhatch, eakuefner): Remove this later.
            # We shouldn't be catching ALL exceptions, this is just while the
            # stability of the endpoint is being worked on.
            logging.error(traceback.format_exc())
Example #8
def _CreateHistogramTasks(suite_path,
                          histograms,
                          revision,
                          benchmark_description,
                          completion_token=None):
    tasks = []
    test_paths = set()

    for hist in histograms:
        diagnostics = FindHistogramLevelSparseDiagnostics(hist)
        test_path = '%s/%s' % (suite_path,
                               histogram_helpers.ComputeTestPath(hist))

        # Log the information here so we can see which histograms are being queued.
        logging.debug('Queueing: %s', test_path)

        if test_path in test_paths:
            raise api_request_handler.BadRequestError(
                'Duplicate histogram detected: %s' % test_path)

        test_paths.add(test_path)

        # We create one task per histogram, so that we can get as much time as we
        # need for processing each histogram per task.
        task_dict = _MakeTaskDict(hist, test_path, revision,
                                  benchmark_description, diagnostics,
                                  completion_token)
        tasks.append(_MakeTask([task_dict]))

    if completion_token is not None:
        completion_token.PopulateMeasurements(test_paths)

    return tasks
Example #9
    def Post(self):
        if utils.IsDevAppserver():
            # Don't require developers to zip the body.
            # In prod, the data will be written to cloud storage and processed on the
            # taskqueue, so the caller will not see any errors. In dev_appserver,
            # process the data immediately so the caller will see errors.
            ProcessHistogramSet(
                _LoadHistogramList(StringIO.StringIO(self.request.body)))
            return

        with timing.WallTimeLogger('decompress'):
            try:
                data_str = self.request.body

                # Try to decompress at most 100 bytes from the data, only to determine
                # if we've been given compressed payload.
                zlib.decompressobj().decompress(data_str, 100)
                logging.info('Received compressed data.')
            except zlib.error:
                data_str = self.request.get('data')
                if not data_str:
                    raise api_request_handler.BadRequestError(
                        'Missing or uncompressed data.')
                data_str = zlib.compress(data_str)
                logging.info('Received uncompressed data.')

        if not data_str:
            raise api_request_handler.BadRequestError(
                'Missing "data" parameter')

        filename = uuid.uuid4()
        params = {'gcs_file_path': '/add-histograms-cache/%s' % filename}

        gcs_file = cloudstorage.open(params['gcs_file_path'],
                                     'w',
                                     content_type='application/octet-stream',
                                     retry_params=_RETRY_PARAMS)
        gcs_file.write(data_str)
        gcs_file.close()

        retry_options = taskqueue.TaskRetryOptions(
            task_retry_limit=_TASK_RETRY_LIMIT)
        queue = taskqueue.Queue('default')
        queue.add(
            taskqueue.Task(url='/add_histograms/process',
                           payload=json.dumps(params),
                           retry_options=retry_options))
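# Standalone sketch of the compression check above (a module-level helper,
# added for illustration only): decompressing at most 100 bytes is enough for
# zlib to raise zlib.error on non-zlib payloads, without inflating the whole
# body.
def _IsZlibCompressed(data):
    try:
        zlib.decompressobj().decompress(data, 100)
        return True
    except zlib.error:
        return False

# e.g. _IsZlibCompressed(zlib.compress('{}')) is True,
#      _IsZlibCompressed('{}') is False.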
Example #10
 def Post(self, *args, **kwargs):
     # We translate the order of the arguments, because the first arg is the
     # project and the second is the bug id.
     if len(args) != 2:
         raise api_request_handler.BadRequestError(
             'Must have exactly two arguments in the URI.')
     return super(BugsWithProjectHandler, self).Post(args[1],
                                                     project=args[0])
Example #11
def ProcessHistogramSet(histogram_dicts):
    if not isinstance(histogram_dicts, list):
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must be a list of dicts')

    bot_whitelist_future = stored_object.GetAsync(
        add_point_queue.BOT_WHITELIST_KEY)

    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(histogram_dicts)
    histograms.ResolveRelatedHistograms()
    histograms.DeduplicateDiagnostics()

    if len(histograms) == 0:
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must contain at least one histogram.')

    _LogDebugInfo(histograms)

    InlineDenseSharedDiagnostics(histograms)
    revision = ComputeRevision(histograms)
    master, bot, benchmark = _GetMasterBotBenchmarkFromHistogram(
        histograms.GetFirstHistogram())
    suite_key = utils.TestKey('%s/%s/%s' % (master, bot, benchmark))

    bot_whitelist = bot_whitelist_future.get_result()
    internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)

    # We'll skip the histogram-level sparse diagnostics because we need to
    # handle those with the histograms, below, so that we can properly assign
    # test paths.
    suite_level_sparse_diagnostic_entities = FindSuiteLevelSparseDiagnostics(
        histograms, suite_key, revision, internal_only)

    # TODO(eakuefner): Refactor master/bot computation to happen above this line
    # so that we can replace with a DiagnosticRef rather than a full diagnostic.
    new_guids_to_old_diagnostics = DeduplicateAndPut(
        suite_level_sparse_diagnostic_entities, suite_key, revision)
    for new_guid, old_diagnostic in new_guids_to_old_diagnostics.iteritems():
        histograms.ReplaceSharedDiagnostic(
            new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

    tasks = _BatchHistogramsIntoTasks(suite_key.id(), histograms, revision)

    _QueueHistogramTasks(tasks)
Example #12
  def AuthorizedPost(self):
    datastore_hooks.SetPrivilegedRequest()

    data_str = self.request.get('data')
    if not data_str:
      raise api_request_handler.BadRequestError('Missing "data" parameter')

    histogram_dicts = json.loads(data_str)
    ProcessHistogramSet(histogram_dicts)
Example #13
    def AuthorizedPost(self):
        try:
            job = _CreateJob(self.request)
            job.Start()

            return {
                'jobId': job.job_id,
                'jobUrl': job.url,
            }
        except (KeyError, TypeError, ValueError) as e:
            raise api_request_handler.BadRequestError(e.message)
Example #14
 def Post(self):
     git_hash = self.request.get('git_hash')
     try:
         c = change.Commit.FromDict({
             'repository': 'chromium',
             'git_hash': git_hash,
         })
         return c.AsDict()
     except KeyError:
         raise api_request_handler.BadRequestError('Unknown git hash: %s' %
                                                   git_hash)
Example #15
class ApiRequestHandlerTest(testing_common.TestCase):

  def setUp(self):
    super(ApiRequestHandlerTest, self).setUp()

    app = webapp2.WSGIApplication(
        [(r'/api/test', TestApiRequestHandler)])
    self.testapp = webtest.TestApp(app)

  @mock.patch.object(api_auth, 'TryAuthorize')
  def testPost_Authorized_AuthorizedPostCalled(self, mock_authorize):
    response = self.testapp.post('/api/test')
    self.assertEqual(
        {'foo': 'bar'},
        json.loads(response.body))
    self.assertTrue(mock_authorize.called)

  @mock.patch.object(
      api_auth, 'TryAuthorize', mock.MagicMock(side_effect=api_auth.OAuthError))
  @mock.patch.object(
      TestApiRequestHandler, 'AuthorizedPost')
  def testPost_Unauthorized_AuthorizedPostNotCalled(self, mock_post):
    response = self.testapp.post('/api/test', status=403)
    self.assertEqual(
        {'error': 'User authentication error'},
        json.loads(response.body))
    self.assertFalse(mock_post.called)

  @mock.patch.object(
      api_auth, 'TryAuthorize',
      mock.MagicMock(side_effect=api_request_handler.BadRequestError('foo')))
  def testPost_BadRequest_400(self):
    response = self.testapp.post('/api/test', status=400)
    self.assertEqual(
        {'error': 'foo'},
        json.loads(response.body))

  @mock.patch.object(
      api_auth, 'TryAuthorize',
      mock.MagicMock(side_effect=api_auth.OAuthError))
  def testPost_OAuthError_403(self):
    response = self.testapp.post('/api/test', status=403)
    self.assertEqual(
        {'error': 'User authentication error'},
        json.loads(response.body))

  @mock.patch.object(
      api_auth, 'TryAuthorize',
      mock.MagicMock(side_effect=api_auth.NotLoggedInError))
  def testPost_NotLoggedInError_403(self):
    response = self.testapp.post('/api/test', status=403)
    self.assertEqual(
        {'error': 'User not authenticated'},
        json.loads(response.body))
Example #16
def ComputeRevision(histograms):
    _CheckRequest(len(histograms) > 0, 'Must upload at least one histogram')
    commit_position = _GetDiagnosticValue(
        reserved_infos.CHROMIUM_COMMIT_POSITIONS.name,
        histograms.GetFirstHistogram())

    # TODO(eakuefner): Allow users to specify other types of revisions to be used
    # for computing revisions of dashboard points. See
    # https://github.com/catapult-project/catapult/issues/3623.
    if not isinstance(commit_position, int):
        raise api_request_handler.BadRequestError(
            'Commit Position must be an integer.')
    return commit_position
Example #17
def _BatchHistogramsIntoTasks(suite_path, histograms, revision,
                              benchmark_description):
    params = []
    tasks = []

    base_size = _MakeTask([]).size
    estimated_size = 0

    duplicate_check = set()

    for hist in histograms:
        diagnostics = FindHistogramLevelSparseDiagnostics(hist)

        # TODO(896856): Don't compute full diagnostics, because we need to
        # call GetOrCreate here and in the queue anyway.
        test_path = '%s/%s' % (suite_path,
                               histogram_helpers.ComputeTestPath(hist))

        # Log the information here so we can see which histograms are being queued.
        logging.debug('Queueing: %s', test_path)

        if test_path in duplicate_check:
            raise api_request_handler.BadRequestError(
                'Duplicate histogram detected: %s' % test_path)
        duplicate_check.add(test_path)

        # TODO(#4135): Batch these better than one per task.
        task_dict = _MakeTaskDict(hist, test_path, revision,
                                  benchmark_description, diagnostics)

        estimated_size_dict = len(json.dumps(task_dict))
        estimated_size += estimated_size_dict

        # Creating the task directly and getting the size back is slow, so we
        # just keep a running total of the estimated task size. A bit
        # hand-wavy, but the number of histograms per task doesn't need to be
        # exact; it just has to stay under the max task size.
        estimated_total_size = estimated_size * 1.05 + base_size + 1024
        if estimated_total_size > taskqueue.MAX_TASK_SIZE_BYTES:
            t = _MakeTask(params)
            tasks.append(t)
            params = []
            estimated_size = estimated_size_dict

        params.append(task_dict)

    if params:
        t = _MakeTask(params)
        tasks.append(t)

    return tasks
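# For context, a minimal sketch of the _MakeTask helper assumed by the code
# above; the real implementation and handler URL may differ. Task.size (used
# for base_size) is the taskqueue API's estimate of the serialized task size.
def _MakeTask(params):
    return taskqueue.Task(
        url='/add_histograms_queue',  # hypothetical queue handler URL
        payload=json.dumps(params))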
Example #18
def ProcessHistogramSet(histogram_dicts):
    if not isinstance(histogram_dicts, list):
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must be a list of dicts')
    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(histogram_dicts)
    histograms.ResolveRelatedHistograms()
    histograms.DeduplicateDiagnostics()

    _LogDebugInfo(histograms)

    InlineDenseSharedDiagnostics(histograms)
    revision = ComputeRevision(histograms)
    suite_key = GetSuiteKey(histograms)

    # We'll skip the histogram-level sparse diagnostics because we need to
    # handle those with the histograms, below, so that we can properly assign
    # test paths.
    suite_level_sparse_diagnostic_entities = FindSuiteLevelSparseDiagnostics(
        histograms, suite_key, revision)

    # TODO(eakuefner): Refactor master/bot computation to happen above this line
    # so that we can replace with a DiagnosticRef rather than a full diagnostic.
    new_guids_to_old_diagnostics = DeduplicateAndPut(
        suite_level_sparse_diagnostic_entities, suite_key, revision)
    for new_guid, old_diagnostic in new_guids_to_old_diagnostics.iteritems():
        histograms.ReplaceSharedDiagnostic(
            new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

    task_list = []

    for hist in histograms:
        guid = hist.guid
        diagnostics = FindHistogramLevelSparseDiagnostics(guid, histograms)
        # TODO(eakuefner): Don't compute full diagnostics, because we need to
        # call GetOrCreate here and in the queue anyway.
        test_path = ComputeTestPath(guid, histograms)
        # TODO(eakuefner): Batch these better than one per task.
        task_list.append(_MakeTask(hist, test_path, revision, diagnostics))

    queue = taskqueue.Queue(TASK_QUEUE_NAME)

    futures = []
    for i in xrange(0, len(task_list), taskqueue.MAX_TASKS_PER_ADD):
        f = queue.add_async(task_list[i:i + taskqueue.MAX_TASKS_PER_ADD])
        futures.append(f)
    for f in futures:
        f.get_result()
Example #19
  def AuthorizedPost(self):
    datastore_hooks.SetPrivilegedRequest()

    try:
      data_str = zlib.decompress(self.request.body)
      logging.info('Received compressed data.')
    except zlib.error:
      data_str = self.request.get('data')
      logging.info('Received uncompressed data.')
    if not data_str:
      raise api_request_handler.BadRequestError('Missing "data" parameter')

    logging.info('Received data: %s', data_str[:100])

    histogram_dicts = json.loads(data_str)
    ProcessHistogramSet(histogram_dicts)
Example #20
def ComputeRevision(histograms):
    _CheckRequest(len(histograms) > 0, 'Must upload at least one histogram')
    rev = _GetDiagnosticValue(reserved_infos.POINT_ID.name,
                              histograms.GetFirstHistogram(),
                              optional=True)

    if rev is None:
        rev = _GetDiagnosticValue(
            reserved_infos.CHROMIUM_COMMIT_POSITIONS.name,
            histograms.GetFirstHistogram())

    if not isinstance(rev, (long, int)):
        raise api_request_handler.BadRequestError(
            'Point ID must be an integer.')

    return rev
Example #21
def ComputeRevision(histograms):
    _CheckRequest(len(histograms) > 0, 'Must upload at least one histogram')
    diagnostics = histograms.GetFirstHistogram().diagnostics
    _CheckRequest(reserved_infos.CHROMIUM_COMMIT_POSITIONS.name in diagnostics,
                  'Histograms must have Chromium commit position attached')
    chromium_commit_position = list(
        diagnostics[reserved_infos.CHROMIUM_COMMIT_POSITIONS.name])

    _CheckRequest(
        len(chromium_commit_position) == 1,
        'Chromium commit position must have 1 value')

    # TODO(eakuefner): Allow users to specify other types of revisions to be used
    # for computing revisions of dashboard points. See
    # https://github.com/catapult-project/catapult/issues/3623.
    commit_position = chromium_commit_position[0]
    if not isinstance(commit_position, int):
        raise api_request_handler.BadRequestError(
            'Commit Position must be an integer.')
    return commit_position
Example #22
    def AuthorizedPost(self):
        datastore_hooks.SetPrivilegedRequest()

        try:
            data_str = self.request.get('data')
            if not data_str:
                raise api_request_handler.BadRequestError(
                    'Missing "data" parameter')

            logging.info('Received data: %s', data_str)

            histogram_dicts = json.loads(data_str)
            ProcessHistogramSet(histogram_dicts)
        except api_request_handler.BadRequestError as e:
            # TODO(simonhatch, eakuefner): Remove this later.
            # When this has all stabilized a bit, remove this and let the 400
            # propagate to clients, but for now, to prevent the waterfall from
            # re-uploading over and over while we fix bugs, just log the error.
            # https://github.com/catapult-project/catapult/issues/4019
            logging.error(e.message)
Example #23
  def Post(self, *args):
    """Returns list in response to API requests.

    Argument:
      benchmark: name of the benchmark to list tests for

    Outputs:
      JSON list of monitored timeseries for the benchmark, see README.md.
    """
    benchmark = args[0]
    sheriff_name = self.request.get('sheriff', 'Chromium Perf Sheriff')
    query = graph_data.TestMetadata.query()
    query = query.filter(graph_data.TestMetadata.suite_name == benchmark)
    query = query.filter(graph_data.TestMetadata.has_rows == True)
    query = query.filter(graph_data.TestMetadata.deprecated == False)
    if sheriff_name and sheriff_name != 'all':
      raise api_request_handler.BadRequestError(
          'Sheriff name is no longer supported. Use `all` instead.')

    keys = query.fetch(keys_only=True)
    return [utils.TestPath(key) for key in keys]
Example #24
 def Post(self):
     try:
         c1 = change.Commit.FromDict({
             'repository': 'chromium',
             'git_hash': self.request.get('start_git_hash'),
         })
         c2 = change.Commit.FromDict({
             'repository': 'chromium',
             'git_hash': self.request.get('end_git_hash'),
         })
         commits = change.Commit.CommitRange(c1, c2)
         commits = [
             change.Commit('chromium', c['commit']).AsDict() for c in commits
         ]
         return [c1.AsDict()] + commits
     except request.RequestError as e:
         raise api_request_handler.BadRequestError(e.message)
Example #25
def _CreateHistogramTasks(suite_path,
                          histograms,
                          revision,
                          benchmark_description,
                          completion_token=None):
    tasks = []
    duplicate_check = set()
    measurement_add_futures = []
    sheriff_client = sheriff_config_client.GetSheriffConfigClient()

    for hist in histograms:
        diagnostics = FindHistogramLevelSparseDiagnostics(hist)
        test_path = '%s/%s' % (suite_path,
                               histogram_helpers.ComputeTestPath(hist))

        # Log the information here so we can see which histograms are being queued.
        logging.debug('Queueing: %s', test_path)

        if test_path in duplicate_check:
            raise api_request_handler.BadRequestError(
                'Duplicate histogram detected: %s' % test_path)

        duplicate_check.add(test_path)

        # We create one task per histogram, so that we can get as much time as we
        # need for processing each histogram per task.
        task_dict = _MakeTaskDict(hist, test_path, revision,
                                  benchmark_description, diagnostics,
                                  completion_token)
        tasks.append(_MakeTask([task_dict]))

        if completion_token is not None:
            measurement_add_futures.append(
                completion_token.AddMeasurement(
                    test_path, utils.IsMonitored(sheriff_client, test_path)))
    ndb.Future.wait_all(measurement_add_futures)

    return tasks
Example #26
def _CheckRequest(condition, msg):
    if not condition:
        raise api_request_handler.BadRequestError(msg)
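# Usage, as seen in ComputeRevision elsewhere in this file:
#   _CheckRequest(len(histograms) > 0, 'Must upload at least one histogram')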
Example #27
def _ValidateMasterBotBenchmarkName(master, bot, benchmark):
    for n in (master, bot, benchmark):
        if '/' in n:
            raise api_request_handler.BadRequestError('Illegal slash in %s' %
                                                      n)
Example #28
def ProcessHistogramSet(histogram_dicts, completion_token=None):
    if not isinstance(histogram_dicts, list):
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must be a list of dicts')

    histograms = histogram_set.HistogramSet()

    with timing.WallTimeLogger('hs.ImportDicts'):
        histograms.ImportDicts(histogram_dicts)

    with timing.WallTimeLogger('hs.DeduplicateDiagnostics'):
        histograms.DeduplicateDiagnostics()

    if len(histograms) == 0:
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must contain at least one histogram.')

    with timing.WallTimeLogger('hs._LogDebugInfo'):
        _LogDebugInfo(histograms)

    with timing.WallTimeLogger('InlineDenseSharedDiagnostics'):
        InlineDenseSharedDiagnostics(histograms)

    # TODO(#4242): Get rid of this.
    # https://github.com/catapult-project/catapult/issues/4242
    with timing.WallTimeLogger('_PurgeHistogramBinData'):
        _PurgeHistogramBinData(histograms)

    with timing.WallTimeLogger('_GetDiagnosticValue calls'):
        master = _GetDiagnosticValue(reserved_infos.MASTERS.name,
                                     histograms.GetFirstHistogram())
        bot = _GetDiagnosticValue(reserved_infos.BOTS.name,
                                  histograms.GetFirstHistogram())
        benchmark = _GetDiagnosticValue(reserved_infos.BENCHMARKS.name,
                                        histograms.GetFirstHistogram())
        benchmark_description = _GetDiagnosticValue(
            reserved_infos.BENCHMARK_DESCRIPTIONS.name,
            histograms.GetFirstHistogram(),
            optional=True)

    with timing.WallTimeLogger('_ValidateMasterBotBenchmarkName'):
        _ValidateMasterBotBenchmarkName(master, bot, benchmark)

    with timing.WallTimeLogger('ComputeRevision'):
        suite_key = utils.TestKey('%s/%s/%s' % (master, bot, benchmark))
        logging.info('Suite: %s', suite_key.id())

        revision = ComputeRevision(histograms)
        logging.info('Revision: %s', revision)

        internal_only = graph_data.Bot.GetInternalOnlySync(master, bot)

    revision_record = histogram.HistogramRevisionRecord.GetOrCreate(
        suite_key, revision)
    revision_record.put()

    last_added = histogram.HistogramRevisionRecord.GetLatest(
        suite_key).get_result()

    # On first upload, a query immediately following a put may return nothing.
    if not last_added:
        last_added = revision_record

    _CheckRequest(last_added, 'No last revision')

    # We'll skip the histogram-level sparse diagnostics because we need to
    # handle those with the histograms, below, so that we can properly assign
    # test paths.
    with timing.WallTimeLogger('FindSuiteLevelSparseDiagnostics'):
        suite_level_sparse_diagnostic_entities = FindSuiteLevelSparseDiagnostics(
            histograms, suite_key, revision, internal_only)

    # TODO(896856): Refactor master/bot computation to happen above this line
    # so that we can replace with a DiagnosticRef rather than a full diagnostic.
    with timing.WallTimeLogger('DeduplicateAndPut'):
        new_guids_to_old_diagnostics = (
            histogram.SparseDiagnostic.FindOrInsertDiagnostics(
                suite_level_sparse_diagnostic_entities, suite_key, revision,
                last_added.revision).get_result())

    with timing.WallTimeLogger('ReplaceSharedDiagnostic calls'):
        for new_guid, old_diagnostic in new_guids_to_old_diagnostics.items():
            histograms.ReplaceSharedDiagnostic(
                new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

    with timing.WallTimeLogger('_CreateHistogramTasks'):
        tasks = _CreateHistogramTasks(suite_key.id(), histograms, revision,
                                      benchmark_description, completion_token)

    with timing.WallTimeLogger('_QueueHistogramTasks'):
        _QueueHistogramTasks(tasks)
Example #29
    def UnprivilegedPost(self, *args):
        """Returns alert data in response to API requests.

        Possible list types:
          keys: A comma-separated list of urlsafe Anomaly keys.
          bug_id: A bug number on the Chromium issue tracker.
          rev: A revision number.

        Outputs:
          Alerts data; see README.md.
        """
        alert_list = None
        response = {}
        try:
            if len(args) == 0:
                is_improvement = utils.ParseBool(
                    self.request.get('is_improvement', None))
                recovered = utils.ParseBool(self.request.get(
                    'recovered', None))
                start_cursor = self.request.get('cursor', None)
                if start_cursor:
                    start_cursor = datastore_query.Cursor(urlsafe=start_cursor)
                min_timestamp = utils.ParseISO8601(
                    self.request.get('min_timestamp', None))
                max_timestamp = utils.ParseISO8601(
                    self.request.get('max_timestamp', None))

                test_keys = []
                for template_id in self.request.get_all('report'):
                    test_keys.extend(
                        report_template.TestKeysForReportTemplate(template_id))

                try:
                    alert_list, next_cursor, _ = anomaly.Anomaly.QueryAsync(
                        bot_name=self.request.get('bot', None),
                        bug_id=self.request.get('bug_id', None),
                        is_improvement=is_improvement,
                        key=self.request.get('key', None),
                        limit=int(self.request.get('limit', 100)),
                        master_name=self.request.get('master', None),
                        max_end_revision=self.request.get(
                            'max_end_revision', None),
                        max_start_revision=self.request.get(
                            'max_start_revision', None),
                        max_timestamp=max_timestamp,
                        min_end_revision=self.request.get(
                            'min_end_revision', None),
                        min_start_revision=self.request.get(
                            'min_start_revision', None),
                        min_timestamp=min_timestamp,
                        recovered=recovered,
                        sheriff=self.request.get('sheriff', None),
                        start_cursor=start_cursor,
                        test=self.request.get('test', None),
                        test_keys=test_keys,
                        test_suite_name=self.request.get('test_suite',
                                                         None)).get_result()
                except AssertionError:
                    alert_list, next_cursor = [], None
                if next_cursor:
                    response['next_cursor'] = next_cursor.urlsafe()
            else:
                list_type = args[0]
                if list_type.startswith('bug_id'):
                    bug_id = list_type.replace('bug_id/', '')
                    try:
                        bug_id = int(bug_id)
                    except ValueError:
                        raise api_request_handler.BadRequestError(
                            'Invalid bug ID "%s".' % bug_id)
                    response['DEPRECATION WARNING'] = (
                        'Please use /api/alerts?bug_id=%s' % bug_id)
                    alert_list, _, _ = anomaly.Anomaly.QueryAsync(
                        bug_id=bug_id).get_result()
                elif list_type.startswith('keys'):
                    keys = list_type.replace('keys/', '').split(',')
                    response['DEPRECATION WARNING'] = (
                        'Please use /api/alerts?key=%s' % keys[0])
                    alert_list = group_report.GetAlertsForKeys(keys)
                elif list_type.startswith('rev'):
                    rev = list_type.replace('rev/', '')
                    response['DEPRECATION WARNING'] = (
                        'Please use /api/alerts?max_end_revision=%s&min_start_revision=%s'
                        % (rev, rev))
                    alert_list = group_report.GetAlertsAroundRevision(rev)
                elif list_type.startswith('history'):
                    try:
                        days = int(list_type.replace('history/', ''))
                    except ValueError:
                        days = 7
                    cutoff = datetime.datetime.now() - datetime.timedelta(
                        days=days)
                    sheriff_name = self.request.get('sheriff',
                                                    'Chromium Perf Sheriff')
                    sheriff_key = ndb.Key('Sheriff', sheriff_name)
                    sheriff = sheriff_key.get()
                    if not sheriff:
                        raise api_request_handler.BadRequestError(
                            'Invalid sheriff %s' % sheriff_name)
                    response['DEPRECATION WARNING'] = (
                        'Please use /api/alerts?min_timestamp=%s&sheriff=%s' %
                        (urllib.quote(
                            cutoff.isoformat()), urllib.quote(sheriff_name)))
                    include_improvements = bool(
                        self.request.get('improvements'))
                    filter_for_benchmark = self.request.get('benchmark')

                    is_improvement = None
                    if not include_improvements:
                        is_improvement = False
                        response[
                            'DEPRECATION WARNING'] += '&is_improvement=false'
                    if filter_for_benchmark:
                        response['DEPRECATION WARNING'] += (
                            '&test_suite=' + filter_for_benchmark)

                    alert_list, _, _ = anomaly.Anomaly.QueryAsync(
                        sheriff=sheriff_key.id(),
                        min_timestamp=cutoff,
                        is_improvement=is_improvement,
                        test_suite_name=filter_for_benchmark).get_result()
                else:
                    raise api_request_handler.BadRequestError(
                        'Invalid alert type %s' % list_type)
        except AssertionError:
            # The only known assertion is in InternalOnlyModel._post_get_hook when a
            # non-internal user requests an internal-only entity.
            raise api_request_handler.BadRequestError('Not found')
        except request_handler.InvalidInputError as e:
            raise api_request_handler.BadRequestError(e.message)

        anomaly_dicts = alerts.AnomalyDicts(
            [a for a in alert_list if a.key.kind() == 'Anomaly'])

        response['anomalies'] = anomaly_dicts

        return response
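    # Illustrative request forms, reconstructed from the deprecation warnings
    # above (the preferred query-parameter style):
    #   /api/alerts?bug_id=12345
    #   /api/alerts?key=<urlsafe Anomaly key>
    #   /api/alerts?max_end_revision=<rev>&min_start_revision=<rev>
    #   /api/alerts?min_timestamp=<ISO 8601>&sheriff=<name>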
Example #30
class ApiRequestHandlerTest(testing_common.TestCase):
    def setUp(self):
        super(ApiRequestHandlerTest, self).setUp()

        app = webapp2.WSGIApplication([(r'/api/test', TestApiRequestHandler)])
        self.testapp = webtest.TestApp(app)

    @mock.patch.object(api_auth, 'Authorize')
    def testPost_Authorized_AuthorizedPostCalled(self, mock_authorize):
        response = self.testapp.post('/api/test')
        self.assertEqual({'foo': 'bar'}, json.loads(response.body))
        self.assertTrue(mock_authorize.called)

    @mock.patch.object(api_auth, 'Authorize',
                       mock.MagicMock(side_effect=api_auth.OAuthError))
    @mock.patch.object(TestApiRequestHandler, 'AuthorizedPost')
    def testPost_Unauthorized_AuthorizedPostNotCalled(self, mock_post):
        response = self.testapp.post('/api/test', status=403)
        self.assertEqual({'error': 'User authentication error'},
                         json.loads(response.body))
        self.assertFalse(mock_post.called)

    @mock.patch.object(api_auth, 'Authorize')
    @mock.patch.object(
        TestApiRequestHandler, 'AuthorizedPost',
        mock.MagicMock(side_effect=api_request_handler.BadRequestError('foo')))
    def testPost_BadRequest_400(self, _):
        response = self.testapp.post('/api/test', status=400)
        self.assertEqual({'error': 'foo'}, json.loads(response.body))

    @mock.patch.object(api_auth, 'Authorize',
                       mock.MagicMock(side_effect=api_auth.OAuthError))
    def testPost_OAuthError_403(self):
        response = self.testapp.post('/api/test', status=403)
        self.assertEqual({'error': 'User authentication error'},
                         json.loads(response.body))

    @mock.patch.object(api_auth, 'Authorize',
                       mock.MagicMock(side_effect=api_auth.NotLoggedInError))
    def testPost_NotLoggedInError_401(self):
        response = self.testapp.post('/api/test', status=401)
        self.assertEqual({'error': 'User not authenticated'},
                         json.loads(response.body))

    @mock.patch.object(api_auth, 'Authorize')
    def testOptions_NoOrigin_HeadersNotSet(self, _):
        response = self.testapp.options('/api/test')
        self.assertListEqual(
            [('Content-Length', '0'), ('Cache-Control', 'no-cache'),
             ('Content-Type', 'application/json; charset=utf-8')],
            response.headerlist)

    @mock.patch.object(api_auth, 'Authorize')
    def testOptions_InvalidOrigin_HeadersNotSet(self, _):
        api_request_handler._ALLOWED_ORIGINS = ['foo.appspot.com']
        response = self.testapp.options(
            '/api/test', headers={'origin': 'https://bar.appspot.com'})
        self.assertListEqual(
            [('Content-Length', '0'), ('Cache-Control', 'no-cache'),
             ('Content-Type', 'application/json; charset=utf-8')],
            response.headerlist)

    @mock.patch.object(api_auth, 'Authorize')
    def testPost_ValidProdOrigin_HeadersSet(self, _):
        api_request_handler._ALLOWED_ORIGINS = ['foo.appspot.com']
        response = self.testapp.post(
            '/api/test', headers={'origin': 'https://foo.appspot.com'})
        self.assertListEqual(
            [('Cache-Control', 'no-cache'),
             ('Content-Type', 'application/json; charset=utf-8'),
             ('Access-Control-Allow-Origin', 'https://foo.appspot.com'),
             ('Access-Control-Allow-Credentials', 'true'),
             ('Access-Control-Allow-Methods', 'GET,OPTIONS,POST'),
             ('Access-Control-Allow-Headers',
              'Accept,Authorization,Content-Type'),
             ('Access-Control-Max-Age', '3600'),
             ('Content-Length', '14')], response.headerlist)

    @mock.patch.object(api_auth, 'Authorize')
    def testPost_ValidDevOrigin_HeadersSet(self, _):
        api_request_handler._ALLOWED_ORIGINS = ['foo.appspot.com']
        response = self.testapp.post(
            '/api/test',
            headers={'origin': 'https://123jkjasdf-dot-foo.appspot.com'})
        self.assertListEqual(
            [('Cache-Control', 'no-cache'),
             ('Content-Type', 'application/json; charset=utf-8'),
             ('Access-Control-Allow-Origin',
              'https://123jkjasdf-dot-foo.appspot.com'),
             ('Access-Control-Allow-Credentials', 'true'),
             ('Access-Control-Allow-Methods', 'GET,OPTIONS,POST'),
             ('Access-Control-Allow-Headers',
              'Accept,Authorization,Content-Type'),
             ('Access-Control-Max-Age', '3600'),
             ('Content-Length', '14')], response.headerlist)

    @mock.patch.object(api_auth, 'Authorize')
    def testPost_InvalidOrigin_HeadersNotSet(self, _):
        response = self.testapp.post('/api/test')
        self.assertListEqual(
            [('Cache-Control', 'no-cache'),
             ('Content-Type', 'application/json; charset=utf-8'),
             ('Content-Length', '14')], response.headerlist)