Example #1
def _task_delete_many_builds(bucket_id, status, tags=None, created_by=None):
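    """Deletes builds in bucket_id with the given status (deferred task body).

    Optional tags and created_by further narrow which builds are deleted; each
    matching build is deleted in its own transaction.
    """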
    @ndb.transactional_tasklet
    def txn(key):
        build = yield key.get_async()
        if not build or build.status_legacy != status:  # pragma: no cover
            raise ndb.Return(False)
        futs = [key.delete_async()]

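        # Also cancel the build's Swarming task, if any, in the same transaction.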
        sw = build.parse_infra().swarming
        if sw.hostname and sw.task_id:  # pragma: no branch
            futs.append(
                swarming.cancel_task_transactionally_async(
                    sw.hostname, sw.task_id))
        yield futs
        raise ndb.Return(True)

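    # Deletes the build only if its status has not changed since the query, and
    # logs successful deletions.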
    @ndb.tasklet
    def del_if_unchanged(key):
        if (yield txn(key)):  # pragma: no branch
            logging.debug('Deleted %s', key.id())

    assert status in (model.BuildStatus.SCHEDULED, model.BuildStatus.STARTED)
    tags = tags or []
    created_by = user.parse_identity(created_by)
    q = model.Build.query(model.Build.bucket_id == bucket_id,
                          model.Build.status_legacy == status)
    for t in tags:
        q = q.filter(model.Build.tags == t)
    if created_by:
        q = q.filter(model.Build.created_by == created_by)
    q.map(del_if_unchanged, keys_only=True)
Example #2
    def test_parse_identity(self):
        self.assertEqual(
            user.parse_identity('user:[email protected]'),
            auth.Identity('user', '*****@*****.**'),
        )
        self.assertEqual(
            user.parse_identity(auth.Identity('user', '*****@*****.**')),
            auth.Identity('user', '*****@*****.**'),
        )

        self.assertEqual(
            user.parse_identity('*****@*****.**'),
            auth.Identity('user', '*****@*****.**'),
        )

        with self.assertRaises(errors.InvalidInputError):
            user.parse_identity('a:b')
Example #3
def delete_many_builds(bucket_id, status, tags=None, created_by=None):
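    """Deletes scheduled or started builds in bucket_id via a push task.

    Validates the status, ACLs and created_by synchronously, then defers the
    actual deletion to _task_delete_many_builds on the backend module.
    """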
    if status not in (model.BuildStatus.SCHEDULED, model.BuildStatus.STARTED):
        raise errors.InvalidInputError(
            'status can be STARTED or SCHEDULED, not %s' % status)
    if not user.can_delete_scheduled_builds_async(bucket_id).get_result():
        raise user.current_identity_cannot('delete builds of %s', bucket_id)
    # Validate created_by before scheduling the push task.
    created_by = user.parse_identity(created_by)
    deferred.defer(
        _task_delete_many_builds,
        bucket_id,
        status,
        tags=tags,
        created_by=created_by,
        # Schedule it on the backend module of the same version.
        # This assumes that both frontend and backend are uploaded together.
        _target='%s.backend' % modules.get_current_version_name(),
        # Retry immediately.
        _retry_options=taskqueue.TaskRetryOptions(
            min_backoff_seconds=0,
            max_backoff_seconds=1,
        ),
    )
Example #4
@ndb.tasklet
def search_async(q):
    """Searches for builds.

    Args:
      q (Query): the query.

    Returns:
      A tuple:
        builds (list of Build): query result.
        next_cursor (string): cursor for the next page.
          None if there are no more builds.

    Raises:
      errors.InvalidInputError if q is invalid.
    """
    q.validate()
    q = q.copy()
    if (q.create_time_low is not None
            and q.create_time_low < model.BEGINING_OF_THE_WORLD):
        q.create_time_low = None
    if q.create_time_high is not None:
        if q.create_time_high <= model.BEGINING_OF_THE_WORLD:
            raise ndb.Return([], None)
        if (q.create_time_low is not None
                and q.create_time_low >= q.create_time_high):
            raise ndb.Return([], None)

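    # Normalize the query: default tags, adjust max_builds, parse created_by,
    # and treat STATUS_UNSPECIFIED as "no status filter".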
    q.tags = q.tags or []
    q.max_builds = fix_max_builds(q.max_builds)
    q.created_by = user.parse_identity(q.created_by)
    q.status = q.status if q.status != common_pb2.STATUS_UNSPECIFIED else None

    if not q.bucket_ids and q.retry_of is not None:
        retry_of_build = yield model.Build.get_by_id_async(q.retry_of)
        if retry_of_build:
            q.bucket_ids = [retry_of_build.bucket_id]
    if q.bucket_ids:
        yield check_acls_async(q.bucket_ids)
        q.bucket_ids = set(q.bucket_ids)

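    # Choose a search path: a cursor produced by the tag-index search can only
    # resume a tag-index search, and any other cursor can only resume a
    # datastore query search.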
    is_tag_index_cursor = (q.start_cursor and RE_TAG_INDEX_SEARCH_CURSOR.match(
        q.start_cursor))
    can_use_tag_index = (indexed_tags(q.tags)
                         and (not q.start_cursor or is_tag_index_cursor))
    if is_tag_index_cursor and not can_use_tag_index:
        raise errors.InvalidInputError('invalid cursor')
    can_use_query_search = not q.start_cursor or not is_tag_index_cursor
    assert can_use_tag_index or can_use_query_search

    # Try searching using tag index.
    if can_use_tag_index:
        try:
            search_start_time = utils.utcnow()
            results = yield _tag_index_search_async(q)
            logging.info('tag index search took %dms',
                         (utils.utcnow() - search_start_time).total_seconds() *
                         1000)
            raise ndb.Return(results)
        except errors.TagIndexIncomplete:
            if not can_use_query_search:
                raise
            logging.info('falling back to querying')

    # Searching using datastore query.
    assert can_use_query_search
    search_start_time = utils.utcnow()
    results = yield _query_search_async(q)
    logging.info('query search took %dms',
                 (utils.utcnow() - search_start_time).total_seconds() * 1000)
    raise ndb.Return(results)
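
A minimal usage sketch for search_async follows; the Query(...) constructor and its
keyword arguments are assumptions inferred from the fields the function reads above,
not part of the excerpt, and the tag value is hypothetical.

# Hedged sketch: Query(...) and its keyword arguments are assumptions; only
# fields that search_async above actually reads are set.
def list_builds(bucket_id):
    q = Query(
        bucket_ids=[bucket_id],
        tags=['builder:linux-rel'],  # hypothetical tag filter
        max_builds=100,
    )
    # search_async is an ndb tasklet, so it returns a future; get_result()
    # drives it to completion when called outside another tasklet.
    builds, next_cursor = search_async(q).get_result()
    return builds, next_cursor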