Example #1
  def test_unordered_first_concurrent_jobs(self):
    items = [10, 5, 0]

    log = []

    @ndb.tasklet
    def fn_async(x):
      log.append('%d started' % x)
      yield ndb.sleep(float(x) / 1000)
      log.append('%d finishing' % x)
      raise ndb.Return(x)

    expected = [(5, 5), (0, 0), (10, 10)]
    actual = utils.async_apply(
        items, fn_async, concurrent_jobs=2, unordered=True)
    self.assertFalse(isinstance(actual, list))
    self.assertEqual(expected, list(actual))
    self.assertEqual(log, [
        '10 started',
        '5 started',
        '5 finishing',
        '0 started',
        '0 finishing',
        '10 finishing',
    ])
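
This test pins down the contract the rest of these examples rely on: utils.async_apply(items, fn_async, concurrent_jobs=..., unordered=...) lazily yields (item, result) pairs, running at most concurrent_jobs calls at a time, in input order by default or in completion order with unordered=True. Below is a minimal pure-Python sketch of those semantics using concurrent.futures threads in place of ndb tasklets; the helper name apply_sketch and the default of 50 concurrent jobs are invented for illustration and are not part of the luci utils module.

from concurrent.futures import ThreadPoolExecutor, as_completed

def apply_sketch(items, fn, concurrent_jobs=50, unordered=False):
    """Illustrative thread-based analogue of utils.async_apply.

    Lazily yields (item, fn(item)) pairs, running at most
    concurrent_jobs calls concurrently. The default of 50 is
    arbitrary, not async_apply's real default.
    """
    with ThreadPoolExecutor(max_workers=concurrent_jobs) as pool:
        # Map each future back to the item it was built from;
        # dicts preserve insertion order in Python 3.7+.
        futs = {pool.submit(fn, item): item for item in items}
        if unordered:
            # Completion order, like unordered=True above.
            for fut in as_completed(futs):
                yield futs[fut], fut.result()
        else:
            # Input order: block on each future in submission order.
            for fut, item in futs.items():
                yield item, fut.result()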
Example #2
  def test_ordered_concurrent_jobs(self):
    items = range(4)

    log = []

    @ndb.tasklet
    def fn_async(x):
      log.append('%d started' % x)
      yield ndb.sleep(0.01)
      log.append('%d finishing' % x)
      raise ndb.Return(x + 10)

    expected = [(i, i + 10) for i in items]
    actual = utils.async_apply(items, fn_async, concurrent_jobs=2)
    self.assertFalse(isinstance(actual, list))
    self.assertEqual(expected, list(actual))
    self.assertEqual(log, [
        '0 started',
        '1 started',
        '0 finishing',
        '2 started',
        '1 finishing',
        '3 started',
        '2 finishing',
        '3 finishing',
    ])
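
The log assertions show that execution interleaves under concurrent_jobs=2, yet the yielded pairs still follow input order. With the apply_sketch stand-in from Example #1 (illustrative only, not the real utils.async_apply), the same ordered behaviour looks like:

out = list(apply_sketch(range(4), lambda x: x + 10, concurrent_jobs=2))
assert out == [(0, 10), (1, 11), (2, 12), (3, 13)]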
Example #3
def _fetch_configs(paths):
    """Fetches a bunch of config files in parallel and validates them.

    Returns:
      dict {path -> (Revision tuple, <config>)}.

    Raises:
      CannotLoadConfigError if some config is missing or invalid.
    """
    paths = sorted(paths)
    configs_url = _get_configs_url()
    out = {}
    configs = utils.async_apply(
        paths, lambda p: config.get_self_config_async(
            p,
            dest_type=_CONFIG_SCHEMAS[p]['proto_class'],
            store_last_good=False))
    for path, (rev, conf) in configs:
        if conf is None:
            default = _CONFIG_SCHEMAS[path].get('default')
            if default is None:
                raise CannotLoadConfigError('Config %s is missing' % path)
            rev, conf = '0' * 40, default
        try:
            validation.validate(config.self_config_set(), path, conf)
        except ValueError as exc:
            raise CannotLoadConfigError(
                'Config %s at rev %s failed to pass validation: %s' %
                (path, rev, exc))
        out[path] = (Revision(rev, _gitiles_url(configs_url, rev, path)), conf)
    return out
Example #4
def check_scheduling_permissions(bucket_ids):
  """Checks if the requester can schedule builds in any of the buckets.

  Raises auth.AuthorizationError on insufficient permissions.
  """
  can_add = utils.async_apply(set(bucket_ids), user.can_add_build_async)
  forbidden = [b for b, can in can_add if not can]
  if forbidden:
    raise user.current_identity_cannot('add builds to buckets %s', forbidden)
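
This is the common consumption pattern for the (item, result) pairs: filter the input items by their results. A toy version using the apply_sketch stand-in from Example #1, with a hypothetical is_even playing the role of user.can_add_build_async:

def is_even(n):
    return n % 2 == 0

forbidden = [n for n, ok in apply_sketch([1, 2, 3, 4], is_even) if not ok]
assert forbidden == [1, 3]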
Example #5
  def test_ordered(self):
    items = range(3)

    @ndb.tasklet
    def fn_async(x):
      raise ndb.Return(x + 10)

    expected = [(0, 10), (1, 11), (2, 12)]
    actual = utils.async_apply(items, fn_async)
    self.assertFalse(isinstance(actual, list))
    self.assertEqual(expected, list(actual))
Example #6
def PermittedActions(self, request, _context):
    """Returns a set of permitted actions for the requested resources."""
    logging.debug('Received request from %s for: %s',
                  auth.get_current_identity(), request)
    if request.resource_kind != 'bucket':
        return access_pb2.PermittedActionsResponse()
    bucket_ids = dict(
        utils.async_apply(request.resource_ids,
                          api_common.to_bucket_id_async))
    roles = dict(
        utils.async_apply(bucket_ids.itervalues(), user.get_role_async))
    permitted = {
        rid: create_resource_permissions(roles[bucket_ids[rid]])
        for rid in request.resource_ids
    }
    logging.debug('Permitted: %s', permitted)
    return access_pb2.PermittedActionsResponse(
        permitted=permitted,
        validity_duration=duration_pb2.Duration(seconds=10),
    )
Example #7
  def test_unordered(self):
    items = [10, 5, 0]

    @ndb.tasklet
    def fn_async(x):
      yield ndb.sleep(float(x) / 1000)
      raise ndb.Return(x)

    expected = [(0, 0), (5, 5), (10, 10)]
    actual = utils.async_apply(items, fn_async, unordered=True)
    self.assertFalse(isinstance(actual, list))
    self.assertEqual(expected, list(actual))
Example #8
    def get_builders(self, request):
        """Returns defined swarmbucket builders.

        Returns legacy bucket names, e.g. "luci.chromium.try", not "chromium/try".

        Can be used to discover builders.
        """
        if len(request.bucket) > 100:
            raise endpoints.BadRequestException(
                'Number of buckets cannot be greater than 100')
        if request.bucket:
            # Buckets were specified explicitly.
            bucket_ids = map(api_common.parse_luci_bucket, request.bucket)
            bucket_ids = [bid for bid in bucket_ids if bid]
            # Filter out inaccessible ones.
            bids_can = utils.async_apply(bucket_ids,
                                         user.can_access_bucket_async)
            bucket_ids = [bid for bid, can in bids_can if can]
        else:
            # Buckets were not specified explicitly.
            # Use the available ones.
            bucket_ids = user.get_accessible_buckets_async().get_result()
            # bucket_ids is None => all buckets are available.

        res = GetBuildersResponseMessage()
        buckets = config.get_buckets_async(bucket_ids).get_result()
        for bucket_id, cfg in buckets.iteritems():
            if not cfg or not cfg.swarming.builders:
                continue

            def to_dims(b):
                return flatten_swarmingcfg.format_dimensions(
                    swarmingcfg.read_dimensions(b))

            res.buckets.append(
                BucketMessage(
                    name=api_common.format_luci_bucket(bucket_id),
                    builders=[
                        BuilderMessage(name=builder.name,
                                       category=builder.category,
                                       properties_json=json.dumps(
                                           flatten_swarmingcfg.read_properties(
                                               builder.recipe)),
                                       swarming_hostname=builder.swarming_host,
                                       swarming_dimensions=to_dims(builder))
                        for builder in cfg.swarming.builders
                    ],
                    swarming_hostname=cfg.swarming.hostname,
                ))
        return res
Example #9
def refetch_config(force=False):
    """Refetches all configs from luci-config (if enabled).

    Called as a cron job.
    """
    if not is_remote_configured():
        logging.info('Config remote is not configured')
        return

    # Grab and validate all new configs in parallel.
    try:
        configs = _fetch_configs(_CONFIG_SCHEMAS)
    except CannotLoadConfigError as exc:
        logging.error('Failed to fetch configs\n%s', exc)
        return

    # Figure out what needs to be updated.
    dirty = {}
    dirty_in_authdb = {}

    cur_revs = dict(utils.async_apply(configs, _get_config_revision_async))
    for path, (new_rev, conf) in sorted(configs.iteritems()):
        assert path in _CONFIG_SCHEMAS, path
        cur_rev = cur_revs[path]
        if cur_rev != new_rev or force:
            if _CONFIG_SCHEMAS[path]['use_authdb_transaction']:
                dirty_in_authdb[path] = (new_rev, conf)
            else:
                dirty[path] = (new_rev, conf)
        else:
            logging.info('Config %s is up-to-date at rev %s', path,
                         cur_rev.revision)

    # First update configs that do not touch AuthDB, one by one.
    for path, (rev, conf) in sorted(dirty.iteritems()):
        dirty = _CONFIG_SCHEMAS[path]['updater'](None, rev, conf)
        logging.info('Processed %s at rev %s: %s', path, rev.revision,
                     'updated' if dirty else 'up-to-date')

    # Configs that touch AuthDB are updated in a single transaction so that a
    # config update generates a single AuthDB replication task instead of a
    # bunch of them.
    if dirty_in_authdb:
        _update_authdb_configs(dirty_in_authdb)
Example #10
    def check_entries_exist(entries):
        """Assess which entities already exist in the datastore.

        Arguments:
          entries: a DigestCollection to be posted

        Yields:
          (Digest, ContentEntry or None)

        Raises:
          BadRequestException if any digest is not a valid hexadecimal number.
        """

        # Kick off all queries in parallel. Build mapping Future -> digest.
        def fetch(digest):
            key = entry_key_or_error(entries.namespace.namespace,
                                     digest.digest)
            return key.get_async(use_cache=False)

        return utils.async_apply(entries.items, fetch, unordered=True)
Example #11
def import_external_groups():
    """Refetches external groups specified via 'tarball' or 'plainlist' entries.

    Runs as a cron task. Raises BundleImportError in case of import errors.
    """
    config = load_config()
    if not config:
        logging.info('Not configured')
        return

    # Fetch files specified in the config in parallel.
    entries = list(config.tarball) + list(config.plainlist)
    files = utils.async_apply(
        entries, lambda e: fetch_file_async(e.url, e.oauth_scopes))

    # {system name -> group name -> list of identities}
    bundles = {}
    for e, contents in files:
        # Unpack tarball into {system name -> group name -> list of identities}.
        if isinstance(e, config_pb2.GroupImporterConfig.TarballEntry):
            fetched = load_tarball(contents, e.systems, e.groups, e.domain)
            assert not (set(fetched) & set(bundles)), (fetched.keys(),
                                                       bundles.keys())
            bundles.update(fetched)
            continue

        # Add plainlist group to 'external/*' bundle.
        if isinstance(e, config_pb2.GroupImporterConfig.PlainlistEntry):
            group = load_group_file(contents, e.domain)
            name = 'external/%s' % e.group
            if 'external' not in bundles:
                bundles['external'] = {}
            assert name not in bundles['external'], name
            bundles['external'][name] = group
            continue

        assert False, 'Unreachable'

    import_bundles(bundles, model.get_service_self_identity(),
                   'External group import')
Example #12
def schedule_build_multi(batch):
    """Schedules multiple builds.

    Args:
      batch: list of _ReqRes where
        request is rpc_pb2.ScheduleBuildRequest and
        response is rpc_pb2.BatchResponse.Response.
        Response objects will be mutated.
    """
    # Validate requests.
    valid_items = []
    for rr in batch:
        try:
            validation.validate_schedule_build_request(rr.request)
        except validation.Error as ex:
            rr.response.error.code = prpc.StatusCode.INVALID_ARGUMENT.value
            rr.response.error.message = ex.message
            continue

        # Parse the field mask.
        # Normally it is done by rpc_impl_async.
        mask = None
        if rr.request.HasField('fields'):
            try:
                mask = protoutil.Mask.from_field_mask(
                    rr.request.fields, build_pb2.Build.DESCRIPTOR)
            except ValueError as ex:
                rr.response.error.code = prpc.StatusCode.INVALID_ARGUMENT.value
                rr.response.error.message = 'invalid fields: %s' % ex.message
                continue

        valid_items.append(_ScheduleItem(rr.request, rr.response, mask))

    # Check permissions.
    def get_bucket_id(req):
        return config.format_bucket_id(req.builder.project, req.builder.bucket)

    bucket_ids = {get_bucket_id(x.request) for x in valid_items}
    can_add = dict(utils.async_apply(bucket_ids, user.can_add_build_async))
    identity_str = auth.get_current_identity().to_bytes()
    to_schedule = []
    for x in valid_items:
        bid = get_bucket_id(x.request)
        if can_add[bid]:
            to_schedule.append(x)
        else:
            x.response.error.code = prpc.StatusCode.PERMISSION_DENIED.value
            x.response.error.message = (
                '%s cannot schedule builds in bucket %s' % (identity_str, bid))

    # Schedule builds.
    if not to_schedule:  # pragma: no cover
        return
    build_requests = [
        creation.BuildRequest(schedule_build_request=x.request)
        for x in to_schedule
    ]
    results = creation.add_many_async(build_requests).get_result()
    futs = []
    for x, (build, ex) in zip(to_schedule, results):
        res = x.response
        err = res.error
        if isinstance(ex, errors.Error):
            err.code = ex.code.value
            err.message = ex.message
        elif isinstance(ex, auth.AuthorizationError):
            err.code = prpc.StatusCode.PERMISSION_DENIED.value
            err.message = ex.message
        elif ex:
            err.code = prpc.StatusCode.INTERNAL.value
            err.message = ex.message
        else:
            futs.append(build_to_proto_async(build, res.schedule_build,
                                             x.mask))
    for f in futs:
        f.get_result()
Example #13
def _fix_builds(build_keys):  # pragma: no cover
  res_iter = utils.async_apply(build_keys, _fix_build_async, unordered=True)
  # async_apply returns an iterator. We need to traverse it, otherwise nothing
  # will happen.
  for _ in res_iter:
    pass
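
The comment above is the key point: the returned iterator does no work until it is consumed. A toy demonstration with the apply_sketch stand-in from Example #1 (the fix function here is hypothetical, standing in for _fix_build_async):

done = []

def fix(key):
    done.append(key)

it = apply_sketch([1, 2, 3], fix, unordered=True)
assert done == []   # nothing has run yet
for _ in it:        # draining the iterator triggers the work
    pass
assert sorted(done) == [1, 2, 3]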
Example #14
def get_revisions():
    """Returns a mapping {config file name => Revision instance or None}."""
    return dict(utils.async_apply(_CONFIG_SCHEMAS, _get_config_revision_async))
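
Because each element is an (item, result) pair, the iterator feeds dict() directly, as here and in Examples #4, #6, #9, and #12. With the apply_sketch stand-in from Example #1:

revs = dict(apply_sketch(['a.cfg', 'b.cfg'], str.upper))
assert revs == {'a.cfg': 'A.CFG', 'b.cfg': 'B.CFG'}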