Example #1
 def get_projects(self, _request):
   project_configs = config.get_project_configs(
       'project.cfg', project_config_pb2.ProjectCfg)
   configList = []
   for project_id, (revision, project_cfg) in project_configs.iteritems():
     if _has_access(project_cfg.access):
       configList.append(Config(id=project_id,
                                revision=revision,
                                access=project_cfg.access[:]))
   return Configs(configs=configList)
Example #2
 def get_projects(self, _request):
     project_configs = config.get_project_configs(
         'project.cfg', project_config_pb2.ProjectCfg)
     configList = []
     for project_id, (revision, project_cfg) in project_configs.iteritems():
         if _has_access(project_cfg.access):
             configList.append(
                 Config(id=project_id,
                        revision=revision,
                        access=project_cfg.access[:]))
     return Configs(configs=configList)
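
Both examples above consume the older return shape of config.get_project_configs: a dict mapping project id to a (revision, parsed_config) pair. Below is a minimal sketch of the same call pattern; the import paths are assumptions (the examples only reference the module-level names `config` and `project_config_pb2`), not verified APIs.

# Sketch only: iterate the project_id -> (revision, parsed_config) mapping
# used by the examples above.
from components import config  # import path assumed
import project_config_pb2      # generated proto module, assumed available

project_configs = config.get_project_configs(
    'project.cfg', project_config_pb2.ProjectCfg)
for project_id, (revision, project_cfg) in project_configs.iteritems():
  # revision identifies the config version; project_cfg is the parsed proto.
  print '%s @ %s' % (project_id, revision)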
Example #3
  def test_get_project_configs(self):
    self.provider.get_project_configs_async.return_value = ndb.Future()
    self.provider.get_project_configs_async.return_value.set_result({
      'projects/chromium': ('deadbeef', 'param: "value"'),
      'projects/v8': ('aaaabbbb', 'param: "value2"'),
      'projects/skia': ('deadbeef', 'invalid config'),
    })

    expected = {
      'chromium': ('deadbeef', test_config_pb2.Config(param='value')),
      'v8': ('aaaabbbb', test_config_pb2.Config(param='value2')),
    }
    actual = config.get_project_configs('bar.cfg', test_config_pb2.Config)
    self.assertEqual(expected, actual)
Example #4
  def test_get_project_configs(self):
    self.provider.get_project_configs_async.return_value = ndb.Future()
    self.provider.get_project_configs_async.return_value.set_result({
      'projects/chromium': ('deadbeef', 'param: "value"'),
      'projects/v8': ('aaaabbbb', 'param: "value2"'),
      'projects/skia': ('badcoffee', 'invalid config'),
    })

    actual = config.get_project_configs('bar.cfg', test_config_pb2.Config)
    self.assertIsInstance(actual['skia'][2], config.ConfigFormatError)
    expected = {
      'chromium': ('deadbeef', test_config_pb2.Config(param='value'), None),
      'v8': ('aaaabbbb', test_config_pb2.Config(param='value2'), None),
      'skia': ('badcoffee', None, actual['skia'][2]),
    }
    self.assertEqual(expected, actual)
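
The example above exercises the newer return shape, where each value is a (revision, config, exception) triple and a parse failure is surfaced as a config.ConfigFormatError instead of the project being dropped. A minimal sketch of consuming that shape, following the same skip-on-broken-config pattern the cron examples below use:

# Sketch only: skip projects whose config failed to parse, as the
# cron_update_buckets examples below do with `if project_cfg is None`.
config_map = config.get_project_configs('bar.cfg', test_config_pb2.Config)
for project_id, (revision, cfg, exc) in config_map.iteritems():
  if cfg is None:
    # exc is expected to be a config.ConfigFormatError here.
    continue
  # ... use cfg and revision for valid projects ...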
Example #5
def cron_update_buckets():
  """Synchronizes Bucket entities with configs fetched from luci-config."""
  config_map = config.get_project_configs(
    cfg_path(), project_config_pb2.BuildbucketCfg)

  buckets_of_project = {
    pid: set(b.name for b in pcfg.buckets)
    for pid, (_, pcfg) in config_map.iteritems()
  }

  for project_id, (revision, project_cfg) in config_map.iteritems():
    # revision is None in file-system mode. Use SHA1 of the config as revision.
    revision = revision or 'sha1:%s' % hashlib.sha1(
      project_cfg.SerializeToString()).hexdigest()
    for bucket_cfg in project_cfg.buckets:
      bucket = Bucket.get_by_id(bucket_cfg.name)
      if (bucket and
          bucket.project_id == project_id and
          bucket.revision == revision):
        continue

      for acl in bucket_cfg.acls:
        if acl.identity and ':' not in acl.identity:
          acl.identity = 'user:%s' % acl.identity

      @ndb.transactional
      def update_bucket():
        bucket = Bucket.get_by_id(bucket_cfg.name)
        if bucket and bucket.project_id != project_id:
          # Does bucket.project_id still claim this bucket?
          if bucket_cfg.name in buckets_of_project.get(bucket.project_id, []):
            logging.error(
              'Failed to reserve bucket %s for project %s: '
              'already reserved by %s',
              bucket_cfg.name, project_id, bucket.project_id)
            return
        if (bucket and
            bucket.project_id == project_id and
            bucket.revision == revision):  # pragma: no coverage
          return

        report_reservation = bucket is None or bucket.project_id != project_id
        Bucket(
          id=bucket_cfg.name,
          project_id=project_id,
          revision=revision,
          config_content=protobuf.text_format.MessageToString(bucket_cfg),
        ).put()
        if report_reservation:
          logging.warning(
            'Reserved bucket %s for project %s', bucket_cfg.name, project_id)
        logging.info(
          'Updated bucket %s to revision %s', bucket_cfg.name, revision)

      update_bucket()

  # Delete/unreserve non-existing buckets.
  all_bucket_keys = Bucket.query().fetch(keys_only=True)
  existing_bucket_keys = [
    ndb.Key(Bucket, b)
    for buckets in buckets_of_project.itervalues()
    for b in buckets
  ]
  to_delete = set(all_bucket_keys).difference(existing_bucket_keys)
  if to_delete:
    logging.warning(
      'Deleting buckets: %s', ', '.join(k.id() for k in to_delete))
    ndb.delete_multi(to_delete)
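
The cron example above checks whether a bucket is already at the wanted revision twice: once outside the transaction, to skip up-to-date buckets cheaply, and again inside @ndb.transactional, so a concurrent run that stored the entity in the meantime is not overwritten. A stripped-down sketch of that check-then-recheck pattern, using only names that already appear in the example; it is not a drop-in implementation:

# Sketch of the double-check pattern from the example above.
@ndb.transactional
def update_bucket():
  bucket = Bucket.get_by_id(bucket_cfg.name)
  if (bucket and bucket.project_id == project_id and
      bucket.revision == revision):
    return  # another run already stored this revision; nothing to do
  # ...build and put() the updated Bucket entity here...

update_bucket()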
Example #6
def cron_update_buckets():
    """Synchronizes bucket entities with configs fetched from luci-config.

    When storing in the datastore, inlines the referenced ACL sets and clears
    the acl_sets message field. Also inlines swarmbucket builder defaults and
    mixins and clears Builder.mixins field.
    """
    import flatten_swarmingcfg

    config_map = config.get_project_configs(cfg_path(),
                                            project_config_pb2.BuildbucketCfg)

    to_delete = collections.defaultdict(set)  # project_id -> ndb keys
    for key in Bucket.query().fetch(keys_only=True):
        to_delete[key.parent().id()].add(key)

    for project_id, (revision, project_cfg, _) in config_map.iteritems():
        if project_cfg is None:
            logging.error('config of project %s is broken', project_id)
            # Do not delete all buckets of a broken project.
            to_delete.pop(project_id, None)
            continue

        # revision is None in file-system mode. Use SHA1 of the config as revision.
        revision = revision or 'sha1:%s' % hashlib.sha1(
            project_cfg.SerializeToString()).hexdigest()
        acl_sets_by_name = {a.name: a for a in project_cfg.acl_sets}
        builder_mixins_by_name = {
            m.name: m
            for m in project_cfg.builder_mixins
        }

        for bucket_cfg in project_cfg.buckets:
            bucket_key = Bucket.make_key(project_id,
                                         short_bucket_name(bucket_cfg.name))
            to_delete[project_id].discard(bucket_key)
            bucket = bucket_key.get()
            if (bucket and bucket.entity_schema_version
                    == CURRENT_BUCKET_SCHEMA_VERSION
                    and bucket.revision == revision):
                continue

            # Inline ACL sets.
            for name in bucket_cfg.acl_sets:
                acl_set = acl_sets_by_name.get(name)
                if not acl_set:
                    logging.error(
                        'referenced acl_set not found.\n'
                        'Bucket: %s\n'
                        'ACL set name: %r\n'
                        'Config revision: %r', bucket_key, name, revision)
                    continue
                bucket_cfg.acls.extend(acl_set.acls)
            del bucket_cfg.acl_sets[:]

            _normalize_acls(bucket_cfg.acls)

            if bucket_cfg.HasField('swarming'):
                # Pull builder defaults out and apply defaults.
                defaults = bucket_cfg.swarming.builder_defaults
                bucket_cfg.swarming.ClearField('builder_defaults')
                if not any(d.startswith('pool:') for d in defaults.dimensions):
                    defaults.dimensions.append(
                        'pool:luci.%s.%s' %
                        (project_id, short_bucket_name(bucket_cfg.name)))
                defaults.swarming_host = (defaults.swarming_host
                                          or bucket_cfg.swarming.hostname)

                f = 'task_template_canary_percentage'
                if not defaults.HasField(f) and bucket_cfg.swarming.HasField(
                        f):
                    defaults.task_template_canary_percentage.CopyFrom(
                        bucket_cfg.swarming.task_template_canary_percentage)

                for b in bucket_cfg.swarming.builders:
                    flatten_swarmingcfg.flatten_builder(
                        b, defaults, builder_mixins_by_name)

            # pylint: disable=no-value-for-parameter
            @ndb.transactional(xg=True)
            def update_bucket():
                bucket = bucket_key.get()
                if (bucket and bucket.entity_schema_version
                        == CURRENT_BUCKET_SCHEMA_VERSION and bucket.revision
                        == revision):  # pragma: no coverage
                    return

                put_bucket(project_id, revision, bucket_cfg)
                logging.info('Updated bucket %s to revision %s', bucket_key,
                             revision)

            update_bucket()

    # Delete non-existing buckets.
    to_delete_flat = sum([list(n) for n in to_delete.itervalues()], [])
    if to_delete_flat:
        logging.warning('Deleting buckets: %s',
                        ', '.join(map(str, to_delete_flat)))
        ndb.delete_multi(to_delete_flat)
Example #7
def cron_update_buckets():
    """Synchronizes Bucket entities with configs fetched from luci-config.

    When storing in the datastore, inlines the referenced ACL sets and clears
    the acl_sets message field. Also inlines swarmbucket builder defaults and
    mixins and clears Builder.mixins field.
    """
    from swarming import swarmingcfg

    config_map = config.get_project_configs(cfg_path(),
                                            project_config_pb2.BuildbucketCfg)

    buckets_of_project = {}
    for pid, (_, pcfg, _) in config_map.iteritems():
        if pcfg is not None:
            buckets_of_project[pid] = set(b.name for b in pcfg.buckets)
        else:
            logging.error('config of project %s is broken', pid)
            # Find buckets that are currently reserved by the project.
            # We don't expect many projects to be broken at the same time
            # so fetching sequentially is fine.
            bucket_keys = Bucket.query(Bucket.project_id == pid).fetch(
                keys_only=True)
            # Make sure not to delete these buckets below.
            buckets_of_project[pid] = set(k.id() for k in bucket_keys)

    for project_id, (revision, project_cfg, _) in config_map.iteritems():
        if project_cfg is None:
            continue
        # revision is None in file-system mode. Use SHA1 of the config as revision.
        revision = revision or 'sha1:%s' % hashlib.sha1(
            project_cfg.SerializeToString()).hexdigest()
        acl_sets_by_name = {a.name: a for a in project_cfg.acl_sets}
        builder_mixins_by_name = {
            m.name: m
            for m in project_cfg.builder_mixins
        }

        for bucket_cfg in project_cfg.buckets:
            bucket = Bucket.get_by_id(bucket_cfg.name)
            if (bucket and bucket.entity_schema_version
                    == CURRENT_BUCKET_SCHEMA_VERSION
                    and bucket.project_id == project_id
                    and bucket.revision == revision
                    and bucket.config_content_binary):
                continue

            # Inline ACL sets.
            for name in bucket_cfg.acl_sets:
                acl_set = acl_sets_by_name.get(name)
                if not acl_set:
                    logging.error(
                        'referenced acl_set not found.\n'
                        'Bucket: %r\n'
                        'ACL set name: %r\n'
                        'Project id: %r\n'
                        'Config revision: %r', bucket_cfg.name, name,
                        project_id, revision)
                    continue
                bucket_cfg.acls.extend(acl_set.acls)
            del bucket_cfg.acl_sets[:]

            _normalize_acls(bucket_cfg.acls)

            if bucket_cfg.HasField('swarming'):
                # Flatten builders before putting to datastore.
                for b in bucket_cfg.swarming.builders:
                    swarmingcfg.flatten_builder(
                        b, bucket_cfg.swarming.builder_defaults,
                        builder_mixins_by_name)
                bucket_cfg.swarming.ClearField('builder_defaults')

            @ndb.transactional
            def update_bucket():
                bucket = Bucket.get_by_id(bucket_cfg.name)
                if bucket and bucket.project_id != project_id:
                    # Does bucket.project_id still claim this bucket?
                    if bucket_cfg.name in buckets_of_project.get(
                            bucket.project_id, []):
                        logging.error(
                            'Failed to reserve bucket %s for project %s: '
                            'already reserved by %s', bucket_cfg.name,
                            project_id, bucket.project_id)
                        return
                if (bucket and bucket.entity_schema_version
                        == CURRENT_BUCKET_SCHEMA_VERSION
                        and bucket.project_id == project_id
                        and bucket.revision == revision and
                        bucket.config_content_binary):  # pragma: no coverage
                    return

                report_reservation = bucket is None or bucket.project_id != project_id
                Bucket(
                    id=bucket_cfg.name,
                    entity_schema_version=CURRENT_BUCKET_SCHEMA_VERSION,
                    project_id=project_id,
                    revision=revision,
                    config_content=protobuf.text_format.MessageToString(
                        bucket_cfg),
                    config_content_binary=bucket_cfg.SerializeToString(),
                ).put()
                if report_reservation:
                    logging.warning('Reserved bucket %s for project %s',
                                    bucket_cfg.name, project_id)
                logging.info('Updated bucket %s to revision %s',
                             bucket_cfg.name, revision)

            update_bucket()

    # Delete/unreserve non-existing buckets.
    all_bucket_keys = Bucket.query().fetch(keys_only=True)
    existing_bucket_keys = [
        ndb.Key(Bucket, b) for buckets in buckets_of_project.itervalues()
        for b in buckets
    ]
    to_delete = set(all_bucket_keys).difference(existing_bucket_keys)
    if to_delete:
        logging.warning('Deleting buckets: %s',
                        ', '.join(k.id() for k in to_delete))
        ndb.delete_multi(to_delete)
Example #8
def cron_update_buckets():
    """Synchronizes Bucket entities with configs fetched from luci-config."""
    config_map = config.get_project_configs(cfg_path(),
                                            project_config_pb2.BuildbucketCfg)

    buckets_of_project = {
        pid: set(b.name for b in pcfg.buckets)
        for pid, (_, pcfg) in config_map.iteritems()
    }

    for project_id, (revision, project_cfg) in config_map.iteritems():
        # revision is None in file-system mode. Use SHA1 of the config as revision.
        revision = revision or 'sha1:%s' % hashlib.sha1(
            project_cfg.SerializeToString()).hexdigest()
        for bucket_cfg in project_cfg.buckets:
            bucket = Bucket.get_by_id(bucket_cfg.name)
            if (bucket and bucket.project_id == project_id
                    and bucket.revision == revision):
                continue

            for acl in bucket_cfg.acls:
                if acl.identity and ':' not in acl.identity:
                    acl.identity = 'user:%s' % acl.identity

            @ndb.transactional
            def update_bucket():
                bucket = Bucket.get_by_id(bucket_cfg.name)
                if bucket and bucket.project_id != project_id:
                    # Does bucket.project_id still claim this bucket?
                    if bucket_cfg.name in buckets_of_project.get(
                            bucket.project_id, []):
                        logging.error(
                            'Failed to reserve bucket %s for project %s: '
                            'already reserved by %s', bucket_cfg.name,
                            project_id, bucket.project_id)
                        return
                if (bucket and bucket.project_id == project_id and
                        bucket.revision == revision):  # pragma: no coverage
                    return

                report_reservation = bucket is None or bucket.project_id != project_id
                Bucket(
                    id=bucket_cfg.name,
                    project_id=project_id,
                    revision=revision,
                    config_content=protobuf.text_format.MessageToString(
                        bucket_cfg),
                ).put()
                if report_reservation:
                    logging.warning('Reserved bucket %s for project %s',
                                    bucket_cfg.name, project_id)
                logging.info('Updated bucket %s to revision %s',
                             bucket_cfg.name, revision)

            update_bucket()

    # Delete/unreserve non-existing buckets.
    all_bucket_keys = Bucket.query().fetch(keys_only=True)
    existing_bucket_keys = [
        ndb.Key(Bucket, b) for buckets in buckets_of_project.itervalues()
        for b in buckets
    ]
    to_delete = set(all_bucket_keys).difference(existing_bucket_keys)
    if to_delete:
        logging.warning('Deleting buckets: %s',
                        ', '.join(k.id() for k in to_delete))
        ndb.delete_multi(to_delete)