Example #1
def update_images(namespace_name, repo_name):
    permission = ModifyRepositoryPermission(namespace_name, repo_name)
    if permission.can():
        logger.debug("Looking up repository")
        repository_ref = registry_model.lookup_repository(namespace_name,
                                                          repo_name,
                                                          kind_filter="image")
        if repository_ref is None:
            # Make sure the repo actually exists.
            image_pushes.labels("v1", 404, "").inc()
            abort(404, message="Unknown repository", issue="unknown-repo")

        builder = lookup_manifest_builder(repository_ref,
                                          session.get("manifest_builder"),
                                          storage, docker_v2_signing_key)
        if builder is None:
            image_pushes.labels("v1", 400, "").inc()
            abort(400)

        # Generate a job for each notification that has been added to this repo
        logger.debug("Adding notifications for repository")
        event_data = {
            "updated_tags": [tag.name for tag in builder.committed_tags],
        }

        builder.done()

        track_and_log("push_repo", repository_ref)
        spawn_notification(repository_ref, "repo_push", event_data)
        image_pushes.labels("v1", 204, "").inc()
        return make_response("Updated", 204)

    image_pushes.labels("v1", 403, "").inc()
    abort(403)
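
The image_pushes object above behaves like a Prometheus-style counter labeled by protocol, HTTP status, and tag. A minimal sketch of the definition those calls imply (an assumption; the actual metric and label names are not shown in this example):

from prometheus_client import Counter

# Assumed definition, inferred only from the image_pushes.labels("v1", <status>, "").inc()
# calls above; the real label names may differ.
image_pushes = Counter(
    "image_pushes",
    "Count of image push attempts, by protocol, HTTP status, and tag",
    labelnames=["protocol", "status", "tag"],
)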
Example #2
def emit_log(mirror,
             log_kind,
             verb,
             message,
             tag=None,
             tags=None,
             stdout=None,
             stderr=None):
    logs_model.log_action(
        log_kind,
        namespace_name=mirror.repository.namespace_user.username,
        repository_name=mirror.repository.name,
        metadata={
            "verb": verb,
            "namespace": mirror.repository.namespace_user.username,
            "repo": mirror.repository.name,
            "message": message,
            "tag": tag,
            "tags": tags,
            "stdout": stdout,
            "stderr": stderr,
        },
    )

    if log_kind in (
            "repo_mirror_sync_started",
            "repo_mirror_sync_failed",
            "repo_mirror_sync_success",
    ):
        spawn_notification(wrap_repository(mirror.repository), log_kind,
                           {"message": message})
Example #3
def _write_manifest_and_log(namespace_name, repo_name, tag_name, manifest_impl):
    repository_ref, manifest, tag = _write_manifest(
        namespace_name, repo_name, tag_name, manifest_impl
    )

    # Queue all of the manifest's local blobs for replication.
    if features.STORAGE_REPLICATION:
        blobs = registry_model.get_manifest_local_blobs(manifest)
        if blobs is None:
            logger.error("Could not lookup blobs for manifest `%s`", manifest.digest)
        else:
            with queue_replication_batch(namespace_name) as queue_storage_replication:
                for blob_digest in blobs:
                    queue_storage_replication(blob_digest)

    track_and_log("push_repo", repository_ref, tag=tag_name)
    spawn_notification(repository_ref, "repo_push", {"updated_tags": [tag_name]})
    metric_queue.repository_push.Inc(labelvalues=[namespace_name, repo_name, "v2", True])

    return Response(
        "OK",
        status=202,
        headers={
            "Docker-Content-Digest": manifest.digest,
            "Location": url_for(
                "v2.fetch_manifest_by_digest",
                repository="%s/%s" % (namespace_name, repo_name),
                manifest_ref=manifest.digest,
            ),
        },
    )
Example #4
def _write_manifest_and_log(namespace_name, repo_name, tag_name,
                            manifest_impl):
    repository_ref, manifest, tag = _write_manifest(namespace_name, repo_name,
                                                    tag_name, manifest_impl)

    # Queue all of the manifest's local blobs for replication.
    if features.STORAGE_REPLICATION:
        blobs = registry_model.get_manifest_local_blobs(manifest)
        if blobs is None:
            logger.error('Could not lookup blobs for manifest `%s`',
                         manifest.digest)
        else:
            with queue_replication_batch(
                    namespace_name) as queue_storage_replication:
                for blob_digest in blobs:
                    queue_storage_replication(blob_digest)

    track_and_log('push_repo', repository_ref, tag=tag_name)
    spawn_notification(repository_ref, 'repo_push',
                       {'updated_tags': [tag_name]})
    metric_queue.repository_push.Inc(
        labelvalues=[namespace_name, repo_name, 'v2', True])

    return Response(
        'OK',
        status=202,
        headers={
            'Docker-Content-Digest': manifest.digest,
            'Location': url_for('v2.fetch_manifest_by_digest',
                                repository='%s/%s' % (namespace_name, repo_name),
                                manifest_ref=manifest.digest),
        },
    )
Example #5
def update_images(namespace_name, repo_name):
    permission = ModifyRepositoryPermission(namespace_name, repo_name)

    if permission.can():
        logger.debug('Looking up repository')
        repository_ref = registry_model.lookup_repository(namespace_name,
                                                          repo_name,
                                                          kind_filter='image')
        if repository_ref is None:
            # Make sure the repo actually exists.
            abort(404, message='Unknown repository', issue='unknown-repo')

        builder = lookup_manifest_builder(repository_ref,
                                          session.get('manifest_builder'),
                                          storage, docker_v2_signing_key)
        if builder is None:
            abort(400)

        # Generate a job for each notification that has been added to this repo
        logger.debug('Adding notifications for repository')
        event_data = {
            'updated_tags': [tag.name for tag in builder.committed_tags],
        }

        builder.done()

        track_and_log('push_repo', repository_ref)
        spawn_notification(repository_ref, 'repo_push', event_data)
        metric_queue.repository_push.Inc(
            labelvalues=[namespace_name, repo_name, 'v1', True])
        return make_response('Updated', 204)

    abort(403)
Example #6
    def send_notification(self,
                          kind,
                          error_message=None,
                          image_id=None,
                          manifest_digests=None):
        with UseThenDisconnect(app.config):
            tags = self.build_config.get("docker_tags", ["latest"])
            trigger = self.repo_build.trigger
            if trigger is not None and trigger.id is not None:
                trigger_kind = trigger.service.name
            else:
                trigger_kind = None

            event_data = {
                "build_id": self.repo_build.uuid,
                "build_name": self.repo_build.display_name,
                "docker_tags": tags,
                "trigger_id": trigger.uuid if trigger is not None else None,
                "trigger_kind": trigger_kind,
                "trigger_metadata":
                self.build_config.get("trigger_metadata", {}),
            }

            if image_id is not None:
                event_data["image_id"] = image_id

            if manifest_digests:
                event_data["manifest_digests"] = manifest_digests

            if error_message is not None:
                event_data["error_message"] = error_message

            # TODO: remove when more endpoints have been converted to using
            # interfaces
            repo = AttrDict({
                "namespace_name": self.repo_build.repository.namespace_user.username,
                "name": self.repo_build.repository.name,
            })
            spawn_notification(
                repo,
                kind,
                event_data,
                subpage="build/%s" % self.repo_build.uuid,
                pathargs=["build", self.repo_build.uuid],
            )
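
Several of these examples wrap the repository in an AttrDict so downstream code can read namespace_name and name as attributes. A minimal sketch of what such a helper can look like (an assumption; the project's own AttrDict may differ):

class AttrDict(dict):
    # Minimal attribute-access dict: d["x"] is also readable as d.x.
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

repo = AttrDict({"namespace_name": "acme", "name": "webapp"})
assert repo.namespace_name == "acme"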
Example #7
    def send_notification(self,
                          kind,
                          error_message=None,
                          image_id=None,
                          manifest_digests=None):
        with UseThenDisconnect(app.config):
            tags = self.build_config.get('docker_tags', ['latest'])
            trigger = self.repo_build.trigger
            if trigger is not None and trigger.id is not None:
                trigger_kind = trigger.service.name
            else:
                trigger_kind = None

            event_data = {
                'build_id': self.repo_build.uuid,
                'build_name': self.repo_build.display_name,
                'docker_tags': tags,
                'trigger_id': trigger.uuid if trigger is not None else None,
                'trigger_kind': trigger_kind,
                'trigger_metadata': self.build_config.get('trigger_metadata', {})
            }

            if image_id is not None:
                event_data['image_id'] = image_id

            if manifest_digests:
                event_data['manifest_digests'] = manifest_digests

            if error_message is not None:
                event_data['error_message'] = error_message

            # TODO: remove when more endpoints have been converted to using
            # interfaces
            repo = AttrDict({
                'namespace_name': self.repo_build.repository.namespace_user.username,
                'name': self.repo_build.repository.name,
            })
            spawn_notification(repo,
                               kind,
                               event_data,
                               subpage='build/%s' % self.repo_build.uuid,
                               pathargs=['build', self.repo_build.uuid])
Example #8
    def _analyze(self, layer, force_parents=False):
        """ Analyzes a single layer.

        Return a tuple of two bools:
          - The first one tells us if we should evaluate its children.
          - The second one is set to False when another worker pre-empted the candidate's analysis
            for us.
    """
        # If the parent couldn't be analyzed with the target version or higher, we can't analyze
        # this image. Mark it as failed with the current target version.
        if not force_parents and (
                layer.parent_id and not layer.parent.security_indexed and
                layer.parent.security_indexed_engine >= self._target_version):
            if not set_secscan_status(layer, False, self._target_version):
                raise PreemptedException

            # Nothing more to do.
            return

        # Make sure the image's storage is not marked as uploading. If so, nothing more to do.
        if layer.storage.uploading:
            if not set_secscan_status(layer, False, self._target_version):
                raise PreemptedException

            # Nothing more to do.
            return

        # Analyze the image.
        previously_security_indexed_successfully = layer.security_indexed
        previous_security_indexed_engine = layer.security_indexed_engine

        logger.info('Analyzing layer %s', layer.docker_image_id)
        analyzed_version = self._api.analyze_layer(layer)

        logger.info('Analyzed layer %s successfully with version %s',
                    layer.docker_image_id, analyzed_version)

        # Mark the image as analyzed.
        if not set_secscan_status(layer, True, analyzed_version):
            # If the image was previously successfully marked as resolved, then set_secscan_status
            # might return False because we're not changing it (since this is a fixup).
            if not previously_security_indexed_successfully:
                raise PreemptedException

        # If we finished the job first, decide whether to send notifications.
        # Notifications are sent if:
        #  1) This is a new layer
        #  2) This is an existing layer that previously did not index properly
        # We don't notify on every analysis: re-indexing an already-successful
        # layer for a newer scanner feature set would make notifications spammy.
        is_new_image = previous_security_indexed_engine == IMAGE_NOT_SCANNED_ENGINE_VERSION
        is_existing_image_unindexed = not is_new_image and not previously_security_indexed_successfully
        if (features.SECURITY_NOTIFICATIONS
                and (is_new_image or is_existing_image_unindexed)):
            # Get the tags of the layer we analyzed.
            repository_map = defaultdict(list)
            event = ExternalNotificationEvent.get(name='vulnerability_found')
            matching = list(
                filter_tags_have_repository_event(get_tags_for_image(layer.id),
                                                  event))

            for tag in matching:
                repository_map[tag.repository_id].append(tag)

            # If there is at least one tag, look up the vulnerabilities for the
            # image now that it is analyzed.
            if len(repository_map) > 0:
                logger.debug('Loading data for layer %s', layer.id)
                # An APIRequestFailure from the scanner API propagates to the caller.
                layer_data = self._api.get_layer_data(
                    layer, include_vulnerabilities=True)

                if layer_data is not None:
                    # Dispatch events for any detected vulnerabilities
                    logger.debug('Got data for layer %s: %s', layer.id,
                                 layer_data)
                    found_features = layer_data['Layer'].get('Features', [])
                    for repository_id in repository_map:
                        tags = repository_map[repository_id]
                        vulnerabilities = dict()

                        # Collect all the vulnerabilities found for the layer under each repository and send
                        # as a batch notification.
                        for feature in found_features:
                            if 'Vulnerabilities' not in feature:
                                continue

                            for vulnerability in feature.get('Vulnerabilities', []):
                                vuln_data = {
                                    'id': vulnerability['Name'],
                                    'description': vulnerability.get('Description', None),
                                    'link': vulnerability.get('Link', None),
                                    'has_fix': 'FixedBy' in vulnerability,

                                    # TODO: Change this key name if/when we change the event format.
                                    'priority': vulnerability.get('Severity', 'Unknown'),
                                }

                                vulnerabilities[vulnerability['Name']] = vuln_data

                        # TODO: remove when more endpoints have been converted to using
                        # interfaces
                        repository = AttrDict({
                            'namespace_name': tags[0].repository.namespace_user.username,
                            'name': tags[0].repository.name,
                        })

                        repo_vulnerabilities = list(vulnerabilities.values())
                        if not repo_vulnerabilities:
                            continue

                        def priority_key(v):
                            return PRIORITY_LEVELS.get(v['priority'], {}).get('index', 100)

                        repo_vulnerabilities.sort(key=priority_key)

                        event_data = {
                            'tags': [tag.name for tag in tags],
                            'vulnerabilities': repo_vulnerabilities,
                            # For back-compat with existing events.
                            'vulnerability': repo_vulnerabilities[0],
                        }

                        spawn_notification(repository, 'vulnerability_found',
                                           event_data)
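
The sort key above relies on a PRIORITY_LEVELS mapping from severity name to metadata containing an 'index'. A minimal sketch of the shape that lookup implies (the severity names and indices here are assumptions, not the project's actual table):

# Assumed shape, inferred from PRIORITY_LEVELS.get(..., {}).get('index', 100) above.
PRIORITY_LEVELS = {
    'Critical': {'index': 0},
    'High': {'index': 1},
    'Medium': {'index': 2},
    'Low': {'index': 3},
    'Negligible': {'index': 4},
    'Unknown': {'index': 5},
}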
Example #9
def start_build(repository, prepared_build, pull_robot_name=None):
    # Ensure that builds are only run in image repositories.
    if repository.kind.name != "image":
        raise Exception(
            "Attempt to start a build for application repository %s" %
            repository.id)

    # Ensure the repository isn't in mirror or read-only mode.
    if repository.state != RepositoryState.NORMAL:
        raise Exception(
            ("Attempt to start a build for a non-normal repository: %s %s" %
             (repository.id, repository.state)))

    # Ensure that disabled triggers are not run.
    if prepared_build.trigger is not None and not prepared_build.trigger.enabled:
        raise BuildTriggerDisabledException

    if repository.namespace_user.maximum_queued_builds_count is not None:
        queue_item_canonical_name = [repository.namespace_user.username]
        alive_builds = dockerfile_build_queue.num_alive_jobs(
            queue_item_canonical_name)
        if alive_builds >= repository.namespace_user.maximum_queued_builds_count:
            logger.debug(
                "Prevented queueing of build under namespace %s due to reaching max: %s",
                repository.namespace_user.username,
                repository.namespace_user.maximum_queued_builds_count,
            )
            raise MaximumBuildsQueuedException()

    host = app.config["SERVER_HOSTNAME"]
    repo_path = "%s/%s/%s" % (host, repository.namespace_user.username,
                              repository.name)

    new_token = model.token.create_access_token(
        repository,
        "write",
        kind="build-worker",
        friendly_name="Repository Build Token")
    logger.debug(
        "Creating build %s with repo %s tags %s",
        prepared_build.build_name,
        repo_path,
        prepared_build.tags,
    )

    job_config = {
        "docker_tags": prepared_build.tags,
        "registry": host,
        "build_subdir": prepared_build.subdirectory,
        "context": prepared_build.context,
        "trigger_metadata": prepared_build.metadata or {},
        "is_manual": prepared_build.is_manual,
        "manual_user": get_authenticated_user().username
        if get_authenticated_user() else None,
        "archive_url": prepared_build.archive_url,
    }

    with app.config["DB_TRANSACTION_FACTORY"](db):
        build_request = model.build.create_repository_build(
            repository,
            new_token,
            job_config,
            prepared_build.dockerfile_id,
            prepared_build.build_name,
            prepared_build.trigger,
            pull_robot_name=pull_robot_name,
        )

        pull_creds = model.user.get_pull_credentials(
            pull_robot_name) if pull_robot_name else None

        json_data = json.dumps({
            "build_uuid": build_request.uuid,
            "pull_credentials": pull_creds
        })

        queue_id = dockerfile_build_queue.put(
            [repository.namespace_user.username, repository.name],
            json_data,
            retries_remaining=3)

        build_request.queue_id = queue_id
        build_request.save()

    # Add the build to the repo's log and spawn the build_queued notification.
    event_log_metadata = {
        "build_id": build_request.uuid,
        "docker_tags": prepared_build.tags,
        "repo": repository.name,
        "namespace": repository.namespace_user.username,
        "is_manual": prepared_build.is_manual,
        "manual_user": get_authenticated_user().username
        if get_authenticated_user() else None,
    }

    if prepared_build.trigger:
        event_log_metadata["trigger_id"] = prepared_build.trigger.uuid
        event_log_metadata["trigger_kind"] = prepared_build.trigger.service.name
        event_log_metadata["trigger_metadata"] = prepared_build.metadata or {}

    logs_model.log_action(
        "build_dockerfile",
        repository.namespace_user.username,
        ip=get_request_ip(),
        metadata=event_log_metadata,
        repository=repository,
    )

    # TODO: remove when more endpoints have been converted to using interfaces
    repo = AttrDict({
        "namespace_name": repository.namespace_user.username,
        "name": repository.name,
    })

    spawn_notification(
        repo,
        "build_queued",
        event_log_metadata,
        subpage="build/%s" % build_request.uuid,
        pathargs=["build", build_request.uuid],
    )

    return build_request
Example #10
def start_build(repository, prepared_build, pull_robot_name=None):
    # Ensure that builds are only run in image repositories.
    if repository.kind.name != 'image':
        raise Exception(
            'Attempt to start a build for application repository %s' %
            repository.id)

    # Ensure the repository isn't in mirror or read-only mode.
    if repository.state != RepositoryState.NORMAL:
        raise Exception(
            ('Attempt to start a build for a non-normal repository: %s %s' %
             (repository.id, repository.state)))

    # Ensure that disabled triggers are not run.
    if prepared_build.trigger is not None and not prepared_build.trigger.enabled:
        raise BuildTriggerDisabledException

    if repository.namespace_user.maximum_queued_builds_count is not None:
        queue_item_canonical_name = [repository.namespace_user.username]
        alive_builds = dockerfile_build_queue.num_alive_jobs(
            queue_item_canonical_name)
        if alive_builds >= repository.namespace_user.maximum_queued_builds_count:
            logger.debug(
                'Prevented queueing of build under namespace %s due to reaching max: %s',
                repository.namespace_user.username,
                repository.namespace_user.maximum_queued_builds_count)
            raise MaximumBuildsQueuedException()

    host = app.config['SERVER_HOSTNAME']
    repo_path = '%s/%s/%s' % (host, repository.namespace_user.username,
                              repository.name)

    new_token = model.token.create_access_token(
        repository,
        'write',
        kind='build-worker',
        friendly_name='Repository Build Token')
    logger.debug('Creating build %s with repo %s tags %s',
                 prepared_build.build_name, repo_path, prepared_build.tags)

    job_config = {
        'docker_tags': prepared_build.tags,
        'registry': host,
        'build_subdir': prepared_build.subdirectory,
        'context': prepared_build.context,
        'trigger_metadata': prepared_build.metadata or {},
        'is_manual': prepared_build.is_manual,
        'manual_user': get_authenticated_user().username
        if get_authenticated_user() else None,
        'archive_url': prepared_build.archive_url
    }

    with app.config['DB_TRANSACTION_FACTORY'](db):
        build_request = model.build.create_repository_build(
            repository,
            new_token,
            job_config,
            prepared_build.dockerfile_id,
            prepared_build.build_name,
            prepared_build.trigger,
            pull_robot_name=pull_robot_name)

        pull_creds = model.user.get_pull_credentials(
            pull_robot_name) if pull_robot_name else None

        json_data = json.dumps({
            'build_uuid': build_request.uuid,
            'pull_credentials': pull_creds
        })

        queue_id = dockerfile_build_queue.put(
            [repository.namespace_user.username, repository.name],
            json_data,
            retries_remaining=3)

        build_request.queue_id = queue_id
        build_request.save()

    # Add the queueing of the build to the metrics queue.
    metric_queue.repository_build_queued.Inc(
        labelvalues=[repository.namespace_user.username, repository.name])

    # Add the build to the repo's log and spawn the build_queued notification.
    event_log_metadata = {
        'build_id': build_request.uuid,
        'docker_tags': prepared_build.tags,
        'repo': repository.name,
        'namespace': repository.namespace_user.username,
        'is_manual': prepared_build.is_manual,
        'manual_user': get_authenticated_user().username
        if get_authenticated_user() else None
    }

    if prepared_build.trigger:
        event_log_metadata['trigger_id'] = prepared_build.trigger.uuid
        event_log_metadata['trigger_kind'] = prepared_build.trigger.service.name
        event_log_metadata['trigger_metadata'] = prepared_build.metadata or {}

    logs_model.log_action('build_dockerfile',
                          repository.namespace_user.username,
                          ip=get_request_ip(),
                          metadata=event_log_metadata,
                          repository=repository)

    # TODO: remove when more endpoints have been converted to using interfaces
    repo = AttrDict({
        'namespace_name': repository.namespace_user.username,
        'name': repository.name,
    })

    spawn_notification(repo,
                       'build_queued',
                       event_log_metadata,
                       subpage='build/%s' % build_request.uuid,
                       pathargs=['build', build_request.uuid])

    return build_request
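
Taken together, these call sites pass spawn_notification a repository reference (a registry_model ref or an AttrDict exposing namespace_name and name), an event-kind string, an event-data dict, and optional subpage/pathargs. A minimal stub of the signature they imply (an assumption; the real function queues notification jobs, which these examples do not show):

# Hypothetical stub matching the call sites above; not the project's actual code.
def spawn_notification(repo, event_kind, event_data, subpage=None, pathargs=None):
    # Would queue one job per notification configured on `repo` for events of
    # type `event_kind`, carrying `event_data`; subpage/pathargs customize the
    # links embedded in the notification.
    ...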