Example #1

def InitializeApiClients(cls, http):
    cls._debug_client = apis.GetClientInstance('debug', 'v2', http)
    cls._debug_messages = apis.GetMessagesModule('debug', 'v2')
    cls._resource_client = apis.GetClientInstance('projects', 'v1beta1',
                                                  http)
    cls._resource_messages = apis.GetMessagesModule('projects', 'v1beta1')
Example #2
def GetAdminClient():
    """Shortcut to get the latest Bigtable Admin client."""
    return apis.GetClientInstance('bigtableadmin', 'v2')
Example #3
def __init__(self, api_client=None):
  self.api_client = api_client
  if self.api_client is None:
    self.api_client = core_apis.GetClientInstance(API_NAME, API_VERSION)
  self.api_messages = self.api_client.MESSAGES_MODULE
Example #4
def OrganizationsClient(self):
    client = apis.GetClientInstance('cloudresourcemanager', 'v1')
    return client.organizations
Example #5
def GetClientInstance():
    return apis.GetClientInstance('servicemanagement', 'v1')
Example #6
def GetApiClientInstance():
    return core_apis.GetClientInstance('replicapoolupdater', 'v1beta1')
Example #7
def GetClient():
    """Import and return the appropriate storage client."""
    return core_apis.GetClientInstance('storage', 'v1')
Example #8
def Get(job):
  client = apis.GetClientInstance('ml', 'v1beta1')
  ref = resources.REGISTRY.Parse(job, collection='ml.projects.jobs')
  req = client.MESSAGES_MODULE.MlProjectsJobsGetRequest(
      projectsId=ref.projectsId, jobsId=ref.jobsId)
  return client.projects_jobs.Get(req)
Example #9

def SetApiEndpoint(cls):
    cls._client = apis.GetClientInstance('sourcerepo', 'v1')
Example #10
def SetApiEndpoint(cls, http):
  cls._client = apis.GetClientInstance('source', 'v1', http)
Example #11

def GetClientInstance():
    return apis.GetClientInstance(DATAFLOW_API_NAME, DATAFLOW_API_VERSION)
Example #12
def FetchLogs(log_filter=None,
              log_ids=None,
              order_by='DESC',
              limit=None,
              parent=None):
    """Fetches log entries.

    This method uses the Cloud Logging v2 API.
    https://cloud.google.com/logging/docs/api/introduction_v2

    Entries are sorted on the timestamp field, and the filter is applied
    afterwards. If limit is passed, returns only up to that many matching
    entries.

    It is recommended to provide a filter with resource.type, as well as
    log_ids.

    If neither log_filter nor log_ids is passed, no filtering is done.

    Args:
      log_filter: filter expression used in the request.
      log_ids: if present, constructs full log names based on parent and
        filters only those logs in addition to filtering with log_filter.
      order_by: the sort order, either DESC or ASC.
      limit: how many entries to return.
      parent: the name of the log's parent resource, e.g. "projects/foo" or
        "organizations/123". Defaults to the current project.

    Returns:
      A generator that yields matching log entries.
      Callers are responsible for handling any HTTP exceptions.
    """
    if parent:
        if not ('projects/' in parent or 'organizations/' in parent):
            raise exceptions.InvalidArgumentException(
                'parent', 'Unknown parent type in parent %s' % parent)
    else:
        parent = 'projects/%s' % properties.VALUES.core.project.Get(
            required=True)
    # The backend has an upper limit of 1000 for page_size.
    # However, there is no need to retrieve more entries if limit is specified.
    page_size = min(limit or 1000, 1000)
    id_filter = _LogFilterForIds(log_ids, parent)
    if id_filter and log_filter:
        combined_filter = '%s AND (%s)' % (id_filter, log_filter)
    else:
        combined_filter = id_filter or log_filter
    if order_by.upper() == 'DESC':
        order_by = 'timestamp desc'
    else:
        order_by = 'timestamp asc'

    client = apis.GetClientInstance('logging', 'v2beta1')
    messages = apis.GetMessagesModule('logging', 'v2beta1')
    request = messages.ListLogEntriesRequest(resourceNames=[parent],
                                             filter=combined_filter,
                                             orderBy=order_by)
    if 'projects/' in parent:
        request.projectIds = [parent[len('projects/'):]]
    return list_pager.YieldFromList(client.entries,
                                    request,
                                    field='entries',
                                    limit=limit,
                                    batch_size=page_size,
                                    batch_size_attribute='pageSize')
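
A minimal usage sketch of FetchLogs (a hypothetical caller, not part of the snippet above; it assumes gcloud credentials and a default project are already configured), following the docstring's recommendation to combine a resource.type filter with log IDs:

entries = FetchLogs(
    log_filter='resource.type="gae_app" AND severity>=ERROR',  # constrain resource.type, per the docstring
    log_ids=['stderr'],  # expanded to full log names under the parent
    order_by='DESC',
    limit=50)  # yield at most 50 matching entries
for entry in entries:
    print(entry.timestamp, entry.logName)  # LogEntry fields from the v2 messages module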
Example #13
def SetApiEndpoint(cls):
    cls._client = core_apis.GetClientInstance('bigquery', 'v2')
    cls._messages = core_apis.GetMessagesModule('bigquery', 'v2')
Example #14
def OperationsClient():
    return apis.GetClientInstance('cloudresourcemanager',
                                  OPERATIONS_API_VERSION)
Example #15
def client(self):
  return apis.GetClientInstance('deploymentmanager', 'v2')
Example #16
def Delete(session):
    client = apis.GetClientInstance('spanner', 'v1')
    msgs = apis.GetMessagesModule('spanner', 'v1')
    req = msgs.SpannerProjectsInstancesDatabasesSessionsDeleteRequest(
        name=session.name)
    return client.projects_instances_databases_sessions.Delete(req)
Example #17
def Filter(self, context, args):
  context['iam-client'] = apis.GetClientInstance('iam', 'v1')
  context['iam-messages'] = apis.GetMessagesModule('iam', 'v1')
  context['iam-resources'] = resources
Example #18
def GetClientInstance(no_http=False):
    return apis.GetClientInstance('ml', 'v1beta1', no_http=no_http)
Example #19
def GetClientInstance(use_http=True):
    return apis.GetClientInstance(_API_NAME,
                                  _API_VERSION,
                                  no_http=(not use_http))
Example #20
def GetClient():
  """Returns the client for the logging API."""
  return core_apis.GetClientInstance('logging', 'v2')
Example #21
def __init__(self, project):
    self.project = project
    self.client = core_apis.GetClientInstance('storage', 'v1')
    self.messages = core_apis.GetMessagesModule('storage', 'v1')
Example #22
def GetClientV1():
  """Returns the client for the v1beta3 (legacy v1) logging API."""
  return core_apis.GetClientInstance('logging', 'v1beta3')
Example #23
    if not result:
        return

    messages = GetMessagesModule()

    RaiseIfResultNotTypeOf(result, messages.Operation)

    result_dict = encoding.MessageToDict(result)

    if not async:
        op_name = result_dict['name']
        log.status.Print(
            'Waiting for async operation {0} to complete...'.format(op_name))
        result_dict = encoding.MessageToDict(
            WaitForOperation(op_name,
                             apis.GetClientInstance('servicemanagement',
                                                    'v1')))

    # Convert metadata startTime to local time
    if 'metadata' in result_dict and 'startTime' in result_dict['metadata']:
        result_dict['metadata']['startTime'] = (
            ConvertUTCDateTimeStringToLocalTimeString(
                result_dict['metadata']['startTime']))

    return result_dict


def RaiseIfResultNotTypeOf(test_object, expected_type, nonetype_ok=False):
    if nonetype_ok and test_object is None:
        return
    if not isinstance(test_object, expected_type):
        raise TypeError('result must be of type %s' % expected_type)
Example #24
    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided to
            this command invocation.

        Returns:
          Some value that we want to have printed later.
        """

        if args.gcs_source_staging_dir is None:
            args.gcs_source_staging_dir = 'gs://{project}_cloudbuild/source'.format(
                project=properties.VALUES.core.project.Get(), )
        if args.gcs_log_dir is None:
            args.gcs_log_dir = 'gs://{project}_cloudbuild/logs'.format(
                project=properties.VALUES.core.project.Get(), )

        client = core_apis.GetClientInstance('cloudbuild', 'v1')
        messages = core_apis.GetMessagesModule('cloudbuild', 'v1')
        registry = self.context['registry']

        gcs_client = storage_api.StorageClient()

        # First, create the build request.
        build_timeout = properties.VALUES.container.build_timeout.Get()
        if build_timeout is not None:
            timeout_str = build_timeout + 's'
        else:
            timeout_str = None

        if args.tag:
            build_config = messages.Build(
                images=[args.tag],
                steps=[
                    messages.BuildStep(
                        name='gcr.io/cloud-builders/docker',
                        args=['build', '-t', args.tag, '.'],
                    ),
                ],
                timeout=timeout_str,
            )
        elif args.config:
            build_config = config.LoadCloudbuildConfig(args.config, messages)
        else:
            # Neither --tag nor --config was given; fail fast rather than hit
            # an undefined build_config below.
            raise c_exceptions.ToolException('--tag or --config is required.')

        if build_config.timeout is None:
            build_config.timeout = timeout_str

        suffix = '.tgz'
        if args.source.startswith('gs://') or os.path.isfile(args.source):
            _, suffix = os.path.splitext(args.source)

        # Next, stage the source to Cloud Storage.
        staged_object = '{stamp}_{tag_ish}{suffix}'.format(
            stamp=times.GetTimeStampFromDateTime(times.Now()),
            tag_ish='_'.join(build_config.images or ['null']).replace('/', '_'),
            suffix=suffix,
        )
        gcs_source_staging_dir = registry.Parse(args.gcs_source_staging_dir,
                                                collection='storage.objects')
        gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)
        if gcs_source_staging_dir.object:
            staged_object = gcs_source_staging_dir.object + '/' + staged_object

        gcs_source_staging = registry.Create(
            collection='storage.objects',
            bucket=gcs_source_staging_dir.bucket,
            object=staged_object)

        if args.source.startswith('gs://'):
            gcs_source = registry.Parse(args.source,
                                        collection='storage.objects')
            staged_source_obj = gcs_client.Copy(gcs_source, gcs_source_staging)
            build_config.source = messages.Source(
                storageSource=messages.StorageSource(
                    bucket=staged_source_obj.bucket,
                    object=staged_source_obj.name,
                    generation=staged_source_obj.generation,
                ))
        else:
            if not os.path.exists(args.source):
                raise c_exceptions.BadFileException(
                    'could not find source [{src}]'.format(src=args.source))
            if os.path.isdir(args.source):
                source_snapshot = snapshot.Snapshot(args.source)
                size_str = resource_transform.TransformSize(
                    source_snapshot.uncompressed_size)
                log.status.write(
                    'Creating temporary tarball archive of {num_files} file(s)'
                    ' totalling {size} before compression.\n'.format(
                        num_files=len(source_snapshot.files), size=size_str))
                staged_source_obj = source_snapshot.CopyTarballToGCS(
                    gcs_client, gcs_source_staging)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
            elif os.path.isfile(args.source):
                unused_root, ext = os.path.splitext(args.source)
                if ext not in _ALLOWED_SOURCE_EXT:
                    raise c_exceptions.BadFileException(
                        'Local file [{src}] is none of {exts}'.format(
                            src=args.source,
                            exts=', '.join(_ALLOWED_SOURCE_EXT)))
                log.status.write('Uploading local file [{src}] to '
                                 '[gs://{bucket}/{object}]\n'.format(
                                     src=args.source,
                                     bucket=gcs_source_staging.bucket,
                                     object=gcs_source_staging.object,
                                 ))
                staged_source_obj = gcs_client.CopyFileToGCS(
                    storage_util.BucketReference.FromBucketUrl(
                        gcs_source_staging.bucket), args.source,
                    gcs_source_staging.object)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))

        gcs_log_dir = registry.Parse(args.gcs_log_dir,
                                     collection='storage.objects')

        if gcs_log_dir.bucket != gcs_source_staging.bucket:
            # Create the logs bucket if it does not yet exist.
            gcs_client.CreateBucketIfNotExists(gcs_log_dir.bucket)
        build_config.logsBucket = 'gs://' + gcs_log_dir.bucket + '/' + gcs_log_dir.object

        log.debug('creating build: ' + repr(build_config))

        # Start the build.
        op = client.projects_builds.Create(
            messages.CloudbuildProjectsBuildsCreateRequest(
                build=build_config,
                projectId=properties.VALUES.core.project.Get()))
        json = encoding.MessageToJson(op.metadata)
        build = encoding.JsonToMessage(messages.BuildOperationMetadata,
                                       json).build

        build_ref = registry.Create(collection='cloudbuild.projects.builds',
                                    projectId=build.projectId,
                                    id=build.id)

        log.CreatedResource(build_ref)
        if build.logUrl:
            log.status.write(
                'Logs are permanently available at [{log_url}]\n'.format(
                    log_url=build.logUrl))
        else:
            log.status.write('Logs are available in the Cloud Console.\n')

        # If the command is run --async, we just print out a reference to the build.
        if args.async:
            return build

        # Otherwise, logs are streamed from GCS.
        return cb_logs.CloudBuildClient(client, messages).Stream(build_ref)
Example #25
def GetApiKeysClientInstance():
    return apis.GetClientInstance('apikeys', 'v1')
Example #26
def __init__(self, client=None, messages=None):
    self.client = client or apis.GetClientInstance('ml', 'v1beta1')
    self.messages = messages or apis.GetMessagesModule('ml', 'v1beta1')
Example #27
def GetGenomicsClient(version='v1'):
    return core_apis.GetClientInstance('genomics', version)
Example #28
def ExecuteCloudBuild(project, bucket_ref, object_name, output_image):
    """Execute a call to CloudBuild service and wait for it to finish.

  Args:
    project: the cloud project ID.
    bucket_ref: Reference to GCS bucket containing source to build.
    object_name: GCS object name containing source to build.
    output_image: GCR location for the output docker image;
                  eg, gcr.io/test-gae/hardcoded-output-tag.

  Raises:
    BuildFailedError: when the build fails.
  """
    builder = properties.VALUES.app.container_builder_image.Get()
    log.debug('Using builder image: [{0}]'.format(builder))
    logs_bucket = bucket_ref.bucket

    cloud_build_timeout = properties.VALUES.app.cloud_build_timeout.Get()
    if cloud_build_timeout is not None:
        timeout_str = cloud_build_timeout + 's'
    else:
        timeout_str = None

    cloudbuild_client = core_apis.GetClientInstance('cloudbuild', 'v1')
    cloudbuild_messages = core_apis.GetMessagesModule('cloudbuild', 'v1')

    build_op = cloudbuild_client.projects_builds.Create(
        cloudbuild_messages.CloudbuildProjectsBuildsCreateRequest(
            projectId=project,
            build=cloudbuild_messages.Build(
                timeout=timeout_str,
                source=cloudbuild_messages.Source(
                    storageSource=cloudbuild_messages.StorageSource(
                        bucket=bucket_ref.bucket,
                        object=object_name,
                    ), ),
                steps=[
                    cloudbuild_messages.BuildStep(
                        name=builder, args=['build', '-t', output_image, '.'])
                ],
                images=[output_image],
                logsBucket=logs_bucket,
            ),
        ))
    # Find build ID from operation metadata and print the logs URL.
    build_id = None
    logs_uri = None
    if build_op.metadata is not None:
        for prop in build_op.metadata.additionalProperties:
            if prop.key == 'build':
                for build_prop in prop.value.object_value.properties:
                    if build_prop.key == 'id':
                        build_id = build_prop.value.string_value
                        if logs_uri is not None:
                            break
                    if build_prop.key == 'logUrl':
                        logs_uri = build_prop.value.string_value
                        if build_id is not None:
                            break
                break

    if build_id is None:
        raise BuildFailedError('Could not determine build ID')
    log.status.Print(
        'Started cloud build [{build_id}].'.format(build_id=build_id))
    log_object = CLOUDBUILD_LOGFILE_FMT_STRING.format(build_id=build_id)
    log_tailer = cloudbuild_logs.LogTailer(bucket=logs_bucket, obj=log_object)
    log_loc = None
    if logs_uri:
        log.status.Print('To see logs in the Cloud Console: ' + logs_uri)
        log_loc = 'at ' + logs_uri
    else:
        log.status.Print('Logs can be found in the Cloud Console.')
        log_loc = 'in the Cloud Console.'
    op = operations.WaitForOperation(
        operation_service=cloudbuild_client.operations,
        operation=build_op,
        retry_interval=1,
        max_retries=60 * 60,
        retry_callback=log_tailer.Poll)
    # Poll the logs one final time to ensure we have everything. We know this
    # final poll will get the full log contents because GCS is strongly consistent
    # and Container Builder waits for logs to finish pushing before marking the
    # build complete.
    log_tailer.Poll(is_last=True)
    final_status = _GetStatusFromOp(op)
    if final_status != CLOUDBUILD_SUCCESS:
        raise BuildFailedError('Cloud build failed with status ' +
                               final_status + '. Check logs ' + log_loc)
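
A hedged usage sketch for ExecuteCloudBuild (the project, bucket, object, and image names below are placeholders; storage_util.BucketReference.FromBucketUrl is assumed to be importable, as in the local-file branch of Example #24):

bucket_ref = storage_util.BucketReference.FromBucketUrl('gs://my-staging-bucket')
try:
    ExecuteCloudBuild(
        project='my-project',
        bucket_ref=bucket_ref,
        object_name='source/app.tgz',  # source tarball previously uploaded to the bucket
        output_image='gcr.io/my-project/app:latest')
except BuildFailedError as e:
    log.error('Cloud build failed: %s' % e)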
Example #29

    def Run(self, args):
        project = properties.VALUES.core.project.Get(required=True)
        version = args.version or util.GenerateVersionId()
        use_cloud_build = properties.VALUES.app.use_cloud_build.GetBool()

        config_cleanup = None
        if args.deployables:
            app_config = yaml_parsing.AppConfigSet(args.deployables)
        else:
            if not os.path.exists(DEFAULT_DEPLOYABLE):
                console_io.PromptContinue(
                    'Deployment to Google App Engine requires an app.yaml file. '
                    'This command will run `gcloud preview app gen-config` to generate '
                    'an app.yaml file for you in the current directory (if the current '
                    'directory does not contain an App Engine module, please answer '
                    '"no").',
                    cancel_on_no=True)
                # This generates the app.yaml AND the Dockerfile (and related files).
                params = ext_runtime.Params(deploy=True)
                configurator = fingerprinter.IdentifyDirectory(os.getcwd(),
                                                               params=params)
                if configurator is None:
                    raise NoAppIdentifiedError(
                        'Could not identify an app in the current directory.\n\n'
                        'Please prepare an app.yaml file for your application manually '
                        'and deploy again.')
                config_cleanup = configurator.GenerateConfigs()
                log.status.Print(
                    '\nCreated [{0}] in the current directory.\n'.format(
                        DEFAULT_DEPLOYABLE))
            app_config = yaml_parsing.AppConfigSet([DEFAULT_DEPLOYABLE])

        # If the app has enabled Endpoints API Management features, pass
        # control to the cloud_endpoints handler.
        for _, module in app_config.Modules().items():
            if module and module.parsed and module.parsed.beta_settings:
                bs = module.parsed.beta_settings
                use_endpoints = bs.get('use_endpoints_api_management',
                                       '').lower()
                if (use_endpoints in ('true', '1', 'yes')
                        and bs.get('endpoints_swagger_spec_file')):
                    cloud_endpoints.PushServiceConfig(
                        bs.get('endpoints_swagger_spec_file'), project,
                        apis.GetClientInstance('servicemanagement', 'v1',
                                               self.Http()),
                        apis.GetMessagesModule('servicemanagement', 'v1'))

        remote_build = True
        docker_build_property = properties.VALUES.app.docker_build.Get()
        if args.docker_build:
            remote_build = args.docker_build == 'remote'
        elif docker_build_property:
            remote_build = docker_build_property == 'remote'

        clients = _AppEngineClients(
            appengine_client.AppengineClient(args.server,
                                             args.ignore_bad_certs),
            appengine_api_client.GetApiClient(self.Http(timeout=None)))
        log.debug(
            'API endpoint: [{endpoint}], API version: [{version}]'.format(
                endpoint=clients.api.client.url,
                version=clients.api.api_version))
        cloudbuild_client = apis.GetClientInstance('cloudbuild', 'v1',
                                                   self.Http())
        storage_client = apis.GetClientInstance('storage', 'v1', self.Http())
        promote = properties.VALUES.app.promote_by_default.GetBool()
        deployed_urls = _DisplayProposedDeployment(project, app_config,
                                                   version, promote)
        if args.version or promote:
            # Prompt if there's a chance that you're overwriting something important:
            # If the version is set manually, you could be deploying over something.
            # If you're setting the new deployment to be the default version, you're
            # changing the target of the default URL.
            # Otherwise, all existing URLs will continue to work, so need to prompt.
            console_io.PromptContinue(default=True,
                                      throw_if_unattended=False,
                                      cancel_on_no=True)

        log.status.Print('Beginning deployment...')

        source_contexts = []
        if args.repo_info_file:
            if args.image_url:
                raise NoRepoInfoWithImageUrlError()

            try:
                with open(args.repo_info_file, 'r') as f:
                    source_contexts = json.load(f)
            except (ValueError, IOError) as ex:
                raise RepoInfoLoadError(args.repo_info_file, ex)
            if isinstance(source_contexts, dict):
                # This is an old-style source-context.json file. Convert to a new-
                # style array of extended contexts.
                source_contexts = [
                    context_util.ExtendContextDict(source_contexts)
                ]

        code_bucket_ref = None
        if use_cloud_build or app_config.NonHermeticModules():
            # If using Argo CloudBuild, we'll need to upload source to a GCS bucket.
            code_bucket_ref = self._GetCodeBucket(clients.api, args)
            metrics.CustomTimedEvent(metric_names.GET_CODE_BUCKET)
            log.debug('Using bucket [{b}].'.format(b=code_bucket_ref))

        modules = app_config.Modules()
        if any([m.RequiresImage() for m in modules.values()]):
            deploy_command_util.DoPrepareManagedVms(clients.gae)
        if args.image_url:
            if len(modules) != 1:
                raise MultiDeployError()
            for registry in constants.ALL_SUPPORTED_REGISTRIES:
                if args.image_url.startswith(registry):
                    break
            else:
                raise UnsupportedRegistryError(args.image_url)
            module = modules.keys()[0]
            images = {module: args.image_url}
        else:
            images = deploy_command_util.BuildAndPushDockerImages(
                modules, version, cloudbuild_client, storage_client,
                self.Http(), code_bucket_ref, self.cli, remote_build,
                source_contexts, config_cleanup)

        deployment_manifests = {}
        if app_config.NonHermeticModules():
            if properties.VALUES.app.use_gsutil.GetBool():
                copy_func = deploy_app_command_util.CopyFilesToCodeBucket
                metric_name = metric_names.COPY_APP_FILES
            else:
                copy_func = deploy_app_command_util.CopyFilesToCodeBucketNoGsUtil
                metric_name = metric_names.COPY_APP_FILES_NO_GSUTIL

            deployment_manifests = copy_func(
                app_config.NonHermeticModules().items(), code_bucket_ref,
                source_contexts, storage_client)
            metrics.CustomTimedEvent(metric_name)

        all_services = clients.api.ListServices()
        # Now do deployment.
        for (module, info) in app_config.Modules().iteritems():
            message = 'Updating module [{module}]'.format(module=module)
            with console_io.ProgressTracker(message):
                if args.force:
                    log.warning(
                        'The --force argument is deprecated and no longer '
                        'required. It will be removed in a future release.')

                clients.api.DeployModule(module, version, info,
                                         deployment_manifests.get(module),
                                         images.get(module))
                metrics.CustomTimedEvent(metric_names.DEPLOY_API)

                stop_previous_version = (
                    deploy_command_util.GetStopPreviousVersionFromArgs(args))
                if promote:
                    new_version = version_util.Version(project, module,
                                                       version)
                    _Promote(all_services, new_version, clients,
                             stop_previous_version)
                elif stop_previous_version:
                    log.info(
                        'Not stopping previous version because new version was not '
                        'promoted.')

        # Config files.
        for (c, info) in app_config.Configs().iteritems():
            message = 'Updating config [{config}]'.format(config=c)
            with console_io.ProgressTracker(message):
                clients.gae.UpdateConfig(c, info.parsed)
        return deployed_urls
Example #30
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Yields:
      Some value that we want to have printed later.
    """

    client = core_apis.GetClientInstance('cloudbuild', 'v1')
    messages = core_apis.GetMessagesModule('cloudbuild', 'v1')

    if args.ongoing:
      tz = times.GetTimeZone('UTC')
      now = times.Now(tz)
      now_seconds = times.GetTimeStampFromDateTime(now)

    # We are wrapping list_pager.YieldFromList in another yield loop so that
    # we can use custom exit-early and filtering functionality. This code will
    # be simplified once the cloudbuild service supports server-side filtering.
    #
    # The exit-early is to ensure that, when listing ongoing builds, the command
    # doesn't page through the entire history of terminated builds to find out
    # that there weren't any. The build list will always be delivered in sorted
    # order with createTime descending.
    #
    # The custom filtering checks build.status to see if a build is ongoing or
    # not, and skips those that are terminated.
    #
    # We copy and decrement the limit, because otherwise YieldFromList would
    # not understand when to stop, due to skipping terminated builds.
    #
    # We cannot give YieldFromList a predicate, because it would not know that
    # it needs to stop paging after a certain time threshold - with no ongoing
    # builds it would page through the entire build history.

    limit = args.limit

    for build in list_pager.YieldFromList(
        client.projects_builds,
        messages.CloudbuildProjectsBuildsListRequest(
            pageSize=args.page_size,
            projectId=properties.VALUES.core.project.Get()),
        field='builds',
        batch_size_attribute='pageSize'):
      if args.ongoing:
        tz_create_time = build.createTime
        create_time = times.ParseDateTime(tz_create_time, tz)
        create_seconds = times.GetTimeStampFromDateTime(create_time)
        delta_seconds = now_seconds - create_seconds
        if delta_seconds > _ONGOING_THRESHOLD_SECONDS:
          break
        if build.status not in [
            messages.Build.StatusValueValuesEnum.QUEUED,
            messages.Build.StatusValueValuesEnum.WORKING]:
          continue
      yield build
      limit -= 1
      if limit == 0:
        break