def Run(self, args):
  """This is what gets called when the user runs this command.

  Args:
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.

  Returns:
    Some value that we want to have printed later.

  Raises:
    FailedBuildException: If the build is completed and not 'SUCCESS'.
  """
  project = properties.VALUES.core.project.Get(required=True)
  # Derive a bucket-name-safe project ID: ':' and '.' are not legal in
  # bucket names, and the string 'google' is not allowed in bucket names.
  safe_project = project.replace(':', '_')
  safe_project = safe_project.replace('.', '_')
  safe_project = safe_project.replace('google', 'elgoog')

  default_bucket_name = '{}_cloudbuild'.format(safe_project)

  default_gcs_source = False
  if args.gcs_source_staging_dir is None:
    default_gcs_source = True
    args.gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)

  client = cloudbuild_util.GetClientInstance()
  messages = cloudbuild_util.GetMessagesModule()
  gcs_client = storage_api.StorageClient()

  # First, create the build request.
  build_timeout = properties.VALUES.container.build_timeout.Get()

  if build_timeout is not None:
    try:
      # A bare number is interpreted as seconds.
      build_timeout_secs = int(build_timeout)
    except ValueError:
      build_timeout_duration = times.ParseDuration(build_timeout)
      build_timeout_secs = int(build_timeout_duration.total_seconds)
    timeout_str = str(build_timeout_secs) + 's'
  else:
    timeout_str = None

  if args.tag:
    if (properties.VALUES.container.build_check_tag.GetBool() and
        'gcr.io/' not in args.tag):
      raise c_exceptions.InvalidArgumentException(
          '--tag',
          'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
    build_config = messages.Build(
        images=[args.tag],
        steps=[
            messages.BuildStep(
                name='gcr.io/cloud-builders/docker',
                args=['build', '--no-cache', '-t', args.tag, '.'],
            ),
        ],
        timeout=timeout_str,
        substitutions=cloudbuild_util.EncodeSubstitutions(
            args.substitutions, messages))
  elif args.config:
    build_config = config.LoadCloudbuildConfigFromPath(
        args.config, messages, params=args.substitutions)

  # If timeout was set by flag, overwrite the config file.
  if timeout_str:
    build_config.timeout = timeout_str

  # --no-source overrides the default --source.
  if not args.IsSpecified('source') and args.no_source:
    args.source = None

  gcs_source_staging = None
  if args.source:
    suffix = '.tgz'
    if args.source.startswith('gs://') or os.path.isfile(args.source):
      _, suffix = os.path.splitext(args.source)

    # Next, stage the source to Cloud Storage under a unique object name.
    staged_object = '{stamp}-{uuid}{suffix}'.format(
        stamp=times.GetTimeStampFromDateTime(times.Now()),
        uuid=uuid.uuid4().hex,
        suffix=suffix,
    )
    gcs_source_staging_dir = resources.REGISTRY.Parse(
        args.gcs_source_staging_dir, collection='storage.objects')

    # We create the bucket (if it does not exist) first. If we do an existence
    # check and then create the bucket ourselves, it would be possible for an
    # attacker to get lucky and beat us to creating the bucket. Block on this
    # creation to avoid this race condition.
    gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

    # If no bucket is specified (for the source `default_gcs_source`), check
    # that the default bucket is also owned by the project (b/33046325).
    if default_gcs_source:
      # This request returns only the buckets owned by the project.
      bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
          project=project, prefix=default_bucket_name)
      bucket_list = gcs_client.client.buckets.List(bucket_list_req)
      found_bucket = False
      for bucket in bucket_list.items:
        if bucket.id == default_bucket_name:
          found_bucket = True
          break
      if not found_bucket:
        # The bucket exists (created or found above) but is not listed among
        # this project's buckets, so it is owned by another project.
        raise c_exceptions.RequiredArgumentException(
            'gcs_source_staging_dir',
            'A bucket with name {} already exists and is owned by '
            'another project. Specify a bucket using '
            '--gcs_source_staging_dir.'.format(default_bucket_name))

    if gcs_source_staging_dir.object:
      staged_object = gcs_source_staging_dir.object + '/' + staged_object

    gcs_source_staging = resources.REGISTRY.Create(
        collection='storage.objects',
        bucket=gcs_source_staging_dir.bucket,
        object=staged_object)

    if args.source.startswith('gs://'):
      # Source already in GCS: server-side copy into the staging location.
      gcs_source = resources.REGISTRY.Parse(
          args.source, collection='storage.objects')
      staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
      build_config.source = messages.Source(
          storageSource=messages.StorageSource(
              bucket=staged_source_obj.bucket,
              object=staged_source_obj.name,
              generation=staged_source_obj.generation,
          ))
    else:
      if not os.path.exists(args.source):
        raise c_exceptions.BadFileException(
            'could not find source [{src}]'.format(src=args.source))
      if os.path.isdir(args.source):
        # Local directory: tar it up and upload the tarball.
        source_snapshot = snapshot.Snapshot(args.source)
        size_str = resource_transform.TransformSize(
            source_snapshot.uncompressed_size)
        log.status.Print(
            'Creating temporary tarball archive of {num_files} file(s)'
            ' totalling {size} before compression.'.format(
                num_files=len(source_snapshot.files), size=size_str))
        staged_source_obj = source_snapshot.CopyTarballToGCS(
            gcs_client, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      elif os.path.isfile(args.source):
        unused_root, ext = os.path.splitext(args.source)
        if ext not in _ALLOWED_SOURCE_EXT:
          # Fill in the source path; previously the '{src}' placeholder was
          # never substituted and was shown literally to the user.
          raise c_exceptions.BadFileException(
              'Local file [{src}] is none of '.format(src=args.source) +
              ', '.join(_ALLOWED_SOURCE_EXT))
        log.status.Print(
            'Uploading local file [{src}] to '
            '[gs://{bucket}/{object}].'.format(
                src=args.source,
                bucket=gcs_source_staging.bucket,
                object=gcs_source_staging.object,
            ))
        staged_source_obj = gcs_client.CopyFileToGCS(
            storage_util.BucketReference.FromBucketUrl(
                gcs_source_staging.bucket),
            args.source, gcs_source_staging.object)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
  else:
    # No source was provided; this must be acknowledged with --no-source.
    if not args.no_source:
      raise c_exceptions.InvalidArgumentException(
          '--no-source', 'To omit source, use the --no-source flag.')

  if args.gcs_log_dir:
    gcs_log_dir = resources.REGISTRY.Parse(
        args.gcs_log_dir, collection='storage.objects')
    build_config.logsBucket = (
        'gs://' + gcs_log_dir.bucket + '/' + gcs_log_dir.object)

  # Machine type.
  if args.machine_type is not None:
    machine_type = Submit._machine_type_flag_map.GetEnumForChoice(
        args.machine_type)
    if not build_config.options:
      build_config.options = messages.BuildOptions()
    build_config.options.machineType = machine_type

  # Disk size.
  if args.disk_size is not None:
    disk_size = compute_utils.BytesToGb(args.disk_size)
    if not build_config.options:
      build_config.options = messages.BuildOptions()
    build_config.options.diskSizeGb = int(disk_size)

  log.debug('submitting build: ' + repr(build_config))

  # Start the build.
  op = client.projects_builds.Create(
      messages.CloudbuildProjectsBuildsCreateRequest(
          build=build_config,
          projectId=properties.VALUES.core.project.Get()))
  json = encoding.MessageToJson(op.metadata)
  build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

  build_ref = resources.REGISTRY.Create(
      collection='cloudbuild.projects.builds',
      projectId=build.projectId,
      id=build.id)
  log.CreatedResource(build_ref)
  if build.logUrl:
    log.status.Print('Logs are available at [{log_url}].'.format(
        log_url=build.logUrl))
  else:
    log.status.Print('Logs are available in the Cloud Console.')

  # If the command is run --async, we just print out a reference to the build.
  # Accessed via getattr because 'async' is a reserved keyword in Python 3,
  # so the attribute access `args.async` would be a syntax error.
  if getattr(args, 'async'):
    return build

  mash_handler = execution.MashHandler(
      execution.GetCancelBuildHandler(client, messages, build_ref))

  # Otherwise, logs are streamed from GCS.
  with execution_utils.CtrlCSection(mash_handler):
    build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

  if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
    log.status.Print(
        'Your build timed out. Use the [--timeout=DURATION] flag to change '
        'the timeout threshold.')

  if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
    raise FailedBuildException(build)

  return build
def CreatePRPreviewBuildTrigger(messages, name, description, build_timeout,
                                github_repo_owner, github_repo_name,
                                pr_pattern, preview_expiry_days,
                                comment_control, image, dockerfile_path,
                                app_name, config_path, expose_port,
                                gcs_config_staging_path, cluster, location,
                                build_tags, build_trigger_tags):
  """Creates the Cloud BuildTrigger config that deploys an application when
  triggered by a PR create/update.

  Args:
    messages: Cloud Build messages module. This is the value returned from
      cloudbuild_util.GetMessagesModule().
    name: Trigger name, which must be unique amongst all triggers in a
      project.
    description: Trigger description.
    build_timeout: An optional maximum time a triggered build is run before it
      times out. For example, "2h15m5s" is 2 hours, 15 minutes, and 5 seconds.
      If you do not specify a unit, seconds is assumed. If this value is None,
      a timeout is not set.
    github_repo_owner: A GitHub repo owner to be used in the trigger's github
      field.
    github_repo_name: A GitHub repo name to be used in the trigger's github
      field.
    pr_pattern: A regex value that is the base branch that the PR is
      targeting, which triggers the creation of the PR preview deployment.
    preview_expiry_days: How long a deployed preview application can exist
      before it is expired, in days, that is set to a substitution variable.
    comment_control: Whether or not a user must comment /gcbrun to trigger
      the deployment build.
    image: The image that will be built and deployed. The image can include a
      tag or digest.
    dockerfile_path: An optional path to the source repository's Dockerfile,
      relative to the source repository's root directory that is set to a
      substitution variable. If this value is not provided, 'Dockerfile' is
      used.
    app_name: An optional app name that is set to a substitution variable.
      If this value is None, the substitution variable is set to '' to
      indicate its absence.
    config_path: An optional path to the source repository's Kubernetes
      configs, relative to the source repository's root directory that is set
      to a substitution variable. If this value is None, the substitution
      variable is set to '' to indicate its absence.
    expose_port: An optional port that the deployed application listens to
      that is set to a substitution variable. If this value is None, the
      substitution variable is set to 0 to indicate its absence.
    gcs_config_staging_path: An optional path to a GCS subdirectory to copy
      application configs that is set to a substitution variable. If this
      value is None, the substitution variable is set to '' to indicate its
      absence.
    cluster: The name of the target cluster to deploy to that is set to a
      substitution variable.
    location: The zone/region of the target cluster to deploy to that is set
      to a substitution variable.
    build_tags: Tags to append to build tags in addition to default tags.
    build_trigger_tags: Tags to append to build trigger tags in addition to
      default tags.

  Returns:
    messages.BuildTrigger, the Cloud BuildTrigger config.
  """
  # Start from the substitutions shared by all deploy builds, then add the
  # preview-specific expiry value.
  substitutions = _BaseBuildSubstitutionsDict(dockerfile_path, app_name,
                                              config_path, expose_port,
                                              cluster, location,
                                              gcs_config_staging_path)
  substitutions[_PREVIEW_EXPIRY_SUB_VAR] = six.text_type(preview_expiry_days)

  # Build + push the image, then prepare, apply, and annotate the preview
  # deployment; expanded configs are saved as build artifacts.
  build = messages.Build(
      steps=[
          _BuildBuildStep(messages, image),
          _PushBuildStep(messages, image),
          messages.BuildStep(
              id=_PREPARE_DEPLOY_BUILD_STEP_ID,
              name=_GKE_DEPLOY_PROD,
              entrypoint='sh',
              args=[
                  '-c',
                  _PREPARE_PREVIEW_DEPLOY_SCRIPT.format(
                      image=image,
                      cluster=_GKE_CLUSTER_SUB_VAR,
                      location=_GKE_LOCATION_SUB_VAR,
                      k8s_yaml_path=_K8S_YAML_PATH_SUB_VAR,
                      app_name=_APP_NAME_SUB_VAR,
                      k8s_annotations=_K8S_ANNOTATIONS_SUB_VAR,
                      expose_port=_EXPOSE_PORT_SUB_VAR,
                  )
              ]
          ),
          _SaveConfigsBuildStep(messages),
          messages.BuildStep(
              id=_APPLY_DEPLOY_BUILD_STEP_ID,
              name=_GKE_DEPLOY_PROD,
              entrypoint='sh',
              args=[
                  '-c',
                  _APPLY_PREVIEW_DEPLOY_SCRIPT
              ]
          ),
          messages.BuildStep(
              id=_ANNOTATE_PREVIEW_NAMESPACE_BUILD_STEP_ID,
              name='gcr.io/cloud-builders/kubectl',
              entrypoint='sh',
              args=[
                  '-c',
                  _ANNOTATE_PREVIEW_NAMESPACE_SCRIPT
              ]
          )
      ],
      substitutions=cloudbuild_util.EncodeSubstitutions(
          substitutions, messages),
      options=messages.BuildOptions(
          substitutionOption=messages.BuildOptions
          .SubstitutionOptionValueValuesEnum.ALLOW_LOOSE
      ),
      images=[image],
      artifacts=messages.Artifacts(
          objects=messages.ArtifactObjects(
              location='gs://' + _EXPANDED_CONFIGS_PATH_DYNAMIC,
              paths=['output/expanded/*']
          )
      )
  )

  if build_timeout is not None:
    try:
      # A bare number is interpreted as seconds.
      build_timeout_secs = int(build_timeout)
    except ValueError:
      build_timeout_duration = times.ParseDuration(build_timeout)
      build_timeout_secs = int(build_timeout_duration.total_seconds)
    build.timeout = six.text_type(build_timeout_secs) + 's'

  # Default tags first, then any caller-supplied extras.
  build.tags = _DEFAULT_PR_PREVIEW_TAGS[:]
  if build_tags:
    for tag in build_tags:
      build.tags.append(tag)

  # Trigger on PRs that target the given base-branch pattern.
  github_config = messages.GitHubEventsConfig(
      owner=github_repo_owner,
      name=github_repo_name,
      pullRequest=messages.PullRequestFilter(
          branch=pr_pattern
      )
  )
  if comment_control:
    # Require a /gcbrun comment before the build runs.
    github_config.pullRequest.commentControl = messages.PullRequestFilter.CommentControlValueValuesEnum.COMMENTS_ENABLED

  build_trigger = messages.BuildTrigger(
      name=name,
      description=description,
      build=build,
      github=github_config,
      substitutions=cloudbuild_util.EncodeTriggerSubstitutions(
          substitutions, messages)
  )

  # Default trigger tags first, then any caller-supplied extras.
  build_trigger.tags = _DEFAULT_PR_PREVIEW_TAGS[:]
  if build_trigger_tags:
    for tag in build_trigger_tags:
      build_trigger.tags.append(tag)

  return build_trigger
def CreateCleanPreviewBuildTrigger(messages, name, description,
                                   github_repo_owner, github_repo_name,
                                   cluster, location, build_tags,
                                   build_trigger_tags):
  """Creates the Cloud BuildTrigger config that deletes expired preview
  deployments.

  Args:
    messages: Cloud Build messages module. This is the value returned from
      cloudbuild_util.GetMessagesModule().
    name: Trigger name, which must be unique amongst all triggers in a
      project.
    description: Trigger description.
    github_repo_owner: A GitHub repo owner to be used in the trigger's github
      field.
    github_repo_name: A GitHub repo name to be used in the trigger's github
      field.
    cluster: The name of the target cluster to check for expired deployments
      that is set to a substitution variable.
    location: The zone/region of the target cluster to check for the expired
      deployments that is set to a substitution variable.
    build_tags: Tags to append to build tags in addition to default tags.
    build_trigger_tags: Tags to append to build trigger tags in addition to
      default tags.

  Returns:
    messages.BuildTrigger, the Cloud BuildTrigger config.
  """
  # The same cluster/location substitutions are encoded on both the build
  # and the trigger.
  shared_substitutions = {
      _GKE_CLUSTER_SUB_VAR: cluster,
      _GKE_LOCATION_SUB_VAR: location,
  }

  # Single kubectl step that runs the cleanup script.
  cleanup_step = messages.BuildStep(
      id=_CLEANUP_PREVIEW_BUILD_STEP_ID,
      name='gcr.io/cloud-builders/kubectl',
      entrypoint='bash',
      args=['-c', _CLEANUP_PREVIEW_SCRIPT])

  cleanup_build = messages.Build(
      steps=[cleanup_step],
      substitutions=cloudbuild_util.EncodeSubstitutions(
          shared_substitutions, messages),
      timeout='600s')
  # Default tags plus any caller-supplied extras.
  cleanup_build.tags = (
      list(_DEFAULT_CLEAN_PREVIEW_TAGS) + list(build_tags or []))

  trigger = messages.BuildTrigger(
      name=name,
      description=description,
      github=messages.GitHubEventsConfig(
          owner=github_repo_owner,
          name=github_repo_name,
          # NOTE(review): '$manual-only^' presumably never matches a real
          # branch, so the trigger can only be run manually — confirm.
          push=messages.PushFilter(branch='$manual-only^')),
      build=cleanup_build,
      substitutions=cloudbuild_util.EncodeTriggerSubstitutions(
          shared_substitutions, messages))
  trigger.tags = (
      list(_DEFAULT_CLEAN_PREVIEW_TAGS) + list(build_trigger_tags or []))

  return trigger
def CreateBuild(messages, build_timeout, build_and_push, staged_source,
                image, dockerfile_path, app_name, app_version, config_path,
                namespace, expose_port, gcs_config_staging_path, cluster,
                location, build_tags):
  """Creates the Cloud Build config to run.

  Args:
    messages: Cloud Build messages module. This is the value returned from
      cloudbuild_util.GetMessagesModule().
    build_timeout: An optional maximum time a build is run before it times
      out. For example, "2h15m5s" is 2 hours, 15 minutes, and 5 seconds. If
      you do not specify a unit, seconds is assumed. If this value is None, a
      timeout is not set.
    build_and_push: If True, the created build will have Build and Push
      steps.
    staged_source: An optional GCS object for a staged source repository. The
      object must have bucket, name, and generation fields. If this value is
      None, the created build will not have a source.
    image: The image that will be deployed and optionally built beforehand.
      The image can include a tag or digest.
    dockerfile_path: An optional path to the source repository's Dockerfile,
      relative to the source repository's root directory. If this value is
      not provided, 'Dockerfile' is used.
    app_name: An optional app name that is set to a substitution variable.
      If this value is None, the substitution variable is set to '' to
      indicate its absence.
    app_version: A app version that is set to the deployed application's
      version. If this value is None, the version will be set to '' to
      indicate its absence.
    config_path: An optional path to the source repository's Kubernetes
      configs, relative to the source repository's root directory that is set
      to a substitution variable. If this value is None, the substitution
      variable is set to '' to indicate its absence.
    namespace: An optional Kubernetes namespace of the cluster to deploy to
      that is set to a substitution variable. If this value is None, the
      substitution variable is set to 'default'.
    expose_port: An optional port that the deployed application listens to
      that is set to a substitution variable. If this value is None, the
      substitution variable is set to 0 to indicate its absence.
    gcs_config_staging_path: An optional path to a GCS subdirectory to copy
      application configs that is set to a substitution variable. If this
      value is None, the substitution variable is set to '' to indicate its
      absence.
    cluster: The name of the target cluster to deploy to.
    location: The zone/region of the target cluster to deploy to.
    build_tags: Tags to append to build tags in addition to default tags.

  Returns:
    messages.Build, the Cloud Build config.
  """
  build = messages.Build()

  if build_timeout is not None:
    try:
      # A bare number is interpreted as seconds.
      build_timeout_secs = int(build_timeout)
    except ValueError:
      build_timeout_duration = times.ParseDuration(build_timeout)
      build_timeout_secs = int(build_timeout_duration.total_seconds)
    build.timeout = six.text_type(build_timeout_secs) + 's'

  if staged_source:
    build.source = messages.Source(
        storageSource=messages.StorageSource(
            bucket=staged_source.bucket,
            object=staged_source.name,
            generation=staged_source.generation
        )
    )

  # Normalize optional values into the substitution-variable encodings
  # described in the docstring ('' / '0' mark absence).
  if config_path is None:
    config_path = ''

  if not expose_port:
    expose_port = '0'
  else:
    expose_port = six.text_type(expose_port)

  if app_version is None:
    app_version = ''

  build.steps = []

  # Build and Push steps are only included when requested.
  if build_and_push:
    build.steps.append(_BuildBuildStep(messages, image))
    build.steps.append(_PushBuildStep(messages, image))

  build.steps.append(messages.BuildStep(
      id=_PREPARE_DEPLOY_BUILD_STEP_ID,
      name=_GKE_DEPLOY_PROD,
      args=[
          'prepare',
          '--filename=${}'.format(_K8S_YAML_PATH_SUB_VAR),
          '--image={}'.format(image),
          '--app=${}'.format(_APP_NAME_SUB_VAR),
          '--version={}'.format(app_version),
          '--namespace=${}'.format(_K8S_NAMESPACE_SUB_VAR),
          '--output=output',
          # You cannot embed a substitution variable in another, so
          # gcb-build-id=$BUILD_ID must be hard-coded.
          '--annotation=gcb-build-id=$BUILD_ID,${}'.format(
              _K8S_ANNOTATIONS_SUB_VAR),
          '--expose=${}'.format(_EXPOSE_PORT_SUB_VAR)
      ],
  ))
  build.steps.append(_SaveConfigsBuildStep(messages))
  build.steps.append(messages.BuildStep(
      id=_APPLY_DEPLOY_BUILD_STEP_ID,
      name=_GKE_DEPLOY_PROD,
      args=[
          'apply',
          '--filename=output/expanded',
          '--namespace=${}'.format(_K8S_NAMESPACE_SUB_VAR),
          '--cluster=${}'.format(_GKE_CLUSTER_SUB_VAR),
          '--location=${}'.format(_GKE_LOCATION_SUB_VAR),
          # Set this to max value allowed for a build so that this step never
          # times out. We prefer the timeout given to the build to take
          # precedence.
          '--timeout=24h'
      ],
  ))

  substitutions = _BaseBuildSubstitutionsDict(dockerfile_path, app_name,
                                              config_path, expose_port,
                                              cluster, location,
                                              gcs_config_staging_path)
  if namespace is None:
    namespace = 'default'
  substitutions[_K8S_NAMESPACE_SUB_VAR] = namespace

  build.substitutions = cloudbuild_util.EncodeSubstitutions(
      substitutions, messages)

  # Default tags first, then any caller-supplied extras.
  build.tags = _DEFAULT_TAGS[:]
  if build_tags:
    for tag in build_tags:
      build.tags.append(tag)

  build.options = messages.BuildOptions()
  build.options.substitutionOption = messages.BuildOptions.SubstitutionOptionValueValuesEnum.ALLOW_LOOSE

  # The image and expanded-config artifacts only exist when the build itself
  # produced the image.
  if build_and_push:
    build.images = [image]
    build.artifacts = messages.Artifacts(
        objects=messages.ArtifactObjects(
            location='gs://' + _EXPANDED_CONFIGS_PATH_DYNAMIC,
            paths=['output/expanded/*']
        )
    )

  return build
def _SetBuildSteps(tag, no_cache, messages, substitutions, arg_config,
                   timeout_str):
  """Assembles the Build config for a --tag or --config invocation.

  Args:
    tag: Docker tag to build, or None when building from a config file.
    no_cache: If True, disable layer caching (kaniko builds only).
    messages: Cloud Build messages module.
    substitutions: Build substitutions, or None.
    arg_config: Path to a cloudbuild config file, or None when using a tag.
    timeout_str: Build timeout string (e.g. '600s'), or None.

  Returns:
    messages.Build, the assembled build config.

  Raises:
    c_exceptions.InvalidArgumentException: for a bad tag namespace, an empty
      config path, or --no-cache without kaniko.
    c_exceptions.ConflictingArgumentsException: for --config with --no-cache.
    c_exceptions.OneOfArgumentsRequiredException: when neither a tag nor a
      config file is given.
  """
  if tag is not None:
    tag_check_enabled = properties.VALUES.builds.check_tag.GetBool()
    if tag_check_enabled and not any(
        reg in tag for reg in _SUPPORTED_REGISTRIES):
      raise c_exceptions.InvalidArgumentException(
          '--tag', 'Tag value must be in the gcr.io/*, *.gcr.io/*, '
          'or *.pkg.dev/* namespace.')

    if properties.VALUES.builds.use_kaniko.GetBool():
      # Kaniko implements --no-cache by giving cached layers a zero TTL.
      cache_ttl = '0h' if no_cache else '{}h'.format(
          properties.VALUES.builds.kaniko_cache_ttl.Get())
      kaniko_step = messages.BuildStep(
          name=properties.VALUES.builds.kaniko_image.Get(),
          args=[
              '--destination', tag, '--cache', '--cache-ttl', cache_ttl,
              '--cache-dir', '',
          ],
      )
      build_config = messages.Build(
          steps=[kaniko_step],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(
              substitutions, messages))
    else:
      if no_cache:
        raise c_exceptions.InvalidArgumentException(
            'no-cache',
            'Cannot specify --no-cache if builds/use_kaniko property is '
            'False')
      docker_step = messages.BuildStep(
          name='gcr.io/cloud-builders/docker',
          args=[
              'build', '--network', 'cloudbuild', '--no-cache', '-t', tag,
              '.'
          ],
      )
      build_config = messages.Build(
          images=[tag],
          steps=[docker_step],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(
              substitutions, messages))
  elif arg_config is not None:
    if no_cache:
      raise c_exceptions.ConflictingArgumentsException(
          '--config', '--no-cache')
    if not arg_config:
      raise c_exceptions.InvalidArgumentException(
          '--config', 'Config file path must not be empty.')
    build_config = config.LoadCloudbuildConfigFromPath(
        arg_config, messages, params=substitutions)
  else:
    raise c_exceptions.OneOfArgumentsRequiredException(
        ['--tag', '--config'],
        'Requires either a docker tag or a config file.')

  # A timeout set by flag wins over whatever the config file specified.
  if timeout_str:
    build_config.timeout = timeout_str

  return build_config
def _SetBuildStepsAlpha(tag, no_cache, messages, substitutions, arg_config,
                        timeout_str, buildpack):
  """Set build steps.

  Builds the Build config from exactly one of --tag, --pack (buildpack), or
  --config, applying the optional timeout last.
  """
  if tag is not None:
    # --tag path: docker build (or kaniko when builds/use_kaniko is set).
    if (properties.VALUES.builds.check_tag.GetBool() and
        not any(reg in tag for reg in _SUPPORTED_REGISTRIES)):
      raise c_exceptions.InvalidArgumentException(
          '--tag', 'Tag value must be in the gcr.io/*, *.gcr.io/*, '
          'or *.pkg.dev/* namespace.')
    if properties.VALUES.builds.use_kaniko.GetBool():
      # Kaniko honors --no-cache by giving cached layers a zero TTL.
      if no_cache:
        ttl = '0h'
      else:
        ttl = '{}h'.format(
            properties.VALUES.builds.kaniko_cache_ttl.Get())
      build_config = messages.Build(
          steps=[
              messages.BuildStep(
                  name=properties.VALUES.builds.kaniko_image.Get(),
                  args=[
                      '--destination', tag, '--cache', '--cache-ttl', ttl,
                      '--cache-dir', '',
                  ],
              ),
          ],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(
              substitutions, messages))
    else:
      # --no-cache is only meaningful for kaniko builds.
      if no_cache:
        raise c_exceptions.InvalidArgumentException(
            'no-cache',
            'Cannot specify --no-cache if builds/use_kaniko property is '
            'False')
      build_config = messages.Build(
          images=[tag],
          steps=[
              messages.BuildStep(
                  name='gcr.io/cloud-builders/docker',
                  args=[
                      'build', '--network', 'cloudbuild', '--no-cache',
                      '-t', tag, '.'
                  ],
              ),
          ],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(
              substitutions, messages))
  elif buildpack is not None:
    # --pack path: buildpack spec is a list whose first element is a dict
    # with optional 'builder' and 'env' keys and a required 'image' key.
    if not buildpack:
      raise c_exceptions.InvalidArgumentException(
          '--pack', 'Image value must not be empty.')
    if buildpack[0].get('builder') is None:
      builder = _DEFAULT_BUILDPACK_BUILDER
    else:
      builder = buildpack[0].get('builder')
    if buildpack[0].get('image') is None:
      raise c_exceptions.InvalidArgumentException(
          '--pack', 'Image value must not be empty.')
    image = buildpack[0].get('image')
    if (properties.VALUES.builds.check_tag.GetBool() and
        not any(reg in image for reg in _SUPPORTED_REGISTRIES)):
      raise c_exceptions.InvalidArgumentException(
          '--pack',
          'Image value must be in the gcr.io/*, *.gcr.io/*, or *.pkg.dev/* namespace.'
      )
    env = buildpack[0].get('env')
    pack_args = ['build', image, '--builder', builder]
    if env is not None:
      pack_args.append('--env')
      pack_args.append(env)
    build_config = messages.Build(
        images=[image],
        steps=[
            messages.BuildStep(
                name='gcr.io/k8s-skaffold/pack',
                entrypoint='pack',
                args=pack_args,
            ),
        ],
        timeout=timeout_str,
        substitutions=cloudbuild_util.EncodeSubstitutions(
            substitutions, messages))
  elif arg_config is not None:
    # --config path: load the build from a cloudbuild config file.
    if no_cache:
      raise c_exceptions.ConflictingArgumentsException(
          '--config', '--no-cache')
    if not arg_config:
      raise c_exceptions.InvalidArgumentException(
          '--config', 'Config file path must not be empty.')
    build_config = config.LoadCloudbuildConfigFromPath(
        arg_config, messages, params=substitutions)
  else:
    raise c_exceptions.OneOfArgumentsRequiredException(
        ['--tag', '--config', '--pack'],
        'Requires either a docker tag, a config file, or pack argument.')

  # If timeout was set by flag, overwrite the config file.
  if timeout_str:
    build_config.timeout = timeout_str

  return build_config
def CreateBuild(messages, build_timeout, build_and_push, staged_source,
                image, dockerfile_path, app_name, app_version, config_path,
                namespace, expose_port, gcs_config_staging_path, cluster,
                location, build_tags):
  """Creates the Cloud Build config to run.

  Args:
    messages: Cloud Build messages module. i.e., the return value of
      cloudbuild_util.GetMessagesModule().
    build_timeout: An optional maximum time a build is run before it times
      out. For example, "2h15m5s" is 2 hours, 15 minutes, and 5 seconds. If
      you do not specify a unit, seconds is assumed. If this value is None, a
      timeout is not set.
    build_and_push: If True, the created build will have Build and Push
      steps.
    staged_source: An optional GCS object for a staged source repository. The
      object must have bucket, name, and generation fields. If this value is
      None, the created build will not have a source.
    image: The image that will deployed and optionally built beforehand. The
      image can include a tag or digest.
    dockerfile_path: A path to the source repository's Dockerfile, relative
      to the source repository's root directory.
    app_name: An app name that is set to a substitution variable.
    app_version: An app version that is set to a substitution variable.
    config_path: An optional path to the source repository's Kubernetes
      configs, relative to the source repository's root directory that is set
      to a substitution variable. If this value is None, the substitution
      variable is set to '' to indicate its absence.
    namespace: A Kubernetes namespace of the cluster to deploy to that is set
      to a substitution variable.
    expose_port: An optional port that the deployed application listens to
      that is set to a substitution variable. If this value is None, the
      substitution variable is set to 0 to indicate its absence.
    gcs_config_staging_path: An optional path to a GCS subdirectory to copy
      application configs that is set to a substitution variable. If this
      value is None, the substitution variable is set to '' to indicate its
      absence.
    cluster: The name of the target cluster to deploy to.
    location: The zone/region of the target cluster to deploy to.
    build_tags: Tags to append to build tags in additional to default tags.

  Returns:
    messages.Build, the Cloud Build config.
  """
  build = messages.Build()

  if build_timeout is not None:
    try:
      # A bare number is interpreted as seconds.
      build_timeout_secs = int(build_timeout)
    except ValueError:
      build_timeout_duration = times.ParseDuration(build_timeout)
      build_timeout_secs = int(build_timeout_duration.total_seconds)
    build.timeout = six.text_type(build_timeout_secs) + 's'

  if staged_source:
    build.source = messages.Source(storageSource=messages.StorageSource(
        bucket=staged_source.bucket,
        object=staged_source.name,
        generation=staged_source.generation))

  # Normalize optional values into their substitution-variable encodings
  # ('' / '0' mark absence, per the docstring).
  if config_path is None:
    config_path = ''

  if not expose_port:
    expose_port = '0'
  else:
    expose_port = str(expose_port)

  build.steps = []

  # Build and Push steps are only included when requested.
  if build_and_push:
    build.steps.append(
        messages.BuildStep(id=_BUILD_BUILD_STEP_ID,
                           name='gcr.io/cloud-builders/docker',
                           args=[
                               'build', '--network', 'cloudbuild',
                               '--no-cache', '-t', image, '-f',
                               '${}'.format(_DOCKERFILE_PATH_SUB_VAR), '.'
                           ]))
    build.steps.append(
        messages.BuildStep(id=_PUSH_BUILD_STEP_ID,
                           name='gcr.io/cloud-builders/docker',
                           args=[
                               'push',
                               image,
                           ]))

  build.steps.append(
      messages.BuildStep(
          id=_PREPARE_DEPLOY_BUILD_STEP_ID,
          name=_GKE_DEPLOY_PROD,
          args=[
              'prepare',
              '--filename=${}'.format(_K8S_YAML_PATH_SUB_VAR),
              '--image={}'.format(image),
              '--app=${}'.format(_APP_NAME_SUB_VAR),
              '--version={}'.format(app_version),
              '--namespace=${}'.format(_K8S_NAMESPACE_SUB_VAR),
              '--output=output',
              '--annotation=gcb-build-id=$BUILD_ID',
              '--expose=${}'.format(_EXPOSE_PORT_SUB_VAR)
          ],
      ))
  build.steps.append(
      messages.BuildStep(id=_SAVE_CONFIGS_BUILD_STEP_ID,
                         name='gcr.io/cloud-builders/gsutil',
                         entrypoint='sh',
                         args=['-c', _SAVE_CONFIGS_SCRIPT]))
  build.steps.append(
      messages.BuildStep(
          id=_APPLY_DEPLOY_BUILD_STEP_ID,
          name=_GKE_DEPLOY_PROD,
          args=[
              'apply', '--filename=output/expanded',
              '--namespace=${}'.format(_K8S_NAMESPACE_SUB_VAR),
              '--cluster=${}'.format(_GKE_CLUSTER_SUB_VAR),
              '--location=${}'.format(_GKE_LOCATION_SUB_VAR),
              # Set this to max value allowed for a build so that this step
              # never times out. We prefer the timeout given to the build to
              # take precedence.
              '--timeout=24h'
          ],
      ))

  build.substitutions = cloudbuild_util.EncodeSubstitutions(
      _BuildSubstitutionsDict(dockerfile_path, app_name, config_path,
                              namespace, expose_port, cluster, location,
                              gcs_config_staging_path), messages)

  # Default tags first, then any caller-supplied extras.
  build.tags = _DEFAULT_BUILD_TAGS[:]
  if build_tags:
    for tag in build_tags:
      build.tags.append(tag)

  build.options = messages.BuildOptions()
  build.options.substitutionOption = messages.BuildOptions.SubstitutionOptionValueValuesEnum.ALLOW_LOOSE

  return build
def CreateBuildConfig(tag, no_cache, messages, substitutions, arg_config,
                      is_specified_source, no_source, source,
                      gcs_source_staging_dir, ignore_file, arg_gcs_log_dir,
                      arg_machine_type, arg_disk_size):
  """Returns a build config for `gcloud builds submit`.

  Builds the Cloud Build request from either a docker tag (docker or kaniko
  build steps) or a cloudbuild config file, stages local or GCS source into a
  staging bucket, and applies log-dir, machine-type, and disk-size overrides.

  Args:
    tag: str or None, docker tag to build (must be in a gcr.io namespace when
      the builds/check_tag property is set).
    no_cache: bool, disable caching (kaniko TTL 0h; invalid without kaniko
      when used with --config or plain docker builds).
    messages: Cloud Build proto messages module.
    substitutions: dict or None, user substitutions for the build.
    arg_config: str or None, path to a cloudbuild config file.
    is_specified_source: bool, whether --source was explicitly given.
    no_source: bool, whether --no-source was given.
    source: str or None, local path or gs:// URI of the build source.
    gcs_source_staging_dir: str or None, gs:// directory to stage source into;
      defaults to the project's default staging bucket.
    ignore_file: str or None, override for .gcloudignore.
    arg_gcs_log_dir: str or None, gs:// directory to hold build logs.
    arg_machine_type: machine type flag value, or None.
    arg_disk_size: disk size flag value in bytes, or None.

  Returns:
    messages.Build, the build config to submit.

  Raises:
    c_exceptions.InvalidArgumentException: bad --tag namespace, --no-cache
      without kaniko, empty --config, or source given without --no-source
      handling.
    c_exceptions.ConflictingArgumentsException: --config with --no-cache.
    c_exceptions.OneOfArgumentsRequiredException: neither --tag nor --config.
    c_exceptions.RequiredArgumentException: default staging bucket is owned
      by another project.
    c_exceptions.BadFileException: missing source path or disallowed local
      file extension.
  """
  # Get the build timeout.
  build_timeout = properties.VALUES.builds.timeout.Get()
  if build_timeout is not None:
    try:
      # A bare number is interpreted as seconds.
      build_timeout_secs = int(build_timeout)
    except ValueError:
      build_timeout_duration = times.ParseDuration(build_timeout)
      build_timeout_secs = int(build_timeout_duration.total_seconds)
    timeout_str = six.text_type(build_timeout_secs) + 's'
  else:
    timeout_str = None

  if tag is not None:
    if (properties.VALUES.builds.check_tag.GetBool() and
        'gcr.io/' not in tag):
      raise c_exceptions.InvalidArgumentException(
          '--tag',
          'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
    if properties.VALUES.builds.use_kaniko.GetBool():
      if no_cache:
        ttl = '0h'
      else:
        ttl = '{}h'.format(properties.VALUES.builds.kaniko_cache_ttl.Get())
      build_config = messages.Build(
          steps=[
              messages.BuildStep(
                  name=properties.VALUES.builds.kaniko_image.Get(),
                  args=[
                      '--destination',
                      tag,
                      '--cache',
                      '--cache-ttl',
                      ttl,
                      '--cache-dir',
                      '',
                  ],
              ),
          ],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(
              substitutions, messages))
    else:
      if no_cache:
        raise c_exceptions.InvalidArgumentException(
            'no-cache',
            'Cannot specify --no-cache if builds/use_kaniko property is '
            'False')
      build_config = messages.Build(
          images=[tag],
          steps=[
              messages.BuildStep(
                  name='gcr.io/cloud-builders/docker',
                  args=[
                      'build', '--network', 'cloudbuild', '--no-cache', '-t',
                      tag, '.'
                  ],
              ),
          ],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(
              substitutions, messages))
  elif arg_config is not None:
    if no_cache:
      raise c_exceptions.ConflictingArgumentsException('--config', '--no-cache')
    if not arg_config:
      raise c_exceptions.InvalidArgumentException(
          '--config', 'Config file path must not be empty.')
    build_config = config.LoadCloudbuildConfigFromPath(
        arg_config, messages, params=substitutions)
  else:
    raise c_exceptions.OneOfArgumentsRequiredException(
        ['--tag', '--config'],
        'Requires either a docker tag or a config file.')

  # If timeout was set by flag, overwrite the config file.
  if timeout_str:
    build_config.timeout = timeout_str

  # Set the source for the build config.
  default_gcs_source = False
  default_bucket_name = None
  if gcs_source_staging_dir is None:
    default_gcs_source = True
    default_bucket_name = staging_bucket_util.GetDefaultStagingBucket()
    gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)
  gcs_client = storage_api.StorageClient()

  # --no-source overrides the default --source.
  if not is_specified_source and no_source:
    source = None

  gcs_source_staging = None
  if source:
    suffix = '.tgz'
    if source.startswith('gs://') or os.path.isfile(source):
      _, suffix = os.path.splitext(source)

    # Next, stage the source to Cloud Storage.
    staged_object = '{stamp}-{uuid}{suffix}'.format(
        stamp=times.GetTimeStampFromDateTime(times.Now()),
        uuid=uuid.uuid4().hex,
        suffix=suffix,
    )
    gcs_source_staging_dir = resources.REGISTRY.Parse(
        gcs_source_staging_dir, collection='storage.objects')

    # We create the bucket (if it does not exist) first. If we do an existence
    # check and then create the bucket ourselves, it would be possible for an
    # attacker to get lucky and beat us to creating the bucket. Block on this
    # creation to avoid this race condition.
    gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

    # If no bucket is specified (for the source `default_gcs_source`), check
    # that the default bucket is also owned by the project (b/33046325).
    if default_gcs_source and not staging_bucket_util.BucketIsInProject(
        gcs_client, default_bucket_name):
      raise c_exceptions.RequiredArgumentException(
          'gcs-source-staging-dir',
          'A bucket with name {} already exists and is owned by '
          'another project. Specify a bucket using '
          '--gcs-source-staging-dir.'.format(default_bucket_name))

    if gcs_source_staging_dir.object:
      staged_object = gcs_source_staging_dir.object + '/' + staged_object
    gcs_source_staging = resources.REGISTRY.Create(
        collection='storage.objects',
        bucket=gcs_source_staging_dir.bucket,
        object=staged_object)

    if source.startswith('gs://'):
      # Source already in GCS: server-side rewrite into the staging object.
      gcs_source = resources.REGISTRY.Parse(
          source, collection='storage.objects')
      staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
      build_config.source = messages.Source(
          storageSource=messages.StorageSource(
              bucket=staged_source_obj.bucket,
              object=staged_source_obj.name,
              generation=staged_source_obj.generation,
          ))
    else:
      if not os.path.exists(source):
        raise c_exceptions.BadFileException(
            'could not find source [{src}]'.format(src=source))
      if os.path.isdir(source):
        # Local directory: tar it up (honoring the ignore file) and upload.
        source_snapshot = snapshot.Snapshot(source, ignore_file=ignore_file)
        size_str = resource_transform.TransformSize(
            source_snapshot.uncompressed_size)
        log.status.Print(
            'Creating temporary tarball archive of {num_files} file(s)'
            ' totalling {size} before compression.'.format(
                num_files=len(source_snapshot.files), size=size_str))
        staged_source_obj = source_snapshot.CopyTarballToGCS(
            gcs_client, gcs_source_staging, ignore_file=ignore_file)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      elif os.path.isfile(source):
        unused_root, ext = os.path.splitext(source)
        if ext not in _ALLOWED_SOURCE_EXT:
          # BUGFIX: the original message never substituted {src}, so users saw
          # the literal text "[{src}]" instead of their file path.
          raise c_exceptions.BadFileException(
              'Local file [{src}] is none of '.format(src=source) +
              ', '.join(_ALLOWED_SOURCE_EXT))
        log.status.Print('Uploading local file [{src}] to '
                         '[gs://{bucket}/{object}].'.format(
                             src=source,
                             bucket=gcs_source_staging.bucket,
                             object=gcs_source_staging.object,
                         ))
        staged_source_obj = gcs_client.CopyFileToGCS(
            source, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
  else:
    # No source
    if not no_source:
      raise c_exceptions.InvalidArgumentException(
          '--no-source', 'To omit source, use the --no-source flag.')

  # Set a Google Cloud Storage directory to hold build logs.
  if arg_gcs_log_dir:
    gcs_log_dir = resources.REGISTRY.Parse(
        arg_gcs_log_dir, collection='storage.objects')
    build_config.logsBucket = (
        'gs://' + gcs_log_dir.bucket + '/' + gcs_log_dir.object)

  # Set the machine type used to run the build.
  if arg_machine_type is not None:
    machine_type = flags.GetMachineType(arg_machine_type)
    if not build_config.options:
      build_config.options = messages.BuildOptions()
    build_config.options.machineType = machine_type

  # Set the disk size used to run the build.
  if arg_disk_size is not None:
    disk_size = compute_utils.BytesToGb(arg_disk_size)
    if not build_config.options:
      build_config.options = messages.BuildOptions()
    build_config.options.diskSizeGb = int(disk_size)

  return build_config