def Run(self, args):
  """Run a local proxy in front of a Cloud Run service.

  Looks up the service, then forwards http://127.0.0.1:<port> to the
  service URL, minting fresh identity tokens as needed.

  Args:
    args: Args!

  Returns:
    The result of self._DefaultOperationResponseHandler applied to the
    proxy's final response.
  """
  self._CheckPlatform()
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  service_ref = args.CONCEPTS.service.Parse()
  flags.ValidateResource(service_ref)
  with serverless_operations.Connect(conn_context) as client:
    service = client.GetService(service_ref)
    if not service:
      raise exceptions.ArgumentError('Cannot find service [{}]'.format(
          service_ref.servicesId))
    port = args.port if args.port else '8080'
    bind = '127.0.0.1:' + port
    host = self._GetUrl(service, args.tag, service_ref.servicesId)
    proxy_cmd = proxy.ProxyWrapper()
    log.Print('Proxying service [{}] in region [{}] locally...'.format(
        service_ref.servicesId, service.region))
    log.Print('http://{} proxies to {}'.format(bind, host))
    if args.token:
      # Caller supplied a token; run the proxy once with it.
      proxy_response = proxy_cmd(host=host, token=args.token, bind=bind)
    else:
      # Keep restarting the proxy with a fresh token before the token
      # expires (1h) until hitting a failure.
      while True:
        proxy_response = proxy_cmd(
            host=host, token=_GetFreshIdToken(), bind=bind, duration='55m')
        if proxy_response.failed:
          break
    return self._DefaultOperationResponseHandler(proxy_response)
def Run(self, args):
  """Update the service resource.

  Different from `deploy` in that it can only update the service spec
  but no IAM or Cloud build changes.

  Args:
    args: Args!

  Returns:
    googlecloudsdk.api_lib.run.Service, the updated service
  """
  changes = flags.GetServiceConfigurationChanges(args)
  # A lone client-name/version annotation is bookkeeping, not a real
  # user-requested change.
  only_client_annotation = (
      len(changes) == 1 and isinstance(
          changes[0], config_changes.SetClientNameAndVersionAnnotationChange))
  if not changes or only_client_annotation:
    raise exceptions.NoConfigurationChangeError(
        'No configuration change requested. '
        'Did you mean to include the flags `--update-env-vars`, '
        '`--memory`, `--concurrency`, `--timeout`, `--connectivity`, '
        '`--image`?')
  changes.insert(
      0,
      config_changes.DeleteAnnotationChange(
          k8s_object.BINAUTHZ_BREAKGLASS_ANNOTATION))
  changes.append(
      config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack()))

  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  service_ref = args.CONCEPTS.service.Parse()
  flags.ValidateResource(service_ref)
  with serverless_operations.Connect(conn_context) as client:
    serv = client.GetService(service_ref)
    resource_change_validators.ValidateClearVpcConnector(serv, args)
    routes_latest = (serv is None or
                     traffic.LATEST_REVISION_KEY in serv.spec_traffic)
    svc_stages = stages.ServiceStages(
        include_iam_policy_set=False, include_route=routes_latest)
    with progress_tracker.StagedProgressTracker(
        'Deploying...',
        svc_stages,
        failure_message='Deployment failed',
        suppress_output=args.async_) as tracker:
      serv = client.ReleaseService(
          service_ref, changes, tracker, asyn=args.async_, prefetch=serv)
    if args.async_:
      pretty_print.Success(
          'Service [{{bold}}{serv}{{reset}}] is deploying '
          'asynchronously.'.format(serv=serv.name))
    else:
      # Re-fetch so the returned object reflects the finished rollout.
      serv = client.GetService(service_ref)
      pretty_print.Success(
          messages_util.GetSuccessMessageForSynchronousDeploy(serv))
    return serv
def Run(self, args):
  """Update the traffic split for the service.

  Args:
    args: Args!

  Returns:
    List of traffic.TrafficTargetStatus instances reflecting the change,
    or None when running with --async.

  Raises:
    exceptions.NoConfigurationChangeError: no traffic change was requested.
  """
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  service_ref = args.CONCEPTS.service.Parse()
  flags.ValidateResource(service_ref)

  changes = flags.GetServiceConfigurationChanges(args)
  if not changes:
    raise exceptions.NoConfigurationChangeError(
        'No traffic configuration change requested.')
  changes.insert(
      0,
      config_changes.DeleteAnnotationChange(
          k8s_object.BINAUTHZ_BREAKGLASS_ANNOTATION))
  changes.append(
      config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack()))

  is_managed = platforms.GetPlatform() == platforms.PLATFORM_MANAGED

  def _CurrentTrafficPairs(client):
    # Builds spec/status traffic pairs for the service as it exists now;
    # returns None when the service cannot be fetched. Shared by the
    # error-display path and the normal return path (was duplicated).
    serv = client.GetService(service_ref)
    if not serv:
      return None
    return traffic_pair.GetTrafficTargetPairs(
        serv.spec_traffic, serv.status_traffic, is_managed,
        serv.status.latestReadyRevisionName, serv.status.url)

  with serverless_operations.Connect(conn_context) as client:
    deployment_stages = stages.UpdateTrafficStages()
    try:
      with progress_tracker.StagedProgressTracker(
          'Updating traffic...',
          deployment_stages,
          failure_message='Updating traffic failed',
          suppress_output=args.async_) as tracker:
        client.UpdateTraffic(service_ref, changes, tracker, args.async_)
    except Exception:
      # Was a bare `except:`; narrowed so BaseException (KeyboardInterrupt,
      # SystemExit) is not intercepted by the best-effort display below.
      resources = _CurrentTrafficPairs(client)
      if resources is not None:
        display.Displayer(
            self, args, resources,
            display_info=args.GetDisplayInfo()).Display()
      raise
    if args.async_:
      pretty_print.Success('Updating traffic asynchronously.')
    else:
      return _CurrentTrafficPairs(client)
def Run(self, args):
  """Execute a Job on Cloud Run.

  Args:
    args: Args!

  Returns:
    The started execution.
  """
  job_ref = args.CONCEPTS.job.Parse()
  flags.ValidateResource(job_ref)
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  with serverless_operations.Connect(conn_context) as operations:
    with progress_tracker.StagedProgressTracker(
        'Creating execution...',
        stages.ExecutionStages(include_completion=args.wait),
        failure_message='Executing job failed',
        suppress_output=args.async_) as tracker:
      execution = operations.RunJob(
          job_ref, args.wait, tracker, asyn=args.async_)
    if args.async_:
      pretty_print.Success(
          'Execution [{{bold}}{execution}{{reset}}] is being'
          ' started asynchronously.'.format(execution=execution.name))
    else:
      if args.wait:
        verb = 'completed'
      else:
        verb = 'started running'
      pretty_print.Success(
          'Execution [{{bold}}{execution}{{reset}}] has '
          'successfully {operation}.'.format(
              execution=execution.name, operation=verb))
    log.status.Print(
        messages_util.GetExecutionCreatedMessage(
            self.ReleaseTrack(), execution))
    return execution
def Run(self, args):
  """Obtain details about a given service.

  Args:
    args: Args!

  Returns:
    The fetched service.

  Raises:
    exceptions.ArgumentError: the named service does not exist.
  """
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  service_ref = args.CONCEPTS.service.Parse()
  flags.ValidateResource(service_ref)
  with serverless_operations.Connect(conn_context) as client:
    service = client.GetService(service_ref)
  if service:
    return service
  raise exceptions.ArgumentError('Cannot find service [{}]'.format(
      service_ref.servicesId))
def Run(self, args):
  """Delete a service after confirming with the user.

  Args:
    args: Args!
  """
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  service_ref = args.CONCEPTS.service.Parse()
  flags.ValidateResource(service_ref)
  # Abort (or raise when unattended) unless the user confirms.
  console_io.PromptContinue(
      message='Service [{service}] will be deleted.'.format(
          service=service_ref.servicesId),
      throw_if_unattended=True,
      cancel_on_no=True)
  with serverless_operations.Connect(conn_context) as client:
    deletion.Delete(
        service_ref, client.GetService, client.DeleteService, args.async_)
  log.DeletedResource(service_ref.servicesId, 'service')
def Run(self, args):
  """Deploy a container to Cloud Run."""
  job_ref = args.CONCEPTS.job.Parse()
  flags.ValidateResource(job_ref)
  # This surface still talks to the v1alpha1 API.
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack(),
      version_override='v1alpha1')
  changes = flags.GetConfigurationChanges(args)
  changes.append(
      config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack()))
  with serverless_operations.Connect(conn_context) as operations:
    # NOTE(review): this call passes three args where sibling commands pass
    # four ('Updating'/'Creating' plus 'job') — confirm against the helper's
    # signature.
    pretty_print.Info(
        messages_util.GetStartDeployMessage(conn_context, job_ref, 'job'))
    run_verb = 'running' if args.wait_for_completion else 'starting'
    header_msg = 'Creating and {} job...'.format(run_verb)
    with progress_tracker.StagedProgressTracker(
        header_msg,
        stages.JobStages(include_completion=args.wait_for_completion),
        failure_message='Job failed',
        suppress_output=args.async_) as tracker:
      job = operations.CreateJob(
          job_ref, changes, args.wait_for_completion, tracker,
          asyn=args.async_)
    if args.async_:
      pretty_print.Success('Job [{{bold}}{job}{{reset}}] is being created '
                           'asynchronously.'.format(job=job.name))
    else:
      job = operations.GetJob(job_ref)
      if args.wait_for_completion:
        done_verb = 'completed'
      else:
        done_verb = 'started running'
      pretty_print.Success(
          'Job [{{bold}}{job}{{reset}}] has successfully '
          '{operation}.'.format(job=job.name, operation=done_verb))
    return job
def Run(self, args):
  """Update a Job on Cloud Run."""
  job_ref = args.CONCEPTS.job.Parse()
  flags.ValidateResource(job_ref)
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  changes = flags.GetJobConfigurationChanges(args)
  changes.append(
      config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack()))
  with serverless_operations.Connect(conn_context) as operations:
    pretty_print.Info(
        messages_util.GetStartDeployMessage(
            conn_context, job_ref, 'Updating', 'job'))
    with progress_tracker.StagedProgressTracker(
        'Updating job...',
        stages.JobStages(),
        failure_message='Job failed to deploy',
        suppress_output=args.async_) as tracker:
      job = operations.UpdateJob(job_ref, changes, tracker, asyn=args.async_)
    if args.async_:
      pretty_print.Success(
          'Job [{{bold}}{job}{{reset}}] is being updated '
          'asynchronously.'.format(job=job.name))
    else:
      # Re-fetch so the returned job reflects the applied update.
      job = operations.GetJob(job_ref)
      pretty_print.Success(
          'Job [{{bold}}{job}{{reset}}] has been successfully updated'
          .format(job=job.name))
    log.status.Print(
        messages_util.GetRunJobMessage(self.ReleaseTrack(), job.name))
    return job
def Run(self, args):
  """Deploy a Job to Cloud Run."""
  job_ref = args.CONCEPTS.job.Parse()
  flags.ValidateResource(job_ref)
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  changes = flags.GetJobConfigurationChanges(args)
  changes.append(
      config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack()))
  # --wait implies executing the job immediately.
  execute_now = args.execute_now or args.wait
  execution = None
  with serverless_operations.Connect(conn_context) as operations:
    pretty_print.Info(
        messages_util.GetStartDeployMessage(
            conn_context, job_ref, 'Creating', 'job'))
    header_msg = ('Creating and running job...'
                  if execute_now else 'Creating job...')
    with progress_tracker.StagedProgressTracker(
        header_msg,
        stages.JobStages(
            execute_now=execute_now, include_completion=args.wait),
        failure_message='Job failed to deploy',
        suppress_output=args.async_) as tracker:
      # When executing immediately, creation is always synchronous so the
      # run can be started right after.
      job = operations.CreateJob(
          job_ref, changes, tracker, asyn=(args.async_ and not execute_now))
      if execute_now:
        execution = operations.RunJob(
            job_ref, args.wait, tracker, args.async_)
    if args.async_ and not execute_now:
      pretty_print.Success(
          'Job [{{bold}}{job}{{reset}}] is being created '
          'asynchronously.'.format(job=job.name))
    else:
      job = operations.GetJob(job_ref)
      operation = 'been created'
      if args.wait:
        operation += ' and completed execution [{}]'.format(execution.name)
      elif execute_now:
        operation += ' and started running execution [{}]'.format(
            execution.name)
      pretty_print.Success(
          'Job [{{bold}}{job}{{reset}}] has successfully '
          '{operation}.'.format(job=job.name, operation=operation))
    msg_parts = []
    if execute_now:
      msg_parts.append(
          messages_util.GetExecutionCreatedMessage(
              self.ReleaseTrack(), execution))
    msg_parts.append(
        messages_util.GetRunJobMessage(
            self.ReleaseTrack(), job.name, repeat=execute_now))
    log.status.Print('\n'.join(msg_parts))
    return job
def Run(self, args):
  """Deploy a container to Cloud Run.

  Supports deploying either a prebuilt image (--image) or building from
  source (--source) via an Artifact Registry repo that is created on
  demand. Prompts for a source directory when neither flag is given and
  prompting is possible.

  Args:
    args: Args!

  Returns:
    The deployed (or, with --async, deploying) service.
  """
  platform = flags.GetAndValidatePlatform(
      args, self.ReleaseTrack(), flags.Product.RUN)
  include_build = flags.FlagIsExplicitlySet(args, 'source')
  if not include_build and not args.IsSpecified('image'):
    if console_io.CanPrompt():
      args.source = flags.PromptForDefaultSource()
      include_build = True
    else:
      raise c_exceptions.RequiredArgumentException(
          '--image', 'Requires a container image to deploy (e.g. '
          '`gcr.io/cloudrun/hello:latest`) if no build source is provided.'
      )
  service_ref = args.CONCEPTS.service.Parse()
  flags.ValidateResource(service_ref)
  # Obtaining the connection context prompts the user to select a region if
  # one hasn't been provided. We want to do this prior to preparing a source
  # deploy so that we can use that region for the Artifact Registry repo.
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  build_type = None
  image = None
  pack = None
  source = None
  operation_message = 'Deploying container to'
  repo_to_create = None
  # Build an image from source if source specified
  if include_build:
    source = args.source
    # Source builds push to a fixed 'cloud-run-source-deploy' repo in the
    # deploy region (or the cluster location on GKE).
    ar_repo = docker_util.DockerRepo(
        project_id=properties.VALUES.core.project.Get(required=True),
        location_id=artifact_registry.RepoRegion(
            args,
            cluster_location=(conn_context.cluster_location
                              if platform == platforms.PLATFORM_GKE
                              else None)),
        repo_id='cloud-run-source-deploy')
    if artifact_registry.ShouldCreateRepository(ar_repo):
      repo_to_create = ar_repo
    # The image is built with latest tag. After build, the image digest
    # from the build result will be added to the image of the service spec.
    args.image = '{repo}/{service}'.format(
        repo=ar_repo.GetDockerString(), service=service_ref.servicesId)
    # Use GCP Buildpacks if Dockerfile doesn't exist
    docker_file = source + '/Dockerfile'
    if os.path.exists(docker_file):
      build_type = BuildType.DOCKERFILE
    else:
      pack = [{'image': args.image}]
      build_type = BuildType.BUILDPACKS
    image = None if pack else args.image
    operation_message = (
        'Building using {build_type} and deploying container'
        ' to').format(build_type=build_type.value)
    pretty_print.Info(
        messages_util.GetBuildEquivalentForSourceRunMessage(
            service_ref.servicesId, pack, source))
  # Deploy a container with an image
  changes = flags.GetServiceConfigurationChanges(args)
  changes.insert(
      0,
      config_changes.DeleteAnnotationChange(
          k8s_object.BINAUTHZ_BREAKGLASS_ANNOTATION))
  changes.append(
      config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack()))
  with serverless_operations.Connect(conn_context) as operations:
    service = operations.GetService(service_ref)
    allow_unauth = GetAllowUnauth(args, operations, service_ref, service)
    resource_change_validators.ValidateClearVpcConnector(service, args)
    pretty_print.Info(
        messages_util.GetStartDeployMessage(conn_context, service_ref,
                                            operation_message))
    # New services (None) route to latest by default.
    has_latest = (service is None or
                  traffic.LATEST_REVISION_KEY in service.spec_traffic)
    deployment_stages = stages.ServiceStages(
        include_iam_policy_set=allow_unauth is not None,
        include_route=has_latest,
        include_build=include_build,
        include_create_repo=repo_to_create is not None,
    )
    header = None
    if include_build:
      header = 'Building and deploying'
    else:
      header = 'Deploying'
    if service is None:
      header += ' new service'
    header += '...'
    with progress_tracker.StagedProgressTracker(
        header,
        deployment_stages,
        failure_message='Deployment failed',
        suppress_output=args.async_) as tracker:
      service = operations.ReleaseService(
          service_ref,
          changes,
          tracker,
          asyn=args.async_,
          allow_unauthenticated=allow_unauth,
          prefetch=service,
          build_image=image,
          build_pack=pack,
          build_source=source,
          repo_to_create=repo_to_create)
    if args.async_:
      pretty_print.Success(
          'Service [{{bold}}{serv}{{reset}}] is deploying '
          'asynchronously.'.format(serv=service.name))
    else:
      # Re-fetch so the returned object reflects the finished rollout.
      service = operations.GetService(service_ref)
      pretty_print.Success(
          messages_util.GetSuccessMessageForSynchronousDeploy(service))
    return service
def Run(self, args):
  """Deploy a container to Cloud Run.

  Older deploy variant: source builds are tagged with a random hex tag
  under gcr.io/<project>/cloud-run-source-deploy rather than using an
  Artifact Registry repo.

  Args:
    args: Args!

  Returns:
    The deployed (or, with --async, deploying) service.
  """
  flags.GetAndValidatePlatform(args, self.ReleaseTrack(), flags.Product.RUN)
  service_ref = args.CONCEPTS.service.Parse()
  flags.ValidateResource(service_ref)
  build_type = None
  image = None
  pack = None
  source = None
  include_build = flags.FlagIsExplicitlySet(args, 'source')
  operation_message = 'Deploying container'
  # Build an image from source if source specified
  if include_build:
    # Create a tag for the image creation
    source = args.source
    if not args.IsSpecified('image'):
      # Unique tag so each source deploy builds a distinct image.
      args.image = 'gcr.io/{projectID}/cloud-run-source-deploy/{service}:{tag}'.format(
          projectID=properties.VALUES.core.project.Get(required=True),
          service=service_ref.servicesId,
          tag=uuid.uuid4().hex)
    # Use GCP Buildpacks if Dockerfile doesn't exist
    docker_file = args.source + '/Dockerfile'
    if os.path.exists(docker_file):
      build_type = BuildType.DOCKERFILE
    else:
      pack = [{'image': args.image}]
      build_type = BuildType.BUILDPACKS
    image = None if pack else args.image
    operation_message = 'Building using {build_type} and deploying container'.format(
        build_type=build_type.value)
  elif not args.IsSpecified('image'):
    raise c_exceptions.RequiredArgumentException(
        '--image', 'Requires a container image to deploy (e.g. '
        '`gcr.io/cloudrun/hello:latest`) if no build source is provided.'
    )
  # Deploy a container with an image
  conn_context = connection_context.GetConnectionContext(
      args, flags.Product.RUN, self.ReleaseTrack())
  changes = flags.GetConfigurationChanges(args)
  changes.insert(
      0,
      config_changes.DeleteAnnotationChange(
          k8s_object.BINAUTHZ_BREAKGLASS_ANNOTATION))
  changes.append(
      config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack()))
  with serverless_operations.Connect(conn_context) as operations:
    service = operations.GetService(service_ref)
    allow_unauth = GetAllowUnauth(args, operations, service_ref, service)
    resource_change_validators.ValidateClearVpcConnector(service, args)
    pretty_print.Info(
        messages_util.GetStartDeployMessage(conn_context, service_ref,
                                            operation_message))
    # New services (None) route to latest by default.
    has_latest = (service is None or
                  traffic.LATEST_REVISION_KEY in service.spec_traffic)
    deployment_stages = stages.ServiceStages(
        include_iam_policy_set=allow_unauth is not None,
        include_route=has_latest,
        include_build=include_build)
    header = None
    if include_build:
      header = 'Building and deploying'
    else:
      header = 'Deploying'
    if service is None:
      header += ' new service'
    header += '...'
    with progress_tracker.StagedProgressTracker(
        header,
        deployment_stages,
        failure_message='Deployment failed',
        suppress_output=args.async_) as tracker:
      service = operations.ReleaseService(
          service_ref,
          changes,
          tracker,
          asyn=args.async_,
          allow_unauthenticated=allow_unauth,
          prefetch=service,
          build_image=image,
          build_pack=pack,
          build_source=source)
    if args.async_:
      pretty_print.Success(
          'Service [{{bold}}{serv}{{reset}}] is deploying '
          'asynchronously.'.format(serv=service.name))
    else:
      # Re-fetch so the returned object reflects the finished rollout.
      service = operations.GetService(service_ref)
      pretty_print.Success(
          messages_util.GetSuccessMessageForSynchronousDeploy(service))
    return service