def BuildAndPushDockerImages(module_configs,
                             version_id,
                             gae_client,
                             cloudbuild_client,
                             code_bucket,
                             cli,
                             remote):
  """Builds and pushes a set of docker images.

  Args:
    module_configs: A map of module name to parsed config.
    version_id: The version id to deploy these modules under.
    gae_client: An App Engine API client.
    cloudbuild_client: An instance of the cloudbuild.CloudBuildV1 api client.
    code_bucket: The name of the GCS bucket where the source will be uploaded.
    cli: calliope.cli.CLI, The CLI object representing this command line tool.
    remote: Whether the user specified a remote build.

  Returns:
    A dictionary mapping modules to the name of the pushed container image.
  """
  project = properties.VALUES.core.project.Get(required=True)
  use_cloud_build = properties.VALUES.app.use_cloud_build.GetBool()

  # Prepare temporary dockerfile creators for all modules that need them
  # before doing the heavy lifting so we can fail fast if there are errors.
  modules = [
      (name, info, _GetDockerfileCreator(info))
      for (name, info) in module_configs.iteritems()
      if info.RequiresImage()]
  if not modules:
    # No images need to be built.
    return {}

  log.status.Print('Verifying that Managed VMs are enabled and ready.')
  _DoPrepareManagedVms(gae_client)

  if use_cloud_build:
    return _BuildImagesWithCloudBuild(project, modules, version_id,
                                      code_bucket, cloudbuild_client)

  # Update docker client's credentials.
  for registry_host in constants.ALL_SUPPORTED_REGISTRIES:
    docker.UpdateDockerCredentials(registry_host)
  metrics.CustomTimedEvent(metric_names.DOCKER_UPDATE_CREDENTIALS)

  # Build docker images.
  images = {}
  with docker_util.DockerHost(cli, version_id, remote) as docker_client:
    # Build and push all images.
    for module, info, ensure_dockerfile in modules:
      log.status.Print(
          'Building and pushing image for module [{module}]'
          .format(module=module))
      cleanup = ensure_dockerfile()
      try:
        image_name = _GetImageName(project, module, version_id)
        images[module] = BuildAndPushDockerImage(
            info.file, docker_client, image_name)
      finally:
        cleanup()
    metric_name = (metric_names.DOCKER_REMOTE_BUILD if remote
                   else metric_names.DOCKER_BUILD)
    metrics.CustomTimedEvent(metric_name)
  return images

def __enter__(self):
  """Creates a fake metadata environment."""
  log.Print('Surfacing credentials via {metadata}...'.format(
      metadata=self.name))

  # Use JSON to inject structured data into the YAML since it
  # is an effective way to create indentation-insensitive YAML.
  # NOTE: YAML 1.2 is a superset of JSON.
  with open(self.manifest_file, 'w') as f_out:
    f_out.write(
        MANIFEST_FORMAT.format(
            attributes=json.dumps(self._options.attributes),
            project_id=self._options.project,
            email=self._options.account,
            scopes=json.dumps(self._options.scopes)))

  # We refresh credentials in case a pull is needed.
  docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)

  result = docker.Execute([
      'run', '-d',
      '--name', self.name,
      '-v', self.manifest_file + ':' + self.manifest_file,
      self.image,
      # Arguments to the //cloud/containers/metadata binary,
      # which is the entrypoint:
      '-manifest_file=' + self.manifest_file,
      '-refresh_token=' + self._options.credential.refresh_token
  ])
  if result != 0:
    raise exceptions.Error('Unable to launch fake-metadata.')
  return self

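# For context on the "YAML 1.2 is a superset of JSON" note above: anything
# emitted by json.dumps can be spliced into a YAML template without any
# indentation bookkeeping. The sketch below is a minimal, self-contained
# illustration of that technique only; the template and field names are
# made up for the example and are not the real MANIFEST_FORMAT.
import json

_EXAMPLE_MANIFEST_TEMPLATE = """\
computeMetadata:
  v1:
    project:
      projectId: {project_id}
    instance:
      serviceAccounts:
        default:
          scopes: {scopes}
"""


def _RenderExampleManifest(project_id, scopes):
  # json.dumps produces valid YAML 1.2 (a quoted scalar, a flow-style list,
  # etc.), so the substituted values parse correctly regardless of how the
  # surrounding template is indented.
  return _EXAMPLE_MANIFEST_TEMPLATE.format(
      project_id=json.dumps(project_id),
      scopes=json.dumps(scopes))
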
def testMultipleUpdateCredHelperNotConfigured(self):
  self.WriteNewDockerConfig('{}')

  docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)
  docker.UpdateDockerCredentials(constants.REGIONAL_GCR_REGISTRIES[0],
                                 refresh=False)
  self.assertEqual(1, self.refreshes)

  self.CheckDockerConfigAuths({
      'https://' + constants.DEFAULT_REGISTRY: {
          'email': _EMAIL,
          'auth': self.auth
      },
      'https://' + constants.REGIONAL_GCR_REGISTRIES[0]: {
          'email': _EMAIL,
          'auth': self.auth
      }
  })

def testMultipleUpdateCredHelperConfigured(self):
  self.WriteNewDockerConfig(json.dumps({_CREDENTIAL_STORE_KEY: 'helper'}))

  self.popen_mock.side_effect = self.AssertDockerLoginForRegistry(
      constants.DEFAULT_REGISTRY, _EXPECTED_DOCKER_OPTIONS)
  self.assertFalse(self.popen_mock.called)

  docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)

  self.popen_mock.side_effect = self.AssertDockerLoginForRegistry(
      constants.REGIONAL_GCR_REGISTRIES[0], _EXPECTED_DOCKER_OPTIONS)
  docker.UpdateDockerCredentials(constants.REGIONAL_GCR_REGISTRIES[0],
                                 refresh=False)
  self.assertEqual(1, self.refreshes)

  # 2x(docker login)
  self.assertEqual(self.popen_mock.call_count, 2)

def testUpdateWithCredHelperConfigured(self):
  self.WriteNewDockerConfig(json.dumps({_CREDENTIAL_STORE_KEY: 'helper'}))

  self.popen_mock.side_effect = self.AssertDockerLoginForRegistry(
      constants.DEFAULT_REGISTRY, _EXPECTED_DOCKER_OPTIONS)

  docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)
  self.popen_mock.assert_called_once()

def testUpdateInvalidJsonFile(self):
  self.WriteNewDockerConfig('not-json')

  with self.assertRaisesRegex(
      client_lib.InvalidDockerConfigError,
      r'Docker configuration file \[.*config\.json\] could not be read as '
      r'JSON: .*'):
    docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)

  self.assertFalse(self.popen_mock.called)

def testUpdateWithOldConfig(self):
  self.TouchOldDockerConfig()

  docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)

  self.CheckDockerConfigAuths({
      'https://' + constants.DEFAULT_REGISTRY: {
          'email': _EMAIL,
          'auth': self.auth
      }
  })

def testUpdateCredHelperConfiguredAndDockerFails(self):
  self.WriteNewDockerConfig(json.dumps({_CREDENTIAL_STORE_KEY: 'helper'}))

  self.popen_mock.side_effect = self.AssertDockerLoginForRegistry(
      constants.DEFAULT_REGISTRY, _EXPECTED_DOCKER_OPTIONS)
  self.process_mock.returncode = -1  # A failure code.

  with self.assertRaisesRegex(exceptions.Error, 'login failed'):
    docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)
  self.popen_mock.assert_called_once()

def testUpdateWhitespaceFile(self):
  self.WriteNewDockerConfig(' \t\n')

  docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)

  self.CheckDockerConfigAuths({
      'https://' + constants.DEFAULT_REGISTRY: {
          'email': _EMAIL,
          'auth': self.auth
      }
  })
  self.assertFalse(self.popen_mock.called)

def testUpdateCredHelperNotConfigured(self):
  # We get to modify the docker config directly in the non-cred-helper case.
  self.WriteNewDockerConfig('{}')

  docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)

  self.CheckDockerConfigAuths({
      'https://' + constants.DEFAULT_REGISTRY: {
          'email': _EMAIL,
          'auth': self.auth
      }
  })
  self.assertFalse(self.popen_mock.called)

def Run(self, args):
  """Executes the given docker command, after refreshing our credentials.

  Args:
    args: An argparse.Namespace that contains the values for the arguments
      specified in the .Args() method.

  Raises:
    exceptions.ExitCodeNoError: The docker command execution failed.
  """
  if args.account:
    # Since the docker binary invokes `gcloud auth docker-helper` through
    # `docker-credential-gcloud`, it cannot forward the command line
    # arguments. Consequently, we are unable to set the account (or any
    # flag for that matter) used by `docker-credential-gcloud` with
    # the global `--account` flag.
    log.warning('Docker uses the account from the gcloud config. '
                'To set the account in the gcloud config, run '
                '`gcloud config set account <account_name>`.')

  with base.WithLegacyQuota():
    force_refresh = True
    for server in args.server:
      if server not in _DEFAULT_REGISTRIES:
        log.warning(
            'Authenticating to a non-default server: {server}.'.format(
                server=server))
      docker.UpdateDockerCredentials(server, refresh=force_refresh)
      # Only force a refresh for the first server we authorize.
      force_refresh = False

  if args.authorize_only:
    # NOTE: We don't know at this point how long the access token we have
    # placed in the docker configuration will last. More information needs
    # to be exposed from all credential kinds in order for us to have an
    # accurate awareness of lifetime here.
    log.err.Print(
        'Short-lived access for {server} configured.'.format(
            server=args.server))
    return

  docker_args = args.docker_args or []
  docker_args = (docker_args if not args.docker_host
                 else ['-H', args.docker_host] + docker_args)

  result = docker_client_utils.Execute(docker_args)
  # Explicitly avoid displaying an error message that might
  # distract from the docker error message already displayed.
  if result:
    raise exceptions.ExitCodeNoError(exit_code=result)
  return

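# Background for the `docker-credential-gcloud` comment above: Docker's
# credential-helper protocol passes only the registry URL to the helper on
# stdin and reads a JSON credential from stdout, which is why per-invocation
# flags such as `--account` never reach the helper. The sketch below is a
# minimal illustration of a helper's `get` action under that protocol;
# `token_provider` and the 'oauth2accesstoken' username are assumptions for
# the example, not how docker-credential-gcloud actually obtains credentials.
import json
import sys


def _ExampleCredentialHelperGet(token_provider):
  # Docker writes the registry URL (e.g. https://gcr.io) to stdin ...
  registry = sys.stdin.read().strip()
  # ... and expects a JSON object with Username/Secret on stdout.
  json.dump({
      'ServerURL': registry,
      'Username': 'oauth2accesstoken',  # assumed username for token auth
      'Secret': token_provider(),
  }, sys.stdout)
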
def testUpdateWorksWhenDockerNotInstalled(self):
  # A subprocess may or may not be created, but needs to be mocked if so.
  self.popen_mock.side_effect = OSError(errno.ENOENT,
                                        'No such file or directory', 'foo')

  docker.UpdateDockerCredentials(constants.DEFAULT_REGISTRY)

  self.CheckDockerConfigAuths({
      'https://' + constants.DEFAULT_REGISTRY: {
          'email': _EMAIL,
          'auth': self.auth
      }
  })

def BuildAndPushDockerImages(module_configs, version_id, client, cli, remote,
                             implicit_remote_build):
  # PrepareVmRuntime only needs to be called once per deployment.
  project = properties.VALUES.core.project.Get(required=True)
  if any(info.RequiresImage() for info in module_configs.values()):
    log.status.Print('Verifying that Managed VMs are enabled and ready.')
    message = 'If this is your first deployment, this may take a while'
    try:
      with console_io.DelayedProgressTracker(message,
                                             _PREPARE_VM_MESSAGE_DELAY):
        client.PrepareVmRuntime()
      log.status.Print()
    except util.RPCError as err:
      log.warn('If this is your first deployment, please try again.')
      raise err

    for registry in constants.ALL_SUPPORTED_REGISTRIES:
      docker.UpdateDockerCredentials(registry)

    if remote and implicit_remote_build:
      # Test for presence of local Docker.
      try:
        with docker_util.DockerHost(cli, version_id, False) as docker_client:
          if os.environ.get('DOCKER_HOST'):
            docker_client.ping()
            log.warn(
                'A hosted build is being performed, but a local Docker '
                'was found. Specify `--docker-build=local` to use it, or '
                '`--docker-build=remote` to silence this warning.')
      except containers.DockerDaemonConnectionError:
        pass

    with docker_util.DockerHost(cli, version_id, remote) as docker_client:
      # Build and push all images.
      for (module, info) in module_configs.iteritems():
        if info.RequiresImage():
          log.status.Print(
              'Building and pushing image for module [{module}]'.format(
                  module=module))
          info.UpdateManagedVMConfig()
          push.BuildAndPushDockerImage(info.file, project, module, version_id,
                                       info.runtime, docker_client)
      metric_name = _REMOTE_BUILD if remote else _BUILD
      metrics.CustomTimedEvent(metric_name)

def Run(self, args):
  """Executes the given docker command, after refreshing our credentials.

  Args:
    args: An argparse.Namespace that contains the values for the arguments
      specified in the .Args() method.

  Raises:
    exceptions.ExitCodeNoError: The docker command execution failed.
  """
  force_refresh = True
  for server in args.server:
    if server not in _DEFAULT_REGISTRIES:
      log.warn(
          'Authenticating to a non-default server: {server}.'.format(
              server=server))
    docker.UpdateDockerCredentials(server, refresh=force_refresh)
    # Only force a refresh for the first server we authorize.
    force_refresh = False

  if args.authorize_only:
    # NOTE: We don't know at this point how long the access token we have
    # placed in the docker configuration will last. More information needs
    # to be exposed from all credential kinds in order for us to have an
    # accurate awareness of lifetime here.
    log.err.Print('Short-lived access for {server} configured.'.format(
        server=args.server))
    return

  # TODO(user): reconcile with the 'gcloud app' docker stuff,
  # which should be using a gcloud config property.
  docker_args = args.docker_args or []
  docker_args = (docker_args if not args.docker_host
                 else ['-H', args.docker_host] + docker_args)

  result = docker.Execute(docker_args)
  # Explicitly avoid displaying an error message that might
  # distract from the docker error message already displayed.
  if result:
    raise exceptions.ExitCodeNoError(exit_code=result)
  return

def BuildAndPushDockerImages(module_configs,
                             version_id,
                             cloudbuild_client,
                             code_bucket,
                             cli,
                             remote,
                             source_contexts,
                             config_cleanup):
  """Builds and pushes a set of docker images.

  Args:
    module_configs: A map of module name to parsed config.
    version_id: The version id to deploy these modules under.
    cloudbuild_client: An instance of the cloudbuild.CloudBuildV1 api client.
    code_bucket: The name of the GCS bucket where the source will be uploaded.
    cli: calliope.cli.CLI, The CLI object representing this command line tool.
    remote: Whether the user specified a remote build.
    source_contexts: A list of json-serializable source contexts to place in
      the application directory for each config.
    config_cleanup: (callable() or None) If a temporary Dockerfile has already
      been created during the course of the deployment, this should be a
      callable that deletes it.

  Returns:
    A dictionary mapping modules to the name of the pushed container image.
  """
  project = properties.VALUES.core.project.Get(required=True)
  use_cloud_build = properties.VALUES.app.use_cloud_build.GetBool()

  # Prepare temporary dockerfile creators for all modules that need them
  # before doing the heavy lifting so we can fail fast if there are errors.
  modules = []
  for (name, info) in module_configs.iteritems():
    if info.RequiresImage():
      context_creator = context_util.GetSourceContextFilesCreator(
          os.path.dirname(info.file), source_contexts)
      modules.append(
          (name, info, _GetDockerfileCreator(info, config_cleanup),
           context_creator))
  if not modules:
    # No images need to be built.
    return {}

  log.status.Print('Verifying that Managed VMs are enabled and ready.')

  if use_cloud_build:
    return _BuildImagesWithCloudBuild(project, modules, version_id,
                                      code_bucket, cloudbuild_client)

  # Update docker client's credentials.
  for registry_host in constants.ALL_SUPPORTED_REGISTRIES:
    docker.UpdateDockerCredentials(registry_host)
  metrics.CustomTimedEvent(metric_names.DOCKER_UPDATE_CREDENTIALS)

  # Build docker images.
  images = {}
  with docker_util.DockerHost(cli, version_id, remote,
                              project) as docker_client:
    # Build and push all images.
    for module, info, ensure_dockerfile, ensure_context in modules:
      log.status.Print(
          'Building and pushing image for module [{module}]'.format(
              module=module))
      cleanup_dockerfile = ensure_dockerfile()
      cleanup_context = ensure_context()
      try:
        image_name = _GetImageName(project, module, version_id)
        images[module] = BuildAndPushDockerImage(
            info.file, docker_client, image_name)
      finally:
        cleanup_dockerfile()
        cleanup_context()
    metric_name = (metric_names.DOCKER_REMOTE_BUILD if remote
                   else metric_names.DOCKER_BUILD)
    metrics.CustomTimedEvent(metric_name)
  return images