def GetUpdateManager(group_args):
  """Construct the UpdateManager to use based on the common args for the group.

  Args:
    group_args: An argparse namespace.

  Returns:
    update_manager.UpdateManager, The UpdateManager to use for the commands.

  Raises:
    exceptions.InvalidArgumentException: if an OS or architecture override
      value cannot be parsed.
  """

  def _ParseOverride(parse, raw_value, flag_name):
    # Translate enum parse failures into a flag-level argument error.
    try:
      return parse(raw_value)
    except platforms.InvalidEnumValue as e:
      raise exceptions.InvalidArgumentException(flag_name, e)

  os_override = _ParseOverride(platforms.OperatingSystem.FromId,
                               group_args.operating_system_override,
                               'operating-system-override')
  arch_override = _ParseOverride(platforms.Architecture.FromId,
                                 group_args.architecture_override,
                                 'architecture-override')
  platform = platforms.Platform.Current(os_override, arch_override)

  root = None
  if group_args.sdk_root_override:
    root = files.ExpandHomeDir(group_args.sdk_root_override)
  url = None
  if group_args.snapshot_url_override:
    url = files.ExpandHomeDir(group_args.snapshot_url_override)
  return update_manager.UpdateManager(
      sdk_root=root, url=url, platform_filter=platform)
def DeployConnectAgent(kube_client, args, service_account_key_data,
                       image_pull_secret_data, membership_ref,
                       release_track=None):
  """Deploys the GKE Connect agent to the cluster.

  Args:
    kube_client: A Kubernetes Client for the cluster to be registered.
    args: arguments of the command.
    service_account_key_data: The contents of a Google IAM service account
      JSON file
    image_pull_secret_data: The contents of image pull secret to use for
      private registries.
    membership_ref: The membership should be associated with the connect agent
      in the format of
      `project/[PROJECT]/location/global/memberships/[MEMBERSHIP]`.
    release_track: the release_track used in the gcloud command, or None if it
      is not available.

  Raises:
    exceptions.Error: If the agent cannot be deployed properly
    calliope_exceptions.MinimumArgumentException: If the agent cannot be
      deployed properly
  """
  project_id = properties.VALUES.core.project.GetOrFail()
  log.status.Print('Generating connect agent manifest...')
  full_manifest = _GenerateManifest(args, service_account_key_data,
                                    image_pull_secret_data, False,
                                    membership_ref, release_track)

  # Generate a manifest file if necessary.
  if args.manifest_output_file:
    try:
      files.WriteFileContents(
          files.ExpandHomeDir(args.manifest_output_file),
          full_manifest,
          private=True)
    except files.Error as e:
      # Bug fix: previously this Error was constructed but never raised, so a
      # failed manifest write was silently reported as success.
      raise exceptions.Error('could not create manifest file: {}'.format(e))
    log.status.Print(MANIFEST_SAVED_MESSAGE.format(args.manifest_output_file))
    return

  log.status.Print('Deploying GKE Connect agent to cluster...')
  namespace = _GKEConnectNamespace(kube_client, project_id)
  # Delete the ns if necessary
  kube_util.DeleteNamespaceForReinstall(kube_client, namespace)
  # TODO(b/138816749): add check for cluster-admin permissions
  _PurgeAlphaInstaller(kube_client, namespace, project_id)
  # Create or update the agent install deployment and related resources.
  _, err = kube_client.Apply(full_manifest)
  if err:
    raise exceptions.Error(
        'Failed to apply manifest to cluster: {}'.format(err))
def CopyReportFile(context, download_dir, report_filepath):
  """Copies the report file from the VM to the local machine.

  Runs `gcloud compute scp` as a subprocess with no configuration. Any other
  setup will require manual copying from the user.

  Args:
    context: The command running context
    download_dir: Local path where the report will be downloaded
    report_filepath: Path to the report within the VM

  Returns:
    The path of the local file.
  """
  instance = context['instance']
  report_name = os.path.basename(report_filepath)
  local_path = files.ExpandHomeDir(os.path.join(download_dir, report_name))
  log.status.Print('Copying file by running "gcloud compute scp"')
  remote_spec = instance.name + ':' + report_filepath
  scp_cmd = [
      'gcloud', 'compute', 'scp',
      '--zone', instance.zone,
      remote_spec,
      local_path,
  ]
  external_helper.CallSubprocess(
      'gcloud_copy', scp_cmd, dry_run=context.get('args').dry_run)
  return local_path
def _GetKubeconfigAndContext(self, kubeconfig_file, context):
  """Gets the kubeconfig and cluster context from arguments and defaults.

  Args:
    kubeconfig_file: The kubecontext file to use
    context: The value of the context flag

  Returns:
    the kubeconfig filepath and context name

  Raises:
    calliope_exceptions.MinimumArgumentException: if a kubeconfig file cannot
      be deduced from the command line flags or environment
    exceptions.Error: if the context does not exist in the deduced kubeconfig
      file
  """
  # Fall back from the explicit flag to $KUBECONFIG to the usual default.
  chosen_file = kubeconfig_file or os.getenv('KUBECONFIG') or '~/.kube/config'
  kubeconfig = files.ExpandHomeDir(chosen_file)
  if not kubeconfig:
    raise calliope_exceptions.MinimumArgumentException(
        ['--kubeconfig-file'],
        'Please specify --kubeconfig, set the $KUBECONFIG environment '
        'variable, or ensure that $HOME/.kube/config exists')
  loaded_config = kconfig.Kubeconfig.LoadFromFile(kubeconfig)
  if context not in loaded_config.contexts:
    raise exceptions.Error(
        'context [{}] does not exist in kubeconfig [{}]'.format(
            context, kubeconfig))
  return kubeconfig, context
def main():
  """Generates fake component tarballs and JSON snapshots for manual testing."""
  parser = argparse.ArgumentParser(description=(
      'This command generates .tar.gz files for a bunch of fake '
      'components as well as some fake JSON snapshots. This '
      'allows you to test the updater from the CLI manually with '
      'some fake data.'))
  parser.add_argument('--output_dir', '-o', metavar='output-dir',
                      required=True,
                      help='The directory to generate the data to.')
  args = parser.parse_args()

  out_dir = files.ExpandHomeDir(args.output_dir)
  util.Directories.TEMP_DIR = out_dir
  util.Directories.SetUpDirectories()

  # Each entry is a snapshot revision: a list of
  # (component_id, version, dependency_ids) tuples.
  snapshot_revisions = [
      [('a', 1, ['b']), ('b', 1, ['c']), ('c', 1, []), ('e', 1, []),
       ('f', 1, ['g']), ('g', 1, [])],
      [('a', 1, ['b']), ('b', 2, ['c']), ('c', 1, []), ('e', 2, []),
       ('f', 1, ['g']), ('g', 1, [])],
      [('a', 2, ['b']), ('b', 3, []), ('e', 2, []), ('f', 2, ['g']),
       ('g', 1, []), ('h', 1, [])],
      [('a', 2, ['b']), ('b', 3, []), ('e', 2, []), ('f', 2, ['g']),
       ('g', 1, []), ('h', 2, ['i']), ('i', 1, [])],
  ]
  for revision, tuples in enumerate(snapshot_revisions, start=1):
    GenerateSnapshot(out_dir, revision, tuples)
def GetUpdateManager(group_args):
  """Construct the UpdateManager to use based on the common args for the group.

  Args:
    group_args: An argparse namespace.

  Returns:
    update_manager.UpdateManager, The UpdateManager to use for the commands.

  Raises:
    exceptions.InvalidArgumentException: if an OS or architecture override
      value cannot be parsed.
  """

  def _ParseOverride(parse, raw_value, flag_name):
    # Translate enum parse failures into a flag-level argument error.
    try:
      return parse(raw_value)
    except platforms.InvalidEnumValue as e:
      raise exceptions.InvalidArgumentException(flag_name, e)

  os_override = _ParseOverride(platforms.OperatingSystem.FromId,
                               group_args.operating_system_override,
                               'operating-system-override')
  arch_override = _ParseOverride(platforms.Architecture.FromId,
                                 group_args.architecture_override,
                                 'architecture-override')
  platform = platforms.Platform.Current(os_override, arch_override)

  # darwin-arm machines that are running a darwin_x86_64 python binary will
  # report arch as darwin_x86_64 because Architecture.Current() uses
  # platform.machine() as the source of truth. Here in the UpdateManager we
  # want to know the "real" truth, so we call IsActuallyM1ArmArchitecture as
  # the source of truth, which breaks out of the python env to see the
  # underlying arch.
  overrides_absent = not os_override and not arch_override
  if (overrides_absent and
      platform.operating_system == platforms.OperatingSystem.MACOSX and
      platform.architecture == platforms.Architecture.x86_64 and
      platforms.Platform.IsActuallyM1ArmArchitecture()):
    platform.architecture = platforms.Architecture.arm

  root = (files.ExpandHomeDir(group_args.sdk_root_override)
          if group_args.sdk_root_override else None)
  url = (files.ExpandHomeDir(group_args.snapshot_url_override)
         if group_args.snapshot_url_override else None)
  # Not every command group defines --compile-python; default to compiling.
  compile_python = getattr(group_args, 'compile_python', True)
  return update_manager.UpdateManager(
      sdk_root=root,
      url=url,
      platform_filter=platform,
      skip_compile_python=(not compile_python))
def _WriteFileContents(self, filepath, contents):
  """Writes contents to a path, ensuring mkdirs.

  Args:
    filepath: str, The path of the file to write. '~' is expanded and any
      symlinks in the path are resolved before writing.
    contents: str, The contents to write to the file.
  """
  expanded_path = files.ExpandHomeDir(filepath)
  resolved_path = os.path.realpath(expanded_path)
  files.WriteFileContents(resolved_path, contents, private=True)
def GetKubeconfigAndContext(self, flags, temp_kubeconfig_dir):
  """Gets the kubeconfig and cluster context from arguments and defaults.

  Args:
    flags: the flags passed to the enclosing command. It must include
      kubeconfig and context.
    temp_kubeconfig_dir: a TemporaryDirectoryObject.

  Returns:
    the kubeconfig filepath and context name

  Raises:
    calliope_exceptions.MinimumArgumentException: if a kubeconfig file cannot
      be deduced from the command line flags or environment
    exceptions.Error: if the context does not exist in the deduced kubeconfig
      file
  """
  # Parsing flags to get the name and location of the GKE cluster to register.
  # When a GKE URI/cluster is given, a fresh kubeconfig is generated for it.
  if flags.gke_uri or flags.gke_cluster:
    if flags.gke_uri:
      location, name = _ParseGKEURI(flags.gke_uri)
    else:
      location, name = _ParseGKECluster(flags.gke_cluster)
    return _GetGKEKubeconfig(location, name, temp_kubeconfig_dir), None

  # We need to support in-cluster configuration so that gcloud can run from
  # a container on the Cluster we are registering. KUBERNETES_SERVICE_PORT
  # and KUBERNETES_SERVICE_HOST environment variables are set in a kubernetes
  # cluster automatically, which can be used by kubectl to talk to the API
  # server.
  if not flags.kubeconfig:
    service_port = encoding.GetEncodedValue(os.environ,
                                            'KUBERNETES_SERVICE_PORT')
    service_host = encoding.GetEncodedValue(os.environ,
                                            'KUBERNETES_SERVICE_HOST')
    if service_port and service_host:
      return None, None

  kubeconfig_path = (flags.kubeconfig or
                     encoding.GetEncodedValue(os.environ, 'KUBECONFIG') or
                     '~/.kube/config')
  kubeconfig = files.ExpandHomeDir(kubeconfig_path)
  if not kubeconfig:
    raise calliope_exceptions.MinimumArgumentException(
        ['--kubeconfig'],
        'Please specify --kubeconfig, set the $KUBECONFIG environment '
        'variable, or ensure that $HOME/.kube/config exists')
  kc = kconfig.Kubeconfig.LoadFromFile(kubeconfig)
  context_name = flags.context
  if context_name not in kc.contexts:
    raise exceptions.Error(
        'context [{}] does not exist in kubeconfig [{}]'.format(
            context_name, kubeconfig))
  return kubeconfig, context_name
def _CheckNetrc(self):
  """Warn on stderr if ~/.netrc contains redundant credentials."""

  def _WarnIfRedundant(path):
    """Warn about other credential helpers that will be ignored."""
    if not os.path.exists(path):
      return
    try:
      contents = files.ReadFileContents(path)
      if 'source.developers.google.com' in contents:
        sys.stderr.write(textwrap.dedent("""\
            You have credentials for your Google repository in [{path}]. This repository's
            git credential helper is set correctly, so the credentials in [{path}] will not
            be used, but you may want to remove them to avoid confusion.
            """.format(path=path)))
    # pylint:disable=broad-except, If something went wrong, forget about it.
    except Exception:
      pass

  # Check both the POSIX and the Windows-style file names.
  _WarnIfRedundant(files.ExpandHomeDir(os.path.join('~', '.netrc')))
  _WarnIfRedundant(files.ExpandHomeDir(os.path.join('~', '_netrc')))
def _GetEnvs():
  """Get the environment variables to pass to kubectl/gcloud commands.

  Returns:
    dict, a copy of os.environ; KUBECONFIG is filled in with the default
    kubeconfig path when it is not already set.
  """
  env = dict(os.environ)
  if _KUBECONFIGENV in env:
    return env
  default_kubeconfig = os.path.join('~', '.kube', _DEFAULTKUBECONFIG)
  env[_KUBECONFIGENV] = files.ExpandHomeDir(default_kubeconfig)
  return env
def _RaiseErrorIfNotExists(local_package_path, flag_name):
  """Validates that the local package path points at an existing directory.

  Args:
    local_package_path: str, path of the local directory to check.
    flag_name: str, indicates in which flag the path is specified.

  Raises:
    exceptions.InvalidArgumentException: if the expanded path does not exist
      or is not a directory.
  """
  work_dir = os.path.abspath(files.ExpandHomeDir(local_package_path))
  # os.path.isdir already returns False for non-existent paths, so the
  # previous separate os.path.exists check was redundant.
  if not os.path.isdir(work_dir):
    raise exceptions.InvalidArgumentException(
        flag_name, r"Directory '{}' is not found.".format(work_dir))
def GetKubeconfig(args):
  """Get config from kubeconfig file.

  Checks three sources in order, falling through to the next when one is
  absent:

    1. file_path specified as argument by the user
    2. List of file paths specified in $KUBECONFIG
    3. Default config path (~/.kube/config)

  Args:
    args: Namespace, The args namespace.

  Returns:
    dict: config object

  Raises:
    KubeconfigError: if $KUBECONFIG is set but contains no valid paths
  """
  explicit_path = getattr(args, 'kubeconfig', None)
  if explicit_path:
    return kubeconfig.Kubeconfig.LoadFromFile(
        files.ExpandHomeDir(explicit_path))

  env_value = os.getenv('KUBECONFIG')
  if env_value:
    # Merge every loadable path into a single config; skip broken ones.
    merged = None
    for config_path in env_value.split(os.pathsep):
      try:
        loaded = kubeconfig.Kubeconfig.LoadFromFile(
            files.ExpandHomeDir(config_path))
      except kubeconfig.Error:
        continue
      if merged is None:
        merged = loaded
      else:
        merged.Merge(loaded)
    if merged is None:
      raise KubeconfigError('No valid file paths found in $KUBECONFIG')
    return merged

  return kubeconfig.Kubeconfig.LoadFromFile(
      files.ExpandHomeDir(_DEFAULT_KUBECONFIG_PATH))
def ExpandLocalDirAndVersion(directory):
  """Expand HOME relative (~) directory with optional git_ref.

  Args:
    directory: str, directory path in the format PATH[/][@git_ref]. May be
      empty or None.

  Returns:
    str, expanded full directory path with git_ref (if provided). A falsy
    input is returned unchanged.
  """
  # Bug fix: the old code set `path = ''` for falsy input and then indexed
  # path[0], which raised IndexError on an empty string.
  if not directory:
    return directory
  path = directory.split('@')
  full_dir = files.ExpandHomeDir(path[0])
  if len(path) == 2:
    full_dir += '@' + path[1]
  return full_dir
def Base64EncodedFileContents(filename):
  """Reads the provided file, and returns its contents, base64-encoded.

  Args:
    filename: The path to the file, absolute or relative to the current
      working directory.

  Returns:
    A string, the contents of filename, base64-encoded.

  Raises:
    files.Error: if the file cannot be read.
  """
  expanded_path = files.ExpandHomeDir(filename)
  raw_contents = files.ReadBinaryFileContents(expanded_path)
  return base64.b64encode(raw_contents)
def ValidateLocalRunArgs(args):
  """Validates the arguments of the `local-run` command and normalizes them.

  Args:
    args: Namespace, the parsed command arguments; local_package_path is
      replaced by its validated absolute path (or the CWD when unset).

  Returns:
    The same args namespace, normalized in place.

  Raises:
    exceptions.InvalidArgumentException: if --local-package-path is not an
      existing directory.
  """
  local_package_path = args.local_package_path
  if not local_package_path:
    work_dir = files.GetCWD()
  else:
    work_dir = os.path.abspath(files.ExpandHomeDir(local_package_path))
    if not (os.path.exists(work_dir) and os.path.isdir(work_dir)):
      raise exceptions.InvalidArgumentException(
          '--local-package-path',
          r"Directory '{}' is not found.".format(work_dir))
  args.local_package_path = work_dir
  _ValidBuildArgsOfLocalRun(args)
  return args
def GetKubeconfigAndContext(self, flags):
  """Gets the kubeconfig and cluster context from arguments and defaults.

  Args:
    flags: the flags passed to the enclosing command. It must include
      kubeconfig and context.

  Returns:
    the kubeconfig filepath and context name

  Raises:
    calliope_exceptions.MinimumArgumentException: if a kubeconfig file cannot
      be deduced from the command line flags or environment
    exceptions.Error: if the context does not exist in the deduced kubeconfig
      file
  """
  # We need to support in-cluster configuration so that gcloud can run from
  # a container on the Cluster we are registering.
  if not flags.kubeconfig:
    service_port = encoding.GetEncodedValue(os.environ,
                                            'KUBERNETES_SERVICE_PORT')
    service_host = encoding.GetEncodedValue(os.environ,
                                            'KUBERNETES_SERVICE_HOST')
    if service_port and service_host:
      return None, None

  kubeconfig_path = (flags.kubeconfig or
                     encoding.GetEncodedValue(os.environ, 'KUBECONFIG') or
                     '~/.kube/config')
  kubeconfig = files.ExpandHomeDir(kubeconfig_path)
  if not kubeconfig:
    raise calliope_exceptions.MinimumArgumentException(
        ['--kubeconfig'],
        'Please specify --kubeconfig, set the $KUBECONFIG environment '
        'variable, or ensure that $HOME/.kube/config exists')
  kc = kconfig.Kubeconfig.LoadFromFile(kubeconfig)
  context_name = flags.context
  if not context_name:
    raise exceptions.Error('argument --context: Must be specified.')
  if context_name not in kc.contexts:
    raise exceptions.Error(
        'context [{}] does not exist in kubeconfig [{}]'.format(
            context_name, kubeconfig))
  return kubeconfig, context_name
def __init__(self, key_file, env=None):
  """Create a Keys object which manages the given files.

  Args:
    key_file: str, The file path to the private SSH key file (other files
      are derived from this name). Automatically handles symlinks and user
      expansion.
    env: Environment, Current environment or None to infer from current.
  """
  private_key_path = os.path.realpath(files.ExpandHomeDir(key_file))
  self.dir = os.path.dirname(private_key_path)
  self.env = env if env else Environment.Current()
  # TODO(b/71388306): Enums aren't handled well by pytype.
  key_data = {
      _KeyFileKind.PRIVATE: self.KeyFileData(private_key_path),
      _KeyFileKind.PUBLIC: self.KeyFileData(private_key_path + '.pub'),
  }  # type: dict[enum.Enum, Keys.KeyFileData]
  # PuTTY needs its own .ppk key format alongside the OpenSSH pair.
  if self.env.suite is Suite.PUTTY:
    key_data[_KeyFileKind.PPK] = self.KeyFileData(private_key_path + '.ppk')
  self.keys = key_data
def _GetAndUpdateRcPath(completion_update, path_update, rc_path, host_os):
  """Returns an rc path based on the default rc path or user input.

  Gets default rc path based on environment. If prompts are enabled,
  allows user to update to preferred file path. Otherwise, prints a warning
  that the default rc path will be updated.

  Args:
    completion_update: bool, Whether or not to do command completion.
    path_update: bool, Whether or not to update PATH.
    rc_path: str, the rc path given by the user, from --rc-path arg.
    host_os: str, The host os identification string.

  Returns:
    str, A path to the rc file to update, or None when no update is needed.
  """
  # If we aren't updating the RC file for either completions or PATH, there's
  # no point.
  if not completion_update and not path_update:
    return None
  if rc_path:
    return rc_path
  # A first guess at user preferred shell.
  preferred_shell = _GetPreferredShell(
      encoding.GetEncodedValue(os.environ, 'SHELL', '/bin/sh'))
  default_rc_path = os.path.join(
      files.GetHomeDir(), _GetShellRcFileName(preferred_shell, host_os))
  # If in quiet mode, we'll use default path.
  if not console_io.CanPrompt():
    _TraceAction(
        'You specified that you wanted to update your rc file. The '
        'default file will be updated: [{rc_path}]'.format(
            rc_path=default_rc_path))
    return default_rc_path
  user_rc_path = console_io.PromptResponse(
      ('The Google Cloud SDK installer will now prompt you to update an rc '
       'file to bring the Google Cloud CLIs into your environment.\n\n'
       'Enter a path to an rc file to update, or leave blank to use '
       '[{rc_path}]: ').format(rc_path=default_rc_path))
  if user_rc_path:
    return files.ExpandHomeDir(user_rc_path)
  return default_rc_path
def NormalizeAndValidateObbFileNames(obb_files):
  """Confirm that any OBB file names follow the required Android pattern.

  Also expand local paths with "~"

  Args:
    obb_files: list of obb file references. Each one is either a filename on
      the local FS or a gs:// reference. The list is normalized in place.

  Raises:
    test_exceptions.InvalidArgException: if a file name does not match the
      required OBB pattern.
  """
  if obb_files:
    normalized = []
    for obb_file in obb_files:
      # Only local, non-empty paths get '~' expansion; gs:// refs pass
      # through untouched.
      if obb_file and not obb_file.startswith(
          storage_util.GSUTIL_BUCKET_PREFIX):
        obb_file = files.ExpandHomeDir(obb_file)
      normalized.append(obb_file)
    obb_files[:] = normalized
  for obb_file in obb_files or []:
    if not _OBB_FILE_REGEX.match(obb_file):
      raise test_exceptions.InvalidArgException(
          'obb_files',
          '[{0}] is not a valid OBB file name, which must have the format: '
          '(main|patch).<versionCode>.<package.name>.obb'.format(obb_file))
def _CreateBotoConfig(self):
  """Creates a default ~/.boto file via `gsutil config -n`, when possible."""
  gsutil_path = _FindGsutil()
  if not gsutil_path:
    log.debug('Unable to find [gsutil]. Not configuring default .boto '
              'file')
    return
  boto_path = files.ExpandHomeDir(os.path.join('~', '.boto'))
  if os.path.exists(boto_path):
    log.debug('Not configuring default .boto file. File already '
              'exists at [{boto_path}].'.format(boto_path=boto_path))
    return
  # 'gsutil config -n' creates a default .boto file that the user can read and
  # modify.
  command_args = ['config', '-n', '-o', boto_path]
  current_os = platforms.OperatingSystem.Current()
  if current_os == platforms.OperatingSystem.WINDOWS:
    gsutil_args = execution_utils.ArgsForCMDTool(gsutil_path, *command_args)
  else:
    gsutil_args = execution_utils.ArgsForExecutableTool(gsutil_path,
                                                        *command_args)
  return_code = execution_utils.Exec(gsutil_args, no_exit=True,
                                     out_func=log.file_only_logger.debug,
                                     err_func=log.file_only_logger.debug)
  if return_code != 0:
    log.status.write(
        'Error creating a default .boto configuration file. '
        'Please run [gsutil config -n] if you would like to '
        'create this file.\n')
    return
  log.status.write("""\
Created a default .boto configuration file at [{boto_path}]. See this file and
[https://cloud.google.com/storage/docs/gsutil/commands/config] for more
information about configuring Google Cloud Storage.
""".format(boto_path=boto_path))
def Run(self, args):
  """Registers the cluster: creates/updates a Membership and installs the Connect agent.

  Args:
    args: an argparse namespace with all arguments provided to this command
      invocation (cluster identifier flags, workload identity flags, etc.).

  Returns:
    The created or pre-existing Membership resource object.

  Raises:
    exceptions.Error: on invalid/conflicting memberships, OpenID discovery
      failures, or membership update failures.
  """
  project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
  # This incidentally verifies that the kubeconfig and context args are valid.
  if self.ReleaseTrack() is base.ReleaseTrack.BETA or self.ReleaseTrack(
  ) is base.ReleaseTrack.ALPHA:
    api_adapter = gke_api_adapter.NewAPIAdapter('v1beta1')
  else:
    api_adapter = gke_api_adapter.NewAPIAdapter('v1')
  # getattr defaults guard against flags that are not registered on every
  # release track.
  with kube_util.KubernetesClient(
      api_adapter=api_adapter,
      gke_uri=getattr(args, 'gke_uri', None),
      gke_cluster=getattr(args, 'gke_cluster', None),
      kubeconfig=getattr(args, 'kubeconfig', None),
      internal_ip=getattr(args, 'internal_ip', False),
      cross_connect_subnetwork=getattr(args, 'cross_connect_subnetwork', None),
      private_endpoint_fqdn=getattr(args, 'private_endpoint_fqdn', None),
      context=getattr(args, 'context', None),
      public_issuer_url=getattr(args, 'public_issuer_url', None),
      enable_workload_identity=getattr(args, 'enable_workload_identity',
                                       False),
  ) as kube_client:
    location = getattr(args, 'location', 'global')
    if location is None:
      location = 'global'
    kube_client.CheckClusterAdminPermissions()
    kube_util.ValidateClusterIdentifierFlags(kube_client, args)
    if self.ReleaseTrack() is not base.ReleaseTrack.GA:
      flags.VerifyGetCredentialsFlags(args)
    uuid = kube_util.GetClusterUUID(kube_client)
    # Read the service account files provided in the arguments early, in order
    # to catch invalid files before performing mutating operations.
    # Service Account key file is required if Workload Identity is not
    # enabled.
    # If Workload Identity is enabled, then the Connect Agent uses
    # a Kubernetes Service Account token instead and hence a Google Cloud
    # Platform Service Account key is not required.
    service_account_key_data = ''
    if args.service_account_key_file:
      try:
        service_account_key_data = hub_util.Base64EncodedFileContents(
            args.service_account_key_file)
      except files.Error as e:
        raise exceptions.Error('Could not process {}: {}'.format(
            SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

    docker_credential_data = None
    if args.docker_credential_file:
      try:
        file_content = files.ReadBinaryFileContents(
            files.ExpandHomeDir(args.docker_credential_file))
        docker_credential_data = six.ensure_str(
            file_content, encoding='utf-8')
      except files.Error as e:
        raise exceptions.Error('Could not process {}: {}'.format(
            DOCKER_CREDENTIAL_FILE_FLAG, e))

    gke_cluster_self_link = kube_client.processor.gke_cluster_self_link
    issuer_url = None
    private_keyset_json = None
    if args.enable_workload_identity:
      # public_issuer_url can be None or given by user or gke_cluster_uri
      # (incase of a gke cluster).
      # args.public_issuer_url takes precedence over gke_cluster_uri.
      public_issuer_url = (args.public_issuer_url or
                           kube_client.processor.gke_cluster_uri or None)
      try:
        openid_config_json = six.ensure_str(
            kube_client.GetOpenIDConfiguration(issuer_url=public_issuer_url),
            encoding='utf-8')
      except Exception as e:  # pylint: disable=broad-except
        raise exceptions.Error(
            'Error getting the OpenID Provider Configuration: '
            '{}'.format(e))
      # Extract the issuer URL from the discovery doc.
      issuer_url = json.loads(openid_config_json).get('issuer')
      if not issuer_url:
        raise exceptions.Error(
            'Invalid OpenID Config: '
            'missing issuer: {}'.format(openid_config_json))
      # Ensure public_issuer_url (only non-empty) matches what came back in
      # the discovery doc.
      if public_issuer_url and (public_issuer_url != issuer_url):
        raise exceptions.Error('--public-issuer-url {} did not match issuer '
                               'returned in discovery doc: {}'.format(
                                   public_issuer_url, issuer_url))
      # In the private issuer case, we set private_keyset_json,
      # which is used later to upload the JWKS
      # in the Fleet Membership.
      # NOTE(review): has_private_issuer is assumed to only be meaningful when
      # workload identity is enabled — confirm against the flag definitions.
      if args.has_private_issuer:
        private_keyset_json = kube_client.GetOpenIDKeyset()

    # Attempt to create a membership.
    already_exists = False
    obj = None
    # For backward compatiblity, check if a membership was previously created
    # using the cluster uuid.
    parent = api_util.ParentRef(project, location)
    membership_id = uuid
    resource_name = api_util.MembershipRef(project, location, uuid)
    obj = self._CheckMembershipWithUUID(resource_name, args.CLUSTER_NAME)
    # get api version version to pass into create/update membership
    api_server_version = kube_util.GetClusterServerVersion(kube_client)
    if obj:
      # The membership exists and has the same description.
      already_exists = True
    else:
      # Attempt to create a new membership using cluster_name.
      membership_id = args.CLUSTER_NAME
      resource_name = api_util.MembershipRef(project, location,
                                             args.CLUSTER_NAME)
      try:
        self._VerifyClusterExclusivity(kube_client, parent, membership_id)
        obj = api_util.CreateMembership(project, args.CLUSTER_NAME,
                                        args.CLUSTER_NAME, location,
                                        gke_cluster_self_link, uuid,
                                        self.ReleaseTrack(), issuer_url,
                                        private_keyset_json,
                                        api_server_version)
        # Generate CRD Manifest should only be called afer create/update.
        self._InstallOrUpdateExclusivityArtifacts(kube_client, resource_name)
      except apitools_exceptions.HttpConflictError as e:
        # If the error is not due to the object already existing, re-raise.
        error = core_api_exceptions.HttpErrorPayload(e)
        if error.status_description != 'ALREADY_EXISTS':
          raise
        obj = api_util.GetMembership(resource_name, self.ReleaseTrack())
        if not obj.externalId:
          raise exceptions.Error(
              'invalid membership {0} does not have '
              'external_id field set. We cannot determine '
              'if registration is requested against a '
              'valid existing Membership. Consult the '
              'documentation on container fleet memberships '
              'update for more information or run gcloud '
              'container fleet memberships delete {0} if you '
              'are sure that this is an invalid or '
              'otherwise stale Membership'.format(membership_id))
        if obj.externalId != uuid:
          raise exceptions.Error(
              'membership {0} already exists in the project'
              ' with another cluster. If this operation is'
              ' intended, please run `gcloud container '
              'fleet memberships delete {0}` and register '
              'again.'.format(membership_id))
        # The membership exists with same cluster_name.
        already_exists = True

    # In case of an existing membership, check with the user to upgrade the
    # Connect-Agent.
    if already_exists:
      # Update Membership when required. Scenarios that require updates:
      # 1. membership.authority is set, but there is now no issuer URL.
      #    This means the user is disabling Workload Identity.
      # 2. membership.authority is not set, but there is now an
      #    issuer URL. This means the user is enabling Workload Identity.
      # 3. membership.authority is set, but the issuer URL is different
      #    from that set in membership.authority.issuer. This is technically
      #    an error, but we defer to validation in the API.
      # 4. membership.authority.oidcJwks is set, but the private keyset
      #    we got from the cluster differs from the keyset in the membership.
      #    This means the user is updating the public keys, and we should
      #    update to the latest keyset in the membership.
      if (
          # scenario 1, disabling WI
          (obj.authority and not issuer_url) or
          # scenario 2, enabling WI
          (issuer_url and not obj.authority) or
          (obj.authority and
           # scenario 3, issuer changed
           ((obj.authority.issuer != issuer_url) or
            # scenario 4, JWKS changed
            (private_keyset_json and obj.authority.oidcJwks and
             (obj.authority.oidcJwks.decode('utf-8') !=
              private_keyset_json))))):
        console_io.PromptContinue(
            message=hub_util.GenerateWIUpdateMsgString(
                obj, issuer_url, resource_name, args.CLUSTER_NAME),
            cancel_on_no=True)
        try:
          api_util.UpdateMembership(
              resource_name,
              obj,
              'authority',
              self.ReleaseTrack(),
              issuer_url=issuer_url,
              oidc_jwks=private_keyset_json)
          # Generate CRD Manifest should only be called afer create/update.
          self._InstallOrUpdateExclusivityArtifacts(kube_client,
                                                    resource_name)
          log.status.Print(
              'Updated the membership [{}] for the cluster [{}]'.format(
                  resource_name, args.CLUSTER_NAME))
        except Exception as e:
          raise exceptions.Error(
              'Error in updating the membership [{}]:{}'.format(
                  resource_name, e))
      else:
        console_io.PromptContinue(
            message='A membership [{}] for the cluster [{}] already exists. '
            'Continuing will reinstall the Connect agent deployment to use a '
            'new image (if one is available).'.format(resource_name,
                                                      args.CLUSTER_NAME),
            cancel_on_no=True)
    else:
      log.status.Print(
          'Created a new membership [{}] for the cluster [{}]'.format(
              resource_name, args.CLUSTER_NAME))

    # Attempt to update the existing agent deployment, or install a new agent
    # if necessary.
    try:
      agent_util.DeployConnectAgent(kube_client, args,
                                    service_account_key_data,
                                    docker_credential_data, resource_name,
                                    self.ReleaseTrack())
    except Exception as e:
      log.status.Print('Error in installing the Connect Agent: {}'.format(e))
      # In case of a new membership, we need to clean up membership and
      # resources if we failed to install the Connect Agent.
      if not already_exists:
        api_util.DeleteMembership(resource_name, self.ReleaseTrack())
        exclusivity_util.DeleteMembershipResources(kube_client)
      raise
    log.status.Print(
        'Finished registering the cluster [{}] with the Fleet.'.format(
            args.CLUSTER_NAME))
    return obj
def GetKubeconfigAndContext(self, temp_kubeconfig_dir):
  """Gets the kubeconfig, cluster context and resource link from arguments and defaults.

  Args:
    temp_kubeconfig_dir: a TemporaryDirectoryObject.

  Returns:
    the kubeconfig filepath and context name

  Raises:
    calliope_exceptions.MinimumArgumentException: if a kubeconfig file cannot
      be deduced from the command line flags or environment
    exceptions.Error: if the context does not exist in the deduced kubeconfig
      file
  """
  # Parsing flags to get the name and location of the GKE cluster to register.
  # When a GKE URI/cluster was given, generate a dedicated kubeconfig for it
  # instead of reading the user's own files.
  if self.gke_uri or self.gke_cluster:
    if self.gke_uri:
      cluster_project, location, name = _ParseGKEURI(self.gke_uri)
    else:
      cluster_project = properties.VALUES.core.project.GetOrFail()
      location, name = _ParseGKECluster(self.gke_cluster)
    self.gke_cluster_self_link, self.gke_cluster_uri = (
        api_util.GetGKEURIAndResourceName(cluster_project, location, name))
    return _GetGKEKubeconfig(self.api_adapter, cluster_project, location,
                             name, temp_kubeconfig_dir, self.internal_ip,
                             self.cross_connect_subnetwork,
                             self.private_endpoint_fqdn), None

  # We need to support in-cluster configuration so that gcloud can run from
  # a container on the Cluster we are registering. KUBERNETES_SERVICE_PORT
  # and KUBERNETES_SERVICE_HOST environment variables are set in a kubernetes
  # cluster automatically, which can be used by kubectl to talk to the API
  # server.
  if not self.kubeconfig:
    service_port = encoding.GetEncodedValue(os.environ,
                                            'KUBERNETES_SERVICE_PORT')
    service_host = encoding.GetEncodedValue(os.environ,
                                            'KUBERNETES_SERVICE_HOST')
    if service_port and service_host:
      return None, None

  kubeconfig_path = (self.kubeconfig or
                     encoding.GetEncodedValue(os.environ, 'KUBECONFIG') or
                     '~/.kube/config')
  kubeconfig = files.ExpandHomeDir(kubeconfig_path)
  if not kubeconfig:
    raise calliope_exceptions.MinimumArgumentException(
        ['--kubeconfig'],
        'Please specify --kubeconfig, set the $KUBECONFIG environment '
        'variable, or ensure that $HOME/.kube/config exists')
  kc = kconfig.Kubeconfig.LoadFromFile(kubeconfig)
  context_name = self.context
  if context_name not in kc.contexts:
    raise exceptions.Error(
        'context [{}] does not exist in kubeconfig [{}]'.format(
            context_name, kubeconfig))
  return kubeconfig, context_name
def DeployConnectAgent(args, service_account_key_data, docker_credential_data,
                       upgrade=False):
  """Deploys the GKE Connect agent to the cluster.

  Args:
    args: arguments of the command.
    service_account_key_data: The contents of a Google IAM service account JSON
      file
    docker_credential_data: A credential that can be used to access Docker, to
      be stored in a secret and referenced from pod.spec.ImagePullSecrets.
    upgrade: whether to attempt to upgrade the agent, rather than replacing it.

  Raises:
    exceptions.Error: If the agent cannot be deployed properly
    calliope_exceptions.MinimumArgumentException: If the agent cannot be
      deployed properly
  """
  kube_client = KubernetesClient(args)

  image = args.docker_image
  if not image:
    # Get the SHA for the default image, so the deployed agent is pinned to a
    # specific digest rather than a floating tag.
    try:
      digest = ImageDigestForContainerImage(DEFAULT_CONNECT_AGENT_IMAGE,
                                            DEFAULT_CONNECT_AGENT_TAG)
      image = '{}@{}'.format(DEFAULT_CONNECT_AGENT_IMAGE, digest)
    except Exception as exp:
      raise exceptions.Error(
          'could not determine image digest for {}:{}: {}'.format(
              DEFAULT_CONNECT_AGENT_IMAGE, DEFAULT_CONNECT_AGENT_TAG, exp))

  project_id = properties.VALUES.core.project.GetOrFail()
  namespace = _GKEConnectNamespace(kube_client, project_id)

  full_manifest, agent_install_deployment_name = GenerateInstallManifest(
      project_id, namespace, image, service_account_key_data,
      docker_credential_data, args.CLUSTER_NAME, args.proxy)

  # Generate a manifest file if necessary.
  if args.manifest_output_file:
    try:
      files.WriteFileContents(
          files.ExpandHomeDir(args.manifest_output_file),
          full_manifest,
          private=True)
    except files.Error as e:
      # Bug fix: the Error was previously constructed but never raised, so a
      # failed write fell through to the "manifest saved" message below.
      raise exceptions.Error('could not create manifest file: {}'.format(e))
    log.status.Print(MANIFEST_SAVED_MESSAGE.format(args.manifest_output_file))
    # Writing a manifest replaces deploying it; stop here.
    return

  log.status.Print('Deploying GKE Connect agent to cluster...')

  # During an upgrade, the namespace should not be deleted.
  if not upgrade:
    # Delete the ns if necessary
    if kube_client.NamespaceExists(namespace):
      console_io.PromptContinue(
          message='Namespace [{namespace}] already exists in the cluster. This '
          'may be from a previous installation of the agent. If you want to '
          'investigate, enter "n" and run\n\n'
          '  kubectl \\\n'
          '  --kubeconfig={kubeconfig} \\\n'
          '  --context={context} \\\n'
          '  get all -n {namespace}\n\n'
          'Continuing will delete namespace [{namespace}].'.format(
              namespace=namespace,
              kubeconfig=kube_client.kubeconfig,
              context=kube_client.context),
          cancel_on_no=True)
      try:
        succeeded, error = waiter.WaitFor(
            KubernetesPoller(),
            NamespaceDeleteOperation(namespace, kube_client),
            'Deleting namespace [{}] in the cluster'.format(namespace),
            pre_start_sleep_ms=NAMESPACE_DELETION_INITIAL_WAIT_MS,
            max_wait_ms=NAMESPACE_DELETION_TIMEOUT_MS,
            wait_ceiling_ms=NAMESPACE_DELETION_MAX_POLL_INTERVAL_MS,
            sleep_ms=NAMESPACE_DELETION_INITIAL_POLL_INTERVAL_MS)
      except waiter.TimeoutError:
        # waiter.TimeoutError assumes that the operation is a Google API
        # operation, and prints a debugging string to that effect.
        raise exceptions.Error(
            'Could not delete namespace [{}] from cluster.'.format(namespace))

      if not succeeded:
        raise exceptions.Error(
            'Could not delete namespace [{}] from cluster. Error: {}'.format(
                namespace, error))

  # Create or update the agent install deployment and related resources.
  err = kube_client.Apply(full_manifest)
  if err:
    raise exceptions.Error(
        'Failed to apply manifest to cluster: {}'.format(err))

  kubectl_log_cmd = (
      'kubectl --kubeconfig={} --context={} logs -n {} -l app={}'.format(
          kube_client.kubeconfig, kube_client.context, namespace,
          AGENT_INSTALL_APP_LABEL))

  def _WriteAgentLogs():
    """Writes logs from the agent install deployment to a temporary file."""
    logs, err = kube_client.Logs(
        namespace, 'deployment/{}'.format(agent_install_deployment_name))
    if err:
      log.warning(
          'Could not fetch Connect agent installation deployment logs: {}'
          .format(err))
      return

    _, tmp_file = tempfile.mkstemp(
        suffix='_{}.log'.format(times.Now().strftime('%Y%m%d-%H%M%S')),
        prefix='gke_connect_',
    )
    files.WriteFileContents(tmp_file, logs, private=True)
    log.status.Print(
        'Connect agent installation deployment logs saved to [{}]'.format(
            tmp_file))

  try:
    succeeded, error = waiter.WaitFor(
        KubernetesPoller(),
        DeploymentPodsAvailableOperation(namespace,
                                         RUNTIME_CONNECT_AGENT_DEPLOYMENT_NAME,
                                         image, kube_client),
        'Waiting for Connect agent to be installed',
        pre_start_sleep_ms=AGENT_INSTALL_INITIAL_WAIT_MS,
        max_wait_ms=AGENT_INSTALL_TIMEOUT_MS,
        wait_ceiling_ms=AGENT_INSTALL_MAX_POLL_INTERVAL_MS,
        sleep_ms=AGENT_INSTALL_INITIAL_POLL_INTERVAL_MS)
  except waiter.TimeoutError:
    # waiter.TimeoutError assumes that the operation is a Google API operation,
    # and prints a debugging string to that effect.
    _WriteAgentLogs()
    raise exceptions.Error(
        'Connect agent installation timed out. Leaving deployment in cluster '
        'for further debugging.\nTo view logs from the cluster:\n\n'
        '{}\n'.format(kubectl_log_cmd))

  _WriteAgentLogs()

  if not succeeded:
    raise exceptions.Error(
        'Connect agent installation did not succeed. To view logs from the '
        'cluster: {}\nKubectl error log: {}'.format(kubectl_log_cmd, error))

  log.status.Print('Connect agent installation succeeded.')
class KnownHosts(object):
  """Represents known hosts file, supports read, write and basic key management.

  Currently a very naive, but sufficient, implementation where each entry is
  simply a string, and all entries are list of those strings. Each entry has
  the form '<hostname> <host key>'.
  """

  # TODO(b/33467618): Rename the file itself
  DEFAULT_PATH = os.path.realpath(files.ExpandHomeDir(
      os.path.join('~', '.ssh', 'google_compute_known_hosts')))

  def __init__(self, known_hosts, file_path):
    """Construct a known hosts representation based on a list of key strings.

    Args:
      known_hosts: str, list each corresponding to a line in known_hosts_file.
      file_path: str, path to the known_hosts_file.
    """
    self.known_hosts = known_hosts
    self.file_path = file_path

  @classmethod
  def FromFile(cls, file_path):
    """Create a KnownHosts object given a known_hosts_file.

    Args:
      file_path: str, path to the known_hosts_file.

    Returns:
      KnownHosts object corresponding to the file. If the file could not be
      opened, the KnownHosts object will have no entries.
    """
    try:
      known_hosts = files.ReadFileContents(file_path).splitlines()
    except files.Error as e:
      # Best-effort read: a missing/unreadable file is treated as empty.
      known_hosts = []
      log.debug('SSH Known Hosts File [{0}] could not be opened: {1}'
                .format(file_path, e))
    return KnownHosts(known_hosts, file_path)

  @classmethod
  def FromDefaultFile(cls):
    """Create a KnownHosts object from the default known_hosts_file.

    Returns:
      KnownHosts object corresponding to the default known_hosts_file.
    """
    return KnownHosts.FromFile(KnownHosts.DEFAULT_PATH)

  def ContainsAlias(self, host_key_alias):
    """Check if a host key alias exists in one of the known hosts.

    Args:
      host_key_alias: str, the host key alias

    Returns:
      bool, True if host_key_alias is in the known hosts file. If the known
      hosts file couldn't be opened it will be treated as if empty and False
      returned.
    """
    return any(host_key_alias in line for line in self.known_hosts)

  def Add(self, hostname, host_key, overwrite=False):
    """Add or update the entry for the given hostname.

    If there is no entry for the given hostname, it will be added. If there
    is an entry already and overwrite_keys is False, nothing will be changed.
    If there is an entry and overwrite_keys is True, the key will be updated
    if it has changed.

    Args:
      hostname: str, The hostname for the known_hosts entry.
      host_key: str, The host key for the given hostname.
      overwrite: bool, If true, will overwrite the entry corresponding to
        hostname with the new host_key if it already exists. If false and an
        entry already exists for hostname, will ignore the new host_key value.
    """
    new_key_entry = '{0} {1}'.format(hostname, host_key)
    for i, key in enumerate(self.known_hosts):
      # Bug fix: match the full hostname field ('<hostname> <key>'), not a
      # raw prefix — a raw startswith(hostname) would falsely match an entry
      # for a longer hostname sharing the prefix (e.g. 'host1' vs 'host10').
      if key.startswith(hostname + ' '):
        if overwrite:
          self.known_hosts[i] = new_key_entry
        break
    else:
      self.known_hosts.append(new_key_entry)

  def Write(self):
    """Writes the file to disk."""
    files.WriteFileContents(
        self.file_path, '\n'.join(self.known_hosts) + '\n', private=True)
def Run(self, args):
  """See ssh_utils.BaseSSHCommand.Run."""
  holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
  client = holder.client

  ssh_helper = ssh_utils.BaseSSHHelper()
  ssh_helper.Run(args)
  ssh_helper.keys.EnsureKeysExist(args.force_key_file_overwrite,
                                  allow_passphrase=True)

  ssh_config_file = files.ExpandHomeDir(args.ssh_config_file or
                                        ssh.PER_USER_SSH_CONFIG_FILE)

  instances = None
  try:
    existing_content = files.ReadFileContents(ssh_config_file)
  except files.Error as e:
    # A missing config file is fine; start from empty content.
    existing_content = ''
    log.debug('SSH Config File [{0}] could not be opened: {1}'.format(
        ssh_config_file, e))

  if args.remove:
    compute_section = ''
    try:
      new_content = _RemoveComputeSection(existing_content)
    except MultipleComputeSectionsError:
      # Re-raise with the file path so the user knows which file is corrupt.
      raise MultipleComputeSectionsError(ssh_config_file)
  else:
    ssh_helper.EnsureSSHKeyIsInProject(
        client, ssh.GetDefaultSshUsername(warn_on_account_user=True), None)
    instances = list(self.GetRunningInstances(client))
    if instances:
      compute_section = _BuildComputeSection(
          instances, ssh_helper.keys.key_file, ssh.KnownHosts.DEFAULT_PATH)
    else:
      compute_section = ''

  if existing_content and not args.remove:
    try:
      new_content = _MergeComputeSections(existing_content, compute_section)
    except MultipleComputeSectionsError:
      raise MultipleComputeSectionsError(ssh_config_file)
  elif not existing_content:
    new_content = compute_section

  if args.dry_run:
    log.out.write(new_content or '')
    return

  if new_content != existing_content:
    if (os.path.exists(ssh_config_file) and
        platforms.OperatingSystem.Current() is not
        platforms.OperatingSystem.WINDOWS):
      ssh_config_perms = os.stat(ssh_config_file).st_mode
      # From `man 5 ssh_config`:
      # this file must have strict permissions: read/write for the user,
      # and not accessible by others.
      # We check that here:
      if not (ssh_config_perms & stat.S_IRWXU == stat.S_IWUSR | stat.S_IRUSR
              and ssh_config_perms & stat.S_IWGRP == 0 and
              ssh_config_perms & stat.S_IWOTH == 0):
        # Bug fix: the '{0}' placeholder was never filled in — the literal
        # text '[{0}]' was printed instead of the config file path.
        log.warning('Invalid permissions on [{0}]. Please change to match ssh '
                    'requirements (see man 5 ssh).'.format(ssh_config_file))

    # TODO(b/36050483): This write will not work very well if there is
    # a lot of write contention for the SSH config file. We should
    # add a function to do a better job at "atomic file writes".
    files.WriteFileContents(ssh_config_file, new_content, private=True)

  if compute_section:
    log.out.write(
        textwrap.dedent("""\
        You should now be able to use ssh/scp with your instances.
        For example, try running:

          $ ssh {alias}

        """.format(alias=_CreateAlias(instances[0]))))

  elif not instances and not args.remove:
    log.warning(
        'No host aliases were added to your SSH configs because you do not '
        'have any running instances. Try running this command again after '
        'running some instances.')