def TemporaryKubeconfig(location_id, cluster_id):
  """Context manager providing a throwaway kubeconfig for one GKE cluster.

  A kubeconfig file containing only the credentials of the specified GKE
  cluster is created inside a temporary directory, and the 'KUBECONFIG'
  value in `os.environ` is pointed at it for the duration of the context.
  Subprocesses started with googlecloudsdk.core.execution_utils.Exec while
  inside the context therefore see the temporary KUBECONFIG environment
  variable. The file and the environment override are both removed on exit.

  Args:
    location_id: string, the id of the location to which the cluster belongs
    cluster_id: string, the id of the cluster

  Raises:
    Error: If unable to get credentials for kubernetes cluster.

  Yields:
    the path to the temporary kubeconfig file
  """
  gke_util.CheckKubectlInstalled()
  with files.TemporaryDirectory() as temp_dir:
    temp_kubeconfig = os.path.join(temp_dir, 'kubeconfig')
    # Remember the caller's KUBECONFIG so it can be restored afterwards.
    saved_kubeconfig = encoding.GetEncodedValue(os.environ,
                                                KUBECONFIG_ENV_VAR_NAME)
    try:
      encoding.SetEncodedValue(os.environ, KUBECONFIG_ENV_VAR_NAME,
                               temp_kubeconfig)
      gke_api = gke_api_adapter.NewAPIAdapter(GKE_API_VERSION)
      cluster_ref = gke_api.ParseCluster(cluster_id, location_id)
      cluster = gke_api.GetCluster(cluster_ref)
      auth = cluster.masterAuth
      has_client_creds = bool(
          auth and auth.clientCertificate and auth.clientKey)
      if (not has_client_creds
          and not gke_util.ClusterConfig.UseGCPAuthProvider()):
        raise Error('Unable to get cluster credentials. User must have edit '
                    'permission on {}'.format(cluster_ref.projectId))
      gke_util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
      yield temp_kubeconfig
    finally:
      # Restores the previous value; a None value clears the variable.
      encoding.SetEncodedValue(os.environ, KUBECONFIG_ENV_VAR_NAME,
                               saved_kubeconfig)
def _GetGKEKubeconfig(project, location_id, cluster_id, temp_kubeconfig_dir):
  """Fetches the kubeconfig of a GKE cluster using the GKE APIs.

  The 'KUBECONFIG' value in `os.environ` is temporarily updated to point at
  a kubeconfig file inside temp_kubeconfig_dir, so subprocesses started with
  googlecloudsdk.core.execution_utils.Exec see the temporary KUBECONFIG
  environment variable. The GKE cluster is validated via the GKE APIs and
  its ClusterConfig object is persisted into that temporary 'KUBECONFIG'.

  Args:
    project: string, the project id of the cluster for which kube config is
      to be fetched
    location_id: string, the id of the location to which the cluster belongs
    cluster_id: string, the id of the cluster
    temp_kubeconfig_dir: TemporaryDirectory object

  Raises:
    Error: If unable to get credentials for kubernetes cluster.

  Returns:
    the path to the kubeconfig file
  """
  kubeconfig_path = os.path.join(temp_kubeconfig_dir.path, 'kubeconfig')
  previous_kubeconfig = encoding.GetEncodedValue(os.environ, 'KUBECONFIG')
  try:
    encoding.SetEncodedValue(os.environ, 'KUBECONFIG', kubeconfig_path)
    gke_api = gke_api_adapter.NewAPIAdapter('v1')
    cluster_ref = gke_api.ParseCluster(cluster_id, location_id, project)
    cluster = gke_api.GetCluster(cluster_ref)
    auth = cluster.masterAuth
    has_creds = auth and auth.clientCertificate and auth.clientKey
    # c_util.ClusterConfig.UseGCPAuthProvider() checks for
    # container/use_client_certificate setting
    if not has_creds and not c_util.ClusterConfig.UseGCPAuthProvider():
      raise c_util.Error(
          'Unable to get cluster credentials. User must have edit '
          'permission on {}'.format(cluster_ref.projectId))
    c_util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
  finally:
    # Restore the caller's environment exactly: put the old value back, or
    # remove the variable entirely if it was not set before.
    if previous_kubeconfig:
      encoding.SetEncodedValue(os.environ, 'KUBECONFIG', previous_kubeconfig)
    else:
      del os.environ['KUBECONFIG']
  return kubeconfig_path
def _BaseRun(args):
  """Base operations for `get-credentials` run command.

  Resolves the krmapihost-prefixed GKE cluster named by args.name in
  args.location, fetches it, and warns if it is not currently running.

  Args:
    args: an argparse namespace with `name` and `location` attributes.

  Returns:
    A (cluster, cluster_ref) tuple for the resolved cluster.
  """
  container_util.CheckKubectlInstalled()
  # Config Controller clusters are always named with this prefix.
  cluster_name = 'krmapihost-' + args.name
  gke_api = container_api_adapter.NewAPIAdapter('v1')
  log.status.Print('Fetching cluster endpoint and auth data.')
  # No explicit project: the adapter falls back to its default resolution.
  cluster_ref = gke_api.ParseCluster(cluster_name, args.location, None)
  cluster = gke_api.GetCluster(cluster_ref)
  if not gke_api.IsRunning(cluster):
    log.warning(NOT_RUNNING_MSG.format(cluster_ref.clusterId))
  return cluster, cluster_ref
def Filter(self, context, args):
  """Modify the context that will be given to this group's commands when run.

  Args:
    context: {str:object}, A set of key-value pairs that can be used for
      common initialization among commands.
    args: argparse.Namespace: The same namespace given to the corresponding
      .Run() invocation.

  Returns:
    The refined command context.
  """
  base.DisableUserProjectQuota()
  # TODO(b/169085077): Remove to migrate to requests.
  base.OptOutRequests()
  context['api_adapter'] = api_adapter.NewAPIAdapter('v1alpha1')
  return context
def Filter(self, context, args):
  """Modify the context that will be given to this group's commands when run.

  Args:
    context: {str:object}, A set of key-value pairs that can be used for
      common initialization among commands.
    args: argparse.Namespace: The same namespace given to the corresponding
      .Run() invocation.

  Returns:
    The refined command context.
  """
  # Honor the container/use_v1_api_client property when picking a version.
  use_v1 = properties.VALUES.container.use_v1_api_client.GetBool()
  context['api_adapter'] = api_adapter.NewAPIAdapter(
      'v1' if use_v1 else 'v1alpha1')
  return context
def Run(self, args):
  """This is what gets called when the user runs this command.

  Args:
    args: an argparse namespace. All the arguments that were provided to this
      command invocation.

  Returns:
    Cluster message for the successfully updated node pool.

  Raises:
    util.Error, if creation failed.
  """
  adapter = api_adapter.NewAPIAdapter()
  pool_ref = adapter.ParseNodePool(args.name)
  options = self.ParseUpdateNodePoolOptions(args)

  # At least one of the two update flags must be supplied.
  if options.enable_autorepair is None and options.enable_autoupgrade is None:
    raise exceptions.MinimumArgumentException(
        ['--[no-]enable-autoupgrade', '--[no-]enable-autorepair'],
        'Please reformat your request.')

  # Announce each requested change (autorepair first, then autoupgrade).
  for flag_value, flag_name in ((options.enable_autorepair, 'autorepair'),
                                (options.enable_autoupgrade, 'autoupgrade')):
    if flag_value is not None:
      log.status.Print(
          messages.AutoUpdateUpgradeRepairMessage(flag_value, flag_name))

  try:
    operation_ref = adapter.UpdateNodePool(pool_ref, options)
    adapter.WaitForOperation(
        operation_ref,
        'Updating node pool {0}'.format(pool_ref.nodePoolId),
        timeout_s=args.timeout)
    pool = adapter.GetNodePool(pool_ref)
  except apitools_exceptions.HttpError as error:
    raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

  log.UpdatedResource(pool_ref)
  return pool
def Filter(self, context, args):
  """Modify the context that will be given to this group's commands when run.

  Args:
    context: {str:object}, A set of key-value pairs that can be used for
      common initialization among commands.
    args: argparse.Namespace: The same namespace given to the corresponding
      .Run() invocation.

  Returns:
    The refined command context.
  """
  base.DisableUserProjectQuota()
  # The use-v1-api property decides between the GA and beta API surfaces.
  version = 'v1' if container_command_util.GetUseV1APIProperty() else 'v1beta1'
  context['api_adapter'] = api_adapter.NewAPIAdapter(version)
  return context
def Filter(self, context, args):
  """Modify the context that will be given to this group's commands when run.

  Args:
    context: {str:object}, A set of key-value pairs that can be used for
      common initialization among commands.
    args: argparse.Namespace: The same namespace given to the corresponding
      .Run() invocation.

  Returns:
    The refined command context.
  """
  # A user-set api_client_overrides property takes precedence over the
  # group's default API version.
  override = properties.VALUES.api_client_overrides.container.Get()
  api_version = override or self.DEFAULT_API_VERSION
  endpoint_url = properties.VALUES.api_endpoint_overrides.container.Get()
  context['api_adapter'] = api_adapter.NewAPIAdapter(api_version, endpoint_url,
                                                     self.Http())
  return context
def ExtractGkeClusterLocationId(env_object):
  """Finds the location ID of the GKE cluster running the provided environment.

  Args:
    env_object: Environment, the environment, likely returned by an API call,
      whose cluster location to extract

  Raises:
    Error: if Kubernetes cluster is not found.

  Returns:
    str, the location ID (a short name like us-central1-b) of the GKE cluster
    running the environment
  """
  # Prefer the location recorded directly on the environment's node config.
  node_location = env_object.config.nodeConfig.location
  if node_location:
    return node_location[node_location.rfind('/') + 1:]

  # Otherwise match the environment's cluster name against the project's
  # cluster list.
  gke_cluster = env_object.config.gkeCluster[
      env_object.config.gkeCluster.rfind('/') + 1:]
  gke_api = gke_api_adapter.NewAPIAdapter(GKE_API_VERSION)
  # GKE is in the middle of deprecating zones in favor of locations, so we
  # read from whichever one has a value.
  cluster_zones = []
  for c in gke_api.ListClusters(parsers.GetProject()).clusters:
    if c.name == gke_cluster:
      cluster_zones.append(c.location[c.location.rfind('/') + 1:] or c.zone)

  if not cluster_zones:
    # This should never happen unless the user has deleted their cluster out
    # of band.
    raise Error('Kubernetes Engine cluster not found.')
  if len(cluster_zones) == 1:
    return cluster_zones[0]

  # Multiple matches: let the user pick.
  choice = console_io.PromptChoice(
      ['[{}]'.format(z) for z in cluster_zones],
      default=0,
      message='Cluster found in more than one location. Please select the '
      'desired location:')
  return cluster_zones[choice]
def SetUp(self):
  """Test fixture: verify no cached cluster config exists, then build adapter."""
  # A leftover ClusterConfig from a previous run would invalidate the tests.
  preexisting = c_util.ClusterConfig.Load(self.CLUSTER_NAME, self.ZONE,
                                          self.PROJECT_ID)
  self.assertIsNone(preexisting)
  self.api_adapter = api_adapter.NewAPIAdapter(self.API_VERSION)
def Run(self, args):
  """Registers a cluster as a fleet membership and deploys the Connect Agent.

  NOTE(review): reconstructed formatting only; logic and runtime strings are
  unchanged. High-level flow as shown by the code below: build a Kubernetes
  client, read credential files, optionally resolve the Workload Identity
  issuer, create (or reuse/update) the Membership resource, then deploy the
  Connect Agent, cleaning up a newly created membership on deploy failure.

  Args:
    args: an argparse namespace; reads CLUSTER_NAME plus many optional
      attributes (gke_uri, kubeconfig, context, public_issuer_url, ...)
      accessed via getattr with defaults.

  Returns:
    The Membership resource object that was created or found.

  Raises:
    exceptions.Error: on credential-file problems, OpenID discovery
      failures, membership conflicts, or membership update failures.
  """
  project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
  # This incidentally verifies that the kubeconfig and context args are valid.
  if self.ReleaseTrack() is base.ReleaseTrack.BETA or self.ReleaseTrack(
  ) is base.ReleaseTrack.ALPHA:
    api_adapter = gke_api_adapter.NewAPIAdapter('v1beta1')
  else:
    api_adapter = gke_api_adapter.NewAPIAdapter('v1')
  with kube_util.KubernetesClient(
      api_adapter=api_adapter,
      gke_uri=getattr(args, 'gke_uri', None),
      gke_cluster=getattr(args, 'gke_cluster', None),
      kubeconfig=getattr(args, 'kubeconfig', None),
      internal_ip=getattr(args, 'internal_ip', False),
      cross_connect_subnetwork=getattr(args, 'cross_connect_subnetwork', None),
      private_endpoint_fqdn=getattr(args, 'private_endpoint_fqdn', None),
      context=getattr(args, 'context', None),
      public_issuer_url=getattr(args, 'public_issuer_url', None),
      enable_workload_identity=getattr(args, 'enable_workload_identity',
                                       False),
  ) as kube_client:
    # Memberships without an explicit location default to 'global'.
    location = getattr(args, 'location', 'global')
    if location is None:
      location = 'global'
    kube_client.CheckClusterAdminPermissions()
    kube_util.ValidateClusterIdentifierFlags(kube_client, args)
    if self.ReleaseTrack() is not base.ReleaseTrack.GA:
      flags.VerifyGetCredentialsFlags(args)
    uuid = kube_util.GetClusterUUID(kube_client)
    # Read the service account files provided in the arguments early, in order
    # to catch invalid files before performing mutating operations.
    # Service Account key file is required if Workload Identity is not
    # enabled.
    # If Workload Identity is enabled, then the Connect Agent uses
    # a Kubernetes Service Account token instead and hence a Google Cloud
    # Platform Service Account key is not required.
    service_account_key_data = ''
    if args.service_account_key_file:
      try:
        service_account_key_data = hub_util.Base64EncodedFileContents(
            args.service_account_key_file)
      except files.Error as e:
        raise exceptions.Error('Could not process {}: {}'.format(
            SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

    docker_credential_data = None
    if args.docker_credential_file:
      try:
        file_content = files.ReadBinaryFileContents(
            files.ExpandHomeDir(args.docker_credential_file))
        docker_credential_data = six.ensure_str(
            file_content, encoding='utf-8')
      except files.Error as e:
        raise exceptions.Error('Could not process {}: {}'.format(
            DOCKER_CREDENTIAL_FILE_FLAG, e))

    gke_cluster_self_link = kube_client.processor.gke_cluster_self_link
    issuer_url = None
    private_keyset_json = None
    if args.enable_workload_identity:
      # public_issuer_url can be None or given by user or gke_cluster_uri
      # (incase of a gke cluster).
      # args.public_issuer_url takes precedence over gke_cluster_uri.
      public_issuer_url = (args.public_issuer_url or
                           kube_client.processor.gke_cluster_uri or None)
      try:
        openid_config_json = six.ensure_str(
            kube_client.GetOpenIDConfiguration(issuer_url=public_issuer_url),
            encoding='utf-8')
      except Exception as e:  # pylint: disable=broad-except
        raise exceptions.Error(
            'Error getting the OpenID Provider Configuration: '
            '{}'.format(e))

      # Extract the issuer URL from the discovery doc.
      issuer_url = json.loads(openid_config_json).get('issuer')
      if not issuer_url:
        raise exceptions.Error(
            'Invalid OpenID Config: '
            'missing issuer: {}'.format(openid_config_json))
      # Ensure public_issuer_url (only non-empty) matches what came back in
      # the discovery doc.
      if public_issuer_url and (public_issuer_url != issuer_url):
        raise exceptions.Error('--public-issuer-url {} did not match issuer '
                               'returned in discovery doc: {}'.format(
                                   public_issuer_url, issuer_url))

      # In the private issuer case, we set private_keyset_json,
      # which is used later to upload the JWKS
      # in the Fleet Membership.
      if args.has_private_issuer:
        private_keyset_json = kube_client.GetOpenIDKeyset()

    # Attempt to create a membership.
    already_exists = False
    obj = None
    # For backward compatiblity, check if a membership was previously created
    # using the cluster uuid.
    parent = api_util.ParentRef(project, location)
    membership_id = uuid
    resource_name = api_util.MembershipRef(project, location, uuid)
    obj = self._CheckMembershipWithUUID(resource_name, args.CLUSTER_NAME)
    # get api version version to pass into create/update membership
    api_server_version = kube_util.GetClusterServerVersion(kube_client)
    if obj:
      # The membership exists and has the same description.
      already_exists = True
    else:
      # Attempt to create a new membership using cluster_name.
      membership_id = args.CLUSTER_NAME
      resource_name = api_util.MembershipRef(project, location,
                                             args.CLUSTER_NAME)
      try:
        self._VerifyClusterExclusivity(kube_client, parent, membership_id)
        obj = api_util.CreateMembership(project, args.CLUSTER_NAME,
                                        args.CLUSTER_NAME, location,
                                        gke_cluster_self_link, uuid,
                                        self.ReleaseTrack(), issuer_url,
                                        private_keyset_json,
                                        api_server_version)
        # Generate CRD Manifest should only be called afer create/update.
        self._InstallOrUpdateExclusivityArtifacts(kube_client, resource_name)
      except apitools_exceptions.HttpConflictError as e:
        # If the error is not due to the object already existing, re-raise.
        error = core_api_exceptions.HttpErrorPayload(e)
        if error.status_description != 'ALREADY_EXISTS':
          raise
        obj = api_util.GetMembership(resource_name, self.ReleaseTrack())
        if not obj.externalId:
          raise exceptions.Error(
              'invalid membership {0} does not have '
              'external_id field set. We cannot determine '
              'if registration is requested against a '
              'valid existing Membership. Consult the '
              'documentation on container fleet memberships '
              'update for more information or run gcloud '
              'container fleet memberships delete {0} if you '
              'are sure that this is an invalid or '
              'otherwise stale Membership'.format(membership_id))
        if obj.externalId != uuid:
          raise exceptions.Error(
              'membership {0} already exists in the project'
              ' with another cluster. If this operation is'
              ' intended, please run `gcloud container '
              'fleet memberships delete {0}` and register '
              'again.'.format(membership_id))
        # The membership exists with same cluster_name.
        already_exists = True

    # In case of an existing membership, check with the user to upgrade the
    # Connect-Agent.
    if already_exists:
      # Update Membership when required. Scenarios that require updates:
      # 1. membership.authority is set, but there is now no issuer URL.
      #    This means the user is disabling Workload Identity.
      # 2. membership.authority is not set, but there is now an
      #    issuer URL. This means the user is enabling Workload Identity.
      # 3. membership.authority is set, but the issuer URL is different
      #    from that set in membership.authority.issuer. This is technically
      #    an error, but we defer to validation in the API.
      # 4. membership.authority.oidcJwks is set, but the private keyset
      #    we got from the cluster differs from the keyset in the membership.
      #    This means the user is updating the public keys, and we should
      #    update to the latest keyset in the membership.
      if (  # scenario 1, disabling WI
          (obj.authority and not issuer_url) or
          # scenario 2, enabling WI
          (issuer_url and not obj.authority) or
          (obj.authority and
           # scenario 3, issuer changed
           ((obj.authority.issuer != issuer_url) or
            # scenario 4, JWKS changed
            (private_keyset_json and obj.authority.oidcJwks and
             (obj.authority.oidcJwks.decode('utf-8') !=
              private_keyset_json))))):
        console_io.PromptContinue(
            message=hub_util.GenerateWIUpdateMsgString(
                obj, issuer_url, resource_name, args.CLUSTER_NAME),
            cancel_on_no=True)
        try:
          api_util.UpdateMembership(
              resource_name,
              obj,
              'authority',
              self.ReleaseTrack(),
              issuer_url=issuer_url,
              oidc_jwks=private_keyset_json)
          # Generate CRD Manifest should only be called afer create/update.
          self._InstallOrUpdateExclusivityArtifacts(kube_client,
                                                    resource_name)
          log.status.Print(
              'Updated the membership [{}] for the cluster [{}]'.format(
                  resource_name, args.CLUSTER_NAME))
        except Exception as e:
          raise exceptions.Error(
              'Error in updating the membership [{}]:{}'.format(
                  resource_name, e))
      else:
        console_io.PromptContinue(
            message='A membership [{}] for the cluster [{}] already exists. '
            'Continuing will reinstall the Connect agent deployment to use a '
            'new image (if one is available).'.format(resource_name,
                                                     args.CLUSTER_NAME),
            cancel_on_no=True)
    else:
      log.status.Print(
          'Created a new membership [{}] for the cluster [{}]'.format(
              resource_name, args.CLUSTER_NAME))

    # Attempt to update the existing agent deployment, or install a new agent
    # if necessary.
    try:
      agent_util.DeployConnectAgent(kube_client, args,
                                    service_account_key_data,
                                    docker_credential_data, resource_name,
                                    self.ReleaseTrack())
    except Exception as e:
      log.status.Print('Error in installing the Connect Agent: {}'.format(e))
      # In case of a new membership, we need to clean up membership and
      # resources if we failed to install the Connect Agent.
      if not already_exists:
        api_util.DeleteMembership(resource_name, self.ReleaseTrack())
        exclusivity_util.DeleteMembershipResources(kube_client)
      raise
    log.status.Print(
        'Finished registering the cluster [{}] with the Fleet.'.format(
            args.CLUSTER_NAME))
    return obj