def testSuccessfulAgentDeploymentWorkloadIdentityAlpha(self):
  """Deploying the Connect Agent with Workload Identity (alpha) succeeds."""
  properties.VALUES.core.project.Set('my-project')

  # Stub out every Kubernetes call the deployment path makes.
  kube = self.mock_kubernetes_client
  kube.Apply.return_value = ('some output', None)
  kube.Logs.return_value = ('Fake log', None)
  kube.Delete.return_value = None
  kube.NamespaceExists.return_value = False
  kube.NamespacesWithLabelSelector.return_value = None

  self.StartObjectPatch(
      gkehub_api_adapter, 'InitAPIAdapter', return_value=self.mock_api_adapter)
  self.StartObjectPatch(api_util, 'GenerateConnectAgentManifest')
  self.StartObjectPatch(p_util, 'GetProjectNumber', return_value=12321)

  # Current SetUp doesn't add alpha track arguments, so manually add
  # --enable-workload-identity and other required args for testing.
  # This is ugly, but it seems like there is no public API to manually set
  # the release track for a command, so we can't just call Register.Args()
  # with the alpha track here.
  # TODO(b/152240680): This is another example of over-reliance on args in
  # deep methods.
  # TODO(b/152762400): Make alpha track arguments easier to test.
  arg_parser = test_util.ArgumentParser()
  arg_parser.add_argument('CLUSTER_NAME', help='foo', type=str)
  for flag in ('--kubeconfig', '--context', '--proxy', '--docker-registry',
               '--version', '--manifest-output-file'):
    arg_parser.add_argument(flag, help='foo', type=str)
  arg_parser.add_argument(
      '--enable-workload-identity', help='foo', action='store_true')

  # This omits --service-account-key-file and ensures that doing so does not
  # cause an exception when deploying Connect Agent with Workload Identity
  # enabled.
  args = arg_parser.parse_args([
      'my-membership',
      '--kubeconfig', '/tmp/kubeconfig',
      '--context', 'default',
      '--enable-workload-identity',
  ])
  agent_util.DeployConnectAgent(
      self.mock_kubernetes_client, args, 'some data', 'some other data',
      'project/my-project/locations/global/memberships/my-membership',
      calliope_base.ReleaseTrack.ALPHA)
def testSuccessfulAgentDeploymentGA(self):
  """Deploying the Connect Agent on the GA track succeeds."""
  properties.VALUES.core.project.Set('my-project')

  # Stub out every Kubernetes call the deployment path makes.
  kube = self.mock_kubernetes_client
  kube.Apply.return_value = ('some output', None)
  kube.Logs.return_value = ('Fake log', None)
  kube.Delete.return_value = None
  kube.NamespaceExists.return_value = False
  kube.NamespacesWithLabelSelector.return_value = None

  self.StartObjectPatch(p_util, 'GetProjectNumber', return_value=12321)
  self.StartObjectPatch(api_util, 'GenerateConnectAgentManifest')

  args = self.parser.parse_args([
      'my-membership',
      '--kubeconfig', '/tmp/kubeconfig',
      '--context', 'default',
      '--service-account-key-file', '/tmp/key.json',
  ])
  agent_util.DeployConnectAgent(
      self.mock_kubernetes_client, args, 'some data', 'some other data',
      'project/my-project/locations/global/memberships/my-membership',
      calliope_base.ReleaseTrack.GA)
def Run(self, args):
  """Registers the cluster with the Hub and installs the Connect Agent.

  Validates the cluster/kubeconfig flags, optionally configures Workload
  Identity (discovering the cluster's OpenID issuer), creates or reuses a
  Hub Membership, and deploys the Connect Agent into the cluster.

  Args:
    args: the parsed argument namespace for this command invocation.

  Returns:
    The Membership resource that was created or found.

  Raises:
    exceptions.Error: on unreadable key files, OpenID discovery failures,
      conflicting memberships, or membership update failures.
  """
  project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
  # This incidentally verifies that the kubeconfig and context args are valid.
  with kube_util.KubernetesClient(args) as kube_client:
    kube_client.CheckClusterAdminPermissions()
    kube_util.ValidateClusterIdentifierFlags(kube_client, args)
    uuid = kube_util.GetClusterUUID(kube_client)
    # Read the service account files provided in the arguments early, in order
    # to catch invalid files before performing mutating operations.
    # Service Account key file is required if Workload Identity is not
    # enabled.
    # If Workload Identity is enabled, then the Connect Agent uses
    # a Kubernetes Service Account token instead and hence a GCP Service
    # Account key is not required.
    service_account_key_data = ''
    if args.service_account_key_file:
      try:
        service_account_key_data = hub_util.Base64EncodedFileContents(
            args.service_account_key_file)
      except files.Error as e:
        raise exceptions.Error('Could not process {}: {}'.format(
            SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

    docker_credential_data = None
    if args.docker_credential_file:
      try:
        docker_credential_data = hub_util.Base64EncodedFileContents(
            args.docker_credential_file)
      except files.Error as e:
        raise exceptions.Error('Could not process {}: {}'.format(
            DOCKER_CREDENTIAL_FILE_FLAG, e))

    gke_cluster_self_link = kube_client.processor.gke_cluster_self_link

    issuer_url = None
    private_keyset_json = None
    if args.enable_workload_identity:
      # public_issuer_url can be None or given by user or gke_cluster_uri
      # (incase of a gke cluster).
      # args.public_issuer_url takes precedence over gke_cluster_uri.
      public_issuer_url = args.public_issuer_url or kube_client.processor.gke_cluster_uri or None

      try:
        openid_config_json = six.ensure_str(
            kube_client.GetOpenIDConfiguration(issuer_url=public_issuer_url),
            encoding='utf-8')
      except Exception as e:  # pylint: disable=broad-except
        raise exceptions.Error(
            'Error getting the OpenID Provider Configuration: '
            '{}'.format(e))

      # Extract the issuer URL from the discovery doc.
      issuer_url = json.loads(openid_config_json).get('issuer')
      if not issuer_url:
        raise exceptions.Error(
            'Invalid OpenID Config: '
            'missing issuer: {}'.format(openid_config_json))
      # Ensure public_issuer_url (only non-empty) matches what came back in
      # the discovery doc.
      if public_issuer_url and (public_issuer_url != issuer_url):
        raise exceptions.Error('--public-issuer-url {} did not match issuer '
                               'returned in discovery doc: {}'.format(
                                   public_issuer_url, issuer_url))

      # Request the JWKS from the cluster if we need it (either for setting
      # up the GCS bucket or getting public keys for private issuers). In
      # the private issuer case, we set private_keyset_json, which is used
      # later to upload the JWKS in the Hub Membership.
      if self.ReleaseTrack() is base.ReleaseTrack.ALPHA:
        if args.manage_workload_identity_bucket:
          api_util.CreateWorkloadIdentityBucket(project, issuer_url,
                                                openid_config_json,
                                                kube_client.GetOpenIDKeyset())
        elif args.has_private_issuer:
          private_keyset_json = kube_client.GetOpenIDKeyset()

    # Attempt to create a membership.
    already_exists = False

    obj = None
    # For backward compatiblity, check if a membership was previously created
    # using the cluster uuid.
    parent = api_util.ParentRef(project, 'global')
    membership_id = uuid
    resource_name = api_util.MembershipRef(project, 'global', uuid)
    obj = self._CheckMembershipWithUUID(resource_name, args.CLUSTER_NAME)
    if obj:
      # The membership exists and has the same description.
      already_exists = True
    else:
      # Attempt to create a new membership using cluster_name.
      membership_id = args.CLUSTER_NAME
      resource_name = api_util.MembershipRef(project, 'global',
                                             args.CLUSTER_NAME)
      try:
        self._VerifyClusterExclusivity(kube_client, parent, membership_id)
        obj = api_util.CreateMembership(project, args.CLUSTER_NAME,
                                        args.CLUSTER_NAME,
                                        gke_cluster_self_link, uuid,
                                        self.ReleaseTrack(), issuer_url,
                                        private_keyset_json)
      except apitools_exceptions.HttpConflictError as e:
        # If the error is not due to the object already existing, re-raise.
        error = core_api_exceptions.HttpErrorPayload(e)
        if error.status_description != 'ALREADY_EXISTS':
          raise
        obj = api_util.GetMembership(resource_name, self.ReleaseTrack())
        if not obj.externalId:
          raise exceptions.Error(
              'invalid membership {0} does not have '
              'external_id field set. We cannot determine '
              'if registration is requested against a '
              'valid existing Membership. Consult the '
              'documentation on container hub memberships '
              'update for more information or run gcloud '
              'container hub memberships delete {0} if you '
              'are sure that this is an invalid or '
              'otherwise stale Membership'.format(membership_id))
        if obj.externalId != uuid:
          raise exceptions.Error(
              'membership {0} already exists in the project'
              ' with another cluster. If this operation is'
              ' intended, please run `gcloud container '
              'hub memberships delete {0}` and register '
              'again.'.format(membership_id))
        # The membership exists with same cluster_name.
        already_exists = True

    # In case of an existing membership, check with the user to upgrade the
    # Connect-Agent.
    if already_exists:
      # Update Membership when required. Scenarios that require updates:
      # 1. membership.authority is set, but there is now no issuer URL.
      #    This means the user is disabling Workload Identity.
      # 2. membership.authority is not set, but there is now an
      #    issuer URL. This means the user is enabling Workload Identity.
      # 3. membership.authority is set, but the issuer URL is different
      #    from that set in membership.authority.issuer. This is technically
      #    an error, but we defer to validation in the API.
      # 4. membership.authority.oidcJwks is set, but the private keyset
      #    we got from the cluster differs from the keyset in the membership.
      #    This means the user is updating the public keys, and we should
      #    update to the latest keyset in the membership.
      if (
          # scenario 1, disabling WI
          (obj.authority and not issuer_url) or
          # scenario 2, enabling WI
          (issuer_url and not obj.authority) or
          (obj.authority and
           # scenario 3, issuer changed
           ((obj.authority.issuer != issuer_url) or
            # scenario 4, JWKS changed
            (private_keyset_json and obj.authority.oidcJwks and
             (obj.authority.oidcJwks.decode('utf-8') !=
              private_keyset_json))))):
        # Prompt before mutating the membership's Workload Identity config.
        console_io.PromptContinue(
            message=hub_util.GenerateWIUpdateMsgString(
                obj, issuer_url, resource_name, args.CLUSTER_NAME),
            cancel_on_no=True)
        try:
          api_util.UpdateMembership(
              resource_name,
              obj,
              'authority',
              self.ReleaseTrack(),
              issuer_url=issuer_url,
              oidc_jwks=private_keyset_json)
          log.status.Print(
              'Updated the membership [{}] for the cluster [{}]'.format(
                  resource_name, args.CLUSTER_NAME))
        except Exception as e:
          raise exceptions.Error(
              'Error in updating the membership [{}]:{}'.format(
                  resource_name, e))
      else:
        # No membership change needed; just confirm the agent reinstall.
        console_io.PromptContinue(
            message='A membership [{}] for the cluster [{}] already exists. '
            'Continuing will reinstall the Connect agent deployment to use a '
            'new image (if one is available).'.format(resource_name,
                                                      args.CLUSTER_NAME),
            cancel_on_no=True)
    else:
      log.status.Print(
          'Created a new membership [{}] for the cluster [{}]'.format(
              resource_name, args.CLUSTER_NAME))

    # Attempt to update the existing agent deployment, or install a new agent
    # if necessary.
    try:
      self._InstallOrUpdateExclusivityArtifacts(kube_client, resource_name)
      agent_util.DeployConnectAgent(kube_client, args,
                                    service_account_key_data,
                                    docker_credential_data, resource_name,
                                    self.ReleaseTrack())
    except Exception as e:
      log.status.Print('Error in installing the Connect Agent: {}'.format(e))
      # In case of a new membership, we need to clean up membership and
      # resources if we failed to install the Connect Agent.
      if not already_exists:
        api_util.DeleteMembership(resource_name, self.ReleaseTrack())
        exclusivity_util.DeleteMembershipResources(kube_client)
      raise
    log.status.Print(
        'Finished registering the cluster [{}] with the Hub.'.format(
            args.CLUSTER_NAME))
    return obj
def Run(self, args):
  """Registers the cluster with the Hub and installs the Connect Agent.

  Verifies cluster exclusivity, reads the credential files, creates a Hub
  Membership (or reuses an existing one with a matching description), and
  deploys the Connect Agent into the cluster.

  Args:
    args: the parsed argument namespace for this command invocation.

  Returns:
    The Membership resource that was created or found.

  Raises:
    exceptions.Error: on unreadable credential files or a conflicting
      existing membership.
  """
  project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
  # This incidentally verifies that the kubeconfig and context args are valid.
  kube_client = kube_util.KubernetesClient(args)
  uuid = kube_util.GetClusterUUID(kube_client)
  self._VerifyClusterExclusivity(kube_client, project, args.context, uuid)

  # Read the service account files provided in the arguments early, in order
  # to catch invalid files before performing mutating operations.
  try:
    service_account_key_data = hub_util.Base64EncodedFileContents(
        args.service_account_key_file)
  except files.Error as e:
    raise exceptions.Error('Could not process {}: {}'.format(
        SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

  docker_credential_data = None
  if args.docker_credential_file:
    try:
      docker_credential_data = hub_util.Base64EncodedFileContents(
          args.docker_credential_file)
    except files.Error as e:
      raise exceptions.Error('Could not process {}: {}'.format(
          DOCKER_CREDENTIAL_FILE_FLAG, e))

  gke_cluster_self_link = api_util.GKEClusterSelfLink(args)

  # The full resource name of the membership for this registration flow.
  name = 'projects/{}/locations/global/memberships/{}'.format(project, uuid)

  # Attempt to create a membership.
  already_exists = False
  try:
    exclusivity_util.ApplyMembershipResources(kube_client, project)
    obj = api_util.CreateMembership(project, uuid, args.CLUSTER_NAME,
                                    gke_cluster_self_link)
  except apitools_exceptions.HttpConflictError as e:
    # If the error is not due to the object already existing, re-raise.
    error = core_api_exceptions.HttpErrorPayload(e)
    if error.status_description != 'ALREADY_EXISTS':
      raise

    # The membership already exists. Check to see if it has the same
    # description (i.e., user-visible cluster name).
    #
    # This intentionally does not verify that the gke_cluster_self_link is
    # equivalent: this check is meant to prevent the user from updating the
    # Connect agent in a cluster that is different from the one that they
    # expect, and is not required for the proper functioning of the agent or
    # the Hub.
    obj = api_util.GetMembership(name)
    if obj.description != args.CLUSTER_NAME:
      # A membership exists, but does not have the same description. This is
      # possible if two different users attempt to register the same
      # cluster, or if the user is upgrading and has passed a different
      # cluster name. Treat this as an error: even in the upgrade case,
      # this is useful to prevent the user from upgrading the wrong cluster.
      raise exceptions.Error(
          'There is an existing membership, [{}], that conflicts with [{}]. '
          'Please delete it before continuing:\n\n'
          '  gcloud {}container hub memberships delete {}'.format(
              obj.description, args.CLUSTER_NAME,
              hub_util.ReleaseTrackCommandPrefix(self.ReleaseTrack()), name))

    # The membership exists and has the same description. `obj` already
    # holds the fetched membership, so no further lookup is needed.
    already_exists = True
    console_io.PromptContinue(
        message='A membership for [{}] already exists. Continuing will '
        'reinstall the Connect agent deployment to use a new image (if one '
        'is available).'.format(args.CLUSTER_NAME),
        cancel_on_no=True)

  # Attempt to update the existing agent deployment, or install a new agent
  # if necessary. Only clean up the membership and its resources when this
  # registration created them (i.e., the membership did not already exist).
  try:
    agent_util.DeployConnectAgent(args, service_account_key_data,
                                  docker_credential_data, name)
  except BaseException:
    if not already_exists:
      api_util.DeleteMembership(name)
      exclusivity_util.DeleteMembershipResources(kube_client)
    raise
  return obj
def Run(self, args):
  """Registers the cluster with the Hub and installs the Connect Agent.

  Validates the cluster/kubeconfig flags, optionally configures Workload
  Identity on the alpha track (discovering the cluster's OpenID issuer and
  optionally managing a GCS bucket for the provider config), creates or
  reuses a Hub Membership, and deploys the Connect Agent into the cluster.

  Args:
    args: the parsed argument namespace for this command invocation.

  Returns:
    The Membership resource that was created or found.

  Raises:
    exceptions.Error: on unreadable key files, OpenID discovery failures,
      or conflicting memberships.
  """
  project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
  # This incidentally verifies that the kubeconfig and context args are valid.
  with kube_util.KubernetesClient(args) as kube_client:
    kube_client.CheckClusterAdminPermissions()
    kube_util.ValidateClusterIdentifierFlags(kube_client, args)
    uuid = kube_util.GetClusterUUID(kube_client)
    # Read the service account files provided in the arguments early, in order
    # to catch invalid files before performing mutating operations.
    try:
      service_account_key_data = hub_util.Base64EncodedFileContents(
          args.service_account_key_file)
    except files.Error as e:
      raise exceptions.Error('Could not process {}: {}'.format(
          SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

    docker_credential_data = None
    if args.docker_credential_file:
      try:
        docker_credential_data = hub_util.Base64EncodedFileContents(
            args.docker_credential_file)
      except files.Error as e:
        raise exceptions.Error('Could not process {}: {}'.format(
            DOCKER_CREDENTIAL_FILE_FLAG, e))

    gke_cluster_self_link = kube_client.processor.gke_cluster_self_link

    issuer_url = None
    # enable_workload_identity, public_issuer_url, and
    # manage_workload_identity_bucket are only properties if we are on the
    # alpha track
    if (self.ReleaseTrack() is base.ReleaseTrack.ALPHA and
        args.enable_workload_identity):
      if args.public_issuer_url:
        issuer_url = args.public_issuer_url
        # Use the user-provided public URL, and ignore the built-in endpoints.
        try:
          openid_config_json = kube_client.GetOpenIDConfiguration(
              issuer_url=args.public_issuer_url)
        except Exception as e:  # pylint: disable=broad-except
          raise exceptions.Error(
              'Please double check that --public-issuer-url was set '
              'correctly: {}'.format(e))
      else:
        # Since the user didn't specify a public URL, try to use the cluster's
        # built-in endpoints.
        try:
          openid_config_json = kube_client.GetOpenIDConfiguration()
        except Exception as e:  # pylint: disable=broad-except
          raise exceptions.Error(
              'Please double check that it is possible to access the '
              '/.well-known/openid-configuration endpoint on the cluster: '
              '{}'.format(e))

      # Extract the issuer URL from the discovery doc.
      issuer_url = json.loads(openid_config_json).get('issuer')
      if not issuer_url:
        raise exceptions.Error(
            'Invalid OpenID Config: '
            'missing issuer: {}'.format(openid_config_json))
      # If a public issuer URL was provided, ensure it matches what came back
      # in the discovery doc.
      elif args.public_issuer_url \
          and args.public_issuer_url != issuer_url:
        raise exceptions.Error('--public-issuer-url {} did not match issuer '
                               'returned in discovery doc: {}'.format(
                                   args.public_issuer_url, issuer_url))

      # Set up the GCS bucket that serves OpenID Provider Config and JWKS.
      if args.manage_workload_identity_bucket:
        openid_keyset_json = kube_client.GetOpenIDKeyset()
        api_util.CreateWorkloadIdentityBucket(project, issuer_url,
                                              openid_config_json,
                                              openid_keyset_json)

    # Attempt to create a membership.
    already_exists = False

    obj = None
    # For backward compatiblity, check if a membership was previously created
    # using the cluster uuid.
    parent = api_util.ParentRef(project, 'global')
    membership_id = uuid
    resource_name = api_util.MembershipRef(project, 'global', uuid)
    obj = self._CheckMembershipWithUUID(resource_name, args.CLUSTER_NAME)
    if obj:
      # The membership exists and has the same description.
      already_exists = True
    else:
      # Attempt to create a new membership using cluster_name.
      membership_id = args.CLUSTER_NAME
      resource_name = api_util.MembershipRef(project, 'global',
                                             args.CLUSTER_NAME)
      try:
        self._VerifyClusterExclusivity(kube_client, parent, membership_id)
        obj = api_util.CreateMembership(project, args.CLUSTER_NAME,
                                        args.CLUSTER_NAME,
                                        gke_cluster_self_link, uuid,
                                        self.ReleaseTrack(), issuer_url)
      except apitools_exceptions.HttpConflictError as e:
        # If the error is not due to the object already existing, re-raise.
        error = core_api_exceptions.HttpErrorPayload(e)
        if error.status_description != 'ALREADY_EXISTS':
          raise
        obj = api_util.GetMembership(resource_name, self.ReleaseTrack())
        if not obj.externalId:
          raise exceptions.Error(
              'invalid membership {} does not have '
              'external_id field set. We cannot determine '
              'if registration is requested against a '
              'valid existing Membership. Consult the '
              'documentation on container hub memberships '
              'update for more information or run gcloud '
              'container hub memberships delete {} if you '
              'are sure that this is an invalid or '
              'otherwise stale Membership'.format(membership_id,
                                                  membership_id))
        if obj.externalId != uuid:
          raise exceptions.Error(
              'membership {} already exists in the project'
              ' with another cluster. If this operation is'
              ' intended, please run `gcloud container '
              'hub memberships delete {}` and register '
              'again.'.format(membership_id, membership_id))
        # The membership exists with same cluster_name.
        already_exists = True

    # In case of an existing membership, check with the user to upgrade the
    # Connect-Agent.
    if already_exists:
      console_io.PromptContinue(
          message='A membership [{}] for the cluster [{}] already exists. '
          'Continuing will reinstall the Connect agent deployment to use a '
          'new image (if one is available).'.format(resource_name,
                                                    args.CLUSTER_NAME),
          cancel_on_no=True)
    else:
      log.status.Print(
          'Created a new membership [{}] for the cluster [{}]'.format(
              resource_name, args.CLUSTER_NAME))

    # Attempt to update the existing agent deployment, or install a new agent
    # if necessary.
    try:
      self._InstallOrUpdateExclusivityArtifacts(kube_client, resource_name)
      agent_util.DeployConnectAgent(kube_client, args,
                                    service_account_key_data,
                                    docker_credential_data, resource_name,
                                    self.ReleaseTrack())
    except Exception as e:
      log.status.Print('Error in installing the Connect Agent: {}'.format(e))
      # In case of a new membership, we need to clean up membership and
      # resources if we failed to install the Connect Agent.
      if not already_exists:
        api_util.DeleteMembership(resource_name, self.ReleaseTrack())
        exclusivity_util.DeleteMembershipResources(kube_client)
      raise
    log.status.Print(
        'Finished registering the cluster [{}] with the Hub.'.format(
            args.CLUSTER_NAME))
    return obj
def Run(self, args):
  """Registers the cluster with the Hub and installs the Connect Agent.

  Validates the cluster/kubeconfig flags, creates or reuses a Hub
  Membership (optionally recording a public issuer URL on the alpha
  track), and deploys the Connect Agent into the cluster.

  Args:
    args: the parsed argument namespace for this command invocation.

  Returns:
    The Membership resource that was created or found.

  Raises:
    exceptions.Error: on unreadable key files or conflicting memberships.
  """
  project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
  # This incidentally verifies that the kubeconfig and context args are valid.
  with kube_util.KubernetesClient(args) as kube_client:
    kube_client.CheckClusterAdminPermissions()
    kube_util.ValidateClusterIdentifierFlags(kube_client, args)
    uuid = kube_util.GetClusterUUID(kube_client)
    # Read the service account files provided in the arguments early, in order
    # to catch invalid files before performing mutating operations.
    try:
      service_account_key_data = hub_util.Base64EncodedFileContents(
          args.service_account_key_file)
    except files.Error as e:
      raise exceptions.Error('Could not process {}: {}'.format(
          SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

    docker_credential_data = None
    if args.docker_credential_file:
      try:
        docker_credential_data = hub_util.Base64EncodedFileContents(
            args.docker_credential_file)
      except files.Error as e:
        raise exceptions.Error('Could not process {}: {}'.format(
            DOCKER_CREDENTIAL_FILE_FLAG, e))

    gke_cluster_self_link = kube_client.processor.gke_cluster_self_link

    issuer_url = None
    # public_issuer_url is only a property if we are on the alpha track
    if self.ReleaseTrack() is base.ReleaseTrack.ALPHA and \
        args.public_issuer_url:
      issuer_url = args.public_issuer_url

    # Attempt to create a membership.
    already_exists = False

    obj = None
    # For backward compatiblity, check if a membership was previously created
    # using the cluster uuid.
    parent = api_util.ParentRef(project, 'global')
    membership_id = uuid
    resource_name = api_util.MembershipRef(project, 'global', uuid)
    obj = self._CheckMembershipWithUUID(resource_name, args.CLUSTER_NAME)
    if obj:
      # The membership exists and has the same description.
      already_exists = True
    else:
      # Attempt to create a new membership using cluster_name.
      membership_id = args.CLUSTER_NAME
      resource_name = api_util.MembershipRef(project, 'global',
                                             args.CLUSTER_NAME)
      try:
        self._VerifyClusterExclusivity(kube_client, parent, membership_id)
        obj = api_util.CreateMembership(project, args.CLUSTER_NAME,
                                        args.CLUSTER_NAME,
                                        gke_cluster_self_link, uuid,
                                        self.ReleaseTrack(), issuer_url)
      except apitools_exceptions.HttpConflictError as e:
        # If the error is not due to the object already existing, re-raise.
        error = core_api_exceptions.HttpErrorPayload(e)
        if error.status_description != 'ALREADY_EXISTS':
          raise
        obj = api_util.GetMembership(resource_name, self.ReleaseTrack())
        if not obj.externalId:
          raise exceptions.Error(
              'invalid membership {} does not have '
              'external_id field set. We cannot determine '
              'if registration is requested against a '
              'valid existing Membership. Consult the '
              'documentation on container hub memberships '
              'update for more information or run gcloud '
              'container hub memberships delete {} if you '
              'are sure that this is an invalid or '
              'otherwise stale Membership'.format(membership_id,
                                                  membership_id))
        if obj.externalId != uuid:
          raise exceptions.Error(
              'membership {} already exists in the project'
              ' with another cluster. If this operation is'
              ' intended, please run `gcloud container '
              'hub memberships delete {}` and register '
              'again.'.format(membership_id, membership_id))
        # The membership exists with same cluster_name.
        already_exists = True

    # In case of an existing membership, check with the user to upgrade the
    # Connect-Agent.
    if already_exists:
      console_io.PromptContinue(
          message='A membership [{}] for the cluster [{}] already exists. '
          'Continuing will reinstall the Connect agent deployment to use a '
          'new image (if one is available).'.format(resource_name,
                                                    args.CLUSTER_NAME),
          cancel_on_no=True)
    else:
      log.status.Print(
          'Created a new membership [{}] for the cluster [{}]'.format(
              resource_name, args.CLUSTER_NAME))

    # Attempt to update the existing agent deployment, or install a new agent
    # if necessary.
    try:
      self._InstallOrUpdateExclusivityArtifacts(kube_client, resource_name)
      agent_util.DeployConnectAgent(kube_client, args,
                                    service_account_key_data,
                                    docker_credential_data, resource_name,
                                    self.ReleaseTrack())
    except Exception as e:
      log.status.Print('Error in installing the Connect Agent: {}'.format(e))
      # In case of a new membership, we need to clean up membership and
      # resources if we failed to install the Connect Agent.
      if not already_exists:
        api_util.DeleteMembership(resource_name, self.ReleaseTrack())
        exclusivity_util.DeleteMembershipResources(kube_client)
      raise
    log.status.Print(
        'Finished registering the cluster [{}] with the Hub.'.format(
            args.CLUSTER_NAME))
    return obj
def Run(self, args):
  """Registers the cluster with the Hub and installs the Connect Agent.

  Reads the credential files, creates a Hub Membership (or reuses an
  existing one, checking it by cluster UUID for backward compatibility and
  then by cluster name), and deploys the Connect Agent into the cluster.

  Args:
    args: the parsed argument namespace for this command invocation.

  Returns:
    The Membership resource that was created or found.

  Raises:
    exceptions.Error: on unreadable credential files.
  """
  project = arg_utils.GetFromNamespace(args, '--project', use_defaults=True)
  # This incidentally verifies that the kubeconfig and context args are valid.
  kube_client = kube_util.KubernetesClient(args)
  uuid = kube_util.GetClusterUUID(kube_client)

  # Read the service account files provided in the arguments early, in order
  # to catch invalid files before performing mutating operations.
  try:
    service_account_key_data = hub_util.Base64EncodedFileContents(
        args.service_account_key_file)
  except files.Error as e:
    raise exceptions.Error('Could not process {}: {}'.format(
        SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

  docker_credential_data = None
  if args.docker_credential_file:
    try:
      docker_credential_data = hub_util.Base64EncodedFileContents(
          args.docker_credential_file)
    except files.Error as e:
      raise exceptions.Error('Could not process {}: {}'.format(
          DOCKER_CREDENTIAL_FILE_FLAG, e))

  # Computed once; the original code fetched this self link twice.
  gke_cluster_self_link = api_util.GKEClusterSelfLink(args)

  # Attempt to create a membership.
  already_exists = False

  # For backward compatiblity, check if a membership was previously created
  # using the cluster uuid.
  parent = api_util.ParentRef(project, 'global')
  membership_id = uuid
  resource_name = api_util.MembershipRef(project, 'global', uuid)
  obj = self._CheckMembershipWithUUID(resource_name, args.CLUSTER_NAME)
  if obj:
    # The membership exists and has the same description.
    already_exists = True
  else:
    # Attempt to create a new membership using cluster_name.
    membership_id = args.CLUSTER_NAME
    resource_name = api_util.MembershipRef(project, 'global',
                                           args.CLUSTER_NAME)
    try:
      self._VerifyClusterExclusivity(kube_client, parent, membership_id)
      obj = api_util.CreateMembership(project, args.CLUSTER_NAME,
                                      args.CLUSTER_NAME,
                                      gke_cluster_self_link, uuid,
                                      self.ReleaseTrack())
    except apitools_exceptions.HttpConflictError as e:
      # If the error is not due to the object already existing, re-raise.
      error = core_api_exceptions.HttpErrorPayload(e)
      if error.status_description != 'ALREADY_EXISTS':
        raise
      # The membership exists with same cluster_name.
      already_exists = True
      obj = api_util.GetMembership(resource_name, self.ReleaseTrack())

  # In case of an existing membership, check with the user to upgrade the
  # Connect-Agent.
  if already_exists:
    console_io.PromptContinue(
        message='A membership for [{}] already exists. Continuing will '
        'reinstall the Connect agent deployment to use a new image (if one '
        'is available).'.format(resource_name),
        cancel_on_no=True)

  # Attempt to update the existing agent deployment, or install a new agent
  # if necessary.
  try:
    self._InstallOrUpdateExclusivityArtifacts(kube_client, resource_name)
    agent_util.DeployConnectAgent(args, service_account_key_data,
                                  docker_credential_data, resource_name,
                                  self.ReleaseTrack())
  except BaseException:
    # In case of a new membership, we need to clean up membership and
    # resources if we failed to install the Connect Agent.
    if not already_exists:
      api_util.DeleteMembership(resource_name, self.ReleaseTrack())
      exclusivity_util.DeleteMembershipResources(kube_client)
    raise
  return obj