Example #1
    def testErrorsParsingProviderID(self, provider_id):
        # provider_id is supplied by test parameterization (not shown in this
        # excerpt).
        self.mock_old_kubernetes_client.GetResourceField.side_effect = [
            ('instance_id', None),
            (provider_id, None),
        ]
        with self.assertRaisesRegex(exceptions.Error, 'parsing.*provider ID'):
            api_util.GKEClusterSelfLink(self.mock_old_kubernetes_client)
Example #2
    def testInstanceWithoutMetadataFromComputeAPIRequest(self):
        self.mock_old_kubernetes_client.GetResourceField.side_effect = [
            ('instance_id', None),
            ('gce://project-id/vm_zone/vm-name', None),
        ]

        self.mock_compute_client.instances.Get.Expect(
            request=self.compute_messages.ComputeInstancesGetRequest(
                instance='instance_id', zone='vm_zone', project='project-id'),
            response=self.compute_messages.Instance())

        with self.assertRaisesRegex(exceptions.Error, 'empty metadata'):
            api_util.GKEClusterSelfLink(self.mock_old_kubernetes_client)
Example #3
    def testNoInstanceIDOld(self):
        self.mock_old_kubernetes_client.GetResourceField.return_value = (None,
                                                                         None)
        self.assertIsNone(
            api_util.GKEClusterSelfLink(self.mock_old_kubernetes_client))

        self.mock_old_kubernetes_client.GetResourceField.assert_has_calls([
            mock.call(
                mock.ANY, mock.ANY,
                SubstringValidator(
                    'container\\.googleapis\\.com/instance_id')),
        ])

        self.assertEqual(
            self.mock_old_kubernetes_client.GetResourceField.call_count, 1)
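
The SubstringValidator used in the assert_has_calls checks above is not defined in these excerpts. A minimal sketch of such a matcher, assuming it simply compares equal to any string argument that contains the given substring, could look like this:

class SubstringValidator(object):
    """Sketch of a matcher that equals any string containing substring.

    This is an assumption reconstructed from how the tests above use it; the
    real helper may differ.
    """

    def __init__(self, substring):
        self.substring = substring

    def __eq__(self, other):
        # mock compares the expected argument (this object) against the actual
        # call argument, so containment is checked here.
        return isinstance(other, str) and self.substring in other

    def __repr__(self):
        return 'SubstringValidator({!r})'.format(self.substring)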
Example #4
    def testComputeAPIErrorOld(self):
        self.mock_kubernetes_client.GetResourceField.side_effect = [
            ('instance_id', None),
            ('gce://project_id/vm_zone/instance_id', None),
        ]

        self.mock_compute_client.instances.Get.Expect(
            request=self.compute_messages.ComputeInstancesGetRequest(
                instance='instance_id', zone='vm_zone', project='project_id'),
            exception=api_exceptions.HttpError({'status': 404}, '', ''))

        self_link = None
        with self.assertRaises(api_exceptions.HttpError):
            self_link = api_util.GKEClusterSelfLink(
                self.mock_kubernetes_client)
        self.assertIsNone(self_link)
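
The mock_compute_client.instances.Get.Expect(...) calls in these tests follow the apitools testing-mock pattern: each expected request is queued together with either a canned response or an exception to raise. A rough, hedged sketch of how such a fixture might be wired up (the helper name and the use of the v1 compute API are assumptions, not taken from these excerpts):

from apitools.base.py.testing import mock as api_mock
from googlecloudsdk.api_lib.util import apis as core_apis


def SetUpMockComputeClient(test_case):
    # Assumed fixture code: attaches a mocked compute client to the test case
    # so that instances.Get.Expect(...) can queue expected request/response
    # (or request/exception) pairs consumed by the code under test.
    test_case.compute_messages = core_apis.GetMessagesModule('compute', 'v1')
    test_case.mock_compute_client = api_mock.Client(
        core_apis.GetClientClass('compute', 'v1'))
    test_case.mock_compute_client.Mock()
    test_case.addCleanup(test_case.mock_compute_client.Unmock)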
Example #5
    def testInstanceWithoutClusterNameFromComputeAPIRequest(self):
        self.mock_old_kubernetes_client.GetResourceField.side_effect = [
            ('instance_id', None),
            ('gce://project-id/vm_zone/instance_id', None),
        ]

        item = self.compute_messages.Metadata.ItemsValueListEntry

        self.mock_compute_client.instances.Get.Expect(
            request=self.compute_messages.ComputeInstancesGetRequest(
                instance='instance_id', zone='vm_zone', project='project-id'),
            response=self.compute_messages.Instance(
                metadata=self.compute_messages.Metadata(
                    items=[item(key='foo', value='bar')])))

        with self.assertRaisesRegex(exceptions.Error, 'cluster name'):
            api_util.GKEClusterSelfLink(self.mock_old_kubernetes_client)
Example #6
    def testGetSelfLink(self):
        self.mock_old_kubernetes_client.GetResourceField.side_effect = [
            ('instance_id', None),
            ('gce://project-id/vm_zone/instance_id', None),
        ]

        item = self.compute_messages.Metadata.ItemsValueListEntry

        self.mock_compute_client.instances.Get.Expect(
            request=self.compute_messages.ComputeInstancesGetRequest(
                instance='instance_id', zone='vm_zone', project='project-id'),
            response=self.compute_messages.Instance(
                metadata=self.compute_messages.Metadata(items=[
                    item(key='foo', value='bar'),
                    item(key='cluster-name', value='cluster'),
                    item(key='cluster-location', value='location'),
                ])))

        self.assertEqual(
            api_util.GKEClusterSelfLink(self.mock_old_kubernetes_client),
            '//container.googleapis.com/projects/project-id/locations/location/clusters/cluster'
        )
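
Taken together, these tests pin down the behavior expected of api_util.GKEClusterSelfLink: read the instance-ID annotation and spec.providerID from a node, parse the gce://project/zone/instance provider ID, fetch the instance from the Compute API, and assemble the cluster self link from the cluster-name and cluster-location metadata items. Below is a hedged sketch of that flow, reconstructed purely from the test expectations rather than from the real implementation; argument handling, field paths, and error messages are assumptions.

import re

from googlecloudsdk.core import exceptions


def GKEClusterSelfLinkSketch(kube_client, compute_client, compute_messages):
    # Instance ID annotation: absent (or unreadable) means this is not a GKE
    # node, so there is no self link to return.
    instance_id, err = kube_client.GetResourceField(
        None, 'nodes',
        '.metadata.annotations.container\\.googleapis\\.com/instance_id')
    if err or not instance_id:
        return None

    provider_id, err = kube_client.GetResourceField(
        None, 'nodes', '.spec.providerID')
    if err or not provider_id:
        raise exceptions.Error('could not determine the provider ID')

    match = re.match(r'^gce://([^/]+)/([^/]+)/([^/]+)$', provider_id)
    if not match:
        raise exceptions.Error(
            'error parsing the provider ID: {}'.format(provider_id))
    project, zone, instance = match.groups()

    resp = compute_client.instances.Get(
        compute_messages.ComputeInstancesGetRequest(
            project=project, zone=zone, instance=instance))
    if not resp.metadata or not resp.metadata.items:
        raise exceptions.Error('instance has empty metadata')

    items = {i.key: i.value for i in resp.metadata.items}
    if 'cluster-name' not in items:
        raise exceptions.Error('could not determine the cluster name')
    # Assumption: fall back to the instance zone if no explicit location is set.
    location = items.get('cluster-location', zone)
    return ('//container.googleapis.com/projects/{}/locations/{}/clusters/{}'
            .format(project, location, items['cluster-name']))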
Example #7
    def testNoProviderID(self):
        self.mock_old_kubernetes_client.GetResourceField.side_effect = [
            ('instance_id', None),
            (None, None),
        ]

        self_link = None
        with self.assertRaisesRegex(exceptions.Error, 'provider ID'):
            self_link = api_util.GKEClusterSelfLink(
                self.mock_old_kubernetes_client)
        self.assertIsNone(self_link)

        self.mock_old_kubernetes_client.GetResourceField.assert_has_calls([
            mock.call(
                mock.ANY, mock.ANY,
                SubstringValidator(
                    'annotations.container\\.googleapis\\.com/instance_id')),
            mock.call(mock.ANY, mock.ANY,
                      SubstringValidator('spec.providerID')),
        ])

        self.assertEqual(
            self.mock_old_kubernetes_client.GetResourceField.call_count, 2)
Example #8
    def testErrorGettingInstanceID(self):
        self.mock_old_kubernetes_client.GetResourceField.return_value = (
            None, 'error')
        # An error while reading the instance ID should not raise; the call is
        # simply expected to complete.
        api_util.GKEClusterSelfLink(self.mock_old_kubernetes_client)
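
Every GetResourceField stub in these tests returns a (value, error) pair, and side_effect hands out one pair per call. A small standalone illustration of that mock behavior (the field paths here are placeholders):

from unittest import mock

client = mock.Mock()
client.GetResourceField.side_effect = [
    ('instance_id', None),                           # first call
    ('gce://project-id/vm_zone/instance_id', None),  # second call
]

assert client.GetResourceField('node', 'nodes', '...instance_id') == (
    'instance_id', None)
assert client.GetResourceField('node', 'nodes', '.spec.providerID') == (
    'gce://project-id/vm_zone/instance_id', None)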
Example #9
    def Run(self, args):
        project = arg_utils.GetFromNamespace(args,
                                             '--project',
                                             use_defaults=True)

        # This incidentally verifies that the kubeconfig and context args are valid.
        kube_client = kube_util.KubernetesClient(args)
        uuid = kube_util.GetClusterUUID(kube_client)

        self._VerifyClusterExclusivity(kube_client, project, args.context,
                                       uuid)

        # Read the service account files provided in the arguments early, in order
        # to catch invalid files before performing mutating operations.
        try:
            service_account_key_data = hub_util.Base64EncodedFileContents(
                args.service_account_key_file)
        except files.Error as e:
            raise exceptions.Error('Could not process {}: {}'.format(
                SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

        docker_credential_data = None
        if args.docker_credential_file:
            try:
                docker_credential_data = hub_util.Base64EncodedFileContents(
                    args.docker_credential_file)
            except files.Error as e:
                raise exceptions.Error('Could not process {}: {}'.format(
                    DOCKER_CREDENTIAL_FILE_FLAG, e))

        gke_cluster_self_link = api_util.GKEClusterSelfLink(kube_client)

        # The full resource name of the membership for this registration flow.
        name = 'projects/{}/locations/global/memberships/{}'.format(
            project, uuid)
        # Attempt to create a membership.
        already_exists = False
        try:
            exclusivity_util.ApplyMembershipResources(kube_client, project)
            obj = api_util.CreateMembership(project, uuid, args.CLUSTER_NAME,
                                            gke_cluster_self_link)
        except apitools_exceptions.HttpConflictError as e:
            # If the error is not due to the object already existing, re-raise.
            error = core_api_exceptions.HttpErrorPayload(e)
            if error.status_description != 'ALREADY_EXISTS':
                raise

            # The membership already exists. Check to see if it has the same
            # description (i.e., user-visible cluster name).
            #
            # This intentionally does not verify that the gke_cluster_self_link is
            # equivalent: this check is meant to prevent the user from updating the
            # Connect agent in a cluster that is different from the one that they
            # expect, and is not required for the proper functioning of the agent or
            # the Hub.
            obj = api_util.GetMembership(name)
            if obj.description != args.CLUSTER_NAME:
                # A membership exists, but does not have the same description. This is
                # possible if two different users attempt to register the same
                # cluster, or if the user is upgrading and has passed a different
                # cluster name. Treat this as an error: even in the upgrade case,
                # this is useful to prevent the user from upgrading the wrong cluster.
                raise exceptions.Error(
                    'There is an existing membership, [{}], that conflicts with [{}]. '
                    'Please delete it before continuing:\n\n'
                    '  gcloud {}container hub memberships delete {}'.format(
                        obj.description, args.CLUSTER_NAME,
                        hub_util.ReleaseTrackCommandPrefix(
                            self.ReleaseTrack()), name))

            # The membership exists and has the same description.
            already_exists = True
            console_io.PromptContinue(
                message='A membership for [{}] already exists. Continuing will '
                'reinstall the Connect agent deployment to use a new image (if one '
                'is available).'.format(args.CLUSTER_NAME),
                cancel_on_no=True)

        # A membership exists. Attempt to update the existing agent deployment, or
        # install a new agent if necessary.
        if already_exists:
            obj = api_util.GetMembership(name)
            agent_util.DeployConnectAgent(args, service_account_key_data,
                                          docker_credential_data, name)
            return obj

        # A new membership was just created. Attempt to install the agent, and
        # clean up the membership and its resources if that fails.
        try:
            agent_util.DeployConnectAgent(args, service_account_key_data,
                                          docker_credential_data, name)
        except:
            api_util.DeleteMembership(name)
            exclusivity_util.DeleteMembershipResources(kube_client)
            raise
        return obj
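
The Run method reads the credential files through hub_util.Base64EncodedFileContents before performing any mutating operation, which is what the files.Error handling above is guarding. A hedged sketch of what such a helper presumably does (this is an assumption, not the actual hub_util code):

import base64

from googlecloudsdk.core.util import files


def Base64EncodedFileContentsSketch(filename):
    # Assumed behavior: read the file as bytes and return its base64 text.
    # files.ReadBinaryFileContents raises files.Error on I/O problems, which
    # the callers above convert into a user-facing exceptions.Error.
    contents = files.ReadBinaryFileContents(filename)
    return base64.b64encode(contents).decode('utf-8')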
Example #10
    def Run(self, args):
        project = arg_utils.GetFromNamespace(args,
                                             '--project',
                                             use_defaults=True)

        # This incidentally verifies that the kubeconfig and context args are valid.
        kube_client = kube_util.KubernetesClient(args)
        uuid = kube_util.GetClusterUUID(kube_client)
        gke_cluster_self_link = api_util.GKEClusterSelfLink(kube_client)
        # Read the service account files provided in the arguments early, in order
        # to catch invalid files before performing mutating operations.
        try:
            service_account_key_data = hub_util.Base64EncodedFileContents(
                args.service_account_key_file)
        except files.Error as e:
            raise exceptions.Error('Could not process {}: {}'.format(
                SERVICE_ACCOUNT_KEY_FILE_FLAG, e))

        docker_credential_data = None
        if args.docker_credential_file:
            try:
                docker_credential_data = hub_util.Base64EncodedFileContents(
                    args.docker_credential_file)
            except files.Error as e:
                raise exceptions.Error('Could not process {}: {}'.format(
                    DOCKER_CREDENTIAL_FILE_FLAG, e))

        # Attempt to create a membership.
        already_exists = False

        obj = None
        # For backward compatibility, check if a membership was previously created
        # using the cluster uuid.
        parent = api_util.ParentRef(project, 'global')
        membership_id = uuid
        resource_name = api_util.MembershipRef(project, 'global', uuid)
        obj = self._CheckMembershipWithUUID(resource_name, args.CLUSTER_NAME)
        if obj:
            # The membership exists and has the same description.
            already_exists = True
        else:
            # Attempt to create a new membership using cluster_name.
            membership_id = args.CLUSTER_NAME
            resource_name = api_util.MembershipRef(project, 'global',
                                                   args.CLUSTER_NAME)
            try:
                self._VerifyClusterExclusivity(kube_client, parent,
                                               membership_id)
                obj = api_util.CreateMembership(project, args.CLUSTER_NAME,
                                                args.CLUSTER_NAME,
                                                gke_cluster_self_link, uuid,
                                                self.ReleaseTrack())
            except apitools_exceptions.HttpConflictError as e:
                # If the error is not due to the object already existing, re-raise.
                error = core_api_exceptions.HttpErrorPayload(e)
                if error.status_description != 'ALREADY_EXISTS':
                    raise
                # The membership exists with same cluster_name.
                already_exists = True
                obj = api_util.GetMembership(resource_name,
                                             self.ReleaseTrack())

        # In case of an existing membership, check with the user to upgrade the
        # Connect-Agent.
        if already_exists:
            console_io.PromptContinue(
                message='A membership for [{}] already exists. Continuing will '
                'reinstall the Connect agent deployment to use a new image (if one '
                'is available).'.format(resource_name),
                cancel_on_no=True)

        # Install (or update) the exclusivity artifacts and deploy the Connect
        # agent for the new or existing membership.
        try:
            self._InstallOrUpdateExclusivityArtifacts(kube_client,
                                                      resource_name)
            agent_util.DeployConnectAgent(args, service_account_key_data,
                                          docker_credential_data,
                                          resource_name, self.ReleaseTrack())
        except:
            # In case of a new membership, we need to clean up membership and
            # resources if we failed to install the Connect Agent.
            if not already_exists:
                api_util.DeleteMembership(resource_name, self.ReleaseTrack())
                exclusivity_util.DeleteMembershipResources(kube_client)
            raise
        return obj
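
This second Run variant builds resource names through api_util.ParentRef and api_util.MembershipRef instead of formatting them inline as the first variant does ('projects/{}/locations/global/memberships/{}'). Based on that format, a hedged sketch of what those helpers plausibly look like (names and exact behavior are assumptions):

def ParentRefSketch(project, location):
    # Matches the inline format string used in the first Run variant.
    return 'projects/{}/locations/{}'.format(project, location)


def MembershipRefSketch(project, location, membership_id):
    return '{}/memberships/{}'.format(
        ParentRefSketch(project, location), membership_id)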