def LoadClusterConfig(self, args):
  """Load and return ClusterConfig prior to calling a kubectl command.

  Args:
    args: an argparse namespace. All the arguments that were provided to
      this command invocation.

  Returns:
    ClusterConfig for the project, zone, and cluster specified by
    args/properties.

  Raises:
    util.Error: if container API reports cluster is not running.
  """
  name = properties.VALUES.container.cluster.Get(required=True)
  cluster_ref = util.ParseCluster(name, self.context)
  c_config = util.ClusterConfig.Load(
      cluster_ref.clusterId, cluster_ref.zoneId, cluster_ref.projectId)
  if args.purge_config_cache:
    util.ClusterConfig.Purge(
        cluster_ref.clusterId, cluster_ref.zoneId, cluster_ref.projectId)
    c_config = None
  if not c_config or not c_config.has_certs:
    log.status.Print('Fetching cluster endpoint and auth data.')
    # Call DescribeCluster to get auth info and cache for next time
    cluster = util.DescribeCluster(cluster_ref, self.context)
    messages = self.context['container_messages']
    if cluster.status != messages.Cluster.StatusValueValuesEnum.running:
      raise util.Error('cluster %s is not running' % cluster_ref.clusterId)
    c_config = util.ClusterConfig.Persist(
        cluster, cluster_ref.projectId, self.cli)
  return c_config
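# --- Illustrative example, not from the original source ---
# A minimal sketch of how a kubectl-wrapping command's Run() might consume
# LoadClusterConfig above (the docstring says it runs "prior to calling a
# kubectl command"). The 'args.kubectl_args' flag and the c_config
# attributes (server, ca_path, cert_path, key_path) are assumptions made
# for illustration; the kubectl flags themselves are standard kubectl
# global flags.
import subprocess  # would live at module scope in real code


def Run(self, args):
  c_config = self.LoadClusterConfig(args)
  command = [
      'kubectl',
      '--server=https://{0}'.format(c_config.server),
      '--certificate-authority={0}'.format(c_config.ca_path),
      '--client-certificate={0}'.format(c_config.cert_path),
      '--client-key={0}'.format(c_config.key_path),
  ]
  command.extend(args.kubectl_args or [])
  subprocess.check_call(command)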
def Run(self, args):
  """This is what gets called when the user runs this command.

  Args:
    args: an argparse namespace. All the arguments that were provided to
      this command invocation.

  Returns:
    Cluster message for the cluster specified by args.name.
  """
  cluster_ref = util.ParseCluster(args.name, self.context)
  return util.DescribeCluster(cluster_ref, self.context)
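# --- Illustrative example, not from the original source ---
# For context: ParseCluster resolves a bare cluster name against the
# core/project and compute/zone properties into a resource reference whose
# projectId, zoneId, and clusterId fields are used throughout this file.
# The values below are made up for illustration.
#
#   cluster_ref = util.ParseCluster('my-cluster', self.context)
#   cluster_ref.projectId  # -> 'my-project'
#   cluster_ref.zoneId     # -> 'us-central1-a'
#   cluster_ref.clusterId  # -> 'my-cluster'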
def Run(self, args):
  """This is what gets called when the user runs this command.

  Args:
    args: an argparse namespace. All the arguments that were provided to
      this command invocation.

  Raises:
    util.Error: if the cluster is unreachable or not running.
  """
  name = properties.VALUES.container.cluster.Get(required=True)
  cluster_ref = util.ParseCluster(name, self.context)
  log.status.Print('Fetching cluster endpoint and auth data.')
  # Call DescribeCluster to get auth info and cache for next time
  cluster = util.DescribeCluster(cluster_ref, self.context)
  messages = self.context['container_messages']
  if cluster.status != messages.Cluster.StatusValueValuesEnum.running:
    raise util.Error('cluster %s is not running' % cluster_ref.clusterId)
  c_config = util.ClusterConfig.Persist(
      cluster, cluster_ref.projectId, self.cli)
  # Write the fetched credentials to kubeconfig and make the cluster's
  # context the active one.
  kubeconfig = kconfig.Kubeconfig.Default()
  kubeconfig.SetCurrentContext(c_config.kube_context)
  kubeconfig.SaveToFile()
def Run(self, args):
  """This is what gets called when the user runs this command.

  Args:
    args: an argparse namespace. All the arguments that were provided to
      this command invocation.

  Raises:
    exceptions.ToolException: if the user aborts the prompt or some of the
      delete requests do not succeed.
  """
  client = self.context['container_client']
  messages = self.context['container_messages']
  resources = self.context['registry']

  # Fail fast if the zone/project properties are unset; their values are
  # consumed later via the parsed cluster references.
  properties.VALUES.compute.zone.Get(required=True)
  properties.VALUES.core.project.Get(required=True)

  cluster_refs = []
  for name in args.names:
    cluster_refs.append(resources.Parse(
        name, collection='container.projects.zones.clusters'))

  if not console_io.PromptContinue(
      message=util.ConstructList(
          'The following clusters will be deleted.',
          ['[{name}] in [{zone}]'.format(name=ref.clusterId, zone=ref.zoneId)
           for ref in cluster_refs]),
      throw_if_unattended=True):
    raise exceptions.ToolException('Deletion aborted by user.')

  operations = []
  errors = []
  # Issue all deletes first, then poll, so the server processes them in
  # parallel.
  for ref in cluster_refs:
    try:
      # Make sure it exists (will raise appropriate error if not)
      util.DescribeCluster(ref, self.context)
      op = client.projects_zones_clusters.Delete(
          messages.ContainerProjectsZonesClustersDeleteRequest(
              clusterId=ref.clusterId,
              zoneId=ref.zoneId,
              projectId=ref.projectId))
      operations.append((op, ref))
    except apitools_base.HttpError as error:
      errors.append(util.GetError(error))
    except util.Error as error:
      errors.append(error)
  if args.wait:
    # Poll each operation for completion
    for operation, ref in operations:
      try:
        util.WaitForOperation(
            operation, ref.projectId, self.context,
            'Deleting cluster {0}'.format(ref.clusterId))
        # Purge cached config files
        util.ClusterConfig.Purge(ref.clusterId, ref.zoneId, ref.projectId)
        # Unset the cluster property if it pointed at the deleted cluster.
        if properties.VALUES.container.cluster.Get() == ref.clusterId:
          properties.PersistProperty(properties.VALUES.container.cluster, None)
        log.DeletedResource(ref)
      except apitools_base.HttpError as error:
        errors.append(util.GetError(error))
      except util.Error as error:
        errors.append(error)
  if errors:
    raise exceptions.ToolException(
        util.ConstructList('Some requests did not succeed:', errors))
def Run(self, args):
  """This is what gets called when the user runs this command.

  Args:
    args: an argparse namespace. All the arguments that were provided to
      this command invocation.

  Returns:
    Cluster message for the successfully created cluster.

  Raises:
    exceptions.ToolException: if creation failed.
  """
  client = self.context['container_client']
  messages = self.context['container_messages']
  cluster_ref = util.ParseCluster(args.name, self.context)
  if args.password:
    password = args.password
  else:
    # Generate a random password with a cryptographically secure RNG.
    password = ''.join(random.SystemRandom().choice(
        string.ascii_letters + string.digits) for _ in range(16))

  node_config = messages.NodeConfig()
  if args.machine_type:
    node_config.machineType = args.machine_type
  if args.source_image:
    node_config.sourceImage = args.source_image
  node_config.serviceAccounts = self.CreateServiceAccountMessages(
      args, messages)

  create_cluster_req = messages.CreateClusterRequest(
      cluster=messages.Cluster(
          name=cluster_ref.clusterId,
          numNodes=args.num_nodes,
          nodeConfig=node_config,
          masterAuth=messages.MasterAuth(user=args.user, password=password),
          enableCloudLogging=args.enable_cloud_logging))
  if args.cluster_api_version:
    create_cluster_req.cluster.clusterApiVersion = args.cluster_api_version
  if args.network:
    create_cluster_req.cluster.network = args.network
  if args.container_ipv4_cidr:
    create_cluster_req.cluster.containerIpv4Cidr = args.container_ipv4_cidr

  req = messages.ContainerProjectsZonesClustersCreateRequest(
      createClusterRequest=create_cluster_req,
      projectId=cluster_ref.projectId,
      zoneId=cluster_ref.zoneId)

  cluster = None
  try:
    operation = client.projects_zones_clusters.Create(req)
    if not args.wait:
      return util.DescribeCluster(cluster_ref, self.context)
    operation = util.WaitForOperation(
        operation, cluster_ref.projectId, self.context,
        'Creating cluster {0}'.format(cluster_ref.clusterId))
    # Get Cluster
    cluster = util.DescribeCluster(cluster_ref, self.context)
  except apitools_base.HttpError as error:
    raise exceptions.HttpException(util.GetError(error))
  log.CreatedResource(cluster_ref)
  # Persist cluster config
  c_config = util.ClusterConfig.Persist(cluster, cluster_ref.projectId,
                                        self.cli)
  if c_config:
    if not c_config.has_certs:
      # Purge config so we retry the cert fetch on next kubectl command
      util.ClusterConfig.Purge(
          cluster.name, cluster.zone, cluster_ref.projectId)
      # Exit with non-success returncode if certs could not be fetched
      self.exit_code = 1
    else:
      # Set current-context to new cluster if one is not already set
      kubeconfig = kconfig.Kubeconfig.Default()
      if not kubeconfig.current_context:
        kubeconfig.SetCurrentContext(c_config.kube_context)
        kubeconfig.SaveToFile()
  return cluster
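# --- Illustrative example, not from the original source ---
# CreateServiceAccountMessages is called above but not defined in this file.
# A minimal sketch of what it might look like, assuming the container API's
# ServiceAccount message has 'email' and 'scopes' fields and the command
# defines a '--scopes' flag; all of these are assumptions for illustration.
def CreateServiceAccountMessages(self, args, messages):
  accounts = []
  if getattr(args, 'scopes', None):
    # Deduplicate and sort so repeated flags produce a stable request.
    accounts.append(messages.ServiceAccount(
        email='default',
        scopes=sorted(set(args.scopes))))
  return accounts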