def ParseCreateOptionsBase(args, is_autogke, get_default):
  """Parses the flags provided with the cluster creation command.

  Args:
    args: an argparse namespace with the parsed command-line flags.
    is_autogke: bool, whether this create is for an auto-GKE cluster; passed
      straight through as the `auto_gke` option.
    get_default: callable mapping a flag name to its effective value. The
      exact resolution (specified value vs. declared default) is defined by
      the caller that supplies this callable.

  Returns:
    An api_adapter.CreateClusterOptions populated from the flags.
  """
  # Warn anyone still requesting the deprecated KubernetesDashboard addon.
  if hasattr(args, 'addons') and args.IsSpecified('addons') and \
      api_adapter.DASHBOARD in args.addons:
    log.warning(
        'The `KubernetesDashboard` addon is deprecated, and will be removed as '
        'an option for new clusters starting in 1.15. It is recommended to use '
        'the Cloud Console to manage and monitor your Kubernetes clusters, '
        'workloads and applications. See: '
        'https://cloud.google.com/kubernetes-engine/docs/concepts/dashboards')
  flags.LogBasicAuthDeprecationWarning(args)
  flags.MungeBasicAuthFlags(args)
  MaybeLogAuthWarning(args)

  enable_ip_alias = get_default('enable_ip_alias')
  if hasattr(args, 'enable_ip_alias'):
    flags.WarnForUnspecifiedIpAllocationPolicy(args)

  # hasattr guards throughout: not every release track registers every flag.
  enable_autorepair = False
  if hasattr(args, 'enable_autorepair'):
    enable_autorepair = cmd_util.GetAutoRepair(args)
    if enable_autorepair:
      flags.WarnForNodeModification(args, enable_autorepair)

  # Merge inline --metadata values with --metadata-from-file contents.
  metadata = metadata_utils.ConstructMetadataDict(
      get_default('metadata'), get_default('metadata_from_file'))

  flags.ValidateCloudRunConfigCreateArgs(
      get_default('cloud_run_config'), get_default('addons'))

  # Mostly a 1:1 mapping of flag values onto the options object; the few
  # computed entries (autoupgrade, logging/monitoring, disk size) are guarded
  # because their flags are track-dependent.
  return api_adapter.CreateClusterOptions(
      accelerators=get_default('accelerator'),
      additional_zones=get_default('additional_zones'),
      addons=get_default('addons'),
      boot_disk_kms_key=get_default('boot_disk_kms_key'),
      cluster_ipv4_cidr=get_default('cluster_ipv4_cidr'),
      cluster_secondary_range_name=get_default('cluster_secondary_range_name'),
      cluster_version=get_default('cluster_version'),
      cloud_run_config=get_default('cloud_run_config'),
      node_version=get_default('node_version'),
      create_subnetwork=get_default('create_subnetwork'),
      disable_default_snat=get_default('disable_default_snat'),
      disk_type=get_default('disk_type'),
      enable_autorepair=enable_autorepair,
      enable_autoscaling=get_default('enable_autoscaling'),
      enable_autoupgrade=(cmd_util.GetAutoUpgrade(args)
                          if hasattr(args, 'enable_autoupgrade') else None),
      enable_binauthz=get_default('enable_binauthz'),
      enable_stackdriver_kubernetes=_GetEnableStackdriver(args),
      enable_cloud_logging=args.enable_cloud_logging if
      (hasattr(args, 'enable_cloud_logging') and
       args.IsSpecified('enable_cloud_logging')) else None,
      enable_cloud_monitoring=args.enable_cloud_monitoring if
      (hasattr(args, 'enable_cloud_monitoring') and
       args.IsSpecified('enable_cloud_monitoring')) else None,
      enable_ip_alias=enable_ip_alias,
      enable_intra_node_visibility=get_default('enable_intra_node_visibility'),
      enable_kubernetes_alpha=get_default('enable_kubernetes_alpha'),
      enable_cloud_run_alpha=args.enable_cloud_run_alpha if
      (hasattr(args, 'enable_cloud_run_alpha') and
       args.IsSpecified('enable_cloud_run_alpha')) else None,
      enable_legacy_authorization=get_default('enable_legacy_authorization'),
      enable_master_authorized_networks=get_default(
          'enable_master_authorized_networks'),
      enable_master_global_access=get_default('enable_master_global_access'),
      enable_network_policy=get_default('enable_network_policy'),
      enable_private_nodes=get_default('enable_private_nodes'),
      enable_private_endpoint=get_default('enable_private_endpoint'),
      enable_gke_oidc=getattr(args, 'enable_gke_oidc', None),
      image_type=get_default('image_type'),
      image=get_default('image'),
      image_project=get_default('image_project'),
      image_family=get_default('image_family'),
      issue_client_certificate=get_default('issue_client_certificate'),
      labels=get_default('labels'),
      local_ssd_count=get_default('local_ssd_count'),
      maintenance_window=get_default('maintenance_window'),
      maintenance_window_start=get_default('maintenance_window_start'),
      maintenance_window_end=get_default('maintenance_window_end'),
      maintenance_window_recurrence=get_default(
          'maintenance_window_recurrence'),
      master_authorized_networks=get_default('master_authorized_networks'),
      master_ipv4_cidr=get_default('master_ipv4_cidr'),
      max_nodes=get_default('max_nodes'),
      max_nodes_per_pool=get_default('max_nodes_per_pool'),
      min_cpu_platform=get_default('min_cpu_platform'),
      min_nodes=get_default('min_nodes'),
      network=get_default('network'),
      # --disk-size arrives in bytes; the API takes GB.
      node_disk_size_gb=utils.BytesToGb(args.disk_size)
      if hasattr(args, 'disk_size') else None,
      node_labels=get_default('node_labels'),
      node_locations=get_default('node_locations'),
      node_machine_type=get_default('machine_type'),
      node_taints=get_default('node_taints'),
      num_nodes=get_default('num_nodes'),
      password=get_default('password'),
      preemptible=get_default('preemptible'),
      scopes=get_default('scopes'),
      service_account=get_default('service_account'),
      services_ipv4_cidr=get_default('services_ipv4_cidr'),
      services_secondary_range_name=get_default(
          'services_secondary_range_name'),
      subnetwork=get_default('subnetwork'),
      tags=get_default('tags'),
      user=get_default('username'),
      metadata=metadata,
      default_max_pods_per_node=get_default('default_max_pods_per_node'),
      max_pods_per_node=get_default('max_pods_per_node'),
      enable_tpu=get_default('enable_tpu'),
      tpu_ipv4_cidr=get_default('tpu_ipv4_cidr'),
      resource_usage_bigquery_dataset=get_default(
          'resource_usage_bigquery_dataset'),
      enable_network_egress_metering=get_default(
          'enable_network_egress_metering'),
      enable_resource_consumption_metering=get_default(
          'enable_resource_consumption_metering'),
      database_encryption_key=get_default('database_encryption_key'),
      workload_pool=get_default('workload_pool'),
      identity_provider=get_default('identity_provider'),
      workload_metadata=get_default('workload_metadata'),
      workload_metadata_from_node=get_default('workload_metadata_from_node'),
      enable_vertical_pod_autoscaling=get_default(
          'enable_vertical_pod_autoscaling'),
      enable_autoprovisioning=get_default('enable_autoprovisioning'),
      autoprovisioning_config_file=get_default('autoprovisioning_config_file'),
      autoprovisioning_service_account=get_default(
          'autoprovisioning_service_account'),
      autoprovisioning_scopes=get_default('autoprovisioning_scopes'),
      autoprovisioning_locations=get_default('autoprovisioning_locations'),
      autoprovisioning_max_surge_upgrade=get_default(
          'autoprovisioning_max_surge_upgrade'),
      autoprovisioning_max_unavailable_upgrade=get_default(
          'autoprovisioning_max_unavailable_upgrade'),
      enable_autoprovisioning_autorepair=get_default(
          'enable_autoprovisioning_autorepair'),
      enable_autoprovisioning_autoupgrade=get_default(
          'enable_autoprovisioning_autoupgrade'),
      autoprovisioning_min_cpu_platform=get_default(
          'autoprovisioning_min_cpu_platform'),
      min_cpu=get_default('min_cpu'),
      max_cpu=get_default('max_cpu'),
      min_memory=get_default('min_memory'),
      max_memory=get_default('max_memory'),
      min_accelerator=get_default('min_accelerator'),
      max_accelerator=get_default('max_accelerator'),
      shielded_secure_boot=get_default('shielded_secure_boot'),
      shielded_integrity_monitoring=get_default(
          'shielded_integrity_monitoring'),
      reservation_affinity=get_default('reservation_affinity'),
      reservation=get_default('reservation'),
      release_channel=get_default('release_channel'),
      enable_shielded_nodes=get_default('enable_shielded_nodes'),
      max_surge_upgrade=get_default('max_surge_upgrade'),
      max_unavailable_upgrade=get_default('max_unavailable_upgrade'),
      auto_gke=is_autogke)
def Run(self, args):
  """This is what gets called when the user runs this command.

  Dispatches exactly one update operation based on the mutually exclusive
  flag groups: master auth, network policy, IP/credential rotation, labels,
  logging service, maintenance windows/exclusions, cross-connect
  subnetworks, legacy authorization, or a generic cluster update.

  Args:
    args: an argparse namespace. All the arguments that were provided to
      this command invocation.

  Returns:
    Some value that we want to have printed later.
  """
  adapter = self.context['api_adapter']
  location_get = self.context['location_get']
  location = location_get(args)
  cluster_ref = adapter.ParseCluster(args.name, location)
  cluster_name = args.name
  cluster_node_count = None
  cluster_zone = cluster_ref.zone
  cluster_is_required = self.IsClusterRequired(args)
  try:
    # Attempt to get cluster for better prompts and to validate args.
    # Error is a warning but not fatal. Should only exit with a failure on
    # the actual update API calls below.
    cluster = adapter.GetCluster(cluster_ref)
    cluster_name = cluster.name
    cluster_node_count = cluster.currentNodeCount
    cluster_zone = cluster.zone
  except (exceptions.HttpException, apitools_exceptions.HttpForbiddenError,
          util.Error) as error:
    if cluster_is_required:
      raise
    log.warning(('Problem loading details of cluster to update:\n\n{}\n\n'
                 'You can still attempt updates to the cluster.\n').format(
                     console_attr.SafeText(error)))

  # locations will be None if additional-zones was specified, an empty list
  # if it was specified with no argument, or a populated list if zones were
  # provided. We want to distinguish between the case where it isn't
  # specified (and thus shouldn't be passed on to the API) and the case
  # where it's specified as wanting no additional zones, in which case we
  # must pass the cluster's primary zone to the API.
  # TODO(b/29578401): Remove the hasattr once the flag is GA.
  locations = None
  if hasattr(args, 'additional_zones') and args.additional_zones is not None:
    locations = sorted([cluster_ref.zone] + args.additional_zones)
  if hasattr(args, 'node_locations') and args.node_locations is not None:
    locations = sorted(args.node_locations)

  flags.LogBasicAuthDeprecationWarning(args)
  if args.IsSpecified('username') or args.IsSpecified('enable_basic_auth'):
    flags.MungeBasicAuthFlags(args)
    options = api_adapter.SetMasterAuthOptions(
        action=api_adapter.SetMasterAuthOptions.SET_USERNAME,
        username=args.username,
        password=args.password)
    try:
      op_ref = adapter.SetMasterAuth(cluster_ref, options)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif (args.generate_password or args.set_password or
        args.IsSpecified('password')):
    if args.generate_password:
      # Empty password tells the server to generate one.
      password = ''
      options = api_adapter.SetMasterAuthOptions(
          action=api_adapter.SetMasterAuthOptions.GENERATE_PASSWORD,
          password=password)
    else:
      # NOTE(review): this span was corrupted (credential-scrubbed to
      # '******') in the checked-in text; reconstructed to mirror the
      # SET_USERNAME branch above. Confirm against the released source.
      password = args.password
      if not args.IsSpecified('password'):
        password = input('Please enter the new password:')
      options = api_adapter.SetMasterAuthOptions(
          action=api_adapter.SetMasterAuthOptions.SET_PASSWORD,
          password=password)
    try:
      op_ref = adapter.SetMasterAuth(cluster_ref, options)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif args.enable_network_policy is not None:
    # Toggling network policy recreates every node, so make the user
    # confirm before issuing the call.
    console_io.PromptContinue(
        message='Enabling/Disabling Network Policy causes a rolling '
        'update of all cluster nodes, similar to performing a cluster '
        'upgrade. This operation is long-running and will block other '
        'operations on the cluster (including delete) until it has run '
        'to completion.',
        cancel_on_no=True)
    options = api_adapter.SetNetworkPolicyOptions(
        enabled=args.enable_network_policy)
    try:
      op_ref = adapter.SetNetworkPolicy(cluster_ref, options)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif args.start_ip_rotation or args.start_credential_rotation:
    if args.start_ip_rotation:
      msg_tmpl = """This will start an IP Rotation on cluster [{name}]. The \
master will be updated to serve on a new IP address in addition to the current \
IP address. Kubernetes Engine will then recreate all nodes ({num_nodes} nodes) \
to point to the new IP address. This operation is long-running and will block \
other operations on the cluster (including delete) until it has run to \
completion."""
      rotate_credentials = False
    elif args.start_credential_rotation:
      msg_tmpl = """This will start an IP and Credentials Rotation on cluster \
[{name}]. The master will be updated to serve on a new IP address in addition \
to the current IP address, and cluster credentials will be rotated. Kubernetes \
Engine will then recreate all nodes ({num_nodes} nodes) to point to the new IP \
address. This operation is long-running and will block other operations on the \
cluster (including delete) until it has run to completion."""
      rotate_credentials = True
    console_io.PromptContinue(
        message=msg_tmpl.format(
            name=cluster_name,
            # Node count is unknown when GetCluster failed above.
            num_nodes=cluster_node_count if cluster_node_count else '?'),
        cancel_on_no=True)
    try:
      op_ref = adapter.StartIpRotation(
          cluster_ref, rotate_credentials=rotate_credentials)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif args.complete_ip_rotation or args.complete_credential_rotation:
    if args.complete_ip_rotation:
      msg_tmpl = """This will complete the in-progress IP Rotation on \
cluster [{name}]. The master will be updated to stop serving on the old IP \
address and only serve on the new IP address. Make sure all API clients have \
been updated to communicate with the new IP address (e.g. by running `gcloud \
container clusters get-credentials --project {project} --zone {zone} {name}`). \
This operation is long-running and will block other operations on the cluster \
(including delete) until it has run to completion."""
    elif args.complete_credential_rotation:
      msg_tmpl = """This will complete the in-progress Credential Rotation on \
cluster [{name}]. The master will be updated to stop serving on the old IP \
address and only serve on the new IP address. Old cluster credentials will be \
invalidated. Make sure all API clients have been updated to communicate with \
the new IP address (e.g. by running `gcloud container clusters get-credentials \
--project {project} --zone {zone} {name}`). This operation is long-running and \
will block other operations on the cluster (including delete) until it has run \
to completion."""
    console_io.PromptContinue(
        message=msg_tmpl.format(
            name=cluster_name,
            project=cluster_ref.projectId,
            zone=cluster_zone),
        cancel_on_no=True)
    try:
      # Both completion flags go through CompleteIpRotation; this adapter
      # exposes no separate credential-rotation completion call.
      op_ref = adapter.CompleteIpRotation(cluster_ref)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif args.update_labels is not None:
    try:
      op_ref = adapter.UpdateLabels(cluster_ref, args.update_labels)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif args.remove_labels is not None:
    try:
      op_ref = adapter.RemoveLabels(cluster_ref, args.remove_labels)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif args.logging_service is not None and args.monitoring_service is None:
    try:
      op_ref = adapter.SetLoggingService(cluster_ref, args.logging_service)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif args.maintenance_window is not None:
    try:
      op_ref = adapter.SetDailyMaintenanceWindow(cluster_ref,
                                                 cluster.maintenancePolicy,
                                                 args.maintenance_window)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif getattr(args, 'maintenance_window_start', None) is not None:
    try:
      op_ref = adapter.SetRecurringMaintenanceWindow(
          cluster_ref, cluster.maintenancePolicy,
          args.maintenance_window_start, args.maintenance_window_end,
          args.maintenance_window_recurrence)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif getattr(args, 'clear_maintenance_window', None):
    try:
      op_ref = adapter.RemoveMaintenanceWindow(cluster_ref,
                                               cluster.maintenancePolicy)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif getattr(args, 'add_maintenance_exclusion_end', None) is not None:
    try:
      op_ref = adapter.AddMaintenanceExclusion(
          cluster_ref, cluster.maintenancePolicy,
          args.add_maintenance_exclusion_name,
          args.add_maintenance_exclusion_start,
          args.add_maintenance_exclusion_end)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif getattr(args, 'remove_maintenance_exclusion', None) is not None:
    try:
      op_ref = adapter.RemoveMaintenanceExclusion(
          cluster_ref, cluster.maintenancePolicy,
          args.remove_maintenance_exclusion)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif getattr(args, 'add_cross_connect_subnetworks', None) is not None:
    try:
      op_ref = adapter.ModifyCrossConnectSubnetworks(
          cluster_ref,
          cluster.privateClusterConfig.crossConnectConfig,
          add_subnetworks=args.add_cross_connect_subnetworks)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif getattr(args, 'remove_cross_connect_subnetworks', None) is not None:
    try:
      op_ref = adapter.ModifyCrossConnectSubnetworks(
          cluster_ref,
          cluster.privateClusterConfig.crossConnectConfig,
          remove_subnetworks=args.remove_cross_connect_subnetworks)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  elif getattr(args, 'clear_cross_connect_subnetworks', None) is not None:
    try:
      op_ref = adapter.ModifyCrossConnectSubnetworks(
          cluster_ref,
          cluster.privateClusterConfig.crossConnectConfig,
          clear_all_subnetworks=True)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)
  else:
    if args.enable_legacy_authorization is not None:
      op_ref = adapter.SetLegacyAuthorization(
          cluster_ref, args.enable_legacy_authorization)
    else:
      # Generic update path: everything else is folded into one options
      # object by the subclass-provided ParseUpdateOptions.
      options = self.ParseUpdateOptions(args, locations)
      op_ref = adapter.UpdateCluster(cluster_ref, options)

  if not args.async_:
    adapter.WaitForOperation(
        op_ref,
        'Updating {0}'.format(cluster_ref.clusterId),
        timeout_s=args.timeout)

    log.UpdatedResource(cluster_ref)
    cluster_url = util.GenerateClusterUrl(cluster_ref)
    log.status.Print('To inspect the contents of your cluster, go to: ' +
                     cluster_url)

    # Rotations change the master endpoint/credentials, so refresh the
    # local kubeconfig entry.
    if (args.start_ip_rotation or args.complete_ip_rotation or
        args.start_credential_rotation or args.complete_credential_rotation):
      cluster = adapter.GetCluster(cluster_ref)
      try:
        util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
      except kconfig.MissingEnvVarError as error:
        log.warning(error)