def _Run(self, args, version):
  """Updates a Vertex AI index endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track (GA or beta) to call.

  Returns:
    The updated index endpoint, or None when no update was requested.

  Raises:
    errors.NoFieldsSpecifiedError: if the patch reported no fields and none
      of the updatable flags were actually specified.
  """
  validation.ValidateDisplayName(args.display_name)
  index_endpoint_ref = args.CONCEPTS.index_endpoint.Parse()
  location = index_endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=location):
    api_client = client.IndexEndpointsClient(version=version)
    # Pick the GA or beta patch method based on the release track.
    patch = (
        api_client.Patch
        if version == constants.GA_VERSION else api_client.PatchBeta)
    try:
      result = patch(index_endpoint_ref, args)
    except errors.NoFieldsSpecifiedError:
      updatable_flags = (
          'display_name', 'description', 'update_labels', 'clear_labels',
          'remove_labels')
      # Only swallow the error when the user genuinely passed no update flags
      # is impossible: re-raise if none of the flags were specified.
      if not any(args.IsSpecified(flag) for flag in updatable_flags):
        raise
      log.status.Print('No update to perform.')
      return None
    log.UpdatedResource(result.name, kind='Vertex AI index endpoint')
    return result
def _Run(args, version):
  """Create a new Vertex AI endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track (GA or beta) to call.

  Returns:
    The operation response message, or None if no response was produced.
  """
  validation.ValidateDisplayName(args.display_name)
  region_ref = args.CONCEPTS.region.Parse()
  args.region = region_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    endpoints_client = client.EndpointsClient(version=version)
    operation_client = operations.OperationsClient()
    # Resolve the create method and the matching labels message class up
    # front so there is a single call site below.
    if version == constants.GA_VERSION:
      create_fn = endpoints_client.Create
      labels_cls = (
          endpoints_client.messages.GoogleCloudAiplatformV1Endpoint.LabelsValue)
    else:
      create_fn = endpoints_client.CreateBeta
      labels_cls = (
          endpoints_client.messages.GoogleCloudAiplatformV1beta1Endpoint
          .LabelsValue)
    op = create_fn(region_ref, args.display_name,
                   labels_util.ParseCreateArgs(args, labels_cls),
                   args.description, args.network, args.endpoint_id)
    response_msg = operations_util.WaitForOpMaybe(
        operation_client, op, endpoints_util.ParseOperation(op.name))
    if response_msg is not None:
      response = encoding.MessageToPyValue(response_msg)
      if 'name' in response:
        log.status.Print(
            'Created Vertex AI endpoint: {}.'.format(response['name']))
    return response_msg
def _Run(self, args, version):
  """Deploys an index to a Vertex AI index endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track (GA or beta) to call.

  Returns:
    The long-running deploy operation.
  """
  validation.ValidateDisplayName(args.display_name)
  index_endpoint_ref = args.CONCEPTS.index_endpoint.Parse()
  ref_dict = index_endpoint_ref.AsDict()
  project_id = ref_dict['projectsId']
  region = ref_dict['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=region):
    api_client = client.IndexEndpointsClient(version=version)
    deploy = (
        api_client.DeployIndex
        if version == constants.GA_VERSION else api_client.DeployIndexBeta)
    operation = deploy(index_endpoint_ref, args)
    op_ref = index_endpoints_util.ParseIndexEndpointOperation(operation.name)
    # TODO(b/208506223): Support `--async` flag.
    index_endpoint_id = op_ref.AsDict()['indexEndpointsId']
    # Tell the user how to poll the operation they just started.
    log.status.Print(
        constants.OPERATION_CREATION_DISPLAY_MESSAGE.format(
            name=operation.name,
            verb='deploy index',
            id=op_ref.Name(),
            sub_commands='--index-endpoint={} [--project={}]'.format(
                index_endpoint_id, project_id)))
    return operation
def _Run(args, version):
  """Run method for update command.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to call.

  Returns:
    The updated monitoring job, or None when no update was requested.

  Raises:
    errors.NoFieldsSpecifiedError: if the patch reported no fields and none
      of the updatable flags were actually specified.
  """
  validation.ValidateDisplayName(args.display_name)
  job_ref = args.CONCEPTS.monitoring_job.Parse()
  region = job_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=region):
    try:
      result = client.ModelMonitoringJobsClient(version=version).Patch(
          job_ref, args)
    except errors.NoFieldsSpecifiedError:
      updatable_flags = (
          'display_name',
          'emails',
          'prediction_sampling_rate',
          'feature_thresholds',
          'monitoring_config_from_file',
          'monitoring_frequency',
          'analysis_instance_schema',
          'log_ttl',
          'update_labels',
          'clear_labels',
          'remove_labels',
          'anomaly_cloud_logging',
      )
      # Re-raise unless the user passed no update flags at all.
      if not any(args.IsSpecified(flag) for flag in updatable_flags):
        raise
      log.status.Print('No update to perform.')
      return None
    log.UpdatedResource(
        result.name, kind='Vertex AI model deployment monitoring job')
    return result
def _Run(args, version):
  """Deploy a model to an existing Vertex AI endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track (GA or beta) to call.

  Returns:
    The operation response message, or None if no response was produced.
  """
  validation.ValidateDisplayName(args.display_name)
  # Autoscaling metric specs are only surfaced on the non-GA tracks.
  if version != constants.GA_VERSION:
    validation.ValidateAutoscalingMetricSpecs(args.autoscaling_metric_specs)
  endpoint_ref = args.CONCEPTS.endpoint.Parse()
  args.region = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    endpoints_client = client.EndpointsClient(version=version)
    operation_client = operations.OperationsClient()
    if version == constants.GA_VERSION:
      op = endpoints_client.DeployModel(
          endpoint_ref,
          args.model,
          args.region,
          args.display_name,
          machine_type=args.machine_type,
          accelerator_dict=args.accelerator,
          min_replica_count=args.min_replica_count,
          max_replica_count=args.max_replica_count,
          enable_access_logging=args.enable_access_logging,
          disable_container_logging=args.disable_container_logging,
          service_account=args.service_account,
          traffic_split=args.traffic_split,
          deployed_model_id=args.deployed_model_id)
    else:
      op = endpoints_client.DeployModelBeta(
          endpoint_ref,
          args.model,
          args.region,
          args.display_name,
          machine_type=args.machine_type,
          accelerator_dict=args.accelerator,
          min_replica_count=args.min_replica_count,
          max_replica_count=args.max_replica_count,
          autoscaling_metric_specs=args.autoscaling_metric_specs,
          enable_access_logging=args.enable_access_logging,
          enable_container_logging=args.enable_container_logging,
          service_account=args.service_account,
          traffic_split=args.traffic_split,
          deployed_model_id=args.deployed_model_id)
    response_msg = operations_util.WaitForOpMaybe(
        operation_client, op, endpoints_util.ParseOperation(op.name))
    if response_msg is not None:
      response = encoding.MessageToPyValue(response_msg)
      if 'deployedModel' in response and 'id' in response['deployedModel']:
        # Fix: the literal was previously broken by a raw newline inside a
        # single-quoted string (a syntax error); restored as implicit string
        # concatenation, matching the beta variant of this command.
        log.status.Print(('Deployed a model to the endpoint {}. '
                          'Id of the deployed model: {}.').format(
                              endpoint_ref.AsDict()['endpointsId'],
                              response['deployedModel']['id']))
    return response_msg
def _Run(args, version):
  """Create a new Vertex AI Tensorboard experiment.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to call.

  Returns:
    The created Tensorboard experiment.
  """
  validation.ValidateDisplayName(args.display_name)
  tensorboard_ref = args.CONCEPTS.tensorboard.Parse()
  region = tensorboard_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=region):
    experiments_client = client.TensorboardExperimentsClient(version=version)
    response = experiments_client.Create(tensorboard_ref, args)
    if response.name:
      log.status.Print(
          'Created Vertex AI Tensorboard experiment: {}.'.format(
              response.name))
    return response
def _Run(self, args, version):
  """Creates a new index and prints how to poll the resulting operation.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to use for endpoint overrides.

  Returns:
    The long-running create operation.
  """
  validation.ValidateDisplayName(args.display_name)
  region_ref = args.CONCEPTS.region.Parse()
  region = region_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=region):
    operation = client.IndexesClient().CreateBeta(region_ref, args)
    op_ref = indexes_util.ParseIndexOperation(operation.name)
    index_id = op_ref.AsDict()['indexesId']
    # Surface the operation name plus the follow-up flags the user needs.
    log.status.Print(
        constants.OPERATION_CREATION_DISPLAY_MESSAGE.format(
            name=operation.name,
            verb='create index',
            id=op_ref.Name(),
            sub_commands='--index={}'.format(index_id)))
    return operation
def _Run(args, version):
  """Update an existing Vertex AI endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track (GA or beta) to call.

  Returns:
    The patch result, or None when no update was requested.

  Raises:
    errors.NoFieldsSpecifiedError: if the patch reported no fields and none
      of the updatable flags were actually specified.
  """
  validation.ValidateDisplayName(args.display_name)
  endpoint_ref = args.CONCEPTS.endpoint.Parse()
  args.region = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    endpoints_client = client.EndpointsClient(version=version)

    def GetLabels():
      # Lazily fetched only when the labels actually need merging.
      return endpoints_client.Get(endpoint_ref).labels

    # Resolve the patch method and matching labels message class up front so
    # there is a single call site inside the try block.
    if version == constants.GA_VERSION:
      patch_fn = endpoints_client.Patch
      labels_cls = (
          endpoints_client.messages.GoogleCloudAiplatformV1Endpoint.LabelsValue)
    else:
      patch_fn = endpoints_client.PatchBeta
      labels_cls = (
          endpoints_client.messages.GoogleCloudAiplatformV1beta1Endpoint
          .LabelsValue)
    try:
      op = patch_fn(
          endpoint_ref,
          labels_util.ProcessUpdateArgsLazy(args, labels_cls, GetLabels),
          display_name=args.display_name,
          description=args.description,
          traffic_split=args.traffic_split,
          clear_traffic_split=args.clear_traffic_split)
    except errors.NoFieldsSpecifiedError:
      updatable_flags = (
          'display_name', 'traffic_split', 'clear_traffic_split',
          'update_labels', 'clear_labels', 'remove_labels', 'description')
      # Re-raise unless the user passed no update flags at all.
      if not any(args.IsSpecified(flag) for flag in updatable_flags):
        raise
      log.status.Print('No update to perform.')
      return None
    log.UpdatedResource(op.name, kind='Vertex AI endpoint')
    return op
def _Run(args, version):
  """Create a new Vertex AI Tensorboard time series.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to call.

  Returns:
    The created Tensorboard time series message.
  """
  validation.ValidateDisplayName(args.display_name)
  tensorboard_run_ref = args.CONCEPTS.tensorboard_run.Parse()
  region = tensorboard_run_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=region):
    time_series_client = client.TensorboardTimeSeriesClient(version=version)
    response = time_series_client.Create(tensorboard_run_ref, args)
    response_msg = encoding.MessageToPyValue(response)
    if 'name' in response_msg:
      log.status.Print(
          'Created Vertex AI Tensorboard time series: {}.'.format(
              response_msg['name']))
    return response
def _Run(args, version, release_prefix):
  """Run method for create command.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to call.
    release_prefix: str or None, the release track prefix (e.g. 'beta') used
      to render the follow-up gcloud command in the status message.

  Returns:
    The created model monitoring job.
  """
  validation.ValidateDisplayName(args.display_name)
  region_ref = args.CONCEPTS.region.Parse()
  region = region_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version=version, region=region):
    response = client.ModelMonitoringJobsClient(version=version).Create(
        region_ref, args)
    # Include the release track in the suggested command when present.
    cmd_prefix = 'gcloud ' + release_prefix if release_prefix else 'gcloud'
    log.status.Print(
        constants.MODEL_MONITORING_JOB_CREATION_DISPLAY_MESSAGE.format(
            id=model_monitoring_jobs_util.ParseJobName(response.name),
            cmd_prefix=cmd_prefix,
            state=response.state))
    return response
def _Run(self, args, version):
  """Creates a new AI Platform index endpoint and waits for the operation.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to use for endpoint overrides.

  Returns:
    The operation response message, or None if no response was produced.
  """
  validation.ValidateDisplayName(args.display_name)
  region_ref = args.CONCEPTS.region.Parse()
  region = region_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=region):
    operation = client.IndexEndpointsClient().CreateBeta(region_ref, args)
    response_msg = operations_util.WaitForOpMaybe(
        operations_client=operations.OperationsClient(),
        op=operation,
        op_ref=index_endpoints_util.ParseIndexEndpointOperation(
            operation.name))
    if response_msg is not None:
      created = encoding.MessageToPyValue(response_msg)
      if 'name' in created:
        log.status.Print(
            'Created AI Platform index endpoint: {}.'.format(created['name']))
    return response_msg
def _Run(args, version):
  """Create a new AI Platform endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to call.

  Returns:
    The operation response message, or None if no response was produced.
  """
  validation.ValidateDisplayName(args.display_name)
  region_ref = args.CONCEPTS.region.Parse()
  args.region = region_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    endpoints_client = client.EndpointsClient(version=version)
    operation_client = operations.OperationsClient()
    op = endpoints_client.CreateBeta(region_ref, args)
    response_msg = operations_util.WaitForOpMaybe(
        operation_client, op, endpoints_util.ParseOperation(op.name))
    if response_msg is not None:
      response = encoding.MessageToPyValue(response_msg)
      if 'name' in response:
        log.status.Print(
            'Created AI Platform endpoint: {}.'.format(response['name']))
    return response_msg
def _Run(self, args, version):
  """Deploys an index to an AI Platform index endpoint and waits for it.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to use for endpoint overrides.

  Returns:
    The operation response message, or None if no response was produced.
  """
  validation.ValidateDisplayName(args.display_name)
  index_endpoint_ref = args.CONCEPTS.index_endpoint.Parse()
  region = index_endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=region):
    operation = client.IndexEndpointsClient().DeployIndexBeta(
        index_endpoint_ref, args)
    response_msg = operations_util.WaitForOpMaybe(
        operations_client=operations.OperationsClient(),
        op=operation,
        op_ref=index_endpoints_util.ParseIndexEndpointOperation(
            operation.name))
    if response_msg is not None:
      deployed = encoding.MessageToPyValue(response_msg)
      if 'deployedIndex' in deployed and 'id' in deployed['deployedIndex']:
        log.status.Print(
            'Id of the deployed index: {}.'.format(
                deployed['deployedIndex']['id']))
    return response_msg
def _Run(args, version):
  """Deploy a model to an existing AI Platform endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to call.

  Returns:
    The operation response message, or None if no response was produced.
  """
  validation.ValidateDisplayName(args.display_name)
  endpoint_ref = args.CONCEPTS.endpoint.Parse()
  args.region = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    endpoints_client = client.EndpointsClient(version=version)
    operation_client = operations.OperationsClient()
    op = endpoints_client.DeployModelBeta(endpoint_ref, args)
    response_msg = operations_util.WaitForOpMaybe(
        operation_client, op, endpoints_util.ParseOperation(op.name))
    if response_msg is not None:
      response = encoding.MessageToPyValue(response_msg)
      if 'deployedModel' in response and 'id' in response['deployedModel']:
        log.status.Print(('Deployed a model to the endpoint {}. '
                          'Id of the deployed model: {}.').format(
                              endpoint_ref.AsDict()['endpointsId'],
                              response['deployedModel']['id']))
    return response_msg
def _Run(args, version):
  """Update an existing AI Platform endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line flags.
    version: str, the API release track to call.

  Returns:
    The patch result, or None when no update was requested.

  Raises:
    errors.NoFieldsSpecifiedError: if the patch reported no fields and none
      of the updatable flags were actually specified.
  """
  validation.ValidateDisplayName(args.display_name)
  endpoint_ref = args.CONCEPTS.endpoint.Parse()
  args.region = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    endpoints_client = client.EndpointsClient(version=version)
    try:
      op = endpoints_client.PatchBeta(endpoint_ref, args)
    except errors.NoFieldsSpecifiedError:
      updatable_flags = (
          'display_name', 'traffic_split', 'clear_traffic_split',
          'update_labels', 'clear_labels', 'remove_labels', 'description')
      # Re-raise unless the user passed no update flags at all.
      if not any(args.IsSpecified(flag) for flag in updatable_flags):
        raise
      log.status.Print('No update to perform.')
      return None
    log.UpdatedResource(op.name, kind='AI Platform endpoint')
    return op
def Run(self, args):
  """Validates the display name, then delegates to _Run on the alpha track."""
  validation.ValidateDisplayName(args.display_name)
  return _Run(args, constants.ALPHA_VERSION)