def _Run(self, args, version):
  """Update a Vertex AI index, waiting only for quick metadata updates."""
  index_resource = args.CONCEPTS.index.Parse()
  location = index_resource.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=location):
    patch_op = client.IndexesClient().PatchBeta(index_resource, args)
    if args.metadata_file is not None:
      # A content (metadata-file) update can run for hours, so surface the
      # long-running operation to the user instead of blocking on it.
      patch_op_ref = indexes_util.ParseIndexOperation(patch_op.name)
      log.status.Print(
          constants.OPERATION_CREATION_DISPLAY_MESSAGE.format(
              name=patch_op.name,
              verb='update index',
              id=patch_op_ref.Name(),
              sub_commands='--index={}'.format(index_resource.Name())))
      return patch_op
    # A plain metadata update is fast enough to wait for synchronously.
    result_msg = operations_util.WaitForOpMaybe(
        operations_client=operations.OperationsClient(),
        op=patch_op,
        op_ref=indexes_util.ParseIndexOperation(patch_op.name),
        log_method='update')
    if result_msg is not None:
      result = encoding.MessageToPyValue(result_msg)
      if 'name' in result:
        log.UpdatedResource(result['name'], kind='AI Platform index')
    return result_msg
def Run(self, args):
  """List hyperparameter tuning jobs in the region given on the CLI."""
  region_ref = args.CONCEPTS.region.Parse()
  location = region_ref.AsDict()['locationsId']
  # Point the API client at the regional endpoint before issuing the call.
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.BETA_VERSION, region=location):
    return client.HpTuningJobsClient().List(region=region_ref.RelativeName())
def _Run(self, args, version):
  """Deploy an index to an index endpoint and print follow-up commands."""
  validation.ValidateDisplayName(args.display_name)
  endpoint_ref = args.CONCEPTS.index_endpoint.Parse()
  endpoint_dict = endpoint_ref.AsDict()
  project_id = endpoint_dict['projectsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version, region=endpoint_dict['locationsId']):
    endpoints_client = client.IndexEndpointsClient(version=version)
    if version == constants.GA_VERSION:
      deploy_op = endpoints_client.DeployIndex(endpoint_ref, args)
    else:
      deploy_op = endpoints_client.DeployIndexBeta(endpoint_ref, args)
    deploy_op_ref = index_endpoints_util.ParseIndexEndpointOperation(
        deploy_op.name)
    # TODO(b/208506223): Support `--async` flag.
    # Tell the user how to poll the operation we are not waiting on.
    log.status.Print(
        constants.OPERATION_CREATION_DISPLAY_MESSAGE.format(
            name=deploy_op.name,
            verb='deploy index',
            id=deploy_op_ref.Name(),
            sub_commands='--index-endpoint={} [--project={}]'.format(
                deploy_op_ref.AsDict()['indexEndpointsId'], project_id)))
    return deploy_op
def _Run(args, version):
  """Run Vertex AI online prediction."""
  endpoint_ref = args.CONCEPTS.endpoint.Parse()
  args.region = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    # An '@path' argument means the request body comes from a file (or stdin).
    if args.request.startswith('@'):
      body = console_io.ReadFromFileOrStdin(args.request[1:], binary=True)
    else:
      body = args.request.encode('utf-8')
    _, raw_response = client.EndpointsClient(version=version).RawPredict(
        endpoint_ref, args.http_headers, body)
    # Workaround since gcloud only supports protobufs as JSON objects. Since
    # raw predict can return anything, write raw bytes to stdout.
    if not args.IsSpecified('format'):
      sys.stdout.buffer.write(raw_response)
      return None
    # A --format was requested, so the payload must decode as a JSON object.
    try:
      return json.loads(raw_response.decode('utf-8'))
    except ValueError:
      raise core_exceptions.Error(
          'No JSON object could be decoded from the HTTP response body:\n' +
          six.text_type(raw_response))
def _Run(args, version):
  """Run method for update command."""
  validation.ValidateDisplayName(args.display_name)
  job_ref = args.CONCEPTS.monitoring_job.Parse()
  location = job_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=location):
    try:
      result = client.ModelMonitoringJobsClient(version=version).Patch(
          job_ref, args)
    except errors.NoFieldsSpecifiedError:
      # Only swallow the error when the user passed none of the flags that
      # could have produced an update; otherwise re-raise it.
      updatable_flags = (
          'display_name',
          'emails',
          'prediction_sampling_rate',
          'feature_thresholds',
          'monitoring_config_from_file',
          'monitoring_frequency',
          'analysis_instance_schema',
          'log_ttl',
          'update_labels',
          'clear_labels',
          'remove_labels',
          'anomaly_cloud_logging',
      )
      if not any(args.IsSpecified(flag) for flag in updatable_flags):
        raise
      log.status.Print('No update to perform.')
      return None
    else:
      log.UpdatedResource(
          result.name, kind='Vertex AI model deployment monitoring job')
      return result
def _Run(args, version):
  """Update an existing Vertex AI Tensorboard."""
  tensorboard_ref = args.CONCEPTS.tensorboard.Parse()
  args.region = tensorboard_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    try:
      patch_op = client.TensorboardsClient(version=version).Patch(
          tensorboard_ref, args)
    except errors.NoFieldsSpecifiedError:
      # Only swallow the error when the user passed none of the update flags;
      # otherwise re-raise it.
      updatable_flags = (
          'display_name',
          'description',
          'update_labels',
          'clear_labels',
          'remove_labels',
      )
      if not any(args.IsSpecified(flag) for flag in updatable_flags):
        raise
      log.status.Print('No update to perform.')
      return None
    else:
      log.UpdatedResource(patch_op.name, kind='Vertex AI Tensorboard')
      return patch_op
def _Run(self, args):
  """Fetch a single Vertex AI model by resource reference."""
  model_ref = args.CONCEPTS.model.Parse()
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.BETA_VERSION,
      region=model_ref.AsDict()['locationsId']):
    return client.ModelsClient().Get(model_ref)
def _Run(self, args):
  """Fetch a single Vertex AI Tensorboard (alpha API surface)."""
  tensorboard_ref = args.CONCEPTS.tensorboard.Parse()
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.ALPHA_VERSION,
      region=tensorboard_ref.AsDict()['locationsId']):
    return client.TensorboardsClient().Get(tensorboard_ref)
def _Run(self, args, version):
  """Update an index endpoint's mutable fields."""
  validation.ValidateDisplayName(args.display_name)
  endpoint_ref = args.CONCEPTS.index_endpoint.Parse()
  location = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=location):
    endpoints_client = client.IndexEndpointsClient(version=version)
    # GA and pre-GA tracks go through differently named client methods.
    patch = (
        endpoints_client.Patch
        if version == constants.GA_VERSION else endpoints_client.PatchBeta)
    try:
      result = patch(endpoint_ref, args)
    except errors.NoFieldsSpecifiedError:
      # Only swallow the error when the user passed none of the update flags;
      # otherwise re-raise it.
      updatable_flags = (
          'display_name',
          'description',
          'update_labels',
          'clear_labels',
          'remove_labels',
      )
      if not any(args.IsSpecified(flag) for flag in updatable_flags):
        raise
      log.status.Print('No update to perform.')
      return None
    else:
      log.UpdatedResource(result.name, kind='Vertex AI index endpoint')
      return result
def Run(self, args):
  """Fetch a hyperparameter tuning job by resource name."""
  job_ref = args.CONCEPTS.hptuning_job.Parse()
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.BETA_VERSION,
      region=job_ref.AsDict()['locationsId']):
    return client.HpTuningJobsClient().Get(job_ref.RelativeName())
def _Run(args, version):
  """Fetch a single Vertex AI Tensorboard on the given API version."""
  tensorboard_ref = args.CONCEPTS.tensorboard.Parse()
  location = tensorboard_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version=version, region=location):
    return client.TensorboardsClient(version=version).Get(tensorboard_ref)
def _Run(self, args):
  """List Tensorboards in the requested region (alpha API surface)."""
  region_ref = args.CONCEPTS.region.Parse()
  location = region_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.ALPHA_VERSION, region=location):
    return client.TensorboardsClient().List(
        limit=args.limit,
        page_size=args.page_size,
        region_ref=region_ref,
        sort_by=args.sort_by)
def _Run(self, args):
  """Delete a Vertex AI model, waiting for the operation to complete."""
  model_ref = args.CONCEPTS.model.Parse()
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.BETA_VERSION,
      region=model_ref.AsDict()['locationsId']):
    delete_op = client.ModelsClient().Delete(model_ref)
    return operations_util.WaitForOpMaybe(
        operations_client=operations.OperationsClient(),
        op=delete_op,
        op_ref=models_util.ParseModelOperation(delete_op.name))
def Run(self, args):
  """Fetch a custom job by resource name on this command's API version."""
  job_ref = args.CONCEPTS.custom_job.Parse()
  location = job_ref.AsDict()['locationsId']
  # Reject unsupported regions before touching the API.
  validation.ValidateRegion(location)
  with endpoint_util.AiplatformEndpointOverrides(
      version=self._api_version, region=location):
    return client.CustomJobsClient(version=self._api_version).Get(
        job_ref.RelativeName())
def _Run(args, version):
  """List Tensorboard time series under the given Tensorboard run."""
  run_ref = args.CONCEPTS.tensorboard_run_id.Parse()
  location = run_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version=version, region=location):
    return client.TensorboardTimeSeriesClient(version=version).List(
        tensorboard_run_ref=run_ref,
        limit=args.limit,
        page_size=args.page_size,
        sort_by=args.sort_by)
def _Run(args, version):
  """Read data points from a Tensorboard time series."""
  series_ref = args.CONCEPTS.tensorboard_time_series.Parse()
  location = series_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version=version, region=location):
    return client.TensorboardTimeSeriesClient(version=version).Read(
        tensorboard_time_series_ref=series_ref,
        max_data_points=args.max_data_points,
        data_filter=args.filter)
def _Run(args, version):
  """Deploy a model to an existing Vertex AI endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line arguments; also mutated
      to carry the resolved region (args.region).
    version: str, the API version track (compared against
      constants.GA_VERSION to pick GA vs. beta behavior).

  Returns:
    The operation response message when the deploy operation was waited on,
    otherwise the value returned by operations_util.WaitForOpMaybe (may be
    None, e.g. for async runs — TODO confirm against WaitForOpMaybe).
  """
  validation.ValidateDisplayName(args.display_name)
  # Autoscaling metric specs are only accepted on pre-GA tracks.
  if version != constants.GA_VERSION:
    validation.ValidateAutoscalingMetricSpecs(args.autoscaling_metric_specs)
  endpoint_ref = args.CONCEPTS.endpoint.Parse()
  # Resolve the region from the endpoint resource so the endpoint override
  # below targets the correct regional API endpoint.
  args.region = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    endpoints_client = client.EndpointsClient(version=version)
    operation_client = operations.OperationsClient()
    if version == constants.GA_VERSION:
      # NOTE: the GA surface takes disable_container_logging while the beta
      # surface takes enable_container_logging — the flag polarity differs
      # between tracks.
      op = endpoints_client.DeployModel(
          endpoint_ref,
          args.model,
          args.region,
          args.display_name,
          machine_type=args.machine_type,
          accelerator_dict=args.accelerator,
          min_replica_count=args.min_replica_count,
          max_replica_count=args.max_replica_count,
          enable_access_logging=args.enable_access_logging,
          disable_container_logging=args.disable_container_logging,
          service_account=args.service_account,
          traffic_split=args.traffic_split,
          deployed_model_id=args.deployed_model_id)
    else:
      op = endpoints_client.DeployModelBeta(
          endpoint_ref,
          args.model,
          args.region,
          args.display_name,
          machine_type=args.machine_type,
          accelerator_dict=args.accelerator,
          min_replica_count=args.min_replica_count,
          max_replica_count=args.max_replica_count,
          autoscaling_metric_specs=args.autoscaling_metric_specs,
          enable_access_logging=args.enable_access_logging,
          enable_container_logging=args.enable_container_logging,
          service_account=args.service_account,
          traffic_split=args.traffic_split,
          deployed_model_id=args.deployed_model_id)
    response_msg = operations_util.WaitForOpMaybe(
        operation_client, op, endpoints_util.ParseOperation(op.name))
    if response_msg is not None:
      response = encoding.MessageToPyValue(response_msg)
      # Only print the confirmation when the response carries the deployed
      # model's id.
      if 'deployedModel' in response and 'id' in response['deployedModel']:
        log.status.Print(('Deployed a model to the endpoint {}. '
                          'Id of the deployed model: {}.').format(
                              endpoint_ref.AsDict()['endpointsId'],
                              response['deployedModel']['id']))
    return response_msg
def _Run(args, version):
  """Undeploy a model from an existing AI Platform endpoint.

  Args:
    args: argparse.Namespace, the parsed command-line arguments; also mutated
      to carry the resolved region (args.region).
    version: str, the API version track used to build the endpoints client.

  Returns:
    The result of operations_util.WaitForOpMaybe for the undeploy operation.
  """
  endpoint_ref = args.CONCEPTS.endpoint.Parse()
  # Resolve the region from the endpoint resource so the endpoint override
  # below targets the correct regional API endpoint.
  args.region = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=args.region):
    endpoints_client = client.EndpointsClient(version=version)
    operation_client = operations.OperationsClient()
    # NOTE(review): only the beta undeploy method is called here regardless
    # of `version` — confirm this command is beta-only.
    op = endpoints_client.UndeployModelBeta(endpoint_ref, args)
    return operations_util.WaitForOpMaybe(
        operation_client, op, endpoints_util.ParseOperation(op.name))
def Run(self, args):
  """Cancel a running hyperparameter tuning job and confirm to the user."""
  job_ref = args.CONCEPTS.hptuning_job.Parse()
  job_id = job_ref.Name()
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.BETA_VERSION,
      region=job_ref.AsDict()['locationsId']):
    cancel_response = client.HpTuningJobsClient().Cancel(
        job_ref.RelativeName())
    log.status.Print(
        constants.HPTUNING_JOB_CANCEL_DISPLAY_MESSAGE.format(id=job_id))
    return cancel_response
def _Run(self, args, version):
  """Undeploy an index from an index endpoint and wait for the operation."""
  endpoint_ref = args.CONCEPTS.index_endpoint.Parse()
  location = endpoint_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=location):
    undeploy_op = client.IndexEndpointsClient().UndeployIndexBeta(
        endpoint_ref, args)
    return operations_util.WaitForOpMaybe(
        operations_client=operations.OperationsClient(),
        op=undeploy_op,
        op_ref=index_endpoints_util.ParseIndexEndpointOperation(
            undeploy_op.name))
def _Run(self, args):
  """Delete a Tensorboard (alpha API) and wait for the operation."""
  tensorboard_ref = args.CONCEPTS.tensorboard.Parse()
  location = tensorboard_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.ALPHA_VERSION, region=location):
    delete_op = client.TensorboardsClient().Delete(tensorboard_ref)
    return operations_util.WaitForOpMaybe(
        operations_client=operations.OperationsClient(),
        op=delete_op,
        op_ref=tensorboards_util.ParseTensorboardOperation(delete_op.name))
def _Run(args, version):
  """Delete a Tensorboard run and wait for the operation to finish."""
  run_ref = args.CONCEPTS.tensorboard_run.Parse()
  location = run_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version=version, region=location):
    delete_op = client.TensorboardRunsClient(version=version).Delete(run_ref)
    return operations_util.WaitForOpMaybe(
        operations_client=operations.OperationsClient(),
        op=delete_op,
        op_ref=tensorboards_util.ParseTensorboardOperation(delete_op.name))
def _Run(args, version):
  """Create a new Vertex AI Tensorboard run."""
  experiment_ref = args.CONCEPTS.tensorboard_experiment.Parse()
  location = experiment_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(version, region=location):
    created = client.TensorboardRunsClient(version=version).Create(
        experiment_ref, args)
    created_dict = encoding.MessageToPyValue(created)
    # Confirm creation to the user when the response names the new run.
    if 'name' in created_dict:
      log.status.Print('Created Vertex AI Tensorboard run: {}.'.format(
          created_dict['name']))
    return created
def Run(self, args):
  """Stream logs for a hyperparameter tuning job until it completes."""
  job_ref = args.CONCEPTS.hptuning_job.Parse()
  location = job_ref.AsDict()['locationsId']
  with endpoint_util.AiplatformEndpointOverrides(
      version=constants.BETA_VERSION, region=location):
    # Keep streaming while the job is still running.
    continue_fn = client.HpTuningJobsClient().CheckJobComplete(
        job_ref.RelativeName())
    return log_util.StreamLogs(
        job_ref.AsDict()['hyperparameterTuningJobsId'],
        continue_function=continue_fn,
        polling_interval=args.polling_interval,
        task_name=args.task_name,
        allow_multiline=args.allow_multiline_logs)