def delete(self, name, namespace=None):
    """
    Delete the inference service

    :param name: inference service name
    :param namespace: defaults to current or default namespace
    :return:
    """
    if namespace is None:
        namespace = utils.get_default_target_namespace()

    try:
        return self.api_instance.delete_namespaced_custom_object(
            constants.KFSERVING_GROUP,
            constants.KFSERVING_VERSION,
            namespace,
            constants.KFSERVING_PLURAL,
            name,
            client.V1DeleteOptions())
    except client.rest.ApiException as e:
        raise RuntimeError(
            "Exception when calling CustomObjectsApi->"
            "delete_namespaced_custom_object: %s\n" % e)
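# A standalone sketch of the same delete made directly against CustomObjectsApi,
# for readers without the surrounding client class. It assumes kubeconfig auth;
# the group/version/plural strings are assumed stand-ins for the
# constants.KFSERVING_* values above, and the InferenceService name is hypothetical.
from kubernetes import client, config

config.load_kube_config()
api = client.CustomObjectsApi()

api.delete_namespaced_custom_object(
    "serving.kubeflow.org",    # assumed KFSERVING_GROUP
    "v1alpha2",                # assumed KFSERVING_VERSION
    "default",                 # target namespace
    "inferenceservices",       # assumed KFSERVING_PLURAL
    "flower-sample",           # hypothetical InferenceService name
    body=client.V1DeleteOptions(),
)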
def delete(self):
    """
    Delete a deployment resource. Idempotent.

    :returns: The deployment deletion status.
    :rtype: bool
    """
    try:
        v1beta1 = client.ExtensionsV1beta1Api()
        v1beta1.delete_namespaced_deployment(
            name=self.definition['name'],
            namespace=self.definition['namespace'],
            body=client.V1DeleteOptions(
                propagation_policy='Foreground',
                grace_period_seconds=5))
    except ApiException as exc:
        if exc.status != 404:
            LOGGER.error(exc)
            return False
    return True
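# Note: extensions/v1beta1 Deployments were removed in Kubernetes 1.16, so on
# newer clusters the equivalent delete goes through apps/v1. A hedged sketch of
# the same foreground delete via AppsV1Api, with an illustrative deployment
# name, might look like this.
from kubernetes import client, config
from kubernetes.client.rest import ApiException

config.load_kube_config()
apps_v1 = client.AppsV1Api()

try:
    apps_v1.delete_namespaced_deployment(
        name="my-app",            # hypothetical deployment name
        namespace="default",
        body=client.V1DeleteOptions(
            propagation_policy="Foreground",
            grace_period_seconds=5,
        ),
    )
except ApiException as exc:
    if exc.status != 404:
        raise  # 404 is treated as "already deleted", mirroring the helper above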
def finalizer():
    api = k8sclient.CoreV1Api()
    claim = api.read_namespaced_persistent_volume_claim(
        name=pvc_manifest['metadata']['name'], namespace='default')
    volume_name = claim.spec.volume_name

    api = get_core_api_client()
    api.delete_namespaced_persistent_volume_claim(
        name=pvc_manifest['metadata']['name'], namespace='default',
        body=k8sclient.V1DeleteOptions())

    # Working around line break issue.
    key = 'volume.beta.kubernetes.io/storage-provisioner'

    # If not using a StorageClass (such as in the CSI test), the Longhorn volume
    # will not be automatically deleted, causing this to throw an error.
    if key in claim.metadata.annotations:
        client = get_longhorn_api_client()
        wait_for_volume_delete(client, volume_name)
def delete_namespaced_role_binding_with_api(name, namespace):
    """Delete namespaced role binding with K8s API."""
    logging.info('Deleting namespaced role binding with API')
    _load_kube_config()

    body = client.V1DeleteOptions()
    with client.ApiClient() as api_client:
        api_instance = client.RbacAuthorizationV1Api(api_client)
        try:
            api_instance.delete_namespaced_role_binding(
                name=name,
                namespace=namespace,
                body=body,
                pretty=True
            )
            return True
        except ApiException:
            logging.exception("Exception when calling RbacAuthorizationV1Api"
                              "->delete_namespaced_role_binding.")
def delete_k8s_nfs_resources(self) -> bool:
    del_options = k8sclient.V1DeleteOptions()
    k8s_api_client = k8sclient.CoreV1Api()

    try:
        k8s_api_client.delete_persistent_volume(self.params.pv_name, del_options)
    except k8sclient.rest.ApiException as e:
        print("Got exception: %s while deleting NFS PV" % e)
        return False

    try:
        k8s_api_client.delete_namespaced_persistent_volume_claim(
            self.params.pvc_name, self.params.namespace, del_options)
    except k8sclient.rest.ApiException as e:
        print("Got exception: %s while deleting NFS PVC" % e)
        return False

    return True
def deploy_fake_backend_proxy_pod(namespace):
    """
    Deploys a socat pod named "fake-backend" that is ready to be used to
    tunnel datapoints back to this process.
    """
    corev1 = kube_client.CoreV1Api()

    pod_yaml = Path(SCRIPT_DIR / "tunnel/pod.yaml").read_bytes()
    pod = corev1.create_namespaced_pod(body=yaml.safe_load(pod_yaml), namespace=namespace)
    name = pod.metadata.name
    try:
        assert wait_for(p(pod_is_ready, name, namespace=namespace), timeout_seconds=K8S_CREATE_TIMEOUT)

        yield corev1.read_namespaced_pod(name, namespace=namespace).status.pod_ip
    finally:
        print("Fake backend proxy logs: %s" % get_pod_logs(name, namespace=namespace))
        corev1.delete_namespaced_pod(
            name,
            namespace=namespace,
            body=kube_client.V1DeleteOptions(grace_period_seconds=0, propagation_policy="Background"),
        )
def delete_ap_policy(custom_objects: CustomObjectsApi, name, namespace) -> None:
    """
    Delete an AppProtect policy.

    :param custom_objects: CustomObjectsApi
    :param namespace: namespace
    :param name: policy name
    :return:
    """
    print(f"Delete an AP policy: {name}")
    delete_options = client.V1DeleteOptions()
    custom_objects.delete_namespaced_custom_object(
        "appprotect.f5.com", "v1beta1", namespace, "appolicies", name, delete_options)
    ensure_item_removal(custom_objects.get_namespaced_custom_object,
                        "appprotect.f5.com", "v1beta1", namespace, "appolicies", name)
    print(f"AP policy was removed with name: {name}")
def _delete_k8s_resource(self, yaml_data):
    for data in yaml_data:
        if data is None:
            continue
        kind = data.get('kind', None)
        name = data.get('metadata').get('name', None)
        namespace = data.get('metadata').get('namespace', None)
        delete_data = client.V1DeleteOptions()
        logs = "Delete namespace={}, name={}, kind={}".format(
            namespace, name, kind)
        logger.info(logs)
        if kind in self.support_namespace:
            self.delete_func_dict.get(kind)(name, namespace, delete_data)
        else:
            self.delete_func_dict.get(kind)(name, delete_data)
        time.sleep(3)
def delete(self):
    """
    Delete the service from the Kubernetes cluster

    :return: None
    """
    body = client.V1DeleteOptions()

    try:
        status = self.api.delete_namespaced_service(self.name, self.namespace, body)

        logger.info(
            "Deleting Service %s in namespace: %s", self.name, self.namespace)
    except ApiException as e:
        raise ConuException(
            "Exception when calling Kubernetes API - delete_namespaced_service: {}\n".format(e))

    if status.status == 'Failure':
        raise ConuException("Deletion of Service failed")
def delete_deployment(extensions_v1_beta1: ExtensionsV1beta1Api, name, namespace) -> None:
    """
    Delete a deployment.

    :param extensions_v1_beta1: ExtensionsV1beta1Api
    :param name:
    :param namespace:
    :return:
    """
    print(f"Delete a deployment: {name}")
    delete_options = client.V1DeleteOptions()
    delete_options.grace_period_seconds = 0
    delete_options.propagation_policy = 'Foreground'
    extensions_v1_beta1.delete_namespaced_deployment(name, namespace, delete_options)
    ensure_item_removal(extensions_v1_beta1.read_namespaced_deployment_status, name, namespace)
    print(f"Deployment was removed with name '{name}'")
def delete_config_map(name, namespace='default', wait=False, skip_codes=None,
                      **kwargs):  # pylint: disable=unused-argument
    skip_codes = [] if not skip_codes else skip_codes
    core_v1_api = get_core_api()
    body = kwargs.get('body', client.V1DeleteOptions())
    kwargs.pop('body', None)
    try:
        core_v1_api.delete_namespaced_config_map(name, namespace, body, **kwargs)
    except ApiException as e:
        if e.status in skip_codes:
            LOG.info(e.reason)
        else:
            raise exceptions.KubernetesApiException(action='delete', resource='ConfigMap')
def delete_job():
    body = client.V1DeleteOptions()
    k8s_beta = client.BatchV1Api()
    pod_api = client.CoreV1Api()
    name = "cyb"
    name_job = "cyb-sub"
    namespace = "default"

    pod_list = pod_api.list_pod_for_all_namespaces(
        label_selector="app={}".format(name), pretty=True)
    for item in pod_list.items:
        pod_name = item.metadata.name
        print("Found '{}', deleting...".format(pod_name))
        pod_resp = pod_api.delete_namespaced_pod(pod_name, namespace, body)

    resp = k8s_beta.delete_namespaced_job(name_job, namespace, body)
    print("Jobs deleted. status='%s'" % str(resp.status))
def run(
    self,
    pod_name: str = None,
    namespace: str = "default",
    kube_kwargs: dict = None,
    kubernetes_api_key_secret: str = "KUBERNETES_API_KEY",
    delete_option_kwargs: dict = None,
) -> None:
    """
    Task run method.

    Args:
        - pod_name (str, optional): The name of a pod to delete
        - namespace (str, optional): The Kubernetes namespace to delete this pod in,
            defaults to the `default` namespace
        - kube_kwargs (dict, optional): Optional extra keyword arguments to pass to the
            Kubernetes API (e.g. `{"pretty": "...", "dry_run": "..."}`)
        - kubernetes_api_key_secret (str, optional): the name of the Prefect Secret
            which stores your Kubernetes API Key; this Secret must be a string and in
            BearerToken format
        - delete_option_kwargs (dict, optional): Optional keyword arguments to pass to
            the V1DeleteOptions object (e.g. `{"propagation_policy": "...",
            "grace_period_seconds": "..."}`)

    Raises:
        - ValueError: if `pod_name` is `None`
    """
    if not pod_name:
        raise ValueError("The name of a Kubernetes pod must be provided.")

    api_client = cast(
        client.CoreV1Api, get_kubernetes_client("pod", kubernetes_api_key_secret)
    )

    kube_kwargs = {**self.kube_kwargs, **(kube_kwargs or {})}
    delete_option_kwargs = delete_option_kwargs or {}

    api_client.delete_namespaced_pod(
        name=pod_name,
        namespace=namespace,
        body=client.V1DeleteOptions(**delete_option_kwargs),
        **kube_kwargs
    )
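# For orientation, a minimal standalone sketch of what the call above boils down
# to, assuming local kubeconfig auth instead of the Prefect secret-based client;
# the pod name and namespace are hypothetical.
from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

# These kwargs are forwarded straight into client.V1DeleteOptions(**...).
delete_option_kwargs = {"grace_period_seconds": 0, "propagation_policy": "Background"}
core_v1.delete_namespaced_pod(
    name="my-worker-abc123",   # hypothetical pod name
    namespace="default",
    body=client.V1DeleteOptions(**delete_option_kwargs),
)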
def delete(self, report=True):
    with STATSD_CLIENT.timer(e2e_globals.ACTION_METRIC_NAME % self.action_data("delete")):
        try:
            self.api.delete_namespaced_persistent_volume_claim(
                self.name, self.namespace, client.V1DeleteOptions())
        except ApiException as e:
            error_code, error_dict = self.parse_error(e.body)
            LOGGER.error(
                "Error deleting volume claim %s, API exception: %s msg: %s",
                self.name, error_code.name.lower(), error_dict['message'])
            self.add_error(error_dict['message'])
            self.incr_error_metric(error_code.name.lower())
        else:
            self.wait_on_deleted(report)
    super().delete(report)
def kill(self, name: str, store: Store) -> None:
    """Kill an MPIJob

    :param name: A name
    :type name: str
    :param store: The job store
    :type store: Store
    """
    delete_options = client.V1DeleteOptions(
        api_version=MPIJobHandler.VERSION, propagation_policy="Background")
    resource_id = store.get(name)[Store.RESOURCE_ID]
    self.api.delete_namespaced_custom_object(
        MPIJobHandler.GROUP,
        MPIJobHandler.VERSION,
        self.namespace,
        MPIJobHandler.PLURAL,
        resource_id,
        body=delete_options,
    )
def main():
    group = "argoproj.io"
    version = "v1alpha1"
    plural = "hyperparamworkflows"
    namespace = 'default'

    api_client = client.ApiClient()
    custom_api = client.CustomObjectsApi(api_client)
    watch = kwatch.Watch(return_type=object)

    logger.info("Starting loop")
    for event in watch.stream(custom_api.list_cluster_custom_object, group, version, plural):
        logger.debug(event)
        if event['type'] == 'ADDED':
            namespace = event['metadata']['namespace']
            hparams = event['raw_object']['spec']['hyperparams']
            if event['raw_object']['spec']['algorithm'] == 'grid':
                experiments = grid_search(hparams)
            wf = generate_workflow(event['raw_object'], experiments)
            try:
                resp = custom_api.create_namespaced_custom_object(
                    group, version, namespace, "workflows", wf, pretty=True)
            except client.rest.ApiException:
                continue
            logger.info(yaml.dump(wf))
        if event['type'] == 'DELETED':
            namespace = event['metadata']['namespace']
            # TODO: This would be better managed with resource owners
            name = event['raw_object']['metadata']['name']
            custom_api.delete_namespaced_custom_object(
                group, version, namespace, "workflows",
                name=name, body=client.V1DeleteOptions())
def on_kill(self):
    if self.kube_client is not None:
        logging.info('Stopping kubernetes pod')
        i = 0
        while i < 5:
            try:
                self.kube_client.delete_namespaced_pod(
                    name=self.pod.metadata.name,
                    namespace=self.pod.metadata.namespace,
                    body=client.V1DeleteOptions())
                return
            except rest.ApiException as e:
                if e.reason == 'Unauthorized':
                    logging.warning('API token expired!')
                    self._initialize_kube_config()
                if i > 0:
                    time.sleep(random.randint(0, 10))
                i += 1
                continue
def build(self):
    logging.info("Building image using cluster builder.")
    install_reqs_before_copy = self.preprocessor.is_requirements_txt_file_present()
    dockerfile_path = dockerfile.write_dockerfile(
        dockerfile_path=self.dockerfile_path,
        path_prefix=self.preprocessor.path_prefix,
        base_image=self.base_image,
        install_reqs_before_copy=install_reqs_before_copy
    )
    self.preprocessor.output_map[dockerfile_path] = 'Dockerfile'
    context_path, context_hash = self.preprocessor.context_tar_gz()
    self.image_tag = self.full_image_name(context_hash)
    self.context_source.prepare(context_path)

    labels = {'fairing-builder': 'kaniko'}
    labels['fairing-build-id'] = str(uuid.uuid1())
    pod_spec = self.context_source.generate_pod_spec(self.image_tag, self.push)
    for fn in self.pod_spec_mutators:
        fn(self.manager, pod_spec, self.namespace)

    build_pod = client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(
            generate_name="fairing-builder-",
            labels=labels,
            namespace=self.namespace,
        ),
        spec=pod_spec
    )
    created_pod = client.CoreV1Api().create_namespaced_pod(self.namespace, build_pod)
    self.manager.log(
        name=created_pod.metadata.name,
        namespace=created_pod.metadata.namespace,
        selectors=labels,
        container="kaniko")

    # clean up created pod and secret
    self.context_source.cleanup()
    client.CoreV1Api().delete_namespaced_pod(
        created_pod.metadata.name,
        created_pod.metadata.namespace,
        body=client.V1DeleteOptions())
def deleteConfigMap(parser_args, CorV1Client):
    print("delete config map")
    if listConfigMap(parser_args, CorV1Client):
        name = parser_args.name            # str | name of the ConfigMap
        namespace = parser_args.namespace  # str | object name and auth scope, such as for teams and projects
        body = client.V1DeleteOptions()    # V1DeleteOptions |
        pretty = 'true'                    # str | If 'true', then the output is pretty printed. (optional)
        try:
            api_response = CorV1Client.delete_namespaced_config_map(
                name, namespace, body, pretty=pretty)
            print(api_response)
        except ApiException as e:
            print(
                "Exception when calling CoreV1Api->delete_namespaced_config_map: %s\n" % e)
    else:
        print("Target ConfigMap object %s does not exist" % parser_args.name)
def kill(self, name: str, store: Store) -> None:
    """Kill a multi-worker PyTorch task with given name

    :param name: The task to kill
    :type name: str
    :param store: The jobs store
    :type store: Store
    :return: None
    """
    delete_options = client.V1DeleteOptions(
        api_version=PyTorchElasticJobHandler.VERSION,
        propagation_policy="Background")
    resource_id = store.get(name)[Store.RESOURCE_ID]
    return self.api.delete_namespaced_custom_object(
        PyTorchElasticJobHandler.GROUP,
        PyTorchElasticJobHandler.VERSION,
        self.namespace,
        PyTorchElasticJobHandler.PLURAL,
        resource_id,
        body=delete_options,
    )
def delete_virtual_server(custom_objects: CustomObjectsApi, name, namespace) -> None:
    """
    Delete a VirtualServer.

    :param custom_objects: CustomObjectsApi
    :param namespace: namespace
    :param name:
    :return:
    """
    print(f"Delete a VirtualServer: {name}")
    delete_options = client.V1DeleteOptions()
    custom_objects.delete_namespaced_custom_object(
        "k8s.nginx.org", "v1", namespace, "virtualservers", name, delete_options)
    ensure_item_removal(custom_objects.get_namespaced_custom_object,
                        "k8s.nginx.org", "v1", namespace, "virtualservers", name)
    print(f"VirtualServer was removed with name '{name}'")
def stop_worker(config_path, namespaces=('development', 'beta')):
    # Load configuration
    k8s_config.load_kube_config(get_full_config_path(config_path))
    body = k8s_client.V1DeleteOptions()
    core_v1 = k8s_client.CoreV1Api()
    app_v1 = k8s_client.AppsV1Api()
    autoscaling_v1_api = k8s_client.AutoscalingV1Api()
    custom_object_api = k8s_client.CustomObjectsApi()

    for ns in namespaces:
        # Delete Deployments
        for deployment in app_v1.list_namespaced_deployment(ns).items:
            app_v1.delete_namespaced_deployment(name=deployment.metadata.name,
                                                namespace=ns, body=body)

        # Delete Services
        for svc in core_v1.list_namespaced_service(ns).items:
            core_v1.delete_namespaced_service(name=svc.metadata.name,
                                              namespace=ns, body=body)

        # Delete horizontal pod autoscalers
        for autoscaler in autoscaling_v1_api.list_namespaced_horizontal_pod_autoscaler(ns).items:
            autoscaling_v1_api.delete_namespaced_horizontal_pod_autoscaler(
                name=autoscaler.metadata.name, namespace=ns, body=body)

        # Delete Istio VirtualServices
        for istio in custom_object_api.list_namespaced_custom_object(
                group="networking.istio.io", version="v1alpha3", namespace=ns,
                plural="virtualservices")["items"]:
            custom_object_api.delete_namespaced_custom_object(
                group="networking.istio.io", version="v1alpha3", namespace=ns,
                plural="virtualservices", name=istio["metadata"]["name"], body=body)

        # Delete Secrets
        for secret in core_v1.list_namespaced_secret(ns).items:
            core_v1.delete_namespaced_secret(name=secret.metadata.name,
                                             namespace=ns, body=body)
def stop_pod_by_name(self, pod_id, namespace="flow-jobs"):
    max_tries = 10
    delay = 6
    tries = 0
    found = False
    req = client.V1DeleteOptions()
    print("")
    self.log.info("################ Deleting Pod: {}".format(pod_id))
    try:
        pods_list = [
            pod.metadata.name
            for pod in self._client.list_namespaced_pod(
                namespace=namespace, pretty=True).items
        ]
        while pod_id in pods_list and tries < max_tries:
            self.log.info("### attempt: {}".format(tries))
            found = True
            self._client.delete_namespaced_pod(name=pod_id,
                                               body=req,
                                               namespace=namespace,
                                               grace_period_seconds=0)
            time.sleep(delay + tries)
            pods_list = [
                pod.metadata.name
                for pod in self._client.list_namespaced_pod(
                    namespace=namespace, pretty=True).items
            ]
            tries += 1
        if tries >= max_tries:
            self.log.info("################ Could not delete pod!")
        elif not found:
            self.log.info("################ Pod not found!")
        else:
            self.log.info("################ Pod deleted.")
        print("")
    except Exception as e:
        self.log.exception(
            "Exception when attempting to delete namespaced Pod.")
        self.log.exception("Could not delete pod: {}".format(pod_id))
        self.log.exception(e)
def deploy_unique_rbac_resources(self):
    """
    The cluster-wide RBAC resources (clusterrole/clusterrolebinding) are not
    namespaced, so they have to be handled specially to ensure they are unique
    amongst potentially multiple deployments of the agent in the same cluster.
    Basically just sticks the test namespace as a suffix to the resource names.
    """
    corev1 = kube_client.CoreV1Api()
    rbacv1beta1 = kube_client.RbacAuthorizationV1beta1Api()

    serviceaccount = corev1.create_namespaced_service_account(
        body=load_resource_yaml(AGENT_SERVICEACCOUNT_PATH), namespace=self.namespace)

    clusterrole_base = load_resource_yaml(AGENT_CLUSTERROLE_PATH)
    clusterrole_base["metadata"]["name"] = f"signalfx-agent-{self.namespace}"
    clusterrole = rbacv1beta1.create_cluster_role(body=clusterrole_base)

    crb_base = load_resource_yaml(AGENT_CLUSTERROLEBINDING_PATH)
    # Make the binding refer to our testing namespace's role and service account
    crb_base["metadata"]["name"] = f"signalfx-agent-{self.namespace}"
    crb_base["roleRef"]["name"] = clusterrole.metadata.name
    crb_base["subjects"][0]["namespace"] = self.namespace
    crb = rbacv1beta1.create_cluster_role_binding(body=crb_base)

    try:
        yield
    finally:
        delete_opts = kube_client.V1DeleteOptions(
            grace_period_seconds=0, propagation_policy="Background")
        rbacv1beta1.delete_cluster_role_binding(crb.metadata.name, body=delete_opts)
        rbacv1beta1.delete_cluster_role(clusterrole.metadata.name, body=delete_opts)
        corev1.delete_namespaced_service_account(
            serviceaccount.metadata.name, namespace=self.namespace, body=delete_opts)
        print("Deleted RBAC resources")
def _delete_job_action(self,
                       list_func,
                       delete_func,
                       job_type_description,
                       name,
                       namespace="default",
                       propagation_policy='Foreground',
                       timeout=DEFAULT_K8S_TIMEOUT):
    try:
        LOG.debug('Deleting %s %s, Wait timeout=%s',
                  job_type_description, name, timeout)
        body = client.V1DeleteOptions()
        w = watch.Watch()
        issue_delete = True
        for event in w.stream(list_func, namespace=namespace, timeout_seconds=timeout):
            if issue_delete:
                delete_func(name=name,
                            namespace=namespace,
                            body=body,
                            propagation_policy=propagation_policy)
                issue_delete = False

            event_type = event['type'].upper()
            job_name = event['object'].metadata.name

            if event_type == 'DELETED' and job_name == name:
                LOG.debug('Successfully deleted %s %s',
                          job_type_description, job_name)
                return

        err_msg = ('Reached timeout while waiting to delete %s: '
                   'name=%s, namespace=%s' %
                   (job_type_description, name, namespace))
        LOG.error(err_msg)
        raise exceptions.KubernetesWatchTimeoutException(err_msg)
    except ApiException as e:
        LOG.exception("Exception when deleting %s: name=%s, namespace=%s",
                      job_type_description, name, namespace)
        raise e
def copy_image(self, pool, ori, dest, size=1):
    sizes = {'debian': 2, 'centos': 8, 'fedora': 4, 'rhel': 10, 'trusty': 2.2,
             'xenial': 2.2, 'yakkety': 2.2, 'zesty': 2.2, 'artful': 2.2}
    core = self.core
    namespace = self.namespace
    ori = ori.replace('_', '-').replace('.', '-').lower()
    for key in sizes:
        if key in ori and ori.endswith('qcow2'):
            size = sizes[key]
            break
    size = 1024 * int(size) + 100
    now = datetime.datetime.now().strftime("%Y%M%d%H%M")
    podname = '%s-%s-copy' % (now, dest)
    pvc = {'kind': 'PersistentVolumeClaim',
           'spec': {'storageClassName': pool,
                    'accessModes': ['ReadWriteOnce'],
                    'resources': {'requests': {'storage': '%sMi' % size}}},
           'apiVersion': 'v1',
           'metadata': {'name': dest}}
    pod = {'kind': 'Pod',
           'spec': {'restartPolicy': 'Never',
                    'containers': [{'image': 'alpine',
                                    'volumeMounts': [{'mountPath': '/storage1', 'name': 'storage1'},
                                                     {'mountPath': '/storage2', 'name': 'storage2'}],
                                    'name': 'copy',
                                    'command': ['cp'],
                                    'args': ['/storage1/disk.img', '/storage2']}],
                    'volumes': [{'name': 'storage1', 'persistentVolumeClaim': {'claimName': ori}},
                                {'name': 'storage2', 'persistentVolumeClaim': {'claimName': dest}}]},
           'apiVersion': 'v1',
           'metadata': {'name': podname}}
    try:
        core.read_namespaced_persistent_volume_claim(dest, namespace)
        common.pprint("Using existing pvc")
    except:
        core.create_namespaced_persistent_volume_claim(namespace, pvc)
        bound = self.pvc_bound(dest, namespace)
        if not bound:
            return {'result': 'failure', 'reason': 'timeout waiting for pvc to get bound'}
    core.create_namespaced_pod(namespace, pod)
    completed = self.pod_completed(podname, namespace)
    if not completed:
        common.pprint("Issue with pod %s. Leaving it for debugging purposes" % podname, color='red')
        return {'result': 'failure', 'reason': 'timeout waiting for copy to finish'}
    else:
        core.delete_namespaced_pod(podname, namespace, client.V1DeleteOptions())
    return {'result': 'success'}
def ray_exec(func, args, tid, tag=None, tags=("latest",), image=None):
    start = time.time()
    data = pickle.dumps({
        "func": func,
        "args": args,
    })

    # k8s object name should not contain underscore
    func_name = func.__name__
    print("executing:", func_name)
    task_id = func_name.replace("_", "-") + "-" + str(uuid.uuid4())
    image = image if image else func_image_map[func_name]

    from kubernetes import config, client
    config.load_kube_config()
    k8s_client = core_v1_api.CoreV1Api()

    # launch pod
    tag = np.random.choice(tags, 1)[0] if tag is None else tag
    image = image + ":" + tag
    pod_name = launch(task_id, image, k8s_client)

    # set up port forwarding
    local_port = str(get_free_tcp_port())
    mapped_port = "{}:{}".format(local_port, handler_port)
    port_forward(pod_name, mapped_port)

    # submit task
    try:
        resp = requests.get("http://127.0.0.1:{}".format(local_port), data=data)
        result = pickle.loads(resp.content)
        latency = time.time() - start
        k8s_client.delete_namespaced_pod(pod_name, namespace="default",
                                         body=client.V1DeleteOptions())
        # print("result: {} time: {}".format(result, latency))
        return {
            "func": func_name,
            "lat": latency,
            "tid": tid,
        }
    except:
        return None
def clean_nuts(self, valid_id_rotation):
    crds = client.CustomObjectsApi()
    nuts = crds.list_cluster_custom_object(self.squirrel.domain_api,
                                           self.squirrel.api_version,
                                           'nuts')["items"]
    for n in nuts:
        if n["data"].get("id_rotation", 10000000000) != valid_id_rotation:
            try:
                api_response = crds.delete_namespaced_custom_object(
                    self.squirrel.domain_api,
                    self.squirrel.api_version,
                    n["metadata"]["namespace"],
                    'nuts',
                    n["metadata"]["name"],
                    client.V1DeleteOptions())
                print(api_response)
            except ApiException as e:
                print("Exception when calling CustomObjectsApi->"
                      "delete_namespaced_custom_object: %s\n" % e)
def delete(self, options=None):
    """Delete the Namespace.

    Args:
        options (client.V1DeleteOptions): Options for Namespace deletion.

    Returns:
        client.V1Status: The status of the delete operation.
    """
    if options is None:
        options = client.V1DeleteOptions()

    log.info('deleting namespace "%s"', self.name)
    log.debug('delete options: %s', options)
    log.debug('namespace: %s', self.obj)

    return self.api_client.delete_namespace(
        name=self.name,
        body=options,
    )
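# A short sketch of the direct equivalent of the wrapper method above, assuming
# kubeconfig auth; the namespace name is hypothetical. Passing a foreground
# propagation policy makes the API wait for dependents before the namespace is
# reported deleted.
from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

options = client.V1DeleteOptions(propagation_policy="Foreground")
status = core_v1.delete_namespace(name="integration-tests", body=options)
print(status.status)  # the V1Status returned by the API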
def delete_namespaced_svcacct_objs(self):
    """Remove service accounts, roles, and rolebindings from namespace.
    You don't usually have to call this, since they will get cleaned
    up as part of namespace deletion.
    """
    with start_action(action_type="delete_namespaced_svcacct_objs"):
        namespace = self.namespace
        account = self.service_account
        if not account:
            self.log.info("Service account not defined.")
            return
        dopts = client.V1DeleteOptions()
        self.log.info("Deleting service accounts/role/rolebinding " +
                      "for %s" % namespace)
        self.parent.rbac_api.delete_namespaced_role_binding(
            account, namespace, dopts)
        self.parent.rbac_api.delete_namespaced_role(
            account, namespace, dopts)
        self.parent.api.delete_namespaced_service_account(
            account, namespace, dopts)