def obj_update(self, bundle, **kwargs):
    """Apply LNet configuration state changes for one or many hosts.

    A request addressed by primary key is delegated to the default
    tastypie update.  Otherwise the payload (either a single object or
    a bulk ``{"objects": [...]}`` envelope) is forwarded to the job
    scheduler, and an HTTP 202 carrying the resulting command is
    raised.  ``command`` is None if the Command row cannot be found.
    """
    if "pk" in kwargs:
        return super(LNetConfigurationResource, self).obj_update(bundle, **kwargs)

    # Accept a bulk envelope or a single configuration object.
    items = bundle.data.get("objects", [bundle.data])

    lnet_configuration = [
        {"host_id": item["host"]["id"], "state": item["state"]} for item in items
    ]

    command_id = JobSchedulerClient.update_lnet_configuration(lnet_configuration)

    try:
        command = Command.objects.get(pk=command_id)
    except ObjectDoesNotExist:
        command = None

    raise custom_response(self, bundle.request, http.HttpAccepted, {"command": dehydrate_command(command)})
def obj_create(self, bundle, **kwargs):
    """Create a target by dispatching the request to the job scheduler.

    Validation deliberately runs twice: once on the raw payload, and
    again after the content type has been resolved from the target
    kind (avoids a 404 / duplicated filesystem-validation failure).
    On POST, raises HTTP 202 carrying the command and the dehydrated
    new target.
    """
    request = bundle.request

    # First validation pass: reject malformed payloads outright.
    self.is_valid(bundle)
    if bundle.errors:
        raise ImmediateHttpResponse(
            response=self.error_response(bundle.request, bundle.errors[self._meta.resource_name])
        )

    # Errors dict on the bundle carries hydration errors through to
    # validation.
    setattr(bundle, 'data_errors', defaultdict(list))

    bundle.data['content_type'] = ContentType.objects.get_for_model(
        KIND_TO_KLASS[bundle.data['kind']]
    ).natural_key()

    # Second validation pass, now that content_type is populated.
    self.is_valid(bundle)

    targets, command = JobSchedulerClient.create_targets([bundle.data])

    if request.method == 'POST':
        raise custom_response(
            self,
            request,
            http.HttpAccepted,
            {
                'command': dehydrate_command(command),
                'target': self.full_dehydrate(self.build_bundle(obj=targets[0])).data,
            },
        )
def obj_create(self, bundle, **kwargs):
    """Run a caller-supplied list of jobs as a single command.

    Host URIs inside each job's args are translated to a JSON-encoded
    list of host ids before dispatch (HYD-1367 workaround: the Job
    class cannot handle m2m references, so HostListMixin consumes
    plain ids instead).  Scheduling failures become HTTP 400.
    """
    request = bundle.request

    for job in bundle.data["jobs"]:
        # FIXME: HYD-1367: hack around Job's inability to serialize
        # m2m host references properly.
        if "hosts" in job["args"]:
            host_ids = [
                HostResource().get_via_uri(uri, bundle.request).id
                for uri in job["args"]["hosts"]
            ]
            del job["args"]["hosts"]
            job["args"]["host_ids"] = json.dumps(host_ids)

    from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient

    try:
        command_id = JobSchedulerClient.command_run_jobs(bundle.data["jobs"], bundle.data["message"])
    except SchedulingError as e:
        raise custom_response(self, request, http.HttpBadRequest, {"state": e.message})

    bundle.obj = Command.objects.get(pk=command_id)
    return bundle
def patch_list(self, request, **kwargs):
    """
    Specialization of patch_list to do bulk target creation in a single
    RPC to job_scheduler (and consequently in a single command).

    If any member of the payload refers to an existing resource, the
    whole request falls back to the default tastypie patch_list.
    Raises HTTP 202 carrying the command and the new targets' URIs.
    """
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))

    if "objects" not in deserialized:
        raise BadRequest("Invalid data sent.")

    if len(deserialized["objects"]) and 'put' not in self._meta.detail_allowed_methods:
        raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())

    # If any of the included targets is not a creation, then
    # skip to a normal PATCH instead of this special case one.
    for target_data in deserialized['objects']:
        if 'id' in target_data or 'resource_uri' in target_data:
            # BUG FIX: previously the fallback's response was discarded
            # and execution fell through to the bulk-create path below,
            # attempting to re-create already-existing targets.
            return super(TargetResource, self).patch_list(request, **kwargs)

    # Validate and prepare each target dict for consumption by job_scheduler.
    for target_data in deserialized['objects']:
        data = self.alter_deserialized_detail_data(request, target_data)
        bundle = self.build_bundle(data=dict_strip_unicode_keys(data))
        bundle.request = request
        self.is_valid(bundle)

        target_data['content_type'] = ContentType.objects.get_for_model(KIND_TO_KLASS[target_data['kind']]).natural_key()

    targets, command = JobSchedulerClient.create_targets(deserialized['objects'])

    raise custom_response(self, request, http.HttpAccepted,
                          {'command': dehydrate_command(command),
                           'targets': [self.get_resource_uri(target) for target in targets]})
def _pool_delete(self, request, obj_list):
    """Delete each OST pool via the job scheduler, then raise HTTP 202
    listing the dehydrated commands (one per deleted pool)."""
    commands = []
    for pool in obj_list:
        cmd_pk = JobSchedulerClient.delete_ostpool(pool.id)
        commands.append(dehydrate_command(Command.objects.get(pk=cmd_pk)))
    raise custom_response(self, request, http.HttpAccepted, {"commands": commands})
def obj_create(self, bundle, **kwargs):
    """Create an OST pool via the job scheduler and raise HTTP 202
    carrying the dehydrated command.  (The new pool's id is returned
    by the RPC but not needed here.)"""
    _ostpool_id, command_id = JobSchedulerClient.create_ostpool(bundle.data)
    command = Command.objects.get(pk=command_id)
    raise custom_response(self, bundle.request, http.HttpAccepted, {"command": dehydrate_command(command)})
def obj_create(self, bundle, **kwargs):
    """Create a client mount of a filesystem on a host, then raise
    HTTP 202 carrying the prepared mount representation."""
    host = self.fields["host"].hydrate(bundle).obj
    filesystem = self.fields["filesystem"].hydrate(bundle).obj

    mount = JobSchedulerClient.create_client_mount(host, filesystem, bundle.data["mountpoint"])

    raise custom_response(
        self, bundle.request, http.HttpAccepted, dict(client_mount=self.prepare_mount(mount))
    )
def obj_create(self, bundle, **kwargs):
    """Configure stratagem via the job scheduler and raise HTTP 202
    carrying the command (``None`` if the Command row is missing)."""
    command_id = JobSchedulerClient.configure_stratagem(bundle.data)
    # first() yields None for a missing row, matching the original
    # get()/ObjectDoesNotExist fallback.
    command = Command.objects.filter(pk=command_id).first()
    raise custom_response(self, bundle.request, http.HttpAccepted, {"command": dehydrate_command(command)})
def obj_update(self, bundle, **kwargs):
    """Update an OST pool via the job scheduler.

    The initial lookup is purely an existence check so that a missing
    pool surfaces as NotFound rather than a scheduler error; its
    result is otherwise unused.
    """
    try:
        self.obj_get(bundle, **kwargs)
    except ObjectDoesNotExist:
        raise NotFound(
            "A model instance matching the provided arguments could not be found."
        )

    command = Command.objects.get(pk=JobSchedulerClient.update_ostpool(bundle.data))
    raise custom_response(self, bundle.request, http.HttpAccepted, {"command": dehydrate_command(command)})
def obj_create(self, bundle, request=None, **kwargs):
    """Create a filesystem via the job scheduler and raise HTTP 202
    carrying both the command and the serialized new filesystem."""
    fs_pk, cmd_pk = JobSchedulerClient.create_filesystem(bundle.data)

    filesystem = ManagedFilesystem.objects.get(pk=fs_pk)
    command = Command.objects.get(pk=cmd_pk)

    fs_bundle = self.full_dehydrate(self.build_bundle(obj=filesystem))
    filesystem_data = self.alter_detail_data_to_serialize(request, fs_bundle).data

    raise custom_response(
        self,
        request,
        http.HttpAccepted,
        {'command': dehydrate_command(command), 'filesystem': filesystem_data},
    )
def obj_create(self, bundle, request=None, **kwargs):
    """Create a copytool via the job scheduler and raise HTTP 202
    carrying its serialized representation.

    Host/filesystem URIs are resolved to primary keys in a cleaned
    copy of the payload before dispatch.
    """
    # NB: resolving is safe because the input has already been validated.
    clean_data = copy.deepcopy(bundle.data)
    clean_data['host'] = resolve(bundle.data['host'])[2]['pk']
    clean_data['filesystem'] = resolve(bundle.data['filesystem'])[2]['pk']

    copytool = JobSchedulerClient.create_copytool(clean_data)

    ct_bundle = self.full_dehydrate(self.build_bundle(obj=copytool))
    ct_data = self.alter_detail_data_to_serialize(request, ct_bundle).data

    raise custom_response(self, request, http.HttpAccepted, {'copytool': ct_data})
def obj_create(self, bundle, **kwargs):
    """Run stratagem over every actively-mounted MDT of the target
    filesystem; raises HTTP 202 carrying the command (``None`` when
    the Command row cannot be found)."""
    _, fs_id = get_fs_id(bundle)

    # Only MDTs with an active mount participate in the scan.
    mdt_ids = list(
        ManagedMdt.objects
        .filter(filesystem_id=fs_id, active_mount_id__isnull=False)
        .values_list("id", flat=True)
    )

    command_id = JobSchedulerClient.run_stratagem(mdt_ids, fs_id, bundle.data)

    try:
        command = Command.objects.get(pk=command_id)
    except ObjectDoesNotExist:
        command = None

    raise custom_response(self, bundle.request, http.HttpAccepted, {"command": dehydrate_command(command)})
def obj_create(self, bundle, request = None, **kwargs):
    """Run a caller-supplied list of jobs as a single command.

    Host URIs inside each job's args are translated to a JSON-encoded
    list of host ids before dispatch (HYD-1367 workaround for the
    HostListMixin class).  Scheduling failures become HTTP 400.
    """
    for job in bundle.data['jobs']:
        # FIXME: HYD-1367: This is a hack to work around the inability of
        # the Job class to handle m2m references properly, serializing hosts
        # to a list of IDs understood by the HostListMixin class
        if 'hosts' in job['args']:
            job_ids = []
            for uri in job['args']['hosts']:
                job_ids.append(HostResource().get_via_uri(uri).id)
            del job['args']['hosts']
            job['args']['host_ids'] = json.dumps(job_ids)

    from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
    try:
        command_id = JobSchedulerClient.command_run_jobs(bundle.data['jobs'], bundle.data['message'])
    # BUG FIX: was the Python 2-only "except SchedulingError, e:" form,
    # which is a SyntaxError under Python 3.
    except SchedulingError as e:
        raise custom_response(self, request, http.HttpBadRequest, {'state': e.message})

    # BUG FIX: previously fell off the end returning None; attach the
    # created Command and return the bundle, matching the sibling
    # obj_create implementation so tastypie can build a response.
    bundle.obj = Command.objects.get(pk=command_id)
    return bundle
def obj_create(self, bundle, request=None, **kwargs):
    """Update NIDs from a bulk ({'objects': [...]}) or single payload.

    Network-interface URIs are resolved to ids in place, the update is
    dispatched to the job scheduler, and HTTP 202 is raised carrying
    the command (``None`` if the Command row is missing).
    """
    nids_data = bundle.data.get('objects', [bundle.data])

    for nid_data in nids_data:
        nid_data['network_interface'] = NetworkInterfaceResource().get_via_uri(nid_data['network_interface']).id

    command_id = JobSchedulerClient.update_nids(nids_data)

    try:
        command = Command.objects.get(pk=command_id)
    except ObjectDoesNotExist:
        command = None

    raise custom_response(self, request, http.HttpAccepted, {'command': dehydrate_command(command)})
def obj_create(self, bundle, **kwargs):
    """Update NIDs from a bulk ({"objects": [...]}) or single payload.

    Network-interface URIs are resolved to ids in place, the update is
    dispatched to the job scheduler, and HTTP 202 is raised carrying
    the command (``None`` if the Command row is missing).
    """
    nids_data = bundle.data.get("objects", [bundle.data])

    for nid_data in nids_data:
        resource = NetworkInterfaceResource()
        nid_data["network_interface"] = resource.get_via_uri(nid_data["network_interface"], bundle.request).id

    command_id = JobSchedulerClient.update_nids(nids_data)

    try:
        command = Command.objects.get(pk=command_id)
    except ObjectDoesNotExist:
        command = None

    raise custom_response(self, bundle.request, http.HttpAccepted, {"command": dehydrate_command(command)})