def obj_update(self, bundle, **kwargs):
    """Update the primary/use flags on a Volume's VolumeNodes.

    Expects ``bundle.data`` to carry the volume ``id`` and a ``nodes`` list
    of ``{"id": ..., "primary": ..., "use": ...}`` dicts.  Rejects the
    update (HTTP 400) if the Volume is already used by a target, or if the
    requested nodes span more than one HA cluster.  Returns the bundle.
    """
    # FIXME: I'm not exactly sure how cached cached_obj_get is -- should
    # we be explicitly getting a fresh one? I'm just following what the
    # ModelResource obj_update does - jcs
    bundle.obj = self.cached_obj_get(
        bundle, **self.remove_api_resource_names(kwargs))
    volume = bundle.data

    # Check that we're not trying to modify a Volume that is in use by a
    # target: a Volume absent from the unused-LUN set is in use.
    try:
        Volume.get_unused_luns(Volume.objects).get(id=volume["id"])
    except Volume.DoesNotExist:
        # Previously raised AssertionError, which surfaces as a 500 server
        # error for what is bad client input; answer with a 400 like the
        # HA-cluster validation below.
        raise ImmediateHttpResponse(
            response=HttpBadRequest("Volume %s is in use!" % volume["id"]))

    lun = get_object_or_404(Volume, id=volume["id"])
    node_ids = [node["id"] for node in volume["nodes"]]
    host_ids = set(
        lun.volumenode_set.filter(id__in=node_ids).values_list(
            "host_id", flat=True))

    # Sanity-check the primary/failover relationships: every host named in
    # the request must belong to a single HA cluster.
    if not any(
            host_ids.issubset(host.id for host in cluster.peers)
            for cluster in HaCluster.all_clusters()):
        error_msg = (
            "Attempt to set primary/secondary VolumeNodes across "
            "HA clusters for Volume %s:%s\n" % (lun.id, lun.label))
        error_msg += "\nVolume Node Hosts %s\n" % ", ".join([
            str(host) for host in ManagedHost.objects.filter(id__in=host_ids)
        ])
        error_msg += "\nKnown HA Clusters %s\n" % ", ".join([
            "(%s)" % ", ".join([str(host) for host in cluster.peers])
            for cluster in HaCluster.all_clusters()
        ])
        raise ImmediateHttpResponse(response=HttpBadRequest(error_msg))

    # Apply use,primary values from the request
    for node in volume["nodes"]:
        lun.volumenode_set.filter(id=node["id"]).update(
            primary=node["primary"], use=node["use"])

    # Clear use, primary on any nodes not in this request
    lun.volumenode_set.exclude(id__in=node_ids).update(
        primary=False, use=False)

    return bundle
def apply_filters(self, request, filters=None):
    """Apply the standard Tastypie filters plus Volume-specific ones.

    Recognised extra GET parameters (each optional):
      - ``category``: 'unused' or 'usable' narrows to the corresponding
        LUN sets; any other value is a 400.
      - ``host_id`` (optionally combined with ``primary``): limit to
        Volumes with live VolumeNodes on that host.
      - ``filesystem_id``: limit to Volumes backing that filesystem's
        MDTs, OSTs, or its MGS; an unknown id yields an empty list.
    """
    objects = super(VolumeResource, self).apply_filters(request, filters)

    try:
        category = request.GET['category']
        # E713: use "not in" rather than "not x in".
        if category not in ['unused', 'usable', None]:
            raise ImmediateHttpResponse(response=HttpBadRequest())
        if category == 'unused':
            objects = Volume.get_unused_luns(objects)
        elif category == 'usable':
            objects = Volume.get_usable_luns(objects)
    except KeyError:
        # Not filtering on category
        pass

    try:
        try:
            objects = objects.filter(
                Q(volumenode__primary=request.GET['primary'])
                & Q(volumenode__host__id=request.GET['host_id'])
                & Q(volumenode__not_deleted=True)).distinct()
        except KeyError:
            # Not filtering on primary, try just host_id
            objects = objects.filter(
                Q(volumenode__host__id=request.GET['host_id'])
                & Q(volumenode__not_deleted=True)).distinct()
    except KeyError:
        # Not filtering on host_id
        pass

    try:
        try:
            fs = ManagedFilesystem.objects.get(
                pk=request.GET['filesystem_id'])
        except ManagedFilesystem.DoesNotExist:
            # No filesystem so we want to produce an empty list.
            objects = objects.filter(id=-1)
        else:
            objects = objects.filter((
                Q(managedtarget__managedmdt__filesystem=fs)
                | Q(managedtarget__managedost__filesystem=fs))
                | Q(managedtarget__id=fs.mgs.id))
    except KeyError:
        # Not filtering on filesystem_id
        pass

    return objects
def alter_list_data_to_serialize(self, request, data):
    """Stamp an HA status label onto each serialized Volume.

    The per-node primary/use flags are already present in each Volume
    bundle, so the counts are aggregated here rather than issuing a
    second query per Volume.  This might only be a marginal speed up.
    """
    for volume_bundle in data["objects"]:
        node_bundles = volume_bundle.data["volume_nodes"]
        # A node counts as primary when its flag is set, and as a
        # failover candidate when it is usable but not primary.
        primaries = sum(
            1 for nb in node_bundles if nb.data["primary"])
        failovers = sum(
            1 for nb in node_bundles
            if nb.data["use"] and not nb.data["primary"])
        volume_bundle.data["status"] = Volume.ha_status_label(
            len(node_bundles), primaries, failovers)
    return data