def save(self, *args, **kwargs):
    """Normalize blank DOI/token fields, stamp the DOI-added date, save,
    and queue image processing when the collection becomes publicly
    visible or gains a DOI."""
    # Treat whitespace-only DOI / token values as unset.
    if self.DOI is not None and self.DOI.strip() == "":
        self.DOI = None
    if self.private_token is not None and self.private_token.strip() == "":
        self.private_token = None

    # Record when a DOI first appears on a public collection.
    if self.DOI and not self.private and not self.doi_add_date:
        self.doi_add_date = datetime.now()

    # Compare against the stored row to see whether privacy or DOI
    # status is changing with this save.
    privacy_flipped = False
    doi_flipped = False
    if self.pk is not None:
        stored = Collection.objects.get(pk=self.pk)
        privacy_flipped = stored.private != self.private
        doi_flipped = (stored.DOI is not None) != (self.DOI is not None)

    super(Collection, self).save(*args, **kwargs)

    # Run calculations when the collection turns public or gains a DOI.
    became_visible = privacy_flipped and not self.private
    gained_doi = doi_flipped and self.DOI is not None
    if became_visible or gained_doi:
        for item in self.basecollectionitem_set.instance_of(Image).all():
            if item.pk:
                generate_glassbrain_image.apply_async([item.pk])
                run_voxelwise_pearson_similarity.apply_async([item.pk])
def save(self, *args, **kwargs):
    """Normalize blank fields, save, (re)assign owner permissions, and
    queue image processing when the collection turns public.

    Fix: replaced the non-idiomatic ``self.private == False`` test with
    ``not self.private``.
    """
    # Treat whitespace-only DOI / token values as unset.
    if self.DOI is not None and self.DOI.strip() == "":
        self.DOI = None
    if self.private_token is not None and self.private_token.strip() == "":
        self.private_token = None

    # Record when a DOI first appears on a public collection.
    # NOTE(review): datetime.now() is naive — presumably server-local
    # time is intended; confirm against project timezone settings.
    if self.DOI and not self.private and not self.doi_add_date:
        self.doi_add_date = datetime.now()

    # Detect a privacy flip by comparing with the stored row; used below
    # to run calculations when the collection turns public.
    privacy_changed = False
    if self.pk is not None:
        old_object = Collection.objects.get(pk=self.pk)
        privacy_changed = old_object.private != self.private

    super(Collection, self).save(*args, **kwargs)

    # Grant the owner full control over the collection and its contents.
    assign_perm('delete_collection', self.owner, self)
    assign_perm('change_collection', self.owner, self)
    for image in self.image_set.all():
        assign_perm('change_image', self.owner, image)
        assign_perm('delete_image', self.owner, image)
    for nidmresult in self.nidmresults_set.all():
        assign_perm('change_nidmresults', self.owner, nidmresult)
        assign_perm('delete_nidmresults', self.owner, nidmresult)

    # Queue glass-brain rendering and similarity computation for every
    # saved image once the collection has become public.
    if privacy_changed and not self.private:
        for image in self.image_set.all():
            if image.pk:
                generate_glassbrain_image.apply_async([image.pk])
                run_voxelwise_pearson_similarity.apply_async([image.pk])
def save(self):
    """Fill in missing image statistics from the NIfTI file, persist the
    map, and queue comparison recomputation for new or changed files.

    Fixes: ``== None`` -> ``is None``; ``True if x else False`` -> direct
    booleans; the thrice-duplicated gzip/NIfTI loading is factored into a
    local helper with identical per-branch open semantics.
    """
    def _load_nifti():
        # Re-open the stored file so each computation reads the gzipped
        # stream from the start, exactly as the duplicated code did.
        self.file.open()
        gzfileobj = GzipFile(filename=self.file.name, mode='rb',
                             fileobj=self.file.file)
        return nb.Nifti1Image.from_file_map(
            {'image': nb.FileHolder(self.file.name, gzfileobj)})

    if self.perc_bad_voxels is None and self.file:
        import neurovault.apps.statmaps.utils as nvutils
        nii = _load_nifti()
        self.is_thresholded, ratio_bad = nvutils.is_thresholded(nii)
        self.perc_bad_voxels = ratio_bad * 100.0

    if self.brain_coverage is None and self.file:
        import neurovault.apps.statmaps.utils as nvutils
        nii = _load_nifti()
        self.not_mni, self.brain_coverage, self.perc_voxels_outside = \
            nvutils.not_in_mni(nii)

    if self.map_type == self.OTHER:
        import neurovault.apps.statmaps.utils as nvutils
        nii = _load_nifti()
        self.map_type = nvutils.infer_map_type(nii)

    # Calculation of image reduced_representation and comparisons:
    # detect whether the underlying file changed on an existing record.
    file_changed = False
    if self.pk is not None:
        existing = Image.objects.get(pk=self.pk)
        if existing.file != self.file:
            file_changed = True
    do_update = file_changed
    new_image = self.pk is None

    # If we have an update, delete old pkl and comparisons first before saving.
    if do_update and self.collection:
        if self.reduced_representation:  # not applicable for private collections
            self.reduced_representation.delete()
        # If more than one metric is added to NeuroVault, this must also
        # filter based on metric.
        comparisons = Comparison.objects.filter(
            Q(image1=self) | Q(image2=self))
        if comparisons:
            comparisons.delete()

    super(BaseStatisticMap, self).save()

    # Calculate comparisons for new or changed images.
    if do_update or new_image:
        run_voxelwise_pearson_similarity.apply_async([self.pk])

    self.file.close()
def save(self):
    """Derive missing statistics (threshold ratio, brain coverage, map
    type) from the stored NIfTI file, save, and queue similarity
    recomputation when the image is new or its file changed.

    Fixes: ``== None`` -> ``is None``; ``True if x else False`` -> direct
    booleans; duplicated gzip/NIfTI loading factored into a helper.
    """
    def _open_nii():
        # Each branch re-opens the file, matching the original behavior.
        self.file.open()
        gz = GzipFile(filename=self.file.name, mode='rb',
                      fileobj=self.file.file)
        return nb.Nifti1Image.from_file_map(
            {'image': nb.FileHolder(self.file.name, gz)})

    if self.perc_bad_voxels is None and self.file:
        import neurovault.apps.statmaps.utils as nvutils
        self.is_thresholded, ratio_bad = nvutils.is_thresholded(_open_nii())
        self.perc_bad_voxels = ratio_bad * 100.0

    if self.brain_coverage is None and self.file:
        import neurovault.apps.statmaps.utils as nvutils
        (self.not_mni, self.brain_coverage,
         self.perc_voxels_outside) = nvutils.not_in_mni(_open_nii())

    if self.map_type == self.OTHER:
        import neurovault.apps.statmaps.utils as nvutils
        self.map_type = nvutils.infer_map_type(_open_nii())

    # Calculation of image reduced_representation and comparisons.
    file_changed = False
    if self.pk is not None:
        existing = Image.objects.get(pk=self.pk)
        if existing.file != self.file:
            file_changed = True
    do_update = file_changed
    new_image = self.pk is None

    # If we have an update, delete old pkl and comparisons first before saving.
    if do_update and self.collection:
        if self.reduced_representation:  # not applicable for private collections
            self.reduced_representation.delete()
        # If more than one metric is added to NeuroVault, this must also
        # filter based on metric.
        comparisons = Comparison.objects.filter(
            Q(image1=self) | Q(image2=self))
        if comparisons:
            comparisons.delete()

    super(BaseStatisticMap, self).save()

    # Calculate comparisons.
    if do_update or new_image:
        run_voxelwise_pearson_similarity.apply_async([self.pk])

    self.file.close()
def save(self):
    """Normalize blank DOI/token fields, save, and queue image processing
    when the collection turns public.

    Fix: replaced the non-idiomatic ``self.private == False`` test with
    ``not self.private``.
    """
    # Treat whitespace-only DOI / token values as unset.
    if self.DOI is not None and self.DOI.strip() == "":
        self.DOI = None
    if self.private_token is not None and self.private_token.strip() == "":
        self.private_token = None

    # Detect a privacy flip by comparing against the stored row; used
    # below to run calculations when the collection turns public.
    privacy_changed = False
    if self.pk is not None:
        old_object = Collection.objects.get(pk=self.pk)
        privacy_changed = old_object.private != self.private

    super(Collection, self).save()

    # Queue glass-brain rendering and similarity computation for every
    # saved image once the collection has become public.
    if privacy_changed and not self.private:
        for image in self.image_set.all():
            if image.pk:
                generate_glassbrain_image.apply_async([image.pk])
                run_voxelwise_pearson_similarity.apply_async([image.pk])
django.setup()
from neurovault.apps.statmaps.models import Similarity, Comparison, Image, Collection
from neurovault.apps.statmaps.tasks import run_voxelwise_pearson_similarity

# Images should have the "transform" field after applying migrations (I think)

# First create/update the image similarity metric.
pearson_metric = Similarity.objects.update_or_create(
    similarity_metric="pearson product-moment correlation coefficient",
    transformation="voxelwise",
    metric_ontology_iri=
    "http://webprotege.stanford.edu/RCS8W76v1MfdvskPLiOdPaA",
    transformation_ontology_iri=
    "http://webprotege.stanford.edu/R87C6eFjEftkceScn1GblDL")

# Delete all old comparisons (fix: dropped the unused binding of the
# delete() return value).
Comparison.objects.all().delete()

# Delete all reduced representations so they get rebuilt.
for img in Image.objects.all():
    img.reduced_representation.delete()

# Filter down to images that are not private, not thresholded.
# Now, we need to generate a "comparison" object for all files in the
# database. We will use a celery task (as this will be integrated into
# the upload workflow). Fix: print as a single-argument function call so
# the script runs under both Python 2 and Python 3.
for collection in Collection.objects.filter(DOI__isnull=False):
    for image in collection.basecollectionitem_set.instance_of(Image).all():
        print("Calculating pearson similarity for images %s" % image)
        run_voxelwise_pearson_similarity.apply_async([image.pk])
import os
# Fix: django.setup() was called without importing django; importing it
# here makes the script self-contained (harmless if imported earlier).
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "neurovault.settings")
django.setup()

from neurovault.apps.statmaps.models import Similarity, Comparison, Image, Collection
from neurovault.apps.statmaps.tasks import run_voxelwise_pearson_similarity

# Images should have the "transform" field after applying migrations (I think)

# First create/update the image similarity metric.
pearson_metric = Similarity.objects.update_or_create(
    similarity_metric="pearson product-moment correlation coefficient",
    transformation="voxelwise",
    metric_ontology_iri="http://webprotege.stanford.edu/RCS8W76v1MfdvskPLiOdPaA",
    transformation_ontology_iri="http://webprotege.stanford.edu/R87C6eFjEftkceScn1GblDL")

# Delete all old comparisons (fix: dropped the unused binding of the
# delete() return value).
Comparison.objects.all().delete()

# Delete all reduced representations so they get rebuilt.
for img in Image.objects.all():
    img.reduced_representation.delete()

# Filter down to images that are not private, not thresholded.
# Now, we need to generate a "comparison" object for all files in the
# database. We will use a celery task (as this will be integrated into
# the upload workflow). Fix: print as a single-argument function call so
# the script runs under both Python 2 and Python 3.
for collection in Collection.objects.filter(DOI__isnull=False):
    for image in collection.basecollectionitem_set.instance_of(Image).all():
        print("Calculating pearson similarity for images %s" % image)
        run_voxelwise_pearson_similarity.apply_async([image.pk])