Code Example #1
    def test_adding_nidm(self):
        Image2 = StatisticMap(name='Image2', collection=self.Collection1, file='beta_0001.nii.gz', map_type="Other")
        Image2.file = SimpleUploadedFile('beta_0001.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/beta_0001.nii.gz')).read())
        Image2.save()
        
        zip_file = open(os.path.join(self.test_path,'test_data/nidm/spm_example.nidm.zip'), 'rb')
        post_dict = {
            'name': 'spm_nidm',
            'description':'{0} upload test'.format('spm_example'),
            'collection':self.Collection2.pk}
        fname = os.path.basename(os.path.join(self.test_path,'test_data/nidm/spm_example.nidm.zip'))
        file_dict = {'zip_file': SimpleUploadedFile(fname, zip_file.read())}
        zip_file.close()
        form = NIDMResultsForm(post_dict, file_dict)
        # Transforms should be generated synchronously
        nidm = form.save()
        print "\nTesting Counter - added nidm result ###"

        # We should have 2 images total, so 1 comparison
        total_comparisons = count_existing_comparisons(Image2.pk)
        self.assertEqual(total_comparisons,1)
        
        # Let's add a single subject map - this should not trigger a comparison
        Image2ss = StatisticMap(name='Image2 - single subject', collection=self.Collection3, file='beta_0001.nii.gz', map_type="Other", analysis_level='S')
        Image2ss.file = SimpleUploadedFile('beta_0001.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/beta_0001.nii.gz')).read())
        Image2ss.save()
        total_comparisons = count_existing_comparisons(Image2ss.pk)
        self.assertEqual(total_comparisons,0)

        # Make sure comparisons were calculated
        number_comparisons = len(Comparison.objects.all())
        print "\n %s comparisons exist after adding NIDM `[should not be 0]" %(number_comparisons)
        self.assertEqual(number_comparisons>0,True)
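
The expected counts in this test follow from the number of unordered image pairs: n comparable group-level images yield n*(n-1)/2 comparisons, so two images give exactly one, and the single-subject map is excluded from the pairing. A quick standalone check of that arithmetic (not taken from the project code):

def possible_comparisons(n_images):
    # Number of unordered pairs among n comparable images.
    return n_images * (n_images - 1) // 2

assert possible_comparisons(2) == 1  # the case asserted above
assert possible_comparisons(3) == 3
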
Code Example #2
    def test_adding_nidm(self):
        Image2 = StatisticMap(name='Image2',
                              collection=self.Collection1,
                              file='beta_0001.nii.gz',
                              map_type="Other")
        Image2.file = SimpleUploadedFile(
            'beta_0001.nii.gz',
            file(
                os.path.join(self.test_path,
                             'test_data/statmaps/beta_0001.nii.gz')).read())
        Image2.save()

        zip_file = open(
            os.path.join(self.test_path,
                         'test_data/nidm/spm_example.nidm.zip'), 'rb')
        post_dict = {
            'name': 'spm_nidm',
            'description': '{0} upload test'.format('spm_example'),
            'collection': self.Collection2.pk
        }
        fname = os.path.basename(
            os.path.join(self.test_path,
                         'test_data/nidm/spm_example.nidm.zip'))
        file_dict = {'zip_file': SimpleUploadedFile(fname, zip_file.read())}
        zip_file.close()
        form = NIDMResultsForm(post_dict, file_dict)
        # Transforms should be generated synchronously
        nidm = form.save()
        print "\nTesting Counter - added nidm result ###"

        # We should have 2 images total, so 1 comparison
        total_comparisons = count_existing_comparisons(Image2.pk)
        self.assertEqual(total_comparisons, 1)

        # Let's add a single subject map - this should not trigger a comparison
        Image2ss = StatisticMap(name='Image2 - single subject',
                                collection=self.Collection3,
                                file='beta_0001.nii.gz',
                                map_type="Other",
                                analysis_level='S')
        Image2ss.file = SimpleUploadedFile(
            'beta_0001.nii.gz',
            file(
                os.path.join(self.test_path,
                             'test_data/statmaps/beta_0001.nii.gz')).read())
        Image2ss.save()
        total_comparisons = count_existing_comparisons(Image2ss.pk)
        self.assertEqual(total_comparisons, 0)

        # Make sure comparisons were calculated
        number_comparisons = len(Comparison.objects.all())
        print "\n %s comparisons exist after adding NIDM `[should not be 0]" % (
            number_comparisons)
        self.assertEqual(number_comparisons > 0, True)
Code Example #3
File: test_counter.py  Project: lhongjum/NeuroVault
    def test_statmaps_processing(self):

        # The counter is the number of images with the "transform" field set to None.
        # The field is populated with the file when image comparisons are done, meaning that if there is only one
        # image in the database (the case below) we cannot calculate comparisons and the "transform" field remains None.
        # This is currently the only way we can test the counter, which will be "1" in this case.
        print "\nTesting Counter - added statistic maps ###" 
        Image1 = StatisticMap(name='Image1', collection=self.Collection1, file='motor_lips.nii.gz',
                              map_type="Z", analysis_level='G', number_of_subjects=10)
        Image1.file = SimpleUploadedFile('motor_lips.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/motor_lips.nii.gz')).read())
        Image1.save()
        images_processing = count_processing_comparisons(Image1.pk)
        print "%s images processing [should be 0]" %(images_processing)
        self.assertEqual(images_processing,0)

        # When we add a second image, its comparison with Image1 will be calculated, both images' transform fields
        # will be populated, and the counter will be set to 0. Celery runs in synchronous mode when testing (meaning
        # that jobs are run locally, one after the other, instead of being sent to worker nodes), so there is no way
        # to test submitting a batch of async jobs and watching the "images still processing" counter go from N to 0.
        # There is also no way of arbitrarily setting an image's transform field to "None", because on save all image
        # comparisons are automatically re-calculated
        Image2 = StatisticMap(name='Image2', collection=self.Collection2, file='beta_0001.nii.gz',
                              map_type="Other", analysis_level='G', number_of_subjects=10)
        Image2.file = SimpleUploadedFile('beta_0001.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/beta_0001.nii.gz')).read())
        Image2.save()
        images_processing = count_processing_comparisons(Image1.pk)
        print "%s images processing [should be 0]" %(images_processing)
        self.assertEqual(images_processing,0)

        # We should have 2 images total, so 1 comparison
        total_comparisons = count_existing_comparisons(Image1.pk)
        self.assertEqual(total_comparisons,1)
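
The comments above describe the "images still processing" counter as the number of images whose "transform" field is still None. A minimal sketch of such a counter, assuming a Django Image model with a nullable transform field (a hypothetical helper, not the actual count_processing_comparisons implementation):

# Hypothetical sketch of the counter described in the comments above;
# assumes an Image model with a nullable "transform" field.
def count_images_still_processing(collection=None):
    queryset = Image.objects.filter(transform__isnull=True)
    if collection is not None:
        queryset = queryset.filter(collection=collection)
    return queryset.count()
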
Code Example #4
    def test_statmaps_processing(self):

        # The counter is the number of images with the "transform" field set to None.
        # The field is populated with the file when image comparisons are done, meaning that if there is only one
        # image in the database (the case below) we cannot calculate comparisons and the "transform" field remains None.
        # This is currently the only way we can test the counter, which will be "1" in this case.
        print "\nTesting Counter - added statistic maps ###" 
        Image1 = StatisticMap(name='Image1', collection=self.Collection1, file='motor_lips.nii.gz', map_type="Z")
        Image1.file = SimpleUploadedFile('motor_lips.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/motor_lips.nii.gz')).read())
        Image1.save()
        images_processing = count_processing_comparisons(Image1.pk)
        print "%s images processing [should be 0]" %(images_processing)
        self.assertEqual(images_processing,0)

        # When we add a second image, its comparison with Image1 will be calculated, both images' transform fields
        # will be populated, and the counter will be set to 0. Celery runs in synchronous mode when testing (meaning
        # that jobs are run locally, one after the other, instead of being sent to worker nodes), so there is no way
        # to test submitting a batch of async jobs and watching the "images still processing" counter go from N to 0.
        # There is also no way of arbitrarily setting an image's transform field to "None", because on save all image
        # comparisons are automatically re-calculated
        Image2 = StatisticMap(name='Image2', collection=self.Collection2, file='beta_0001.nii.gz', map_type="Other")
        Image2.file = SimpleUploadedFile('beta_0001.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/beta_0001.nii.gz')).read())
        Image2.save()
        images_processing = count_processing_comparisons(Image1.pk)
        print "%s images processing [should be 0]" %(images_processing)
        self.assertEqual(images_processing,0)

        # We should have 2 images total, so 1 comparison
        total_comparisons = count_existing_comparisons(Image1.pk)
        self.assertEqual(total_comparisons,1)
Code Example #5
File: test_counter.py  Project: vsoch/brainmeta-www
    def test_adding_nidm(self):
        zip_file = open(os.path.join(self.test_path,'test_data/nidm/fsl.nidm.zip'), 'rb')
        post_dict = {
            'name': 'fsl_nidm',
            'description':'{0} upload test'.format('fsl_nidm'),
            'collection':self.Collection1.pk}
        fname = os.path.basename(os.path.join(self.test_path,'test_data/nidm/fsl.nidm.zip'))
        file_dict = {'zip_file': SimpleUploadedFile(fname, zip_file.read())}
        zip_file.close()
        form = NIDMResultsForm(post_dict, file_dict)
        # Transforms should be generated synchronously
        nidm = form.save()
        images_processing = count_processing_comparisons()
        print "\nTesting Counter - added nidm result ###" 
        # And when we count, there should be 0 still processing
        print "%s images processing [should be 0]" %(images_processing)
        assert_equal(images_processing,0)

        # We should have 2 images total, so 1 comparison
        total_comparisons = count_existing_comparisons()
        assert_equal(total_comparisons,1)

        # Make sure comparisons were calculated
        number_comparisons = len(Comparison.objects.all())
        print "\n %s comparisons exist after adding NIDM [should not be 0]" %(number_comparisons)
        assert_equal(number_comparisons>0,True)
Code Example #6
    def test_thresholded_image_comparison(self):
        # There should be no comparisons for a thresholded image
        print "testing comparisons for thresholded images"
        assert_equal(0,count_existing_comparisons(self.pk3))

        # There should be no comparisons for an atlas
        print "testing comparisons for atlases"
        assert_equal(0,count_existing_comparisons(self.pk1))

        # There should be no comparisons for statistical map because no other statistical maps
        print "testing comparisons for statistical maps"
        assert_equal(0,count_existing_comparisons(self.pk2))

        # Add another statistical map   
        image_path = os.path.join(self.app_path,'test_data/statmaps/motor_lips.nii.gz')
        image4 = save_statmap_form(image_path=image_path,collection = self.comparisonCollection)
        self.pk4 = image4.id
          
        # There should STILL be no comparisons for a thresholded image
        print "testing comparisons for thresholded images"
        assert_equal(0,count_existing_comparisons(self.pk3))

        # There should STILL be no comparisons for the atlas
        print "testing comparisons for atlases"
        assert_equal(0,count_existing_comparisons(self.pk1))

        # There should now be one comparison for each statistical map, two total
        print "testing comparisons for statistical maps"
        print Comparison.objects.all()
        assert_equal(1,count_existing_comparisons(self.pk2))
        assert_equal(1,count_existing_comparisons(self.pk4))
        assert_equal(1,count_existing_comparisons())

        # This is the call that find_similar uses to get images
        comparisons = get_existing_comparisons(self.pk4)
        for comp in comparisons:
            pk1=comp.image1.pk
            pk2=comp.image2.pk          
            im1 = Image.objects.get(pk=pk1)
            im2 = Image.objects.get(pk=pk2)
            assert_equal(im1.is_thresholded,False)
            assert_equal(im2.is_thresholded,False)
Code Example #7
import os
import time

import django
import numpy as np
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "neurovault.settings")
django.setup()
from neurovault.apps.statmaps.models import Similarity, Comparison, Image
from neurovault.apps.statmaps.utils import count_existing_comparisons, \
    count_processing_comparisons, get_existing_comparisons, count_possible_comparisons


time_log = dict()

# Count existing comparisons
times = []
for iter in range(0,1000):
    start = time.time()
    number_comparisons = count_existing_comparisons(pk1=8)
    end = time.time()
    times.append(end-start)   
time_log["count_existing_comparisons_single"] = np.mean(times)
print "count_existing_comparisons for single image: %s" %(np.mean(times))

times = []
for iter in range(0,1000):
    start = time.time()
    number_comparisons = count_existing_comparisons()
    end = time.time()
    times.append(end-start)   
time_log["count_existing_comparisons_all"] = np.mean(times)
print "count_existing_comparisons for all images: %s" %(np.mean(times))

Code Example #8
File: views.py  Project: nagyistge/brainmeta-www
def find_similar(request, pk):
    image1 = get_image(pk, None, request)
    pk = int(pk)

    # Search only enabled if the image is not thresholded
    if image1.is_thresholded == False:

        # Count the number of comparisons that we have to determine max that we can return
        number_comparisons = count_existing_comparisons(pk)

        max_results = 100
        if number_comparisons < 100:
            max_results = number_comparisons

        # Get only # max_results similarity calculations for this image, and ids of other images
        comparisons = get_existing_comparisons(pk).extra(select={
            "abs_score":
            "abs(similarity_score)"
        }).order_by("-abs_score")[0:max_results]  # "-" indicates descending

        images = [image1]
        scores = [1]  # pearsonr
        for comp in comparisons:
            # pick the image we are comparing with
            image = [
                image for image in [comp.image1, comp.image2] if image.id != pk
            ][0]
            if hasattr(image, "map_type") and image.thumbnail:
                images.append(image)
                scores.append(comp.similarity_score)

        # We will need lists of image ids, png paths, query id, query path, tags, names, scores
        image_ids = [image.pk for image in images]
        png_img_paths = [image.get_thumbnail_url() for image in images]
        tags = [[str(image.map_type)] for image in images]

        # The top text will be the collection name, the bottom text the image name
        bottom_text = ["%s" % (image.name) for image in images]
        top_text = ["%s" % (image.collection.name) for image in images]
        compare_url = "/images/compare"  # format will be prefix/[query_id]/[other_id]
        image_url = "/images"  # format will be prefix/[other_id]
        image_title = format_image_collection_names(
            image_name=image1.name,
            collection_name=image1.collection.name,
            map_type=image1.map_type,
            total_length=50)

        # Here is the query image
        query_png = image1.thumbnail.url

        # Do similarity search and return html to put in page, specify 100 max results, take absolute value of scores
        html_snippet = search.similarity_search(image_scores=scores,
                                                tags=tags,
                                                png_paths=png_img_paths,
                                                button_url=compare_url,
                                                image_url=image_url,
                                                query_png=query_png,
                                                query_id=pk,
                                                top_text=top_text,
                                                image_ids=image_ids,
                                                bottom_text=bottom_text,
                                                max_results=max_results,
                                                absolute_value=True)

        html = [h.strip("\n") for h in html_snippet]

        # Get the number of images still processing
        images_processing = count_processing_comparisons(pk)

        context = {
            'html': html,
            'images_processing': images_processing,
            'image_title': image_title,
            'image_url': '/images/%s' % (image1.pk)
        }
        return render(request, 'statmaps/compare_search.html', context)
    else:
        error_message = "Image comparison is not enabled for thresholded images."
        context = {'error_message': error_message}
        return render(request, 'statmaps/error_message.html', context)
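
The view above orders candidate images by the absolute value of the stored similarity score using .extra() with raw SQL. On newer Django versions the same descending-by-absolute-score ordering can be written with query expressions instead; a sketch, assuming Django 2.0+ where Abs is available:

# Sketch of an .extra()-free equivalent of the ordering above, assuming Django >= 2.0.
from django.db.models.functions import Abs

comparisons = (get_existing_comparisons(pk)
               .annotate(abs_score=Abs("similarity_score"))
               .order_by("-abs_score")[:max_results])
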
Code Example #9
File: views.py  Project: vsoch/brainmeta-www
def find_similar(request,pk):
    image1 = get_image(pk,None,request)
    pk = int(pk)

    # Search only enabled if the image is not thresholded
    if image1.is_thresholded == False:

        # Count the number of comparisons that we have to determine max that we can return
        number_comparisons = count_existing_comparisons(pk)

        max_results = 100
        if number_comparisons < 100:
            max_results = number_comparisons

        # Get only # max_results similarity calculations for this image, and ids of other images
        comparisons = get_existing_comparisons(pk).extra(select={"abs_score": "abs(similarity_score)"}).order_by("-abs_score")[0:max_results] # "-" indicates descending

        images = [image1]
        scores = [1] # pearsonr
        for comp in comparisons:
            # pick the image we are comparing with
            image = [image for image in [comp.image1, comp.image2] if image.id != pk][0]
            if hasattr(image, "map_type") and image.thumbnail:
                images.append(image)
                scores.append(comp.similarity_score)
    
        # We will need lists of image ids, png paths, query id, query path, tags, names, scores
        image_ids = [image.pk for image in images]
        png_img_paths = [image.get_thumbnail_url() for image in images]
        tags = [[str(image.map_type)] for image in images]
    
        # The top text will be the collection name, the bottom text the image name
        bottom_text = ["%s" % (image.name) for image in images]
        top_text = ["%s" % (image.collection.name) for image in images]
        compare_url = "/images/compare"  # format will be prefix/[query_id]/[other_id]
        image_url = "/images"  # format will be prefix/[other_id]
        image_title = format_image_collection_names(image_name=image1.name,
                                                    collection_name=image1.collection.name,
                                                    map_type=image1.map_type,total_length=50)
    
        # Here is the query image
        query_png = image1.thumbnail.url

        # Do similarity search and return html to put in page, specify 100 max results, take absolute value of scores
        html_snippet = search.similarity_search(image_scores=scores,tags=tags,png_paths=png_img_paths,
                                    button_url=compare_url,image_url=image_url,query_png=query_png,
                                    query_id=pk,top_text=top_text,image_ids=image_ids,
                                    bottom_text=bottom_text,max_results=max_results,absolute_value=True)

        html = [h.strip("\n") for h in html_snippet]
    
        # Get the number of images still processing
        images_processing = count_processing_comparisons(pk)

        context = {'html': html,'images_processing':images_processing,
                   'image_title':image_title, 'image_url': '/images/%s' % (image1.pk) }
        return render(request, 'statmaps/compare_search.html', context)
    else:
        error_message = "Image comparison is not enabled for thresholded images." 
        context = {'error_message': error_message}
        return render(request, 'statmaps/error_message.html', context)
Code Example #10
import os
import time
import django
import numpy as np
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "neurovault.settings")
django.setup()
from neurovault.apps.statmaps.models import Similarity, Comparison, Image
from neurovault.apps.statmaps.utils import count_existing_comparisons, \
    count_processing_comparisons, get_existing_comparisons, count_possible_comparisons

time_log = dict()

# Count existing comparisons
times = []
for iter in range(0, 1000):
    start = time.time()
    number_comparisons = count_existing_comparisons(pk1=8)
    end = time.time()
    times.append(end - start)
time_log["count_existing_comparisons_single"] = np.mean(times)
print "count_existing_comparisons for single image: %s" % (np.mean(times))

times = []
for iter in range(0, 1000):
    start = time.time()
    number_comparisons = count_existing_comparisons()
    end = time.time()
    times.append(end - start)
time_log["count_existing_comparisons_all"] = np.mean(times)
print "count_existing_comparisons for all images: %s" % (np.mean(times))

# Get existing comparisons