Example #1
    def compare_with(self, image2, downsample_ratio=35.0):
        ret = {'scale': float(downsample_ratio), 'offsets': [0, 0]}

        digipal_images = [self, Image.from_digipal_image(image2)]
        
        # Get PIL images of the thumbnails
        ims = []
        for image in digipal_images:
            ims.append(image.get_pil_img(1.0/downsample_ratio, cache=True, grey=True))

        # Make sure the largest image is ims[0]
        sgn = -1
        reverse_order = False
        if ims[0].size[0] < ims[1].size[0]:
            sgn = 1
            ims = ims[::-1]
            digipal_images = digipal_images[::-1]
            reverse_order = True
        
        # -------------------
        # Step 1: Find the approximate crop offset using thumbnails 
        # -------------------
        offsets = self.find_offsets_from_pil_images(ims[0], ims[1])
        
        if offsets is None:
            return ret

        # convert the offset from the thumbnail size to the full size
        ret['offsets'] = [offset * downsample_ratio * sgn for offset in offsets]
        
        return ret
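
A minimal usage sketch for the example above (the ids and the way the two Image model instances are fetched are assumptions; compare_with only needs a second digipal image):

    # Hypothetical usage: estimate how a cropped image is offset from
    # its uncropped counterpart. Both objects are digipal Image records.
    image1 = Image.objects.get(id=100)
    image2 = Image.objects.get(id=101)

    result = image1.compare_with(image2, downsample_ratio=35.0)
    # result['offsets'] is the approximate [x, y] crop offset scaled back
    # to full resolution; result['scale'] echoes the downsample ratio.
    print('offsets: %s, scale: %s' % (result['offsets'], result['scale']))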
Example #2
    def replace_image_and_update_annotations(self, offsets, new_image):
        ret = True

        old_image_size = self.get_img_size()
        new_image = Image.from_digipal_image(new_image)
        new_image_size = new_image.get_img_size()

        self.iipimage = new_image.iipimage
        self.save()

        annotations = self.annotation_set.filter().distinct()
        if annotations.count():
            for annotation in annotations:
                print('translate annotation #%s' % annotation.id)
                annotation.geo_json = self.get_uncropped_geo_json(
                    annotation, offsets, new_image_size, old_image_size)
                annotation.save()

        return ret
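
A minimal usage sketch (hypothetical ids; it assumes the offsets come from compare_with in Example #1 and that the second image is the uncropped replacement):

    # Hypothetical usage: swap a cropped image for its uncropped original
    # and translate every annotation by the crop offsets found earlier.
    cropped = Image.objects.get(id=100)
    uncropped = Image.objects.get(id=101)

    offsets = cropped.compare_with(uncropped)['offsets']
    cropped.replace_image_and_update_annotations(offsets, uncropped)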
Example #3
    def find_image_offset(self, image2, downsample_ratio=35.0):
        '''
            Find and return the crop offset and two sample annotations.

            return = {'offsets': [x, y], 'annotations': [a1, a2]}
            (x, y): how much we have to move image1 to match image2
            a1 is the URL of one annotation, a2 the URL of the matching
                annotation in the other image

            The offset is found using this two-step method:

            1. obtain a thumbnail of each image and use PIL to
                scan all possible offsets and return the best match.
                Note that this is an approximate offset due to
                the downsampling made for creating the thumbnails.

            2. the approximate offset is then refined by applying the same
                PIL matching operation on one annotation cutout from
                one image and the region where we expect to find it
                in the other image.

                The smaller the thumbnail in step 1, the larger the
                search region in this step.

            downsample_ratio specifies how small the thumbnail is compared
            to the original. Values above 35 may cause problems with our
            images as the approximation in the first stage will be excessive.
        '''
        
        ret = {'offsets': [0, 0], 'annotations': []}
        
        if self.annotation_set.count() + image2.annotation_set.count() == 0:
            return ret

        digipal_images = [self, Image.from_digipal_image(image2)]
        
        # Get PIL images of the thumbnails
        ims = []
        for image in digipal_images:
            ims.append(image.get_pil_img(1.0/downsample_ratio, cache=True, grey=True))

        # Make sure the largest image is ims[0]
        reverse_order = False
        if ims[0].size[0] < ims[1].size[0]:
            ims = ims[::-1]
            digipal_images = digipal_images[::-1]
            reverse_order = True
        
        # -------------------
        # Step 1: Find the approximate crop offset using thumbnails 
        # -------------------
        best = self.find_offsets_from_pil_images(ims[0], ims[1])
        
        if best is None:
            return ret

        # -------------------
        # Step 2: Compare an annotation from one image with the containing region in the other image
        # -------------------
        
        # convert the offset from the thumbnail size to the full size
        ret['offsets'] = [best[0] * downsample_ratio, best[1] * downsample_ratio]
        
        # get one annotation (ann)
        for annotation_image_index in range(0, 2):
            anns = digipal_images[annotation_image_index].annotation_set.all().order_by('?')
            #anns = digipal_images[annotation_image_index].annotation_set.all().order_by('id')
            if anns.count():
                ann = anns[0]
                break
        
        # Calculate the search region (region_search):
        # start from the coordinates of the annotation in the first image (region)
        region = self.get_annotation_coordinates(ann, digipal_images[annotation_image_index])
        region_search = []
        sign = 1
        if annotation_image_index == 0:
            sign = -1
        for pair in region:
            region_search.append([pair[0] + ret['offsets'][0] * sign, pair[1] + ret['offsets'][1] * sign])
        
        # extend the search region by downsample_ratio/2 pixels in every direction
        # (due to the approximation of the first step - every pixel in the
        # thumbnail = downsample_ratio pixels in the original)
        #
        # Now add a bit more to that because of rounding errors when asking for the
        # thumbnail dimensions and other possible sources of perturbation.
        # We more than double it (* 2.2) as a safety margin on each side.
        #
        safety_margin = 2.2
        search_margin = int((downsample_ratio / 2.0) * safety_margin + 0.5)
        region_search = [
            [region_search[0][0] - search_margin, region_search[0][1] - search_margin],
            [region_search[1][0] + search_margin, region_search[1][1] + search_margin],
        ]
        
        # the iip img server only accepts relative coordinates, so convert them to relative values
        region_search_relative = digipal_images[1 - annotation_image_index].get_relative_coordinates(region_search)
        
        # Get the two images (annotation and search region) as PIL objects
        ann_im = self.get_pil_img_from_url(ann.get_cutout_url(False, True), grey=True, cache=True)
        rgn_im = digipal_images[1 - annotation_image_index].get_pil_img(
            query='&RGN=%1.6f,%1.6f,%1.6f,%1.6f&QLT=100&CVT=JPG' % (
                region_search_relative[0][0], region_search_relative[0][1],
                region_search_relative[1][0], region_search_relative[1][1]),
            grey=True,
            cache=True)
        
        # Find the offsets by systematically looking for a match of the annotation within 
        # the search region
        offsets = self.find_offsets_from_pil_images(rgn_im, ann_im)

        # Now add the local offset to the global estimate
        # (skip the refinement if the local match failed, as in compare_with)
        if offsets is not None:
            ret['offsets'] = [offsets[0] + ret['offsets'][0] - search_margin,
                              offsets[1] + ret['offsets'][1] - search_margin]
        
        # Get the URL of the reference annotation and the matching one in the other image 
        ret_relative = digipal_images[1 - annotation_image_index].get_relative_coordinates([
            [region[0][0] + ret['offsets'][0], region[0][1] + ret['offsets'][1]],
            [region[1][0] + ret['offsets'][0], region[1][1] + ret['offsets'][1]],
        ])
        ann_img_2_url = digipal_images[1 - annotation_image_index].iipimage.full_base_url + \
            '&RGN=%1.6f,%1.6f,%1.6f,%1.6f&QLT=100&CVT=JPG' % (
                ret_relative[0][0], ret_relative[0][1],
                ret_relative[1][0], ret_relative[1][1])

        ret['annotations'] = [ann.get_cutout_url(False, True), ann_img_2_url]
        
        # Adjust the crop signs
        if not reverse_order:
            ret['offsets'] = [-ret['offsets'][0], -ret['offsets'][1]]

        return ret
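
All three examples delegate the pixel-level search to find_offsets_from_pil_images, which is not shown on this page. Below is a minimal sketch of the brute-force matching the docstring describes ("scan all possible offsets and return the best match"). It assumes two greyscale PIL images with the larger one passed first; the function name, signature, and sum-of-absolute-differences scoring are assumptions, not the project's actual implementation:

    from PIL import ImageChops

    def find_offsets_from_pil_images_sketch(im_large, im_small):
        # Slide im_small over every possible position within im_large and
        # keep the offset with the lowest total pixel difference.
        best = None
        w, h = im_small.size
        for dy in range(im_large.size[1] - h + 1):
            for dx in range(im_large.size[0] - w + 1):
                window = im_large.crop((dx, dy, dx + w, dy + h))
                diff = ImageChops.difference(window, im_small)
                # For a greyscale image, sum(intensity * count) over the
                # histogram is the sum of absolute pixel differences.
                score = sum(i * n for i, n in enumerate(diff.histogram()))
                if best is None or score < best[0]:
                    best = (score, [dx, dy])
        # None when im_small does not fit inside im_large
        return best[1] if best else None

The quadratic scan is affordable only because it runs on downsampled thumbnails (step 1) and on a small annotation-sized search window (step 2), which is why downsample_ratio trades thumbnail accuracy against search-region size.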