Example #1
def output_aligned_image(image,
                         center_tck,
                         width_tck,
                         keypoints,
                         output_file,
                         t_out=[0.07, 0.15, 0.5, 0.95]):
    """ Make warped pictures and align the keypoints
    """
    #get the alignments for longitudinal_warp_spline
    t_in = calculate_keypoints(keypoints, center_tck)
    aligned_center, aligned_width = worm_spline.longitudinal_warp_spline(
        t_in, t_out, center_tck, width_tck=width_tck)

    #warp the image into the worm frame
    warps = worm_spline.to_worm_frame(image,
                                      aligned_center,
                                      width_tck=aligned_width,
                                      width_margin=0,
                                      standard_width=aligned_width)
    mask = worm_spline.worm_frame_mask(aligned_width, warps.shape)

    #change warps to an 8-bit image
    bit_warp = colorize.scale(warps).astype('uint8')
    #make an rgba image, so that the worm mask is applied
    rgba = np.dstack([bit_warp, bit_warp, bit_warp, mask])
    #save the image
    freeimage.write(rgba, output_file)
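
A minimal usage sketch for output_aligned_image; the file names and the layout of the annotation dictionary are assumptions for illustration, not from the source.

# Hypothetical usage of output_aligned_image (paths and annotation layout are assumptions).
import pickle
import freeimage

image = freeimage.read('worm.png')                       # lab-frame image (assumed path)
with open('worm.pickle', 'rb') as f:
    annotations = pickle.load(f)                         # assumed to contain 'pose' and 'keypoints'
center_tck, width_tck = annotations['pose']
keypoints = annotations['keypoints']
output_aligned_image(image, center_tck, width_tck, keypoints, 'worm-aligned.png')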
Example #2
    def save_annotations(self):
        """Save the pose annotations as pickle files into the parent directory.
        A pickle file is created for each page in the flipbook with the name of the first image in the
        flipbook_page list as the base for the pickle file name.
        """
        for fp in self.ris_widget.flipbook_pages:
            if len(fp) == 0:
                # skip empty flipbook pages
                continue
            annotations = getattr(fp, 'annotations', {})
            pose = annotations.get('pose', (None, None))
            if pose is not None:
                center_tck, width_tck = pose
                if center_tck is not None:
                    path = pathlib.Path(fp[0].name)
                    with path.with_suffix('.pickle').open('wb') as f:
                        pickle.dump(dict(pose=pose), f)

                    # warp and save images from all flipbook pages
                    for lab_frame in fp:
                        lab_frame_image = lab_frame.data
                        path = pathlib.Path(lab_frame.name)
                        warp = worm_spline.to_worm_frame(lab_frame_image, center_tck, width_tck)
                        warp_save_path = path.parent / (path.stem + '-straight.png')
                        freeimage.write(warp, warp_save_path)

                        # If the widths are drawn, then create a mask that allows the user to make an alpha channel later.
                        # We create one mask for each flipbook page, in case the images were saved in different places.
                        # If we wind up redundantly writing the same mask a few times, so be it.
                        if width_tck is not None:
                            mask = worm_spline.worm_frame_mask(width_tck, warp.shape)
                            mask_save_path = path.parent / (path.stem + '-mask.png')
                            freeimage.write(mask, mask_save_path)
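
A hedged sketch of reading back the files written above: each pickle holds a dict with a 'pose' entry, and the straightened image sits next to it with a '-straight.png' suffix.

# Hypothetical read-back of the files written by save_annotations (the file name is an assumption).
import pickle
import pathlib
import freeimage

path = pathlib.Path('some_image.pickle')
with path.open('rb') as f:
    center_tck, width_tck = pickle.load(f)['pose']
straightened = freeimage.read(path.parent / (path.stem + '-straight.png'))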
Example #3
def get_worm_frame_image(timepoint, downscale=1, image_size=(960, 512)):
    bf = preprocess_image(timepoint, downscale)
    annotations = timepoint.annotations
    center_tck, width_tck = annotations['pose']
    image_shape = (image_size[0] // downscale, image_size[1] // downscale)
    #deal with downscaling
    new_center_tck = (center_tck[0], center_tck[1] / downscale, center_tck[2])
    new_width_tck = (width_tck[0], width_tck[1] / downscale, width_tck[2])
    avg_widths = (AVG_WIDTHS_TCK[0], AVG_WIDTHS_TCK[1] / downscale,
                  AVG_WIDTHS_TCK[2])

    reflect = False
    # if 'keypoints' in annotations and 'vulva' in annotations['keypoints']:
    #     x, y = annotations['keypoints']['vulva']
    #     reflect = y < 0

    image_width, image_height = image_shape
    worm_frame = worm_spline.to_worm_frame(bf,
                                           new_center_tck,
                                           new_width_tck,
                                           standard_width=avg_widths,
                                           zoom=1,
                                           order=1,
                                           sample_distance=image_height // 2,
                                           standard_length=image_width,
                                           reflect_centerline=reflect)
    mask = worm_spline.worm_frame_mask(avg_widths, worm_frame.shape)
    worm_frame[mask == 0] = 0
    return worm_frame
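
A one-call usage sketch, assuming timepoint is an annotated timepoint object whose annotations carry a 'pose' entry, as the function above expects.

# Hypothetical call; 'timepoint' must expose .annotations['pose'] as used above.
worm_img = get_worm_frame_image(timepoint, downscale=2, image_size=(960, 512))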
Example #4
def accuracy(keypoint_maps, out):
    acc = 0
    N,C,H,W = keypoint_maps[0].size()
    s = int(960 / H)  # infer the downscale factor from the keypoint-map size
    widths_tck = (AVG_WIDTHS_TCK[0], AVG_WIDTHS_TCK[1]/s, AVG_WIDTHS_TCK[2])
    mask = worm_spline.worm_frame_mask(widths_tck, (H, W)) #make worm mask
    mask = mask>0
    print(mask.shape)
    #mask = numpy.array([[mask]*C]*N) #get mask into the same dimension as keypoint should be (N, 1, H, W)

    for sampleIndex in range(len(keypoint_maps[0])):
        kp_map = keypoint_maps[0][sampleIndex].cpu().numpy()
        gt = kp_map[0]
        gt[~mask] = -1 #since we don't care about things outside of the worm pixels, set everything outside to -1
        gt_kp = numpy.unravel_index(numpy.argmax(gt), gt.shape)
        #gt_kp = numpy.where(gt == numpy.max(gt[mask]))

        out_kp_map = out[('Keypoint0',0)][sampleIndex].cpu().detach().numpy()
        pred = out_kp_map[0]
        pred[~mask] = -1 #since we don't care about things outside of the worm pixels, set everything outside to -1
        #out_kp = numpy.where(pred == numpy.max(pred[mask]))
        out_kp = numpy.unravel_index(numpy.argmax(pred), pred.shape)

        #dist = numpy.sqrt((gt_kp[0][0]-out_kp[0][0])**2 + (gt_kp[1][0]-out_kp[1][0])**2)
        dist = numpy.sqrt((gt_kp[0]-out_kp[0])**2 + (gt_kp[1]-out_kp[1])**2)
        print("GT: {}, Out: {}, dist: {:.0f} ".format(gt_kp, out_kp, dist))
        acc += dist
    print("avg acc: ", acc/N)
    return acc
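
The masked-argmax step above can be illustrated in isolation: pixels outside the boolean mask are suppressed before numpy.argmax, so the returned (row, col) index always lies inside the worm. A small self-contained numpy sketch:

# Self-contained illustration of the masked argmax used in accuracy().
import numpy

heatmap = numpy.random.rand(4, 6)          # stand-in for a keypoint map
mask = numpy.zeros((4, 6), dtype=bool)
mask[1:3, 1:5] = True                      # stand-in for the worm-frame mask
masked = heatmap.copy()
masked[~mask] = -1                         # suppress everything outside the mask
peak = numpy.unravel_index(numpy.argmax(masked), masked.shape)
assert mask[peak]                          # the peak always falls inside the mask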
Example #5
 def __init__(self, downscale=2, scale=(0,1,2,3), image_shape=(960,512)):
     super(LossofRegmentation, self).__init__()
     self.scale = scale
     self.reglLoss = nn.L1Loss(reduction='sum')
     #self.segLoss = nn.BCELoss(reduction='sum')
     self.downscale = downscale
     image_size = (int(image_shape[0]/downscale), int(image_shape[1]/downscale))
     widths_tck = (AVG_WIDTHS_TCK[0], AVG_WIDTHS_TCK[1]/downscale, AVG_WIDTHS_TCK[2])
     mask = worm_spline.worm_frame_mask(widths_tck, image_size) #make worm mask for training
     self.mask = mask
Example #6
def generate_worm_masks(lab_frame_image, center_tck, width_tck):
    #make a lab frame mask
    lab_mask = worm_spline.lab_frame_mask(center_tck, width_tck, lab_frame_image.shape)
    lab_frame_mask = lab_mask>0

    #get worm_frame image/mask
    worm_frame_image = worm_spline.to_worm_frame(lab_frame_image, center_tck, width_tck=width_tck)
    worm_mask = worm_spline.worm_frame_mask(width_tck, worm_frame_image.shape)
    worm_frame_mask = worm_mask>0

    return (lab_frame_mask, worm_frame_image, worm_frame_mask)
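
A hedged usage sketch for generate_worm_masks; the file names and the annotation format are assumptions.

# Hypothetical usage (file names and annotation layout are assumptions).
import pickle
import freeimage

lab_frame_image = freeimage.read('lab_frame.png')
with open('lab_frame.pickle', 'rb') as f:
    center_tck, width_tck = pickle.load(f)['pose']       # assumed annotation format
lab_mask, worm_image, worm_mask = generate_worm_masks(lab_frame_image, center_tck, width_tck)
lab_frame_image[~lab_mask] = 0                            # e.g. blank out everything outside the worm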
Example #7
 def worm_frame_image(self, i, image_shape):
     bf = self.normalized_bf_image(i)
     annotations = self.timepoint_list.timepoint_annotations(i)
     center_tck, width_tck = annotations['pose']
     reflect = False
     if 'keypoints' in annotations and 'vulva' in annotations['keypoints']:
         x, y = annotations['keypoints']['vulva']
         reflect = y > 0
     reflect = False  # note: this overrides the value computed above, so centerline reflection is disabled
     image_width, image_height = image_shape
     worm_frame = worm_spline.to_worm_frame(bf, center_tck, width_tck,
         sample_distance=image_height//2, standard_length=image_width, reflect_centerline=reflect)
     mask = worm_spline.worm_frame_mask(width_tck, worm_frame.shape)
     worm_frame[mask == 0] = 0
     return worm_frame
Example #8
def process_reg_output(out, downscale=2):
    #Way to get the keypoint maps and make it into the xy positions
    out_kp_map = out[('Keypoint0', 0)][0].cpu().detach().numpy()
    out_kp_map = out_kp_map[0]
    image_shape = out_kp_map.shape
    widths_tck = (AVG_WIDTHS_TCK[0], AVG_WIDTHS_TCK[1] / downscale,
                  AVG_WIDTHS_TCK[2])
    mask = worm_spline.worm_frame_mask(widths_tck,
                                       image_shape)  #make worm mask
    mask = mask > 0
    out_kp_map[~mask] = 0  # ignore pixels outside the worm mask
    #out_kp = numpy.where(out_kp_map == numpy.max(out_kp_map[mask]))
    out_kp = numpy.unravel_index(numpy.argmax(out_kp_map), out_kp_map.shape)

    return out_kp
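
A hedged usage note: out is assumed to be the network output dictionary keyed by ('Keypoint0', 0), as in the function above, and the returned tuple is a (row, column) index in the downscaled worm frame.

# Hypothetical call; 'out' comes from the model as above, so this line is not self-contained.
row, col = process_reg_output(out, downscale=2)
full_res = (row * 2, col * 2)   # scale back to the full-resolution frame (assumed convention)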
Example #9
def standardized_worm_frame_image(lab_frame_image, center_tck, width_tck, pixel_height,
        pixel_width, useMask=False, average_widths_tck=AVG_WIDTHS_TCK):
    WORM_WIDTH = 64
    WORM_PAD = 38

    worm_width_factor = (pixel_height - WORM_PAD) / WORM_WIDTH
    new_avg_width_tck = (average_widths_tck[0], average_widths_tck[1] * worm_width_factor, average_widths_tck[2])
    worm_frame_image = worm_spline.to_worm_frame(lab_frame_image, center_tck, width_tck, 
                                standard_length=pixel_width, standard_width=new_avg_width_tck)
    
    if useMask:
        mask = worm_spline.worm_frame_mask(new_avg_width_tck, worm_frame_image.shape)
        worm_frame_image[mask < 10] = 0
        return worm_frame_image

    else:
        return worm_frame_image
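
A short hedged sketch of calling it; the lab-frame image and pose splines are loaded as in the earlier sketches, and the output dimensions are arbitrary illustrative values.

# Hypothetical call (lab_frame_image, center_tck, width_tck loaded as in the sketches above; sizes are illustrative).
standardized = standardized_worm_frame_image(lab_frame_image, center_tck, width_tck,
                                             pixel_height=128, pixel_width=960, useMask=True)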
Example #10
 def __call__(self, timepoint):
     bf = normalized_bf_image(timepoint)
     annotations = timepoint.annotations
     center_tck, width_tck = annotations['pose']
     reflect = False
     if 'keypoints' in annotations and 'vulva' in annotations['keypoints']:
         x, y = annotations['keypoints']['vulva']
         reflect = y < 0
     image_width, image_height = self.image_shape
     worm_frame = worm_spline.to_worm_frame(bf,
                                            center_tck,
                                            width_tck,
                                            sample_distance=image_height // 2,
                                            standard_length=image_width,
                                            reflect_centerline=reflect)
     mask = worm_spline.worm_frame_mask(width_tck, worm_frame.shape)
     worm_frame[mask == 0] = 0
     return worm_frame
Example #11
    def worm_frame_image(self, i):
        downscale = self.downscale
        bf = self.preprocess_image(i)
        annotations = self.timepoint_list[i].annotations
        center_tck, width_tck = annotations['pose']
        image_size = self.image_size

        image_shape = (image_size[0]//downscale, image_size[1]//downscale)
        
        new_center_tck = (center_tck[0], center_tck[1]/downscale, center_tck[2])
        new_width_tck = (width_tck[0], width_tck[1]/downscale, width_tck[2])
        avg_widths = (self.AVG_WIDTHS_TCK[0], self.AVG_WIDTHS_TCK[1]/downscale, self.AVG_WIDTHS_TCK[2])
        
        reflect = False

        image_width, image_height = image_shape
        worm_frame = worm_spline.to_worm_frame(bf, new_center_tck, new_width_tck,
            standard_width=avg_widths, zoom=1, order=1, sample_distance=image_height//2, standard_length=image_width, reflect_centerline=reflect)
        mask = worm_spline.worm_frame_mask(new_width_tck, worm_frame.shape)
        worm_frame[mask == 0] = 0
        return worm_frame
Example #12
def plot_output(imgList, keypoint_maps, out, epoch, phase, save_dir='./'):
    figWinNumHeight, figWinNumWidth, subwinCount = 4, 4, 1
    plt.figure(figsize=(22,20), dpi=88, facecolor='w', edgecolor='k') # figsize -- inch-by-inch
    plt.clf()
    print(imgList.min())
    print(imgList.max())
    acc = 0
    N,C,H,W = keypoint_maps[0].size()
    print(N,C,H,W)
    s = int(960 / H)  # infer the downscale factor from the keypoint-map size
    widths_tck = (AVG_WIDTHS_TCK[0], AVG_WIDTHS_TCK[1]/s, AVG_WIDTHS_TCK[2])
    mask = worm_spline.worm_frame_mask(widths_tck, (H, W)) #make worm mask
    mask = mask>0


    for sampleIndex in range(min(4, len(imgList))):
        # visualize image
        plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
        subwinCount += 1
        image = imgList[sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))      
        plt.imshow(image[0], cmap='gray')    
        plt.axis('off')
        plt.title('Image of worm')
        
        #keypoint 0
        plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
        subwinCount += 1
        kp_map = keypoint_maps[0][sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))
        plt.imshow(kp_map[0], cmap='jet')
        plt.axis('on')
        plt.colorbar()
        plt.title('Keypoint '+str(0)+" GT")
        
        plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
        subwinCount += 1
        kp_map = out[('Keypoint0',0)][sampleIndex].cpu().detach().numpy()#.squeeze().transpose((1,2,0))
        plt.imshow(kp_map[0], cmap='jet')
        plt.axis('on')
        plt.colorbar()
        plt.title('Keypoint '+str(0))
        
        plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
        subwinCount += 1
        kp_map = out[('Keypoint0',0)][sampleIndex].cpu().detach().numpy()
        per90 = numpy.percentile(kp_map[0], 95)  # 95th percentile (computed but unused; the threshold below is hard-coded)
        kp_map[0][~mask] = 0
        
        plt.imshow((kp_map[0]>50).astype(numpy.float32)*1, cmap='jet')
        plt.axis('on')
        plt.colorbar()
        plt.title('Keypoint '+str(0))

        """#Keypoint 1
                        
                                subwinCount+=1
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = keypoint_maps[1][sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))
                                plt.imshow(kp_map[0], cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(1)+" GT")
                                
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = out[('Keypoint1',0)][sampleIndex].cpu().detach().numpy()#.squeeze().transpose((1,2,0))
                                plt.imshow(kp_map[0], cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(1))
                                
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = out[('Keypoint1',0)][sampleIndex].cpu().detach().numpy()
                                per90 = numpy.percentile(kp_map[0], 95)
                                
                                plt.imshow((kp_map[0]>per90).astype(numpy.float32)*1, cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(1))
                        
                                #Keypoint 2                                       
                                subwinCount+=1
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = keypoint_maps[2][sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))
                                plt.imshow(kp_map[0], cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(2)+" GT")
                                
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = out[('Keypoint2',0)][sampleIndex].cpu().detach().numpy()#.squeeze().transpose((1,2,0))
                                plt.imshow(kp_map[0], cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(2))
                                
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = out[('Keypoint2',0)][sampleIndex].cpu().detach().numpy()
                                per90 = numpy.percentile(kp_map[0], 95)
                                
                                plt.imshow((kp_map[0]>per90).astype(numpy.float32)*1, cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(2))
                        
                                #keypoint 3
                                subwinCount+=1
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = keypoint_maps[3][sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))
                                plt.imshow(kp_map[0], cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(3)+" GT")
                                
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = out[('Keypoint3',0)][sampleIndex].cpu().detach().numpy()#.squeeze().transpose((1,2,0))
                                plt.imshow(kp_map[0], cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(3))
                                
                                plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)
                                subwinCount += 1
                                kp_map = out[('Keypoint3',0)][sampleIndex].cpu().detach().numpy()
                                per90 = numpy.percentile(kp_map[0], 95)
                                
                                plt.imshow((kp_map[0]>per90).astype(numpy.float32)*1, cmap='jet')
                                plt.axis('on')
                                plt.colorbar()
                                plt.title('Keypoint '+str(3))"""
                        


    save_path = os.path.join(save_dir, ('epoch '+str(epoch)+' output '+phase+'.png'))
    plt.savefig(save_path)
    plt.close()