Example #1
def find_local_minima(path_out, path_ref, h_min, mask=None, sigma=2):
    ''' Find local minima in an intensity image
    path_out : path to the output seeds image
    path_ref : path to the reference intensity image
    h_min : value of the h-minima operator
    mask : mask on the intensity image
    sigma : value of the gaussian filter in voxels
    '''
    from os import path
    path_mask_out=path_out.replace('.inr','_mask_'+str(h_min)+'.inr')
    tmp_min=path_out.replace('.inr','_local_minima_out.inr')
    tmp_filt=path_out.replace('.inr','_local_minima_filter'+str(sigma)+'.inr') 
    if not path.exists(tmp_filt) and mask is None:
        recfilter(path_ref, tmp_filt, filter_value=sigma, lazy=True)
    if mask is None:
        os.system(path_regional_max + ' ' + tmp_filt + ' ' +\
                  ' -diff ' + path_mask_out + ' ' +\
                  tmp_min + ' ' +\
                  '-h ' + str(h_min) + ' ' +\
                  '-inv')
    else:
        os.system(path_regional_max + ' ' + mask + ' ' +
                  '-diff ' + path_mask_out + ' ' +
                  tmp_min + ' ' +
                  '-h ' + str(h_min))
    os.system(path_connexe + ' ' + path_mask_out + ' ' +
              path_out + ' ' +
              '-sb 1 -sh ' + str(h_min) +
              ' -labels -o 2')
    try:
        im=imread(path_out.replace('\\', ''))
    except:
        im=None
    os.system('rm -f '+tmp_filt+' '+tmp_min)
    return im, path_mask_out
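
# Usage sketch (not from the original source; file names are hypothetical): detect
# the h-minima seeds of a fused intensity image with h_min=4, assuming the external
# binaries pointed to by path_regional_max and path_connexe are available.
seeds_im, mask_path = find_local_minima('seeds_t001.inr', 'fused_t001.inr', 4, sigma=2)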
Example #2
def watershed(path_seeds, path_int, path_output=None, lazy=True):
    ''' Perform the watershed operation
    path_seeds : path to the seeds image
    path_int : path to the intensity image
    path_output : path to the output image
    lazy : do not return the output image if True
    '''    
    if type(path_seeds)!=str:
        imsave("seeds.inr", path_seeds)
        path_seeds = "seeds.inr"
    if type(path_int)!=str:
        imsave("intensity.inr", path_int)
        path_int = "intensity.inr"

    if path_output is None:
        lazy = False
        path_output = 'seg.inr'
 
    os.system(path_watershed + ' ' + path_seeds +\
              ' ' + path_int +\
              ' ' + path_output \
              )
    if not lazy:
        out=imread(path_output)
        os.system('rm seeds.inr intensity.inr seg.inr')
        return out
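
# Usage sketch (hypothetical file names): run the seeded watershed and get the
# labelled image back; with lazy=True and an explicit path_output nothing is returned.
seg = watershed('seeds_t001.inr', 'fused_t001_g1.inr', path_output='seg_t001.inr', lazy=False)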
Example #3
def croping(image_input, image_output, downsize):
    ''' Automatically crop and resample an image
    image_input : path to the input image
    image_output : path to the output image
    downsize : voxel size of the resampled image (in \mu m)
    '''
    shape_begin = imread(image_input).shape  #Image Size
    image_main = reech3d(image_input,
                         (shape_begin[0] / np.float(downsize), shape_begin[1] /
                          np.float(downsize), shape_begin[2]))  #Downsampling
    vxsize = image_main.resolution
    im_maxed = image_main.max(axis=2)
    thr = np.mean(im_maxed)
    im_th = np.zeros((im_maxed.shape[0], im_maxed.shape[1], 1), dtype=np.uint8)
    im_th[im_maxed > thr] = 1
    comp_co = nd.label(im_th)[0]
    volumes = compute_volumes(comp_co)
    volumes.pop(0)
    label = volumes.keys()[np.argmax(volumes.values())]
    bb = nd.find_objects(comp_co)[label - 1]
    bb2 = (slice(max(bb[0].start - 40, 0),
                 min(image_main.shape[0], bb[0].stop + 40), None),
           slice(max(bb[1].start - 40, 0),
                 min(image_main.shape[1], bb[1].stop + 40),
                 None), slice(0, image_main.shape[2]))
    out = SpatialImage(image_main[bb2])
    out.voxelsize = (float("{0:.1f}".format(image_main.resolution[0])),
                     float("{0:.1f}".format(image_main.resolution[1])),
                     float("{0:.1f}".format(image_main.resolution[2])))
    imsave(image_output, out.astype(np.uint16))
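
# Usage sketch (hypothetical paths and factor): crop a raw angle around the embryo
# and downsample it in x and y by the given factor, as done in fusion_process.
croping('ANGLE_0/t001.inr', 'ANGLE_0/t001_cropp.inr', 1.76)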
Example #4
def reech3d(im_path, output_shape):  
    ''' Perform a resampling operation
    im_path : path to the image to resample
    output_shape : desired output shape
    '''
    tmp_file=im_path.replace('.inr','_temp.inr')
    os.system(path_reech3d + ' '+im_path+'  '+tmp_file+
              ' -x ' + str(int(output_shape[0])) +
              ' -y ' + str(int(output_shape[1])) +
              ' -z ' + str(int(output_shape[2])))
    out = imread(tmp_file)
    os.system('rm '+tmp_file)
    return  out 
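
# Usage sketch (hypothetical path): resample an image to half its size in x and y
# while keeping the number of z slices unchanged.
im = imread('ANGLE_0/t001.inr')
resampled = reech3d('ANGLE_0/t001.inr', (im.shape[0] / 2, im.shape[1] / 2, im.shape[2]))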
Example #5
def recfilter(path_input, path_output='tmp.inr', filter_value=2, lazy=False):
    ''' Perform a gaussian filtering on an intensity image
    path_input : path to the image to filter
    path_output : path to the temporary output image
    filter_value : sigma of the gaussian filter
    lazy : do not return the output image if True
    '''
    os.system(path_filters + ' ' + path_input +\
              ' ' + path_output +\
              ' -cont 10 -sigma ' + str(filter_value) +\
              ' -x 0 -y 0 -z 0 -o 2')
    if not lazy:
        out = imread(path_output)
        os.system('rm ' + path_output)
        return out  
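
# Usage sketch (hypothetical paths): smooth an intensity image with a gaussian of
# sigma=2 and get the filtered array back (lazy=False also removes the temporary file).
smoothed = recfilter('fused_t001.inr', 'fused_t001_g2.inr', filter_value=2, lazy=False)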
Example #6
def outer_detection(im_ref_tmp, radius, seg_ref_tmp):
    ''' Compute the detection of the outer of the embryo
    im_ref_tmp : intensity image for the outer detection (SpatialImage)
    radius : radius of the grey closing to perform
    seg_ref_tmp : segmented reference image (SpatialImage)
    '''
    from copy import deepcopy
    if radius!='0':
        imsave("tmp_bounds.inr", im_ref_tmp)
        os.system(path_morpho + " tmp_bounds.inr closed.inr -clo -R " + radius)
        im=imread("closed.inr")
    else:
        im=deepcopy(im_ref_tmp)
    imax=np.max(im)
    h=np.histogram(im, range=(0, imax), bins=imax)
    # reversed cumulative histogram; drop the lowest bin edge so counts and edges align
    cumhist=np.cumsum(h[0][::-1]), h[1][:0:-1]
    vol=np.sum(seg_ref_tmp!=1)#*1.10
    low=np.max(cumhist[1][cumhist[0]>vol])
    im_th=np.zeros_like(im)
    #im=imread("closed.inr")
    im_th[im>=low]=1 # Cytoplasm
    if radius!='0':
        imsave("tmp.inr", SpatialImage(im_th))
        os.system((path_morpho + " tmp.inr closing.inr -clo -R " + radius))
    else:
        imsave("closing.inr", SpatialImage(im_th))
    os.system((path_morpho + " closing.inr erode.inr -ero -R 5"))
    imE=imread("closing.inr")
    imE=nd.binary_fill_holes(imE)
    mask=np.uint8(imE)
    bounds=nd.binary_dilation(mask, structure=nd.generate_binary_structure(3, 1))-mask
    im_refB=im_ref_tmp.copy()
    im_refB[bounds.astype(np.bool)]=np.max(im_ref_tmp)
    imsave('tmp.inr', SpatialImage(im_refB))
    os.system(path_filters + " tmp.inr out_bounds.inr -x 0 -y 0 -z 0 -sigma 1 -o 2")
    return imread('out_bounds.inr'), bounds.astype(np.bool)
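
# Usage sketch (hypothetical files; note that radius is passed as a string because
# it is forwarded to the command line): enhance the outer boundary of the embryo.
im_ref = imread('fused_t001.inr')
seg_ref = imread('seg_t001.inr')
im_bounds, bounds = outer_detection(im_ref, '10', seg_ref)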
Example #7
def perform_ac(parameters):
    """
    Return the shape resulting from the morphosnake operation on image I, using image S as an initialisation
    m : label of the cell to work on
    daughters : list of the daughters of cell m (to keep track when working in parallel)
    bb : bounding box of m
    I : intensity image to perform active contours on (SpatialImage)
    S : segmented image to perform active contours from (SpatialImage, must contain the label m)
    """

    m, daughters, bb, I, S, MorphosnakeIterations, NIterations, DeltaVoxels = parameters
    import os
    from scipy import ndimage as nd
    import morphsnakes
    cell_num = m
    Sb = nd.binary_erosion(S != cell_num,
                           iterations=MorphosnakeIterations,
                           border_value=1)  #[:,:,sl]
    image_input = 'tmp_' + str(cell_num) + '.inr'
    gradient_output = 'tmp_out_' + str(cell_num) + '.inr'
    imsave(image_input, I)
    gradient_norm(image_input, gradient_output)
    gI = imread(gradient_output)
    os.system('rm -f ' + image_input + ' ' + gradient_output)
    gI = 1. / np.sqrt(1 + 100 * gI)

    macwe = morphsnakes.MorphGAC(gI, smoothing=3, threshold=1, balloon=1)
    macwe.levelset = Sb
    bef = np.ones_like(Sb)
    from copy import deepcopy
    for i in xrange(NIterations):
        beff = deepcopy(bef)
        bef = deepcopy(macwe.levelset)
        macwe.step()
        if np.sum(bef != macwe.levelset) < DeltaVoxels or np.sum(
                beff != macwe.levelset) < DeltaVoxels:
            break
    out = macwe.levelset
    tmp = nd.binary_fill_holes(out)
    cell_out = (out.astype(np.bool) ^ tmp)
    return m, daughters, bb, cell_out
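
# Usage sketch (hypothetical label and images): the arguments are packed in a single
# tuple so perform_ac can also be dispatched through multiprocessing.Pool.map, as
# done in volume_checking.
seg = imread('seg_t002.inr')
I16 = imread('fused_t002.inr')
bb = nd.find_objects(seg)[7 - 1]                      # bounding box of cell 7
m, daughters, bb, cell_out = perform_ac((7, [7], bb, I16[bb], seg[bb], 10, 200, 10**3))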
Example #8
def apply_trsf(path_flo, path_trsf, path_output="tmp_seeds.inr", 
               template=None, nearest=True, lazy=True):
    ''' Apply a transformation to a given image
    path_flo : path to the floating image
    path_trsf : path to the transformation
    path_output : path to the output image
    template : path to the template image
    nearest : do not interpolate (take the nearest value) if True; to be used when applying the transformation to label images
    lazy : do not return the output image if True
    '''
    command_line = path_apply_trsf + " " + path_flo + " " + path_output 
    command_line += " -trsf " + path_trsf
    if template is not None:
        command_line += " -template " + template
    if nearest:
        command_line += " -nearest"
    os.system(command_line)
    if not lazy:
        out=imread(path_output)
        if path_output=='tmp_seeds.inr':
            os.system('rm -f tmp_seeds.inr')
        return out
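
# Usage sketch (hypothetical paths): deform a seeds image with a previously computed
# vector field, resampling it on the geometry of the fused image at t+dt.
seeds = apply_trsf('seeds_t001.inr', 't001_to_t002.inr',
                   path_output='seeds_t001_on_t002.inr',
                   template='fused_t002.inr', nearest=True, lazy=False)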
Example #9
def extract_seeds(seg_c,
                  c,
                  path_seeds_not_prop=None,
                  bb=None,
                  accept_3_seeds=False):
    """
    Return the seeds from path_seeds_not_prop strictly included in cell c of seg_c (the labels of the seeds go from 1 to 3)
    seg_c : segmented image (SpatialImage)
    c : label of the cell to process
    path_seeds_not_prop : image of seeds (can be the path to the image or the SpatialImage itself)
    bb : if path_seeds_not_prop is a path, bb is the bounding box of c in the seeds image
    accept_3_seeds : True if 3 seeds can be accepted as a possible choice
    """
    if type(path_seeds_not_prop) != SpatialImage:
        seeds_not_prop_out = imread(path_seeds_not_prop)
        seeds_not_prop = seeds_not_prop_out[bb]
    else:  ## Then path_seeds_not_prop is the actual image we want to work with
        from copy import deepcopy
        seeds_not_prop = deepcopy(path_seeds_not_prop)
    labels = list(np.unique(seeds_not_prop[seg_c == c]))
    labels.remove(0)
    final_labels = []
    for l in labels:
        if (seg_c[seeds_not_prop == l] == c).all():
            final_labels.append(l)
    if len(final_labels) == 1:
        return (1, (seeds_not_prop == final_labels[0]).astype(np.uint8))
    elif len(final_labels) == 2:
        return (2, ((seeds_not_prop == final_labels[0]) + 2 *
                    (seeds_not_prop == final_labels[1])).astype(np.uint8))
    elif len(final_labels) == 3 and not accept_3_seeds:  # too many seeds in the second extraction
        return (3, ((seeds_not_prop == final_labels[0]) + 2 *
                    (seeds_not_prop == final_labels[1])).astype(np.uint8))
    elif len(final_labels) == 3 and accept_3_seeds:  #"accept 3 seeds !"
        return (3, ((seeds_not_prop == final_labels[0]) + 2 *
                    (seeds_not_prop == final_labels[1]) + 3 *
                    (seeds_not_prop == final_labels[2])).astype(np.uint8))
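
# Usage sketch (hypothetical label and files): extract the seeds strictly included in
# cell 7 of a propagated segmentation, reading the h-minima image from disk; the
# function returns (number_of_seeds, seed_image) when 1 to 3 seeds are found.
seg = imread('seg_t002.inr')
bb = nd.find_objects(seg)[7 - 1]
nb, seeds_c = extract_seeds(seg[bb], 7, 'h_min_t002_h4_s2.inr', bb)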
Example #10
def outer_correction(seg_from_opt_h,
                     exterior_correction,
                     segmentation_file_ref,
                     RadiusOpening=20):
    """
    Return an eroded segmentation correcting for potential errors in the morphsnake
    seg_from_opt_h : segmented image (SpatialImage)
    exterior_correction : list of cells that have been corrected using the morphsnake algorithm
    segmentation_file_ref : path to the reference segmentation (used to name the temporary files)
    RadiusOpening : radius of the morphological opening
    """
    if exterior_correction != []:
        image_input = segmentation_file_ref.replace('.inr',
                                                    '.seg_from_opt_h.inr')
        imsave(image_input, SpatialImage(seg_from_opt_h != 1).astype(np.uint8))
        image_output = segmentation_file_ref.replace('.inr', '.seg_out_h.inr')
        morpho(image_input, image_output, ' -ope -R ' + str(RadiusOpening))
        opened = imread(image_output)
        cells_to_correct = [i for j in exterior_correction for i in j[1]]
        os.system('rm -f ' + image_input + ' ' + image_output)
        to_remove = opened ^ (seg_from_opt_h > 1)

        for c in cells_to_correct:
            seg_from_opt_h[((seg_from_opt_h == c) & to_remove).astype(
                np.bool)] = 1
    return seg_from_opt_h
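
# Usage sketch (hypothetical labels and path): exterior_correction is the list built
# in volume_checking, i.e. tuples (mother_label, [daughter_labels]).
seg = imread('seg_t002_opt.inr')
seg = outer_correction(seg, [(12, [34])], 'seg_t001.inr', RadiusOpening=20)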
Example #11
def fusion_process(time_angles_files,
                   output_file,
                   temporary_path,
                   ori,
                   resolution,
                   target_resolution,
                   delay,
                   ext_im,
                   mirrors=False):
    """Compute the fusions of a given time-series of raw data
    time_angles_files : list of the raw image files (one per angle) for the given time point
    output_file : path to the fused output image
    temporary_path : path where the temporary images will be written
    ori : orientation of the rotation between the consecutive raw images
    resolution : resolution of the raw images in $\mu m$
    target_resolution : resolution of the final fused image in $\mu m$
    delay : time offset to add to the names of the fused files if the movie has been split
    ext_im : extension of the image ('.inr', '.tiff', '.tif', '.h5')
    mirrors : if True a mirror was used to correct the X-Y transformation"""

    tozip = False  #Zip files ?
    if ext_im.lower() == '.zip':
        tozip = True

    start_process = time_estimation()
    output_path = output_file[:output_file.rfind('/')]
    os.system('mkdir -p ' + output_path)  #Create output folder

    os.system("rm -rf " +
              temporary_path)  #Delte the temporary folder if previous created
    os.system('mkdir -p ' + temporary_path)  #Create temporary folder
    downsize = target_resolution / resolution[0]  #Downsampling

    ### Pre-treatment (unzip if necessary and convert to inr format)
    angle_paths = [
        temporary_path + 'ANGLE_' + str(a) + '/'
        for a in range(len(time_angles_files))
    ]
    [os.system('mkdir -p ' + angle_path)
     for angle_path in angle_paths]  #Create Angle Temporary Directory

    inr_files = []
    for time_angle_file, angle_path in zip(time_angles_files, angle_paths):
        if tozip:  #Unzip if necessary
            image_file = time_angle_file[time_angle_file.rfind('/') + 1:]
            print 'Unzip ' + time_angle_file
            previous_files = os.listdir(angle_path)
            os.system('unzip ' + time_angle_file + ' -d ' + angle_path)
            # keep the freshly extracted file name (the original code relied on the
            # Python 2 list-comprehension variable leaking into this scope)
            new_files = [fname for fname in os.listdir(angle_path)
                         if fname not in previous_files]
            image_file = new_files[-1]
            ext_im = image_file[image_file.find('.'):]
            image_file = angle_path + image_file
        else:
            image_file = time_angle_file
        print 'Convert in inr ' + image_file
        im = imread(image_file)
        im.resolution = resolution
        image_file = image_file[image_file.rfind('/') + 1:]
        inr_file = angle_path + image_file.replace(ext_im, '.inr').replace(
            '\\', '')
        imsave(inr_file, im)
        inr_files.append(inr_file)

    ### Cropping process
    cropped_files = [
        inr_file.replace('.inr', '_cropp.inr') for inr_file in inr_files
    ]
    for inr_file, cropped_file in zip(inr_files, cropped_files):
        print 'Crop ' + inr_file
        croping(inr_file, cropped_file, downsize)

    ### Fusion process
    print ' Fusion  on ' + str(cropped_files)
    fusion(cropped_files, output_file, temporary_path, ori, mirrors)
    print 'Fusion done in ' + output_file

    return time_estimation() - start_process
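
# Usage sketch (hypothetical acquisition): fuse the four raw angles of one time
# point; resolution and target_resolution are in micrometers and ori describes the
# rotation direction between consecutive angles.
angles = ['RAW/t001_angle%d.zip' % a for a in range(4)]
elapsed = fusion_process(angles, 'FUSE/fused_t001.inr', 'TEMP_t001/', 'left',
                         (.17, .17, 1.), .3, 0, '.zip', mirrors=False)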
Example #12
def fusion(images_input, image_output, temporary_path, ori, mirrors=False):
    """Compute the fusion of a set of raw data at a given time point
    images_input : list of raw images
    image_output : name of the fused image
    temporary_path : path where the temporary images will be written
    ori : orientation of the rotation between the consecutive raw images
    mirrors : if True a mirror was used to correct the X-Y transformation"""

    # References Images
    references_files = [
        image_input.replace('.inr', '_ref.inr') for image_input in images_input
    ]
    im_flos_t0 = [imread(image_input) for image_input in images_input]
    im_ref_t0 = im_flos_t0[0]
    #The First angle is the starting reference point
    voxelsize = im_ref_t0.voxelsize
    print voxelsize
    im_ref_t0.resolution = voxelsize
    imsave(references_files[0],
           im_ref_t0)  #Save the first angle as a reference
    im_flos_t0 = im_flos_t0[1:len(im_flos_t0)]

    for i in range(len(im_flos_t0)):
        if i % 2 == 0:
            if mirrors:
                im = SpatialImage(im_flos_t0[i])
            else:
                im = SpatialImage(im_flos_t0[i].copy())[-1::-1, :, :]
        else:
            im = SpatialImage(im_flos_t0[i])
        im.voxelsize = voxelsize
        imsave(references_files[i + 1], im)

    #Rotation Matrix
    rotation_files = [
        image_input.replace('.inr', '_rot.txt') for image_input in images_input
    ]
    if ori == 'left':
        a = 270.
    else:
        a = 90.
    for i, im in enumerate(im_flos_t0):
        if i == 0:
            angle = 0.
        else:
            angle = a
        print 'Angle used :' + str(angle) + ' for ' + rotation_files[i + 1]
        rot = axis_rotation_matrix(axis="Y",
                                   angle=angle,
                                   min_space=(0, 0, 0),
                                   max_space=np.multiply(
                                       im.shape[:3], im.resolution))
        np.savetxt(rotation_files[i + 1], rot)

    registration_files = [
        image_input.replace('.inr', '_reg.inr') for image_input in images_input
    ]
    print 'Linear Reech in ' + registration_files[0]
    reech(references_files[0], registration_files[0], voxelsize[0])
    #imsave((registration_files[0]).replace('.inr','.tiff'),imread(registration_files[0])) #TEMPORARY

    trsf_files = [
        image_input.replace('.inr', '.trsf') for image_input in images_input
    ]
    for i in range(1, len(images_input)):
        print 'Linear Registration in ' + registration_files[i]
        linear_registration(registration_files[0], references_files[i],
                            rotation_files[i], registration_files[i],
                            trsf_files[i])
        #imsave((registration_files[i]).replace('.inr','.tiff'),imread(registration_files[i])) #TEMPORARY

    #Mask
    mask_files = [
        image_input.replace('.inr', '_mask.inr')
        for image_input in images_input
    ]
    mask = build_mask(im_ref_t0, True)
    mask.resolution = voxelsize
    temporary_mask = mask_files[0].replace('.inr', '_temp.inr')
    imsave(temporary_mask, mask)
    #imsave(temporary_mask.replace('.inr','.tiff'), mask) #TEMPORARY
    reech(temporary_mask, mask_files[0], voxelsize[0])
    full_mask = imread(mask_files[0])
    #imsave((mask_files[0]).replace('.inr','.tiff'),full_mask) #TEMPORARY
    for i in range(1, len(images_input)):
        print ' Compute Mask in ' + mask_files[i]
        im = imread(references_files[i])
        if i % 2 == 1:
            direction = False
        else:
            direction = True
        mask = build_mask(im, direction)
        mask.resolution = voxelsize
        temporary_mask = mask_files[i].replace('.inr', '_temp.inr')
        imsave(temporary_mask, mask)
        del mask
        apply_trsf(temporary_mask, trsf_files[i], mask_files[i],
                   registration_files[0])
        full_mask += imread(mask_files[i])
        #imsave((mask_files[i]).replace('.inr','.tiff'),imread(mask_files[i])) #TEMPORARY

    final = np.zeros_like(full_mask)
    final = final.astype(np.uint16)
    for i in range(len(images_input)):
        final += (imread(registration_files[i]) *
                  imread(mask_files[i])) / full_mask

    im_th = np.zeros((final.shape[0], final.shape[1], 1), dtype=np.uint16)
    im_max = np.max(final, axis=2)
    th = np.mean(im_max)
    im_th[im_max > th] = 1
    comp_co = nd.label(im_th)[0]
    volumes = compute_volumes(comp_co)
    volumes.pop(0)
    label = volumes.keys()[np.argmax(volumes.values())]
    bb = nd.find_objects(comp_co)[label - 1]
    bb2 = (slice(max(bb[0].start - 40, 0), min(final.shape[0],
                                               bb[0].stop + 40), None),
           slice(max(bb[1].start - 40, 0), min(final.shape[1],
                                               bb[1].stop + 40),
                 None), slice(0, final.shape[2]))
    imsave(image_output, SpatialImage(final[bb2]))
Example #13
def CalculAnimalPole(A,B,segmentation_corrected_files,naming,lin_tree):
    """
    Follow the two starting cells A and B through the lineage tree, propagating their
    names and returning, for each time point, the barycentre of their union (used to
    estimate the animal pole), together with the set of treated cells
    A, B : ids (t*10**4 + label) of the two starting cells
    segmentation_corrected_files : path format to the corrected segmented images
    naming : {cell id : name} dictionary, updated in place
    lin_tree : lineage tree
    """
    next=[A, B]
    #print next
    im=imread(timeNamed(segmentation_corrected_files,str(A/10**4)))
    time_barycenter={}
    time_barycenter[A/10**4]=np.mean(np.where((im==A%10**4) | (im==B%10**4)), axis=1)
    del im
    treated=set(next)
    # barycenter naming
    while next!=[]:
        prev_A=naming[next[0]]
        prev_B=naming[next[1]]
        nA=lin_tree.get(next[0], [])
        nB=lin_tree.get(next[1], [])
        print ' Process from '+str(prev_A)+ ' and ' + str(prev_B)
        treated=treated.union(set(next))
        if len(nA)==1 and len(nB)==1:
            naming[nA[0]] = naming[next[0]]
            naming[nB[0]] = naming[next[1]]
            im=imread(timeNamed(segmentation_corrected_files,str(nA[0]/10**4)))
            time_barycenter[nA[0]/10**4]=np.mean(np.where((im==nA[0]%10**4) | (im==nB[0]%10**4)), axis=1)
            print 'at '+str(nA[0]/10**4)+' ->' +str(time_barycenter[nA[0]/10**4])
            del im
            next=[nA[0], nB[0]]
        elif len(nA)==1 and len(nB)==2:
            # Get the daughter closest to the barycentre
            naming[nA[0]]=naming[next[0]]
            nB, time_barycenter[nA[0]/10**4]=find_neighbors(nB[0], nB[1], nA[0],
                imread(timeNamed(segmentation_corrected_files,str(nA[0]/10**4))))
            for i, c in enumerate(nB):
                naming[c] = 'b' + str(int(prev_B.split('.')[0][1:])+1) + '.' + \
                    '%04d'%(int(prev_B.split('.')[1][:-1])*2-1+i) + '*'
            print 'at '+str(nA[0]/10**4)+' ->' +str(time_barycenter[nA[0]/10**4])
            next=[nA[0], nB[0]]

        elif len(nB)==1 and len(nA)==2:
            naming[nB[0]]=naming[next[1]]
            nA, time_barycenter[nA[0]/10**4]=find_neighbors(nA[0], nA[1], nB[0],
                imread(timeNamed(segmentation_corrected_files,str(nA[0]/10**4))))
            for i, c in enumerate(nA):
                naming[c] = 'a' + str(int(prev_A.split('.')[0][1:])+1) + '.' + \
                    '%04d'%(int(prev_A.split('.')[1][:-1])*2-1+i) + '_'
            print 'at '+str(nA[0]/10**4)+' ->' +str(time_barycenter[nA[0]/10**4])
            next=[nA[0], nB[0]]

        elif len(nB)==len(nA)==2:
            next=[]
            order, time_barycenter[nA[0]/10**4]=order_2_cells_dividing(nA, nB,
                imread(timeNamed(segmentation_corrected_files,str(nA[0]/10**4))))
            for i, c in enumerate(order):
                if i%2:
                    prev=prev_B
                else:
                    prev=prev_A
                naming[c] = prev[0] + str(int(prev.split('.')[0][1:])+1) + '.' + \
                    '%04d'%(int(prev.split('.')[1][:-1])*2-1+i) + prev[-1]
            print 'at '+str(nA[0]/10**4)+' ->' +str(time_barycenter[nA[0]/10**4])
            next=[order[0], order[1]]

        else:
            print "Finish "
            next=[]


    return time_barycenter,treated
Example #14
def volume_checking(t,
                    delta_t,
                    seg,
                    seeds_from_opt_h,
                    seg_from_opt_h,
                    corres,
                    divided_cells,
                    bounding_boxes,
                    right_parameters,
                    im_ref,
                    im_ref16,
                    seeds,
                    nb_cells,
                    label_max,
                    exterior_corres,
                    parameters,
                    h_min_information,
                    sigma_information,
                    seg_origin,
                    segmentation_file_ref,
                    vf_file,
                    path_h_min,
                    volumes_t_1,
                    nb_proc=26,
                    Thau=25,
                    MinVolume=1000,
                    VolumeRatioBigger=0.5,
                    VolumeRatioSmaller=0.1,
                    MorphosnakeIterations=10,
                    NIterations=200,
                    DeltaVoxels=10**3):
    """
    Return corrected final segmentation based on conservation of volume in time
    seg : propagated segmentation (seg at t deformed on t+dt) (SpatialImage)
    seeds_from_opt_h : optimized seeds (SpatialImage)
    seg_from_opt_h : segmented image from seeds_from_opt_h (SpatialImage)
    corres : mapping of cells at time t to cells at t+dt in seg_from_opt_h
    divided_cells : list of cells that have divided between t and t+dt
    bounding_boxes : bounding boxes of the cells in seg (to speed up the computation)
    right_parameters : list of parameters used to create seeds_from_opt_h
    im_ref : image to segment at time t+dt 8 bits (SpatialImage)
    im_ref16 : image to segment at time t+dt in 16 bits (SpatialImage)
    seeds : Propagated seeds from segmentation at time t
    nb_cells : { cell: [#seeds, ] }: dict, key: cell, values: list of #seeds
    label_max : maximum label in seg_from_opt_h
    exterior_corres : list of cells that have been corrected for issue in exterior
    parameters : { cell: [[h_min, sigma], ]}: dict matching nb_cells, key: cell, values: list of parameters
    h_min_information : { cell: h_min}: dict associating to each cell the h_min that allowed its segmentation
    sigma_information : { cell: sigma}: dict associating to each cell the sigma that allowed its segmentation
    seg_origin : original segmentation (SpatialImage)
    segmentation_file_ref : path to the segmentation at time t
    vf_file : path to the vector field that register t into t+dt
    path_h_min : format of h-minima files
    volumes_t_1 : cell volumes at t
    """

    volumes_from_opt_h = compute_volumes(seg_from_opt_h)
    if volumes_t_1 == {}:
        volumes = compute_volumes(seg_origin)
    else:
        volumes = volumes_t_1

    bigger = []
    lower = []
    to_look_at = []
    too_little = []
    for mother_c, sisters_c in corres.iteritems():
        if mother_c != 1:
            volume_ratio = 1 - (volumes[mother_c] / np.sum(
                [volumes_from_opt_h.get(s, 1) for s in sisters_c]))
            if not (-VolumeRatioSmaller < volume_ratio < VolumeRatioSmaller):
                if (volume_ratio > 0) and (volumes_from_opt_h.get(s, 1) != 1):
                    bigger.append((mother_c, sisters_c))
                elif volumes_from_opt_h.get(s, 1) != 1:
                    lower.append((mother_c, sisters_c))
                if volume_ratio < -VolumeRatioBigger:
                    to_look_at.append(mother_c)
            else:
                for s in sisters_c:
                    if volumes_from_opt_h[s] < MinVolume:
                        too_little.append((mother_c, s))

    to_fuse_3 = []
    change_happen = False
    for c in to_look_at:
        s = nb_cells[c]
        nb_2 = np.sum(np.array(s) == 2)
        nb_3 = np.sum(np.array(s) >= 2)
        score = nb_2 * nb_3
        if (s.count(1) or s.count(2)) != 0:
            if score >= Thau:
                h, sigma = parameters[c][np.where(np.array(s) == 2)[0][-1]]
                nb_final = 2
            elif s.count(1) != 0:
                h, sigma = parameters[c][np.where(np.array(s) == 1)[0][-1]]
                nb_final = 1
            else:
                h, sigma = parameters[c][np.where(np.array(s) == 2)[0][-1]]
                nb_final = 2
            right_parameters[c] = [h, sigma, nb_final]
            if nb_final == 1 and s.count(2) != 0:
                h, sigma = parameters[c][s.index(2)]
                path_seeds_not_prop = path_h_min.replace('$HMIN',
                                                         str(h)).replace(
                                                             '$SIGMA',
                                                             str(sigma))
                bb = slices_dilation(bounding_boxes[c],
                                     maximum=seg.shape,
                                     iterations=2)
                seg_c = np.ones_like(seg[bb])
                seg_c[seg[bb] == c] = c
                nb, seeds_c = extract_seeds(seg_c, c, path_seeds_not_prop, bb)
                if nb == 2 and (seg_from_opt_h[bb][seeds_c != 0] == 0).any():
                    # if we can find 2 seeds and one of them is not in the newly computed cell
                    change_happen = True
                    seeds_from_opt_h[seeds_from_opt_h == corres[c][0]] = 0
                    corres[c] = [label_max, label_max + 1]
                    divided_cells.append((label_max, label_max + 1))
                    seeds_from_opt_h[bb][seeds_c == 1] = label_max
                    h_min_information[(t + delta_t) * 10**4 + label_max] = h
                    sigma_information[(t + delta_t) * 10**4 +
                                      label_max] = sigma
                    label_max += 1
                    seeds_from_opt_h[bb][seeds_c == 2] = label_max
                    h_min_information[(t + delta_t) * 10**4 + label_max] = h
                    sigma_information[(t + delta_t) * 10**4 +
                                      label_max] = sigma
                    label_max += 1
            if (nb_final == 1 or nb_final == 2) and (np.array(s) > 2).any():
                h, sigma = parameters[c][-1]
                path_seeds_not_prop = path_h_min.replace('$HMIN',
                                                         str(h)).replace(
                                                             '$SIGMA',
                                                             str(sigma))
                seeds_image = imread(path_seeds_not_prop)
                bb = slices_dilation(bounding_boxes[c],
                                     maximum=seg.shape,
                                     iterations=2)
                seg_c = np.zeros_like(seg_from_opt_h[bb])
                for daughter in corres[c]:
                    seg_c[seg_from_opt_h[bb] == daughter] = 1
                seeds_c = np.zeros_like(seg_from_opt_h[bb])
                seeds_c[(seg_c == 1) & (seeds_image[bb] != 0)] = 1
                seeds_c[(seg[bb] == c) & (seg_c != 1) &
                        (seeds_image[bb] != 0)] = 2
                if 2 in seeds_c:
                    change_happen = True
                    for daughter in corres[c]:
                        seeds_from_opt_h[seeds_from_opt_h == daughter] = 0
                    corres[c] = [label_max, label_max + 1]
                    divided_cells.append((label_max, label_max + 1))
                    seeds_from_opt_h[bb][seeds_c == 1] = label_max
                    h_min_information[(t + delta_t) * 10**4 + label_max] = h
                    sigma_information[(t + delta_t) * 10**4 +
                                      label_max] = sigma
                    label_max += 1
                    seeds_from_opt_h[bb][seeds_c == 2] = label_max
                    h_min_information[(t + delta_t) * 10**4 + label_max] = h
                    sigma_information[(t + delta_t) * 10**4 +
                                      label_max] = sigma
                    label_max += 1
            elif nb_final == 1:
                change_happen = True
                seeds_from_opt_h[seeds_from_opt_h == corres[c][0]] = 0
                seeds_from_opt_h[seeds == c] = corres[c][0]
                label_max += 1
        elif s.count(3) != 0:
            h, sigma = parameters[c][s.index(3)]
            path_seeds_not_prop = path_h_min.replace('$HMIN', str(h)).replace(
                '$SIGMA', str(sigma))
            bb = slices_dilation(bounding_boxes[c],
                                 maximum=seg.shape,
                                 iterations=2)
            seg_c = np.ones_like(seg[bb])
            seg_c[seg[bb] == c] = c
            nb, seeds_c = extract_seeds(seg_c,
                                        c,
                                        path_seeds_not_prop,
                                        bb,
                                        accept_3_seeds=True)
            change_happen = True
            seeds_from_opt_h[seeds_from_opt_h == corres[c]] = 0
            divided_cells.append((label_max, label_max + 1))
            seeds_from_opt_h[bb][seeds_c == 1] = label_max
            h_min_information[(t + delta_t) * 10**4 + label_max] = h
            sigma_information[(t + delta_t) * 10**4 + label_max] = sigma
            label_max += 1
            seeds_from_opt_h[bb][seeds_c == 2] = label_max
            h_min_information[(t + delta_t) * 10**4 + label_max] = h
            sigma_information[(t + delta_t) * 10**4 + label_max] = sigma
            label_max += 1
            seeds_from_opt_h[bb][seeds_c == 3] = label_max
            h_min_information[(t + delta_t) * 10**4 + label_max] = h
            sigma_information[(t + delta_t) * 10**4 + label_max] = sigma
            label_max += 1
            to_fuse_3.append(
                [c, (label_max - 1, label_max - 2, label_max - 3)])

    if too_little != []:
        for c in too_little:
            #for d in corres[c]:
            seeds_from_opt_h[seeds_from_opt_h == c[1]] = 0
            tmp = corres[c[0]]
            tmp.remove(c[1])
            if tmp == []:
                corres.pop(c[0])
            else:
                corres[c[0]] = tmp
            change_happen = True

    if change_happen:
        seg_from_opt_h = watershed(SpatialImage(seeds_from_opt_h), im_ref)
        for l in exterior_corres:
            seg_from_opt_h[seg_from_opt_h == l] = 1

        volumes_from_opt_h = compute_volumes(seg_from_opt_h)

    lower = []
    for mother_c, sisters_c in corres.iteritems():
        if mother_c != 1:
            volume_ratio = 1 - (volumes[mother_c] / np.sum(
                [volumes_from_opt_h.get(s, 1) for s in sisters_c]))
            if not (-.1 < volume_ratio < .1):
                if (volume_ratio < 0) and volumes_from_opt_h.get(s, 1) != 1:
                    lower.append((mother_c, sisters_c))

    exterior_correction = []
    if lower != []:
        from copy import deepcopy
        tmp = apply_trsf(segmentation_file_ref,
                         vf_file,
                         nearest=True,
                         lazy=False)
        old_bb = nd.find_objects(tmp)
        for mother_c, sisters_c in lower:
            cell_before = tmp[old_bb[mother_c - 1]] == mother_c
            cell_after = np.zeros_like(cell_before)
            for c in sisters_c:
                cell_after += seg_from_opt_h[old_bb[mother_c - 1]] == c
            lost = seg_from_opt_h[old_bb[mother_c - 1]][cell_after
                                                        ^ cell_before]
            max_share = 0
            share_lab = 0
            size = {}
            for v in np.unique(lost):
                size[v] = np.sum(lost == v)
                if np.sum(lost == v) > max_share:
                    max_share = np.sum(lost == v)
                    share_lab = v
            if share_lab == 1 and 1 in tmp[old_bb[mother_c - 1]]:
                exterior_correction.append((mother_c, sisters_c))
        from multiprocessing import Pool
        pool = Pool(processes=nb_proc)
        mapping = []
        for m, daughters in exterior_correction:
            bb = slices_dilation(old_bb[m - 1],
                                 maximum=im_ref.shape,
                                 iterations=15)
            im_ref_tmp = deepcopy(im_ref16[bb])
            seg_ref_tmp = deepcopy(tmp[bb])
            mapping.append((m, daughters, bb, im_ref_tmp, seg_ref_tmp,
                            MorphosnakeIterations, NIterations, DeltaVoxels))
        outputs = pool.map(perform_ac, mapping)
        pool.close()
        pool.terminate()
        for m, daughters, bb, cell_out in outputs:
            seg_from_opt_h[bb][(seg_from_opt_h[bb] == 1)
                               & cell_out] = daughters[0]
            if len(daughters) == 2:
                seg_from_opt_h[bb][seg_from_opt_h[bb] ==
                                   daughters[1]] = daughters[0]
                if tuple(daughters) in divided_cells:
                    divided_cells.remove(tuple(daughters))
            corres[m] = [daughters[0]]
    for c, tf in to_fuse_3:
        bb = slices_dilation(bounding_boxes[c],
                             maximum=seg.shape,
                             iterations=2)
        seg_c = np.ones_like(seg_from_opt_h[bb])
        seg_c[seg_from_opt_h[bb] == tf[0]] = tf[0]
        seg_c[seg_from_opt_h[bb] == tf[1]] = tf[1]
        seg_c[seg_from_opt_h[bb] == tf[2]] = tf[2]
        v1 = np.sum(seg_c == tf[0])
        v2 = np.sum(seg_c == tf[1])
        v3 = np.sum(seg_c == tf[2])
        vol_cells_to_f = [v1, v2, v3]
        cell_to_f = np.argmin(vol_cells_to_f)
        tmp = nd.binary_dilation(seg_c == tf[cell_to_f])
        p1 = tf[np.argsort(vol_cells_to_f)[1]]
        p2 = tf[np.argsort(vol_cells_to_f)[2]]
        im_tmp = np.zeros_like(seg_c)
        im_tmp[seg_c == p1] = p1
        im_tmp[seg_c == p2] = p2
        im_tmp[tmp == False] = 0
        p1_share = np.sum(im_tmp == p1)
        p2_share = np.sum(im_tmp == p2)
        if p1_share > p2_share:
            seg_from_opt_h[seg_from_opt_h == tf[cell_to_f]] = p1
        else:
            seg_from_opt_h[seg_from_opt_h == tf[cell_to_f]] = p2
        corres[c] = [p1, p2]
        divided_cells.append((p1, p2))

    return seg_from_opt_h, bigger, lower, to_look_at, too_little, corres, exterior_correction
Example #15
def get_seeds_from_optimized_parameters(t,
                                        seg,
                                        cells,
                                        cells_with_no_seed,
                                        right_parameters,
                                        delta_t,
                                        bounding_boxes,
                                        im_ref,
                                        seeds,
                                        parameters,
                                        h_min_max,
                                        path_h_min,
                                        sigma,
                                        Volum_Min_No_Seed=100):
    """
    Return the seed image from the locally parametrized h-minima operator
    t : time
    seg : propagated segmentation (seg at t deformed on t+dt)
    cells : list of cells in seg
    cells_with_no_seed : list of cells with no correct parameters
    right_parameters : dict of the correct parameters for every cells
    delta_t : dt
    bounding_boxes : bounding boxes of the cells in seg (to fasten the computation)
    im_ref : Intensity image at time t+dt (on which to perform the watershed)
    seeds : Propagated seeds from segmentation at time t (when no correct parameters were found)
    parameters : ?
    h_min_max : starting maximum value of h_min
    sigma : sigma of the gaussian smoothing (in voxels)
    path_h_min : format of h minima file names
    """
    seeds_from_opt_h = np.zeros_like(seg, dtype=np.uint16)
    label_max = 2
    corres = {}
    divided_cells = []
    h_min_information = {}
    sigma_information = {}
    sigma_done = []
    h_min_done = []
    seeds_images = {}
    for c in cells:
        if c in cells_with_no_seed:
            continue
        if not seeds_images.has_key(
            (right_parameters[c][0], right_parameters[c][1])):
            path_seeds_not_prop = path_h_min.replace(
                '$HMIN', str(right_parameters[c][0])).replace(
                    '$SIGMA', str(right_parameters[c][1]))
            seeds_images[(
                right_parameters[c][0],
                right_parameters[c][1])] = imread(path_seeds_not_prop)
        bb = slices_dilation(bounding_boxes[c],
                             maximum=seg.shape,
                             iterations=2)
        seg_c = np.ones_like(seg[bb])
        seg_c[seg[bb] == c] = c
        seeds_ex = seeds_images[(right_parameters[c][0],
                                 right_parameters[c][1])][bb]
        nb, seeds_c = extract_seeds(seg_c, c, seeds_ex)
        if nb == 1:
            corres[c] = [label_max]
            h_min_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][0]
            sigma_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][1]
            seeds_from_opt_h[bb] += seeds_c * label_max
            label_max += 1
        elif nb == 2:
            corres[c] = [label_max, label_max + 1]
            divided_cells.append((label_max, label_max + 1))
            seeds_from_opt_h[bb][seeds_c == 1] = label_max
            h_min_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][0]
            sigma_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][1]
            label_max += 1
            seeds_from_opt_h[bb][seeds_c == 2] = label_max
            h_min_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][0]
            sigma_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][1]
            label_max += 1
        elif nb == 3:
            corres[c] = [label_max, label_max + 1]
            divided_cells.append((label_max, label_max + 1))
            seeds_from_opt_h[bb][seeds_c == 1] = label_max
            h_min_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][0]
            sigma_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][1]
            label_max += 1
            seeds_from_opt_h[bb][seeds_c == 2] = label_max
            h_min_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][0]
            sigma_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][1]
            label_max += 1

    # Create background seed
    c = 1
    seg_c = np.ones_like(seg)
    seg_c[seg != c] = 0
    sigma_out = sigma
    key_min = (h_min_max, sigma_out)
    for k in seeds_images.iterkeys():
        if k[0] < key_min[0]:
            key_min = k

    seeds_not_prop = seeds_images[key_min]
    parameters = (seg_c, c, seeds_not_prop)
    seg_c_p, nb, labels, c = cell_propagation(parameters)
    corres[1] = []
    exterior_corres = []
    for l in labels:
        seeds_from_opt_h = seeds_from_opt_h.astype(np.uint16)
        exterior_corres.append(label_max)
        seeds_from_opt_h[seeds_not_prop == l] = label_max
        label_max += 1

    for c in cells_with_no_seed:
        if np.sum(seg == c) > Volum_Min_No_Seed:
            seeds_from_opt_h[seeds == c] = label_max
            h_min_information[(t + delta_t) * 10**4 +
                              label_max] = right_parameters[c][0]
            corres[c] = [label_max]
            label_max += 1

    seg_from_opt_h = watershed(SpatialImage(seeds_from_opt_h), im_ref)
    for l in exterior_corres:
        seg_from_opt_h[seg_from_opt_h == l] = 1
    corres[1] = [1]

    return seeds_from_opt_h, seg_from_opt_h, corres, exterior_corres, h_min_information, sigma_information, divided_cells, label_max
Example #16
def get_seeds(seg,
              h_min_min,
              h_min_max,
              sigma,
              cells,
              fused_file,
              path_h_min,
              bounding_boxes,
              nb_proc=26):
    """
    Return the number of seeds found for each cell in seg for different h_min values (from h_min_max down to h_min_min)
    seg : Segmented image (SpatialImage)
    h_min_min : minimum value of h_min
    h_min_max : starting maximum value of h_min
    sigma : sigma of the gaussian smoothing (in voxels)
    cells : cells contained in seg
    fused_file : path to the fused image on which to perform the local minima detection
    path_h_min : format of h minima file names
    bounding_boxes : bounding boxes of the cells in seg (to speed up the computation)
    """
    from multiprocessing import Pool
    nb_cells = {}
    treated = []
    parameters = {}
    mask = None
    temp_path_h_min = path_h_min.replace('$HMIN', str(h_min_max))
    if not os.path.exists(temp_path_h_min):
        seeds_not_prop, mask = find_local_minima(temp_path_h_min,
                                                 fused_file,
                                                 h_min_max,
                                                 sigma=sigma)
    else:
        seeds_not_prop = imread(temp_path_h_min)

    h_min = h_min_max
    tmp_nb = []
    checking = True
    while (checking):
        mapping = []
        tmp_nb = []
        for c in cells:
            if not c in treated:
                bb = slices_dilation(bounding_boxes[c],
                                     maximum=seg.shape,
                                     iterations=2)
                seg_c = np.ones_like(seg[bb])
                seg_c[seg[bb] == c] = c
                mapping.append((seg_c, c, seeds_not_prop[bb]))

        pool = Pool(processes=nb_proc)
        outputs = pool.map(cell_propagation, mapping)
        pool.close()
        pool.terminate()
        for seg_c_p, nb, labels, c in outputs:
            tmp_nb.append(nb)
            nb_cells.setdefault(c, []).append(nb)
            parameters.setdefault(c, []).append([h_min, sigma])

        h_min -= 2
        checking = h_min >= h_min_min and (((np.array(tmp_nb) <= 2) &
                                            (np.array(tmp_nb) != 0)).any()
                                           or tmp_nb == [])
        if checking:
            temp_path_h_min = path_h_min.replace('$HMIN', str(h_min))
            if not os.path.exists(temp_path_h_min):
                seeds_not_prop, mask = find_local_minima(temp_path_h_min,
                                                         fused_file,
                                                         h_min,
                                                         mask=mask,
                                                         sigma=sigma)
            else:
                seeds_not_prop = imread(temp_path_h_min)
            if seeds_not_prop is None:
                checking = False
    return nb_cells, parameters
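
# Usage sketch (hypothetical paths): count, for every cell of a propagated
# segmentation, the number of seeds obtained while lowering h_min from 18 to 2;
# '$HMIN' is the placeholder expected in path_h_min.
seg = imread('seg_t002.inr')
cells = [c for c in np.unique(seg) if c != 1]
bounding_boxes = dict(zip(range(1, int(max(cells)) + 1), nd.find_objects(seg)))
nb_cells, parameters = get_seeds(seg, 2, 18, 2, cells, 'fused_t002.inr',
                                 'h_min_t002_h$HMIN_s2.inr', bounding_boxes, nb_proc=8)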
Example #17
def mars_segmentation(image_input,
                      segmentation_output,
                      sigma,
                      h_min,
                      sigma_ws,
                      th=0):
    """ Perform the watershed segmentation of a given intensity image
    image_input : path to the input image
    segmentation_output : path to the final segmetented image
    sigma : value of the gaussian filter on the intensity image used for the h-minima operation (in voxels)
    h_min : value of the h-minima operation parameter (in bit)
    cleaning_needed : True if a removing of the cells touching the border of the image is needed
    sigma_ws : value of the gaussian filter on the intensity image used for the watershed (in voxel)
    th:Threshold for output cleaning
    """
    os.system('mkdir -p ' +
              segmentation_output[:segmentation_output.rfind('/')])
    print 'Process Segmentation of ' + image_input
    if image_input.split('.')[-1] == 'tif' or image_input.split(
            '.')[-1] == 'tiff':
        print ' Convert in inr ' + image_input
        image_input_inr = ''
        for n in image_input.split('.')[:-1]:
            image_input_inr += n
        image_input_inr += '.inr'
        imsave(image_input_inr, imread(image_input))
        image_input = image_input_inr

    #### Definition of paths to the different outputs ####
    path_gSigma = segmentation_output.replace(
        '.inr', 'g' + str(sigma) +
        '.inr')  # Path to the smoothed image for the local minima detection
    path_g_5 = segmentation_output.replace(
        '.inr', 'g' + str(sigma_ws) +
        '.inr')  # Path to the smoothed image for the watershed
    path_rm = segmentation_output.replace(
        '.inr', 'rm_s' + str(sigma) + '_h' + str(h_min) +
        '.inr')  # Path to the regionalmax image
    path_cc = segmentation_output.replace('.inr', 'c_s' + str(sigma) + '_h' +
                                          str(h_min) +
                                          '.inr')  # Path to the seeds image

    print 'Filter with sigma=' + str(sigma) + ' in ' + path_gSigma
    recfilter(image_input, path_gSigma, sigma, lazy=True)

    print 'Filter with sigma=' + str(sigma_ws) + ' in ' + path_g_5
    recfilter(image_input, path_g_5, sigma_ws, lazy=True)

    print 'Find local minima with h_min=' + str(h_min) + ' in ' + path_rm
    regionalmax(path_gSigma, path_rm, h_min)

    print 'Find connected components in ' + path_cc
    connexe(path_rm, path_cc, h_min)

    print 'Process watershed segmentation in ' + segmentation_output
    watershed(path_cc, path_g_5, segmentation_output)

    #Delete Temporary files
    os.system('rm ' + path_gSigma + ' ' + path_g_5 + ' ' + path_rm + ' ' +
              path_cc)

    print 'Segmentation done'
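
# Usage sketch (hypothetical paths and parameter values): segment a single fused
# image with the mars_segmentation pipeline defined above.
mars_segmentation('FUSE/fused_t001.inr', 'SEG/seg_t001.inr', sigma=2, h_min=4, sigma_ws=1)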
Example #18
def segmentation_propagation(t,
                             fused_file_ref,
                             segmentation_file_ref,
                             fused_file,
                             seeds_file_ref,
                             vf_file,
                             path_h_min,
                             h_min_min,
                             h_min_max,
                             sigma,
                             lin_tree_information,
                             delta_t,
                             nb_proc,
                             RadiusOpening=20,
                             Thau=25,
                             MinVolume=1000,
                             VolumeRatioBigger=0.5,
                             VolumeRatioSmaller=0.1,
                             MorphosnakeIterations=10,
                             NIterations=200,
                             DeltaVoxels=10**3,
                             Volum_Min_No_Seed=100):
    """
    Return the propagated segmentation at time t+dt and the updated lineage tree and cell information
    t : time t
    fused_file_ref : path format to fused images
    segmentation_file_ref : path format to segmented images
    fused_file : fused image at t+dt
    seeds_file_ref : path where the seeds extracted from the segmentation at t are written
    vf_file : path format to transformation
    path_h_min : path format to h-minima files
    h_min_min : minimum value of the h-min value for the h-minima operator
    h_min_max : maximum value of the h-min value for the h-minima operator
    sigma : sigma value in voxels for gaussian filtering
    lin_tree_information : dictionary containing the lineage tree dictionary, volume information, h_min information and sigma information for every cell
    delta_t : value of dt (in number of time points)
    nb_proc : maximum number of processors to allocate
    from copy import deepcopy

    lin_tree = lin_tree_information.get('lin_tree', {})
    tmp = lin_tree_information.get('volumes_information', {})
    volumes_t_1 = {k % 10**4: v for k, v in tmp.iteritems() if k / 10**4 == t}
    h_min_information = {}

    segmentation_ref = imread(segmentation_file_ref)

    print 'Compute vector fields from ' + str(t) + ' to ' + str(t + delta_t)
    non_linear_registration(fused_file_ref,\
                        fused_file, \
                        vf_file.replace('.inr','_affine.inr'), \
                        vf_file.replace('.inr','_affine.trsf'),\
                        vf_file.replace('.inr','_vector.inr'),\
                        vf_file)
    os.system('rm -f ' + vf_file.replace('.inr', '_affine.inr') + ' ' +
              vf_file.replace('.inr', '_affine.trsf') + ' ' +
              vf_file.replace('.inr', '_vector.inr'))

    print 'Create The Seeds from ' + str(t)
    seeds_ref = create_seeds(segmentation_ref, max_size_cell=np.inf)
    imsave(seeds_file_ref, SpatialImage(seeds_ref))

    print 'Deform Seeds with vector fields from ' + str(t) + ' to ' + str(
        t + delta_t)
    seeds = apply_trsf(seeds_file_ref,
                       vf_file,
                       template=fused_file,
                       nearest=True,
                       lazy=False)

    print 'Perform watershed with these transformed seeds'
    im_fused = imread(fused_file)
    im_fused_16 = deepcopy(im_fused)
    im_fused = to_u8(im_fused)

    segmentation = watershed(seeds, im_fused)
    cells = list(np.unique(segmentation))
    cells.remove(1)
    bounding_boxes = dict(
        zip(range(1,
                  max(cells) + 1), nd.find_objects(segmentation)))
    treated = []

    print 'Estimation of the local h-minima at ' + str(t + delta_t)
    nb_cells, parameters = get_seeds(segmentation,
                                     h_min_min,
                                     h_min_max,
                                     sigma,
                                     cells,
                                     fused_file,
                                     path_h_min.replace('$SIGMA', str(sigma)),
                                     bounding_boxes,
                                     nb_proc=nb_proc)

    right_parameters, cells_with_no_seed = get_back_parameters(nb_cells,
                                                               parameters,
                                                               lin_tree,
                                                               cells,
                                                               Thau=Thau)

    print 'Applying volume correction ' + str(t + delta_t)
    seeds_from_opt_h, seg_from_opt_h, corres, exterior_corres, h_min_information, sigma_information, divided_cells, label_max = get_seeds_from_optimized_parameters(
        t,
        segmentation,
        cells,
        cells_with_no_seed,
        right_parameters,
        delta_t,
        bounding_boxes,
        im_fused,
        seeds,
        parameters,
        h_min_max,
        path_h_min,
        sigma,
        Volum_Min_No_Seed=Volum_Min_No_Seed)

    seg_from_opt_h, bigger, lower, to_look_at, too_little, corres, exterior_correction = volume_checking(
        t,
        delta_t,
        segmentation,
        seeds_from_opt_h,
        seg_from_opt_h,
        corres,
        divided_cells,
        bounding_boxes,
        right_parameters,
        im_fused,
        im_fused_16,
        seeds,
        nb_cells,
        label_max,
        exterior_corres,
        parameters,
        h_min_information,
        sigma_information,
        segmentation_ref,
        segmentation_file_ref,
        vf_file,
        path_h_min,
        volumes_t_1,
        nb_proc=nb_proc,
        Thau=Thau,
        MinVolume=MinVolume,
        VolumeRatioBigger=VolumeRatioBigger,
        VolumeRatioSmaller=VolumeRatioSmaller,
        MorphosnakeIterations=MorphosnakeIterations,
        NIterations=NIterations,
        DeltaVoxels=DeltaVoxels)

    seg_from_opt_h = outer_correction(seg_from_opt_h,
                                      exterior_correction,
                                      segmentation_file_ref,
                                      RadiusOpening=RadiusOpening)

    volumes = compute_volumes(seg_from_opt_h)
    volumes_information = {}
    for k, v in volumes.iteritems():
        volumes_information[(t + delta_t) * 10**4 + k] = v
    for m, d in corres.iteritems():
        if m != 1:
            daughters = []
            for c in d:
                if c in volumes:
                    daughters.append(c + (t + delta_t) * 10**4)
                else:
                    print str(c) + ' is not segmented'
            if len(daughters) > 0:
                lin_tree[m + t * 10**4] = daughters
    lin_tree_information['lin_tree'] = lin_tree
    lin_tree_information.setdefault('volumes_information',
                                    {}).update(volumes_information)
    lin_tree_information.setdefault('h_mins_information',
                                    {}).update(h_min_information)
    lin_tree_information.setdefault('sigmas_information',
                                    {}).update(sigma_information)

    return seg_from_opt_h, lin_tree_information
Example #19
def name_propagation(lin_tree, starting_name, segmentation_corrected_files, begin,end, step):
    """
    Return the propagated names of each cell in the lineage tree using the Conklin rule (Euclidean distance, not geodesic)
    lin_tree : lineage tree
    starting_name : manually processed starting names ({cell id : name}); the names have to be in the format 'a7.xxxx*' or 'a7.xxxx_'
    segmentation_corrected_files : path format to the corrected segmented images (the names of the segmented images have to be like fw_seg_t002_corrected.inr, TO CHANGE)
    begin : starting time point
    end : ending time point
    step : dt value
    """
    print 'Process Name Propagation'
    from copy import copy
    inv_lin_tree={ d : m for m, daughters in lin_tree.iteritems() for d in daughters }
    naming={}
    for k, v in starting_name.iteritems():
        naming[k]=v
 

    stage=naming.values()[0].split('.')[0][1:]
    # First we compute the animal pole to orient named cell divisions
    A=naming.keys()[naming.values().index("a"+stage+".0001*")]
    B=naming.keys()[naming.values().index("b"+stage+".0001_")]
    time_barycenter1,treated1=CalculAnimalPole(A,B,segmentation_corrected_files,naming,lin_tree)
    A=naming.keys()[naming.values().index("a"+stage+".0001_")]
    B=naming.keys()[naming.values().index("b"+stage+".0001*")]
    time_barycenter2,treated2=CalculAnimalPole(A,B,segmentation_corrected_files,naming,lin_tree)
    treated=treated1.union(treated2)
    #Add starting name
    for cell in naming:
        if cell not in treated:
            treated.add(cell)

    #Average Barycenters
    time_barycenter={}
    for t in range(begin, end+1, step):
        n=0
        coord=np.zeros(3)
        if t in time_barycenter1:
            coord+=time_barycenter1[t]
            n+=1
        if t in time_barycenter2:
            coord+=time_barycenter2[t]
            n+=1
        if n==0:
            time_barycenter[t]=time_barycenter[t-1]
        else:
            time_barycenter[t]=np.divide(coord,n)
        #print ' at '+str(t)+ '->'+str(time_barycenter[t])


    error_metric={}


    pre_treated=np.array(list(treated))
    for i in range(begin, end+1, step):
        print "Progagate cell name at "+str(i)
        im = imread(timeNamed(segmentation_corrected_files,i))
        ids=i*10**4+np.unique(im[im>1]).astype(np.uint32)
        treated=list(pre_treated[pre_treated/10**4==i])
        for cell in ids:
            print " Process cell "+str(cell)
            if not cell in treated:
            	#print ' -> not treated'
                treated.append(cell)
                mother=inv_lin_tree.get(cell, None)
                sibling=lin_tree.get(mother, cell)
                if sibling==[cell]:
                    naming[cell]=naming.get(mother, "")
                elif cell in sibling and len(sibling)==2:
                    #print ' Sinling sibling[0]%10**4='+str(sibling[0]%10**4) 
                    #print ' Sinling sibling[1]%10**4='+str(sibling[1]%10**4) 
                    #print time_barycenter
                    #print i
                    #print time_barycenter[i]
                    cells_sorted, dist_diff=sort_to_bary([sibling[0]%10**4, sibling[1]%10**4], time_barycenter[i], im)
                    error_metric[cell]=dist_diff
                    for j, c in enumerate(cells_sorted):
                        prev_n=naming.get(mother, "")
                        if prev_n:
                            naming[c+i*10**4] = prev_n[0] + str(int(prev_n.split('.')[0][1:])+1) + '.' + \
                    '%04d'%(int(prev_n.split('.')[1][:-1])*2-1+j) + prev_n[-1]
                        treated.append(c+i*10**4)
                else:
                    treated.extend(sibling)
            if not cell in naming:
                naming[cell]=""
            print " -->" +naming[cell]
  

    # Propagate errors
    errors=error_metric
    roots=[k for k in lin_tree.keys() if k<=2*10**4]
    todo=roots
    inv_tree={ v : k for k, values in lin_tree.iteritems() for v in values }
    while todo!=[]:
        n=todo[0]
        if len(lin_tree.get(n, ''))==2:
            tmp=lin_tree[n]
            if errors.get(tmp[0], 0)==0:
                errors[tmp[0]]=errors[tmp[1]]
            else:
                errors[tmp[1]]=errors[tmp[0]]
        if errors.get(n, '')=='':
            errors[n]=errors.get(inv_tree.get(n, ''), 20)
        todo.remove(n)
        new=lin_tree.get(n, [])
        todo.extend(new)

    return naming,errors