Example No. 1
def main(args):
    if args.outdir == 'GT':
        marker_files = map(
            string.strip,
            os.popen('ls ' + args.indir + '*-GT.marker').readlines())
        substack_ids = [f.split('/')[-1].split('-')[0] for f in marker_files]
    else:
        marker_files = map(
            string.strip,
            os.popen('ls ' + args.outdir + '/*/ms.marker').readlines())
        substack_ids = [f.split('/')[-2] for f in marker_files]

    C_final = set()
    plist = None
    hue = 0.0
    # FIXME: This is way too slow, should use divide & conquer using a binary tree
    for marker_file, substack_id in zip(marker_files, substack_ids):
        substack = SubStack(args.indir, substack_id, plist)
        plist = substack.plist
        print('Merging', marker_file, substack_id)
        C = substack.load_markers(marker_file, args.outdir == 'GT')
        # Markers are shifted by 1 in save_markers() because Vaa3D coordinates
        # start from 1 rather than 0 ==> restore the original values
        for c in C:
            c.x -= 1
            c.y -= 1
            c.z -= 1
        hue = hue + 0.31
        if hue > 1:
            hue = hue - 1
        merge(C_final, C, substack, hue, args.verbose)
    substack.save_markers(args.outfile, C_final)
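
The FIXME above points out that merge() compares every new marker against the whole accumulated set. A minimal sketch of the spatial-index idea it hints at, using scipy's cKDTree and a hypothetical distance tolerance (this is not the project's merge() implementation; markers are assumed to expose .x/.y/.z attributes, and a plain list stands in for C_final):

import numpy as np
from scipy.spatial import cKDTree

def merge_with_kdtree(accumulated, new_markers, tolerance=2.0):
    # Append only those new markers that have no accumulated marker within
    # `tolerance` voxels, using a KD-tree instead of pairwise scans.
    if not accumulated:
        accumulated.extend(new_markers)
        return accumulated
    tree = cKDTree(np.array([(c.x, c.y, c.z) for c in accumulated]))
    for c in new_markers:
        if not tree.query_ball_point((c.x, c.y, c.z), r=tolerance):
            accumulated.append(c)
    return accumulated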
Example No. 2
def main(args):
    substack = SubStack(args.indir, args.substack_id)
    patch = substack.get_volume()
    histogram = np.histogram(patch, bins=256, range=(0, 256))[0]
    thresholds = threshold.multi_kapur(histogram, 2)
    outfile = args.outdir + '/' + args.substack_id + '/' + basename(args.indir)
    mkdir_p(args.outdir + '/' + args.substack_id)
    f = open(outfile, 'w')
    f.write(str(thresholds[0]) + ',' + str(thresholds[1]) + '\n')
    f.close()
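
threshold.multi_kapur(histogram, 2) above returns two intensity thresholds. As an illustration of the underlying idea (an assumption about its behaviour, not the project's implementation), a brute-force two-threshold Kapur entropy maximization looks like this:

import numpy as np

def kapur_two_thresholds(histogram):
    # Unoptimized sketch: pick (t1, t2) maximizing the summed entropies of the
    # three histogram regions [0, t1), [t1, t2), [t2, 256).
    p = histogram.astype(np.float64)
    p = p / p.sum()

    def region_entropy(lo, hi):
        w = p[lo:hi]
        w = w[w > 0]
        if w.sum() == 0:
            return 0.0
        w = w / w.sum()
        return -np.sum(w * np.log(w))

    best_h, best_pair = -np.inf, (0, 0)
    for t1 in range(1, 255):
        for t2 in range(t1 + 1, 256):
            h = region_entropy(0, t1) + region_entropy(t1, t2) + region_entropy(t2, 256)
            if h > best_h:
                best_h, best_pair = h, (t1, t2)
    return best_pair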
Example No. 3
def main(args):

    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...'%(args.substack_id))

    substack = SubStack(args.indir,args.substack_id)
    np_tensor_3d, minz = imtensor.load_nearby(args.tensorimage, substack, args.extramargin)

    if not args.local_mean_std:
        print('Reading standardization data from', args.trainfile)
        h5 = tables.openFile(args.trainfile)
        Xmean = h5.root.Xmean[:].astype(np.float32)
        Xstd = h5.root.Xstd[:].astype(np.float32)
        h5.close()
    else:
        Xmean=None
        Xstd=None
    
    print('Starting semantic deconvolution of volume', args.substack_id)
    model = pickle.load(open(args.model))
    minz = int(re.split('[a-zA-Z0-9]*_', substack.info['Files'][0])[1].split('.tif')[0])
    reconstruction = deconvolver.filter_volume(np_tensor_3d, Xmean, Xstd,
                                               args.extramargin, model, args.speedup, do_cython=args.do_cython, trainfile=args.trainfile)

    imtensor.save_tensor_as_tif(reconstruction, args.outdir+'/'+args.substack_id, minz)

    print ("total time reconstruction: %s" %(str(timeit.default_timer() - total_start)))
Example No. 4
def main(args):

    total_start = timeit.default_timer()
    print('Starting Preibisch fusion', args.substack_id)

    ss = SubStack(args.first_view_dir, args.substack_id)
    minz = int(
        ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(
        ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'
    np_tensor_3d_first_view, _ = imtensor.load_nearby(
        args.tensorimage_first_view, ss, args.size_patch)
    sc_in = np_tensor_3d_first_view.shape

    if args.transformation_file is not None:
        R, t = parse_transformation_file(args.transformation_file)
        np_tensor_3d_second_view = transform_substack(
            args.second_view_dir,
            args.tensorimage_second_view,
            args.substack_id,
            R,
            t,
            args.size_patch,
            invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(
            args.tensorimage_second_view, ss, args.size_patch)

    fused_image, entropy_mask_first_view, entropy_mask_second_view = do_content_based_fusion(
        np_tensor_3d_first_view,
        np_tensor_3d_second_view,
        args.size_patch,
        args.size_patch,
        speedup=1,
        fast_computation=True)

    if args.extramargin > args.size_patch:
        args.extramargin = args.size_patch

    offset_margin = args.size_patch - args.extramargin
    fused_image_output = fused_image[offset_margin:sc_in[0] - offset_margin,
                                     offset_margin:sc_in[1] - offset_margin,
                                     offset_margin:sc_in[2] - offset_margin]
    atom = tables.UInt8Atom()
    mkdir_p(args.outdir)
    h5f = tables.openFile(args.outdir + '/' + args.substack_id + '.h5', 'w')
    sc_out = fused_image_output.shape
    ca = h5f.createCArray(h5f.root, 'full_image', atom, sc_out)
    for z in xrange(0, sc_out[0], 1):
        ca[z, :, :] = fused_image_output[z, :, :]
    h5f.close()

    imtensor.save_tensor_as_tif(fused_image_output,
                                args.outdir + '/' + args.substack_id,
                                minz,
                                prefix=prefix)
    print("total time Preibisch fusion: %s" %
          (str(timeit.default_timer() - total_start)))
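
do_content_based_fusion blends the two registered views so that, at each voxel, the view carrying more local information dominates. A rough sketch of that weighting scheme, here using local variance as a stand-in for the entropy masks computed above (illustrative only, not the project's code):

import numpy as np
from scipy.ndimage import uniform_filter

def fuse_two_views(view_a, view_b, window=9):
    a = view_a.astype(np.float32)
    b = view_b.astype(np.float32)

    def local_variance(x):
        # variance in a `window`-sized neighbourhood around every voxel
        mean = uniform_filter(x, size=window)
        return uniform_filter(x * x, size=window) - mean * mean

    wa = np.clip(local_variance(a), 1e-6, None)
    wb = np.clip(local_variance(b), 1e-6, None)
    fused = (wa * a + wb * b) / (wa + wb)
    return np.clip(fused, 0, 255).astype(np.uint8)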
Example No. 5
def main(args):

    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...' % (args.substack_id))

    ss = SubStack(args.first_view_dir, args.substack_id)
    minz = int(
        ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(
        ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'

    np_tensor_3d_first_view, _ = imtensor.load_nearby(
        args.tensorimage_first_view, ss, args.extramargin)
    if args.transformation_file is not None:
        R, t = parse_transformation_file(args.transformation_file)
        np_tensor_3d_second_view = transform_substack(
            args.second_view_dir,
            args.tensorimage_second_view,
            args.substack_id,
            R,
            t,
            args.extramargin,
            invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(
            args.tensorimage_second_view, ss, args.extramargin)

    print('Loading model...')
    model = pickle.load(open(args.model))

    if not args.local_mean_std:
        h5 = tables.openFile(args.trainfile)
        Xmean = h5.root.Xmean[:].astype(np.float32)
        Xstd = h5.root.Xstd[:].astype(np.float32)
        h5.close()
    else:
        Xmean = None
        Xstd = None

    reconstruction = deconvolver.filter_volume(
        [np_tensor_3d_first_view, np_tensor_3d_second_view],
        Xmean,
        Xstd,
        args.extramargin,
        model,
        args.speedup,
        do_cython=args.do_cython,
        trainfile=args.trainfile)

    pair_id = basename(args.first_view_dir) + '_' + basename(
        args.second_view_dir)
    outdir = args.outdir + '/' + args.substack_id + '/' + pair_id

    imtensor.save_tensor_as_tif(reconstruction, outdir, minz, prefix='slice_')

    print("total time reconstruction: %s" %
          (str(timeit.default_timer() - total_start)))
Example No. 6
def main(args):

    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...'%(args.substack_id))

    substack = SubStack(args.indir,args.substack_id)
    substack.load_volume()
    tensor = substack.get_volume()

    # Pad the tensor with a 6-pixel black (zero) border before filtering.
    print("The shape of the tensor before padding: " + str(np.shape(tensor)))
    tensor = pad(tensor, 6)
    print("The shape of the tensor after padding: " + str(np.shape(tensor)))

    if not args.local_mean_std:
        print('Reading standardization data from', args.trainfile)
        h5 = tables.openFile(args.trainfile)
        Xmean = h5.root.Xmean[:].astype(np.float32)
        Xstd = h5.root.Xstd[:].astype(np.float32)
        h5.close()
    else:
        Xmean=None
        Xstd=None
    
    print('Starting semantic deconvolution of volume', args.substack_id)
    # Importing here to have a clean --help
    from keras.models import model_from_json
    model = model_from_json(open(args.model + '/architecture.json').read())
    model.load_weights(args.model + '/weights.h5')
    
    minz = int(re.split('[a-zA-Z0-9]*_', substack.info['Files'][0])[1].split('.tif')[0])
    # Note: deconvolver has been changed to use a fixed margin instead of the extramargin.
    reconstruction = deconvolver.filter_volume(tensor, Xmean, Xstd,
                                               args.extramargin, model, args.speedup, do_cython=args.do_cython, trainfile=args.trainfile)
    imtensor.save_tensor_as_tif(reconstruction, args.outdir+'/'+args.substack_id, minz)

    print ("total time reconstruction: %s" %(str(timeit.default_timer() - total_start)))
Example No. 7
def main(args):
    try:
        C_firstview = m_load_markers(args.first_view,from_vaa3d=True)
    except IOError:
        print('Warning: first view marker file',args.first_view,'not found.')
        C_firstview = []
    try:
        C_secondview = m_load_markers(args.second_view,from_vaa3d=True)
    except IOError:
        print('Warning: second view marker file',args.second_view,'not found.')
        C_secondview = []

    mkdir_p(os.path.dirname(args.output_marker_file))
    substack = SubStack(args.indir,args.substack_id)
    if args.do_icp:
        C_merged, C_onlyfirstview, C_onlysecondview, _ = do_fuse_with_icp(substack,C_firstview,C_secondview,args.max_distance,match_distance=args.match_distance,verbose=args.verbose)
    else:
        C_merged, C_onlyfirstview, C_onlysecondview, _ = do_fuse(substack,C_firstview,C_secondview,args.max_distance, args.verbose)
    
    # The id and verbosity arguments are assumed to come from the parsed command line.
    save_fused_markers(substack, C_merged, C_onlyfirstview, C_onlysecondview,
                       args.output_marker_file, args.first_view_id, args.second_view_id, args.verbose)
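
do_fuse_with_icp aligns the two marker clouds before matching them. For reference, a compact sketch of the classic ICP loop it is named after (nearest-neighbour matching plus a Kabsch rigid fit); this is not the project's implementation, and markers are assumed to expose .x/.y/.z:

import numpy as np
from scipy.spatial import cKDTree

def icp_rigid(moving, fixed, max_distance, iterations=20):
    P = np.array([(c.x, c.y, c.z) for c in moving], dtype=float)
    Q = np.array([(c.x, c.y, c.z) for c in fixed], dtype=float)
    R, t = np.eye(3), np.zeros(3)
    tree = cKDTree(Q)
    for _ in range(iterations):
        moved = np.dot(P, R.T) + t
        dist, idx = tree.query(moved)
        keep = dist < max_distance
        if keep.sum() < 3:
            break
        src, dst = moved[keep], Q[idx[keep]]
        src_c, dst_c = src - src.mean(axis=0), dst - dst.mean(axis=0)
        # Kabsch: best rotation mapping the matched source points onto their targets
        U, _, Vt = np.linalg.svd(np.dot(src_c.T, dst_c))
        d = np.sign(np.linalg.det(np.dot(Vt.T, U.T)))
        R_step = np.dot(Vt.T, np.dot(np.diag([1.0, 1.0, d]), U.T))
        t_step = dst.mean(axis=0) - np.dot(R_step, src.mean(axis=0))
        R, t = np.dot(R_step, R), np.dot(R_step, t) + t_step
    return R, t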
Example No. 8
def get_visible_pairs(base_indir, substack_id, view_ids, lower_threshold=30.):

    num_vis = 0
    view_thresholds = []
    for view_id in view_ids:
        substack = SubStack(base_indir + '/' + view_id, substack_id)
        patch = substack.get_volume()
        histogram = np.histogram(patch, bins=256, range=(0, 256))[0]
        thresholds = threshold.multi_kapur(histogram, 2)
        # Keep the second Kapur threshold of every view: it is used below as the
        # per-view visibility score (the original code only kept the last view's values).
        view_thresholds.append(thresholds[1])
        if thresholds[1] >= lower_threshold:
            num_vis += 1
    threshold_0, threshold_1, threshold_2, threshold_3 = view_thresholds

    list_views = []
    if num_vis == 0:
        return list_views
    elif num_vis == 1:
        if threshold_0 >= lower_threshold:
            list_views.append(tuple((view_ids[0], view_ids[1])))
            list_views.append(tuple((view_ids[0], view_ids[3])))
        if threshold_1 >= lower_threshold:
            list_views.append(tuple((view_ids[0], view_ids[1])))
            list_views.append(tuple((view_ids[2], view_ids[1])))
        if threshold_2 >= lower_threshold:
            list_views.append(tuple((view_ids[2], view_ids[1])))
            list_views.append(tuple((view_ids[2], view_ids[3])))
        if threshold_3 >= lower_threshold:
            list_views.append(tuple((view_ids[0], view_ids[3])))
            list_views.append(tuple((view_ids[2], view_ids[3])))
    elif num_vis == 2:
        if (threshold_0 >= lower_threshold and threshold_2 >= lower_threshold
            ) or (threshold_1 >= lower_threshold
                  and threshold_3 >= lower_threshold):
            list_views.append(tuple((view_ids[0], view_ids[1])))
            list_views.append(tuple((view_ids[0], view_ids[3])))
            list_views.append(tuple((view_ids[2], view_ids[1])))
            list_views.append(tuple((view_ids[2], view_ids[3])))
        elif threshold_0 >= lower_threshold and threshold_1 >= lower_threshold:
            list_views.append(tuple((view_ids[0], view_ids[1])))
        elif threshold_0 >= lower_threshold and threshold_3 >= lower_threshold:
            list_views.append(tuple((view_ids[0], view_ids[3])))
        elif threshold_2 >= lower_threshold and threshold_1 >= lower_threshold:
            list_views.append(tuple((view_ids[2], view_ids[1])))
        elif threshold_2 >= lower_threshold and threshold_3 >= lower_threshold:
            list_views.append(tuple((view_ids[2], view_ids[3])))
    elif num_vis == 3:
        if threshold_0 < lower_threshold:
            list_views.append(tuple((view_ids[2], view_ids[1])))
            list_views.append(tuple((view_ids[2], view_ids[3])))
        elif threshold_2 < lower_threshold:
            list_views.append(tuple((view_ids[0], view_ids[1])))
            list_views.append(tuple((view_ids[0], view_ids[3])))
        elif threshold_1 < lower_threshold or threshold_3 < lower_threshold:
            list_views.append(tuple((view_ids[0], view_ids[1])))
            list_views.append(tuple((view_ids[0], view_ids[3])))
            list_views.append(tuple((view_ids[2], view_ids[1])))
            list_views.append(tuple((view_ids[2], view_ids[3])))
    elif num_vis == 4:
        list_views.append(tuple((view_ids[0], view_ids[1])))
        list_views.append(tuple((view_ids[0], view_ids[3])))
        list_views.append(tuple((view_ids[2], view_ids[1])))
        list_views.append(tuple((view_ids[2], view_ids[3])))
    return list_views
Example No. 9
def merge_views(args):

    view_ids = args.view_ids
    max_len = 0
    hue = 0.
    C_substack = []

    if args.thresholds_dir is None:
        print(view_ids)
        list_views = get_visible_pairs(args.base_indir, args.substack_id,
                                       view_ids)
    else:
        list_views = get_visible_pairs_from_dir(args.thresholds_dir,
                                                args.substack_id, view_ids)

    substack = SubStack(args.base_indir + '/' + view_ids[0], args.substack_id)
    valid_pairs = filter_valid_pairs(args, substack, list_views)

    if len(valid_pairs.keys()) == 1:
        C_substack += valid_pairs.values()[0]
    else:
        keys_view_1 = []
        keys_view_3 = []
        for view_key in valid_pairs.keys():
            if view_key[0] == view_ids[2]:
                keys_view_3.append(view_key)
            elif view_key[0] == view_ids[0]:
                keys_view_1.append(view_key)

    if len(valid_pairs) == 2:
        if len(keys_view_1) == 2 or len(keys_view_3) == 2:
            total_list = compute_fusion(substack,
                                        valid_pairs.values()[0],
                                        valid_pairs.values()[1],
                                        args.max_distance,
                                        match_distance=args.match_distance,
                                        verbose=args.verbose)
            C_substack += total_list
        elif len(keys_view_1) == 1 and len(keys_view_3) == 1:
            C_view_3_t = transform_markers(args, substack,
                                           valid_pairs[keys_view_3[0]],
                                           view_ids)
            total_list = compute_fusion(substack,
                                        valid_pairs[keys_view_1[0]],
                                        C_view_3_t,
                                        args.max_distance,
                                        match_distance=args.match_distance,
                                        verbose=args.verbose)
            C_substack += total_list
        else:
            raise Exception('not valid list of views %s' % (str(valid_pairs)))

    elif len(valid_pairs) == 3:
        if len(keys_view_1) == 2:
            C_view_1 = compute_fusion(substack,
                                      valid_pairs[keys_view_1[0]],
                                      valid_pairs[keys_view_1[1]],
                                      args.max_distance,
                                      match_distance=args.match_distance,
                                      verbose=args.verbose)
            C_view_3_t = transform_markers(args, substack,
                                           valid_pairs[keys_view_3[0]],
                                           view_ids)
            total_list = compute_fusion(substack,
                                        C_view_1,
                                        C_view_3_t,
                                        args.max_distance,
                                        match_distance=args.match_distance,
                                        verbose=args.verbose)
            C_substack += total_list
        elif len(keys_view_3) == 2:
            C_view_3 = compute_fusion(substack,
                                      valid_pairs[keys_view_3[0]],
                                      valid_pairs[keys_view_3[1]],
                                      args.max_distance,
                                      match_distance=args.match_distance,
                                      verbose=args.verbose)
            C_view_3_t = transform_markers(args, substack, C_view_3, view_ids)
            C_view_1 = valid_pairs[keys_view_1[0]]
            total_list = compute_fusion(substack,
                                        C_view_1,
                                        C_view_3_t,
                                        args.max_distance,
                                        match_distance=args.match_distance,
                                        verbose=args.verbose)
            C_substack += total_list
        else:
            raise Exception('not valid list of views %s' % (str(valid_pairs)))

    elif len(valid_pairs) == 4:
        if len(keys_view_1) == 2 and len(keys_view_3) == 2:
            C_view_1 = compute_fusion(substack,
                                      valid_pairs[keys_view_1[0]],
                                      valid_pairs[keys_view_1[1]],
                                      args.max_distance,
                                      match_distance=args.match_distance,
                                      verbose=args.verbose)
            C_view_3 = compute_fusion(substack,
                                      valid_pairs[keys_view_3[0]],
                                      valid_pairs[keys_view_3[1]],
                                      args.max_distance,
                                      match_distance=args.match_distance,
                                      verbose=args.verbose)
            C_view_3_t = transform_markers(args, substack, C_view_3, view_ids)
            total_list = compute_fusion(substack,
                                        C_view_1,
                                        C_view_3_t,
                                        args.max_distance,
                                        match_distance=args.match_distance,
                                        verbose=args.verbose)
            C_substack += total_list
        else:
            raise Exception('not valid list of views %s' % (str(valid_pairs)))

    mkdir_p(args.outdir + '/' + args.substack_id)
    if len(C_substack) > 0:
        C_substack = filter_outside_markers(C_substack, substack)
        substack.save_markers(args.outdir + '/' + args.substack_id +
                              '/ms.marker',
                              C_substack,
                              floating_point=True)
Example No. 10
def transform_substack(indir, tensorimage, substack_id, R, t, extramargin, outdir=None, invert=False, save_tiff=False, save_hdf5=False):
    """
    Method that applies a previously estimated rigid transformation to a specific substack.

    Parameters
    ----------
    
    indir : str
	substack dir of the input view
    tensorimage : str
	the whole tensor in hdf5 format
    substack_id : str
	id of the substack that will be transformed
    R : numpy array of shape (3, 3)
	rotational component of the estimated rigid transformation
    t : numpy array of shape (3)
	translational component of the estimated rigid transformation
    extramargin : int
	extramargin used to extract the transformed substack from tensorimage
    outdir : str 
	output directory where the transformed substack will be saved (Default: None)
    invert : boolean 
	if True the tranformation is inverted (Default: False)
    save_tiff : boolean 
	save the transformed substack in a stack of tiff slices (Default: False)
    save_hdf5 : boolean 
	save the transformed substack as a hdf5 tensor (Default: False)

    Returns
    -------

    pixels_transformed_input: numpy tensor
	tensor of the transformed substack

    """
    ss = SubStack(indir, substack_id)
    input_stack_file = tensorimage
    hf5 = tables.openFile(input_stack_file, 'r')
    full_D, full_H, full_W = hf5.root.full_image.shape
    X0,Y0,Z0 = ss.info['X0'], ss.info['Y0'], ss.info['Z0']
    origin = (Z0, Y0, X0)
    H,W,D = ss.info['Height'], ss.info['Width'], ss.info['Depth']
    or_ss_shape = (D,H,W)
    offset_W=int((3**(1/2.0)*W + W/2.0 - W)/2.0)
    offset_H=int((3**(1/2.0)*H + H/2.0 - H)/2.0)
    offset_D=int((3**(1/2.0)*D + D/2.0 - D)/2.0)


    if offset_W < extramargin:
        offset_W = extramargin
    if offset_H < extramargin:
        offset_H = extramargin
    if offset_D < extramargin:
        offset_D = extramargin

    offset_D_left = offset_D if int(origin[0] - offset_D) > 0 else origin[0]
    offset_H_left = offset_H if int(origin[1] - offset_H) > 0 else origin[1]
    offset_W_left = offset_W if int(origin[2] - offset_W) > 0 else origin[2]
    offset_D_right = offset_D if int(origin[0] + or_ss_shape[0] + offset_D) <= full_D else full_D - (origin[0] + or_ss_shape[0])
    offset_H_right = offset_H if int(origin[1] + or_ss_shape[1] + offset_H) <= full_H else full_H - (origin[1] + or_ss_shape[1])
    offset_W_right = offset_W if int(origin[2] + or_ss_shape[2] + offset_W) <= full_W else full_W - (origin[2] + or_ss_shape[2])



    pixels_input = hf5.root.full_image[origin[0] - offset_D_left:origin[0] + or_ss_shape[0] + offset_D_right,
                                       origin[1] - offset_H_left:origin[1] + or_ss_shape[1] + offset_H_right,
                                       origin[2] - offset_W_left:origin[2] + or_ss_shape[2] + offset_W_right]


    exmar_D_left = 0 if offset_D_left == origin[0] else extramargin
    exmar_H_left  = 0 if offset_H_left == origin[1] else extramargin
    exmar_W_left  = 0 if offset_W_left == origin[2] else extramargin

    depth_target, height_target, width_target = or_ss_shape[0] + 2 * extramargin, or_ss_shape[1] + 2 * extramargin, or_ss_shape[2] + 2 * extramargin #new
    depth_input, height_input, width_input = pixels_input.shape[0], pixels_input.shape[1],  pixels_input.shape[2]
    pixels_transformed_input = np.zeros((depth_target,height_target,width_target), dtype=np.uint8)


    total_start = timeit.default_timer()

    coords_2d_target = np.vstack(np.indices((width_target,height_target)).swapaxes(0,2).swapaxes(0,1))
    invR = R.T

    if invert:
        t = -np.dot(invR, t)
        invR = R

    invR_2d_transpose = np.transpose(np.dot(invR[:, 0:2], np.transpose(coords_2d_target - t[0:2])))
    offset_coords = np.array([[offset_W_left - exmar_W_left, offset_H_left - exmar_H_left, offset_D_left - exmar_D_left]]*invR_2d_transpose.shape[0])#new

    for z in xrange(0, depth_target, 1):
        R_t_3d = np.transpose(invR_2d_transpose + invR[:, 2] * (z - t[2]) + offset_coords)
        good_indices = np.array(range(R_t_3d.shape[1]))
        good_indices = good_indices[(R_t_3d[0, :] > 0) * (R_t_3d[1, :] > 0) * (R_t_3d[2, :] > 0) * (R_t_3d[0, :] < (width_input - 1)) * (R_t_3d[1, :] < (height_input - 1)) * (R_t_3d[2, :] < (depth_input - 1))]
        R_t_3d = R_t_3d.take(good_indices,axis=1)
        R_t_3d = np.round(R_t_3d).astype(int)
        coords_2d_target_tmp = coords_2d_target.take(good_indices, axis=0)
        coords_3d_target_tmp = np.hstack((coords_2d_target_tmp, np.ones((coords_2d_target_tmp.shape[0], 1)).astype(int)*z))
        pixels_transformed_input[coords_3d_target_tmp[:, 2], coords_3d_target_tmp[:, 1], coords_3d_target_tmp[:, 0]] = pixels_input[R_t_3d[2, :], R_t_3d[1, :], R_t_3d[0, :]]

    total_stop = timeit.default_timer()
    print ("total time transformation stack:%s "%(str(total_stop - total_start)))

    pixels_transformed_input = np.array(pixels_transformed_input, dtype=np.uint8)
    if save_tiff or save_hdf5:
        mkdir_p(outdir)
        if save_tiff:
            minz = int(ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
            _prefix = '_'.join(ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'
            substack_outdir = outdir + '/' + substack_id
            imtensor.save_tensor_as_tif(pixels_transformed_input, substack_outdir, minz, prefix=_prefix)
        if save_hdf5:
            target_shape = (depth_target, height_target, width_target)
            atom = tables.UInt8Atom()
            h5f = tables.openFile(outdir + '/' + ss.substack_id + '.h5', 'w')
            ca = h5f.createCArray(h5f.root, 'full_image', atom, target_shape)
            for z in xrange(0, depth_target, 1):
                ca[z, :, :] = pixels_transformed_input[z, :, :]
            h5f.close()

    return pixels_transformed_input
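
The invert branch above relies on the standard identity for rigid transforms: if y = R x + t, then x = R^T (y - t) = R^T y - R^T t, which is why t is replaced by -invR·t when inverting. A small numpy illustration on hypothetical points (not project code):

import numpy as np

def apply_rigid(points, R, t, invert=False):
    # Forward map: y = R x + t.  Inverse map: x = R.T (y - t).
    points = np.asarray(points, dtype=float)
    if invert:
        return np.dot(points - t, R)
    return np.dot(points, R.T) + t

# Round-trip check with an arbitrary rotation about z and a translation.
theta = np.deg2rad(30.0)
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
t = np.array([1.0, 2.0, 3.0])
pts = np.array([[0.0, 0.0, 0.0], [4.0, 5.0, 6.0]])
assert np.allclose(apply_rigid(apply_rigid(pts, R, t), R, t, invert=True), pts)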