Example no. 1
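# Assumed context (not shown in these snippets): the examples rely on imports such
# as timeit, re, pickle, tables, numpy as np and pandas as pd, plus project helpers
# (SubStack, imtensor, deconvolver, parse_transformation_file, transform_substack,
# do_content_based_fusion, fuse_tensors, make_pos_neg_dataset, mkdir_p and
# os.path.basename).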
def main(args):

    total_start = timeit.default_timer()
    print('Starting Preibisch fusion', args.substack_id)

    ss = SubStack(args.first_view_dir, args.substack_id)
    minz = int(ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'
    np_tensor_3d_first_view, _ = imtensor.load_nearby(args.tensorimage_first_view, ss, args.size_patch)
    sc_in = np_tensor_3d_first_view.shape

    if args.transformation_file is not None:
        R, t = parse_transformation_file(args.transformation_file)
        np_tensor_3d_second_view = transform_substack(args.second_view_dir, args.tensorimage_second_view, args.substack_id, R, t, args.size_patch, invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(args.tensorimage_second_view, ss, args.size_patch)

    fused_image, entropy_mask_first_view, entropy_mask_second_view = do_content_based_fusion(np_tensor_3d_first_view, np_tensor_3d_second_view, args.size_patch, args.size_patch, speedup=1, fast_computation=True)

    if args.extramargin > args.size_patch:
        args.extramargin = args.size_patch

    offset_margin = args.size_patch - args.extramargin
    fused_image_output = fused_image[offset_margin:sc_in[0] - offset_margin, offset_margin:sc_in[1] - offset_margin, offset_margin:sc_in[2] - offset_margin]
    atom = tables.UInt8Atom()
    mkdir_p(args.outdir)
    h5f = tables.openFile(args.outdir + '/' + args.substack_id + '.h5', 'w')
    sc_out=fused_image_output.shape
    ca = h5f.createCArray(h5f.root, 'full_image', atom, sc_out)
    for z in xrange(sc_out[0]):
        ca[z, :, :] = fused_image_output[z,:,:]
    h5f.close()

    imtensor.save_tensor_as_tif(fused_image_output, args.outdir + '/' + args.substack_id, minz, prefix=prefix)
    print("total time Preibisch fusion: %s" % (str(timeit.default_timer() - total_start)))
Example no. 2
def main(args):

    total_start = timeit.default_timer()
    print('Starting Preibisch fusion', args.substack_id)

    ss = SubStack(args.first_view_dir, args.substack_id)
    minz = int(
        ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(
        ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'
    np_tensor_3d_first_view, _ = imtensor.load_nearby(
        args.tensorimage_first_view, ss, args.size_patch)
    sc_in = np_tensor_3d_first_view.shape

    if args.transformation_file is not None:
        R, t = parse_transformation_file(args.transformation_file)
        np_tensor_3d_second_view = transform_substack(
            args.second_view_dir,
            args.tensorimage_second_view,
            args.substack_id,
            R,
            t,
            args.size_patch,
            invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(
            args.tensorimage_second_view, ss, args.size_patch)

    fused_image, entropy_mask_first_view, entropy_mask_second_view = do_content_based_fusion(
        np_tensor_3d_first_view,
        np_tensor_3d_second_view,
        args.size_patch,
        args.size_patch,
        speedup=1,
        fast_computation=True)

    if args.extramargin > args.size_patch:
        args.extramargin = args.size_patch

    offset_margin = args.size_patch - args.extramargin
    fused_image_output = fused_image[offset_margin:sc_in[0] - offset_margin,
                                     offset_margin:sc_in[1] - offset_margin,
                                     offset_margin:sc_in[2] - offset_margin]
    atom = tables.UInt8Atom()
    mkdir_p(args.outdir)
    h5f = tables.openFile(args.outdir + '/' + args.substack_id + '.h5', 'w')
    sc_out = fused_image_output.shape
    ca = h5f.createCArray(h5f.root, 'full_image', atom, sc_out)
    for z in xrange(sc_out[0]):
        ca[z, :, :] = fused_image_output[z, :, :]
    h5f.close()

    imtensor.save_tensor_as_tif(fused_image_output,
                                args.outdir + '/' + args.substack_id,
                                minz,
                                prefix=prefix)
    print("total time Preibisch fusion: %s" %
          (str(timeit.default_timer() - total_start)))
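The fused volume is saved twice: as a stack of TIFF slices and as an HDF5 CArray named full_image. A minimal sketch of reading the HDF5 output back, assuming the path pattern used above (tables.openFile is the PyTables 2.x spelling; it became tables.open_file in PyTables 3):

import tables

h5f = tables.openFile('outdir/substack_id.h5')  # hypothetical path
fused = h5f.root.full_image[:]            # whole (Z, Y, X) uint8 volume
one_slice = h5f.root.full_image[0, :, :]  # or stream it one z-slice at a time
h5f.close()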
Example no. 3
def main(args):

    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...' % (args.substack_id))

    ss = SubStack(args.first_view_dir, args.substack_id)
    minz = int(
        ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(
        ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'

    np_tensor_3d_first_view, _ = imtensor.load_nearby(
        args.tensorimage_first_view, ss, args.extramargin)
    if args.transformation_file is not None:
        R, t = parse_transformation_file(args.transformation_file)
        np_tensor_3d_second_view = transform_substack(
            args.second_view_dir,
            args.tensorimage_second_view,
            args.substack_id,
            R,
            t,
            args.extramargin,
            invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(
            args.tensorimage_second_view, ss, args.extramargin)

    print('Loading model...')
    model = pickle.load(open(args.model))

    if not args.local_mean_std:
        h5 = tables.openFile(args.trainfile)
        Xmean = h5.root.Xmean[:].astype(np.float32)
        Xstd = h5.root.Xstd[:].astype(np.float32)
        h5.close()
    else:
        Xmean = None
        Xstd = None

    reconstruction = deconvolver.filter_volume(
        [np_tensor_3d_first_view, np_tensor_3d_second_view],
        Xmean,
        Xstd,
        args.extramargin,
        model,
        args.speedup,
        do_cython=args.do_cython,
        trainfile=args.trainfile)

    pair_id = basename(args.first_view_dir) + '_' + basename(
        args.second_view_dir)
    outdir = args.outdir + '/' + args.substack_id + '/' + pair_id

    imtensor.save_tensor_as_tif(reconstruction, outdir, minz, prefix='slice_')

    print("total time reconstruction: %s" %
          (str(timeit.default_timer() - total_start)))
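Xmean and Xstd are per-feature statistics computed on the training set (see the dataset builder at the end of this page). A minimal sketch of the standardization they imply; that filter_volume applies exactly this transform internally is an assumption, though it mirrors the local standardization in the dataset builder:

import numpy as np

# Hypothetical training matrix and a new batch of patch vectors;
# 1458 = 2 * (1 + 2*4)**3 features for size_patch = 4.
X_train = np.random.rand(100, 1458).astype(np.float32)
X_new = np.random.rand(10, 1458).astype(np.float32)
Xmean = X_train.mean(axis=0)
Xstd = X_train.std(axis=0)
X_standardized = (X_new - Xmean) / Xstd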
Example no. 4
def main(args):

    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...' % (args.substack_id))

    substack = SubStack(args.indir, args.substack_id)
    np_tensor_3d, minz = imtensor.load_nearby(args.tensorimage, substack, args.extramargin)

    if not args.local_mean_std:
        print('Reading standardization data from', args.trainfile)
        h5 = tables.openFile(args.trainfile)
        Xmean = h5.root.Xmean[:].astype(np.float32)
        Xstd = h5.root.Xstd[:].astype(np.float32)
        h5.close()
    else:
        Xmean = None
        Xstd = None

    print('Starting semantic deconvolution of volume', args.substack_id)
    model = pickle.load(open(args.model))
    minz = int(re.split('[a-zA-Z0-9]*_', substack.info['Files'][0])[1].split('.tif')[0])
    reconstruction = deconvolver.filter_volume(np_tensor_3d, Xmean, Xstd,
                                               args.extramargin, model, args.speedup, do_cython=args.do_cython, trainfile=args.trainfile)

    imtensor.save_tensor_as_tif(reconstruction, args.outdir + '/' + args.substack_id, minz)

    print ("total time reconstruction: %s" %(str(timeit.default_timer() - total_start)))
Example no. 5
def main(args):

    total_start = timeit.default_timer()
    print('Starting transformation and fusion of views of volume %s ...' % (args.substack_id))

    ss = SubStack(args.first_view_dir, args.substack_id)
    minz = int(ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'

    np_tensor_3d_first_view, _ = imtensor.load_nearby(args.tensorimage_first_view, ss, 0)
    if args.transformation_file is not None:
        R, t = parse_transformation_file(args.transformation_file)
        np_tensor_3d_second_view = transform_substack(args.second_view_dir, args.tensorimage_second_view, args.substack_id, R, t, 0, invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(args.tensorimage_second_view, ss, 0)

    fuse_tensors(args.outdir, np_tensor_3d_first_view, np_tensor_3d_second_view, np.zeros_like(np_tensor_3d_first_view).astype(np.uint8))
    print("total time transformation and fusion: %s" % (str(timeit.default_timer() - total_start)))
Example no. 6
def main(args):

    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...' % (args.substack_id))

    ss = SubStack(args.first_view_dir, args.substack_id)
    minz = int(ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'

    np_tensor_3d_first_view, _ = imtensor.load_nearby(args.tensorimage_first_view, ss, args.extramargin)
    if args.transformation_file is not None:
        R, t = parse_transformation_file(args.transformation_file)
        np_tensor_3d_second_view = transform_substack(args.second_view_dir, args.tensorimage_second_view, args.substack_id, R, t, args.extramargin, invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(args.tensorimage_second_view, ss, args.extramargin)

    print('Loading model...')
    model = pickle.load(open(args.model))
    
    if not args.local_mean_std:
        h5 = tables.openFile(args.trainfile)
        Xmean = h5.root.Xmean[:].astype(np.float32)
        Xstd = h5.root.Xstd[:].astype(np.float32)
        h5.close()
    else:
        Xmean = None
        Xstd = None

    reconstruction = deconvolver.filter_volume([np_tensor_3d_first_view, np_tensor_3d_second_view], Xmean, Xstd,
                                               args.extramargin, model, args.speedup, do_cython=args.do_cython, trainfile=args.trainfile)

    pair_id = basename(args.first_view_dir) + '_' + basename(args.second_view_dir)
    outdir = args.outdir + '/' + args.substack_id + '/' + pair_id

    imtensor.save_tensor_as_tif(reconstruction, outdir, minz, prefix='slice_')


    print ("total time reconstruction: %s" %(str(timeit.default_timer() - total_start)))
Example no. 7
def main(args):
    substack = SubStack(args.indir, args.substack_id)
    np_tensor_3d, minz = imtensor.load_nearby(args.tensorimage, substack, args.extramargin)

    # Standardize volume according to mean and std found in the training set
    print('Reading standardization data from', args.trainfile)
    h5 = tables.openFile(args.trainfile)
    Xmean = h5.root.Xmean[:]
    Xstd = h5.root.Xstd[:]
    h5.close()
    print('Starting semantic deconvolution of volume', args.substack_id)
    model = pickle.load(open(args.model))
    minz = int(substack.info['Files'][0].split('full_')[1].split('.tif')[0])
    reconstruction = deconvolver.filter_volume(np_tensor_3d, Xmean, Xstd,
                                               args.extramargin, model, args.speedup)
    imtensor.save_tensor_as_tif(reconstruction, args.outdir + '/' + args.substack_id, minz)
Example no. 8
def main(args):

    patchlen = (1+2*args.size_patch) ** 3
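    # patchlen is the number of voxels in a cubic patch of side (1 + 2*size_patch);
    # features from the two views are concatenated, hence the 2*patchlen columns below.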

    X_sup = np.array([]).reshape(0, 2*patchlen).astype(np.float32)
    y_sup = np.array([]).reshape(0, patchlen).astype(np.float32)
    X_neg = np.array([]).reshape(0, 2*patchlen).astype(np.float32)
    y_neg = np.array([]).reshape(0, patchlen).astype(np.float32)
    X_original = np.array([]).reshape(0, 2*patchlen).astype(np.float32)

    data_frame_markers = pd.read_csv(args.list_trainset, dtype={'view1': str, 'view2': str, 'ss_id': str })
    for row in data_frame_markers.index:
        row_data = data_frame_markers.iloc[row]

        first_view_dir = args.substacks_base_path + '/' + row_data['view1']

        substack = SubStack(first_view_dir, row_data['ss_id'])
        markers = args.mergedmarkers_folder + '/' + row_data['view1'] + '_' + row_data['view2'] + '/' + row_data['ss_id'] + '-GT.marker'
        print('Loading ground truth markers from', markers)
        C = substack.load_markers(markers, from_vaa3d=True)
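        # Vaa3D markers are 1-based; shift them to 0-based voxel coordinates.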
        for c in C:
            c.x -= 1
            c.y -= 1
            c.z -= 1

        substack.load_volume(h5filename=args.tensors_base_path + '/' + row_data['view1'] + '.h5')
        tensor_first_view, _ = imtensor.load_nearby(args.tensors_base_path + '/' + row_data['view1'] + '.h5', substack, 0)


        second_view_dir = args.substacks_base_path + '/' + row_data['view2']
        R, t = parse_transformation_file(args.transformations_path + '/' + row_data['ss_id'] + '/' + row_data['view1'] + '_' + row_data['view2'])
        tensor_second_view = transform_substack(second_view_dir, args.tensors_base_path + '/' + row_data['view2'] + '.h5', row_data['ss_id'], R, t, 0, invert=True)

        temp_X_sup, temp_y_sup, temp_X_neg, temp_y_neg = make_pos_neg_dataset(tensor_first_view, tensor_second_view, substack, C, row_data['view1'], row_data['view2'], default_sigma=args.sigma, size=args.size_patch, save_tiff_files=False, find_negative=args.negatives)

        X_original = np.vstack((X_original, temp_X_sup))
        X_original = np.vstack((X_original, temp_X_neg))
        if args.local_standardization:
            print('Performing local standardization')
            Xmean = np.vstack((temp_X_sup, temp_X_neg)).mean(axis=0)
            Xstd = np.vstack((temp_X_sup, temp_X_neg)).std(axis=0)
            temp_X_sup = (temp_X_sup - Xmean) / Xstd
            temp_X_neg = (temp_X_neg - Xmean) / Xstd

        X_sup = np.vstack((X_sup, temp_X_sup))
        y_sup = np.vstack((y_sup, temp_y_sup))
        X_neg = np.vstack((X_neg, temp_X_neg))
        y_neg = np.vstack((y_neg, temp_y_neg))


    print('Negative data set shape:', X_neg.shape, 'size:', X_neg.nbytes / (1024 * 1024), 'MBytes')
    print('Negative target shape:', y_neg.shape, 'size:', y_neg.nbytes / (1024 * 1024), 'MBytes')
    print('Positive data set shape:', X_sup.shape, 'size:', X_sup.nbytes / (1024 * 1024), 'MBytes')
    print('Positive target shape:', y_sup.shape, 'size:', y_sup.nbytes / (1024 * 1024), 'MBytes')

    ratio_positive_negative = float(y_sup.sum()) / float(y_neg.shape[0] * y_neg.shape[1] + y_sup.shape[0] * y_sup.shape[1])
    print('ratio positive-negative:', ratio_positive_negative)

    X = np.vstack((X_sup, X_neg))
    y = np.vstack((y_sup, y_neg))

    print('Total data set shape:', X.shape, 'size:', X.nbytes / (1024 * 1024), 'MBytes')
    print('Total target shape:', y.shape, 'size:', y.nbytes / (1024 * 1024), 'MBytes')

    print('Compute global mean and std')
    Xmean = X_original.mean(axis=0)
    Xstd = X_original.std(axis=0)

    print('Saving training data to', args.outfile)
    h5file = tables.openFile(args.outfile, mode='w', title="Training set")
    root = h5file.root
    h5file.createArray(root, "X", X)
    h5file.createArray(root, "y", y)
    h5file.createArray(root, "Xmean", Xmean)
    h5file.createArray(root, "Xstd", Xstd)
    h5file.close()
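The training file written here is what the reconstruction examples above read back for standardization. A minimal sketch of consuming it (the path is a placeholder):

import tables

h5 = tables.openFile('trainset.h5')  # hypothetical path; tables.open_file() in PyTables 3
X = h5.root.X[:]
y = h5.root.y[:]
Xmean = h5.root.Xmean[:].astype('float32')
Xstd = h5.root.Xstd[:].astype('float32')
h5.close()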