def main(args):
    """Reconstruct one substack volume by semantic deconvolution.

    Loads the substack tensor around the requested substack, optionally
    reads mean/std standardization data from the training file, applies the
    pickled deconvolution model and saves the result as TIFF slices.

    Args:
        args: parsed CLI namespace; uses indir, substack_id, tensorimage,
            extramargin, local_mean_std, trainfile, model, speedup,
            do_cython and outdir.
    """
    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...' % (args.substack_id))

    substack = SubStack(args.indir, args.substack_id)
    np_tensor_3d, minz = imtensor.load_nearby(args.tensorimage, substack, args.extramargin)

    if not args.local_mean_std:
        # Standardize with the statistics computed on the training set.
        print('Reading standardization data from', args.trainfile)
        h5 = tables.openFile(args.trainfile)
        try:
            Xmean = h5.root.Xmean[:].astype(np.float32)
            Xstd = h5.root.Xstd[:].astype(np.float32)
        finally:
            # Close even if reading the arrays raises.
            h5.close()
    else:
        # None tells filter_volume to compute mean/std locally.
        Xmean = None
        Xstd = None

    print('Starting semantic deconvolution of volume', args.substack_id)
    # Use a context manager so the model file handle is not leaked.
    with open(args.model) as model_file:
        model = pickle.load(model_file)
    # Recover the starting z index from the first slice filename
    # (e.g. 'full_0123.tif' -> 123). NOTE(review): the class [a-zA-z0-9]
    # is almost certainly a typo for [a-zA-Z0-9] ('A-z' also matches
    # [\]^_` including '_'); left unchanged because correcting it would
    # alter splitting for filenames with multiple underscores — confirm
    # against the actual file naming scheme before fixing.
    minz = int(re.split('[a-zA-z0-9]*_', substack.info['Files'][0])[1].split('.tif')[0])
    reconstruction = deconvolver.filter_volume(np_tensor_3d, Xmean, Xstd,
                                               args.extramargin, model, args.speedup,
                                               do_cython=args.do_cython, trainfile=args.trainfile)

    imtensor.save_tensor_as_tif(reconstruction, args.outdir + '/' + args.substack_id, minz)

    print("total time reconstruction: %s" % (str(timeit.default_timer() - total_start)))
# Example #2
def main(args):
    """Reconstruct a substack from a registered pair of views.

    Loads the first-view tensor, obtains the second-view tensor either by
    applying a rigid transformation (R, t) read from a transformation file
    or by loading it directly, then runs the deconvolution model on the
    two-view pair and saves the reconstruction as TIFF slices under
    <outdir>/<substack_id>/<first-view>_<second-view>.

    Args:
        args: parsed CLI namespace; uses first_view_dir, second_view_dir,
            substack_id, tensorimage_first_view, tensorimage_second_view,
            transformation_file, extramargin, model, local_mean_std,
            trainfile, speedup, do_cython and outdir.
    """
    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...' % (args.substack_id))

    ss = SubStack(args.first_view_dir, args.substack_id)
    # First slice filename encodes the starting z index as its last
    # '_'-separated token, e.g. 'prefix_0123.tif' -> 123.
    minz = int(
        ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(
        ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'

    np_tensor_3d_first_view, _ = imtensor.load_nearby(
        args.tensorimage_first_view, ss, args.extramargin)
    if args.transformation_file is not None:
        # Register the second view onto the first via the saved rigid
        # transformation (inverted, since the file maps first -> second).
        R, t = parse_transformation_file(args.transformation_file)
        np_tensor_3d_second_view = transform_substack(
            args.second_view_dir,
            args.tensorimage_second_view,
            args.substack_id,
            R,
            t,
            args.extramargin,
            invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(
            args.tensorimage_second_view, ss, args.extramargin)

    print('Loading model...')
    # Use a context manager so the model file handle is not leaked.
    with open(args.model) as model_file:
        model = pickle.load(model_file)

    if not args.local_mean_std:
        # Standardize with the statistics computed on the training set.
        h5 = tables.openFile(args.trainfile)
        try:
            Xmean = h5.root.Xmean[:].astype(np.float32)
            Xstd = h5.root.Xstd[:].astype(np.float32)
        finally:
            # Close even if reading the arrays raises.
            h5.close()
    else:
        # None tells filter_volume to compute mean/std locally.
        Xmean = None
        Xstd = None

    reconstruction = deconvolver.filter_volume(
        [np_tensor_3d_first_view, np_tensor_3d_second_view],
        Xmean,
        Xstd,
        args.extramargin,
        model,
        args.speedup,
        do_cython=args.do_cython,
        trainfile=args.trainfile)

    pair_id = basename(args.first_view_dir) + '_' + basename(
        args.second_view_dir)
    outdir = args.outdir + '/' + args.substack_id + '/' + pair_id

    imtensor.save_tensor_as_tif(reconstruction, outdir, minz, prefix='slice_')

    print("total time reconstruction: %s" %
          (str(timeit.default_timer() - total_start)))
def main(args):
    """Reconstruct one substack volume by semantic deconvolution.

    Minimal variant: always reads mean/std standardization data from the
    training file (no local_mean_std option), runs the pickled model and
    saves the reconstruction as TIFF slices.

    Args:
        args: parsed CLI namespace; uses indir, substack_id, tensorimage,
            extramargin, trainfile, model, speedup and outdir.
    """
    substack = SubStack(args.indir, args.substack_id)
    np_tensor_3d, minz = imtensor.load_nearby(args.tensorimage, substack, args.extramargin)

    # Standardize volume according to mean and std found in the training set
    print('Reading standardization data from', args.trainfile)
    h5 = tables.openFile(args.trainfile)
    try:
        Xmean = h5.root.Xmean[:]
        Xstd = h5.root.Xstd[:]
    finally:
        # Close even if reading the arrays raises.
        h5.close()
    print('Starting semantic deconvolution of volume', args.substack_id)
    # Use a context manager so the model file handle is not leaked.
    with open(args.model) as model_file:
        model = pickle.load(model_file)
    # Starting z index comes from the first slice filename,
    # e.g. 'full_0123.tif' -> 123.
    minz = int(substack.info['Files'][0].split('full_')[1].split('.tif')[0])
    reconstruction = deconvolver.filter_volume(np_tensor_3d, Xmean, Xstd,
                                               args.extramargin, model, args.speedup)
    imtensor.save_tensor_as_tif(reconstruction, args.outdir + '/' + args.substack_id, minz)
def main(args):
    """Reconstruct a substack from a registered pair of views.

    Loads the first-view tensor, obtains the second-view tensor either by
    applying a rigid transformation (R, t) from a transformation file or by
    loading it directly, runs the deconvolution model on the two-view pair
    and saves the result under <outdir>/<substack_id>/<pair_id>.

    Args:
        args: parsed CLI namespace; uses first_view_dir, second_view_dir,
            substack_id, tensorimage_first_view, tensorimage_second_view,
            transformation_file, extramargin, model, local_mean_std,
            trainfile, speedup, do_cython and outdir.
    """
    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...' % (args.substack_id))

    ss = SubStack(args.first_view_dir, args.substack_id)
    # First slice filename encodes the starting z index as its last
    # '_'-separated token, e.g. 'prefix_0123.tif' -> 123.
    minz = int(ss.info['Files'][0].split("/")[-1].split('_')[-1].split('.tif')[0])
    prefix = '_'.join(ss.info['Files'][0].split("/")[-1].split('_')[0:-1]) + '_'

    np_tensor_3d_first_view, _ = imtensor.load_nearby(args.tensorimage_first_view, ss, args.extramargin)
    if args.transformation_file is not None:
        # BUGFIX: the original line here was indented with a hard TAB,
        # which is a TabError under Python 3 (and `python -tt`).
        R, t = parse_transformation_file(args.transformation_file)
        # Register the second view onto the first via the saved rigid
        # transformation (inverted, since the file maps first -> second).
        np_tensor_3d_second_view = transform_substack(
            args.second_view_dir, args.tensorimage_second_view,
            args.substack_id, R, t, args.extramargin, invert=True)
    else:
        np_tensor_3d_second_view, _ = imtensor.load_nearby(args.tensorimage_second_view, ss, args.extramargin)

    print('Loading model...')
    # Use a context manager so the model file handle is not leaked.
    with open(args.model) as model_file:
        model = pickle.load(model_file)

    if not args.local_mean_std:
        # Standardize with the statistics computed on the training set.
        h5 = tables.openFile(args.trainfile)
        try:
            Xmean = h5.root.Xmean[:].astype(np.float32)
            Xstd = h5.root.Xstd[:].astype(np.float32)
        finally:
            # Close even if reading the arrays raises.
            h5.close()
    else:
        # None tells filter_volume to compute mean/std locally.
        Xmean = None
        Xstd = None

    reconstruction = deconvolver.filter_volume(
        [np_tensor_3d_first_view, np_tensor_3d_second_view], Xmean, Xstd,
        args.extramargin, model, args.speedup,
        do_cython=args.do_cython, trainfile=args.trainfile)

    pair_id = basename(args.first_view_dir) + '_' + basename(args.second_view_dir)
    outdir = args.outdir + '/' + args.substack_id + '/' + pair_id

    imtensor.save_tensor_as_tif(reconstruction, outdir, minz, prefix='slice_')

    print("total time reconstruction: %s" % (str(timeit.default_timer() - total_start)))
# Example #5
def main(args):
    """Reconstruct one substack volume with a Keras deconvolution model.

    Loads the full substack volume, zero-pads it by 6 voxels per side,
    optionally reads mean/std standardization data from the training file,
    applies the Keras model (architecture.json + weights.h5 under
    args.model) and saves the reconstruction as TIFF slices.

    Args:
        args: parsed CLI namespace; uses indir, substack_id,
            local_mean_std, trainfile, model, extramargin, speedup,
            do_cython and outdir.
    """
    total_start = timeit.default_timer()
    print('Starting reconstruction of volume %s ...' % (args.substack_id))

    substack = SubStack(args.indir, args.substack_id)
    substack.load_volume()
    tensor = substack.get_volume()

    # Pad the tensor with a 6-voxel black border so the network's
    # receptive field has context at the volume boundary.
    print("The shape of the tensor before padding: " + str(np.shape(tensor)))
    tensor = pad(tensor, 6)
    print("The shape of the tensor after padding: " + str(np.shape(tensor)))

    if not args.local_mean_std:
        # Standardize with the statistics computed on the training set.
        print('Reading standardization data from', args.trainfile)
        h5 = tables.openFile(args.trainfile)
        try:
            Xmean = h5.root.Xmean[:].astype(np.float32)
            Xstd = h5.root.Xstd[:].astype(np.float32)
        finally:
            # Close even if reading the arrays raises.
            h5.close()
    else:
        # None tells filter_volume to compute mean/std locally.
        Xmean = None
        Xstd = None

    print('Starting semantic deconvolution of volume', args.substack_id)
    # Importing here to have a clean --help
    from keras.models import model_from_json
    # Use a context manager so the architecture file handle is not leaked.
    with open(args.model + '/architecture.json') as arch_file:
        model = model_from_json(arch_file.read())
    model.load_weights(args.model + '/weights.h5')

    # Recover the starting z index from the first slice filename.
    # NOTE(review): [a-zA-z0-9] looks like a typo for [a-zA-Z0-9]
    # ('A-z' also matches [\]^_`); left unchanged to preserve splitting
    # behavior — confirm against the actual file naming scheme.
    minz = int(re.split('[a-zA-z0-9]*_', substack.info['Files'][0])[1].split('.tif')[0])
    # deconvolver was changed to use a fixed margin internally; the
    # extramargin argument is still forwarded for its interface.
    reconstruction = deconvolver.filter_volume(tensor, Xmean, Xstd,
                                               args.extramargin, model, args.speedup,
                                               do_cython=args.do_cython, trainfile=args.trainfile)
    imtensor.save_tensor_as_tif(reconstruction, args.outdir + '/' + args.substack_id, minz)

    print("total time reconstruction: %s" % (str(timeit.default_timer() - total_start)))
# Example #6
def main(args):
    """Interactively reconstruct and mean-shift sub-volumes of a big image.

    Extracts a z-slab [z, z+dz) (plus extramargin) from the input file
    once, then repeatedly: crops an (x, y) window, standardizes it with
    training-set statistics, runs the Keras deconvolution model, and runs
    mean-shift cell detection on the reconstruction, until the user quits.

    Args:
        args: parsed CLI namespace; uses z, dz, infile, extramargin, fps,
            trainfile, model, do_cython and the fields consumed by mscd.ms.

    Note:
        Hard-coded maxX/maxY/maxZ describe the target file's extent;
        x, y, dx, dy are currently fixed (the interactive getCoords()
        call is commented out).
    """
    # The data for the substack proper are readable from the info.json (or .plist)
    z = args.z
    dz = args.dz
    # Extent of the target file; coordinates beyond these are rejected.
    maxX = 3662
    maxY = 8249
    maxZ = 3646

    start_timer = timeit.default_timer()
    print('Extracting subtensor')

    if z + dz > maxZ:
        print("Z coordinate is out of bound for target file! Exiting.")
        return

    # Load the whole z-slab once; per-iteration crops are taken from it.
    big_tensor = extract_level(args.infile, z, dz + args.extramargin, args.fps)

    # Interactive loop to help catch volumes.
    done = False  # renamed from 'quit', which shadowed the builtin
    while not done:

        #x, y, dx, dy = getCoords()
        x, y, dx, dy = 964, 256, 275, 239

        if x + dx > maxX or y + dy > maxY:
            print("Coordinates are out of bound for target file! Exiting.")
            return

        np_tensor_3d = big_tensor[:, y:y + dy + args.extramargin,
                                  x:x + dx + args.extramargin]

        # Standardize with the statistics computed on the training set.
        print('Reading standardization data from ' + args.trainfile)
        h5 = tables.openFile(args.trainfile)
        try:
            Xmean = h5.root.Xmean[:].astype(np.float32)
            Xstd = h5.root.Xstd[:].astype(np.float32)
        finally:
            # Close even if reading the arrays raises.
            h5.close()

        extract_timer = timeit.default_timer()
        print("Extraction time: " + str(extract_timer - start_timer))

        print('Starting semantic devonvolution of a (' +
              str(dx + args.extramargin) + ',' + str(dy + args.extramargin) +
              ',' + str(dz + args.extramargin) + ') volume starting at (' +
              str(x) + ',' + str(y) + ',' + str(z) + ')')
        # Importing here to have a clean --help
        from keras.models import model_from_json

        # Use a context manager so the architecture file handle is not
        # leaked on every loop iteration.
        with open(args.model + '/architecture.json') as arch_file:
            net_model = model_from_json(arch_file.read())
        net_model.load_weights(args.model + '/weights.h5')

        # pad(..., 6): 6-voxel black border gives the network context at
        # the crop boundary; speedup is fixed at 4 here.
        reconstruction = deconvolver.filter_volume(pad(np_tensor_3d, 6),
                                                   Xmean,
                                                   Xstd,
                                                   args.extramargin,
                                                   net_model,
                                                   4,
                                                   do_cython=args.do_cython,
                                                   trainfile=args.trainfile)

        reconstruct_timer = timeit.default_timer()
        print("Reconstruction time: " + str(reconstruct_timer - extract_timer))

        volume = simpleSubStack.simpleSubStack()
        volume.load_volume(reconstruction)
        # save_image requires the full SubStack item, not my rough copy
        args.save_image = False
        # Without changing mscd.ms, right now, there is no way of controlling the file it goes to write.
        # at each iteration it would overwrite the old ms.marker
        mscd.ms(volume, args)
        meanshift_timer = timeit.default_timer()
        print("Mean Shift time: " + str(meanshift_timer - reconstruct_timer))
        print("Total time: " + str(meanshift_timer - start_timer))

        cont = raw_input(
            "Do you want to get another volume in the same z-dz range? [y]/n: "
        )
        if cont == 'n' or cont == 'no':
            done = True