Exemplo n.º 1
0
def test_save_load(tmpdir):
    """Round-trip several object types through save_to_file/load_from_file."""
    import os
    print(f'tempdir = {tmpdir}')
    print(f'{os.path.exists(tmpdir)}')

    # One payload of each supported kind, keyed by the pickle base name.
    payloads = {
        'arr': zeros((10, 5)),
        'str': 'this is string',
        'dict': {'1': 1, '2': 2},
        'list': [1, 2, 3, 4],
    }
    # A compound object containing all of the above.
    compound_in = dict(payloads)

    # Save each payload, then the compound, to its own pickle file.
    for key, value in payloads.items():
        save_to_file(tmpdir + f'/{key}.pkl', value)
    save_to_file(tmpdir + '/compound.pkl', compound_in)

    # Load everything back.
    loaded = {key: load_from_file(tmpdir + f'/{key}.pkl') for key in payloads}
    compound_out = load_from_file(tmpdir + '/compound.pkl')

    # Each payload must survive the round trip; the compound is checked by keys.
    assert array_equal(payloads['arr'], loaded['arr'])
    assert payloads['str'] == loaded['str']
    assert payloads['dict'] == loaded['dict']
    assert payloads['list'] == loaded['list']
    assert compound_in.keys() == compound_out.keys()
Exemplo n.º 2
0
def plot_2(buffer, N=10, background=None, root='', show=False, comments=''):
    """Plot the first N frame stacks of *buffer* and save the figure and data.

    Each of the first N entries of ``buffer.buffer`` is summed along axis 0,
    transposed, optionally background-subtracted, and the results are
    concatenated into one image which is rendered (negated) and written to
    ``<root>/<comments>.png``; the raw inputs are pickled to
    ``<root>/<comments>.npy``.

    Parameters
    ----------
    buffer : object with a 4-D ``buffer`` array attribute (frames first).
    N : number of frame stacks to include.
    background : optional array subtracted (summed along axis 0, transposed)
        from each frame stack.
    root : output directory; falls back to the historical hard-coded path
        when empty.
    show : display the figure interactively instead of closing it.
    comments : base name for the output files.
    """
    from numpy import concatenate
    arr = []
    for i in range(N):
        # Collapse the per-frame stack along axis 0 and transpose; subtract
        # the (equally collapsed, transposed) background when supplied.
        frame = buffer.buffer[i, :, :, :].sum(axis=0).T
        if background is not None:
            frame = frame - background.sum(axis=0).T
        arr.append(frame)
    data = concatenate(arr)
    fig = plt.figure(figsize=(10, 20))
    plt.imshow(-data, aspect='auto', vmin=0, vmax=100)
    # Bug fix: 'root' used to be unconditionally overwritten with this
    # hard-coded path, making the parameter dead; it is now only a fallback.
    if not root:
        root = "Y:/trash/microfluidics"
    suffix = '.png'
    filename = os.path.join(root, comments + suffix)
    plt.savefig(filename)
    from ubcs_auxiliary.save_load_object import save_to_file
    filename = os.path.join(root, comments + '.npy')
    dic = {}
    dic['buffer'] = buffer.buffer
    dic['comments'] = comments
    dic['background'] = background
    dic['image'] = -data
    save_to_file(filename, dic)
    if show:
        plt.show()
    else:
        plt.close(fig)
Exemplo n.º 3
0
def stats_from_chunk(reference, sigma=6):
    """Return (median, mean, var, threshold) in counts for *reference*.

    The mean and var are calculated after omitting the largest and smallest
    values found for each pixel, which assumes few particles in the laser
    sheet. The threshold statistic corresponds to the specified sigma level.
    Adding 0.5 to var helps compensate for digitization error so that false
    positives in the light sheet approximately match those outside the light
    sheet. The threshold for pixels selected by 'mask' is reset to 4095,
    which ensures they won't contribute to hits.

    Results are cached in a sibling '.stats.pkl' file; when that file
    exists it is loaded instead of recomputing.

    Raises:
        ValueError: if *reference* contains neither '.data.' nor '.raw.'.
    """
    from lcp_video.procedures.analysis_functions import poisson_array, dm16_mask, images_from_file, save_to_file
    from numpy import sqrt, ceil, array, zeros_like
    from time import time
    from os import path
    t0 = time()
    stats_name = None
    if '.data.' in reference:
        stats_name = reference.replace('.data.hdf5', '.stats.pkl')
    if '.raw.' in reference:
        stats_name = reference.replace('.raw.hdf5', '.stats.pkl')
    if stats_name is None:
        # Bug fix: an unrecognized filename previously left stats_name
        # unbound and raised a confusing NameError below.
        raise ValueError(f"cannot derive stats filename from {reference!r}")
    if path.exists(stats_name):
        stats = load_from_file(stats_name)
        median = stats['median']
        mean = stats['mean']
        var = stats['var']
        threshold = stats['threshold']
    else:
        print('Processing {} ... please wait'.format(reference))
        # Load images and sort in place to minimize the memory footprint.
        images = images_from_file(reference)
        images.sort(axis=0)
        # Bug fix: the mask must be boolean. The original integer
        # zeros_like(images[0]) made 'threshold[mask] = 4095' fancy-index
        # row 0 (clobbering it) instead of selecting no pixels.
        # NOTE(review): dm16_mask() is presumed to return a boolean array --
        # confirm.
        mask = zeros_like(images[0], dtype=bool)
        if 'dm16' in reference: mask = dm16_mask()
        # Compute median, then mean and var after omitting the smallest and
        # largest value per pixel (images are sorted along axis 0).
        N = len(images)
        M = int(N / 2)
        median = images[M]
        mean = images[1:-1].mean(axis=0, dtype='float32')
        var = images[1:-1].var(axis=0, dtype='float32')
        # Monte-Carlo estimate of how much the stdev shrinks when the two
        # extreme values are dropped; used to rescale the stdev.
        std_ratio = []
        for i in range(10000):
            dist = poisson_array(3, N, sort=True)
            std_ratio.append(dist.std() / dist[1:-1].std())
        std_ratio_mean = array(std_ratio).mean()
        # Compute threshold to nearest larger integer; recast as int16.
        # (numpy.cast was deprecated and removed in numpy 1.24+, so use
        # astype, which is equivalent here.)
        threshold = ceil(mean + sigma * std_ratio_mean * sqrt(var + 0.5))
        threshold = threshold.astype('int16') - median
        threshold[mask] = 4095
        save_to_file(stats_name, {
            'median': median,
            'mean': mean,
            'var': var,
            'threshold': threshold
        })
    print('time to execute stats_from_chunk [s]: {:0.1f}'.format(time() - t0))
    return median, mean, var, threshold
Exemplo n.º 4
0
def func(filename):
    """Compute per-pixel order statistics from the 'images' dataset of an
    HDF5 file and pickle them as a dict next to the source file
    (``<basename>.dict.npy``).
    """
    import numpy as np
    from h5py import File
    from ubcs_auxiliary import save_load_object
    from os import path
    # Bug fix: open read-only under a context manager -- the original
    # relied on the default mode and never closed the file handle.
    with File(filename, 'r') as f:
        data = f['images'][()]
    # Sort along the frame axis; renamed to avoid shadowing builtin sorted().
    sorted_frames = np.sort(data, axis=0)
    head, tail = path.split(filename)
    dst_name = tail.split('.')[0]
    dic = {}
    # NOTE(review): the hard-coded 254 assumes a 256-frame stack (keeping
    # the top two frames as 'imax') -- confirm against the acquisition.
    dic['imax'] = sorted_frames[254:]
    dic['mean'] = np.mean(sorted_frames[2:-2], axis=0)
    dic['std'] = np.std(sorted_frames[2:-2], axis=0)
    dic['imin'] = sorted_frames[:2]
    save_load_object.save_to_file(path.join(head, dst_name + '.dict.npy'), dic)
Exemplo n.º 5
0
def zfs_dark_data_hdf5(filename, clip):
    """Compute zinger-free statistics for the 'images' dataset of *filename*
    and persist them twice: as a pickled '.zfs' object and as a '.zfs.hdf5'
    file with one dataset per statistic (arrays gzip-compressed).

    Note: 'clip' is accepted but not used by this function.
    """
    from h5py import File
    from numpy import ndarray
    from lcp_video.analysis import zinger_free_statistics
    from ubcs_auxiliary.save_load_object import save_to_file

    base = filename.split('.hdf5')[0]
    with File(filename, 'r') as source:
        dic = zinger_free_statistics(source['images'])
    save_to_file(base + '.zfs', dic)

    # Mirror the statistics into an HDF5 file alongside the pickle.
    with File(base + '.zfs.hdf5', 'a') as sink:
        for key, value in dic.items():
            if type(value) is ndarray:
                sink.create_dataset(key, data=value, compression='gzip')
            else:
                sink.create_dataset(key, data=value)
def process_threshold_chunk(filename):
    """Read a raw image chunk from an HDF5 file, convert it to images,
    compute statistics via stats_from_data, and pickle them to the
    matching '.stats.pkl' file.
    """
    from h5py import File
    from ubcs_auxiliary.save_load_object import save_to_file

    # Bug fix: use a context manager -- the original left f_reference open.
    with File(filename, 'r') as f_reference:
        raw = f_reference['images'][()]
        pixel_format = f_reference['pixel format'][()]
        width = f_reference['image width'][()]
        height = f_reference['image height'][()]

    # Bug fix: 'length' was previously undefined (guaranteed NameError).
    # Taken here as the number of raw frames -- TODO confirm this matches
    # what convert_raw_to_images expects.
    length = raw.shape[0]
    images = convert_raw_to_images(raw, pixel_format, length, height, width)
    stats = stats_from_data(images)

    stats_filename = filename.replace('_0.raw.hdf5', '.stats.pkl')
    save_to_file(stats_filename, stats)
Exemplo n.º 7
0
def zfs_light_data_hdf5(dark_zfs_filename, light_filename, clip=2):
    """Compute zinger-free statistics for light data, using the dark mean
    ('M1') from a previously saved dark ZFS pickle, and persist the result
    as both a pickled '.light_zfs' object and a '.light_zfs.hdf5' file
    (arrays gzip-compressed), mirroring zfs_dark_data_hdf5.
    """
    from lcp_video.analysis import zinger_free_statistics
    from ubcs_auxiliary.save_load_object import load_from_file, save_to_file
    from numpy import ndarray
    from h5py import File
    print(f'Getting DarkData from {dark_zfs_filename}')
    print(f'Analysing LightData from {light_filename}')
    # Bug fix: the base name was derived with split(',') -- a typo that left
    # the '.hdf5' suffix embedded in the pickle path. Derive it from the
    # '.hdf5' suffix exactly as zfs_dark_data_hdf5 does; the '.light_zfs.hdf5'
    # output path is unchanged.
    base = light_filename.split('.hdf5')[0]
    filename = base + '.light_zfs'
    # Bug fix: this print emitted the literal '(unknown)' instead of the path.
    print(f'ZFS LightData will be saved to {filename}')
    dmean = load_from_file(dark_zfs_filename)['M1']
    with File(light_filename, 'r') as f:
        dic = zinger_free_statistics(f['images'], Dmean=dmean, clip=clip)
        save_to_file(filename, dic)
    filename_zfs = base + '.light_zfs.hdf5'
    with File(filename_zfs, 'a') as f_new:
        for key in list(dic.keys()):
            data = dic[key]
            if type(data) is ndarray:
                f_new.create_dataset(key, data=data, compression='gzip')
            else:
                f_new.create_dataset(key, data=data)
    print(f'analysis of {light_filename} is done')