def stabilization(data, m_hat, sigma, N, mask=None, clip_eta=True, return_eta=False, n_cores=None, mp_method=None):
    """Stabilize noisy magnitude data towards Gaussian distributed values.

    The volume is processed in parallel, one slice (along the next-to-last
    axis) at a time, and reassembled afterwards. Returns the stabilized data
    and, when return_eta is True, the underlying signal estimate eta.
    """
    data = np.asarray(data)
    m_hat = np.asarray(m_hat)
    sigma = np.atleast_3d(sigma)
    N = np.atleast_3d(N)

    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        mask = np.asarray(mask, dtype=bool)

    # Broadcast sigma and N to the full shape of data if they were
    # supplied as per-voxel (3D) maps
    if N.ndim < data.ndim:
        N = np.broadcast_to(N[..., None], data.shape)

    if sigma.ndim == (data.ndim - 1):
        sigma = np.broadcast_to(sigma[..., None], data.shape)

    # Check all dims are ok
    if (data.shape != sigma.shape):
        raise ValueError('data shape {} is not compatible with sigma shape {}'.format(data.shape, sigma.shape))

    if (data.shape[:-1] != mask.shape):
        raise ValueError('data shape {} is not compatible with mask shape {}'.format(data.shape, mask.shape))

    if (data.shape != m_hat.shape):
        raise ValueError('data shape {} is not compatible with m_hat shape {}'.format(data.shape, m_hat.shape))

    # One work item per slice along the next-to-last axis
    arglist = ((data[..., idx, :],
                m_hat[..., idx, :],
                mask[..., idx],
                sigma[..., idx, :],
                N[..., idx, :],
                clip_eta)
               for idx in range(data.shape[-2]))

    parallel_stabilization = multiprocesser(multiprocess_stabilization, n_cores=n_cores, mp_method=mp_method)
    output = parallel_stabilization(arglist)

    data_stabilized = np.zeros_like(data, dtype=np.float32)
    eta = np.zeros_like(data, dtype=np.float32)

    for idx, content in enumerate(output):
        data_stabilized[..., idx, :] = content[0]
        eta[..., idx, :] = content[1]

    if return_eta:
        return data_stabilized, eta
    return data_stabilized
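
A minimal usage sketch for the function above, on synthetic inputs. Everything here is hypothetical (the shapes, the noise scale, and the availability of stabilization in scope); it only assumes numpy is imported as np, as the snippet itself does.

import numpy as np

# Synthetic 4D magnitude data: a 10x10x10 volume with 5 acquisitions
data = np.random.rayleigh(scale=30., size=(10, 10, 10, 5))
m_hat = data.copy()                    # crude initial signal estimate
sigma = np.full(data.shape[:-1], 30.)  # per-voxel noise standard deviation
N = np.ones(data.shape[:-1])           # per-voxel number of coils

stabilized, eta = stabilization(data, m_hat, sigma, N, return_eta=True)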
Example #2
def local_standard_deviation(arr, n_cores=None, mp_method=None):
    """Standard deviation estimation from local patches.

    The noise field is estimated by subtracting the data from it's low pass
    filtered version, from which we then compute the variance on a local
    neighborhood basis.

    Parameters
    ----------
    arr : 3D or 4D ndarray
        The array to be estimated

    n_cores : int
        Number of cores to use for multiprocessing, default : all of them

    Returns
    -------
    sigma : ndarray
        Map of standard deviation of the noise.
    """

    # No multiprocessing for 3D array since we smooth on each separate volume
    if arr.ndim == 3:
        sigma = _local_standard_deviation(arr)
    else:
        list_arr = [[arr[..., i]] for i in range(arr.shape[-1])]
        parallel_local_standard_deviation = multiprocesser(
            _local_standard_deviation, n_cores=n_cores, mp_method=mp_method)
        result = parallel_local_standard_deviation(list_arr)

        # Reshape the multiprocessed list as an array
        result = np.rollaxis(np.asarray(result), 0, arr.ndim)
        sigma = np.median(result, axis=-1)

    # https://en.wikipedia.org/wiki/Full_width_at_half_maximum
    # Convert a full width at half maximum (FWHM) to the standard deviation
    # of the corresponding Gaussian kernel: sigma = FWHM / sqrt(8 * ln(2))
    full_width_at_half_max = 10
    blur = full_width_at_half_max / np.sqrt(8 * np.log(2))

    return gaussian_filter(sigma, blur, mode='reflect')
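
For reference, a hypothetical call on synthetic data (assumes numpy as np, and that gaussian_filter above is scipy.ndimage.gaussian_filter):

import numpy as np

# 4D series: sigma is estimated per volume, then the voxelwise median is kept
noisy = 100. + 5. * np.random.randn(20, 20, 20, 8)
sigma_map = local_standard_deviation(noisy, n_cores=2)
print(sigma_map.shape)  # (20, 20, 20), one estimate per voxel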
Example #4
def local_denoise(data,
                  block_size,
                  overlap,
                  variance,
                  n_iter=10,
                  mask=None,
                  dtype=np.float64,
                  n_cores=None,
                  use_threading=False,
                  verbose=False,
                  mp_method=None):
    if verbose:
        logger.setLevel(logging.INFO)

    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)

    # no overlapping blocks for training
    no_over = (0, 0, 0, 0)
    X = im2col_nd(data, block_size, no_over)

    # Solving for D
    param_alpha = {}
    param_alpha['pos'] = True
    param_alpha['mode'] = 1

    param_D = {}
    param_D['verbose'] = False
    param_D['posAlpha'] = True
    param_D['posD'] = True
    param_D['mode'] = 2
    param_D['lambda1'] = 1.2 / np.sqrt(np.prod(block_size))
    param_D['K'] = int(2 * np.prod(block_size))
    param_D['iter'] = 150
    param_D['batchsize'] = 500
    param_D['numThreads'] = n_cores

    if 'D' in param_alpha:
        param_D['D'] = param_alpha['D']

    # Keep only the blocks where at least half the voxels lie inside the mask
    mask_col = im2col_nd(np.broadcast_to(mask[..., None], data.shape),
                         block_size, no_over)
    train_idx = np.sum(mask_col, axis=0) > (mask_col.shape[0] / 2.)

    # Discard all-zero blocks and normalize each training column to unit norm
    train_data = X[:, train_idx]
    train_data = np.asfortranarray(train_data[:,
                                              np.any(train_data != 0, axis=0)],
                                   dtype=dtype)
    train_data /= np.sqrt(np.sum(train_data**2, axis=0, keepdims=True),
                          dtype=dtype)

    # Learn the dictionary D with spams, then normalize its atoms
    param_alpha['D'] = spams.trainDL(train_data, **param_D)
    param_alpha['D'] /= np.sqrt(
        np.sum(param_alpha['D']**2, axis=0, keepdims=True, dtype=dtype))
    param_D['D'] = param_alpha['D']

    del train_data, X, mask_col

    if use_threading or (n_cores == 1):
        # threads (or a single worker): let spams use all requested cores
        param_alpha['numThreads'] = n_cores
        param_D['numThreads'] = n_cores
    else:
        # one spams thread per worker process to avoid oversubscription
        param_alpha['numThreads'] = 1
        param_D['numThreads'] = 1

    arglist = ((data[:, :, k:k + block_size[2]],
                mask[:, :, k:k + block_size[2]],
                variance[:, :, k:k + block_size[2]],
                block_size, overlap,
                param_alpha, param_D, dtype, n_iter)
               for k in range(data.shape[2] - block_size[2] + 1))

    if use_threading:
        data_denoised = starmap(processer, arglist)  # itertools-style starmap
    else:
        time_multi = time()
        parallel_processer = multiprocesser(processer,
                                            n_cores=n_cores,
                                            mp_method=mp_method)
        data_denoised = parallel_processer(arglist)
        logger.info('Multiprocessing done in {0:.2f} mins.'.format(
            (time() - time_multi) / 60.))

    # Put together the multiprocessed results
    data_subset = np.zeros_like(data, dtype=np.float32)
    divider = np.zeros_like(data, dtype=np.int16)

    for k, content in enumerate(data_denoised):
        data_subset[:, :, k:k + block_size[2]] += content
        divider[:, :, k:k + block_size[2]] += 1

    data_subset /= divider
    return data_subset
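
A hypothetical end-to-end call, with made-up sizes. This is only a sketch: it requires the spams package and the helpers used above (im2col_nd, processer, multiprocesser), the variance map is assumed to be 3D since it is sliced along the third axis like the data, and the overlap convention is a guess.

import numpy as np

data = np.random.rand(20, 20, 20, 5)       # 4D dataset
variance = np.full(data.shape[:-1], 0.01)  # 3D noise variance map (assumed)
block_size = (3, 3, 3, 5)                  # spatial block spanning all volumes
overlap = (2, 2, 2, 0)                     # assumed overlap convention

denoised = local_denoise(data, block_size, overlap, variance,
                         n_iter=5, n_cores=4, verbose=True)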