Example #1
def peak_signal_noise_ratio(image_true, image_test, *, data_range=None):
    check_shape_equality(image_true, image_test)

    if data_range is None:
        if image_true.dtype != image_test.dtype:
            warn(
                "Inputs have mismatched dtype.  Setting data_range based on "
                "im_true.",
                stacklevel=2)
        dmin, dmax = dtype_range[image_true.dtype.type]
        true_min, true_max = np.min(image_true), np.max(image_true)
        if true_max > dmax or true_min < dmin:
            raise ValueError(
                "im_true has intensity values outside the range expected for "
                "its data type.  Please manually specify the data_range")
        if true_min >= 0:
            # most common case (255 for uint8, 1 for float)
            data_range = dmax
        else:
            data_range = dmax - dmin

    image_true, image_test = _as_floats(image_true, image_test)

    err = mean_squared_error(image_true, image_test)
    return 10 * np.log10((data_range**2) / err)
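A minimal usage sketch for the excerpt above, assuming it is exposed as the public skimage.metrics.peak_signal_noise_ratio function:

import numpy as np
from skimage.metrics import peak_signal_noise_ratio

rng = np.random.default_rng(0)
clean = rng.random((64, 64))                                          # float image in [0, 1]
noisy = np.clip(clean + 0.05 * rng.standard_normal((64, 64)), 0.0, 1.0)
# data_range=1.0 because both inputs are floats in [0, 1]
print(peak_signal_noise_ratio(clean, noisy, data_range=1.0))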
Example #2
def peak_signal_noise_ratio(image_true, image_test, *, data_range=None):
    """
    Compute the peak signal to noise ratio (PSNR) for an image.

    Parameters
    ----------
    image_true : ndarray
        Ground-truth image, same shape as im_test.
    image_test : ndarray
        Test image.
    data_range : int, optional
        The data range of the input image (distance between minimum and
        maximum possible values).  By default, this is estimated from the image
        data-type.

    Returns
    -------
    psnr : float
        The PSNR metric.

    Notes
    -----
    .. versionchanged:: 0.16
        This function was renamed from ``skimage.measure.compare_psnr`` to
        ``skimage.metrics.peak_signal_noise_ratio``.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio

    """
    check_shape_equality(image_true, image_test)

    if data_range is None:
        if image_true.dtype != image_test.dtype:
            warn(
                "Inputs have mismatched dtype.  Setting data_range based on "
                "im_true.",
                stacklevel=2,
            )
        dmin, dmax = dtype_range[image_true.dtype.type]
        true_min, true_max = cp.min(image_true), cp.max(image_true)
        if true_max > dmax or true_min < dmin:
            raise ValueError(
                "im_true has intensity values outside the range expected for "
                "its data type.  Please manually specify the data_range")
        if true_min >= 0:
            # most common case (255 for uint8, 1 for float)
            data_range = dmax
        else:
            data_range = dmax - dmin

    image_true, image_test = _as_floats(image_true, image_test)

    err = mean_squared_error(image_true, image_test)
    return 10 * cp.log10((data_range * data_range) / err)
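The GPU variant above follows the same call pattern; a sketch assuming it is the cuCIM port (cucim.skimage.metrics) and that cupy and cucim are installed:

import cupy as cp
import numpy as np
from cucim.skimage.metrics import peak_signal_noise_ratio

rng = np.random.default_rng(0)
clean = cp.asarray(rng.random((64, 64)))
noisy = cp.clip(clean + 0.05 * cp.asarray(rng.standard_normal((64, 64))), 0.0, 1.0)
print(float(peak_signal_noise_ratio(clean, noisy, data_range=1.0)))  # 0-d cupy result converted to float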
Example #3
def load_parec_data(subject='AH'):
    # loads the PAR/REC data of the selected subject and returns a list with the arrays for M, P, S, the number of timesteps and the number of slices in the dataset
    cwd = os.getcwd()
    #choose subject, default if no argument passed = AH

    if subject == 'AH':
        basepath = cwd + '/../ibt_4dFlow/AH/'
        path_s = basepath + 'an_27052015_1027340_4_2_wipqflow_fbclearV4_S.rec'
        path_m = basepath + 'an_27052015_1027340_4_2_wipqflow_fbclearV4_M.rec'
        path_p = basepath + 'an_27052015_1027340_4_2_wipqflow_fbclearV4_P.rec'

    elif subject == 'CB':
        basepath = cwd + '/../ibt_4dFlow/CB/'
        path_s = basepath + 'ch_11122015_1428290_4_2_wipqflow_fb_experiment1V4_S.rec'
        path_m = basepath + 'ch_11122015_1428290_4_2_wipqflow_fb_experiment1V4_M.rec'
        path_p = basepath + 'ch_11122015_1428290_4_2_wipqflow_fb_experiment1V4_P.rec'

    elif subject == 'DG':
        basepath = cwd + '/../ibt_4dFlow/DG/'
        path_s = basepath + 'da_15072015_1612350_3_2_wipqflow_fbclearV4_S.rec'
        path_m = basepath + 'da_15072015_1612350_3_2_wipqflow_fbclearV4_M.rec'
        path_p = basepath + 'da_15072015_1612350_3_2_wipqflow_fbclearV4_P.rec'

    elif subject == 'JR':
        basepath = cwd + '/../ibt_4dFlow/JR/'
        path_s = basepath + 'ju_27052015_1208240_5_1_wipqflow_fbclearV42.rec'
        path_m = basepath + 'ju_27052015_1142050_4_2_wipqflow_fbclearV4_M.rec'
        path_p = basepath + 'ju_27052015_1142050_4_2_wipqflow_fbclearV4_P.rec'

    elif subject == 'LT':
        basepath = cwd + '/../ibt_4dFlow/LT/'
        path_s = basepath + 'lo_27112015_1256300_2_2_wipqflow_fb_experiment1V4_S.rec'
        path_m = basepath + 'lo_27112015_1256300_2_2_wipqflow_fb_experiment1V4_M.rec'
        path_p = basepath + 'lo_27112015_1256300_2_2_wipqflow_fb_experiment1V4_P.rec'

    # if an invalid subject is given, warn and return early (the paths would otherwise be undefined)
    else:
        warn('Invalid subject')
        return None

    #load the data into arrays
    data_s = nib.parrec.load(path_s).get_data()
    data_m = nib.parrec.load(path_m).get_data()
    data_p = nib.parrec.load(path_p).get_data()

    #calculate the number of timesteps and slices; note that the timesteps have to be divided by 2 as we have a magnitude and a phase image for each time step
    num_times = int(data_s.shape[3] / 2)
    num_slices = int(data_s.shape[2])

    #return the desired vector of the loaded data
    parec_data = [data_m, data_p, data_s, num_times, num_slices]

    return parec_data
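A usage sketch, assuming the hard-coded PAR/REC files exist at the relative paths above and that nibabel is imported as nib:

data_m, data_p, data_s, num_times, num_slices = load_parec_data(subject='AH')
print(data_s.shape, num_times, num_slices)  # e.g. (rows, cols, num_slices, 2 * num_times)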
Example #4
def compare_psnr(im_true, im_test, data_range=None, dynamic_range=None):
    """ Compute the peak signal to noise ratio (PSNR) for an image.
    Parameters
    ----------
    im_true : ndarray
        Ground-truth image.
    im_test : ndarray
        Test image.
    data_range : int
        The data range of the input image (distance between minimum and
        maximum possible values).  By default, this is estimated from the image
        data-type.
    Returns
    -------
    psnr : float
        The PSNR metric.

    """
    _assert_compatible(im_true, im_test)
    if dynamic_range is not None:
        warn(
            '`dynamic_range` has been deprecated in favor of '
            '`data_range`. The `dynamic_range` keyword argument '
            'will be removed in v0.14', skimage_deprecation)
        data_range = dynamic_range

    if data_range is None:
        dmin, dmax = dtype_range[im_true.dtype.type]
        true_min, true_max = np.min(im_true), np.max(im_true)
        if true_max > dmax or true_min < dmin:
            raise ValueError(
                "im_true has intensity values outside the range expected for "
                "its data type.  Please manually specify the data_range")
        if true_min >= 0:
            # most common case (255 for uint8, 1 for float)
            data_range = dmax
        else:
            data_range = dmax - dmin

    im_true, im_test = _as_floats(im_true, im_test)

    err = compare_mse(im_true, im_test)
    return 10 * np.log10((data_range**2) / err)
Example #5
def estimate_sigma(image, average_sigmas=False, multichannel=False):
    """
    Robust wavelet-based estimator of the (Gaussian) noise standard deviation.
    Parameters
    ----------
    image : ndarray
        Image for which to estimate the noise standard deviation.
    average_sigmas : bool, optional
        If true, average the channel estimates of `sigma`.  Otherwise return
        a list of sigmas corresponding to each channel.
    multichannel : bool
        Estimate sigma separately for each channel.
    Returns
    -------
    sigma : float or list
        Estimated noise standard deviation(s).  If `multichannel` is True and
        `average_sigmas` is False, a separate noise estimate for each channel
        is returned.  Otherwise, the average of the individual channel
        estimates is returned.
    """
    if multichannel:
        nchannels = image.shape[-1]
        sigmas = [
            estimate_sigma(image[..., c], multichannel=False)
            for c in range(nchannels)
        ]
        if average_sigmas:
            sigmas = np.mean(sigmas)
        return sigmas
    elif image.shape[-1] <= 4:
        msg = ("image is size {0} on the last axis, but multichannel is "
               "False.  If this is a color image, please set multichannel "
               "to True for proper noise estimation.")
        warn(msg.format(image.shape[-1]))
    coeffs = pywt.dwtn(image, wavelet='db2')
    detail_coeffs = coeffs['d' * image.ndim]
    return _sigma_est_dwt(detail_coeffs, distribution='Gaussian')
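A short usage sketch with the public skimage.restoration.estimate_sigma; the estimate should come out close to the sigma of the added noise:

import numpy as np
from skimage import data, img_as_float
from skimage.restoration import estimate_sigma

image = img_as_float(data.camera())
rng = np.random.default_rng(0)
noisy = image + 0.1 * rng.standard_normal(image.shape)
print(estimate_sigma(noisy))  # expected to be roughly 0.1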
Example #6
def random_walker(data,
                  labels,
                  mode='bf',
                  tol=1.e-3,
                  copy=True,
                  return_full_prob=False,
                  spacing=None,
                  alpha=0.3,
                  beta=0.3,
                  gamma=0.4,
                  a=130.0,
                  b=10.0,
                  c=800.0):
    """Random walker algorithm for segmentation from markers.
    Random walker algorithm is implemented for gray-level or multichannel
    images.
    Parameters
    ----------
    data : array_like
        Image to be segmented in phases. Gray-level `data` can be two- or
        three-dimensional; multichannel data can be three- or four-
        dimensional (multichannel=True) with the highest dimension denoting
        channels. Data spacing is assumed isotropic unless the `spacing`
        keyword argument is used.
    labels : array of ints, of same shape as `data` without channels dimension
        Array of seed markers labeled with different positive integers
        for different phases. Zero-labeled pixels are unlabeled pixels.
        Negative labels correspond to inactive pixels that are not taken
        into account (they are removed from the graph). If labels are not
        consecutive integers, the labels array will be transformed so that
        labels are consecutive. In the multichannel case, `labels` should have
        the same shape as a single channel of `data`, i.e. without the final
        dimension denoting channels.
    beta : float
        Penalization coefficient for the random walker motion
        (the greater `beta`, the more difficult the diffusion).
    mode : string, available options {'cg_mg', 'cg', 'bf'}
        Mode for solving the linear system in the random walker algorithm.
        If no preference given, automatically attempt to use the fastest
        option available ('cg_mg' from pyamg >> 'cg' with UMFPACK > 'bf').
        - 'bf' (brute force): an LU factorization of the Laplacian is
          computed. This is fast for small images (<1024x1024), but very slow
          and memory-intensive for large images (e.g., 3-D volumes).
        - 'cg' (conjugate gradient): the linear system is solved iteratively
          using the Conjugate Gradient method from scipy.sparse.linalg. This is
          less memory-consuming than the brute force method for large images,
          but it is quite slow.
        - 'cg_mg' (conjugate gradient with multigrid preconditioner): a
          preconditioner is computed using a multigrid solver, then the
          solution is computed with the Conjugate Gradient method.  This mode
          requires that the pyamg module (http://pyamg.org/) is
          installed. For images of size > 512x512, this is the recommended
          (fastest) mode.
    tol : float
        tolerance to achieve when solving the linear system, in
        'cg' and 'cg_mg' modes.
    copy : bool
        If copy is False, the `labels` array will be overwritten with
        the result of the segmentation. Use copy=False if you want to
        save on memory.
    multichannel : bool, default False
        If True, input data is parsed as multichannel data (see 'data' above
        for proper input format in this case)
    return_full_prob : bool, default False
        If True, the probability that a pixel belongs to each of the labels
        will be returned, instead of only the most likely label.
    spacing : iterable of floats
        Spacing between voxels in each spatial dimension. If `None`, then
        the spacing between pixels/voxels in each dimension is assumed 1.
    Returns
    -------
    output : ndarray
        * If `return_full_prob` is False, array of ints of same shape as
          `data`, in which each pixel has been labeled according to the marker
          that reached the pixel first by anisotropic diffusion.
        * If `return_full_prob` is True, array of floats of shape
          `(nlabels, data.shape)`. `output[label_nb, i, j]` is the probability
          that label `label_nb` reaches the pixel `(i, j)` first.
    See also
    --------
    skimage.morphology.watershed: watershed segmentation
        A segmentation algorithm based on mathematical morphology
        and "flooding" of regions from markers.
    Notes
    -----
    Multichannel inputs are scaled with all channel data combined. Ensure all
    channels are separately normalized prior to running this algorithm.
    The `spacing` argument is specifically for anisotropic datasets, where
    data points are spaced differently in one or more spatial dimensions.
    Anisotropic data is commonly encountered in medical imaging.
    The algorithm was first proposed in *Random walks for image
    segmentation*, Leo Grady, IEEE Trans Pattern Anal Mach Intell.
    2006 Nov;28(11):1768-83.
    The algorithm solves the diffusion equation at infinite times for
    sources placed on markers of each phase in turn. A pixel is labeled with
    the phase that has the greatest probability to diffuse first to the pixel.
    The diffusion equation is solved by minimizing x.T L x for each phase,
    where L is the Laplacian of the weighted graph of the image, and x is
    the probability that a marker of the given phase arrives first at a pixel
    by diffusion (x=1 on markers of the phase, x=0 on the other markers, and
    the other coefficients are looked for). Each pixel is attributed the label
    for which it has a maximal value of x. The Laplacian L of the image
    is defined as:
       - L_ii = d_i, the number of neighbors of pixel i (the degree of i)
       - L_ij = -w_ij if i and j are adjacent pixels
    The weight w_ij is a decreasing function of the norm of the local gradient.
    This ensures that diffusion is easier between pixels of similar values.
    When the Laplacian is decomposed into blocks of marked and unmarked
    pixels::
        L = M B.T
            B A
    with first indices corresponding to marked pixels, and then to unmarked
    pixels, minimizing x.T L x for one phase amount to solving::
        A x = - B x_m
    where x_m = 1 on markers of the given phase, and 0 on other markers.
    This linear system is solved in the algorithm using a direct method for
    small images, and an iterative method for larger images.
    Examples
    --------
    >>> np.random.seed(0)
    >>> a = np.zeros((10, 10)) + 0.2 * np.random.rand(10, 10)
    >>> a[5:8, 5:8] += 1
    >>> b = np.zeros_like(a)
    >>> b[3, 3] = 1  # Marker for first phase
    >>> b[6, 6] = 2  # Marker for second phase
    >>> random_walker(a, b)
    array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=int32)
    """
    # Parse input data
    if mode is None:
        if amg_loaded:
            mode = 'cg_mg'
        elif UmfpackContext is not None:
            mode = 'cg'
        else:
            mode = 'bf'

    if (labels != 0).all():
        warn('Random walker only segments unlabeled areas, where '
             'labels == 0. No zero valued areas in labels were '
             'found. Returning provided labels.')

        if return_full_prob:
            # Find and iterate over valid labels
            unique_labels = np.unique(labels)
            unique_labels = unique_labels[unique_labels > 0]

            out_labels = np.empty(labels.shape + (len(unique_labels), ),
                                  dtype=bool)
            for n, i in enumerate(unique_labels):
                out_labels[..., n] = (labels == i)

        else:
            out_labels = labels
        return out_labels

    if data.ndim < 4:
        raise ValueError('Data must have 4 dimensions.')
    dims = data[..., 0].shape  # To reshape final labeled result
    data = img_as_float(data)

    # Spacing kwarg checks
    if spacing is None:
        spacing = np.asarray((1., ) * 4)
    elif len(spacing) == len(dims):
        if len(spacing) == 2:  # Need a dummy spacing for singleton 3rd dim
            spacing = np.r_[spacing, 1.]
        else:  # Convert to array
            spacing = np.asarray(spacing)
    else:
        raise ValueError('Input argument `spacing` incorrect, should be an '
                         'iterable with one number per spatial dimension.')

    if copy:
        labels = np.copy(labels)
    label_values = np.unique(labels)

    # Reorder label values to have consecutive integers (no gaps)
    if np.any(np.diff(label_values) != 1):
        mask = labels >= 0
        labels[mask] = rank_order(labels[mask])[0].astype(labels.dtype)
    labels = labels.astype(np.int32)

    # If the array has pruned zones, be sure that no isolated pixels
    # exist between pruned zones (they could not be determined)
    if np.any(labels < 0):
        filled = ndi.binary_propagation(labels > 0, mask=labels >= 0)
        labels[np.logical_and(np.logical_not(filled), labels == 0)] = -1
        del filled
    labels = np.atleast_3d(labels)
    if np.any(labels < 0):
        lap_sparse = _build_laplacian(data,
                                      spacing,
                                      mask=labels >= 0,
                                      alpha=alpha,
                                      beta=beta,
                                      gamma=gamma,
                                      a=a,
                                      b=b,
                                      c=c)
    else:
        lap_sparse = _build_laplacian(data,
                                      spacing,
                                      alpha=alpha,
                                      beta=beta,
                                      gamma=gamma,
                                      a=a,
                                      b=b,
                                      c=c)
    lap_sparse, B = _buildAB(lap_sparse, labels)

    # We solve the linear system
    # lap_sparse X = B
    # where X[i, j] is the probability that a marker of label i arrives
    # first at pixel j by anisotropic diffusion.
    if mode == 'cg':
        X = _solve_cg(lap_sparse,
                      B,
                      tol=tol,
                      return_full_prob=return_full_prob)
    if mode == 'cg_mg':
        if not amg_loaded:
            warn("""pyamg (http://pyamg.org/)) is needed to use
                this mode, but is not installed. The 'cg' mode will be used
                instead.""")
            X = _solve_cg(lap_sparse,
                          B,
                          tol=tol,
                          return_full_prob=return_full_prob)
        else:
            X = _solve_cg_mg(lap_sparse,
                             B,
                             tol=tol,
                             return_full_prob=return_full_prob)
    if mode == 'bf':
        X = _solve_bf(lap_sparse, B, return_full_prob=return_full_prob)

    # Clean up results
    if return_full_prob:
        labels = labels.astype(float)
        X = np.array([
            _clean_labels_ar(Xline, labels, copy=True).reshape(dims)
            for Xline in X
        ])

        for i in range(1, int(labels.max()) + 1):
            mask_i = np.squeeze(labels == i)
            X[:, mask_i] = 0
            X[i - 1, mask_i] = 1

    else:
        X = _clean_labels_ar(X + 1, labels).reshape(dims)
    return X
Example #7
from skimage._shared.utils import warn
from .viewers import ImageViewer
from .qt import has_qt

if not has_qt:
    warn('Viewer requires Qt')
Example #8
def create_pcmra_images(parec_data, option='no_avg', save=False, subject='AH'):
    # this function creates PCMRA images
    # the save option stores the result for later use as a .npy array in the working directory
    # the other option chooses between keeping the time dimension or collapsing/averaging over the time slices

    separated_arrays = create_separated_arrays(parec_data)

    vm_vec = separated_arrays[0]
    vp_vec = separated_arrays[1]
    vs_vec = separated_arrays[2]
    m_vec = separated_arrays[3]

    num_times = parec_data[3]
    num_slices = parec_data[4]

    # create PCMRA slices by summing the squares of all velocity components, multiplying by the squared magnitude image,
    # summing over all times and finally normalizing
    # with 1/number of timesteps and the square root
    # data_s/m/p are structured like [px, px, slice number, timestep], where the timesteps alternate between the magnitude (even) and velocity (odd) images
    pcmra_img = np.zeros(
        (int(vm_vec.shape[0]), int(vm_vec.shape[1]), num_slices))

    #account for the fact that we want to know the velocity magnitude and not normalize large negative flows to zero

    vs_vec_magn = np.sqrt(np.square(vs_vec))
    vm_vec_magn = np.sqrt(np.square(vm_vec))
    vp_vec_magn = np.sqrt(np.square(vp_vec))

    #normalize the vectors to 1

    vs_vec_max = vs_vec_magn.max()
    vs_vec_min = vs_vec_magn.min()
    vs_vec_norm = (vs_vec_magn - vs_vec_min) / (vs_vec_max - vs_vec_min)

    vm_vec_max = vm_vec_magn.max()
    vm_vec_min = vm_vec_magn.min()
    vm_vec_norm = (vm_vec_magn - vm_vec_min) / (vm_vec_max - vm_vec_min)

    vp_vec_max = vp_vec_magn.max()
    vp_vec_min = vp_vec_magn.min()
    vp_vec_norm = (vp_vec_magn - vp_vec_min) / (vp_vec_max - vp_vec_min)

    m_vec_max = m_vec.max()
    m_vec_min = m_vec.min()
    m_vec_norm = (m_vec - m_vec_min) / (m_vec_max - m_vec_min)

    if option == 'no_avg':
        pcmra_img = np.zeros(parec_data[2].shape)

        for s in range(num_slices):
            for x in range(vm_vec.shape[0]):
                for y in range(vm_vec.shape[1]):
                    for t in range(num_times):
                        v_squared = np.square(
                            vs_vec_norm[x, y, s, t]) + np.square(
                                vm_vec_norm[x, y, s, t]) + np.square(
                                    vp_vec_norm[x, y, s, t])
                        m_squared = np.square(m_vec_norm[x, y, s, t])
                        pcmra_img[x, y, s, t] = np.sqrt(m_squared * v_squared)

        if save:
            np.save('saved_pcmra_img_noavg_' + str(subject) + ' .npy',
                    pcmra_img)

        return pcmra_img

    elif option == 'avg':
        # data_s has shape (rows, cols, num_slices, 2 * num_times); keep only the spatial dims and slices
        pcmra_img = np.zeros(parec_data[2].shape[:3])

        for s in range(num_slices):
            for x in range(vm_vec.shape[0]):
                for y in range(vm_vec.shape[1]):
                    v_squared = 0
                    m_squared = 0
                    for t in range(num_times):
                        v_squared += np.square(
                            vs_vec_norm[x, y, s, t]) + np.square(
                                vm_vec_norm[x, y, s, t]) + np.square(
                                    vp_vec_norm[x, y, s, t])
                        m_squared += np.square(m_vec_norm[x, y, s, t])
                        pcmra_img[x, y, s] += np.sqrt(m_squared * v_squared)

        if save:
            np.save('saved_pcmra_img_avg_' + str(subject) + ' .npy', pcmra_img)

        return pcmra_img

    else:
        warn('Invalid options input! Valid inputs are: no_avg, avg')
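A usage sketch tying this example to load_parec_data from example #3, assuming both functions and create_separated_arrays live in the same module:

parec_data = load_parec_data(subject='AH')
pcmra = create_pcmra_images(parec_data, option='avg', save=False, subject='AH')
print(pcmra.shape)  # (rows, cols, num_slices) when option='avg'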
Example #9
def instance_random_shapes(image_shape,
                           max_shapes,
                           *,
                           min_shapes=1,
                           min_size=2,
                           max_size=None,
                           multichannel=True,
                           num_channels=3,
                           shape_names=None,
                           intensity_range=None,
                           allow_overlap=False,
                           num_trials=30,
                           random_state=None,
                           class_is=None,
                           fill_is=None,
                           colors=None,
                           textures=None,
                           background_texture=None):
    """
    Copied from scikit-image's `skimage.draw.random_shapes`.

    Generate an image with random shapes, labeled with bounding boxes.

    The image is populated with random shapes with random sizes, random
    locations, and random colors, with or without overlap.

    Shapes have random (row, col) starting coordinates and random sizes
    bounded by `min_size` and `max_size`. It can occur that a randomly
    generated shape will not fit the image at all. In that case, the
    algorithm will try again with new starting coordinates a certain
    number of times. However, it also means that some shapes may be
    skipped altogether. In that case, this function will generate fewer
    shapes than requested.

    Parameters
    ----------
    image_shape : tuple
        The number of rows and columns of the image to generate.
    max_shapes : int
        The maximum number of shapes to (attempt to) fit into the shape.
    min_shapes : int, optional
        The minimum number of shapes to (attempt to) fit into the shape.
    min_size : int, optional
        The minimum dimension of each shape to fit into the image.
    max_size : int, optional
        The maximum dimension of each shape to fit into the image.
    multichannel : bool, optional
        If True, the generated image has ``num_channels`` color channels,
        otherwise generates grayscale image.
    num_channels : int, optional
        Number of channels in the generated image. If 1, generate
        monochrome images, else color images with multiple
        channels. Ignored if ``multichannel`` is set to False.
    shape_names : {rectangle, circle, triangle, None} iterable of str, optional
        The name(s) of the shape(s) to generate or `None` to allow all shapes.
    intensity_range : {tuple of tuples of uint8, tuple of uint8}, optional
        The range of values to sample pixel values from. For grayscale
        images the format is (min, max). For multichannel - ((min, max),)
        if the ranges are equal across the channels, and ((min_0, max_0),
        ... (min_N, max_N)) if they differ. As the function supports
        generation of uint8 arrays only, the maximum range is (0,
        255). If None, set to (0, 254) for each channel reserving color
        of intensity = 255 for background.
    allow_overlap : bool, optional
        If `True`, allow shapes to overlap.
    num_trials : int, optional
        How often to attempt to fit a shape into the image before
        skipping it.
    seed : int, optional
        Seed to initialize the random number generator.  If `None`,
        a random seed from the operating system is used.

    Returns
    -------
    image : uint8 array
        An image with the fitted shapes.
    labels : list
        A list of labels, one per shape in the image. Each label is a
        (category, ((r0, r1), (c0, c1))) tuple specifying the category and
        bounding box coordinates of the shape.

    Examples
    --------
    >>> import skimage.draw
    >>> image, labels = skimage.draw.random_shapes((32, 32), max_shapes=3)
    >>> image # doctest: +SKIP
    array([
       [[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=uint8)
    >>> labels # doctest: +SKIP
    [('circle', ((22, 18), (25, 21))),
     ('triangle', ((5, 6), (13, 13)))]
    """

    assert class_is in ['shape', 'fill']
    assert fill_is in ['shape', 'random']

    if min_size > image_shape[0] or min_size > image_shape[1]:
        raise ValueError('Minimum dimension must be less than ncols and nrows')
    max_size = max_size or max(image_shape[0], image_shape[1])

    if not multichannel:
        num_channels = 1

    if intensity_range is None:
        intensity_range = (0, 254) if num_channels == 1 else ((0, 254), )
    else:
        tmp = (intensity_range, ) if num_channels == 1 else intensity_range
        for intensity_pair in tmp:
            for intensity in intensity_pair:
                if not (0 <= intensity <= 255):
                    msg = 'Intensity range must lie within (0, 255) interval'
                    raise ValueError(msg)

    image_shape = (image_shape[0], image_shape[1], num_channels)
    if background_texture is None:
        image = np.full(image_shape, 255, dtype=np.uint8)
    else:
        image = background_texture.copy()
    target = np.full(image_shape, 0, dtype=np.uint8)
    filled = np.zeros(image_shape, dtype=bool)
    labels = []
    masks = []

    num_shapes = random_state.randint(min_shapes, max_shapes + 1)

    # One random color per shape, unconnected to the shape itself. This
    # allows one to test a model's ability to segment the shapes with
    # respect to shape only, irrespective of color.
    if colors is None and textures is None:
        colors = _generate_random_colors(num_shapes, num_channels,
                                         intensity_range, random_state)

    # Create a list of (SHAPE, COLOR, CLASS) tuples.

    samples = []

    shape_choices = []
    if shape_names is None:
        shape_choices = SHAPE_CHOICES
    else:
        generator_map = {get_shape_name(sc): sc for sc in SHAPE_CHOICES}
        for shape_name in shape_names:
            shape_choices.append(generator_map[shape_name])

    instance_ids = set([2])

    for shape_num in range(num_shapes):
        object_spec = generate_object_spec(shape_choices,
                                           colors=colors,
                                           textures=textures,
                                           fill_is=fill_is,
                                           class_is=class_is,
                                           random_state=random_state,
                                           instance_ids=instance_ids)

        shape_size = (min_size, max_size)

        for trial_num in range(num_trials):
            # Pick start coordinates.
            column = random_state.randint(image_shape[1])
            row = random_state.randint(image_shape[0])
            point = (row, column)
            try:
                mask_idx, label = object_spec.generator(
                    point, image_shape, shape_size, random_state)
            except ArithmeticError:
                # Couldn't fit the shape, skip it.
                continue

            mask = np.zeros(image.shape[:2])
            mask[mask_idx] = 1
            mask = mask.astype(bool)

            # Check if there is an overlap where the mask is nonzero.
            if allow_overlap or not filled[mask].any():
                # Calling `overlay_object` has side effects, such
                # as setting the pixels of `filled` to `True` where
                # the object exists.
                image = overlay_object(image, target, filled, mask,
                                       object_spec)
                labels.append(label)
                break
        else:
            warn('Could not fit any shapes to image, '
                 'consider reducing the minimum dimension')

    if not multichannel:
        image = np.squeeze(image, axis=2)

    return image, labels, target
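A hedged usage sketch; SHAPE_CHOICES, generate_object_spec, overlay_object and _generate_random_colors are assumed to be defined in the same module as this copied helper:

import numpy as np

rng = np.random.RandomState(0)
image, labels, target = instance_random_shapes((128, 128), max_shapes=5,
                                                class_is='shape', fill_is='shape',
                                                random_state=rng)
print(image.shape, len(labels))  # RGB image plus one label per fitted shape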
Example #10
def random_shapes(
        image_shape,
        max_shapes,
        min_shapes=1,
        min_size=2,
        max_size=None,
        multichannel=True,
        num_channels=3,
        shape=None,
        scenario='SHAPEGENERATOR_ALL_HALO',
        #my_shape_list=[1,2,3]
        intensity_range=None,
        allow_overlap=False,
        num_trials=100,
        random_seed=None):

    if min_size > image_shape[0] or min_size > image_shape[1]:
        raise ValueError('Minimum dimension must be less than ncols and nrows')
    max_size = max_size or max(image_shape[0], image_shape[1])

    if not multichannel:
        num_channels = 1

    if intensity_range is None:
        intensity_range = (0, 254) if num_channels == 1 else ((0, 254), )
    else:
        tmp = (intensity_range, ) if num_channels == 1 else intensity_range
        for intensity_pair in tmp:
            for intensity in intensity_pair:
                if not (0 <= intensity <= 255):
                    msg = 'Intensity range must lie within (0, 255) interval'
                    raise ValueError(msg)

    random = np.random.RandomState(random_seed)
    user_shape = shape
    #shape_list =
    image_shape = (image_shape[0], image_shape[1], num_channels)
    image = np.full(image_shape, 255, dtype=np.uint8)
    filled = np.zeros(image_shape, dtype=bool)
    labels = []

    num_shapes = random.randint(min_shapes, max_shapes + 1)

    colors = _generate_random_colors(num_shapes, num_channels, intensity_range,
                                     random)
    for shape_idx in range(num_shapes):
        if user_shape is None:

            # run different scenarios, two of them with weighted probabilities as well
            if scenario == 'SHAPEGENERATOR_ALL_HALO':
                shape_generator = random.choice(SHAPE_CHOICES_ALL,
                                                p=[0.45, 0.45, 0.1])

            elif scenario == 'SHAPEGENERATOR_ALL_CORE':
                shape_generator = random.choice(SHAPE_CHOICES_ALL,
                                                p=[0.1, 0.1, 0.8])
            # rectangles and triangles
            elif scenario == 'SHAPEGENERATOR_R_T':
                shape_generator = random.choice(SHAPE_CHOICES_R_T)
            # only circles
            elif scenario == 'SHAPEGENERATOR_C':
                shape_generator = random.choice(SHAPE_CHOICES_C)

            else:
                print("Error in the script")

        else:
            shape_generator = SHAPE_GENERATORS_ALL[user_shape]
        shape = (min_size, max_size)
        for _ in range(num_trials):
            # Pick start coordinates.
            column = random.randint(image_shape[1])
            row = random.randint(image_shape[0])
            point = (row, column)
            try:
                indices, label = shape_generator(point, image_shape, shape,
                                                 random)
            except ArithmeticError:
                # Couldn't fit the shape, skip it.
                continue
            # Check if there is an overlap where the mask is nonzero.
            if allow_overlap or not filled[indices].any():
                image[indices] = colors[shape_idx]
                filled[indices] = True
                labels.append(label)
                break
        else:
            warn('Could not fit any shapes to image, '
                 'consider reducing the minimum dimension')

    if not multichannel:
        image = np.squeeze(image, axis=2)
    return image, labels
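A usage sketch for this scenario-based variant, assuming the SHAPE_CHOICES_* lists and _generate_random_colors exist in the same module:

image, labels = random_shapes((64, 64), max_shapes=4,
                              scenario='SHAPEGENERATOR_R_T',
                              random_seed=42)
print(image.dtype, len(labels))  # uint8 image, up to 4 rectangle/triangle labels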
Example #11
def structural_similarity(X,
                          Y,
                          win_size=None,
                          gradient=False,
                          data_range=None,
                          multichannel=False,
                          gaussian_weights=False,
                          full=False,
                          **kwargs):
    """Compute the mean structural comparison between two images.
    Parameters
    ----------
    X, Y : ndarray
        Image. Any dimensionality.
    win_size : int or None
        The side-length of the sliding window used in comparison. Must be an
        odd value. If `gaussian_weights` is True, this is ignored and the
        window size will depend on `sigma`.
    gradient : bool, optional
        If True, also return the gradient with respect to Y.
    data_range : float, optional
        The data range of the input image (distance between minimum and
        maximum possible values). By default, this is estimated from the image
        data-type.
    multichannel : bool, optional
        If True, treat the last dimension of the array as channels. Similarity
        calculations are done independently for each channel then averaged.
    gaussian_weights : bool, optional
        If True, each patch has its mean and variance spatially weighted by a
        normalized Gaussian kernel of width sigma=1.5.
    full : bool, optional
        If True, also return the full structural similarity image.
    Other Parameters
    ----------------
    use_sample_covariance : bool
        If True, normalize covariances by N-1 rather than N, where N is the
        number of pixels within the sliding window.
    K1 : float
        Algorithm parameter, K1 (small constant, see [1]_).
    K2 : float
        Algorithm parameter, K2 (small constant, see [1]_).
    sigma : float
        Standard deviation for the Gaussian when `gaussian_weights` is True.
    Returns
    -------
    mssim : float
        The mean structural similarity over the image.
    grad : ndarray
        The gradient of the structural similarity index between X and Y [2]_.
        This is only returned if `gradient` is set to True.
    S : ndarray
        The full SSIM image.  This is only returned if `full` is set to True.
    Notes
    -----
    To match the implementation of Wang et. al. [1]_, set `gaussian_weights`
    to True, `sigma` to 1.5, and `use_sample_covariance` to False.
    References
    ----------
    .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P.
       (2004). Image quality assessment: From error visibility to
       structural similarity. IEEE Transactions on Image Processing,
       13, 600-612.
       https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf,
       :DOI:`10.1109/TIP.2003.819861`
    .. [2] Avanaki, A. N. (2009). Exact global histogram specification
       optimized for structural similarity. Optical Review, 16, 613-621.
       :arXiv:`0901.0065`
       :DOI:`10.1007/s10043-009-0119-z`
    """
    if not X.shape == Y.shape:
        raise ValueError('Input images must have the same dimensions.')

    if multichannel:
        # loop over channels
        args = dict(win_size=win_size,
                    gradient=gradient,
                    data_range=data_range,
                    multichannel=False,
                    gaussian_weights=gaussian_weights,
                    full=full)
        args.update(kwargs)
        nch = X.shape[-1]
        mssim = np.empty(nch)
        if gradient:
            G = np.empty(X.shape)
        if full:
            S = np.empty(X.shape)
        for ch in range(nch):
            ch_result = structural_similarity(X[..., ch], Y[..., ch], **args)
            if gradient and full:
                mssim[..., ch], G[..., ch], S[..., ch] = ch_result
            elif gradient:
                mssim[..., ch], G[..., ch] = ch_result
            elif full:
                mssim[..., ch], S[..., ch] = ch_result
            else:
                mssim[..., ch] = ch_result
        mssim = mssim.mean()
        if gradient and full:
            return mssim, G, S
        elif gradient:
            return mssim, G
        elif full:
            return mssim, S
        else:
            return mssim

    K1 = kwargs.pop('K1', 0.01)
    K2 = kwargs.pop('K2', 0.03)
    sigma = kwargs.pop('sigma', 1.5)
    if K1 < 0:
        raise ValueError("K1 must be positive")
    if K2 < 0:
        raise ValueError("K2 must be positive")
    if sigma < 0:
        raise ValueError("sigma must be positive")
    use_sample_covariance = kwargs.pop('use_sample_covariance', True)

    if gaussian_weights:
        # Set to give an 11-tap filter with the default sigma of 1.5 to match
        # Wang et. al. 2004.
        truncate = 3.5

    if win_size is None:
        if gaussian_weights:
            # set win_size used by crop to match the filter size
            r = int(truncate * sigma + 0.5)  # radius as in ndimage
            win_size = 2 * r + 1
        else:
            win_size = 7  # backwards compatibility

    if np.any((np.asarray(X.shape) - win_size) < 0):
        raise ValueError(
            "win_size exceeds image extent.  If the input is a multichannel "
            "(color) image, set multichannel=True.")

    if not (win_size % 2 == 1):
        raise ValueError('Window size must be odd.')

    if data_range is None:
        if X.dtype != Y.dtype:
            warn("Inputs have mismatched dtype.  Setting data_range based on "
                 "X.dtype.")
        dmin, dmax = dtype_range[X.dtype.type]
        data_range = dmax - dmin

    ndim = X.ndim

    if gaussian_weights:
        filter_func = gaussian_filter
        filter_args = {'sigma': sigma, 'truncate': truncate}
    else:
        filter_func = uniform_filter
        filter_args = {'size': win_size}

    # ndimage filters need floating point data
    X = X.astype(np.float64)
    Y = Y.astype(np.float64)

    NP = win_size**ndim

    # filter has already normalized by NP
    if use_sample_covariance:
        cov_norm = NP / (NP - 1)  # sample covariance
    else:
        cov_norm = 1.0  # population covariance to match Wang et. al. 2004

    # compute (weighted) means
    ux = filter_func(X, **filter_args)
    uy = filter_func(Y, **filter_args)

    # compute (weighted) variances and covariances
    uxx = filter_func(X * X, **filter_args)
    uyy = filter_func(Y * Y, **filter_args)
    uxy = filter_func(X * Y, **filter_args)
    vx = cov_norm * (uxx - ux * ux)
    vy = cov_norm * (uyy - uy * uy)
    vxy = cov_norm * (uxy - ux * uy)

    R = data_range
    C1 = (K1 * R)**2
    C2 = (K2 * R)**2

    A1, A2 = ((2 * np.sqrt(abs(vx * vy)) + C2, 2 * vxy + C2))

    S = A2 / (A1)
    # to avoid edge effects will ignore filter radius strip around edges
    pad = (win_size - 1) // 2

    # compute (weighted) mean of ssim
    mssim = crop(S, pad).mean()

    return mssim
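The excerpt above is a modified variant; a usage sketch with the standard public skimage.metrics.structural_similarity for comparison:

import numpy as np
from skimage import data, img_as_float
from skimage.metrics import structural_similarity

image = img_as_float(data.camera())
rng = np.random.default_rng(0)
noisy = np.clip(image + 0.1 * rng.standard_normal(image.shape), 0.0, 1.0)
mssim, grad = structural_similarity(image, noisy, data_range=1.0, gradient=True)
print(mssim)  # mean SSIM, below 1.0 because of the added noise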
Example #12
def random_shapes_distr(image_shape,
                        max_shapes,
                        min_shapes=1,
                        min_size=2,
                        max_size=None,
                        multichannel=True,
                        num_channels=3,
                        shape=None,
                        intensity_range=None,
                        allow_overlap=False,
                        num_trials=100,
                        random_seed=None):
    """Generate an image with random shapes, labeled with bounding boxes.
    The image is populated with random shapes with random sizes, random
    locations, and random colors, with or without overlap.
    Shapes have random (row, col) starting coordinates and random sizes bounded
    by `min_size` and `max_size`. It can occur that a randomly generated shape
    will not fit the image at all. In that case, the algorithm will try again
    with new starting coordinates a certain number of times. However, it also
    means that some shapes may be skipped altogether. In that case, this
    function will generate fewer shapes than requested.
    Parameters
    ----------
    image_shape : tuple
        The number of rows and columns of the image to generate.
    max_shapes : int
        The maximum number of shapes to (attempt to) fit into the shape.
    min_shapes : int, optional
        The minimum number of shapes to (attempt to) fit into the shape.
    min_size : int, optional
        The minimum dimension of each shape to fit into the image.
    max_size : int, optional
        The maximum dimension of each shape to fit into the image.
    multichannel : bool, optional
        If True, the generated image has ``num_channels`` color channels,
        otherwise generates grayscale image.
    num_channels : int, optional
        Number of channels in the generated image. If 1, generate monochrome
        images, else color images with multiple channels. Ignored if
        ``multichannel`` is set to False.
    shape : {rectangle, circle, triangle, None} str, optional
        The name of the shape to generate or `None` to pick random ones.
    intensity_range : {tuple of tuples of uint8, tuple of uint8}, optional
        The range of values to sample pixel values from. For grayscale images
        the format is (min, max). For multichannel - ((min, max),) if the
        ranges are equal across the channels, and ((min_0, max_0), ... (min_N, max_N))
        if they differ. As the function supports generation of uint8 arrays only,
        the maximum range is (0, 255). If None, set to (0, 254) for each
        channel reserving color of intensity = 255 for background.
    allow_overlap : bool, optional
        If `True`, allow shapes to overlap.
    num_trials : int, optional
        How often to attempt to fit a shape into the image before skipping it.
    seed : int, optional
        Seed to initialize the random number generator.
        If `None`, a random seed from the operating system is used.
    Returns
    -------
    image : uint8 array
        An image with the fitted shapes.
    labels : list
        A list of labels, one per shape in the image. Each label is a
        (category, ((r0, r1), (c0, c1))) tuple specifying the category and
        bounding box coordinates of the shape.
    Examples
    --------
    >>> import skimage.draw
    >>> image, labels = skimage.draw.random_shapes((32, 32), max_shapes=3)
    >>> image # doctest: +SKIP
    array([
       [[255, 255, 255],
        [255, 255, 255],
        [255, 255, 255],
        ...,
        [255, 255, 255],
        [255, 255, 255],
        [255, 255, 255]]], dtype=uint8)
    >>> labels # doctest: +SKIP
    [('circle', ((22, 18), (25, 21))),
     ('triangle', ((5, 6), (13, 13)))]
    """
    if min_size > image_shape[0] or min_size > image_shape[1]:
        raise ValueError('Minimum dimension must be less than ncols and nrows')
    max_size = max_size or max(image_shape[0], image_shape[1])

    if not multichannel:
        num_channels = 1

    if intensity_range is None:
        intensity_range = (0, 254) if num_channels == 1 else ((0, 254), )
    else:
        tmp = (intensity_range, ) if num_channels == 1 else intensity_range
        for intensity_pair in tmp:
            for intensity in intensity_pair:
                if not (0 <= intensity <= 255):
                    msg = 'Intensity range must lie within (0, 255) interval'
                    raise ValueError(msg)

    random = np.random.RandomState(random_seed)
    user_shape = shape
    image_shape = (image_shape[0], image_shape[1], num_channels)
    image = np.ones(image_shape, dtype=np.uint8) * 255
    filled = np.zeros(image_shape, dtype=bool)
    labels = []

    num_shapes = random.randint(min_shapes, max_shapes + 1)
    colors = _generate_random_colors(num_shapes, num_channels, intensity_range,
                                     random)
    for shape_idx in range(num_shapes):
        if user_shape is None:
            shape_generator = random.choice(SHAPE_CHOICES)
        else:
            shape_generator = SHAPE_GENERATORS[user_shape]
        shape = (min_size, max_size)
        for _ in range(num_trials):
            # Pick start coordinates.
            loc0 = 0.35 * image_shape[0]
            scale0 = 0.30 * image_shape[0]
            loc1 = 0.35 * image_shape[1]
            scale1 = 0.30 * image_shape[1]
            row = np.random.normal(loc0, scale0)
            column = np.random.normal(loc1, scale1)
            point = (int(row), int(column))
            try:
                indices, label = shape_generator(point, image_shape, shape,
                                                 random)
            except ArithmeticError:
                # Couldn't fit the shape, skip it.
                continue
            # Check if there is an overlap where the mask is nonzero.
            indices = tuple(indices)
            if allow_overlap or not filled[indices].any():
                image[indices] = colors[shape_idx]
                filled[indices] = True
                labels.append(label)
                break
        else:
            warn('Could not fit any shapes to image, '
                 'consider reducing the minimum dimension')

    if not multichannel:
        image = np.squeeze(image, axis=2)
    return image, labels
Example #13
import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
from skimage._shared.utils import warn
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap

if has_qt and 'agg' not in mpl.get_backend().lower():
    warn("Recommended matplotlib backend is `Agg` for full "
         "skimage.viewer functionality.")

__all__ = [
    'init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage', 'LinearColormap',
    'ClearColormap', 'FigureCanvas', 'new_plot', 'update_axes_image'
]

QApp = None


def init_qtapp():
    """Initialize QAppliction.

    The QApplication needs to be initialized before creating any QWidgets
    """
    global QApp
    QApp = QtWidgets.QApplication.instance()
    if QApp is None:
        QApp = QtWidgets.QApplication([])
    return QApp
Example #14
def _wavelet_threshold(image,
                       wavelet,
                       method=None,
                       threshold=None,
                       sigma=None,
                       mode='soft',
                       wavelet_levels=None):
    """Perform wavelet thresholding.
    Parameters
    ----------
    image : ndarray (2d or 3d) of ints, uints or floats
        Input data to be denoised. `image` can be of any numeric type,
        but it is cast into an ndarray of floats for the computation
        of the denoised image.
    wavelet : string
        The type of wavelet to perform. Can be any of the options
        pywt.wavelist outputs. For example, this may be any of ``{db1, db2,
        db3, db4, haar}``.
    method : {'BayesShrink', 'VisuShrink'}, optional
        Thresholding method to be used. The currently supported methods are
        "BayesShrink" [1]_ and "VisuShrink" [2]_. If it is set to None, a
        user-specified ``threshold`` must be supplied instead.
    threshold : float, optional
        The thresholding value to apply during wavelet coefficient
        thresholding. The default value (None) uses the selected ``method`` to
        estimate appropriate threshold(s) for noise removal.
    sigma : float, optional
        The standard deviation of the noise. The noise is estimated when sigma
        is None (the default) by the method in [2]_.
    mode : {'soft', 'hard'}, optional
        An optional argument to choose the type of denoising performed. It is
        noted that choosing soft thresholding given additive noise finds the
        best approximation of the original image.
    wavelet_levels : int or None, optional
        The number of wavelet decomposition levels to use.  The default is
        three less than the maximum number of possible decomposition levels
        (see Notes below).
    Returns
    -------
    out : ndarray
        Denoised image.
"""
    wavelet = pywt.Wavelet(wavelet)

    # original_extent is used to workaround PyWavelets issue #80
    # odd-sized input results in an image with 1 extra sample after waverecn
    original_extent = [slice(s) for s in image.shape]

    # Determine the number of wavelet decomposition levels
    if wavelet_levels is None:
        # Determine the maximum number of possible levels for image
        dlen = wavelet.dec_len
        wavelet_levels = np.min(
            [pywt.dwt_max_level(s, dlen) for s in image.shape])

        # Skip coarsest wavelet scales (see Notes in docstring).
        wavelet_levels = max(wavelet_levels - 3, 1)

    coeffs = pywt.wavedecn(image, wavelet=wavelet, level=wavelet_levels)
    # Detail coefficients at each decomposition level
    dcoeffs = coeffs[1:]

    if sigma is None:
        # Estimate the noise via the method in [2]_
        detail_coeffs = dcoeffs[-1]['d' * image.ndim]
        sigma = _sigma_est_dwt(detail_coeffs, distribution='Gaussian')

    if method is not None and threshold is not None:
        warn(("Thresholding method {} selected.  The user-specified threshold "
              "will be ignored.").format(method))

    if threshold is None:
        var = sigma**2
        if method is None:
            raise ValueError(
                "If method is None, a threshold must be provided.")
        elif method == "BayesShrink":
            # The BayesShrink thresholds from [1]_ in docstring
            threshold = [{
                key: _bayes_thresh(level[key], var)
                for key in level
            } for level in dcoeffs]
        elif method == "VisuShrink":
            # The VisuShrink thresholds from [2]_ in docstring
            threshold = _universal_thresh(image, sigma)
        else:
            raise ValueError("Unrecognized method: {}".format(method))

    if np.isscalar(threshold):
        # A single threshold for all coefficient arrays
        denoised_detail = [{
            key: pywt.threshold(level[key], value=threshold, mode=mode)
            for key in level
        } for level in dcoeffs]
    else:
        # Dict of unique threshold coefficients for each detail coeff. array
        denoised_detail = [{
            key: pywt.threshold(level[key], value=thresh[key], mode=mode)
            for key in level
        } for thresh, level in zip(threshold, dcoeffs)]
    denoised_coeffs = [coeffs[0]] + denoised_detail
    return pywt.waverecn(denoised_coeffs, wavelet)[original_extent]
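_wavelet_threshold is a private helper; a sketch of the public wrapper skimage.restoration.denoise_wavelet, which performs the same BayesShrink/VisuShrink thresholding:

import numpy as np
from skimage import data, img_as_float
from skimage.restoration import denoise_wavelet

image = img_as_float(data.camera())
rng = np.random.default_rng(0)
noisy = np.clip(image + 0.1 * rng.standard_normal(image.shape), 0.0, 1.0)
denoised = denoise_wavelet(noisy, wavelet='db2', method='BayesShrink', mode='soft')
print(denoised.shape)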
Example #15
def random_shapes(size,
                  p,
                  image_shape,
                  max_shapes,
                  min_shapes=1,
                  multichannel=True,
                  num_channels=3,
                  shape=None,
                  intensity_range=None,
                  allow_overlap=False,
                  num_trials=10000,
                  random_seed=None):

    if not multichannel:
        num_channels = 1

    if intensity_range is None:
        intensity_range = (0, 254) if num_channels == 1 else ((0, 254), )
    else:
        tmp = (intensity_range, ) if num_channels == 1 else intensity_range
        for intensity_pair in tmp:
            for intensity in intensity_pair:
                if not (0 <= intensity <= 255):
                    msg = 'Intensity range must lie within (0, 255) interval'
                    raise ValueError(msg)

    random = np.random.RandomState(random_seed)
    image_shape = (image_shape[0], image_shape[1], num_channels)
    image = np.full(image_shape, 255, dtype=np.uint8)
    filled = np.zeros(image_shape, dtype=bool)
    labels = []

    num_shapes = random.randint(min_shapes, max_shapes + 1)
    colors = _generate_random_colors(num_shapes, num_channels, intensity_range,
                                     random)
    angle = np.random.uniform(-math.pi, math.pi)
    OorA = np.random.random()
    lean = np.random.random()

    if shape is None:
        shape_generator = random.choice(SHAPE_CHOICES)
    else:
        shape_generator = SHAPE_GENERATORS[shape]
    #### value passing so that the identical generated shapes end up with the same color
    A = range(num_shapes)
    ####
    for shape_idx in A:
        for _ in range(num_trials):
            # Pick start coordinates.
            column = random.randint(image_shape[1])
            row = random.randint(image_shape[0])
            point = (row, column)
            try:
                indices, label = shape_generator(point, image_shape, size, p,
                                                 random, OorA, lean, angle)
            except ArithmeticError:
                # Couldn't fit the shape, skip it.
                continue
            # Check if there is an overlap where the mask is nonzero.
            if allow_overlap or not filled[indices].any():
                image[indices] = colors[A[0]]
                filled[indices] = True
                labels.append(label)
                break
        else:
            warn('Could not fit any shapes to image, '
                 'consider reducing the minimum dimension')

    if not multichannel:
        image = np.squeeze(image, axis=2)
    return image, labels, angle, colors, shape, size
Example #16
def threshold_otsu(image, nbins=256):
    """Return threshold value based on Otsu's method.
    Parameters
    ----------
    image : (N, M) ndarray
        Grayscale input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    Returns
    -------
    threshold : float
        Upper threshold value. All pixels with an intensity higher than
        this value are assumed to be foreground.
    Raises
    ------
    ValueError
         If `image` only contains a single grayscale value.
    References
    ----------
    .. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method
    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_otsu(image)
    >>> binary = image <= thresh
    Notes
    -----
    The input image must be grayscale.
    """
    if len(image.shape) > 2 and image.shape[-1] in (3, 4):
        msg = "threshold_otsu is expected to work correctly only for " \
              "grayscale images; image shape {0} looks like an RGB image"
        warn(msg.format(image.shape))

    # Check if the image is multi-colored or not
    if image.min() == image.max():
        raise ValueError("threshold_otsu is expected to work with images "
                         "having more than one color. The input image seems "
                         "to have just one color {0}.".format(image.min()))

    image = np.asarray([x for x in image.ravel() if x != 255])
    image = np.asarray([x for x in image.ravel() if x != 0])
    print(image.ravel())

    hist, bin_centers = histogram(image.ravel(), nbins)
    hist = hist.astype(float)

    # class probabilities for all possible thresholds
    weight1 = np.cumsum(hist)
    weight2 = np.cumsum(hist[::-1])[::-1]
    # class means for all possible thresholds
    mean1 = np.cumsum(hist * bin_centers) / weight1
    mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]

    # Clip ends to align class 1 and class 2 variables:
    # The last value of `weight1`/`mean1` should pair with zero values in
    # `weight2`/`mean2`, which do not exist.
    variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:])**2

    idx = np.argmax(variance12)
    threshold = bin_centers[:-1][idx]

    #threshold = 150
    return threshold
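The excerpt above is a modified Otsu that first discards 0 and 255 values; a sketch of the standard skimage.filters.threshold_otsu for comparison:

from skimage import data
from skimage.filters import threshold_otsu

image = data.camera()
thresh = threshold_otsu(image)
binary = image > thresh  # pixels brighter than the threshold are treated as foreground
print(thresh, binary.mean())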
def load_parec_data(subject_name = 'AH'):

    # ============
    # set base path
    # ============
    basepath = os.getcwd() + '/../../data/eth_ibt/' + subject_name
    
    # ============    
    # set paths according to the subject that has to be loaded (default subject: AH)
    # ============
    if subject_name == 'AH':
        file_prefix = '/an_27052015_1027340_4_2_wipqflow_fbclearV4_'   
        path_s = basepath + file_prefix + 'S.rec'
        path_m = basepath + file_prefix + 'M.rec'
        path_p = basepath + file_prefix + 'P.rec'
    elif subject_name == 'CB':
        file_prefix = '/ch_11122015_1428290_4_2_wipqflow_fb_experiment1V4_'        
        path_s = basepath + file_prefix + 'S.rec'
        path_m = basepath + file_prefix + 'M.rec'
        path_p = basepath + file_prefix + 'P.rec'
    elif subject_name == 'DG':
        file_prefix = '/da_15072015_1612350_3_2_wipqflow_fbclearV4_'        
        path_s = basepath + file_prefix + 'S.rec'
        path_m = basepath + file_prefix + 'M.rec'
        path_p = basepath + file_prefix + 'P.rec'
    elif subject_name == 'LT':
        file_prefix = '/lo_27112015_1256300_2_2_wipqflow_fb_experiment1V4_'
        path_s = basepath + file_prefix + 'S.rec'
        path_m = basepath + file_prefix + 'M.rec'
        path_p = basepath + file_prefix + 'P.rec'
    elif subject_name == 'JR':
        file_prefix = '/ju_27052015_1142050_4_2_wipqflow_fbclearV4_'
        path_s = basepath + 'ju_27052015_1208240_5_1_wipqflow_fbclearV42.rec'
        path_m = basepath + file_prefix + 'M.rec'
        path_p = basepath + file_prefix + 'P.rec'        
        
    # ============
    # if wrong input is given, break function and return the warning in console
    # ============
    else:
        warn('Invalid subject')
        return None

    # ============
    # load the data into arrays
    # ============
    data_s = nib.parrec.load(path_s).get_data()
    data_m = nib.parrec.load(path_m).get_data()
    data_p = nib.parrec.load(path_p).get_data()
    
    # ============
    # calculate the number of timesteps and slices.
    # note that the timesteps have to be divided by 2 as we have a magnitude and phase image for each time step
    # ============
    num_times = int(data_s.shape[3] / 2)
    num_slices = int(data_s.shape[2])
    
    # ============
    # return the desired vector of the loaded data
    # ============
    parec_data = [data_m, data_p, data_s, num_times, num_slices]
   
    return parec_data