Code Example #1
File: measure.py  Project: sbernasek/flyqma
    def measure_expression(self, im, labels, segment_ids):
        """
        Measure expression levels.

        Args:

            im (np.ndarray[float]) - 3D array of pixel values

            labels (np.ndarray[int]) - cell segment labels

            segment_ids (np.ndarray[int]) - ordered segment IDs

        """

        # split R/G/B image channels
        drop = lambda x: x.reshape(*x.shape[:2])
        channels = [drop(x) for x in np.split(im, self.colordepth, axis=-1)]

        # compute means
        means = [mean(channel, labels, segment_ids) for channel in channels]

        # compute std
        with catch_warnings():
            filterwarnings('ignore')
            evaluate_std = lambda x: standard_deviation(x, labels, segment_ids)
            stds = [evaluate_std(channel) for channel in channels]

        # compile dictionaries
        self.levels = dict(enumerate(means))
        self.std = dict(enumerate(stds))
Code Example #2
File: curvature.py  Project: imane-aanna/adel
def interpolate_curvature(curvatures, times, kind='cubic'):
    """ Interpolate the curvatures.

    A curvature is a tuple (s, d(angle)/ds, t): a parametrisation `s`, the derivative of the angle along `s`, and a parameter t in [0, 1].
    Return a surface f(s,t).
    """
    import numpy as np

    from scipy import interpolate
    from scipy.ndimage import measurements
    
    if len(curvatures) == 3 and not isinstance(curvatures[2], tuple):
        curvatures = [curvatures]

    curv_abs = [c[0] for c in curvatures]
    curves = [c[1] for c in curvatures]
    params = [c[2] for c in curvatures]

    n = len(params)
    if n == 1:
        curv_abs *= 2
        curves *= 2
        curves[0] = np.zeros(len(curves[0]))
        params = [0., 1.]
    elif False:
        if params[0] != 0.:
            params.insert(0, 0.)
            curves.insert(0, curves[0])
            curv_abs.insert(0, curv_abs[0])
        if params[-1] != 0:
            params.append(1.)
            curves.append(curves[-1])
            curv_abs.append(curv_abs[-1])

    # compute a common parametrisation s
    # We conserve the last parametrisation because the result is very sensitive
    if True:
        min_s = min(np.diff(s).min() for s in curv_abs)
        s_new = np.unique(np.array(curv_abs).flatten())
        ds = np.diff(s_new)
        k = np.cumsum(ds >= min_s)
        labels = range(k.min(), k.max() + 1)
        s = np.zeros(len(labels) + 1)
        s[1:] = measurements.mean(s_new[1:], k, labels)
        s[-1] = 1.
        s = s_new
    else:
        s = np.array(curv_abs[-1])

    # renormalise all the curves
    curves = [
        np.interp(s[1:-1], old_s[1:-1], old_crv)
        for old_s, old_crv in zip(curv_abs, curves)
    ]

    # interpolate correctly the curvatures
    x = s[1:-1]
    y = np.array(params)
    z = np.array(curves)

    #f = interpolate.interp2d(y, x, z, kind=kind)
    f = interpolate.RectBivariateSpline(x, y, z.T, kx=1, ky=1)
    return s, f(x,times)
Code Example #3
 def _showMeanStd(self, busy=True):
     if busy:
         busy = BusyIndicator()
     dmin, dmax = self._subset_range
     subset, mask = self.image.optimalRavel(self._subset)
     dprint(5, "computing mean")
     mean = measurements.mean(subset, labels=mask, index=None if mask is None else False)
     dprint(5, "computing std")
     std = measurements.standard_deviation(subset, labels=mask, index=None if mask is None else False)
     dprint(5, "done")
     text = "  ".join([("%s: " + DataValueFormat) % (name, value) for name, value in
                       (("min", dmin), ("max", dmax), ("mean", mean), ("std", std))] + ["np: %d" % self._subset.size])
     self._wlab_stats.setText(text)
     self._wmore_stats.hide()
     # update markers
     ypos = 0.3
     self._line_mean.line.setData([mean, mean], [0, 1])
     self._line_mean.marker.setValue(mean, ypos)
     self._line_mean.setText(("\u03BC=" + DataValueFormat) % mean)
     self._line_mean.show()
     self._line_std.line.setData([mean - std, mean + std], [ypos, ypos])
     self._line_std.marker.setValue(mean, ypos)
     self._line_std.setText(("\u03C3=" + DataValueFormat) % std)
     self._line_std.show()
     self._histplot.replot()
Code Example #4
File: pixelated_stem_tools.py  Project: tinabe/pyxem
def _threshold_and_mask_single_frame(im, threshold=None, mask=None):
    image = copy.deepcopy(im)
    if mask is not None:
        image *= mask
    if threshold is not None:
        mean_value = measurements.mean(image, mask) * threshold
        image[image <= mean_value] = 0
        image[image > mean_value] = 1
    return image
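A minimal self-contained sketch (not from the pyxem project) of the masked-mean pattern this helper relies on; the values are made up, and `measurements.mean` lives at `scipy.ndimage.mean` in newer SciPy releases. With a boolean mask passed as `labels` and no `index`, the mean is taken over the masked pixels only:

import numpy as np
from scipy.ndimage import measurements

frame = np.arange(16, dtype=float).reshape(4, 4)    # toy image
mask = frame > 5                                     # boolean region of interest
mean_value = measurements.mean(frame, mask)          # mean over masked pixels only
binary = (frame > mean_value * 1.2).astype(int)      # threshold relative to that mean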
Code Example #5
File: stats.py  Project: mckinsel/starfish
def measure(im, labels, num_objs, measurement_type='mean'):
    if measurement_type == 'mean':
        res = spm.mean(im, labels, range(1, num_objs))
    elif measurement_type == 'max':
        res = spm.maximum(im, labels, range(1, num_objs))
    else:
        raise ValueError('Unsupported measurement type: {}'.format(measurement_type))

    return res
Code Example #6
File: dy_iterator.py  Project: jeanollion/distnet
def _get_prev_lab(prevlabelIm, labelIm, label, center):
    if int(
            labelIm[int(round(center[0])),
                    int(round(center[1]))]
    ) == label:  # in case center is included in object -> simply return value at center
        prev_lab = int(prevlabelIm[int(round(center[0])),
                                   int(round(center[1]))])
    else:
        prev_lab = int(round(mean(prevlabelIm, labelIm, label)))
    return prev_lab
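An illustrative toy sketch (not from the distnet project) of the fallback branch above: averaging the previous-frame label values over the current object's pixels and rounding recovers the dominant previous label.

import numpy as np
from scipy.ndimage import measurements

prevlabelIm = np.array([[1, 1, 0],
                        [1, 2, 2],
                        [0, 2, 2]])
labelIm = np.array([[0, 0, 0],
                    [0, 3, 3],
                    [0, 3, 3]])
# mean of previous labels over the pixels of object 3, rounded -> 2
prev_lab = int(round(measurements.mean(prevlabelIm, labelIm, 3)))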
Code Example #7
def get_superpixel_means_band(label_array: np.ndarray,
                              band: np.ndarray) -> np.ndarray:
    """Assume labels in label_array are 0, 1, 2, ..., n

    Returns array of shape = (len(np.unique(labels)) x 1)
    """
    labels_ = label_array + 1  # shift labels to 1, 2, ..., n+1; scipy treats 0 as background
    labels_unique = np.unique(labels_)
    means = measurements.mean(band, labels=labels_, index=labels_unique)
    return means.reshape((-1, 1))
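A short usage sketch with toy data, assuming the function above is in scope together with its module imports (numpy as np and scipy.ndimage.measurements); the direct call shows the equivalent labels/index arguments.

band = np.array([[1., 2., 3.],
                 [4., 5., 6.],
                 [7., 8., 9.]])
label_array = np.array([[0, 0, 1],
                        [0, 1, 1],
                        [2, 2, 2]])
means = get_superpixel_means_band(label_array, band)   # shape (3, 1)
# equivalent direct call, labels shifted so 0 is not treated as background
means_direct = measurements.mean(band, labels=label_array + 1,
                                 index=np.unique(label_array + 1))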
Code Example #8
 def getLMRectStats(self, rect):
     xx1, xx2, yy1, yy2 = self._lmRectToPix(rect)
     if xx1 is not None:
         subset = self.image.image()[xx1:xx2, yy1:yy2]
         subset, mask = self.image.optimalRavel(subset)
         mmin, mmax = measurements.extrema(subset, labels=mask, index=None if mask is None else False)[:2]
         mean = measurements.mean(subset, labels=mask, index=None if mask is None else False)
         std = measurements.standard_deviation(subset, labels=mask, index=None if mask is None else False)
         ssum = measurements.sum(subset, labels=mask, index=None if mask is None else False)
         return xx1, xx2, yy1, yy2, mmin, mmax, mean, std, ssum, subset.size
     return None
Code Example #9
def relabelise(filled, A, nofroi):
    labels, nlabs = meas.label(filled)
    aves = meas.mean(A, labels=labels, index=range(1, nlabs + 1))
    avlabs = sorted(zip(aves, np.arange(1, nlabs + 1)))
    intes = [a for a, l in avlabs[-nofroi:]]
    amed = np.median(intes)
    labs = [l for a, l in avlabs[-nofroi:] if a > amed / 100]
    newmask = np.zeros(filled.shape, "i")
    i = 1
    for l in labs:
        newmask[np.equal(labels, l)] = i
        i += 1
    return newmask
Code Example #10
def getPeaks(im, nstd=6, maxsize=None):
    """
    Detects the peaks using edge detection
    Parameters
    ----------
    im: 2d array
        The image to fit
    nstd: integer, default 6
        Number of STD to use for threshold
    maxsize: integer
        Maximum size of the peak
    
    Returns
    -------
    peaks: 2d array
        mask of the peaks location
        
    Notes
    -----
    Positions of high slope and internal parts are marked
    """
    im = np.asarray(im, dtype='float')
    imblur = np.empty(im.shape)
    edge = cr.Scharr_edge(im, imblur=imblur)
    threshold = np.nanmean(edge) + nstd * np.nanstd(edge)
    peaks = edge > threshold

    labels, n = msr.label(peaks)
    intensity_inclusions = msr.mean(imblur, labels, np.arange(n) + 1)

    for i in np.arange(n) + 1:
        if intensity_inclusions[i - 1] > np.nanmean(imblur):
            high, m = msr.label(imblur > intensity_inclusions[i - 1])
            for j in np.unique(high[np.logical_and(labels == i, high > 0)]):
                labels[high == j] = i

            if maxsize is not None and np.sum(labels == i) > maxsize:
                labels[labels == i] = 0
        else:
            labels[labels == i] = 0
    """
    from matplotlib.pyplot import figure, hist, plot, title, ylim
    figure()
    hist(edge[np.isfinite(edge)],100)
    plot(threshold*np.array([1,1]),[0,ylim()[1]])
    title(str(threshold))
    #"""
    return labels > 0
Code Example #11
File: roi.py  Project: yugangzhang/scikit-beam
def mean_intensity(images, labeled_array, index=None):
    """Compute the mean intensity for each ROI in the image list

    Parameters
    ----------
    images : list
        List of images
    labeled_array : array
        labeled array; 0 is background.
        Each ROI is represented by a nonzero integer. It is not required that
        the ROI labels are contiguous
    index : int, list, optional
        The ROIs to use. If None, this function will extract averages for all
        ROIs

    Returns
    -------
    mean_intensity : array
        The mean intensity of each ROI for all `images`
        Dimensions:
            len(mean_intensity) == len(index)
            len(mean_intensity[0]) == len(images)
    index : list
        The labels for each element of the `mean_intensity` list

    """
    if labeled_array.shape != images[0].shape[0:]:
        raise ValueError(
            "`images` shape (%s) needs to be equal to the labeled_array shape"
            "(%s)" % (images[0].shape, labeled_array.shape)
        )
    # handle various input for `index`
    if index is None:
        index = list(np.unique(labeled_array))
        index.remove(0)
    try:
        len(index)
    except TypeError:
        index = [index]
    # pre-allocate an array for performance
    # might be able to use list comprehension to make this faster
    mean_intensity = np.zeros((images.shape[0], len(index)))
    for n, img in enumerate(images):
        # use a mean that is mask-aware
        mean_intensity[n] = ndim.mean(img, labeled_array, index=index)
    return mean_intensity, index
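A short usage sketch with toy data, assuming the mean_intensity above is defined alongside its module imports (numpy as np, scipy.ndimage as ndim); each row of the result holds the per-ROI means of one image.

import numpy as np

images = np.stack([np.full((4, 4), 1.0), np.full((4, 4), 3.0)])  # two frames
labeled_array = np.zeros((4, 4), dtype=int)
labeled_array[:2, :2] = 1    # ROI 1
labeled_array[2:, 2:] = 2    # ROI 2

means, index = mean_intensity(images, labeled_array)
# means.shape == (2, 2): rows are images, columns are ROIs; index == [1, 2]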
Code Example #12
File: roi.py  Project: sameera2004/scikit-beam
def mean_intensity(images, labeled_array, index=None):
    """Compute the mean intensity for each ROI in the image list

    Parameters
    ----------
    images : list
        List of images
    labeled_array : array
        labeled array; 0 is background.
        Each ROI is represented by a nonzero integer. It is not required that
        the ROI labels are contiguous
    index : int, list, optional
        The ROIs to use. If None, this function will extract averages for all
        ROIs

    Returns
    -------
    mean_intensity : array
        The mean intensity of each ROI for all `images`
        Dimensions:

          -  len(mean_intensity) == len(index)
          -  len(mean_intensity[0]) == len(images)
    index : list
        The labels for each element of the `mean_intensity` list
    """
    if labeled_array.shape != images[0].shape[0:]:
        raise ValueError(
            "`images` shape (%s) needs to be equal to the labeled_array shape"
            "(%s)" % (images[0].shape, labeled_array.shape))
    # handle various input for `index`
    if index is None:
        index = list(np.unique(labeled_array))
        index.remove(0)
    try:
        len(index)
    except TypeError:
        index = [index]
    # pre-allocate an array for performance
    # might be able to use list comprehension to make this faster
    mean_intensity = np.zeros((images.shape[0], len(index)))
    for n, img in enumerate(images):
        # use a mean that is mask-aware
        mean_intensity[n] = ndim.mean(img, labeled_array, index=index)
    return mean_intensity, index
Code Example #13
def sptr_mean(path, fname, boolean_mask):
    '''
    sptr_mean takes the hyperspectral image and the boolean array from the
    hyper_pixcorr function and returns the mean spectral intensity of each
    source in every spectral channel.

    Input Parameters:
    ------------
    path = str
        Path to the directory where hyperspectral images are located

    fname = str
        File name of the raw hyperspectral image

    boolean_mask = np.array
        Boolean mask of sources output by the hyper_pixcorr function

    Output:
    ------------
    src_sptr_mean = np.array
        Mean spectrum of sources across the corresponding source pixels
    '''

    # Reading the Raw hyperspectral image
    cube = utils.read_hyper(path, fname)

    # Storing the hyperspectral image as a memmap for future computations
    img = 1.0 * cube.data

    #Labeling the sources in Boolean Mask
    mask = boolean_mask
    labels, count = mm.label(mask)

    index = np.arange(count + 1)
    sptr_stack = []

    for i in range(0, img.shape[0]):
        channel = img[i, :-1, :-1]
        src_mean = mm.mean(channel, labels, index)
        sptr_stack.append(src_mean)  #redund
        #sptr_stack = np.array(sptr_stack)    #redund
        #sources = np.array([sptr_stack[:,i] for i in range(count)])   #redund

    return np.array(sptr_stack)
Code Example #14
File: signals.py  Project: mindandbrain/pipeline
def img_to_signals(in_file,
                   atlas_file,
                   mask_file=None,
                   background_label=0,
                   min_n_voxels=50):
    in_img = nib.load(in_file)

    atlas_img = nib.load(atlas_file)
    assert nvol(atlas_img) == 1
    assert atlas_img.shape[:3] == in_img.shape[:3]
    assert np.allclose(atlas_img.affine, in_img.affine)
    labels = np.asanyarray(atlas_img.dataobj).astype(np.int32)
    nlabel = labels.max()

    if mask_file is not None:
        mask_img = nib.load(mask_file)
        assert nvol(mask_img) == 1
        assert mask_img.shape[:3] == in_img.shape[:3]
        assert np.allclose(mask_img.affine, in_img.affine)
        mask_data = np.asanyarray(mask_img.dataobj).astype(bool)
        labels[np.logical_not(mask_data)] = background_label

    assert np.all(labels >= 0)

    indices, counts = np.unique(labels, return_counts=True)
    indices = indices[counts >= min_n_voxels]
    indices = np.setdiff1d(indices, [background_label])

    in_data = in_img.get_fdata()
    if in_data.ndim == 3:
        in_data = in_data[:, :, :, np.newaxis]
    assert in_data.ndim == 4

    result = np.full((in_data.shape[3], nlabel), np.nan)
    for i, img in enumerate(np.moveaxis(in_data, 3, 0)):
        result[i, indices - 1] = mean(img, labels=labels, index=indices)

    return result
Code Example #15
File: roi.py  Project: giltis/scikit-xray
def mean_intensity(images, labels, index=None):
    """
    Mean intensities of the ROIs of the labeled array for a set of images

    Parameters
    ----------
    images : array
        Intensity array of the images
        dimensions are: (num_img, num_rows, num_cols)

    labels : array
        labeled array; 0 is background.
        Each ROI is represented by a distinct label (i.e., integer).

    index : list
        labels list
        eg: 5 ROI's
        index = [1, 2, 3, 4, 5]

    Returns
    -------
    mean_intensity : array
        mean intensity of each ROI for the set of images as an array
        shape (len(images), number of labels)

    """
    if labels.shape != images[0].shape[0:]:
        raise ValueError("Shape of the images should be equal to"
                         " shape of the label array")
    if index is None:
        index = np.arange(1, np.max(labels) + 1)

    mean_intensity = np.zeros((images.shape[0], index.shape[0]))
    for n, img in enumerate(images):
        mean_intensity[n] = ndim.mean(img, labels, index=index)

    return mean_intensity, index
Code Example #16
def saveInclusions(im, labels, maskWorm, imagesFolder, fn, i, um2px2ratio=1,
                   im_number_per_worm_pixel=[], im_intensity=[], im_size=[],
                   im_index=[], j=0):
    if not os.path.isdir(imagesFolder):
        os.mkdir(imagesFolder)

    imagesFolder = imagesFolder + str(i)
    if j > 0:
        imagesFolder = imagesFolder + '_{}'.format(j)
    # get worm coverage
    prct = maskWorm.sum() / np.prod(im.shape)

    nl = labels.max()

    # extract normalized intensity
    intensity_inclusions = msr.mean(im, labels, np.arange(nl) + 1)
    # extract size
    size_inclusions = np.bincount(labels.flat)[1:] * um2px2ratio

    # plot image
    f = figure()
    imshow(im)
    colorbar()
    # if no inclusions
    if nl < 1:
        plt.title(fn + ', no Inclusions, %.1f %% worm' % (prct * 100))
        plt.savefig(imagesFolder + '_image')
        plt.close(f)
    else:

        # Fill array
        im_number_per_worm_pixel.append(nl / maskWorm.sum())
        im_intensity.append(intensity_inclusions.mean())
        im_size.append(size_inclusions.mean())
        im_index.append((i, j))

        plt.title(fn + ', %.1f %% worm' % (prct * 100))
        plt.savefig(imagesFolder + '_image')
        plt.close()

        # plot detected inclusions
        # coordinates = peak_local_max(im, min_distance=20)
        f = figure()
        imshow(labels)
        imshow(maskWorm, alpha=.5)
        # plot(coordinates[:, 1], coordinates[:, 0], 'w.')
        plt.title(
            "%d inclusions, mean = %.2f" %
            (nl, intensity_inclusions.mean()))
        plt.savefig(imagesFolder + '_inclusions')
        plt.close()

        # plot intensity
        f = figure()
        plt.hist(intensity_inclusions, 20)
        plt.xlabel('Normalized intensity')
        plt.ylabel('Number of inclusions')
        plt.savefig(imagesFolder + '_intensityHistogram')
        plt.close()

        f = figure()
        plt.hist(size_inclusions, 20)
        plt.xlabel('Size')
        plt.ylabel('Number of inclusions')
        plt.savefig(imagesFolder + '_sizeHistogram')
        plt.close()

    writeFileInfos(os.path.splitext(fn)[0] + "_{}.txt".format(j),
                   intensity_inclusions, size_inclusions, prct)
Code Example #17
File: microscopy.py  Project: sbernasek/growth
 def _measure(self, channel):
     """ Returns measured <channel> level in for each contour. """
     labels = self.segmentation
     index = np.arange(self.num_nuclei)
     return measurements.mean(self.im[channel], labels=labels, index=index)
Code Example #18
	def getMACDSignal(self, t):
		"""t時点におけるMACDシグナルを返します."""
		dat = [self.getMACD(t-i) for i in xrange(self.__z)]
		return mean(dat)
Code Example #19
File: curvature.py  Project: rbarillot/adel
def interpolate_curvature(curvatures, times, kind='cubic'):
    """ Interpolate the curvatures.

    A curvature is a tuple (s, d(angle)/ds, t): a parametrisation `s`, the derivative of the angle along `s`, and a parameter t in [0, 1].
    Return a surface f(s,t).
    """
    import numpy as np

    from scipy import interpolate
    from scipy.ndimage import measurements

    if len(curvatures) == 3 and not isinstance(curvatures[2], tuple):
        curvatures = [curvatures]

    curv_abs = [c[0] for c in curvatures]
    curves = [c[1] for c in curvatures]
    params = [c[2] for c in curvatures]

    n = len(params)
    if n == 1:
        curv_abs *= 2
        curves *= 2
        curves[0] = np.zeros(len(curves[0]))
        params = [0., 1.]
    elif False:
        if params[0] != 0.:
            params.insert(0, 0.)
            curves.insert(0, curves[0])
            curv_abs.insert(0, curv_abs[0])
        if params[-1] != 0:
            params.append(1.)
            curves.append(curves[-1])
            curv_abs.append(curv_abs[-1])

    # compute a common parametrisation s
    # We conserve the last parametrisation because the result is very sensitive
    if True:
        min_s = min(np.diff(s).min() for s in curv_abs)
        s_new = np.unique(np.array(curv_abs).flatten())
        ds = np.diff(s_new)
        k = np.cumsum(ds >= min_s)
        labels = range(k.min(), k.max() + 1)
        s = np.zeros(len(labels) + 1)
        s[1:] = measurements.mean(s_new[1:], k, labels)
        s[-1] = 1.
        s = s_new
    else:
        s = np.array(curv_abs[-1])

    # renormalise all the curves
    curves = [
        np.interp(s[1:-1], old_s[1:-1], old_crv)
        for old_s, old_crv in zip(curv_abs, curves)
    ]

    # interpolate correctly the curvatures
    x = s[1:-1]
    y = np.array(params)
    z = np.array(curves)

    #f = interpolate.interp2d(y, x, z, kind=kind)
    f = interpolate.RectBivariateSpline(x, y, z.T, kx=1, ky=1)
    return s, f(x, times)
Code Example #20
def meansignals(in_file,
                atlas_file,
                mask_file=None,
                background_label=0,
                min_region_coverage=0.5,
                output_coverage=False):
    in_img = nib.load(in_file)

    atlas_img = nib.load(atlas_file)
    assert nvol(atlas_img) == 1
    assert atlas_img.shape[:3] == in_img.shape[:3]
    assert np.allclose(atlas_img.affine, in_img.affine)
    labels = np.asanyarray(atlas_img.dataobj).astype(np.int32)

    nlabel = labels.max()

    assert background_label <= nlabel

    indices = np.arange(0, nlabel + 1, dtype=np.int32)

    out_region_coverage = None

    if mask_file is not None:
        mask_img = nib.load(mask_file)
        assert nvol(mask_img) == 1
        assert mask_img.shape[:3] == in_img.shape[:3]
        assert np.allclose(mask_img.affine, in_img.affine)
        mask_data = np.asanyarray(mask_img.dataobj).astype(bool)

        pre_counts = np.bincount(np.ravel(labels), minlength=nlabel + 1)
        pre_counts = pre_counts[:nlabel + 1].astype(np.float64)

        labels[np.logical_not(mask_data)] = background_label

        post_counts = np.bincount(np.ravel(labels), minlength=nlabel + 1)
        post_counts = post_counts[:nlabel + 1].astype(np.float64)

        region_coverage = post_counts / pre_counts
        region_coverage[np.isclose(pre_counts, 0)] = 0

        out_region_coverage = list(
            region_coverage[indices != background_label])
        assert len(out_region_coverage) == nlabel

        indices = indices[region_coverage >= min_region_coverage]

    indices = np.setdiff1d(indices, [background_label])

    assert np.all(labels >= 0)

    in_data = in_img.get_fdata()
    if in_data.ndim == 3:
        in_data = in_data[:, :, :, np.newaxis]
    assert in_data.ndim == 4

    result = np.full((in_data.shape[3], nlabel), np.nan)
    for i, img in enumerate(np.moveaxis(in_data, 3, 0)):
        result[i, indices - 1] = mean(img,
                                      labels=labels.reshape(img.shape),
                                      index=indices)

    if output_coverage is True:
        return result, out_region_coverage
    else:
        return result
Code Example #21
def label_objects(dataset=None, labels=None, out=None, out_features=None,
                  source=None, return_labels=False):
    DM = DataModel.instance()

    log.info('+ Loading data into memory')
    data = DM.load_slices(dataset)
    if labels is None:
        data += 1
        labels = set(np.unique(data)) - set([0])
    else:
        data += 1
        labels = np.asarray(labels) + 1

    obj_labels = []

    log.info('+ Extracting individual objects')
    new_labels = np.zeros(data.shape, np.int32)
    total_labels = 0
    num = 0

    for label in labels:
        mask = (data == label)
        tmp_data = data.copy()
        tmp_data[~mask] = 0
        tmp_labels, num = splabel(tmp_data, structure=octahedron(1))
        mask = (tmp_labels > 0)
        new_labels[mask] = tmp_labels[mask] + total_labels
        total_labels += num
        obj_labels += [label] * num

    log.info('+ {} Objects found'.format(total_labels))
    log.info('+ Saving results')
    DM.create_empty_dataset(out, DM.data_shape, new_labels.dtype)
    DM.write_slices(out, new_labels, params=dict(active=True, num_objects=total_labels))

    log.info('+ Loading source to memory')
    data = DM.load_slices(source)
    objs = new_labels
    objects = new_labels
    num_objects = total_labels
    objlabels = np.arange(1, num_objects+1)

    log.info('+ Computing Average intensity')
    feature = measure.mean(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[0], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[0], feature, params=dict(active=True))

    """log.info('+ Computing Median intensity')
    objs.shape = -1
    data.shape = -1
    feature = binned_statistic(objs, data, statistic='median',
                               bins=num_objects+1)[0]
    feature = feature[objlabels]
    out_features[1].write_direct(feature)
    out_features[1].attrs['active'] = True
    objs.shape = dataset.shape
    data.shape = dataset.shape"""

    log.info('+ Computing Sum of intensity')
    feature = measure.sum(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[1], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[1], feature, params=dict(active=True))

    log.info('+ Computing Standard Deviation of intensity')
    feature = measure.standard_deviation(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[2], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[2], feature, params=dict(active=True))

    log.info('+ Computing Variance of intensity')
    feature = measure.variance(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[3], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[3], feature, params=dict(active=True))

    log.info('+ Computing Area')
    objs.shape = -1
    feature = np.bincount(objs, minlength=num_objects+1)[1:]
    DM.create_empty_dataset(out_features[4], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[4], feature, params=dict(active=True))
    DM.create_empty_dataset(out_features[5], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[5], np.log10(feature), params=dict(active=True))
    objs.shape = data.shape

    log.info('+ Computing Bounding Box')
    obj_windows = measure.find_objects(objs)
    feature = []; depth = []; height = []; width = [];
    for w in obj_windows:
        feature.append((w[0].stop - w[0].start) *
                       (w[1].stop - w[1].start) *
                       (w[2].stop - w[2].start))
        depth.append(w[0].stop - w[0].start)
        height.append(w[1].stop - w[1].start)
        width.append(w[2].stop - w[2].start)

    feature = np.asarray(feature, np.float32)
    DM.create_empty_dataset(out_features[6], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[6], feature, params=dict(active=True))
    #depth
    depth = np.asarray(depth, np.float32)
    DM.create_empty_dataset(out_features[7], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[7], depth, params=dict(active=True))
    # height
    height = np.asarray(height, np.float32)
    DM.create_empty_dataset(out_features[8], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[8], height, params=dict(active=True))
    # width
    width = np.asarray(width, np.float32)
    DM.create_empty_dataset(out_features[9], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[9], width, params=dict(active=True))
    # log10
    DM.create_empty_dataset(out_features[10], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[10], np.log10(feature), params=dict(active=True))

    log.info('+ Computing Oriented Bounding Box')
    ori_feature = []; ori_depth = []; ori_height = []; ori_width = [];
    for i, w in enumerate(obj_windows):
        z, y, x = np.where(objs[w] == i+1)
        coords = np.c_[z, y, x]
        if coords.shape[0] >= 3:
            coords = PCA(n_components=3).fit_transform(coords)
        cmin, cmax = coords.min(0), coords.max(0)
        zz, yy, xx = (cmax[0] - cmin[0] + 1,
                      cmax[1] - cmin[1] + 1,
                      cmax[2] - cmin[2] + 1)
        ori_feature.append(zz * yy * xx)
        ori_depth.append(zz)
        ori_height.append(yy)
        ori_width.append(xx)

    ori_feature = np.asarray(ori_feature, np.float32)
    DM.create_empty_dataset(out_features[11], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[11], ori_feature, params=dict(active=True))
    #depth
    ori_depth = np.asarray(ori_depth, np.float32)
    DM.create_empty_dataset(out_features[12], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[12], ori_depth, params=dict(active=True))
    # height
    ori_height = np.asarray(ori_height, np.float32)
    DM.create_empty_dataset(out_features[13], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[13], ori_height, params=dict(active=True))
    # width
    ori_width = np.asarray(ori_width, np.float32)
    DM.create_empty_dataset(out_features[14], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[14], ori_width, params=dict(active=True))
    # log10
    DM.create_empty_dataset(out_features[15], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[15], np.log10(ori_feature), params=dict(active=True))

    log.info('+ Computing Positions')
    pos = measure.center_of_mass(objs, labels=objs, index=objlabels)
    pos = np.asarray(pos, dtype=np.float32)
    DM.create_empty_dataset(out_features[16], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[16], pos[:, 2].copy(), params=dict(active=True))
    DM.create_empty_dataset(out_features[17], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[17], pos[:, 1].copy(), params=dict(active=True))
    DM.create_empty_dataset(out_features[18], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[18], pos[:, 0].copy(), params=dict(active=True))

    if return_labels:
        return out, total_labels, np.asarray(obj_labels)

    return out, total_labels
Code Example #22
def commit(cutout,
           low_threshold=LOW_THRESHOLD,
           high_threshold=HIGH_THRESHOLD,
           cost_benefit_ratio=COST_BENEFIT_RATIO,
           close=lambda x, y: True,
           force=False):
    V = cutout.parent
    unique_list = cutout.central_unique_list

    traced_list = measurements.mean(cutout.traced, cutout.raw_labels,
                                    unique_list)
    current_list = measurements.mean(cutout.current_object, cutout.raw_labels,
                                     unique_list)
    volumes = measurements.sum(np.ones_like(cutout.current_object),
                               cutout.raw_labels, unique_list)

    positive_indices = [
        i for i in xrange(len(unique_list)) if high_threshold < traced_list[i]
    ]
    uncertain_indices = [
        i for i in xrange(len(unique_list))
        if low_threshold <= traced_list[i] <= high_threshold
    ]
    negative_indices = [
        i for i in xrange(len(unique_list)) if traced_list[i] < low_threshold
    ]

    cost = sum([volumes[i]*max(traced_list[i],1-traced_list[i]) for i in uncertain_indices]) + \
     sum([volumes[i]*traced_list[i] for i in negative_indices]) + \
     sum([volumes[i]*(1-traced_list[i]) for i in positive_indices])
    benefit = sum([
        abs(volumes[i] * (traced_list[i] - current_list[i]))
        for i in positive_indices + negative_indices
    ])
    """
	cost = sum([volumes[i] * traced_list[i]*(1-traced_list[i]) for i in xrange(len(unique_list))])
	benefit = sum([abs(volumes[i]*(round(traced_list[i])-current_list[i])) for i in xrange(len(unique_list))])
	"""

    cost2 = sum([volumes[i] for i in uncertain_indices]) + \
     sum([volumes[i]*traced_list[i] for i in negative_indices]) + \
     sum([volumes[i]*(1-traced_list[i]) for i in positive_indices])
    benefit2 = sum([
        abs(volumes[i] * (round(traced_list[i]) - current_list[i]))
        for i in positive_indices + negative_indices
    ])

    if not (len(uncertain_indices) == 0 or
            (cost_benefit_ratio is not None
             and benefit2 > cost_benefit_ratio * cost2) or force):
        raise ReconstructionException("not confident")

    split_point = (high_threshold + low_threshold) / 2
    rounded_positive = [
        unique_list[i] for i in xrange(len(unique_list))
        if traced_list[i] > split_point
    ]
    rounded_negative = [
        unique_list[i] for i in xrange(len(unique_list))
        if traced_list[i] <= split_point
    ]

    if not V.valid.isdisjoint(rounded_positive):
        raise ReconstructionException("blocking merge to valid segment")

    full_segment = bfs(V.G, rounded_positive)
    if not V.glial.isdisjoint(full_segment):
        raise ReconstructionException("blocking merge to glial cell")
    if len(V.dendrite & full_segment) > 2:
        raise ReconstructionException("blocking merge of two dendritic trunks")

    original_components = list(
        nx.connected_components(V.G.subgraph(cutout.unique_list)))
    regiongraphs.add_clique(V.G, rounded_positive, guard=close)
    regiongraphs.delete_bipartite(V.G, rounded_positive, rounded_negative)
    new_components = list(
        nx.connected_components(V.G.subgraph(cutout.unique_list)))
    changed_list = set(cutout.unique_list) - set.union(
        *([set([])] + [s for s in original_components if s in new_components]))
    changed_cutout = indicator(cutout.raw_labels, changed_list)
    V.changed[cutout.region] = np.maximum(V.changed[cutout.region],
                                          changed_cutout)

    if not GLOBAL_EXPAND:
        cutout.G = V.G.subgraph(cutout.unique_list)

    return len(changed_list) > 0
Code Example #23
    lopen = open(lname, "w")
    lopen.write("Extracting lightcurves for {0} observations...\n========\n" \
                    .format(nobs))

    # -- initialize the lightcurve array
    lcs = np.zeros((nobs, nbbl), dtype=float) - 9999

    # -- loop over observations
    lopen.write("  FIX THE DIMENSIONS!!!\n")

    for ii, fname in enumerate(flist):
        if ii % 10 == 0:
            lopen.write("  obs {0} of {1}\n".format(ii, nobs))
            lopen.flush()

        # - get luminosities
        img = imread(fname)[5::2, 1::2]
        lcs[ii] = ndm.mean(img, bbls, blist)

        # - periodically write to file
        if (ii + 1) % 100 == 0:
            np.save(oname, lcs[:ii])

    # -- write to file
    lopen.write("\nWriting to npy...\n========\n")
    lopen.flush()
    np.save(oname, lcs)
    lopen.write("FINISHED in {0}s\n".format(time.time() - t0))
    lopen.flush()
    lopen.close()
Code Example #24
File: parallel_fragments.py  Project: yajivunev/lsd
def watershed_in_block(
        affs,
        block,
        context,
        rag_provider,
        fragments_out,
        num_voxels_in_block,
        mask=None,
        fragments_in_xy=False,
        epsilon_agglomerate=0.0,
        filter_fragments=0.0,
        min_seed_distance=10,
        replace_sections=None):
    '''

    Args:

        filter_fragments (float):

            Filter fragments that have an average affinity lower than this
            value.

        min_seed_distance (int):

            Controls distance between seeds in the initial watershed. Reducing
            this value improves downsampled segmentation.
    '''

    total_roi = affs.roi

    logger.debug("reading affs from %s", block.read_roi)

    affs = affs.intersect(block.read_roi)
    affs.materialize()

    if affs.dtype == np.uint8:
        logger.info("Assuming affinities are in [0,255]")
        max_affinity_value = 255.0
        affs.data = affs.data.astype(np.float32)
    else:
        max_affinity_value = 1.0

    if mask is not None:

        logger.debug("reading mask from %s", block.read_roi)
        mask_data = get_mask_data_in_roi(mask, affs.roi, affs.voxel_size)
        logger.debug("masking affinities")
        affs.data *= mask_data

    # extract fragments
    fragments_data, _ = watershed_from_affinities(
        affs.data,
        max_affinity_value,
        fragments_in_xy=fragments_in_xy,
        min_seed_distance=min_seed_distance)

    if mask is not None:
        fragments_data *= mask_data.astype(np.uint64)

    if filter_fragments > 0:

        if fragments_in_xy:
            average_affs = np.mean(affs.data[0:2]/max_affinity_value, axis=0)
        else:
            average_affs = np.mean(affs.data/max_affinity_value, axis=0)

        filtered_fragments = []

        fragment_ids = np.unique(fragments_data)

        for fragment, mean in zip(
                fragment_ids,
                measurements.mean(
                    average_affs,
                    fragments_data,
                    fragment_ids)):
            if mean < filter_fragments:
                filtered_fragments.append(fragment)

        filtered_fragments = np.array(
            filtered_fragments,
            dtype=fragments_data.dtype)
        replace = np.zeros_like(filtered_fragments)
        replace_values(fragments_data, filtered_fragments, replace, inplace=True)

    if epsilon_agglomerate > 0:

        logger.info(
            "Performing initial fragment agglomeration until %f",
            epsilon_agglomerate)

        generator = waterz.agglomerate(
                affs=affs.data/max_affinity_value,
                thresholds=[epsilon_agglomerate],
                fragments=fragments_data,
                scoring_function='OneMinus<HistogramQuantileAffinity<RegionGraphType, 25, ScoreValue, 256, false>>',
                discretize_queue=256,
                return_merge_history=False,
                return_region_graph=False)
        fragments_data[:] = next(generator)

        # cleanup generator
        for _ in generator:
            pass

    if replace_sections:

        logger.info("Replacing sections...")

        block_begin = block.write_roi.get_begin()
        shape = block.write_roi.get_shape()

        z_context = context[0]/affs.voxel_size[0]
        logger.info("Z context: %i",z_context)

        mapping = {}

        voxel_offset = block_begin[0]/affs.voxel_size[0]

        for i,j in zip(
                range(fragments_data.shape[0]),
                range(shape[0])):
            mapping[i] = i
            mapping[j] = int(voxel_offset + i) \
                    if block_begin[0] == total_roi.get_begin()[0] \
                    else int(voxel_offset + (i - z_context))

        logging.info('Mapping: %s', mapping)

        replace = [k for k,v in mapping.items() if v in replace_sections]

        for r in replace:
            logger.info("Replacing mapped section %i with zero", r)
            fragments_data[r] = 0

    #todo add key value replacement option

    fragments = daisy.Array(fragments_data, affs.roi, affs.voxel_size)

    # crop fragments to write_roi
    fragments = fragments[block.write_roi]
    fragments.materialize()
    max_id = fragments.data.max()

    # ensure we don't have IDs larger than the number of voxels (that would
    # break uniqueness of IDs below)
    if max_id > num_voxels_in_block:
        logger.warning(
            "fragments in %s have max ID %d, relabelling...",
            block.write_roi, max_id)
        fragments.data, max_id = relabel(fragments.data)

        assert max_id < num_voxels_in_block

    # ensure unique IDs
    id_bump = block.block_id[1]*num_voxels_in_block
    logger.debug("bumping fragment IDs by %i", id_bump)
    fragments.data[fragments.data>0] += id_bump
    fragment_ids = range(id_bump + 1, id_bump + 1 + int(max_id))

    # store fragments
    logger.debug("writing fragments to %s", block.write_roi)
    fragments_out[block.write_roi] = fragments

    # following only makes a difference if fragments were found
    if max_id == 0:
        return

    # get fragment centers
    fragment_centers = {
        fragment: block.write_roi.get_offset() + affs.voxel_size*daisy.Coordinate(center)
        for fragment, center in zip(
            fragment_ids,
            measurements.center_of_mass(fragments.data, fragments.data, fragment_ids))
        if not np.isnan(center[0])
    }

    # store nodes
    rag = rag_provider[block.write_roi]
    rag.add_nodes_from([
        (node, {
            'center_z': c[0],
            'center_y': c[1],
            'center_x': c[2]
            }
        )
        for node, c in fragment_centers.items()
    ])
    rag.write_nodes(block.write_roi)
Code Example #25
	def evaluate(self, t):
		self.asset.lock(t)
		dat = self.asset.getPreviousData(t, self.length)
		self.asset.unlock()
		return mean(dat)
Code Example #26
 def evaluate(self, t):
     self.asset.lock(t)
     dat = self.asset.getPreviousData(t, self.length)
     self.asset.unlock()
     return mean(dat)
Code Example #27
def analyze(cutout, example_id):
    V = cutout.parent
    unique_list = cutout.central_unique_list
    args = [cutout.raw_labels, unique_list]
    tic()
    guess = measurements.mean(cutout.traced, *args)
    truth = measurements.mean(
        cutout.local_human_labels[np.unravel_index(
            np.argmax(cutout.traced),
            cutout.raw_labels.shape)] == cutout.local_human_labels, *args)
    volumes = measurements.sum(np.ones_like(cutout.raw_labels), *args)
    histogram_list = list(ndimage.histogram(cutout.traced, 0, 1, 10, *args))
    histogram = np.histogram(crop(cutout.traced, CENTRAL_CROP), bins=10)
    toc("compute statistics")

    tic()
    high_threshold = HIGH_THRESHOLD
    low_threshold = LOW_THRESHOLD
    traced_list = measurements.mean(cutout.traced, cutout.raw_labels,
                                    unique_list)
    proofread_list = measurements.mean(cutout.proofread_object,
                                       cutout.raw_labels, unique_list)
    current_list = measurements.mean(cutout.current_object, cutout.raw_labels,
                                     unique_list)
    volumes = measurements.sum(np.ones_like(cutout.current_object),
                               cutout.raw_labels, unique_list)

    positive_indices = [
        i for i in xrange(len(unique_list)) if high_threshold < traced_list[i]
    ]
    uncertain_indices = [
        i for i in xrange(len(unique_list))
        if low_threshold <= traced_list[i] <= high_threshold
    ]
    negative_indices = [
        i for i in xrange(len(unique_list)) if traced_list[i] < low_threshold
    ]

    cost = sum([volumes[i]*max(traced_list[i],1-traced_list[i]) for i in uncertain_indices]) + \
     sum([volumes[i]*traced_list[i] for i in negative_indices]) + \
     sum([volumes[i]*(1-traced_list[i]) for i in positive_indices])
    benefit = sum([
        abs(volumes[i] * (traced_list[i] - current_list[i]))
        for i in positive_indices + negative_indices
    ])

    cost2 = sum([volumes[i] for i in uncertain_indices]) + \
     sum([volumes[i]*traced_list[i] for i in negative_indices]) + \
     sum([volumes[i]*(1-traced_list[i]) for i in positive_indices])
    benefit2 = sum([
        abs(volumes[i] * (round(traced_list[i]) - current_list[i]))
        for i in positive_indices + negative_indices
    ])

    true_cost = sum([
        volumes[i] * abs(round(traced_list[i]) - proofread_list[i])
        for i in xrange(len(unique_list))
    ])
    true_benefit = sum([
        volumes[i] * (abs(proofread_list[i] - round(current_list[i])) -
                      abs(proofread_list[i] - traced_list[i]))
        for i in xrange(len(unique_list))
    ])
    toc("cost benefit analysis")

    tic()
    positive = [
        unique_list[i] for i in xrange(len(unique_list)) if guess[i] > 0.5
    ]
    negative = [
        unique_list[i] for i in xrange(len(unique_list)) if guess[i] <= 0.5
    ]
    new_graph = V.G.subgraph(cutout.unique_list).copy()

    regiongraphs.add_clique(new_graph, positive)
    regiongraphs.delete_bipartite(new_graph, positive, negative)

    new_obj = indicator(cutout.raw_labels, bfs(new_graph, positive))
    new_errors_cutout = crop(
        reconstruct_utils.discrim_online_daemon(cutout.image, new_obj),
        ERRORS_CROP)
    old_errors_cutout = crop(cutout.errors * new_obj, ERRORS_CROP)
    #d_error = crop(new_errors_cutout,ERRORS_CROP) - crop(old_errors_cutout,ERRORS_CROP)
    #print(np.histogram(d_error, bins=20, range=(-1.0,1.0)))

    satisfaction = np.sum(
        crop(np.abs(cutout.current_object - cutout.traced), CENTRAL_CROP))
    toc("computing change in error")

    guess_margin = np.min(np.append(guess[guess > 0.5], 1)) - np.max(
        np.append(guess[guess <= 0.5], 0))
    true_margin = np.min(np.append(guess[truth > 0.5], 1)) - np.max(
        np.append(guess[truth <= 0.5], 0))

    df1 = pd.DataFrame.from_dict({
        "guess":
        guess,
        "truth":
        truth,
        "volume":
        volumes,
        "seg_id":
        unique_list,
        "example_id": [example_id for i in unique_list],
        "histogram":
        histogram_list
    })
    df2 = pd.DataFrame.from_dict({
        "guess_margin": [guess_margin],
        "true_margin": [true_margin],
        "err_max": [np.max(new_errors_cutout)],
        "err_min": [np.min(new_errors_cutout)],
        "err_mean": [np.mean(new_errors_cutout)],
        "satisfaction": [satisfaction],
        "histogram": [histogram],
        "example_id": [example_id],
        "cost": [cost],
        "benefit": [benefit],
        "cost2": [cost2],
        "benefit2": [benefit2],
        "true_cost": [true_cost],
        "true_benefit": [true_benefit],
    })
    return df1, df2