Example #1
    def get_edge(self, pos=None, rep=1):
        if not np.iterable(pos):
            pos = self.get_pos()
        from scipy import ndimage
        xlst = np.unique(pos[0])
        ylst = np.unique(pos[1])
        out = np.array([xlst, ndimage.minimum(pos[1], pos[0], xlst)]).T
        out = np.r_[
            out,
            np.array([xlst, ndimage.maximum(pos[1], pos[0], xlst)]).T]
        out2 = np.array([ndimage.minimum(pos[0], pos[1], ylst), ylst]).T
        out2 = np.r_[out2,
                     np.array([ndimage.maximum(pos[0], pos[1], ylst), ylst]).T]
        #l.scatter(*pos)
        cent = pos.mean(1)
        allang = np.r_[np.arctan2(out[:, 0] - cent[0], out[:, 1] - cent[1]),
                       np.arctan2(out2[:, 0] - cent[0], out2[:, 1] - cent[1])]
        sorang = allang.argsort()
        dif = allang[sorang][1:] - allang[sorang][:-1]
        okang = [sorang[0]] + list(sorang[1:][dif > 0])
        edgpos = np.r_[out, out2][okang]
        if rep == 0:
            klist = [
                "pos_" + str(list(p))[1:-1].replace(",", "_").replace(" ", "")
                for p in edgpos
            ]
            return [
                self.samps[self.names[k]] for k in klist if k in self.names
            ]
        return edgpos
Example #2
    def analyzeList(self, mylist, myrange=(0, 1, 1), filename=None):
        """
		histogram2(a, bins) -- Compute histogram of a using divisions in bins

		Description:
		   Count the number of times values from array a fall into
		   numerical ranges defined by bins.  Range x is given by
		   bins[x] <= range_x < bins[x+1] where x =0,N and N is the
		   length of the bins array.  The last range is given by
		   bins[N] <= range_N < infinity.  Values less than bins[0] are
		   not included in the histogram.
		Arguments:
		   a -- 1D array.  The array of values to be divided into bins
		   bins -- 1D array.  Defines the ranges of values to use during
		         histogramming.
		Returns:
		   1D array.  Each value represents the occurrences for a given
		   bin (range) of values.
		"""
        #hist,bmin,minw,err = stats.histogram(mynumpy, numbins=36)
        #print hist,bmin,minw,err,"\n"
        if len(mylist) < 2:
            apDisplay.printWarning("Did not write file not enough rows (" +
                                   str(filename) + ")")
            return

        if myrange[0] is None:
            mymin = float(math.floor(ndimage.minimum(mylist)))
        else:
            mymin = float(myrange[0])
        if myrange[1] is None:
            mymax = float(math.ceil(ndimage.maximum(mylist)))
        else:
            mymax = float(myrange[1])
        mystep = float(myrange[2])

        mynumpy = numpy.asarray(mylist, dtype=numpy.float32)
        print "range=", round(ndimage.minimum(mynumpy),
                              2), " <> ", round(ndimage.maximum(mynumpy), 2)
        print " mean=", round(ndimage.mean(mynumpy), 2), " +- ", round(
            ndimage.standard_deviation(mynumpy), 2)

        #histogram
        bins = []
        mybin = mymin
        while mybin <= mymax:
            bins.append(mybin)
            mybin += mystep
        bins = numpy.asarray(bins, dtype=numpy.float32)
        apDisplay.printMsg("Creating histogram with " + str(len(bins)) +
                           " bins")
        hist = stats.histogram2(mynumpy, bins=bins)
        #print bins
        #print hist
        if filename is not None:
            f = open(filename, "w")
            for i in range(len(bins)):
                out = ("%3.4f %d\n" % (bins[i] + mystep / 2.0, hist[i]))
                f.write(out)
            f.write("&\n")
Example #3
def gaussian_fourierkernel(uu, vv, ww, sigma):
    """
    Create Gaussian Fourier filter kernel
    """
    if not hasattr(sigma, "__len__"):  # type(sigma) is float:
        gfilter = np.expm1(-2 * (np.pi ** 2) *
                           (uu ** 2 + vv ** 2 + ww ** 2) * (sigma ** 2)) + 1

        # cast to int so the values can be used as slice indices
        midpoint = np.ceil(np.array(uu.shape) / 2.0).astype(int)
        maxval = ndimage.maximum(gfilter[midpoint[0] - 10:midpoint[0] + 10,
                                         midpoint[1] - 10:midpoint[1] + 10,
                                         midpoint[2] - 10:midpoint[2] + 10])
        return gfilter / maxval
    elif len(sigma) == 2:
        gfilter = np.expm1(-2 * (np.pi ** 2) * ((sigma[0] ** 2) * uu ** 2 +
                                                (sigma[1] ** 2) * vv ** 2)) + 1
        midpoint = np.ceil(np.array(uu.shape) / 2.0).astype(int)
        maxval = ndimage.maximum(gfilter[midpoint[0] - 10:midpoint[0] + 10,
                                         midpoint[1] - 10:midpoint[1] + 10])
        gfilter = gfilter / maxval
    else:
        gfilter = np.expm1(-2 * (np.pi ** 2) * ((sigma[0] ** 2) * uu ** 2 +
                                                (sigma[1] ** 2) * vv ** 2 +
                                                (sigma[2] ** 2) * ww ** 2)) + 1
        midpoint = np.ceil(np.array(uu.shape) / 2.0).astype(int)
        maxval = ndimage.maximum(gfilter[midpoint[0] - 10:midpoint[0] + 10,
                                         midpoint[1] - 10:midpoint[1] + 10,
                                         midpoint[2] - 10:midpoint[2] + 10])
        gfilter = gfilter / maxval
    return gfilter
Example #4
def fillDataDict(radlist, anglelist, freqlist):
        """
        Get min/max statistics on data lists
        """
        d = {}
        freqnumpy = numpy.asarray(freqlist, dtype=numpy.int32)
        d['minf'] = float(ndimage.minimum(freqnumpy))
        d['maxf'] = float(ndimage.maximum(freqnumpy))
        if ndimage.sum(freqnumpy) < 10:
                apDisplay.printWarning("not enough eulers to draw a map")
                return None
        d['rangef'] = d['maxf']-d['minf']+1

        angnumpy = numpy.asarray(anglelist, dtype=numpy.float32)
        d['mina'] = float(ndimage.minimum(angnumpy))
        d['maxa'] = float(ndimage.maximum(angnumpy))
        if d['maxa'] > 330.0*math.pi/180.0:
                d['maxa'] = 2.0*math.pi
        d['rangea'] = d['maxa']-d['mina']

        radnumpy = numpy.asarray(radlist, dtype=numpy.float32)
        d['minr'] = float(ndimage.minimum(radnumpy))
        d['maxr'] = float(ndimage.maximum(radnumpy))
        d['ranger'] = d['maxr']-d['minr']

        xnumpy = radnumpy * numpy.cos(angnumpy - d['mina'])
        ynumpy = radnumpy * numpy.sin(angnumpy - d['mina'])
        d['minx'] = float(ndimage.minimum(xnumpy))
        d['maxx'] = float(ndimage.maximum(xnumpy))
        d['miny'] = float(ndimage.minimum(ynumpy))
        d['maxy'] = float(ndimage.maximum(ynumpy))
        d['rangex'] = d['maxx']-d['minx']
        d['rangey'] = d['maxy']-d['miny']

        return d
Example #5
def test_depth_algorithm(image_filtered, basename='depth'):
    """test_depth_algorithm
    Depth algorithm testing
    """
    print "Testing depth algorithm"
    # t1 = time.time()

    # Close gaussian filtered image
    c = ndimage.grey_closing(np.abs(image_filtered), size=(5, 5, 5))
    # Mask closed image
    # cm = c * (c>8000).astype(float)
    cm = c / (ndimage.maximum(c))
    # avoid div by zero
    cm = 0.99 * cm + 0.00001
    # Regularise gaussian filtered image
    gm = (np.abs(image_filtered) / cm)  # * (c>8000).astype(float)
    # Depth = difference between closed image and regularised gaussian
    depth = c / ndimage.maximum(c) - gm / ndimage.maximum(gm)
    # mask regularised image
    depth = depth * (c > 0.00015).astype(float)
    # Normalise
    # depth = (depth -
    # ndimage.minimum(depth))/(ndimage.maximum(depth)-ndimage.minimum(depth))
    # save to nifti
    # 'affine' is assumed to be defined in the enclosing module scope
    new_image = nib.Nifti1Image(np.abs(depth), affine)
    new_image.set_data_dtype(np.float32)
    nib.save(new_image, basename + '.nii.gz')
Example #6
    def run(self, ips, snap, img, para = None):
        intenimg = ImageManager.get(para['inten']).img
        strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
        buf, n = ndimage.label(snap, strc, output=np.uint32)
        index = range(1, n+1)
        idx = (np.ones(n+1)*para['front']).astype(np.uint8)
        msk = np.ones(n, dtype=bool)

        if para['mean']>0: msk *= ndimage.mean(intenimg, buf, index)>=para['mean']
        if para['mean']<0: msk *= ndimage.mean(intenimg, buf, index)<-para['mean']
        if para['max']>0: msk *= ndimage.maximum(intenimg, buf, index)>=para['max']
        if para['max']<0: msk *= ndimage.maximum(intenimg, buf, index)<-para['max']
        if para['min']>0: msk *= ndimage.minimum(intenimg, buf, index)>=para['min']
        if para['min']<0: msk *= ndimage.minimum(intenimg, buf, index)<-para['min']
        if para['sum']>0: msk *= ndimage.sum(intenimg, buf, index)>=para['sum']
        if para['sum']<0: msk *= ndimage.sum(intenimg, buf, index)<-para['sum']
        if para['std']>0: msk *= ndimage.standard_deviation(intenimg, buf, index)>=para['std']
        if para['std']<0: msk *= ndimage.standard_deviation(intenimg, buf, index)<-para['std']


        xy = ndimage.center_of_mass(intenimg, buf, index)
        xy = np.array(xy).round(2).T

        idx[1:][~msk] = para['back']
        idx[0] = 0
        img[:] = idx[buf]

        ImageManager.get(para['inten']).mark = RGMark((xy.T, msk))
        ImageManager.get(para['inten']).update = True
Example #7
def keypoint(m1, m2, m3):
    keypoint_candidate = m2.item(4)
    if keypoint_candidate >= ndimage.maximum(m2):
        if keypoint_candidate > ndimage.maximum(m1):
            if keypoint_candidate > ndimage.maximum(m3):
                return keypoint_candidate
    return -1
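A quick sanity check of this test (assuming `m1`, `m2`, `m3` are the flattened-indexable 3x3 neighborhoods of the same pixel at three adjacent scales, so `m2.item(4)` is the centre value; the arrays below are made up for illustration):

import numpy as np
from scipy import ndimage

m1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
m2 = np.array([[1, 2, 3], [4, 99, 6], [7, 8, 9]])  # centre element dominates
m3 = np.ones((3, 3))
print(keypoint(m1, m2, m3))  # 99: the centre is the maximum across all three scales
print(keypoint(m2, m1, m3))  # -1: the centre of m1 (5) is below its own maximum (9)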
Example #8
def getOverlapPercent(image1, image2, data):
    #SET IMAGE LIMITS
    gap = int(image1.shape[0] / 256.0)
    xm = image1.shape[1] + gap
    ym = image1.shape[0] + gap
    a1 = numpy.array([
        data['point1'],
        [-gap, -gap],
        [-gap, ym],
        [xm, ym],
        [xm, -gap],
    ])
    xm = image2.shape[1] + gap
    ym = image2.shape[0] + gap
    a2 = numpy.array([
        data['point2'],
        [-gap, -gap],
        [-gap, ym],
        [xm, ym],
        [xm, -gap],
    ])

    #CALCULATE TRANSFORM LIMITS
    a2mask = a1Toa2Data(a1, data)
    a1mask = a2Toa1Data(a2, data)

    #CONVERT NUMPY TO POLYGON LIST
    a1masklist = []
    a2masklist = []
    for j in range(4):
        for i in range(2):
            item = int(a1mask[j + 1, i])
            a1masklist.append(item)
            item = int(a2mask[j + 1, i])
            a2masklist.append(item)

    #CREATE POLYGON MASK FROM THE LIMITS 1 -> IMAGE 2
    mask2 = numpy.zeros(shape=image2.shape, dtype=numpy.bool_)
    mask2b = apImage.arrayToImage(mask2, normalize=False)
    mask2b = mask2b.convert("L")
    draw2 = ImageDraw.Draw(mask2b)
    draw2.polygon(a2masklist, fill="white")
    mask2 = apImage.imageToArray(mask2b, dtype=numpy.float32)

    #CREATE POLYGON MASK FROM THE LIMITS 2 -> IMAGE 1
    mask1 = numpy.zeros(shape=image1.shape, dtype=numpy.bool_)
    mask1b = apImage.arrayToImage(mask1, normalize=False)
    mask1b = mask1b.convert("L")
    draw1 = ImageDraw.Draw(mask1b)
    draw1.polygon(a1masklist, fill="white")
    mask1 = apImage.imageToArray(mask1b, dtype=numpy.float32)

    percent1 = ndimage.sum(mask1) / (mask1.shape[0] *
                                     mask1.shape[1]) / ndimage.maximum(mask1)
    percent2 = ndimage.sum(mask2) / (mask2.shape[0] *
                                     mask2.shape[1]) / ndimage.maximum(mask2)

    return max(percent1, percent2), min(percent1, percent2)
Example #9
    def run(self, ips, snap, img, para=None):
        intenimg = self.app.get_img(para['inten']).img
        strc = ndimage.generate_binary_structure(
            2, 1 if para['con'] == '4-connect' else 2)
        buf, n = ndimage.label(snap, strc, output=np.uint32)
        index = range(1, n + 1)
        idx = (np.ones(n + 1) * para['front']).astype(np.uint8)
        msk = np.ones(n, dtype=bool)

        if para['mean'] > 0:
            msk *= ndimage.mean(intenimg, buf, index) >= para['mean']
        if para['mean'] < 0:
            msk *= ndimage.mean(intenimg, buf, index) < -para['mean']
        if para['max'] > 0:
            msk *= ndimage.maximum(intenimg, buf, index) >= para['max']
        if para['max'] < 0:
            msk *= ndimage.maximum(intenimg, buf, index) < -para['max']
        if para['min'] > 0:
            msk *= ndimage.minimum(intenimg, buf, index) >= para['min']
        if para['min'] < 0:
            msk *= ndimage.minimum(intenimg, buf, index) < -para['min']
        if para['sum'] > 0:
            msk *= ndimage.sum(intenimg, buf, index) >= para['sum']
        if para['sum'] < 0:
            msk *= ndimage.sum(intenimg, buf, index) < -para['sum']
        if para['std'] > 0:
            msk *= ndimage.standard_deviation(intenimg, buf,
                                              index) >= para['std']
        if para['std'] < 0:
            msk *= ndimage.standard_deviation(intenimg, buf,
                                              index) < -para['std']

        xy = ndimage.center_of_mass(intenimg, buf, index)
        xy = np.array(xy).round(2).T

        idx[1:][~msk] = para['back']
        idx[0] = 0
        img[:] = idx[buf]

        red_pts = {
            'type': 'points',
            'body': xy[::-1].T[~msk],
            'color': (255, 0, 0)
        }
        green_pts = {
            'type': 'points',
            'body': xy[::-1].T[msk],
            'color': (0, 255, 0)
        }

        self.app.get_img(para['inten']).mark = mark2shp({
            'type':
            'layer',
            'body': [red_pts, green_pts]
        })
        self.app.get_img(para['inten']).update()
Example #10
	def analyzeList(self, mylist, myrange=(0,1,1), filename=None):
		"""
		histogram2(a, bins) -- Compute histogram of a using divisions in bins

		Description:
		   Count the number of times values from array a fall into
		   numerical ranges defined by bins.  Range x is given by
		   bins[x] <= range_x < bins[x+1] where x =0,N and N is the
		   length of the bins array.  The last range is given by
		   bins[N] <= range_N < infinity.  Values less than bins[0] are
		   not included in the histogram.
		Arguments:
		   a -- 1D array.  The array of values to be divided into bins
		   bins -- 1D array.  Defines the ranges of values to use during
		         histogramming.
		Returns:
		   1D array.  Each value represents the occurrences for a given
		   bin (range) of values.
		"""
		#hist,bmin,minw,err = stats.histogram(mynumpy, numbins=36)
		#print hist,bmin,minw,err,"\n"
		if len(mylist) < 2:
			apDisplay.printWarning("Did not write file not enough rows ("+str(filename)+")")
			return

		if myrange[0] is None:
			mymin = float(math.floor(ndimage.minimum(mylist)))
		else:
			mymin = float(myrange[0])
		if myrange[1] is None:
			mymax = float(math.ceil(ndimage.maximum(mylist)))
		else:
			mymax = float(myrange[1])
		mystep = float(myrange[2])

		mynumpy = numpy.asarray(mylist, dtype=numpy.float32)
		print "range=",round(ndimage.minimum(mynumpy),2)," <> ",round(ndimage.maximum(mynumpy),2)
		print " mean=",round(ndimage.mean(mynumpy),2)," +- ",round(ndimage.standard_deviation(mynumpy),2)

		#histogram
		bins = []
		mybin = mymin
		while mybin <= mymax:
			bins.append(mybin)
			mybin += mystep
		bins = numpy.asarray(bins, dtype=numpy.float32)
		apDisplay.printMsg("Creating histogram with "+str(len(bins))+" bins")
		hist = stats.histogram2(mynumpy, bins=bins)
		#print bins
		#print hist
		if filename is not None:
			f = open(filename, "w")
			for i in range(len(bins)):
				out = ("%3.4f %d\n" % (bins[i] + mystep/2.0, hist[i]) )
				f.write(out)
			f.write("&\n")
Example #11
def procesar_imagen(archivo="", factor=0.5, f="white"):
    '''
    Processes an image of the Sun from `archivo`, detecting the
    centers of mass with a threshold a given factor above the median
    value and below the maximum.

    Writes a processed file to output/img/~.png and another to
    output/mosaico_~.png with a superposition of all the values.

    @param archivo: path of the file to process

    @param factor: from 0 for the median value up to 1 for the maximum value

    @param f: may be 'white' to use white_~.png or 'sun' to use sun_~.png
    '''

    # Read the image from disk
    img = misc.imread("img/" + archivo, 1)

    # Compute the maximum pixel intensity and invert all values, so that now darkness = max and light = 0
    maximum = ndimage.maximum(img)
    img = maximum - img

    # Recompute the maximum and the median of the inverted data
    median = ndimage.median(img)
    maximum = ndimage.maximum(img)

    # Build the filter with a rather ad hoc criterion :P
    filtro = img > median + (maximum - median) * factor
    img = img * filtro

    # Compute the centers of mass, with at most NUM possible spots
    lbl, num = ndimage.label(img)

    # centers of mass
    center_of_mass = ndimage.measurements.center_of_mass(
        img, lbl, range(2, num + 1))

    # Build the image map with the computed elements
    arch = archivo.split("_", 3)
    sun = misc.imread("lib/img/" + f + "_" + arch[2] + ".png", 1)
    mosaico = misc.imread("output/mosaico_" + arch[2] + ".png", 1)

    for elemento in center_of_mass:
        y = int(elemento[0])
        x = int(elemento[1])
        mosaico[y, x] = 0
        sun[y, x] = 0

    # Set a new name for the image in output/
    archivo = arch[0] + "_" + arch[1] + "_" + arch[2] + ".png"

    misc.imsave("output/img/" + archivo, sun)
    misc.imsave("output/mosaico_" + arch[2] + ".png", mosaico)
Example #12
def all_alive(board):
    black_groups, black_count = nd.label(board == 1)
    empty_adjacent_count = nd.filters.convolve((board == 0).astype(np.uint8),
                                               np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]),
                                               mode='constant')
    # note: the index must run to count + 1, otherwise the last group is skipped
    black_adjacent_max = nd.maximum(empty_adjacent_count, black_groups,
                                    np.arange(1, black_count + 1))
    if np.any(black_adjacent_max == 0):
        return False
    white_groups, white_count = nd.label(board == 2)
    white_adjacent_max = nd.maximum(empty_adjacent_count, white_groups,
                                    np.arange(1, white_count + 1))
    if np.any(white_adjacent_max == 0):
        return False
    return True
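Assuming `board` encodes empty/black/white as 0/1/2 (as the labeling above suggests), a small check of this liberty test; it relies on the `count + 1` index fix above:

import numpy as np
import scipy.ndimage as nd

board = np.array([[1, 0, 0],
                  [0, 0, 2],
                  [0, 2, 0]])
print(all_alive(board))  # True: every group touches at least one empty point

board = np.array([[1, 2, 0],
                  [2, 0, 0],
                  [0, 0, 0]])
print(all_alive(board))  # False: the black stone at (0, 0) has no liberties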
Example #13
    def label(self, input_grid):
        """
        Label input grid with hysteresis method.

        Args:
            input_grid: 2D array of values.

        Returns:
            Labeled output grid.
        """
        unset = 0
        high_labels, num_labels = label(input_grid > self.high_thresh)
        region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, num_labels + 1)))[::-1]
        output_grid = np.zeros(input_grid.shape, dtype=int)
        stack = []
        for rank in region_ranking:
            label_num = rank + 1
            label_i, label_j = np.where(high_labels == label_num)
            for i in range(label_i.size):
                if output_grid[label_i[i], label_j[i]] == unset:
                    stack.append((label_i[i], label_j[i]))
            while len(stack) > 0:
                index = stack.pop()
                output_grid[index] = label_num
                for i in range(index[0] - 1, index[0] + 2):
                    for j in range(index[1] - 1, index[1] + 2):
                        if 0 <= i < output_grid.shape[0] and 0 <= j < output_grid.shape[1]:
                            if (input_grid[i, j] > self.low_thresh) and (output_grid[i, j] == unset):
                                stack.append((i, j))
        return output_grid
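The ranking step above boils down to one `ndimage.maximum` call over the high-threshold regions; a minimal sketch of just that call on a toy grid (imports spelled out here):

import numpy as np
from scipy.ndimage import label, maximum

grid = np.array([[0., 9., 0., 0.],
                 [0., 8., 0., 5.],
                 [0., 0., 0., 4.]])
high_labels, num_labels = label(grid > 3)
peaks = maximum(grid, high_labels, index=np.arange(1, num_labels + 1))
print(peaks)                    # [9. 5.]
print(np.argsort(peaks)[::-1])  # [0 1]: regions ranked strongest peak first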
Example #14
def estimate_pk_parms_2d(x, y, f, pktype):
    """
    Calculate initial parameter values for 2-dimensional peak fitting.

    Parameters
    ----------
    x : array_like
        (n, ) ndarray of coordinate positions for dimension 1
        (numpy.meshgrid formatting).
    y : array_like
        (n, ) ndarray of coordinate positions for dimension 2
        (numpy.meshgrid formatting).
    f : array_like
        (n, ) ndarray of intensity measurements at coordinate
        positions x and y.
    pktype : str
        type of analytic function that will be used to fit the data; current
        options are "gaussian", "gaussian_rot" (gaussian with arbitrary axes)
        and "split_pvoigt_rot" (split psuedo voigt with arbitrary axes).

    Returns
    -------
    p -- (m) ndarray containing initial guesses for parameters for the input
    peak type (see peakfunction help for more information).
    """

    bg0 = np.mean([f[0, 0], f[-1, 0], f[-1, -1], f[0, -1]])
    bg1x = (np.mean([f[-1, -1], f[0, -1]]) - np.mean([f[0, 0], f[-1, 0]])) \
        / (x[0, -1] - x[0, 0])
    bg1y = (np.mean([f[-1, -1], f[-1, 0]]) - np.mean([f[0, 0], f[0, -1]])) \
        / (y[-1, 0] - y[0, 0])

    fnobg = f - (bg0 + bg1x * x + bg1y * y)

    labels, numlabels = imgproc.label(fnobg > 0.5*np.max(fnobg))

    # looks for the largest peak
    areas = np.zeros(numlabels)
    for ii in np.arange(1, numlabels + 1, 1):
        areas[ii - 1] = np.sum(labels == ii)

    peakIndex = np.argmax(areas) + 1

    FWHMx = np.max(x[labels == peakIndex]) - np.min(x[labels == peakIndex])
    FWHMy = np.max(y[labels == peakIndex]) - np.min(y[labels == peakIndex])

    coords = imgproc.maximum_position(fnobg, labels=labels, index=peakIndex)
    A = imgproc.maximum(fnobg, labels=labels, index=peakIndex)
    x0 = x[coords]
    y0 = y[coords]

    if pktype == 'gaussian':
        p = [A, x0, y0, FWHMx, FWHMy, bg0, bg1x, bg1y]
    elif pktype == 'gaussian_rot':
        p = [A, x0, y0, FWHMx, FWHMy, 0., bg0, bg1x, bg1y]
    elif pktype == 'split_pvoigt_rot':
        p = [A, x0, y0, FWHMx, FWHMx, FWHMy, FWHMy,
             0.5, 0.5, 0.5, 0.5, 0., bg0, bg1x, bg1y]
    p = np.array(p)
    return p
Example #15
def test_maximum03():
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels,
                                           index=2)
        assert_almost_equal(output, 4.0)
Example #16
def tworepnoiseest(img1, img2):
    if np.iscomplexobj(img1) and np.iscomplexobj(img2):
        real_STD = noiseest(img1.real, img2.real)
        imag_STD = noiseest(img1.imag, img2.imag)
        return np.sqrt((real_STD**2 + imag_STD**2) / 2.0)
    else:
        # Normalise image
        nimg1 = (img1 - ndimage.minimum(img1)) / \
                 (ndimage.maximum(img1) - ndimage.minimum(img1))
        nimg2 = (img2 - ndimage.minimum(img2)) / \
                 (ndimage.maximum(img2) - ndimage.minimum(img2))

        # nimg1*=256.0
        # nimg2*=256.0

        return np.sqrt(0.5) * (nimg1 - nimg2)
Example #17
def LabelCut(labels3d, nstrCut, massCut, volCut, densCut):

    labels1d = np.unique(labels3d)[1:]

    print('#Haloes before label-cut:', labels1d.shape)

    maxnstrEachBlob = np.array(
        ndi.maximum(nstream, labels=labels3d, index=labels1d))
    c1 = (maxnstrEachBlob < nstrCut)  # max(nstream) = 1 (entire region in void)

    gridOnes = np.ones_like(nstream)
    gridEachBlob = ndi.measurements.sum(gridOnes,
                                        labels=labels3d,
                                        index=labels1d)
    c2 = (gridEachBlob < volCut)  # total volume < 8 grid points

    massEachBlob = ndi.measurements.sum(macro, labels=labels3d,
                                        index=labels1d) * m_particle
    densEachBlob = massEachBlob / (gridEachBlob * (L / size_fact)**3)
    denBackground = m_particle * (nGr)**3 / (L**3)
    dentoVirial = densEachBlob / denBackground
    c3 = dentoVirial < densCut

    MaskOutCondition0 = np.where(c1 | c2 | c3)
    maskingValues = labels1d[MaskOutCondition0]
    labels3d_out = MaskedRemove(labels3d, maskingValues)

    print('#Haloes after label-cut:', (np.unique(labels3d_out)[1:]).shape)

    return labels3d_out
Example #18
def _label_detection_islands(data, analysis_thresh, detection_thresh):
    """
    Find regions which are above analysis_thresh and peak above detection_thresh.

    Args:
        data (array_like): Raw data (may be masked)
        analysis_thresh (float): Analysis ('single connected region') threshold
        detection_thresh (float): Detections threshold

    Returns:
        tuple (array_like, dict): Tuple of `(label_map, valid_labels)`.
            `label_map` is an ndarray containing the pixel labels.
            `valid_labels` is a dict mapping valid label numbers to the maximum
            pixel-value in that label-region.

    """
    analysis_map = data > analysis_thresh
    label_map, n_labels = ndimage.label(analysis_map)

    label_maxvals = ndimage.maximum(data, label_map,
                                    index=range(1, n_labels + 1))

    valid_label_maxvals = {}
    # Delabel islands that don't meet detection threshold:
    for zero_idx, maxval in enumerate(label_maxvals):
        label = zero_idx + 1
        if maxval < detection_thresh:
            label_map[label_map == label] = 0.
        else:
            valid_label_maxvals[label] = maxval
    return label_map, valid_label_maxvals
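A minimal usage sketch on a toy array (thresholds chosen for illustration):

import numpy as np

data = np.array([[0., 2., 0., 0.],
                 [0., 3., 0., 2.],
                 [0., 0., 0., 2.]])
label_map, valid = _label_detection_islands(data, analysis_thresh=1.0,
                                            detection_thresh=2.5)
print(valid)      # {1: 3.0} -- only the island peaking at 3.0 survives
print(label_map)  # the right-hand island has been delabelled to 0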
Example #19
def normalizeImage(a):
	"""	
	Normalizes numarray to fit into an image format
	that is values between 0 and 255.
	"""
	#Minimum image value, i.e. how black the image can get
	minlevel = 0.0
	#Maximum image value, i.e. how white the image can get
	maxlevel = 235.0
	#Maximum standard deviations to include, i.e. pixel > N*stdev --> white
	devlimit=5.0
	imrange = maxlevel - minlevel

	avg1=ndimage.mean(a)

	stdev1=ndimage.standard_deviation(a)

	min1=ndimage.minimum(a)
	if(min1 < avg1-devlimit*stdev1):
		min1 = avg1-devlimit*stdev1

	max1=ndimage.maximum(a)
	if(max1 > avg1+devlimit*stdev1):
		max1 = avg1+devlimit*stdev1

	a = (a - min1)/(max1 - min1)*imrange + minlevel
	a = numarray.where(a > maxlevel,255.0,a)
	a = numarray.where(a < minlevel,0.0,a)

	return a
Example #20
def labelstats_str(factors, values, stat='mvnx'):
    # works also for string labels in ys, but requires 1D
    # from mailing list scipy-user 2009-02-11
    # np.unique1d was renamed np.unique in later NumPy releases
    unil, unilinv = np.unique(factors, return_inverse=True)
    res = []
    if 'm' in stat:
        labelmeans = np.array(
            ndimage.mean(values, labels=unilinv, index=np.arange(len(unil))))
        res.append(labelmeans)
    if 'v' in stat:
        labelvars = np.array(
            ndimage.variance(values,
                             labels=unilinv,
                             index=np.arange(len(unil))))
        res.append(labelvars)
    if 'n' in stat:
        labelmin = np.array(
            ndimage.minimum(values, labels=unilinv,
                            index=np.arange(len(unil))))
        res.append(labelmin)
    if 'x' in stat:
        labelmax = np.array(
            ndimage.maximum(values, labels=unilinv,
                            index=np.arange(len(unil))))
        res.append(labelmax)
    return res
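With the `np.unique` replacement above, a sketch of how the string-label variant behaves (made-up data):

import numpy as np
from scipy import ndimage

factors = np.array(['a', 'a', 'b', 'b', 'b'])
values = np.array([1.0, 3.0, 2.0, 4.0, 6.0])
means, maxima = labelstats_str(factors, values, stat='mx')
print(means)   # [2. 4.]
print(maxima)  # [3. 6.]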
Example #21
def test_maximum04():
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels,
                                           index=[2, 3, 8])
        assert_array_almost_equal(output, [3.0, 4.0, 0.0])
Example #22
def scale_array_min_max(img, new_min, new_max):
    old_min = ndimage.minimum(img)
    old_max = ndimage.maximum(img)
    if old_min == old_max:
        return img - old_min + new_min
    return new_min + (img - old_min) * ((new_max - new_min) /
                                        (old_max - old_min))
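For instance:

import numpy as np
from scipy import ndimage

img = np.array([[0.0, 5.0], [10.0, 2.5]])
print(scale_array_min_max(img, 0, 255))
# [[  0.    127.5 ]
#  [255.     63.75]]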
Example #23
def fouriercoords(siz):
    """fouriercoords
    Create x,y,z mesh of Fourier domain space
    """
    sz = np.ceil(np.array(siz) / 2.0)
    xx = np.array(range(-int(sz[0]), int(sz[0])))
    yy = np.array(range(-int(sz[1]), int(sz[1])))
    maxlen = ndimage.maximum(np.array(siz))
    if len(siz) == 3:
        zz = np.array(range(-int(sz[2]), int(sz[2])))
        mult_fact = np.ones((len(xx), len(yy), len(zz)))
        uu = xx[:, np.newaxis, np.newaxis] * mult_fact / maxlen  # * voxmm[0]
        vv = yy[np.newaxis, :, np.newaxis] * mult_fact / maxlen  # * voxmm[0]
        ww = zz[np.newaxis, np.newaxis, :] * mult_fact / maxlen  # * voxmm[0]
        if np.prod(siz) != np.prod(sz * 2):
            uu = uu[:siz[0], :siz[1], :siz[2]]
            vv = vv[:siz[0], :siz[1], :siz[2]]
            ww = ww[:siz[0], :siz[1], :siz[2]]
        return (uu, vv, ww)
    else:
        mult_fact = np.ones((len(xx), len(yy)))
        uu = xx[:, np.newaxis] * mult_fact / maxlen  # * voxmm[0]
        vv = yy[np.newaxis, :] * mult_fact / maxlen  # * voxmm[0]
        if np.prod(siz) != np.prod(sz * 2):
            uu = uu[:siz[0], :siz[1]]
            vv = vv[:siz[0], :siz[1]]
        return (uu, vv, [])
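A quick shape check (assuming `np` and `ndimage` are imported as in the surrounding examples):

uu, vv, ww = fouriercoords((4, 4, 4))
print(uu.shape, uu.min(), uu.max())  # (4, 4, 4) -0.5 0.25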
Example #24
def test_maximum03():
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels,
                                           index=2)
        assert_almost_equal(output, 4.0)
Example #25
def test_maximum04():
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels,
                                           index=[2, 3, 8])
        assert_array_almost_equal(output, [3.0, 4.0, 0.0])
Example #26
    def run(self, ips, imgs, para=None):
        lab = WindowsManager.get(para['lab']).ips.get_img()
        if lab.dtype != np.uint8 and lab.dtype != np.uint16:
            IPy.alert('Label image must be in type 8-bit or 16-bit')
            return
        index = range(1, lab.max() + 1)
        titles = ['Max', 'Min', 'Mean', 'Variance', 'Standard', 'Sum']
        key = {
            'Max': 'max',
            'Min': 'min',
            'Mean': 'mean',
            'Variance': 'var',
            'Standard': 'std',
            'Sum': 'sum'
        }
        titles = ['value'] + [i for i in titles if para[key[i]]]

        data = [index]
        img = ips.get_img()
        if img is lab: img = img > 0
        if para['max']: data.append(ndimage.maximum(img, lab, index))
        if para['min']: data.append(ndimage.minimum(img, lab, index))
        if para['mean']: data.append(ndimage.mean(img, lab, index).round(4))
        if para['var']: data.append(ndimage.variance(img, lab, index).round(4))
        if para['std']:
            data.append(ndimage.standard_deviation(img, lab, index).round(4))
        if para['sum']: data.append(ndimage.sum(img, lab, index).round(4))
        data = list(zip(*data))
        IPy.table(ips.title + '-segment', data, titles)
Example #27
def LabelCutStr(labels3d, nstrCutMax, nstrCutMin):

    labels1d = np.unique(labels3d)[1:]

    print('---------------------------------')
    print('Max nstream threshold', nstrCutMax)
    print('Min nstream threshold', nstrCutMin)
    print('#Haloes before nstreams-cut:', labels1d.shape)

    maxnstrEachBlob = np.array(
        ndi.maximum(nstream, labels=labels3d, index=labels1d))
    c1 = (maxnstrEachBlob < nstrCutMax)  # max(nstream) = 1 (entire region in void)

    minnstrEachBlob = np.array(
        ndi.minimum(nstream, labels=labels3d, index=labels1d))
    c2 = (minnstrEachBlob < nstrCutMin)  # min(nstream) below the minimum threshold

    MaskOutCondition0 = np.where(c1 | c2)
    maskingValues = labels1d[MaskOutCondition0]
    labels3d_out = MaskedRemove(labels3d, maskingValues)

    print('#Haloes after nstreams-cut nstreams:',
          (np.unique(labels3d_out)[1:]).shape)

    #    maxnstrEachBlob = np.array(ndi.maximum(nstream, labels=labels3d_out, index= (np.unique(labels3d_out)[1:])))
    #    minnstrEachBlob = np.array(ndi.minimum(nstream, labels=labels3d_out, index= (np.unique(labels3d_out)[1:])))
    #
    #    print 'Streams min(min) ', minnstrEachBlob.min()
    #    print 'Streams min(max) ', maxnstrEachBlob.min()

    return labels3d_out
Example #28
def fouriergausssubband15(siz, sigma):
    """ Subband15 Gaussian filter

    Based on 3D filtering paper:
    Max W. K. Law and Albert C. S. Chung, "Efficient Implementation for
    Spherical Flux Computation and Its Application to Vascular Segmentation",
    IEEE Transactions on Image Processing, 2009, Volume 18(3), 596-612

    http://www.cse.ust.hk/~maxlawwk/

    """
    (uu, vv, ww) = fouriercoords(siz)
    # Original Gaussian kernel
    gfilter = gaussian_fourierkernel(uu, vv, ww, sigma)
    # Subband_1.5 frequency oversampling component.

    gfilter = gfilter + gaussian_fourierkernel(uu + 1, vv, ww, sigma)
    gfilter = gfilter + gaussian_fourierkernel(uu - 1, vv, ww, sigma)
    gfilter = gfilter + gaussian_fourierkernel(uu, vv + 1, ww, sigma)
    gfilter = gfilter + gaussian_fourierkernel(uu, vv - 1, ww, sigma)
    gfilter = gfilter + gaussian_fourierkernel(uu, vv, ww + 1, sigma)
    gfilter = gfilter + gaussian_fourierkernel(uu, vv, ww - 1, sigma)
    # Normalization improves accuracy when sigma is small (e.g. sigma < 0.8
    # voxel length)
    gfilter = gfilter / ndimage.maximum(gfilter)

# End of Subband_1.5 frequency oversampling component
    return gfilter
Example #29
def wavg(values,weights,labels=None,index=None,inverse=None,counts=None):
    # Weighted average:
    # http://en.wikipedia.org/wiki/Weighted_arithmetic_mean
    # Using the weighted sample variance with reliability weights
    # Referenced to the GSL:
    # https://www.gnu.org/software/gsl/doc/html/statistics.html#weighted-samples

    if labels is None: 
        labels = np.ones(len(values),dtype=int)

    if any(x is None for x in [index,inverse,counts]):
        index,inverse,counts = np.unique(labels,False,True,True)

    V1 = nd.sum(weights,labels=labels,index=index)
    V2 = nd.sum(weights**2,labels=labels,index=index)    

    wavg = nd.sum(weights*values,labels=labels,index=index)/V1

    wavg_err  = nd.maximum(np.sqrt(1/weights),labels=labels,index=index)

    # https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights
    wavg_rms  = nd.sum(weights*(values - wavg[inverse])**2,
                       labels=labels,index=index)
    err = np.seterr()
    np.seterr(all='ignore')
    wavg_rms *= (V1/(V1**2 - V2)) 
    wavg_rms  = np.sqrt(wavg_rms)
    np.seterr(**err)

    ## Convert NaN to BADMAG
    #wavg_rms[np.isnan(wavg_rms)] = BADMAG

    return wavg, np.where(counts > 1,wavg_rms,wavg_err)
Example #30
def blackNormalizeImage(a):
    """     
        Normalizes numarray to fit into an image format
        that is values between 0 and 255.
        """
    #Minimum image value, i.e. how black the image can get
    minlevel = 0.0
    #Maximum image value, i.e. how white the image can get
    maxlevel = 200.0
    #Maximum standard deviations to include, i.e. pixel > N*stdev --> white
    devlimit = 5.0
    imrange = maxlevel - minlevel

    avg1 = ndimage.mean(a)

    stdev1 = ndimage.standard_deviation(a)

    min1 = ndimage.minimum(a)
    if (min1 < avg1 - devlimit * stdev1):
        min1 = avg1 - devlimit * stdev1

    max1 = ndimage.maximum(a)
    if (max1 > avg1 + devlimit * stdev1):
        max1 = avg1 + devlimit * stdev1

    a = (a - min1) / (max1 - min1) * imrange + minlevel
    a = numarray.where(a > maxlevel, 215.0, a)
    a = numarray.where(a < minlevel, 0.0, a)

    return a
Example #31
def downsample(voxels, step, method='max'):
    """
    downsample a voxels matrix by a factor of step. 
    downsample method options: max/mean 
    same as a pooling
    """
    assert step > 0
    assert voxels.ndim == 3 or voxels.ndim == 4
    assert method in ('max', 'mean')
    if step == 1:
        return voxels

    if voxels.ndim == 3:
        sx, sy, sz = voxels.shape[-3:]
        X, Y, Z = np.ogrid[0:sx, 0:sy, 0:sz]
        # integer division (//) preserves the original Python 2 semantics
        regions = sz // step * sy // step * (X // step) + sz // step * (
            Y // step) + Z // step
        if method == 'max':
            res = ndimage.maximum(voxels,
                                  labels=regions,
                                  index=np.arange(regions.max() + 1))
        elif method == 'mean':
            res = ndimage.mean(voxels,
                               labels=regions,
                               index=np.arange(regions.max() + 1))
        res.shape = (sx // step, sy // step, sz // step)
        return res
    else:
        res0 = downsample(voxels[0], step, method)
        res = np.zeros((voxels.shape[0], ) + res0.shape)
        res[0] = res0
        for ind in range(1, voxels.shape[0]):
            res[ind] = downsample(voxels[ind], step, method)
        return res
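With the integer-division fixes above, a minimal pooling check:

import numpy as np
from scipy import ndimage

voxels = np.arange(8, dtype=float).reshape(2, 2, 2)
print(downsample(voxels, 2, method='max'))   # [[[7.]]]
print(downsample(voxels, 2, method='mean'))  # [[[3.5]]]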
Example #32
def rawFrameToImageFile(image, filename):
    """Writes a single raw image frame to image file.
    The file type must be given, e.g. png or jpg.
    The image need not be scaled beforehand, it is done prior
    to writing out the image. Could be one of
    (BMP, JPG, JPEG, PNG, PPM, TIFF, XBM, XPM),
    but the file types available depend
    on the QT imsave plugin in use.

    Args:
        | image (np.ndarray): two-dimensional array representing an image
        | filename (string): name of file to be written to, with extension

    Returns:
        | Nothing

    Raises:
        | No exception is raised.
    """
    #normalise input image (img) data to between 0 and 1
    from scipy import ndimage

    image = (image - ndimage.minimum(image)) / (ndimage.maximum(image) - ndimage.minimum(image))

    # http://scikit-image.org/docs/dev/api/skimage.io.html#imsave
    import skimage.io as io
    io.imsave(filename, image) 
Example #33
def test_maximum01():
    "maximum 1"
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels)
        assert_almost_equal(output, 3.0)
Example #34
def test_maximum01():
    "maximum 1"
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.maximum(input, labels=labels)
        assert_almost_equal(output, 3.0)
Example #35
    def run(self, ips, imgs, para = None):
        inten = ImageManager.get(para['inten'])
        if not para['slice']:
            imgs = [inten.img]
            msks = [ips.img]
        else: 
            msks = ips.imgs
            imgs = inten.imgs
            if len(msks)==1:
                msks *= len(imgs)
        buf = imgs[0].astype(np.uint16)
        strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
        idct = ['Max','Min','Mean','Variance','Standard','Sum']
        key = {'Max':'max','Min':'min','Mean':'mean',
               'Variance':'var','Standard':'std','Sum':'sum'}
        idct = [i for i in idct if para[key[i]]]
        titles = ['Slice', 'ID'][0 if para['slice'] else 1:] 
        if para['center']: titles.extend(['Center-X','Center-Y'])
        if para['extent']: titles.extend(['Min-Y','Min-X','Max-Y','Max-X'])
        titles.extend(idct)
        k = ips.unit[0]
        data, mark = [],{'type':'layers', 'body':{}}
        # data,mark=[],[]
        for i in range(len(imgs)):
            n = ndimage.label(msks[i], strc, output=buf)
            index = range(1, n+1)
            dt = []
            if para['slice']:dt.append([i]*n)
            dt.append(range(n))
            
            xy = ndimage.center_of_mass(imgs[i], buf, index)
            xy = np.array(xy).round(2).T
            if para['center']:dt.extend([xy[1]*k, xy[0]*k])

            boxs = [None] * n
            if para['extent']:
                boxs = ndimage.find_objects(buf)
                boxs = [( i[1].start+(i[1].stop-i[1].start)/2, i[0].start+(i[0].stop-i[0].start)/2, i[1].stop-i[1].start,i[0].stop-i[0].start) for i in boxs]
                for j in (0,1,2,3):
                    dt.append([i[j]*k for i in boxs])
            if para['max']:dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
            if para['min']:dt.append(ndimage.minimum(imgs[i], buf, index).round(2))        
            if para['mean']:dt.append(ndimage.mean(imgs[i], buf, index).round(2))
            if para['var']:dt.append(ndimage.variance(imgs[i], buf, index).round(2)) 
            if para['std']:dt.append(ndimage.standard_deviation(imgs[i], buf, index).round(2))
            if para['sum']:dt.append(ndimage.sum(imgs[i], buf, index).round(2))      
 
            layer = {'type':'layer', 'body':[]}
            xy = xy.astype(int).T  # np.int0 was removed from NumPy 2.0

            texts = [(i[1],i[0])+('id=%d'%n,) for i,n in zip(xy,range(len(xy)))]
            layer['body'].append({'type':'texts', 'body':texts})
            if para['extent']: layer['body'].append({'type':'rectangles', 'body':boxs})
            mark['body'][i] = layer

            data.extend(list(zip(*dt)))
        IPy.show_table(pd.DataFrame(data, columns=titles), inten.title+'-region statistic')
        inten.mark = GeometryMark(mark)
        inten.update = True
Example #36
def measure_labeled_regions(data,
                            labels,
                            tag='IMAGE',
                            measure_positions=True,
                            measure_values=True,
                            fits_offset=True,
                            bbox_offset=True):
    """Measure source properties in image.

    Sources are defined by a label image.

    Parameters
    ----------
    TODO

    Returns
    -------
    TODO
    """
    import scipy.ndimage as nd
    from astropy.table import Table, Column
    # Measure all segments
    nsegments = labels.max()
    index = np.arange(1, nsegments + 1)  # Measure all sources
    # Measure stuff
    sum = nd.sum(data, labels, index)
    max = nd.maximum(data, labels, index)
    mean = nd.mean(data, labels, index)
    x, y = _split_xys(nd.center_of_mass(data, labels, index))
    xpeak, ypeak = _split_xys(nd.maximum_position(data, labels, index))
    xmin, xmax, ymin, ymax = _split_slices(nd.find_objects(labels))
    area = _measure_area(labels)
    # Use FITS convention, i.e. start counting at 1
    FITS_OFFSET = 1 if fits_offset else 0
    # Use SExtractor convention, i.e. slice max is inside
    BBOX_OFFSET = -1 if bbox_offset else 0
    # Create a table
    table = Table()
    table.add_column(Column(data=index, name='NUMBER'))

    if measure_positions:
        table.add_column(Column(data=x + FITS_OFFSET, name='X_IMAGE'))
        table.add_column(Column(data=y + FITS_OFFSET, name='Y_IMAGE'))
        table.add_column(Column(data=xpeak + FITS_OFFSET, name='XPEAK_IMAGE'))
        table.add_column(Column(data=ypeak + FITS_OFFSET, name='YPEAK_IMAGE'))
        table.add_column(Column(data=xmin + FITS_OFFSET, name='XMIN_IMAGE'))
        table.add_column(
            Column(data=xmax + FITS_OFFSET + BBOX_OFFSET, name='XMAX_IMAGE'))
        table.add_column(Column(data=ymin + FITS_OFFSET, name='YMIN_IMAGE'))
        table.add_column(
            Column(data=ymax + FITS_OFFSET + BBOX_OFFSET, name='YMAX_IMAGE'))
        table.add_column(Column(data=area, name='AREA'))

    if measure_values:
        table.add_column(Column(data=max, name=tag + '_MAX'))
        table.add_column(Column(data=sum, name=tag + '_SUM'))
        table.add_column(Column(data=mean, name=tag + '_MEAN'))

    return table
Example #37
def checkArrayMinMax(self, a1, a2):
	"""
	Tests whether an image has a valid range for libcv
	"""
	a1b = ndimage.median_filter(a1, size=3)
	min1 = ndimage.minimum(a1b)
	max1 = ndimage.maximum(a1b)
	if max1 - min1 < 10:
		self.logger.error("Old Image Range Error %d" % int(max1 - min1))
		return False
	a2b = ndimage.median_filter(a2, size=3)
	min2 = ndimage.minimum(a2b)
	max2 = ndimage.maximum(a2b)
	if max2 - min2 < 10:
		self.logger.error("New Image Range Error %d" % int(max2 - min2))
		return False
	return True
Example #38
def checkArrayMinMax(self, a1, a2):
    """
	Tests whether an image has a valid range for libcv
	"""
    a1b = ndimage.median_filter(a1, size=3)
    min1 = ndimage.minimum(a1b)
    max1 = ndimage.maximum(a1b)
    if max1 - min1 < 10:
        self.logger.error("Old Image Range Error %d" % int(max1 - min1))
        return False
    a2b = ndimage.median_filter(a2, size=3)
    min2 = ndimage.minimum(a2b)
    max2 = ndimage.maximum(a2b)
    if max2 - min2 < 10:
        self.logger.error("New Image Range Error %d" % int(max2 - min2))
        return False
    return True
Example #39
def getOverlapPercent(image1, image2, data):
	#SET IMAGE LIMITS
	gap = int(image1.shape[0]/256.0)
	xm = image1.shape[1]+gap
	ym = image1.shape[0]+gap
	a1 = numpy.array([ data['point1'], [-gap,-gap], [-gap,ym], [xm,ym], [xm,-gap], ])
	xm = image2.shape[1]+gap
	ym = image2.shape[0]+gap
	a2 = numpy.array([ data['point2'], [-gap,-gap], [-gap,ym], [xm,ym], [xm,-gap], ])

	#CALCULATE TRANSFORM LIMITS
	a2mask = a1Toa2Data(a1, data)
	a1mask = a2Toa1Data(a2, data)

	#CONVERT NUMPY TO POLYGON LIST
	a1masklist = []
	a2masklist = []
	for j in range(4):
		for i in range(2):
			item = int(a1mask[j+1,i])
			a1masklist.append(item)
			item = int(a2mask[j+1,i])
			a2masklist.append(item)

	#CREATE POLYGON MASK FROM THE LIMITS 1 -> IMAGE 2
	mask2 = numpy.zeros(shape=image2.shape, dtype=numpy.bool_)
	mask2b = apImage.arrayToImage(mask2, normalize=False)
	mask2b = mask2b.convert("L")
	draw2 = ImageDraw.Draw(mask2b)
	draw2.polygon(a2masklist, fill="white")
	mask2 = apImage.imageToArray(mask2b, dtype=numpy.float32)

	#CREATE POLYGON MASK FROM THE LIMITS 2 -> IMAGE 1
	mask1 = numpy.zeros(shape=image1.shape, dtype=numpy.bool_)
	mask1b = apImage.arrayToImage(mask1, normalize=False)
	mask1b = mask1b.convert("L")
	draw1 = ImageDraw.Draw(mask1b)
	draw1.polygon(a1masklist, fill="white")
	mask1 = apImage.imageToArray(mask1b, dtype=numpy.float32)

	percent1 = ndimage.sum(mask1) / (mask1.shape[0]*mask1.shape[1]) / ndimage.maximum(mask1)
	percent2 = ndimage.sum(mask2) / (mask2.shape[0]*mask2.shape[1]) / ndimage.maximum(mask2)

	return max(percent1,percent2), min(percent1,percent2)
Example #40
def getImageInfo(im):
        """
        prints out image information good for debugging
        """
        avg1=ndimage.mean(im)
        stdev1=ndimage.standard_deviation(im)
        min1=ndimage.minimum(im)
        max1=ndimage.maximum(im)

        return avg1,stdev1,min1,max1
Example #41
def test_extrema01():
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels)
        output2 = ndimage.minimum(input, labels=labels)
        output3 = ndimage.maximum(input, labels=labels)
        output4 = ndimage.minimum_position(input,
                                                     labels=labels)
        output5 = ndimage.maximum_position(input,
                                                     labels=labels)
        assert_equal(output1, (output2, output3, output4, output5))
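`ndimage.extrema` bundles the four separate calls being compared here into one, e.g.:

import numpy as np
from scipy import ndimage

a = np.array([[1, 7], [4, 2]])
print(ndimage.extrema(a))  # (1, 7, (0, 0), (0, 1)): min, max, argmin, argmax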
Example #42
def measure_labeled_regions(data, labels, tag='IMAGE',
                            measure_positions=True, measure_values=True,
                            fits_offset=True, bbox_offset=True):
    """Measure source properties in image.

    Sources are defined by a label image.

    Parameters
    ----------
    TODO

    Returns
    -------
    TODO
    """
    import scipy.ndimage as nd
    from astropy.table import Table, Column
    # Measure all segments
    nsegments = labels.max()
    index = np.arange(1, nsegments + 1)  # Measure all sources
    # Measure stuff
    sum = nd.sum(data, labels, index)
    max = nd.maximum(data, labels, index)
    mean = nd.mean(data, labels, index)
    x, y = _split_xys(nd.center_of_mass(data, labels, index))
    xpeak, ypeak = _split_xys(nd.maximum_position(data, labels, index))
    xmin, xmax, ymin, ymax = _split_slices(nd.find_objects(labels))
    area = _measure_area(labels)
    # Use FITS convention, i.e. start counting at 1
    FITS_OFFSET = 1 if fits_offset else 0
    # Use SExtractor convention, i.e. slice max is inside
    BBOX_OFFSET = -1 if bbox_offset else 0
    # Create a table
    table = Table()
    table.add_column(Column(data=index, name='NUMBER'))

    if measure_positions:
        table.add_column(Column(data=x + FITS_OFFSET, name='X_IMAGE'))
        table.add_column(Column(data=y + FITS_OFFSET, name='Y_IMAGE'))
        table.add_column(Column(data=xpeak + FITS_OFFSET, name='XPEAK_IMAGE'))
        table.add_column(Column(data=ypeak + FITS_OFFSET, name='YPEAK_IMAGE'))
        table.add_column(Column(data=xmin + FITS_OFFSET, name='XMIN_IMAGE'))
        table.add_column(Column(data=xmax + FITS_OFFSET + BBOX_OFFSET, name='XMAX_IMAGE'))
        table.add_column(Column(data=ymin + FITS_OFFSET, name='YMIN_IMAGE'))
        table.add_column(Column(data=ymax + FITS_OFFSET + BBOX_OFFSET, name='YMAX_IMAGE'))
        table.add_column(Column(data=area, name='AREA'))

    if measure_values:
        table.add_column(Column(data=max, name=tag + '_MAX'))
        table.add_column(Column(data=sum, name=tag + '_SUM'))
        table.add_column(Column(data=mean, name=tag + '_MEAN'))

    return table
Example #43
def imageinfo(im):
	#print " ... size: ",im.shape
	#print " ... sum:  ",im.sum()

	avg1=ndimage.mean(im)
	stdev1=ndimage.standard_deviation(im)
	print " ... avg:  ",round(avg1,6),"+-",round(stdev1,6)

	min1=ndimage.minimum(im)
	max1=ndimage.maximum(im)
	print " ... range:",round(min1,6),"<>",round(max1,6)

	return
Example #44
    def cal(self, stat='mean'):
        if stat == 'mean':
            zonalstats = ndimage.mean(self.data, labels=self.lb, index=self.labSet)
        if stat == 'minimum':
            zonalstats = ndimage.minimum(self.data, labels=self.lb, index=self.labSet)
        if stat == 'maximum':
            zonalstats = ndimage.maximum(self.data, labels=self.lb, index=self.labSet)
        if stat == 'sum':
            zonalstats = ndimage.sum(self.data, labels=self.lb, index=self.labSet)
        if stat == 'std':
            zonalstats = ndimage.standard_deviation(self.data, labels=self.lb, index=self.labSet)
        if stat == 'variance':
            zonalstats = ndimage.variance(self.data, labels=self.lb, index=self.labSet)
        return zonalstats
Example #45
    def filter_labels(self, labels_out, objects, workspace):
        """Filter labels out of the output
        
        Filter labels that are not in the segmented input labels. Optionally
        filter labels that are touching the edge.
        
        labels_out - the unfiltered output labels
        objects    - the objects thing, containing both segmented and 
                     small_removed labels
        """
        segmented_labels = objects.segmented
        max_out = np.max(labels_out)
        if max_out > 0:
            segmented_labels, m1 = cpo.size_similarly(labels_out, segmented_labels)
            segmented_labels[~m1] = 0
            lookup = scind.maximum(segmented_labels, labels_out, range(max_out + 1))
            lookup = np.array(lookup, int)
            lookup[0] = 0
            segmented_labels_out = lookup[labels_out]
        else:
            segmented_labels_out = labels_out.copy()
        if self.wants_discard_edge:
            image = workspace.image_set.get_image(self.image_name.value)
            if image.has_mask:
                mask_border = image.mask & ~scind.binary_erosion(image.mask)
                edge_labels = segmented_labels_out[mask_border]
            else:
                edge_labels = np.hstack(
                    (
                        segmented_labels_out[0, :],
                        segmented_labels_out[-1, :],
                        segmented_labels_out[:, 0],
                        segmented_labels_out[:, -1],
                    )
                )
            edge_labels = np.unique(edge_labels)
            #
            # Make a lookup table that translates edge labels to zero
            # but translates everything else to itself
            #
            lookup = np.arange(max_out + 1)
            lookup[edge_labels] = 0
            #
            # Run the segmented labels through this to filter out edge
            # labels
            segmented_labels_out = lookup[segmented_labels_out]

        return segmented_labels_out
Example #46
def test_extrema02():
    "extrema 2"
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels,
                                            index=2)
        output2 = ndimage.minimum(input, labels=labels,
                                            index=2)
        output3 = ndimage.maximum(input, labels=labels,
                                            index=2)
        output4 = ndimage.minimum_position(input,
                                            labels=labels, index=2)
        output5 = ndimage.maximum_position(input,
                                            labels=labels, index=2)
        assert_equal(output1, (output2, output3, output4, output5))
Example #47
def test_extrema04():
    labels = [1, 2, 0, 4]
    for type in types:
        input = np.array([[5, 4, 2, 5],
                                [3, 7, 8, 2],
                                [1, 5, 1, 1]], type)
        output1 = ndimage.extrema(input, labels, [1, 2])
        output2 = ndimage.minimum(input, labels, [1, 2])
        output3 = ndimage.maximum(input, labels, [1, 2])
        output4 = ndimage.minimum_position(input, labels,
                                                     [1, 2])
        output5 = ndimage.maximum_position(input, labels,
                                                     [1, 2])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
Example #48
def test_extrema03():
    labels = np.array([[1, 2], [2, 3]])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output1 = ndimage.extrema(input, labels=labels,
                                            index=[2, 3, 8])
        output2 = ndimage.minimum(input, labels=labels,
                                            index=[2, 3, 8])
        output3 = ndimage.maximum(input, labels=labels,
                                            index=[2, 3, 8])
        output4 = ndimage.minimum_position(input,
                                    labels=labels, index=[2, 3, 8])
        output5 = ndimage.maximum_position(input,
                                    labels=labels, index=[2, 3, 8])
        assert_array_almost_equal(output1[0], output2)
        assert_array_almost_equal(output1[1], output3)
        assert_array_almost_equal(output1[2], output4)
        assert_array_almost_equal(output1[3], output5)
Example #49
def test_stat_funcs_2d():
    a = np.array([[5,6,0,0,0], [8,9,0,0,0], [0,0,0,3,5]])
    lbl = np.array([[1,1,0,0,0], [1,1,0,0,0], [0,0,0,2,2]])

    mean = ndimage.mean(a, labels=lbl, index=[1, 2])
    assert_array_equal(mean, [7.0, 4.0])

    var = ndimage.variance(a, labels=lbl, index=[1, 2])
    assert_array_equal(var, [2.5, 1.0])

    std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
    assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))

    med = ndimage.median(a, labels=lbl, index=[1, 2])
    assert_array_equal(med, [7.0, 4.0])

    min = ndimage.minimum(a, labels=lbl, index=[1, 2])
    assert_array_equal(min, [5, 3])

    max = ndimage.maximum(a, labels=lbl, index=[1, 2])
    assert_array_equal(max, [9, 5])
Example #50
def form_clusters(data,threshold,type='p-value',cluster_size_threshold=1):

    s=ndimage.morphology.generate_binary_structure(3,3)
    if type=='p-value':
        clusters, n_clusters = ndimage.label((data < threshold) & (data>0),structure=s)
        stat_cl=ndimage.minimum(data,labels=clusters, index=range(1,n_clusters+1))
    elif type=='t-stat':
        clusters, n_clusters = ndimage.label(data > threshold,structure=s)
        stat_cl=ndimage.maximum(data,labels=clusters, index=range(1,n_clusters+1))
    else:
        raise ValueError('Wrong map type!')

    clusters_label=np.arange(1,n_clusters+1)
    count,sum=ndimage.measurements._stats(data,labels=clusters,index=clusters_label)
    clusters_mask=(count>cluster_size_threshold)
    if np.sum(count > 10**5) != 0:
        raise ValueError('Some of the clusters are too large for analysis ({} voxels). '
                         'Change the threshold used to form clusters or check your input image. '
                         'If everything is correct, then you probably need to use -model correlation '.format(np.max(count)))  # TODO correlation
    clusters_label=clusters_label[clusters_mask]
    return clusters,clusters_label,stat_cl
Example #51
    def anz(self):
        self.result = {
            'id': list(self.labSet),
            'mean': [round(x, 4) for x in ndimage.mean(self.data, labels=self.lb, index=self.labSet)],
            'min': list(ndimage.minimum(self.data, labels=self.lb, index=self.labSet)),
            'max': list(ndimage.maximum(self.data, labels=self.lb, index=self.labSet)),
            # The original stored a variance under 'std'; use the matching call.
            'std': list(ndimage.standard_deviation(self.data, labels=self.lb, index=self.labSet)),
        }
        self.df = pd.DataFrame(self.result)
        self.df = self.df[self.df['id'] > 0]
        # set_index returns a new frame; the original discarded the result.
        self.df = self.df.set_index(self.df['id'])

        # save each zonal output ...TODO
        # self.outname = self._inDs[:-4] + '.csv'
        # f = open(self.outname, 'w')
        # self.df.to_csv(f, index=False)
        # f.close()
        print(self.df.iloc[0:5, ])

        return self.df
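The same zonal-statistics pattern as a standalone sketch; the arrays and zone IDs are illustrative, not taken from the original class:

import numpy as np
import pandas as pd
from scipy import ndimage

data = np.array([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])
zones = np.array([[0, 1, 1],
                  [2, 2, 1]])     # 0 acts as the no-data zone
ids = np.unique(zones)

df = pd.DataFrame({
    'id': ids,
    'mean': ndimage.mean(data, labels=zones, index=ids),
    'min': ndimage.minimum(data, labels=zones, index=ids),
    'max': ndimage.maximum(data, labels=zones, index=ids),
    'std': ndimage.standard_deviation(data, labels=zones, index=ids),
})
df = df[df['id'] > 0].set_index('id')
print(df)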
Beispiel #52
0
def objstats(args):
    # Open and read from image and segmentation
    try:
        img_ds = gdal.Open(args.image, gdal.GA_ReadOnly)
    except:
        logger.error("Could not open image: {}".format(i=args.image))
        sys.exit(1)

    try:
        seg_ds = ogr.Open(args.segment, 0)
        seg_layer = seg_ds.GetLayer()
    except:
        logger.error("Could not open segmentation vector file: {}".format(args.segment))
        sys.exit(1)

    cols, rows = img_ds.RasterXSize, img_ds.RasterYSize
    bands = range(1, img_ds.RasterCount + 1)
    if args.bands is not None:
        bands = args.bands

    # Rasterize segments
    logger.debug("About to rasterize segment vector file")
    img_srs = osr.SpatialReference()
    img_srs.ImportFromWkt(img_ds.GetProjectionRef())

    mem_raster = gdal.GetDriverByName("MEM").Create("", cols, rows, 1, gdal.GDT_UInt32)
    mem_raster.SetProjection(img_ds.GetProjection())
    mem_raster.SetGeoTransform(img_ds.GetGeoTransform())

    # Create artificial 'FID' field
    fid_layer = seg_ds.ExecuteSQL('select FID, * from "{l}"'.format(l=seg_layer.GetName()))
    gdal.RasterizeLayer(mem_raster, [1], fid_layer, options=["ATTRIBUTE=FID"])
    logger.debug("Rasterized segment vector file")

    seg = mem_raster.GetRasterBand(1).ReadAsArray()
    logger.debug("Read segmentation image into memory")
    mem_raster = None
    seg_ds = None

    # Get list of unique segments
    useg = np.unique(seg)

    # If calc is num, do only for 1 band
    out_bands = 0
    for stat in args.stat:
        if stat == "num":
            out_bands += 1
        else:
            out_bands += len(bands)

    # Create output driver
    driver = gdal.GetDriverByName(args.format)
    out_ds = driver.Create(args.output, cols, rows, out_bands, gdal.GDT_Float32)

    # Loop through image bands
    out_b = 0
    out_2d = np.empty_like(seg, dtype=np.float32)
    for i_b, b in enumerate(bands):
        img_band = img_ds.GetRasterBand(b)
        ndv = img_band.GetNoDataValue()
        band_name = img_band.GetDescription()
        if not band_name:
            band_name = "Band {i}".format(i=b)
        logger.info('Processing input band {i}, "{b}"'.format(i=b, b=band_name))

        img = img_band.ReadAsArray().astype(gdal_array.GDALTypeCodeToNumericTypeCode(img_band.DataType))
        logger.debug('Read image band {i}, "{b}" into memory'.format(i=b, b=band_name))

        for stat in list(args.stat):  # iterate over a copy; "num" removes itself from args.stat below
            logger.debug("    calculating {s}".format(s=stat))
            if stat == "mean":
                out = ndimage.mean(img, seg, useg)
            elif stat == "var":
                out = ndimage.variance(img, seg, useg)
            elif stat == "num":
                # Remove from list of stats so it is only calculated once
                args.stat.remove("num")
                count = np.ones_like(seg)
                out = ndimage.sum(count, seg, useg)
            elif stat == "sum":
                out = ndimage.sum(img, seg, useg)
            elif stat == "min":
                out = ndimage.minimum(img, seg, useg)
            elif stat == "max":
                out = ndimage.maximum(img, seg, useg)
            elif stat == "mode":
                out = ndimage.labeled_comprehension(img, seg, useg, scipy_mode, out_2d.dtype, ndv)
            else:
                logger.error("Unknown stat. Not sure how you got here")
                sys.exit(1)

            # Transform to 2D
            out_2d = out[seg - seg.min()]

            # Fill in NDV
            if ndv is not None:
                out_2d[np.where(img == ndv)] = ndv

            # Write out the data
            out_band = out_ds.GetRasterBand(out_b + 1)
            out_band.SetDescription(band_name)
            if ndv is not None:
                out_band.SetNoDataValue(ndv)
            logger.debug("    Writing object statistic for band {b}".format(b=b + 1))
            out_band.WriteArray(out_2d, 0, 0)
            out_band.FlushCache()
            logger.debug("    Wrote out object statistic for band {b}".format(b=b + 1))
            out_b += 1

    out_ds.SetGeoTransform(img_ds.GetGeoTransform())
    out_ds.SetProjection(img_ds.GetProjection())

    img_ds = None
    seg_ds = None
    out_ds = None
    logger.info("Completed object statistic calculation")
Beispiel #53
0
    def ndimage_alg(self, img, opts):
        """Island detection using scipy.ndimage

        Use scipy.ndimage.label to detect islands of emission in the image.
        Island is defined as group of tightly connected (8-connectivity
        for 2D images) pixels with emission.

        The following cuts are applied:
         - pixel is considered to have emission if it is 'thresh_isl' times
           higher than RMS.
         - Island should have at least 'minsize' active pixels
         - There should be at lease 1 pixel in the island which is 'thresh_pix'
           times higher than noise (peak clip).

        Parameters:
        image, mask: arrays with image data and mask
        mean, rms: arrays with mean & rms maps
        thresh_isl: threshold for 'active pixels'
        thresh_pix: threshold for peak
        minsize: minimal acceptable island size

        Function returns a list of Island objects.
        """
        ### islands detection
        mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Islands")

        image = img.ch0_arr
        mask = img.mask_arr
        rms = img.rms_arr
        mean = img.mean_arr
        thresh_isl = opts.thresh_isl
        thresh_pix = img.thresh_pix
        clipped_mean = img.clipped_mean
        saverank = opts.savefits_rankim

                        # act_pixels is true if significant emission
        act_pixels = (image-mean)/thresh_isl >= rms
        if isinstance(mask, N.ndarray):
            act_pixels[mask] = False

                        # dimension of image
        rank = len(image.shape)
                        # generates matrix for connectivity, in this case, 8-conn
        connectivity = nd.generate_binary_structure(rank, rank)
                        # labels = matrix with value = (initial) island number
        labels, count = nd.label(act_pixels, connectivity)
                        # slices has limits of bounding box of each such island
        slices = nd.find_objects(labels)
        img.island_labels = labels

        ### apply cuts on island size and peak value
        pyrank = N.zeros(image.shape, dtype=N.int32)
        res = []
        islid = 0
        for idx, s in enumerate(slices):
            idx += 1 # nd.labels indices are counted from 1
                        # number of pixels inside bounding box which are in island
            isl_size = (labels[s] == idx).sum()
            isl_peak = nd.maximum(image[s], labels[s], idx)
            isl_maxposn = tuple(N.array(N.unravel_index(N.nanargmax(image[s]), image[s].shape))+\
                          N.array((s[0].start, s[1].start)))
            if (isl_size >= img.minpix_isl) and (isl_peak - mean[isl_maxposn])/thresh_pix > rms[isl_maxposn]:
                isl = Island(image, mask, mean, rms, labels, s, idx, img.pixel_beamarea())
                res.append(isl)
                pyrank[isl.bbox] += N.invert(isl.mask_active)*idx / idx

        return res
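A stripped-down sketch of the same pipeline — threshold, 8-connected labelling, then size and peak cuts — with flat noise maps and made-up thresholds standing in for the Island bookkeeping:

import numpy as np
from scipy import ndimage as nd

image = np.random.default_rng(0).normal(size=(64, 64))
image[20:24, 30:34] += 10.0                 # inject one bright island
mean, rms = 0.0, 1.0                        # flat maps for the sketch
thresh_isl, thresh_pix, minsize = 3.0, 5.0, 4

act_pixels = (image - mean) >= thresh_isl * rms
connectivity = nd.generate_binary_structure(2, 2)   # 8-connectivity in 2-D
labels, count = nd.label(act_pixels, connectivity)

islands = []
for idx, s in enumerate(nd.find_objects(labels), start=1):
    isl_size = (labels[s] == idx).sum()
    isl_peak = nd.maximum(image[s], labels[s], idx)
    if isl_size >= minsize and (isl_peak - mean) >= thresh_pix * rms:
        islands.append((idx, s, isl_size, isl_peak))
# islands keeps the injected 16-pixel blob; isolated noise spikes fail the size cut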
Beispiel #54
0
def test_maximum02():
    labels = np.array([1, 0], bool)
    input = np.array([[2, 2], [2, 4]], bool)
    output = ndimage.maximum(input, labels=labels)
    assert_almost_equal(output, 1.0)
Beispiel #55
0
def test_maximum05():
    "Ticket #501"
    x = np.array([-3,-2,-1])
    assert_equal(ndimage.maximum(x),-1)
Beispiel #56
0
def test_maximum05():
    # Regression test for ticket #501 (Trac)
    x = np.array([-3,-2,-1])
    assert_equal(ndimage.maximum(x),-1)
Beispiel #57
0
    def run(self, workspace):
        if self.show_window:
            workspace.display_data.col_labels = ("Image", "Object", "Feature", "Mean", "Median", "STD")
            workspace.display_data.statistics = statistics = []
        for image_name in [img.name for img in self.images]:
            image = workspace.image_set.get_image(image_name.value, must_be_grayscale=True)
            for object_name in [obj.name for obj in self.objects]:
                # Need to refresh image after each iteration...
                img = image.pixel_data
                if image.has_mask:
                    masked_image = img.copy()
                    masked_image[~image.mask] = 0
                else:
                    masked_image = img
                objects = workspace.object_set.get_objects(object_name.value)
                nobjects = objects.count
                integrated_intensity = np.zeros((nobjects,))
                integrated_intensity_edge = np.zeros((nobjects,))
                mean_intensity = np.zeros((nobjects,))
                mean_intensity_edge = np.zeros((nobjects,))
                std_intensity = np.zeros((nobjects,))
                std_intensity_edge = np.zeros((nobjects,))
                min_intensity = np.zeros((nobjects,))
                min_intensity_edge = np.zeros((nobjects,))
                max_intensity = np.zeros((nobjects,))
                max_intensity_edge = np.zeros((nobjects,))
                mass_displacement = np.zeros((nobjects,))
                lower_quartile_intensity = np.zeros((nobjects,))
                median_intensity = np.zeros((nobjects,))
                mad_intensity = np.zeros((nobjects,))
                upper_quartile_intensity = np.zeros((nobjects,))
                cmi_x = np.zeros((nobjects,))
                cmi_y = np.zeros((nobjects,))
                max_x = np.zeros((nobjects,))
                max_y = np.zeros((nobjects,))
                for labels, lindexes in objects.get_labels():
                    lindexes = lindexes[lindexes != 0]
                    labels, img = cpo.crop_labels_and_image(labels, img)
                    _, masked_image = cpo.crop_labels_and_image(labels, masked_image)
                    outlines = cpmo.outline(labels)

                    if image.has_mask:
                        _, mask = cpo.crop_labels_and_image(labels, image.mask)
                        masked_labels = labels.copy()
                        masked_labels[~mask] = 0
                        masked_outlines = outlines.copy()
                        masked_outlines[~mask] = 0
                    else:
                        masked_labels = labels
                        masked_outlines = outlines

                    lmask = (masked_labels > 0) & np.isfinite(img)  # ignore NaNs/Infs; '&' binds tighter than '>'
                    has_objects = np.any(lmask)
                    if has_objects:
                        limg = img[lmask]
                        llabels = labels[lmask]
                        mesh_y, mesh_x = np.mgrid[0 : masked_image.shape[0], 0 : masked_image.shape[1]]
                        mesh_x = mesh_x[lmask]
                        mesh_y = mesh_y[lmask]
                        lcount = fix(nd.sum(np.ones(len(limg)), llabels, lindexes))
                        integrated_intensity[lindexes - 1] = fix(nd.sum(limg, llabels, lindexes))
                        mean_intensity[lindexes - 1] = integrated_intensity[lindexes - 1] / lcount
                        std_intensity[lindexes - 1] = np.sqrt(
                            fix(nd.mean((limg - mean_intensity[llabels - 1]) ** 2, llabels, lindexes))
                        )
                        min_intensity[lindexes - 1] = fix(nd.minimum(limg, llabels, lindexes))
                        max_intensity[lindexes - 1] = fix(nd.maximum(limg, llabels, lindexes))
                        # Compute the position of the intensity maximum
                        max_position = np.array(fix(nd.maximum_position(limg, llabels, lindexes)), dtype=int)
                        max_position = np.reshape(max_position, (max_position.shape[0],))
                        max_x[lindexes - 1] = mesh_x[max_position]
                        max_y[lindexes - 1] = mesh_y[max_position]
                        # The mass displacement is the distance between the center
                        # of mass of the binary image and of the intensity image. The
                        # center of mass is the average X or Y for the binary image
                        # and the sum of X or Y * intensity / integrated intensity
                        cm_x = fix(nd.mean(mesh_x, llabels, lindexes))
                        cm_y = fix(nd.mean(mesh_y, llabels, lindexes))

                        i_x = fix(nd.sum(mesh_x * limg, llabels, lindexes))
                        i_y = fix(nd.sum(mesh_y * limg, llabels, lindexes))
                        cmi_x[lindexes - 1] = i_x / integrated_intensity[lindexes - 1]
                        cmi_y[lindexes - 1] = i_y / integrated_intensity[lindexes - 1]
                        diff_x = cm_x - cmi_x[lindexes - 1]
                        diff_y = cm_y - cmi_y[lindexes - 1]
                        mass_displacement[lindexes - 1] = np.sqrt(diff_x * diff_x + diff_y * diff_y)
                        #
                        # Sort the intensities by label, then intensity.
                        # For each label, find the index above and below
                        # the 25%, 50% and 75% mark and take the weighted
                        # average.
                        #
                        order = np.lexsort((limg, llabels))
                        areas = lcount.astype(int)
                        indices = np.cumsum(areas) - areas
                        for dest, fraction in (
                            (lower_quartile_intensity, 1.0 / 4.0),
                            (median_intensity, 1.0 / 2.0),
                            (upper_quartile_intensity, 3.0 / 4.0),
                        ):
                            qindex = indices.astype(float) + areas * fraction
                            qfraction = qindex - np.floor(qindex)
                            qindex = qindex.astype(int)
                            qmask = qindex < indices + areas - 1
                            qi = qindex[qmask]
                            qf = qfraction[qmask]
                            dest[lindexes[qmask] - 1] = limg[order[qi]] * (1 - qf) + limg[order[qi + 1]] * qf
                            #
                            # In some situations (e.g. only 3 points), there may
                            # not be an upper bound.
                            #
                            qmask = (~qmask) & (areas > 0)
                            dest[lindexes[qmask] - 1] = limg[order[qindex[qmask]]]
                        #
                        # Once again, for the MAD
                        #
                        madimg = np.abs(limg - median_intensity[llabels - 1])
                        order = np.lexsort((madimg, llabels))
                        qindex = indices.astype(float) + areas / 2.0
                        qfraction = qindex - np.floor(qindex)
                        qindex = qindex.astype(int)
                        qmask = qindex < indices + areas - 1
                        qi = qindex[qmask]
                        qf = qfraction[qmask]
                        mad_intensity[lindexes[qmask] - 1] = madimg[order[qi]] * (1 - qf) + madimg[order[qi + 1]] * qf
                        qmask = (~qmask) & (areas > 0)
                        mad_intensity[lindexes[qmask] - 1] = madimg[order[qindex[qmask]]]

                    emask = masked_outlines > 0
                    eimg = img[emask]
                    elabels = labels[emask]
                    has_edge = len(eimg) > 0
                    if has_edge:
                        ecount = fix(nd.sum(np.ones(len(eimg)), elabels, lindexes))
                        integrated_intensity_edge[lindexes - 1] = fix(nd.sum(eimg, elabels, lindexes))
                        mean_intensity_edge[lindexes - 1] = integrated_intensity_edge[lindexes - 1] / ecount
                        std_intensity_edge[lindexes - 1] = np.sqrt(
                            fix(nd.mean((eimg - mean_intensity_edge[elabels - 1]) ** 2, elabels, lindexes))
                        )
                        min_intensity_edge[lindexes - 1] = fix(nd.minimum(eimg, elabels, lindexes))
                        max_intensity_edge[lindexes - 1] = fix(nd.maximum(eimg, elabels, lindexes))
                m = workspace.measurements
                for category, feature_name, measurement in (
                    (INTENSITY, INTEGRATED_INTENSITY, integrated_intensity),
                    (INTENSITY, MEAN_INTENSITY, mean_intensity),
                    (INTENSITY, STD_INTENSITY, std_intensity),
                    (INTENSITY, MIN_INTENSITY, min_intensity),
                    (INTENSITY, MAX_INTENSITY, max_intensity),
                    (INTENSITY, INTEGRATED_INTENSITY_EDGE, integrated_intensity_edge),
                    (INTENSITY, MEAN_INTENSITY_EDGE, mean_intensity_edge),
                    (INTENSITY, STD_INTENSITY_EDGE, std_intensity_edge),
                    (INTENSITY, MIN_INTENSITY_EDGE, min_intensity_edge),
                    (INTENSITY, MAX_INTENSITY_EDGE, max_intensity_edge),
                    (INTENSITY, MASS_DISPLACEMENT, mass_displacement),
                    (INTENSITY, LOWER_QUARTILE_INTENSITY, lower_quartile_intensity),
                    (INTENSITY, MEDIAN_INTENSITY, median_intensity),
                    (INTENSITY, MAD_INTENSITY, mad_intensity),
                    (INTENSITY, UPPER_QUARTILE_INTENSITY, upper_quartile_intensity),
                    (C_LOCATION, LOC_CMI_X, cmi_x),
                    (C_LOCATION, LOC_CMI_Y, cmi_y),
                    (C_LOCATION, LOC_MAX_X, max_x),
                    (C_LOCATION, LOC_MAX_Y, max_y),
                ):
                    measurement_name = "%s_%s_%s" % (category, feature_name, image_name.value)
                    m.add_measurement(object_name.value, measurement_name, measurement)
                    if self.show_window and len(measurement) > 0:
                        statistics.append(
                            (
                                image_name.value,
                                object_name.value,
                                feature_name,
                                np.round(np.mean(measurement), 3),
                                np.round(np.median(measurement), 3),
                                np.round(np.std(measurement), 3),
                            )
                        )
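The per-label quartile computation above (lexsort by label then intensity, then interpolate at a fractional index into each label's run) can be isolated into a small sketch; the names and data here are illustrative:

import numpy as np

values = np.array([3.0, 1.0, 2.0, 10.0, 30.0, 20.0, 40.0])
labels = np.array([1,   1,   1,   2,    2,    2,    2])

order = np.lexsort((values, labels))        # sort by label, then by value
areas = np.bincount(labels)[1:]             # pixels per label: [3, 4]
indices = np.cumsum(areas) - areas          # start offset of each label's run

fraction = 0.5                              # the median; use 0.25 / 0.75 for quartiles
qindex = indices.astype(float) + areas * fraction
qfrac = qindex - np.floor(qindex)
qindex = qindex.astype(int)

qmask = qindex < indices + areas - 1        # an upper neighbour exists
med = np.empty(len(areas))
med[qmask] = (values[order[qindex[qmask]]] * (1 - qfrac[qmask])
              + values[order[qindex[qmask] + 1]] * qfrac[qmask])
med[~qmask] = values[order[qindex[~qmask]]]
# med -> [2.5, 30.0] under this interpolation convention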
Beispiel #58
0
def estimate_pk_parms_2d(x, y, f, pktype):
    """
    Gives an initial guess of parameters for an analytic fit of
    two-dimensional peak data.

    Required Arguments:
    x -- (n x m) ndarray of coordinate positions for dimension 1 (numpy.meshgrid formatting)
    y -- (n x m) ndarray of coordinate positions for dimension 2 (numpy.meshgrid formatting)
    f -- (n x m) ndarray of intensity measurements at coordinate positions x and y
    pktype -- string, type of analytic function that will be used to fit the data;
    current options are "gaussian", "gaussian_rot" (Gaussian with arbitrary axes) and
    "split_pvoigt_rot" (split pseudo-Voigt with arbitrary axes)

    Outputs:
    p -- (m) ndarray containing initial guesses for parameters for the input peak type
    (see peak function help for more information)
    """

    # Planar background estimated from the four corner intensities.
    bg0 = np.mean([f[0, 0], f[-1, 0], f[-1, -1], f[0, -1]])
    bg1x = (np.mean([f[-1, -1], f[0, -1]]) - np.mean([f[0, 0], f[-1, 0]])) / (x[0, -1] - x[0, 0])
    bg1y = (np.mean([f[-1, -1], f[-1, 0]]) - np.mean([f[0, 0], f[0, -1]])) / (y[-1, 0] - y[0, 0])

    fnobg = f - (bg0 + bg1x * x + bg1y * y)

    labels, numlabels = imgproc.label(fnobg > np.max(fnobg) / 2.)

    # Looks for the largest peak.
    areas = np.zeros(numlabels)
    for ii in np.arange(1, numlabels + 1, 1):
        areas[ii - 1] = np.sum(labels == ii)

    peakIndex = np.argmax(areas) + 1

#    # currently looks for peak closest to center
#    dist = np.zeros(numlabels)
#    for ii in np.arange(1, numlabels + 1, 1):
#        dist[ii-1] = ######
#
#    peakIndex = np.argmin(dist) + 1

    FWHMx = np.max(x[labels == peakIndex]) - np.min(x[labels == peakIndex])
    FWHMy = np.max(y[labels == peakIndex]) - np.min(y[labels == peakIndex])

    coords = imgproc.maximum_position(fnobg, labels=labels, index=peakIndex)
    A = imgproc.maximum(fnobg, labels=labels, index=peakIndex)
    x0 = x[coords]
    y0 = y[coords]

    if pktype == 'gaussian':
        p = [A, x0, y0, FWHMx, FWHMy, bg0, bg1x, bg1y]
    elif pktype == 'gaussian_rot':
        p = [A, x0, y0, FWHMx, FWHMy, 0., bg0, bg1x, bg1y]
    elif pktype == 'split_pvoigt_rot':
        p = [A, x0, y0, FWHMx, FWHMx, FWHMy, FWHMy, 0.5, 0.5, 0.5, 0.5, 0., bg0, bg1x, bg1y]
    else:
        # The original fell through to a NameError on unknown types.
        raise ValueError('Unknown pktype: {}'.format(pktype))

    p = np.array(p)
    return p
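A hedged usage sketch, assuming estimate_pk_parms_2d (and its imgproc alias for scipy.ndimage) is importable; the grid and peak parameters are illustrative:

import numpy as np

x, y = np.meshgrid(np.linspace(-5., 5., 101), np.linspace(-5., 5., 101))
f = 100. * np.exp(-((x - 1.) ** 2 + (y + 0.5) ** 2) / (2 * 0.8 ** 2)) + 2.

p = estimate_pk_parms_2d(x, y, f, 'gaussian')
# p -> [A, x0, y0, FWHMx, FWHMy, bg0, bg1x, bg1y], with A ~ 100,
# (x0, y0) ~ (1.0, -0.5), and FWHMx ~ FWHMy ~ 2.355 * 0.8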