Example #1
def _reassign_bad_bins(classe, x, y):
    """
    Implements steps (vi)-(vii) in section 5.1 of Cappellari & Copin (2003)

    """
    # Find the centroid of all successful bins.
    # CLASS = 0 are unbinned pixels which are excluded.
    #
    good = np.unique(classe[classe > 0])
    xnode = ndimage.mean(x, labels=classe, index=good)
    ynode = ndimage.mean(y, labels=classe, index=good)

    # Reassign pixels of bins with S/N < targetSN
    # to the closest centroid of a good bin
    #
    bad = classe == 0
    index = np.argmin((x[bad, np.newaxis] - xnode)**2 + (y[bad, np.newaxis] - ynode)**2, axis=1)
    classe[bad] = good[index]

    # Recompute all centroids of the reassigned bins.
    # These will be used as starting points for the CVT.
    #
    good = np.unique(classe)
    xnode = ndimage.mean(x, labels=classe, index=good)
    ynode = ndimage.mean(y, labels=classe, index=good)

    return xnode, ynode
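A small usage sketch of the function above on a toy pixel grid (the imports and array values here are illustrative, not from the original source):

import numpy as np
from scipy import ndimage

# three pixels in bin 1, two in bin 2, and one unbinned pixel (classe == 0)
classe = np.array([1, 1, 1, 2, 2, 0])
x = np.array([0., 1., 0., 5., 6., 4.5])
y = np.array([0., 0., 1., 5., 5., 5.])

xnode, ynode = _reassign_bad_bins(classe, x, y)
print(classe)        # the unbinned pixel joins bin 2, its closest centroid
print(xnode, ynode)  # bin centroids recomputed after reassignment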
Example #2
def ClusterObjects(farn, struct_elem):
    magn_img = farn.magnitude_image
    dir_img = farn.direction_image    
    
    bin_img = np.zeros(shape=(magn_img.shape[0], magn_img.shape[1]), dtype=np.uint8)
    bin_img[magn_img < 25] = 0
    bin_img[magn_img >= 25] = 1
    
    bin_img = ndimage.binary_dilation(bin_img, structure=struct_elem, iterations=3).astype(bin_img.dtype)

    labels, nb_labels = Morphology.ConnenctedComponents(bin_img)    
    filt_labels, areas, nb_new_labels = Morphology.FilterArea(bin_img, labels, nb_labels, 480)
    
    
    temp_magn = ndimage.mean(magn_img, filt_labels, range(nb_new_labels + 1))
    temp_dir = ndimage.mean(dir_img, filt_labels, range(nb_new_labels + 1))
    
    data = np.concatenate((np.reshape(temp_magn, (-1,1)), np.reshape(temp_dir, (-1,1))), axis=1)
    
    clusters = -1
    if nb_new_labels >= 1:
        Y = pdist(data, 'euclidean')
        agglo = AgglomerativeClustering.Agglomerative(Y, 50.)
        agglo.AggloClustering(criterion = 'distance', method = 'single', metric = 'euclidean', normalized = False)

        clusters = agglo.clusters
             
    bin_img[filt_labels == 0] = 0
    bin_img[filt_labels >= 1] = 1
    
    
    
    return bin_img, nb_new_labels, temp_magn, temp_dir, data, clusters
Example #3
	def calculateParticleStackStats(self, imgstackfile, boxedpartdatas):
		### read mean and stdev
		partmeantree = []
		t0 = time.time()
		imagicdata = apImagicFile.readImagic(imgstackfile)
		apDisplay.printMsg("gathering mean and stdev data")
		### loop over the particles and read data
		for i in range(len(boxedpartdatas)):
			partdata = boxedpartdatas[i]
			partarray = imagicdata['images'][i]

			### if particle stdev == 0, then it is all constant, i.e., a bad particle
			stdev = float(partarray.std())
			if stdev < 1.0e-6:
				apDisplay.printError("Standard deviation == 0 for particle %d in image %s"%(i,self.shortname))

			### skew and kurtosis
			partravel = numpy.ravel(partarray)
			skew = float(stats.skew(partravel))
			kurtosis = float(stats.kurtosis(partravel))

			### edge and center stats
			edgemean = float(ndimage.mean(partarray, self.edgemap, 1.0))
			edgestdev = float(ndimage.standard_deviation(partarray, self.edgemap, 1.0))
			centermean = float(ndimage.mean(partarray, self.edgemap, 0.0))
			centerstdev = float(ndimage.standard_deviation(partarray, self.edgemap, 0.0))
			self.summedParticles += partarray

			### take abs of all means, because ctf whole image may become negative
			partmeandict = {
				'partdata': partdata,
				'mean': abs(float(partarray.mean())),
				'stdev': stdev,
				'min': float(partarray.min()),
				'max': float(partarray.max()),
				'skew': skew,
				'kurtosis': kurtosis,
				'edgemean': abs(edgemean),
				'edgestdev': edgestdev,
				'centermean': abs(centermean),
				'centerstdev': centerstdev,
			}
			### show stats for first particle
			"""
			if i == 0:
				keys = partmeandict.keys()
				keys.sort()
				mystr = "PART STATS: "
				for key in keys:
					if isinstance(partmeandict[key], float):
						mystr += "%s=%.3f :: "%(key, partmeandict[key])
				print mystr
			"""
			partmeantree.append(partmeandict)
		self.meanreadtimes.append(time.time()-t0)
		return partmeantree
Example #4
def compute_resultant_force_directions(nx_arr, ny_arr, nz_arr,
                                       label_im, nb_labels):
    nx = ni.mean(nx_arr, label_im, range(1, nb_labels + 1))
    ny = ni.mean(ny_arr, label_im, range(1, nb_labels + 1))
    nz = ni.mean(nz_arr, label_im, range(1, nb_labels + 1))
    if not isinstance(nx, list):
        nx = [nx]
        ny = [ny]
        nz = [nz]
    return nx, ny, nz
Example #5
def compute_centers_of_pressure(cx_arr, cy_arr, cz_arr, label_im,
                                nb_labels):
    # for the forearm skin patch, it might be more accurate to perform
    # the averaging in cylindrical coordinates (to ensure that the COP
    # lies on the surface of the skin), but Advait does not care
    # enough.
    cx = ni.mean(cx_arr, label_im, range(1, nb_labels + 1))
    cy = ni.mean(cy_arr, label_im, range(1, nb_labels + 1))
    cz = ni.mean(cz_arr, label_im, range(1, nb_labels + 1))
    if not isinstance(cx, list):
        cx = [cx]
        cy = [cy]
        cz = [cz]
    return cx, cy, cz
Example #6
        def createRaster(self):
                """
                from center of image, generate a raster of points
                """
                #print "xy raster"
                try:
                        imageshape = self.currentimagedata['image'].shape
                except:
                        imageshape = (512,512)
                xspacing = float(self.settings['raster spacing'])
                xpoints = int(self.settings['raster limit'])

                if self.settings['raster symmetric']:
                        yspacing = xspacing
                        ypoints = xpoints
                else:
                        yspacing = float(self.settings['raster spacing asymm'])
                        ypoints = int(self.settings['raster limit asymm'])

                radians = math.pi * self.settings['raster angle'] / 180.0
                if self.settings['raster center on image']:
                        x0 = imageshape[0]/2.0
                        y0 = imageshape[1]/2.0
                else:
                        x0 = float(self.settings['raster center x'])
                        y0 = float(self.settings['raster center y'])
                points = []

                #new stuff
                xlist = numpy.asarray(range(xpoints), dtype=numpy.float32)
                xlist -= ndimage.mean(xlist)
                ylist = numpy.asarray(range(ypoints), dtype=numpy.float32)
                ylist -= ndimage.mean(ylist)

                for xt in xlist:
                        xshft = xt * xspacing
                        for yt in ylist:
                                yshft = yt * yspacing
                                xrot = xshft * numpy.cos(radians) - yshft * numpy.sin(radians) 
                                yrot = yshft * numpy.cos(radians) + xshft * numpy.sin(radians)
                                x = int(xrot + x0)
                                y = int(yrot + y0)
                                if x < 0 or x >= imageshape[0]: continue
                                if y < 0 or y >= imageshape[1]: continue
                                points.append( (x,y) )

                #old stuff
                self.setTargets(points, 'Raster')
                self.rasterpoints = points
                self.logger.info('Full raster has %s points' % (len(points),))
Example #7
def test_mean03():
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels,
                                        index=2)
        assert_almost_equal(output, 3.0)
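The test above pins down ndimage.mean's labelled-mean semantics; a minimal standalone demonstration (assuming only numpy and scipy are installed):

import numpy as np
from scipy import ndimage

img = np.array([[1., 2.], [3., 4.]])
labels = np.array([[1, 2], [2, 0]])

print(ndimage.mean(img, labels=labels, index=2))       # scalar index -> (2 + 3) / 2 = 2.5
print(ndimage.mean(img, labels=labels, index=[1, 2]))  # sequence index -> [1.0, 2.5]
print(ndimage.mean(img, labels=labels))                # no index -> mean over labels > 0 = 2.0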
Example #8
def find_local_maxima(image, min_distance):
    """Find maxima in an image.

    Finds the highest-valued points in an image, such that each point is
    separated by at least min_distance.

    If there is a flat region that is entirely at a maximum, the center of
    mass of the region is reported. Large flat regions more than min_distance
    in radius will be erroneously returned as maxima even if they are not;
    further filtering should be performed to exclude these if needed.

    Returns the position of the maxima and the value at each maximum.

    Parameters:
        image: image of arbitrary dimensionality
        min_distance: maxima found will be at least this many pixels apart

    Returns:
        centroids: list of centers of each maxima
        values: image value at each maxima

  """
    image_max = ndimage.maximum_filter(image, size=2*min_distance+1, mode='constant')
    peak_mask = (image == image_max)
    # NB: some maxima might be marked by multiple contiguous pixels if the image
    # has "plateaus". So we need to label the mask and get the centroids
    # of each of the labeled regions.
    labeled_image, num_regions = ndimage.label(peak_mask)
    label_indices = numpy.arange(1, num_regions+1)
    centroids = ndimage.center_of_mass(peak_mask, labeled_image, label_indices)
    values = ndimage.mean(image, labeled_image, label_indices)
    return numpy.array(centroids), values
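A quick usage sketch on a synthetic image (toy data with two injected peaks, not from the original source; the noise floor avoids the flat-region caveat above):

import numpy
from scipy import ndimage

rng = numpy.random.default_rng(0)
image = rng.random((50, 50))
image[10, 10] = 5.0
image[30, 40] = 3.0

centroids, values = find_local_maxima(image, min_distance=5)
order = numpy.argsort(values)[::-1]
print(centroids[order[:2]])  # ~[[10, 10], [30, 40]], the two injected peaks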
Example #9
def interpixel_watershed(img,min_dist=3,max_iter=100000, **filter_args):
    ## does not work... development stopped at sort_label
    img = _filter(img, **filter_args)
    
    # make marker map
    size = (2*min_dist+1)
    marker = _local_min(-img,footprint=size).astype('int16') # find markers
    marker,N = _nd.label(marker, structure=_np.ones(img.ndim*(3,)))
    
    # sort label (marker)
    mean  = _nd.mean(img,marker,index=_np.arange(1,N+1))
    order = _np.argsort(mean)
    # here order[i] = position of marker==i+1 in the sorted list of all marker values
    #order[marker[marker!=0]-1]
    
    # => knowing that all pixels of a label have same image value
    #mask = marker!=0
    #label  = marker[mask]
    
    #order = _np.argsort(img[mask])
   
    
    #marker[mask] = label[order]
    
    return marker, N, order
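The one finished piece above is ordering markers by their mean image value; a self-contained illustration of that step (toy arrays, with np as numpy and nd as scipy.ndimage):

import numpy as np
from scipy import ndimage as nd

img = np.array([[5., 5., 1.], [5., 3., 1.], [3., 3., 1.]])
marker = np.array([[1, 1, 0], [1, 2, 0], [2, 2, 3]])

N = marker.max()
mean = nd.mean(img, marker, index=np.arange(1, N + 1))  # [5.0, 3.0, 1.0]
order = np.argsort(mean)                                # [2, 1, 0]
print(mean, order)  # order[k] is the marker (minus 1) with the k-th smallest mean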
Example #10
    def _estimate_profile(self, image, image_err, mask):
        """
        Estimate image profile.
        """
        from scipy import ndimage

        p = self.parameters
        labels = self._label_image(image, mask)

        profile_err = None

        index = np.arange(1, len(self._get_x_edges(image)))

        if p['method'] == 'sum':
            profile = ndimage.sum(image.data, labels.data, index)

            if image.unit.is_equivalent('counts'):
                profile_err = np.sqrt(profile)
            elif image_err:
                # gaussian error propagation
                err_sum = ndimage.sum(image_err.data ** 2, labels.data, index)
                profile_err = np.sqrt(err_sum)

        elif p['method'] == 'mean':
            # gaussian error propagation
            profile = ndimage.mean(image.data, labels.data, index)
            if image_err:
                N = ndimage.sum(~np.isnan(image_err.data), labels.data, index)
                err_sum = ndimage.sum(image_err.data ** 2, labels.data, index)
                profile_err = np.sqrt(err_sum) / N

        return profile, profile_err
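The 'mean' branch propagates per-pixel errors as err = sqrt(sum(err_i^2)) / N per label; a standalone sketch of just that arithmetic (plain toy arrays rather than the original image objects):

import numpy as np
from scipy import ndimage

data = np.array([[1., 3.], [5., 7.]])
err = np.array([[0.5, 0.5], [1.0, 1.0]])
labels = np.array([[1, 1], [2, 2]])
index = [1, 2]

profile = ndimage.mean(data, labels, index)        # [2., 6.]
N = ndimage.sum(~np.isnan(err), labels, index)     # pixels with a valid error: [2., 2.]
err_sum = ndimage.sum(err ** 2, labels, index)     # [0.5, 2.]
profile_err = np.sqrt(err_sum) / N                 # gaussian propagation of the mean
print(profile, profile_err)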
Example #11
def test_mean01():
    "mean 1"
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels)
        assert_almost_equal(output, 2.0)
Example #12
 def compute(self, dataset_pool):
     persons = dataset_pool.get_dataset('person')
     hhs = self.get_dataset()
     where_adult = where(persons.get_attribute('is_adult'))[0]
     age = persons.get_attribute('age')
     return array(ndimage.mean(age[where_adult], labels=persons.get_attribute('household_id')[where_adult],
                           index=hhs.get_id_attribute()))
Example #13
def getRealLabeledMeanStdev(image,labeled_image,indices,info):
        print("Getting real mean and stdev")
        mean=nd.mean(image,labels=labeled_image,index=indices)
        stdev=nd.standard_deviation(image,labels=labeled_image,index=indices)
        ll=0
        try:
                len(mean)
        except:
                mean=[mean]
                stdev=[stdev]
                try:
                        len(indices)
                except:
                        indices=[indices]
        try:
                info.keys()
        except:
                offset=1
        else:
                offset=0
        for l in indices:
                info[l-offset][1]=mean[ll]
                info[l-offset][2]=stdev[ll]
                ll += 1
        return info
Example #14
def binarization(image):
    """
    Binarize the image to improve contrast. Makes the image black and white only.
    :param image: 256x256 numpy array of fingerprint
    :return: 256x256 numpy array of enhanced fingerprint
    """
    size = 16  # window size

    (w, h) = image.shape
    w1 = int(np.floor(w / size) * size)
    h1 = int(np.floor(h / size) * size)
    # Running the same window as the FFT... eventually we should just have one loop
    for i in range(0, w1 - 1, size):
        for j in range(0, h1 - 1, size):
            # Create window bounds
            a = size + i
            b = size + j
            # Get windowed mean
            mean = ndimage.mean(image[i:a, j:b])
            # If a pixel is greater than the window mean, set 1, otherwise 0
            image[i:a, j:b] = np.where(image[i:a, j:b] > mean, 1, 0)

    return image
Example #15
def normalizeImage(a):
	"""	
	Normalizes numarray to fit into an image format
	that is values between 0 and 255.
	"""
	#Minimum image value, i.e. how black the image can get
	minlevel = 0.0
	#Maximum image value, i.e. how white the image can get
	maxlevel = 235.0
	#Maximum standard deviations to include, i.e. pixel > N*stdev --> white
	devlimit=5.0
	imrange = maxlevel - minlevel

	avg1=ndimage.mean(a)

	stdev1=ndimage.standard_deviation(a)

	min1=ndimage.minimum(a)
	if(min1 < avg1-devlimit*stdev1):
		min1 = avg1-devlimit*stdev1

	max1=ndimage.maximum(a)
	if(max1 > avg1+devlimit*stdev1):
		max1 = avg1+devlimit*stdev1

	a = (a - min1)/(max1 - min1)*imrange + minlevel
	a = numarray.where(a > maxlevel,255.0,a)
	a = numarray.where(a < minlevel,0.0,a)

	return a
Example #16
def phase_correlate(image, template):
	#CALCULATE BIGGER MAP SIZE
	shape = image.shape
	kshape = template.shape
	oversized = (numarray.array(shape) + numarray.array(kshape))

	#EXPAND IMAGE TO BIGGER SIZE
	avg=ndimage.mean(image)
	image2 = convolve.iraf_frame.frame(image, oversized, mode="wrap", cval=avg)

	#CALCULATE FOURIER TRANSFORMS
	imagefft = fft.real_fft2d(image2, s=oversized)
	templatefft = fft.real_fft2d(template, s=oversized)
	#imagefft = fft.fft2d(image2, s=oversized)
	#templatefft = fft.fft2d(template, s=oversized)

	#MULTIPLY FFTs TOGETHER
	newfft = (templatefft * numarray.conjugate(imagefft)).copy()
	del templatefft

	#NORMALIZE CC TO GET PC
	print "d"
	phasefft = newfft / numarray.absolute(newfft)
	del newfft
	print "d"

	#INVERSE TRANSFORM TO GET RESULT
	correlation = fft.inverse_real_fft2d(phasefft, s=oversized)
	#correlation = fft.inverse_fft2d(phasefft, s=oversized)
	del phasefft

	#RETURN CENTRAL PART OF IMAGE (SIDES ARE JUNK)
	return correlation[ kshape[0]/2-1:shape[0]+kshape[0]/2-1, kshape[1]/2-1:shape[1]+kshape[1]/2-1 ]
Example #17
def labelmeanfilter(y, x):
   # requires integer labels
   # from mailing list scipy-user 2009-02-11
   labelsunique = np.arange(np.max(y)+1)
   labelmeans = np.array(ndimage.mean(x, labels=y, index=labelsunique))
   # returns label means for each original observation
   return labelmeans[y]
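A usage sketch (toy arrays; assumes numpy and scipy.ndimage imported as in the example):

import numpy as np
from scipy import ndimage

y = np.array([0, 0, 1, 1, 1])   # integer group label per observation
x = np.array([2., 4., 3., 6., 9.])

print(labelmeanfilter(y, x))    # [3. 3. 6. 6. 6.] -- each observation replaced by its group mean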
Example #18
def labelmeanfilter_str(ys, x):
    # works also for string labels in ys, but requires 1D
    # from mailing list scipy-user 2009-02-11
    # (np.unique1d was removed from numpy; np.unique is the replacement, and the
    # index range must come from the inverse labels, not the string values)
    unil, unilinv = np.unique(ys, return_inverse=True)
    labelmeans = np.array(ndimage.mean(x, labels=unilinv, index=np.arange(len(unil))))
    arr3 = labelmeans[unilinv]
    return arr3
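A usage sketch with string labels (toy arrays, assuming the same imports):

import numpy as np
from scipy import ndimage

ys = np.array(['a', 'b', 'a', 'b'])
x = np.array([1., 3., 5., 7.])
print(labelmeanfilter_str(ys, x))  # [3. 5. 3. 5.] -- group means of 'a' and 'b' broadcast back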
Example #19
def block_mean(ar, fact):
    assert isinstance(fact, int), type(fact)
    sx, sy = ar.shape
    X, Y = numpy.ogrid[0:sx, 0:sy]
    regions = sy // fact * (X // fact) + Y // fact
    res = ndimage.mean(ar, labels=regions, index=numpy.arange(regions.max() + 1))
    res.shape = (sx // fact, sy // fact)
    return res
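A quick check on a small array (toy data; assumes numpy and scipy.ndimage imported as above):

import numpy
from scipy import ndimage

ar = numpy.arange(16, dtype=float).reshape(4, 4)
print(block_mean(ar, 2))
# [[ 2.5  4.5]
#  [10.5 12.5]]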
Example #20
def avg_school_score(fazes, schools):
    """
    Computes the average of the school's total score over FAZes, where missing values are removed 
    from the computation. Missing values are those that are less or equal zero.
    """    
    valid_idx = schools.total_score > 0
    res = ndi.mean(schools.total_score.values[valid_idx.values], labels=schools.faz_id.values[valid_idx.values], index=fazes.index)
    return pd.Series(res, index=fazes.index).fillna(0)
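A minimal sketch of the same computation with stand-in pandas objects (schools and fazes here are hypothetical toy frames, not the original UrbanSim datasets):

import pandas as pd
from scipy import ndimage as ndi

schools = pd.DataFrame({'total_score': [80., -1., 60., 90.],
                        'faz_id':      [1,    1,   2,   3]})
fazes = pd.DataFrame(index=[1, 2, 3, 4])  # FAZ 4 has no school with a valid score

valid_idx = schools.total_score > 0
res = ndi.mean(schools.total_score.values[valid_idx.values],
               labels=schools.faz_id.values[valid_idx.values],
               index=fazes.index)
print(pd.Series(res, index=fazes.index).fillna(0))  # FAZ 4's NaN becomes 0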
Example #21
def repairPicks(a1, a2, rmsd):
	"""
	Attempts to repair lists a1 and a2 that have become shifted
	out of frame with minimal damage
	"""
	maxdev = ndimage.mean(rmsd[:5])
	avgdev = 3*ndimage.mean(rmsd)
	x0 = [ maxdev, avgdev, 0.25*len(rmsd), 0.75*len(rmsd) ]
	print(x0)
	solved = optimize.fmin(_rsmdStep, x0, args=(rmsd,),
		xtol=1e-4, ftol=1e-4, maxiter=500, maxfun=500, disp=0, full_output=1)
	upstep = int(math.floor(solved[0][2]))
	print(solved)

	a1b = numpyPop2d(a1, upstep)
	a2b = numpyPop2d(a2, upstep)

	return a1b, a2b
Example #22
def downsample_pattern(image, factor):
    """Image shape must be exact multiples of downsample factor"""
    from scipy import ndimage
    size_y, size_x = image.shape
    y_2d, x_2d = _numpy.ogrid[:size_y, :size_x]
    regions = size_x // factor * (y_2d // factor) + x_2d // factor
    result = ndimage.mean(image, labels=regions,
                          index=_numpy.arange(regions.max() + 1))
    result.shape = (size_y // factor, size_x // factor)
    return result
Example #23
def test_mean04():
    "mean 4"
    labels = np.array([[1, 2], [2, 4]], np.int8)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels,
                                        index=[4, 8, 2])
        assert_array_almost_equal(output[[0,2]], [4.0, 2.5])
        assert_(np.isnan(output[1]))
Example #24
	def analyzeList(self, mylist, myrange=(0,1,1), filename=None):
		"""
		histogram2(a, bins) -- Compute histogram of a using divisions in bins

		Description:
		   Count the number of times values from array a fall into
		   numerical ranges defined by bins.  Range x is given by
		   bins[x] <= range_x < bins[x+1] where x =0,N and N is the
		   length of the bins array.  The last range is given by
		   bins[N] <= range_N < infinity.  Values less than bins[0] are
		   not included in the histogram.
		Arguments:
		   a -- 1D array.  The array of values to be divided into bins
		   bins -- 1D array.  Defines the ranges of values to use during
		         histogramming.
		Returns:
		   1D array.  Each value represents the occurrences for a given
		   bin (range) of values.
		"""
		#hist,bmin,minw,err = stats.histogram(mynumpy, numbins=36)
		#print hist,bmin,minw,err,"\n"
		if len(mylist) < 2:
			apDisplay.printWarning("Did not write file; not enough rows ("+str(filename)+")")
			return

		if myrange[0] is None:
			mymin = float(math.floor(ndimage.minimum(mylist)))
		else:
			mymin = float(myrange[0])
		if myrange[1] is None:
			mymax = float(math.ceil(ndimage.maximum(mylist)))
		else:
			mymax = float(myrange[1])
		mystep = float(myrange[2])

		mynumpy = numpy.asarray(mylist, dtype=numpy.float32)
		print "range=",round(ndimage.minimum(mynumpy),2)," <> ",round(ndimage.maximum(mynumpy),2)
		print " mean=",round(ndimage.mean(mynumpy),2)," +- ",round(ndimage.standard_deviation(mynumpy),2)

		#histogram
		bins = []
		mybin = mymin
		while mybin <= mymax:
			bins.append(mybin)
			mybin += mystep
		bins = numpy.asarray(bins, dtype=numpy.float32)
		apDisplay.printMsg("Creating histogram with "+str(len(bins))+" bins")
		hist = stats.histogram2(mynumpy, bins=bins)
		#print bins
		#print hist
		if filename is not None:
			f = open(filename, "w")
			for i in range(len(bins)):
				out = ("%3.4f %d\n" % (bins[i] + mystep/2.0, hist[i]) )
				f.write(out)
			f.write("&\n")
			f.close()
Example #25
 def __init__(self, name):
     self.name = name
     self.labels = workspace.object_set.get_objects(name).segmented
     self.nobjects = np.max(self.labels)
     if self.nobjects != 0:
         self.range = np.arange(1, np.max(self.labels) + 1)
         self.labels = self.labels.copy()
         self.labels[~im.mask] = 0
         self.current_mean = fix(scind.mean(im.pixel_data, self.labels, self.range))
         self.start_mean = np.maximum(self.current_mean, np.finfo(float).eps)
Example #26
def _rsmdStep(x1, rmsd):
	mean1  = x1[0]
	mean2  = x1[1]
	upstep = int(x1[2])
	dnstep = int(x1[3])
	fit = numpy.ones((len(rmsd)))*mean1
	fit[upstep:dnstep] += mean2
	error = ndimage.mean((rmsd-fit)**2/fit)

	return error
Example #27
def getImageInfo(im):
        """
        returns basic image statistics, useful for debugging
        """
        avg1=ndimage.mean(im)
        stdev1=ndimage.standard_deviation(im)
        min1=ndimage.minimum(im)
        max1=ndimage.maximum(im)

        return avg1,stdev1,min1,max1
Example #28
def calcImageFft(image, oversized):
    # EXPAND IMAGE TO BIGGER SIZE
    avg = nd_image.mean(image)
    image2 = convolve.iraf_frame.frame(image, oversized, mode="constant", cval=avg)

    # CALCULATE FOURIER TRANSFORMS
    imagefft = fft.real_fft2d(image2, s=oversized)
    # imagefft = fft.fft2d(image2, s=oversized)
    del image2

    return imagefft
Example #29
def block_mean(ar, fact):
    
    # function to downsample the input array by an integer factor

    assert isinstance(fact, int), type(fact)
    sx, sy = ar.shape
    X, Y = np.ogrid[0:sx, 0:sy]
    regions = sy // fact * (X // fact) + Y // fact
    res = ndimage.mean(ar, labels=regions, index=np.arange(regions.max() + 1))
    res.shape = (sx // fact, sy // fact)
    return res
Example #30
 def run(self, workspace):
     parents = workspace.object_set.get_objects(self.parent_name.value)
     children = workspace.object_set.get_objects(self.sub_object_name.value)
     child_count, parents_of = parents.relate_children(children)
     m = workspace.measurements
     if self.wants_per_parent_means.value:
         parent_indexes = np.arange(np.max(parents.segmented))+1
         for feature_name in m.get_feature_names(self.sub_object_name.value):
             if not self.should_aggregate_feature(feature_name):
                 continue
             data = m.get_current_measurement(self.sub_object_name.value,
                                              feature_name)
             if data is not None:
                 if len(parents_of) > 0:
                     means = fix(scind.mean(data.astype(float), 
                                            parents_of, parent_indexes))
                 else:
                     means = np.zeros((0,))
                 mean_feature_name = FF_MEAN%(self.sub_object_name.value,
                                              feature_name)
                 m.add_measurement(self.parent_name.value, mean_feature_name,
                                   means)
     m.add_measurement(self.sub_object_name.value,
                       FF_PARENT%(self.parent_name.value),
                       parents_of)
     m.add_measurement(self.parent_name.value,
                       FF_CHILDREN_COUNT%(self.sub_object_name.value),
                       child_count)
     parent_names = self.get_parent_names()
     
     for parent_name in parent_names:
         if self.find_parent_child_distances in (D_BOTH, D_CENTROID):
             self.calculate_centroid_distances(workspace, parent_name)
         if self.find_parent_child_distances in (D_BOTH, D_MINIMUM):
             self.calculate_minimum_distances(workspace, parent_name)
         
     if workspace.frame is not None:
         figure = workspace.create_or_find_figure(title="RelateObjects, image cycle #%d"%(
             workspace.measurements.image_set_number),subplots=(2,2))
         figure.subplot_imshow_labels(0,0,parents.segmented,
                                      title = self.parent_name.value)
         figure.subplot_imshow_labels(1,0,children.segmented,
                                      title = self.sub_object_name.value,
                                      sharex = figure.subplot(0,0),
                                      sharey = figure.subplot(0,0))
         parent_labeled_children = np.zeros(children.segmented.shape, int)
         parent_labeled_children[children.segmented > 0] = \
             parents_of[children.segmented[children.segmented > 0]-1]
         figure.subplot_imshow_labels(0,1,parent_labeled_children,
                                      "%s labeled by %s"%
                                      (self.sub_object_name.value,
                                       self.parent_name.value),
                                      sharex = figure.subplot(0,0),
                                      sharey = figure.subplot(0,0))
Example #31
### File description
# The file contains classical algorithms for sharpness calculation

### Packages import
from scipy import ndimage
from skimage import morphology
import numpy as np

def edge_sharpness(image):
    """
    Edge enhancement method
    """
    img_dilat = morphology.dilation(image)
    img_result = img_dilat - image
    return ndimage.sum(img_result)

def variance_sharpness(image):
    """
    Variance-based method
    """
    return ndimage.variance(image)

def variance_normal_sharpness(image):
    """
    Variance-based method with normalization
    """
    H, W = image.shape
    int_mean = ndimage.mean(image)
    return (ndimage.variance(image)/(H*W*int_mean))
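A quick check of the measures above on synthetic data (assumes the functions and imports above are in scope; a smoothed copy should always score lower):

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
sharp = rng.random((64, 64))                     # high-frequency synthetic image
blurred = ndimage.uniform_filter(sharp, size=5)  # smoothed copy

print(variance_sharpness(sharp) > variance_sharpness(blurred))  # True
print(edge_sharpness(sharp) > edge_sharpness(blurred))          # typically True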
Example #32
import numpy as np
import scipy
from scipy import ndimage
import matplotlib.pyplot as plt

l = scipy.lena()
sx, sy = l.shape
X, Y = np.ogrid[0:sx, 0:sy]

regions = sy//6 * (X//4) + Y//6
block_mean = ndimage.mean(l, labels=regions, index=np.arange(1, regions.max() + 1))
block_mean.shape = (sx//4, sy//6)

plt.figure(figsize=(5,5))
plt.imshow(block_mean, cmap=plt.cm.gray)
plt.axis('off')
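
scipy.lena() has been removed from modern SciPy, so the same recipe is shown below on a synthetic image, with the block grid chosen to divide the shape exactly (a sketch, not part of the original snippet):

import numpy as np
from scipy import ndimage

img = np.random.default_rng(0).random((512, 512))
sx, sy = img.shape
X, Y = np.ogrid[0:sx, 0:sy]

fact = 4
regions = (sy // fact) * (X // fact) + Y // fact
block_mean = ndimage.mean(img, labels=regions, index=np.arange(regions.max() + 1))
block_mean.shape = (sx // fact, sy // fact)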



Example #33
    def run_on_image_setting(self, workspace, image):
        assert isinstance(workspace, cpw.Workspace)
        image_set = workspace.image_set
        assert isinstance(image_set, cpi.ImageSet)
        measurements = workspace.measurements
        im = image_set.get_image(image.image_name.value,
                                 must_be_grayscale=True)
        #
        # Downsample the image and mask
        #
        new_shape = np.array(im.pixel_data.shape)
        if image.subsample_size.value < 1:
            new_shape = new_shape * image.subsample_size.value
            i, j = (np.mgrid[0:new_shape[0], 0:new_shape[1]].astype(float) /
                    image.subsample_size.value)
            pixels = scind.map_coordinates(im.pixel_data, (i, j), order=1)
            mask = scind.map_coordinates(im.mask.astype(float), (i, j)) > .9
        else:
            pixels = im.pixel_data
            mask = im.mask
        #
        # Remove background pixels using a greyscale tophat filter
        #
        if image.image_sample_size.value < 1:
            back_shape = new_shape * image.image_sample_size.value
            i, j = (np.mgrid[0:back_shape[0], 0:back_shape[1]].astype(float) /
                    image.image_sample_size.value)
            back_pixels = scind.map_coordinates(pixels, (i, j), order=1)
            back_mask = scind.map_coordinates(mask.astype(float), (i, j)) > .9
        else:
            back_pixels = pixels
            back_mask = mask
        radius = image.element_size.value
        back_pixels = morph.grey_erosion(back_pixels, radius, back_mask)
        back_pixels = morph.grey_dilation(back_pixels, radius, back_mask)
        if image.image_sample_size.value < 1:
            i, j = np.mgrid[0:new_shape[0], 0:new_shape[1]].astype(float)
            #
            # Make sure the mapping only references the index range of
            # back_pixels.
            #
            i *= float(back_shape[0] - 1) / float(new_shape[0] - 1)
            j *= float(back_shape[1] - 1) / float(new_shape[1] - 1)
            back_pixels = scind.map_coordinates(back_pixels, (i, j), order=1)
        pixels -= back_pixels
        pixels[pixels < 0] = 0

        #
        # For each object, build a little record
        #
        class ObjectRecord(object):
            def __init__(self, name):
                self.name = name
                self.labels = workspace.object_set.get_objects(name).segmented
                self.nobjects = np.max(self.labels)
                if self.nobjects != 0:
                    self.range = np.arange(1, np.max(self.labels) + 1)
                    self.labels = self.labels.copy()
                    self.labels[~im.mask] = 0
                    self.current_mean = fix(
                        scind.mean(im.pixel_data, self.labels, self.range))
                    self.start_mean = np.maximum(self.current_mean,
                                                 np.finfo(float).eps)

        object_records = [
            ObjectRecord(ob.objects_name.value) for ob in image.objects
        ]
        #
        # Transcribed from the Matlab module: granspectr function
        #
        # CALCULATES GRANULAR SPECTRUM, ALSO KNOWN AS SIZE DISTRIBUTION,
        # GRANULOMETRY, AND PATTERN SPECTRUM, SEE REF.:
        # J.Serra, Image Analysis and Mathematical Morphology, Vol. 1. Academic Press, London, 1989
        # Maragos,P. "Pattern spectrum and multiscale shape representation", IEEE Transactions on Pattern Analysis and Machine Intelligence, 11, N 7, pp. 701-716, 1989
        # L.Vincent "Granulometries and Opening Trees", Fundamenta Informaticae, 41, No. 1-2, pp. 57-90, IOS Press, 2000.
        # L.Vincent "Morphological Area Opening and Closing for Grayscale Images", Proc. NATO Shape in Picture Workshop, Driebergen, The Netherlands, pp. 197-208, 1992.
        # I.Ravkin, V.Temov "Bit representation techniques and image processing", Applied Informatics, v.14, pp. 41-90, Finances and Statistics, Moskow, 1988 (in Russian)
        # THIS IMPLEMENTATION INSTEAD OF OPENING USES EROSION FOLLOWED BY RECONSTRUCTION
        #
        ng = image.granular_spectrum_length.value
        startmean = np.mean(pixels[mask])
        ero = pixels.copy()
        # Mask the test image so that masked pixels will have no effect
        # during reconstruction
        #
        ero[~mask] = 0
        currentmean = startmean
        startmean = max(startmean, np.finfo(float).eps)

        footprint = np.array([[False, True, False], [True, True, True],
                              [False, True, False]])
        statistics = [image.image_name.value]
        for i in range(1, ng + 1):
            prevmean = currentmean
            ero = morph.grey_erosion(ero, mask=mask, footprint=footprint)
            rec = morph.grey_reconstruction(ero, pixels, footprint)
            currentmean = np.mean(rec[mask])
            gs = (prevmean - currentmean) * 100 / startmean
            statistics += ["%.2f" % gs]
            feature = image.granularity_feature(i)
            measurements.add_image_measurement(feature, gs)
            #
            # Restore the reconstructed image to the shape of the
            # original image so we can match against object labels
            #
            orig_shape = im.pixel_data.shape
            i, j = np.mgrid[0:orig_shape[0], 0:orig_shape[1]].astype(float)
            #
            # Make sure the mapping only references the index range of
            # back_pixels.
            #
            i *= float(new_shape[0] - 1) / float(orig_shape[0] - 1)
            j *= float(new_shape[1] - 1) / float(orig_shape[1] - 1)
            rec = scind.map_coordinates(rec, (i, j), order=1)

            #
            # Calculate the means for the objects
            #
            for object_record in object_records:
                assert isinstance(object_record, ObjectRecord)
                if object_record.nobjects > 0:
                    new_mean = fix(
                        scind.mean(rec, object_record.labels,
                                   object_record.range))
                    gss = ((object_record.current_mean - new_mean) * 100 /
                           object_record.start_mean)
                    object_record.current_mean = new_mean
                else:
                    gss = np.zeros((0, ))
                measurements.add_measurement(object_record.name, feature, gss)
        return statistics
Example #34
    def run_on_objects(self, object_name, workspace):
        """Run, computing the area measurements for a single map of objects"""
        objects = workspace.get_objects(object_name)
        assert isinstance(objects, cpo.Objects)
        #
        # Do the ellipse-related measurements
        #
        i, j, l = objects.ijv.transpose()
        centers, eccentricity, major_axis_length, minor_axis_length, \
            theta, compactness =\
            ellipse_from_second_moments_ijv(i, j, 1, l, objects.indices, True)
        del i
        del j
        del l
        self.record_measurement(workspace, object_name,
                                F_ECCENTRICITY, eccentricity)
        self.record_measurement(workspace, object_name,
                                F_MAJOR_AXIS_LENGTH, major_axis_length)
        self.record_measurement(workspace, object_name, 
                                F_MINOR_AXIS_LENGTH, minor_axis_length)
        self.record_measurement(workspace, object_name, F_ORIENTATION, 
                                theta * 180 / np.pi)
        self.record_measurement(workspace, object_name, F_COMPACTNESS,
                                compactness)
        is_first = False
        if len(objects.indices) == 0:
            nobjects = 0
        else:
            nobjects = np.max(objects.indices)
        mcenter_x = np.zeros(nobjects)
        mcenter_y = np.zeros(nobjects)
        mextent = np.zeros(nobjects)
        mperimeters = np.zeros(nobjects)
        msolidity = np.zeros(nobjects)
        euler = np.zeros(nobjects)
        max_radius = np.zeros(nobjects)
        median_radius = np.zeros(nobjects)
        mean_radius = np.zeros(nobjects)
        min_feret_diameter = np.zeros(nobjects)
        max_feret_diameter = np.zeros(nobjects)
        zernike_numbers = self.get_zernike_numbers()
        zf = {}
        for n,m in zernike_numbers:
            zf[(n,m)] = np.zeros(nobjects)
        if nobjects > 0:
            chulls, chull_counts = convex_hull_ijv(objects.ijv, objects.indices)
            for labels, indices in objects.get_labels():
                to_indices = indices-1
                distances = distance_to_edge(labels)
                mcenter_y[to_indices], mcenter_x[to_indices] =\
                         maximum_position_of_labels(distances, labels, indices)
                max_radius[to_indices] = fix(scind.maximum(
                    distances, labels, indices))
                mean_radius[to_indices] = fix(scind.mean(
                    distances, labels, indices))
                median_radius[to_indices] = median_of_labels(
                    distances, labels, indices)
                #
                # The extent (area / bounding box area)
                #
                mextent[to_indices] = calculate_extents(labels, indices)
                #
                # The perimeter distance
                #
                mperimeters[to_indices] = calculate_perimeters(labels, indices)
                #
                # Solidity
                #
                msolidity[to_indices] = calculate_solidity(labels, indices)
                #
                # Euler number
                #
                euler[to_indices] = euler_number(labels, indices)
                #
                # Zernike features
                #
                zf_l = cpmz.zernike(zernike_numbers, labels, indices)
                for (n,m), z in zip(zernike_numbers, zf_l.transpose()):
                    zf[(n,m)][to_indices] = z
            #
            # Form factor
            #
            ff = 4.0 * np.pi * objects.areas / mperimeters**2
            #
            # Feret diameter
            #
            min_feret_diameter, max_feret_diameter = \
                feret_diameter(chulls, chull_counts, objects.indices)
            
        else:
            ff = np.zeros(0)

        for f, m in ([(F_AREA, objects.areas),
                      (F_CENTER_X, mcenter_x),
                      (F_CENTER_Y, mcenter_y),
                      (F_EXTENT, mextent),
                      (F_PERIMETER, mperimeters),
                      (F_SOLIDITY, msolidity),
                      (F_FORM_FACTOR, ff),
                      (F_EULER_NUMBER, euler),
                      (F_MAXIMUM_RADIUS, max_radius),
                      (F_MEAN_RADIUS, mean_radius),
                      (F_MEDIAN_RADIUS, median_radius),
                      (F_MIN_FERET_DIAMETER, min_feret_diameter),
                      (F_MAX_FERET_DIAMETER, max_feret_diameter)] +
                     [(self.get_zernike_name((n,m)), zf[(n,m)])
                       for n,m in zernike_numbers]):
            self.record_measurement(workspace, object_name, f, m) 
Example #35
    def run_image_pair_objects(self, workspace, first_image_name,
                               second_image_name, object_name):
        '''Calculate per-object correlations between intensities in two images'''
        first_image = workspace.image_set.get_image(first_image_name,
                                                    must_be_grayscale=True)
        second_image = workspace.image_set.get_image(second_image_name,
                                                     must_be_grayscale=True)
        objects = workspace.object_set.get_objects(object_name)
        #
        # Crop both images to the size of the labels matrix
        #
        labels = objects.segmented
        try:
            first_pixels = objects.crop_image_similarly(first_image.pixel_data)
            first_mask = objects.crop_image_similarly(first_image.mask)
        except ValueError:
            first_pixels, m1 = cpo.size_similarly(labels,
                                                  first_image.pixel_data)
            first_mask, m1 = cpo.size_similarly(labels, first_image.mask)
            first_mask[~m1] = False
        try:
            second_pixels = objects.crop_image_similarly(
                second_image.pixel_data)
            second_mask = objects.crop_image_similarly(second_image.mask)
        except ValueError:
            second_pixels, m1 = cpo.size_similarly(labels,
                                                   second_image.pixel_data)
            second_mask, m1 = cpo.size_similarly(labels, second_image.mask)
            second_mask[~m1] = False
        mask = ((labels > 0) & first_mask & second_mask)
        first_pixels = first_pixels[mask]
        second_pixels = second_pixels[mask]
        labels = labels[mask]
        result = []
        first_pixel_data = first_image.pixel_data
        first_mask = first_image.mask
        first_pixel_count = np.product(first_pixel_data.shape)
        second_pixel_data = second_image.pixel_data
        second_mask = second_image.mask
        second_pixel_count = np.product(second_pixel_data.shape)
        #
        # Crop the larger image similarly to the smaller one
        #
        if first_pixel_count < second_pixel_count:
            second_pixel_data = first_image.crop_image_similarly(
                second_pixel_data)
            second_mask = first_image.crop_image_similarly(second_mask)
        elif second_pixel_count < first_pixel_count:
            first_pixel_data = second_image.crop_image_similarly(
                first_pixel_data)
            first_mask = second_image.crop_image_similarly(first_mask)
        mask = (first_mask & second_mask & (~np.isnan(first_pixel_data)) &
                (~np.isnan(second_pixel_data)))
        if np.any(mask):
            #
            # Perform the correlation, which returns:
            # [ [ii, ij],
            #   [ji, jj] ]
            #
            fi = first_pixel_data[mask]
            si = second_pixel_data[mask]

        n_objects = objects.count
        # Handle case when both images for the correlation are completely masked out

        if n_objects == 0:
            corr = np.zeros((0, ))
            overlap = np.zeros((0, ))
            K1 = np.zeros((0, ))
            K2 = np.zeros((0, ))
            M1 = np.zeros((0, ))
            M2 = np.zeros((0, ))
            RWC1 = np.zeros((0, ))
            RWC2 = np.zeros((0, ))
            C1 = np.zeros((0, ))
            C2 = np.zeros((0, ))
        elif np.where(mask)[0].__len__() == 0:
            corr = np.zeros((n_objects, ))
            corr[:] = np.NaN
            overlap = K1 = K2 = M1 = M2 = RWC1 = RWC2 = C1 = C2 = corr
        else:
            #
            # The correlation is sum((x-mean(x))(y-mean(y)) /
            #                         ((n-1) * std(x) *std(y)))
            #
            lrange = np.arange(n_objects, dtype=np.int32) + 1
            area = fix(scind.sum(np.ones_like(labels), labels, lrange))
            mean1 = fix(scind.mean(first_pixels, labels, lrange))
            mean2 = fix(scind.mean(second_pixels, labels, lrange))
            #
            # Calculate the standard deviation times the population.
            #
            std1 = np.sqrt(
                fix(
                    scind.sum((first_pixels - mean1[labels - 1])**2, labels,
                              lrange)))
            std2 = np.sqrt(
                fix(
                    scind.sum((second_pixels - mean2[labels - 1])**2, labels,
                              lrange)))
            x = first_pixels - mean1[labels - 1]  # x - mean(x)
            y = second_pixels - mean2[labels - 1]  # y - mean(y)
            corr = fix(
                scind.sum(x * y / (std1[labels - 1] * std2[labels - 1]),
                          labels, lrange))
            # Explicitly set the correlation to NaN for masked objects
            corr[scind.sum(1, labels, lrange) == 0] = np.NaN
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Correlation coeff",
                "%.3f" % np.mean(corr)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Correlation coeff",
                           "%.3f" % np.median(corr)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Correlation coeff",
                           "%.3f" % np.min(corr)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Correlation coeff",
                           "%.3f" % np.max(corr)
                       ]]

            # Threshold as percentage of maximum intensity of objects in each channel
            tff = (self.thr.value / 100) * fix(
                scind.maximum(first_pixels, labels, lrange))
            tss = (self.thr.value / 100) * fix(
                scind.maximum(second_pixels, labels, lrange))

            combined_thresh = (first_pixels >= tff[labels - 1]) & (
                second_pixels >= tss[labels - 1])
            fi_thresh = first_pixels[combined_thresh]
            si_thresh = second_pixels[combined_thresh]
            tot_fi_thr = scind.sum(
                first_pixels[first_pixels >= tff[labels - 1]],
                labels[first_pixels >= tff[labels - 1]], lrange)
            tot_si_thr = scind.sum(
                second_pixels[second_pixels >= tss[labels - 1]],
                labels[second_pixels >= tss[labels - 1]], lrange)

            nonZero = (fi > 0) | (si > 0)
            xvar = np.var(fi[nonZero], axis=0, ddof=1)
            yvar = np.var(si[nonZero], axis=0, ddof=1)

            xmean = np.mean(fi[nonZero], axis=0)
            ymean = np.mean(si[nonZero], axis=0)

            z = fi[nonZero] + si[nonZero]
            zvar = np.var(z, axis=0, ddof=1)

            covar = 0.5 * (zvar - (xvar + yvar))

            denom = 2 * covar
            num = (yvar - xvar) + np.sqrt((yvar - xvar) * (yvar - xvar) + 4 *
                                          (covar * covar))
            a = (num / denom)
            b = (ymean - a * xmean)

            i = 1
            while i > 0.003921568627:  # 1/255, the smallest 8-bit intensity step
                thr_fi_c = i
                thr_si_c = (a * i) + b
                combt = (fi < thr_fi_c) | (si < thr_si_c)
                costReg = scistat.pearsonr(fi[combt], si[combt])
                if costReg[0] <= 0:
                    break
                i = i - 0.003921568627

            # Costes' threshold for the entire image is applied to each object
            fi_above_thr = first_pixels > thr_fi_c
            si_above_thr = second_pixels > thr_si_c
            combined_thresh_c = fi_above_thr & si_above_thr
            fi_thresh_c = first_pixels[combined_thresh_c]
            si_thresh_c = second_pixels[combined_thresh_c]
            if np.any(fi_above_thr):
                tot_fi_thr_c = scind.sum(
                    first_pixels[first_pixels >= thr_fi_c],
                    labels[first_pixels >= thr_fi_c], lrange)
            else:
                tot_fi_thr_c = np.zeros(len(lrange))
            if np.any(si_above_thr):
                tot_si_thr_c = scind.sum(
                    second_pixels[second_pixels >= thr_si_c],
                    labels[second_pixels >= thr_si_c], lrange)
            else:
                tot_si_thr_c = np.zeros(len(lrange))

            # Manders Coefficient
            M1 = np.zeros(len(lrange))
            M2 = np.zeros(len(lrange))

            if np.any(combined_thresh):
                M1 = np.array(
                    scind.sum(fi_thresh, labels[combined_thresh],
                              lrange)) / np.array(tot_fi_thr)
                M2 = np.array(
                    scind.sum(si_thresh, labels[combined_thresh],
                              lrange)) / np.array(tot_si_thr)
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Manders coeff",
                "%.3f" % np.mean(M1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Manders coeff",
                           "%.3f" % np.median(M1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Manders coeff",
                           "%.3f" % np.min(M1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Manders coeff",
                           "%.3f" % np.max(M1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean Manders coeff",
                "%.3f" % np.mean(M2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median Manders coeff",
                           "%.3f" % np.median(M2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min Manders coeff",
                           "%.3f" % np.min(M2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max Manders coeff",
                           "%.3f" % np.max(M2)
                       ]]

            # RWC Coefficient
            RWC1 = np.zeros(len(lrange))
            RWC2 = np.zeros(len(lrange))
            [Rank1] = np.lexsort(([labels], [first_pixels]))
            [Rank2] = np.lexsort(([labels], [second_pixels]))
            Rank1_U = np.hstack(
                [[False], first_pixels[Rank1[:-1]] != first_pixels[Rank1[1:]]])
            Rank2_U = np.hstack(
                [[False],
                 second_pixels[Rank2[:-1]] != second_pixels[Rank2[1:]]])
            Rank1_S = np.cumsum(Rank1_U)
            Rank2_S = np.cumsum(Rank2_U)
            Rank_im1 = np.zeros(first_pixels.shape, dtype=int)
            Rank_im2 = np.zeros(second_pixels.shape, dtype=int)
            Rank_im1[Rank1] = Rank1_S
            Rank_im2[Rank2] = Rank2_S

            R = max(Rank_im1.max(), Rank_im2.max()) + 1
            Di = abs(Rank_im1 - Rank_im2)
            weight = (R - Di) * 1.0 / R
            weight_thresh = weight[combined_thresh]

            if np.any(combined_thresh):
                RWC1 = np.array(
                    scind.sum(fi_thresh * weight_thresh,
                              labels[combined_thresh],
                              lrange)) / np.array(tot_fi_thr)
                RWC2 = np.array(
                    scind.sum(si_thresh * weight_thresh,
                              labels[combined_thresh],
                              lrange)) / np.array(tot_si_thr)

            result += [[
                first_image_name, second_image_name, object_name,
                "Mean RWC coeff",
                "%.3f" % np.mean(RWC1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median RWC coeff",
                           "%.3f" % np.median(RWC1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min RWC coeff",
                           "%.3f" % np.min(RWC1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max RWC coeff",
                           "%.3f" % np.max(RWC1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean RWC coeff",
                "%.3f" % np.mean(RWC2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median RWC coeff",
                           "%.3f" % np.median(RWC2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min RWC coeff",
                           "%.3f" % np.min(RWC2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max RWC coeff",
                           "%.3f" % np.max(RWC2)
                       ]]

            # Costes Automated Threshold
            C1 = np.zeros(len(lrange))
            C2 = np.zeros(len(lrange))
            if np.any(combined_thresh_c):
                C1 = np.array(
                    scind.sum(fi_thresh_c, labels[combined_thresh_c],
                              lrange)) / np.array(tot_fi_thr_c)
                C2 = np.array(
                    scind.sum(si_thresh_c, labels[combined_thresh_c],
                              lrange)) / np.array(tot_si_thr_c)
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Manders coeff (Costes)",
                "%.3f" % np.mean(C1)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Manders coeff (Costes)",
                           "%.3f" % np.median(C1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Manders coeff (Costes)",
                           "%.3f" % np.min(C1)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Manders coeff (Costes)",
                           "%.3f" % np.max(C1)
                       ]]
            result += [[
                second_image_name, first_image_name, object_name,
                "Mean Manders coeff (Costes)",
                "%.3f" % np.mean(C2)
            ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Median Manders coeff (Costes)",
                           "%.3f" % np.median(C2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Min Manders coeff (Costes)",
                           "%.3f" % np.min(C2)
                       ],
                       [
                           second_image_name, first_image_name, object_name,
                           "Max Manders coeff (Costes)",
                           "%.3f" % np.max(C2)
                       ]]

            # Overlap Coefficient
            if np.any(combined_thresh):
                fpsq = scind.sum(first_pixels[combined_thresh]**2,
                                 labels[combined_thresh], lrange)
                spsq = scind.sum(second_pixels[combined_thresh]**2,
                                 labels[combined_thresh], lrange)
                pdt = np.sqrt(np.array(fpsq) * np.array(spsq))

                overlap = fix(
                    scind.sum(
                        first_pixels[combined_thresh] *
                        second_pixels[combined_thresh],
                        labels[combined_thresh], lrange) / pdt)
                K1 = fix((scind.sum(
                    first_pixels[combined_thresh] *
                    second_pixels[combined_thresh], labels[combined_thresh],
                    lrange)) / (np.array(fpsq)))
                K2 = fix(
                    scind.sum(
                        first_pixels[combined_thresh] *
                        second_pixels[combined_thresh],
                        labels[combined_thresh], lrange) / np.array(spsq))
            else:
                overlap = K1 = K2 = np.zeros(len(lrange))
            result += [[
                first_image_name, second_image_name, object_name,
                "Mean Overlap coeff",
                "%.3f" % np.mean(overlap)
            ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Median Overlap coeff",
                           "%.3f" % np.median(overlap)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Min Overlap coeff",
                           "%.3f" % np.min(overlap)
                       ],
                       [
                           first_image_name, second_image_name, object_name,
                           "Max Overlap coeff",
                           "%.3f" % np.max(overlap)
                       ]]

        measurement = ("Correlation_Correlation_%s_%s" %
                       (first_image_name, second_image_name))
        overlap_measurement = (F_OVERLAP_FORMAT %
                               (first_image_name, second_image_name))
        k_measurement_1 = (F_K_FORMAT % (first_image_name, second_image_name))
        k_measurement_2 = (F_K_FORMAT % (second_image_name, first_image_name))
        manders_measurement_1 = (F_MANDERS_FORMAT %
                                 (first_image_name, second_image_name))
        manders_measurement_2 = (F_MANDERS_FORMAT %
                                 (second_image_name, first_image_name))
        rwc_measurement_1 = (F_RWC_FORMAT %
                             (first_image_name, second_image_name))
        rwc_measurement_2 = (F_RWC_FORMAT %
                             (second_image_name, first_image_name))
        costes_measurement_1 = (F_COSTES_FORMAT %
                                (first_image_name, second_image_name))
        costes_measurement_2 = (F_COSTES_FORMAT %
                                (second_image_name, first_image_name))

        workspace.measurements.add_measurement(object_name, measurement, corr)
        workspace.measurements.add_measurement(object_name,
                                               overlap_measurement, overlap)
        workspace.measurements.add_measurement(object_name, k_measurement_1,
                                               K1)
        workspace.measurements.add_measurement(object_name, k_measurement_2,
                                               K2)
        workspace.measurements.add_measurement(object_name,
                                               manders_measurement_1, M1)
        workspace.measurements.add_measurement(object_name,
                                               manders_measurement_2, M2)
        workspace.measurements.add_measurement(object_name, rwc_measurement_1,
                                               RWC1)
        workspace.measurements.add_measurement(object_name, rwc_measurement_2,
                                               RWC2)
        workspace.measurements.add_measurement(object_name,
                                               costes_measurement_1, C1)
        workspace.measurements.add_measurement(object_name,
                                               costes_measurement_2, C2)

        if n_objects == 0:
            return [[
                first_image_name, second_image_name, object_name,
                "Mean correlation", "-"
            ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Median correlation", "-"
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Min correlation", "-"
                    ],
                    [
                        first_image_name, second_image_name, object_name,
                        "Max correlation", "-"
                    ]]
        else:
            return result
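The overlap coefficient computed above is Manders' formula, K = sum(R*G) / sqrt(sum(R^2) * sum(G^2)), evaluated per object with scipy.ndimage.sum. Below is a minimal self-contained sketch of that per-label computation; the toy arrays are invented and the module's fix() helper is omitted.

import numpy as np
from scipy import ndimage as scind

def overlap_coefficient(first_pixels, second_pixels, labels):
    """Per-label overlap: sum(R*G) / sqrt(sum(R^2) * sum(G^2))."""
    lrange = np.arange(1, labels.max() + 1)
    cross = np.asarray(scind.sum(first_pixels * second_pixels, labels, lrange))
    fpsq = np.asarray(scind.sum(first_pixels ** 2, labels, lrange))
    spsq = np.asarray(scind.sum(second_pixels ** 2, labels, lrange))
    return cross / np.sqrt(fpsq * spsq)

# toy data: two labeled objects in a 4x4 field
labels = np.array([[1, 1, 0, 0],
                   [1, 1, 0, 0],
                   [0, 0, 2, 2],
                   [0, 0, 2, 2]])
rng = np.random.default_rng(0)
red, green = rng.random((4, 4)), rng.random((4, 4))
print(overlap_coefficient(red, green, labels))  # one coefficient per object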
Example #36
def MaximaFind(image, qty, len_object=18):
    """
    Find the centers coordinates of maximum spots in an image.

    Parameters
    ----------
    image : array like
        Data array containing the greyscale image.
    qty : int
        The amount of maxima you want to find.
    len_object : int
        A length in pixels somewhat larger than a typical object.

    Returns
    -------
    refined_position : array like
        A 2D array of shape (N, 2) containing the coordinates of the maxima.

    """
    filtered = BandPassFilter(image, 1, len_object)

    # dilation
    structure = _CreateSEDisk(len_object // 2)
    dilated = cv2.dilate(filtered, structure, iterations=1)

    # find local maxima
    binary = (filtered == dilated)

    # thresholding
    qradius = max(len_object // 4, 1)
    structure = _CreateSEDisk(qradius)
    BWdil = cv2.dilate(numpy.array(binary).astype(numpy.uint8), structure)

    labels, num_features = ndimage.label(BWdil)
    index = numpy.arange(1, num_features + 1)
    mean = ndimage.mean(filtered, labels=labels, index=index)
    si = numpy.argsort(mean)[::-1][:qty]
    # estimate spot center position
    pos = numpy.array(
        ndimage.center_of_mass(filtered, labels=labels,
                               index=index[si]))[:, ::-1]

    if numpy.any(numpy.isnan(pos)):
        pos = pos[~numpy.isnan(pos).any(axis=1)]  # drop rows containing NaN
        logging.debug("Only %d maxima found, while expected %d", len(pos), qty)
    # improve center estimate using radial symmetry method
    w = len_object // 2
    refined_center = numpy.zeros_like(pos)
    pos = numpy.rint(pos).astype(numpy.int16)
    y_max, x_max = image.shape
    for idx, xy in enumerate(pos):
        x_start, y_start = xy - w + 1
        x_end, y_end = xy + w
        # If the spot is near the edge of the image, crop so it is still in the center of the sub-image. Subtract the
        # value of x/y_start from x/y_end to keep the spot in the center when x/y_start is set to 0. Add the difference
        # between x/y_end and x/y_max to x/y_start to keep the spot in the center when x/y_end is set to x/y_max.
        if x_start < 0:
            x_end += x_start
            x_start = 0
        elif x_end > x_max:
            x_start += x_end - x_max
            x_end = x_max
        if y_start < 0:
            y_end += y_start
            y_start = 0
        elif y_end > y_max:
            y_start += y_end - y_max
            y_end = y_max
        spot = filtered[y_start:y_end, x_start:x_end]
        refined_center[idx] = numpy.array(FindCenterCoordinates(spot))
    refined_position = pos + refined_center
    return refined_position
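BandPassFilter, _CreateSEDisk and FindCenterCoordinates above are module helpers that are not shown. As a rough, self-contained sketch of the same dilation-based peak finding using only scipy (a difference of Gaussians stands in for the band-pass, grey_dilation for the OpenCV dilation, and the radial-symmetry refinement is skipped):

import numpy as np
from scipy import ndimage

def find_maxima(image, qty, len_object=18):
    # crude band-pass: small-scale smoothing minus large-scale background
    filtered = (ndimage.gaussian_filter(image, 1) -
                ndimage.gaussian_filter(image, len_object))
    # a pixel is a local maximum iff grey dilation leaves it unchanged
    size = 2 * (len_object // 2) + 1
    binary = filtered == ndimage.grey_dilation(filtered, size=(size, size))
    labels, n = ndimage.label(binary)
    index = np.arange(1, n + 1)
    # rank candidate peaks by mean filtered intensity, keep the qty brightest
    mean = ndimage.mean(filtered, labels=labels, index=index)
    keep = np.argsort(mean)[::-1][:qty]
    pos = np.array(ndimage.center_of_mass(filtered, labels=labels,
                                          index=index[keep]))
    return pos[:, ::-1]  # (x, y) order, matching MaximaFind

img = np.zeros((64, 64))
img[20, 20] = img[40, 45] = 1.0
img = ndimage.gaussian_filter(img, 3)
print(find_maxima(img, qty=2))  # the two seeded spots, in (x, y) order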
Example #37
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 et sts=4 ai:

import numpy as np
import scipy
from scipy import ndimage
# scipy.misc.imsave was removed from SciPy; imageio.imwrite is the usual replacement
import imageio

h = 720
w = 1280
f = np.zeros((h, w), dtype=np.uint32)

for s in [1, 4, 16, 32, 64]:
    sx, sy = f.shape
    X, Y = np.ogrid[0:sx, 0:sy]

    r = np.hypot(X - sx / 2, Y - sy / 2)

    n = round(w / 2 / s)
    rbin = (n * r / r.max()).astype(int)  # np.int was removed from NumPy; use the builtin
    # per-bin radial mean (trivially zero here, since f is all zeros)
    radial_mean = ndimage.mean(f,
                               labels=rbin,
                               index=np.arange(1, rbin.max() + 1))

    # imageio does not auto-scale, so map the bin indices onto the 8-bit range
    imageio.imwrite('input/circles-{0:02}px.png'.format(s),
                    (rbin * 255.0 / rbin.max()).astype(np.uint8))
Example #38
def block_downsample(arr):
    # relies on `regions`, `indices`, `factor`, `sx` and `sy` from the
    # enclosing scope: `regions` labels each (factor x factor) block and
    # `indices` lists the block ids
    res = ndimage.mean(arr, labels=regions, index=indices)
    res.shape = (sx // factor, sy // factor)  # integer division: shape must be ints
    return res
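The fragment above closes over regions, indices, factor, sx and sy from its enclosing scope. A self-contained sketch of the same block-mean downsampling, with those names built explicitly (the array contents and factor are arbitrary):

import numpy as np
from scipy import ndimage

factor = 4
arr = np.random.default_rng(0).random((16, 16))
sx, sy = arr.shape

# label every (factor x factor) block with a unique id
by, bx = np.mgrid[0:sx, 0:sy]
regions = (by // factor) * (sy // factor) + (bx // factor)
indices = np.arange(regions.max() + 1)

res = ndimage.mean(arr, labels=regions, index=indices)
res.shape = (sx // factor, sy // factor)
print(res.shape)  # (4, 4): each entry is the mean of one 4x4 block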
Example #39
def segment_layer(filename, params):
    '''
    Segment one layer in a stack
    '''
    start = time.time()
    #extract pixel size in xy and z
    xsize, zsize = extract_zoom(params.folder)

    #load image
    img = tifffile.imread(params.inputfolder + params.folder + filename)

    #normalize image
    img = ndimage.median_filter(img, 3)
    img = img * 255. / img.max()

    ##segment kidney tissue

    sizefactor = 10.
    small = ndimage.zoom(img, 1. / sizefactor)  #scale the image to a smaller size

    imgf = ndimage.gaussian_filter(small, 3. / xsize)  #Gaussian filter
    median = np.percentile(imgf, 40)  #40-th percentile for thresholding

    kmask = imgf > median * 1.5  #thresholding
    kmask = mahotas.dilate(kmask, mahotas.disk(5))
    kmask = mahotas.close_holes(kmask)  #closing holes
    kmask = mahotas.erode(kmask, mahotas.disk(5)) * 255

    #remove objects that are darker than 2*percentile
    l, n = ndimage.label(kmask)
    llist = np.unique(l)
    if len(llist) > 2:
        means = ndimage.mean(imgf, l, llist)
        bv = llist[np.where(means < median * 2)]
        ix = np.in1d(l.ravel(), bv).reshape(l.shape)
        kmask[ix] = 0

    kmask = ndimage.zoom(kmask, sizefactor)  #scale back to normal size
    kmask = normalize(kmask)
    kmask = (kmask > mahotas.otsu(kmask.astype(
        np.uint8))) * 255.  #remove artifacts of interpolation

    #save indices of the kidney mask
    ind = np.where(kmask > 0)
    ind = np.array(ind)
    np.save(
        params.inputfolder + '../segmented/masks/kidney/' + params.folder +
        filename[:-4] + '.npy', ind)
    skimage.io.imsave(
        params.inputfolder + '../segmented/masks/kidney/' + params.folder +
        filename[:-4] + '.tif', (kmask > 0).astype(np.uint8) * 255)

    #segment glomeruli, if there is a kidney tissue
    if kmask.max() > 0:
        #remove all intensity variations larger than maximum radius of a glomerulus
        d = mahotas.disk(int(float(params.maxrad) / xsize))
        img = img - mahotas.open(img.astype(np.uint8), d)
        img = img * 255. / img.max()
        ch = img[np.where(kmask > 0)]

        #segment glomeruli by otsu thresholding, only if this threshold is higher than the 75-th percentile in the kidney mask
        t = mahotas.otsu(img.astype(np.uint8))

        if t > np.percentile(ch, 75) * 1.5:
            cells = img > t
            cells[np.where(kmask == 0)] = 0
            cells = mahotas.open(
                cells, mahotas.disk(int(float(params.minrad) / 2. / xsize)))

        else:
            cells = np.zeros_like(img)

    else:
        cells = np.zeros_like(img)

    #save indices of the glomeruli mask
    ind = np.where(cells > 0)
    ind = np.array(ind)
    np.save(
        params.inputfolder + '../segmented/masks/glomeruli/' + params.folder +
        filename[:-4] + '.npy', ind)
    skimage.io.imsave(
        params.inputfolder + '../segmented/masks/glomeruli/' + params.folder +
        filename[:-4] + '.tif', (cells > 0).astype(np.uint8) * 255)
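The darker-object filter in segment_layer (label the mask, take per-label means with ndimage.mean, then blank the labels that fall below a cutoff via np.in1d) is a reusable pattern. A minimal sketch with invented data and an arbitrary cutoff:

import numpy as np
from scipy import ndimage

img = np.random.default_rng(1).random((32, 32))
mask = img > 0.8  # some binary segmentation

labels, n = ndimage.label(mask)
ids = np.arange(1, n + 1)
means = ndimage.mean(img, labels, ids)  # mean intensity per object
dim = ids[means < 0.85]                 # objects below the (arbitrary) cutoff
mask[np.in1d(labels.ravel(), dim).reshape(labels.shape)] = 0
print(n, "objects,", dim.size, "removed as too dim")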
Example #40
def get_psf_secondpeak(fn,
                       show_image=False,
                       min_radial_extent=1.5 * u.arcsec,
                       max_radial_extent=5 * u.arcsec):
    """ REDUNDANT with get_psf_secondpeak, but this one is better

    Process:
        1. Find the first minimum of the PSF by taking the radial profile within 50 pixels
        2. Take the integral of the PSF within that range
        3. Calculate the residual of the PSF minus the CASA-fitted Gaussian beam
        4. Integrate that to get the fraction of flux outside the synthesized
        beam in the main lobe of the dirty beam
        5. Find the peak and the location of the peak residual

    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        cube = SpectralCube.read(
            fn, format='casa_image' if not fn.endswith('.fits') else 'fits')
    psfim = cube[0]

    pixscale = wcs.utils.proj_plane_pixel_scales(cube.wcs.celestial)[0] * u.deg

    center = np.unravel_index(np.argmax(psfim), psfim.shape)
    cy, cx = center

    cutout = psfim[cy - 100:cy + 101, cx - 100:cx + 101]
    psfim = cutout
    fullbeam = cube.beam.as_kernel(
        pixscale,
        x_size=201,
        y_size=201,
    )

    shape = cutout.shape
    sy, sx = shape

    Y, X = np.mgrid[0:sy, 0:sx]

    beam = cube.beam

    center = np.unravel_index(np.argmax(cutout), cutout.shape)
    cy, cx = center

    # elliptical version...
    dy = (Y - cy)
    dx = (X - cx)
    costh = np.cos(beam.pa)
    sinth = np.sin(beam.pa)
    rmajmin = beam.minor / beam.major

    rr = ((dx * costh + dy * sinth)**2 / rmajmin**2 +
          (dx * sinth - dy * costh)**2 / 1**2)**0.5

    rbin = rr.astype(int)

    # assume the PSF first minimum is within 100 pixels of center
    radial_mean = ndimage.mean(cutout**2, labels=rbin, index=np.arange(100))

    # find the first negative peak (approximately); we include anything
    # within this radius as part of the main beam
    first_min_ind = scipy.signal.find_peaks(-radial_mean)[0][0]

    view = (slice(cy - first_min_ind.astype('int'),
                  cy + first_min_ind.astype('int') + 1),
            slice(cx - first_min_ind.astype('int'),
                  cx + first_min_ind.astype('int') + 1))
    data = cutout[view].value
    bm = fullbeam.array[view]
    # the data and beam must be concentric
    # and there must be only one peak location
    # (these checks are to avoid the even-kernel issue in which the center
    # of the beam can have its flux spread over four pixels)
    assert np.argmax(data) == np.argmax(bm)
    assert (bm.max() == bm).sum() == 1

    bmfit_residual = data - bm / bm.max()
    radial_mask = rr[view] < first_min_ind

    psf_integral_firstpeak = (data * radial_mask).sum()
    psf_residual_integral = (bmfit_residual * radial_mask).sum()
    residual_peak = bmfit_residual.max()
    residual_peak_loc = rr[view].flat[bmfit_residual.argmax()]

    peakloc_as = (residual_peak_loc * pixscale).to(u.arcsec)

    # pl.figure(3).clf()
    # bmradmean = ndimage.mean((fullbeam.array/fullbeam.array.max())**2, labels=rbin, index=np.arange(100))
    # pl.plot(radial_mean)
    # pl.plot(bmradmean)
    # pl.figure(1)

    if show_image:
        import pylab as pl
        #pl.clf()

        # this finds the second peak
        # (useful for display)
        outside_first_peak_mask = (rr > first_min_ind) & (fullbeam.array <
                                                          1e-5)
        #first_sidelobe_ind = scipy.signal.find_peaks(radial_mean * (np.arange(len(radial_mean)) > first_min_ind))[0][0]
        max_sidelobe = cutout[outside_first_peak_mask].max()
        max_sidelobe_loc = cutout[outside_first_peak_mask].argmax()
        r_max_sidelobe = rr[outside_first_peak_mask][max_sidelobe_loc]
        #r_max_sidelobe = first_sidelobe_ind

        if r_max_sidelobe * pixscale < min_radial_extent:
            radial_extent = (min_radial_extent / pixscale).decompose().value
        else:
            radial_extent = r_max_sidelobe
        if radial_extent * pixscale > max_radial_extent:
            radial_extent = (max_radial_extent / pixscale).decompose().value

        bm2 = cube.beam.as_kernel(
            pixscale,
            x_size=radial_extent.astype('int') * 2 + 1,
            y_size=radial_extent.astype('int') * 2 + 1,
        )
        view = (slice(cy - radial_extent.astype('int'),
                      cy + radial_extent.astype('int') + 1),
                slice(cx - radial_extent.astype('int'),
                      cx + radial_extent.astype('int') + 1))
        bmfit_residual2 = cutout[view].value - bm2.array / bm2.array.max()

        #extent = np.array([-first_min_ind, first_min_ind, -first_min_ind, first_min_ind])*pixscale.to(u.arcsec).value
        extent = np.array([
            -radial_extent, radial_extent, -radial_extent, radial_extent
        ]) * pixscale.to(u.arcsec).value
        pl.imshow(bmfit_residual2,
                  origin='lower',
                  interpolation='nearest',
                  extent=extent,
                  cmap='gray_r')
        cb = pl.colorbar()
        pl.matplotlib.colorbar.ColorbarBase.add_lines(self=cb,
                                                      levels=[max_sidelobe],
                                                      colors=[(0.1, 0.7, 0.1,
                                                               0.9)],
                                                      linewidths=1)
        pl.contour(bm2.array / bm2.array.max(),
                   levels=[0.1, 0.5, 0.9],
                   colors=['r'] * 3,
                   extent=extent)
        pl.contour(rr[view],
                   levels=[first_min_ind, r_max_sidelobe],
                   linestyles=['--', ':'],
                   colors=[(0.2, 0.2, 1, 0.5), (0.1, 0.7, 0.1, 0.5)],
                   extent=extent)
        pl.xlabel("RA Offset [arcsec]")
        pl.ylabel("Dec Offset [arcsec]")

    return (residual_peak, peakloc_as.value,
            psf_residual_integral / psf_integral_firstpeak)
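Finding the first PSF minimum above hinges on one idiom: minima of a radial profile are peaks of its negation, so scipy.signal.find_peaks(-profile)[0][0] gives the first null. A tiny check on a synthetic sinc-squared profile (the profile itself is made up):

import numpy as np
import scipy.signal

r = np.linspace(0, 10, 201)
profile = np.sinc(r) ** 2  # toy PSF radial profile with sidelobes

first_min_ind = scipy.signal.find_peaks(-profile)[0][0]
print(r[first_min_ind])  # 1.0, the first null of sinc(r)**2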
Example #41
    def run(self, workspace):
        parents = workspace.object_set.get_objects(self.parent_name.value)
        children = workspace.object_set.get_objects(self.sub_object_name.value)
        child_count, parents_of = parents.relate_children(children)
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
        if self.wants_per_parent_means.value:
            parent_indexes = np.arange(np.max(parents.segmented)) + 1
            for feature_name in m.get_feature_names(
                    self.sub_object_name.value):
                if not self.should_aggregate_feature(feature_name):
                    continue
                data = m.get_current_measurement(self.sub_object_name.value,
                                                 feature_name)
                if data is not None:
                    if len(parents_of) > 0:
                        means = fix(
                            scind.mean(data.astype(float), parents_of,
                                       parent_indexes))
                    else:
                        means = np.zeros((0, ))
                    mean_feature_name = FF_MEAN % (self.sub_object_name.value,
                                                   feature_name)
                    m.add_measurement(self.parent_name.value,
                                      mean_feature_name, means)
        m.add_measurement(self.sub_object_name.value,
                          FF_PARENT % (self.parent_name.value), parents_of)
        m.add_measurement(self.parent_name.value,
                          FF_CHILDREN_COUNT % (self.sub_object_name.value),
                          child_count)
        group_index = m.get_current_image_measurement(cpmeas.GROUP_INDEX)
        group_indexes = np.ones(np.sum(parents_of != 0), int) * group_index
        good_parents = parents_of[parents_of != 0]
        good_children = np.argwhere(parents_of != 0).flatten() + 1
        if np.any(good_parents):
            m.add_relate_measurement(self.module_num, R_PARENT,
                                     self.parent_name.value,
                                     self.sub_object_name.value, group_indexes,
                                     good_parents, group_indexes,
                                     good_children)
            m.add_relate_measurement(self.module_num, R_CHILD,
                                     self.sub_object_name.value,
                                     self.parent_name.value, group_indexes,
                                     good_children, group_indexes,
                                     good_parents)
        parent_names = self.get_parent_names()

        for parent_name in parent_names:
            if self.find_parent_child_distances in (D_BOTH, D_CENTROID):
                self.calculate_centroid_distances(workspace, parent_name)
            if self.find_parent_child_distances in (D_BOTH, D_MINIMUM):
                self.calculate_minimum_distances(workspace, parent_name)

        if workspace.frame is not None:
            figure = workspace.create_or_find_figure(
                title="RelateObjects, image cycle #%d" %
                (workspace.measurements.image_set_number),
                subplots=(2, 2))
            figure.subplot_imshow_labels(0,
                                         0,
                                         parents.segmented,
                                         title=self.parent_name.value)
            figure.subplot_imshow_labels(1,
                                         0,
                                         children.segmented,
                                         title=self.sub_object_name.value,
                                         sharex=figure.subplot(0, 0),
                                         sharey=figure.subplot(0, 0))
            parent_labeled_children = np.zeros(children.segmented.shape, int)
            parent_labeled_children[children.segmented > 0] = \
                parents_of[children.segmented[children.segmented > 0]-1]
            figure.subplot_imshow_labels(
                0,
                1,
                parent_labeled_children,
                "%s labeled by %s" %
                (self.sub_object_name.value, self.parent_name.value),
                sharex=figure.subplot(0, 0),
                sharey=figure.subplot(0, 0))
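The per-parent means in run() boil down to a single labeled mean: each child's measurement, labeled by that child's parent id, averaged over the range of parent indexes. A stand-alone sketch with invented measurements (the fix() wrapper is omitted):

import numpy as np
from scipy import ndimage as scind

parents_of = np.array([1, 1, 2, 2, 2])      # parent id of each child object
data = np.array([0.2, 0.4, 1.0, 2.0, 3.0])  # one measurement per child
parent_indexes = np.arange(parents_of.max()) + 1

means = scind.mean(data.astype(float), parents_of, parent_indexes)
print(means)  # [0.3 2.0]: mean child measurement per parent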
Example #42
def test_mean03():
    labels = np.array([1, 2])
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels, index=2)
        assert_almost_equal(output, 3.0)
Example #43
# From Bi Rico on StackOverflow
def radial_profile(data, center):
    y, x = np.indices((data.shape))
    r = np.hypot(x - center[0], y - center[1]).astype(int)
    tbin = np.bincount(r.ravel(), data.ravel())
    return tbin / np.bincount(r.ravel())  # mean value per integer-radius bin

# running as stand-alone program
if __name__ == "__main__":

    # Stand-alone programs must create a GX context before calling Geosoft methods.
    with gxpy.gx.GXpy() as gxc:

        with gxpy.grid.Grid.open(grid_path) as grid:

            f = grid.np()
            sx, sy = f.shape
            X, Y = np.ogrid[0:sx, 0:sy]

            r = np.hypot(X - sx/2, Y) # r is the same shape as grid, contains radius to every point

            print(pd.DataFrame((bins * (r / r.max())).astype(int)))

            # you need to correlate the bin numbers with their radii (frequency in CYC/km)

            rbin = (bins * r / r.max()).astype(int)  # array where each position has its bin number
            index = np.arange(1, rbin.max() + 1)  # array from 1 to rbin.max()
            radial_mean = ndimage.mean(f, labels=rbin, index=index)

            plt.figure()
            plt.title("with rbins")
            plt.scatter(index, radial_mean)
            plt.show()
Example #44
def test_mean01():
    labels = np.array([1, 0], bool)
    for type in types:
        input = np.array([[1, 2], [3, 4]], type)
        output = ndimage.mean(input, labels=labels)
        assert_almost_equal(output, 2.0)
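Together with test_mean03 in Example #42, this test pins down ndimage.mean's label semantics: labels broadcast against the input, a call with labels but no index averages over all labeled (non-zero) elements, a scalar index returns a scalar, and a sequence index returns one mean per requested label. A quick check:

import numpy as np
from scipy import ndimage

a = np.array([[1, 2], [3, 4]], float)

# boolean labels broadcast along rows; True marks the first column
print(ndimage.mean(a, labels=np.array([1, 0], bool)))          # 2.0, mean of [1, 3]

# scalar index -> scalar result: mean over elements labeled 2
print(ndimage.mean(a, labels=np.array([1, 2]), index=2))       # 3.0, mean of [2, 4]

# sequence index -> one mean per label
print(ndimage.mean(a, labels=np.array([1, 2]), index=[1, 2]))  # [2.0, 3.0]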
Example #45
    def run(self, workspace):
        if self.show_window:
            workspace.display_data.col_labels = ("Image", "Object", "Feature",
                                                 "Mean", "Median", "STD")
            workspace.display_data.statistics = statistics = []
        for image_name in [img.name for img in self.images]:
            image = workspace.image_set.get_image(image_name.value,
                                                  must_be_grayscale=True)
            for object_name in [obj.name for obj in self.objects]:
                # Need to refresh image after each iteration...
                img = image.pixel_data
                if image.has_mask:
                    masked_image = img.copy()
                    masked_image[~image.mask] = 0
                else:
                    masked_image = img
                objects = workspace.object_set.get_objects(object_name.value)
                nobjects = objects.count
                integrated_intensity = np.zeros((nobjects, ))
                integrated_intensity_edge = np.zeros((nobjects, ))
                mean_intensity = np.zeros((nobjects, ))
                mean_intensity_edge = np.zeros((nobjects, ))
                std_intensity = np.zeros((nobjects, ))
                std_intensity_edge = np.zeros((nobjects, ))
                min_intensity = np.zeros((nobjects, ))
                min_intensity_edge = np.zeros((nobjects, ))
                max_intensity = np.zeros((nobjects, ))
                max_intensity_edge = np.zeros((nobjects, ))
                mass_displacement = np.zeros((nobjects, ))
                lower_quartile_intensity = np.zeros((nobjects, ))
                median_intensity = np.zeros((nobjects, ))
                mad_intensity = np.zeros((nobjects, ))
                upper_quartile_intensity = np.zeros((nobjects, ))
                cmi_x = np.zeros((nobjects, ))
                cmi_y = np.zeros((nobjects, ))
                max_x = np.zeros((nobjects, ))
                max_y = np.zeros((nobjects, ))
                for labels, lindexes in objects.get_labels():
                    lindexes = lindexes[lindexes != 0]
                    labels, img = cpo.crop_labels_and_image(labels, img)
                    _, masked_image = cpo.crop_labels_and_image(
                        labels, masked_image)
                    outlines = cpmo.outline(labels)

                    if image.has_mask:
                        _, mask = cpo.crop_labels_and_image(labels, image.mask)
                        masked_labels = labels.copy()
                        masked_labels[~mask] = 0
                        masked_outlines = outlines.copy()
                        masked_outlines[~mask] = 0
                    else:
                        masked_labels = labels
                        masked_outlines = outlines

                    lmask = (masked_labels > 0) & np.isfinite(img)  # Ignore NaNs, Infs
                    has_objects = np.any(lmask)
                    if has_objects:
                        limg = img[lmask]
                        llabels = labels[lmask]
                        mesh_y, mesh_x = np.mgrid[0:masked_image.shape[0],
                                                  0:masked_image.shape[1]]
                        mesh_x = mesh_x[lmask]
                        mesh_y = mesh_y[lmask]
                        lcount = fix(
                            nd.sum(np.ones(len(limg)), llabels, lindexes))
                        integrated_intensity[lindexes-1] = \
                            fix(nd.sum(limg, llabels, lindexes))
                        mean_intensity[lindexes-1] = \
                            integrated_intensity[lindexes-1] / lcount
                        std_intensity[lindexes - 1] = np.sqrt(
                            fix(
                                nd.mean(
                                    (limg - mean_intensity[llabels - 1])**2,
                                    llabels, lindexes)))
                        min_intensity[lindexes - 1] = fix(
                            nd.minimum(limg, llabels, lindexes))
                        max_intensity[lindexes - 1] = fix(
                            nd.maximum(limg, llabels, lindexes))
                        # Compute the position of the intensity maximum
                        max_position = np.array(fix(
                            nd.maximum_position(limg, llabels, lindexes)),
                                                dtype=int)
                        max_position = np.reshape(max_position,
                                                  (max_position.shape[0], ))
                        max_x[lindexes - 1] = mesh_x[max_position]
                        max_y[lindexes - 1] = mesh_y[max_position]
                        # The mass displacement is the distance between the center
                        # of mass of the binary image and of the intensity image. The
                        # center of mass is the average X or Y for the binary image
                        # and the sum of X or Y * intensity / integrated intensity
                        cm_x = fix(nd.mean(mesh_x, llabels, lindexes))
                        cm_y = fix(nd.mean(mesh_y, llabels, lindexes))

                        i_x = fix(nd.sum(mesh_x * limg, llabels, lindexes))
                        i_y = fix(nd.sum(mesh_y * limg, llabels, lindexes))
                        cmi_x[lindexes -
                              1] = i_x / integrated_intensity[lindexes - 1]
                        cmi_y[lindexes -
                              1] = i_y / integrated_intensity[lindexes - 1]
                        diff_x = cm_x - cmi_x[lindexes - 1]
                        diff_y = cm_y - cmi_y[lindexes - 1]
                        mass_displacement[lindexes-1] = \
                            np.sqrt(diff_x * diff_x+diff_y*diff_y)
                        #
                        # Sort the intensities by label, then intensity.
                        # For each label, find the index above and below
                        # the 25%, 50% and 75% mark and take the weighted
                        # average.
                        #
                        order = np.lexsort((limg, llabels))
                        areas = lcount.astype(int)
                        indices = np.cumsum(areas) - areas
                        for dest, fraction in ((lower_quartile_intensity,
                                                1.0 / 4.0), (median_intensity,
                                                             1.0 / 2.0),
                                               (upper_quartile_intensity,
                                                3.0 / 4.0)):
                            qindex = indices.astype(float) + areas * fraction
                            qfraction = qindex - np.floor(qindex)
                            qindex = qindex.astype(int)
                            qmask = qindex < indices + areas - 1
                            qi = qindex[qmask]
                            qf = qfraction[qmask]
                            dest[lindexes[qmask] -
                                 1] = (limg[order[qi]] * (1 - qf) +
                                       limg[order[qi + 1]] * qf)
                            #
                            # In some situations (e.g. only 3 points), there may
                            # not be an upper bound.
                            #
                            qmask = (~qmask) & (areas > 0)
                            dest[lindexes[qmask] -
                                 1] = limg[order[qindex[qmask]]]
                        #
                        # Once again, for the MAD
                        #
                        madimg = np.abs(limg - median_intensity[llabels - 1])
                        order = np.lexsort((madimg, llabels))
                        qindex = indices.astype(float) + areas / 2.0
                        qfraction = qindex - np.floor(qindex)
                        qindex = qindex.astype(int)
                        qmask = qindex < indices + areas - 1
                        qi = qindex[qmask]
                        qf = qfraction[qmask]
                        mad_intensity[lindexes[qmask] -
                                      1] = (madimg[order[qi]] * (1 - qf) +
                                            madimg[order[qi + 1]] * qf)
                        qmask = (~qmask) & (areas > 0)
                        mad_intensity[lindexes[qmask] -
                                      1] = madimg[order[qindex[qmask]]]

                    emask = masked_outlines > 0
                    eimg = img[emask]
                    elabels = labels[emask]
                    has_edge = len(eimg) > 0
                    if has_edge:
                        ecount = fix(
                            nd.sum(np.ones(len(eimg)), elabels, lindexes))
                        integrated_intensity_edge[lindexes-1] = \
                            fix(nd.sum(eimg, elabels, lindexes))
                        mean_intensity_edge[lindexes-1] = \
                            integrated_intensity_edge[lindexes-1] / ecount
                        std_intensity_edge[lindexes-1] = \
                            np.sqrt(fix(nd.mean(
                                (eimg - mean_intensity_edge[elabels-1])**2,
                                elabels, lindexes)))
                        min_intensity_edge[lindexes - 1] = fix(
                            nd.minimum(eimg, elabels, lindexes))
                        max_intensity_edge[lindexes - 1] = fix(
                            nd.maximum(eimg, elabels, lindexes))
                m = workspace.measurements
                for category, feature_name, measurement in \
                    ((INTENSITY, INTEGRATED_INTENSITY, integrated_intensity),
                     (INTENSITY, MEAN_INTENSITY, mean_intensity),
                     (INTENSITY, STD_INTENSITY, std_intensity),
                     (INTENSITY, MIN_INTENSITY, min_intensity),
                     (INTENSITY, MAX_INTENSITY, max_intensity),
                     (INTENSITY, INTEGRATED_INTENSITY_EDGE, integrated_intensity_edge),
                     (INTENSITY, MEAN_INTENSITY_EDGE, mean_intensity_edge),
                     (INTENSITY, STD_INTENSITY_EDGE, std_intensity_edge),
                     (INTENSITY, MIN_INTENSITY_EDGE, min_intensity_edge),
                     (INTENSITY, MAX_INTENSITY_EDGE, max_intensity_edge),
                     (INTENSITY, MASS_DISPLACEMENT, mass_displacement),
                     (INTENSITY, LOWER_QUARTILE_INTENSITY, lower_quartile_intensity),
                     (INTENSITY, MEDIAN_INTENSITY, median_intensity),
                     (INTENSITY, MAD_INTENSITY, mad_intensity),
                     (INTENSITY, UPPER_QUARTILE_INTENSITY, upper_quartile_intensity),
                     (C_LOCATION, LOC_CMI_X, cmi_x),
                     (C_LOCATION, LOC_CMI_Y, cmi_y),
                     (C_LOCATION, LOC_MAX_X, max_x),
                     (C_LOCATION, LOC_MAX_Y, max_y)):
                    measurement_name = "%s_%s_%s" % (category, feature_name,
                                                     image_name.value)
                    m.add_measurement(object_name.value, measurement_name,
                                      measurement)
                    if self.show_window and len(measurement) > 0:
                        statistics.append(
                            (image_name.value, object_name.value, feature_name,
                             np.round(np.mean(measurement),
                                      3), np.round(np.median(measurement), 3),
                             np.round(np.std(measurement), 3)))
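Most of the loop above is bookkeeping around a handful of scipy.ndimage reductions. A condensed sketch of the core per-object intensity measurements, mirroring the masked-pixel indexing used in the module (the data are invented, and the fix() wrapper and image cropping are omitted):

import numpy as np
from scipy import ndimage as nd

img = np.random.default_rng(2).random((32, 32))
labels, n = nd.label(img > 0.7)
lindexes = np.arange(1, n + 1)

lmask = labels > 0
limg, llabels = img[lmask], labels[lmask]

lcount = nd.sum(np.ones(len(limg)), llabels, lindexes)  # pixels per object
integrated = nd.sum(limg, llabels, lindexes)
mean = integrated / lcount
# std via the mean squared deviation from each object's own mean
std = np.sqrt(nd.mean((limg - mean[llabels - 1]) ** 2, llabels, lindexes))
lo = nd.minimum(limg, llabels, lindexes)
hi = nd.maximum(limg, llabels, lindexes)
print(np.round(mean[:3], 3), np.round(std[:3], 3))

scipy also ships ndimage.standard_deviation; the explicit form above simply mirrors how the module computes it.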
Example #46
def prep_gcm_wrevap(workspace):
    """Prepare GCM WREVAP monthly data files

    Calculate and save daily Thornton-Running solar
    Calculate and save daily Tdew
    Calculate Monthly Tmean, Tdew, and Rs
    Save monthly data to WREVAP data files
    Build WREVAP input files
    """

    logging.info('\nPreparing GCM WREVAP data')

    # Folder with the csv data files
    input_ws = r'Z:\USBR_Ag_Demands_Project\reservoir_evap\GCM_data\BC\CSV'

    input_daily_temp = 'F'
    input_daily_prcp = 'IN'
    output_daily_temp = 'C'

    # Output folder
    output_ws = r'Z:\USBR_Ag_Demands_Project\reservoir_evap\WREVAP_GCM'
    wrevap_temp = 'C'

    res_list = []
    # res_list = ['Lake_Mead']
    res_list = ['Lake_Tahoe', 'Lake_Mead', 'Lake_Powell']
    # res_list = [
    #     'American_Falls', 'Boysen', 'Canyon_Ferry', 'Elephant_Butte',
    #     'Grand_Coulee', 'Lahontan', 'Millerton_Lake', 'Shasta_Lake',
    #     'Upper_Klamath']

    # Adjust precipitation
    # PPT will be multiplied by these ratios
    #  (i.e. decrease the estimated values that are read in)
    ppt_month_factor_dict = dict()
    # New factors from Justin (2014-09-23)
    ppt_month_factor_dict['Lake_Tahoe'] = [
        0.683793809, 0.696432660, 0.691828784, 0.712677417, 0.743181881,
        0.668170098, 0.543929977, 0.604882144, 0.607345296, 0.675140118,
        0.678983427, 0.779749794
    ]
    # Old factors
    # ppt_month_factor_dict['Lake_Tahoe'] = [
    #     0.702914491, 0.720976639, 0.720924194, 0.774378465,
    #     0.850691236, 0.828274127, 0.872059365, 0.866350328,
    #     0.758308161, 0.738646093, 0.708893856, 0.804691107]

    # Adjust incoming solar
    # From: Lake_Tahoe_Solar_Calibration_Check.xlxs
    # Ratios are estimated divided by measured
    #  (i.e. decrease the estimated values that are read in)
    rs_month_factor_dict = dict()
    rs_month_factor_dict['Lake_Tahoe'] = [
        1.233, 1.144, 1.084, 1.025, 1.015, 1.041, 1.065, 1.031, 1.029, 1.013,
        1.132, 1.396
    ]
    rs_month_factor_dict['Lahontan'] = [
        1.108210828, 1.086, 1.066844029, 1.05, 1.030296257, 1.011742403,
        1.013442609, 1.013532861, 1.01, 1.017420093, 1.072664479, 1.133997425
    ]
    rs_month_factor_dict['Lake_Mead'] = [
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
    ]
    rs_month_factor_dict['Lake_Powell'] = [
        1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
    ]

    # Output solar is in W/m^2 for dailies, and MJ/m^2/d for WREVAP inputs

    # Ancillary files
    res_param_name = 'reservoir_parameters.csv'
    res_param_temp = 'C'
    default_ini_name = 'default.ini'
    # header_name = 'HEADER.txt'
    # bad_data_name = 'bad_data.csv'

    # Reservoir parameter fields
    # gcm_name_field = 'GCM_NAME'
    input_name_field = 'GCM_NAME'
    output_name_field = 'OUTPUT_NAME'
    depth_field = 'DEPTH'
    salin_field = 'SALINITY'
    elev_field = 'ELEVATION'
    lat_field = 'LATITUDE'
    # lon_field = 'LONGITUDE'
    tr_b0_field = 'TR_B0'
    tr_b1_field = 'TR_B1'
    tr_b2_field = 'TR_B2'
    ko_field = 'KO_'

    # Overwrite existing files
    overwrite_flag = True

    # DeltaT year range
    delta_t_year_range = (1950, 1999)

    # Check inputs
    if not os.path.isdir(input_ws):
        logging.error(
            '\nERROR: The input workspace {} does not exist'.format(input_ws))
        raise SystemExit
    if not os.path.isdir(output_ws):
        os.mkdir(output_ws)

    res_param_path = os.path.join(workspace, res_param_name)
    if not os.path.isfile(res_param_path):
        logging.error(('\nERROR: The reservoir parameter file (\'{}\')' +
                       ' does not exist').format(res_param_path))
        raise SystemExit()
    default_ini_path = os.path.join(workspace, default_ini_name)
    if not os.path.isfile(default_ini_path):
        logging.error(('\nERROR: The default config file (\'{}\')' +
                       ' does not exist').format(default_ini_path))
        raise SystemExit()
    # bad_data_path = os.path.join(workspace, bad_data_name)
    # if os.path.isfile(bad_data_path): os.remove(bad_data_path)

    input_daily_temp = input_daily_temp.upper()
    input_daily_prcp = input_daily_prcp.upper()
    output_daily_temp = output_daily_temp.upper()
    wrevap_temp = wrevap_temp.upper()
    res_param_temp = res_param_temp.upper()
    if input_daily_temp not in ['F', 'C']:
        logging.error('\nERROR: The input daily temperature must be F or C\n')
        raise SystemExit()
    if input_daily_prcp not in ['MM', 'IN']:
        logging.error(
            '\nERROR: The input daily precipitation must be MM or IN\n')
        raise SystemExit()
    if output_daily_temp not in ['F', 'C']:
        logging.error('\nERROR: The output daily temperature must be F or C\n')
        raise SystemExit()
    if wrevap_temp not in ['F', 'C']:
        logging.error('\nERROR: The WREVAP temperature must be F or C\n')
        raise SystemExit()
    if res_param_temp not in ['F', 'C']:
        logging.error('\nERROR: The reservoir parameter temperature ' +
                      'must be F or C\n')
        raise SystemExit()

    # Input file regular expressions
    # raw strings with escaped dots: avoids invalid-escape warnings and
    # stops '.' from matching any character
    csv_re = re.compile(r'\w+_\w+\.csv$')
    tmax_csv_re = re.compile(r'\w+_TMAX\.csv$')
    tmin_csv_re = re.compile(r'\w+_TMIN\.csv$')
    # dtrg_csv_re = re.compile(r'\w+_DTRG\.csv$')
    prcp_csv_re = re.compile(r'\w+_PRCP\.csv$')
    # tdew_csv_re = re.compile(r'\w+_TDEW\.csv$')
    # rso_csv_re = re.compile(r'\w+_RSO\.csv$')
    # rs_csv_re = re.compile(r'\w+_RS\.csv$')

    # Build list of reservoirs with TMAX and TMIN data
    csv_list = [item for item in os.listdir(input_ws) if csv_re.match(item)]
    tmax_csv_dict = dict([(item.split('_')[0], item) for item in csv_list
                          if tmax_csv_re.match(item)])
    tmin_csv_dict = dict([(item.split('_')[0], item) for item in csv_list
                          if tmin_csv_re.match(item)])
    prcp_csv_dict = dict([(item.split('_')[0], item) for item in csv_list
                          if prcp_csv_re.match(item)])
    # tdew_csv_dict = dict([
    #     (item.split('_')[0], item) for item in csv_list
    #     if tdew_csv_re.match(item)])
    # output_res_list = sorted(list(
    #     set(tmax_csv_dict.keys() + tmin_csv_dict.keys())))

    # Read in reservoir parameters as structured array
    res_param_array = np.atleast_2d(
        np.genfromtxt(res_param_path, delimiter=',', names=True, dtype=None))
    # res_param_fields = res_param_array.dtype.names
    # DEADBEEF
    # res_param_names = res_param_array[gcm_name_field]
    input_res_array = res_param_array[input_name_field]
    output_res_array = res_param_array[output_name_field]

    # Use reservoir parameters file to set input and output name lists
    input_res_list = input_res_array[0]
    output_res_list = output_res_array[0]

    # Write bad data to a txt file
    # bad_data_f = open(bad_data_path, 'w')

    # Check keys on factors
    for output_res in ppt_month_factor_dict.keys():
        if output_res not in output_res_list:
            logging.error(
                'The reservoir name {} is invalid'.format(output_res))
            raise SystemExit()
    for output_res in rs_month_factor_dict.keys():
        if output_res not in output_res_list:
            logging.error(
                'The reservoir name {} is invalid'.format(output_res))
            raise SystemExit()

    # Process each reservoir
    for input_res, output_res in sorted(zip(input_res_list, output_res_list)):
        logging.info('{}'.format(output_res))

        # Process target reservoirs
        if res_list and output_res not in res_list:
            continue

        # A TMAX and TMIN csv must be present to run
        try:
            tmax_csv_path = os.path.join(input_ws, tmax_csv_dict[input_res])
            tmin_csv_path = os.path.join(input_ws, tmin_csv_dict[input_res])
            prcp_csv_path = os.path.join(input_ws, prcp_csv_dict[input_res])
        except KeyError:
            continue
        logging.info('  GCM Name: {}'.format(input_res))

        # Daily output file names
        tdew_csv_name = '{}_TDEW.csv'.format(input_res)
        rso_csv_name = '{}_RSO.csv'.format(input_res)
        rs_csv_name = '{}_RS.csv'.format(input_res)

        # Daily output file paths
        tdew_csv_path = os.path.join(input_ws, tdew_csv_name)
        rso_csv_path = os.path.join(input_ws, rso_csv_name)
        rs_csv_path = os.path.join(input_ws, rs_csv_name)

        # Remove existing daily files if necessary
        if overwrite_flag and os.path.isfile(tdew_csv_path):
            os.remove(tdew_csv_path)
        if overwrite_flag and os.path.isfile(rso_csv_path):
            os.remove(rso_csv_path)
        if overwrite_flag and os.path.isfile(rs_csv_path):
            os.remove(rs_csv_path)

        # Get reservoir parameter data from file using output_res as key
        if output_res not in output_res_array:
            logging.info(
                '    Reservoir does not have data in {}'.format(output_res))
            continue
        res_param = res_param_array[output_res_array == output_res]
        # output_name = res_param[output_name_field][0]
        depth_flt = float(res_param[depth_field][0])
        salin_flt = float(res_param[salin_field][0])
        elev_flt = float(res_param[elev_field][0])
        lat_flt = float(res_param[lat_field][0])
        # lon_flt = float(res_param[lon_field][0])
        tr_b0_flt = float(res_param[tr_b0_field][0])
        tr_b1_flt = float(res_param[tr_b1_field][0])
        tr_b2_flt = float(res_param[tr_b2_field][0])
        ko_month_array = np.array([
            float(res_param[ko_field + month_abbr.upper()][0])
            for month_i, month_abbr in enumerate(calendar.month_abbr)
            if month_abbr
        ])
        del res_param

        # Convert ko temperature to celsius
        if res_param_temp == 'F':
            ko_month_array = fahrenheit_to_celsius(ko_month_array)
        elif res_param_temp == 'C':
            pass
        else:
            continue

        # Monthly output workspace
        res_output_ws = os.path.join(output_ws, output_res)
        if not os.path.isdir(res_output_ws):
            os.mkdir(res_output_ws)
        res_data_ws = os.path.join(res_output_ws, 'input_data')
        if not os.path.isdir(res_data_ws):
            os.mkdir(res_data_ws)

        # Read header row for each file to get GCM list
        # First field is date field, skip
        with open(tmax_csv_path, 'r') as f:
            header_str = f.readline().strip()
            # Header str has a comma at the end
            if header_str[-1] == ',':
                header_str = header_str[:-1]
        gcm_name_list = [i.strip() for i in header_str.split(',') if i][1:]
        header_fmt_str = '%d,' + ','.join(['%7.4f'] * len(gcm_name_list))

        # Read in daily Tmin and Tmax data, skip header row
        tmax_csv_array = np.loadtxt(tmax_csv_path, delimiter=',', skiprows=1)
        tmin_csv_array = np.loadtxt(tmin_csv_path, delimiter=',', skiprows=1)
        prcp_csv_array = np.loadtxt(prcp_csv_path, delimiter=',', skiprows=1)
        del tmax_csv_path, tmin_csv_path, prcp_csv_path

        # Get data subsets from full csv array
        date_array = tmax_csv_array[:, 0].astype(int)
        tmax_array = tmax_csv_array[:, 1:]
        tmin_array = tmin_csv_array[:, 1:]
        prcp_array = prcp_csv_array[:, 1:]
        del tmax_csv_array, tmin_csv_array, prcp_csv_array

        # Convert input temperature to celsius (for Thornton-Running)
        if input_daily_temp == 'F':
            tmax_array = fahrenheit_to_celsius(tmax_array)
            tmin_array = fahrenheit_to_celsius(tmin_array)
        elif input_daily_temp == 'C':
            pass
        else:
            continue

        # Convert input precipitation to mm (from inches)
        if input_daily_prcp == 'IN':
            prcp_array *= 25.4
        elif input_daily_prcp == 'MM':
            pass
        else:
            continue

        # Calculate Tmax-Tmin
        tmax_tmin_array = tmax_array - tmin_array

        # Where Tmin > Tmax, set those entries to NaN and log them
        tmax_tmin_mask = (tmax_tmin_array >= 0)
        if not np.all(tmax_tmin_mask):
            logging.warning('    WARNING: TMIN > TMAX for {} cells'.format(
                np.sum(~tmax_tmin_mask)))
            tmax_tmin_mask_i = np.nonzero(~tmax_tmin_mask)
            for gcm, date in zip(
                    np.array(gcm_name_list)[tmax_tmin_mask_i[1]],
                    date_array[tmax_tmin_mask_i[0]]):
                # bad_data_f.write(
                #     ','.join([output_res, gcm, str(date)]) + '\n')
                logging.debug('    {} {}'.format(gcm, date))
                del date, gcm
            del tmax_tmin_mask_i
        tmax_tmin_array[~tmax_tmin_mask] = np.nan
        tmax_array[~tmax_tmin_mask] = np.nan
        tmin_array[~tmax_tmin_mask] = np.nan

        # Calculate mean temperature
        tmean_array = 0.5 * (tmax_array + tmin_array)
        del tmax_array

        # Get Year/Month for each DOY
        ym_daily_array = np.array([
            dt.datetime.strptime(str(d), '%Y%m%d').strftime('%Y_%m')
            for d in date_array
        ])
        year_array = np.array([
            dt.datetime.strptime(str(d), '%Y%m%d').strftime('%Y')
            for d in date_array
        ]).astype(int)
        month_array = np.array([
            dt.datetime.strptime(str(d), '%Y%m%d').strftime('%m')
            for d in date_array
        ]).astype(int)
        # Get DOY from date
        doy_array = np.array([
            dt.datetime.strptime(str(d), '%Y%m%d').strftime('%j')
            for d in date_array
        ]).astype(int)
        # Get Year/Month for each month
        ym_month_array = np.unique(ym_daily_array)

        # Calculate mean monthly temperature difference for each day
        delta_t_array = np.zeros(tmean_array.shape)
        # Only use "historical" temperatures
        year_mask = ((year_array >= delta_t_year_range[0]) &
                     (year_array <= delta_t_year_range[1]))
        for gcm_i, gcm_name in enumerate(gcm_name_list):
            # Only calculate mean for days with Tmax > Tmin
            gcm_mask = (year_mask & tmax_tmin_mask[:, gcm_i])
            mean_array = ndimage.mean(tmax_tmin_array[:, gcm_i][gcm_mask],
                                      labels=month_array[gcm_mask],
                                      index=range(1, 13))
            for month_i, month in enumerate(range(1, 13)):
                month_mask = (month_array == month)
                delta_t_array[:, gcm_i][month_mask] = mean_array[month_i]
                del month_mask
            del mean_array, gcm_mask

        # Map KO from mean monthlies to individual dailies
        ko_array = np.zeros(tmin_array.shape)
        for month_i, month in enumerate(range(1, 13)):
            ko_array[month_array == month] = ko_month_array[month_i]

        # Calculate Tdew
        tdew_array = (tmin_array - ko_array)
        del ko_array, tmin_array

        # Calculate vapor pressure
        ea_array = calc_vapor_pressure(tdew_array)

        # Calculate pressure
        pres_flt = float(calc_pressure(elev_flt))

        # Calculate Thornton-Running Solar
        rso_array, rs_array = calc_tr_rso(lat_flt, pres_flt, ea_array,
                                          doy_array[:, np.newaxis],
                                          delta_t_array, tmax_tmin_array,
                                          tr_b0_flt, tr_b1_flt, tr_b2_flt)
        del ea_array, pres_flt, delta_t_array, tmax_tmin_array
        del doy_array

        # Scale Rso values by monthly factor
        if output_res in rs_month_factor_dict.keys():
            for month_i, month in enumerate(range(1, 13)):
                logging.info('    Scaling Monthly Rs: {} {}'.format(
                    month, rs_month_factor_dict[output_res][month_i]))
                rs_array[month_array ==
                         month] /= rs_month_factor_dict[output_res][month_i]

        # Make copies of output arrays for unit conversions
        tdew_daily_array = np.copy(tdew_array)
        rso_daily_array = np.copy(rso_array)
        rs_daily_array = np.copy(rs_array)

        # Convert output daily dew point temperature from celsius
        if output_daily_temp == 'F':
            tdew_daily_array = celsius_to_fahrenheit(tdew_daily_array)
        elif output_daily_temp == 'C':
            pass
        else:
            continue

        # Convert daily solar from MJ/m^2/d to W/m^2 (1 MJ/m^2/d = 11.574 W/m^2)
        rso_daily_array *= 11.574
        rs_daily_array *= 11.574

        # Save daily values
        # Append date_array at beginning
        if not os.path.isfile(tdew_csv_path):
            np.savetxt(tdew_csv_path,
                       np.hstack((date_array[:,
                                             np.newaxis], tdew_daily_array)),
                       delimiter=',',
                       header=header_str,
                       fmt=header_fmt_str,
                       comments='')
        if not os.path.isfile(rso_csv_path):
            np.savetxt(rso_csv_path,
                       np.hstack((date_array[:, np.newaxis], rso_daily_array)),
                       delimiter=',',
                       header=header_str,
                       fmt=header_fmt_str,
                       comments='')
        if not os.path.isfile(rs_csv_path):
            np.savetxt(rs_csv_path,
                       np.hstack((date_array[:, np.newaxis], rs_daily_array)),
                       delimiter=',',
                       header=header_str,
                       fmt=header_fmt_str,
                       comments='')
        del tdew_daily_array, rso_daily_array, rs_daily_array,
        del date_array, header_fmt_str

        # Calculate fixed monthly values
        year_array = np.array([int(ym.split('_')[0])
                               for ym in ym_month_array]).astype(int)
        month_array = np.array(
            [int(ym.split('_')[1]) for ym in ym_month_array]).astype(int)
        start_array = np.ones(ym_month_array.size).astype(int)
        length_array = np.array([
            calendar.monthrange(*map(int, ym.split('_')))[1]
            # calendar.monthrange(int(ym.split('_')[0]), int(ym.split('_')[1]))[1]
            for ym in ym_month_array
        ]).astype(int)

        # Calculate variable monthly values (Rso, Rs, Tdew, Tmean)
        tmean_month_array = np.zeros(
            (ym_month_array.size, tmean_array.shape[1]))
        tdew_month_array = np.zeros(
            (ym_month_array.size, tmean_array.shape[1]))
        rso_month_array = np.zeros((ym_month_array.size, tmean_array.shape[1]))
        rs_month_array = np.zeros((ym_month_array.size, tmean_array.shape[1]))
        prcp_month_array = np.zeros((ym_month_array.size, prcp_array.shape[1]))
        for gcm_i, gcm_name in enumerate(gcm_name_list):
            tmean_month_array[:, gcm_i] = ndimage.mean(
                tmean_array[:, gcm_i][tmax_tmin_mask[:, gcm_i]],
                labels=ym_daily_array[tmax_tmin_mask[:, gcm_i]],
                index=ym_month_array)
            tdew_month_array[:, gcm_i] = ndimage.mean(
                tdew_array[:, gcm_i][tmax_tmin_mask[:, gcm_i]],
                labels=ym_daily_array[tmax_tmin_mask[:, gcm_i]],
                index=ym_month_array)
            rso_month_array[:, gcm_i] = ndimage.mean(
                rso_array[:, gcm_i][tmax_tmin_mask[:, gcm_i]],
                labels=ym_daily_array[tmax_tmin_mask[:, gcm_i]],
                index=ym_month_array)
            rs_month_array[:, gcm_i] = ndimage.mean(
                rs_array[:, gcm_i][tmax_tmin_mask[:, gcm_i]],
                labels=ym_daily_array[tmax_tmin_mask[:, gcm_i]],
                index=ym_month_array)
            prcp_month_array[:, gcm_i] = ndimage.sum(
                prcp_array[:, gcm_i][tmax_tmin_mask[:, gcm_i]],
                labels=ym_daily_array[tmax_tmin_mask[:, gcm_i]],
                index=ym_month_array)
        del tmean_array, tdew_array, rso_array, rs_array, prcp_array
        del ym_month_array, ym_daily_array

        # Convert output monthly temperatures (Tdew, Tmean) from Celsius
        if wrevap_temp == 'F':
            tdew_month_array = celsius_to_fahrenheit(tdew_month_array)
            tmean_month_array = celsius_to_fahrenheit(tmean_month_array)
        elif wrevap_temp == 'C':
            pass
        else:
            continue

        # Scale PPT values by monthly factor
        if output_res in ppt_month_factor_dict.keys():
            for month_i, month in enumerate(range(1, 13)):
                logging.info('    Scaling Monthly PPT: {} {}'.format(
                    month, ppt_month_factor_dict[output_res][month_i]))
                prcp_month_array[
                    month_array ==
                    month] *= ppt_month_factor_dict[output_res][month_i]

        # Save monthly data for each GCM to a separate data file
        wrevap_header_str = 'YEAR,MONTH,STARTDAY,LENGTH,TD,T,S,PPT'
        wrevap_fmt_str = '%d,%d,%d,%d,%f,%f,%f,%f'
        for gcm_i, gcm_name in enumerate(gcm_name_list):
            output_csv_name = '{}_{}.csv'.format(output_res,
                                                 gcm_name.replace('.', '_'))
            output_csv_path = os.path.join(res_data_ws, output_csv_name)
            if overwrite_flag and os.path.isfile(output_csv_path):
                os.remove(output_csv_path)
            if not os.path.isfile(output_csv_path):
                np.savetxt(output_csv_path,
                           np.vstack((year_array, month_array, start_array,
                                      length_array, tdew_month_array[:, gcm_i],
                                      tmean_month_array[:, gcm_i],
                                      rs_month_array[:, gcm_i],
                                      prcp_month_array[:, gcm_i])).T,
                           delimiter=',',
                           header=wrevap_header_str,
                           comments='',
                           fmt=wrevap_fmt_str)
        del year_array, month_array, start_array, length_array
        del tdew_month_array, tmean_month_array
        del rs_month_array, rso_month_array, prcp_month_array

        # Build a Python WREVAP_gcm input file
        output_ini_name = '{}.ini'.format(output_res.lower())
        output_ini_path = os.path.join(res_output_ws, output_ini_name)
        if os.path.isfile(output_ini_path):
            os.remove(output_ini_path)
        default_ini_f = open(default_ini_path, 'r')
        output_ini_f = open(output_ini_path, 'w')
        for default_line in default_ini_f:
            if default_line.strip().startswith('SITE = '):
                default_line = 'SITE = {}\n'.format(output_res.upper().replace(
                    ' ', '_'))
            elif default_line.strip().startswith('PHID = '):
                default_line = 'PHID = {}\n'.format(lat_flt)
            elif default_line.strip().startswith('P = '):
                default_line = 'P = {}\n'.format(elev_flt)
            elif default_line.strip().startswith('DA = '):
                default_line = 'DA = {}\n'.format(depth_flt)
            elif default_line.strip().startswith('SALT = '):
                default_line = 'SALT = {}\n'.format(salin_flt)
            elif default_line.strip().startswith('LK = '):
                default_line = 'LK = 2\n'
            elif default_line.strip().startswith('IT = '):
                if wrevap_temp == 'C':
                    default_line = 'IT = 0\n'
                elif wrevap_temp == 'F':
                    default_line = 'IT = 1\n'
            elif default_line.strip().startswith('IS = '):
                default_line = 'IS = 3\n'
            elif default_line.strip().startswith('IV = '):
                default_line = 'IV = 0\n'
            elif default_line.strip().startswith('IP = '):
                default_line = 'IP = 1\n'
            output_ini_f.write(default_line)
        default_ini_f.close()
        output_ini_f.close()

        # Cleanup
        del header_str, gcm_name_list
        del depth_flt, salin_flt, elev_flt, lat_flt
    del input_res_list, output_res_list
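One trick worth pulling out of this function: scipy.ndimage.mean and scipy.ndimage.sum accept non-numeric labels, so daily values can be collapsed into calendar months by labeling each day with its 'YYYY_MM' string, exactly as done above. A stripped-down sketch with fake data:

import numpy as np
from scipy import ndimage

days = np.arange('2000-01-01', '2000-04-01', dtype='datetime64[D]')
values = np.random.default_rng(3).random(days.size)

ym_daily = np.array([str(d)[:7].replace('-', '_') for d in days])  # 'YYYY_MM' per day
ym_month = np.unique(ym_daily)

monthly_mean = ndimage.mean(values, labels=ym_daily, index=ym_month)
monthly_sum = ndimage.sum(values, labels=ym_daily, index=ym_month)  # totals, e.g. for precipitation
print(dict(zip(ym_month, np.round(monthly_mean, 3))))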
Example #47
    qbinsc = np.copy(qbins)
    qbinsc[1:] += qstep / 2.
    #create an array labeling each voxel according to which qbin it belongs
    qbin_labels = np.searchsorted(qbins, qr, "right")
    qbin_labels -= 1

    #assume rho is given as electron density, not electron count
    #convert from density to electron count for FFT calculation
    rho *= dV

    #create list of qbin indices just in region of data for later F scaling
    qbin_args = np.copy(qbinsc)
    F = np.fft.fftn(rho)
    I3D = np.abs(F)**2
    Imean = ndimage.mean(I3D,
                         labels=qbin_labels,
                         index=np.arange(0,
                                         qbin_labels.max() + 1))

    if args.plot:
        plt.plot(qbinsc, Imean, label='Default dq = %.4f' % (2 * np.pi / side))
    print('Default dq = %.4f' % (2 * np.pi / side))

    if args.dq is not None or args.n is not None:

        #padded to get desired dq value (or near it)
        if args.n is not None:
            n = args.n
        else:
            current_dq = 2 * np.pi / side
            desired_dq = args.dq
            if desired_dq > current_dq:
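
A minimal, self-contained sketch of the same radial-binning pattern used above: label every voxel with its q-shell index, then let a single ndimage.mean call average all shells at once. The grid size, shell count, and random rho below are illustrative stand-ins, not the script's actual inputs.

import numpy as np
from scipy import ndimage

n = 32                                        # toy cubic grid
qx = 2 * np.pi * np.fft.fftfreq(n)
qr = np.sqrt(sum(q**2 for q in np.meshgrid(qx, qx, qx, indexing='ij')))
qbins = np.linspace(0, qr.max(), 20)          # shell edges
qbin_labels = np.searchsorted(qbins, qr, 'right') - 1

rho = np.random.rand(n, n, n)                 # stand-in electron density
I3D = np.abs(np.fft.fftn(rho))**2
Imean = ndimage.mean(I3D, labels=qbin_labels,
                     index=np.arange(qbin_labels.max() + 1))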
Example #48
0
    def _calculateArrayId(self):
        """
        Calculates statistics (see methods calculate) when ids are array, even
        if ids have one element only.
        """

        # make sure the result data structures are ok
        self._prepareArrays(arrays=('mean', 'std', 'min', 'max', 'minPos',
                                    'maxPos'),
                            widths=(1, 1, 1, 1, self.ndim, self.ndim),
                            dtypes=(float, float, float, float, int, int))

        # do calculations for each segment separately
        self.mean[self._ids] = ndimage.mean(input=self.data,
                                            labels=self.labels,
                                            index=self._ids)
        self.std[self._ids] = ndimage.standard_deviation(input=self.data,
                                                         labels=self.labels,
                                                         index=self._ids)
        extr = ndimage.extrema(input=self.data,
                               labels=self.labels,
                               index=self._ids)

        # figure out if 0 is the only id
        zero_id = False
        if isinstance(self._ids, (numpy.ndarray, list)):
            zero_id = (self._ids[0] == 0)
        else:
            zero_id = (self._ids == 0)

        # do calculations for all segments together
        if not zero_id:
            from .segment import Segment
            seg = Segment()
            locLabels = seg.keep(ids=self._ids, data=self.labels.copy())
            self.mean[0] = ndimage.mean(input=self.data,
                                        labels=locLabels,
                                        index=None)
            self.std[0] = ndimage.standard_deviation(input=self.data,
                                                     labels=locLabels,
                                                     index=None)
            extr0 = ndimage.extrema(input=self.data,
                                    labels=locLabels,
                                    index=None)

        # put extrema for individual labels in data arrays
        (self.min[self._ids], self.max[self._ids]) = (extr[0], extr[1])
        #self.max[self._ids] = -numpy.array(extr_bug[0])
        if (self._ids.size == 1) and not isinstance(extr[2], list):
            self.minPos[self._ids] = [extr[2]]
            self.maxPos[self._ids] = [extr[3]]
            #self.maxPos[self._ids] = [extr_bug[2]]
        else:
            self.minPos[self._ids] = extr[2]
            self.maxPos[self._ids] = extr[3]
            #self.maxPos[self._ids] = extr_bug[3]

        # put total extrema in data arrays at index 0
        if not zero_id:
            (self.min[0], self.max[0]) = (extr0[0], extr0[1])
            #self.max[0] = -extr0_bug[0]
            if self.ndim == 1:
                self.minPos[0] = extr0[2][0]
                self.maxPos[0] = extr0[3][0]
                #self.maxPos[0] = extr0_bug[2][0]
            else:
                self.minPos[0] = extr0[2]
                self.maxPos[0] = extr0[3]
Example #49
0
def freqListStat(freqlist):
    freqnumpy = numpy.asarray(freqlist, dtype=numpy.int32)
    print("min=", ndimage.minimum(freqnumpy))
    print("max=", ndimage.maximum(freqnumpy))
    print("mean=", ndimage.mean(freqnumpy))
    print("stdev=", ndimage.standard_deviation(freqnumpy))
Example #50
0
def groupsstats_1d(y, x, labelsunique):
    '''use ndimage to get fast mean and variance'''
    labelmeans = np.array(ndimage.mean(x, labels=y, index=labelsunique))
    labelvars = np.array(ndimage.var(x, labels=y, index=labelsunique))
    return labelmeans, labelvars
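
A quick usage sketch with invented values, showing what groupsstats_1d returns; note that ndimage.var is the population variance (ddof=0), not the sample variance.

import numpy as np
from scipy import ndimage

y = np.array([0, 0, 1, 1, 2, 2])              # group labels
x = np.array([1., 3., 2., 4., 10., 14.])      # observations
means, variances = groupsstats_1d(y, x, np.unique(y))
# means     -> [ 2.  3. 12.]
# variances -> [ 1.  1.  4.]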
Example #51
0
center = np.unravel_index(np.argmax(cutout), cutout.shape)
cy, cx = center

dy = (Y - cy)
dx = (X - cx)
costh = np.cos(beams_table[plot_channel][2] * (np.pi / 180))
sinth = np.sin(beams_table[plot_channel][2] * (np.pi / 180))
rmajmin = beams_table[plot_channel][1] / beams_table[plot_channel][0]

rr = ((dx * costh + dy * sinth)**2 / rmajmin**2 +
      (dx * sinth - dy * costh)**2 / 1**2)**0.5
rbin = (rr).astype(int)

#radial_mean = ndimage.mean(cutout**2, labels=rbin, index=np.arange(max_npix_peak))
radial_mean = ndimage.mean(np.abs(cutout),
                           labels=rbin,
                           index=np.arange(max_npix_peak))
first_min_ind = signal.find_peaks(-radial_mean)[0][0]

#cutout_posit = np.where(cutout > 0, cutout, 0.)
#radial_sum = ndimage.sum(cutout_posit, labels=rbin, index=np.arange(first_min_ind))
radial_sum = ndimage.sum(cutout, labels=rbin, index=np.arange(first_min_ind))
psf_sum = np.sum(radial_sum)

#Z = np.full(shape, 0)

bmaj_npix = beams_table[plot_channel][0] / (header['CDELT2'] * 3600.)
bmin_npix = beams_table[plot_channel][1] / (header['CDELT2'] * 3600.)
clean_beam_sum = (np.pi / (4 * np.log(2))) * bmaj_npix * bmin_npix

scale_factor = clean_beam_sum / psf_sum
Example #52
0
lv_mask = np.where(labels == lv_val, 1, 0)

# Find bounding box of left ventricle
bboxes = ndi.find_objects(lv_mask)
print('Number of objects:', len(bboxes))
print('Indices for first box:', bboxes[0])

## MEASURING INTENSITY

# After objects are segmented, their properties can be measured with scipy
# tools: mean, standard_deviation, and labeled_comprehension.
vol = imageio.volread("SCD-3d.npz")
label = imageio.volread("labels.npz")

# all pixels
ndi.mean(vol)

# passing labels restricts the analysis to nonzero pixels
ndi.mean(vol, label)

# providing index along with labels gives the mean intensity for a single
# label or for multiple labels
ndi.mean(vol, label, index=1)
ndi.mean(vol, label, index=[1, 2])

# object histograms can also use labels to be specific about one object
hist = ndi.histogram(vol, min=0, max=255, bins=256)  # for the whole image
obj_hist = ndi.histogram(vol, 0, 255, 256, label, index=[1, 2])

## these histograms are very informative about a segmentation:
# many peaks and variations mean the object spans various tissue types
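
Since SCD-3d.npz and labels.npz are not bundled here, a self-contained version of the same calls with synthetic stand-ins for the volume and its labels:

import numpy as np
from scipy import ndimage as ndi

vol = np.random.randint(0, 256, size=(4, 64, 64))    # stand-in volume
label = np.zeros_like(vol)
label[:, 10:30, 10:30] = 1                           # object 1
label[:, 40:60, 40:60] = 2                           # object 2

ndi.mean(vol)                        # grand mean over every voxel
ndi.mean(vol, label)                 # mean over voxels with label != 0
ndi.mean(vol, label, index=1)        # object 1 only
ndi.mean(vol, label, index=[1, 2])   # one mean per object

hist = ndi.histogram(vol, min=0, max=255, bins=256)
obj_hist = ndi.histogram(vol, 0, 255, 256, label, index=[1, 2])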
Example #53
0
    else:
        # get the parcel-level affiliations if we have parcellated data
        data = data.assign(networks=getattr(networks, 'parcels'))
        # when working with parcellated data, our spins were NOT generated with
        # the medial wall / corpuscallosum included, so we should drop these
        # parcels (which should [ideally] have values of ~=0)
        todrop = np.array(putils.DROP)[np.isin(putils.DROP, data.index)]
        if len(todrop) > 0:
            data = data.drop(todrop, axis=0)

    return data


if __name__ == "__main__":
    data = load_data('yeo', PARC, SCALE)
    netmeans = ndimage.mean(data['myelin'], data['networks'],
                            np.unique(data['networks']))

    fig, axes = plt.subplots(len(NETWORKS),
                             len(METHODS),
                             sharex=True,
                             sharey=True,
                             figsize=(25, 10))
    for row, network in enumerate(NETWORKS):
        for col, spatnull in enumerate(METHODS):
            idx = YEO_CODES[network] - 1
            ax = axes[row, col]
            fn = (HCPDIR / PARC / 'nulls' / 'yeo' / spatnull / THRESH /
                  f'{SCALE}_nulls.csv')
            null = np.loadtxt(fn, delimiter=',')[:, idx]
            sns.kdeplot(null, fill=True, alpha=0.4, color=SPATHUES[col], ax=ax)
            sns.despine(ax=ax)
Example #54
0
    def pixelPeak(self, newimage=None, guess=None, limit=None):
        """
        guess = where to center your search for the peak (row, col)
        limit = shape of the search box (with guess at the center)
        Setting guess and limit can serve two purposes:
            1) You can limit your peak search if you are pretty sure
               where it will be.
            2) Given that the image may wrap around into negative
               space, you can specify that you want to search for the
               peak in these out-of-bounds areas.  For instance, a
               (512,512) image may have a peak at (500,500).  You may
               specify a guess of (-10,-10) and a relatively small
               limit box.  The (500,500) peak will be found, but it
               will be returned as (-12,-12).
        """
        if newimage is not None:
            self.setImage(newimage)

        if self.results['pixel peak'] is None:

            if None not in (guess, limit):
                cropcenter = limit[0] / 2.0 - 0.5, limit[1] / 2.0 - 0.5
                im = imagefun.crop_at(self.image, guess, limit)
            else:
                cropcenter = None
                im = self.image

            peak = numpy.argmax(im.ravel())
            peakvalue = im.flat[peak]
            rows, cols = im.shape
            peakrow = peak // cols  # floor division: flat index -> row
            peakcol = peak % cols

            if cropcenter is not None:
                peakrow = int(round(guess[0] + peakrow - cropcenter[0]))
                peakcol = int(round(guess[1] + peakcol - cropcenter[1]))

            pixelpeak = (peakrow, peakcol)
            self.results['pixel peak'] = pixelpeak
            self.results['pixel peak value'] = peakvalue
            if peakrow < 0:
                unsignedr = peakrow + self.image.shape[0]
            else:
                unsignedr = peakrow
            if peakcol < 0:
                unsignedc = peakcol + self.image.shape[1]  # wrap columns by width, not height
            else:
                unsignedc = peakcol
            self.results['unsigned pixel peak'] = unsignedr, unsignedc

            #NEIL's SNR calculation
            self.results['noise'] = nd_image.standard_deviation(im)
            self.results['mean'] = nd_image.mean(im)
            self.results['signal'] = self.results[
                'pixel peak value'] - self.results['mean']
            # noise == noise is False only when noise is NaN; guard against
            # NaN or zero noise before dividing
            if self.results['noise'] == self.results[
                    'noise'] and self.results['noise'] != 0.0:
                self.results[
                    'snr'] = self.results['signal'] / self.results['noise']
            else:
                self.results['snr'] = self.results['pixel peak value']
            #print self.results['noise'],self.results['mean'],self.results['signal'],self.results['snr']

        return self.results['pixel peak']
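
The signed/unsigned bookkeeping above is plain modular arithmetic on a wrap-around image. A standalone sketch of the same conversion, plus the peak-minus-mean-over-noise SNR used above; the array and peak position are invented:

import numpy as np

im = np.zeros((512, 512))
im[500, 500] = 1.0                                  # peak near the corner

peak = np.unravel_index(np.argmax(im), im.shape)    # (500, 500), unsigned
signed = tuple(p - s if p > s // 2 else p
               for p, s in zip(peak, im.shape))     # (-12, -12), wrapped

noise = im.std()
signal = im.max() - im.mean()
snr = signal / noise if noise else im.max()         # guard zero noise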
Example #55
0
    def run_on_objects(self, object_name, workspace):
        """Run, computing the area measurements for a single map of objects"""
        objects = workspace.get_objects(object_name)

        if len(objects.shape) == 2:
            #
            # Do the ellipse-related measurements
            #
            i, j, l = objects.ijv.transpose()
            centers, eccentricity, major_axis_length, minor_axis_length, \
            theta, compactness = \
                ellipse_from_second_moments_ijv(i, j, 1, l, objects.indices, True)
            del i
            del j
            del l
            self.record_measurement(workspace, object_name, F_ECCENTRICITY,
                                    eccentricity)
            self.record_measurement(workspace, object_name,
                                    F_MAJOR_AXIS_LENGTH, major_axis_length)
            self.record_measurement(workspace, object_name,
                                    F_MINOR_AXIS_LENGTH, minor_axis_length)
            self.record_measurement(workspace, object_name, F_ORIENTATION,
                                    theta * 180 / np.pi)
            self.record_measurement(workspace, object_name, F_COMPACTNESS,
                                    compactness)
            is_first = False
            if len(objects.indices) == 0:
                nobjects = 0
            else:
                nobjects = np.max(objects.indices)
            mcenter_x = np.zeros(nobjects)
            mcenter_y = np.zeros(nobjects)
            mextent = np.zeros(nobjects)
            mperimeters = np.zeros(nobjects)
            msolidity = np.zeros(nobjects)
            euler = np.zeros(nobjects)
            max_radius = np.zeros(nobjects)
            median_radius = np.zeros(nobjects)
            mean_radius = np.zeros(nobjects)
            min_feret_diameter = np.zeros(nobjects)
            max_feret_diameter = np.zeros(nobjects)
            zernike_numbers = self.get_zernike_numbers()
            zf = {}
            for n, m in zernike_numbers:
                zf[(n, m)] = np.zeros(nobjects)
            if nobjects > 0:
                chulls, chull_counts = convex_hull_ijv(objects.ijv,
                                                       objects.indices)
                for labels, indices in objects.get_labels():
                    to_indices = indices - 1
                    distances = distance_to_edge(labels)
                    mcenter_y[to_indices], mcenter_x[to_indices] = \
                        maximum_position_of_labels(distances, labels, indices)
                    max_radius[to_indices] = fix(
                        scind.maximum(distances, labels, indices))
                    mean_radius[to_indices] = fix(
                        scind.mean(distances, labels, indices))
                    median_radius[to_indices] = median_of_labels(
                        distances, labels, indices)
                    #
                    # The extent (area / bounding box area)
                    #
                    mextent[to_indices] = calculate_extents(labels, indices)
                    #
                    # The perimeter distance
                    #
                    mperimeters[to_indices] = calculate_perimeters(
                        labels, indices)
                    #
                    # Solidity
                    #
                    msolidity[to_indices] = calculate_solidity(labels, indices)
                    #
                    # Euler number
                    #
                    euler[to_indices] = euler_number(labels, indices)
                    #
                    # Zernike features
                    #
                    if self.calculate_zernikes.value:
                        zf_l = cpmz.zernike(zernike_numbers, labels, indices)
                        for (n, m), z in zip(zernike_numbers,
                                             zf_l.transpose()):
                            zf[(n, m)][to_indices] = z
                #
                # Form factor
                #
                ff = 4.0 * np.pi * objects.areas / mperimeters**2
                #
                # Feret diameter
                #
                min_feret_diameter, max_feret_diameter = \
                    feret_diameter(chulls, chull_counts, objects.indices)

            else:
                ff = np.zeros(0)

            for f, m in ([(F_AREA, objects.areas), (F_CENTER_X, mcenter_x),
                          (F_CENTER_Y, mcenter_y),
                          (F_CENTER_Z, np.ones_like(mcenter_x)),
                          (F_EXTENT, mextent), (F_PERIMETER, mperimeters),
                          (F_SOLIDITY, msolidity), (F_FORM_FACTOR, ff),
                          (F_EULER_NUMBER, euler),
                          (F_MAXIMUM_RADIUS, max_radius),
                          (F_MEAN_RADIUS, mean_radius),
                          (F_MEDIAN_RADIUS, median_radius),
                          (F_MIN_FERET_DIAMETER, min_feret_diameter),
                          (F_MAX_FERET_DIAMETER, max_feret_diameter)] +
                         [(self.get_zernike_name((n, m)), zf[(n, m)])
                          for n, m in zernike_numbers]):
                self.record_measurement(workspace, object_name, f, m)
        else:
            labels = objects.segmented

            props = skimage.measure.regionprops(labels)

            # Area
            areas = [prop.area for prop in props]

            self.record_measurement(workspace, object_name, F_AREA, areas)

            # Extent
            extents = [prop.extent for prop in props]

            self.record_measurement(workspace, object_name, F_EXTENT, extents)

            # Centers of mass
            centers = objects.center_of_mass()

            center_z, center_x, center_y = centers.transpose()

            self.record_measurement(workspace, object_name, F_CENTER_X,
                                    center_x)

            self.record_measurement(workspace, object_name, F_CENTER_Y,
                                    center_y)

            self.record_measurement(workspace, object_name, F_CENTER_Z,
                                    center_z)

            # Perimeters
            perimeters = []

            for label in np.unique(labels):
                if label == 0:
                    continue

                volume = np.zeros_like(labels, dtype='bool')

                volume[labels == label] = True

                verts, faces, _, _ = skimage.measure.marching_cubes(
                    volume,
                    spacing=objects.parent_image.spacing
                    if objects.has_parent_image else (1.0, ) * labels.ndim,
                    level=0)

                perimeters += [skimage.measure.mesh_surface_area(verts, faces)]

            if len(perimeters) == 0:
                self.record_measurement(workspace, object_name, F_PERIMETER,
                                        [0])
            else:
                self.record_measurement(workspace, object_name, F_PERIMETER,
                                        perimeters)

            for feature in self.get_feature_names():
                if feature in [
                        F_AREA, F_EXTENT, F_CENTER_X, F_CENTER_Y, F_CENTER_Z,
                        F_PERIMETER
                ]:
                    continue

                self.record_measurement(workspace, object_name, feature,
                                        [np.nan])
Example #56
0
def calcNormConvMap(image, imagefft, tmplmask, oversized, pixrad):
        t1 = time.time()
        print(" ... computing FindEM's norm_conv_map")

        #print " IMAGE"
        #imageinfo(image)
        #numeric_to_jpg(image,"image.jpg")
        #print " TMPLMASK"
        #imageinfo(tmplmask)
        #numeric_to_jpg(tmplmask,"tmplmask.jpg")

        if(nd_image.minimum(image) < 0.0 or nd_image.minimum(tmplmask) < 0.0):
                print " !!! WARNING image or mask is less than zero"

        tmplsize = (tmplmask.shape)[1]
        nmask = tmplmask.sum()
        tmplshape  = tmplmask.shape
        imshape  = image.shape

        shift = int(-1*tmplsize/2.0)
        #tmplmask2 = nd_image.shift(tmplmask, shift, mode='wrap', order=0)
        #tmplmask2 = tmplmask

        err = 0.000001

        #print " IMAGESQ"
        #imageinfo(image*image)

        #print " CNV2 = convolution(image**2, mask)"
        tmplmaskfft = fft.real_fft2d(tmplmask, s=oversized)
        imagesqfft = fft.real_fft2d(image*image, s=oversized)
        cnv2 = convolution_fft(imagesqfft, tmplmaskfft, oversized)
        cnv2 = cnv2 + err
        del imagesqfft
        #SHIFTING CAN BE SLOW
        #cnv2 = nd_image.shift(cnv2, shift, mode='wrap', order=0)
        #imageinfo(cnv2)
        #print cnv2[499,499],cnv2[500,500],cnv2[501,501]
        #numeric_to_jpg(cnv2,"cnv2.jpg")

        #print " CNV1 = convolution(image, mask)"
        cnv1 = convolution_fft(imagefft, tmplmaskfft, oversized)
        cnv1 = cnv1 + err
        del tmplmaskfft
        #SHIFTING CAN BE SLOW
        cnv1 = nd_image.shift(cnv1, shift, mode='wrap', order=0)
        #imageinfo(cnv1)
        #print cnv1[499,499],cnv1[500,500],cnv1[501,501]
        #numeric_to_jpg(cnv1*cnv1,"cnv1.jpg")

        #print " V2 = ((nm*cnv2)-(cnv1*cnv1))/(nm*nm)"
        a1 = nmask*cnv2
        a1 = a1[ tmplshape[0]//2-1:imshape[0]+tmplshape[0]//2-1, tmplshape[1]//2-1:imshape[1]+tmplshape[1]//2-1 ]
        #imageinfo(a1)
        #print a1[499,499],a1[500,500],a1[501,501]
        b1 = cnv1*cnv1
        b1 = b1[ tmplshape[0]//2-1:imshape[0]+tmplshape[0]//2-1, tmplshape[1]//2-1:imshape[1]+tmplshape[1]//2-1 ]
        del cnv2
        del cnv1
        #imageinfo(b1)
        #print b1[499,499],b1[500,500],b1[501,501]

        #print (a1[500,500]-b1[500,500])
        #print nmask**2

        #cross = cross_correlate(a1,b1)
        #print numarray.argmax(numarray.ravel(cross))
        #cross = normRange(cross)
        #cross = numarray.where(cross > 0.8,cross,0.7)
        #cross = nd_image.shift(cross, (cross.shape)[0]/2, mode='wrap', order=0)
        #numeric_to_jpg(cross,"cross.jpg")
        #phase = phase_correlate(a1[128:896,128:896],b1[128:896,128:896])
        #print numarray.argmax(numarray.ravel(phase))
        #phase = normRange(phase)
        #phase = numarray.where(phase > 0.7,phase,0.6)
        #phase = nd_image.shift(phase, (phase.shape)[0]/2, mode='wrap', order=0)
        #numeric_to_jpg(phase,"phase.jpg")

        v2= (a1 - b1)
        v2 = v2/(nmask**2)

        #REMOVE OUTSIDE AREA
        cshape = v2.shape
        white1 = 0.01
        v2[ 0:pixrad*2, 0:cshape[1] ] = white1
        v2[ 0:cshape[0], 0:pixrad*2 ] = white1
        v2[ cshape[0]-pixrad*2:cshape[0], 0:cshape[1] ] = white1
        v2[ 0:cshape[0], cshape[1]-pixrad*2:cshape[1] ] = white1

        xn = (v2.shape)[0]//2
        #IMPORTANT TO CHECK FOR ERROR
        if(v2[xn-1,xn-1] > 1.0 or v2[xn,xn] > 1.0 or v2[xn+1,xn+1] > 1.0 \
                or nd_image.mean(v2[xn//2:3*xn//2,xn//2:3*xn//2]) > 1.0):
                print(" !!! MAJOR ERROR IN NORMALIZATION CALCULATION (values > 1)")
                imageinfo(v2)
                print(" ... VALUES: ",v2[xn-1,xn-1],v2[xn,xn],v2[xn+1,xn+1],nd_image.mean(v2))
                numeric_to_jpg(a1,"a1.jpg")
                numeric_to_jpg(b1,"b1.jpg")
                numeric_to_jpg(v2,"v2.jpg")
                sys.exit(1)
        if(v2[xn-1,xn-1] < 0.0 or v2[xn,xn] < 0.0 or v2[xn+1,xn+1] < 0.0 \
                or nd_image.mean(v2[xn//2:3*xn//2,xn//2:3*xn//2]) < 0.0):
                print(" !!! MAJOR ERROR IN NORMALIZATION CALCULATION (values < 0)")
                imageinfo(v2)
                print(" ... VALUES: ",v2[xn-1,xn-1],v2[xn,xn],v2[xn+1,xn+1],nd_image.mean(v2))
                numeric_to_jpg(a1,"a1.jpg")
                numeric_to_jpg(b1,"b1.jpg")
                numeric_to_jpg(v2,"v2.jpg")
                sys.exit(1)
        del a1
        del b1
        #numeric_to_jpg(v2,"v2.jpg")

        #print " Normconvmap = sqrt(v2)"
        v2 = numarray.where(v2 < err, err, v2)
        normconvmap = numarray.sqrt(v2)
        #numeric_to_jpg(normconvmap,"normconvmap-zero.jpg")
        #normconvmap = numarray.where(v2 > err, numarray.sqrt(v2), 0.0)
        del v2

        #imageinfo(normconvmap)
        #print normconvmap[499,499],normconvmap[500,500],normconvmap[501,501]
        #numeric_to_jpg(normconvmap,"normconvmap-big.jpg")
        print " ... ... time %.2f sec" % float(time.time()-t1)

        #RETURN CENTER
        return normconvmap
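
The v2 map above is the local variance of the image under the template mask, built from the identity var = E[x^2] - E[x]^2 via FFT convolutions. The same identity in miniature, with a square uniform window standing in for the mask (window size arbitrary); this is a sketch of the idea, not the FFT implementation:

import numpy as np
from scipy import ndimage

img = np.random.rand(256, 256)
k = 15                                           # stand-in window size
m1 = ndimage.uniform_filter(img, size=k)         # local E[x]
m2 = ndimage.uniform_filter(img * img, size=k)   # local E[x^2]
local_std = np.sqrt(np.clip(m2 - m1 * m1, 0, None))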
Example #57
0
    print "Computing Gaussian sub-band1.5 image from Original image"
    Fsubband = fouriergausssubband15(ksp.shape, 0.707)
    image_filtered = simpleifft(procpar, dims, hdr, (ksp * Fsubband), args)
    # print "Saving Gaussian image"
    save_nifti(normalise(np.abs(image_filtered)), 'gauss_subband')

    # inhomogeneous correction
    image_corr = inhomogeneouscorrection(ksp, ksp.shape, 3.0 / 60.0)
    # print "Saving Correction image"
    save_nifti(np.abs(image_filtered / image_corr), 'image_inhCorr3')

    # print "Computing Laplacian enhanced image"
    laplacian = simpleifft(
        procpar, dims, hdr, (kspgauss * fourierlaplace(ksp.shape)), args)
    alpha = ndimage.mean(np.abs(image_filtered)) / \
        ndimage.mean(np.abs(laplacian))
    kspgauss = kspacegaussian_filter2(ksp, 1.707)
    image_filtered = simpleifft(procpar, dims, hdr, kspgauss, args)
    image_filtered = (np.abs(image_filtered))
    image_filtered = normalise(image_filtered)
    image_lfiltered = image_filtered - 0.5 * alpha * laplacian
    print('''Saving enhanced image g(x, y, z) = f(x, y, z) -
    Laplacian[f(x, y, z)]''')
    save_nifti(np.abs(image_lfiltered), 'laplacian_enhanced')

    # print "Computing Laplace of Gaussian smoothed image"
    Flaplace = fourierlaplace(ksp.shape)
    # Flaplace = (Flaplace - ndimage.minimum(Flaplace)) /
    #  (ndimage.maximum(Flaplace)
    #  - ndimage.minimum(Flaplace))
Example #58
0
def video_roi_extractor_faster(video_file=None):
    #DISCOVER VIDEO INFORMATION
    if video_file is None:
        video_find = re.compile('^video.*nd2$')
        video_file = list(filter(video_find.match, os.listdir()))[0]
    roi_file = glob.glob('roi.1024*')

    #DISCOVER ROI INFORMATION
    rois = imread(roi_file[0])  # glob.glob returns a list; read the first match
    rois_unique = np.setdiff1d(np.unique(rois), [0])
    nregions = len(rois_unique)
    print("You have ", nregions, "rois to process")

    #INITIALIZE EMPTY FRAMES
    ImageNumber = []
    ObjectNumber = []
    f2_340 = []
    f2_380 = []

    #LOAD AND PROCESS VIDEO
    images = ND2Reader(video_file)
    nframes = images.sizes['t']
    print("You have ", nframes, " frames to process")
    start_time = time.time()
    #EXTRACT ROI INFORMATION FROM EACH VIDEO FRAMES
    for frame_index in range(nframes):
        if frame_index % 100 == 0:
            print(frame_index, "/", nframes, "in",
                  round(time.time() - start_time, 0), " s")
        #KEEP TRACK OF IMAGE NUMBER
        ImageNumber_value = np.repeat(frame_index + 1, nregions)
        ImageNumber.append(ImageNumber_value)
        #KEEP TRACK OF OBJECT NUMBER
        ObjectNumber_value = np.arange(0, nregions) + 1
        ObjectNumber.append(ObjectNumber_value)
        #EXTRACT 340 INFO
        frame_340 = images.get_frame_2D(t=frame_index, c=0)
        f2_340_val = ndimage.mean(frame_340, rois, rois_unique)
        f2_340.append(f2_340_val)
        #EXTRACT 380 INFO
        frame_380 = images.get_frame_2D(t=frame_index, c=1)
        f2_380_val = ndimage.mean(frame_380, rois, rois_unique)
        f2_380.append(f2_380_val)
    images.close()

    print("Reading: %s seconds ---" % (time.time() - start_time))

    #CONCATENATE THE LISTS MADE DURING THE LOOP TOGETHER
    start_time = time.time()
    ImageNumber = np.concatenate(ImageNumber)
    ObjectNumber = np.concatenate(ObjectNumber)
    f2_340 = np.concatenate(f2_340)
    f2_380 = np.concatenate(f2_380)
    print("concatentation: %s seconds ---" % (time.time() - start_time))

    #CREATE DATAFRAME TO EXPORT
    total_data = np.column_stack([ImageNumber, ObjectNumber, f2_340, f2_380])
    df_data = pd.DataFrame(total_data)
    df_data.columns = [
        'ImageNumber', 'ObjectNumber', 'Intensity_MeanIntensity_f2_340',
        'Intensity_MeanIntensity_f2_380'
    ]
    df_data.to_csv('video_data.txt', header=True, index=False, sep='\t')
    print("Write complete")
Example #59
0
def test_mean02():
    labels = np.array([1, 0], bool)
    input = np.array([[1, 2], [3, 4]], bool)
    output = ndimage.mean(input, labels=labels)
    assert_almost_equal(output, 1.0)
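
The 1.0 falls out of broadcasting, as I read the semantics: the (2,) labels array broadcasts against the (2, 2) input along the last axis, so only the first column is selected, and both of those booleans are True. The equivalent explicit mask:

import numpy as np

labels = np.array([1, 0], bool)
input_ = np.array([[1, 2], [3, 4]], bool)
mask = np.broadcast_to(labels, input_.shape)   # keeps column 0 only
assert input_[mask].mean() == 1.0              # True booleans average to 1.0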
Example #60
0
    def run(self, ips, imgs, para=None):
        inten = self.app.get_img(para['inten'])
        if not para['slice']:
            imgs = [inten.img]
            msks = [ips.img]
        else:
            msks = ips.imgs
            imgs = inten.imgs
            if len(msks) == 1:
                msks *= len(imgs)
        buf = imgs[0].astype(np.uint32)
        strc = ndimage.generate_binary_structure(
            2, 1 if para['con'] == '4-connect' else 2)
        idct = ['Max', 'Min', 'Mean', 'Variance', 'Standard', 'Sum']
        key = {
            'Max': 'max',
            'Min': 'min',
            'Mean': 'mean',
            'Variance': 'var',
            'Standard': 'std',
            'Sum': 'sum'
        }
        idct = [i for i in idct if para[key[i]]]
        titles = ['Slice', 'ID'][0 if para['slice'] else 1:]
        if para['center']: titles.extend(['Center-X', 'Center-Y'])
        if para['extent']: titles.extend(['Min-Y', 'Min-X', 'Max-Y', 'Max-X'])
        titles.extend(idct)
        k = ips.unit[0]
        data, mark = [], {'type': 'layers', 'body': {}}
        # data,mark=[],[]
        for i in range(len(imgs)):
            if para['labeled']:
                n, buf[:] = msks[i].max(), msks[i]
            else:
                n = ndimage.label(msks[i], strc, output=buf)
            index = range(1, n + 1)
            dt = []
            if para['slice']: dt.append([i] * n)
            dt.append(range(n))

            xy = ndimage.center_of_mass(buf, buf, index)
            xy = np.array(xy).round(2).T
            if para['center']: dt.extend([xy[1] * k, xy[0] * k])

            boxs = [None] * n
            if para['extent']:
                boxs = ndimage.find_objects(buf)
                boxs = [(i[1].start, i[0].start, i[1].stop - i[1].start,
                         i[0].stop - i[0].start) for i in boxs]
                for j in (0, 1, 2, 3):
                    dt.append([i[j] * k for i in boxs])
            if para['max']:
                dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
            if para['min']:
                dt.append(ndimage.minimum(imgs[i], buf, index).round(2))
            if para['mean']:
                dt.append(ndimage.mean(imgs[i], buf, index).round(2))
            if para['var']:
                dt.append(ndimage.variance(imgs[i], buf, index).round(2))
            if para['std']:
                dt.append(
                    ndimage.standard_deviation(imgs[i], buf, index).round(2))
            if para['sum']:
                dt.append(ndimage.sum(imgs[i], buf, index).round(2))

            layer = {'type': 'layer', 'body': []}
            xy = xy.astype(int).T  # np.int0 alias was removed in NumPy 2.0

            texts = [(i[1], i[0]) + ('id=%d' % n, )
                     for i, n in zip(xy, range(len(xy)))]
            layer['body'].append({'type': 'texts', 'body': texts})
            if para['extent']:
                layer['body'].append({'type': 'rectangles', 'body': boxs})
            mark['body'][i] = layer

            data.extend(list(zip(*dt)))
        self.app.show_table(pd.DataFrame(data, columns=titles),
                            inten.title + '-pixels')
        inten.mark = mark2shp(mark)
        inten.update()