def calculateParticleStackStats(self, imgstackfile, boxedpartdatas):
		### read mean and stdev
		partmeantree = []
		t0 = time.time()
		imagicdata = apImagicFile.readImagic(imgstackfile)
		apDisplay.printMsg("gathering mean and stdev data")
		### loop over the particles and read data
		for i in range(len(boxedpartdatas)):
			partdata = boxedpartdatas[i]
			partarray = imagicdata['images'][i]

			### if particle stdev == 0, then it is all constant, i.e., a bad particle
			stdev = float(partarray.std())
			if stdev < 1.0e-6:
				apDisplay.printError("Standard deviation == 0 for particle %d in image %s"%(i,self.shortname))

			### skew and kurtosis
			partravel = numpy.ravel(partarray)
			skew = float(stats.skew(partravel))
			kurtosis = float(stats.kurtosis(partravel))

			### edge and center stats
			edgemean = float(ndimage.mean(partarray, self.edgemap, 1.0))
			edgestdev = float(ndimage.standard_deviation(partarray, self.edgemap, 1.0))
			centermean = float(ndimage.mean(partarray, self.edgemap, 0.0))
			centerstdev = float(ndimage.standard_deviation(partarray, self.edgemap, 0.0))
			self.summedParticles += partarray

			### take abs of all means, because the CTF-corrected whole image may become negative
			partmeandict = {
				'partdata': partdata,
				'mean': abs(float(partarray.mean())),
				'stdev': stdev,
				'min': float(partarray.min()),
				'max': float(partarray.max()),
				'skew': skew,
				'kurtosis': kurtosis,
				'edgemean': abs(edgemean),
				'edgestdev': edgestdev,
				'centermean': abs(centermean),
				'centerstdev': centerstdev,
			}
			### show stats for first particle
			"""
			if i == 0:
				keys = partmeandict.keys()
				keys.sort()
				mystr = "PART STATS: "
				for key in keys:
					if isinstance(partmeandict[key], float):
						mystr += "%s=%.3f :: "%(key, partmeandict[key])
				print mystr
			"""
			partmeantree.append(partmeandict)
		self.meanreadtimes.append(time.time()-t0)
		return partmeantree
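
A hedged sketch of the edge/center statistics used above: self.edgemap is assumed to be a label image with 1.0 on the particle edge and 0.0 in the center, so ndimage.mean(partarray, edgemap, 1.0) averages edge pixels only (the real edge-map construction is not part of this snippet):
import numpy
from scipy import ndimage

box = 64
yy, xx = numpy.mgrid[0:box, 0:box]
radius = numpy.hypot(yy - box / 2.0, xx - box / 2.0)
# hypothetical circular edge map: 1.0 near the box radius, 0.0 elsewhere
edgemap = numpy.where(radius > box / 2.0 - 2.0, 1.0, 0.0)
part = numpy.random.rand(box, box)
edgemean = ndimage.mean(part, edgemap, 1.0)      # mean over edge pixels only
centermean = ndimage.mean(part, edgemap, 0.0)    # mean over center pixels only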
def getRealLabeledMeanStdev(image,labeled_image,indices,info):
        print "Getting real mean and stdev"
        mean=nd.mean(image,labels=labeled_image,index=indices)
        stdev=nd.standard_deviation(image,labels=labeled_image,index=indices)
        ll=0
        try:
                len(mean)
        except:
                mean=[mean]
                stdev=[stdev]
                try:
                        len(indices)
                except:
                        indices=[indices]
        try:
                info.keys()
        except:
                offset=1
        else:
                offset=0
        for l in indices:
                info[l-offset][1]=mean[ll]
                info[l-offset][2]=stdev[ll]
                ll += 1
        return info
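
A hedged usage sketch for getRealLabeledMeanStdev (under Python 2, which the print statement above requires): info is assumed to be either a dict keyed by label (offset 0) or a plain list indexed from 0 (offset 1), holding mutable [area, mean, stdev] entries per label:
import numpy as np
from scipy import ndimage as nd

image = np.random.rand(16, 16)
labeled_image, nlabels = nd.label(image > 0.6)
indices = list(range(1, nlabels + 1))
# list form: entry i-1 belongs to label i, so the function picks offset = 1
info = [[0, 0.0, 0.0] for _ in indices]
info = getRealLabeledMeanStdev(image, labeled_image, indices, info)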
Example #3
def test_standard_deviation05():
    "standard deviation 5"
    labels = [2, 2, 3]
    for type in types:
        input = np.array([1, 3, 8], type)
        output = ndimage.standard_deviation(input, labels, 2)
        assert_almost_equal(output, 1.0)
def test_standard_deviation07():
    "standard deviation 7"
    labels = [1]
    for type in types:
        input = np.array([-0.00619519], type)
        output = ndimage.standard_deviation(input, labels, [1])
        assert_array_almost_equal(output, [0])
Example #5
    def updatemain(self):

        if self.verbose:
            print "updating",self.layer
        if self.xview:
            # dataswappedX = np.swapaxes(self.data,0,1)
            self.arr=self.dataswappedX[self.layer]
        elif self.yview:
            # dataswappedY = np.swapaxes(self.data,0,2)
            self.arr=self.dataswappedY[self.layer]
        else:
            self.arr=self.data[self.layer]
        self.img1a.setImage(self.arr)
        if self.firsttime:
            self.firsttime = False
        else:
            if self.verbose:
                print self.rois
            if self.rois[self.layer]:
                # self.p1.removeItem(self.roi)
                # self.restorePolyLineState(self.roi, self.rois[self.layer])
                self.roi.setState(self.rois[self.layer])
                # self.p1.addItem(self.roi)
                
            self.update()
            self.label_layer.setText("layer: "+str(self.layer+1)+"/"+str(len(self.data[:,:,:,0])))
            self.label_shape.setText("shape: "+str(self.arr[:,:,0].shape))
            self.label_size.setText("size: "+str(self.arr[:,:,0].size))
            self.label_min.setText("min: "+str(self.arr[:,:,0].min()))
            self.label_max.setText("max: "+str(self.arr[:,:,0].max()))
            self.label_mean.setText("mean: "+str(self.arr[:,:,0].mean()))
            self.label_sd.setText("sd: "+str(ndimage.standard_deviation(self.arr[:,:,0])))
            self.label_sum.setText("sum: "+str(ndimage.sum(self.arr[:,:,0])))
        self.img1a.updateImage()
def normalizeImage(a):
	"""	
	Normalizes a numarray to fit into an image format,
	i.e., values between 0 and 255.
	"""
	#Minimum image value, i.e. how black the image can get
	minlevel = 0.0
	#Maximum image value, i.e. how white the image can get
	maxlevel = 235.0
	#Maximum standard deviations to include, i.e. pixel > N*stdev --> white
	devlimit=5.0
	imrange = maxlevel - minlevel

	avg1=ndimage.mean(a)

	stdev1=ndimage.standard_deviation(a)

	min1=ndimage.minimum(a)
	if(min1 < avg1-devlimit*stdev1):
		min1 = avg1-devlimit*stdev1

	max1=ndimage.maximum(a)
	if(max1 > avg1+devlimit*stdev1):
		max1 = avg1+devlimit*stdev1

	a = (a - min1)/(max1 - min1)*imrange + minlevel
	a = numarray.where(a > maxlevel,255.0,a)
	a = numarray.where(a < minlevel,0.0,a)

	return a
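
A minimal usage sketch for normalizeImage, assuming the long-obsolete numarray package used by this snippet is importable:
import numarray
# small synthetic image with one bright outlier pixel
a = numarray.array([[10.0, 12.0, 11.0], [13.0, 9.0, 500.0]])
img = normalizeImage(a)
print(img.min())   # >= 0 by construction
print(img.max())   # <= 255 by construction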
def test_standard_deviation06():
    "standard deviation 6"
    labels = [2, 2, 3, 3, 4]
    for type in types:
        input = np.array([1, 3, 8, 10, 8], type)
        output = ndimage.standard_deviation(input, labels,
                                                      [2, 3, 4])
        assert_array_almost_equal(output, [1.0, 1.0, 0.0])
Example #8
def find_peaks(file):
  #read image data
  f=pyfits.open(file)
  img=f[0].data

  #set NaN pixels (empty pixels) to zero
  img[img != img]=0.0
  img[img<0.]=0.0
  if dim==4:
    img=img[0,0,:,:] #drop the 3rd and 4th axes; FIRST FITS files have four axes but only the first two carry data
    
  T=ndimage.standard_deviation(img)
  sourcelabels,num_sources=ndimage.label(img>T)
  backgroundlabels,num_background=ndimage.label(img<T)
  # define an 8-connected neighbourhood
  neighborhood = generate_binary_structure(2,2)
  fimg=img*sourcelabels
  #apply the local maximum filter; all pixels of maximal value
  #in their neighbourhood are set to 1
  local_max=maximum_filter(fimg,footprint=neighborhood)==fimg
  #In order to isolate the peaks we must remove the background from the mask.
  #we create the mask of the background
  background=img*backgroundlabels
  #we must erode the background in order to
  #successfully subtract it from local_max, otherwise a line will
  #appear along the background border (artifact of the local maximum filter)
  eroded_background=binary_erosion(background,structure=neighborhood,border_value=1)
  
  #we obtain the final mask, containing only peaks, 
  #by removing the background from the local_max mask
  detected_peaks=local_max^eroded_background  #boolean '-' is deprecated in numpy; '^' (xor) keeps the original behaviour
  #may contain some peaks outside the sources (bright background features), but these can be removed later
  #now need to find positions of these maximum
  #label peaks
  peaklabels,num_peaks=ndimage.measurements.label(detected_peaks)
  
  #get peak positions  
  slices = ndimage.find_objects(peaklabels)
  x, y = [], []
  for dy,dx in slices:
      x_center = (dx.start + dx.stop - 1)/2
      x.append(x_center)
      y_center = (dy.start + dy.stop - 1)/2    
      y.append(y_center)
      
  peak_positions=zip(x,y)
  
  #get peak values, in Jy/beam
  peak_fluxes=[]
  for coord in peak_positions:
      peak_fluxes.append(img[coord[1],coord[0]])
  peaks=zip(peak_positions,peak_fluxes)
  
  #sort by peak_fluxes. Two brightest peaks will be the first two in the list
  peaks=sorted(peaks,key=lambda l: l[1])  
  peaks.reverse()
  return peaks,img,f
Example #9
	def analyzeList(self, mylist, myrange=(0,1,1), filename=None):
		"""
		histogram2(a, bins) -- Compute histogram of a using divisions in bins

		Description:
		   Count the number of times values from array a fall into
		   numerical ranges defined by bins.  Range x is given by
		   bins[x] <= range_x < bins[x+1] where x =0,N and N is the
		   length of the bins array.  The last range is given by
		   bins[N] <= range_N < infinity.  Values less than bins[0] are
		   not included in the histogram.
		Arguments:
		   a -- 1D array.  The array of values to be divided into bins
		   bins -- 1D array.  Defines the ranges of values to use during
		         histogramming.
		Returns:
		   1D array.  Each value represents the occurrences for a given
		   bin (range) of values.
		"""
		#hist,bmin,minw,err = stats.histogram(mynumpy, numbins=36)
		#print hist,bmin,minw,err,"\n"
		if len(mylist) < 2:
			apDisplay.printWarning("Did not write file not enough rows ("+str(filename)+")")
			return

		if myrange[0] is None:
			mymin = float(math.floor(ndimage.minimum(mylist)))
		else:
			mymin = float(myrange[0])
		if myrange[1] is None:
			mymax = float(math.ceil(ndimage.maximum(mylist)))
		else:
			mymax = float(myrange[1])
		mystep = float(myrange[2])

		mynumpy = numpy.asarray(mylist, dtype=numpy.float32)
		print "range=",round(ndimage.minimum(mynumpy),2)," <> ",round(ndimage.maximum(mynumpy),2)
		print " mean=",round(ndimage.mean(mynumpy),2)," +- ",round(ndimage.standard_deviation(mynumpy),2)

		#histogram
		bins = []
		mybin = mymin
		while mybin <= mymax:
			bins.append(mybin)
			mybin += mystep
		bins = numpy.asarray(bins, dtype=numpy.float32)
		apDisplay.printMsg("Creating histogram with "+str(len(bins))+" bins")
		hist = stats.histogram2(mynumpy, bins=bins)
		#print bins
		#print hist
		if filename is not None:
			f = open(filename, "w")
			for i in range(len(bins)):
				out = ("%3.4f %d\n" % (bins[i] + mystep/2.0, hist[i]) )
				f.write(out)
			f.write("&\n")
Example #10
def test_standard_deviation07():
    labels = [1]
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([-0.00619519], type)
            output = ndimage.standard_deviation(input, labels, [1])
            assert_array_almost_equal(output, [0])
    finally:
        np.seterr(**olderr)
Example #11
def test_standard_deviation01():
    "standard deviation 1"
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([], type)
            output = ndimage.standard_deviation(input)
            assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
def getImageInfo(im):
        """
        returns image statistics (mean, stdev, min, max); useful for debugging
        """
        avg1=ndimage.mean(im)
        stdev1=ndimage.standard_deviation(im)
        min1=ndimage.minimum(im)
        max1=ndimage.maximum(im)

        return avg1,stdev1,min1,max1
Example #13
def test_standard_deviation06():
    labels = [2, 2, 3, 3, 4]
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.standard_deviation(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
    finally:
        np.seterr(**olderr)
Example #14
def test_standard_deviation01():
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.standard_deviation(input)
            assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #15
def test_standard_deviation07():
    "standard deviation 7"
    labels = [1]
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([-0.00619519], type)
            output = ndimage.standard_deviation(input, labels, [1])
            assert_array_almost_equal(output, [0])
    finally:
        np.seterr(**olderr)
Example #16
def test_standard_deviation06():
    "standard deviation 6"
    labels = [2, 2, 3, 3, 4]
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([1, 3, 8, 10, 8], type)
            output = ndimage.standard_deviation(input, labels, [2, 3, 4])
            assert_array_almost_equal(output, [1.0, 1.0, 0.0])
    finally:
        np.seterr(**olderr)
Example #17
def test_standard_deviation01():
    olderr = np.seterr(all='ignore')
    try:
        for type in types:
            input = np.array([], type)
            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "Mean of empty slice")
                output = ndimage.standard_deviation(input)
            assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #18
    def run(self, ips, snap, img, para=None):
        intenimg = ImageManager.get(para['inten']).img
        strc = ndimage.generate_binary_structure(
            2, 1 if para['con'] == '4-connect' else 2)
        buf, n = ndimage.label(snap, strc, output=np.uint32)
        index = range(1, n + 1)
        idx = (np.ones(n + 1) * para['front']).astype(np.uint8)
        msk = np.ones(n, dtype=bool)  # np.bool was removed from NumPy; plain bool is equivalent

        if para['mean'] > 0:
            msk *= ndimage.mean(intenimg, buf, index) >= para['mean']
        if para['mean'] < 0:
            msk *= ndimage.mean(intenimg, buf, index) < -para['mean']
        if para['max'] > 0:
            msk *= ndimage.maximum(intenimg, buf, index) >= para['max']
        if para['max'] < 0:
            msk *= ndimage.maximum(intenimg, buf, index) < -para['max']
        if para['min'] > 0:
            msk *= ndimage.minimum(intenimg, buf, index) >= para['min']
        if para['min'] < 0:
            msk *= ndimage.minimum(intenimg, buf, index) < -para['min']
        if para['sum'] > 0:
            msk *= ndimage.sum(intenimg, buf, index) >= para['sum']
        if para['sum'] < 0:
            msk *= ndimage.sum(intenimg, buf, index) < -para['sum']
        if para['std'] > 0:
            msk *= ndimage.standard_deviation(intenimg, buf,
                                              index) >= para['std']
        if para['std'] < 0:
            msk *= ndimage.standard_deviation(intenimg, buf,
                                              index) < -para['std']

        xy = ndimage.center_of_mass(intenimg, buf, index)
        xy = np.array(xy).round(2).T

        idx[1:][~msk] = para['back']
        idx[0] = 0
        img[:] = idx[buf]

        ImageManager.get(para['inten']).mark = RGMark((xy.T, msk))
        ImageManager.get(para['inten']).update = True
def test_standard_deviation01():
    olderr = np.seterr(all='ignore')
    try:
        with warnings.catch_warnings():
            # Numpy 1.9 gives warnings for mean([])
            warnings.filterwarnings('ignore', message="Mean of empty slice.")
            for type in types:
                input = np.array([], type)
                output = ndimage.standard_deviation(input)
                assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
	def getCutoffCriteria(self, errorArray):
		#do a small minimum filter to  get rid of outliers
		size = int(len(errorArray)**0.3)+1
		errorArray2 = ndimage.minimum_filter(errorArray, size=size, mode='wrap')
		mean = ndimage.mean(errorArray2)
		stdev = ndimage.standard_deviation(errorArray2)
		### this is so arbitrary
		cut = mean + 5.0 * stdev + 2.0
		### anything bigger than 20 pixels is too big
		if cut > self.data['pixdiam']:
			cut = self.data['pixdiam']
		return cut
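
A standalone sketch of the cutoff criterion above, with hypothetical error values and without the surrounding class (so the self.data['pixdiam'] cap is omitted):
import numpy as np
from scipy import ndimage

errorArray = np.abs(np.random.randn(200)) + 0.5
size = int(len(errorArray) ** 0.3) + 1
errorArray2 = ndimage.minimum_filter(errorArray, size=size, mode='wrap')
cut = ndimage.mean(errorArray2) + 5.0 * ndimage.standard_deviation(errorArray2) + 2.0
print(cut)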
Example #21
def test_standard_deviation01():
    olderr = np.seterr(all='ignore')
    try:
        with warnings.catch_warnings():
            # Numpy 1.9 gives warnings for mean([])
            warnings.filterwarnings('ignore', message="Mean of empty slice.")
            for type in types:
                input = np.array([], type)
                output = ndimage.standard_deviation(input)
                assert_(np.isnan(output))
    finally:
        np.seterr(**olderr)
Example #22
def cplxwiener_filter(real_input, imag_input, mysize_=5, noise_=None):
    """cplxwiener_filter Implementation of Wiener filter on complex image

    scipy.signal.wiener(im, mysize=None, noise=None)
    Perform a Wiener filter on an N-dimensional array.

    The Wiener filter is a simple deblurring filter for denoising images. This
    is not the Wiener filter commonly described in image reconstruction
    problems but instead it is a simple, local-mean filter.

    Apply a Wiener filter to the N-dimensional array im.

    Parameters
    ----------
    im : ndarray
        An N-dimensional array.
    mysize : int or arraylike, optional
        A scalar or an N-length list giving the size of the Wiener filter
        window in each dimension. Elements of mysize should be odd. If mysize
        is a scalar, then this scalar is used as the size in each dimension.
    noise : float, optional
        The noise-power to use. If None, then noise is estimated as the
        average of the local variance of the input.

    Returns
    -------
    out : ndarray
        Wiener filtered result with the same shape as im.
    """
    # scipy.signal.wiener(im, mysize=None, noise=None)
    # ,(size_, size_, size_)
    filtersiz = (3, 3, 3)
    print "Complex Wiener filter window size ", mysize_, " noise ", noise_
    if not noise_:
        noise_ = ndimage.standard_deviation(real_input)
    if not hasattr(mysize_, "__len__"):
        filtersize = np.array(mysize_)
    else:
        filtersize = mysize_
    if real_input.ndim == 3:
        real_img = wiener(real_input, mysize=filtersize, noise=noise_)
        imag_img = wiener(imag_input, mysize=filtersize, noise=noise_)
    else:
        real_img = np.empty_like(real_input)
        imag_img = np.empty_like(real_input)
        for echo in xrange(0, real_input.shape[4]):
            for acq in xrange(0, real_input.shape[3]):
                real_img[:, :, :, acq, echo] = wiener(real_input[:, :, :, acq,
                                                                 echo],
                                                      mysize=filtersize,
                                                      noise=noise_)
                imag_img[:, :, :, acq, echo] = wiener(imag_input[:, :, :, acq,
                                                                 echo],
                                                      mysize=filtersize,
                                                      noise=noise_)

    filtered_image = np.empty_like(real_input, dtype=np.complex64)
    filtered_image.real = real_img
    filtered_image.imag = imag_img
    return filtered_image
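
A minimal usage sketch for cplxwiener_filter with a 3-D volume, so the single-call branch above is taken (Python 2, as the snippet's print statement and xrange require):
import numpy as np

real = np.random.randn(32, 32, 8)
imag = np.random.randn(32, 32, 8)
filtered = cplxwiener_filter(real, imag, mysize_=3)
print(filtered.dtype)    # complex64
print(filtered.shape)    # (32, 32, 8)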
Example #23
def MeasureSEMFocus(image):
    """
    Given an image, focus measure is calculated using the standard deviation of
    the raw data.
    image (model.DataArray): SEM image
    returns (float): The focus level of the SEM image (higher is better)
    """
    # Handle RGB image
    if len(image.shape) == 3:
        # TODO find faster/better solution
        image = _convertRBGToGrayscale(image)

    return ndimage.standard_deviation(image)
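
A quick sanity-check sketch of the focus measure above: blurring reduces the standard deviation, so a blurred copy should report a lower focus level (a plain NumPy array is used here in place of a model.DataArray):
import numpy
from scipy import ndimage

img = numpy.random.rand(256, 256) * 255.0
blurred = ndimage.gaussian_filter(img, sigma=3)
assert MeasureSEMFocus(blurred) < MeasureSEMFocus(img)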
Example #24
def fill_label_holes(label):
    # in development (working): fill holes in labeled area
    ##todo: doc, add max_width option ?
    bg = label==0
    bg_lab,bg_n = _nd.label(bg)
    flabel = _fill(label,label==0)
    
    bg_std = _nd.standard_deviation(flabel,bg_lab,range(1,bg_n+1))
    bg_std.insert(0,0) 
    bg_std = _np.array(bg_std)
    flabel[(bg_std>0)[bg_lab]] = 0
    
    return flabel
Example #25
def fill_label_holes(label):
    # in development (working): fill holes in labeled area
    ##todo: doc, add max_width option ?
    bg = label == 0
    bg_lab, bg_n = _nd.label(bg)
    flabel = _fill(label, label == 0)

    bg_std = _nd.standard_deviation(flabel, bg_lab, range(1, bg_n + 1))
    bg_std.insert(0, 0)
    bg_std = _np.array(bg_std)
    flabel[(bg_std > 0)[bg_lab]] = 0

    return flabel
def imageinfo(im):
	#print " ... size: ",im.shape
	#print " ... sum:  ",im.sum()

	avg1=ndimage.mean(im)
	stdev1=ndimage.standard_deviation(im)
	print " ... avg:  ",round(avg1,6),"+-",round(stdev1,6)

	min1=ndimage.minimum(im)
	max1=ndimage.maximum(im)
	print " ... range:",round(min1,6),"<>",round(max1,6)

	return
Example #27
def MeasureSEMFocus(image):
    """
    Given an image, focus measure is calculated using the standard deviation of
    the raw data.
    image (model.DataArray): SEM image
    returns (float): The focus level of the SEM image (higher is better)
    """
    # Handle RGB image
    if len(image.shape) == 3:
        # TODO find faster/better solution
        image = _convertRBGToGrayscale(image)

    return ndimage.standard_deviation(image)
Example #28
def imageinfo(im):
    #print " ... size: ",im.shape
    #print " ... sum:  ",im.sum()

    avg1 = ndimage.mean(im)
    stdev1 = ndimage.standard_deviation(im)
    print " ... avg:  ", round(avg1, 6), "+-", round(stdev1, 6)

    min1 = ndimage.minimum(im)
    max1 = ndimage.maximum(im)
    print " ... range:", round(min1, 6), "<>", round(max1, 6)

    return
Example #29
	def subpixelPeak(self, newimage=None, npix=5, guess=None, limit=None):
		'''
		see pixelPeak doc string for info about guess and limit
		'''
		if newimage is not None:
			self.setImage(newimage)

		if self.results['subpixel peak'] is not None:
			return self.results['subpixel peak']

		self.pixelPeak(guess=guess, limit=limit)
		peakrow,peakcol = self.results['pixel peak']

		## cut out a region of interest around the peak
		roi = imagefun.crop_at(self.image, (peakrow,peakcol), (npix,npix))

		## fit a quadratic to it and find the subpixel peak
		roipeak = self.quadFitPeak(roi)
		#roipeak = self.gaussFitPeak(roi)
		subfailed = False
		if roipeak['row'] < 0 or roipeak['row'] > npix or numpy.isnan(roipeak['row']):
			srow = float(peakrow)
			subfailed = True
		else:
			srow = peakrow + roipeak['row'] - npix/2
		if roipeak['col'] < 0 or roipeak['col'] > npix or numpy.isnan(roipeak['col']):
			scol = float(peakcol)
			subfailed = True
		else:
			scol = peakcol + roipeak['col'] - npix/2

		peakvalue = roipeak['value']
		peakminsum = roipeak['minsum']

		subpixelpeak = (srow, scol)
		self.results['subpixel peak'] = subpixelpeak
		self.results['subpixel peak value'] = peakvalue
		self.results['minsum'] = peakminsum
		self.results['coeffs'] = roipeak['coeffs']
		self.results['subfailed'] = subfailed

		#NEIL's SNR calculation
		self.results['noise']  = nd_image.standard_deviation(self.image)
		self.results['mean']   = nd_image.mean(self.image)
		self.results['signal'] = self.results['pixel peak value'] - self.results['mean']
		# a NaN noise value fails the self-comparison below, so fall back to the raw peak value
		if self.results['noise'] == self.results['noise'] and self.results['noise'] != 0.0:
			self.results['snr'] = self.results['signal'] / self.results['noise']
		else:
			self.results['snr'] = self.results['pixel peak value']

		return subpixelpeak
Example #30
    def run(self, ips, imgs, para = None):
        inten = WindowsManager.get(para['inten']).ips
        if not para['slice']:
            imgs = [inten.img]
            msks = [ips.img]
        else: 
            msks = ips.imgs
            if len(msks)==1:
                msks *= len(imgs)
        buf = imgs[0].astype(np.uint16)
        strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
        idct = ['Max','Min','Mean','Variance','Standard','Sum']
        key = {'Max':'max','Min':'min','Mean':'mean',
               'Variance':'var','Standard':'std','Sum':'sum'}
        idct = [i for i in idct if para[key[i]]]
        titles = ['Slice', 'ID'][0 if para['slice'] else 1:] 
        if para['center']: titles.extend(['Center-X','Center-Y'])
        if para['extent']: titles.extend(['Min-Y','Min-X','Max-Y','Max-X'])
        titles.extend(idct)
        k = ips.unit[0]
        data, mark = [], []
        for i in range(len(imgs)):
            n = ndimage.label(msks[i], strc, output=buf)
            index = range(1, n+1)
            dt = []
            if para['slice']:dt.append([i]*n)
            dt.append(range(n))
            
            xy = ndimage.center_of_mass(imgs[i], buf, index)
            xy = np.array(xy).round(2).T
            if para['center']:dt.extend([xy[1]*k, xy[0]*k])

            boxs = [None] * n
            if para['extent']:
                boxs = ndimage.find_objects(buf)
                boxs = [(i[0].start, i[1].start, i[0].stop, i[1].stop) for i in boxs]
                for j in (0,1,2,3):
                    dt.append([i[j]*k for i in boxs])
            if para['max']:dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
            if para['min']:dt.append(ndimage.minimum(imgs[i], buf, index).round(2))        
            if para['mean']:dt.append(ndimage.mean(imgs[i], buf, index).round(2))
            if para['var']:dt.append(ndimage.variance(imgs[i], buf, index).round(2)) 
            if para['std']:dt.append(ndimage.standard_deviation(imgs[i], buf, index).round(2))
            if para['sum']:dt.append(ndimage.sum(imgs[i], buf, index).round(2))      

            mark.append([(center, cov) for center,cov in zip(xy.T, boxs)]) 
            data.extend(list(zip(*dt)))

        IPy.table(inten.title+'-region statistic', data, titles)
        inten.mark = Mark(mark)
        inten.update = True
Example #31
	def subpixelPeak(self, newimage=None, npix=5, guess=None, limit=None):
		'''
		see pixelPeak doc string for info about guess and limit
		'''
		if newimage is not None:
			self.setImage(newimage)

		if self.results['subpixel peak'] is not None:
			return self.results['subpixel peak']

		self.pixelPeak(guess=guess, limit=limit)
		peakrow,peakcol = self.results['pixel peak']

		## cut out a region of interest around the peak
		roi = imagefun.crop_at(self.image, (peakrow,peakcol), (npix,npix))

		## fit a quadratic to it and find the subpixel peak
		roipeak = self.quadFitPeak(roi)
		#roipeak = self.gaussFitPeak(roi)
		subfailed = False
		if roipeak['row'] < 0 or roipeak['row'] > npix or numpy.isnan(roipeak['row']):
			srow = float(peakrow)
			subfailed = True
		else:
			srow = peakrow + roipeak['row'] - npix/2
		if roipeak['col'] < 0 or roipeak['col'] > npix or numpy.isnan(roipeak['col']):
			scol = float(peakcol)
			subfailed = True
		else:
			scol = peakcol + roipeak['col'] - npix/2

		peakvalue = roipeak['value']
		peakminsum = roipeak['minsum']

		subpixelpeak = (srow, scol)
		self.results['subpixel peak'] = subpixelpeak
		self.results['subpixel peak value'] = peakvalue
		self.results['minsum'] = peakminsum
		self.results['coeffs'] = roipeak['coeffs']
		self.results['subfailed'] = subfailed

		#NEIL's SNR calculation
		self.results['noise']  = nd_image.standard_deviation(self.image)
		self.results['mean']   = nd_image.mean(self.image)
		self.results['signal'] = self.results['pixel peak value'] - self.results['mean']
		# a NaN noise value fails the self-comparison below, so fall back to the raw peak value
		if self.results['noise'] == self.results['noise'] and self.results['noise'] != 0.0:
			self.results['snr'] = self.results['signal'] / self.results['noise']
		else:
			self.results['snr'] = self.results['pixel peak value']

		return subpixelpeak
Example #32
def draw_magerr(mag,magerr,**kwargs):
    kwargs.setdefault('fmt','o')
    bins = kwargs.pop('bins',np.linspace(16,25,100))
                      
    ax = plt.gca()
    labels = np.digitize(mag,bins)
    index = np.unique(labels)
    centers = ((bins[1:]+bins[:-1])/2.)[index]
    median = nd.median(magerr,labels=labels,index=index)
    mean = nd.mean(magerr,labels=labels,index=index)
    std = nd.standard_deviation(magerr,labels=labels,index=index)
     
    ax.errorbar(centers,mean,yerr=std,**kwargs)
    return median,mean,std
Example #33
 def cal(self, stat = 'mean'):
         if stat=='mean':
                 zonalstats = ndimage.mean(self.data, labels=self.lb, index=self.labSet)
         if stat=='minimum':
                 zonalstats = ndimage.minimum(self.data, labels=self.lb, index=self.labSet)
         if stat=='maximum':
                 zonalstats = ndimage.maximum(self.data, labels=self.lb, index=self.labSet)
         if stat=='sum':
                 zonalstats = ndimage.sum(self.data, labels=self.lb, index=self.labSet)
         if stat=='std':
                 zonalstats = ndimage.standard_deviation(self.data, labels=self.lb, index=self.labSet)
         if stat=='variance':
                 zonalstats = ndimage.variance(self.data, labels=self.lb, index=self.labSet)
         return zonalstats
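
A standalone sketch of the zonal-statistics pattern that cal() wraps, with hypothetical data and label rasters in place of self.data, self.lb and self.labSet:
import numpy as np
from scipy import ndimage

data = np.arange(16, dtype=float).reshape(4, 4)
zones = np.array([[1, 1, 2, 2]] * 4)      # label raster with two zones
labset = np.unique(zones)
print(ndimage.mean(data, labels=zones, index=labset))                 # per-zone means
print(ndimage.standard_deviation(data, labels=zones, index=labset))   # per-zone stdevs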
 def getCutoffCriteria(self, errorArray):
     #do a small minimum filter to  get rid of outliers
     size = int(len(errorArray)**0.3) + 1
     errorArray2 = ndimage.minimum_filter(errorArray,
                                          size=size,
                                          mode='wrap')
     mean = ndimage.mean(errorArray2)
     stdev = ndimage.standard_deviation(errorArray2)
     ### this is so arbitrary
     cut = mean + 5.0 * stdev + 2.0
     ### anything bigger than 20 pixels is too big
     if cut > self.data['pixdiam']:
         cut = self.data['pixdiam']
     return cut
Example #35
def _patch_average(d, patch_size, shuffle_mask=False):
    """
    Split data d into patches of size patch_size**2.0 (using a sliding window
    and discarding patches when window is outside of domain) and calculate mean
    and standard deviation over all patches
    """
    Nx, Ny = d.shape
    N = Nx
    assert Nx == Ny

    i, j = np.meshgrid(np.arange(Nx), np.arange(Ny), indexing="ij")

    def make_mask(offset=0):
        """
        Make a mask with regions labelled in consecutive patches, remove any
        patches which because of offset window are outside of domain
        """
        assert offset < patch_size
        m = (N - offset) / patch_size
        mask = ((i - offset) / (patch_size)) + m * ((j - offset) /
                                                    (patch_size))
        if np.any(np.isnan(mask)):
            raise Exception("Can't make mask of with this splitting")

        mask[m * patch_size + offset:, :] = -1
        mask[:, m * patch_size + offset:] = -1
        mask[:offset, :] = -1
        mask[:, :offset] = -1

        return mask, np.unique(mask)

    def apply_mask_shuffle(m):
        m_flat = m.reshape(Nx * Ny)
        np.random.shuffle(m_flat)
        return m_flat.reshape(Nx, Ny)

    _d_std_err = []

    for offset in range(0, patch_size, patch_size / 10):
        mask, mask_entries = make_mask(offset=offset)
        if shuffle_mask:
            mask = apply_mask_shuffle(m=mask)
        d_std_err = ndimage.standard_deviation(d,
                                               labels=mask,
                                               index=mask_entries)

        _d_std_err += d_std_err.tolist()

    return np.array(_d_std_err)
Example #36
 def update(self):
     thisroi = self.roi.getArrayRegion(self.arr, self.img1a).astype(float)
     self.img1b.setImage(thisroi, levels=(0, self.arr.max()))
     self.label2_shape.setText("shape: "+str(thisroi.shape))
     self.label2_size.setText("size: "+str(thisroi.size))
     self.label2_min.setText("min: "+str(thisroi.min()))
     self.label2_max.setText("max: "+str(thisroi.max()))
     self.label2_mean.setText("mean: "+str(thisroi.mean()))
     self.label2_sd.setText("sd: "+str( ndimage.standard_deviation(thisroi) ))
     self.label2_sum.setText("sum: "+str( ndimage.sum(thisroi) ))
     # # print("entropy: ",entropy(thisroi, disk(5))
     # # print("maximum: ",maximum(thisroi, disk(5))
     # # print("\n"
     # # print(disk(5)
     # print("\n")
     self.p2.autoRange()
Example #37
 def update(self):
     thisroi = self.roi.getArrayRegion(self.arr, self.img1a).astype(float)
     self.img1b.setImage(thisroi, levels=(0, self.arr.max()))
     self.label2_shape.setText("shape: " + str(thisroi.shape))
     self.label2_size.setText("size: " + str(thisroi.size))
     self.label2_min.setText("min: " + str(thisroi.min()))
     self.label2_max.setText("max: " + str(thisroi.max()))
     self.label2_mean.setText("mean: " + str(thisroi.mean()))
     self.label2_sd.setText("sd: " +
                            str(ndimage.standard_deviation(thisroi)))
     self.label2_sum.setText("sum: " + str(ndimage.sum(thisroi)))
     # # print("entropy: ",entropy(thisroi, disk(5))
     # # print("maximum: ",maximum(thisroi, disk(5))
     # # print("\n"
     # # print(disk(5)
     # print("\n")
     self.p2.autoRange()
Example #38
def MeasureFocus(image):
    """
    Given an image, focus measure is calculated using the standard deviation of
    the raw data.
    image (model.DataArray): Optical image
    returns (float):    The focus level of the optical image
    """
    # Handle RGB image
    if len(image.shape) == 3:
        # TODO find faster solution
        r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2]
        gray = numpy.empty(image.shape[0:2], dtype="uint16")
        gray[...] = r
        gray += g
        gray += b
    else:
        gray = image
    return ndimage.standard_deviation(gray)  # use the grayscale conversion computed above
Example #39
def MeasureFocus(image):
    """
    Given an image, focus measure is calculated using the standard deviation of
    the raw data.
    image (model.DataArray): Optical image
    returns (float):    The focus level of the optical image
    """
    # Handle RGB image
    if len(image.shape) == 3:
        # TODO find faster solution
        r, g, b = image[:, :, 0], image[:, :, 1], image[:, :, 2]
        gray = numpy.empty(image.shape[0:2], dtype="uint16")
        gray[...] = r
        gray += g
        gray += b
    else:
        gray = image
    return ndimage.standard_deviation(gray)  # use the grayscale conversion computed above
Example #40
def update(roi):
    thisroi = roi.getArrayRegion(arr, img1a).astype(int)
    img1b.setImage(thisroi, levels=(0, arr.max()))

    print(type(thisroi[0][0]))
    print("shape: ", thisroi.shape)
    print("size:  ", thisroi.size)
    print("min:   ", thisroi.min())
    print("max:   ", thisroi.max())
    print("mean:  ", thisroi.mean())
    print("mean:  ", ndimage.mean(thisroi))
    print("sd:    ", ndimage.standard_deviation(thisroi))
    print("sum:   ", ndimage.sum(thisroi))
    # print(thisroi
    # print("entropy: ",entropy(thisroi, disk(5))
    # print("maximum: ",maximum(thisroi, disk(5))
    # print("\n"
    # print(disk(5)
    print("\n")
    v1b.autoRange()
Example #41
def update(roi):
    thisroi = roi.getArrayRegion(arr, img1a).astype(int)
    img1b.setImage(thisroi, levels=(0, arr.max()))

    print(type(thisroi[0][0]))
    print("shape: ", thisroi.shape)
    print("size:  ", thisroi.size)
    print("min:   ", thisroi.min())
    print("max:   ", thisroi.max())
    print("mean:  ", thisroi.mean())
    print("mean:  ", ndimage.mean(thisroi))
    print("sd:    ", ndimage.standard_deviation(thisroi))
    print("sum:   ", ndimage.sum(thisroi))
    # print(thisroi
    # print("entropy: ",entropy(thisroi, disk(5))
    # print("maximum: ",maximum(thisroi, disk(5))
    # print("\n"
    # print(disk(5)
    print("\n")
    v1b.autoRange()
Example #42
def test_stat_funcs_2d():
    a = np.array([[5,6,0,0,0], [8,9,0,0,0], [0,0,0,3,5]])
    lbl = np.array([[1,1,0,0,0], [1,1,0,0,0], [0,0,0,2,2]])

    mean = ndimage.mean(a, labels=lbl, index=[1, 2])
    assert_array_equal(mean, [7.0, 4.0])

    var = ndimage.variance(a, labels=lbl, index=[1, 2])
    assert_array_equal(var, [2.5, 1.0])

    std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
    assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))

    med = ndimage.median(a, labels=lbl, index=[1, 2])
    assert_array_equal(med, [7.0, 4.0])

    min = ndimage.minimum(a, labels=lbl, index=[1, 2])
    assert_array_equal(min, [5, 3])

    max = ndimage.maximum(a, labels=lbl, index=[1, 2])
    assert_array_equal(max, [9, 5])
Example #43
 def update(self):
     # if not self.filterROI:
     thisroi = self.roi.getArrayRegion(self.arr, self.img1a).astype(float)
     self.img1b.setImage(thisroi, levels=(0, self.arr.max()))
     self.label2_shape.setText("shape: "+str(thisroi.shape))
     self.label2_size.setText("size: "+str(thisroi.size))
     self.label2_min.setText("min: "+str(thisroi.min()))
     self.label2_max.setText("max: "+str(thisroi.max()))
     self.label2_mean.setText("mean: "+str(thisroi.mean()))
     self.label2_sd.setText("sd: "+str( ndimage.standard_deviation(thisroi) ))
     self.label2_sum.setText("sum: "+str( ndimage.sum(thisroi) ))
     # self.img1b.scale(self.xscale, self.yscale)
     # self.img1b.translate(self.xshift, self.yshift)
     # else:
     #     self.img1b.setImage(self.data[self.layer,:,:,0]*self.ROI[self.layer])
     # # print("entropy: ",entropy(thisroi, disk(5))
     # # print("maximum: ",maximum(thisroi, disk(5))
     # # print("\n"
     # # print(disk(5)
     # print("\n")
     self.p2.autoRange()
Example #44
def test_stat_funcs_2d():
    a = np.array([[5,6,0,0,0], [8,9,0,0,0], [0,0,0,3,5]])
    lbl = np.array([[1,1,0,0,0], [1,1,0,0,0], [0,0,0,2,2]])

    mean = ndimage.mean(a, labels=lbl, index=[1, 2])
    assert_array_equal(mean, [7.0, 4.0])

    var = ndimage.variance(a, labels=lbl, index=[1, 2])
    assert_array_equal(var, [2.5, 1.0])

    std = ndimage.standard_deviation(a, labels=lbl, index=[1, 2])
    assert_array_almost_equal(std, np.sqrt([2.5, 1.0]))

    med = ndimage.median(a, labels=lbl, index=[1, 2])
    assert_array_equal(med, [7.0, 4.0])

    min = ndimage.minimum(a, labels=lbl, index=[1, 2])
    assert_array_equal(min, [5, 3])

    max = ndimage.maximum(a, labels=lbl, index=[1, 2])
    assert_array_equal(max, [9, 5])
Example #45
 def update(self):
     # if not self.filterROI:
     thisroi = self.roi.getArrayRegion(self.arr, self.img1a).astype(float)
     self.img1b.setImage(thisroi, levels=(0, self.arr.max()))
     self.label2_shape.setText("shape: " + str(thisroi.shape))
     self.label2_size.setText("size: " + str(thisroi.size))
     self.label2_min.setText("min: " + str(thisroi.min()))
     self.label2_max.setText("max: " + str(thisroi.max()))
     self.label2_mean.setText("mean: " + str(thisroi.mean()))
     self.label2_sd.setText("sd: " +
                            str(ndimage.standard_deviation(thisroi)))
     self.label2_sum.setText("sum: " + str(ndimage.sum(thisroi)))
     # self.img1b.scale(self.xscale, self.yscale)
     # self.img1b.translate(self.xshift, self.yshift)
     # else:
     #     self.img1b.setImage(self.data[self.layer,:,:,0]*self.ROI[self.layer])
     # # print("entropy: ",entropy(thisroi, disk(5))
     # # print("maximum: ",maximum(thisroi, disk(5))
     # # print("\n"
     # # print(disk(5)
     # print("\n")
     self.p2.autoRange()
Example #46
    def updatemain(self):

        if self.verbose:
            print "updating", self.layer
        if self.xview:
            # dataswappedX = np.swapaxes(self.data,0,1)
            self.arr = self.dataswappedX[self.layer]
        elif self.yview:
            # dataswappedY = np.swapaxes(self.data,0,2)
            self.arr = self.dataswappedY[self.layer]
        else:
            self.arr = self.data[self.layer]
        self.img1a.setImage(self.arr)
        if self.firsttime:
            self.firsttime = False
        else:
            if self.verbose:
                print self.rois
            if self.rois[self.layer]:
                # self.p1.removeItem(self.roi)
                # self.restorePolyLineState(self.roi, self.rois[self.layer])
                self.roi.setState(self.rois[self.layer])
                # self.p1.addItem(self.roi)

            self.update()
            self.label_layer.setText("layer: " + str(self.layer + 1) + "/" +
                                     str(len(self.data[:, :, :, 0])))
            self.label_shape.setText("shape: " + str(self.arr[:, :, 0].shape))
            self.label_size.setText("size: " + str(self.arr[:, :, 0].size))
            self.label_min.setText("min: " + str(self.arr[:, :, 0].min()))
            self.label_max.setText("max: " + str(self.arr[:, :, 0].max()))
            self.label_mean.setText("mean: " + str(self.arr[:, :, 0].mean()))
            self.label_sd.setText(
                "sd: " + str(ndimage.standard_deviation(self.arr[:, :, 0])))
            self.label_sum.setText("sum: " +
                                   str(ndimage.sum(self.arr[:, :, 0])))
        self.img1a.updateImage()
Example #47
def test_standard_deviation03():
    for type in types:
        input = np.array([1, 3], type)
        output = ndimage.standard_deviation(input)
        assert_almost_equal(output, np.sqrt(1.0))
Example #48
def test_standard_deviation05():
    labels = [2, 2, 3]
    for type in types:
        input = np.array([1, 3, 8], type)
        output = ndimage.standard_deviation(input, labels, 2)
        assert_almost_equal(output, 1.0)
Example #49
def test_standard_deviation04():
    input = np.array([1, 0], bool)
    output = ndimage.standard_deviation(input)
    assert_almost_equal(output, 0.5)
Example #50
def test_standard_deviation03():
    for type in types:
        input = np.array([1, 3], type)
        output = ndimage.standard_deviation(input)
        assert_almost_equal(output, np.sqrt(1.0))
Example #51
def test_standard_deviation02():
    for type in types:
        input = np.array([1], type)
        output = ndimage.standard_deviation(input)
        assert_almost_equal(output, 0.0)
Example #52
        averageAc, averageFa, averageGe, averageLo, averagePu, averageRa,
        averageRh, averageSc
    ])  #, averageSl, averageSo])
    print('Genre matrix: ')
    print(genreMatrix)
    print('')

    distanceVAE = distance.pdist(VAEMatrix)
    distanceVAE = distance.squareform(distanceVAE)
    averageAll = np.mean(distanceVAE)
    print('All points distances average: ')
    print(averageAll)
    print('')

    print('Standard Deviation between point average distances per genre: ')
    PDstandardDeviation = ndimage.standard_deviation(genreMatrix)
    print(PDstandardDeviation)
    print('')

    print('Standard Deviation between overall point distance: ')
    PDstandardDeviation = ndimage.standard_deviation(distanceVAE)
    print(PDstandardDeviation)

    #plt.imshow(averageAll, cmap='hot', interpolation='nearest')

#======================STANDARD DEVIATION==================================================================================================================

elif printStanDev:
    print('Standard Deviation: ')
    standardDeviation = ndimage.standard_deviation(VAEMatrix)
    print(standardDeviation)
Example #53
 def znorm(self):
     u = ndimage.mean(array(self.data))
     std = ndimage.standard_deviation(array(self.data))
     self.data = (self.data - u) / std
Example #54
def normStdev(im):
    avg1 = ndimage.mean(im)
    std1 = ndimage.standard_deviation(im)
    return (im - avg1) / std1
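
A quick check of the z-normalization above: the result should have mean close to 0 and standard deviation close to 1.
import numpy as np
from scipy import ndimage

im = np.random.rand(64, 64) * 50.0 + 10.0
z = normStdev(im)
print(round(ndimage.mean(z), 6))                 # ~0.0
print(round(ndimage.standard_deviation(z), 6))   # ~1.0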
Example #55
def fogpy(chn108, chn39, chn08, chn16, chn06, chn87, chn120, time, lat, lon,
          elevation, cot, reff):
    """ The fog and low stratus detection and forecasting algorithms are
    utilizing the methods proposed in different innovative studies:

    Arguments:
        chn108    Array for the 10.8 μm channel
        chn39    Array for the 3.9 μm channel
        chn08    Array for the 0.8 μm channel
        chn16    Array for the 1.6 μm channel
        chn06    Array for the 0.6 μm channel
        chn87    Array for the 8.7 μm channel
        chn120    Array for the 12.0 μm channel
        time    Datetime object for the satellite scene
        lat    Array of latitude values
        lon    Array of longitude values
        elevation Array of area elevation
        cot    Array of cloud optical thickness (depth)
        reff    Array of cloud particle effective radius

    Returns:
        Infrared image with fog mask

    - A novel approach to fog/low stratus detection using Meteosat 8 data
            J. Cermak & J. Bendix
    - Detecting ground fog from space – a microphysics-based approach
            J. Cermak & J. Bendix

    The algorithm can be applied to satellite zenith angle lower than 70°
    and a maximum solar zenith angle of 80°.

    The algorithm workflow is a succession of different masking approaches
    from coarse to finer selection to find fog and low stratus clouds within
    provided satellite images.

            Input: Calibrated satellite images >-----
                                                    |
                1.  Cloud masking -------------------
                                                    |
                2.  Spatial clustering---------------
                                                    |
                3.  Maximum margin elevation --------
                                                    |
                4.  Surface homogeneity check -------
                                                    |
                5.  Microphysics plausibility check -
                                                    |
                6.  Differentiate fog - low stratus -
                                                    |
                7.  Fog dissipation -----------------
                                                    |
                8.  Nowcasting ----------------------
                                                    |
            Output: fog and low stratus mask <--
    """
    arrsize = chn108.size
    # Dictionary of filtered values.
    filters = {}
    prev = 0

    # 1. Cloud masking
    logger.info("### Applying fog cloud filters to input array ###")
    # Given the combination of a solar and a thermal signal at 3.9 μm,
    # the difference in radiances to the 10.8 μm must be larger for a
    # cloud-contaminated pixel than for a clear pixel:
    cm_diff = chn108 - chn39

    # In the histogram of the difference the clear sky peak is identified
    # within a certain range. The nearest significant relative minimum in the
    # histogram towards more negative values is detected and used as a
    # threshold to separate clear from cloudy pixels in the image.

    # Create histogram
    hist = (np.histogram(cm_diff.compressed(), bins='auto'))

    # Find local min and max values
    localmin = (np.diff(np.sign(np.diff(hist[0]))) > 0).nonzero()[0] + 1
    localmax = (np.diff(np.sign(np.diff(hist[0]))) < 0).nonzero()[0] + 1

    # Utilize scipy signal function to find peaks
    peakind = find_peaks_cwt(hist[0], np.arange(1, len(hist[1]) / 10))
    peakrange = hist[1][peakind][(hist[1][peakind] >= -10) &
                                 (hist[1][peakind] < 10)]
    minpeak = np.min(peakrange)
    maxpeak = np.max(peakrange)

    # Determine threshold
    logger.debug("Histogram range for cloudy/clear sky pixels: {} - {}"
                 .format(minpeak, maxpeak))

    #plt.bar(hist[1][:-1], hist[0])
    #plt.title("Histogram with 'auto' bins")
    #plt.show()

    thres = np.max(hist[1][localmin[(hist[1][localmin] <= maxpeak) &
                                    (hist[1][localmin] >= minpeak) &
                                    (hist[1][localmin] < 0.5)]])
    if thres > 0 or thres < -5:
        logger.warning("Cloud maks difference threshold {} outside normal"
                       " range (from -5 to 0)".format(thres))
    else:
        logger.debug("Cloud mask difference threshold set to %s" % thres)
    # Create cloud mask for image array
    cloud_mask = cm_diff > thres

    filters['cloud'] = np.nansum(cloud_mask)
    prev += filters['cloud']
    fog_mask = cloud_mask

    # Remove remaining snow pixels
    chn108_ma = np.ma.masked_where(cloud_mask, chn108)
    chn16_ma = np.ma.masked_where(cloud_mask, chn16)
    chn08_ma = np.ma.masked_where(cloud_mask, chn08)
    chn06_ma = np.ma.masked_where(cloud_mask, chn06)

    # Snow has a certain minimum reflectance (0.11 at 0.8 μm) and snow has a
    # certain minimum temperature (256 K)

    # Snow displays a lower reflectivity than water clouds at 1.6 μm, combined
    # with a slightly higher level of absorption (Wiscombe and Warren, 1980)
    # Calculate Normalized Difference Snow Index
    ndsi = (chn06 - chn16) / (chn06 + chn16)

    # Where the NDSI exceeds a certain threshold (0.4) and the two other
    # criteria are met, a pixel is rejected as snow-covered.
    snow_mask = (chn08 / 100 >= 0.11) & (chn108 >= 256) & (ndsi >= 0.4)

    fog_mask = fog_mask | snow_mask
    filters['snow'] = np.nansum(fog_mask) - prev
    prev += filters['snow']

    # Ice cloud exclusion
    # Only warm fog (i.e. clouds in the water phase) are considered.
    # No ice fog!!!
    # Difference of brightness temperatures in the 12.0 and 8.7 μm channels
    # is used as an indicator of cloud phase (Strabala et al., 1994).
    # Where it exceeds 2.5 K, a water-cloud-covered pixel is assumed with a
    # large degree of certainty.
    chn120_ma = np.ma.masked_where(cloud_mask | snow_mask, chn120)
    chn108_ma = np.ma.masked_where(cloud_mask | snow_mask, chn108)
    chn87_ma = np.ma.masked_where(cloud_mask | snow_mask, chn87)
    ic_diff = chn120 - chn87

    # Straightforward temperature test, cutting off at very low 10.8 μm
    # brightness temperatures (250 K).
    # Create ice cloud mask
    ice_mask = (ic_diff < 2.5) | (chn108 < 250)

    fog_mask = fog_mask | ice_mask
    filters['ice'] = np.nansum(fog_mask) - prev
    prev += filters['ice']

    # Thin cirrus is detected by means of the split-window IR channel
    # brightness temperature difference (T10.8 –T12.0 ). This difference is
    # compared to a threshold dynamically interpolated from a lookup table
    # based on satellite zenith angle and brightness temperature at 10.8 μm
    # (Saunders and Kriebel, 1988)
    chn120_ma = np.ma.masked_where(cloud_mask | snow_mask | ice_mask, chn120)
    chn108_ma = np.ma.masked_where(cloud_mask | snow_mask | ice_mask, chn108)

    bt_diff = chn108 - chn120

    # Calculate sun zenith angles
    sza = astronomy.sun_zenith_angle(time, lon, lat)

    minsza = np.min(sza)
    maxsza = np.max(sza)
    logger.debug("Found solar zenith angles from %s to %s°" % (minsza,
                                                               maxsza))

    # Calculate secant of sza
    # secsza = np.ma.masked_where(cloud_mask | snow_mask | ice_mask,
    #                             (1 / np.cos(np.deg2rad(sza))))
    secsza = 1 / np.cos(np.deg2rad(sza))

    # Lookup table for BT difference thresholds at certain sec(sun zenith
    # angles) and 10.8 μm BT
    lut = {260: {1.0: 0.55, 1.25: 0.60, 1.50: 0.65, 1.75: 0.90, 2.0: 1.10},
           270: {1.0: 0.58, 1.25: 0.63, 1.50: 0.81, 1.75: 1.03, 2.0: 1.13},
           280: {1.0: 1.30, 1.25: 1.61, 1.50: 1.88, 1.75: 2.14, 2.0: 2.30},
           290: {1.0: 3.06, 1.25: 3.72, 1.50: 3.95, 1.75: 4.27, 2.0: 4.73},
           300: {1.0: 5.77, 1.25: 6.92, 1.50: 7.00, 1.75: 7.42, 2.0: 8.43},
           310: {1.0: 9.41, 1.25: 11.22, 1.50: 11.03, 1.75: 11.60, 2.0: 13.39}}

    # Apply lut to BT and sza values

    def find_nearest_lut_sza(sza):
        """ Get nearest look up table key value for given ssec(sza)"""
        sza_opt = [1.0, 1.25, 1.50, 1.75, 2.0]
        sza_idx = np.array([np.abs(sza - i) for i in sza_opt]).argmin()
        return(sza_opt[sza_idx])

    def find_nearest_lut_bt(bt):
        """ Get nearest look up table key value for given BT"""
        bt_opt = [260, 270, 280, 290, 300, 310]
        bt_idx = np.array([np.abs(bt - i) for i in bt_opt]).argmin()
        return(bt_opt[bt_idx])

    def apply_lut(sza, bt):
        """ Apply LUT to given BT and sza values"""
        return(lut[bt][sza])

    # Vectorize LUT functions for numpy arrays
    vfind_nearest_lut_sza = np.vectorize(find_nearest_lut_sza)
    vfind_nearest_lut_bt = np.vectorize(find_nearest_lut_bt)
    vapply_lut = np.vectorize(apply_lut)

    secsza_lut = vfind_nearest_lut_sza(secsza)
    chn108_ma_lut = vfind_nearest_lut_bt(chn108)

    bt_thres = vapply_lut(secsza_lut, chn108_ma_lut)
    logger.debug("Set BT difference threshold for thin cirrus from %s to %s K"
                 % (np.min(bt_thres), np.max(bt_thres)))
    # Create thin cirrus mask
    bt_ci_mask = bt_diff > bt_thres

    # Other cirrus test (T8.7–T10.8), founded on the relatively strong cirrus
    # signal at the former wavelength (Wiegner et al.1998). Where the
    # difference is greater than 0 K, cirrus is assumed to be present.
    strong_ci_diff = chn87 - chn108
    strong_ci_mask = strong_ci_diff > 0
    cirrus_mask = bt_ci_mask | strong_ci_mask

    fog_mask = fog_mask | cirrus_mask
    filters['cirrus'] = np.nansum(fog_mask) - prev
    prev += filters['cirrus']

    # Those pixels whose cloud phase still remains undefined after these ice
    # cloud exclusions are subjected to a much weaker cloud phase test in order
    # to get an estimate regarding their phase. This test uses the NDSI
    # introduced above. Where it falls below 0.1, a water cloud is assumed to
    # be present.
    chn16_ma = np.ma.masked_where(cloud_mask | snow_mask | ice_mask |
                                  cirrus_mask, chn16)
    chn06_ma = np.ma.masked_where(cloud_mask | snow_mask | ice_mask |
                                  cirrus_mask, chn06)
    ndsi_ci = (chn06 - chn16) / (chn06 + chn16)
    water_mask = ndsi_ci > 0.1

    fog_mask = fog_mask | water_mask
    filters['water'] = np.nansum(fog_mask) - prev
    prev += filters['water']

    # Small droplet proxy test
    # Fog generally has a stronger signal at 3.9 μm than clear ground, which
    # in turn radiates more than other clouds.
    # The 3.9 μm radiances for cloud-free land areas are averaged over 50 rows
    # at a time to obtain an approximately latitudinal value.
    # Wherever a cloud-covered pixel exceeds this value, it is flagged
    # ‘small droplet cloud’.
    cloud_free_ma = np.ma.masked_where(~cloud_mask, chn39)
    chn39_ma = np.ma.masked_where(cloud_mask | snow_mask | ice_mask |
                                  cirrus_mask | water_mask, chn39)
    # Latitudinal average cloud free radiances
    lat_cloudfree = np.ma.mean(cloud_free_ma, 1)
    logger.debug("Mean latitudinal threshold for cloudfree areas: %.2f K"
                 % np.mean(lat_cloudfree))
    global line
    line = 0

    def find_watercloud(lat, thres):
        """Funciton to compare row of BT with given latitudinal thresholds"""
        global line
        if all(lat.mask):
            res = lat.mask
        elif np.ma.is_masked(thres[line]):
            res = lat <= np.mean(lat_cloudfree)
        else:
            res = lat <= thres[line]
        line += 1

        return(res)

    # Apply latitudinal threshold to cloudy areas
    drop_mask = np.apply_along_axis(find_watercloud, 1, chn39,
                                    lat_cloudfree)

    fog_mask = fog_mask | drop_mask
    filters['drop'] = np.nansum(fog_mask) - prev
    prev += filters['drop']

    # Apply previous defined filters
    chn108_ma = np.ma.masked_where(fog_mask, chn108)

    logger.debug("Number of filtered non-fog pixels: %s"
                 % (np.sum(chn108_ma.mask)))
    logger.debug("Number of potential fog cloud pixels: %s"
                 % (np.sum(~chn108_ma.mask)))

    # 2. Spatial clustering
    logger.info("### Analizing spatial properties of filtered fog clouds ###")

    # Enumerate fog cloud clusters
    cluster = measurements.label(~chn108_ma.mask)

    # Get 10.8 channel sampled by the previous fog filters
    cluster_ma = np.ma.masked_where(fog_mask, cluster[0])
    # Get cloud and snow free cells
    clear_ma = np.ma.masked_where(~cloud_mask | snow_mask, chn108)

    logger.debug("Number of spatial coherent fog cloud clusters: %s"
                 % np.nanmax(np.unique(cluster_ma)))

    # 3. Altitude test
    def sliding_window(arr, window_size):
        """ Construct a sliding window view of the array"""
        arr = np.asarray(arr)
        window_size = int(window_size)
        if arr.ndim != 2:
            raise ValueError("need 2-D input")
        if not (window_size > 0):
            raise ValueError("need a positive window size")
        shape = (arr.shape[0] - window_size + 1,
                 arr.shape[1] - window_size + 1,
                 window_size, window_size)
        if shape[0] <= 0:
            shape = (1, shape[1], arr.shape[0], shape[3])
        if shape[1] <= 0:
            shape = (shape[0], 1, shape[2], arr.shape[1])
        strides = (arr.shape[1]*arr.itemsize, arr.itemsize,
                   arr.shape[1]*arr.itemsize, arr.itemsize)
        return as_strided(arr, shape=shape, strides=strides)

    def cell_neighbors(arr, i, j, d, value):
        """Return d-th neighbors of cell (i, j)"""
        w = sliding_window(arr, 2*d+1)

        ix = np.clip(i - d, 0, w.shape[0]-1)
        jx = np.clip(j - d, 0, w.shape[1]-1)

        i0 = max(0, i - d - ix)
        j0 = max(0, j - d - jx)
        i1 = w.shape[2] - max(0, d - i + ix)
        j1 = w.shape[3] - max(0, d - j + jx)

        # Get cell value
        if i1 - i0 == 3:
            icell = 1
        elif (i1 - i0 == 2) & (i0 == 0):
            icell = 0
        elif (i1 - i0 == 2) & (i0 == 1):
            icell = 2
        if j1 - j0 == 3:
            jcell = 1
        elif (j1 - j0 == 2) & (j0 == 0):
            jcell = 0
        elif (j1 - j0 == 2) & (j0 == 1):
            jcell = 2

        irange = range(i0, i1)
        jrange = range(j0, j1)
        neighbors = [w[ix, jx][k, l] for k in irange for l in jrange
                     if k != icell or l != jcell]
        center = value[i, j]  # Get center cell value from additional array

        return(center, neighbors)

    def get_fog_cth(cluster, cf_arr, bt_cc, elevation):
        """Get neighboring cloud free BT and elevation values of potenital
        fog cloud clusters and compute cloud top height from naximum BT
        differences for fog cloud contaminated pixel in comparison to cloud
        free areas and their corresponding elevation
        """
        e = 1
        from collections import defaultdict
        result = defaultdict(list)
        elevation_ma = np.ma.masked_where(~cloud_mask | snow_mask, elevation)
        # Convert masked values to nan
        if np.ma.isMaskedArray(cf_arr):
            cf_arr = cf_arr.filled(np.nan)
        for index, val in np.ndenumerate(cluster):
            if val != 0:
                # Get list of cloud free neighbor pixel
                tcc, tneigh = cell_neighbors(cf_arr, *index, d=1,
                                             value=bt_cc)
                zcc, zneigh = cell_neighbors(elevation_ma, *index, d=1,
                                             value=elevation)
                tcf_diff = np.array([tcf - tcc for tcf in tneigh])
                zcf_diff = np.array([zcf - zcc for zcf in zneigh])
                # Get maximum bt difference
                try:
                    maxd = np.nanargmax(tcf_diff)
                except ValueError:
                    continue
                # compute cloud top height assuming a constant atmospheric
                # temperature lapse rate (0.65 K per 100 m)
                rate = 0.65
                cth = tcf_diff[maxd] / rate * 100 - zcf_diff[maxd]
                result[val].append(cth)

        return(result)
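    # Worked example of the CTH formula above (illustrative numbers only):
    # with a lapse rate of 0.65 K per 100 m, a maximum BT difference of 5.2 K
    # and an elevation difference of 150 m give
    # cth = 5.2 / 0.65 * 100 - 150 = 800 - 150 = 650 (metres).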
    # Calculate fog cluster cloud top height
    cluster_h = get_fog_cth(cluster_ma, clear_ma, chn108_ma, elevation)

    # Apply maximum threshold for cluster height to identify low fog clouds
    cluster_mask = cluster_ma.mask
    for key, item in cluster_h.iteritems():
        if any([c > 2000 for c in item]):
            cluster_mask[cluster_ma == key] = True

    # Create additional fog cluster map
    cluster_cth = np.ma.masked_where(cluster_mask, cluster_ma)
    for key, item in cluster_h.iteritems():
        if all([c <= 2000 for c in item]):
            cluster_cth[cluster_ma == key] = np.mean(item)

    # Update fog filter
    fog_mask = fog_mask | cluster_mask
    filters['height'] = np.nansum(fog_mask) - prev
    prev += filters['height']

    # Apply previous defined spatial filters
    chn108_ma = np.ma.masked_where(cluster_mask, chn108)

    # Surface homogeneity test
    cluster, nlbl = ndimage.label(~chn108_ma.mask)
    cluster_ma = np.ma.masked_where(cluster_mask, cluster)

    cluster_sd = ndimage.standard_deviation(chn108_ma, cluster_ma,
                                            index=np.arange(1, nlbl+1))
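    # Illustration (added, not from the original code): with labels and an
    # index list, ndimage.standard_deviation returns one population standard
    # deviation per label, e.g.
    #   ndimage.standard_deviation(np.array([1., 2., 3., 4., 5., 6.]),
    #                              labels=np.array([1, 1, 1, 2, 2, 2]),
    #                              index=[1, 2])
    # is approximately [0.8165, 0.8165]; so cluster_sd holds one value per
    # fog cloud cluster label.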

    # 4. Mask potential fog clouds with high spatial inhomogeneity
    sd_mask = cluster_sd > 2.5
    cluster_dict = {key: sd_mask[key - 1] for key in np.arange(1, nlbl+1)}
    for val in np.arange(1, nlbl+1):
        cluster_mask[cluster_ma == val] = cluster_dict[val]

    fog_mask = fog_mask | cluster_mask
    filters['homogen'] = np.nansum(fog_mask) - prev
    prev += filters['homogen']

    # Apply previous defined spatial filters
    chn108_ma = np.ma.masked_where(cluster_mask, chn108)

    logger.debug("Number of spatial filtered non-fog pixels: %s"
                 % (np.sum(chn108_ma.mask)))
    logger.debug("Number of remaining fog cloud pixels: %s"
                 % (np.sum(~chn108_ma.mask)))

    # 5. Apply microphysical fog cloud filters
    # Typical microphysical parameters for fog were taken from previous studies
    # Fog optical depth normally ranges between 0.15 and 30 while droplet
    # effective radius varies between 3 and 12 μm, with a maximum of 20 μm in
    # coastal fog. The respective maxima for optical depth (30) and droplet
    # radius (20 μm) are applied to the low stratus mask as cut-off levels.
    # Where a pixel previously identified as fog/low stratus falls outside the
    # range it will now be flagged as a non-fog pixel.
    logger.info("### Apply microphysical plausible check ###")

    if np.ma.isMaskedArray(cot):
        cot = cot.base
    if np.ma.isMaskedArray(reff):
        reff = reff.base

    # Add mask by microphysical thresholds
    cpp_mask = cluster_mask | (cot > 30) | (reff > 20e-6)

    fog_mask = fog_mask | cpp_mask
    filters['cpp'] = np.nansum(fog_mask) - prev
    prev += filters['cpp']

    # Apply previous defined microphysical filters
    chn108_ma = np.ma.masked_where(cpp_mask, chn108)

    logger.debug("Number of microphysical filtered non-fog pixels: %s"
                 % (np.sum(chn108_ma.mask)))
    logger.debug("Number of remaining fog cloud pixels: %s"
                 % (np.sum(~chn108_ma.mask)))

    # Combine single masks to derive potential fog cloud filter
    fls_mask = (fog_mask)

    # Create debug output
    filters['fls'] = np.nansum(fog_mask)
    filters['remain'] = np.nansum(~fog_mask)

    logger.info("""---- FLS algorithm filter results ---- \n
    Number of initial pixels:            {}
    Removed non cloud pixel:             {}
    Removed snow pixels:                 {}
    Removed ice cloud pixels:            {}
    Removed thin cirrus pixels:          {}
    Removed non-water cloud pixels:      {}
    Removed non-small-droplet pixels:    {}
    Removed high cloud-top pixels:       {}
    Removed spatially inhomogeneous pixels: {}
    Removed microphysically filtered pixels: {}
    ---------------------------------------
    Filtered non fog/low stratus pixels  {}
    Remaining fog/low stratus pixels     {}
    ---------------------------------------
    """.format(arrsize, filters['cloud'], filters['snow'],
               filters['ice'], filters['cirrus'], filters['water'],
               filters['drop'], filters['height'], filters['homogen'],
               filters['cpp'], filters['fls'], filters['remain']))

    return(fog_mask, cluster_h)
Example #56
def least_squares_fit(x, y):
    """Fit y = alpha + beta * x by ordinary least squares."""
    beta = correlation(x, y) * standard_deviation(y) / standard_deviation(x)
    alpha = mean(y) - beta * mean(x)
    return alpha, beta
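
# A minimal numerical check (added; uses NumPy rather than the original
# correlation/standard_deviation/mean helpers) that the slope formula
# beta = corr(x, y) * std(y) / std(x) matches an ordinary least-squares fit:
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([2.0, 4.1, 5.9, 8.2, 9.8])
r = np.corrcoef(x, y)[0, 1]
beta = r * y.std() / x.std()        # slope
alpha = y.mean() - beta * x.mean()  # intercept
# np.polyfit(x, y, 1) returns (beta, alpha) up to floating-point error.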
Example #57
    def run(self, ips, imgs, para=None):
        inten = ImageManager.get(para['inten'])
        if not para['slice']:
            imgs = [inten.img]
            msks = [ips.img]
        else:
            msks = ips.imgs
            imgs = inten.imgs
            if len(msks) == 1:
                msks *= len(imgs)
        buf = imgs[0].astype(np.uint16)
        strc = ndimage.generate_binary_structure(
            2, 1 if para['con'] == '4-connect' else 2)
        idct = ['Max', 'Min', 'Mean', 'Variance', 'Standard', 'Sum']
        key = {
            'Max': 'max',
            'Min': 'min',
            'Mean': 'mean',
            'Variance': 'var',
            'Standard': 'std',
            'Sum': 'sum'
        }
        idct = [i for i in idct if para[key[i]]]
        titles = ['Slice', 'ID'][0 if para['slice'] else 1:]
        if para['center']: titles.extend(['Center-X', 'Center-Y'])
        if para['extent']: titles.extend(['Min-Y', 'Min-X', 'Max-Y', 'Max-X'])
        titles.extend(idct)
        k = ips.unit[0]
        data, mark = [], {'type': 'layers', 'body': {}}
        # data,mark=[],[]
        for i in range(len(imgs)):
            n = ndimage.label(msks[i], strc, output=buf)
            index = range(1, n + 1)
            dt = []
            if para['slice']: dt.append([i] * n)
            dt.append(range(n))

            xy = ndimage.center_of_mass(imgs[i], buf, index)
            xy = np.array(xy).round(2).T
            if para['center']: dt.extend([xy[1] * k, xy[0] * k])

            boxs = [None] * n
            if para['extent']:
                boxs = ndimage.find_objects(buf)
                boxs = [(i[1].start + (i[1].stop - i[1].start) / 2,
                         i[0].start + (i[0].stop - i[0].start) / 2,
                         i[1].stop - i[1].start, i[0].stop - i[0].start)
                        for i in boxs]
                for j in (0, 1, 2, 3):
                    dt.append([i[j] * k for i in boxs])
            if para['max']:
                dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
            if para['min']:
                dt.append(ndimage.minimum(imgs[i], buf, index).round(2))
            if para['mean']:
                dt.append(ndimage.mean(imgs[i], buf, index).round(2))
            if para['var']:
                dt.append(ndimage.variance(imgs[i], buf, index).round(2))
            if para['std']:
                dt.append(
                    ndimage.standard_deviation(imgs[i], buf, index).round(2))
            if para['sum']:
                dt.append(ndimage.sum(imgs[i], buf, index).round(2))

            layer = {'type': 'layer', 'body': []}
            xy = np.int0(xy).T
            texts = [(i[1], i[0]) + ('id=%d' % n, )
                     for i, n in zip(xy, range(len(xy)))]
            layer['body'].append({'type': 'texts', 'body': texts})
            if para['extent']:
                layer['body'].append({'type': 'rectangles', 'body': boxs})
            mark['body'][i] = layer

            data.extend(list(zip(*dt)))

        IPy.show_table(pd.DataFrame(data, columns=titles),
                       inten.title + '-region statistic')
        inten.mark = GeometryMark(mark)
        inten.update = True
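
# A small, self-contained sketch (added; toy data, not part of the plugin
# above) of the ndimage calls chained in run(): label the connected regions of
# a mask, then query per-region statistics with the shared label image and an
# index list.
import numpy as np
from scipy import ndimage

img = np.array([[0., 1., 1., 0.],
                [0., 2., 2., 0.],
                [0., 0., 0., 5.],
                [0., 0., 0., 6.]])
labels, n = ndimage.label(img > 0)                      # two 4-connected regions
index = list(range(1, n + 1))
means = ndimage.mean(img, labels, index)                # [1.5, 5.5]
stds = ndimage.standard_deviation(img, labels, index)   # [0.5, 0.5]
centers = ndimage.center_of_mass(img, labels, index)    # one (row, col) per region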
    def run(self, data, upc_sequence, resources=None):

        self.mnl_probabilities=upc_sequence.probability_class
        self.bhhh_estimation = bhhh_mnl_estimation()

        modified_upc_sequence = UPCFactory().get_model(
            utilities=None, probabilities="opus_core.mnl_probabilities", choices=None)
        modified_upc_sequence.utility_class = upc_sequence.utility_class

        N, neqs, V = data.shape

        max_iter = resources.get("max_iterations", 100)  # default
        sc = SessionConfiguration()
        dataset_pool = sc.get_dataset_pool()
        sample_rate = dataset_pool.get_dataset("sample_rate")
        
        CLOSE = sc["CLOSE"]
        info_filename = sc["info_file"]
        info_filename = os.path.join('.', info_filename)
        info_file = open(info_filename, "a")
        constraint_dict = {1:'constrained', 0:'unconstrained'}
        swing_cases_fix = 0  #set swing alternatives to constrained (1) or unconstrained (0)
        prob_correlation = None
        
        choice_set = resources['_model_'].choice_set
        J = choice_set.size()
        alt_id = choice_set.get_id_attribute()
        movers = choice_set.get_attribute('movers')

        resources.check_obligatory_keys(["capacity_string"])
        supply = choice_set.get_attribute(resources["capacity_string"])

        index = resources.get("index", None)
        if index is None: # no sampling case, alternative set is the full choice_set
            index = arange(J)
        if index.ndim <= 1:
            index = repeat(index[newaxis,:], N, axis=0)

        if resources.get('aggregate_to_dataset', None):
            aggregate_dataset = dataset_pool.get_dataset(resources.get('aggregate_to_dataset'))
            choice_set_aggregate_id = choice_set.get_attribute(aggregate_dataset.get_id_name()[0])
            index = aggregate_dataset.get_id_index(choice_set_aggregate_id[index].ravel()).reshape(index.shape)

            supply = aggregate_dataset.get_attribute(resources["capacity_string"])
            J = aggregate_dataset.size()

            movers = aggregate_dataset.get_attribute("movers")

        demand_history = movers[:, newaxis]
        resources.merge({"index":index})
        
        pi = ones(index.shape, dtype=float32)  #initialize pi
        #average_omega = ones(J,dtype=float32)  #initialize average_omega
        logger.start_block('Outer Loop')
        for i in range(max_iter):
            logger.log_status('Outer Loop Iteration %s' % i)

            result = self.bhhh_estimation.run(data, modified_upc_sequence, resources)
            del self.bhhh_estimation; collect()
            self.bhhh_estimation = bhhh_mnl_estimation()

            probability = modified_upc_sequence.get_probabilities()
            if data.shape[2] == V:  #insert a placeholder for ln(pi) in data
                data = concatenate((data,ones((N,neqs,1),dtype=float32)), axis=2)
                coef_names = resources.get("coefficient_names")
                coef_names = concatenate( (coef_names, array(["ln_pi"])) )
                resources.merge({"coefficient_names":coef_names})
            else:
                beta_ln_pi = result['estimators'][where(coef_names == 'ln_pi')][0]
                logger.log_status("mu = 1/%s = %s" % (beta_ln_pi, 1/beta_ln_pi))
                
                prob_hat = safe_array_divide(probability, pi ** beta_ln_pi)
                #prob_hat = safe_array_divide(probability, pi)
                prob_hat_sum = prob_hat.sum(axis=1, dtype=float32)
                if not ma.allclose(prob_hat_sum, 1.0):
                    logger.log_status("probability doesn't sum up to 1, with minimum %s, and maximum %s" %
                                      (prob_hat_sum.min(), prob_hat_sum.max()))
                    
                    probability = normalize(prob_hat)

            demand = self.mnl_probabilities.get_demand(index, probability, J) * 1 / sample_rate
            demand_history = concatenate((demand_history,
                                          demand[:, newaxis]),
                                          axis=1)

            sdratio = safe_array_divide(supply, demand, return_value_if_denominator_is_zero=2.0)
            sdratio_matrix = sdratio[index]
            ## debug info
            from numpy import histogram 
            from opus_core.misc import unique
            cc = histogram(index.ravel(), unique(index.ravel()))[0]
            logger.log_status( "=================================================================")
            logger.log_status( "Probability min: %s, max: %s" % (probability.min(), probability.max()) )
            logger.log_status( "Demand min: %s, max: %s" % (demand.min(), demand.max()) )
            logger.log_status( "sdratio min: %s, max: %s" % (sdratio.min(), sdratio.max()) )
            logger.log_status( "demand[sdratio==sdratio.min()]=%s" % demand[sdratio==sdratio.min()] )
            logger.log_status( "demand[sdratio==sdratio.max()]=%s" % demand[sdratio==sdratio.max()] )
            logger.log_status( "Counts of unique submarkets in alternatives min: %s, max: %s" % (cc.min(), cc.max()) )
            logger.log_status( "=================================================================")

            constrained_locations_matrix, omega, info = self.inner_loop(supply, demand, probability,
                                                                        index, sdratio_matrix,
                                                                        J, max_iteration=max_iter)

            inner_iterations, constrained_locations_history, swing_index, average_omega_history = info
    
            for idx in swing_index:
                logger.log_status("swinging alt with id %s set to %s" % (alt_id[idx], constraint_dict[swing_cases_fix]))
                constrained_locations_matrix[index==idx] = swing_cases_fix
    
            if swing_index.size > 0:    
                info_file.write("swing of constraints found with id %s \n" % alt_id[swing_index])
                info_file.write("outer_iteration, %i, " % i + ", ".join([str(i)]*(len(inner_iterations))) + "\n")
                info_file.write("inner_iteration, , " + ", ".join(inner_iterations) + "\n")
                info_file.write("id, sdratio, " + ", ".join(["avg_omega"]*len(inner_iterations)) + "\n")
                for idx in swing_index:
                    line = str(alt_id[idx]) + ','
                    line += str(sdratio[idx]) + ','
                    line += ",".join([str(x) for x in average_omega_history[idx,]])
                    line += "\n"
                    info_file.write(line)
    
                info_file.write("\n")
                info_file.flush()

            outer_iterations = [str(i)] * len(inner_iterations)
            prob_min = [str(probability.min())] * len(inner_iterations)
            prob_max = [str(probability.max())] * len(inner_iterations)

            pi_new = self.mnl_probabilities.get_pi(sdratio_matrix, omega, constrained_locations_matrix)

            data[:,:,-1] = ln(pi_new)
            #diagnostic output
            
            if not ma.allclose(pi, pi_new, atol=CLOSE):
                if i > 0:  #don't print this for the first iteration
                    logger.log_status("min of abs(pi(l+1) - pi(l)): %s" % absolute(pi_new - pi).min())
                    logger.log_status("max of abs(pi(l+1) - pi(l)): %s" % absolute(pi_new - pi).max())
                    logger.log_status("mean of pi(l+1) - pi(l): %s" % (pi_new - pi).mean())
                    logger.log_status('Standard Deviation pi(l+1) - pi(l): %s' % standard_deviation(pi_new - pi))
                    logger.log_status('correlation of pi(l+1) and pi(l): %s' % corr(pi_new.ravel(), pi.ravel())[0,1])

                pi = pi_new
                probability_old = probability   # keep probability of the previous loop, for statistics computation only    
            else:   # convergence criterion achieved, quitting outer loop
                logger.log_status("pi(l) == pi(l+1): Convergence criterion achieved")
    
                info_file.write("\nConstrained Locations History:\n")
                info_file.write("outer_iteration," + ",".join(outer_iterations) + "\n")
                info_file.write("inner_iteration," + ",".join(inner_iterations) + "\n")
                info_file.write("minimum_probability," + ",".join(prob_min) + "\n")
                info_file.write("maximum_probability," + ",".join(prob_max) + "\n")
                for row in range(J):
                    line = [str(x) for x in constrained_locations_history[row,]]
                    info_file.write(str(alt_id[row]) + "," + ",".join(line) + "\n")

                info_file.flush()

                info_file.write("\nDemand History:\n")
                i_str = [str(x) for x in range(i)]
                info_file.write("outer_iteration, (movers)," + ",".join(i_str) + "\n")
                #info_file.write(", ,\n")
                for row in range(J):
                    line = [str(x) for x in demand_history[row,]]
                    info_file.write(str(alt_id[row]) + "," + ",".join(line) + "\n")

                demand_history_info_criteria = [500, 100, 50, 20]
                for criterion in demand_history_info_criteria:
                    com_rows_index = where(movers <= criterion)[0]
                    info_file.write("\nDemand History for alternatives with less than or equal to %s movers in 1998:\n" % criterion)
                    i_str = [str(x) for x in range(i)]
                    info_file.write("outer_iteration, (movers)," + ",".join(i_str) + "\n")
                    #info_file.write(", movers,\n")
                    for row in com_rows_index:
                        line = [str(x) for x in demand_history[row,]]
                        info_file.write(str(alt_id[row]) + "," + ",".join(line) + "\n")

                #import pdb; pdb.set_trace()
                #export prob correlation history
                correlation_indices, prob_correlation = self.compute_prob_correlation(probability_old, probability, prob_hat, index, resources)

                info_file.write("\nCorrelation of Probabilities:\n")
                c_name = ['corr(p_ij p~_ij)', 'corr(p_ij p^_ij)', 'corr(p_ij dummy)', 'corr(p~_ij p^_ij)', 'corr(p~_ij dummy)', 'corr(p^_ij dummy)']

                info_file.write("com_id, " + ",".join(c_name) + "\n")

                #info_file.write(", ,\n")
                for row in range(correlation_indices.size):
                    line = [str(x) for x in prob_correlation[row,]]
                    info_file.write(str(alt_id[correlation_indices[row]]) + "," + ",".join(line) + "\n")

                info_file.close()

                result['pi'] = pi
                return result

        logger.end_block()
        try:info_file.close()
        except:pass

        raise RuntimeError, "max iteration reached without convergence."
    def inner_loop(self, supply, demand, probability, index, sdratio_matrix, J,
                   max_iteration=100):
        #vacancy_rate = SessionConfiguration().get_dataset_from_pool("vacancy_rate")
        CLOSE = SessionConfiguration()["CLOSE"]

        average_omega = ones(J, dtype=float32)
        inner_iterations=None; constrained_locations_history = None
        swing_index=array([]); average_omega_history=average_omega[:, newaxis];
        N = probability.shape[0]
        constrained_ex_ante = zeros(probability.shape) - 1
        logger.start_block('Inner Loop')

        try:
            for i in range(1, max_iteration+1):
                logger.log_status('Inner Loop Iteration %s' % i)
    
                #initial calculations
                constrained_locations = where(((average_omega * demand - supply) > CLOSE),1,0)    
                constrained_locations_matrix = constrained_locations[index]
    
                omega = self.mnl_probabilities.get_omega(probability, constrained_locations_matrix, sdratio_matrix)
                #omega = _round(omega, 1.0, CLOSE)
    
                logger.log_status('Num of constrained locations: %s' % constrained_locations.sum())
                logger.log_status('Num of unconstrained locations: %s' % (constrained_locations.size - constrained_locations.sum()))
                logger.log_status('Min Ex Ante Constraints: %s' % min(constrained_locations_matrix.sum(axis=1)))
                logger.log_status('Max Ex Ante Constraints: %s' % max(constrained_locations_matrix.sum(axis=1)))
                logger.log_status('Minimum Omega: %s' % min(omega))
                logger.log_status('Maximum Omega: %s' % max(omega))
                logger.log_status('Mean Omega: %s' % mean(omega))
                logger.log_status('Median Omega: %s' % median(omega))
                logger.log_status('Sum Omega: %s' % omega.sum())
                logger.log_status('Standard Deviation Omega: %s' % standard_deviation(omega))
                logger.log_status('Count of Negative Omega: %s' % (where(omega<0,1,0)).sum())
                logger.log_status('Count of Omega < 1: %s' % (where(omega<1,1,0)).sum())
                logger.log_status('Count of Omega > 2: %s' % (where(omega>2,1,0)).sum())
                logger.log_status('Count of Omega > 4: %s' % (where(omega>4,1,0)).sum())
    
                average_omega = self.mnl_probabilities.get_average_omega(omega, probability, index, J, demand)
    
                #average_omega=_round(average_omega, 1.0, CLOSE)
                logger.log_status('Minimum average_omega: %s' % min(average_omega))
                logger.log_status('Maximum average_omega: %s' % max(average_omega))
                logger.log_status('Mean average_omega: %s' % mean(average_omega))
                logger.log_status('Median average_omega: %s' % median(average_omega))
                logger.log_status('Sum average_omega: %s' % average_omega.sum())
                logger.log_status('Standard Deviation average_omega: %s' % standard_deviation(average_omega))
    
                logger.log_status(' ')
                if not any(constrained_ex_ante - constrained_locations_matrix):
                    return constrained_locations_matrix, omega, (inner_iterations, constrained_locations_history,swing_index, average_omega_history)
                else:
                    constrained_ex_ante = constrained_locations_matrix
    
                if constrained_locations_history is None:
                    inner_iterations = [str(i)]
                    constrained_locations_history = constrained_locations[:,newaxis]
                else:
                    inner_iterations += [str(i)]
                    constrained_locations_history = concatenate((constrained_locations_history,
                                                                constrained_locations[:, newaxis]),
                                                                axis=1)
                    average_omega_history = concatenate((average_omega_history, average_omega[:, newaxis]), axis=1)
    
                    if i > 2 and ma.allclose(constrained_locations_history[:,i-1], constrained_locations_history[:,i-3]):
                        swing_index = where((constrained_locations_history[:,i-1] - constrained_locations_history[:,i-2]) != 0)[0]
                        logger.log_warning("swing of constraints found in %s alternatives" % swing_index.size)
                        return constrained_locations_matrix, omega, (inner_iterations, constrained_locations_history,swing_index, average_omega_history)
        finally:
            logger.end_block()
        logger.log_error("max iteration reached without convergence.")
        raise RuntimeError, "max iteration reached without convergence."
Example #60
def test_standard_deviation04():
    "standard deviation 4"
    input = np.array([1, 0], bool)
    output = ndimage.standard_deviation(input)
    # the population standard deviation of [1, 0] is 0.5
    assert_almost_equal(output, 0.5)
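
# A short, self-contained check (added) that ndimage.standard_deviation uses
# the population (ddof=0) definition, matching NumPy's default:
import numpy as np
from scipy import ndimage

x = np.array([1.0, 3.0, 8.0])
assert np.isclose(ndimage.standard_deviation(x), x.std(ddof=0))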