Example #1
def get_sky_ref_patch(f_gray_one_third, sky_prob_map):
    """
    Return mean and std of the *largest* sky patch detected in the input image.
    Also clears patches other than the largest (sets their sky_prob to 0).
    Will MODIFY sky_prob_map
    """
    lbl, nlbl = ndi.label(f_gray_one_third)
    lbls = np.arange(1, nlbl+1)
    _res = ndi.labeled_comprehension(f_gray_one_third, lbl, lbls, _var_lenpos, object, 0, True)
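    # pass_positions=True (the last argument) makes labeled_comprehension call _var_lenpos
    # with (values, flat positions) for each labelled patch; given the unpacking below, the
    # helper is expected to return a (values, pixel count, positions) tuple per label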

    val = []; lenpos = []; pos_arr = []
    for idx, (_val, _lenpos, _pos) in np.ndenumerate(_res):
        val.append(_val); lenpos.append(_lenpos); pos_arr.append(_pos)

    pos_am = np.array(lenpos).argmax() # pos of largest sky ref patch
    print 'max patch amount, patch: ', lenpos[pos_am], val[pos_am]
    mean = np.mean(val[pos_am])
    std = np.std(val[pos_am])

    # clear patches other than the largest (set sky_prob to 0)
    # import ipdb; ipdb.set_trace()
    for i, _p in enumerate(pos_arr):
        if i != pos_am:
            sky_prob_map.ravel()[_p] = 0.
    return mean, std
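
A minimal usage sketch, assuming ndi is scipy.ndimage, np is numpy, and that _var_lenpos is a module-level helper roughly like the hypothetical one below (returning per-label values, pixel count and flat positions, to match the unpacking above); the random input is only an illustration:

import numpy as np
import scipy.ndimage as ndi

def _var_lenpos(values, positions):
    # hypothetical helper: per-label values, pixel count and flat positions
    return values, len(positions), positions

sky_prob_map = np.random.rand(60, 80).astype(np.float32)
sky_mask = (sky_prob_map > 0.5).astype(np.uint8)        # stand-in for f_gray_one_third
mean, std = get_sky_ref_patch(sky_mask, sky_prob_map)   # zeroes sky_prob outside the largest patch
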
def ehabitat(ecor,nw,nwpathout): # main function (ecoregion id, network input directory, network output directory); note: leave both network directory arguments empty when all the data are in the same local folder

	global	nwpath
	if nw=='':
		nwpath = os.getcwd()
	else:
		nwpath = nw
		
		# to create directory :
		#~~~~~~~~~~~~~~~~~~~~
		
	if gmaps == 0: # to check if global maps (input variables) are loaded, if not, do it !
		initglobalmaps()
	if nwpathout=='': 
		#outdir = 'results'	#	ToDo: locally	create	folder "results"	if it	does	not	exist!
		outdir = os.path.join(os.path.sep, os.getcwd(), 'results')
		safelyMakeDir(outdir)
	else:
		#outdir = nwpathout+'/results'	#	SHARED	FOLDER	PATH
		outdir = os.path.join(os.path.sep, nwpathout, 'results')
		safelyMakeDir(outdir)
		#~~~~~~~~~~~~~~~~~~~~~~~
		
	# to create variables	
	tseaspamin = tseaspamax = tmax_warm_Mpamin = tmax_warm_Mpamax = prepamin = prepamax = preD_Mpamin = preD_Mpamax = aridpamin = aridpamax = ndviminpamin = ndviminpamax = ndvimaxpamin = ndvimaxpamax = treehpamin = treehpamax = treeDpamin = treeDpamax = soilpamin = soilpamax = slopepamin = slopepamax = None # (treepamin/treepamax and ndwipamin/ndwipamax are not used)
	tseaspamean = tmax_warm_Mpamean = prepamean = preD_Mpamean = aridpamean = ndviminpamean = ndvimaxpamean = treehpamean = treeDpamean = soilpamean = slopepamean = None # (treepamean and ndwipamean are not used)
	
	s = nd.generate_binary_structure(2,2)	# pattern defining which pixels are aggregated, useful if we want to work on similar areas; connectivity structure used for the landscape patches, for landscape pattern analyses
											
	#	LOCAL FOLDER
	csvname1 = os.path.join(os.path.sep, outdir, 'ecoregs_done.csv') # create name of the file
	print csvname1
	if os.path.isfile(csvname1) == False: # if the file ecoregs_done doesn't exist :
		wb = open(csvname1,'a') # create the file
		wb.write('None') # first line
		wb.write('\n') # next line
		wb.close() # close the file
	#	LOCAL FOLDER	
	csvname = os.path.join(os.path.sep, outdir, 'hri_results.csv') # same as before
	print csvname
	if os.path.isfile(csvname) == False:
		wb = open(csvname,'a')
		wb.write('ecoregion wdpaid averpasim hr2aver pxpa hr1insumaver hriaver nfeatsaver lpratio lpratio2 numpszok lpmaxsize aggregation tseaspamin tseaspamax tmax_warm_Mpamin tmax_warm_Mpamax prepamin prepamax preD_Mpamin preD_Mpamax aridpamin aridpamax ndviminpamin ndviminpamax ndvimaxpamin ndvimaxpamax treehpamin treehpamax treeDpamin treeDpamax soilpamin soilpamax slopepamin slopepamax tseaspamean tmax_warm_Mpamean prepamean preD_Mpamean aridpamean ndviminpamean ndvimaxpamean treehpamean treeDpamean soilpamean slopepamean') #      treepamin treepamax treepamean ndwipamin ndwipamax ndwipamean
		wb.write('\n')
		wb.close()
	ef = 'eco_'+str(ecor)+'.tif' # create the name of the tif file based on the ecoregion id (input)
	ecofile = os.path.join(os.path.sep, nwpath, 'ecoregs', ef) # file + path
	#ecofile = os.path.join(os.path.sep, nwpath, os.path.sep,'ecoregs', os.path.sep, ef)
	print ecofile
	avail = os.path.isfile(ecofile) # does this ecoregion file exist in the folder? T/F
	if avail == True: # if the file exists in the given path (if it doesn't, this ecoregion is skipped without error!):
		eco_csv = str(ecor)+'.csv' # str = to convert ecor to a string, to have a name
		print eco_csv
		ecoparksf = os.path.join(os.path.sep, nwpath, 'pas', eco_csv)
		#ecoparksf = os.path.join(os.path.sep, nwpath, os.path.sep, 'pas', os.path.sep, eco_csv)
		print ecoparksf
		#ecoparksf = nwpath+'/pas/'+str(ecor)+'.csv'
		src_ds_eco = gdal.Open(ecofile) # for each ecoregion, open the tif
		eco = src_ds_eco.GetRasterBand(1) # band 1 called eco
		eco_mask0 = eco.ReadAsArray(0,0,eco.XSize,eco.YSize).astype(np.int32) # read the values of the raster as a matrix as integers (red-yellow 1/0 raster values ex:555.tif), with the number of cells we want to read 'eco.XSize,eco.YSize' (read total size of rows and columns)
		eco_mask = eco_mask0.flatten() # convert columns and rows of a matrix to one vector (eco.XSize*eco.YSize)
		gt_eco = src_ds_eco.GetGeoTransform() # get the geotransform of the raster (corner coordinates, pixel size)
		print 'eco mask'
		xoff = int((gt_eco[0]-gt_tseas_global[0])/5000)
		yoff = int((gt_tseas_global[3]-gt_eco[3])/5000)
		tseas_eco_bb0 = tseas_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		tseas_eco_bb = tseas_eco_bb0.flatten()
		tseas_eco0 = np.where(eco_mask == 1,(tseas_eco_bb),(0))
		tseas_eco = np.where(tseas_eco0 == 65535.0,	(float('NaN')),(tseas_eco0))
		masktseas = np.isnan(tseas_eco)
		tseas_eco[masktseas] = np.interp(np.flatnonzero(masktseas),	np.flatnonzero(~masktseas),	tseas_eco[~masktseas])
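		# np.interp does a 1-D linear interpolation over the flattened array: NaN positions
		# get values interpolated from the nearest valid positions on either side; the same
		# gap-filling pattern is repeated for every input variable below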
		print 'eco tseas'
		xoff = int((gt_eco[0]-gt_tmax_warm_M_global[0])/5000)
		yoff = int((gt_tmax_warm_M_global[3]-gt_eco[3])/5000)
		tmax_warm_M_eco_bb0 = tmax_warm_M_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		tmax_warm_M_eco_bb = tmax_warm_M_eco_bb0.flatten()
		tmax_warm_M_eco0 = np.where(eco_mask == 1,	(tmax_warm_M_eco_bb),(0))
		tmax_warm_M_eco = np.where(tmax_warm_M_eco0 == 65535.0,	(float('NaN')),(tmax_warm_M_eco0))
		masktmax_warm_M = np.isnan(tmax_warm_M_eco)
		tmax_warm_M_eco[masktmax_warm_M] = np.interp(np.flatnonzero(masktmax_warm_M),	np.flatnonzero(~masktmax_warm_M),	tmax_warm_M_eco[~masktmax_warm_M])
		print 'eco tmax_warm_M'
		xoff = int((gt_eco[0]-gt_pre_global[0])/5000)
		yoff = int((gt_pre_global[3]-gt_eco[3])/5000)
		pre_eco_bb0 = pre_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		pre_eco_bb = pre_eco_bb0.flatten()
		pre_eco0 = np.where(eco_mask == 1,	(pre_eco_bb),(0))
		pre_eco = np.where(pre_eco0 == 65535.0,	(float('NaN')),(pre_eco0))
		maskpre = np.isnan(pre_eco)
		pre_eco[maskpre] = np.interp(np.flatnonzero(maskpre),	np.flatnonzero(~maskpre),	pre_eco[~maskpre])
		print 'eco pre'		
		xoff = int((gt_eco[0]-gt_preD_M_global[0])/5000)
		yoff = int((gt_preD_M_global[3]-gt_eco[3])/5000)
		preD_M_eco_bb0 = preD_M_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		preD_M_eco_bb = preD_M_eco_bb0.flatten()
		preD_M_eco0 = np.where(eco_mask == 1,	(preD_M_eco_bb),(0))
		preD_M_eco = np.where(preD_M_eco0 == 65535.0,	(float('NaN')),(preD_M_eco0))
		maskpreD_M = np.isnan(preD_M_eco)
		preD_M_eco[maskpreD_M] = np.interp(np.flatnonzero(maskpreD_M),	np.flatnonzero(~maskpreD_M),	preD_M_eco[~maskpreD_M])
		print 'eco preD_M'
		xoff = int((gt_eco[0]-gt_arid_global[0])/5000)
		yoff = int((gt_arid_global[3]-gt_eco[3])/5000)
		arid_eco_bb0 = arid_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		arid_eco_bb = arid_eco_bb0.flatten()
		arid_eco0 = np.where(eco_mask == 1,	(arid_eco_bb),(0))
		arid_eco = np.where(arid_eco0 == 65535.0,	(float('NaN')),(arid_eco0))
		maskarid = np.isnan(arid_eco)
		arid_eco[maskarid] = np.interp(np.flatnonzero(maskarid),	np.flatnonzero(~maskarid),	arid_eco[~maskarid])
		print 'eco arid'
		xoff = int((gt_eco[0]-gt_ndvimin_global[0])/5000)
		yoff = int((gt_ndvimin_global[3]-gt_eco[3])/5000)
		ndvimin_eco_bb0 = ndvimin_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		ndvimin_eco_bb = ndvimin_eco_bb0.flatten()
		ndvimin_eco0 = np.where(eco_mask == 1,	(ndvimin_eco_bb),(0))
		ndvimin_eco = np.where(ndvimin_eco0 == 65535.0,	(float('NaN')),(ndvimin_eco0))
		maskndvimin = np.isnan(ndvimin_eco)
		ndvimin_eco[maskndvimin] = np.interp(np.flatnonzero(maskndvimin),	np.flatnonzero(~maskndvimin),	ndvimin_eco[~maskndvimin])
		print 'eco ndvimin'
		xoff = int((gt_eco[0]-gt_ndvimax_global[0])/5000)
		yoff = int((gt_ndvimax_global[3]-gt_eco[3])/5000)
		ndvimax_eco_bb0 = ndvimax_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		ndvimax_eco_bb = ndvimax_eco_bb0.flatten()
		ndvimax_eco0 = np.where(eco_mask == 1,	(ndvimax_eco_bb),(0))
		ndvimax_eco = np.where(ndvimax_eco0 == 65535.0,	(float('NaN')),(ndvimax_eco0))
		maskndvimax = np.isnan(ndvimax_eco)
		ndvimax_eco[maskndvimax] = np.interp(np.flatnonzero(maskndvimax),	np.flatnonzero(~maskndvimax),	ndvimax_eco[~maskndvimax])
		print 'eco ndvimax'
		xoff = int((gt_eco[0]-gt_treeh_global[0])/5000)
		yoff = int((gt_treeh_global[3]-gt_eco[3])/5000)
		treeh_eco_bb0 = treeh_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		treeh_eco_bb = treeh_eco_bb0.flatten()
		treeh_eco0 = np.where(eco_mask == 1, (treeh_eco_bb),(0))
		treeh_eco = np.where(treeh_eco0 == 65535.0,	(float('NaN')),(treeh_eco0))
		masktreeh = np.isnan(treeh_eco)
		treeh_eco[masktreeh] = np.interp(np.flatnonzero(masktreeh),	np.flatnonzero(~masktreeh),	treeh_eco[~masktreeh])
		print 'eco treeh'
		xoff = int((gt_eco[0]-gt_treeD_global[0])/5000)
		yoff = int((gt_treeD_global[3]-gt_eco[3])/5000)
		treeD_eco_bb0 = treeD_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		treeD_eco_bb = treeD_eco_bb0.flatten()
		treeD_eco0 = np.where(eco_mask == 1,	(treeD_eco_bb),(0))
		treeD_eco = np.where(treeD_eco0 == 1.8e+308,	(float('NaN')),(treeD_eco0))
		masktreeD = np.isnan(treeD_eco)
		treeD_eco[masktreeD] = np.interp(np.flatnonzero(masktreeD),	np.flatnonzero(~masktreeD),	treeD_eco[~masktreeD])
		print 'eco treeD'
		xoff = int((gt_eco[0]-gt_soil_global[0])/5000) # compute the beginning of the ecoregion (x and y offsets): gt_eco = ecoregion geotransform, gt_soil_global = global geotransform, converted to pixel units
		yoff = int((gt_soil_global[3]-gt_eco[3])/5000)
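		# xoff/yoff are the pixel offsets of the ecoregion window inside the global raster;
		# e.g. an ecoregion origin 250000 m east of the global origin gives xoff = 250000/5000
		# = 50 columns (the division by 5000 assumes 5000 m square pixels)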
		soil_eco_bb0 = soil_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32) # as eco_mask0 before but read from the global map, to know where the ecoregion is and how big it is globally (bounding box = square); astype(np.float32) to keep the values
		soil_eco_bb = soil_eco_bb0.flatten() # to put the values in one single vector
		soil_eco0 = np.where(eco_mask == 1, (soil_eco_bb),(0)) # where the ecoregion mask is 1 (= ecoregion present) => take the values of the input raster, because the bounding box is a square which contains the ecoregion
		soil_eco = np.where(soil_eco0 == 65535.0, (float('NaN')),(soil_eco0)) # where the value is 65535 (= the NoData value), replace it by NaN
		masksoil = np.isnan(soil_eco) # to create new mask, when you have nan => TRUE if not => FALSE
		soil_eco[masksoil] = np.interp(np.flatnonzero(masksoil), np.flatnonzero(~masksoil), soil_eco[~masksoil]) # where the mask is TRUE, interpolate from the neighbourhood to get values instead of NaN
		print 'eco soil'
		xoff = int((gt_eco[0]-gt_slope_global[0])/5000)
		yoff = int((gt_slope_global[3]-gt_eco[3])/5000)
		slope_eco_bb0 = slope_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		slope_eco_bb = slope_eco_bb0.flatten()
		slope_eco0 = np.where(eco_mask == 1,	(slope_eco_bb),(0))
		slope_eco = np.where(slope_eco0 == 65535.0,	(float('NaN')),(slope_eco0))
		maskslope = np.isnan(slope_eco)
		slope_eco[maskslope] = np.interp(np.flatnonzero(maskslope),	np.flatnonzero(~maskslope),	slope_eco[~maskslope])
		print 'eco slope'
		ind_eco0 = np.column_stack((tseas_eco,tmax_warm_M_eco,pre_eco,preD_M_eco,arid_eco,ndvimin_eco,ndvimax_eco,treeh_eco,treeD_eco,soil_eco,slope_eco)) # we make an array putting all the columns of each variable together #    tree_eco,,ndwi_eco
		print 'ecovars stacked'

		print ecoparksf
		pa_list0 = np.genfromtxt(ecoparksf,dtype='int')	# create this file in subpas! read the txt file (eco_csv file = 555.csv), i.e. the PAs of one ecoregion
		pa_list = np.unique(pa_list0) # take each PA of the list just once (in case a PA id appears several times)
		n = len(pa_list) # to have the number of pas computed
		for	px in range(0,n): #	0,n

			pa = pa_list[px] # for each pa 
			print pa

			outfile = os.path.join(os.path.sep, outdir, str(ecor)+'_'+str(pa)+'.tif') # prepare the .tif files to be filled
			outfile2 = os.path.join(os.path.sep, outdir, str(ecor)+'_'+str(pa)+'_lp.tif')
			outfile3 = os.path.join(os.path.sep, outdir, str(ecor)+'_'+str(pa)+'_mask.tif')
			#outfile = outdir+'/'+str(ecor)+'_'+str(pa)+'.tif'	#	LOCAL FOLDER
			pa_infile = 'pa_'+str(pa)+'.tif'

			pa4 = os.path.join(os.path.sep, nwpath, 'pas', pa_infile) # pas input 
			#pa4 = os.path.join(os.path.sep, nwpath, os.path.sep, 'pas', os.path.sep, pa_infile)
			print pa4
			#pa4 = nwpath+'/pas/pa_'+str(pa)+'.tif'

			dropcols = np.arange(11,dtype=int) # create vector from 0 to 10 for the 11 variables
			done = os.path.isfile(outfile) # outfile is already created
			avail2 = os.path.isfile(pa4) # check if input is available
			if done == False and avail2 == True: # if the pa4 file exists but the HRI computation is not done yet (* see ptt)
				pafile=pa4
				src_ds_pa = gdal.Open(pafile) # open the pa with gdal
				par = src_ds_pa.GetRasterBand(1) # same than previously with ecoreg
				pa_mask0 = par.ReadAsArray(0,0,par.XSize,par.YSize).astype(np.int32)
				pa_mask = pa_mask0.flatten()
				ind = pa_mask >	0 # create index T/F, T if >0.
				go = 1
				sum_pa_mask = sum(pa_mask[ind])# create the sum of the 1 values to know the size of the pa
				if sum_pa_mask < 2: go = 0	#	not	processing	areas	smaller	than	2	pixels, if size too small, don't go
				print sum_pa_mask # print the size of the pa
				sum_pa_mask_inv = len(pa_mask[pa_mask == 0]) # what is the size of 0 pixels inside the pa
				print sum_pa_mask_inv # print size of 0
				print len(pa_mask)
				ratiogeom = 5000 # when the bounding box is bigger than the pa (due to a WDPA database error)
				if sum_pa_mask > 0: ratiogeom = sum_pa_mask_inv/sum_pa_mask # total pixel outside much bigger than pixel in pa ?
				#print ratiogeom
				gt_pa = src_ds_pa.GetGeoTransform() # get coordinates of pa
				xoff = int((gt_pa[0]-gt_pre_global[0])/5000) # compute from the global map xoff, yoff of pa
				yoff = int((gt_pre_global[3]-gt_pa[3])/5000)
				if xoff>=0 and yoff>=0 and go == 1: # xoff/yoff >= 0 to be sure we are not on a border of the ecoregion, and our pa is > 2 pixels
					num_bands=src_ds_eco.RasterCount # to have the number of bands in one ecoreg, determined as 1 in the script
					driver = gdal.GetDriverByName("GTiff") # create a new tif file where to store the output (hri)
					dst_options = ['COMPRESS=LZW'] # compression method of the files as many will be processed
					dst_ds = driver.Create(	outfile,src_ds_eco.RasterXSize,src_ds_eco.RasterYSize,num_bands,gdal.GDT_Float32,dst_options) # to create the tif file empty
					dst_ds.SetGeoTransform(	src_ds_eco.GetGeoTransform())					
					dst_ds.SetProjection(src_ds_eco.GetProjectionRef()) # set the file projection; preparation of the tif file is done
					
					# 1. tseas
					xoff = int((gt_pa[0]-gt_tseas_global[0])/5000)
					yoff = int((gt_tseas_global[3]-gt_pa[3])/5000)
					tseas_pa_bb0 = tseas_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					tseas_pa_bb = tseas_pa_bb0.flatten()
					tseas_pa0 = tseas_pa_bb[ind]
					tseas_pa = np.where(tseas_pa0 == 65535.0,	(float('NaN')),(tseas_pa0))
					mask2tseas = np.isnan(tseas_pa)
					if mask2tseas.all() == True: # if all the variable values in a pa are NA, flag this column in dropcols
						dropcols[0] = -1 # a negative value drops the column (-0 would stay 0 and keep it)
					else: # if there are also non NA values
						tseas_pa[mask2tseas] = np.interp(np.flatnonzero(mask2tseas),	np.flatnonzero(~mask2tseas),	tseas_pa[~mask2tseas])
						tseas_pa = np.random.random_sample(len(tseas_pa),)/5000 + tseas_pa
						print 'pa tseas'

						tseaspamin = round(tseas_pa.min(),2)
						tseaspamax = round(tseas_pa.max(),2)
						tseaspamean = round(np.mean(tseas_pa),2)
						print tseaspamin
						print tseaspamax
						tseasdiff = abs(tseas_pa.min()-tseas_pa.max())
						if tseasdiff < 0.001: dropcols[0] = -1 # drop column 0 if the variable is effectively constant (-0 would not be negative)
					
					# 2. tmax_warm_M
					xoff = int((gt_pa[0]-gt_tmax_warm_M_global[0])/5000)
					yoff = int((gt_tmax_warm_M_global[3]-gt_pa[3])/5000)
					tmax_warm_M_pa_bb0 = tmax_warm_M_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					tmax_warm_M_pa_bb = tmax_warm_M_pa_bb0.flatten()
					tmax_warm_M_pa0 = tmax_warm_M_pa_bb[ind]
					tmax_warm_M_pa = np.where(tmax_warm_M_pa0 == 65535.0,	(float('NaN')),(tmax_warm_M_pa0))
					mask2tmax_warm_M = np.isnan(tmax_warm_M_pa)
					if mask2tmax_warm_M.all() == True:
						dropcols[1] = -1
					else:
						tmax_warm_M_pa[mask2tmax_warm_M] = np.interp(np.flatnonzero(mask2tmax_warm_M),	np.flatnonzero(~mask2tmax_warm_M),	tmax_warm_M_pa[~mask2tmax_warm_M])
						tmax_warm_M_pa = np.random.random_sample(len(tmax_warm_M_pa),)/5000 + tmax_warm_M_pa
						print 'pa tmax_warm_M'

						tmax_warm_Mpamin = round(tmax_warm_M_pa.min(),2)
						tmax_warm_Mpamax = round(tmax_warm_M_pa.max(),2)
						tmax_warm_Mpamean = round(np.mean(tmax_warm_M_pa),2)
						print tmax_warm_Mpamin
						print tmax_warm_Mpamax
						tmax_warm_Mdiff = abs(tmax_warm_M_pa.min()-tmax_warm_M_pa.max())
						if tmax_warm_Mdiff < 0.001: dropcols[1] = -1			
						
					# 3. pre
					xoff = int((gt_pa[0]-gt_pre_global[0])/5000)
					yoff = int((gt_pre_global[3]-gt_pa[3])/5000)
					pre_pa_bb0 = pre_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					pre_pa_bb = pre_pa_bb0.flatten()
					pre_pa0 = pre_pa_bb[ind]
					pre_pa = np.where(pre_pa0 == 65535.0,	(float('NaN')),(pre_pa0))
					mask2pre = np.isnan(pre_pa)
					if mask2pre.all() == True:
						dropcols[2] = -2
					else:
						pre_pa[mask2pre] = np.interp(np.flatnonzero(mask2pre),	np.flatnonzero(~mask2pre),	pre_pa[~mask2pre])
						pre_pa = np.random.random_sample(len(pre_pa),)/5000 + pre_pa
						print 'pa pre'

						prepamin = round(pre_pa.min(),2)
						prepamax = round(pre_pa.max(),2)
						prepamean = round(np.mean(pre_pa),2)
						print prepamin
						print prepamax
						prediff = abs(pre_pa.min()-pre_pa.max())
						if prediff < 0.001: dropcols[2] = -2
						
					# # 4. preD_M
					xoff = int((gt_pa[0]-gt_preD_M_global[0])/5000)
					yoff = int((gt_preD_M_global[3]-gt_pa[3])/5000)
					preD_M_pa_bb0 = preD_M_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					preD_M_pa_bb = preD_M_pa_bb0.flatten()
					preD_M_pa0 = preD_M_pa_bb[ind]
					preD_M_pa = np.where(preD_M_pa0 == 65535.0,	(float('NaN')),(preD_M_pa0))
					mask2preD_M = np.isnan(preD_M_pa)
					if mask2preD_M.all() == True:
						dropcols[3] = -3
					else:
						preD_M_pa[mask2preD_M] = np.interp(np.flatnonzero(mask2preD_M),	np.flatnonzero(~mask2preD_M),	preD_M_pa[~mask2preD_M])
						preD_M_pa = np.random.random_sample(len(preD_M_pa),)/5000 + preD_M_pa
						print 'pa preD_M'

						preD_Mpamin = round(preD_M_pa.min(),2)
						preD_Mpamax = round(preD_M_pa.max(),2)
						preD_Mpamean = round(np.mean(preD_M_pa),2)
						print preD_Mpamin
						print preD_Mpamax
						preD_Mdiff = abs(preD_M_pa.min()-preD_M_pa.max())
						if preD_Mdiff < 0.001: dropcols[3] = -3			
						
					# 5. arid
					xoff = int((gt_pa[0]-gt_arid_global[0])/5000)
					yoff = int((gt_arid_global[3]-gt_pa[3])/5000)
					arid_pa_bb0 = arid_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					arid_pa_bb = arid_pa_bb0.flatten()
					arid_pa0 = arid_pa_bb[ind]
					arid_pa = np.where(arid_pa0 == 65535.0,	(float('NaN')),(arid_pa0))
					mask2arid = np.isnan(arid_pa)
					if mask2arid.all() == True:
						dropcols[4] = -4
					else:
						arid_pa[mask2arid] = np.interp(np.flatnonzero(mask2arid),	np.flatnonzero(~mask2arid),	arid_pa[~mask2arid])
						arid_pa = np.random.random_sample(len(arid_pa),)/5000 + arid_pa
						print 'pa arid'

						aridpamin = round(arid_pa.min(),2)
						aridpamax = round(arid_pa.max(),2)
						aridpamean = round(np.mean(arid_pa),2)
						print aridpamin
						print aridpamax
						ariddiff = abs(arid_pa.min()-arid_pa.max())
						if ariddiff < 0.001: dropcols[4] = -4					
					
					# 6. ndvimin
					xoff = int((gt_pa[0]-gt_ndvimin_global[0])/5000)
					yoff = int((gt_ndvimin_global[3]-gt_pa[3])/5000)
					ndvimin_pa_bb0 = ndvimin_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					ndvimin_pa_bb = ndvimin_pa_bb0.flatten()
					ndvimin_pa0 = ndvimin_pa_bb[ind]
					ndvimin_pa = np.where(ndvimin_pa0 == 65535.0,	(float('NaN')),(ndvimin_pa0))
					mask2ndvimin = np.isnan(ndvimin_pa)
					if mask2ndvimin.all() == True:
						dropcols[5] = -5
					else:
						ndvimin_pa[mask2ndvimin] = np.interp(np.flatnonzero(mask2ndvimin),	np.flatnonzero(~mask2ndvimin),	ndvimin_pa[~mask2ndvimin])
						ndvimin_pa = np.random.random_sample(len(ndvimin_pa),)/5000 + ndvimin_pa
						print 'pa ndvimin'

						ndviminpamin = round(ndvimin_pa.min(),2)
						ndviminpamax = round(ndvimin_pa.max(),2)
						ndviminpamean = round(np.mean(ndvimin_pa),2)
						print ndviminpamin
						print ndviminpamax
						ndvimindiff = abs(ndvimin_pa.min()-ndvimin_pa.max())
						if ndvimindiff < 0.001: dropcols[5] = -5
					
					# 7. ndvimax
					xoff = int((gt_pa[0]-gt_ndvimax_global[0])/5000)
					yoff = int((gt_ndvimax_global[3]-gt_pa[3])/5000)
					ndvimax_pa_bb0 = ndvimax_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					ndvimax_pa_bb = ndvimax_pa_bb0.flatten()
					ndvimax_pa0 = ndvimax_pa_bb[ind]
					ndvimax_pa = np.where(ndvimax_pa0 == 65535.0,	(float('NaN')),(ndvimax_pa0))
					mask2ndvimax = np.isnan(ndvimax_pa)
					if mask2ndvimax.all() == True:
						dropcols[6] = -6
					else:
						ndvimax_pa[mask2ndvimax] = np.interp(np.flatnonzero(mask2ndvimax),	np.flatnonzero(~mask2ndvimax),	ndvimax_pa[~mask2ndvimax])
						ndvimax_pa = np.random.random_sample(len(ndvimax_pa),)/5000 + ndvimax_pa
						print 'pa ndvimax'

						ndvimaxpamin = round(ndvimax_pa.min(),2)
						ndvimaxpamax = round(ndvimax_pa.max(),2)
						ndvimaxpamean = round(np.mean(ndvimax_pa),2)
						print ndvimaxpamin
						print ndvimaxpamax
						ndvimaxdiff = abs(ndvimax_pa.min()-ndvimax_pa.max())
						if ndvimaxdiff < 0.001: dropcols[6] = -6
						
					# 8. treeh
					xoff = int((gt_pa[0]-gt_treeh_global[0])/5000) # start reading the treeh cover in the pa
					yoff = int((gt_treeh_global[3]-gt_pa[3])/5000)					
					treeh_pa_bb0 = treeh_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32) 
					treeh_pa_bb = treeh_pa_bb0.flatten()
					treeh_pa0 = treeh_pa_bb[ind] # to read the values inside the pa when we have 1 (higher than 0)
					treeh_pa = np.where(treeh_pa0 == 255.0, (float('NaN')),(treeh_pa0))
					mask2treeh = np.isnan(treeh_pa) # isnan makes an index of T/F, to know which is nan
					if mask2treeh.all() == True: # if all the variable values in a pa are NA, flag this column in dropcols
						dropcols[7] = -7
					else: # if there are also non NA values
						treeh_pa[mask2treeh] = np.interp(np.flatnonzero(mask2treeh), np.flatnonzero(~mask2treeh), treeh_pa[~mask2treeh]) # do the neighbourhood interpolation
						treeh_pa = np.random.random_sample(len(treeh_pa),)/5000 + treeh_pa # add random noise of insignificant magnitude so that not all pixels of a variable share the same value and to avoid perfect (inverse) correlation between two variables (e.g. treeD/treeh), otherwise the Mahalanobis distance cannot be computed
						print 'pa treeh'

						treehpamin = round(treeh_pa.min(),2) # to compute the min of the variable
						treehpamax = round(treeh_pa.max(),2) # to compute the max of the variable
						treehpamean = round(np.mean(treeh_pa),2) # to compute the mean of the variable
						print treehpamin
						print treehpamax
						print treehpamean
						treehdiff = abs(treeh_pa.min()-treeh_pa.max())
						if treehdiff < 0.001: dropcols[7] = -7 # if the difference is too tiny, i.e. the value of the variable doesn't change => don't use it
					
					# 9. treeD
					xoff = int((gt_pa[0]-gt_treeD_global[0])/5000)
					yoff = int((gt_treeD_global[3]-gt_pa[3])/5000)
					treeD_pa_bb0 = treeD_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					treeD_pa_bb = treeD_pa_bb0.flatten()
					treeD_pa0 = treeD_pa_bb[ind]
					treeD_pa = np.where(treeD_pa0 == 1.8e+308,	(float('NaN')),(treeD_pa0))
					mask2treeD = np.isnan(treeD_pa)
					if mask2treeD.all() == True:
						dropcols[8] = -8
					else:
						treeD_pa[mask2treeD] = np.interp(np.flatnonzero(mask2treeD),	np.flatnonzero(~mask2treeD),	treeD_pa[~mask2treeD])
						treeD_pa = np.random.random_sample(len(treeD_pa),)/5000 + treeD_pa
						print 'pa treeD'

						treeDpamin = round(treeD_pa.min(),2)
						treeDpamax = round(treeD_pa.max(),2)
						treeDpamean = round(np.mean(treeD_pa),2)
						print treeDpamin
						print treeDpamax
						hdiff = abs(treeD_pa.min()-treeD_pa.max())
						if hdiff < 0.001: dropcols[8] = -8	
						
					# 10. soil
					xoff = int((gt_pa[0]-gt_soil_global[0])/5000)
					yoff = int((gt_soil_global[3]-gt_pa[3])/5000)
					soil_pa_bb0 = soil_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					soil_pa_bb = soil_pa_bb0.flatten()
					soil_pa0 = soil_pa_bb[ind]
					soil_pa = np.where(soil_pa0 == 65535.0,	(float('NaN')),(soil_pa0))
					mask2soil = np.isnan(soil_pa)
					if mask2soil.all() == True:
						dropcols[9] = -9
					else:
						soil_pa[mask2soil] = np.interp(np.flatnonzero(mask2soil),	np.flatnonzero(~mask2soil),	soil_pa[~mask2soil])
						soil_pa = np.random.random_sample(len(soil_pa),)/5000 + soil_pa
						print 'pa soil'

						soilpamin = round(soil_pa.min(),2)
						soilpamax = round(soil_pa.max(),2)
						soilpamean = round(np.mean(soil_pa),2)
						print soilpamin
						print soilpamax
						soildiff = abs(soil_pa.min()-soil_pa.max())
						if soildiff < 0.001: dropcols[9] = -9
						
					# 11. slope
					xoff = int((gt_pa[0]-gt_slope_global[0])/5000)
					yoff = int((gt_slope_global[3]-gt_pa[3])/5000)
					slope_pa_bb0 = slope_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					slope_pa_bb = slope_pa_bb0.flatten()
					slope_pa0 = slope_pa_bb[ind]
					slope_pa = np.where(slope_pa0 == 65535.0,	(float('NaN')),(slope_pa0))
					mask2slope = np.isnan(slope_pa)
					if mask2slope.all() == True:
						dropcols[10] = -10
					else:
						slope_pa[mask2slope] = np.interp(np.flatnonzero(mask2slope),	np.flatnonzero(~mask2slope),	slope_pa[~mask2slope])
						slope_pa = np.random.random_sample(len(slope_pa),)/5000 + slope_pa
						print 'pa slope'

						slopepamin = round(slope_pa.min(),2)
						slopepamax = round(slope_pa.max(),2)
						slopepamean = round(np.mean(slope_pa),2)
						print slopepamin
						print slopepamax
						slopediff = abs(slope_pa.min()-slope_pa.max())
						if slopediff < 0.001: dropcols[10] = -10
	
					cols = dropcols[dropcols>=0] # select the "columns" that are still non-negative (e.g. if hdiff < 0.001: dropcols[8] = -8 => that column is not used)
					ind_pa0 = np.column_stack((tseas_pa,tmax_warm_M_pa,pre_pa,preD_M_pa,arid_pa,ndvimin_pa,ndvimax_pa,treeh_pa,treeD_pa,soil_pa,slope_pa)) # stack of all the columns (even the negative)  tree_pa, ndwi_pa,
					ind_pa = ind_pa0[:,cols] # from the previous stack, select only the kept (non-negative) columns for the PA
					ind_eco = ind_eco0[:,cols] # from the previous stack, select only the kept (non-negative) columns for the ecoregion
					print ind_pa.shape
					hr1sum = hr1insum = indokpsz = pszok = sumpszok = lpratio2 = numpszok = hr1averpa = hr3aver = hr2aver = pszmax = num_featuresaver = lpratio = hr1medianpa = hr1insumaver = pxpa = aggregation = None
					print "PA masked"
					#print ind_pa
					if ind_pa.shape[0]>2 and ind_pa.shape[1]>1: #if we have at least 3 pixels per pa and 2 variables, then we start mahalanobis computation
						Ymean = np.mean(ind_pa,axis=0) #mean of each of the positive variables for one pa
						print 'Max. mean value is '+ str(Ymean.max()) # print the maximum of the mean of the 6 variables
						print "Ymean ok"
						Ycov = np.cov(ind_pa,rowvar=False) # do the covariance among the positive variables
						print 'Max. cov value is '+ str(Ycov.max())
						print "Ycov	ok"
						#mh = mahalanobis_distances(Ymean,	Ycov,	ind_eco,	parallel=False)
						#mh = mahalanobis_distances(Ymean,	Ycov,	ind_eco,	parallel=True)
						mh2 = mahalanobis_distances_scipy(Ymean,	Ycov,	ind_eco,	parallel=True) # to compute the mahalanobis distance (in parallel)
						#mh2 = mahalanobis_distances_scipy(Ymean,	Ycov,	ind_eco,	parallel=False)
						maxmh=mh2.max() # max to check there is no NA
						print 'Max. mh value is '+ str(maxmh)
						print 'Max. mh value is nan: '+ str(np.isnan(maxmh))
						mh = mh2*mh2 # square the Mahalanobis distance so it follows a chi-square distribution (the distance itself is a square root)
						print "mh ok" # the chi-square transform below maps the values into [0,1]
						pmh = chisqprob(mh,len(cols)).reshape((eco.YSize,eco.XSize)) # chi-square survival probability of mh with len(cols) degrees of freedom, reshaped from a vector into a 2D (YSize x XSize) matrix
						# pmhh = np.where(pmh	<=	0.001,None,	pmh) # if the value in pmh is really low, put NA (chi2 values goes from 0 to 1)
						# print "pmh ok"	#	quitar	valores	muy	bajos!
						# pmhhmax = pmhh.max() # max should be close to 1
						# print 'Max. similarity value is '+ str(pmhhmax) 
						# dst_ds.GetRasterBand(1).WriteArray(pmhh) #put values of pmhh in ecoregion map (dst_ds, l.399)
						# dst_ds = None # close the file and save it (with hri inside)
						# hr11 = np.where(pmhh>0,1,0) # 0.5						
						print "pmh ok"	# remove very low values!
						pmhhmax = pmh.max() # max should be close to 1
						print 'Max. similarity value is '+ str(pmhhmax) 
						dst_ds.GetRasterBand(1).WriteArray(pmh) #put values of pmh in ecoregion map (dst_ds, l.399)
						dst_ds = None # close the file and save it (with hri inside)
						hr11 = np.where(pmh>0,1,0) # 0.5
						hr1 = hr11.flatten()
						hr1sum = sum(hr1)
						print 'Number of pixels with similarity higher than 0 is '+str(hr1sum)
						hr1insumaver = hr1insum = 0
						hr1sumaver = hr1sum
						src_ds_sim = gdal.Open(outfile)
						sim = src_ds_sim.GetRasterBand(1)
						gt_sim = src_ds_sim.GetGeoTransform()
						xoff = int((gt_pa[0]-gt_sim[0])/5000)
						yoff = int((gt_sim[3]-gt_pa[3])/5000)
						xextentpa = xoff + par.XSize
						yextentpa = yoff + par.YSize
						xless = sim.XSize - xextentpa
						yless = sim.YSize - yextentpa
						xsize = par.XSize
						ysize = par.YSize
						if xoff>0 and yoff>0 and pmhhmax>0.01 and hr1sum>1 and not np.isnan(maxmh): #and ratiogeom < 100: # NaN check via np.isnan (a != comparison with NaN is always True); also checks that results are not empty

							# reading the similarity ecoregion without the PA (tmp mask) : for landscape metrics
							os.system('gdal_merge.py '+str(ecofile)+' '+str(pa4)+' -o '+str(outfile3)+' -ot Int32') # the GDAL command line tools (gdal_merge.py) must be installed, otherwise this fails
							hri_pa_bb03 = sim.ReadAsArray().astype(np.float32)
							hri_pa_bb3 = hri_pa_bb03.flatten()
							
							src_ds_sim2 = gdal.Open(outfile3)
							sim2 = src_ds_sim2.GetRasterBand(1)
							gt_sim2 = src_ds_sim2.GetGeoTransform()
							hri_pa_bb02 = sim2.ReadAsArray().astype(np.int32)
							#hri_pa_bb2 = hri_pa_bb02.flatten()
							hri_pa_bb02_max = hri_pa_bb02.max()
							print 'PA: '+str(pa)
							print 'PA (= max) value from mask = '+str(hri_pa_bb02_max)
							if hri_pa_bb02.shape == hri_pa_bb03.shape:
							 hri_pa02 = np.where(hri_pa_bb02 == pa,0,hri_pa_bb03) # hri_pa_bb02_max


							 if xless < 0: xsize = xsize + xless
							 if yless < 0: ysize = ysize + yless
							 hri_pa_bb0 = sim.ReadAsArray(xoff,yoff,xsize,ysize).astype(np.float32)
							 hri_pa_bb = hri_pa_bb0.flatten()
							 indd = hri_pa_bb > 0
							 hri_pa0 = hri_pa_bb[indd]
							 print 'Total number of pixels with similarity values in PA: '+str(len(hri_pa0))
							 hr1averpa = round(np.mean(hri_pa0[~np.isnan(hri_pa0)]),2) # compute the mean similarity inside a pa
							 #print hr1averpa
							 #hr1medianpa = np.median(hri_pa0[~np.isnan(hri_pa0)])
							 print 'mean similarity in the park is '+str(hr1averpa)
							 #hr1insum = sum(np.where(hri_pa0 >= 0.5,	1,0))	#	use	hr1averpa	as	threshold	instead!						
							 ##hr1inaver = np.where(hri_pa0 >= hr1averpa,	1,0)
							 ##hr1insumaver = sum(hr1inaver)
							 #print hr1insum
							 ##labeled_arrayin, num_featuresin = nd.label(hr1inaver,	structure=s)
							 hr1averr = np.where(hri_pa02 >= hr1averpa,	1,0) # pmhh
							 hr1aver = hr1averr.flatten()
							 print 'Total number of pixels with similarity values in ECO: '+str(sum(hr1aver))
							 labeled_arrayaver, num_featuresaver = nd.label(hr1averr,	structure=s)
							 print 'Nr of similar patches found: '+str(num_featuresaver)
							 if num_featuresaver > 0:
							  lbls = np.arange(1, num_featuresaver+1)
							  psizes = nd.labeled_comprehension(labeled_arrayaver, labeled_arrayaver, lbls, np.count_nonzero, float, 0) #-1
							  pszmax = psizes.max()#-hr1insumaver
							  dst_ds2 = driver.Create(outfile2,src_ds_eco.RasterXSize,src_ds_eco.RasterYSize,num_bands,gdal.GDT_Int32,dst_options)
							  dst_ds2.SetGeoTransform(src_ds_eco.GetGeoTransform())
							  dst_ds2.SetProjection(src_ds_eco.GetProjectionRef())
							  dst_ds2.GetRasterBand(1).WriteArray(labeled_arrayaver)
							  dst_ds2 = None
							  #num_feats = num_features - num_featuresaver
							  hr1sumaver = sum(hr1aver)
							  hr2aver = hr1sumaver #- hr1insumaver , number of pixel in the all ecoregion which have similarity values higher or equal than the average inside a pa
							  pxpa = ind_pa.shape[0] # size of the pa used for computation
							  indokpsz = psizes >= pxpa
							  pszsok = psizes[indokpsz] # NEW
							  sumpszok = sum(pszsok)
							  lpratio=round(float(pszmax/pxpa),2)
							  lpratio2=round(float(sumpszok/pxpa),2)
							  numpszok = len(pszsok)
							  hr3aver = round(float(hr2aver/pxpa),2)
							  aggregation = round(float(hr2aver/num_featuresaver),2)
						#hr2 = hr1sumaver - hr1insumaver
						#print hr2
						#hr3 = float(hr2/ind_pa.shape[0])
						#print hr3
					wb = open(csvname,'a')
					var = str(ecor)+' '+str(pa)+' '+str(hr1averpa)+' '+str(hr2aver)+' '+str(pxpa)+' '+str(hr3aver)+' '+str(num_featuresaver)+' '+str(lpratio)+' '+str(lpratio2)+' '+str(numpszok)+' '+str(pszmax)+' '+str(aggregation)+' '+str(tseaspamin)+' '+str(tseaspamax)+' '+str(tmax_warm_Mpamin)+' '+str(tmax_warm_Mpamax)+' '+str(prepamin)+' '+str(prepamax)+' '+str(preD_Mpamin)+' '+str(preD_Mpamax)+' '+str(aridpamin)+' '+str(aridpamax)+' '+str(ndviminpamin)+' '+str(ndviminpamax)+' '+str(ndvimaxpamin)+' '+str(ndvimaxpamax)+' '+str(treehpamin)+' '+str(treehpamax)+' '+str(treeDpamin)+' '+str(treeDpamax)+' '+str(soilpamin)+' '+str(soilpamax)+' '+str(slopepamin)+' '+str(slopepamax)+' '+str(tseaspamean)+' '+str(tmax_warm_Mpamean)+' '+str(prepamean)+' '+str(preD_Mpamean)+' '+str(aridpamean)+' '+str(ndviminpamean)+' '+str(ndvimaxpamean)+' '+str(treehpamean)+' '+str(treeDpamean)+' '+str(soilpamean)+' '+str(slopepamean) # use the *pa* variable names initialised above; exclude PA! '+str(treepamin)+' '+str(treepamax)+' '+str(treepamean)+' '+str(ndwipamin)+' '+str(ndwipamax)+' '+str(ndwipamean)+'
					wb.write(var)
					wb.write('\n')
					wb.close()
					print "results exported"
					os.system('rm '+str(outfile3))
		wb = open(csvname1,'a')	# where we write the final results	(LOCAL	FOLDER)
		var = str(ecor)
		wb.write(var)
		wb.write('\n')
		wb.close()	
	print "END ECOREG: " + str(ecor)
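
A minimal call sketch for the function above, assuming initglobalmaps(), safelyMakeDir() and the global rasters it relies on are defined in the same module, and that eco_555.tif and 555.csv exist under the local ecoregs/ and pas/ folders (555 is just the ecoregion id used as an example in the comments):

# all inputs and outputs in the current working directory, i.e. empty network directories,
# as suggested by the comment on the function definition
ehabitat(555, '', '')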
Example #3
def objstats(args):
    # Open and read from image and segmentation
    try:
        img_ds = gdal.Open(args.image, gdal.GA_ReadOnly)
    except:
        logger.error('Could not open image: {}'.format(args.image))
        sys.exit(1)

    try:
        seg_ds = ogr.Open(args.segment, 0)
        seg_layer = seg_ds.GetLayer()
    except:
        logger.error('Could not open segmentation vector file: {}'.format(
            args.segment))
        sys.exit(1)

    cols, rows = img_ds.RasterXSize, img_ds.RasterYSize
    bands = range(1, img_ds.RasterCount + 1)
    if args.bands is not None:
        bands = args.bands

    # Rasterize segments
    logger.debug('About to rasterize segment vector file')
    img_srs = osr.SpatialReference()
    img_srs.ImportFromWkt(img_ds.GetProjectionRef())

    mem_raster = gdal.GetDriverByName('MEM').Create(
        '', cols, rows, 1, gdal.GDT_UInt32)
    mem_raster.SetProjection(img_ds.GetProjection())
    mem_raster.SetGeoTransform(img_ds.GetGeoTransform())

    # Create artificial 'FID' field
    fid_layer = seg_ds.ExecuteSQL(
        'select FID, * from "{l}"'.format(l=seg_layer.GetName()))
    gdal.RasterizeLayer(mem_raster, [1], fid_layer, options=['ATTRIBUTE=FID'])
    logger.debug('Rasterized segment vector file')

    seg = mem_raster.GetRasterBand(1).ReadAsArray()
    logger.debug('Read segmentation image into memory')
    mem_raster = None
    seg_ds = None

    # Get list of unique segments
    useg = np.unique(seg)

    # If calc is num, do only for 1 band
    out_bands = 0
    for stat in args.stat:
        if stat == 'num':
            out_bands += 1
        else:
            out_bands += len(bands)

    # Create output driver
    driver = gdal.GetDriverByName(args.format)
    out_ds = driver.Create(args.output, cols, rows, out_bands,
                           gdal.GDT_Float32)

    # Loop through image bands
    out_b = 0
    out_2d = np.empty_like(seg, dtype=np.float32)
    for i_b, b in enumerate(bands):
        img_band = img_ds.GetRasterBand(b)
        ndv = img_band.GetNoDataValue()
        band_name = img_band.GetDescription()
        if not band_name:
            band_name = 'Band {i}'.format(i=b)
        logger.info('Processing input band {i}, "{b}"'.format(
            i=b, b=band_name))

        img = img_band.ReadAsArray().astype(
            gdal_array.GDALTypeCodeToNumericTypeCode(img_band.DataType))
        logger.debug('Read image band {i}, "{b}" into memory'.format(
            i=b, b=band_name))

        for stat in list(args.stat):  # iterate over a copy, since 'num' may be removed from args.stat below
            logger.debug('    calculating {s}'.format(s=stat))
            if stat == 'mean':
                out = ndimage.mean(img, seg, useg)
            elif stat == 'var':
                out = ndimage.variance(img, seg, useg)
            elif stat == 'num':
                # Remove from list of stats so it is only calculated once
                args.stat.remove('num')
                count = np.ones_like(seg)
                out = ndimage.sum(count, seg, useg)
            elif stat == 'sum':
                out = ndimage.sum(img, seg, useg)
            elif stat == 'min':
                out = ndimage.minimum(img, seg, useg)
            elif stat == 'max':
                out = ndimage.maximum(img, seg, useg)
            elif stat == 'mode':
                out = ndimage.labeled_comprehension(img, seg, useg,
                                                    scipy_mode,
                                                    out_2d.dtype, ndv)
            else:
                logger.error('Unknown stat. Not sure how you got here')
                sys.exit(1)

            # Transform to 2D
            out_2d = out[seg - seg.min()]

            # Fill in NDV
            if ndv is not None:
                out_2d[np.where(img == ndv)] = ndv

            # Write out the data
            out_band = out_ds.GetRasterBand(out_b + 1)
            out_band.SetDescription(band_name)
            if ndv is not None:
                out_band.SetNoDataValue(ndv)
            logger.debug('    Writing object statistic for band {b}'.format(
                    b=b + 1))
            out_band.WriteArray(out_2d, 0, 0)
            out_band.FlushCache()
            logger.debug('    Wrote out object statistic for band {b}'.format(
                    b=b + 1))
            out_b += 1

    out_ds.SetGeoTransform(img_ds.GetGeoTransform())
    out_ds.SetProjection(img_ds.GetProjection())

    img_ds = None
    seg_ds = None
    out_ds = None
    logger.info('Completed object statistic calculation')
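
The per-band loop above is plain scipy.ndimage zonal statistics plus a map-back step; a self-contained sketch of that core idea, with toy arrays standing in for the GDAL datasets:

import numpy as np
from scipy import ndimage

img = np.array([[1., 2., 3.],
                [4., 5., 6.],
                [7., 8., 9.]], dtype=np.float32)
seg = np.array([[0, 0, 1],
                [0, 1, 1],
                [2, 2, 2]], dtype=np.uint32)   # one label per segment

useg = np.unique(seg)                          # unique segment ids
out = ndimage.mean(img, seg, useg)             # one statistic per segment
out_2d = out[seg - seg.min()]                  # broadcast back onto the image grid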
Example #4
def _significant_features(radar,
                          field,
                          gatefilter=None,
                          min_size=None,
                          size_bins=75,
                          size_limits=(0, 300),
                          structure=None,
                          remove_size_field=True,
                          fill_value=None,
                          size_field=None,
                          debug=False,
                          verbose=False):
    """
    """

    # Parse fill value
    if fill_value is None:
        fill_value = get_fillvalue()

    # Parse field names
    if size_field is None:
        size_field = '{}_feature_size'.format(field)

    # Parse gate filter
    if gatefilter is None:
        gatefilter = GateFilter(radar, exclude_based=False)

    # Parse binary structuring element
    if structure is None:
        structure = ndimage.generate_binary_structure(2, 1)

    # Initialize echo feature size array
    size_data = np.zeros_like(radar.fields[field]['data'],
                              subok=False,
                              dtype=np.int32)

    # Loop over all sweeps
    feature_sizes = []
    for sweep in radar.iter_slice():

        # Parse radar sweep data and define only valid gates
        is_valid_gate = ~radar.fields[field]['data'][sweep].mask

        # Label the connected features in radar sweep data and create index
        # array which defines each unique label (feature)
        labels, nlabels = ndimage.label(is_valid_gate,
                                        structure=structure,
                                        output=None)
        index = np.arange(1, nlabels + 1, 1)

        if debug:
            print 'Number of unique features for {}: {}'.format(sweep, nlabels)

        # Compute the size (in radar gates) of each echo feature
        # Check for case where no echo features are found, e.g., no data in
        # sweep
        if nlabels > 0:
            sweep_sizes = ndimage.labeled_comprehension(
                is_valid_gate, labels, index, np.count_nonzero, np.int32, 0)
            feature_sizes.append(sweep_sizes)

            # Set each label (feature) to its total size (in radar gates)
            for label, size in zip(index, sweep_sizes):
                size_data[sweep][labels == label] = size

    # Stack sweep echo feature sizes
    feature_sizes = np.hstack(feature_sizes)

    # Compute histogram of echo feature sizes, bin centers and bin
    # width
    counts, bin_edges = np.histogram(feature_sizes,
                                     bins=size_bins,
                                     range=size_limits,
                                     normed=False,
                                     weights=None,
                                     density=False)
    bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2.0
    bin_width = np.diff(bin_edges).mean()

    if debug:
        print 'Bin width: {} gate(s)'.format(bin_width)

    # Compute the peak of the echo feature size distribution
    # We expect the peak of the echo feature size distribution to be close to 1
    # radar gate
    peak_size = bin_centers[counts.argmax()] - bin_width / 2.0

    if debug:
        print 'Feature size at peak: {} gate(s)'.format(peak_size)

    # Determine the first instance when the count (sample size) for an echo
    # feature size bin reaches 0 after the distribution peak
    # This will define the minimum echo feature size
    is_zero_size = np.logical_and(bin_centers > peak_size,
                                  np.isclose(counts, 0, atol=1.0e-5))
    min_size = bin_centers[is_zero_size].min() - bin_width / 2.0

    if debug:
        _range = [0.0, min_size]
        print 'Insignificant feature size range: {} gates'.format(_range)

    # Mask invalid feature sizes, e.g., zero-size features
    size_data = np.ma.masked_equal(size_data, 0, copy=False)
    size_data.set_fill_value(fill_value)

    # Add echo feature size field to radar
    size_dict = {
        'data': size_data.astype(np.int32),
        'standard_name': size_field,
        'long_name': '',
        '_FillValue': size_data.fill_value,
        'units': 'unitless',
    }
    radar.add_field(size_field, size_dict, replace_existing=True)

    # Update gate filter
    gatefilter.include_above(size_field, min_size, op='and', inclusive=False)

    # Remove echo feature size field
    if remove_size_field:
        radar.fields.pop(size_field, None)

    return gatefilter
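
The minimum-size heuristic above (take the peak of the feature-size histogram, then the first empty bin after the peak) shown in isolation on synthetic feature sizes; the numbers below are only illustrative:

import numpy as np

feature_sizes = np.concatenate([np.ones(500),                      # many 1-gate specks
                                np.random.randint(40, 300, 50)])   # a few larger features
counts, bin_edges = np.histogram(feature_sizes, bins=75, range=(0, 300))
bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2.0
bin_width = np.diff(bin_edges).mean()
peak_size = bin_centers[counts.argmax()] - bin_width / 2.0
is_zero_size = np.logical_and(bin_centers > peak_size,
                              np.isclose(counts, 0, atol=1.0e-5))
min_size = bin_centers[is_zero_size].min() - bin_width / 2.0       # smallest "significant" size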
def main():
    # Parse command line options
    parser = argparse.ArgumentParser(description='Test different nets with 3D data.')
    parser.add_argument('--flair', action='store', dest='flair', default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--pd', action='store', dest='pd', default='DP_preprocessed.nii.gz')
    parser.add_argument('--t2', action='store', dest='t2', default='T2_preprocessed.nii.gz')
    parser.add_argument('--t1', action='store', dest='t1', default='T1_preprocessed.nii.gz')
    parser.add_argument('--output', action='store', dest='output', default='output.nii.gz')
    parser.add_argument('--no-docker', action='store_false', dest='docker', default=True)

    c = color_codes()
    patch_size = (15, 15, 15)
    options = vars(parser.parse_args())
    batch_size = 10000
    min_size = 30

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.init.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.init.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum((p + t[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] + '>' + c['nc'])
    names = np.array([options['flair'], options['pd'], options['t2'], options['t1']])
    image_nii = load_nii(options['flair'])
    image1 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image1[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.final.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.final.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_1', pool_size=2, stride=2, mode='average_inc_pad')),
            (Conv3DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5, 5), pad='same')),
            (Pool3DDNNLayer, dict(name='avgpool_2', pool_size=2, stride=2, mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda t, p: 2 * np.sum(t * p[:, 1]) / np.sum((t + p[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '2' + c['nc'] + c['g'] + '>' + c['nc'])
    image2 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image2[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Saving to file ' + c['b'] + options['output'] + c['nc'] + c['g'] + '>' + c['nc'])
    image = (image1 * image2) > 0.5

    # filter candidates < min_size
    labels, num_labels = ndimage.label(image)
    lesion_list = np.unique(labels)
    num_elements_by_lesion = ndimage.labeled_comprehension(image, labels, lesion_list, np.sum, float, 0)
    filt_min_size = num_elements_by_lesion >= min_size
    lesion_list = lesion_list[filt_min_size]
    image = reduce(np.logical_or, map(lambda lab: lab == labels, lesion_list))

    image_nii.get_data()[:] = np.roll(np.roll(image, 1, axis=0), 1, axis=1)
    path = '/'.join(options['t1'].rsplit('/')[:-1])
    outputname = options['output'].rsplit('/')[-1]
    image_nii.to_filename(os.path.join(path, outputname))

    if not options['docker']:
        path = '/'.join(options['output'].rsplit('/')[:-1])
        case = options['output'].rsplit('/')[-1]
        gt = load_nii(os.path.join(path, 'Consensus.nii.gz')).get_data().astype(dtype=np.bool)
        dsc = np.sum(2.0 * np.logical_and(gt, image)) / (np.sum(gt) + np.sum(image))
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              '<DSC value for ' + c['c'] + case + c['g'] + ' = ' + c['b'] + str(dsc) + c['nc'] + c['g'] + '>' + c['nc'])
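
The candidate filtering step above (label, measure, keep lesions of at least min_size) in isolation on a toy binary image; min_size is shrunk here so the example actually removes something:

import numpy as np
from scipy import ndimage
from functools import reduce   # built-in in Python 2, needs the import on Python 3

image = np.zeros((10, 10), dtype=bool)
image[1:5, 1:5] = True         # 16-pixel blob (kept)
image[8, 8] = True             # 1-pixel speck (removed)
min_size = 5

labels, num_labels = ndimage.label(image)
lesion_list = np.unique(labels)
sizes = ndimage.labeled_comprehension(image, labels, lesion_list, np.sum, float, 0)
lesion_list = lesion_list[sizes >= min_size]
filtered = reduce(np.logical_or, map(lambda lab: lab == labels, lesion_list))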
def Channel_Head_Definition(skeletonFromFlowAndCurvatureArray,
                            geodesicDistanceArray):
    # Locating end points
    print 'Locating skeleton end points'
    structure = np.ones((3, 3))
    skeletonLabeledArray, skeletonNumConnectedComponentsList =\
                          ndimage.label(skeletonFromFlowAndCurvatureArray,
                                        structure=structure)
    """
     Through the histogram of skeletonNumElementsSortedList
     (skeletonNumElementsList minus the maximum value which
      corresponds to the largest connected element of the skeleton) we get the
      size of the smallest elements of the skeleton, which will likely
      correspond to small isolated convergent areas. These elements will be
      excluded from the search of end points.
    """
    print 'Counting the number of elements of each connected component'
    lbls = np.arange(1, skeletonNumConnectedComponentsList + 1)
    skeletonLabeledArrayNumtuple = ndimage.labeled_comprehension(skeletonFromFlowAndCurvatureArray,\
                                                                 skeletonLabeledArray,\
                                                                 lbls,np.count_nonzero,\
                                                                 int,0)
    skeletonNumElementsSortedList = np.sort(skeletonLabeledArrayNumtuple)
    histarray,skeletonNumElementsHistogramX=np.histogram(\
        skeletonNumElementsSortedList[0:len(skeletonNumElementsSortedList)-1],
        int(np.floor(np.sqrt(len(skeletonNumElementsSortedList)))))
    if defaults.doPlot == 1:
        raster_plot(skeletonLabeledArray,
                    'Skeleton Labeled Array elements Array')
    # Create skeleton gridded array
    skeleton_label_set, label_indices = np.unique(skeletonLabeledArray,
                                                  return_inverse=True)
    skeletonNumElementsGriddedArray = np.array([
        skeletonLabeledArrayNumtuple[x - 1] for x in skeleton_label_set
    ])[label_indices].reshape(skeletonLabeledArray.shape)
    if defaults.doPlot == 1:
        raster_plot(skeletonNumElementsGriddedArray,
                    'Skeleton Num elements Array')
    # Elements smaller than skeletonNumElementsThreshold are not considered in the
    # skeletonEndPointsList detection
    skeletonNumElementsThreshold = skeletonNumElementsHistogramX[2]
    print 'skeletonNumElementsThreshold', str(skeletonNumElementsThreshold)
    # Scan the array for finding the channel heads
    print 'Continuing to locate skeleton endpoints'
    skeletonEndPointsList = []
    nrows = skeletonFromFlowAndCurvatureArray.shape[0]
    ncols = skeletonFromFlowAndCurvatureArray.shape[1]
    for i in range(nrows):
        for j in range(ncols):
            if skeletonLabeledArray[i,j]!=0 \
               and skeletonNumElementsGriddedArray[i,j]>=skeletonNumElementsThreshold:
                # Define search box and ensure it fits within the DTM bounds
                my = i - 1
                py = nrows - i
                mx = j - 1
                px = ncols - j
                xMinus = np.min([defaults.endPointSearchBoxSize, mx])
                xPlus = np.min([defaults.endPointSearchBoxSize, px])
                yMinus = np.min([defaults.endPointSearchBoxSize, my])
                yPlus = np.min([defaults.endPointSearchBoxSize, py])
                # Extract the geodesic distances geodesicDistanceArray for pixels within the search box
                searchGeodesicDistanceBox = geodesicDistanceArray[i -
                                                                  yMinus:i +
                                                                  yPlus, j -
                                                                  xMinus:j +
                                                                  xPlus]
                # Extract the skeleton labels for pixels within the search box
                searchLabeledSkeletonBox = skeletonLabeledArray[i - yMinus:i +
                                                                yPlus,
                                                                j - xMinus:j +
                                                                xPlus]
                # Look in the search box for skeleton points with the same label
                # and greater geodesic distance than the current pixel at (i,j)
                # - if there are none, then add the current point as a channel head
                v = searchLabeledSkeletonBox == skeletonLabeledArray[i, j]
                v1 = v * searchGeodesicDistanceBox > geodesicDistanceArray[i,
                                                                           j]
                v3 = np.where(np.any(v1 == True, axis=0))
                if len(v3[0]) == 0:
                    skeletonEndPointsList.append([i, j])
    # For loop ends here
    skeletonEndPointsListArray = np.transpose(skeletonEndPointsList)
    if defaults.doPlot == 1:
        raster_point_plot(skeletonFromFlowAndCurvatureArray,
                          skeletonEndPointsListArray,
                          'Skeleton Num elements Array with channel heads',
                          cm.binary, 'ro')
    if defaults.doPlot == 1:
        raster_point_plot(geodesicDistanceArray, skeletonEndPointsListArray,
                          'Geodesic distance Array with channel heads',
                          cm.coolwarm, 'ro')
    xx = skeletonEndPointsListArray[1]
    yy = skeletonEndPointsListArray[0]
    # Write shapefiles of channel heads
    write_drainage_nodes(xx, yy, "ChannelHead", Parameters.pointFileName,
                         Parameters.pointshapefileName)
    # Write raster of channel heads
    channelheadArray = np.zeros((geodesicDistanceArray.shape))
    channelheadArray[skeletonEndPointsListArray[0],
                     skeletonEndPointsListArray[1]] = 1
    outfilepath = Parameters.geonetResultsDir
    demName = Parameters.demFileName
    outfilename = demName.split('.')[0] + '_channelHeads.tif'
    write_geotif_generic(channelheadArray,\
                         outfilepath,outfilename)
    return xx, yy
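The per-label element counts that drive the threshold above come from scipy.ndimage. A minimal, self-contained sketch of that count-per-label-and-broadcast-back pattern, on a toy array rather than GeoNet data:

import numpy as np
from scipy import ndimage as ndi

# Toy skeleton: 1 = skeleton pixel, 0 = background
skeleton = np.array([[1, 1, 0, 0],
                     [0, 1, 0, 1],
                     [0, 0, 0, 1],
                     [1, 0, 0, 1]], dtype=np.uint8)

labeled, nlabels = ndi.label(skeleton)
labels = np.arange(1, nlabels + 1)
# Number of pixels belonging to each connected skeleton segment
sizes = ndi.labeled_comprehension(skeleton, labeled, labels,
                                  np.count_nonzero, int, 0)
# Broadcast each segment's size back onto the grid; background stays 0
size_grid = np.where(labeled > 0, sizes[labeled - 1], 0)
print(size_grid)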
    def detect_objects(self, img, outline='GREEN'):
        original = img.copy()
        post_processed = img.copy()

        if img.mode != 'L':
            img = img.convert('L')

        x_step = 1
        y_step = 1
        scale = 1.25
        count = 0
        confidence_threshold = 3
        reduced_windows = []
        original_draw = ImageDraw.Draw(original)
        window_width = self.classifier.image_width
        window_height = self.classifier.image_height

        while window_width < img.size[0] and window_height < img.size[1]:
            window_stamps = np.zeros_like(img, np.uint16)
            integral_image = get_integral_image(img)

            for y in range(0, img.size[1] - window_height, y_step):
                for x in range(0, img.size[0] - window_width, x_step):
                    window = integral_window(integral_image, x, y,
                                             window_width, window_height)
                    if self.classifier.classify(window):
                        window_stamps[y, x] = window_width
                        original_draw.rectangle(
                            (x * original.size[0] // img.size[0],
                             y * original.size[1] // img.size[1],
                             (x + window_width) * original.size[0] //
                             img.size[0], (y + window_height) *
                             original.size[1] // img.size[1]),
                            outline=outline)

            def representative_windows(values, indexes):
                left = np.inf
                right = -1
                top = np.inf
                bottom = -1
                for index in indexes:
                    x = index % img.size[0]
                    y = index // img.size[0]
                    if x < left:
                        left = x
                    if x > right:
                        right = x
                    if y < top:
                        top = y
                    if y > bottom:
                        bottom = y

                confidence = indexes.size  # / (right - left + values[0])
                if confidence > confidence_threshold:
                    # return (left, top, right + values[0], bottom + values[0]), confidence
                    return ((left + right) // 2, (top + bottom) // 2,
                            (left + right) // 2 + values[0],
                            (top + bottom) // 2 + values[0]), confidence
                else:
                    return None

            labeled, num_of_windows = label(window_stamps,
                                            np.ones((3, 3), np.int))

            if num_of_windows != 0:
                merged_windows = list(
                    filter(
                        lambda w: w is not None,
                        labeled_comprehension(window_stamps, labeled,
                                              np.arange(1, num_of_windows + 1),
                                              representative_windows, tuple, 0,
                                              True)))

                if len(merged_windows) != 0:
                    reduced_windows += [
                        (tuple(v * original.size[0] // img.size[0]
                               for v in x[0]), ) +
                        ((x[0][2] - x[0][0]) * original.size[0] // img.size[0],
                         x[1]) for x in merged_windows
                    ]

            count += 1
            img.thumbnail((img.size[0] / scale, img.size[1] / scale),
                          Image.ANTIALIAS)

        original.show()

        reduced_windows.sort(key=lambda x: x[1])

        def center_of_rectangle(x1, y1, x2, y2):
            return (x1 + x2) / 2, (y1 + y2) / 2

        i = 0
        while i < len(reduced_windows):
            i_center_x, i_center_y = center_of_rectangle(
                *reduced_windows[i][0])
            i_confidence = reduced_windows[i][2]
            j = i + 1
            while j < len(reduced_windows):
                j_x1, j_y1, j_x2, j_y2 = reduced_windows[j][0]
                j_confidence = reduced_windows[j][2]
                if j_x1 <= i_center_x <= j_x2 and j_y1 <= i_center_y <= j_y2:
                    if i_confidence > j_confidence:
                        del reduced_windows[j]
                        j -= 1
                    else:
                        del reduced_windows[i]
                        i -= 1
                        j = len(reduced_windows)

                j += 1
            i += 1

        post_processed_draw = ImageDraw.Draw(post_processed)
        for rectangle in [x[0] for x in reduced_windows]:
            post_processed_draw.rectangle(rectangle, outline=outline)
        post_processed.show()
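representative_windows above collapses each connected cluster of classifier hits into a single box through labeled_comprehension with pass_positions=True. A hedged, stand-alone sketch of the same reduction on a hypothetical detection map (no classifier involved; the stamps array and bounding_box helper are invented for illustration):

import numpy as np
from scipy.ndimage import label, labeled_comprehension

stamps = np.zeros((10, 10), np.uint16)
stamps[2:4, 2:4] = 24   # a cluster of hits; the stored value is the window width
stamps[7, 7] = 24       # an isolated hit

ncols = stamps.shape[1]

def bounding_box(values, positions):
    # positions are flat indices into the stamps array
    ys, xs = positions // ncols, positions % ncols
    w = int(values[0])
    return (int(xs.min()), int(ys.min()), int(xs.max()) + w, int(ys.max()) + w)

labeled, n = label(stamps, np.ones((3, 3), int))
boxes = labeled_comprehension(stamps, labeled, np.arange(1, n + 1),
                              bounding_box, object, 0, True)
print(list(boxes))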
Exemplo n.º 8
def depth_cal(wet_raster, width_raster, ele_raster, pix_per_m, min_slope, max_slope, min_pos_slope,
              wet_pos_depth_raster, water_ele_raster):
    corner = arcpy.Point(arcpy.Describe(wet_raster).Extent.XMin, arcpy.Describe(wet_raster).Extent.YMin)
    dx = arcpy.Describe(wet_raster).meanCellWidth

    org_wet = arcpy.RasterToNumPyArray(wet_raster, nodata_to_value=0)
    wet = ndimage.morphology.binary_fill_holes(org_wet).astype(np.uint8)
    ele = arcpy.RasterToNumPyArray(ele_raster, nodata_to_value=0)

    common_border = find_sep_border(org_wet, 7, corner, dx)
    wet = ndimage.binary_dilation(org_wet, iterations=2).astype(np.uint8)
    wet = np.where(common_border == 1, 0, wet)
    common_border = find_sep_border(wet, 7, corner, dx)
    wet = ndimage.morphology.binary_fill_holes(wet).astype(np.uint8)
    wet = np.where(common_border == 1, 0, wet)
    wet = np.where(org_wet == 1, 1, wet)

    er_wet_1 = ndimage.binary_erosion(wet).astype(np.uint8)
    bound_1 = np.where((wet == 1) & (er_wet_1 == 0), 1, 0).astype(np.uint8)
    ele_1 = np.where(bound_1 == 1, ele, 0)
    temp_ele_1 = arcpy.NumPyArrayToRaster(ele_1, corner, dx, dx, value_to_nodata=0)
    ele_1_thick = FocalStatistics(temp_ele_1, NbrRectangle(3, 3, "CELL"), "MEAN", "DATA")

    del ele_1

    er_wet_2 = ndimage.binary_erosion(er_wet_1).astype(np.uint8)
    bound_2 = np.where((er_wet_1 == 1) & (er_wet_2 == 0), 1, 0).astype(np.uint8)
    ele_2 = np.where(bound_2 == 1, ele, 0)

    ele_1 = arcpy.RasterToNumPyArray(ele_1_thick, nodata_to_value=0)
    ele_1 = np.where(bound_2 == 0, 0, ele_1)

    slope = (ele_1 - ele_2) / pix_per_m
    slope_sign = np.where(slope > 0, 1, 0)

    Lab_wet, num_label = ndimage.label(wet, structure=np.ones((3, 3)))
    labels = np.arange(1, num_label + 1)
    slope_label = ndimage.labeled_comprehension(slope, Lab_wet, labels, np.sum, float, 0)
    slope_sign_label = ndimage.labeled_comprehension(slope_sign, Lab_wet, labels, np.sum, float, 0)
    count_2 = ndimage.labeled_comprehension(bound_2, Lab_wet, labels, np.sum, float, 0)
    water_ele_label = ndimage.labeled_comprehension(ele, Lab_wet, labels, np.min, float, 0)

    slope_seg = np.zeros_like(org_wet).astype(np.float32)
    slope_seg = slope_label[Lab_wet - 1].astype(np.float32) / count_2[Lab_wet - 1].astype(np.float32)
    slope_seg = np.where((Lab_wet == 0) | (org_wet == 0), 0, slope_seg)

    new_wet = np.where((slope_seg > min_slope) & (slope_seg < max_slope), 1, 0).astype(np.uint8)

    width = arcpy.RasterToNumPyArray(width_raster, nodata_to_value=0)
    depth = slope_seg * width / 2

    del slope_seg, slope_label, width

    slope_sign_seg = np.zeros_like(org_wet).astype(np.float32)
    slope_sign_seg = slope_sign_label[Lab_wet - 1].astype(np.float32) / count_2[Lab_wet - 1].astype(np.float32)
    slope_sign_seg = np.where((Lab_wet == 0) | (org_wet == 0), 0, slope_sign_seg)

    new_wet = np.where((slope_sign_seg <= min_pos_slope), 0, new_wet).astype(np.uint8)
    del slope_sign_seg, slope_sign_label, count_2

    arcpy.NumPyArrayToRaster(new_wet, corner, dx, dx, value_to_nodata=0).save(wet_pos_depth_raster)

    water_ele = water_ele_label[Lab_wet - 1].astype(np.float32) + depth
    water_ele = np.where((Lab_wet == 0) | (new_wet == 0), 0, water_ele)
    arcpy.NumPyArrayToRaster(water_ele, corner, dx, dx, value_to_nodata=0).save(water_ele_raster)

    del new_wet, water_ele, depth
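depth_cal averages the boundary slope of each wet segment by dividing a labeled sum by a labeled count and broadcasting the ratio back onto the grid with Lab_wet - 1 indexing. A minimal sketch of that pattern on toy data (no arcpy needed):

import numpy as np
from scipy import ndimage

values = np.array([[0.2, 0.4, 0.0],
                   [0.0, 0.0, 0.0],
                   [0.0, 0.6, 0.6]])
wet = (values > 0).astype(np.uint8)

lab, nlab = ndimage.label(wet, structure=np.ones((3, 3)))
idx = np.arange(1, nlab + 1)
sums = ndimage.labeled_comprehension(values, lab, idx, np.sum, float, 0)
counts = ndimage.labeled_comprehension(wet, lab, idx, np.sum, float, 0)

# Per-segment mean, broadcast to the grid; background (label 0) zeroed afterwards
mean_per_segment = sums[lab - 1] / counts[lab - 1]
mean_per_segment = np.where(lab == 0, 0, mean_per_segment)
print(mean_per_segment)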
Exemplo n.º 9
def significant_features(
        radar, fields, gatefilter=None, size_bins=100, size_limits=(0, 400),
        structure=None, save_size_field=False, fill_value=None, debug=False,
        verbose=False):
    """
    Determine significant radar echo features on a sweep-by-sweep basis by
    computing the size of each echo feature. Here an echo feature is defined as
    multiple connected radar gates with valid data, where the connection
    structure is defined by the user.

    Parameters
    ----------
    radar : Radar
        Py-ART Radar containing the specified fields.
    fields : str or list or tuple
        Radar fields to be used to identify significant echo features.
    gatefilter : GateFilter
        Py-ART GateFilter instance.
    size_bins : int, optional
        Number of size bins used to bin feature size distribution.
    size_limits : list or tuple, optional
        Lower and upper limits of the feature size distribution. This together
        with size_bins defines the bin width of the feature size distribution.
    structure : array_like, optional
        Binary structuring element used to define connected radar gates. The
        default defines a structuring element in which diagonal radar gates are
        not considered connected.
    save_size_field : bool, optional
        True to save size fields in the radar object, False to discard.
    fill_value : float, optional
        Value indicating missing or bad data. If None, the default Py-ART
        fill value is used.
    debug : bool, optional
        True to print debugging information, False to suppress.
    verbose : bool, optional
        True to print relevant information, False to suppress.

    Returns
    -------
    gf : GateFilter
        Py-ART GateFilter.

    """

    # Parse fill value
    if fill_value is None:
        fill_value = get_fillvalue()

    # Parse radar fields
    if isinstance(fields, str):
        fields = [fields]

    # Parse gate filter
    if gatefilter is None:
        gf = GateFilter(radar, exclude_based=False)
    else:
        gf = gatefilter

    # Parse binary structuring element
    if structure is None:
        structure = ndimage.generate_binary_structure(2, 1)

    for field in fields:

        if verbose:
            print 'Processing echo features: {}'.format(field)

        # Initialize echo feature size array
        size_data = np.zeros_like(
            radar.fields[field]['data'], subok=False, dtype=np.int32)

        feature_sizes = []
        for sweep, _slice in enumerate(radar.iter_slice()):

            # Parse radar sweep data and define only valid gates
            data = radar.get_field(sweep, field, copy=False)
            is_valid_gate = ~np.ma.getmaskarray(data)

            # Label the connected features in the radar sweep data and create
            # index array which defines each unique label (feature)
            labels, nlabels = ndimage.label(
                is_valid_gate, structure=structure, output=None)
            index = np.arange(1, nlabels + 1, 1)

            if debug:
                print 'Unique features in sweep {}: {}'.format(sweep, nlabels)

            # Compute the size (in radar gates) of each echo feature
            # Check for case where no echo features are found, e.g., no data in
            # sweep
            if nlabels > 0:
                sweep_sizes = ndimage.labeled_comprehension(
                    is_valid_gate, labels, index, np.count_nonzero,
                    np.int32, 0)
                feature_sizes.append(sweep_sizes)

                # Set each label (feature) to its total size (in radar gates)
                for label, size in zip(index, sweep_sizes):
                    size_data[_slice][labels == label] = size

        # Stack sweep echo feature sizes
        feature_sizes = np.hstack(feature_sizes)

        # Bin and compute feature size occurrences
        counts, bin_edges = np.histogram(
            feature_sizes, bins=size_bins, range=size_limits, normed=False,
            weights=None, density=False)
        bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2.0
        bin_width = np.diff(bin_edges).mean()

        if debug:
            print 'Bin width: {} gate(s)'.format(bin_width)

        # Compute the peak of the echo feature size distribution. We expect the
        # peak of the echo feature size distribution to be close to 1 radar
        # gate
        peak_size = bin_centers[counts.argmax()] - bin_width / 2.0

        if debug:
            print 'Feature size at peak: {} gate(s)'.format(peak_size)

        # Determine the first instance when the count (sample size) for an echo
        # feature size bin reaches 0 after the distribution peak. This will
        # define the minimum echo feature size
        is_zero_size = np.logical_and(
            bin_centers > peak_size, np.isclose(counts, 0, atol=1.0e-1))
        min_size = bin_centers[is_zero_size].min() - bin_width / 2.0

        if debug:
            _range = [0.0, min_size]
            print 'Insignificant feature size range: {} gates'.format(_range)

        # Mask invalid feature sizes, e.g., zero-size features
        size_data = np.ma.masked_equal(size_data, 0, copy=False)
        size_data.set_fill_value(fill_value)

        # Parse echo feature size field name
        size_field = '{}_feature_size'.format(field)

        # Add echo feature size field to radar
        size_dict = {
            'data': size_data.astype(np.int32),
            'long_name': 'Echo feature size in number of radar gates',
            '_FillValue': size_data.fill_value,
            'units': 'unitless',
            'comment': None,
            }
        radar.add_field(size_field, size_dict, replace_existing=True)

        # Update gate filter
        gf.include_above(size_field, min_size, op='and', inclusive=False)

        # Remove the echo feature size field if specified
        if not save_size_field:
            radar.fields.pop(size_field, None)

    return gf
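The minimum significant size above is read off the feature-size histogram: locate the distribution peak, then take the first empty bin after it. A short sketch of that step with synthetic feature sizes (values invented for illustration):

import numpy as np

feature_sizes = np.array([1, 1, 1, 2, 2, 3, 40, 45, 200])
counts, bin_edges = np.histogram(feature_sizes, bins=50, range=(0, 400))
bin_width = np.diff(bin_edges).mean()
bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2.0

# Peak of the size distribution (expected near 1 gate for noise-like features)
peak_size = bin_centers[counts.argmax()] - bin_width / 2.0
# The first bin after the peak whose count drops to zero defines the minimum size
is_zero = np.logical_and(bin_centers > peak_size, np.isclose(counts, 0, atol=0.1))
min_size = bin_centers[is_zero].min() - bin_width / 2.0
print(min_size)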
Exemplo n.º 10
 def find_star(self, raw, ref=False, count=8, showLBL=False, name=''):
      '''
      Star-finding routine.
      First apply a median filter to the image to obtain the filtered image img_med.
      Positions where the filtered image < [median of the filtered image + N * std of the original image]
      are defined as background.
      The remaining connected regions are labeled and sorted by size; the largest `count`
      regions are defined as stars.
      The geometric radius and flux center of each star are then recorded and returned.
      para:
          img:        ndarr       input image
          ref:        bool        True: return a pandas.DataFrame; False: return a dict
          count:      int         how many stars to find. Default: 0; if 0, the object's count attribute is used
      return:
          dic:        dict/df     star-finding result with two keys
              radius:     geometric radius
              centers:    flux center
      '''
     if ref:
          # background removal for the stacked (reference) image
         img = raw - nd.median_filter(raw, footprint=np.array(self.fpa))
     else:
          # signal enhancement
         raw = nd.median_filter(raw, 5)
         img = nd.convolve(raw, self.kernel)
      # compute the background standard deviation
     sky, std = bginfo(img, self.mask)
      # flag pixels above the background level
     mark = img > sky + self.N * std
      # # remove stars that cross the mask
     # lbl, _ = nd.measurements.label(mark | ~self.mask)
     # mark = mark & (lbl != 1)
      # label the connected regions
     lbl, num = nd.measurements.label(mark)
     idx = np.arange(num) + 1
      # compute a radius for each connected region
     r_arr = nd.labeled_comprehension(
         input=lbl,
         labels=lbl,
         index=idx,
         func=lambda x: np.sqrt(len(x) / np.pi),
         out_dtype=float,
         default=0)
      # first pass of filtering the connected regions
     # jud = r_arr > 9
     # idx = idx[jud]
     # r_arr = r_arr[jud]
     if ref:
         jud = r_arr > 3
         idx = idx[jud]
         r_arr = r_arr[jud]
          # sort by radius R
         jud = np.argsort(-r_arr)
         idx = idx[jud]
         r_arr = r_arr[jud]
         if len(idx) > count:
             idx = idx[:count]
             r_arr = r_arr[:count]
     else:
         # jud = r_arr > 6
         # idx = idx[jud]
         # r_arr = r_arr[jud]
          # get the SNR (flux) list
         F = nd.sum(img, lbl, idx)  # OR MAX
          # # second pass of filtering the connected regions
         # jud = Fmax > np.pi * 9**2 * 0
         # idx = idx[jud]
         # r_arr = r_arr[jud]
         # Fmax = Fmax[jud]
          # sort by maximum SNR
         jud = np.argsort(-F)
         idx = idx[jud]
         r_arr = r_arr[jud]
         F = F[jud]
          # third pass of filtering the connected regions
         if len(idx) > 8:
             idx = idx[:8]
             r_arr = r_arr[:8]
             F = F[:8]
     if showLBL:
         save = np.zeros_like(lbl).astype(int)
         for i in idx:
             save += (lbl == i) * 1
         if not os.path.exists('local/lbl/'):
             os.mkdir('local/lbl/')
         plt.imsave('local/lbl/' + name + '_lbl.png', save)
         plt.close()
      # compute the centroids (flux centers)
     centers = nd.measurements.center_of_mass(input=raw,
                                              labels=lbl,
                                              index=idx)
     centers = [complex(*center) for center in centers]
     if ref:
         return pd.DataFrame({
             'radius': pd.Series(r_arr),
             'centers': pd.Series(centers)
         })
     return {'radius': pd.Series(r_arr), 'centers': pd.Series(centers)}
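find_star assigns each labeled region the radius of a circle of equal area, r = sqrt(N / pi), and its flux center via center_of_mass. A small sketch of both steps on a synthetic single-"star" image:

import numpy as np
from scipy import ndimage as nd

img = np.zeros((9, 9), bool)
img[3:6, 3:6] = True            # a 9-pixel square "star"

lbl, num = nd.label(img)
idx = np.arange(num) + 1
# Equal-area radius of each connected region
radii = nd.labeled_comprehension(lbl, lbl, idx,
                                 lambda x: np.sqrt(len(x) / np.pi),
                                 float, 0)
centers = nd.center_of_mass(img, lbl, idx)
print(radii, centers)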
Exemplo n.º 11
def label_size(radar,
               field,
               structure=None,
               rays_wrap_around=False,
               size_field=None,
               debug=False,
               verbose=False):
    """
    Label connected pixels in a binary radar field and compute the size of each
    label. Unexpected results may occur if the specified field is not binary.

    Parameters
    ----------
    radar : Radar
        Py-ART Radar containing the specified field to be labeled.
    field : str
        Radar field to be labeled.
    structure : array_like, optional
        Binary structuring element used in labeling. The default structuring
        element has a squared connectivity equal to 1, i.e., only nearest
        neighbours are connected to the structure origin and
        diagonally-connected elements are not considered neighbours.
    rays_wrap_around : bool, optional
        True if all sweeps have contiguous rays, e.g., 360 deg PPI radar
        volumes.
    size_field : str, optional
        Name of the field used to store feature sizes in the radar object.
        If None, a name derived from the input field is used.
    debug : bool, optional
        True to print debugging information, False to suppress.
    verbose : bool, optional
        True to print relevant information, False to suppress.

    """

    if verbose:
        print 'Performing binary labeling: {}'.format(field)

    if size_field is None:
        size_field = '{}_feature_size'.format(field)

    size_dict = {
        'data': np.zeros_like(radar.fields[field]['data'], dtype=np.int32),
        'units': 'unitless',
        'valid_min': 0,
        'comment': 'size in pixels of connected features',
    }
    radar.add_field(size_field, size_dict, replace_existing=True)

    for sweep, slc in enumerate(radar.iter_slice()):

        # Parse radar sweep data
        data = np.ma.getdata(radar.get_field(sweep, field))

        # Label connected pixels defined by the structuring element
        labels, nlabels = ndimage.label(data, structure=structure)
        index = np.arange(1, nlabels + 1)
        if debug:
            print 'Unique features in sweep {}: {}'.format(sweep, nlabels)

        if nlabels > 0:

            # Compute the size in number of pixels of each labeled feature
            sizes = ndimage.labeled_comprehension(data, labels, index,
                                                  np.count_nonzero, np.int32,
                                                  0)

            # Set each labeled feature to its total size in radar gates
            for label, size in zip(index, sizes):
                radar.fields[size_field]['data'][slc][labels == label] = size

    return
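label_size leaves the connectivity choice to the caller through structure. A quick sketch, independent of Py-ART, of how the structuring element changes the labeling:

import numpy as np
from scipy import ndimage

data = np.array([[1, 0, 0],
                 [0, 1, 0],
                 [0, 0, 1]])

s1 = ndimage.generate_binary_structure(2, 1)   # diagonals not connected (default here)
s2 = ndimage.generate_binary_structure(2, 2)   # diagonals connected
print(ndimage.label(data, structure=s1)[1])    # -> 3 separate features
print(ndimage.label(data, structure=s2)[1])    # -> 1 merged feature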
Exemplo n.º 12
def ehabitat(ecor, nw, nwpathout):

    global nwpath
    if nw == '':
        nwpath = os.getcwd()
    else:
        nwpath = nw

    if gmaps == 0:
        initglobalmaps()
    if nwpathout == '':
        #outdir = 'results'	#	ToDo: locally	create	folder "results"	if it	does	not	exist!
        outdir = os.path.join(os.path.sep, os.getcwd(), 'results')
        safelyMakeDir(outdir)
    else:
        #outdir = nwpathout+'/results'	#	SHARED	FOLDER	PATH
        outdir = os.path.join(os.path.sep, nwpathout, 'results')
        safelyMakeDir(outdir)

    treepamin = treepamax = eprpamin = eprpamax = prepamin = prepamax = biopamin = biopamax = slopepamin = slopepamax = ndwipamin = ndwipamax = ndvimaxpamin = ndvimaxpamax = ndviminpamin = ndviminpamax = hpamin = hpamax = None
    s = nd.generate_binary_structure(
        2, 2)  # most restrictive pattern for the landscape patches
    #	LOCAL FOLDER
    csvname1 = os.path.join(os.path.sep, outdir, 'ecoregs_done.csv')
    print csvname1
    if os.path.isfile(csvname1) == False:
        wb = open(csvname1, 'a')
        wb.write('None')
        wb.write('\n')
        wb.close()
    #	LOCAL FOLDER
    csvname = os.path.join(os.path.sep, outdir, 'hri_results.csv')
    print csvname
    if os.path.isfile(csvname) == False:
        wb = open(csvname, 'a')
        wb.write(
            'ecoregion wdpaid averpasim hr2aver pxpa hr1insumaver hriaver nfeatsaver lpratio lpratio2 numpszok lpmaxsize aggregation treepamin treepamax eprpamin eprpamax prepamin prepamax biopamin biopamax slopepamin slopepamax ndwipamin ndwipamax ndvimaxpamin ndvimaxpamax ndviminpamin ndviminpamax hpamin hpamax treepamean eprpamean prepamean biopamean slopepamean ndwipamean ndvimaxpamean ndviminpamean hpamean'
        )
        wb.write('\n')
        wb.close()
    treepamean = eprpamean = prepamean = biopamean = slopepamean = ndwipamean = ndvimaxpamean = ndviminpamean = hpamean = None
    ef = 'eco_' + str(ecor) + '.tif'
    ecofile = os.path.join(os.path.sep, nwpath, 'ecoregs', ef)
    #ecofile = os.path.join(os.path.sep, nwpath, os.path.sep,'ecoregs', os.path.sep, ef)
    print ecofile
    avail = os.path.isfile(ecofile)
    if avail == True:
        eco_csv = str(ecor) + '.csv'
        print eco_csv
        ecoparksf = os.path.join(os.path.sep, nwpath, 'pas', eco_csv)
        #ecoparksf = os.path.join(os.path.sep, nwpath, os.path.sep, 'pas', os.path.sep, eco_csv)
        print ecoparksf
        #ecoparksf = nwpath+'/pas/'+str(ecor)+'.csv'
        src_ds_eco = gdal.Open(ecofile)
        eco = src_ds_eco.GetRasterBand(1)
        eco_mask0 = eco.ReadAsArray(0, 0, eco.XSize,
                                    eco.YSize).astype(np.int32)
        eco_mask = eco_mask0.flatten()
        gt_eco = src_ds_eco.GetGeoTransform()
        print 'eco mask'
        xoff = int((gt_eco[0] - gt_epr_global[0]) / 1000)
        yoff = int((gt_epr_global[3] - gt_eco[3]) / 1000)
        epr_eco_bb0 = epr_global.ReadAsArray(xoff, yoff, eco.XSize,
                                             eco.YSize).astype(np.float32)
        epr_eco_bb = epr_eco_bb0.flatten()
        epr_eco0 = np.where(eco_mask == 1, (epr_eco_bb), (0))
        epr_eco = np.where(epr_eco0 == 65535.0, (float('NaN')), (epr_eco0))
        maskepr = np.isnan(epr_eco)
        epr_eco[maskepr] = np.interp(np.flatnonzero(maskepr),
                                     np.flatnonzero(~maskepr),
                                     epr_eco[~maskepr])
        print 'eco epr'
        xoff = int((gt_eco[0] - gt_slope_global[0]) / 1000)
        yoff = int((gt_slope_global[3] - gt_eco[3]) / 1000)
        slope_eco_bb0 = slope_global.ReadAsArray(xoff, yoff, eco.XSize,
                                                 eco.YSize).astype(np.float32)
        slope_eco_bb = slope_eco_bb0.flatten()
        slope_eco0 = np.where(eco_mask == 1, (slope_eco_bb), (0))
        slope_eco = np.where(slope_eco0 == 65535.0, (float('NaN')),
                             (slope_eco0))
        maskslope = np.isnan(slope_eco)
        slope_eco[maskslope] = np.interp(np.flatnonzero(maskslope),
                                         np.flatnonzero(~maskslope),
                                         slope_eco[~maskslope])
        print 'eco slope'
        xoff = int((gt_eco[0] - gt_ndvimax_global[0]) / 1000)
        yoff = int((gt_ndvimax_global[3] - gt_eco[3]) / 1000)
        ndvimax_eco_bb0 = ndvimax_global.ReadAsArray(
            xoff, yoff, eco.XSize, eco.YSize).astype(np.float32)
        ndvimax_eco_bb = ndvimax_eco_bb0.flatten()
        ndvimax_eco0 = np.where(eco_mask == 1, (ndvimax_eco_bb), (0))
        ndvimax_eco = np.where(ndvimax_eco0 == 65535.0, (float('NaN')),
                               (ndvimax_eco0))
        maskndvimax = np.isnan(ndvimax_eco)
        ndvimax_eco[maskndvimax] = np.interp(np.flatnonzero(maskndvimax),
                                             np.flatnonzero(~maskndvimax),
                                             ndvimax_eco[~maskndvimax])
        print 'eco ndvimax'
        xoff = int((gt_eco[0] - gt_ndvimin_global[0]) / 1000)
        yoff = int((gt_ndvimin_global[3] - gt_eco[3]) / 1000)
        ndvimin_eco_bb0 = ndvimin_global.ReadAsArray(
            xoff, yoff, eco.XSize, eco.YSize).astype(np.float32)
        ndvimin_eco_bb = ndvimin_eco_bb0.flatten()
        ndvimin_eco0 = np.where(eco_mask == 1, (ndvimin_eco_bb), (0))
        ndvimin_eco = np.where(ndvimin_eco0 == 65535.0, (float('NaN')),
                               (ndvimin_eco0))
        maskndvimin = np.isnan(ndvimin_eco)
        ndvimin_eco[maskndvimin] = np.interp(np.flatnonzero(maskndvimin),
                                             np.flatnonzero(~maskndvimin),
                                             ndvimin_eco[~maskndvimin])
        print 'eco ndvimin'
        xoff = int((gt_eco[0] - gt_ndwi_global[0]) / 1000)
        yoff = int((gt_ndwi_global[3] - gt_eco[3]) / 1000)
        ndwi_eco_bb0 = ndwi_global.ReadAsArray(xoff, yoff, eco.XSize,
                                               eco.YSize).astype(np.float32)
        ndwi_eco_bb = ndwi_eco_bb0.flatten()
        ndwi_eco0 = np.where(eco_mask == 1, (ndwi_eco_bb), (0))
        ndwi_eco = np.where(ndwi_eco0 == 255.0, (float('NaN')), (ndwi_eco0))
        maskndwi = np.isnan(ndwi_eco)
        ndwi_eco[maskndwi] = np.interp(np.flatnonzero(maskndwi),
                                       np.flatnonzero(~maskndwi),
                                       ndwi_eco[~maskndwi])
        print 'eco ndwi'
        xoff = int((gt_eco[0] - gt_pre_global[0]) / 1000)
        yoff = int((gt_pre_global[3] - gt_eco[3]) / 1000)
        pre_eco_bb0 = pre_global.ReadAsArray(xoff, yoff, eco.XSize,
                                             eco.YSize).astype(np.float32)
        pre_eco_bb = pre_eco_bb0.flatten()
        pre_eco0 = np.where(eco_mask == 1, (pre_eco_bb), (0))
        pre_eco = np.where(pre_eco0 == 65535.0, (float('NaN')), (pre_eco0))
        maskpre = np.isnan(pre_eco)
        pre_eco[maskpre] = np.interp(np.flatnonzero(maskpre),
                                     np.flatnonzero(~maskpre),
                                     pre_eco[~maskpre])
        print 'eco pre'
        xoff = int((gt_eco[0] - gt_bio_global[0]) / 1000)
        yoff = int((gt_bio_global[3] - gt_eco[3]) / 1000)
        bio_eco_bb0 = bio_global.ReadAsArray(xoff, yoff, eco.XSize,
                                             eco.YSize).astype(np.float32)
        bio_eco_bb = bio_eco_bb0.flatten()
        bio_eco0 = np.where(eco_mask == 1, (bio_eco_bb), (0))
        bio_eco = np.where(bio_eco0 == 65535.0, (float('NaN')), (bio_eco0))
        maskbio = np.isnan(bio_eco)
        bio_eco[maskbio] = np.interp(np.flatnonzero(maskbio),
                                     np.flatnonzero(~maskbio),
                                     bio_eco[~maskbio])
        print 'eco bio'
        xoff = int((gt_eco[0] - gt_tree_global[0]) / 1000)
        yoff = int((gt_tree_global[3] - gt_eco[3]) / 1000)
        tree_eco_bb0 = tree_global.ReadAsArray(xoff, yoff, eco.XSize,
                                               eco.YSize).astype(np.float32)
        tree_eco_bb = tree_eco_bb0.flatten()
        tree_eco0 = np.where(eco_mask == 1, (tree_eco_bb), (0))
        tree_eco = np.where(tree_eco0 == 255.0, (float('NaN')), (tree_eco0))
        masktree = np.isnan(tree_eco)
        tree_eco[masktree] = np.interp(np.flatnonzero(masktree),
                                       np.flatnonzero(~masktree),
                                       tree_eco[~masktree])
        print 'eco tree'
        xoff = int((gt_eco[0] - gt_herb_global[0]) / 1000)
        yoff = int((gt_herb_global[3] - gt_eco[3]) / 1000)
        herb_eco_bb0 = herb_global.ReadAsArray(xoff, yoff, eco.XSize,
                                               eco.YSize).astype(np.float32)
        herb_eco_bb = herb_eco_bb0.flatten()
        herb_eco0 = np.where(eco_mask == 1, (herb_eco_bb), (0))
        herb_eco = np.where(herb_eco0 == 255.0, (float('NaN')), (herb_eco0))
        maskherb = np.isnan(herb_eco)
        herb_eco[maskherb] = np.interp(np.flatnonzero(maskherb),
                                       np.flatnonzero(~maskherb),
                                       herb_eco[~maskherb])
        print 'eco herb'
        ind_eco0 = np.column_stack(
            (bio_eco, pre_eco, epr_eco, herb_eco, ndvimax_eco, ndvimin_eco,
             ndwi_eco, slope_eco, tree_eco))
        print 'ecovars stacked'

        print ecoparksf
        pa_list0 = np.genfromtxt(
            ecoparksf, dtype='string')  # create this file in subpas!
        pa_list = np.unique(pa_list0)
        n = len(pa_list)
        for px in range(0, n):  #	0,n

            pa = pa_list[px]
            print pa

            outfile = os.path.join(os.path.sep, outdir,
                                   str(ecor) + '_' + str(pa) + '.tif')
            outfile2 = os.path.join(os.path.sep, outdir,
                                    str(ecor) + '_' + str(pa) + '_lp.tif')
            outfile3 = os.path.join(os.path.sep, outdir,
                                    str(ecor) + '_' + str(pa) + '_mask.tif')
            #outfile = outdir+'/'+str(ecor)+'_'+str(pa)+'.tif'	#	LOCAL FOLDER
            pa_infile = 'pa_' + str(pa) + '.tif'

            pa4 = os.path.join(os.path.sep, nwpath, 'pas', pa_infile)
            #pa4 = os.path.join(os.path.sep, nwpath, os.path.sep, 'pas', os.path.sep, pa_infile)
            print pa4
            #pa4 = nwpath+'/pas/pa_'+str(pa)+'.tif'

            dropcols = np.arange(9, dtype=int)
            done = os.path.isfile(outfile)
            avail2 = os.path.isfile(pa4)
            if done == False and avail2 == True:
                pafile = pa4
                src_ds_pa = gdal.Open(pafile)
                par = src_ds_pa.GetRasterBand(1)
                pa_mask0 = par.ReadAsArray(0, 0, par.XSize,
                                           par.YSize).astype(np.int32)
                pa_mask = pa_mask0.flatten()
                ind = pa_mask > 0  #==int(pa)
                go = 1
                sum_pa_mask = sum(pa_mask[ind])  #/int(pa)
                if sum_pa_mask < 3:
                    go = 0  # not processing areas smaller than 3 pixels
                print sum_pa_mask
                sum_pa_mask_inv = len(pa_mask[pa_mask == 0])
                print sum_pa_mask_inv
                print len(pa_mask)
                ratiogeom = 10000
                if sum_pa_mask > 0: ratiogeom = sum_pa_mask_inv / sum_pa_mask
                #print ratiogeom
                gt_pa = src_ds_pa.GetGeoTransform()
                xoff = int((gt_pa[0] - gt_pre_global[0]) / 1000)
                yoff = int((gt_pre_global[3] - gt_pa[3]) / 1000)
                if xoff > 0 and yoff > 0 and go == 1:
                    num_bands = src_ds_eco.RasterCount
                    driver = gdal.GetDriverByName("GTiff")
                    dst_options = ['COMPRESS=LZW']
                    dst_ds = driver.Create(outfile, src_ds_eco.RasterXSize,
                                           src_ds_eco.RasterYSize, num_bands,
                                           gdal.GDT_Float32, dst_options)
                    dst_ds.SetGeoTransform(src_ds_eco.GetGeoTransform())
                    dst_ds.SetProjection(src_ds_eco.GetProjectionRef())
                    xoff = int((gt_pa[0] - gt_tree_global[0]) / 1000)
                    yoff = int((gt_tree_global[3] - gt_pa[3]) / 1000)
                    tree_pa_bb0 = tree_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    tree_pa_bb = tree_pa_bb0.flatten()
                    tree_pa0 = tree_pa_bb[ind]
                    tree_pa = np.where(tree_pa0 == 255.0, (float('NaN')),
                                       (tree_pa0))
                    mask2tree = np.isnan(tree_pa)
                    if mask2tree.all() == True:
                        dropcols[8] = -8
                    else:
                        tree_pa[mask2tree] = np.interp(
                            np.flatnonzero(mask2tree),
                            np.flatnonzero(~mask2tree), tree_pa[~mask2tree])
                        tree_pa = np.random.random_sample(
                            len(tree_pa), ) / 1000 + tree_pa
                        print 'pa tree'

                        treepamin = round(tree_pa.min(), 2)
                        treepamax = round(tree_pa.max(), 2)
                        treepamean = round(np.mean(tree_pa), 2)
                        print treepamin
                        print treepamax
                        treediff = abs(tree_pa.min() - tree_pa.max())
                        if treediff < 0.001: dropcols[8] = -8

                    xoff = int((gt_pa[0] - gt_epr_global[0]) / 1000)
                    yoff = int((gt_epr_global[3] - gt_pa[3]) / 1000)
                    epr_pa_bb0 = epr_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    epr_pa_bb = epr_pa_bb0.flatten()
                    epr_pa0 = epr_pa_bb[ind]
                    epr_pa = np.where(epr_pa0 == 65535.0, (float('NaN')),
                                      (epr_pa0))
                    mask2epr = np.isnan(epr_pa)
                    if mask2epr.all() == True:
                        dropcols[2] = -2
                    else:
                        epr_pa[mask2epr] = np.interp(np.flatnonzero(mask2epr),
                                                     np.flatnonzero(~mask2epr),
                                                     epr_pa[~mask2epr])
                        epr_pa = np.random.random_sample(
                            len(epr_pa), ) / 1000 + epr_pa
                        print 'pa epr'

                        eprpamin = round(epr_pa.min(), 2)
                        eprpamax = round(epr_pa.max(), 2)
                        eprpamean = round(np.mean(epr_pa), 2)
                        print eprpamin
                        print eprpamax
                        eprdiff = abs(epr_pa.min() - epr_pa.max())
                        if eprdiff < 0.001: dropcols[2] = -2

                    xoff = int((gt_pa[0] - gt_pre_global[0]) / 1000)
                    yoff = int((gt_pre_global[3] - gt_pa[3]) / 1000)
                    pre_pa_bb0 = pre_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    pre_pa_bb = pre_pa_bb0.flatten()
                    pre_pa0 = pre_pa_bb[ind]
                    pre_pa = np.where(pre_pa0 == 65535.0, (float('NaN')),
                                      (pre_pa0))
                    mask2pre = np.isnan(pre_pa)
                    if mask2pre.all() == True:
                        dropcols[1] = -1
                    else:
                        pre_pa[mask2pre] = np.interp(np.flatnonzero(mask2pre),
                                                     np.flatnonzero(~mask2pre),
                                                     pre_pa[~mask2pre])
                        pre_pa = np.random.random_sample(
                            len(pre_pa), ) / 1000 + pre_pa
                        print 'pa pre'

                        prepamin = round(pre_pa.min(), 2)
                        prepamax = round(pre_pa.max(), 2)
                        prepamean = round(np.mean(pre_pa), 2)
                        print prepamin
                        print prepamax
                        prediff = abs(pre_pa.min() - pre_pa.max())
                        if prediff < 0.001: dropcols[1] = -1

                    xoff = int((gt_pa[0] - gt_bio_global[0]) / 1000)
                    yoff = int((gt_bio_global[3] - gt_pa[3]) / 1000)
                    bio_pa_bb0 = bio_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    bio_pa_bb = bio_pa_bb0.flatten()
                    bio_pa0 = bio_pa_bb[ind]
                    bio_pa = np.where(bio_pa0 == 65535.0, (float('NaN')),
                                      (bio_pa0))
                    mask2bio = np.isnan(bio_pa)
                    if mask2bio.all() == True:
                        dropcols[0] = -0
                    else:
                        bio_pa[mask2bio] = np.interp(np.flatnonzero(mask2bio),
                                                     np.flatnonzero(~mask2bio),
                                                     bio_pa[~mask2bio])
                        bio_pa = np.random.random_sample(
                            len(bio_pa), ) / 1000 + bio_pa
                        print 'pa bio'

                        biopamin = round(bio_pa.min(), 2)
                        biopamax = round(bio_pa.max(), 2)
                        biopamean = round(np.mean(bio_pa), 2)
                        print biopamin
                        print biopamax
                        biodiff = abs(bio_pa.min() - bio_pa.max())
                        if biodiff < 0.001: dropcols[0] = -0

                    xoff = int((gt_pa[0] - gt_slope_global[0]) / 1000)
                    yoff = int((gt_slope_global[3] - gt_pa[3]) / 1000)
                    slope_pa_bb0 = slope_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    slope_pa_bb = slope_pa_bb0.flatten()
                    slope_pa0 = slope_pa_bb[ind]
                    slope_pa = np.where(slope_pa0 == 65535.0, (float('NaN')),
                                        (slope_pa0))
                    mask2slope = np.isnan(slope_pa)
                    if mask2slope.all() == True:
                        dropcols[7] = -7
                    else:
                        slope_pa[mask2slope] = np.interp(
                            np.flatnonzero(mask2slope),
                            np.flatnonzero(~mask2slope), slope_pa[~mask2slope])
                        slope_pa = np.random.random_sample(
                            len(slope_pa), ) / 1000 + slope_pa
                        print 'pa slope'

                        slopepamin = round(slope_pa.min(), 2)
                        slopepamax = round(slope_pa.max(), 2)
                        slopepamean = round(np.mean(slope_pa), 2)
                        print slopepamin
                        print slopepamax
                        slopediff = abs(slope_pa.min() - slope_pa.max())
                        if slopediff < 0.001: dropcols[7] = -7

                    xoff = int((gt_pa[0] - gt_ndwi_global[0]) / 1000)
                    yoff = int((gt_ndwi_global[3] - gt_pa[3]) / 1000)
                    ndwi_pa_bb0 = ndwi_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    ndwi_pa_bb = ndwi_pa_bb0.flatten()
                    ndwi_pa0 = ndwi_pa_bb[ind]
                    ndwi_pa = np.where(ndwi_pa0 == 255.0, (float('NaN')),
                                       (ndwi_pa0))
                    mask2ndwi = np.isnan(ndwi_pa)
                    if mask2ndwi.all() == True:
                        dropcols[6] = -6
                    else:
                        ndwi_pa[mask2ndwi] = np.interp(
                            np.flatnonzero(mask2ndwi),
                            np.flatnonzero(~mask2ndwi), ndwi_pa[~mask2ndwi])
                        ndwi_pa = np.random.random_sample(
                            len(ndwi_pa), ) / 1000 + ndwi_pa
                        print 'pa ndwi'

                        ndwipamin = round(ndwi_pa.min(), 2)
                        ndwipamax = round(ndwi_pa.max(), 2)
                        ndwipamean = round(np.mean(ndwi_pa), 2)
                        print ndwipamin
                        print ndwipamax
                        ndwidiff = abs(ndwi_pa.min() - ndwi_pa.max())
                        if ndwidiff < 0.001: dropcols[6] = -6

                    xoff = int((gt_pa[0] - gt_ndvimax_global[0]) / 1000)
                    yoff = int((gt_ndvimax_global[3] - gt_pa[3]) / 1000)
                    ndvimax_pa_bb0 = ndvimax_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    ndvimax_pa_bb = ndvimax_pa_bb0.flatten()
                    ndvimax_pa0 = ndvimax_pa_bb[ind]
                    ndvimax_pa = np.where(ndvimax_pa0 == 65535.0,
                                          (float('NaN')), (ndvimax_pa0))
                    mask2ndvimax = np.isnan(ndvimax_pa)
                    if mask2ndvimax.all() == True:
                        dropcols[4] = -4
                    else:
                        ndvimax_pa[mask2ndvimax] = np.interp(
                            np.flatnonzero(mask2ndvimax),
                            np.flatnonzero(~mask2ndvimax),
                            ndvimax_pa[~mask2ndvimax])
                        ndvimax_pa = np.random.random_sample(
                            len(ndvimax_pa), ) / 1000 + ndvimax_pa
                        print 'pa ndvimax'

                        ndvimaxpamin = round(ndvimax_pa.min(), 2)
                        ndvimaxpamax = round(ndvimax_pa.max(), 2)
                        ndvimaxpamean = round(np.mean(ndvimax_pa), 2)
                        print ndvimaxpamin
                        print ndvimaxpamax
                        ndvimaxdiff = abs(ndvimax_pa.min() - ndvimax_pa.max())
                        if ndvimaxdiff < 0.001: dropcols[4] = -4

                    xoff = int((gt_pa[0] - gt_ndvimin_global[0]) / 1000)
                    yoff = int((gt_ndvimin_global[3] - gt_pa[3]) / 1000)
                    ndvimin_pa_bb0 = ndvimin_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    ndvimin_pa_bb = ndvimin_pa_bb0.flatten()
                    ndvimin_pa0 = ndvimin_pa_bb[ind]
                    ndvimin_pa = np.where(ndvimin_pa0 == 65535.0,
                                          (float('NaN')), (ndvimin_pa0))
                    mask2ndvimin = np.isnan(ndvimin_pa)
                    if mask2ndvimin.all() == True:
                        dropcols[5] = -5
                    else:
                        ndvimin_pa[mask2ndvimin] = np.interp(
                            np.flatnonzero(mask2ndvimin),
                            np.flatnonzero(~mask2ndvimin),
                            ndvimin_pa[~mask2ndvimin])
                        ndvimin_pa = np.random.random_sample(
                            len(ndvimin_pa), ) / 1000 + ndvimin_pa
                        print 'pa ndvimin'

                        ndviminpamin = round(ndvimin_pa.min(), 2)
                        ndviminpamax = round(ndvimin_pa.max(), 2)
                        ndviminpamean = round(np.mean(ndvimin_pa), 2)
                        print ndviminpamin
                        print ndviminpamax
                        ndvimindiff = abs(ndvimin_pa.min() - ndvimin_pa.max())
                        if ndvimindiff < 0.001: dropcols[5] = -5

                    xoff = int((gt_pa[0] - gt_herb_global[0]) / 1000)
                    yoff = int((gt_herb_global[3] - gt_pa[3]) / 1000)
                    herb_pa_bb0 = herb_global.ReadAsArray(
                        xoff, yoff, par.XSize, par.YSize).astype(np.float32)
                    herb_pa_bb = herb_pa_bb0.flatten()
                    herb_pa0 = herb_pa_bb[ind]
                    herb_pa = np.where(herb_pa0 == 255.0, (float('NaN')),
                                       (herb_pa0))
                    mask2herb = np.isnan(herb_pa)
                    if mask2herb.all() == True:
                        dropcols[3] = -3
                    else:
                        herb_pa[mask2herb] = np.interp(
                            np.flatnonzero(mask2herb),
                            np.flatnonzero(~mask2herb), herb_pa[~mask2herb])
                        herb_pa = np.random.random_sample(
                            len(herb_pa), ) / 1000 + herb_pa
                        print 'pa herb'

                        hpamin = round(herb_pa.min(), 2)
                        hpamax = round(herb_pa.max(), 2)
                        hpamean = round(np.mean(herb_pa), 2)
                        print hpamin
                        print hpamax
                        hdiff = abs(herb_pa.min() - herb_pa.max())
                        if hdiff < 0.001: dropcols[3] = -3

                    cols = dropcols[dropcols >= 0]
                    ind_pa0 = np.column_stack(
                        (bio_pa, pre_pa, epr_pa, herb_pa, ndvimax_pa,
                         ndvimin_pa, ndwi_pa, slope_pa, tree_pa))
                    ind_pa = ind_pa0[:, cols]
                    ind_eco = ind_eco0[:, cols]
                    print ind_pa.shape
                    hr1sum = hr1insum = indokpsz = pszok = sumpszok = lpratio2 = numpszok = hr1averpa = hr3aver = hr2aver = pszmax = num_featuresaver = lpratio = hr1medianpa = hr1insumaver = pxpa = aggregation = None
                    print "PA masked"
                    #print ind_pa
                    if ind_pa.shape[0] > 4 and ind_pa.shape[1] > 1:
                        Ymean = np.mean(ind_pa, axis=0)
                        print 'Max. mean value is ' + str(Ymean.max())
                        print "Ymean ok"
                        Ycov = np.cov(ind_pa, rowvar=False)
                        print 'Max. cov value is ' + str(Ycov.max())
                        print "Ycov	ok"
                        #mh = mahalanobis_distances(Ymean,	Ycov,	ind_eco,	parallel=False)
                        #mh = mahalanobis_distances(Ymean,	Ycov,	ind_eco,	parallel=True)
                        mh2 = mahalanobis_distances_scipy(Ymean,
                                                          Ycov,
                                                          ind_eco,
                                                          parallel=True)
                        #mh2 = mahalanobis_distances_scipy(Ymean, Ycov, ind_eco, parallel=False)
                        maxmh = mh2.max()
                        print 'Max. mh value is ' + str(maxmh)
                        print 'Max. mh value is nan: ' + str(np.isnan(maxmh))
                        mh = mh2 * mh2
                        print "mh ok"
                        pmh = chisqprob(mh, len(cols)).reshape(
                            (eco.YSize, eco.XSize))  # 9
                        pmhh = np.where(pmh <= 0.001, None, pmh)
                        print "pmh ok"  #	quitar	valores	muy	bajos!
                        pmhhmax = pmhh.max()
                        print 'Max. similarity value is ' + str(pmhhmax)
                        dst_ds.GetRasterBand(1).WriteArray(pmhh)
                        dst_ds = None
                        hr11 = np.where(pmhh > 0, 1, 0)  # 0.5
                        hr1 = hr11.flatten()
                        hr1sum = sum(hr1)
                        print 'Number of pixels with similarity higher than 0 is ' + str(
                            hr1sum)
                        hr1insumaver = hr1insum = 0
                        hr1sumaver = hr1sum
                        src_ds_sim = gdal.Open(outfile)
                        sim = src_ds_sim.GetRasterBand(1)
                        gt_sim = src_ds_sim.GetGeoTransform()
                        xoff = int((gt_pa[0] - gt_sim[0]) / 1000)
                        yoff = int((gt_sim[3] - gt_pa[3]) / 1000)
                        xextentpa = xoff + par.XSize
                        yextentpa = yoff + par.YSize
                        xless = sim.XSize - xextentpa
                        yless = sim.YSize - yextentpa
                        xsize = par.XSize
                        ysize = par.YSize
                        if xoff > 0 and yoff > 0 and pmhhmax > 0.01 and hr1sum > 1 and maxmh != float(
                                'NaN'
                        ):  # and ratiogeom < 100: # also checks that results are not empty

                            # reading the similarity ecoregion without the PA (tmp mask)
                            os.system('gdal_merge.py ' + str(ecofile) + ' ' +
                                      str(pa4) + ' -o ' + str(outfile3) +
                                      ' -ot Int32')
                            hri_pa_bb03 = sim.ReadAsArray().astype(np.float32)
                            hri_pa_bb3 = hri_pa_bb03.flatten()

                            src_ds_sim2 = gdal.Open(outfile3)
                            sim2 = src_ds_sim2.GetRasterBand(1)
                            gt_sim2 = src_ds_sim2.GetGeoTransform()
                            hri_pa_bb02 = sim2.ReadAsArray().astype(np.int32)
                            #hri_pa_bb2 = hri_pa_bb02.flatten()
                            hri_pa_bb02_max = hri_pa_bb02.max()
                            print 'PA: ' + str(pa)
                            print 'PA (= max) value from mask = ' + str(
                                hri_pa_bb02_max)
                            if hri_pa_bb02.shape == hri_pa_bb03.shape:
                                hri_pa02 = np.where(
                                    hri_pa_bb02 == pa, 0,
                                    hri_pa_bb03)  # hri_pa_bb02_max

                                if xless < 0: xsize = xsize + xless
                                if yless < 0: ysize = ysize + yless
                                hri_pa_bb0 = sim.ReadAsArray(
                                    xoff, yoff, xsize,
                                    ysize).astype(np.float32)
                                hri_pa_bb = hri_pa_bb0.flatten()
                                indd = hri_pa_bb > 0
                                hri_pa0 = hri_pa_bb[indd]
                                print 'Total number of pixels with similarity values in PA: ' + str(
                                    len(hri_pa0))
                                hr1averpa = round(
                                    np.mean(hri_pa0[~np.isnan(hri_pa0)]), 2)
                                #print hr1averpa
                                #hr1medianpa = np.median(hri_pa0[~np.isnan(hri_pa0)])
                                print 'mean similarity in the park is ' + str(
                                    hr1averpa)
                                #hr1insum = sum(np.where(hri_pa0 >= 0.5, 1, 0))  # use hr1averpa as threshold instead!
                                hr1inaver = np.where(hri_pa0 >= hr1averpa, 1,
                                                     0)
                                hr1insumaver = sum(hr1inaver)
                                #print hr1insum
                                ##labeled_arrayin, num_featuresin = nd.label(hr1inaver,	structure=s)
                                hr1averr = np.where(hri_pa02 >= hr1averpa, 1,
                                                    0)  # pmhh
                                hr1aver = hr1averr.flatten()
                                print 'Total number of pixels with similarity values in ECO: ' + str(
                                    sum(hr1aver))
                                labeled_arrayaver, num_featuresaver = nd.label(
                                    hr1averr, structure=s)
                                print 'Nr of similar patches found: ' + str(
                                    num_featuresaver)
                                if num_featuresaver > 0:
                                    lbls = np.arange(1, num_featuresaver + 1)
                                    psizes = nd.labeled_comprehension(
                                        labeled_arrayaver, labeled_arrayaver,
                                        lbls, np.count_nonzero, float, 0)  #-1
                                    pszmax = psizes.max()  #-hr1insumaver
                                    dst_ds2 = driver.Create(
                                        outfile2, src_ds_eco.RasterXSize,
                                        src_ds_eco.RasterYSize, num_bands,
                                        gdal.GDT_Int32, dst_options)
                                    dst_ds2.SetGeoTransform(
                                        src_ds_eco.GetGeoTransform())
                                    dst_ds2.SetProjection(
                                        src_ds_eco.GetProjectionRef())
                                    dst_ds2.GetRasterBand(1).WriteArray(
                                        labeled_arrayaver)
                                    dst_ds2 = None
                                    #num_feats = num_features - num_featuresaver
                                    hr1sumaver = sum(hr1aver)
                                    hr2aver = hr1sumaver  #- hr1insumaver
                                    pxpa = ind_pa.shape[0]
                                    indokpsz = psizes >= pxpa
                                    pszsok = psizes[indokpsz]  # NEW
                                    sumpszok = sum(pszsok)
                                    lpratio = round(float(pszmax / pxpa), 2)
                                    lpratio2 = round(float(sumpszok / pxpa), 2)
                                    numpszok = len(pszsok)
                                    hr3aver = round(float(hr2aver) / pxpa, 2)
                                    aggregation = round(
                                        float(hr2aver) / num_featuresaver, 2)
                        #hr2 = hr1sumaver - hr1insumaver
                        #print hr2
                        #hr3 = float(hr2/ind_pa.shape[0])
                        #print hr3
                    wb = open(csvname, 'a')
                    var = str(ecor) + ' ' + str(pa) + ' ' + str(
                        hr1averpa
                    ) + ' ' + str(hr2aver) + ' ' + str(pxpa) + ' ' + str(
                        hr1insumaver
                    ) + ' ' + str(hr3aver) + ' ' + str(
                        num_featuresaver
                    ) + ' ' + str(lpratio) + ' ' + str(lpratio2) + ' ' + str(
                        numpszok
                    ) + ' ' + str(pszmax) + ' ' + str(aggregation) + ' ' + str(
                        treepamin
                    ) + ' ' + str(treepamax) + ' ' + str(eprpamin) + ' ' + str(
                        eprpamax
                    ) + ' ' + str(prepamin) + ' ' + str(prepamax) + ' ' + str(
                        biopamin
                    ) + ' ' + str(biopamax) + ' ' + str(slopepamin) + ' ' + str(
                        slopepamax
                    ) + ' ' + str(ndwipamin) + ' ' + str(ndwipamax) + ' ' + str(
                        ndvimaxpamin
                    ) + ' ' + str(
                        ndvimaxpamax
                    ) + ' ' + str(
                        ndviminpamin
                    ) + ' ' + str(
                        ndviminpamax
                    ) + ' ' + str(
                        hpamin
                    ) + ' ' + str(
                        hpamax
                    ) + ' ' + str(
                        treepamean
                    ) + ' ' + str(eprpamean) + ' ' + str(
                        prepamean
                    ) + ' ' + str(biopamean) + ' ' + str(
                        slopepamean
                    ) + ' ' + str(
                        ndwipamean
                    ) + ' ' + str(ndvimaxpamean) + ' ' + str(
                        ndviminpamean
                    ) + ' ' + str(
                        hpamean
                    )  # exclude PA!  #+' '+str(hr1p25pa)+' '+str(hr3)+' '+str(hr1medianpa)+' '+str(num_features)+' '
                    wb.write(var)
                    wb.write('\n')
                    wb.close()
                    print "results exported"
                    os.system('rm ' + str(outfile3))
        wb = open(csvname1, 'a')  #	LOCAL	FOLDER
        var = str(ecor)
        wb.write(var)
        wb.write('\n')
        wb.close()
    print "END ECOREG: " + str(ecor)
Exemplo n.º 13
0
def plot_cloud_alpha(data, time, n_bins, size_min, size_max, ref_min,
                     n_cloud_min):
    """
    Written by Till Vondenhoff, 20-03-25
    
    Calculates slopes using 3 different methods (described by Newman)
    
    Parameters:
        data:           unfiltered cloud data including multiple timesteps
        time:           array with all timesteps
        n_bins:         number of bins
        ref_min:        threshold for smallest value to be counted as cloud
        size_min:       value of the first bin
        size_max:       value of the last bin
        n_cloud_min:    minimum number of clouds per timestep required to calculate slope
        
    Returns:
        valid_time:     timesteps that were evaluated (timesteps with n_clouds <= n_cloud_min keep NaN slopes)
        valid_n_clouds: number of clouds found in each evaluated timestep
        slope_lin:      slope of the linear regression of the power-law distribution with linear binning
        slope_log:      slope of the linear regression of the power-law distribution with logarithmic binning
        slope_cum:      slope of the linear regression of the cumulative distribution (alpha-1)
        
    """
    slope_lin = []
    slope_log = []
    slope_cum = []

    valid_n_clouds = []
    valid_time = []

    for timestep in time:
        timestep_data = data[timestep, :, :]

        # marks everything above ref_min as a cloud
        cloud_2D_mask = np.zeros_like(timestep_data)
        cloud_2D_mask[timestep_data > ref_min] = 1

        # calculates how many clouds exist in cloud_2D_mask, returns total number of clouds
        labeled_clouds, n_clouds = ndi.label(cloud_2D_mask)
        labels = np.arange(1, n_clouds + 1)

        if (n_clouds <= n_cloud_min):
            slope_lin.append(np.NaN)
            slope_log.append(np.NaN)
            slope_cum.append(np.NaN)

            valid_n_clouds.append(n_clouds)
            valid_time.append(timestep)
            print('timestep', timestep, 'has too few clouds:', n_clouds)
            continue

        valid_n_clouds.append(n_clouds)
        valid_time.append(timestep)
        # Calculate how many cells belong to each labeled cloud using ndi.labeled_comprehension;
        # this returns the pixel count of each cloud and therefore its 2D size
        cloud_number_cells = ndi.labeled_comprehension(cloud_2D_mask,
                                                       labeled_clouds, labels,
                                                       np.size, float, 0)

        cloud_area = np.sqrt(cloud_number_cells) * 25

        # linear power-law distribution of the data (a,b)
        f, slope, intercept = lin_binning(cloud_area,
                                          n_bins,
                                          size_min,
                                          size_max,
                                          show_plt=0)
        slope_lin.append(slope)

        # logarithmic binning of the data (c)
        f, slope, intercept = log_binning(cloud_area,
                                          n_bins,
                                          size_min,
                                          size_max,
                                          show_plt=0)
        slope_log.append(slope)

        # cumulative distribution by sorting the data (d)
        f, slope, intercept = cum_dist(cloud_area,
                                       size_min,
                                       size_max,
                                       show_plt=0)
        slope_cum.append(slope)

    return valid_time, valid_n_clouds, slope_lin, slope_log, slope_cum
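A minimal, self-contained sketch of the labeling step used in plot_cloud_alpha, on a synthetic field; the threshold and the factor 25 mirror the snippet above, and the binning/fitting helpers (lin_binning, log_binning, cum_dist) are not reproduced:

import numpy as np
from scipy import ndimage as ndi

rng = np.random.default_rng(0)
field = rng.random((100, 100))            # stand-in for one timestep of cloud data
ref_min = 0.95                            # hypothetical cloud threshold

cloud_2D_mask = (field > ref_min).astype(int)
labeled_clouds, n_clouds = ndi.label(cloud_2D_mask)
labels = np.arange(1, n_clouds + 1)

# pixel count of every labeled cloud
cloud_number_cells = ndi.labeled_comprehension(cloud_2D_mask, labeled_clouds,
                                               labels, np.size, float, 0)
cloud_area = np.sqrt(cloud_number_cells) * 25   # same scaling as above
print(n_clouds, cloud_area[:5])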
train_batch_loader = lytic_HU_3cto3v_Loader_patch.data_loader(
    train_records, resize_option)
MAX_ITERATION = len(train_records)

for itr in range(MAX_ITERATION):
    train_batch_pre_images, train_batch_main_images, train_batch_post_images, train_batch_main_annotation, file_list = train_batch_loader.get_next_batch(
        FLAGS.training_batch_size)
    # print(file_list)

    batch_main_training_image = np.squeeze(train_batch_main_images)
    batch_main_training_annotation = np.squeeze(train_batch_main_annotation)
    batch_main_multiple_image = batch_main_training_image * batch_main_training_annotation

    lbl, nlbl = nd.label(batch_main_multiple_image)
    lbls = np.arange(1, nlbl + 1)
    get_median_value = nd.labeled_comprehension(batch_main_multiple_image, lbl,
                                                lbls, np.median, float, -1)
    print('Median_HU_Value:', get_median_value)

    if np.all(get_median_value < 1300):
        print('\r')
        print('==============================')
        print(file_list)
        print(get_median_value)
        print('==============================')
        print('\r')
        file_list = sum(file_list, [])
        low_HU_records.append(file_list)

with open('Osteolyitc_low_value_HU_fold1.pickle', 'wb') as f:
    pickle.dump(low_HU_records, f, protocol=pickle.HIGHEST_PROTOCOL)
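The loop above keeps only cases whose labeled regions all have a median HU below 1300. A toy sketch of that per-label median check without the data loader (the image here is synthetic):

import numpy as np
import scipy.ndimage as nd

image = np.zeros((8, 8))
image[1:3, 1:3] = 900     # one low-HU blob
image[5:7, 5:7] = 1500    # one high-HU blob

lbl, nlbl = nd.label(image)
lbls = np.arange(1, nlbl + 1)
get_median_value = nd.labeled_comprehension(image, lbl, lbls, np.median, float, -1)
print('Median_HU_Value:', get_median_value)
if np.all(get_median_value < 1300):
    print('all labeled regions are below 1300 HU')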
Exemplo n.º 15
0
def objstats(args):
    # Open and read from image and segmentation
    try:
        img_ds = gdal.Open(args.image, gdal.GA_ReadOnly)
    except:
        logger.error("Could not open image: {}".format(i=args.image))
        sys.exit(1)

    try:
        seg_ds = ogr.Open(args.segment, 0)
        seg_layer = seg_ds.GetLayer()
    except:
        logger.error("Could not open segmentation vector file: {}".format(args.segment))
        sys.exit(1)

    cols, rows = img_ds.RasterXSize, img_ds.RasterYSize
    bands = range(1, img_ds.RasterCount + 1)
    if args.bands is not None:
        bands = args.bands

    # Rasterize segments
    logger.debug("About to rasterize segment vector file")
    img_srs = osr.SpatialReference()
    img_srs.ImportFromWkt(img_ds.GetProjectionRef())

    mem_raster = gdal.GetDriverByName("MEM").Create("", cols, rows, 1, gdal.GDT_UInt32)
    mem_raster.SetProjection(img_ds.GetProjection())
    mem_raster.SetGeoTransform(img_ds.GetGeoTransform())

    # Create artificial 'FID' field
    fid_layer = seg_ds.ExecuteSQL('select FID, * from "{l}"'.format(l=seg_layer.GetName()))
    gdal.RasterizeLayer(mem_raster, [1], fid_layer, options=["ATTRIBUTE=FID"])
    logger.debug("Rasterized segment vector file")

    seg = mem_raster.GetRasterBand(1).ReadAsArray()
    logger.debug("Read segmentation image into memory")
    mem_raster = None
    seg_ds = None

    # Get list of unique segments
    useg = np.unique(seg)

    # If calc is num, do only for 1 band
    out_bands = 0
    for stat in args.stat:
        if stat == "num":
            out_bands += 1
        else:
            out_bands += len(bands)

    # Create output driver
    driver = gdal.GetDriverByName(args.format)
    out_ds = driver.Create(args.output, cols, rows, out_bands, gdal.GDT_Float32)

    # Loop through image bands
    out_b = 0
    out_2d = np.empty_like(seg, dtype=np.float32)
    for i_b, b in enumerate(bands):
        img_band = img_ds.GetRasterBand(b)
        ndv = img_band.GetNoDataValue()
        band_name = img_band.GetDescription()
        if not band_name:
            band_name = "Band {i}".format(i=b)
        logger.info('Processing input band {i}, "{b}"'.format(i=b, b=band_name))

        img = img_band.ReadAsArray().astype(gdal_array.GDALTypeCodeToNumericTypeCode(img_band.DataType))
        logger.debug('Read image band {i}, "{b}" into memory'.format(i=b, b=band_name))

        for stat in args.stat:
            logger.debug("    calculating {s}".format(s=stat))
            if stat == "mean":
                out = ndimage.mean(img, seg, useg)
            elif stat == "var":
                out = ndimage.variance(img, seg, useg)
            elif stat == "num":
                # Remove from list of stats so it is only calculated once
                args.stat.remove("num")
                count = np.ones_like(seg)
                out = ndimage.sum(count, seg, useg)
            elif stat == "sum":
                out = ndimage.sum(img, seg, useg)
            elif stat == "min":
                out = ndimage.minimum(img, seg, useg)
            elif stat == "max":
                out = ndimage.maximum(img, seg, useg)
            elif stat == "mode":
                out = ndimage.labeled_comprehension(img, seg, useg, scipy_mode, out_2d.dtype, ndv)
            else:
                logger.error("Unknown stat. Not sure how you got here")
                sys.exit(1)

            # Transform to 2D
            out_2d = out[seg - seg.min()]

            # Fill in NDV
            if ndv is not None:
                out_2d[np.where(img == ndv)] = ndv

            # Write out the data
            out_band = out_ds.GetRasterBand(out_b + 1)
            out_band.SetDescription(band_name)
            if ndv is not None:
                out_band.SetNoDataValue(ndv)
            logger.debug("    Writing object statistic for band {b}".format(b=b + 1))
            out_band.WriteArray(out_2d, 0, 0)
            out_band.FlushCache()
            logger.debug("    Wrote out object statistic for band {b}".format(b=b + 1))
            out_b += 1

    out_ds.SetGeoTransform(img_ds.GetGeoTransform())
    out_ds.SetProjection(img_ds.GetProjection())

    img_ds = None
    seg_ds = None
    out_ds = None
    logger.info("Completed object statistic calculation")
def detection_by_scene_segmentation():
    '''Detect moving objects using the distortion field of the centers of mass
    of background objects'''
    if co.counters.im_number == 0:
        co.segclass.needs_segmentation = 1
        if not co.segclass.exists_previous_segmentation:
            print 'No existing previous segmentation. The initialisation will delay...'
        else:
            print 'Checking similarity..'
            old_im = co.meas.background
            check_if_segmented = np.sqrt(np.sum(
                (co.data.depth_im[co.meas.trusty_pixels.astype(bool)].astype(float) -
                 old_im[co.meas.trusty_pixels.astype(bool)].astype(float))**2))
            print 'Euclidean Distance of old and new background is ' +\
                str(check_if_segmented)
            print 'Minimum Distance to approve previous segmentation is ' +\
                str(co.CONST['similar_bg_min_dist'])
            if check_if_segmented < co.CONST['similar_bg_min_dist']:
                print 'No need to segment again'
                co.segclass.needs_segmentation = 0
            else:
                print 'Segmentation is needed'

    if co.segclass.needs_segmentation and co.counters.im_number >= 1:
        if co.counters.im_number == (co.CONST['framerate'] *
                                     co.CONST['calib_secs'] - 1):
            co.segclass.flush_previous_segmentation()
            co.segclass.nz_objects.image = np.zeros_like(co.data.depth_im) - 1
            co.segclass.z_objects.image = np.zeros_like(co.data.depth_im) - 1
            levels_num = 8
            levels = np.linspace(np.min(co.data.depth_im[co.data.depth_im > 0]), np.max(
                co.data.depth_im), levels_num)
            co.segclass.segment_values = np.zeros_like(co.data.depth_im)
            for count in range(levels_num - 1):
                co.segclass.segment_values[(co.data.depth_im >= levels[count]) *
                                           (co.data.depth_im <= levels[count + 1])] = count + 1

        elif co.counters.im_number == (co.CONST['framerate'] *
                                       co.CONST['calib_secs']):
            co.segclass.nz_objects.count = -1
            co.segclass.z_objects.count = -1
            co.segclass.segment_values = co.segclass.segment_values * co.meas.trusty_pixels
            for val in np.unique(co.segclass.segment_values):
                objs = np.ones_like(co.data.depth_im) * \
                    (val == co.segclass.segment_values)
                labeled, nr_objects =\
                    ndimage.label(objs * co.edges.calib_frame)
                lbls = np.arange(1, nr_objects + 1)
                if val > 0:
                    ndimage.labeled_comprehension(objs, labeled, lbls,
                                                  co.segclass.nz_objects.process, float, 0,
                                                  True)
                else:
                    ndimage.labeled_comprehension(objs, labeled, lbls,
                                                  co.segclass.z_objects.process,
                                                  float, 0, True)
            for (points, pixsize,
                 xsize, ysize) in (co.segclass.nz_objects.untrusty +
                                   co.segclass.z_objects.untrusty):
                co.segclass.z_objects.count += 1
                co.segclass.z_objects.image[
                    tuple(points)] = co.segclass.z_objects.count
                co.segclass.z_objects.pixsize.append(pixsize)
                co.segclass.z_objects.xsize.append(xsize)
                co.segclass.z_objects.ysize.append(ysize)

            print 'Found or partitioned',\
                co.segclass.nz_objects.count +\
                co.segclass.z_objects.count + 2, 'background objects'
            co.segclass.needs_segmentation = 0
            with open(co.CONST['segmentation_data'] + '.pkl', 'wb') as output:
                pickle.dump((co.segclass, co.meas), output, -1)
            print 'Saved segmentation data for future use.'
    elif (not co.segclass.needs_segmentation) and co.counters.im_number >= 2:
        if not co.segclass.initialised_centers:
            try:
                co.segclass.nz_objects.find_object_center(1)
            except BaseException as err:
                print 'Centers initialisation Exception'
                raise err
            try:
                co.segclass.z_objects.find_object_center(0)
            except BaseException as err:
                print 'Centers initialisation Exception'
                raise err
            co.segclass.initialise_neighborhoods()
            co.segclass.initialised_centers = 1
        else:
            try:
                co.segclass.nz_objects.find_object_center(1)
            except BaseException as err:
                print 'Centers calculation Exception'
                raise err
        if co.segclass.nz_objects.center.size > 0:
            co.segclass.nz_objects.find_centers_displacement()
            co.meas.found_objects_mask = co.segclass.find_objects()

            points_on_im = co.data.depth3d.copy()
            # points_on_im[np.sum(points_on_im,axis=2)==0,:]=np.array([1,0,1])
            for calc, point1, point2 in zip(
                    co.segclass.nz_objects.centers_to_calculate,
                    co.segclass.nz_objects.initial_center,
                    co.segclass.nz_objects.center):
                if point1[0] != -1:
                    if calc:
                        cv2.arrowedLine(points_on_im,
                                        (point1[1], point1[0]),
                                        (point2[1], point2[0]), [0, 1, 0], 2, 1)
                    else:
                        cv2.arrowedLine(points_on_im,
                                        (point1[1], point1[0]),
                                        (point2[1], point2[0]), [0, 0, 1], 2, 1)
            struct_el = cv2.getStructuringElement(
                cv2.MORPH_ELLIPSE, tuple(2 * [5]))
            co.meas.found_objects_mask = cv2.morphologyEx(
                co.meas.found_objects_mask.astype(np.uint8), cv2.MORPH_CLOSE, struct_el)
            struct_el = cv2.getStructuringElement(
                cv2.MORPH_ELLIPSE, tuple(2 * [10]))
            co.meas.found_objects_mask = cv2.morphologyEx(
                co.meas.found_objects_mask.astype(np.uint8), cv2.MORPH_OPEN, struct_el)
            hand_patch, hand_patch_pos = hsa.main_process(
                co.meas.found_objects_mask.astype(
                    np.uint8), co.meas.all_positions, 1)
            co.meas.hand_patch = hand_patch
            co.meas.hand_patch_pos = hand_patch_pos
            if len(co.im_results.images) == 1:
                co.im_results.images.append(
                    (255 * co.meas.found_objects_mask).astype(np.uint8))
            co.im_results.images.append(points_on_im)
            # elif len(co.im_results.images)==2:
            #    co.im_results.images[1][co.im_results.images[1]==0]=(255*points_on_im).astype(np.uint8)[co.im_results.images[1]==0]
            # if hand_points.shape[1]>1:
            #    points_on_im[tuple(hand_points.T)]=[1,0,0]
            # co.im_results.images.append(points_on_im)
            return 1
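The calibration step above passes True as the last argument of labeled_comprehension so that the per-object callable (co.segclass.*.process) also receives the flat positions of each object's pixels. A minimal sketch of that pass_positions pattern with a hypothetical callable:

import numpy as np
from scipy import ndimage

def collect_object(values, positions):
    # values: pixel values of one labeled object; positions: their flat indices
    print('object of', values.size, 'pixels at flat positions', positions)
    return float(values.size)

objs = np.array([[1, 1, 0, 0],
                 [1, 0, 0, 2],
                 [0, 0, 2, 2]])
labeled, nr_objects = ndimage.label(objs > 0)
lbls = np.arange(1, nr_objects + 1)
sizes = ndimage.labeled_comprehension(objs, labeled, lbls,
                                      collect_object, float, 0, True)
print(sizes)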
Exemplo n.º 17
0
    def computeFeature(self, parameters, context, feedback, clip_flag=True):
        step_feedback = feedbacks.ProgressMultiStepFeedback(6, feedback)
        clipped_path = QgsProcessingUtils.generateTempFilename(
            "labeled_clipped" + self.curr_suffix + ".tif")
        clipped = qgsTreatments.clipRasterFromVector(
            self.labeled_path,
            self.report_layer,
            clipped_path,
            crop_cutline=True,
            nodata=self.nodata,
            data_type=self.label_out_type,
            context=context,
            feedback=step_feedback)
        # clipped = qgsTreatments.clipRasterAllTouched(self.labeled_path,self.report_layer,
        # self.input_crs,out_path=clipped_path,nodata=self.nodata,
        # data_type=self.label_out_type,resolution=self.resolution,
        # context=context,feedback=step_feedback)
        step_feedback.setCurrentStep(1)
        clip_report = qgsTreatments.getRasterUniqueValsReport(
            clipped, context, step_feedback)
        step_feedback.pushDebugInfo("clip_report = " + str(clip_report))
        step_feedback.setCurrentStep(2)
        if clip_flag:
            input_clipped_path = QgsProcessingUtils.generateTempFilename(
                "input_clipped_clipped" + self.curr_suffix + ".tif")
            input_clipped = qgsTreatments.clipRasterFromVector(
                self.input_clipped,
                self.report_layer,
                input_clipped_path,
                crop_cutline=True,
                data_type=0,
                nodata=255,
                context=context,
                feedback=step_feedback)
        else:
            input_clipped_path = self.input_clipped
        # input_clipped = qgsTreatments.clipRasterAllTouched(self.input_clipped,self.report_layer,
        # self.input_crs,out_path=input_clipped_path,nodata=self.nodata,
        # data_type=0,resolution=self.resolution,
        # context=context,feedback=step_feedback)
        step_feedback.setCurrentStep(3)
        input_clip_report = qgsTreatments.getRasterUniqueValsReport(
            input_clipped_path, context, step_feedback)
        step_feedback.pushDebugInfo("input_clip_report = " +
                                    str(input_clip_report))
        clip_classes, clip_array = qgsUtils.getRasterValsAndArray(
            str(clipped_path))
        clip_labels = [int(cl) for cl in clip_classes]
        step_feedback.setCurrentStep(4)
        if 0 in clip_labels:
            clip_labels.remove(0)
        nb_patches_clipped = len(clip_labels)
        # feedback.pushDebugInfo("nb_patches = " + str(nb_patches))
        # feedback.pushDebugInfo("nb labels = " + str(len(labels)))
        step_feedback.pushDebugInfo("nb clip labels = " +
                                    str(len(clip_labels)))

        # Patches length
        if clip_labels:
            patches_len2 = ndimage.labeled_comprehension(
                clip_array, clip_array, clip_labels, len, int, 0)
            step_feedback.pushDebugInfo("patches_len2 = " + str(patches_len2))
            step_feedback.pushDebugInfo("nb patches_len2 = " +
                                        str(len(patches_len2)))
        step_feedback.setCurrentStep(5)

        sum_ai = 0
        sum_ai_sq = 0
        sum_ai_sq_cbc = 0
        for cpt, lbl in enumerate(clip_labels):
            lbl_val = int(lbl)
            cbc_len = self.patches_len[lbl_val - 1]
            patch_len = patches_len2[cpt]
            if cbc_len < patch_len:
                utils.internal_error("CBC len " + str(cbc_len) +
                                     " < patch_len " + str(patch_len))
            ai = patch_len * self.pix_area
            ai_cbc = cbc_len * self.pix_area
            sum_ai_sq += ai * ai
            sum_ai_sq_cbc += ai * ai_cbc
            sum_ai += ai
        step_feedback.pushDebugInfo("sum_ai = " + str(sum_ai))
        step_feedback.pushDebugInfo("sum_ai_sq = " + str(sum_ai_sq))
        step_feedback.pushDebugInfo("sum_ai_sq_cbc = " + str(sum_ai_sq_cbc))
        step_feedback.pushDebugInfo("unit_divisor = " + str(self.unit_divisor))
        if sum_ai_sq == 0:
            step_feedback.reportError(
                "Empty area for patches, please check your selection.")

        nb_pix_old = len(clip_array[clip_array != self.nodata])
        nb_pix2 = input_clip_report['TOTAL_PIXEL_COUNT']
        nb_pix_nodata2 = input_clip_report['NODATA_PIXEL_COUNT']
        nb_pix = nb_pix2 - nb_pix_nodata2
        nb_pix3 = clip_report['TOTAL_PIXEL_COUNT']
        nb_pix_nodata3 = clip_report['NODATA_PIXEL_COUNT']
        nb_pix33 = nb_pix3 - nb_pix_nodata3
        nb_0 = len(clip_array[clip_array == 0])
        nb_not_0 = len(clip_array[clip_array != 0])
        step_feedback.pushDebugInfo("nb_pix_old = " + str(nb_pix_old))
        step_feedback.pushDebugInfo("nb_pix2 = " + str(nb_pix2))
        step_feedback.pushDebugInfo("nb_pix_nodata2 = " + str(nb_pix_nodata2))
        step_feedback.pushDebugInfo("nb_pix = " + str(nb_pix))
        step_feedback.pushDebugInfo("nb_pix3 = " + str(nb_pix3))
        step_feedback.pushDebugInfo("nb_pix_nodata3 = " + str(nb_pix_nodata3))
        step_feedback.pushDebugInfo("nb_pix33 = " + str(nb_pix33))
        step_feedback.pushDebugInfo("nb_0 = " + str(nb_0))
        step_feedback.pushDebugInfo("nb_not_0 = " + str(nb_not_0))
        tot_area = nb_pix * self.pix_area
        step_feedback.pushDebugInfo("tot_area = " + str(tot_area))
        #area_sq = math.pow(nb_pix,2)
        if nb_pix == 0:
            step_feedback.reportError(
                "Unexpected error : empty area for input layer")
        res_dict = {
            self.REPORT_AREA: tot_area,
            self.SUM_AI: sum_ai,
            self.SUM_AI_SQ: sum_ai_sq,
            self.SUM_AI_SQ_CBC: sum_ai_sq_cbc,
            self.NB_PATCHES: nb_patches_clipped,
            self.DIVISOR: self.unit_divisor,
        }
        res = self.mkOutputs(parameters, res_dict, context)
        step_feedback.setCurrentStep(6)
        return res
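In computeFeature the per-patch pixel counts come from running labeled_comprehension with the labeled raster as both input and labels, so len simply counts the pixels of each label; those counts then feed the sum_ai / sum_ai_sq accumulators. A toy sketch of that step (pix_area is made up and the CBC lengths from self.patches_len are omitted):

import numpy as np
from scipy import ndimage

clip_array = np.array([[1, 1, 0, 2],
                       [1, 0, 0, 2],
                       [0, 3, 3, 2]])
clip_labels = [int(cl) for cl in np.unique(clip_array) if cl != 0]
pix_area = 25.0   # assumed pixel area

patches_len2 = ndimage.labeled_comprehension(clip_array, clip_array,
                                             clip_labels, len, int, 0)
sum_ai = float(np.sum(patches_len2 * pix_area))
sum_ai_sq = float(np.sum((patches_len2 * pix_area) ** 2))
print(patches_len2, sum_ai, sum_ai_sq)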
Exemplo n.º 18
0
def connect_wet_segments(valley_raster, water_ele_raster, wet_raster, flowdir_raster, ele_raster, \
                         width_raster, order_raster, connected_wet_raster, max_gap, pix_per_m):
    corner = arcpy.Point(arcpy.Describe(valley_raster).Extent.XMin, arcpy.Describe(valley_raster).Extent.YMin)
    dx = arcpy.Describe(valley_raster).meanCellWidth

    valley = arcpy.RasterToNumPyArray(valley_raster, nodata_to_value=0).astype(np.uint8)
    dep_ele = arcpy.RasterToNumPyArray(water_ele_raster, nodata_to_value=0)
    wet = arcpy.RasterToNumPyArray(wet_raster, nodata_to_value=0).astype(np.uint8)
    ele = arcpy.RasterToNumPyArray(ele_raster, nodata_to_value=0)
    width = arcpy.RasterToNumPyArray(width_raster, nodata_to_value=0)
    width = np.where(wet == 1, width, 0)
    dep_ele = np.where(wet == 1, dep_ele, 0)

    Lab_wet, num_label_wet = ndimage.label(wet, structure=np.ones((3, 3)))

    # find dry and wet valley
    tran = disconnect_valley_segments(valley_raster, flowdir_raster, 3, order_raster)

    dry_valley = np.where((valley == 1) & (wet == 0), 1, 0).astype(np.uint8)
    wet_valley = np.where((valley == 1) & (wet == 1), 1, 0).astype(np.uint8)

    temp_dry_valley = ndimage.binary_dilation(dry_valley, iterations=1, structure=np.ones((3, 3))).astype(np.uint8)
    dry_valley = np.where(((wet_valley == 1) & (temp_dry_valley == 1)) | (dry_valley == 1), 1, 0).astype(np.uint8)
    dry_valley = np.where(tran == 1, 0, dry_valley).astype(np.uint8)
    del temp_dry_valley

    Lab_dry, num_label_dry = ndimage.label(dry_valley, structure=np.ones((3, 3)))
    labels = np.arange(1, num_label_dry + 1)

    # 1. connection of dry segments
    num_conn_label = ndimage.labeled_comprehension(Lab_wet, Lab_dry, labels, count_unique, int, 0)
    num_conn = np.zeros_like(wet)
    num_conn = num_conn_label[Lab_dry - 1]
    num_conn = np.where(Lab_dry == 0, 0, num_conn)
    conn_dry_valley = np.where(num_conn >= 3, 1, 0)
    del num_conn, num_conn_label

    # 2. elevation of dry segments
    Lab_dry, num_label_dry = ndimage.label(conn_dry_valley, structure=np.ones((3, 3)))
    labels = np.arange(1, num_label_dry + 1)
    ave_ele_label = ndimage.labeled_comprehension(dep_ele, Lab_dry, labels, np.max, float, 0)
    max_ele_label = ndimage.labeled_comprehension(ele, Lab_dry, labels, np.max, float, 0)
    diff_ele = np.zeros_like(ele)
    diff_ele = ave_ele_label[Lab_dry - 1] - max_ele_label[Lab_dry - 1]
    diff_ele = np.where(Lab_dry == 0, 0, diff_ele)
    conn_dry_valley = np.where(diff_ele < 0, 0, conn_dry_valley)

    del diff_ele, max_ele_label, ave_ele_label

    # 3. length of dry segment
    Lab_dry, num_label_dry = ndimage.label(conn_dry_valley, structure=np.ones((3, 3)))
    labels = np.arange(1, num_label_dry + 1)
    area_gap_label = ndimage.labeled_comprehension(conn_dry_valley, Lab_dry, labels, np.sum, float, 0) * pix_per_m ** 2
    area_dry = np.zeros_like(ele)
    area_dry = area_gap_label[Lab_dry - 1]
    area_dry = np.where((Lab_dry == 0), 0, area_dry)
    conn_dry_valley = np.where(area_dry > max_gap, 0, conn_dry_valley)
    del area_dry, ele

    # find width of added segments
    Lab_dry, num_label_dry = ndimage.label(conn_dry_valley, structure=np.ones((3, 3)))
    labels = np.arange(1, num_label_dry + 1)
    width_gap_label = ndimage.labeled_comprehension(width, Lab_dry, labels, np.sum, float, 0)
    num_conn_label = ndimage.labeled_comprehension(Lab_wet, Lab_dry, labels, count_unique, int, 0)
    width_conn = np.zeros_like(width)
    width_conn = (width_gap_label[Lab_dry - 1] / num_conn_label[Lab_dry - 1]).astype(np.float32)
    width_conn = np.where(Lab_dry == 0, 0, width_conn)

    add_wet = add_width(width_conn)
    wet = np.where((add_wet == 1) | (wet == 1), 1, 0)

    arcpy.NumPyArrayToRaster(wet, corner, dx, dx).save(connected_wet_raster)
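count_unique is not defined in this excerpt; from the way it is called (counting how many distinct wet-segment labels each dry segment touches, with >= 3 required because the background label 0 is always present), a plausible reading is simply the number of unique values, as in this hypothetical sketch:

import numpy as np
from scipy import ndimage

def count_unique(values):
    # hypothetical helper: number of distinct labels seen (background 0 included)
    return len(np.unique(values))

Lab_wet = np.array([[1, 1, 0, 0, 2, 2],
                    [0, 0, 0, 0, 0, 0]])
dry_valley = np.array([[0, 1, 1, 1, 1, 0],
                       [0, 1, 1, 1, 1, 0]])
Lab_dry, num_label_dry = ndimage.label(dry_valley, structure=np.ones((3, 3)))
labels = np.arange(1, num_label_dry + 1)

num_conn_label = ndimage.labeled_comprehension(Lab_wet, Lab_dry, labels,
                                               count_unique, int, 0)
print(num_conn_label)   # 3: two wet segments plus the background label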
Exemplo n.º 19
0
def label_size(radar, field, structure=None, rays_wrap_around=False,
               size_field=None, debug=False, verbose=False):
    """
    Label connected pixels in a binary radar field and compute the size of each
    label. Unexpected results may occur if the specified field is not binary.

    Parameters
    ----------
    radar : Radar
        Py-ART Radar containing the specified field to be labeled.
    field : str
        Radar field to be labeled.
    structure : array_like, optional
        Binary structuring element used in labeling. The default structuring
        element has a squared connectivity equal to 1, i.e., only nearest
        neighbours are connected to the structure origin and
        diagonally-connected elements are not considered neighbours.
    rays_wrap_around : bool, optional
        True if all sweeps have contiguous rays, e.g., 360 deg PPI radar
        volumes.
    size_field : str, optional
        Name of the radar field used to store the computed label sizes. The
        default is '<field>_feature_size'.
    debug : bool, optional
        True to print debugging information, False to suppress.
    verbose : bool, optional
        True to print relevant information, False to suppress.

    """

    if verbose:
        print 'Performing binary labeling: {}'.format(field)

    if size_field is None:
        size_field = '{}_feature_size'.format(field)

    size_dict = {
        'data': np.zeros_like(radar.fields[field]['data'], dtype=np.int32),
        'units': 'unitless',
        'valid_min': 0,
        'comment': 'size in pixels of connected features',
        }
    radar.add_field(size_field, size_dict, replace_existing=True)

    for sweep, slc in enumerate(radar.iter_slice()):

        # Parse radar sweep data
        data = np.ma.getdata(radar.get_field(sweep, field))

        # Label connected pixels defined by the structuring element
        labels, nlabels = ndimage.label(data, structure=structure)
        index = np.arange(1, nlabels + 1)
        if debug:
            print 'Unique features in sweep {}: {}'.format(sweep, nlabels)

        if nlabels > 0:

            # Compute the size in number of pixels of each labeled feature
            sizes = ndimage.labeled_comprehension(
                data, labels, index, np.count_nonzero, np.int32, 0)

            # Set each labeled feature to its total size in radar gates
            for label, size in zip(index, sizes):
                radar.fields[size_field]['data'][slc][labels == label] = size

    return
def shapes_from_labels(pixels, labeled_image, labels, shape_class, shape_data_type, *args, **kwargs):
    return ndimage.labeled_comprehension(
        pixels, labeled_image, labels,
        lambda shape_pixels: shape_class(shape_pixels, *args, **kwargs),
        shape_data_type, None, False
    ).tolist()
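shapes_from_labels hands the pixel values of every labeled region to a user-supplied shape_class and returns the constructed objects as a list. A small sketch with a hypothetical shape class and an object output dtype:

import numpy as np
from scipy import ndimage

class PixelBag(object):
    # hypothetical shape_class: just stores the pixels of one labeled region
    def __init__(self, shape_pixels):
        self.pixels = np.asarray(shape_pixels)

    def mean(self):
        return float(self.pixels.mean())

pixels = np.array([[5., 5., 0.],
                   [0., 0., 9.]])
labeled_image, n = ndimage.label(pixels > 0)
labels = np.arange(1, n + 1)

shapes = ndimage.labeled_comprehension(
    pixels, labeled_image, labels,
    lambda shape_pixels: PixelBag(shape_pixels),
    object, None, False).tolist()
print([s.mean() for s in shapes])   # one PixelBag per labeled region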
Exemplo n.º 21
0
    def phot(self, im, showmask=True):
        # TODO if we switch to astropy.photometry then we can have that
        # do the work with subpixels properly, but for now they don't
        # do rms of the bg correctly so we can't use their stuff yet.

        mask = self.setmask(im)

        if showmask:
            cmap1 = pl.matplotlib.colors.LinearSegmentedColormap.from_list(
                'my_cmap', ["black", "blue"], 2)
            cmap1._init()
            cmap1._lut[:, -1] = pl.array([0, 0.5, 0, 0, 0])
            pl.imshow(mask > 0,
                      origin="bottom",
                      interpolation="nearest",
                      cmap=cmap1)

        from scipy import ndimage
        from scipy.ndimage import measurements as m
        nin = len(pl.where(mask == 1)[0])
        nout = len(pl.where(mask == 2)[0])

        floor = pl.nanmin(im.data)
        if floor < 0: floor = 0
        raw = m.sum(im.data, mask, 1) - floor * nin

        #bg=m.mean(im.data,mask,2)
        #bgsig=m.standard_deviation(im.data,mask,2)

        #        from astropy.stats import sigma_clip
        #        clipped = sigma_clip(im.data,sig=3,iters=2)
        #        # http://astropy.readthedocs.org/en/latest/api/astropy.stats.sigma_clip.html#astropy.stats.sigma_clip
        #        # TODO what we really want is to sigma-clip only the BG array/mask
#        # because including the source will probably just be dominated by the
        #        # source...
        #        bg   =m.mean(              clipped,mask,2)-floor
        #        bgsig=m.standard_deviation(clipped,mask,2)

        # sigma_clip doesn't handle nans
        from scipy import stats

        def mymode(x):
            return stats.mode(x, axis=None)[0][0]

#        pdb.set_trace()
#        xx=stats.mode(im.data,axis=None)
#        print xx

        bg = ndimage.labeled_comprehension(im.data, mask, 2, mymode, "float",
                                           0) - floor
        #        bg = ndimage.labeled_comprehension(im.data,mask,2,pl.mean,"float",0)
        bgsig = m.standard_deviation(im.data, mask, 2)

        # assume uncert dominated by BG level.
        # TODO add sqrt(cts in source) Poisson - need gain or explicit err/pix
        uncert = bgsig * nin / pl.sqrt(nout)

        results = raw, bg, raw - bg * nin, uncert

        f = self.photfactor(im)
        if f:
            if self.debug: print "phot factor = ", f
            results = pl.array(results) * f

        if self.debug:
            #            print "max=", m.maximum(im.data,mask,1), m.maximum(im.data,mask,2)
            #            print "nin,nout=",nin,nout
            print "raw, bg, bgsubbed, uncert=", results
            pdb.set_trace()

        return results
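The background level in phot is the statistical mode of the pixels whose mask value is 2, obtained by passing a small wrapper around scipy.stats.mode to labeled_comprehension. A standalone sketch of that pattern; the mask here is synthetic (1 = source aperture, 2 = background annulus), not the one built by setmask:

import numpy as np
from scipy import ndimage, stats

def mymode(x):
    return float(stats.mode(x, axis=None)[0])

data = np.array([[10., 10., 2., 2.],
                 [10., 10., 2., 3.],
                 [2.,  2.,  2., 2.]])
mask = np.array([[1, 1, 2, 2],
                 [1, 1, 2, 2],
                 [2, 2, 2, 2]])

bg = ndimage.labeled_comprehension(data, mask, 2, mymode, float, 0)
bgsig = ndimage.standard_deviation(data, mask, 2)
print(bg, bgsig)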
Exemplo n.º 22
0
def	ehabitat(ecor,nw,nwpathout):

	global	nwpath
	if nw=='':
		nwpath = os.getcwd()
	else:
		nwpath = nw
		
	if gmaps == 0:
		initglobalmaps()
	if nwpathout=='':
		#outdir = 'results'	#	ToDo: locally	create	folder "results"	if it	does	not	exist!
		outdir = os.path.join(os.path.sep, os.getcwd(), 'results')
		safelyMakeDir(outdir)
	else:
		#outdir = nwpathout+'/results'	#	SHARED	FOLDER	PATH
		outdir = os.path.join(os.path.sep, nwpathout, 'results')
		safelyMakeDir(outdir)
		
	treepamin = treepamax = eprpamin = eprpamax = prepamin = prepamax = biopamin = biopamax = slopepamin = slopepamax = ndwipamin = ndwipamax = ndvimaxpamin = ndvimaxpamax = ndviminpamin = ndviminpamax = hpamin = hpamax = None
	s = nd.generate_binary_structure(2,2)	#	most	restrictive	pattern	for	the	landscape	patches
	#	LOCAL FOLDER
	csvname1 = os.path.join(os.path.sep, outdir, 'ecoregs_done.csv')
	print csvname1
	if os.path.isfile(csvname1) == False:
		wb = open(csvname1,'a')
		wb.write('None')
		wb.write('\n')
		wb.close()
	#	LOCAL FOLDER	
	csvname = os.path.join(os.path.sep, outdir, 'hri_results.csv')
	print csvname
	if os.path.isfile(csvname) == False:
		wb = open(csvname,'a')
		wb.write('ecoregion wdpaid averpasim hr2aver pxpa hr1insumaver hriaver nfeatsaver lpratio lpratio2 numpszok lpmaxsize aggregation treepamin treepamax eprpamin eprpamax prepamin prepamax biopamin biopamax slopepamin slopepamax ndwipamin ndwipamax ndvimaxpamin ndvimaxpamax ndviminpamin ndviminpamax hpamin hpamax treepamean eprpamean prepamean biopamean slopepamean ndwipamean ndvimaxpamean ndviminpamean hpamean')
		wb.write('\n')
		wb.close()
	treepamean = eprpamean = prepamean = biopamean = slopepamean = ndwipamean = ndvimaxpamean = ndviminpamean = hpamean = None
	ef = 'eco_'+str(ecor)+'.tif'
	ecofile = os.path.join(os.path.sep, nwpath, 'ecoregs', ef)
	#ecofile = os.path.join(os.path.sep, nwpath, os.path.sep,'ecoregs', os.path.sep, ef)
	print ecofile
	avail = os.path.isfile(ecofile)
	if avail == True:
		eco_csv = str(ecor)+'.csv'
		print eco_csv
		ecoparksf = os.path.join(os.path.sep, nwpath, 'pas', eco_csv)
		#ecoparksf = os.path.join(os.path.sep, nwpath, os.path.sep, 'pas', os.path.sep, eco_csv)
		print ecoparksf
		#ecoparksf = nwpath+'/pas/'+str(ecor)+'.csv'
		src_ds_eco = gdal.Open(ecofile)
		eco = src_ds_eco.GetRasterBand(1)
		eco_mask0 = eco.ReadAsArray(0,0,eco.XSize,eco.YSize).astype(np.int32)
		eco_mask = eco_mask0.flatten()
		gt_eco = src_ds_eco.GetGeoTransform()
		print 'eco mask'
		xoff = int((gt_eco[0]-gt_epr_global[0])/1000)
		yoff = int((gt_epr_global[3]-gt_eco[3])/1000)
		epr_eco_bb0 = epr_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		epr_eco_bb = epr_eco_bb0.flatten()
		epr_eco0 = np.where(eco_mask == 1,	(epr_eco_bb),(0))
		epr_eco = np.where(epr_eco0 == 65535.0,	(float('NaN')),(epr_eco0))
		maskepr = np.isnan(epr_eco)
		epr_eco[maskepr] = np.interp(np.flatnonzero(maskepr),	np.flatnonzero(~maskepr),	epr_eco[~maskepr])
		print 'eco epr'
		xoff = int((gt_eco[0]-gt_slope_global[0])/1000)
		yoff = int((gt_slope_global[3]-gt_eco[3])/1000)
		slope_eco_bb0 = slope_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		slope_eco_bb = slope_eco_bb0.flatten()
		slope_eco0 = np.where(eco_mask == 1,	(slope_eco_bb),(0))
		slope_eco = np.where(slope_eco0 == 65535.0,	(float('NaN')),(slope_eco0))
		maskslope = np.isnan(slope_eco)
		slope_eco[maskslope] = np.interp(np.flatnonzero(maskslope),	np.flatnonzero(~maskslope),	slope_eco[~maskslope])
		print 'eco slope'
		xoff = int((gt_eco[0]-gt_ndvimax_global[0])/1000)
		yoff = int((gt_ndvimax_global[3]-gt_eco[3])/1000)
		ndvimax_eco_bb0 = ndvimax_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		ndvimax_eco_bb = ndvimax_eco_bb0.flatten()
		ndvimax_eco0 = np.where(eco_mask == 1,	(ndvimax_eco_bb),(0))
		ndvimax_eco = np.where(ndvimax_eco0 == 65535.0,	(float('NaN')),(ndvimax_eco0))
		maskndvimax = np.isnan(ndvimax_eco)
		ndvimax_eco[maskndvimax] = np.interp(np.flatnonzero(maskndvimax),	np.flatnonzero(~maskndvimax),	ndvimax_eco[~maskndvimax])
		print 'eco ndvimax'
		xoff = int((gt_eco[0]-gt_ndvimin_global[0])/1000)
		yoff = int((gt_ndvimin_global[3]-gt_eco[3])/1000)
		ndvimin_eco_bb0 = ndvimin_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		ndvimin_eco_bb = ndvimin_eco_bb0.flatten()
		ndvimin_eco0 = np.where(eco_mask == 1,	(ndvimin_eco_bb),(0))
		ndvimin_eco = np.where(ndvimin_eco0 == 65535.0,	(float('NaN')),(ndvimin_eco0))
		maskndvimin = np.isnan(ndvimin_eco)
		ndvimin_eco[maskndvimin] = np.interp(np.flatnonzero(maskndvimin),	np.flatnonzero(~maskndvimin),	ndvimin_eco[~maskndvimin])
		print 'eco ndvimin'
		xoff = int((gt_eco[0]-gt_ndwi_global[0])/1000)
		yoff = int((gt_ndwi_global[3]-gt_eco[3])/1000)
		ndwi_eco_bb0 = ndwi_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		ndwi_eco_bb = ndwi_eco_bb0.flatten()
		ndwi_eco0 = np.where(eco_mask == 1,	(ndwi_eco_bb),(0))
		ndwi_eco = np.where(ndwi_eco0 == 255.0,	(float('NaN')),(ndwi_eco0))
		maskndwi = np.isnan(ndwi_eco)
		ndwi_eco[maskndwi] = np.interp(np.flatnonzero(maskndwi),	np.flatnonzero(~maskndwi),	ndwi_eco[~maskndwi])
		print 'eco ndwi'
		xoff = int((gt_eco[0]-gt_pre_global[0])/1000)
		yoff = int((gt_pre_global[3]-gt_eco[3])/1000)
		pre_eco_bb0 = pre_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		pre_eco_bb = pre_eco_bb0.flatten()
		pre_eco0 = np.where(eco_mask == 1,	(pre_eco_bb),(0))
		pre_eco = np.where(pre_eco0 == 65535.0,	(float('NaN')),(pre_eco0))
		maskpre = np.isnan(pre_eco)
		pre_eco[maskpre] = np.interp(np.flatnonzero(maskpre),	np.flatnonzero(~maskpre),	pre_eco[~maskpre])
		print 'eco pre'
		xoff = int((gt_eco[0]-gt_bio_global[0])/1000)
		yoff = int((gt_bio_global[3]-gt_eco[3])/1000)
		bio_eco_bb0 = bio_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		bio_eco_bb = bio_eco_bb0.flatten()
		bio_eco0 = np.where(eco_mask == 1,	(bio_eco_bb),(0))
		bio_eco = np.where(bio_eco0 == 65535.0,	(float('NaN')),(bio_eco0))
		maskbio = np.isnan(bio_eco)
		bio_eco[maskbio] = np.interp(np.flatnonzero(maskbio),	np.flatnonzero(~maskbio),	bio_eco[~maskbio])
		print 'eco bio'
		xoff = int((gt_eco[0]-gt_tree_global[0])/1000)
		yoff = int((gt_tree_global[3]-gt_eco[3])/1000)
		tree_eco_bb0 = tree_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		tree_eco_bb = tree_eco_bb0.flatten()
		tree_eco0 = np.where(eco_mask == 1,	(tree_eco_bb),(0))
		tree_eco = np.where(tree_eco0 == 255.0,	(float('NaN')),(tree_eco0))
		masktree = np.isnan(tree_eco)
		tree_eco[masktree] = np.interp(np.flatnonzero(masktree),	np.flatnonzero(~masktree),	tree_eco[~masktree])
		print 'eco tree'
		xoff = int((gt_eco[0]-gt_herb_global[0])/1000)
		yoff = int((gt_herb_global[3]-gt_eco[3])/1000)
		herb_eco_bb0 = herb_global.ReadAsArray(xoff,yoff,eco.XSize,eco.YSize).astype(np.float32)
		herb_eco_bb = herb_eco_bb0.flatten()
		herb_eco0 = np.where(eco_mask == 1,	(herb_eco_bb),(0))
		herb_eco = np.where(herb_eco0 == 255.0,	(float('NaN')),(herb_eco0))
		maskherb = np.isnan(herb_eco)
		herb_eco[maskherb] = np.interp(np.flatnonzero(maskherb),	np.flatnonzero(~maskherb),	herb_eco[~maskherb])
		print 'eco herb'
		ind_eco0 = np.column_stack((bio_eco,pre_eco,epr_eco,herb_eco,ndvimax_eco,ndvimin_eco,ndwi_eco,slope_eco,tree_eco))
		print 'ecovars stacked'
		
		print ecoparksf
		pa_list0 = np.genfromtxt(ecoparksf,dtype='string')	# create this file in subpas!
		pa_list = np.unique(pa_list0)
		n = len(pa_list)
		for	px in range(0,n): #	0,n

			pa = pa_list[px]
			print pa

			outfile = os.path.join(os.path.sep, outdir, str(ecor)+'_'+str(pa)+'.tif')
			outfile2 = os.path.join(os.path.sep, outdir, str(ecor)+'_'+str(pa)+'_lp.tif')
			outfile3 = os.path.join(os.path.sep, outdir, str(ecor)+'_'+str(pa)+'_mask.tif')
			#outfile = outdir+'/'+str(ecor)+'_'+str(pa)+'.tif'	#	LOCAL FOLDER
			pa_infile = 'pa_'+str(pa)+'.tif'

			pa4 = os.path.join(os.path.sep, nwpath, 'pas', pa_infile)
			#pa4 = os.path.join(os.path.sep, nwpath, os.path.sep, 'pas', os.path.sep, pa_infile)
			print pa4
			#pa4 = nwpath+'/pas/pa_'+str(pa)+'.tif'

			dropcols = np.arange(9,dtype=int)
			done = os.path.isfile(outfile)
			avail2 = os.path.isfile(pa4)
			if done == False and avail2 == True:
				pafile=pa4
				src_ds_pa = gdal.Open(pafile)
				par = src_ds_pa.GetRasterBand(1)
				pa_mask0 = par.ReadAsArray(0,0,par.XSize,par.YSize).astype(np.int32)
				pa_mask = pa_mask0.flatten()
				ind = pa_mask >	0 #==int(pa)
				go = 1
				sum_pa_mask = sum(pa_mask[ind])#/int(pa)
				if sum_pa_mask < 3: go = 0	#	not	processing	areas	smaller	than	3	pixels
				print sum_pa_mask
				sum_pa_mask_inv = len(pa_mask[pa_mask == 0])
				print sum_pa_mask_inv
				print len(pa_mask)
				ratiogeom = 10000
				if sum_pa_mask > 0: ratiogeom = sum_pa_mask_inv/sum_pa_mask
				#print ratiogeom
				gt_pa = src_ds_pa.GetGeoTransform()
				xoff = int((gt_pa[0]-gt_pre_global[0])/1000)
				yoff = int((gt_pre_global[3]-gt_pa[3])/1000)
				if xoff>0 and yoff>0 and go == 1:
					num_bands=src_ds_eco.RasterCount
					driver = gdal.GetDriverByName("GTiff")
					dst_options = ['COMPRESS=LZW']
					dst_ds = driver.Create(	outfile,src_ds_eco.RasterXSize,src_ds_eco.RasterYSize,num_bands,gdal.GDT_Float32,dst_options)
					dst_ds.SetGeoTransform(	src_ds_eco.GetGeoTransform())
					dst_ds.SetProjection(	src_ds_eco.GetProjectionRef())
					xoff = int((gt_pa[0]-gt_tree_global[0])/1000)
					yoff = int((gt_tree_global[3]-gt_pa[3])/1000)
					tree_pa_bb0 = tree_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					tree_pa_bb = tree_pa_bb0.flatten()
					tree_pa0 = tree_pa_bb[ind]
					tree_pa = np.where(tree_pa0 == 255.0, (float('NaN')),(tree_pa0))
					mask2tree = np.isnan(tree_pa)
					if mask2tree.all() == True:
						dropcols[8] = -8
					else:
						tree_pa[mask2tree] = np.interp(np.flatnonzero(mask2tree),	np.flatnonzero(~mask2tree),	tree_pa[~mask2tree])
						tree_pa = np.random.random_sample(len(tree_pa),)/1000 + tree_pa
						print 'pa tree'

						treepamin = round(tree_pa.min(),2)
						treepamax = round(tree_pa.max(),2)
						treepamean = round(np.mean(tree_pa),2)
						print treepamin
						print treepamax
						treediff = abs(tree_pa.min()-tree_pa.max())
						if treediff < 0.001: dropcols[8] = -8

					xoff = int((gt_pa[0]-gt_epr_global[0])/1000)
					yoff = int((gt_epr_global[3]-gt_pa[3])/1000)
					epr_pa_bb0 = epr_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					epr_pa_bb = epr_pa_bb0.flatten()
					epr_pa0 = epr_pa_bb[ind]
					epr_pa = np.where(epr_pa0 == 65535.0,	(float('NaN')),(epr_pa0))
					mask2epr = np.isnan(epr_pa)
					if mask2epr.all() == True:
						dropcols[2] = -2
					else:
						epr_pa[mask2epr] = np.interp(np.flatnonzero(mask2epr),	np.flatnonzero(~mask2epr),	epr_pa[~mask2epr])
						epr_pa = np.random.random_sample(len(epr_pa),)/1000 + epr_pa
						print 'pa epr'

						eprpamin = round(epr_pa.min(),2)
						eprpamax = round(epr_pa.max(),2)
						eprpamean = round(np.mean(epr_pa),2)
						print eprpamin
						print eprpamax
						eprdiff = abs(epr_pa.min()-epr_pa.max())
						if eprdiff < 0.001: dropcols[2] = -2

					xoff = int((gt_pa[0]-gt_pre_global[0])/1000)
					yoff = int((gt_pre_global[3]-gt_pa[3])/1000)
					pre_pa_bb0 = pre_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					pre_pa_bb = pre_pa_bb0.flatten()
					pre_pa0 = pre_pa_bb[ind]
					pre_pa = np.where(pre_pa0 == 65535.0,	(float('NaN')),(pre_pa0))
					mask2pre = np.isnan(pre_pa)
					if mask2pre.all() == True:
						dropcols[1] = -1
					else:
						pre_pa[mask2pre] = np.interp(np.flatnonzero(mask2pre),	np.flatnonzero(~mask2pre),	pre_pa[~mask2pre])
						pre_pa = np.random.random_sample(len(pre_pa),)/1000 + pre_pa
						print 'pa pre'

						prepamin = round(pre_pa.min(),2)
						prepamax = round(pre_pa.max(),2)
						prepamean = round(np.mean(pre_pa),2)
						print prepamin
						print prepamax
						prediff = abs(pre_pa.min()-pre_pa.max())
						if prediff < 0.001: dropcols[1] = -1

					xoff = int((gt_pa[0]-gt_bio_global[0])/1000)
					yoff = int((gt_bio_global[3]-gt_pa[3])/1000)
					bio_pa_bb0 = bio_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					bio_pa_bb = bio_pa_bb0.flatten()
					bio_pa0 = bio_pa_bb[ind]
					bio_pa = np.where(bio_pa0 == 65535.0,	(float('NaN')),(bio_pa0))
					mask2bio = np.isnan(bio_pa)
					if mask2bio.all() == True:
						dropcols[0] = -9	# negative sentinel: -0 == 0 would not mark the column for dropping
					else:
						bio_pa[mask2bio] = np.interp(np.flatnonzero(mask2bio),	np.flatnonzero(~mask2bio),	bio_pa[~mask2bio])
						bio_pa = np.random.random_sample(len(bio_pa),)/1000 + bio_pa
						print 'pa bio'

						biopamin = round(bio_pa.min(),2)
						biopamax = round(bio_pa.max(),2)
						biopamean = round(np.mean(bio_pa),2)
						print biopamin
						print biopamax
						biodiff = abs(bio_pa.min()-bio_pa.max())
						if biodiff < 0.001: dropcols[0] = -9	# negative sentinel: -0 == 0 would not mark the column for dropping

					xoff = int((gt_pa[0]-gt_slope_global[0])/1000)
					yoff = int((gt_slope_global[3]-gt_pa[3])/1000)
					slope_pa_bb0 = slope_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					slope_pa_bb = slope_pa_bb0.flatten()
					slope_pa0 = slope_pa_bb[ind]
					slope_pa = np.where(slope_pa0 == 65535.0,	(float('NaN')),(slope_pa0))
					mask2slope = np.isnan(slope_pa)
					if mask2slope.all() == True:
						dropcols[7] = -7
					else:
						slope_pa[mask2slope] = np.interp(np.flatnonzero(mask2slope),	np.flatnonzero(~mask2slope),	slope_pa[~mask2slope])
						slope_pa = np.random.random_sample(len(slope_pa),)/1000 + slope_pa
						print 'pa slope'

						slopepamin = round(slope_pa.min(),2)
						slopepamax = round(slope_pa.max(),2)
						slopepamean = round(np.mean(slope_pa),2)
						print slopepamin
						print slopepamax
						slopediff = abs(slope_pa.min()-slope_pa.max())
						if slopediff < 0.001: dropcols[7] = -7

					xoff = int((gt_pa[0]-gt_ndwi_global[0])/1000)
					yoff = int((gt_ndwi_global[3]-gt_pa[3])/1000)
					ndwi_pa_bb0 = ndwi_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					ndwi_pa_bb = ndwi_pa_bb0.flatten()
					ndwi_pa0 = ndwi_pa_bb[ind]
					ndwi_pa = np.where(ndwi_pa0 == 255.0,	(float('NaN')),(ndwi_pa0))
					mask2ndwi = np.isnan(ndwi_pa)
					if mask2ndwi.all() == True:
						dropcols[6] = -6
					else:
						ndwi_pa[mask2ndwi] = np.interp(np.flatnonzero(mask2ndwi),	np.flatnonzero(~mask2ndwi),	ndwi_pa[~mask2ndwi])
						ndwi_pa = np.random.random_sample(len(ndwi_pa),)/1000 + ndwi_pa
						print 'pa ndwi'

						ndwipamin = round(ndwi_pa.min(),2)
						ndwipamax = round(ndwi_pa.max(),2)
						ndwipamean = round(np.mean(ndwi_pa),2)
						print ndwipamin
						print ndwipamax
						ndwidiff = abs(ndwi_pa.min()-ndwi_pa.max())
						if ndwidiff < 0.001: dropcols[6] = -6

					xoff = int((gt_pa[0]-gt_ndvimax_global[0])/1000)
					yoff = int((gt_ndvimax_global[3]-gt_pa[3])/1000)
					ndvimax_pa_bb0 = ndvimax_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					ndvimax_pa_bb = ndvimax_pa_bb0.flatten()
					ndvimax_pa0 = ndvimax_pa_bb[ind]
					ndvimax_pa = np.where(ndvimax_pa0 == 65535.0,	(float('NaN')),(ndvimax_pa0))
					mask2ndvimax = np.isnan(ndvimax_pa)
					if mask2ndvimax.all() == True:
						dropcols[4] = -4
					else:
						ndvimax_pa[mask2ndvimax] = np.interp(np.flatnonzero(mask2ndvimax),	np.flatnonzero(~mask2ndvimax),	ndvimax_pa[~mask2ndvimax])
						ndvimax_pa = np.random.random_sample(len(ndvimax_pa),)/1000 + ndvimax_pa
						print 'pa ndvimax'

						ndvimaxpamin = round(ndvimax_pa.min(),2)
						ndvimaxpamax = round(ndvimax_pa.max(),2)
						ndvimaxpamean = round(np.mean(ndvimax_pa),2)
						print ndvimaxpamin
						print ndvimaxpamax
						ndvimaxdiff = abs(ndvimax_pa.min()-ndvimax_pa.max())
						if ndvimaxdiff < 0.001: dropcols[4] = -4

					xoff = int((gt_pa[0]-gt_ndvimin_global[0])/1000)
					yoff = int((gt_ndvimin_global[3]-gt_pa[3])/1000)
					ndvimin_pa_bb0 = ndvimin_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					ndvimin_pa_bb = ndvimin_pa_bb0.flatten()
					ndvimin_pa0 = ndvimin_pa_bb[ind]
					ndvimin_pa = np.where(ndvimin_pa0 == 65535.0,	(float('NaN')),(ndvimin_pa0))
					mask2ndvimin = np.isnan(ndvimin_pa)
					if mask2ndvimin.all() == True:
						dropcols[5] = -5
					else:
						ndvimin_pa[mask2ndvimin] = np.interp(np.flatnonzero(mask2ndvimin),	np.flatnonzero(~mask2ndvimin),	ndvimin_pa[~mask2ndvimin])
						ndvimin_pa = np.random.random_sample(len(ndvimin_pa),)/1000 + ndvimin_pa
						print 'pa ndvimin'

						ndviminpamin = round(ndvimin_pa.min(),2)
						ndviminpamax = round(ndvimin_pa.max(),2)
						ndviminpamean = round(np.mean(ndvimin_pa),2)
						print ndviminpamin
						print ndviminpamax
						ndvimindiff = abs(ndvimin_pa.min()-ndvimin_pa.max())
						if ndvimindiff < 0.001: dropcols[5] = -5

					xoff = int((gt_pa[0]-gt_herb_global[0])/1000)
					yoff = int((gt_herb_global[3]-gt_pa[3])/1000)
					herb_pa_bb0 = herb_global.ReadAsArray(xoff,yoff,par.XSize,par.YSize).astype(np.float32)
					herb_pa_bb = herb_pa_bb0.flatten()
					herb_pa0 = herb_pa_bb[ind]
					herb_pa = np.where(herb_pa0 == 255.0,	(float('NaN')),(herb_pa0))
					mask2herb = np.isnan(herb_pa)
					if mask2herb.all() == True:
						dropcols[3] = -3
					else:
						herb_pa[mask2herb] = np.interp(np.flatnonzero(mask2herb),	np.flatnonzero(~mask2herb),	herb_pa[~mask2herb])
						herb_pa = np.random.random_sample(len(herb_pa),)/1000 + herb_pa
						print 'pa herb'

						hpamin = round(herb_pa.min(),2)
						hpamax = round(herb_pa.max(),2)
						hpamean = round(np.mean(herb_pa),2)
						print hpamin
						print hpamax
						hdiff = abs(herb_pa.min()-herb_pa.max())
						if hdiff < 0.001: dropcols[3] = -3

					cols = dropcols[dropcols>=0]
					ind_pa0 = np.column_stack((bio_pa,pre_pa,epr_pa,herb_pa,ndvimax_pa,ndvimin_pa,ndwi_pa,slope_pa,tree_pa))
					ind_pa = ind_pa0[:,cols]
					ind_eco = ind_eco0[:,cols]
					print ind_pa.shape
					hr1sum = hr1insum = indokpsz = pszok = sumpszok = lpratio2 = numpszok = hr1averpa = hr3aver = hr2aver = pszmax = num_featuresaver = lpratio = hr1medianpa = hr1insumaver = pxpa = aggregation = None
					print "PA masked"
					#print ind_pa
					if ind_pa.shape[0]>4 and ind_pa.shape[1]>1: 
						Ymean = np.mean(ind_pa,axis=0)
						print 'Max. mean value is '+ str(Ymean.max())
						print "Ymean ok"
						Ycov = np.cov(ind_pa,rowvar=False)
						print 'Max. cov value is '+ str(Ycov.max())
						print "Ycov	ok"
						#mh = mahalanobis_distances(Ymean,	Ycov,	ind_eco,	parallel=False)
						#mh2 = mahalanobis_distances(Ymean,	Ycov,	ind_eco,	parallel=True)
						mh2 = mahalanobis_distances_scipy(Ymean,	Ycov,	ind_eco,	parallel=True) # previous working version
						#mh2 = mahalanobis_distances_scipy(Ymean,	Ycov,	ind_eco,	parallel=False)
						maxmh=mh2.max()
						print 'Max. mh value is '+ str(maxmh)
						print 'Max. mh value is nan: '+ str(np.isnan(maxmh))
						mh = mh2*mh2
						print "mh ok"
						pmh = chi2.sf(mh,len(cols)).reshape((eco.YSize,eco.XSize)) # chisqprob
						pmhh = np.where(pmh	<=	0.001,None,	pmh)
						print "pmh ok"	#	quitar	valores	muy	bajos!
						pmhhmax = pmhh.max()
						print 'Max. similarity value is '+ str(pmhhmax)
						dst_ds.GetRasterBand(1).WriteArray(pmhh)
						dst_ds = None
						hr11 = np.where(pmhh>0,1,0) # 0.5
						hr1 = hr11.flatten()
						hr1sum = sum(hr1)
						print 'Number of pixels with similarity higher than 0 is '+str(hr1sum)
						hr1insumaver = hr1insum = 0
						hr1sumaver = hr1sum
						src_ds_sim = gdal.Open(outfile)
						sim = src_ds_sim.GetRasterBand(1)
						gt_sim = src_ds_sim.GetGeoTransform()
						xoff = int((gt_pa[0]-gt_sim[0])/1000)
						yoff = int((gt_sim[3]-gt_pa[3])/1000)
						xextentpa = xoff + par.XSize
						yextentpa = yoff + par.YSize
						xless = sim.XSize - xextentpa
						yless = sim.YSize - yextentpa
						xsize = par.XSize
						ysize = par.YSize
						if xoff>0 and yoff>0 and pmhhmax>0.01 and hr1sum>1 and not np.isnan(maxmh):#and ratiogeom < 100: # also checks if results are not empty

							# reading the similarity ecoregion without the PA (tmp mask)
							os.system('gdal_merge.py '+str(ecofile)+' '+str(pa4)+' -o '+str(outfile3)+' -ot Int32')
							hri_pa_bb03 = sim.ReadAsArray().astype(np.float32)
							hri_pa_bb3 = hri_pa_bb03.flatten()
							
							src_ds_sim2 = gdal.Open(outfile3)
							sim2 = src_ds_sim2.GetRasterBand(1)
							gt_sim2 = src_ds_sim2.GetGeoTransform()
							hri_pa_bb02 = sim2.ReadAsArray().astype(np.int32)
							#hri_pa_bb2 = hri_pa_bb02.flatten()
							hri_pa_bb02_max = hri_pa_bb02.max()
							print 'PA: '+str(pa)
							print 'PA (= max) value from mask = '+str(hri_pa_bb02_max)
							if hri_pa_bb02.shape == hri_pa_bb03.shape:
							 hri_pa02 = np.where(hri_pa_bb02 == pa,0,hri_pa_bb03) # hri_pa_bb02_max


							 if xless < 0: xsize = xsize + xless
							 if yless < 0: ysize = ysize + yless
							 hri_pa_bb0 = sim.ReadAsArray(xoff,yoff,xsize,ysize).astype(np.float32)
							 hri_pa_bb = hri_pa_bb0.flatten()
							 indd = hri_pa_bb > 0
							 hri_pa0 = hri_pa_bb[indd]
							 print 'Total number of pixels with similarity values in PA: '+str(len(hri_pa0))
							 hr1averpa = round(np.mean(hri_pa0[~np.isnan(hri_pa0)]),2)
							 #print hr1averpa
							 #hr1medianpa = np.median(hri_pa0[~np.isnan(hri_pa0)])
							 print 'mean similarity in the park is '+str(hr1averpa)
							 #hr1insum = sum(np.where(hri_pa0 >= 0.5,	1,0))	#	use	hr1averpa	as	threshold	instead!						
							 hr1inaver = np.where(hri_pa0 >= hr1averpa,	1,0)
							 hr1insumaver = sum(hr1inaver)
							 #print hr1insum
							 ##labeled_arrayin, num_featuresin = nd.label(hr1inaver,	structure=s)
							 hr1averr = np.where(hri_pa02 >= hr1averpa,	1,0) # pmhh
							 hr1aver = hr1averr.flatten()
							 print 'Total number of pixels with similarity values in ECO: '+str(sum(hr1aver))
							 labeled_arrayaver, num_featuresaver = nd.label(hr1averr,	structure=s)
							 print 'Nr of similar patches found: '+str(num_featuresaver)
							 if num_featuresaver > 0:
							  lbls = np.arange(1, num_featuresaver+1)
							  psizes = nd.labeled_comprehension(labeled_arrayaver, labeled_arrayaver, lbls, np.count_nonzero, float, 0) #-1
							  pszmax = psizes.max()#-hr1insumaver
							  dst_ds2 = driver.Create(outfile2,src_ds_eco.RasterXSize,src_ds_eco.RasterYSize,num_bands,gdal.GDT_Int32,dst_options)
							  dst_ds2.SetGeoTransform(src_ds_eco.GetGeoTransform())
							  dst_ds2.SetProjection(src_ds_eco.GetProjectionRef())
							  dst_ds2.GetRasterBand(1).WriteArray(labeled_arrayaver)
							  dst_ds2 = None
							  #num_feats = num_features - num_featuresaver
							  hr1sumaver = sum(hr1aver)
							  hr2aver = hr1sumaver #- hr1insumaver
							  pxpa = ind_pa.shape[0]
							  indokpsz = psizes >= pxpa
							  pszsok = psizes[indokpsz] # NEW
							  sumpszok = sum(pszsok)
							  lpratio=round(float(pszmax/pxpa),2)
							  lpratio2=round(float(sumpszok/pxpa),2)
							  numpszok = len(pszsok)
							  hr3aver = round(float(hr2aver/pxpa),2)
							  aggregation = round(float(hr2aver/num_featuresaver),2)
						#hr2 = hr1sumaver - hr1insumaver
						#print hr2
						#hr3 = float(hr2/ind_pa.shape[0])
						#print hr3
					wb = open(csvname,'a')
					var = str(ecor)+' '+str(pa)+' '+str(hr1averpa)+' '+str(hr2aver)+' '+str(pxpa)+' '+str(hr1insumaver)+' '+str(hr3aver)+' '+str(num_featuresaver)+' '+str(lpratio)+' '+str(lpratio2)+' '+str(numpszok)+' '+str(pszmax)+' '+str(aggregation)+' '+str(treepamin)+' '+str(treepamax)+' '+str(eprpamin)+' '+str(eprpamax)+' '+str(prepamin)+' '+str(prepamax)+' '+str(biopamin)+' '+str(biopamax)+' '+str(slopepamin)+' '+str(slopepamax)+' '+str(ndwipamin)+' '+str(ndwipamax)+' '+str(ndvimaxpamin)+' '+str(ndvimaxpamax)+' '+str(ndviminpamin)+' '+str(ndviminpamax)+' '+str(hpamin)+' '+str(hpamax)+' '+str(treepamean)+' '+str(eprpamean)+' '+str(prepamean)+' '+str(biopamean)+' '+str(slopepamean)+' '+str(ndwipamean)+' '+str(ndvimaxpamean)+' '+str(ndviminpamean)+' '+str(hpamean)#	exclude	PA!	#+' '+str(hr1p25pa)#	'+str(hr3)+'	+' '+str(hr1medianpa)+' '+str(num_features)+' '
					wb.write(var)
					wb.write('\n')
					wb.close()
					print "results exported"
					os.system('rm '+str(outfile3))
		wb = open(csvname1,'a')	#	LOCAL	FOLDER
		var = str(ecor)
		wb.write(var)
		wb.write('\n')
		wb.close()	
	print "END ECOREG: " + str(ecor)
Exemplo n.º 23
0
def main():
    # Parse command line options
    parser = argparse.ArgumentParser(
        description='Test different nets with 3D data.')
    parser.add_argument('--flair',
                        action='store',
                        dest='flair',
                        default='FLAIR_preprocessed.nii.gz')
    parser.add_argument('--pd',
                        action='store',
                        dest='pd',
                        default='DP_preprocessed.nii.gz')
    parser.add_argument('--t2',
                        action='store',
                        dest='t2',
                        default='T2_preprocessed.nii.gz')
    parser.add_argument('--t1',
                        action='store',
                        dest='t1',
                        default='T1_preprocessed.nii.gz')
    parser.add_argument('--output',
                        action='store',
                        dest='output',
                        default='output.nii.gz')
    parser.add_argument('--no-docker',
                        action='store_false',
                        dest='docker',
                        default=True)

    c = color_codes()
    patch_size = (15, 15, 15)
    options = vars(parser.parse_args())
    batch_size = 10000
    min_size = 30

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '1' + c['nc'] + c['g'] + '>' +
          c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.init.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.init.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        verbose=10,
        max_epochs=50,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda p, t: 2 * np.sum(p * t[:, 1]) / np.sum(
            (p + t[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '1' + c['nc'] + c['g'] +
          '>' + c['nc'])
    names = np.array(
        [options['flair'], options['pd'], options['t2'], options['t1']])
    image_nii = load_nii(options['flair'])
    image1 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(
            names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image1[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Loading the net ' + c['b'] + '2' + c['nc'] + c['g'] + '>' +
          c['nc'])
    net_name = '/usr/local/nets/deep-challenge2016.final.model_weights.pkl' if options['docker'] \
        else './deep-challenge2016.final.model_weights.pkl'
    net = NeuralNet(
        layers=[
            (InputLayer, dict(name='in', shape=(None, 4, 15, 15, 15))),
            (Conv3DDNNLayer,
             dict(name='conv1_1',
                  num_filters=32,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_1',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (Conv3DDNNLayer,
             dict(name='conv2_1',
                  num_filters=64,
                  filter_size=(5, 5, 5),
                  pad='same')),
            (Pool3DDNNLayer,
             dict(name='avgpool_2',
                  pool_size=2,
                  stride=2,
                  mode='average_inc_pad')),
            (DropoutLayer, dict(name='l2drop', p=0.5)),
            (DenseLayer, dict(name='l1', num_units=256)),
            (DenseLayer,
             dict(name='out', num_units=2,
                  nonlinearity=nonlinearities.softmax)),
        ],
        objective_loss_function=objectives.categorical_crossentropy,
        update=updates.adam,
        update_learning_rate=0.0001,
        batch_iterator_train=BatchIterator(batch_size=4096),
        verbose=10,
        max_epochs=2000,
        train_split=TrainSplit(eval_size=0.25),
        custom_scores=[('dsc', lambda t, p: 2 * np.sum(t * p[:, 1]) / np.sum(
            (t + p[:, 1])))],
    )
    net.load_params_from(net_name)

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Creating the probability map ' + c['b'] + '2' + c['nc'] + c['g'] +
          '>' + c['nc'])
    image2 = np.zeros_like(image_nii.get_data())
    print('0% of data tested', end='\r')
    sys.stdout.flush()
    for batch, centers, percent in load_patch_batch_percent(
            names, batch_size, patch_size):
        y_pred = net.predict_proba(batch)
        print('%f%% of data tested' % percent, end='\r')
        sys.stdout.flush()
        [x, y, z] = np.stack(centers, axis=1)
        image2[x, y, z] = y_pred[:, 1]

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
          '<Saving to file ' + c['b'] + options['output'] + c['nc'] + c['g'] +
          '>' + c['nc'])
    image = (image1 * image2) > 0.5

    # filter candidates < min_size
    labels, num_labels = ndimage.label(image)
    lesion_list = np.unique(labels)
    num_elements_by_lesion = ndimage.labeled_comprehension(
        image, labels, lesion_list, np.sum, float, 0)
    filt_min_size = num_elements_by_lesion >= min_size
    lesion_list = lesion_list[filt_min_size]
    image = reduce(np.logical_or, map(lambda lab: lab == labels, lesion_list))

    image_nii.get_data()[:] = np.roll(np.roll(image, 1, axis=0), 1, axis=1)
    path = '/'.join(options['t1'].rsplit('/')[:-1])
    outputname = options['output'].rsplit('/')[-1]
    image_nii.to_filename(os.path.join(path, outputname))

    if not options['docker']:
        path = '/'.join(options['output'].rsplit('/')[:-1])
        case = options['output'].rsplit('/')[-1]
        gt = load_nii(os.path.join(
            path, 'Consensus.nii.gz')).get_data().astype(dtype=bool)
        dsc = np.sum(
            2.0 * np.logical_and(gt, image)) / (np.sum(gt) + np.sum(image))
        print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + c['g'] +
              '<DSC value for ' + c['c'] + case + c['g'] + ' = ' + c['b'] +
              str(dsc) + c['nc'] + c['g'] + '>' + c['nc'])
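
The final post-processing step above keeps only lesion candidates with at least min_size voxels, again via ndimage.label plus labeled_comprehension. Below is a stand-alone sketch of that filter; the boolean input mask and the helper name are assumptions, and note that on Python 3 the reduce used above needs to be imported from functools.

import numpy as np
from scipy import ndimage

def remove_small_components(mask, min_size=30):
    """Drop connected components smaller than min_size voxels from a boolean mask."""
    labels, num_labels = ndimage.label(mask)
    if num_labels == 0:
        return mask
    lesion_list = np.arange(1, num_labels + 1)
    # number of True voxels per labeled component
    sizes = ndimage.labeled_comprehension(mask, labels, lesion_list, np.sum, float, 0)
    keep = lesion_list[sizes >= min_size]
    return np.isin(labels, keep)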
def cloud_size_dist(dist, time, n_bins, size_min, size_max, ref_min, file,
                    show_plt):
    """
    Written by Lennéa Hayo, 2019
    Edited by Till Vondenhoff, 20-03-28
    
    Creates Newman's figure 3 of the cloud size distribution
    
    Parameters:
        dist:      name of the netcdf variable holding the satellite shot with clouds
        time:      index of the timestep to analyse
        n_bins:    number of bins
        size_min:  smallest cloud size for which the power law holds (lower bound of the fit)
        size_max:  largest cloud size used in the fit (upper bound of the fit)
        ref_min:   threshold above which a pixel is counted as cloud
        file:      dataset of the netcdf file the variable is read from
    Added Parameters:
        show_plt:  creates the histogram and slope plots if show_plt=True
        
    Returns:
        slope_lin: slope of the linear regression of the power-law distribution with log scales
        slope_log: slope of the linear regression of the power-law distribution with log binning
        slope_cum: slope of the cumulative distribution (alpha-1)
    
    """

    r1 = file[dist][time]

    # marks everything above ref_min as a cloud
    cloud_2D_mask = np.zeros_like(r1)
    cloud_2D_mask[r1 > ref_min] = 1

    # calculates how many clouds exist in cloud_2D_mask, returns total number of clouds
    labeled_clouds, n_clouds = ndi.label(cloud_2D_mask)
    labels = np.arange(1, n_clouds + 1)

    print('\n---------------------------------------------')
    print('  number of clouds in this timestep:', n_clouds)
    print('---------------------------------------------\n')

    # Calculating how many cells belong to each labeled cloud using ndi.labeled_comprehension
    # returns cloud_area and therefore its 2D size
    cloud_pixel = ndi.labeled_comprehension(cloud_2D_mask, labeled_clouds,
                                            labels, np.size, float, 0)
    cloud_area = np.sqrt(cloud_pixel) * 25.
    cloud_area_min = np.min(cloud_area)
    cloud_area_max = np.max(cloud_area)

    if show_plt:
        CSD, bins = np.histogram(cloud_area, bins=n_bins)

        bin_width = (bins[-1] - bins[0]) / len(bins)
        bins = bins[1:] / 2 + bins[:-1] / 2

        plt.axvline(x=size_min,
                    color='red',
                    linewidth=1.5,
                    alpha=0.8,
                    linestyle='--')
        plt.axvline(x=size_max,
                    color='red',
                    linewidth=1.5,
                    alpha=0.8,
                    linestyle='--')

        plt.plot(bins, CSD / bin_width)
        plt.axvspan(0, size_min, color='gray', alpha=0.4, lw=0)
        plt.axvspan(size_max, cloud_area_max, color='gray', alpha=0.4, lw=0)
        plt.xlabel('cloud size [m]')
        plt.ylabel('probability density function')
        plt.title('linear histogram')
        plt.xlim(0, cloud_area_max)
        plt.show()

    # linear power-law distribution of the data
    f_lin, slope_lin, intercept_lin = lin_binning(cloud_area, n_bins, size_min,
                                                  size_max, show_plt)

    # logarithmic binning of the data
    f_log, slope_log, intercept_log = log_binning(cloud_area, n_bins, size_min,
                                                  size_max, show_plt)

    # cumulative distribution by sorting the data
    f_cum, slope_cum, intercept_cum = cum_dist(cloud_area, size_min, size_max,
                                               show_plt)

    #fig, m1, m2, m3 = func_newmann3(cloud_area, n_bins, bin_min, bin_max, cloud_area_min, cloud_area_max, min_pixel, show_plt)
    #fig = four_tile_plot()

    return slope_lin, slope_log, slope_cum
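
The core of the cloud-size pipeline above is: threshold the field, label connected clouds, count the pixels of each cloud with labeled_comprehension, and turn the pixel count into a length scale. A minimal sketch of just that core, assuming a 2D field r1, a cloud threshold ref_min and the 25 m pixel edge used above; the binning and regression helpers (lin_binning, log_binning, cum_dist) are left out.

import numpy as np
from scipy import ndimage as ndi

def cloud_sizes(r1, ref_min, pixel_edge_m=25.0):
    """Return one size [m] per labeled cloud: sqrt(pixel count) * pixel edge."""
    cloud_mask = (r1 > ref_min).astype(int)
    labeled_clouds, n_clouds = ndi.label(cloud_mask)
    labels = np.arange(1, n_clouds + 1)
    # pixels per cloud
    cloud_pixels = ndi.labeled_comprehension(cloud_mask, labeled_clouds,
                                             labels, np.size, float, 0)
    return np.sqrt(cloud_pixels) * pixel_edge_m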
Example no. 25
def NotePlumeCoordinates(daily_data_dict, basepath, lonres, latres, params,
                         compare_gfed_edgar):
    """
    
    Parameters
    ----------
    daily_data_dict : dictionary
        daily_data[<day>], contains data about TROPOMI measurement per day.
    basepath : string
        Base directory; a list (txt file) of plume coordinates will be stored
        in its 04_output\plume_coordinates subdirectory.

    Returns
    -------
    Saves .txt file in coord_directory.

    """

    # Output directories, for coordinates and figures
    coord_directory = ut.DefineAndCreateDirectory(
        os.path.join(basepath + r'\04_output\plume_coordinates'))

    # Defining boundaries
    lat_min = daily_data_dict['lat_min']
    lat_max = daily_data_dict['lat_max']
    lon_min = daily_data_dict['lon_min']
    lon_max = daily_data_dict['lon_max']

    # Deciding on the nlon_t and nlat_t
    field_t = daily_data_dict['CO_ppb']
    nlon_t = len(field_t[0])
    nlat_t = len(field_t)

    if compare_gfed_edgar:
        # Define the plumes
        plumes = daily_data_dict['plumes_explained']

        # Label the plumes
        labels, nlabels = ndimage.label(plumes)

        # Get some statistics
        daily_data_dict = GetStats(daily_data_dict, labels, nlabels)

    else:
        # Define the plumes
        plumes = daily_data_dict['plume_mask']

        # Label the plumes
        labels, nlabels = ndimage.label(plumes)

    # Get some label statistics, for file
    max_xco = ndimage.measurements.maximum_position(field_t, labels,
                                                    np.arange(nlabels) + 1)
    mean_xco_raw = ndimage.measurements.mean(field_t, labels,
                                             np.arange(nlabels) + 1)
    mean_xco = [round(num, 3) for num in mean_xco_raw]
    plume_size = ndimage.labeled_comprehension(plumes, labels,
                                               np.arange(nlabels) + 1, np.sum,
                                               float, 0)
    unexplained = round(
        (100 - (daily_data_dict['explained plumes'] / nlabels) * 100), 2)
    exp_by_gfed = round(
        ((daily_data_dict['explained by gfed'] / nlabels) * 100), 2)
    exp_by_edgar = round(
        ((daily_data_dict['explained by edgar'] / nlabels) * 100), 2)

    # Generate coordinate meshgrid
    lon_t = np.linspace(lon_min, lon_max, nlon_t)
    lat_t = np.linspace(lat_min, lat_max, nlat_t)
    lon, lat = np.meshgrid(lon_t, lat_t)

    # Write to txt file
    day = daily_data_dict['day']
    month = daily_data_dict['month']
    year = daily_data_dict['year']
    curr_time = ut.GetCurrentTime()
    total = daily_data_dict['total_plumes']
    bufferarea = round((np.pi * ((params[0] * ((lonres + latres) / 2))**2)), 1)
    filename = os.path.join(coord_directory + \
        'Plume_coordinates_{}_{}_{}.txt'.format(month, day, year))

    headerstring = f"""#----------------------------------------
#----------------------------------------
This file was automatically generated at: {curr_time['year']}/{curr_time['month']}/{curr_time['day']} {curr_time['hour']}:{curr_time['minute']}

This file contains a list with information on Carbon Monoxide plumes at {month}/{day}/{year}, between:
longitudes: [{lon_min}, {lon_max}] 
latitudes: [{lat_min}, {lat_max}] 

column descriptions:
- type:         Plume centre of mass origin: (Unknown, TROPOMI, TROPOMI+GFED, TROPOMI+EDGAR, TROPOMI+GFED+EDGAR)
- latitude:     Latitude of northernmost edge of plume
- longitude:    Longitude of westernmost edge of plume
- grid_cells:   Number of grid cells (~{lonres}x{latres} km) in plume
- CO_max:       Highest Carbon Monoxide concentration measured in plume (ppb)
- CO_average:   Average Carbon Monoxide concentration measured in plume (ppb)


Total amount of plumes identified by TROPOMI: {total}

Percentage of TROPOMI plumes that can be explained by GFED: {exp_by_gfed}%
Percentage of TROPOMI plumes that can be explained by EDGAR: {exp_by_edgar}%
Percentage of TROPOMI plumes that cannot be explained by EDGAR or GFED: {unexplained}%

Note: a TROPOMI grid cell can be explained by EDGAR or GFED when an EDGAR or GFED enhancement was detected within
a circular buffer with radius {params[0]} (~{bufferarea} km^2) around a TROPOMI plume.

Other parameter settings:
    Moving window size:             {params[2]}x{params[2]} grid cells
    Moving window step size:        {params[3]} grid cells
    Standard deviations threshold:  {params[1]}

#----------------------------------------
#----------------------------------------
plume_origin; latitude; longitude; grid_cells; CO_max; CO_average;

"""

    f = open(filename, 'w+')
    f.write(headerstring)

    for i in range(1, nlabels + 1):
        plume = np.copy(labels)
        plume[plume != i] = 0  # Keep only the labelled plume

        # Retrieve plume value
        y, x = np.where(
            plume != 0)  # Get the indices of the left top corner of plume
        y = y[0]  # Get only northernmost grid cell coordinate
        x = x[0]  # Get only westernmost grid cell coordinate

        x_co_max = int(max_xco[i - 1][1])
        y_co_max = int(max_xco[i - 1][0])
        plume_origin = 'Unknown' if (plumes[y,x] == 1) else 'Wildfire' \
            if (plumes[y,x] == 11) else 'Anthropogenic' if (plumes[y,x] == 101) \
                else 'Wildfire/anthropogenic' if (plumes[y,x] == 111) else 'Error'
        f.write(
            f"{plume_origin}; {lat[y, x]}; {lon[y, x]}; {plume_size[i-1]}; {round(field_t[y_co_max, x_co_max], 3)}; {mean_xco[i-1]}\n"
        )
    f.close()

    # Notify a text file has been generated
    #print(f'Generated: {filename}')

    return
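
Most of the per-plume numbers written to the text file above come from three calls on the labeled plume mask: maximum_position and mean on the CO field, and labeled_comprehension with np.sum for the plume size. A compact sketch of that statistics step alone, with hypothetical inputs field_t (the 2D CO field) and plumes (the plume mask); for a binary mask the np.sum result is simply the number of grid cells per plume.

import numpy as np
from scipy import ndimage

def plume_statistics(field_t, plumes):
    """Per-plume position of the CO maximum, mean CO and size in grid cells."""
    labels, nlabels = ndimage.label(plumes)
    index = np.arange(nlabels) + 1
    max_pos = ndimage.maximum_position(field_t, labels, index)   # (row, col) of the CO maximum
    mean_co = ndimage.mean(field_t, labels, index)               # average CO per plume
    size = ndimage.labeled_comprehension(plumes, labels, index, np.sum, float, 0)
    return max_pos, mean_co, size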
Example no. 26
def NEMASubvols(input_vol,
                voxsizes,
                relTh=0.2,
                minvol=300,
                margin=9,
                nbins=100,
                zignore=38,
                bgSignal=None):
    """ Segment a complete NEMA PET volume with several hot sphere in different subvolumes containing
        only one sphere

    Parameters
    ----------
    input_vol : 3D numpy array
      the volume to be segmented

    voxsizes : 1D numpy array
      containing the voxel sizes

    relTh : float, optional
      the relative threshold used to find spheres

    minvol : float, optional
      minimum volume of spheres to be segmented (same unit as voxel size^3)

    margin : int, optional
      margin around segmented spheres (same unit as voxel size)

    nbins : int, optional
      number of bins used in histogram for background determination

    zignore : float, optional
     distance to edge of FOV that is ignored (same unit as voxelsize) 

    bgSignal : float or None, optional
      the signal intensity of the background
      if None, it is auto determined from a histogram analysis

    Returns
    -------
    list
      of slices to access the subvolumes from the original volume
    """

    vol = input_vol.copy()

    xdim, ydim, zdim = vol.shape

    minvol = int(minvol / np.prod(voxsizes))

    dx = int(np.ceil(margin / voxsizes[0]))
    dy = int(np.ceil(margin / voxsizes[1]))
    dz = int(np.ceil(margin / voxsizes[2]))

    nzignore = int(np.ceil(zignore / voxsizes[2]))
    vol[:, :, :nzignore] = 0
    vol[:, :, -nzignore:] = 0

    # first do a quick search for the biggest sphere (noisy edge of FOV can spoil max value!)
    histo = py.histogram(vol[vol > 0.01 * vol.max()], nbins)
    #bgSignal = histo[1][argrelextrema(histo[0], np.greater)[0][0]]
    if bgSignal is None:
        bgSignal = histo[1][find_peaks_cwt(histo[0],
                                           np.arange(nbins / 6, nbins))[0]]
    thresh = bgSignal + relTh * (vol.max() - bgSignal)

    vol2 = np.zeros(vol.shape, dtype=int)
    vol2[vol > thresh] = 1

    vol3, nrois = label(vol2)
    rois = np.arange(1, nrois + 1)
    roivols = labeled_comprehension(vol, vol3, rois, len, int, 0)

    i = 1

    for roi in rois:
        if (roivols[roi - 1] < minvol): vol3[vol3 == roi] = 0
        else:
            vol3[vol3 == roi] = i
            i = i + 1

    nspheres = vol3.max()
    spherelabels = np.arange(1, nspheres + 1)

    bboxes = find_objects(vol3)

    nmaskvox = list()
    slices = list()

    for bbox in bboxes:
        xstart = max(0, bbox[0].start - dx)
        xstop = min(xdim, bbox[0].stop + dx + 1)

        ystart = max(0, bbox[1].start - dy)
        ystop = min(ydim, bbox[1].stop + dy + 1)

        zstart = max(0, bbox[2].start - dz)
        zstop = min(zdim, bbox[2].stop + dz + 1)

        slices.append((slice(xstart, xstop,
                             None), slice(ystart, ystop,
                                          None), slice(zstart, zstop, None)))

        nmaskvox.append((xstop - xstart) * (ystop - ystart) * (zstop - zstart))

    # sort subvols acc to number of voxel
    slices = [slices[kk] for kk in np.argsort(nmaskvox)[::-1]]

    return slices
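
NEMASubvols keeps only labeled regions whose voxel count exceeds minvol and then pads each bounding box by a margin. A stripped-down sketch of the size filter alone, assuming a thresholded binary volume vol2 and a voxel-count threshold; it reuses the same label / labeled_comprehension / find_objects calls as above but omits the margin handling.

import numpy as np
from scipy.ndimage import label, labeled_comprehension, find_objects

def sphere_bounding_boxes(vol2, min_voxels):
    """Return bounding-box slices of connected regions with at least min_voxels voxels."""
    labeled, nrois = label(vol2)
    rois = np.arange(1, nrois + 1)
    roivols = labeled_comprehension(vol2, labeled, rois, len, int, 0)  # voxels per region
    # zero out regions below the size threshold, keep the rest
    for roi, nvox in zip(rois, roivols):
        if nvox < min_voxels:
            labeled[labeled == roi] = 0
    return [bb for bb in find_objects(labeled) if bb is not None]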
Example no. 27
def main():
    print "current working directory", os.getcwd()
    print "Reading input file path :",Parameters.demDataFilePath
    print "Reading input file :",Parameters.demFileName
    defaults.figureNumber = 0

    rawDemArray = read_dem_from_geotiff(Parameters.demFileName,\
                                        Parameters.demDataFilePath)

    nanDemArraylr=np.array(rawDemArray)
    nanDemArray = nanDemArraylr
    nanDemArray[nanDemArray < defaults.demNanFlag]= np.nan
    Parameters.minDemValue= np.min(nanDemArray[:])
    Parameters.maxDemValue= np.max(nanDemArray[:])

    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(nanDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Input DEM')
    if defaults.doPlot==1:
        plt.show()

    # Area of analysis
    Parameters.xDemSize=np.size(nanDemArray,0)
    Parameters.yDemSize=np.size(nanDemArray,1)

    # Calculate pixel length scale and assume square
    Parameters.maxLowerLeftCoord = np.max([Parameters.xDemSize, \
                                           Parameters.yDemSize])
    print 'DTM size: ',Parameters.xDemSize, 'x' ,Parameters.yDemSize
    #-----------------------------------------------------------------------------

    # Compute slope magnitude for raw and filtered DEMs
    print 'Computing slope of raw DTM'
    print 'DEM pixel scale:',Parameters.demPixelScale
    print np.array(nanDemArray).shape
    slopeXArray,slopeYArray = np.gradient(np.array(nanDemArray),\
                                          Parameters.demPixelScale)
    slopeMagnitudeDemArray = np.sqrt(slopeXArray**2 + slopeYArray**2)

    # plot the slope DEM array
    slopeMagnitudeDemArrayNp = np.array(slopeMagnitudeDemArray)
    print slopeMagnitudeDemArrayNp.shape

    # plotting the slope DEM of non filtered DEM
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(slopeMagnitudeDemArrayNp,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Slope of unfiltered DEM')
    if defaults.doPlot==1:
        plt.show()

    # Computation of the threshold lambda used in Perona-Malik nonlinear
    # filtering. The value of lambda (=edgeThresholdValue) is given by the 90th
    # quantile of the absolute value of the gradient.
    print 'Computing lambda = q-q-based nonlinear filtering threshold'
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayNp
    slopeMagnitudeDemArrayQ = np.reshape(slopeMagnitudeDemArrayQ,\
                                         np.size(slopeMagnitudeDemArrayQ))
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayQ[~np.isnan(slopeMagnitudeDemArrayQ)]
    print 'dem smoothing Quantile',defaults.demSmoothingQuantile

    edgeThresholdValuescipy = mquantiles(np.absolute(slopeMagnitudeDemArrayQ),\
                                         defaults.demSmoothingQuantile)
    print 'edgeThresholdValuescipy :', edgeThresholdValuescipy
    
    # performing PM filtering using the anisodiff
    print 'Performing Perona-Malik nonlinear filtering'
    filteredDemArray = anisodiff(nanDemArray, defaults.nFilterIterations, \
                                     edgeThresholdValuescipy,\
                                     defaults.diffusionTimeIncrement, \
                                     (Parameters.demPixelScale,\
                                      Parameters.demPixelScale),2)
    
    # plotting the filtered DEM
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(filteredDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Filtered DEM')
    if defaults.doPlot==1:
        plt.show()
    
    # Writing the filtered DEM as a tif
    write_geotif_filteredDEM(filteredDemArray,Parameters.demDataFilePath,\
                             Parameters.demFileName)

    # Computing slope of filtered DEM
    print 'Computing slope of filtered DTM'
    filteredDemArraynp = filteredDemArray#np.gradient only takes an array as input
    slopeXArray,slopeYArray = np.gradient(filteredDemArraynp,Parameters.demPixelScale)
    slopeDemArray = np.sqrt(slopeXArray**2 + slopeYArray**2)
    slopeMagnitudeDemArrayQ = slopeDemArray
    slopeMagnitudeDemArrayQ = np.reshape(slopeMagnitudeDemArrayQ,\
                                         np.size(slopeMagnitudeDemArrayQ))
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayQ[~np.isnan(slopeMagnitudeDemArrayQ)]
    print ' angle min:', np.arctan(np.percentile(slopeMagnitudeDemArrayQ,0.1))*180/np.pi
    print ' angle max:', np.arctan(np.percentile(slopeMagnitudeDemArrayQ,99.9))*180/np.pi
    print 'mean slope:',np.nanmean(slopeDemArray[:])
    print 'stdev slope:',np.nanstd(slopeDemArray[:])
    
    #Computing curvature
    print 'computing curvature'
    curvatureDemArrayIn= filteredDemArraynp
    #curvatureDemArrayIn[curvatureDemArrayIn== defaults.demErrorFlag]=np.nan
    curvatureDemArray = compute_dem_curvature(curvatureDemArrayIn,\
                                              Parameters.demPixelScale,\
                                              defaults.curvatureCalcMethod)
    #Writing the curvature array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_curvature.tif'
    write_geotif_generic(curvatureDemArray,outfilepath,outfilename)
    
    #Computation of statistics of curvature
    print 'Computing curvature statistics'
    print curvatureDemArray.shape
    tt = curvatureDemArray[~np.isnan(curvatureDemArray[:])]
    print tt.shape
    finiteCurvatureDemList = curvatureDemArray[np.isfinite(curvatureDemArray[:])]
    print finiteCurvatureDemList.shape
    curvatureDemMean = np.nanmean(finiteCurvatureDemList)
    curvatureDemStdDevn = np.nanstd(finiteCurvatureDemList)
    print ' mean: ', curvatureDemMean
    print ' standard deviation: ', curvatureDemStdDevn


    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(curvatureDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Curvature DEM')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    #*************************************************
    #Compute curvature quantile-quantile curve
    # This seems to take a long time ... is commented for now
    print 'Computing curvature quantile-quantile curve'
    #osm,osr = compute_quantile_quantile_curve(finiteCurvatureDemList)
    #print osm[0]
    #print osr[0]
    thresholdCurvatureQQxx = 1
    # have to add method to automatically compute the threshold
    # .....
    # .....
    #*************************************************
   

    # Computing contributing areas
    print 'Computing upstream accumulation areas using MFD from GRASS GIS'
    """
    return {'outlets':outlets, 'fac':nanDemArrayfac ,\
            'fdr':nanDemArrayfdr ,'basins':nanDemArraybasins,\
            'outletsxxProj':outletsxxProj, 'outletsyyProj':outletsyyProj,\
            'bigbasins':allbasins}
    """
    # Call the flow accumulation function
    flowroutingresults = flowaccumulation(filteredDemArray)

    # Read out the flowroutingresults into appropriate variables
    outletPointsList = flowroutingresults['outlets']
    flowArray = flowroutingresults['fac']
    flowDirectionsArray = flowroutingresults['fdr']
    # These are actually not sub basins; if the basin threshold
    # is large, then you might end up with nulls, so best
    # practice is to keep the basin threshold close to 1000
    # (default value is 10,000)
    #subBasinIndexArray = flowroutingresults['basins']

    
    #subBasinIndexArray[subBasinIndexArray==-9999]=np.nan
    basinIndexArray = flowroutingresults['bigbasins']

    flowArray[np.isnan(filteredDemArray)]=np.nan
    flowMean = np.mean(flowArray[~np.isnan(flowArray[:])])
    print 'Mean upstream flow: ', flowMean

    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    drainageMeasure = -np.sqrt(np.log10(flowArray))
    plt.imshow(drainageMeasure,cmap=cm.coolwarm)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('flowArray with outlets')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(basinIndexArray.T,cmap=cm.Dark2)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('basinIndexArray with outlets')
    if defaults.doPlot==1:
        plt.show()


    # Define a skeleton based on flow alone
    skeletonFromFlowArray = \
    compute_skeleton_by_single_threshold(flowArray.T,\
        defaults.flowThresholdForSkeleton)
    
    # Define a skeleton based on curvature alone
    skeletonFromCurvatureArray =\
    compute_skeleton_by_single_threshold(curvatureDemArray.T,\
        curvatureDemMean+thresholdCurvatureQQxx*curvatureDemStdDevn)
    
    
    # Define a skeleton based on curvature and flow
    skeletonFromFlowAndCurvatureArray =\
    compute_skeleton_by_dual_threshold(curvatureDemArray.T, flowArray.T, \
        curvatureDemMean+thresholdCurvatureQQxx*curvatureDemStdDevn, \
        defaults.flowThresholdForSkeleton)

    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonFromFlowAndCurvatureArray.T,cmap=cm.binary)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Curvature with outlets')
    if defaults.doPlot==1:
        plt.show()
    
    # Writing the skeletonFromFlowAndCurvatureArray array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_skeleton.tif'
    write_geotif_generic(skeletonFromFlowAndCurvatureArray.T,\
                         outfilepath,outfilename)

    
    # Computing the percentage drainage areas
    print 'Computing percentage drainage area of each indexed basin'
    fastMarchingStartPointList = np.array(outletPointsList)
    print fastMarchingStartPointList
    #fastMarchingStartPointListFMM = np.zeros((fastMarchingStartPointList.shape))
    fastMarchingStartPointListFMMx = []
    fastMarchingStartPointListFMMy = []
    basinsUsedIndexList = np.zeros((len(fastMarchingStartPointList[0]),1))
    nx = Parameters.xDemSize
    ny = Parameters.yDemSize
    nDempixels = float(nx*ny)
    basinIndexArray = basinIndexArray.T
    for label in range(0,len(fastMarchingStartPointList[0])):        
        outletbasinIndex = basinIndexArray[fastMarchingStartPointList[0,label],\
                                         fastMarchingStartPointList[1,label]]
        print outletbasinIndex
        numelments = basinIndexArray[basinIndexArray==outletbasinIndex]
        #print type(numelments), len(numelments)
        percentBasinArea = float(len(numelments)) * 100/nDempixels
        print 'Basin: ',outletbasinIndex,\
              '@ : ',fastMarchingStartPointList[:,label],' #Elements ',len(numelments),\
              ' area ',percentBasinArea,' %'
        if percentBasinArea > defaults.thresholdPercentAreaForDelineation and\
           len(numelments) > Parameters.numBasinsElements:
            # Get the watersheds used
            basinsUsedIndexList[label]= label
            # Preparing the outlets used for fast marching in ROI
            #fastMarchingStartPointListFMM[:,label] = fastMarchingStartPointList[:,label]
            fastMarchingStartPointListFMMx.append(fastMarchingStartPointList[0,label])
            fastMarchingStartPointListFMMy.append(fastMarchingStartPointList[1,label])
        # finishing Making outlets for FMM
    #Closing Basin area computation

    fastMarchingStartPointListFMM = np.array([fastMarchingStartPointListFMMx,\
                                                  fastMarchingStartPointListFMMy])
    # Computing the local cost function
    print 'Preparing to calculate cost function'
    # lets normalize the curvature first
    if defaults.doNormalizeCurvature ==1:
        curvatureDemArrayNor = normalize(curvatureDemArray)
        del curvatureDemArray
        curvatureDemArray = curvatureDemArrayNor
        del curvatureDemArrayNor
    
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(curvatureDemArray,cmap=cm.coolwarm)
    plt.title('Curvature after normalization')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    
    print 'Curvature min: ' ,str(np.min(curvatureDemArray[~np.isnan(curvatureDemArray)])), \
          ' exp(min): ',str(np.exp(3*np.min(curvatureDemArray[~np.isnan(curvatureDemArray)])))
    print 'Curvature max: ' ,str(np.max(curvatureDemArray[~np.isnan(curvatureDemArray)])),\
          ' exp(max): ',str(np.exp(3*np.max(curvatureDemArray[~np.isnan(curvatureDemArray)])))
    
    # set all the nan's to zeros before cost function is computed
    curvatureDemArray[np.isnan(curvatureDemArray)] = 0
    
    print 'Computing cost function & geodesic distance'
    # Calculate the local reciprocal cost (weight, or propagation speed in the
    # eikonal equation sense).  If the cost function isn't defined, default to
    # old cost function.
    flowArray = flowArray.T
    curvatureDemArray = curvatureDemArray.T
    
    if hasattr(defaults, 'reciprocalLocalCostFn'):
        print 'Evaluating local cost func.'
        reciprocalLocalCostArray = eval(defaults.reciprocalLocalCostFn)
    else:
        print 'Evaluating local cost func. (default)'
        reciprocalLocalCostArray = flowArray + \
                                   (flowMean*skeletonFromFlowAndCurvatureArray)\
                                   + (flowMean*curvatureDemArray)
    del reciprocalLocalCostArray
    # Forcing the evaluations
    reciprocalLocalCostArray = flowArray + \
                                   (flowMean*skeletonFromFlowAndCurvatureArray)\
                                   + (flowMean*curvatureDemArray)
    if hasattr(defaults,'reciprocalLocalCostMinimum'):
        if defaults.reciprocalLocalCostMinimum != 'nan':
            reciprocalLocalCostArray[reciprocalLocalCostArray[:]\
                                 < defaults.reciprocalLocalCostMinimum]=1.0
    
    print '1/cost min: ', np.nanmin(reciprocalLocalCostArray[:]) 
    print '1/cost max: ', np.nanmax(reciprocalLocalCostArray[:])

    # Writing the reciprocal array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_costfunction.tif'
    write_geotif_generic(reciprocalLocalCostArray,outfilepath,outfilename)

    # Fast marching
    print 'Performing fast marching'
    print '# of unique basins:',np.size(np.unique(basinIndexArray))
    # Now access each unique basin and get the
    # outlets for it
    basinIndexList = np.unique(basinIndexArray)
    print 'basinIndexList:', str(basinIndexList)
    print reciprocalLocalCostArray.shape
    #stop

    
    # Do fast marching for each sub basin
    geodesicDistanceArray = np.zeros((basinIndexArray.shape))
    geodesicDistanceArray[geodesicDistanceArray==0]=np.inf
    geodesicDistanceArray = geodesicDistanceArray.T
    filteredDemArrayTr = filteredDemArray.T
    basinIndexArray = basinIndexArray.T
    # create a watershed outlet dictionary
    outletwatersheddict = {}
    defaults.figureNumber = defaults.figureNumber + 1
    for i in range(0,len(fastMarchingStartPointListFMM[0])):
        basinIndexList = basinIndexArray[fastMarchingStartPointListFMM[1,i],\
                                    fastMarchingStartPointListFMM[0,i]]
        print 'basin Index:',basinIndexList
        print 'start point :', fastMarchingStartPointListFMM[:,i]
        outletwatersheddict[basinIndexList]=fastMarchingStartPointListFMM[:,i]
        maskedBasin = np.zeros((basinIndexArray.shape))
        maskedBasin[basinIndexArray==basinIndexList]=1
        # For the masked basin get the maximum accumulation area
        # location and use that as an outlet for the basin.
        maskedBasinFAC = np.zeros((basinIndexArray.shape))
        maskedBasinFAC[basinIndexArray==basinIndexList]=\
        flowArray[basinIndexArray==basinIndexList]
        maskedBasinFAC[maskedBasinFAC==0]=np.nan
        # Get the outlet of subbasin
        maskedBasinFAC[np.isnan(maskedBasinFAC)]=0
        # print subBasinoutletindices
        # outlets locations in projection of the input dataset
        outletsxx = fastMarchingStartPointList[0,i]
        outletsyy = fastMarchingStartPointList[1,i]
        # call the fast marching here
        phi = np.nan * np.ones((reciprocalLocalCostArray.shape))
        speed = np.ones((reciprocalLocalCostArray.shape))* np.nan
        phi[maskedBasinFAC!=0] = 1
        speed[maskedBasinFAC!=0] = reciprocalLocalCostArray[maskedBasinFAC!=0]
        phi[fastMarchingStartPointListFMM[1,i],\
            fastMarchingStartPointListFMM[0,i]] =-1
        try:
            travelTimearray = skfmm.travel_time(phi,speed, dx=1)
        except IOError as e:            
            print 'Error in calculating skfmm travel time'
            print 'Error in catchment: ',basinIndexList
            # setting travel time to empty array
            travelTimearray = np.nan * np.zeros((reciprocalLocalCostArray.shape))
            plt.figure(defaults.figureNumber+1)
            plt.imshow(speed.T,cmap=cm.coolwarm)
            plt.plot(fastMarchingStartPointListFMM[1,i],\
                    fastMarchingStartPointListFMM[0,i],'ok')
            #plt.contour(speed.T,cmap=cm.coolwarm)
            plt.title('speed basin Index'+str(basinIndexList))
            plt.colorbar()
            plt.show()
            
            plt.figure(defaults.figureNumber+1)
            plt.imshow(phi.T,cmap=cm.coolwarm)
            plt.plot(fastMarchingStartPointListFMM[1,i],\
                    fastMarchingStartPointListFMM[0,i],'ok')
            #plt.contour(speed.T,cmap=cm.coolwarm)
            plt.title('phi basin Index'+str(basinIndexList))
            plt.colorbar()
            plt.show()
            
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
            #stop
        
        #print travelTimearray.shape
        geodesicDistanceArray[maskedBasin ==1]= travelTimearray[maskedBasin ==1]

    #-----------------------------------
    #-----------------------------------
    # Plot the geodesic array
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(np.log10(geodesicDistanceArray.T),cmap=cm.coolwarm)
    plt.contour(geodesicDistanceArray.T,140,cmap=cm.coolwarm)
    plt.title('Geodesic distance array (travel time)')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    print geodesicDistanceArray.shape
    # Writing the geodesic distance array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_geodesicDistance.tif'
    write_geotif_generic(geodesicDistanceArray.T,outfilepath,outfilename)
    
    # Locating end points
    print 'Locating skeleton end points'
    xySkeletonSize = skeletonFromFlowAndCurvatureArray.shape
    skeletonLabeledArray, skeletonNumConnectedComponentsList =\
                          ndimage.label(skeletonFromFlowAndCurvatureArray)
    #print skeletonNumConnectedComponentsList
    """
     Through the histogram of skeletonNumElementsSortedList
     (skeletonNumElementsList minus the maximum value which
      corresponds to the largest connected element of the skeleton) we get the
      size of the smallest elements of the skeleton, which will likely
      correspond to small isolated convergent areas. These elements will be
      excluded from the search of end points.
    """
    print 'Counting the number of elements of each connected component'
    #print "ndimage.labeled_comprehension"
    lbls = np.arange(1, skeletonNumConnectedComponentsList+1)
    skeletonLabeledArrayNumtuple = ndimage.labeled_comprehension(skeletonFromFlowAndCurvatureArray,\
                                                                 skeletonLabeledArray,\
                                                                 lbls,np.count_nonzero,\
                                                                 int,0)
    skeletonNumElementsSortedList = np.sort(skeletonLabeledArrayNumtuple)
    print np.sqrt(len(skeletonNumElementsSortedList))
    histarray,skeletonNumElementsHistogramX=np.histogram(\
        skeletonNumElementsSortedList[0:len(skeletonNumElementsSortedList)-1],
        int(np.floor(np.sqrt(len(skeletonNumElementsSortedList)))))

    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonLabeledArray.T,cmap=cm.coolwarm)
    plt.title('Skeleton Labeled Array elements Array')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()

    # Create skeleton gridded array
    skeletonNumElementsGriddedArray = np.zeros(xySkeletonSize)
    #"""
    for i in range(0,xySkeletonSize[0]):
        for j in range(0,xySkeletonSize[1]):
            #Gets the watershed label for this specified cell and checked in
            #subsequent if statement
            basinIndex = basinIndexArray[i,j]
            if skeletonLabeledArray[i, j] > 0:
                skeletonNumElementsGriddedArray[i,j] = \
                    skeletonLabeledArrayNumtuple[skeletonLabeledArray[i,j]-1]
    
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonNumElementsGriddedArray.T,cmap=cm.coolwarm)
    plt.title('Skeleton Num elements Array')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    #"""
    # Elements smaller than skeletonNumElementsThreshold are not considered in the
    # skeletonEndPointsList detection
    print skeletonNumElementsHistogramX
    skeletonNumElementsThreshold = skeletonNumElementsHistogramX[2]
    
    print 'skeletonNumElementsThreshold',str(skeletonNumElementsThreshold)
    
    # Scan the array for finding the channel heads
    print 'Continuing to locate skeleton endpoints'
    #"""
    skeletonEndPointsList = []
    for i in range(0,xySkeletonSize[0]):
        for j in range(0,xySkeletonSize[1]):
            #print i,j
            # Skip this pixel if the current point is not a labeled or if the
            # number of connected skeleton elements is too small
            if skeletonLabeledArray[i,j]!=0 \
               and skeletonNumElementsGriddedArray[i,j]>=skeletonNumElementsThreshold:
                # Define search box and ensure it fits within the DTM bounds
                mx = i-1
                px = xySkeletonSize[0]-i
                my = j-1
                py = xySkeletonSize[1]-j
                xMinus = np.min([defaults.endPointSearchBoxSize, mx])
                xPlus  = np.min([defaults.endPointSearchBoxSize, px])
                yMinus = np.min([defaults.endPointSearchBoxSize, my])
                yPlus  = np.min([defaults.endPointSearchBoxSize, py])
                # Extract the geodesic distances geodesicDistanceArray for pixels within the search box
                searchGeodesicDistanceBox = geodesicDistanceArray[i-xMinus:i+xPlus, j-yMinus:j+yPlus]
                # Extract the skeleton labels for pixels within the search box
                searchLabeledSkeletonBox = skeletonLabeledArray[i-xMinus:i+xPlus, j-yMinus:j+yPlus]
                # Look in the search box for skeleton points with the same label
                # and greater geodesic distance than the current pixel at (i,j)
                # - if there are none, then add the current point as a channel head
                v = searchLabeledSkeletonBox==skeletonLabeledArray[i,j]
                v1 = v * searchGeodesicDistanceBox > geodesicDistanceArray[i,j]
                v3 = np.where(np.any(v1==True,axis=0))
                if len(v3[0])==0:
                    skeletonEndPointsList.append([i,j])
    
    # For loop ends here
    skeletonEndPointsListArray = np.array(skeletonEndPointsList)
    xx = skeletonEndPointsListArray[0:len(skeletonEndPointsListArray),0]
    yy = skeletonEndPointsListArray[0:len(skeletonEndPointsListArray),1]
    
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonFromFlowAndCurvatureArray.T,cmap=cm.binary)
    plt.plot(xx,yy,'or')
    plt.title('Skeleton Num elements Array with channel heads')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()             

    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(np.log(geodesicDistanceArray.T),cmap=cm.coolwarm)
    plt.plot(xx,yy,'or')
    plt.title('Geodesic distance Array with channel heads')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()

    # Write shapefiles of channel heads
    write_channel_heads(xx,yy)
    
    # Do compute discrete geodesics
    print 'Computing discrete geodesics'
    compute_discrete_geodesic_v1()
    print 'Finished pyGeoNet'
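
One step of the pipeline above that is easy to isolate is the Perona-Malik edge threshold: lambda is simply a high quantile (defaults.demSmoothingQuantile) of the absolute slope magnitude after NaNs are dropped. A sketch under those assumptions, using numpy's quantile instead of scipy's mquantiles; dem and pixel_scale are placeholder inputs.

import numpy as np

def pm_edge_threshold(dem, pixel_scale, quantile=0.9):
    """Edge threshold for Perona-Malik filtering: a high quantile of |grad(DEM)|."""
    gy, gx = np.gradient(dem, pixel_scale)
    slope_mag = np.sqrt(gx**2 + gy**2).ravel()
    slope_mag = slope_mag[~np.isnan(slope_mag)]   # drop NaN cells before taking the quantile
    return np.quantile(np.abs(slope_mag), quantile)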
def Channel_Head_Definition(skeletonFromFlowAndCurvatureArray, geodesicDistanceArray):
    # Locating end points
    print('Locating skeleton end points')
    structure = np.ones((3, 3))
    skeletonLabeledArray, skeletonNumConnectedComponentsList = \
        ndimage.label(skeletonFromFlowAndCurvatureArray,
                      structure=structure)
    """
     Through the histogram of skeletonNumElementsSortedList
     (skeletonNumElementsList minus the maximum value which
      corresponds to the largest connected element of the skeleton) we get the
      size of the smallest elements of the skeleton, which will likely
      correspond to small isolated convergent areas. These elements will be
      excluded from the search of end points.
    """
    print('Counting the number of elements of each connected component')
    lbls = np.arange(1, skeletonNumConnectedComponentsList + 1)
    skeletonLabeledArrayNumtuple = ndimage.labeled_comprehension(skeletonFromFlowAndCurvatureArray,
                                                                 skeletonLabeledArray,
                                                                 lbls, np.count_nonzero,
                                                                 int, 0)
    skeletonNumElementsSortedList = np.sort(skeletonLabeledArrayNumtuple)
    histarray, skeletonNumElementsHistogramX = np.histogram(
        skeletonNumElementsSortedList[0:len(skeletonNumElementsSortedList) - 1],
        int(np.floor(np.sqrt(len(skeletonNumElementsSortedList)))))
    if defaults.doPlot == 1:
        pyg_plt.raster_plot(skeletonLabeledArray,
                            'Skeleton Labeled Array elements Array')
    # Create skeleton gridded array
    skeleton_label_set, label_indices = np.unique(skeletonLabeledArray, return_inverse=True)
    skeletonNumElementsGriddedArray = \
        np.array([skeletonLabeledArrayNumtuple[x - 1] for x in skeleton_label_set])[
            label_indices].reshape(skeletonLabeledArray.shape)
    if defaults.doPlot == 1:
        pyg_plt.raster_plot(skeletonNumElementsGriddedArray,
                            'Skeleton Num elements Array')
    # Elements smaller than skeletonNumElementsThreshold are not considered in the
    # skeletonEndPointsList detection
    skeletonNumElementsThreshold = skeletonNumElementsHistogramX[2]
    print('skeletonNumElementsThreshold {}'.format(str(skeletonNumElementsThreshold)))
    # Scan the array for finding the channel heads
    print('Continuing to locate skeleton endpoints')
    skeletonEndPointsList = []
    nrows = skeletonFromFlowAndCurvatureArray.shape[0]
    ncols = skeletonFromFlowAndCurvatureArray.shape[1]
    for i in range(nrows):
        for j in range(ncols):
            if skeletonLabeledArray[i, j] != 0 \
                    and skeletonNumElementsGriddedArray[i, j] >= skeletonNumElementsThreshold:
                # Define search box and ensure it fits within the DTM bounds
                my = i - 1
                py = nrows - i
                mx = j - 1
                px = ncols - j
                xMinus = np.min([defaults.endPointSearchBoxSize, mx])
                xPlus = np.min([defaults.endPointSearchBoxSize, px])
                yMinus = np.min([defaults.endPointSearchBoxSize, my])
                yPlus = np.min([defaults.endPointSearchBoxSize, py])
                # Extract the geodesic distances geodesicDistanceArray for pixels within
                # the search box
                searchGeodesicDistanceBox = geodesicDistanceArray[i - yMinus:i + yPlus,
                                            j - xMinus:j + xPlus]
                # Extract the skeleton labels for pixels within the search box
                searchLabeledSkeletonBox = skeletonLabeledArray[i - yMinus:i + yPlus,
                                           j - xMinus:j + xPlus]
                # Look in the search box for skeleton points with the same label
                # and greater geodesic distance than the current pixel at (i,j)
                # - if there are none, then add the current point as a channel head
                v = searchLabeledSkeletonBox == skeletonLabeledArray[i, j]
                v1 = v * searchGeodesicDistanceBox > geodesicDistanceArray[i, j]
                v3 = np.where(np.any(v1 == True, axis=0))
                if len(v3[0]) == 0:
                    skeletonEndPointsList.append([i, j])
    # For loop ends here
    skeletonEndPointsListArray = np.transpose(skeletonEndPointsList)
    if defaults.doPlot == 1:
        pyg_plt.raster_point_plot(skeletonFromFlowAndCurvatureArray,
                                  skeletonEndPointsListArray,
                                  'Skeleton Num elements Array with channel heads',
                                  cm.binary, 'ro')
    if defaults.doPlot == 1:
        pyg_plt.raster_point_plot(geodesicDistanceArray,
                                  skeletonEndPointsListArray,
                                  'Geodesic distance Array with channel heads',
                                  cm.coolwarm, 'ro')
    xx = skeletonEndPointsListArray[1]
    yy = skeletonEndPointsListArray[0]
    # Write shapefiles of channel heads
    pyg_vio.write_drainage_nodes(xx, yy, "ChannelHead",
                                 parameters.pointFileName,
                                 parameters.pointshapefileName)
    # Write raster of channel heads
    channelheadArray = np.zeros((geodesicDistanceArray.shape))
    channelheadArray[skeletonEndPointsListArray[0],
                     skeletonEndPointsListArray[1]] = 1
    outfilepath = parameters.geonetResultsDir
    demName = parameters.demFileName
    outfilename = demName.split('.')[0] + '_channelHeads.tif'
    pyg_rio.write_geotif_generic(channelheadArray,
                                 outfilepath, outfilename)
    return xx, yy
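
The channel-head test above marks a skeleton pixel as an end point when no pixel of the same connected component inside the search box has a larger geodesic distance. A simplified sketch of that inner test for a single pixel (i, j); it replaces the v / v1 / v3 bookkeeping with an explicit boolean mask and assumes non-negative geodesic distances.

import numpy as np

def is_channel_head(skeletonLabeledArray, geodesicDistanceArray, i, j, box=30):
    """True if (i, j) has the largest geodesic distance of its skeleton label inside the box."""
    if skeletonLabeledArray[i, j] == 0:
        return False
    nrows, ncols = skeletonLabeledArray.shape
    r0, r1 = max(i - box, 0), min(i + box + 1, nrows)
    c0, c1 = max(j - box, 0), min(j + box + 1, ncols)
    same_label = skeletonLabeledArray[r0:r1, c0:c1] == skeletonLabeledArray[i, j]
    dist_box = geodesicDistanceArray[r0:r1, c0:c1]
    return not np.any(same_label & (dist_box > geodesicDistanceArray[i, j]))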
Example no. 29
def _significant_features(
        radar, field, gatefilter=None, min_size=None, size_bins=75,
        size_limits=(0, 300), structure=None, remove_size_field=True,
        fill_value=None, size_field=None, debug=False, verbose=False):
    """
    """

    # Parse fill value
    if fill_value is None:
        fill_value = get_fillvalue()

    # Parse field names
    if size_field is None:
        size_field = '{}_feature_size'.format(field)

    # Parse gate filter
    if gatefilter is None:
        gatefilter = GateFilter(radar, exclude_based=False)

    # Parse binary structuring element
    if structure is None:
        structure = ndimage.generate_binary_structure(2, 1)

    # Initialize echo feature size array
    size_data = np.zeros_like(
        radar.fields[field]['data'], subok=False, dtype=np.int32)

    # Loop over all sweeps
    feature_sizes = []
    for sweep in radar.iter_slice():

        # Parse radar sweep data and define only valid gates
        is_valid_gate = ~radar.fields[field]['data'][sweep].mask

        # Label the connected features in radar sweep data and create index
        # array which defines each unique label (feature)
        labels, nlabels = ndimage.label(
            is_valid_gate, structure=structure, output=None)
        index = np.arange(1, nlabels + 1, 1)

        if debug:
            print 'Number of unique features for {}: {}'.format(sweep, nlabels)

        # Compute the size (in radar gates) of each echo feature
        # Check for case where no echo features are found, e.g., no data in
        # sweep
        if nlabels > 0:
            sweep_sizes = ndimage.labeled_comprehension(
                is_valid_gate, labels, index, np.count_nonzero, np.int32, 0)
            feature_sizes.append(sweep_sizes)

            # Set each label (feature) to its total size (in radar gates)
            for label, size in zip(index, sweep_sizes):
                size_data[sweep][labels == label] = size

    # Stack sweep echo feature sizes
    feature_sizes = np.hstack(feature_sizes)

    # Compute histogram of echo feature sizes, bin centers and bin
    # width
    counts, bin_edges = np.histogram(
        feature_sizes, bins=size_bins, range=size_limits,
        weights=None, density=False)
    bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2.0
    bin_width = np.diff(bin_edges).mean()

    if debug:
        print 'Bin width: {} gate(s)'.format(bin_width)

    # Compute the peak of the echo feature size distribution
    # We expect the peak of the echo feature size distribution to be close to 1
    # radar gate
    peak_size = bin_centers[counts.argmax()] - bin_width / 2.0

    if debug:
        print 'Feature size at peak: {} gate(s)'.format(peak_size)

    # Determine the first instance when the count (sample size) for an echo
    # feature size bin reaches 0 after the distribution peak
    # This will define the minimum echo feature size
    is_zero_size = np.logical_and(
        bin_centers > peak_size, np.isclose(counts, 0, atol=1.0e-5))
    min_size = bin_centers[is_zero_size].min() - bin_width / 2.0

    if debug:
        _range = [0.0, min_size]
        print 'Insignificant feature size range: {} gates'.format(_range)

    # Mask invalid feature sizes, e.g., zero-size features
    size_data = np.ma.masked_equal(size_data, 0, copy=False)
    size_data.set_fill_value(fill_value)

    # Add echo feature size field to radar
    size_dict = {
        'data': size_data.astype(np.int32),
        'standard_name': size_field,
        'long_name': '',
        '_FillValue': size_data.fill_value,
        'units': 'unitless',
    }
    radar.add_field(size_field, size_dict, replace_existing=True)

    # Update gate filter
    gatefilter.include_above(size_field, min_size, op='and', inclusive=False)

    # Remove echo feature size field
    if remove_size_field:
        radar.fields.pop(size_field, None)

    return gatefilter
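
The minimum feature size used to update the gate filter above is derived from the histogram of per-sweep feature sizes: locate the distribution peak, then take the first (near-)empty bin after it. A short sketch of that histogram step on its own, assuming a 1D array feature_sizes of gate counts and the same size_bins / size_limits defaults as above; like the routine above, it expects at least one empty bin after the peak.

import numpy as np

def min_feature_size(feature_sizes, size_bins=75, size_limits=(0, 300)):
    """First empty histogram bin after the peak of the feature-size distribution."""
    counts, bin_edges = np.histogram(feature_sizes, bins=size_bins, range=size_limits)
    bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2.0
    bin_width = np.diff(bin_edges).mean()
    peak_size = bin_centers[counts.argmax()] - bin_width / 2.0
    is_zero = np.logical_and(bin_centers > peak_size, np.isclose(counts, 0, atol=1.0e-5))
    return bin_centers[is_zero].min() - bin_width / 2.0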
Example no. 30
def nema_2008_small_animal_pet_rois(vol, voxsize, lp_voxel = 'max', rod_th = 0.15,
                                    phantom = 'standard'):
  """ generate a label volume indicating the ROIs needed in the analysis of the
      NEMA small animal PET IQ phantom

  Parameters
  ----------
  vol : 3D numpy float array
    containing the image

  voxsize : 3 element 1D numpy array
    containing the voxel size

  lp_voxel: string, optional
    method of how to compute the pixel used to draw the line profiles
    in the rods. 'max' means the maximum voxels in the summed 2D image.
    anything else means use the center of mass.
 
  rod_th : float, optional
    threshold to find the rod in the summed 2D image relative to the
    mean of the big uniform region

  phantom : string
    phantom version ('standard' or 'mini')
 
  Returns
  -------
  a 3D integer numpy array
    encoding the following ROIs:
    1 ... ROI of the big uniform region
    2 ... first cold insert
    3 ... second cold insert
    4 ... central line profile in 5mm rod
    5 ... central line profile in 4mm rod
    6 ... central line profile in 3mm rod
    7 ... central line profile in 2mm rod
    8 ... central line profile in 1mm rod

  Note
  ----
  The rod ROIs in the summed 2D image are found by thresholding.
  If the activity in the small rods is too low, they might be missed.
  """
  roi_vol = np.zeros(vol.shape, dtype = np.uint)
  
  # calculate the summed z profile to place the ROIs
  zprof      = vol.sum(0).sum(0)
  zprof_grad = np.gradient(zprof)
  zprof_grad[np.abs(zprof_grad) < 0.13*np.abs(zprof_grad).max()] = 0
  
  rising_edges  = argrelextrema(zprof_grad, np.greater, order = 10)[0]
  falling_edges = argrelextrema(zprof_grad, np.less, order = 10)[0]
 
  # if we only have 2 falling edges because the volume is cropped, we add the last slice as
  # a falling edge

  if falling_edges.shape[0] == 2:
    falling_edges = np.concatenate([falling_edges,[vol.shape[2]]])

  # define and analyze the big uniform ROI
  uni_region_start_slice  = rising_edges[1]
  uni_region_end_slice    = falling_edges[1]
  uni_region_center_slice = 0.5*(uni_region_start_slice + uni_region_end_slice) 
  
  uni_roi_start_slice = int(np.floor(uni_region_center_slice - 5./voxsize[2]))
  uni_roi_end_slice   = int(np.ceil(uni_region_center_slice  + 5./voxsize[2]))
  
  uni_com = np.array(center_of_mass(vol[:,:,uni_roi_start_slice:(uni_roi_end_slice+1)]))
  
  x0 = (np.arange(vol.shape[0]) - uni_com[0]) * voxsize[0]
  x1 = (np.arange(vol.shape[1]) - uni_com[1]) * voxsize[1]
  x2 = (np.arange(vol.shape[2]) - uni_com[2]) * voxsize[2]
  
  X0,X1,X2 = np.meshgrid(x0,x1,x2,indexing='ij')
  RHO      = np.sqrt(X0**2 + X1**2)
  
  uni_mask = np.zeros(vol.shape, dtype = np.uint)
  if phantom == 'standard':
    uni_mask[RHO <= 11.25] = 1
  elif phantom == 'mini':
    uni_mask[RHO <= 6.25] = 1
  uni_mask[:,:,:uni_roi_start_slice]   = 0
  uni_mask[:,:,(uni_roi_end_slice+1):] = 0
  
  uni_inds = np.where(uni_mask == 1)
  roi_vol[uni_inds] = 1
  
  # define and analyze the two cold ROIs
  insert_region_start_slice  = falling_edges[1]
  insert_region_end_slice    = falling_edges[2]
  insert_region_center_slice = 0.5*(insert_region_start_slice + insert_region_end_slice) 
  
  insert_roi_start_slice = int(np.floor(insert_region_center_slice - 3.75/voxsize[2]))
  insert_roi_end_slice   = int(np.ceil(insert_region_center_slice  + 3.75/voxsize[2]))
  
  # average the insert slices and label the regions below half of their 99th percentile to find the two cold inserts
  sum_insert_img = vol[:,:,insert_roi_start_slice:(insert_roi_end_slice+1)].mean(2)
 
  ref = np.percentile(sum_insert_img,99)
  if phantom == 'standard':
    insert_label_img, nlab_insert = label(sum_insert_img <= 0.5*ref)
  elif phantom == 'mini':
    # reset pixels outside the phantom, since inserts sometimes leak into background
    tmp_inds = RHO[:,:,0] > 9
    sum_insert_img[tmp_inds] = ref
    insert_label_img, nlab_insert = label(binary_erosion(sum_insert_img <= 0.5*ref))

    # add background low-activity ROI to be compliant with the standard phantom
    insert_label_img[tmp_inds] = 3
    nlab_insert += 1

  insert_labels = np.arange(1,nlab_insert+1)
  # sort the labels according to volume
  npix_insert   = labeled_comprehension(sum_insert_img, insert_label_img, insert_labels, len, int, 0)
  insert_sort_inds = npix_insert.argsort()[::-1]
  insert_labels    = insert_labels[insert_sort_inds] 
  npix_insert      = npix_insert[insert_sort_inds] 

  for i_insert in [1,2]:
    tmp = insert_label_img.copy()
    tmp[insert_label_img != insert_labels[i_insert]] = 0
    com_pixel = np.round(np.array(center_of_mass(tmp)))
  
    x0 = (np.arange(vol.shape[0]) - com_pixel[0]) * voxsize[0]
    x1 = (np.arange(vol.shape[1]) - com_pixel[1]) * voxsize[1]
    x2 = (np.arange(vol.shape[2])) * voxsize[2]
    
    X0,X1,X2 = np.meshgrid(x0,x1,x2,indexing='ij')
    RHO      = np.sqrt(X0**2 + X1**2)
  
    insert_mask = np.zeros(vol.shape, dtype = np.uint)
    insert_mask[RHO <= 2] = 1
    insert_mask[:,:,:insert_roi_start_slice]   = 0
    insert_mask[:,:,(insert_roi_end_slice+1):] = 0
  
    insert_inds = np.where(insert_mask == 1)
    roi_vol[insert_inds] = i_insert + 1
  
  # find the rod z slices
  rod_start_slice = falling_edges[0]
  rod_end_slice   = rising_edges[1]
  rod_center      = 0.5*(rod_start_slice + rod_end_slice)
  
  rod_roi_start_slice = int(np.floor(rod_center - 5./voxsize[2]))
  rod_roi_end_slice   = int(np.ceil(rod_center  + 5./voxsize[2]))
  
  # sum the rod slices
  sum_img = vol[:,:,rod_roi_start_slice:(rod_roi_end_slice+1)].mean(2)
  
  # label the summed image
  label_img, nlab = label(sum_img > rod_th*sum_img.max())
  labels = np.arange(1,nlab+1)

  # sort the labels according to volume
  npix      = labeled_comprehension(sum_img, label_img, labels, len, int, 0)
  sort_inds = npix.argsort()[::-1]
  labels    = labels[sort_inds] 
  npix      = npix[sort_inds] 
  
  # find the center for the line profiles
  for i, lab in enumerate(labels):
    rod_sum_img = sum_img.copy()
    rod_sum_img[label_img != lab] = 0
 
    if lp_voxel == 'max':
      central_pixel = np.unravel_index(rod_sum_img.argmax(),rod_sum_img.shape)
    else:
      central_pixel = np.round(np.array(center_of_mass(rod_sum_img))).astype(np.int)
  
    roi_vol[central_pixel[0],central_pixel[1],rod_roi_start_slice:(rod_roi_end_slice+1)] = i + 4

  #-------------------------------------------------------
  # if we only have 4 labels (rods), we find the last (smallest) one based on symmetries
  if nlab == 4:
    roi_img = roi_vol[...,rod_roi_start_slice]

    com = center_of_mass(roi_vol == 1)
    x0 = (np.arange(sum_img.shape[0]) - com[0]) * voxsize[0]
    x1 = (np.arange(sum_img.shape[1]) - com[1]) * voxsize[1]
    X0,X1 = np.meshgrid(x0, x1, indexing = 'ij')
    RHO = np.sqrt(X0**2 + X1**2)

    PHI = np.arctan2(X1,X0)
    rod_phis = np.array([PHI[roi_img == x][0] for x in np.arange(4,nlab+4)])
    PHI      = ((PHI - rod_phis[3]) % (2*np.pi)) - np.pi
    rod_phis = ((rod_phis - rod_phis[3]) % (2*np.pi)) - np.pi
    
    missing_phi = ((rod_phis[3] - rod_phis[2]) % (2*np.pi)) - np.pi
    
    mask = np.logical_and(np.abs(PHI - missing_phi) < 0.25, np.abs(RHO - 6.4) < 2)
    
    central_pixel = np.unravel_index(np.argmax(sum_img*mask), sum_img.shape)
    roi_vol[central_pixel[0],central_pixel[1],rod_roi_start_slice:(rod_roi_end_slice+1)] = 8

    nlab += 1
  #-------------------------------------------------------


  return roi_vol
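A possible usage sketch (an assumption, not part of the original example): the ROI codes documented in the docstring above can be turned into simple summary statistics; nema_roi_summary is a hypothetical helper name.

import numpy as np

def nema_roi_summary(vol, roi_vol):
    # mean uptake per ROI, expressed relative to the big uniform region (code 1)
    uniform = vol[roi_vol == 1]
    uniform_mean = uniform.mean()
    summary = {'uniform_mean': uniform_mean,
               'uniform_cov': uniform.std() / uniform_mean}
    roi_names = {2: 'cold_insert_1', 3: 'cold_insert_2', 4: 'rod_5mm',
                 5: 'rod_4mm', 6: 'rod_3mm', 7: 'rod_2mm', 8: 'rod_1mm'}
    for code, name in roi_names.items():
        if np.any(roi_vol == code):
            summary[name] = vol[roi_vol == code].mean() / uniform_mean
    return summary

# summary = nema_roi_summary(vol, nema_2008_small_animal_pet_rois(vol, voxsize))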
Exemplo n.º 31
0
def fit_nema_2008_cylinder_profiles(vol, 
                                    voxsize,
                                    Rrod_init  = [2.5,2,1.5,1,0.5],
                                    fwhm_init  = 1.5,
                                    S_init     = 1,
                                    fix_S      = True,
                                    fix_R      = False,
                                    fix_fwhm   = False,
                                    nrods      = 4,
                                    phantom    = 'standard'):
  """ Fit the radial profiles of the rods in a nema 2008 small animal PET phantom

  Parameters
  ----------
  vol : 3D numpy float array
    containing the image

  voxsize : 3 element 1D numpy array
    containing the voxel size

  Rrod_init : list or 1D numpy array of floats, optional
    containing the initial values of the rod radii

  S_init, fwhm_init: float, optional
    initial values for the signal and the FWHM in the fit

  fix_S, fix_R, fix_fwhm : bool, optional
    whether to keep the initial values of signal, radius and FWHM fixed during the fit

  nrods: int, optional
    number of rods to fit

  phantom : string
    phantom version ('standard' or 'mini')

  Returns
  -------
  a list of lmfit fit results

  Note
  ----

  The axial direction should be the last (rightmost) axis of the 3D numpy array.
  The slices containing the rods are found automatically and summed.
  In the summed image, all rods (disks) are segmented followed by a fit
  of the radial profile.
  """
  roi_vol = nema_2008_small_animal_pet_rois(vol, voxsize, phantom = phantom)
  
  rod_bbox = find_objects(roi_vol==4)
  
  # find the rods in the summed image
  sum_img = vol[:,:,rod_bbox[0][2].start:rod_bbox[0][2].stop].mean(2)
  
  label_img, nlab = label(sum_img > 0.1*sum_img.max())
  labels = np.arange(1,nlab+1)
  # sort the labels according to volume
  npix   = labeled_comprehension(sum_img, label_img, labels, len, int, 0)
  sort_inds = npix.argsort()[::-1]
  labels    = labels[sort_inds] 
  npix      = npix[sort_inds] 
  
  #----------------------------------------------------------------------  
  ncols = 2
  nrows = int(np.ceil(nrods / float(ncols)))  # float division so e.g. 5 rods still get 3 rows
  fig, ax = py.subplots(nrows,ncols,figsize = (12,7*nrows/2), sharey = True, sharex = True)
 
  retval = []
 
  for irod in range(nrods):
    rod_bbox = find_objects(label_img == labels[irod])
   
    rod_bbox = [(slice(rod_bbox[0][0].start - 2,rod_bbox[0][0].stop + 2),
                 slice(rod_bbox[0][1].start - 2,rod_bbox[0][1].stop + 2))]
   
    rod_img = sum_img[rod_bbox[0]]
    com     = np.array(center_of_mass(rod_img))
    
    x0 = (np.arange(rod_img.shape[0]) - com[0]) * voxsize[0]
    x1 = (np.arange(rod_img.shape[1]) - com[1]) * voxsize[1]
    
    X0, X1 = np.meshgrid(x0, x1, indexing = 'ij')
    RHO    = np.sqrt(X0**2 + X1**2) 
    
    rho    = RHO.flatten()
    signal = rod_img.flatten()
    
    # sort the values according to rho
    sort_inds = rho.argsort()
    rho       = rho[sort_inds]
    signal    = signal[sort_inds]
    
    pmodel = Model(cylinder_profile)
    params = pmodel.make_params(S = S_init, R = Rrod_init[irod], fwhm = fwhm_init)

    if fix_S:
      params['S'].vary = False
    if fix_R:
      params['R'].vary = False
    if fix_fwhm:
      params['fwhm'].vary = False
    
    fitres = pmodel.fit(signal, r = rho, params = params)
    retval.append(fitres)
    fit_report = fitres.fit_report()
   
    iplot = np.unravel_index(irod, ax.shape) 
    ax[iplot].plot(rho,signal,'k.')
    
    rfit = np.linspace(0,rho.max(),100)
    ax[iplot].plot(rfit,fitres.eval(r = rfit),'r-')
    ax[iplot].text(0.99, 0.99, fit_report, fontsize = 6, transform = ax[iplot].transAxes, 
                         verticalalignment='top', horizontalalignment = 'right',
                         backgroundcolor = 'white', bbox = {'pad':0, 'facecolor':'white','lw':0})
    ax[iplot].grid()
  
  for axx in ax[-1,:]: axx.set_xlabel('R (mm)')
  for axx in ax[:,0]:  axx.set_ylabel('signal')
  
  fig.tight_layout()
  fig.show()

  return retval
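The lmfit fitting pattern used above, reduced to a self-contained sketch: step_profile is a hypothetical stand-in for cylinder_profile (which is defined elsewhere in the package), and the synthetic data are for illustration only.

import numpy as np
from scipy.special import erf
from lmfit import Model

def step_profile(r, S, R, fwhm):
    # signal S inside radius R, blurred with a Gaussian of the given FWHM
    sig = fwhm / 2.355
    return 0.5 * S * (1 + erf((R - r) / (np.sqrt(2) * sig)))

rho = np.linspace(0, 5, 200)
signal = step_profile(rho, S=1.0, R=2.0, fwhm=1.5) + 0.02 * np.random.randn(rho.size)

pmodel = Model(step_profile)
params = pmodel.make_params(S=1.0, R=2.5, fwhm=1.0)
params['S'].vary = False          # same role as fix_S above
fitres = pmodel.fit(signal, r=rho, params=params)
print(fitres.fit_report())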
Exemplo n.º 32
0
def _binary_significant_features(
        radar, binary_field, size_bins=75, size_limits=(0, 300),
        structure=None, debug=False, verbose=False):
    """
    Objectively determine the minimum echo feature size (in radar gates) and
    remove features smaller than this from the specified radar field. This
    function can be used to objectively remove salt and pepper noise from
    binary (mask) radar fields. Unexpected results may occur if the specified
    radar field is not a binary field.

    Parameters
    ----------
    radar : Radar
        Radar object containing the specified binary field.
    binary_field : str
        The binary radar field that will have insignificant echo features
        removed.
    size_bins : int, optional
        Number of bins used to bin echo feature sizes and thus define their
        distribution.
    size_limits : list or tuple, optional
        Limits of the echo feature size distribution. The upper limit needs to
        be large enough to include the minimum feature size. The size bin width
        is defined by both the size_bins and size_limits parameters.
    structure : array_like, optional
        Binary structuring element used to define connected features. The
        default structuring element has a squared connectivity equal to one.
    debug, verbose : bool, optional
        True to print debugging and progress information, respectively, False
        to suppress.

    """

    # Parse binary structuring element
    if structure is None:
        structure = ndimage.generate_binary_structure(2, 1)

    # Parse feature size arrays
    size_data = np.zeros_like(
        radar.fields[binary_field]['data'], dtype=np.int32)
    feature_sizes = []

    for i, sweep in enumerate(radar.iter_slice()):

        # Parse radar sweep data
        # Non-zero elements of the array form the subset to be dilated
        data = radar.fields[binary_field]['data'][sweep]
        is_valid_gate = np.ma.filled(data, 0)

        # Label the connected features in the sweep data and create index
        # array which defines each unique labeled feature
        labels, nlabels = ndimage.label(
            is_valid_gate, structure=structure, output=None)
        index = np.arange(1, nlabels + 1, 1)

        if debug:
            print 'Unique features in sweep {}: {}'.format(i, nlabels)

        # Compute the size in radar gates of each labeled feature
        sweep_sizes = ndimage.labeled_comprehension(
            is_valid_gate, labels, index, np.count_nonzero, np.int32, 0)
        feature_sizes.append(sweep_sizes)

        # Set each labeled feature to its total size in radar gates
        for label, size in zip(index, sweep_sizes):
            size_data[sweep][labels == label] = size

    feature_sizes = np.hstack(feature_sizes)

    # Bin and count occurrences of labeled feature sizes
    # Compute bin centers and bin width
    counts, bin_edges = np.histogram(
        feature_sizes, bins=size_bins, range=size_limits, normed=False,
        weights=None, density=False)
    bin_centers = bin_edges[:-1] + np.diff(bin_edges) / 2.0
    bin_width = np.diff(bin_edges).mean()

    if debug:
        print 'Feature size bin width: {} gate(s)'.format(bin_width)

    # Compute the peak of the labeled feature size distribution
    # We expect the peak of this distribution to be close to 1 radar gate
    peak_size = bin_centers[counts.argmax()] - bin_width / 2.0

    if debug:
        print 'Feature size at peak: {} gate(s)'.format(peak_size)

    # Determine the first instance when the count (sample size) of a feature
    # size bin reaches 0 in the right side of the feature size distribution
    # This will define the minimum feature size
    is_zero_size = np.logical_and(
        bin_centers > peak_size, np.isclose(counts, 0, atol=1.0e-5))
    min_size = bin_centers[is_zero_size].min() - bin_width / 2.0

    if debug:
        _range = [0.0, min_size]
        print 'Insignificant feature size range: {} gates'.format(_range)

    # Remove insignificant features from the binary radar field
    radar.fields[binary_field]['data'][size_data < min_size] = 0

    return
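A self-contained sketch of the per-sweep step above, applied to a single 2D binary mask: label connected features, measure their size in pixels, and zero out features smaller than a chosen minimum. remove_small_features is a hypothetical helper and min_size here is an assumed input rather than the objectively derived threshold computed by the function above.

import numpy as np
from scipy import ndimage

def remove_small_features(mask, min_size, structure=None):
    if structure is None:
        structure = ndimage.generate_binary_structure(2, 1)
    # label connected features and compute their size in pixels
    labels, nlabels = ndimage.label(mask, structure=structure)
    index = np.arange(1, nlabels + 1)
    sizes = ndimage.labeled_comprehension(
        mask, labels, index, np.count_nonzero, np.int32, 0)
    # map every labeled feature to its total size
    size_map = np.zeros_like(mask, dtype=np.int32)
    for lab, size in zip(index, sizes):
        size_map[labels == lab] = size
    # keep only features at least min_size pixels large
    out = mask.copy()
    out[size_map < min_size] = 0
    return out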
def detection_by_scene_segmentation():
    '''Detection of a moving object using the distortion field of the centers
    of mass of background objects'''
    if co.counters.im_number == 0:
        co.segclass.needs_segmentation = 1
        if not co.segclass.exists_previous_segmentation:
            print 'No existing previous segmentation. The initialisation will take longer...'
        else:
            print 'Checking similarity..'
            old_im = co.meas.background
            check_if_segmented = np.sqrt(
                np.sum(
                    (co.data.depth_im[co.meas.trusty_pixels.astype(
                        bool)].astype(float) -
                     old_im[co.meas.trusty_pixels.astype(bool)].astype(float)
                     )**2))
            print 'Euclidean distance between old and new background is ' +\
                str(check_if_segmented)
            print 'Minimum distance to approve previous segmentation is ' +\
                str(co.CONST['similar_bg_min_dist'])
            if check_if_segmented < co.CONST['similar_bg_min_dist']:
                print 'No need to segment again'
                co.segclass.needs_segmentation = 0
            else:
                print 'Segmentation is needed'

    if co.segclass.needs_segmentation and co.counters.im_number >= 1:
        if co.counters.im_number == (
                co.CONST['framerate'] * co.CONST['calib_secs'] - 1):
            co.segclass.flush_previous_segmentation()
            co.segclass.nz_objects.image = np.zeros_like(co.data.depth_im) - 1
            co.segclass.z_objects.image = np.zeros_like(co.data.depth_im) - 1
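            # Quantise the valid depth range into a small number of levels;
            # each level is later split into connected components that act as
            # background objects for the distortion-field computation.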
            levels_num = 8
            levels = np.linspace(
                np.min(co.data.depth_im[co.data.depth_im > 0]),
                np.max(co.data.depth_im), levels_num)
            co.segclass.segment_values = np.zeros_like(co.data.depth_im)
            for count in range(levels_num - 1):
                co.segclass.segment_values[
                    (co.data.depth_im >= levels[count]) *
                    (co.data.depth_im <= levels[count + 1])] = count + 1

        elif co.counters.im_number == (co.CONST['framerate'] *
                                       co.CONST['calib_secs']):
            co.segclass.nz_objects.count = -1
            co.segclass.z_objects.count = -1
            co.segclass.segment_values = co.segclass.segment_values * co.meas.trusty_pixels
            for val in np.unique(co.segclass.segment_values):
                objs = np.ones_like(co.data.depth_im) * \
                    (val == co.segclass.segment_values)
                labeled, nr_objects =\
                    ndimage.label(objs * co.edges.calib_frame)
                lbls = np.arange(1, nr_objects + 1)
                if val > 0:
                    ndimage.labeled_comprehension(
                        objs, labeled, lbls, co.segclass.nz_objects.process,
                        float, 0, True)
                else:
                    ndimage.labeled_comprehension(
                        objs, labeled, lbls, co.segclass.z_objects.process,
                        float, 0, True)
            for (points, pixsize, xsize,
                 ysize) in (co.segclass.nz_objects.untrusty +
                            co.segclass.z_objects.untrusty):
                co.segclass.z_objects.count += 1
                co.segclass.z_objects.image[tuple(
                    points)] = co.segclass.z_objects.count
                co.segclass.z_objects.pixsize.append(pixsize)
                co.segclass.z_objects.xsize.append(xsize)
                co.segclass.z_objects.ysize.append(ysize)

            print 'Found or partitioned',\
                co.segclass.nz_objects.count +\
                co.segclass.z_objects.count + 2, 'background objects'
            co.segclass.needs_segmentation = 0
            with open(co.CONST['segmentation_data'] + '.pkl', 'wb') as output:
                pickle.dump((co.segclass, co.meas), output, -1)
            print 'Saved segmentation data for future use.'
    elif (not co.segclass.needs_segmentation) and co.counters.im_number >= 2:
        if not co.segclass.initialised_centers:
            try:
                co.segclass.nz_objects.find_object_center(1)
            except BaseException as err:
                print 'Centers initialisation Exception'
                raise err
            try:
                co.segclass.z_objects.find_object_center(0)
            except BaseException as err:
                print 'Centers initialisation Exception'
                raise err
            co.segclass.initialise_neighborhoods()
            co.segclass.initialised_centers = 1
        else:
            try:
                co.segclass.nz_objects.find_object_center(1)
            except BaseException as err:
                print 'Centers calculation Exception'
                raise err
        if co.segclass.nz_objects.center.size > 0:
            co.segclass.nz_objects.find_centers_displacement()
            co.meas.found_objects_mask = co.segclass.find_objects()

            points_on_im = co.data.depth3d.copy()
            # points_on_im[np.sum(points_on_im,axis=2)==0,:]=np.array([1,0,1])
            for calc, point1, point2 in zip(
                    co.segclass.nz_objects.centers_to_calculate,
                    co.segclass.nz_objects.initial_center,
                    co.segclass.nz_objects.center):
                if point1[0] != -1:
                    if calc:
                        cv2.arrowedLine(points_on_im, (point1[1], point1[0]),
                                        (point2[1], point2[0]), [0, 1, 0], 2,
                                        1)
                    else:
                        cv2.arrowedLine(points_on_im, (point1[1], point1[0]),
                                        (point2[1], point2[0]), [0, 0, 1], 2,
                                        1)
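            # Clean up the detected-objects mask: a morphological closing
            # fills small holes, then an opening removes isolated specks
            # before the hand patch is extracted.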
            struct_el = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                  tuple(2 * [5]))
            co.meas.found_objects_mask = cv2.morphologyEx(
                co.meas.found_objects_mask.astype(np.uint8), cv2.MORPH_CLOSE,
                struct_el)
            struct_el = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                  tuple(2 * [10]))
            co.meas.found_objects_mask = cv2.morphologyEx(
                co.meas.found_objects_mask.astype(np.uint8), cv2.MORPH_OPEN,
                struct_el)
            hand_patch, hand_patch_pos = hsa.main_process(
                co.meas.found_objects_mask.astype(np.uint8),
                co.meas.all_positions, 1)
            co.meas.hand_patch = hand_patch
            co.meas.hand_patch_pos = hand_patch_pos
            if len(co.im_results.images) == 1:
                co.im_results.images.append(
                    (255 * co.meas.found_objects_mask).astype(np.uint8))
            co.im_results.images.append(points_on_im)
            # elif len(co.im_results.images)==2:
            #    co.im_results.images[1][co.im_results.images[1]==0]=(255*points_on_im).astype(np.uint8)[co.im_results.images[1]==0]
            # if hand_points.shape[1]>1:
            #    points_on_im[tuple(hand_points.T)]=[1,0,0]
            # co.im_results.images.append(points_on_im)
            return 1