def read_icespeed_nc(sample_step=10, store=True):
    """Read ice speeds from the NetCDF file and return them as a DataFrame."""
    nc_file = fileutils.correct_path(settings.ICE_SPEED_NC_FILE)
    dataset = Dataset(nc_file, mask_and_scale=False, decode_times=False)
    nc_vars = [var for var in dataset.variables]
    headers = nc_vars[3:7]
    print('Reading speeds from file....')
    mask_lat = dataset.variables[headers[0]][:]
    print('done with: ' + headers[0])
    mask_lon = dataset.variables[headers[1]][:]
    print('done with: ' + headers[1])
    mask_vel_x = dataset.variables[headers[2]][:]
    print('done with: ' + headers[2])
    mask_vel_y = dataset.variables[headers[3]][:]
    print('done with: ' + headers[3])
    print('reading the nonzero...')
    nonzeroposition_x = ma.nonzero(mask_vel_x)
    nonzeroposition_y = ma.nonzero(mask_vel_y)
    print('zip...')
    nonzeroposition_zip = zip(nonzeroposition_x[0], nonzeroposition_x[1],
                              nonzeroposition_y[0], nonzeroposition_y[1])
    print('filtering and casting coordinates....')
    nonzeroposition = []
    for x in tqdm(nonzeroposition_zip):
        # `and`, not `&`: the bitwise operator binds tighter than `==`,
        # so the original expression parsed as a chained comparison
        if x[0] == x[2] and x[1] == x[3]:
            nonzeroposition.append((x[0], x[1]))
    lat = []
    lon = []
    vel_x = []
    vel_y = []
    print('creating the dataframe...')
    for pos in tqdm(nonzeroposition):
        lat.append(ma.getdata(mask_lat[pos[0], pos[1]]))
        lon.append(ma.getdata(mask_lon[pos[0], pos[1]]))
        vel_x.append(ma.getdata(mask_vel_x[pos[0], pos[1]]))
        vel_y.append(ma.getdata(mask_vel_y[pos[0], pos[1]]))
    table = {
        headers[0]: lat,
        headers[1]: lon,
        headers[2]: vel_x,
        headers[3]: vel_y
    }
    df = pd.DataFrame(table, columns=headers)
    if store:
        print('storing in a csv')
        df.to_csv(settings.OUTPUT_ICESPEED_CSV, sep=',', encoding='utf-8')
    print('DONE')
    return df
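# Illustrative sketch (not from the module above): the zip-and-filter loop in
# read_icespeed_nc() keeps cells where both velocity components are nonzero.
# The same index set can be computed in one shot from the two masked arrays;
# the toy vel_x/vel_y arrays here are made-up values for demonstration.
import numpy as np
import numpy.ma as ma

vel_x = ma.masked_equal(np.array([[0., 1.], [2., 0.]]), 0.)
vel_y = ma.masked_equal(np.array([[0., 3.], [0., 0.]]), 0.)

# The product is nonzero only where both components are present and nonzero.
rows, cols = ma.nonzero(ma.filled(vel_x, 0) * ma.filled(vel_y, 0))
nonzeroposition = list(zip(rows, cols))
print(nonzeroposition)  # the single cell (row 0, col 1) for these toy arrays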
def peakfinding():
    # first step of getting peaks
    peaks_obj = Data(frq, abs(Y), smoothness=11)
    # second part of getting peaks
    peaks_obj.get_peaks(method='slope')
    # pull data out of the peaks data object for filtering
    peaks = peaks_obj.peaks["peaks"]
    peaks_obj.plot()
    show()
    peaksnp = np.zeros((2, len(peaks[0])))
    peaksnp[0] = peaks[0]
    peaksnp[1] = peaks[1]
    maxpeaks = max(peaks[1])
    # filtering function: removes peaks shorter than `cutoff` (5%) of the max peak
    cutoff = .05
    filtered_peaks = ma.masked_less(peaksnp[1], cutoff * maxpeaks)
    indices = ma.nonzero(filtered_peaks)[0]
    final_peaks = np.zeros((3, len(indices)))
    for i in range(len(indices)):
        final_peaks[0, i] = frq[i]
        final_peaks[1, i] = peaksnp[1, indices[i]]
        final_peaks[2, i] = peaksnp[0, indices[i]]
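# Standalone sketch of the masked_less / nonzero idiom used by peakfinding()
# above, without the Data/frq dependencies; the synthetic heights and the 5%
# cutoff (matching `cutoff = .05`) are illustrative assumptions.
import numpy as np
import numpy.ma as ma

heights = np.array([0.2, 9.0, 0.1, 4.5, 10.0, 0.3])
cutoff = 0.05
# mask peaks below 5% of the tallest one, then recover the surviving indices
kept = ma.nonzero(ma.masked_less(heights, cutoff * heights.max()))[0]
print(kept)           # indices of the peaks that survive the filter
print(heights[kept])  # their heights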
def train_step_sequential(self, epoch, indices=None):
    """A single step of the sequential training algorithm.
    """
    indices = range(len(self.data)) if indices is None else indices
    for ind in indices:
        x = self.data[ind]
        Dx = self.vectors - self.data[ind]
        Dist = ma.sum(Dx**2, 1)
        min_dist = ma.min(Dist)
        bmu = ma.argmin(Dist)
        self.distances.append(min_dist)
        iter = epoch * len(self.data) + ind
        if self.neighbourhood == Map.NeighbourhoodGaussian:
            h = numpy.exp(-self.unit_distances[:, bmu]**2 /
                          (2 * self.radius_seq(iter)**2)) * \
                (self.unit_distances[:, bmu]**2 <= self.radius_seq(iter)**2)
        elif self.neighbourhood == Map.NeighbourhoodEpanechicov:
            # [:, bmu], not [:bmu]: index the BMU's distance column
            h = 1.0 - (self.unit_distances[:, bmu] / self.radius_seq(iter))**2
            h = h * (h >= 0.0)
        else:
            h = 1.0 * (self.unit_distances[:, bmu] <= self.radius_seq(iter))
        h = h * self.alpha(iter)
        nonzero = ma.nonzero(h)
        h = h[nonzero]
        self.vectors[nonzero] = self.vectors[nonzero] - \
            Dx[nonzero] * numpy.reshape(h, (len(h), 1))
def train_step_sequential(self, epoch, indices=None):
    """A single step of the sequential training algorithm.
    """
    indices = range(len(self.data)) if indices is None else indices
    for ind in indices:
        x = self.data[ind]
        Dx = self.vectors - self.data[ind]
        Dist = ma.sum(Dx**2, 1)
        min_dist = ma.min(Dist)
        bmu = ma.argmin(Dist)
        self.distances.append(min_dist)
        iter = epoch * len(self.data) + ind
        if self.neighbourhood == Map.NeighbourhoodGaussian:
            h = numpy.exp(-self.unit_distances[:, bmu]**2 /
                          (2 * self.radius_seq(iter)**2)) * (
                self.unit_distances[:, bmu]**2 <= self.radius_seq(iter)**2)
        elif self.neighbourhood == Map.NeighbourhoodEpanechicov:
            # [:, bmu], not [:bmu]: index the BMU's distance column
            h = 1.0 - (self.unit_distances[:, bmu] / self.radius_seq(iter))**2
            h = h * (h >= 0.0)
        else:
            h = 1.0 * (self.unit_distances[:, bmu] <= self.radius_seq(iter))
        h = h * self.alpha(iter)
        nonzero = ma.nonzero(h)
        h = h[nonzero]
        self.vectors[nonzero] = self.vectors[
            nonzero] - Dx[nonzero] * numpy.reshape(h, (len(h), 1))
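# Minimal sketch of the Gaussian-neighbourhood update shared by the
# train_step_sequential variants above: only units whose neighbourhood weight
# is nonzero get pulled toward the sample. The 4-unit map, radius and sample
# are made-up values.
import numpy as np
import numpy.ma as ma

unit_dist_to_bmu = np.array([0.0, 1.0, 2.0, 5.0])  # grid distance to the BMU
radius = 2.0
h = np.exp(-unit_dist_to_bmu**2 / (2 * radius**2)) * \
    (unit_dist_to_bmu**2 <= radius**2)

vectors = np.zeros((4, 2))                 # codebook vectors
Dx = vectors - np.array([1.0, -1.0])       # difference to the current sample

nonzero = ma.nonzero(h)                    # units inside the radius only
vectors[nonzero] = vectors[nonzero] - Dx[nonzero] * h[nonzero].reshape(-1, 1)
print(vectors)                             # the unit at distance 5 stays untouched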
def test_indices(self, target, classes=None):
    classes = self.classes if classes is None else classes

    def target_set(target):
        if isinstance(target, tuple):
            return set([target])
        else:
            assert isinstance(target, set)
            return target

    if self.useAttributeLabels:
        if isinstance(target, list):
            ind = [[i for i, cl in enumerate(self.classes)
                    if target_set(t).intersection(cl)] for t in target]
        else:
            target = target_set(target)
            ind1 = [i for i, cl in enumerate(self.classes)
                    if target.intersection(cl)]
            ind2 = [i for i, cl in enumerate(self.classes)
                    if not target.intersection(cl)]
            ind = [ind1, ind2]
    else:
        if isinstance(target, list):
            ind = [ma.nonzero(self.classes == t)[0] for t in target]
        else:
            if isinstance(target, (str, Variable)):
                target = set([target])
            else:
                assert isinstance(target, set)
            target = list(target)
            ind1 = [i for i, cl in enumerate(self.classes) if cl in target]
            ind2 = [i for i, cl in enumerate(self.classes) if cl not in target]
            ind = [ind1, ind2]
    return ind
def get_histogram_centers(self):
    diff_image = absolute(
        cv2.blur(self.images[1], (5, 5)) - cv2.blur(self.images[0], (5, 5)))
    # diff_image = absolute(self.images[1] - self.images[0])
    diff_image = cv2.cvtColor(diff_image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(diff_image, 127, 255, cv2.THRESH_BINARY)
    mask_bool = mask.astype(bool)  # np.bool is removed in modern NumPy
    nonzero_pos = nonzero(mask)
    nonzero_num = len(nonzero_pos[0])
    if FRACTION_MIN * self.num_pixels < nonzero_num < FRACTION_MAX * self.num_pixels:
        data_set = np.vstack(nonzero_pos).T
        mask_label, self.centers = make_kmeans(data_set)
        mask_res = mask_bool.ravel()
        mask_int = np.arange(len(mask_res))
        mask0 = get_class_mask(mask_res, mask_label, mask_int, mask_bool, 0)
        mask1 = get_class_mask(mask_res, mask_label, mask_int, mask_bool, 1)
        self.histogram0 = get_histogram(self.images[1], mask0)
        self.histogram1 = get_histogram(self.images[1], mask1)
        if self._debug:
            self._debug_output(mask, mask0, mask1)
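# Pure-NumPy sketch of the frame-differencing step in get_histogram_centers():
# threshold the difference of two frames and turn the changed pixels into an
# (N, 2) coordinate set for clustering. No OpenCV needed; the 8x8 frames and
# the moving patch are invented.
import numpy as np

frame0 = np.zeros((8, 8), dtype=np.uint8)
frame1 = frame0.copy()
frame1[2:4, 5:7] = 200                       # a small "moving" patch

diff = np.abs(frame1.astype(np.int16) - frame0.astype(np.int16))
mask = diff > 127
rows, cols = np.nonzero(mask)
data_set = np.vstack((rows, cols)).T         # coordinates fed to k-means
print(data_set)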
def train_step_sequential(self, epoch, indices=None):
    indices = list(range(len(self.data))) if indices is None else indices
    for ind in indices:
        x = self.data[ind]
        Dx = self.vectors - self.data[ind]
        Dist = ma.sum(Dx**2, 1)
        min_dist = ma.min(Dist)
        bmu = ma.argmin(Dist)
        self.distances.append(min_dist)
        if self.neighbourhood == Map.NeighbourhoodGaussian:
            h = numpy.exp(-self.unit_distances[:, bmu] /
                          (2 * self.radius(epoch))) * (
                self.unit_distances[:, bmu] <= self.radius(epoch))
        elif self.neighbourhood == Map.NeighbourhoodEpanechicov:
            # [:, bmu], not [:bmu]: take the BMU's distance column
            h = 1.0 - (self.unit_distances[:, bmu] / self.radius(epoch))**2
            h = h * (h >= 0.0)
        else:
            h = 1.0 * (self.unit_distances[:, bmu] <= self.radius(epoch))
        h = h * self.alpha(epoch)
        nonzero = ma.nonzero(h)
        h = h[nonzero]
        self.vectors[nonzero] = self.vectors[
            nonzero] - Dx[nonzero] * numpy.reshape(h, (len(h), 1))
def train_step_batch(self, epoch):
    D1 = ma.dot(self.vectors**2, self.weight_matrix)
    D2 = ma.dot(self.vectors, self.constant_matrix)
    Dist = D1 - D2

    best_nodes = ma.argmin(Dist, 0)
    distances = ma.min(Dist, 0)
##    print "q error:", ma.mean(ma.sqrt(distances + self.dist_cons)), self.radius(epoch)
    self.qerror.append(ma.mean(ma.sqrt(distances + self.dist_cons)))

    if self.neighbourhood == Map.NeighbourhoodGaussian:
        H = numpy.exp(-self.unit_distances / (2 * self.radius(epoch))) * (
            self.unit_distances <= self.radius(epoch))
    elif self.neighbourhood == Map.NeighbourhoodEpanechicov:
        H = 1.0 - (self.unit_distances / self.radius(epoch))**2
        H = H * (H >= 0.0)
    else:
        H = 1.0 * (self.unit_distances <= self.radius(epoch))

    P = numpy.zeros((self.vectors.shape[0], self.data.shape[0]))
    P[(best_nodes, list(range(len(best_nodes))))] = numpy.ones(len(best_nodes))

    S = ma.dot(H, ma.dot(P, self.data))
    A = ma.dot(H, ma.dot(P, ~self.data._mask))

##    nonzero = (range(epoch%2, len(self.vectors), 2), )
    nonzero = (numpy.array(sorted(set(ma.nonzero(A)[0]))), )

    self.vectors[nonzero] = S[nonzero] / A[nonzero]
def train_step_batch(self, epoch):
    """A single step of the batch training algorithm.
    """
    D1 = ma.dot(self.vectors**2, self.weight_matrix)
    D2 = ma.dot(self.vectors, self.constant_matrix)
    Dist = D1 - D2

    best_nodes = ma.argmin(Dist, 0)
    distances = ma.min(Dist, 0)
##    print "q error:", ma.mean(ma.sqrt(distances + self.dist_cons)), self.radius(epoch)
    self.qerror.append(ma.mean(ma.sqrt(distances + self.dist_cons)))

    if self.neighbourhood == Map.NeighbourhoodGaussian:
        H = numpy.exp(-self.unit_distances**2 / (2 * self.radius(epoch)**2)) * \
            (self.unit_distances**2 <= self.radius(epoch)**2)
    elif self.neighbourhood == Map.NeighbourhoodEpanechicov:
        H = 1.0 - (self.unit_distances / self.radius(epoch))**2
        H = H * (H >= 0.0)
    else:
        H = 1.0 * (self.unit_distances <= self.radius(epoch))

    P = numpy.zeros((self.vectors.shape[0], self.data.shape[0]))
    P[(best_nodes, range(len(best_nodes)))] = numpy.ones(len(best_nodes))

    S = ma.dot(H, ma.dot(P, self.data))
    A = ma.dot(H, ma.dot(P, ~self.data._mask))

##    nonzero = (range(epoch%2, len(self.vectors), 2), )
    nonzero = (numpy.array(sorted(set(ma.nonzero(A)[0]))), )

    self.vectors[nonzero] = S[nonzero] / A[nonzero]
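# Toy sketch of the batch-update bookkeeping in train_step_batch(): P assigns
# each sample to its best node, and only units with a nonzero accumulated
# weight A are updated (unit 1 receives no samples here). The 3-unit x
# 4-sample sizes and the identity neighbourhood are assumptions for the demo.
import numpy as np
import numpy.ma as ma

best_nodes = np.array([0, 2, 2, 0])           # BMU index per sample
P = np.zeros((3, 4))
P[best_nodes, np.arange(4)] = 1.0

H = np.eye(3)                                 # degenerate neighbourhood
data = np.arange(8.0).reshape(4, 2)
S = H @ (P @ data)                            # weighted sums per unit
A = H @ (P @ np.ones_like(data))              # counts (all entries observed)

nonzero = (np.array(sorted(set(ma.nonzero(A)[0]))), )
vectors = np.zeros((3, 2))
vectors[nonzero] = S[nonzero] / A[nonzero]
print(vectors)                                # row 1 is left at zero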
def equi_n_discretization(array, intervals=5, dim=1):
    count = ma.sum(ma.array(ma.ones(array.shape, dtype=int), mask=array.mask), dim)
    cut = ma.zeros(len(count), dtype=int)
    sarray = ma.sort(array, dim)
    r = count % intervals
    pointsshape = list(array.shape)
    pointsshape[dim] = 1
    points = []
    for i in range(intervals):
        cutend = cut + count // intervals + numpy.ones(len(r)) * (r > i)
        if dim == 1:
            p = sarray[list(range(len(cutend))), numpy.array(cutend, dtype=int) - 1]
        else:
            p = sarray[numpy.array(cutend, dtype=int) - 1, list(range(len(cutend)))]
        points.append(p.reshape(pointsshape))
        cut = cutend
    darray = ma.array(ma.zeros(array.shape) - 1, mask=array.mask)
    darray[ma.nonzero(array <= points[0])] = 0
    for i in range(0, intervals):
        darray[ma.nonzero(array > points[i])] = i + 1
    return darray
def getCoordsToKeep(nc, variables, newMask='', debug=False):
    """This routine takes an ncdfView object, a list of variables,
       and tests which coordinates should be saved.
       A Mask can be applied instead.
    """
    CoordsToKeep = {}
    for var in variables:
        if var in alwaysInclude:
            continue
        arr = nc.variables[var][:]
        if len(newMask):
            out = []
            if newMask.shape != arr.shape:
                if debug:
                    print('getCoordsToKeep:\t', var, 'Wrong shape')
                continue
            try:  # 1D arrays
                for i, m in enumerate(newMask):
                    if not m:
                        continue
                    out.append(arr[i])
                arr = array(out).squeeze()
            except:  # multi-dimensional arrays
                arr = masked_where(newMask, array(arr))
        #nearlyZero = 1.E-6
        nz = nonzero(arr)  # +nearlyZero
        #nz = compressed(arr)
        #if debug: print('getCoordsToKeep:\tcompressed array:', len(nz), nz.shape, nz.min(), nz.max(), nz.mean())
        if not len(nz):
            variables.remove(var)
            continue
        nzdims = len(nz)
        for i, a in enumerate(nz[0]):
            coords = tuple([nz[j][i] for j in range(nzdims)])
            if var in ['OBSERVATIONS']:
                #if coords not in CoordsToKeep.keys():
                #    print("NEW OBSERVATION LOCATION:", i, coords)
                pass
            else:
                print(coords)
            try:
                if i in CoordsToKeep[coords]:
                    pass
            except:
                try:
                    CoordsToKeep[coords].append(i)
                except:
                    CoordsToKeep[coords] = [i, ]
        if debug:
            print("getCoordsToKeep:\t", var, "\tndims:", nzdims, len(nz[0]),
                  "\tNumber of Coords:", len(CoordsToKeep.keys()))
    return CoordsToKeep, variables
def equi_n_discretization(array, intervals=5, dim=1):
    count = ma.sum(ma.array(ma.ones(array.shape, dtype=int), mask=array.mask), dim)
    cut = ma.zeros(len(count), dtype=int)
    sarray = ma.sort(array, dim)
    r = count % intervals
    pointsshape = list(array.shape)
    pointsshape[dim] = 1
    points = []
    for i in range(intervals):
        cutend = cut + count // intervals + numpy.ones(len(r)) * (r > i)
        if dim == 1:
            p = sarray[list(range(len(cutend))), numpy.array(cutend, dtype=int) - 1]
        else:
            p = sarray[numpy.array(cutend, dtype=int) - 1, list(range(len(cutend)))]
        points.append(p.reshape(pointsshape))
        cut = cutend
    darray = ma.array(ma.zeros(array.shape) - 1, mask=array.mask)
    darray[ma.nonzero(array <= points[0])] = 0
    for i in range(0, intervals):
        darray[ma.nonzero(array > points[i])] = i + 1
    return darray
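# Hypothetical usage of equi_n_discretization() (assuming the definition
# above is in scope): each row is cut into `intervals` roughly equal-count
# bins; masked cells stay masked in the result.
import numpy
import numpy.ma as ma

array = ma.masked_invalid(numpy.array([[0.1, 0.4, 0.2, 0.9, 0.5],
                                       [1.0, numpy.nan, 3.0, 2.0, 4.0]]))
binned = equi_n_discretization(array, intervals=2, dim=1)
print(binned)  # per-row bin indices (0 or 1); the nan cell stays masked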
def getCoordsToKeep(nc, variables, newMask='', debug=False):
    """This routine takes an ncdfView object, a list of variables,
       and tests which coordinates should be saved.
       A Mask can be applied too.
    """
    CoordsToKeep = {}
    for var in variables:
        if var in alwaysInclude:
            continue
        arr = nc.variables[var][:]
        if len(newMask):
            out = []
            if newMask.shape != arr.shape:
                if debug:
                    print('getCoordsToKeep:\t', var, 'Wrong shape')
                continue
            try:  # 1D arrays
                for i, m in enumerate(newMask):
                    if not m:
                        continue
                    out.append(arr[i])
                arr = array(out).squeeze()
            except:  # multi-dimensional arrays
                arr = masked_where(newMask, array(arr))
        #nearlyZero = 1.E-6
        nz = nonzero(arr)  # +nearlyZero
        #nz = compressed(arr)
        #if debug: print('getCoordsToKeep:\tcompressed array:', len(nz), nz.shape, nz.min(), nz.max(), nz.mean())
        if not len(nz):
            variables.remove(var)
            continue
        nzdims = len(nz)
        for i, a in enumerate(nz[0]):
            coords = tuple([nz[j][i] for j in range(nzdims)])
            #if var in ['OBSERVATIONS'] and coords not in CoordsToKeep.keys():
            #    print("NEW OBSERVATION LOCATION:", i, coords)
            try:
                if i in CoordsToKeep[coords]:
                    pass
            except:
                try:
                    CoordsToKeep[coords].append(i)
                except:
                    CoordsToKeep[coords] = [i, ]
        if debug:
            print("getCoordsToKeep:\t", var, "\tndims:", nzdims, len(nz[0]),
                  "\tNumber of Coords:", len(CoordsToKeep.keys()))
    return CoordsToKeep, variables
def test_indices(self, target, classes=None):
    classes = self.classes if classes is None else classes

    def target_set(target):
        if isinstance(target, tuple):
            return set([target])
        else:
            assert isinstance(target, set)
            return target

    if self.useAttributeLabels:
        if isinstance(target, list):
            ind = [[i for i, cl in enumerate(self.classes)
                    if target_set(t).intersection(cl)] for t in target]
        else:
            target = target_set(target)
            ind1 = [i for i, cl in enumerate(self.classes)
                    if target.intersection(cl)]
            ind2 = [i for i, cl in enumerate(self.classes)
                    if not target.intersection(cl)]
            ind = [ind1, ind2]
    else:
        if isinstance(target, list):
            ind = [ma.nonzero(self.classes == t)[0] for t in target]
        else:
            if isinstance(target, (str, Variable)):
                target = set([target])
            else:
                assert isinstance(target, set)
            target = list(target)
            ind1 = [i for i, cl in enumerate(self.classes) if cl in target]
            ind2 = [i for i, cl in enumerate(self.classes) if cl not in target]
            ind = [ind1, ind2]
    return ind
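# The `ma.nonzero(self.classes == t)[0]` idiom from test_indices(), isolated:
# comparing a masked array to a label yields a masked boolean array, and
# nonzero() returns the matching unmasked positions. Toy labels assumed.
import numpy.ma as ma

classes = ma.array(["a", "b", "a", "c"], mask=[0, 0, 0, 1])
ind = ma.nonzero(classes == "a")[0]
print(ind)  # positions 0 and 2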
def correctSmile(depths, sml=250000, statn=500, fitn=20):
    if not sml % (statn * fitn):
        sml, statn, fitn = 250000, 500, 20
    ds = depths[4:]
    ends = np.zeros((2 * len(ds), sml))
    for i, d in enumerate(ds):
        ends[2 * i] = d[:sml]
        ends[2 * i + 1] = d[:-sml - 1:-1]
    ends = ends.reshape((2 * len(ds), -1, statn))
    ends = quietly(npma.median, ends, axis=(0, 2))
    # patches nans in the profile with the last non-nan value
    # (highly unlikely to be a problem)
    for i in npma.nonzero(np.isnan(ends))[0]:
        if i == 0:
            # seed the first slot from the first non-nan value
            ends[0] = ends[np.nonzero(~np.isnan(ends))[0][0]]
            continue
        ends[i] = ends[i - 1]
    ends = ends.reshape((-1, fitn)).T
    fitx = np.arange((statn - 1) / 2, statn * fitn, statn)
    coefs = np.polyfit(fitx, ends, 1)
    longx = np.arange(statn * fitn)
    smirk = np.array([np.poly1d(co)(longx) for co in coefs.T]).ravel()
    smirk[:20000] = smirk[20000]

    def stitch(l):
        k = np.ones(l)
        fl = min(sml, int(l / 2))
        if 2 * sml < l:
            k[sml:-sml] = smirk[-1]
        k[:fl] = smirk[:fl]
        k[-fl:] = smirk[fl - 1::-1]
        return k

    fit_smiles = [stitch(len(d)) for d in depths]
    corrected_depths = [
        npma.array(d.data / fs, mask=d.mask)
        for d, fs in zip(depths, fit_smiles)
    ]
    return corrected_depths
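# The nan-patching idiom from correctSmile(), standalone: walk the nan
# positions in order, carry the last finite value forward, and seed index 0
# from the first finite value. The short profile is an invented example.
import numpy as np
import numpy.ma as npma

ends = np.array([np.nan, 1.0, np.nan, 3.0])
for i in npma.nonzero(np.isnan(ends))[0]:
    if i == 0:
        ends[0] = ends[np.nonzero(~np.isnan(ends))[0][0]]
        continue
    ends[i] = ends[i - 1]
print(ends)  # [1. 1. 1. 3.]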
def measure(mode, x, y, x0, x1, thresh=0):
    """ return a measure of y in the window x0 to x1 """
    xt = x.view(numpy.ndarray)  # strip Metaarray stuff - much faster!
    v = y.view(numpy.ndarray)
    xm = ma.masked_outside(xt, x0, x1).T
    ym = ma.array(v, mask=ma.getmask(xm))
    if mode == 'mean':
        r1 = ma.mean(ym)
        r2 = ma.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        return (0, 0)
        # unreachable below the early return; kept from the original
        slope = numpy.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = numpy.array(self.dat[i][j, thisaxis, tb])
            ppars = numpy.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = numpy.append(slope, ppars[0])  # keep track of max slope
        r1 = numpy.amax(slope)
        r2 = numpy.argmax(slope)
    return (r1, r2)
def ba_compare_year(indicesfile, bafile, outfile=None, indicesvarnames=None,
                    support=None, reduction=None):
    """collates the various indices with burned area counts"""
    # interesting variables from the indicesfile
    last_val_reduce = []
    if indicesvarnames is None:
        indicesvarnames = ['gsi_avg', 'fm1000', 'fm100', 'fm10', 'fm1', 'dd', 't_max']
    if 'dd' in indicesvarnames:
        last_val_reduce = ['dd']

    indices = nc.Dataset(indicesfile)
    indicesvars = [indices.variables[v] for v in indicesvarnames]
    ba = nc.Dataset(bafile)
    count = ba.variables['count']

    if support is not None:
        s = nc.Dataset(support)
        supportvars = list(s.variables.keys())
        supportvars.remove('land')
        indicesvarnames.extend(supportvars)
        indicesvars.extend([s.variables[v] for v in supportvars])
        last_val_reduce.extend(supportvars)

    # workaround: bug in index calculator does not calculate last day.
    time_samples = range(1, len(ba.dimensions['days']) - 1)
    if reduction is not None:
        if reduction == REDUCE_MONTHLY:
            grid_reducer = rv.monthly_aggregator(count.shape, 3)
            cmp_reducer = rv.monthly_aggregator(indicesvars[0].shape, 0)
            grid_reducer.cutpoints[0] = 1
            cmp_reducer.cutpoints[1] = 1
        else:
            grid_reducer = rv.ReduceVar(count.shape, 3, reduction)
            cmp_reducer = rv.ReduceVar(indicesvars[0].shape, 0, reduction)
        time_samples = range(grid_reducer.reduced)

    ca = trend.CompressedAxes(indices, 'land')

    alldata = []
    days = []

    for i_time in time_samples:
        day_data = []
        active_lc = []
        if reduction is None:
            count_slice = count[..., i_time]
        else:
            count_slice = np.array(grid_reducer.sum(i_time, count))

        for lc in range(len(ba.dimensions['landcover'])):
            # compress the count
            lc_count = ca.compress(count_slice[:, :, lc])

            # find nonzero counts
            i_nonzero = ma.nonzero(lc_count)

            if len(i_nonzero[0]) > 0:
                # construct dataframe for this landcover code
                lc_data = {"BA Count": lc_count[i_nonzero]}

                for n, v in zip(indicesvarnames, indicesvars):
                    # reduce variable if necessary
                    if reduction is None:
                        day_v = v[i_time, :]
                    else:
                        # the last value of the dry day sequence is
                        # representative of the reduced time period
                        if n in last_val_reduce:
                            day_v = cmp_reducer.last_val(i_time, v)
                        else:
                            day_v = cmp_reducer.mean(i_time, v)

                    # add a column for the current index
                    lc_data[n] = day_v[i_nonzero]

                day_data.append(pd.DataFrame(lc_data))
                active_lc.append(ba.variables['landcover'][lc])

        if len(day_data) > 0:
            alldata.append(pd.concat(day_data, keys=active_lc))
            days.append(i_time)

    all_data_frame = pd.concat(alldata, keys=days)

    if outfile is not None:
        all_data_frame.to_csv(outfile)

    return all_data_frame
def measure(mode, x, y, x0, x1, thresh=0):
    """ return a measure of y in the window x0 to x1 """
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]
        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090':  # measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        return (0, 0)
        # unreachable below the early return; kept from the original
        slope = np.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = np.array(self.dat[i][j, thisaxis, tb])
            ppars = np.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = np.append(slope, ppars[0])  # keep track of max slope
        r1 = np.amax(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
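# Hedged usage sketch for the measure() above ('latency' mode), assuming the
# definition is in scope: find the first sample above a threshold inside the
# window [x0, x1]. The step trace is synthetic.
import numpy as np

x = np.linspace(0.0, 1.0, 11)
y = np.where(x >= 0.6, 1.0, 0.0)      # steps up at x = 0.6
idx, n = measure('latency', x, y, 0.0, 1.0, thresh=0.5)
print(idx, n)  # index of the first supra-threshold point, and how many there are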
def calculateMaps(MLRmodel, MARSmodel, MLPmodel, etapa):
    print("-------------------------------------------------------------------")
    print("Calculate SM maps")
    #def calculateMaps(MLRmodel, MLPmodel, etapa):
    #dir = "ggarcia"
    dir = "gag"
    fechaSentinel = []
    fechaNDVI = []
    fechaLandsat8 = []
    fechaSMAP = []
    fechaMYD = []
    if (etapa == "etapa1"):
        path = "/media/" + dir + "/Datos/Trabajos/Trabajo_Sentinel_NDVI_CONAE/Modelo/mapasCreados/Etapa1/"
        print(etapa)
        fechaSentinel.append("2015-06-29")
        fechaLandsat8.append("2015-06-18")
        fechaSMAP.append("2015-06-30")
        fechaSentinel.append("2015-10-03")
        fechaLandsat8.append("2015-10-08")
        fechaSMAP.append("2015-10-04")
        fechaSentinel.append("2015-12-28")
        fechaLandsat8.append("2015-12-27")
        fechaSMAP.append("2015-12-28")
        fechaSentinel.append("2016-03-19")
        fechaLandsat8.append("2016-03-16")
        fechaSMAP.append("2016-03-12")
    Ta = []
    HR = []
    PP = []
    sigma0 = []
    for i in range(0, len(fechaSentinel)):
        print("-----------------------------------------------------------")
        print(fechaSentinel[i])
        print("-----------------------------------------------------------")
        fileTa = "/media/" + dir + "/Datos/Trabajos/Trabajo_Sentinel_NDVI_CONAE/Datos INTA/" + fechaSentinel[i] + "/T_aire.asc"
        src_ds_Ta, bandTa, GeoTTa, ProjectTa = functions.openFileHDF(fileTa, 1)

        filePP = "/media/" + dir + "/Datos/Trabajos/Trabajo_Sentinel_NDVI_CONAE/Datos INTA/" + fechaSentinel[i] + "/PP.asc"
        src_ds_PP, bandPP, GeoTPP, ProjectPP = functions.openFileHDF(filePP, 1)

        fileHR = "/media/" + dir + "/Datos/Trabajos/Trabajo_Sentinel_NDVI_CONAE/Datos INTA/" + fechaSentinel[i] + "/HR.asc"
        src_ds_HR, bandHR, GeoTHR, ProjectHR = functions.openFileHDF(fileHR, 1)

        fileNDVI = "/media/" + dir + "/Datos/Trabajos/Trabajo_Sentinel_NDVI_CONAE/Landsat8/" + fechaLandsat8[i] + "/NDVI_recortado"
        src_ds_NDVI, bandNDVI, GeoTNDVI, ProjectNDVI = functions.openFileHDF(fileNDVI, 1)

        # ##### SMAP at 10 km
        # fileSMAP = "/media/"+dir+"/TOURO Mobile/Trabajo_Sentinel_NDVI_CONAE/SMAP/SMAP-10km/"+fechaSMAP[i]+"/soil_moisture.img"
        # print(fileSMAP)
        # src_ds_SMAP, bandSMAP, GeoTSMAP, ProjectSMAP = functions.openFileHDF(fileSMAP, 1)

        ##### interpolated CONAE data
        # CONAE_HS = "/home/gag/Escritorio/inter_HS_29_06_2015.asc"
        # src_ds_CONAE_HS, bandCONAE_HS, GeoTCONAE_HS, ProjectCONAE_HS = functions.openFileHDF(CONAE_HS, 1)

        #fileSar = "/media/"+dir+"/TOURO Mobile/Trabajo_Sentinel_NDVI_CONAE/Sentinel/"+fechaSentinel[i]+".SAFE/subset.data/recorte_30mx30m.img"
        #fileSar = "/media/"+dir+"/TOURO Mobile/Trabajo_Sentinel_NDVI_CONAE/Sentinel-Otras/"+fechaSentinel[i]+".SAFE/subset.data/recorte_30mx30m.img"
        fileSar = "/media/" + dir + "/TOURO Mobile/Sentinel_30m_1km/" + fechaSentinel[i] + "/subset_30m_mapa.data/Sigma0_VV_db.img"

        nameFileMLR = "mapa_MLR_30m_" + str(fechaSentinel[i])
        nameFileMARS = "mapa_MARS_30m_" + str(fechaSentinel[i])
        nameFileMLP = "mapa_MLP_30m_" + str(fechaSentinel[i])

        src_ds_Sar, bandSar, GeoTSar, ProjectSar = functions.openFileHDF(fileSar, 1)
        print(ProjectSar)

        fileMascara = "/media/" + dir + "/Datos/Trabajos/Trabajo_Sentinel_NDVI_CONAE/Landsat8/2015-06-18/mascaraciudadyalgomas_reprojected/subset_1_of_Band_Math__b1_5.data/Band_Math__b1_5.img"
        src_ds_Mas, bandMas, GeoTMas, ProjectMas = functions.openFileHDF(fileMascara, 1)

        ### resample every image to the SAR resolution
        #type = "Nearest"
        type = "Bilinear"
        nRow, nCol = bandSar.shape

        data_src = src_ds_Mas
        data_match = src_ds_Sar
        match = functions.matchData(data_src, data_match, type, nRow, nCol)
        band_matchCity = match.ReadAsArray()

        data_src = src_ds_Ta
        data_match = src_ds_Sar
        match = functions.matchData(data_src, data_match, type, nRow, nCol)
        band_matchTa = match.ReadAsArray()
        print("------------------------------------------------------------")
        print("Max Ta: " + str(np.max(band_matchTa)))
        print("Min Ta: " + str(np.min(band_matchTa)))
        #fig, ax = plt.subplots()
        #ax.imshow(band_matchTa, interpolation='None', cmap=cm.gray)
        #plt.show()

        data_src = src_ds_PP
        data_match = src_ds_Sar
        match = functions.matchData(data_src, data_match, type, nRow, nCol)
        band_matchPP = match.ReadAsArray()
        print("Max PP: " + str(np.max(band_matchPP)))
        print("Min PP: " + str(np.min(band_matchPP)))
        #fig, ax = plt.subplots()
        #ax.imshow(band_matchPP, interpolation='None', cmap=cm.gray)
        #plt.show()

        data_src = src_ds_HR
        data_match = src_ds_Sar
        match = functions.matchData(data_src, data_match, type, nRow, nCol)
        band_matchHR = match.ReadAsArray()
        print("Max HR: " + str(np.max(band_matchHR)))
        print("Min HR: " + str(np.min(band_matchHR)))
        #HR = pd.DataFrame({'HR': band_matchHR.flatten()})
        #fig, ax = plt.subplots()
        #sns.distplot(HR)
        #print "------------------------------------------------------------"
        #fig, ax = plt.subplots()
        #ax.imshow(band_matchHR, interpolation='None', cmap=cm.gray)

        data_src = src_ds_NDVI
        data_match = src_ds_Sar
        match = functions.matchData(data_src, data_match, type, nRow, nCol)
        band_matchNDVI = match.ReadAsArray()
        # fig, ax = plt.subplots()
        # ax.imshow(band_matchNDVI, interpolation='None', cmap=cm.gray)
        # plt.show()

        # type = "Nearest"
        # data_src = src_ds_SMAP
        # data_match = src_ds_Sar
        # match = functions.matchData(data_src, data_match, type, nRow, nCol)
        # band_matchSMAP = match.ReadAsArray()

        # data_src = src_ds_CONAE_HS
        # data_match = src_ds_Sar
        # match = functions.matchData(data_src, data_match, type, nRow, nCol)
        # band_matchCONAE_HS = match.ReadAsArray()

        ### filter the SAR image
        #print "Filter out areas with NDVI above 0.51 and below 0"
        sarEnmask, maskNDVI = applyNDVIfilter(bandSar, band_matchNDVI, etapa)
        # rSar, cSar = maskNDVI.shape
        # maskNDVI2 = np.zeros((rSar, cSar))
        # for i in range(0, rSar):
        #     for j in range(0, cSar):
        #         if (maskNDVI[i, j] == 0): maskNDVI2[i, j] = 1
        filtWater, maskWater = applyWaterfilter(bandSar, band_matchNDVI)

        ### histogram of Sigma0 after filtering
        #Ss = pd.DataFrame({'Sigma0': sarEnmask.flatten()})
        #fig, ax = plt.subplots()
        #sns.distplot(Ss)

        # sarEnmask, maskCity = applyCityfilter(sarEnmask, L8maskCity)
        # sarEnmask, maskSAR = applyBackfilter(sarEnmask)
        sarEnmask[sarEnmask < -18] = -0
        sarEnmask[sarEnmask > -4] = -4
        print("Max Sigma0: " + str(np.max(sarEnmask)))
        print("Min Sigma0: " + str(np.min(sarEnmask)))
        print("------------------------------------------------------------")
        # fig, ax = plt.subplots()
        # ax.imshow(sarEnmask, interpolation='None', cmap=cm.gray)
        #
        # fig, ax = plt.subplots()
        # ax.imshow(maskNDVI, interpolation='None', cmap=cm.gray)
        # plt.show()

        sarEnmask1 = np.copy(sarEnmask)
        sarEnmask2 = np.copy(sarEnmask)
        sarEnmask3 = np.copy(sarEnmask)

        r, c = bandSar.shape
        # OldRange = (np.max(band_matchPP) - np.min(band_matchPP))
        # NewRange = (1 + 1)
        # newPP = (((band_matchPP - np.min(band_matchPP)) * NewRange) / OldRange) - 1
        #
        # OldRange = (np.max(sarEnmask1) - np.min(sarEnmask1))
        # NewRange = (1 + 1)
        # sarEnmask1 = (((sarEnmask1 - np.min(sarEnmask1)) * NewRange) / OldRange) - 1

        ### normalize the variables between 0 and 1
        sarEnmask_22 = normalizadoSAR(sarEnmask1)
        PP_Norm = normalizadoPP(band_matchPP)
        Ta_Norm = normalizadoTa(band_matchTa)
        HR_Norm = normalizadoHR(band_matchHR)

        print("Max sarEnmask_22 norm:" + str(np.max(sarEnmask_22)))
        print("Min sarEnmask_22 norm:" + str(np.min(sarEnmask_22)))
        print("Max PP norm:" + str(np.max(PP_Norm)))
        print("Min PP norm:" + str(np.min(PP_Norm)))
        print("Max Ta norm:" + str(np.max(Ta_Norm)))
        print("Min Ta norm:" + str(np.min(Ta_Norm)))
        print("Max HR norm:" + str(np.max(HR_Norm)))
        print("Min HR norm:" + str(np.min(HR_Norm)))

        # fig, ax = plt.subplots()
        # ax.imshow(PP_Norm, interpolation='None', cmap=cm.gray)
        #
        # fig, ax = plt.subplots()
        # ax.imshow(np.log10(Ta_Norm), interpolation='None', cmap=cm.gray)
        #
        # fig, ax = plt.subplots()
        # ax.imshow(np.log10(HR_Norm), interpolation='None', cmap=cm.gray)

        #### ------------------- MLR method -------------------
        dataMap_MLR = pd.DataFrame({
            'Sigma0': sarEnmask_22.flatten(),
            'T_aire': (np.log10(Ta_Norm)).flatten(),
            'HR': (np.log10(HR_Norm)).flatten(),
            'PP': PP_Norm.flatten()
        })
        dataMap_MLR = dataMap_MLR[['T_aire', 'PP', 'Sigma0', 'HR']]
        # print(dataMap_MLR.describe())
        # input()
        dataMap_MLR = dataMap_MLR.fillna(0)
        mapSM_MLR = MLRmodel.predict(dataMap_MLR)
        ## undo the flatten() here
        #mapSM_MLR = mapSM_MLR.reshape((r, c))
        mapSM_MLR = np.array(mapSM_MLR).reshape(r, c)
        mapSM_MLR = 10**(mapSM_MLR)
        mapSM_MLR[mapSM_MLR < 0] = 0
        #mapSM_MLR[mapSM_MLR > 60] = 0
        #### the MLR model inputs are log-transformed
        # fig, ax = plt.subplots()
        # plt.hist(mapSM_MLR, bins=10)  # arguments are passed to np.histogram
        # plt.title("Histogram MLR maps")
        # plt.show()
        mapSM_MLR = mapSM_MLR * maskNDVI  #*maskCity
        #SM = pd.DataFrame({'SM': mapSM_MLR.flatten()})
        #SM = SM[SM.SM != 0]
        #fig, ax = plt.subplots()
        #sns.distplot(SM)
        #fig, ax = plt.subplots()
        #ax.imshow(mapSM_MLR, interpolation='None', cmap=cm.gray)
        #plt.show()
        #plt.hist(mapSM_MLR)  # arguments are passed to np.histogram
        #plt.title("Histogram with 'auto' bins")
        #plt.show()
        print("MLR")
        print("Max:" + str(np.max(mapSM_MLR[np.nonzero(mapSM_MLR)])))
        print("Min:" + str(np.min(mapSM_MLR[np.nonzero(mapSM_MLR)])))
        print("Mean:" + str(np.mean(mapSM_MLR[np.nonzero(mapSM_MLR)])))
        print("STD:" + str(np.std(mapSM_MLR[np.nonzero(mapSM_MLR)])))
        #fig, ax = plt.subplots()
        #ax.imshow(mapSM_MLR, interpolation='None', cmap=cm.gray)

        #### ------------------- MARS method -------------------
        dataMap_MARS = pd.DataFrame({
            'Sigma0': sarEnmask.flatten(),
            'T_aire': band_matchTa.flatten(),
            'HR': band_matchHR.flatten(),
            'PP': band_matchPP.flatten()
        })
        dataMap_MARS = dataMap_MARS[['T_aire', 'PP', 'Sigma0', 'HR']]
        dataMap_MARS = dataMap_MARS.fillna(0)
        mapSM_MARS = MARSmodel.predict(dataMap_MARS)
        ## undo the flatten() here
        mapSM_MARS = mapSM_MARS.reshape(r, c)
        mapSM_MARS[mapSM_MARS < 0] = 0
        mapSM_MARS = mapSM_MARS * maskNDVI  #*maskCity

        #### ------------------- MLP method -------------------
        # OldRange = (np.max(band_matchTa) - np.min(band_matchTa))
        # NewRange = (1 + 1)
        # Ta = (((band_matchTa - np.min(band_matchTa)) * NewRange) / OldRange) - 1
        #
        # OldRange = (np.max(band_matchHR) - np.min(band_matchHR))
        # NewRange = (1 + 1)
        # HR = (((band_matchHR - np.min(band_matchHR)) * NewRange) / OldRange) - 1
        #
        # OldRange = (np.max(band_matchPP) - np.min(band_matchPP))
        # NewRange = (1 + 1)
        # PP = (((band_matchPP - np.min(band_matchPP)) * NewRange) / OldRange) - 1
        #
        # OldRange = (np.max(sarEnmask) - np.min(sarEnmask))
        # NewRange = (1 + 1)
        # sar2 = (((sarEnmask - np.min(sarEnmask)) * NewRange) / OldRange) - 1

        OldRange = (26.29 - 6.9)
        NewRange = (1 + 1)
        Ta = (((band_matchTa - 6.9) * NewRange) / OldRange) - 1

        OldRange = (83.63 - 17.83)
        NewRange = (1 + 1)
        HR = (((band_matchHR - 17.83) * NewRange) / OldRange) - 1

        OldRange = (22.16 - 0)
        NewRange = (1 + 1)
        PP = (((band_matchPP - 0) * NewRange) / OldRange) - 1

        OldRange = (-4.39 + 17.82)
        NewRange = (1 + 1)
        sar2 = (((sarEnmask + 17.82) * NewRange) / OldRange) - 1

        dataMap_MLP = pd.DataFrame({
            'T_aire': Ta.flatten(),
            'Sigma0': sar2.flatten(),
            'HR': HR.flatten(),
            'PP': PP.flatten()
        })
        dataMap_MLP = dataMap_MLP[['T_aire', 'PP', 'Sigma0', 'HR']]
        #print dataMap_MLP  ###.describe()
        dataMap_MLP = dataMap_MLP.fillna(0)
        mapSM_MLP = MLPmodel.predict(dataMap_MLP)
        mapSM_MLP = mapSM_MLP.reshape(r, c)
        #print mapSM_MLR.shape
        mapSM_MLP[mapSM_MLP < 0] = 0
        mapSM_MLP = mapSM_MLP * maskNDVI
        #fig, ax = plt.subplots()
        #ax.imshow(mapSM_MLP, interpolation='None', cmap=cm.gray)
        #plt.show()

        my_cmap = cm.Blues
        my_cmap.set_under('k', alpha=0)
        my_cmap1 = cm.Greens
        my_cmap1.set_under('k', alpha=0)
        my_cmap2 = cm.OrRd
        my_cmap2.set_under('k', alpha=0)
        my_cmap3 = cm.Oranges
        my_cmap3.set_under('k', alpha=0)

        transform = GeoTSar
        xmin, xmax, ymin, ymax = (transform[0],
                                  transform[0] + transform[1] * src_ds_Sar.RasterXSize,
                                  transform[3] + transform[5] * src_ds_Sar.RasterYSize,
                                  transform[3])
        print(xmin)
        print(xmax)

        # plot MLR maps
        ##plt.hist(mapSM_MLR, bins=10)  # arguments are passed to np.histogram
        ##plt.title("Histogram with 'auto' bins")
        ##plt.show()
        fig, ax = plt.subplots()
        # meridians = [xmin, xmax, 5]
        m = Basemap(projection='merc', llcrnrlat=ymin, urcrnrlat=ymax,
                    llcrnrlon=xmin, urcrnrlon=xmax, resolution='c')
        # m = Basemap(projection='cyl', llcrnrlat=ymin, urcrnrlat=ymax,
        #             llcrnrlon=xmin, urcrnrlon=xmax, resolution='c')
        # m.drawcoastlines()
        # lat = np.arange(xmin, xmax, 5)
        # print lat
        # m.drawlsmask(land_color='white', ocean_color='white', lakes=True)
        # # m.drawparallels([-32.90, -32.95, -33.00, -33.05], labels=[1, 0, 0, 0], fontsize=12, linewidth=0.0)
        # # m.drawmeridians([-62.56, -62.48, -62.40, -62.32], labels=[0, 0, 1, 0], fontsize=12, linewidth=0.0)
        # m.drawmapscale(-62.35, -33.04, 0, 0, 10, barstyle='fancy', units='km')
        m.drawmapscale(-62.55, -33.05, 0, 0, 10, fontsize=10, units='km')
        # m.drawmapboundary(fill_color='aqua')
        # ax.add_compass(loc=1)
        sarEnmask[sarEnmask != -4] = 0
        # sarEnmask[sarEnmask == -4] = 1
        img = ax.imshow(sarEnmask, extent=[xmin, xmax, ymin, ymax],
                        cmap=cm.gray, interpolation='none')
        ax.yaxis.set_major_locator(plt.MaxNLocator(5))
        ax.xaxis.set_major_locator(plt.MaxNLocator(5))
        ax.xaxis.tick_top()
        # m.colorbar(img)
        # plt.title('Plot gridded data on a map')
        plt.savefig('gridded_data_global_map.png', pad_inches=0.5, bbox_inches='tight')
        ax.grid(False)
        plt.show()

        #im0 = ax.imshow(mapSM_MLR, cmap=my_cmap3)  #, vmin=5, vmax=55, extent=[xmin,xmax,ymin,ymax], interpolation='None')
        #maskNDVI2 = ma.masked_where(maskNDVI2 == 0, maskNDVI2)
        #im1 = ax.imshow(maskNDVI2, cmap=my_cmap1)
        # im0 = ax.imshow(maskNDVI, extent=[xmin,xmax,ymin,ymax], cmap=cm.gray)
        # im0.tick_labels.set_xformat('hhmm')
        # im0.tick_labels.set_yformat('hhmm')
        # plt.show()

        fig, ax = plt.subplots()
        #im0 = ax.imshow(mapSM_MLR, cmap=my_cmap3)  #, vmin=5, vmax=55, extent=[xmin,xmax,ymin,ymax], interpolation='None')
        #maskNDVI2 = ma.masked_where(maskNDVI2 == 0, maskNDVI2)
        #im1 = ax.imshow(maskNDVI2, cmap=my_cmap1)
        im0 = ax.imshow(mapSM_MLR, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap1, clim=(5, 45))
        pp = ma.masked_where(band_matchCity == 0, band_matchCity)
        im = ax.imshow(pp, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap2, interpolation='Bilinear')
        kk = ma.masked_where(filtWater == 0, filtWater)
        im = ax.imshow(kk, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap, interpolation='Bilinear')
        ax.grid(False)
        ax.xaxis.tick_top()
        ax.yaxis.set_major_locator(plt.MaxNLocator(4))
        ax.xaxis.set_major_locator(plt.MaxNLocator(4))
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('bottom', size="5%", pad=0.05)
        cb = plt.colorbar(im0, cax=cax, orientation="horizontal")
        cb.set_label('Volumetric SM (%)')
        #cb.set_clim(vmin=5, vmax=50)

        ### ----------------------------------------------------------------
        # plot MARS maps
        print("MARS")
        print("Max:" + str(np.max(mapSM_MARS[np.nonzero(mapSM_MARS)])))
        print("Min:" + str(np.min(mapSM_MARS[np.nonzero(mapSM_MARS)])))
        print("Mean:" + str(np.mean(mapSM_MARS[np.nonzero(mapSM_MARS)])))
        print("STD:" + str(np.std(mapSM_MARS[np.nonzero(mapSM_MARS)])))

        fig, ax = plt.subplots()
        im0 = ax.imshow(mapSM_MARS, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap1, clim=(5, 45))
        pp = ma.masked_where(band_matchCity == 0, band_matchCity)
        im = ax.imshow(pp, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap2, interpolation='Bilinear')
        kk = ma.masked_where(filtWater == 0, filtWater)
        im = ax.imshow(kk, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap, interpolation='Bilinear')
        ax.grid(False)
        ax.xaxis.tick_top()
        ax.yaxis.set_major_locator(plt.MaxNLocator(4))
        ax.xaxis.set_major_locator(plt.MaxNLocator(4))
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('bottom', size="5%", pad=0.05)
        cb = plt.colorbar(im0, cax=cax, orientation="horizontal")
        cb.set_label('Volumetric SM (%)')
        #cb.set_clim(vmin=5, vmax=50)

        ### ----------------------------------------------------------------
        # plot MLP map
        print("MLP")
        print("Max:" + str(np.max(mapSM_MLP[np.nonzero(mapSM_MLP)])))
        print("Min:" + str(np.min(mapSM_MLP[np.nonzero(mapSM_MLP)])))
        print("Mean:" + str(np.mean(mapSM_MLP[np.nonzero(mapSM_MLP)])))
        print("STD:" + str(np.std(mapSM_MLP[np.nonzero(mapSM_MLP)])))

        fig, ax = plt.subplots()
        im0 = ax.imshow(mapSM_MLP, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap1, clim=(5, 45))
        pp = ma.masked_where(band_matchCity == 0, band_matchCity)
        ax.imshow(pp, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap2, interpolation='Bilinear')
        kk = ma.masked_where(filtWater == 0, filtWater)
        ax.imshow(kk, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap, interpolation='Bilinear')
        ax.grid(False)
        ax.xaxis.tick_top()
        ax.yaxis.set_major_locator(plt.MaxNLocator(4))
        ax.xaxis.set_major_locator(plt.MaxNLocator(4))
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('bottom', size="5%", pad=0.05)
        cb = plt.colorbar(im0, cax=cax, orientation="horizontal")
        cb.set_label('Volumetric SM (%)')
        #cb.set_clim(vmin=5, vmax=50)

        ### ----------------------------------------------------------------
        # # plot SMAP map
        # SMAP_SM = band_matchSMAP * 100
        # SMAP_SM = maskNDVI * SMAP_SM
        #
        # print("SMAP SM")
        # print("Max:" + str(np.max(SMAP_SM[np.nonzero(SMAP_SM)])))
        # print("Min:" + str(np.min(SMAP_SM[np.nonzero(SMAP_SM)])))
        # print("Mean:" + str(np.mean(SMAP_SM[np.nonzero(SMAP_SM)])))
        # print("STD:" + str(np.std(SMAP_SM[np.nonzero(SMAP_SM)])))
        #
        # fig4, ax4 = plt.subplots()
        # im4 = ax4.imshow(SMAP_SM, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap1, clim=(5, 45))
        # pp = ma.masked_where(band_matchCity == 0, band_matchCity)
        # ax4.imshow(pp, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap2, interpolation='Bilinear')
        # kk = ma.masked_where(filtWater == 0, filtWater)
        # ax4.imshow(kk, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap, interpolation='Bilinear')
        # ax4.grid(False)
        # ax4.xaxis.tick_top()
        # ax4.yaxis.set_major_locator(plt.MaxNLocator(4))
        # ax4.xaxis.set_major_locator(plt.MaxNLocator(4))
        # divider = make_axes_locatable(ax4)
        # cax = divider.append_axes('bottom', size="5%", pad=0.05)
        # cb = plt.colorbar(im4, cax=cax, orientation="horizontal")
        # cb.set_label('Volumetric SM (%)')

        ### ----------------------------------------------------------------
        # # plot interpolated CONAE_HS map
        # fig4, ax4 = plt.subplots()
        # im4 = ax4.imshow(band_matchCONAE_HS, extent=[xmin, xmax, ymin, ymax], cmap=my_cmap1, clim=(5, 45))

        plt.show()
        #im1 = ax.imshow(filtWater, cmap=my_cmap)
        #maskNDVI2 = ma.masked_where(filtWater == 0, filtWater)
        #im1 = ax.imshow(maskNDVI2, cmap=my_cmap)
        #im1 = ax.imshow(band_matchCity, cmap=my_cmap2)
        #mapSM_MLP = mapSM_MLP * maskNDVI

        functions.createHDFfile(path, nameFileMLR, 'ENVI', mapSM_MLR, c, r, GeoTSar, ProjectSar)
        functions.createHDFfile(path, nameFileMARS, 'ENVI', mapSM_MARS, c, r, GeoTSar, ProjectSar)
        functions.createHDFfile(path, nameFileMLP, 'ENVI', mapSM_MLP, c, r, GeoTSar, ProjectSar)

    print("FIN")
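# The fixed-range rescaling that calculateMaps() repeats inline for Ta, HR,
# PP and Sigma0 (map [lo, hi] onto [-1, 1]), written once as a hypothetical
# helper; the endpoints below are the assumed training extremes used above.
import numpy as np

def rescale(band, lo, hi):
    """Linearly map band values from [lo, hi] onto [-1, 1]."""
    return ((band - lo) * 2.0) / (hi - lo) - 1.0

Ta_scaled = rescale(np.array([6.9, 16.6, 26.29]), 6.9, 26.29)
print(Ta_scaled)  # the endpoints land on -1 and 1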
elevation = elevation[dmsmask]
#elevation = -elevation
elevation = elevation - np.mean(elevation)

xx = np.arange(np.amin(xT), np.amax(xT), xy_res)
yy = np.arange(np.amin(yT), np.amax(yT), xy_res)
xx2d, yy2d = meshgrid(xx, yy)

elevation2d = ut.grid_elevation(xpts, ypts, elevation, xx2d, yy2d, kdtree=1)

# Elevation relative to a fitted plane (order 2 = quadratic plane)
elevation2d_plane = ut.getPlaneElev(elevation2d, xx2d, yy2d, order=2)

# Find local level (modal) surface
level_elev, thresh, levpercent = ut.calc_level_ice(
    asarray(elevation2d_plane[ma.nonzero(elevation2d_plane)]), pint, pwidth,
    min_feature_depth)

# Elevation anomalies relative to a local level (modal) surface
elevation2d_anomalies = elevation2d_plane - level_elev

# Threshold
thresh = thresh - level_elev
elevation2d_masked = ma.masked_where(elevation2d_anomalies < thresh,
                                     elevation2d_anomalies)
feature_area = ma.count(elevation2d_masked)

#================ Label the features ==================
labelled_image = ut.label_features(elevation2d_masked, xy_res,
                                   min_feature_size, min_feature_depth)
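# The `elevation2d_plane[ma.nonzero(elevation2d_plane)]` idiom above, shown
# standalone: collect the unmasked, nonzero grid cells as a flat array (the
# input that calc_level_ice histograms). Tiny grid invented for the demo.
import numpy as np
import numpy.ma as ma

elev = ma.masked_invalid(np.array([[0.0, 0.3], [np.nan, 1.2]]))
flat = np.asarray(elev[ma.nonzero(elev)])
print(flat)  # [0.3 1.2]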
def measure(mode, x, y, x0, x1, thresh=0):
    """ return a measure of y in the window x0 to x1 """
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == "mean":
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == "max" or mode == "maximum":
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == "min" or mode == "minimum":
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == "minormax":
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]
        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]
    if mode == "median":
        r1 = ma.median(ym)
        r2 = 0
    if mode == "p2p":  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == "std":  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == "var":  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == "cumsum":  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == "anom":  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == "sum":
        r1 = ma.sum(ym)
        r2 = 0
    if mode == "area" or mode == "charge":
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == "latency":  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == "1090":  # measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]
    if mode == "count":
        r1 = ma.count(ym)
        r2 = 0
    if mode == "maxslope":
        return (0, 0)
        # unreachable below the early return; kept from the original
        slope = np.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = np.array(self.dat[i][j, thisaxis, tb])
            ppars = np.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = np.append(slope, ppars[0])  # keep track of max slope
        r1 = np.amax(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
def measure(mode, x, y, x0, x1, thresh=0, slopewin=1.0):
    """ return a measure of y in the window x0 to x1 """
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]
        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090':  # measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        slope = []
        win = ma.flatnotmasked_contiguous(ym)
        dt = x[1] - x[0]
        st = int(slopewin / dt)  # use slopewin duration window for fit.
        print('st: ', st)
        for k, w in enumerate(win):  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            ppars = np.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope.append(ppars[0])  # keep track of max slope
        r1 = np.max(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
def RetDistAngle(DisAglCtls,DisAglData):
    '''Compute and return distances and angles

    :param dict DisAglCtls: contains distance/angle radii usually defined using
       :func:`GSASIIctrlGUI.DisAglDialog`
    :param dict DisAglData: contains phase data:
       Items 'OrigAtoms' and 'TargAtoms' contain the atoms to be used
       for distance/angle origins and atoms to be used as targets.
       Item 'SGData' has the space group information (see :ref:`Space Group object<SGData_table>`)

    :returns: AtomLabels,DistArray,AngArray where:

      **AtomLabels** is a dict of atom labels, keys are the atom number

      **DistArray** is a dict keyed by the origin atom number where the value is a list
      of distance entries. The value for each distance is a list containing:

      0) the target atom number (int);
      1) the unit cell offsets added to x,y & z (tuple of int values)
      2) the symmetry operator number (which may be modified to indicate centering and center of symmetry)
      3) an interatomic distance in A (float)
      4) an uncertainty on the distance in A or 0.0 (float)

      **AngArray** is a dict keyed by the origin (central) atom number where
      the value is a list of angle entries. The value for each angle entry consists of three values:

      0) a distance item reference for one neighbor (int)
      1) a distance item reference for a second neighbor (int)
      2) an angle, uncertainty pair; the s.u. may be zero (tuple of two floats)

      The AngArray distance reference items refer directly to the index of the items in the
      DistArray item for the list of distances for the central atom.
    '''
    import numpy.ma as ma

    SGData = DisAglData['SGData']
    Cell = DisAglData['Cell']
    Amat,Bmat = G2lat.cell2AB(Cell[:6])
    covData = {}
    if 'covData' in DisAglData:
        covData = DisAglData['covData']
        covMatrix = covData['covMatrix']
        varyList = covData['varyList']
        pfx = str(DisAglData['pId'])+'::'
    Factor = DisAglCtls['Factors']
    Radii = dict(zip(DisAglCtls['AtomTypes'],zip(DisAglCtls['BondRadii'],DisAglCtls['AngleRadii'])))
    indices = (-2,-1,0,1,2)
    Units = np.array([[h,k,l] for h in indices for k in indices for l in indices])
    origAtoms = DisAglData['OrigAtoms']
    targAtoms = DisAglData['TargAtoms']
    AtomLabels = {}
    for Oatom in origAtoms:
        AtomLabels[Oatom[0]] = Oatom[1]
    for Oatom in targAtoms:
        AtomLabels[Oatom[0]] = Oatom[1]
    DistArray = {}
    AngArray = {}
    for Oatom in origAtoms:
        DistArray[Oatom[0]] = []
        AngArray[Oatom[0]] = []
        OxyzNames = ''
        IndBlist = []
        Dist = []
        Vect = []
        VectA = []
        angles = []
        for Tatom in targAtoms:
            Xvcov = []
            TxyzNames = ''
            if 'covData' in DisAglData:
                OxyzNames = [pfx+'dAx:%d'%(Oatom[0]),pfx+'dAy:%d'%(Oatom[0]),pfx+'dAz:%d'%(Oatom[0])]
                TxyzNames = [pfx+'dAx:%d'%(Tatom[0]),pfx+'dAy:%d'%(Tatom[0]),pfx+'dAz:%d'%(Tatom[0])]
                Xvcov = G2mth.getVCov(OxyzNames+TxyzNames,varyList,covMatrix)
            result = G2spc.GenAtom(Tatom[3:6],SGData,False,Move=False)
            BsumR = (Radii[Oatom[2]][0]+Radii[Tatom[2]][0])*Factor[0]
            AsumR = (Radii[Oatom[2]][1]+Radii[Tatom[2]][1])*Factor[1]
            for [Txyz,Top,Tunit,Spn] in result:
                Dx = (Txyz-np.array(Oatom[3:6]))+Units
                dx = np.inner(Amat,Dx)
                dist = ma.masked_less(np.sqrt(np.sum(dx**2,axis=0)),0.5)
                IndB = ma.nonzero(ma.masked_greater(dist-BsumR,0.))
                if np.any(IndB):
                    for indb in IndB:
                        for i in range(len(indb)):
                            if str(dx.T[indb][i]) not in IndBlist:
                                IndBlist.append(str(dx.T[indb][i]))
                                unit = Units[indb][i]
                                tunit = (unit[0]+Tunit[0],unit[1]+Tunit[1],unit[2]+Tunit[2])
                                pdpx = G2mth.getDistDerv(Oatom[3:6],Tatom[3:6],Amat,unit,Top,SGData)
                                sig = 0.0
                                if len(Xvcov):
                                    sig = np.sqrt(np.inner(pdpx,np.inner(pdpx,Xvcov)))
                                Dist.append([Oatom[0],Tatom[0],tunit,Top,ma.getdata(dist[indb])[i],sig])
                                if (Dist[-1][-2]-AsumR) <= 0.:
                                    Vect.append(dx.T[indb][i]/Dist[-1][-2])
                                    VectA.append([OxyzNames,np.array(Oatom[3:6]),TxyzNames,np.array(Tatom[3:6]),unit,Top])
                                else:
                                    Vect.append([0.,0.,0.])
                                    VectA.append([])
        for D in Dist:
            DistArray[Oatom[0]].append(D[1:])
        Vect = np.array(Vect)
        angles = np.zeros((len(Vect),len(Vect)))
        angsig = np.zeros((len(Vect),len(Vect)))
        for i,veca in enumerate(Vect):
            if np.any(veca):
                for j,vecb in enumerate(Vect):
                    if np.any(vecb):
                        angles[i][j],angsig[i][j] = G2mth.getAngSig(VectA[i],VectA[j],Amat,SGData,covData)
                        if i <= j:
                            continue
                        AngArray[Oatom[0]].append((i,j,
                            G2mth.getAngSig(VectA[i],VectA[j],Amat,SGData,covData)))
    return AtomLabels,DistArray,AngArray
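# The distance-screening idiom from RetDistAngle(), isolated: mask out
# unphysically short distances, then keep candidates within the bonding
# radius via masked_greater + nonzero. The distances and radius sum are
# made-up numbers.
import numpy as np
import numpy.ma as ma

dist = ma.masked_less(np.array([0.1, 1.4, 2.8, 5.0]), 0.5)  # drop self-contacts
BsumR = 3.0                                                 # assumed bond-radius sum
IndB = ma.nonzero(ma.masked_greater(dist - BsumR, 0.))
print(IndB)  # indices of plausible bonds: entries 1 and 2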
def test_nonzero(self):
    for t in "?bhilqpBHILQPfdgFDGO":
        x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])
        assert_(eq(nonzero(x), [0]))
def RetDistAngle(DisAglCtls,DisAglData):
    '''Compute and return distances and angles

    :param dict DisAglCtls: contains distance/angle radii usually defined using
       :func:`GSASIIgrid.DisAglDialog`
    :param dict DisAglData: contains phase data:
       Items 'OrigAtoms' and 'TargAtoms' contain the atoms to be used
       for distance/angle origins and atoms to be used as targets.
       Item 'SGData' has the space group information (see :ref:`Space Group object<SGData_table>`)

    :returns: AtomLabels,DistArray,AngArray where:

      **AtomLabels** is a dict of atom labels, keys are the atom number

      **DistArray** is a dict keyed by the origin atom number where the value is a list
      of distance entries. The value for each distance is a list containing:

      0) the target atom number (int);
      1) the unit cell offsets added to x,y & z (tuple of int values)
      2) the symmetry operator number (which may be modified to indicate centering and center of symmetry)
      3) an interatomic distance in A (float)
      4) an uncertainty on the distance in A or 0.0 (float)

      **AngArray** is a dict keyed by the origin (central) atom number where
      the value is a list of angle entries. The value for each angle entry consists of three values:

      0) a distance item reference for one neighbor (int)
      1) a distance item reference for a second neighbor (int)
      2) an angle, uncertainty pair; the s.u. may be zero (tuple of two floats)

      The AngArray distance reference items refer directly to the index of the items in the
      DistArray item for the list of distances for the central atom.
    '''
    import numpy.ma as ma

    SGData = DisAglData['SGData']
    Cell = DisAglData['Cell']
    Amat,Bmat = G2lat.cell2AB(Cell[:6])
    covData = {}
    if 'covData' in DisAglData:
        covData = DisAglData['covData']
        covMatrix = covData['covMatrix']
        varyList = covData['varyList']
        pfx = str(DisAglData['pId'])+'::'
        A = G2lat.cell2A(Cell[:6])
        cellSig = G2stIO.getCellEsd(pfx,SGData,A,covData)
        names = [' a = ',' b = ',' c = ',' alpha = ',' beta = ',' gamma = ',' Volume = ']
        valEsd = [G2mth.ValEsd(Cell[i],cellSig[i],True) for i in range(7)]
    Factor = DisAglCtls['Factors']
    Radii = dict(zip(DisAglCtls['AtomTypes'],zip(DisAglCtls['BondRadii'],DisAglCtls['AngleRadii'])))
    indices = (-1,0,1)
    Units = np.array([[h,k,l] for h in indices for k in indices for l in indices])
    origAtoms = DisAglData['OrigAtoms']
    targAtoms = DisAglData['TargAtoms']
    AtomLabels = {}
    for Oatom in origAtoms:
        AtomLabels[Oatom[0]] = Oatom[1]
    for Oatom in targAtoms:
        AtomLabels[Oatom[0]] = Oatom[1]
    DistArray = {}
    AngArray = {}
    for Oatom in origAtoms:
        DistArray[Oatom[0]] = []
        AngArray[Oatom[0]] = []
        OxyzNames = ''
        IndBlist = []
        Dist = []
        Vect = []
        VectA = []
        angles = []
        for Tatom in targAtoms:
            Xvcov = []
            TxyzNames = ''
            if 'covData' in DisAglData:
                OxyzNames = [pfx+'dAx:%d'%(Oatom[0]),pfx+'dAy:%d'%(Oatom[0]),pfx+'dAz:%d'%(Oatom[0])]
                TxyzNames = [pfx+'dAx:%d'%(Tatom[0]),pfx+'dAy:%d'%(Tatom[0]),pfx+'dAz:%d'%(Tatom[0])]
                Xvcov = G2mth.getVCov(OxyzNames+TxyzNames,varyList,covMatrix)
            result = G2spc.GenAtom(Tatom[3:6],SGData,False,Move=False)
            BsumR = (Radii[Oatom[2]][0]+Radii[Tatom[2]][0])*Factor[0]
            AsumR = (Radii[Oatom[2]][1]+Radii[Tatom[2]][1])*Factor[1]
            for Txyz,Top,Tunit in result:
                Dx = (Txyz-np.array(Oatom[3:6]))+Units
                dx = np.inner(Amat,Dx)
                dist = ma.masked_less(np.sqrt(np.sum(dx**2,axis=0)),0.5)
                IndB = ma.nonzero(ma.masked_greater(dist-BsumR,0.))
                if np.any(IndB):
                    for indb in IndB:
                        for i in range(len(indb)):
                            if str(dx.T[indb][i]) not in IndBlist:
                                IndBlist.append(str(dx.T[indb][i]))
                                unit = Units[indb][i]
                                tunit = (unit[0]+Tunit[0],unit[1]+Tunit[1],unit[2]+Tunit[2])
                                pdpx = G2mth.getDistDerv(Oatom[3:6],Tatom[3:6],Amat,unit,Top,SGData)
                                sig = 0.0
                                if len(Xvcov):
                                    sig = np.sqrt(np.inner(pdpx,np.inner(Xvcov,pdpx)))
                                Dist.append([Oatom[0],Tatom[0],tunit,Top,ma.getdata(dist[indb])[i],sig])
                                if (Dist[-1][-2]-AsumR) <= 0.:
                                    Vect.append(dx.T[indb][i]/Dist[-1][-2])
                                    VectA.append([OxyzNames,np.array(Oatom[3:6]),TxyzNames,np.array(Tatom[3:6]),unit,Top])
                                else:
                                    Vect.append([0.,0.,0.])
                                    VectA.append([])
        for D in Dist:
            DistArray[Oatom[0]].append(D[1:])
        Vect = np.array(Vect)
        angles = np.zeros((len(Vect),len(Vect)))
        angsig = np.zeros((len(Vect),len(Vect)))
        for i,veca in enumerate(Vect):
            if np.any(veca):
                for j,vecb in enumerate(Vect):
                    if np.any(vecb):
                        angles[i][j],angsig[i][j] = G2mth.getAngSig(VectA[i],VectA[j],Amat,SGData,covData)
                        if i <= j:
                            continue
                        AngArray[Oatom[0]].append((i,j,
                            G2mth.getAngSig(VectA[i],VectA[j],Amat,SGData,covData)))
    return AtomLabels,DistArray,AngArray
def extract_vec(target_vector, dhs_vector):
    dhs_index = nonzero(dhs_vector)[0]
    return target_vector[dhs_index]
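# Tiny usage sketch for extract_vec() above (assuming it and a NumPy
# `nonzero` are in scope): the dhs_vector acts as an indicator whose nonzero
# positions select entries of target_vector.
import numpy as np
from numpy import nonzero  # the bare nonzero() the function relies on

target = np.array([10, 20, 30, 40])
indicator = np.array([0, 1, 0, 1])
print(extract_vec(target, indicator))  # [20 40]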
               linewidths=2)
q.levels = [nf(val) for val in q.levels]
plt.clabel(q, q.levels[::2], inline=1, fmt=fmt, fontsize=25)

# Add diabatic layer depth
PI = c.mnc('PSI.nc', "LaPs1TH").mean(axis=2)
PI = ma.masked_array(PI, PI < 0.95)

# Depths
th = c.mnc('PSI.nc', "LaHs1TH").mean(axis=2)
depths = np.cumsum(th[::-1], axis=0)[::-1]

DDL = np.zeros(len(c.yc))
psi = c.get_psi_iso()
for jj in range(len(c.yc)):
    if ma.all(PI[:, jj] == 1) or np.all(psi[:, jj] == -0) or PI[:, jj].mask.all():
        continue
    indx = ma.nonzero(PI[:, jj] < 0.9999999999)[0]
    a = indx[np.nonzero(indx > 3)[0]][0]
    if a < 41 and depths[a - 1, jj] - depths[a, jj] > 150:
        DDL[jj] = (depths[a - 1, jj] + depths[a, jj]) / 2
    else:
        DDL[jj] = depths[a, jj]
r = ax.plot(c.yc / 1000, SG.savitzky_golay(-DDL / 1000, 21, 1),
            color='0.75', linewidth=4)

# Labels
ax.set_title(str(Figletter[Runs[i]]) + str(tau[Runs[i]]) + 'day', fontsize=30)
if str(tau[Runs[i]]) == 'Closed':
    ax.set_title(str(Figletter[Runs[i]]) + str(tau[Runs[i]]), fontsize=30)
def ba_compare_year(indicesfile, bafile, outfile=None, indicesvarnames=None,
                    support=None, reduction=None):
    """collates the various indices with burned area counts"""
    # interesting variables from the indicesfile
    last_val_reduce = []
    if indicesvarnames is None:
        indicesvarnames = [
            'gsi_avg', 'fm1000', 'fm100', 'fm10', 'fm1', 'dd', 't_max'
        ]
    if 'dd' in indicesvarnames:
        last_val_reduce = ['dd']

    indices = nc.Dataset(indicesfile)
    indicesvars = [indices.variables[v] for v in indicesvarnames]
    ba = nc.Dataset(bafile)
    count = ba.variables['count']

    if support is not None:
        s = nc.Dataset(support)
        supportvars = list(s.variables.keys())
        supportvars.remove('land')
        indicesvarnames.extend(supportvars)
        indicesvars.extend([s.variables[v] for v in supportvars])
        last_val_reduce.extend(supportvars)

    # workaround: bug in index calculator does not calculate last day.
    time_samples = range(1, len(ba.dimensions['days']) - 1)
    if reduction is not None:
        if reduction == REDUCE_MONTHLY:
            grid_reducer = rv.monthly_aggregator(count.shape, 3)
            cmp_reducer = rv.monthly_aggregator(indicesvars[0].shape, 0)
            grid_reducer.cutpoints[0] = 1
            cmp_reducer.cutpoints[1] = 1
        else:
            grid_reducer = rv.ReduceVar(count.shape, 3, reduction)
            cmp_reducer = rv.ReduceVar(indicesvars[0].shape, 0, reduction)
        time_samples = range(grid_reducer.reduced)

    ca = trend.CompressedAxes(indices, 'land')

    alldata = []
    days = []

    for i_time in time_samples:
        day_data = []
        active_lc = []
        if reduction is None:
            count_slice = count[..., i_time]
        else:
            count_slice = np.array(grid_reducer.sum(i_time, count))

        for lc in range(len(ba.dimensions['landcover'])):
            # compress the count
            lc_count = ca.compress(count_slice[:, :, lc])

            # find nonzero counts
            i_nonzero = ma.nonzero(lc_count)

            if len(i_nonzero[0]) > 0:
                # construct dataframe for this landcover code
                lc_data = {"BA Count": lc_count[i_nonzero]}

                for n, v in zip(indicesvarnames, indicesvars):
                    # reduce variable if necessary
                    if reduction is None:
                        day_v = v[i_time, :]
                    else:
                        # the last value of the dry day sequence is
                        # representative of the reduced time period
                        if n in last_val_reduce:
                            day_v = cmp_reducer.last_val(i_time, v)
                        else:
                            day_v = cmp_reducer.mean(i_time, v)

                    # add a column for the current index
                    lc_data[n] = day_v[i_nonzero]

                day_data.append(pd.DataFrame(lc_data))
                active_lc.append(ba.variables['landcover'][lc])

        if len(day_data) > 0:
            alldata.append(pd.concat(day_data, keys=active_lc))
            days.append(i_time)

    all_data_frame = pd.concat(alldata, keys=days)

    if outfile is not None:
        all_data_frame.to_csv(outfile)

    return all_data_frame
# Layer probability mask
PI = c.mnc('PSI.nc', "LaPs1TH").mean(axis=2)
PI = ma.masked_array(PI, PI < 0.95)
#psi = ma.masked_array(psi, PI < .98)

# Depths
th = c.mnc('PSI.nc', "LaHs1TH").mean(axis=2)
depths = np.cumsum(th[::-1], axis=0)[::-1]

# Find max ROC and depth of diabatic layer
DDL = np.zeros(len(c.yc))
for jj in range(len(c.yc)):
    if ma.all(PI[:, jj] == 1) or np.all(psi[:, jj] == -0) or PI[:, jj].mask.all():
        continue
    indx = ma.nonzero(PI[:, jj] < 1)[0]
    b = indx[np.nonzero(indx > 3)[0]]
    if len(b) >= 2 and (b[1] - b[0]) > 1:
        a = b[1]
    else:
        a = b[0]
    if a < 41 and depths[a - 1, jj] - depths[a, jj] > 150:
        a = a - 1
    DDL[jj] = depths[a, jj]

ax = fig.add_subplot(2, 2, i + 1)
p = plt.plot(c.yc / 1000, SG.savitzky_golay(-DDL, 31, 1), 'r', linewidth=3)
plt.ylim(-2895, 0)
ax.set_title(str(Figletter[Runs[i]]) + str(tau[Runs[i]]) + 'day', fontsize=30)
if str(tau[Runs[i]]) == 'Closed':
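# Isolated sketch of the "first level below threshold, past a minimum index"
# search used by the two diabatic-layer fragments above. The profile values
# are invented.
import numpy as np
import numpy.ma as ma

PI_col = ma.masked_array(np.array([1.0, 1.0, 1.0, 1.0, 0.97, 0.96]))
indx = ma.nonzero(PI_col < 1)[0]        # levels where the probability drops
a = indx[np.nonzero(indx > 3)[0]][0]    # first such level deeper than index 3
print(a)                                # -> 4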
def measure(mode, x, y, x0, x1, thresh=0):
    """ return a measure of y in the window x0 to x1 """
    xt = x.view(numpy.ndarray)  # strip Metaarray stuff - much faster!
    v = y.view(numpy.ndarray)
    xm = ma.masked_outside(xt, x0, x1).T
    ym = ma.array(v, mask=ma.getmask(xm))
    if mode == 'mean':
        r1 = ma.mean(ym)
        r2 = ma.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        return (0, 0)
        # unreachable below the early return; kept from the original
        slope = numpy.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = numpy.array(self.dat[i][j, thisaxis, tb])
            ppars = numpy.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = numpy.append(slope, ppars[0])  # keep track of max slope
        r1 = numpy.amax(slope)
        r2 = numpy.argmax(slope)
    return (r1, r2)
print('Num pts in section:', size(xatm_sect))
# if there are more than 15000 pts in the 1km grid (average of around 20000) then proceed
if (num_pts_section > pts_threshold):
    # generate a 2d grid from the bounds of the ATM coordinates of this section
    xx2d, yy2d = ut.grid_atm(xatm_sect, yatm_sect, xy_res)
    print('Grid:', size(xx2d[0]), size(xx2d[1]))

    # Grid the elevations onto the 2d grid
    elevation2d = ut.grid_elevation(xatm_sect, yatm_sect, elevation_sect, xx2d, yy2d, kdtree=1)

    # Elevation relative to a fitted plane (order 2=quadratic plane)
    elevation2d_plane = ut.getPlaneElev(elevation2d, xx2d, yy2d, order=2)

    # Find local level (modal) surface
    level_elev, thresh, levpercent = ut.calc_level_ice(
        asarray(elevation2d_plane[ma.nonzero(elevation2d_plane)]), pint, pwidth,
        min_feature_depth)

    # Elevation anomalies relative to a local level (modal) surface
    elevation2d_anomalies = elevation2d_plane - level_elev

    # Elevation threshold
    thresh = thresh - level_elev
    elevation2d_masked = ma.masked_where(elevation2d_anomalies < thresh,
                                         elevation2d_anomalies)
    feature_area = ma.count(elevation2d_masked)
    if (feature_area > 0):
        found_features = 1
        labelled_image = ut.label_features(elevation2d_masked, xy_res,
                                           min_feature_size, min_feature_depth)
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
masked_array(
  data=[[1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.]],
  mask=False,
  fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[1, 1] = ma.masked
>>> x
masked_array(
  data=[[1.0, 0.0, 0.0],
        [0.0, --, 0.0],
        [0.0, 0.0, 1.0]],
  mask=[[False, False, False],
        [False,  True, False],
        [False, False, False]],
  fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
>>> np.transpose(x.nonzero())
array([[0, 0],
       [2, 2]])
>>> a = ma.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
masked_array(
  data=[[False, False, False],
        [ True,  True,  True],
        [ True,  True,  True]],
  mask=False,
  fill_value=True)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))