def _calc_attenuation(self, lwc_scaled: np.ndarray) -> ma.MaskedArray:
    """Calculates liquid attenuation (dB)."""
    liquid_attenuation = ma.zeros(lwc_scaled.shape)
    spec_liq = self._model["specific_liquid_atten"]
    lwp_cumsum = ma.cumsum(lwc_scaled[:, :-1] * spec_liq[:, :-1], axis=1)
    liquid_attenuation[:, 1:] = TWO_WAY * lwp_cumsum * M_TO_KM
    return liquid_attenuation
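# A minimal standalone sketch of the cumulative-attenuation pattern above,
# using synthetic data. TWO_WAY, M_TO_KM, the array shapes, and the specific
# attenuation value are assumptions for illustration, not taken from the
# real model object.
import numpy as np
import numpy.ma as ma

TWO_WAY = 2       # two-way propagation: down to the gate and back up
M_TO_KM = 0.001   # specific attenuation is per km, gate spacing is in m

lwc_scaled = ma.masked_invalid(np.random.rand(3, 5))  # (time, height), g m-3 * m
spec_liq = np.full((3, 5), 0.8)                       # dB km-1 (g m-3)-1, assumed

atten = ma.zeros(lwc_scaled.shape)
# Attenuation at gate n accumulates liquid in gates 0..n-1, hence the shift:
atten[:, 1:] = TWO_WAY * ma.cumsum(lwc_scaled[:, :-1] * spec_liq[:, :-1], axis=1) * M_TO_KM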
def _specific_to_gas_atten(self, specific_atten: np.ndarray) -> np.ndarray:
    layer1_atten = self._model["gas_atten"][:, 0]
    atten_cumsum = ma.cumsum(specific_atten, axis=1)
    atten = TWO_WAY * atten_cumsum * self._dheight * M_TO_KM
    atten += utils.transpose(layer1_atten)
    atten = np.insert(atten, 0, layer1_atten, axis=1)[:, :-1]
    return ma.array(atten, mask=atten_cumsum.mask)
def aggrade_front(self, grid, tstep, source_cells_Qs, elev, SL):
    # Ensure Qs and tstep units match!
    self.total_sed_supplied_in_tstep = source_cells_Qs * tstep
    self.Qs_sort_order = np.argsort(source_cells_Qs)[::-1]  # descending order
    # Keep only the nodes that are actually supplying sediment:
    self.Qs_sort_order = self.Qs_sort_order[:np.count_nonzero(source_cells_Qs > 0)]
    for i in self.Qs_sort_order:
        subaerial_nodes = elev >= SL
        subsurface_elev_array = ma.array(elev, mask=subaerial_nodes)
        xy_tuple = (grid.node_x[i], grid.node_y[i])
        distance_map = grid.get_distances_of_nodes_to_point(xy_tuple)
        loop_number = 0
        closest_node_list = ma.argsort(ma.masked_array(distance_map, mask=subsurface_elev_array.mask))
        smooth_cone_elev_from_apex = subsurface_elev_array[i] - distance_map * self.tan_repose_angle
        while 1:
            filled_all_cells_flag = 0
            accom_space_at_controlling_node = SL - subsurface_elev_array[closest_node_list[loop_number]]
            new_max_cone_surface_elev = smooth_cone_elev_from_apex + accom_space_at_controlling_node
            subsurface_elev_array.mask = (elev >= SL) | (new_max_cone_surface_elev < elev)
            depth_of_accom_space = new_max_cone_surface_elev - subsurface_elev_array
            accom_depth_order = ma.argsort(depth_of_accom_space)[::-1]
            # Vectorised method to calc fill volumes:
            area_to_fill = ma.cumsum(grid.cellarea[accom_depth_order])
            differential_depths = ma.empty_like(depth_of_accom_space)
            differential_depths[:-1] = (depth_of_accom_space[accom_depth_order[:-1]]
                                        - depth_of_accom_space[accom_depth_order[1:]])
            differential_depths[-1] = depth_of_accom_space[accom_depth_order[-1]]
            incremental_volumes = ma.cumsum(differential_depths * area_to_fill)
            match_position_of_Qs_in = ma.searchsorted(incremental_volumes, self.total_sed_supplied_in_tstep[i])
            try:
                depths_to_add = depth_of_accom_space - depth_of_accom_space[match_position_of_Qs_in]
            except IndexError:
                depths_to_add = depth_of_accom_space - depth_of_accom_space[match_position_of_Qs_in - 1]
                filled_all_cells_flag = 1
            depths_to_add = depths_to_add[ma.where(depths_to_add >= 0)]
            if not filled_all_cells_flag:
                depths_to_add += (self.total_sed_supplied_in_tstep[i]
                                  - incremental_volumes[match_position_of_Qs_in - 1]) / area_to_fill[match_position_of_Qs_in - 1]
                subsurface_elev_array[accom_depth_order[:len(depths_to_add)]] = depths_to_add
                self.total_sed_supplied_in_tstep[i] = 0
                break
            else:
                subsurface_elev_array[accom_depth_order] = depths_to_add
                self.total_sed_supplied_in_tstep[i] -= incremental_volumes[-1]
            loop_number += 1
    return elev
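# A standalone sketch of the vectorised fill-volume step inside aggrade_front,
# with made-up accommodation depths and cell areas. All names and numbers here
# are illustrative, not taken from landlab.
import numpy as np
import numpy.ma as ma

depth_of_accom_space = ma.array([0.5, 2.0, 1.2, 0.1])  # fillable depth per cell
cellarea = np.array([100.0, 100.0, 100.0, 100.0])      # plan-view cell areas

order = ma.argsort(depth_of_accom_space)[::-1]          # deepest cells first
area_to_fill = ma.cumsum(cellarea[order])               # running area being filled
diffs = ma.empty_like(depth_of_accom_space)
diffs[:-1] = depth_of_accom_space[order[:-1]] - depth_of_accom_space[order[1:]]
diffs[-1] = depth_of_accom_space[order[-1]]
# Volume required to flood the surface down to each successive cell's level;
# for these numbers: [80, 220, 340, 380].
incremental_volumes = ma.cumsum(diffs * area_to_fill)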
def barotropic_streamfunc(self):
    """Calculate the depth-integrated barotropic stream function Psi(x, y) in Sv."""
    U = self.mnc('Tav.nc', 'UVEL', mask=self.HFacW[:])
    # Depth-integrate U, then cumulatively integrate in y from the far
    # boundary (the [::-1] flips set the integration direction); the 5000
    # is presumably the hard-coded meridional grid spacing in metres.
    Psi = ma.cumsum(
        ma.sum(U * np.tile(self.dzf, (self.Nx + 1, self.Ny, 1)).T, axis=0)[::-1, :] * 5000,
        axis=0)[::-1, :]
    Psi_sv = Psi / 10**6  # m^3 s-1 -> Sverdrups
    return Psi_sv
def azimuthalAverage(image, center=None, maskval=0):
    """
    Calculate the azimuthally averaged radial profile.

    image   - 2D image
    center  - [x, y] pixel coordinates used as the center; the default is
              None, which uses the center of the image (including
              fractional pixels).
    maskval - threshold value for including data in the profile
    """
    # Calculate the indices from the image.
    y, x = np.indices(image.shape)

    # Default to the image center if no center is given.
    if center is None:
        center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])
    r = np.hypot(x - center[0], y - center[1])

    # Get sorted radii and sort the image accordingly.
    ind = np.argsort(r.flat)
    i_sorted = image.flat[ind]

    # For FP data we need to at least mask out data at 0 or less so the
    # gaps get ignored; we also want to mask out the area outside the
    # aperture, so use the given maskval to do that.
    i_ma = ma.masked_less_equal(i_sorted, maskval)
    mask = ma.getmask(i_ma)

    # Remove masked data points from further analysis.
    r_sorted = ma.compressed(ma.array(r.flat[ind], mask=mask))
    i_mask = ma.compressed(i_ma)

    # Get the integer part of the radii (bin size = 1).
    r_int = r_sorted.astype(int)

    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # assumes all radii are represented
    rind = np.where(deltar)[0]       # location of changed radius
    nr_tot = rind[1:] - rind[:-1]    # total number of points in each radius bin

    # Cumulative sum to figure out sums for each radius bin.
    csim = ma.cumsum(i_mask, dtype=float)
    tbin = csim[rind[1:]] - csim[rind[:-1]]

    # Calculate and return the profile of the mean within each bin.
    radial_prof = tbin / nr_tot
    return radial_prof
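# A hypothetical check of azimuthalAverage above: a centred 2-D Gaussian
# should give a monotonically decreasing radial profile. The image size,
# width, and maskval are choices of mine for illustration.
import numpy as np
import numpy.ma as ma

yy, xx = np.indices((64, 64))
img = np.exp(-((xx - 32) ** 2 + (yy - 32) ** 2) / (2 * 8.0 ** 2))
profile = azimuthalAverage(img, center=[32, 32], maskval=1e-6)
print(profile[:5])  # brightest bins first; values decay with radius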
def get_psi_bar(self, V=None, zpoint='F'):
    """Calculate the zonally averaged meridional overturning streamfunction."""
    if V is None:
        V = self.mnc('Tav.nc', 'VVEL', mask=self.HFacS[:])
    vflux = V * self.dzf[:, np.newaxis, np.newaxis]
    Vdx = vflux * self.HFacS
    Vdx = ma.mean(Vdx, axis=2) * self.Lx
    psi = ma.cumsum(Vdx, axis=0)
    if zpoint == 'F':
        return psi
    elif zpoint == 'C':
        # Prepend a row of zeros at the surface, then average adjacent
        # levels onto cell centres.
        psi = ma.vstack([np.zeros(self.Ny + 1), psi])
        return 0.5 * (psi[1:] + psi[:-1])
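# A minimal standalone sketch of the overturning calculation in get_psi_bar,
# with synthetic arrays standing in for model output. All shapes, thicknesses,
# and the domain width are assumptions, not MITgcm values.
import numpy as np
import numpy.ma as ma

Nz, Ny, Nx = 4, 6, 8
dzf = np.array([10.0, 20.0, 30.0, 40.0])        # layer thicknesses (m)
Lx = 1000.0                                     # domain width (m)
V = ma.masked_array(np.random.randn(Nz, Ny, Nx))  # meridional velocity (m/s)

vflux = V * dzf[:, np.newaxis, np.newaxis]      # transport per unit width
Vdx = ma.mean(vflux, axis=2) * Lx               # zonally integrated transport
psi = ma.cumsum(Vdx, axis=0)                    # integrate downward from the top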
def measure(mode, x, y, x0, x1, thresh=0):
    """Return a measure of y in the window x0 to x1."""
    xt = x.view(numpy.ndarray)  # strip Metaarray stuff - much faster!
    v = y.view(numpy.ndarray)
    xm = ma.masked_outside(xt, x0, x1).T
    ym = ma.array(v, mask=ma.getmask(xm))
    if mode == 'mean':
        r1 = ma.mean(ym)
        r2 = ma.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        # 'maxslope' is disabled here: the early return short-circuits the
        # legacy code below, which references names (self.dat, i, j,
        # thisaxis) that do not exist in this scope.
        return (0, 0)
        slope = numpy.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = numpy.array(self.dat[i][j, thisaxis, tb])
            ppars = numpy.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = numpy.append(slope, ppars[0])  # keep track of max slope
        r1 = numpy.amax(slope)
        r2 = numpy.argmax(slope)
    return (r1, r2)
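# Hypothetical usage of the measure() above on a synthetic sine trace; the
# signal and window bounds are mine, not from the original source. The
# function imports numpy under its full name, as it does itself.
import numpy
import numpy.ma as ma

t = numpy.linspace(0.0, 1.0, 1001)
v = numpy.sin(2 * numpy.pi * 5 * t)
mean_val, std_val = measure('mean', t, v, 0.2, 0.4)   # stats within the window
peak_val, peak_time = measure('max', t, v, 0.0, 1.0)  # value and time of the max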
def measure(mode, x, y, x0, x1, thresh=0):
    """Return a measure of y in the window x0 to x1."""
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':  # whichever extremum has the larger magnitude
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]
        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090':  # measure 10-90% rise time; r2 is the time of the max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9[0][0]] - xm[sm1[0][0]]  # first crossings of the 90% and 10% levels
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        # 'maxslope' is disabled here: the early return short-circuits the
        # legacy code below, which references names (self.dat, i, j,
        # thisaxis) that do not exist in this scope.
        return (0, 0)
        slope = np.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            newa = np.array(self.dat[i][j, thisaxis, tb])
            ppars = np.polyfit(x[tb], ym[tb], 1)  # do a linear fit - smooths the slope measures
            slope = np.append(slope, ppars[0])  # keep track of max slope
        r1 = np.amax(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
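# Hypothetical check of the repaired '1090' branch above: for a linear ramp
# from 0 to 1 over 1 s, the 10-90% rise time should come out near 0.8 s.
# The ramp and sampling are mine, for illustration only.
import numpy as np
import numpy.ma as ma

t = np.linspace(0.0, 1.0, 1001)
y = t.copy()
rise, peak_time = measure('1090', t, y, 0.0, 1.0)
print(rise)  # ~0.8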
def measure(mode, x, y, x0, x1, thresh=0, slopewin=1.0):
    """Return a measure of y in the window x0 to x1."""
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':  # whichever extremum has the larger magnitude
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]
        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090':  # measure 10-90% rise time; r2 is the time of the max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9[0][0]] - xm[sm1[0][0]]  # first crossings of the 90% and 10% levels
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        slope = []
        win = ma.flatnotmasked_contiguous(ym)  # slices covering the unmasked data
        dt = x[1] - x[0]
        st = int(slopewin / dt)  # use a slopewin-duration window for each fit
        w = win[0]  # fit within the first contiguous unmasked run
        for k in range(w.start + st, w.stop - st):  # slide through the window
            tb = range(k - st, k + st)
            ppars = np.polyfit(x[tb], ym[tb], 1)  # a linear fit smooths the slope measure
            slope.append(ppars[0])  # keep track of the max slope
        r1 = np.max(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
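# Hypothetical check of the repaired 'maxslope' branch above: for a ramp of
# slope 3, the fitted maximum slope should come out close to 3. The sampling,
# window, and slopewin values are mine; uniform sampling is assumed, as in
# the function's dt = x[1] - x[0].
import numpy as np
import numpy.ma as ma

t = np.linspace(0.0, 10.0, 1001)
y = 3.0 * t
max_slope, where = measure('maxslope', t, y, 2.0, 8.0, slopewin=1.0)
print(max_slope)  # ~3.0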
print(len(shot_dfs))

# for shot_df in shot_dfs:
#     shot_df.plot(x="time")
#     plt.show()

# Now we calculate the displacement on each axis for each shot.
# Assume the initial velocity is 0, so s = 0.5 * a * (dt)^2 per sample.
# (Assumes `from numpy import cumsum` and `import matplotlib.pyplot as plt`.)
x_displacements = []
y_displacements = []
z_displacements = []
for shot_df in shot_dfs:
    ts = shot_df.time.values
    dts = [j - i for i, j in zip(ts[:-1], ts[1:])]
    i_s = range(len(dts) - 1)
    x_s = cumsum([0.5 * shot_df.x.values[i] * dts[i] * dts[i] for i in i_s])
    y_s = cumsum([0.5 * shot_df.y.values[i] * dts[i] * dts[i] for i in i_s])
    z_s = cumsum([0.5 * shot_df.z.values[i] * dts[i] * dts[i] for i in i_s])
    x_displacements.append([i_s, x_s])
    y_displacements.append([i_s, y_s])
    z_displacements.append([i_s, z_s])

for i_s, x_s in x_displacements:
    plt.scatter(i_s, x_s, c='Red', label='x', alpha=0.5)
for i_s, y_s in y_displacements:
    plt.scatter(i_s, y_s, c='Blue', label='y', alpha=0.5)
for i_s, z_s in z_displacements:
    plt.scatter(i_s, z_s, c='Green', label='z', alpha=0.5)
plt.show()