Code example #1
File: geostrophy.py  Project: regeirk/klib
 def metergrid(self, lon, lat, unit='m'):
     """Converts zonal and meridional coordinates from degrees 
     latitude and longitude to another reference unit.
     
     PARAMETERS
         lon, lat (array like) :
             Longitude and latitude as bi-dimensional gridded 
             arrays.
         unit (string, optional) :
             Unit to which the coordinates will be converted.
     
     RETURNS
         x, y (array like) :
             New coordinates
     
     """
     if lon.shape != lat.shape:
         raise ValueError('Longitude and latitude grid dimensions do not'
                          ' match.')
     b, a = lon.shape
     
     x = gsw.distance(lon, lat)
     x = concatenate([zeros((b, 1)), x.cumsum(axis=1)], axis=1)
     
     y = gsw.distance(lon.transpose(), lat.transpose()).transpose()
     y = (concatenate([zeros((1, a)), y.cumsum(axis=0)], axis=0) -
         0.5 * y.sum(axis=0))
     
     return x, y
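The same pattern works standalone: cumulative sums of gsw.distance along the grid's rows and columns yield metric x/y coordinates. A minimal sketch, assuming only numpy and gsw (the 6x6 one-degree grid is illustrative):

import numpy as np
import gsw

lon, lat = np.meshgrid(np.arange(-40., -34.), np.arange(10., 16.))
b, a = lon.shape
# x grows eastward along each row, y northward along each column, in metres.
x = np.concatenate([np.zeros((b, 1)),
                    gsw.distance(lon, lat).cumsum(axis=1)], axis=1)
y = np.concatenate([np.zeros((1, a)),
                    gsw.distance(lon.T, lat.T).T.cumsum(axis=0)], axis=0)
print(x.shape, y.shape)  # both (6, 6)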
Code example #2
def size_bin(x0, x1, x2, y0, y1, y2):

    # Half-widths of the cell centred at (x1, y1): zonal extent from the
    # neighbouring longitudes, meridional extent from the neighbouring
    # latitudes (distances evaluated at the surface, p=0).
    x = gsw.distance([x2, x0], [y1, y1], [0, 0]) / 2.
    y = gsw.distance([x1, x1], [y2, y0], [0, 0]) / 2.
    area = x * y
    return area
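A hedged usage sketch (the coordinates are made up, and gsw is assumed to be imported at module level): neighbours half a degree away on each side give a roughly 39 km by 56 km cell at 45.5N.

area = size_bin(10.0, 10.5, 11.0, 45.0, 45.5, 46.0)
print(area[0] / 1e6)  # cell area in km^2, about 2.2e3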
Code example #3
def checkscalearea(checkarea, lon, lat, ellipsex, ellipsey, contourx='', contoury=''):
    if checkarea is False:
        # Area check disabled; still define areastatus so the final
        # return never raises a NameError.
        areastatus = {'status': True, 'check': None, 'ellipse': None, 'contour': None}

    elif checkarea is None or isinstance(checkarea, dict):
        if checkarea is None:
            checkarea = {'mesoscale': 2 * np.pi}

        # Approximate the ellipse area as its zonal extent times its
        # meridional extent (both in metres).
        ellipsarea = gs.distance([[ellipsex.max()], [ellipsex.min()]],
                                 [[ellipsey.mean()], [ellipsey.mean()]], axis=0)[0][0] * \
                     gs.distance([[ellipsex.mean()], [ellipsex.mean()]],
                                 [[ellipsey.max()], [ellipsey.min()]], axis=0)[0][0]
        if contourx != '' or contoury != '':
            contarea = gs.distance([[contourx.max()], [contourx.min()]],
                                   [[contoury.mean()], [contoury.mean()]], axis=0)[0][0] * \
                       gs.distance([[contourx.mean()], [contourx.mean()]],
                                   [[contoury.max()], [contoury.min()]], axis=0)[0][0]
        else:
            contarea = None

        if len(checkarea) == 1:
            if 'mesoscale' in checkarea:
                areachecker = (checkarea['mesoscale'] * rossbyR(np.mean(lon), np.mean(lat)))**2
            elif 'field' in checkarea:
                try:
                    path = os.path.expanduser(os.path.dirname(os.path.realpath(__file__)))
                    area_file = xarray.open_mfdataset(path + checkarea['field']['path'])
                    areachecker = (area_file.RrD.sel(lon=[lon], lat=[lat],
                                                     method='nearest').values) * checkarea['field']['factor']
                except Exception:
                    areachecker = False
            elif 'constant' in checkarea:
                if checkarea['constant'] == np.inf or checkarea['constant'] is None:
                    areachecker = None
                else:
                    areachecker = checkarea['constant']
            else:
                raise Exception('The Area Check dictionary should have one option: mesoscale, field or constant.')
        else:
            raise Exception('The Area Check dictionary should have only one option: mesoscale, field or constant.')

        if areachecker is None:
            areastatus = {'status': True, 'check': areachecker, 'ellipse': ellipsarea, 'contour': contarea}
        elif areachecker is False:
            areastatus = {'status': False, 'check': None, 'ellipse': None, 'contour': None}
        elif ellipsarea <= areachecker and contarea is not None:
            if contarea <= areachecker:
                areastatus = {'status': True, 'check': areachecker, 'ellipse': ellipsarea, 'contour': contarea}
            else:
                areastatus = {'status': False, 'check': None, 'ellipse': None, 'contour': None}
        elif ellipsarea <= areachecker and contarea is None:
            areastatus = {'status': True, 'check': areachecker, 'ellipse': ellipsarea, 'contour': None}
        else:
            areastatus = {'status': False, 'check': None, 'ellipse': None, 'contour': None}
    else:
        raise Exception('Unexpected dictionary format. Check the Area Check documentation.')
    return areastatus
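A hedged call sketch, assuming gs is the gsw module and np is numpy as in the function above; the ellipse points and the 1e9 m^2 threshold are made up. The 'constant' option avoids the project-specific rossbyR helper:

import numpy as np

ex = np.array([10.0, 10.2, 10.4])  # ellipse longitudes [deg E]
ey = np.array([45.0, 45.1, 45.2])  # ellipse latitudes [deg N]
status = checkscalearea({'constant': 1e9}, ex, ey, ex, ey)
print(status['status'])  # True: the ~7e8 m^2 ellipse is under the threshold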
Code example #4
 def equations(guess):
     x, y, r = guess
     if len(sndlon) > 2:
         return (
             gsw.distance([x, x1], [y, y1], p=0)[0] - (dist_1 - r),
             gsw.distance([x, x2], [y, y2], p=0)[0] - (dist_2 - r),
             gsw.distance([x, x3], [y, y3], p=0)[0] - (dist_3 - r),
         )
     else:
         return (
             gsw.distance([x, x1], [y, y1], p=0)[0] - (dist_1 - r),
             gsw.distance([x, x2], [y, y2], p=0)[0] - (dist_2 - r),
         )
Code example #5
    def determine_velocity(self, a_max, a_min, afnames, location, lat, lon,
                           exclusions):

        # rearrange a_min eliminating exclusions

        for i in range(0, len(a_min)):
            name = afnames[i]
            try:
                min_list = exclusions[name]
            except KeyError:
                continue
            a_min[i] = np.delete(a_min[i], min_list, 0)

        # calculate distances between consecutive stations
        distances = gsw.distance(lon, lat, 0)
        print("distances", distances, end=' ')
        print("names", afnames)

        # calculate velocities u = D/t; i indexes the location, and the length of each a_min is the number of peaks
        dt = [[] for i in range(len(a_min))]
        v = [[] for i in range(len(a_min))]
        for i in range(0, len(a_min) - 1):  # iterate on places
            name = afnames[i]
            for j in range(0, len(
                    a_min[0])):  # iterate on min points at each place
                print(i, ':', j)
                dt[i].append(
                    (a_min[i + 1][j][0] - a_min[i][j][0]) * 3600 * 24)  # sec
                print("a_min[%d ] (%f)- a_min[%d ] (%f) = %f" %
                      (i + 1, a_min[i + 1][j][0], i, a_min[i][j][0],
                       (a_min[i + 1][j][0] - a_min[i][j][0]) * 3600 * 24))
        for i in range(0, len(a_min) - 1):  # iterate on places
            for j in range(0, len(
                    a_min[0])):  # iterate on min points at each place
                v[i].append(distances[0][i] / dt[i][j])
                print(
                    "velocity at %s to %s (dist = %f)  event[%d] = %f [m/s]" %
                    (afnames[i], afnames[i + 1], distances[0][i], j, v[i][j]))

        print(
            "------------------------------------------------------------------------------"
        )

        for j in range(len(a_min[0])):
            if j == 0:
                for i in range(0, len(a_min) - 1):  # iterate on places
                    print('%12s | ' % afnames[i], end=' ')
            print()
            print(
                "-----------------------------------------------------------------------------"
            )
            for i in range(0, len(a_min) - 1):  # iterate on places
                print("%12.4f | " % (v[i][j]), end=' ')

        # end for
        print()
        print(
            "-----------------------------------------------------------------------------"
        )
Code example #6
File: plotting.py  Project: imclab/python-ctd
def gen_topomask(h, lon, lat, dx=1., kind='linear', plot=False):
    """
    Generates a topography mask from an oceanographic transect taking the
    deepest CTD scan as the depth of each station.

    Inputs
    ------
    h : array
        Pressure of the deepest CTD scan for each station [dbar].
    lon : array
          Longitude of each station [decimal degrees east].
    lat : array
          Latitude of each station [decimal degrees north].
    dx : float
         Horizontal resolution of the output arrays [km].
    kind : string, optional
           Type of the interpolation to be performed.
           See scipy.interpolate.interp1d documentation for details.
    plot : bool
           Whether to plot mask for visualization.

    Outputs
    -------
    xm : array
         Horizontal distances [km].
    hm : array
         Local depth [m].

    Examples
    --------
    >>> import gsw
    >>> import df  # FIXME: Add a dataset.
    >>> h = df.get_maxdepth()
    >>> # TODO: method to output distance.
    >>> x = np.append(0, np.cumsum(gsw.distance(df.lon, df.lat)[0] / 1e3))
    >>> xm, hm = gen_topomask(h, df.lon, df.lat, dx=1., kind='linear')
    >>> fig, ax = plt.subplots()
    >>> ax.plot(xm, hm, 'k', linewidth=1.5)
    >>> ax.plot(x, h, 'ro')
    >>> ax.set_xlabel('Distance [km]')
    >>> ax.set_ylabel('Depth [m]')
    >>> ax.grid(True)
    >>> plt.show()

    Author
    ------
    André Palóczy Filho ([email protected]) --  October/2012
    """

    h, lon, lat = map(np.asanyarray, (h, lon, lat))
    # Distance in km.
    x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
    h = -gsw.z_from_p(h, lat.mean())
    Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
    xm = np.arange(0, x.max() + dx, dx)
    hm = Ih(xm)

    return xm, hm
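The docstring example depends on a dataset that is not included; a synthetic stand-in (station positions and depths are made up) exercises the function end to end:

import numpy as np

h = np.array([100., 250., 500., 1200.])        # deepest scan [dbar]
lon = np.array([-38.0, -37.5, -37.0, -36.5])   # 0.5-degree spacing
lat = np.full(4, 12.0)
xm, hm = gen_topomask(h, lon, lat, dx=1., kind='linear')
print(xm[-1], hm.max())  # transect ~163 km long; max depth a bit under 1200 m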
Code example #7
def ll_to_xy(lon, lat):
    """Convert from longitude and latitude coordinates to Euclidian coordinates
    x, y. Only valid if the coordinates are closely spaced, i.e. dx, dy << the
    Earth's radius."""
    lllon = lon.min()
    lllat = lat.min()
    urlon = lon.max()
    urlat = lat.max()

    dlon = urlon - lllon
    dlat = urlat - lllat
    dx_ = np.squeeze(gsw.distance([lllon, urlon], [lllat, lllat]))  # dx of box
    dy_ = np.squeeze(gsw.distance([lllon, lllon], [lllat, urlat]))  # dy of box

    x = dx_ * (lon - lllon) / dlon
    y = dy_ * (lat - lllat) / dlat

    return x, y
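A quick sanity check with illustrative values: a half-degree box at 45N should come out roughly 39 km wide and 56 km tall (numpy and gsw assumed imported at module level):

lon = np.linspace(10.0, 10.5, 6)
lat = np.linspace(45.0, 45.5, 6)
x, y = ll_to_xy(lon, lat)
print(x[-1] / 1e3, y[-1] / 1e3)  # ~39.3 and ~55.6 km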
Code example #8
File: plotting.py  Project: mankoff/python-ctd
def gen_topomask(h, lon, lat, dx=1., kind='linear', plot=False):
    """
    Generates a topography mask from an oceanographic transect taking the
    deepest CTD scan as the depth of each station.

    Inputs
    ------
    h : array
        Pressure of the deepest CTD scan for each station [dbar].
    lon : array
          Longitude of each station [decimal degrees east].
    lat : array
          Latitude of each station [decimal degrees north].
    dx : float
         Horizontal resolution of the output arrays [km].
    kind : string, optional
           Type of the interpolation to be performed.
           See scipy.interpolate.interp1d documentation for details.
    plot : bool
           Whether to plot mask for visualization.

    Outputs
    -------
    xm : array
         Horizontal distances [km].
    hm : array
         Local depth [m].

    Examples
    --------
    >>> import gsw
    >>> import df  # FIXME: Add a dataset.
    >>> h = df.get_maxdepth()
    >>> # TODO: method to output distance.
    >>> x = np.append(0, np.cumsum(gsw.distance(df.lon, df.lat)[0] / 1e3))
    >>> xm, hm = gen_topomask(h, df.lon, df.lat, dx=1., kind='linear')
    >>> fig, ax = plt.subplots()
    >>> ax.plot(xm, hm, 'k', linewidth=1.5)
    >>> ax.plot(x, h, 'ro')
    >>> ax.set_xlabel('Distance [km]')
    >>> ax.set_ylabel('Depth [m]')
    >>> ax.grid(True)
    >>> plt.show()

    Author
    ------
    André Palóczy Filho ([email protected]) --  October/2012
    """

    h, lon, lat = map(np.asanyarray, (h, lon, lat))
    # Distance in km.
    x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
    h = -gsw.z_from_p(h, lat.mean())
    Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
    xm = np.arange(0, x.max() + dx, dx)
    hm = Ih(xm)

    return xm, hm
Code example #9
 def delta_area(self, bottom_vel):
     # Compute perpendicular vectors.
     x_norm, y_norm, x_perp, y_perp, x_perp_all, y_perp_all = self.vertor_perp(
     )
     # Depth at each section of the transect.
     delta_z = abs(self.depth_profiles(bottom_vel=bottom_vel))
     # Distance between lon,lat points of transect.
     delta_x = gsw.distance(x_perp_all, y_perp_all)
     return delta_z * delta_x
Code example #10
def compute_area(lon, lat):
    # lon and lat are 2-D matrices; distances are computed in metres.
    dy = [
        gsw.distance(lon[:, i], lat[:, i]).squeeze()
        for i in np.arange(lon.shape[1])
    ]
    dy = np.asarray(dy).T
    dx = [
        gsw.distance(lon[i, :], lat[i, :]).squeeze()
        for i in np.arange(lon.shape[0])
    ]
    dx = np.asarray(dx)
    # The cell area is the average of the areas computed with the zonal
    # distance referenced to the top and to the bottom latitude of the cell.
    areatop = dx[1:, :] * dy[:, :-1]
    areabottom = dx[:-1, :] * dy[:, :-1]
    area = (areatop + areabottom) / 2.
    return area
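An illustrative call on a tiny one-degree mesh (not from the original project; numpy and gsw assumed imported at module level):

lon2d, lat2d = np.meshgrid(np.arange(0., 4.), np.arange(0., 3.))
area = compute_area(lon2d, lat2d)
print(area.shape)  # (2, 3): one area per cell between adjacent grid points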
Code example #11
def testdist(n):
    distance = np.arange(0, 200, n)
    tac = tcoasts.TransportAlongCoast(folder, [-89.75, 21.3], contourfile,
                                      distance)
    locations = tac.perploc()
    x_norm, y_norm, x_perp, y_perp, x_perp_all, y_perp_all = tac.vertor_perp()
    dist = np.zeros(len(locations))
    for ii in range(len(dist)):
        dist[ii] = gsw.distance([x_perp_all[ii][0], x_perp_all[ii][-1]],
                                [y_perp_all[ii][0], y_perp_all[ii][-1]])[0]
    return dist, tac.length * 1e3
Code example #12
 def perpvecdist(self, index_perp, perp_angle):
     # Compute distances used to scale the perpendicular vectors.
     # Note: this approximation introduces an error of order 1e-4.
     x = np.array([[
         self.coastline[index_perp][ii, 0],
         np.cos(perp_angle[ii]) + self.coastline[index_perp][ii, 0]
     ] for ii in range(len(index_perp))])
     y = np.array([[
         self.coastline[index_perp][ii, 1],
         np.sin(perp_angle[ii]) + self.coastline[index_perp][ii, 1]
     ] for ii in range(len(index_perp))])
     distances = gsw.distance(x, y)
     return distances
Code example #13
def get_distance_over_ground(ds):
    good = ~np.isnan(ds.latitude + ds.longitude)
    dist = gsw.distance(ds.longitude[good].values,
                        ds.latitude[good].values) / 1000
    dist = np.roll(np.append(dist, 0), 1)
    dist = np.cumsum(dist)
    attr = {
        'long_name': 'distance over ground flown since mission start',
        'method': 'get_distance_over_ground',
        'units': 'km',
        'sources': 'latitude longitude'
    }
    ds['distance_over_ground'] = (('time'), dist, attr)
    return ds
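A hedged usage sketch with a synthetic straight-line glider track; the variable names follow the function above, and the data are made up:

import numpy as np
import xarray as xr

ds = xr.Dataset(
    {'longitude': ('time', np.linspace(0.0, 0.1, 5)),
     'latitude': ('time', np.linspace(50.0, 50.1, 5))},
    coords={'time': np.arange(5)})
ds = get_distance_over_ground(ds)
print(ds.distance_over_ground.values)  # km, increasing monotonically from 0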
Code example #14
    def calculate_offset(self, reflon, reflat):
        """Calculate distance to reference location.

        Parameters
        ----------
        reflon : float
            Reference longitude
        reflat : float
            Reference latitude
        """

        self.offset = gsw.distance(
            np.array([self.lon, reflon]),
            np.array([self.lat, reflat]),
            p=0,
        )[0]
Code example #15
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
    """
    Generates a topography mask from an oceanographic transect taking the
    deepest CTD scan as the depth of each station.

    Inputs
    ------
    h : array
        Pressure of the deepest CTD scan for each station [dbar].
    lon : array
          Longitude of each station [decimal degrees east].
    lat : array
          Latitude of each station [decimal degrees north].
    dx : float
         Horizontal resolution of the output arrays [km].
    kind : string, optional
           Type of the interpolation to be performed.
           See scipy.interpolate.interp1d documentation for details.
    plot : bool
           Whether to plot mask for visualization.

    Outputs
    -------
    xm : array
         Horizontal distances [km].
    hm : array
         Local depth [m].

    Author
    ------
    André Palóczy Filho ([email protected]) --  October/2012

    """

    import gsw

    from scipy.interpolate import interp1d

    h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
    # Distance in km.
    x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
    h = -gsw.z_from_p(h, lat.mean())
    Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
    xm = np.arange(0, x.max() + dx, dx)
    hm = Ih(xm)

    return xm, hm
Code example #16
File: extras.py  Project: pyoceans/python-ctd
def gen_topomask(h, lon, lat, dx=1.0, kind="linear", plot=False):
    """
    Generates a topography mask from an oceanographic transect taking the
    deepest CTD scan as the depth of each station.

    Inputs
    ------
    h : array
        Pressure of the deepest CTD scan for each station [dbar].
    lon : array
          Longitude of each station [decimal degrees east].
    lat : array
          Latitude of each station [decimal degrees north].
    dx : float
         Horizontal resolution of the output arrays [km].
    kind : string, optional
           Type of the interpolation to be performed.
           See scipy.interpolate.interp1d documentation for details.
    plot : bool
           Whether to plot mask for visualization.

    Outputs
    -------
    xm : array
         Horizontal distances [km].
    hm : array
         Local depth [m].

    Author
    ------
    André Palóczy Filho ([email protected]) --  October/2012

    """

    import gsw
    from scipy.interpolate import interp1d

    h, lon, lat = list(map(np.asanyarray, (h, lon, lat)))
    # Distance in km.
    x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
    h = -gsw.z_from_p(h, lat.mean())
    Ih = interp1d(x, h, kind=kind, bounds_error=False, fill_value=h[-1])
    xm = np.arange(0, x.max() + dx, dx)
    hm = Ih(xm)

    return xm, hm
Code example #17
def mnear(x, y, x0, y0):
    """
	USAGE
	-----
	xmin,ymin = mnear(x, y, x0, y0)

	Finds the the point in a (lons,lats) line
	that is closest to a specified (lon0,lat0) point.
	"""
    x, y, x0, y0 = map(np.asanyarray, (x, y, x0, y0))
    point = (x0, y0)

    d = np.array([])
    for n in range(x.size):
        xn, yn = x[n], y[n]
        dn = distance((xn, x0), (yn, y0))  # Calculate distance point-wise.
        d = np.append(d, dn)

    idx = d.argmin()

    return x[idx], y[idx]
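The point-wise loop above can be collapsed into a single gsw.distance call by pairing every line point with the target and reducing along axis=1. A sketch; mnear_vec is a hypothetical name, not part of the original module:

import numpy as np
import gsw

def mnear_vec(x, y, x0, y0):
    x, y = np.asanyarray(x), np.asanyarray(y)
    # One (point, target) pair per row; gsw.distance reduces each row.
    lon_pairs = np.column_stack([x, np.full(x.shape, x0)])
    lat_pairs = np.column_stack([y, np.full(y.shape, y0)])
    d = gsw.distance(lon_pairs, lat_pairs, axis=1).squeeze()
    idx = d.argmin()
    return x[idx], y[idx]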
Code example #18
def mnear(x, y, x0, y0):
	"""
	USAGE
	-----
	xmin,ymin = mnear(x, y, x0, y0)

	Finds the point in a (lons,lats) line
	that is closest to a specified (lon0,lat0) point.
	"""
	x,y,x0,y0 = map(np.asanyarray, (x,y,x0,y0))
	point = (x0,y0)

	d = np.array([])
	for n in range(x.size):
		xn,yn = x[n],y[n]
		dn = distance((xn,x0),(yn,y0)) # Calculate distance point-wise.
		d = np.append(d,dn)

	idx = d.argmin()

	return x[idx],y[idx]
Code example #19
 def compare_trilateration_solutions(self):
     lon = [r.lon for r in self.reserr]
     lat = [r.lat for r in self.reserr]
     lonmin = np.min(lon)
     lonmax = np.max(lon)
     latmin = np.min(lat)
     latmax = np.max(lat)
     if self.result.lon > lonmax or self.result.lon < lonmin:
         print("3-point solution longitude outside of 2-point solutions")
         outside = True
     elif self.result.lat > latmax or self.result.lat < latmin:
         print("3-point solution latitude outside of 2-point solutions")
         outside = True
     else:
         outside = False
     if outside:
         self.result_3point = self.result
         dist = gsw.distance(lon, lat, p=0)
         result = TrilaterationResult(lon=np.mean(lon),
                                      lat=np.mean(lat),
                                      error=np.mean(dist))
         result.calculate_offset(self.plan_lon, self.plan_lat)
         self.result = result
Code example #20
File: glider.py  Project: serazing/sitt
def compute_thermodynamics(prof):
	"""
	Perform a pre-processing on the glider data

	"""
	# 1) Make a nice time record for the profile
	max_depth = prof['P'].max(dim='NT')
	bottom = prof['P'].argmax(dim='NT')
	record = prof['time_since_start_of_dive']
	deltat_bottom = record.data[bottom].astype('f8')
	deltat_total = record.data[-1].astype('f8')
	alpha = deltat_bottom / deltat_total
	t_start = prof.GPS_time[0].data
	t_stop = prof.GPS_time[1].data
	t_bottom = t_start + pd.to_timedelta(
		alpha * (t_stop - t_start).astype('f8'))
	time_profile = t_bottom + pd.to_timedelta(
		prof['time_since_start_of_dive'] - deltat_bottom, unit='s')
	prof = prof.rename({'NT': 'time'}).assign_coords(
		time=('time', time_profile))

	# 2) Get the coordinates of the profile
	lat_start = prof.GPS_latitude[0].data
	lat_stop = prof.GPS_latitude[1].data
	lat_bottom = lat_start + alpha * (lat_stop - lat_start)
	lon_start = prof.GPS_longitude[0].data
	lon_stop = prof.GPS_longitude[1].data
	lon_bottom = lon_start + alpha * (lon_stop - lon_start)
	distance_start_to_bottom = 1e-3 * gsw.distance([lon_start, lon_bottom],
												  [lat_start, lat_bottom],
												  p=[0, max_depth]).squeeze()
	distance_bottom_to_stop = 1e-3 * gsw.distance([lon_bottom, lon_stop],
												  [lat_bottom, lat_stop],
												  p=[max_depth, 0]).squeeze()
	# 3) Clean up invalid data
	niceT = prof['T']
	niceS = prof['S']
	# Do not forget to correct the offset due to surface pressure
	niceP = (prof['P'] - prof['Psurf'])
	niceDive = prof['dive']

	# 4) Compute thermodynamic quantities from GSW toolbox
	# - Absolute Salinity
	SA = gsw.SA_from_SP(niceS, niceP, lat_start, lon_start)
	# - Conservative Temperature
	CT = gsw.CT_from_t(SA, niceT, niceP)
	# - In situ density
	rho = gsw.rho(SA, CT, niceP)
	# - Potential density referenced to surface pressure
	sigma0 = gsw.sigma0(SA, CT)
	# - Buoyancy
	b = 9.81 * (1 - rho / 1025)
	N2 = xr.DataArray(gsw.Nsquared(SA, CT, niceP)[0], name='Buoyancy frequency',
					  dims='time',
					  coords = {'time': ('time',
										 prof['time'].isel(time=slice(1, None)))
								}
					  )
	# - Depth
	depth = - gsw.z_from_p(niceP, lat_start)

	# 5) Split the dive into one descending and one ascending path
	bottom = niceP.argmax(dim='time')
	ones = xr.ones_like(niceP)
	newdive = xr.concat([2 * niceDive[:bottom] - 1, 2 * niceDive[bottom:]],
						dim='time')
	lat = xr.concat([0.5 * (lat_start + lat_bottom) * ones[:bottom],
					 0.5 * (lat_stop + lat_bottom) * ones[bottom:]], dim='time')
	lon = xr.concat([0.5 * (lon_start + lon_bottom) * ones[:bottom],
					 0.5 * (lon_stop + lon_bottom) * ones[bottom:]], dim='time')
	Pmax = xr.concat([max_depth * ones[:bottom],
					  max_depth * ones[bottom:]], dim='time')

	distance = xr.concat([distance_start_to_bottom * ones[:bottom],
						  distance_bottom_to_stop * ones[bottom:]], dim='time')
	distance.name = 'distance between profiles in km'

	return xr.Dataset({'Temperature': niceT,
					   'Salinity': niceS,
					   'Pressure': niceP,
					   'Rho': ('time', rho),
					   'CT': ('time', CT),
					   'SA': ('time', SA),
					   'Sigma0': ('time', sigma0),
					   'b': ('time', b),
					   'Pmax': ('time', Pmax),
					   'N2': N2},
					  coords={'profile': ('time', newdive.data),
							  'depth': ('time', depth),
							  'lat': ('time', lat), 'lon': ('time', lon),
							  'distance': distance})
Code example #21
    def spectra(self, varname='analysed_sst', lonname='lon', latname='lat', maskname='mask', lonrange=(154.9,171.7), latrange=(30,45.4), nbins=32, MAX_LAND=0.01):
        """Calculate a two-dimensional power spectrum of netcdf variable 'varname'
            in the box defined by lonrange and latrange.
        """
        tlon = self.nc[lonname].sel(lon=slice(lonrange[0],
                                          lonrange[1])).values
        tlat = self.nc[latname].sel(lat=slice(latrange[0],
                                          latrange[1])).values
        tlon[tlon<0.] += 360.

        # step 1: figure out the box indices
        #lon, lat = np.meshgrid(tlon, tlat)
        Nx = len(tlon)
        Ny = len(tlat)
        #########
        # derive dx, dy using the gsw package (takes a long time)
        #########
        dx = gsw.distance([tlon[Nx//2], tlon[Nx//2 + 1]], [tlat[Ny//2], tlat[Ny//2]])[0]
        dy = gsw.distance([tlon[Nx//2], tlon[Nx//2]], [tlat[Ny//2], tlat[Ny//2 + 1]])[0]
        #########
        # derive dx, dy just at the center point
        #########
        # a = gsw.earth_radius
        # dx = a * np.cos(np.pi/180.*tlat[Ny/2]) * np.pi/180.*np.diff(tlon)[Nx/2]
        # dy = a * np.pi/180.*np.diff(tlat)[Ny/2]

        # step 2: load the data
        T = self.nc[varname].sel(lon=slice(lonrange[0], lonrange[1]), 
                                                          lat=slice(latrange[0], latrange[1])).values[0]

        # step 3: figure out if there is too much land in the box
        #MAX_LAND = 0.01 # only allow up to 1% of land
        #mask_domain = mask.roll( nlon=roll )[jmin_bound:jmax_bound+100, imin_bound:imax_bound+100]
        region_mask = self.nc[maskname].sel(lon=slice(lonrange[0], lonrange[1]), 
                                                          lat=slice(latrange[0], latrange[1])).values - 1.
        land_fraction = region_mask.sum().astype('f8') / (Ny*Nx)
        if land_fraction == 0.:
            # no land in the box
            pass
        elif land_fraction <= MAX_LAND:
            # a little land; warn and interpolate it out in step 5
            errstr = 'The sector has land (land_fraction=%g) but we are interpolating it out.' % land_fraction
            warnings.warn(errstr)
        else:
            # too much land; step 5 aborts in this case
            errstr = 'The sector has too much land. land_fraction = ' + str(land_fraction)
            warnings.warn(errstr)
        
        # step 4: figure out FFT parameters (k, l, etc.) and set up result variable
        #dlon = lon[np.round(np.floor(lon.shape[0]*0.5)), np.round(
        #             np.floor(lon.shape[1]*0.5))+1]-lon[np.round(
        #             np.floor(lon.shape[0]*0.5)), np.round(np.floor(lon.shape[1]*0.5))]
        #dlat = lat[np.round(np.floor(lat.shape[0]*0.5))+1, np.round(
        #             np.floor(lat.shape[1]*0.5))]-lat[np.round(
        #             np.floor(lat.shape[0]*0.5)), np.round(np.floor(lat.shape[1]*0.5))]

        # Spatial step
        #dx = gfd.A*np.cos(np.radians(lat[np.round(
        #             np.floor(lat.shape[0]*0.5)),np.round(
        #             np.floor(lat.shape[1]*0.5))]))*np.radians(dlon)
        #dy = gfd.A*np.radians(dlat)
        
        # Wavenumber step
        #dx_domain = dx[jmin:jmax,imin:imax].copy()
        #dy_domain = dy[jmin:jmax,imin:imax].copy()
        #dk = np.diff(k)[0]*.5/np.pi
        #dl = np.diff(l)[0]*.5/np.pi
        k = fft.fftshift(fft.fftfreq(Nx, dx))
        l = fft.fftshift(fft.fftfreq(Ny, dy))
        dk = np.diff(k)[0]
        dl = np.diff(l)[0]

        ################################
        ###  MUR data is given daily individually ###
        ################################
        #Nt = T.shape[0]
        #Decor_lag = 13
        #tilde2_sum = np.zeros((Ny, Nx))
        #Ti2_sum = np.zeros((Ny, Nx))
        #Days = np.arange(0,Nt,Decor_lag)
        #Neff = len(Days)
        #for n in Days:
        Ti = np.ma.masked_array(T.copy(), region_mask)
            
        # step 5: interpolate the missing data (only if necessary)
        if land_fraction>0. and land_fraction<MAX_LAND:
            Ti = interpolate_2d(Ti)
        elif land_fraction==0.:
            # no problem
            pass
        else:
            sys.exit(0)
        
        # step 6: detrend and window the data in two dimensions (least squares plane fit)
        Ti = self._detrend_and_window_2d(Ti)   
        Ti2 = Ti**2

        # step 8: do the FFT for each timestep and aggregate the results
        Tif = fft.fftshift(fft.fft2(Ti))    # [u^2] (u: unit)
        tilde2 = np.real(Tif*np.conj(Tif))

        # step 9: check whether the Plancherel theorem is satisfied
        breve2 = tilde2/((Nx*Ny)**2*dk*dl)
        if land_fraction == 1.:
            #np.testing.assert_almost_equal(breve2_ave.sum()/(dx_domain[Ny/2,Nx/2]*dy_domain[Ny/2,Nx/2]*(spac2_ave).sum()), 1., decimal=5)
            np.testing.assert_almost_equal( breve2.sum() / ( dx * dy * Ti2.sum() ), 1., decimal=5)
            
        # step 10: derive the isotropic spectrum
        kk, ll = np.meshgrid(k, l)
        K = np.sqrt( kk**2 + ll**2 )
        #Ki = np.linspace(0, k.max(), nbins)
        if k.max() > l.max():
            Ki = np.linspace(0, l.max(), nbins)
        else:
            Ki = np.linspace(0, k.max(), nbins)
        #Ki = np.linspace(0, K.max(), nbins)
        deltaKi = np.diff(Ki)[0]
        Kidx = np.digitize(K.ravel(), Ki)
        invalid = Kidx[-1]
        area = np.bincount(Kidx)
        iso_wv = np.ma.masked_invalid(
                                           np.bincount(Kidx, weights=K.ravel()) / area )
        isotropic_PSD = np.ma.masked_invalid(
                                           np.bincount(Kidx, weights=breve2.ravel()) / area ) * iso_wv
        
        # Usage of digitize
        #>>> x = np.array([-0.2, 6.4, 3.0, 1.6, 20.])
        #>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
        #>>> inds = np.digitize(x, bins)
        #array([0, 4, 3, 2, 5])
        
        # Usage of bincount 
        #>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
        #array([1, 3, 1, 1, 0, 0, 0, 1])
        # With the option weight
        #>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
        #>>> x = np.array([0, 1, 1, 2, 2, 2])
        #>>> np.bincount(x,  weights=w)
        #array([ 0.3,  0.7,  1.1])  <- [0.3, 0.5+0.2, 0.7+1.-0.6]
        
        # step 10: return the results
        return Nx, Ny, k, l, Ti2, tilde2, breve2, iso_wv, isotropic_PSD[:], area[1:-1]
Code example #22
File: ray_tracer.py  Project: ouc-cook/Ray_Tracing
    def run(self,
            tstep=30,
            duration=5,
            lonpad=1.5,
            latpad=1.5,
            tpad=7,
            direction='forward',
            bottom=3000,
            rho0=1030,
            clearance=.5,
            shear=-.001,
            fname='ray_trace.csv',
            strain=True,
            stops=True,
            vertspeed=True,
            time_constant=False,
            save_data=False,
            progress_bar=False):
        """
        INSTRUCTIONS:
        TSTEP: TIMESTEP IN SECONDS (DEFAULT 30 SECONDS)
        DURATION: DURATION (IN DAYS) - DEFAULT 5
        IGNORE LONPAD AND LATPAD (DIDN'T CHANGE FROM OLDER VERSION)
        DIRECTION: "forward" and "reverse"  (SETS INTEGRATION TIME DIRECTION)
        BOTTOM:  can set default bottom instead of using bathymetry file
        
        Setup for midpoint method integration:
        1. Get field values 
        2. Get Cg @ t_n, X_n
        3. Get field values at t_n+dt/2, X_n + (dt/2)(Cg_n)
        4. Calculate Cg  @(t_n+dt/2, X_n + (dt/2)(Cg_n))
        5. X_(n+1) = X_n +  [dt * Cg  @(t_n+dt/2, X_n + (dt/2)(Cg_n))]
        
        """

        if direction == 'forward':
            # convert duration in hours to seconds
            T = np.arange(0, duration * 60 * 60, tstep)
        else:
            T = np.arange(0, -duration * 60 * 60, -tstep)
            tstep = -tstep

        Xall = []
        Kall = []
        amplitudes = []
        energy = []

        # names of all the columns in results frame
        names = ('Lon', 'Lat', 'depth', 'distance', 'bottom_depth', 'k', 'l',
                 'm', 'omega', 'N2', 'U', 'V', 'dudx', 'dvdx', 'dndx', 'dudy',
                 'dvdy', 'dndy', 'dudz', 'dvdz', 'dndz', 'cgx', 'cgy', 'cgz',
                 'x', 'y', 'z', 'u0', 'v0', 'w0', 'u', 'v', 'w', 'b', 'energy',
                 'u_momentum', 'v_momentum', 'horiz_momentum', 'time')
        cg = []
        steps = []
        localfield = []
        X = self.X0[:]
        lon0 = X[0]
        lat0 = X[1]
        K = self.K0[:]
        t0 = self.t0
        allbottom = []
        if progress_bar:
            pbar = FloatProgress(min=0, max=T.shape[0])
            pbar.value
            display(pbar)

        if not hasattr(self.F, 'dudx'):
            lonlim, latlim, tlim = self.F.createFuncs(X, lonpad, latpad, tpad)

        for ii, t1 in enumerate(T):
            # Get field values
            if progress_bar:
                pbar.value = float(ii)
            t = t0 + t1 / (24 * 60 * 60)
            if X[2] > 6000:
                zi1 = 2499
            else:
                zi1 = X[2]

            f = gsw.f(X[1])
            if time_constant:
                t = np.copy(t0)
            xi = (X[0], X[1], zi1, t)
            field = self.F.getfield(xi)

            f = gsw.f(X[1])

            # Step 1
            dy1 = self._cgy(field[0], K[3], K, field[2], f) * tstep / 2
            dz1 = self._cgz(field[0], K[3], K, f) * tstep / 2
            dx1 = self._cgx(field[0], K[3], K, field[1], f) * tstep / 2

            # midpoint position

            lon2, lat2 = inverse_hav(dx1, dy1, X[0], X[1])
            if X[2] + dz1 > 6000:
                zi = 2499
            else:
                zi = X[2] + dz1
            xi2 = (lon2, lat2, zi, t + tstep / (24 * 60 * 60 * 2))
            if time_constant:
                xi2 = (lon2, lat2, zi, t)
            field1 = self.F.getfield(xi2)
            f2 = gsw.f(lat2)

            # Update Wave properties at midpoint (midpoint refraction)
            dK = self._dKdt(field1, K, xi, xi2, tstep / 2)

            if not np.all(np.isfinite(dK)):

                K1 = K[:]
            else:

                if strain:

                    K1 = [
                        K[0] + dK[0], K[1] + dK[1], K[2] + dK[2], K[3] + dK[3]
                    ]
                else:
                    K1 = [
                        K[0], K[1],
                        K[2] + (tstep / 2) * (-(shear) * (K[0] + K[1])),
                        K[3] + dK[3]
                    ]

            # Step2
            dx2 = self._cgx(field1[0], K1[3], K1, field1[1], f2) * tstep
            dy2 = self._cgy(field1[0], K1[3], K1, field1[2], f2) * tstep
            dz2 = self._cgz(field1[0], K1[3], K1, f2) * tstep

            lon3, lat3 = inverse_hav(dx2, dy2, X[0], X[1])

            lonr = np.expand_dims(np.array([lon0, lon3]), axis=1)
            latr = np.expand_dims(np.array([lat0, lat3]), axis=1)
            distance = gsw.distance(lonr, latr, axis=0)

            if X[2] + dz2 > 6000:
                zi = 2499

            bathypad = np.linspace(-.01, .01, num=5)
            loncheck = bathypad + X[0]
            latcheck = bathypad + X[1]
            loncheck, latcheck = np.meshgrid(loncheck, latcheck)
            tester = np.array([loncheck.flatten(), latcheck.flatten()])
            bottom = np.nanmax([-self.F.bathy((p1[0], p1[1])) \
                                    for p1 in tester.T])
            # bottom = -self.F.bathy((X[0], X[1]))
            X1 = [lon3, lat3, X[2] + dz2, distance, bottom]

            steps.append([dx2, dy2, -dz2])
            cg.append([dx2 / tstep, dy2 / tstep, -dz2 / tstep])
            localfield.append(field)
            Kall.append(K1)
            K = K1
            Xall.append(X1)
            X = X1

            dist_so_far = np.cumsum(steps, axis=0)
            # print(dK[3])
            # print(K[3]**2)
            k = np.copy(K1[0])
            l = np.copy(K1[1])
            m = np.copy(K1[2])
            omega = np.copy(K1[3])
            f = gsw.f(lat3)
            w0 = (self.p0 * (-m * omega) / (field[0] - omega**2))

            # Perturbation amplitudes
            u0 = (self.p0 * (k * omega + l * f * 1j) / (omega**2 - f**2))
            v0 = (self.p0 * (l * omega - k * f * 1j) / (omega**2 - f**2))
            b0 = (self.p0 * (-1j * m * field[0]) / (field[0] - omega**2))

            # total distance so far
            xx = np.copy(dist_so_far[ii, 0])
            yy = np.copy(dist_so_far[ii, 1])
            zz = np.copy(dist_so_far[ii, 2])
            phase = k * xx + l * yy \
                    + m * zz - omega * t1

            # Integration limits
            period = np.abs(2 * np.pi / omega)
            t11 = t1 - period / 2
            t22 = t1 + period / 2

            # Mean value theorem: average each perturbation over one wave period.
            u2 = .5 * np.real(u0)**2
            v2 = .5 * np.real(v0)**2
            w2 = .5 * np.real(w0)**2
            b2 = .5 * np.real(b0)**2

            u = (quad(self._planewave,
                      t11,
                      t22,
                      args=(u0, xx, yy, zz, k, l, m, omega))[0])
            v = (quad(self._planewave,
                      t11,
                      t22,
                      args=(v0, xx, yy, zz, k, l, m, omega))[0])
            w = (quad(self._planewave,
                      t11,
                      t22,
                      args=(w0, xx, yy, zz, k, l, m, omega))[0])

            b = (quad(self._planewave,
                      t11,
                      t22,
                      args=(b0, xx, yy, zz, k, l, m, omega))[0])

            amplitudes.append([u0, v0, w0, u, v, w, b])

            # Calculate U and V momentum
            Umom = rho0 * (u * w) / period
            Vmom = rho0 * (v * w) / period

            # Calculate momentum flux
            mFlux = np.sqrt(((u * w) / period)**2 + ((v * w) / period)**2)

            # b = -(field[0] /omega / 9.8) * rho0 * w0 * np.sin(phase)
            # Internal wave energy
            E = .5 * rho0 * (u2 + v2 + w2) \
                + .5 *rho0* b2 * np.sqrt(field[0])**-2
            # E =E/rho0

            energy.append([E, Umom, Vmom, mFlux])

            if stops:
                # check if vertical speed goes to zero
                if vertspeed:
                    if np.abs(dz2 / tstep) < 1e-4:
                        print(
                            'Vertical Group speed = zero {} meters from bottom'
                            .format(bottom - X[2]))
                        break
                    if np.abs(E) > 1000:
                        # this checks if energy has gone to some unrealistic asymptote like behavior
                        print('ENERGY ERROR')
                        break

                    if ii > 3:

                        if np.abs(E - energy[ii - 2][0]) > .8 * E:
                            print('Non Linear')
                            break

                # data Boundary checks
                if not self.lonlim[0] <= X[0] <= self.lonlim[1]:
                    print('lon out of bounds')
                    break

                if not self.latlim[0] <= X[1] <= self.latlim[1]:
                    print('lat out of bounds')
                    break

                if not self.tlim[0] <= t <= self.tlim[1]:
                    print('time out of bounds')
                    print(t)
                    print(self.tlim)
                    break

                #  Check if near the bottom or surface
                if X[2] + clearance * np.abs((2 * np.pi) / K1[2]) >= bottom:
                    print('Hit Bottom - {} meters from bottom'.format(bottom -
                                                                      X[2]))
                    break

                if X[2] <= 0:
                    print('Hit Surface')
                    break

                # Check if  frequency gets too high
                if K1[3]**2 >= self.F.N2(xi2):
                    print('frequency above buoyancy frequency')
                    # print(K[3]**2)
                    # print(self.F.N2(xi2))
                    break

                if not np.isfinite(X1[0]):
                    print('X Update Error')

                    break

                if np.abs(u0) < 0.0001:
                    print('U amplitude zero')

                    break
                if np.abs(v0) < 0.0001:
                    print('v amplitude zero')

                    break
                if np.abs(w0) < 0.0001:
                    print('w amplitude zero')

                    break

                if not np.isfinite(dx1):
                    print('Field Error')

                    break

        # Save data in pandas data
        data = pd.DataFrame(np.concatenate(
            (np.real(np.stack(Xall)), np.real(np.stack(Kall)),
             np.real(np.stack(localfield)), np.real(
                 np.stack(cg)), np.real(np.stack(np.cumsum(steps, axis=0))),
             np.real(np.stack(amplitudes)), np.real(np.stack(energy)),
             np.real(np.expand_dims(T[:ii + 1], axis=1))),
            axis=1),
                            columns=names)

        if save_data:
            data.to_csv(fname)

        return data
Code example #23
File: plot_fluxes.py  Project: austinctodd/USEast
uflx=np.empty((2923,N,len(shelf_i),))
vflx=np.empty((2923,N,len(shelf_i),))
uflx[:]=np.nan
vflx[:]=np.nan

#-------------------------------------------------------------------------------
# Loop through each point calculate the angle of the shelf break at each point
#-------------------------------------------------------------------------------
for i in range(0, len(shelf_i)):

    print('Fluxes for point %03i' % i)

    #------------------------------------------------------------------------
    # Read in velocity and sea level data
    #------------------------------------------------------------------------
    # gsw.distance needs paired lon *and* lat arrays; the call here is
    # truncated in this excerpt, e.g. it may have been something like:
    # gsw.distance(lon[shelf_j[i], shelf_i[i]], lat[shelf_j[i], shelf_i[i]])

    #------------------------------------------------------------------------
    # Calculate depths of each rho layer
    #------------------------------------------------------------------------
    z = np.zeros(shape=(2923, N))
    zw = np.zeros(shape=(2923, N + 1))
    for k in range(0, len(s_r)):
        z0 = (hc*s_r[k] + Cs_r[k]*h[shelf_j[i], shelf_i[i]]) / (hc + h[shelf_j[i], shelf_i[i]])
        z[:, k] = zeta + (zeta + h[shelf_j[i], shelf_i[i]]) * z0

        z0 = (hc*s_w[k] + Cs_w[k]*h[shelf_j[i], shelf_i[i]]) / (hc + h[shelf_j[i], shelf_i[i]])
        zw[:, k] = zeta + (zeta + h[shelf_j[i], shelf_i[i]]) * z0

    # Add the last depth level for zw
    z0 = (hc*s_w[N] + Cs_w[N]*h[shelf_j[i], shelf_i[i]]) / (hc + h[shelf_j[i], shelf_i[i]])
    zw[:, N] = zeta + (zeta + h[shelf_j[i], shelf_i[i]]) * z0
Code example #24
def load_combine_ladcp_ctd_data(pathLADCP, pathCTD):
    import os
    import glob
    import gsw
    import xarray as xr
    import pandas as pd
    import numpy as np
    from . import met132_calc_functions as cf

    # load CTD data
    pathCTD = r'/Users/North/Drive/Work/UniH_Work/DataAnalysis/Data/MET_132/CTD_calibrated/Down_Casts/1db_mean/data/'  # use your path
    data_files = glob.glob(
        os.path.join(pathCTD, "*.asc")
    )  # advisable to use os.path.join as this makes concatenation OS independent
    ctd_data = load_ctd_data(data_files)

    # load LADCP data
    pathLADCP = r'/Users/North/Drive/Work/UniH_Work/DataAnalysis/Data/MET_132/LADCP/profiles/'  # use your path
    all_files = glob.glob(
        os.path.join(pathLADCP, "*.lad")
    )  # advisable to use os.path.join as this makes concatenation OS independent
    ladcp_data = load_ladcp_data(all_files)

    # create transects
    ind_LADCP_section, ind_CTD_section = list((1, 1, 1, 1, 1)), list(
        (1, 1, 1, 1, 1))

    ind_LADCP_section[0] = np.logical_and(
        np.logical_and(
            np.logical_and(ladcp_data.lon.values > 11.5,
                           ladcp_data.lon.values < 12.5),
            ladcp_data.lat.values < -26.25),
        ladcp_data.time.values < np.datetime64('2016-11-21T09:00:00'))
    ind_LADCP_section[1] = np.logical_and(
        np.logical_and(
            np.logical_and(
                np.logical_and(ladcp_data.lon.values > 11.5,
                               ladcp_data.lon.values < 12.5),
                ladcp_data.lat.values < -26.25),
            ladcp_data.time.values < np.datetime64('2016-11-22T23:00:00')),
        ladcp_data.time.values > np.datetime64('2016-11-21T09:00:00'))
    ind_LADCP_section[2] = np.logical_and(
        ladcp_data.time.values < np.datetime64('2016-12-02T03:30:00'),
        ladcp_data.time.values > np.datetime64('2016-11-30T20:30:00'))
    ind_true = np.where(ind_LADCP_section[2])[0]
    ind_LADCP_section[2][
        ind_true[6]] = False  # CTD is missing for this station
    ind_LADCP_section[3] = np.logical_and(
        np.logical_and(
            np.logical_and(
                np.logical_and(
                    np.logical_and(ladcp_data.lon.values > 12.95,
                                   ladcp_data.lon.values < 13.25),
                    ladcp_data.lat.values < -25.9),
                ladcp_data.lat.values > -26.25),
            ladcp_data.time.values > np.datetime64('2016-11-26T09:55:00')),
        ladcp_data.time.values < np.datetime64('2016-11-27T02:30:00'))
    ind_LADCP_section[4] = np.logical_and(
        np.logical_and(
            np.logical_and(
                np.logical_and(
                    np.logical_and(ladcp_data.lon.values > 12.5,
                                   ladcp_data.lon.values < 13.1),
                    ladcp_data.lat.values < -26.15),
                ladcp_data.lat.values > -26.5),
            ladcp_data.time.values > np.datetime64('2016-12-02T20:30:00')),
        ladcp_data.time.values < np.datetime64('2016-12-03T13:00:00'))

    ind_CTD_section[0] = np.logical_and(
        np.logical_and(
            np.logical_and(ctd_data.lon.values > 11.5,
                           ctd_data.lon.values < 12.5),
            ctd_data.lat.values < -26.25),
        ctd_data.time.values < np.datetime64('2016-11-21T09:00:00'))
    ind_CTD_section[1] = np.logical_and(
        np.logical_and(
            np.logical_and(
                np.logical_and(ctd_data.lon.values > 11.5,
                               ctd_data.lon.values < 12.5),
                ctd_data.lat.values < -26.25),
            ctd_data.time.values < np.datetime64('2016-11-22T23:00:00')),
        ctd_data.time.values > np.datetime64('2016-11-21T09:00:00'))
    ind_CTD_section[2] = np.logical_and(
        ctd_data.time.values < np.datetime64('2016-12-02T03:30:00'),
        ctd_data.time.values > np.datetime64('2016-11-30T20:30:00'))
    ind_CTD_section[3] = np.logical_and(
        np.logical_and(
            np.logical_and(
                np.logical_and(
                    np.logical_and(ctd_data.lon.values > 12.95,
                                   ctd_data.lon.values < 13.25),
                    ctd_data.lat.values < -25.9),
                ctd_data.lat.values > -26.25),
            ctd_data.time.values > np.datetime64('2016-11-26T09:55:00')),
        ctd_data.time.values < np.datetime64('2016-11-27T02:30:00'))
    ind_CTD_section[4] = np.logical_and(
        np.logical_and(
            np.logical_and(
                np.logical_and(
                    np.logical_and(ctd_data.lon.values > 12.5,
                                   ctd_data.lon.values < 13.1),
                    ctd_data.lat.values < -26.15),
                ctd_data.lat.values > -26.5),
            ctd_data.time.values > np.datetime64('2016-12-02T20:30:00')),
        ctd_data.time.values < np.datetime64('2016-12-03T13:00:00'))

    # combine LADCP and CTD into one dataset
    ctd_ladcp = list((1, 1, 1, 1, 1))
    for ri in range(len(ind_CTD_section)):
        ctd_test = ctd_data.isel(xy=ind_CTD_section[ri])
        ladcp_test = ladcp_data.isel(xy=ind_LADCP_section[ri])

        # no temporal interpolation, because casts are so random

        # get same z coords too, before merging; using ladcp which has bigger spacing
        ctd_test = ctd_test.interp(z=ladcp_test.z)
        #print(ctd_test,ladcp_test)

        # time/position of each cast may differ between ladcp and ctd, but referring to the same cast; so set to consistent times/positions
        ctd_test = ctd_test.reset_index('xy')  # need to separate out 'time'
        ladcp_temp = ladcp_test.reset_index(
            'xy')  # only way I found to get ctd_test to accept ladcp times
        ctd_test['time'] = ladcp_temp.xy.time
        ctd_test['lon'] = ladcp_temp.xy.lon
        ctd_test['lat'] = ladcp_temp.xy.lat
        ctd_test['station'] = ladcp_temp.xy.station
        ctd_test = ctd_test.set_index(xy=('lon', 'lat', 'station',
                                          'time'))  # put back to multi-index

        # merge along all matching coords
        ctd_ladcp[ri] = xr.merge([ctd_test, ladcp_test])

        # For consistency with scan_sadcp, make average Pressure dim
        ctd_ladcp[ri]['Pressure_array'] = ctd_ladcp[ri].Pressure
        ctd_ladcp[ri] = ctd_ladcp[ri].assign_coords(
            Pressure=ctd_ladcp[ri].z)  # to get right dims
        ctd_ladcp[ri].Pressure.values = ctd_ladcp[ri].Pressure_array.mean(
            dim='xy')
        # and re-order dimensions
        ctd_ladcp[ri] = ctd_ladcp[ri].transpose('xy', 'z')

        # calculate Vorticity, M**2, N**2, and Ri_Balanced
        ctd_ladcp[ri]['across_track_vel'] = ctd_ladcp[
            ri].u  #(ctd_ladcp[ri].u**2+ctd_ladcp[ri].v**2)**0.5
        # need to create DataArray to get coords right
        ctd_ladcp[ri]['distance'] = xr.DataArray(np.cumsum(
            np.trunc(
                np.append(
                    np.array(0),
                    gsw.distance(ctd_ladcp[ri].lon.dropna(dim='xy').values,
                                 ctd_ladcp[ri].lat.dropna(dim='xy').values,
                                 p=0)))),
                                                 dims='xy')

        # better for plotting when there is a coordinate option
        ctd_ladcp[ri] = ctd_ladcp[ri].assign_coords(
            x_km=ctd_ladcp[ri].distance / 1000)
        ctd_ladcp[ri] = ctd_ladcp[ri].assign_coords(x_m=ctd_ladcp[ri].distance)
        # and add to the multi-index (need to reset before setting, it seems)
        ctd_ladcp[ri] = ctd_ladcp[ri].reset_index('xy').set_index(
            xy=['x_m', 'x_km', 'lon', 'lat', 'station', 'time'])
        #dx = ctd_ladcp[ri].x_km.diff('xy').mean().values  !!! Taken care of by xarray
        #ctd_ladcp[ri] = ctd_ladcp[ri].assign_coords(x_km_shift=ctd_ladcp[ri].x_km) # for contour plot on pcolormesh
        #dz = ctd_ladcp[ri].z.diff('z').mean().values
        #ctd_ladcp[ri] = ctd_ladcp[ri].assign_coords(z_shift=ctd_ladcp[ri].z + dz/2) # for contour plot on pcolormesh

        ctd_ladcp[ri] = cf.calc_N2_M2(ctd_ladcp[ri])
        ctd_ladcp[ri] = cf.calc_vertical_vorticity(ctd_ladcp[ri])
        ctd_ladcp[ri] = cf.calc_Ri_Balanced(ctd_ladcp[ri])
        ctd_ladcp[ri] = cf.SI_GI_Check(ctd_ladcp[ri])

    return ctd_ladcp, ctd_data, ladcp_data  #, ctd_test, ladcp_test
Code example #25
def test_1darray_default_p():
    # @match_args_return doesn't see the default p.
    value = gsw.distance(np.array(lon), np.array(lat))
    assert_almost_equal(expected, value)
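For context, a self-contained version of the same check with assumed fixture values; in the test module, lon, lat and expected are defined elsewhere:

import numpy as np
import gsw
from numpy.testing import assert_almost_equal

lon, lat = [0.0, 1.0], [0.0, 0.0]
expected = 111195.0  # ~1 degree of longitude at the equator, in metres
value = gsw.distance(np.array(lon), np.array(lat))
assert_almost_equal(expected, value[0], decimal=0)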
Code example #26
File: plotting.py  Project: imclab/python-ctd
def plot_section(self, reverse=False, filled=False, **kw):
    lon, lat, data = map(np.asanyarray, (self.lon, self.lat, self.values))
    data = ma.masked_invalid(data)
    h = self.get_maxdepth()
    if reverse:
        lon = lon[::-1]
        lat = lat[::-1]
        data = data.T[::-1].T
        h = h[::-1]
    x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
    z = self.index.values.astype(float)

    if filled:  # FIXME: Causes discontinuities.
        data = data.filled(fill_value=np.nan)
        data = extrap_sec(data, x, z, w1=0.97, w2=0.03)

    # Contour key words.
    fmt = kw.pop('fmt', '%1.0f')
    extend = kw.pop('extend', 'both')
    fontsize = kw.pop('fontsize', 12)
    labelsize = kw.pop('labelsize', 11)
    cmap = kw.pop('cmap', plt.cm.rainbow)
    levels = kw.pop('levels', np.arange(np.floor(data.min()),
                    np.ceil(data.max()) + 0.5, 0.5))

    # Colorbar key words.
    pad = kw.pop('pad', 0.04)
    aspect = kw.pop('aspect', 40)
    shrink = kw.pop('shrink', 0.9)
    fraction = kw.pop('fraction', 0.05)

    # Topography mask key words.
    dx = kw.pop('dx', 1.)
    kind = kw.pop('kind', 'linear')
    linewidth = kw.pop('linewidth', 1.5)

    # Station symbols key words.
    color = kw.pop('color', 'k')
    offset = kw.pop('offset', -5)
    marker = kw.pop('marker', 'v')
    alpha = kw.pop('alpha', 0.5)

    # Figure.
    figsize = kw.pop('figsize', (12, 6))
    fig, ax = plt.subplots(figsize=figsize)
    xm, hm = gen_topomask(h, lon, lat, dx=dx, kind=kind)
    ax.plot(xm, hm, color='black', linewidth=linewidth, zorder=3)
    ax.fill_between(xm, hm, y2=hm.max(), color='0.9', zorder=3)

    if marker:
        ax.plot(x, [offset] * len(h), color=color, marker=marker, alpha=alpha,
                zorder=5)
    ax.set_xlabel('Cross-shore distance [km]', fontsize=fontsize)
    ax.set_ylabel('Depth [m]', fontsize=fontsize)
    ax.set_ylim(offset, hm.max())
    ax.invert_yaxis()

    ax.xaxis.set_ticks_position('top')
    ax.xaxis.set_label_position('top')
    ax.yaxis.set_ticks_position('left')
    ax.yaxis.set_label_position('left')
    ax.xaxis.set_tick_params(tickdir='out', labelsize=labelsize, pad=1)
    ax.yaxis.set_tick_params(tickdir='out', labelsize=labelsize, pad=1)

    if False:  # TODO: +/- Black-and-White version.
        cs = ax.contour(x, z, data, colors='grey', levels=levels,
                        extend=extend, linewidths=1., alpha=1., zorder=2)
        ax.clabel(cs, fontsize=8, colors='grey', fmt=fmt, zorder=1)
        cb = None
    if True:  # Color version.
        cs = ax.contourf(x, z, data, cmap=cmap, levels=levels, alpha=1.,
                         extend=extend, zorder=2)  # manual=True
        # Colorbar.
        cb = fig.colorbar(mappable=cs, ax=ax, orientation='vertical',
                          aspect=aspect, shrink=shrink, fraction=fraction,
                          pad=pad)
    return fig, ax, cb
Code example #27
    def spectra(self,
                varname='analysed_sst',
                lonname='lon',
                latname='lat',
                maskname='mask',
                lonrange=(154.9, 171.7),
                latrange=(30, 45.4),
                nbins=32,
                MAX_LAND=0.01):
        """Calculate a two-dimensional power spectrum of netcdf variable 'varname'
            in the box defined by lonrange and latrange.
        """
        tlon = self.nc[lonname].sel(lon=slice(lonrange[0], lonrange[1])).values
        tlat = self.nc[latname].sel(lat=slice(latrange[0], latrange[1])).values
        tlon[tlon < 0.] += 360.

        # step 1: figure out the box indices
        #lon, lat = np.meshgrid(tlon, tlat)
        Nx = len(tlon)
        Ny = len(tlat)
        #########
        # derive dx, dy using the gsw package (takes a long time)
        #########
        dx = gsw.distance([tlon[Nx // 2], tlon[Nx // 2 + 1]],
                          [tlat[Ny // 2], tlat[Ny // 2]])[0]
        dy = gsw.distance([tlon[Nx // 2], tlon[Nx // 2]],
                          [tlat[Ny // 2], tlat[Ny // 2 + 1]])[0]
        #########
        # derive dx, dy just at the center point
        #########
        # a = gsw.earth_radius
        # dx = a * np.cos(np.pi/180.*tlat[Ny/2]) * np.pi/180.*np.diff(tlon)[Nx/2]
        # dy = a * np.pi/180.*np.diff(tlat)[Ny/2]

        # step 2: load the data
        T = self.nc[varname].sel(lon=slice(lonrange[0], lonrange[1]),
                                 lat=slice(latrange[0], latrange[1])).values[0]

        # step 3: figure out if there is too much land in the box
        #MAX_LAND = 0.01 # only allow up to 1% of land
        #mask_domain = mask.roll( nlon=roll )[jmin_bound:jmax_bound+100, imin_bound:imax_bound+100]
        region_mask = self.nc[maskname].sel(
            lon=slice(lonrange[0], lonrange[1]),
            lat=slice(latrange[0], latrange[1])).values - 1.
        land_fraction = region_mask.sum().astype('f8') / (Ny * Nx)
        if land_fraction == 0.:
            # no land in the box
            pass
        elif land_fraction <= MAX_LAND:
            # a little land; warn and interpolate it out in step 5
            errstr = ('The sector has land (land_fraction=%g) '
                      'but we are interpolating it out.' % land_fraction)
            warnings.warn(errstr)
        else:
            # too much land; step 5 aborts in this case
            errstr = 'The sector has too much land. land_fraction = ' + str(
                land_fraction)
            warnings.warn(errstr)

        # step 4: figure out FFT parameters (k, l, etc.) and set up result variable
        #dlon = lon[np.round(np.floor(lon.shape[0]*0.5)), np.round(
        #             np.floor(lon.shape[1]*0.5))+1]-lon[np.round(
        #             np.floor(lon.shape[0]*0.5)), np.round(np.floor(lon.shape[1]*0.5))]
        #dlat = lat[np.round(np.floor(lat.shape[0]*0.5))+1, np.round(
        #             np.floor(lat.shape[1]*0.5))]-lat[np.round(
        #             np.floor(lat.shape[0]*0.5)), np.round(np.floor(lat.shape[1]*0.5))]

        # Spatial step
        #dx = gfd.A*np.cos(np.radians(lat[np.round(
        #             np.floor(lat.shape[0]*0.5)),np.round(
        #             np.floor(lat.shape[1]*0.5))]))*np.radians(dlon)
        #dy = gfd.A*np.radians(dlat)

        # Wavenumber step
        #dx_domain = dx[jmin:jmax,imin:imax].copy()
        #dy_domain = dy[jmin:jmax,imin:imax].copy()
        #dk = np.diff(k)[0]*.5/np.pi
        #dl = np.diff(l)[0]*.5/np.pi
        k = fft.fftshift(fft.fftfreq(Nx, dx))
        l = fft.fftshift(fft.fftfreq(Ny, dy))
        dk = np.diff(k)[0]
        dl = np.diff(l)[0]

        # MUR data is given as individual daily fields, so no time
        # aggregation loop is needed here.
        Ti = np.ma.masked_array(T.copy(), region_mask)

        # step 5: interpolate the missing data (only if necessary)
        if 0. < land_fraction <= MAX_LAND:
            Ti = interpolate_2d(Ti)
        elif land_fraction == 0.:
            # no land, nothing to do
            pass
        else:
            raise ValueError('land_fraction = %g exceeds MAX_LAND'
                             % land_fraction)

        # step 6: detrend and window the data in two dimensions (least squares plane fit)
        Ti = self._detrend_and_window_2d(Ti)
        Ti2 = Ti**2

        # step 7: do the FFT and form the two-dimensional power spectrum
        Tif = fft.fftshift(fft.fft2(Ti))  # [u^2] (u: unit)
        tilde2 = np.real(Tif * np.conj(Tif))

        # step 8: check whether the Plancherel theorem is satisfied
        breve2 = tilde2 / ((Nx * Ny)**2 * dk * dl)
        if land_fraction == 0.:
            np.testing.assert_almost_equal(
                breve2.sum() / (dx * dy * Ti2.sum()), 1., decimal=5)

        # step 9: derive the isotropic spectrum
        kk, ll = np.meshgrid(k, l)
        K = np.sqrt(kk**2 + ll**2)
        # bin up to the smaller of the two Nyquist wavenumbers
        Ki = np.linspace(0, min(k.max(), l.max()), nbins)
        deltaKi = np.diff(Ki)[0]
        Kidx = np.digitize(K.ravel(), Ki)
        area = np.bincount(Kidx)
        iso_wv = np.ma.masked_invalid(
            np.bincount(Kidx, weights=K.ravel()) / area)
        isotropic_PSD = np.ma.masked_invalid(
            np.bincount(Kidx, weights=breve2.ravel()) / area) * iso_wv

        # Usage of digitize
        #>>> x = np.array([-0.2, 6.4, 3.0, 1.6, 20.])
        #>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
        #>>> inds = np.digitize(x, bins)
        #array([0, 4, 3, 2, 5])

        # Usage of bincount
        #>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
        #array([1, 3, 1, 1, 0, 0, 0, 1])
        # With the option weight
        #>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
        #>>> x = np.array([0, 1, 1, 2, 2, 2])
        #>>> np.bincount(x,  weights=w)
        #array([ 0.3,  0.7,  1.1])  <- [0.3, 0.5+0.2, 0.7+1.-0.6]

        # step 10: return the results
        return Nx, Ny, k, l, Ti2, tilde2, breve2, iso_wv, isotropic_PSD[:], area[
            1:-1]
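
A hedged usage sketch for the method above. Its signature is truncated in this listing, so the class and method names below are assumptions, reconstructed from the argument names that appear in the body (varname, lonname, latname, maskname, lonrange, latrange, nbins, MAX_LAND):

# Hypothetical usage -- class, method and file names are assumptions.
import xarray as xr

class Spectra:
    def __init__(self, path):
        # dataset must provide lon, lat, a land mask and the variable
        self.nc = xr.open_dataset(path)
    # spectrum_2d = <the method above>

ds = Spectra('sst_daily.nc')  # placeholder file name
(Nx, Ny, k, l, Ti2, tilde2, breve2,
 iso_wv, iso_psd, area) = ds.spectrum_2d(
    varname='sst', lonname='lon', latname='lat', maskname='mask',
    lonrange=(200., 230.), latrange=(20., 40.), nbins=64)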
コード例 #28
0
ファイル: plot_TS_regions.py プロジェクト: ERDuran/cm-tools
temp_150299 = nc_fid.variables['temp'][150:,:,:]


#%%
lon_edges = np.concatenate([\
np.array([lon[0]-(lon[1]-lon[0])/2]),\
lon[:-1]+(lon[1:]-lon[:-1])/2,\
np.array([lon[-1]+(lon[-1]-lon[-2])/2])])

lat_edges = np.concatenate([\
np.array([lat[0]-(lat[1]-lat[0])/2]),\
lat[:-1]+(lat[1:]-lat[:-1])/2,\
np.array([lat[-1]+(lat[-1]-lat[-2])/2])])

lon_grid, lat_grid = np.meshgrid(lon_edges, lat)
lon_dist = gsw.distance(lon_grid, lat_grid)

lat_grid, lon_grid = np.meshgrid(lat_edges, lon)
lat_dist_dummy = gsw.distance(lon_grid, lat_grid)
lat_dist = np.transpose(lat_dist_dummy)

cell_area = lon_dist*lat_dist


#%%
temp_000149_avg = np.ones([150,8]) *100
temp_150299_avg = np.ones([150,8]) *100

for ii in range(time.shape[0] // 2):
    for nn in np.arange(1,8,1):
        temp_000149_w = temp_000149[ii,:,:] * cell_area
コード例 #29
0
def geoflow(S, T, p, lat, lon, plot=False):
    """
    Function for calculating the geostrophic flow for an array of ctd data
    using geopotential heights


    Parameters:
        S : Practical Salinity
        T : In-situ Temperature
        p : Pressure measurements -- NOT A NORMALIZED PRESSURE GRID
        lat : Latitude
        lon : Longitude
        plot : switch to run test plots of velocity ---> Default=False

    Returns:
        U : Zonal (east) component of the cross-transect geostrophic flow
        V : Meridional (north) component of the cross-transect geostrophic flow
        geoMag : Magnitude of the geostrophic velocity
        distRev : Distance grid matching the size of the velocity arrays

    """

    # More precise gravity calculations and density calculations
    g = gsw.grav(lat, p)
    SA = gsw.SA_from_SP(S, p, lon, lat)
    CT = gsw.CT_from_t(SA, T, p)
    rho = gsw.rho(SA, CT, p)

    # Specific Volume
    sv = gsw.density.specvol_anom_standard(SA, CT, p)
    f = gsw.geostrophy.f(lat)

    # cumulative distance travelled along the transect in meters
    dist = gsw.distance(lon, lat)
    dist = np.append(0, np.cumsum(dist))

    dist = np.tile(dist, (np.shape(rho)[0], 1))
    f = np.tile(f, (np.shape(rho)[0], 1))

    # Geopotential Anomaly Calculation
    phiSum = np.nancumsum(sv, axis=0)

    # loop over grids to make calculations for vertical Shear
    v = np.full((sv.shape[0], sv.shape[1] - 1), np.nan)
    for i in range(sv.shape[1] - 1):
        for m in range(sv.shape[0] - 1):
            dphiB = np.trapz([
                sv[m + 1, i + 1] * p[m + 1, i + 1], sv[m, i + 1] * p[m, i + 1]
            ])
            dphiA = np.trapz([sv[m + 1, i] * p[m + 1, i], sv[m, i] * p[m, i]])
            slope = (dphiB - dphiA) / dist[m, i + 1]
            v[m, i] = (1 / f[m, i]) * slope

    # Integrate vertically to calculate absolute velocity (assume no flow
    # at the bottom and integrate upwards)
    geoMag = np.flipud(np.nancumsum(np.flipud(v), axis=0))

    # Calculate U and V components of geostrophic flow
    dlat = np.diff(np.squeeze(lat))
    dlon = np.diff(np.squeeze(lon))
    theta = np.arctan(dlat / dlon)

    # U and V components
    U = np.vstack(
        [-geoMag[:, i] * np.sin(theta[i]) for i in range(len(theta))]).T
    V = np.vstack([geoMag[:, i] * np.cos(theta[i])
                   for i in range(len(theta))]).T

    # revised distance with dropped first column (distance = 0) to match size
    # of Velocity array
    distRev = dist[:, 1:]

    return U, V, geoMag, distRev
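
A hedged usage sketch with a synthetic section (all values below are illustrative; numpy and gsw are assumed to be imported as in the example):

# Synthetic CTD section: 50 depth levels x 4 stations, illustrative only.
import numpy as np

S = np.full((50, 4), 34.5)                               # practical salinity
T = np.linspace(15., 2., 50)[:, None] * np.ones((1, 4))  # in-situ temperature [degC]
p = np.arange(0., 500., 10.)[:, None] * np.ones((1, 4))  # pressure [dbar]
lat = np.array([-54.0, -54.5, -55.0, -55.5])
lon = np.array([-58.0, -57.5, -57.0, -56.5])

U, V, geoMag, distRev = geoflow(S, T, p, lat, lon)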
コード例 #30
0
def size_bin(x0, x1, x2, y0, y1, y2):
    x = gsw.distance([x2, x0], [y1, y1], [0, 0]) / 2.
    y = gsw.distance([x1, x1], [y2, y0], [0, 0]) / 2.
    area = x * y
    return area
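
A hedged example: x0, x2 (and y0, y2) are the neighbouring cell centres around (x1, y1) in degrees, so the two half-distances multiply to one cell width times one cell height:

# Illustrative values only: a half-degree grid cell near 40S.
area = size_bin(150.0, 150.5, 151.0, -40.5, -40.0, -39.5)
print(area)  # approx. 42 km x 55 km, returned in m^2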
コード例 #31
0
def test_list():
    value = gsw.distance(lon, lat, p=0, axis=-1)
    assert_almost_equal(expected, value)
コード例 #32
0
def wave_components_with_strain(ctd, ladcp, strain,
                                rho0=default_params['rho0'],
                                ctd_bin_size=1024, ladcp_bin_size=1024,
                                wl_min=300, wl_max=1000,
                                nfft=default_params['nfft'],
                                plots=default_params['plots'], save_data=False):

    """
    Calculating Internal Wave Energy

    Internal wave energy calcuations following methods in waterman et al 2012.

    """
    


    # Load Hydrographic Data
    g = 9.8
    U, V, p_ladcp = oc.loadLADCP(ladcp)
    S, T, p_ctd, lat, lon = oc.loadCTD(ctd)
    SA = gsw.SA_from_SP(S, p_ctd, lon, lat)
    CT = gsw.CT_from_t(SA, T, p_ctd)
    N2, dump = gsw.stability.Nsquared(SA, CT, p_ctd, lat)

    maxDepth = 4000
    idx_ladcp = p_ladcp[:, -1] <= maxDepth
    idx_ctd = p_ctd[:, -1] <= maxDepth

    strain = strain[idx_ctd, :]
    S = S[idx_ctd,:]
    T = T[idx_ctd,:]
    p_ctd = p_ctd[idx_ctd, :]
    U = U[idx_ladcp, :]
    V = V[idx_ladcp, :]
    p_ladcp = p_ladcp[idx_ladcp, :]
    rho = oc.rhoFromCTD(S, T, p_ctd, lon, lat)
    # Bin CTD data
    ctd_bins = oc.binData(S, p_ctd[:, 0], ctd_bin_size)
    # Bin Ladcp Data
    ladcp_bins = oc.binData(U, p_ladcp[:, 0], ladcp_bin_size)

    # Depth and lat/long grids
    depths = np.vstack([np.nanmean(p_ctd[binIn]) for binIn in ctd_bins])
    dist = gsw.distance(lon, lat)
    dist = np.cumsum(dist)/1000
    dist = np.append(0,dist)


    # Calculate Potential Energy
    z = -1*gsw.z_from_p(p_ctd, lat)
    PE, PE_grid, eta_psd, N2mean, pe_peaks = PE_strain(N2, z, strain,
                                             wl_min, wl_max, ctd_bins, nfft=2048)

    # Calculate Kinetic Energy
    z = -1*gsw.z_from_p(p_ladcp, lat)
    KE, KE_grid, KE_psd, Uprime, Vprime, ke_peaks = KE_UV(U, V, z, ladcp_bins,
                                wl_min, wl_max, lc=wl_min-50,
                                nfft=2048, detrend='constant')

    # Total energy (multiply by a reference density to get J m^-3)
    Etotal = 1027*(KE + PE)

    # wave components
    f = np.nanmean(gsw.f(lat))

    # version 1 omega calculation
    omega = f*np.sqrt((KE+PE)/(KE-PE))

    # version 2 omega calculation
    omega2 = np.abs((f**2)*((KE+PE)/(KE-PE)))
    rw = KE/PE
    w0 = ((f**2)*((rw+1)/(rw-1)))
#    m = (2*np.pi)/np.mean((wl_min, wl_max))
    m = ke_peaks[:, 0]
    m = m.reshape(omega.shape)
    m = (2*np.pi)*m

    # version 1 kh calculation
    khi = m*np.sqrt(((f**2 - omega**2)/(omega**2 - N2mean)))

    # version 2 kh calculation
    kh = (m/np.sqrt(N2mean))*(np.sqrt(omega2 - f**2))
    mask = khi == 0
    khi[mask]= np.nan
    lambdaH = 1e-3*(2*np.pi)/khi

    # Get coherence of u'b' and v'b' and use to estimate horizontal wavenumber
    # components. This uses the previously binned data but now regrids velocity
    # onto the density grid so there are the same number of grid points
    b = (-g*rho)/rho0
    b_poly = []
    z = -1*gsw.z_from_p(p_ctd, lat)
    fs = 1/np.nanmean(np.diff(z, axis=0))
    for cast in b.T:
        fitrev = oc.vert_polyFit(cast, z[:, 0], 100, deg=1)
        b_poly.append(fitrev)

    b_poly = np.vstack(b_poly).T
    b_prime = b - b_poly

    dz = 1/fs  # This is the vertical spacing between measurements in metres.
    lc = wl_min-50  # This is the cut off vertical scale in metres, the filter will remove variability smaller than this.
    mc = 1./lc  # Cut off wavenumber.
    normal_cutoff = mc*dz*2.  # Nyquist frequency is half 1/dz.
    a1, a2 = sig.butter(4, normal_cutoff, btype='lowpass')  # This specifies you use a lowpass butterworth filter of order 4, you can use something else if you want
    for i in range(b_prime.shape[1]):
        mask = ~np.isnan(b_prime[:,i])
        b_prime[mask,i] = sig.filtfilt(a1, a2, b_prime[mask,i])

    ub = []
    vb = []

    for i in range(ctd_bins.shape[0]):

        Uf = interpolate.interp1d(p_ladcp[ladcp_bins[i,:]].squeeze(),
                                        Uprime[ladcp_bins[i, :], :],
                                        axis=0, fill_value='extrapolate')

        Vf = interpolate.interp1d(p_ladcp[ladcp_bins[i,:]].squeeze(),
                                        Vprime[ladcp_bins[i, :], :],
                                        axis=0, fill_value='extrapolate')
        new_z = p_ctd[ctd_bins[i,:],0]
        u_f, ub_i = sig.coherence(b_prime[ctd_bins[i,:],:],
                                   Uf(new_z), nfft=nfft, fs=fs, axis=0)
        v_f, vb_i = sig.coherence(b_prime[ctd_bins[i,:],:],
                                   Vf(new_z), nfft=nfft, fs=fs, axis=0)

        ub.append(ub_i)
        vb.append(vb_i)

    ub = np.hstack(ub).T
    vb = np.hstack(vb).T


    # Random plots (only run if you're feeling brave)
    m_plot = np.array([(2*np.pi)/wl_max,
                       (2*np.pi)/wl_max, (2*np.pi)/wl_min,
                       (2*np.pi)/wl_min])

    if plots:
        plt.figure(figsize=[12,6])
        plt.subplot(121)
        plt.loglog(KE_grid, KE_psd.T, linewidth=.6, c='b', alpha=.1)
        plt.loglog(KE_grid, np.nanmean(KE_psd, axis=0).T, lw=1.5, c='k')
        ylims = plt.gca().get_ylim()
        ylim1 = np.array([ylims[0], ylims[1]])
        plt.plot(m_plot[2:], ylim1, lw=1,
                 c='k', alpha=.5,
                 linestyle='dotted')
        plt.plot(m_plot[:2], ylim1, lw=1,
                 c='k', alpha=.5,
                 linestyle='dotted')
        plt.ylim(ylims)
        plt.ylabel('Kinetic Energy Density')
        plt.xlabel('Vertical Wavenumber')
        plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2)
        plt.subplot(122)
        plt.loglog(PE_grid, .5*np.nanmean(N2)*eta_psd.T,
                   lw=.6, c='b', alpha=.1)
        plt.loglog(KE_grid, .5*np.nanmean(N2)*np.nanmean(eta_psd, axis=0).T,
                   lw=1.5, c='k')
        plt.plot(m_plot[2:], ylim1, lw=1,
                 c='k', alpha=.5,
                 linestyle='dotted')
        plt.plot(m_plot[:2], ylim1, lw=1,
                 c='k', alpha=.5,
                 linestyle='dotted')
        plt.ylim(ylims)
        plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2)
        plt.ylabel('Potential Energy Density')
        plt.xlabel('Vertical Wavenumber')

        plt.figure()
        Kemax = np.nanmax(KE_psd, axis=1)
        kespots = np.nanargmax(KE_psd, axis=1)
        ax = plt.gca()
        ax.scatter(KE_grid[kespots],Kemax , c='blue', alpha=0.3, edgecolors='none')
        ax.set_yscale('log')
        ax.set_xscale('log')

        plt.figure(figsize=[12,6])
        plt.subplot(121)
        plt.semilogx(u_f, ub.T, linewidth=.5, alpha=.5)
        plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2)
        plt.subplot(122)
        plt.semilogx(v_f, vb.T, linewidth=.5)
        plt.gca().grid(True, which="both", color='k', linestyle='dotted', linewidth=.2)
#        plt.xlim([10**(-2.5), 10**(-2)])

        plt.figure()
        ub_max = np.nanmax(ub, axis=1)
        kespots = np.argmax(ub, axis=1)
        ax = plt.gca()
        ax.scatter(u_f[kespots],ub_max , c='blue', alpha=0.3, edgecolors='none')
        ax.set_xscale('log')
        ax.set_xlim([1e-3, 1e-5])

        Kemax = np.nanmax(.5*np.nanmean(N2)*eta_psd.T, axis=1)
        kespots = np.nanargmax(.5*np.nanmean(N2)*eta_psd.T, axis=1)
        ax = plt.gca()
        ax.scatter(PE_grid[kespots],Kemax , c='red', alpha=0.3, edgecolors='none')
        ax.set_yscale('log')
        ax.set_xscale('log')


        # Peaks lots
        plt.figure()
        mask = np.isfinite(Etotal)
        Etotal[~mask]= 0
        distrev = np.tile(dist, [kh.shape[0],1])
        depthrev = np.tile(depths, [1, kh.shape[1]])
        plt.pcolormesh(distrev, depthrev, Etotal, shading='gouraud')
        plt.gca().invert_yaxis()

        plt.figure()
        plt.pcolormesh(dist, p_ladcp.squeeze(),
                       Uprime, cmap=cmocean.cm.balance,
                       shading='flat')
        levels = np.arange(np.nanmin(Etotal), np.nanmax(Etotal)+.5,.05)
        plt.contour(distrev, depthrev, Etotal)
        plt.gca().invert_yaxis()

    if save_data:

        file2save = pd.DataFrame(lambdaH)
        file2save.index = np.squeeze(depths)
        file2save.to_excel('lambdaH_dec24.xlsx')
        file2save = pd.DataFrame(Etotal)
        file2save.index = np.squeeze(depths)
        file2save.to_excel('E_total.xlsx')

    return PE, KE, omega, m, kh, lambdaH,\
            Etotal, khi, Uprime, Vprime, b_prime,\
            ctd_bins, ladcp_bins, KE_grid, PE_grid,\
            ke_peaks, pe_peaks, dist, depths, KE_psd,\
            eta_psd, N2, N2mean
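
A hedged call sketch; the file paths are placeholders for whatever inputs the project's oc.loadCTD and oc.loadLADCP helpers expect, and strain must be sampled on the CTD pressure grid:

# Hypothetical inputs -- file names and the strain array are placeholders.
results = wave_components_with_strain('ctd.mat', 'ladcp.mat', strain,
                                      wl_min=300, wl_max=1000)
PE, KE, omega, m, kh, lambdaH = results[:6]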
コード例 #33
0
def test_1darray():
    value = gsw.distance(np.array(lon), np.array(lat), p=0, axis=-1)
    assert_almost_equal(expected, value)
コード例 #34
0
ファイル: dynamics.py プロジェクト: regeirk/klib
def f(lat, lat0=None, central=True, returns='full'):
    r"""Calculates the Coriolis parameter f using the beta plane 
    approximation.
    
    PARAMETERS
        lat (array like) :
            Latitude in degrees.
        lat0 (array like) :
            Central latitudes in degrees.
        central (boolean, optional) :
            If set to True (default), uses the beta-plane approximation
            around the central latitudes.
        returns (string, optional) :
            If set to 'full' returns f, f0 and \beta.
    
    RETURNS
        f (array like) :
            The Coriolis parameter f = f_0 + \beta y
        f0 (array like) :
            The Coriolis parameter at the central latitudes.
        beta (array like) :
            The beta parameter.

        Note that the unit of f and f0 is s^{-1} and that the unit
        of \beta is (m s)^{-1}.

    """
    # Checks input arrays. If central latitudes are not set, creates an array
    # with central latitudes every 10 degrees. If central latitudes are 
    # equally spaced, determines spatial step in degrees.
    lat = numpy.asarray(lat)
    
    if central:
        if (lat0 is None) or (type(lat0) in [int, float]):
            if lat0 is None:
                dy = 10.
            else:
                dy = lat0
            ymin = numpy.floor(lat.min() / dy) * dy
            ymax = numpy.ceil(lat.max() / dy) * dy
            y0 = numpy.arange(ymin, ymax+dy, dy)
        else:
            dy = lat[1:] - lat[0:-1]
            if (dy == dy[0]).all():
                dy = dy[0]
            else:
                dy = None

        # Determine to which central latitude y0 every latitude y belongs.
        # The first method (fast way) assumes regularly spaced y0's; the
        # second method (slow way) is not implemented yet.
        if dy is not None:
            Lat = numpy.round(lat / dy) * dy
        else:
            # TODO: Implement slow way!
            raise NotImplementedError('Slow way not implemented yet.')
        
        # Calculate the distance from the equator to the latitudes in meters.
        d = gsw.distance(0, lat).flatten()
        d0 = gsw.distance(0, [0, lat[0]])[0, 0]
        y = numpy.concatenate([[0], d.cumsum()]) - d0
        #
        d = gsw.distance(0, Lat).flatten()
        d0 = gsw.distance(0, [0, Lat[0]])[0, 0]
        Y = numpy.concatenate([[0], d.cumsum()]) - d0
    else:
        Lat = lat
        
    # The Coriolis parameter calculated at the central latitudes
    K = constants()
    f0 = 2. * K.omega * numpy.sin(numpy.deg2rad(Lat))
    b = 2. * K.omega / K.a * numpy.cos(numpy.deg2rad(Lat))
    if central:
        f = f0 + b * (y - Y)
    
    if returns == 'full':
        if central:
            return f, f0, b
        else:
            return f0, b
    else:
        return f
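
A hedged usage sketch (assumes this module's imports, numpy and gsw, plus its constants() helper providing omega and the Earth radius a):

# Coriolis parameter on beta planes for a half-degree latitude grid.
lats = numpy.arange(-60., 60.5, 0.5)
cor, f0, beta = f(lats, lat0=None, central=True, returns='full')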
コード例 #35
0
def test_2dlist():
    value = gsw.distance(np.atleast_2d(lon), np.atleast_2d(lat), p=0, axis=1)
    assert_almost_equal(expected, value)
コード例 #36
0
ファイル: extras.py プロジェクト: pyoceans/python-ctd
def plot_section(self, reverse=False, filled=False, **kw):
    """Plot a sequence of CTD casts as a section."""
    import gsw

    lon, lat, data = list(
        map(np.asanyarray, (self.lon, self.lat, self.values))
    )
    data = ma.masked_invalid(data)
    h = self.get_maxdepth()
    if reverse:
        lon = lon[::-1]
        lat = lat[::-1]
        data = data.T[::-1].T
        h = h[::-1]
    lon, lat = map(np.atleast_2d, (lon, lat))
    x = np.append(0, np.cumsum(gsw.distance(lon, lat)[0] / 1e3))
    z = self.index.values.astype(float)

    if filled:  # CAVEAT: this method causes discontinuities.
        data = data.filled(fill_value=np.nan)
        data = extrap_sec(data, x, z, w1=0.97, w2=0.03)

    # Contour key words.
    extend = kw.pop("extend", "both")
    fontsize = kw.pop("fontsize", 12)
    labelsize = kw.pop("labelsize", 11)
    cmap = kw.pop("cmap", plt.cm.rainbow)
    levels = kw.pop(
        "levels",
        np.arange(np.floor(data.min()), np.ceil(data.max()) + 0.5, 0.5),
    )

    # Colorbar key words.
    pad = kw.pop("pad", 0.04)
    aspect = kw.pop("aspect", 40)
    shrink = kw.pop("shrink", 0.9)
    fraction = kw.pop("fraction", 0.05)

    # Topography mask key words.
    dx = kw.pop("dx", 1.0)
    kind = kw.pop("kind", "linear")
    linewidth = kw.pop("linewidth", 1.5)

    # Station symbols key words.
    station_marker = kw.pop("station_marker", None)
    color = kw.pop("color", "k")
    offset = kw.pop("offset", -5)
    alpha = kw.pop("alpha", 0.5)

    # Figure.
    figsize = kw.pop("figsize", (12, 6))
    fig, ax = plt.subplots(figsize=figsize)
    xm, hm = gen_topomask(h, lon, lat, dx=dx, kind=kind)
    ax.plot(xm, hm, color="black", linewidth=linewidth, zorder=3)
    ax.fill_between(xm, hm, y2=hm.max(), color="0.9", zorder=3)

    if station_marker:
        ax.plot(
            x,
            [offset] * len(h),
            color=color,
            marker=station_marker,
            alpha=alpha,
            zorder=5,
        )
    ax.set_xlabel("Cross-shore distance [km]", fontsize=fontsize)
    ax.set_ylabel("Depth [m]", fontsize=fontsize)
    ax.set_ylim(offset, hm.max())
    ax.invert_yaxis()

    ax.xaxis.set_ticks_position("top")
    ax.xaxis.set_label_position("top")
    ax.yaxis.set_ticks_position("left")
    ax.yaxis.set_label_position("left")
    ax.xaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)
    ax.yaxis.set_tick_params(tickdir="out", labelsize=labelsize, pad=1)

    # Color version.
    cs = ax.contourf(
        x,
        z,
        data,
        cmap=cmap,
        levels=levels,
        alpha=1.0,
        extend=extend,
        zorder=2,
    )  # manual=True
    # Colorbar.
    cb = fig.colorbar(
        mappable=cs,
        ax=ax,
        orientation="vertical",
        aspect=aspect,
        shrink=shrink,
        fraction=fraction,
        pad=pad,
    )
    return fig, ax, cb
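
A hedged usage sketch, assuming section is a python-ctd style DataFrame of one variable (stations as columns, pressure as index) to which this method is bound:

# Hypothetical: `section` provides .lon, .lat and get_maxdepth() as in
# python-ctd's extras module.
fig, ax, cb = section.plot_section(filled=True, station_marker='o')
fig.savefig('temperature_section.png', dpi=150)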
コード例 #38
0
def figure5_lat_lon_map(cruise,
                        data_dir,
                        extent,
                        ncp,
                        results_dir,
                        plot_dir=None):
    """
    Create a map with the ship's location throughout the cruise, with the estimated biological change points overlaid.

    :param cruise: Name of the cruise.
    :param data_dir: Directory where the physical data is stored.
    :param extent: List with the longitudes and latitudes defining the extent of the map.
    :param ncp: Number of change points to use in the plot.
    :param results_dir: Directory where the estimated change points are stored.
    :param plot_dir: Directory where the figure will be saved.
    """
    print('\nCreating Figure 5')
    cps_file = glob.glob1(os.path.join(results_dir, cruise),
                          'cps_bio*rule-of-thumb_*')
    if len(cps_file) > 1:
        print('WARNING: More than 1 results file for cruise', cruise)
    cps_bio = json.load(
        open(os.path.join(results_dir, cruise, cps_file[0]),
             'r'))['cps_bio'][str(ncp)]
    phys_data = pd.read_parquet(
        os.path.join(data_dir, cruise + '_phys.parquet'))
    lats = phys_data['latitude']
    lons = phys_data['longitude']
    addl_lat = np.array(lats)[cps_bio]
    addl_lon = np.array(lons)[cps_bio]

    figures.cruise_path_plot(lats,
                             lons,
                             extent=extent,
                             addl_lat=addl_lat[:5],
                             addl_lon=addl_lon[:5],
                             addl_lat2=addl_lat[5:],
                             addl_lon2=addl_lon[5:],
                             colors='gray-blue',
                             figure_size=(9.6, 7.2),
                             save_path=os.path.join(plot_dir,
                                                    cruise + '_lat_lon_map'))

    dists = utils.compute_distances(lats, lons)
    lats_cps = [lats[cps_bio[i]] for i in range(len(cps_bio))]
    lons_cps = [lons[cps_bio[i]] for i in range(len(cps_bio))]
    dists_bio = np.inf * np.ones((len(lats_cps), len(lats_cps)))
    for i, locs in enumerate(zip(lats_cps, lons_cps)):
        for j, locs2 in enumerate(zip(lats_cps, lons_cps)):
            if i != j:
                dists_bio[i, j] = dists_bio[j, i] = gsw.distance(
                    [locs[1], locs2[1]], [locs[0], locs2[0]], 0)[0] / 1000.
    min_dists = np.min(dists_bio, axis=1)
    argmin_dists = np.argmin(dists_bio, axis=1)

    print('---------------------------')
    print('Distance traveled before reaching each change point:',
          [dists[cps_bio[i]] for i in range(len(cps_bio))])
    print(
        'Distance of each biological change point to the closest other biological change point:',
        min_dists)
    print('Indices corresponding to the closest change points:', argmin_dists)
コード例 #39
0
ファイル: Utils.py プロジェクト: cesar-rocha/MARN-5995
def Get_CTD_Section(datadir, stations):
    """ Get data from CTD section and put it 
            into a xarray DataArray """

    # Load and organize CTD profiles
    casts = np.arange(len(stations))
    lons, lats = [], []

    for station, cast in zip(stations, casts):

        # Load CTD data
        ctdfile = os.path.join(datadir, 'sam03_ctd_' + station + '.cnv')
        ctdcast = LoadCTD(ctdfile)

        # Create pandas series to be converted into a data array
        if cast == 0:
            Ts = pd.Series(data=ctdcast['temperature'],
                           index=ctdcast['pressure'],
                           name=cast)
            Ss = pd.Series(data=ctdcast['salinity'],
                           index=ctdcast['pressure'],
                           name=cast)
        else:
            Ts = pd.concat([
                Ts,
                pd.Series(data=ctdcast['temperature'],
                          index=ctdcast['pressure'],
                          name=cast)
            ],
                           axis=1)
            Ss = pd.concat([
                Ss,
                pd.Series(data=ctdcast['salinity'],
                          index=ctdcast['pressure'],
                          name=cast)
            ],
                           axis=1)

        # Attributes to feed data array
        lons.append(ctdcast['longitude'])
        lats.append(ctdcast['latitude'])

    # Dataframe to DataArray
    Txr = Ts.stack().to_xarray().rename({
        'level_0': 'pressure',
        'level_1': 'cast'
    })
    Txr.name = 'temperature'
    Sxr = Ss.stack().to_xarray().rename({
        'level_0': 'pressure',
        'level_1': 'cast'
    })
    Sxr.name = 'salinity'

    ctdsection = xr.merge([Txr, Sxr])

    # Calculate cumulative distance from the first station [km]
    dists = np.cumsum(np.hstack([0, gsw.distance(lons, lats)])) / 1e3

    # Add other dimensions
    ctdsection = ctdsection.assign_coords({
        'latitude':
        xr.DataArray(lats, coords=[casts], dims="cast"),
        'longitude':
        xr.DataArray(lons, coords=[casts], dims="cast"),
        'station':
        xr.DataArray(stations, coords=[casts], dims="cast"),
        'distance':
        xr.DataArray(dists, coords=[casts], dims="cast")
    })

    # Add physical units
    ctdsection.temperature.attrs['units'] = 'oC'
    ctdsection.salinity.attrs['units'] = 'psu'
    ctdsection.pressure.attrs['units'] = 'dbar'
    ctdsection.distance.attrs['units'] = 'km'

    return ctdsection
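
A hedged usage sketch; the directory and station IDs below are placeholders for files named sam03_ctd_<station>.cnv:

# Placeholder paths and station IDs.
section = Get_CTD_Section('data/sam03', ['001', '002', '003'])
print(section.distance.values)   # cumulative along-track distance [km]
section.temperature.plot(x='cast', y='pressure')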