Example #1
    def getTime(self, timeinfo):
        """
        Get the particle time step info
        """
        self.dt = timeinfo[2]

        self.time_track = othertime.TimeVector(
            timeinfo[0], timeinfo[1], timeinfo[2], timeformat='%Y%m%d.%H%M%S')

        self.time_track_sec = othertime.SecondsSince(self.time_track)
        self.time_sec = othertime.SecondsSince(self.time)

        self.time_index = -9999
Example #2
    def __init__(self,
                 xin,
                 yin,
                 zin,
                 tin,
                 xout,
                 yout,
                 zout,
                 tout,
                 mask=None,
                 **kwargs):
        """
        Construct the interpolation components

        **kwargs are passed straight to interpXYZ

        """
        self.is4D = True
        if zin is None:
            self.is4D = False
            self.nz = 1
        else:
            self.zin = zin
            self.zout = zout
            self.nz = zin.shape[0]

        # Create a 3D mask
        self.szxy = xin.shape
        if mask is None:
            self.mask = np.zeros((self.nz, ) + self.szxy, dtype=bool)
        else:
            self.mask = mask

        # Horizontal interpolation for each layer
        self._Fxy = []
        for kk in range(self.nz):
            if self.is4D:
                mask = self.mask[kk, ...]
            else:
                mask = self.mask
            xyin = np.vstack([xin[~mask].ravel(), yin[~mask].ravel()]).T
            xyout = np.vstack([xout.ravel(), yout.ravel()]).T
            self.nxy = xyout.shape[0]

            self._Fxy.append(interpXYZ(xyin, xyout, **kwargs))

        # Just store the other coordinates for now
        # Convert time to floats
        self.tin = othertime.SecondsSince(tin)
        self.tout = othertime.SecondsSince(tout)
        self.nt = tin.shape[0]
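A minimal construction sketch for the interpolator above. The enclosing class name (Interp4D here) is an assumption, and interpXYZ/othertime are taken from the surrounding module; the grids are placeholders.

import numpy as np
from datetime import datetime, timedelta

# Hypothetical coarse source grid and a finer target grid
xin, yin = np.meshgrid(np.linspace(0., 10., 11), np.linspace(0., 5., 6))
xout, yout = np.meshgrid(np.linspace(0., 10., 41), np.linspace(0., 5., 21))
zin = np.linspace(-20., 0., 5)
zout = np.linspace(-20., 0., 15)
tin = np.array([datetime(2020, 1, 1) + timedelta(hours=i) for i in range(24)])
tout = np.array([datetime(2020, 1, 1) + timedelta(minutes=30 * i) for i in range(47)])

F = Interp4D(xin, yin, zin, tin, xout, yout, zout, tout)  # class name assumed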
Example #3
def window_index_time(t, windowsize, overlap):
    """
    Determines the indices for window start and end points of a time vector
    
    The time vector does not need to be evenly spaced
    
    Inputs:
        t - list or array of datetime objects
        windowsize - length of the window [seconds]
        overlap - number of overlap points [seconds]
        
    Returns: pt1,pt2 the start and end indices of each window
    """

    tsec = othertime.SecondsSince(t)

    t1 = tsec[0]
    t2 = t1 + windowsize
    pt1 = [0]
    pt2 = [np.searchsorted(tsec, t2)]
    while t2 < tsec[-1]:
        t1 = t2 - overlap
        t2 = t1 + windowsize

        pt1.append(np.searchsorted(tsec, t1))
        pt2.append(np.searchsorted(tsec, t2))

    return pt1, pt2
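A short usage sketch for the windowing helper above, assuming it is imported together with the othertime module it depends on.

from datetime import datetime, timedelta

# Hourly datetime vector spanning two days
t = [datetime(2020, 1, 1) + timedelta(hours=i) for i in range(48)]

# 6-hour windows (21600 s) overlapping by 1 hour (3600 s)
pt1, pt2 = window_index_time(t, windowsize=6 * 3600, overlap=3600)

# Each pair of indices selects one window of the original vector
for i1, i2 in zip(pt1, pt2):
    segment = t[i1:i2]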
Example #4
    def __init__(self, t, y, **kwargs):

        self.__dict__.update(kwargs)
        self.t = t  # independent variable (t,x, etc)
        self.y = y  # dependent variable

        self.shape = self.y.shape
        self.ndim = len(self.shape)

        self.tsec = othertime.SecondsSince(self.t, basetime=self.basetime)

        self.ny = np.size(self.y)

        # make sure the original data is a masked array
        if not isinstance(self.y, np.ma.core.MaskedArray):
            mask = ~np.isfinite(self.y)
            self.y = np.ma.MaskedArray(self.y, mask=mask)

        self._checkDT()

        self.Nt = self.t.shape[0]

        # Make sure that time is the last dimension
        if self.y.shape[-1] != self.Nt:
            self.y = self.y.T
Example #5
def tide_pred(modfile,lon,lat,time,z=None,conlist=None):
    """
    Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
    
    """
    
    # Read and interpolate the constituents
    u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
    
    # Initialise the output arrays
    sz = lon.shape
    nx = np.prod(sz)
    nt = time.shape[0]
    ncon = omega.shape[0]
    
    h_re = h_re.reshape((ncon,nx))
    h_im = h_im.reshape((ncon,nx))
    u_re = u_re.reshape((ncon,nx))
    u_im = u_im.reshape((ncon,nx))
    v_re = v_re.reshape((ncon,nx))
    v_im = v_im.reshape((ncon,nx))
        
    # Calculate nodal correction to amps and phases
    #baseyear = time[0].year
    #t1992 = othertime.SecondsSince(datetime(baseyear,1,1),basetime=datetime(1992,1,1))/86400.0
    t1992 = othertime.SecondsSince(time[0],basetime=datetime(1992,1,1))/86400.0
    
    pu,pf,v0u = nodal(t1992+48622.0,conlist)
       
    # Calculate the time series
    tsec = othertime.SecondsSince(time,basetime=datetime(1992,1,1)) # Needs to be referenced to 1992
    h=np.zeros((nt,nx))
    u=np.zeros((nt,nx))
    v=np.zeros((nt,nx))
    for nn,om in enumerate(omega):
        for ii in range(0,nx):
            h[:,ii] += pf[nn]*h_re[nn,ii] * np.cos(om*tsec + v0u[nn] + pu[nn]) - \
                pf[nn]*h_im[nn,ii] * np.sin(om*tsec + v0u[nn] + pu[nn])
                
            u[:,ii] += pf[nn]*u_re[nn,ii] * np.cos(om*tsec + v0u[nn] + pu[nn]) - \
                pf[nn]*u_im[nn,ii] * np.sin(om*tsec + v0u[nn] + pu[nn])
                
            v[:,ii] += pf[nn]*v_re[nn,ii] * np.cos(om*tsec + v0u[nn] + pu[nn]) - \
                pf[nn]*v_im[nn,ii] * np.sin(om*tsec + v0u[nn] + pu[nn])
    
    szo = (nt,)+sz
    return h.reshape(szo), u.reshape(szo), v.reshape(szo)
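A hedged example of calling tide_pred as defined above; the OTIS-format model file path is a placeholder, but the argument order follows the signature shown here.

import numpy as np
from datetime import datetime, timedelta

modfile = 'DATA/Model_tpxo7.2'   # placeholder OTIS constituent file

# Two prediction points and an hourly time vector for January 2012
lon = np.array([151.2, 151.4])
lat = np.array([-33.9, -33.8])
time = np.array([datetime(2012, 1, 1) + timedelta(hours=i) for i in range(31 * 24)])

h, u, v = tide_pred(modfile, lon, lat, time)
print(h.shape)   # (744, 2): (nt,) + lon.shape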
Example #6
    def filter2nc(self,
                  outfile,
                  tstart,
                  tend,
                  substep=12,
                  varlist=None,
                  **kwargs):
        """
        Filters the variables in the list, varlist, and outputs the results to netcdf

        """
        self.__dict__.update(kwargs)

        if tstart == -1:
            self.tstep = np.arange(0, self.Nt, 1)
        else:
            self.tstep = self.getTstep(tstart, tend)

        if varlist is None:
            varlist = ['eta', 'uc', 'vc', 'w']

        # Create the output file
        self.writeNC(outfile)

        # Create the output variables
        for vv in varlist:
            print('Creating variable: %s' % vv)
            self.create_nc_var(outfile, vv, ugrid[vv]['dimensions'], ugrid[vv]['attributes'],\
                dtype=ugrid[vv]['dtype'],zlib=ugrid[vv]['zlib'],complevel=ugrid[vv]['complevel'],fill_value=ugrid[vv]['fill_value'])

        self.create_nc_var(outfile, 'time', ugrid['time']['dimensions'],
                           ugrid['time']['attributes'])

        # Loop through and filter each variable (do layer by layer on 3D variables for sake of memory)
        nc = Dataset(outfile, 'a')

        # Create the time variable first
        nctime = othertime.SecondsSince(
            self.time[self.tstep[0]:self.tstep[-1]:substep])

        nc.variables['time'][:] = nctime

        for vv in varlist:
            print('Filtering variable: %s' % vv)

            if len(ugrid[vv]['dimensions']) == 2:
                dataf = self.__call__(tstart, tend, varname=vv)
                nc.variables[vv][:] = dataf[::substep, :].copy()
            elif len(ugrid[vv]['dimensions']) == 3:
                for kk in range(0, self.Nkmax):
                    print('   layer: %d' % kk)
                    self.klayer = [kk]
                    dataf = self.__call__(tstart, tend, varname=vv)
                    nc.variables[vv][:, kk, :] = dataf[::substep, :].copy()

        nc.close()
        print('#####\nComplete - Filtered data written to: \n%s \n#####' %
              outfile)
Example #7
    def interp(self,
               timein,
               method='linear',
               timeformat='%Y%m%d.%H%M%S',
               axis=-1):
        """
        Interpolate the data onto an equally spaced vector
        
        timein is either:
            (3x1 tuple) - (tstart,tend,dt)
                tstart and tend - string with format 'yyyymmdd.HHMMSS'
        or
            datetime vector
        
        method - method passed to interp1d
               - use 'nearest' to preserve masking in gap regions
        """

        # Create the time vector
        try:
            tstart = timein[0]
            tend = timein[1]
            dt = timein[2]
            tnew = othertime.TimeVector(tstart,
                                        tend,
                                        dt,
                                        timeformat=timeformat)
        except:
            tnew = timein
            dt = (tnew[1] - tnew[0]).total_seconds()

        if method == 'nearest':
            # Nearest neighbour doesn't use interp1d to preserve mask
            self._evenly_dist_data(dt)
            tnew, output = self.subset(tnew[0], tnew[-1])

        else:

            t = othertime.SecondsSince(tnew, basetime=self.basetime)
            # Don't include nan points
            if self.ndim > 1:
                # Interpolate multidimensional arrays without a mask
                F = interpolate.interp1d(self.tsec,self.y,kind=method,axis=axis,\
                    bounds_error=False,fill_value=0)

                output = F(t)
            else:
                #mask = np.isnan(self.y) == False
                mask = ~self.y.mask
                F = interpolate.interp1d(self.tsec[mask],self.y[mask],kind=method,axis=axis,\
                bounds_error=False,fill_value=0)

                output = F(t)

        return tnew, output
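A usage sketch for the interp method above, assuming it belongs to the timeseries class whose constructor appears in Example #4 (the class name and the basetime keyword are assumptions).

import numpy as np
from datetime import datetime, timedelta

# Hourly record wrapped in the timeseries class (class name assumed)
t = np.array([datetime(2019, 6, 1) + timedelta(hours=i) for i in range(100)])
y = np.sin(2 * np.pi * np.arange(100) / 24.0)
TS = timeseries(t, y, basetime=datetime(2019, 1, 1))

# Resample onto a regular 30-minute grid between two 'yyyymmdd.HHMMSS' strings
tnew, ynew = TS.interp(('20190601.000000', '20190604.000000', 1800.0))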
Example #8
def tide_pred_old(modfile,lon,lat,time,z=None,conlist=None):
    """
	
	### UNUSED ###
    Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
    
    """
    
    # Read and interpolate the constituents
    u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
    
    # Initialise the output arrays
    sz = lon.shape
    nx = np.prod(sz)
    nt = time.shape[0]
    ncon = omega.shape[0]
    
    h_re = h_re.reshape((ncon,nx))
    h_im = h_im.reshape((ncon,nx))
    u_re = u_re.reshape((ncon,nx))
    u_im = u_im.reshape((ncon,nx))
    v_re = v_re.reshape((ncon,nx))
    v_im = v_im.reshape((ncon,nx))
    
    # Nodal correction to amps and phases here...
    baseyear = time[0].year
    amp, phase = cart2pol(h_re, h_im)
    amp,phase = nodal_correction(baseyear,conlist, amp, phase)
    h_re, h_im = pol2cart(amp, phase)
    
    amp, phase = cart2pol(u_re, u_im)
    amp, phase = nodal_correction(baseyear,conlist, amp, phase)
    u_re, u_im = pol2cart(amp, phase)
        
    amp, phase = cart2pol(v_re, v_im)
    amp, phase = nodal_correction(baseyear,conlist, amp, phase)
    v_re, v_im = pol2cart(amp, phase)
    
    
    # Calculate the time series
    tsec = othertime.SecondsSince(time,basetime=datetime(baseyear,1,1))
    h=np.zeros((nt,nx))
    u=np.zeros((nt,nx))
    v=np.zeros((nt,nx))
    for nn,om in enumerate(omega):
        for ii in range(0,nx):
            h[:,ii] += h_re[nn,ii] * np.cos(om*tsec) + h_im[nn,ii] * np.sin(om*tsec)
            u[:,ii] += u_re[nn,ii] * np.cos(om*tsec) + u_im[nn,ii] * np.sin(om*tsec)
            v[:,ii] += v_re[nn,ii] * np.cos(om*tsec) + v_im[nn,ii] * np.sin(om*tsec)
    
    szo = (nt,)+sz
    return h.reshape(szo), u.reshape(szo), v.reshape(szo)
Example #9
    def __init__(self, data, dtime, **kwargs):
        """
	Time series w/ specific IO methods
	"""
        self.__dict__.update(kwargs)

        TimeSeries.__init__(self, data, index=dtime)
        #super(ObsTimeSeries,self).__init__(data,index=dtime)

        # Time coordinates
        self.nt = self.index.shape
        self.tsec = othertime.SecondsSince(self.index,\
         basetime = pd.datetime(self.baseyear,1,1))
Example #10
    def __init__(self, x, y, z, timeinfo, tformat='%Y%m%d.%H%M%S', **kwargs):

        metfile.__init__(self, mode='create')

        self.x = x
        self.y = y
        self.z = z
        self.time =\
            otime.TimeVector(timeinfo[0],timeinfo[1],timeinfo[2],timeformat=tformat)
        self.nctime = otime.SecondsSince(self.time)

        self.varnames = [
            'Uwind', 'Vwind', 'Tair', 'Pair', 'RH', 'rain', 'cloud'
        ]

        # Update all of the metdata objects
        for vv in self.varnames:
            self.updateMetData(self[vv])
Example #11
    def __call__(self,tstart,tend,varnames=['eta','uc','vc']):
        """
        Actually does the harmonic calculation for the model time steps in tsteps
        (or at least calls the class that does the calculation)
        
        Set tstart = -1 to do all steps
        """
        if tstart == -1:
            self.tstep=np.arange(0,self.Nt,1)
        else:
            self.tstep=self.getTstep(tstart,tend)
        
        time = othertime.SecondsSince(self.time[self.tstep])
        
        self.varnames=varnames
        self._prepDict(varnames)

        
        for vv in varnames:
            if vv in ['ubar','vbar']:
                ndim=2
                if vv=='ubar': self.variable='uc'
                if vv=='vbar': self.variable='vc'
            else:
                ndim = self._returnDim(vv)
                self.variable=vv
                
            if ndim  == 2 or self.Nkmax==1:
                print('Loading data from %s...' % vv)
                if vv in ['ubar','vbar']:
                    data=self.loadDataBar()
                else:
                    data=self.loadData()
                print('Performing harmonic fit on variable, %s...' % (self.variable))
                self.Amp[vv], self.Phs[vv], self.Mean[vv] = harmonic_fit(time,data,self.frq,phsbase=self.reftime)
                
            elif ndim == 3:
                for k in range(self.Nkmax):
                    self.klayer=[k]
                    print('Loading data...')
                    data = self.loadData()
                    print('Performing harmonic fit on variable, %s, layer = %d of %d...' % (self.variable, self.klayer[0], self.Nkmax))
                    self.Amp[vv][:,k,:], self.Phs[vv][:,k,:], self.Mean[vv][k,:] = harmonic_fit(time,data,self.frq,phsbase=self.reftime)
Example #12
def calc_agebin(binfile, ncfile, polyfile, ntout):
    """
    Calculate the particle age from a binary file and save to netcdf
    """
    # Load the polygon from a shapefile
    xypoly, field = readShpPoly(polyfile)

    # Load the binary file object
    PTM = PtmBin(binfile)

    # Count the number of particles from the first time step
    time, pdata = PTM.read_timestep()
    N = pdata.shape[0]
    tsec = othertime.SecondsSince(PTM.time)
    dt = tsec[1] - tsec[0]

    # Initialize the age particle object
    Age = ParticleAge(xypoly[0], N)

    # Loop through
    outctr = ntout
    ncctr = 0
    for tt in range(PTM.nt - 1):
        # Read the current time step
        time, pdata = PTM.read_timestep(ts=tt)

        # Update the age variable
        Age.update_age(pdata['x'][:, 0], pdata['x'][:, 1], pdata['x'][:, 2],
                       dt)

        # Write to netcdf
        if outctr == ntout:
            Age.write_nc(time, ncctr, ncfile=ncfile)
            ncctr += 1
            outctr = 0
        outctr += 1

    print('Done.')
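A minimal call sketch for calc_agebin; all three file names below are placeholders.

# ntout controls how many PTM steps pass between netcdf writes
calc_agebin('particles_0000.bin', 'particle_age.nc', 'age_polygon.shp', ntout=6)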
Example #13
    def write_nc(self, time, tstep, ncfile=None):
        """
        Writes the particle locations at the output time step, 'tstep'
        """
        if self.verbose:
            print('\tWriting netcdf output at tstep: %d...' % tstep)

        # Check if the file is opened
        if '_nc' not in self.__dict__:
            if ncfile is None:
                raise Exception('must set "ncfile" on call to write_nc().')
            else:
                self.create_nc(ncfile)

        t = othertime.SecondsSince(time, basetime=self.basetime)

        self._nc.variables['tp'][tstep] = t
        self._nc.variables['xp'][:, tstep] = self.X
        self._nc.variables['yp'][:, tstep] = self.Y
        self._nc.variables['zp'][:, tstep] = self.Z

        if self.has_age:
            self._nc.variables['age'][:, tstep] = self.age
            self._nc.variables['agemax'][:, tstep] = self.agemax
Example #14
def ProfilePlot(t,y,z,scale=86400,\
        axis=0,color=[0.5,0.5,0.5],xlim=None,units='m/s',scalebar=1.0):
    """
    Plot a series of vertical profiles as a time series
    
    scale - Sets 1 unit = scale (seconds)
    
    See this page on formatting:
        http://matplotlib.org/examples/pylab_examples/date_index_formatter.html
    """
    from matplotlib import collections
    from matplotlib.lines import Line2D
    from matplotlib.ticker import Formatter

    class MyFormatter(Formatter):
        def __init__(self, dates, fmt='%b %d %Y'):
            self.fmt = fmt
            self.dates = dates

        def __call__(self, x, pos=0):
            'Return the label for time x s'
            return datetime.strftime(
                datetime(1990, 1, 1) + timedelta(seconds=x), self.fmt)

    tsec = othertime.SecondsSince(t)
    formatter = MyFormatter(tsec)

    y = np.swapaxes(y, 0, axis)

    lines = []
    line2 = []
    for ii, tt in enumerate(tsec):
        #xplot = set_scale(y[:,ii],tt)
        xplot = tt + y[:, ii] * scale
        lines.append(np.array((xplot, z)).T)
        line2.append(np.array([[tt, tt], [z[0], z[-1]]]).T)

    LC1 = collections.LineCollection(lines, colors=color, linewidths=1.5)
    LC2 = collections.LineCollection(line2, colors='k',
                                     linestyles='dashed')  # Zero axis

    ax = plt.gca()
    ax.add_collection(LC1)
    ax.add_collection(LC2)
    ax.set_ylim((z.min(), z.max()))
    ax.xaxis.set_major_formatter(formatter)
    if xlim is None:
        xlim = (tsec[0] - scale / 2, tsec[-1] + scale / 2)
    else:
        xlim = othertime.SecondsSince(xlim)
    ax.set_xlim(xlim)
    plt.xticks(rotation=17)

    ###
    # Add a scale bar
    ###

    # Compute the scale bar size in dimensionless units
    if scalebar is not None:
        xscale = scalebar * scale / (xlim[-1] - xlim[0])
        x0 = 0.1
        y0 = 0.8
        dy = 0.02
        ax.add_line(
            Line2D([x0, x0 + xscale], [y0, y0],
                   linewidth=0.5,
                   color='k',
                   transform=ax.transAxes))
        #Little caps
        ax.add_line(
            Line2D([x0, x0], [y0 - dy, y0 + dy],
                   linewidth=0.5,
                   color='k',
                   transform=ax.transAxes))
        ax.add_line(
            Line2D([x0 + xscale, x0 + xscale], [y0 - dy, y0 + dy],
                   linewidth=0.5,
                   color='k',
                   transform=ax.transAxes))
        plt.text(x0,y0+0.05,'Scale %3.1f %s'%(scalebar,units),\
            transform=ax.transAxes)

    return ax
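A usage sketch for ProfilePlot, assuming the function's module-level dependencies (numpy, matplotlib, othertime) are available; the profile data are synthetic placeholders.

import numpy as np
from datetime import datetime, timedelta
import matplotlib.pyplot as plt

# Ten daily profiles on a 20-level vertical grid; shape (nz, nt) matches axis=0
t = [datetime(2015, 3, 1) + timedelta(days=i) for i in range(10)]
z = np.linspace(-50.0, 0.0, 20)
y = 0.2 * np.random.randn(20, 10)

ax = ProfilePlot(t, y, z, scale=86400, units='m/s', scalebar=0.5)
plt.show()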
Example #15
def runmpi(ncfile,
           outfile,
           tstart,
           tend,
           dt,
           dtout,
           x,
           y,
           z,
           agepoly=None,
           method='nearest',
           is3D=False):

    # Generate a list of tuples for the time info
    timevec = othertime.TimeVector(tstart,
                                   tend,
                                   dtout,
                                   timeformat='%Y%m%d.%H%M%S')
    timevec_sec = othertime.SecondsSince(timevec)
    timeinfos = []
    for ii in range(timevec.shape[0] - 1):
        if ii == 0:
            timestart = datetime.strftime(timevec[ii], '%Y%m%d.%H%M%S')
        else:
            timestart = datetime.strftime(timevec[ii] + timedelta(seconds=dt),
                                          '%Y%m%d.%H%M%S')

        timeend = datetime.strftime(timevec[ii + 1], '%Y%m%d.%H%M%S')
        timeinfos.append((timestart, timeend, dt))

    # Initialise the particle tracking object
    print('Initialising the particle tracking object on processor: %d...' %
          (comm.rank))
    sun = SunTrack(ncfile,
                   interp_method='mesh',
                   interp_meshmethod=method,
                   is3D=is3D)

    # Initialise the age values
    if agepoly is not None:
        calcage = True
    else:
        calcage = False
        age = None
        agemax = None

    # On rank = 0 only
    if comm.rank == 0:
        n = int(x.shape[0])

        # Check if the number of processes divides evenly into the array length
        rem = np.mod(n, size)
        if rem == 0:
            # length of each process's portion of the original vector
            local_n = np.array([n // size])
        else:
            print('Padding array with extra values...')
            nextra = size - rem
            xpad = np.zeros((nextra, ))
            x = np.hstack((x, xpad))
            y = np.hstack((y, xpad))
            z = np.hstack((z, xpad))

            n = n + nextra
            local_n = np.array([n // size])

        print('Size of original vector = %d\nSize of split vector = %d' %
              (n, local_n))

        if calcage:
            age = np.zeros_like(x)
            agemax = np.zeros_like(x)

        # Initialise the output netcdf file
        sun.initParticleNC(outfile, n, age=calcage)

    else:
        x = None
        y = None
        z = None
        local_n = np.array([0])
        if calcage:
            age = None
            agemax = None

    comm.Barrier()
    t_start = MPI.Wtime()
    # Broadcast the particle tracking object everywhere
    #sun = comm.bcast(sun, root=0) # !! Doesn't work for this object !!

    # Scatter the x,y,z locations up amongst all processors
    #communicate local array size to all processes
    comm.Bcast(local_n, root=0)

    #initialize the local particle arrays as numpy arrays
    x_local = np.zeros(local_n)
    y_local = np.zeros(local_n)
    z_local = np.zeros(local_n)
    if calcage:
        age_local = np.zeros(local_n)
        agemax_local = np.zeros(local_n)

    #divide up vectors
    comm.Scatter(x, x_local, root=0)
    comm.Scatter(y, y_local, root=0)
    comm.Scatter(z, z_local, root=0)
    if calcage:
        comm.Scatter(age, age_local, root=0)
        comm.Scatter(agemax, agemax_local, root=0)
    else:
        age_local = None
        agemax_local = None

    ###
    # Testing plot of particle positions
    ###
    #if comm.rank ==0:
    #   import matplotlib.pyplot as plt
    #    plt.figure
    #    plt.plot(x_local,y_local,'.')
    #    plt.show()
    #comm.Barrier()

    ###
    # Write out the initial location to netcdf
    if comm.rank == 0:
        sun.writeParticleNC(outfile,
                            x,
                            y,
                            z,
                            timevec_sec[0],
                            0,
                            age=age,
                            agemax=agemax)
    comm.Barrier()

    ###
    # ... Call the particle tracking module on each processor
    for ii, timeinfo in enumerate(timeinfos):
        if comm.rank == 0:
            sun(x_local,
                y_local,
                z_local,
                timeinfo,
                agepoly=agepoly,
                age=age_local,
                agemax=agemax_local,
                verbose=True)
        else:
            sun(x_local,
                y_local,
                z_local,
                timeinfo,
                agepoly=agepoly,
                age=age_local,
                agemax=agemax_local,
                verbose=False)

        # Send the particles back to their main array
        comm.Barrier()

        comm.Gather(sun.particles['X'], x, root=0)
        comm.Gather(sun.particles['Y'], y, root=0)
        comm.Gather(sun.particles['Z'], z, root=0)
        if calcage:
            comm.Gather(sun.particles['age'], age, root=0)
            comm.Gather(sun.particles['agemax'], agemax, root=0)

        # Write the output to a netcdf file
        if comm.rank == 0:
            sun.writeParticleNC(outfile,
                                x,
                                y,
                                z,
                                sun.time_track_sec[-1],
                                ii + 1,
                                age=age,
                                agemax=agemax)

        comm.Barrier()

    t_diff = MPI.Wtime() - t_start  ### Stop stopwatch ###
    if comm.rank == 0:
        print(78 * '=' + '\n' + 78 * '=')
        print('Completed particle tracking using %d cores in %6.2f seconds.' %
              (comm.size, t_diff))
        print(78 * '=' + '\n' + 78 * '=')
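A hedged driver sketch for runmpi; the netcdf file names and seed positions are placeholders, and the script would be launched under MPI (e.g. mpirun -n 4 python track_particles.py).

import numpy as np

# Placeholder seed locations (model coordinates) released at the surface
x0 = np.random.uniform(3.30e5, 3.40e5, 1000)
y0 = np.random.uniform(3.88e6, 3.89e6, 1000)
z0 = np.zeros_like(x0)

runmpi('suntans_output.nc', 'particles.nc',
       '20140101.000000', '20140107.000000',
       120.0, 3600.0,          # dt and dtout [seconds]
       x0, y0, z0, is3D=False)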
Example #16
def tide_pred_correc(modfile,lon,lat,time,dbfile,ID,z=None,conlist=None):
    """
    Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
    
    Applies an amplitude and phase correction based on a time series
    """
    from timeseries import timeseries, loadDBstation
    
    print('Calculating tidal correction factors from time series...')
    # Load using the timeseries module
    t0 = datetime.strftime(time[0],'%Y%m%d.%H%M%S')
    t1 = datetime.strftime(time[-1],'%Y%m%d.%H%M%S')
    dt = time[1]-time[0]
    
    print(t0, t1, dt.total_seconds())
    timeinfo = (t0,t1,dt.total_seconds())
    TS,meta = loadDBstation(dbfile,ID,'waterlevel',timeinfo=timeinfo,filttype='low',cutoff=2*3600,output_meta=True)
    lonpt=meta['longitude']
    latpt=meta['latitude']
    print(lonpt, latpt)
    
    # Extract the OTIS tide prediction
    u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lonpt,latpt)
    h_amp = np.abs(h_re+1j*h_im)[:,0]
    h_phs = np.angle(h_re+1j*h_im)[:,0]
    
    # Harmonic analysis of observation time series
    amp, phs, frq, frqnames, htide = TS.tidefit(frqnames=conlist)
    TS_harm = timeseries(time,htide)
    residual = TS.y - htide
    
    # Calculate the amp and phase corrections
    dphs = phs - h_phs + np.pi
    damp = amp/h_amp

    # Extract the data along the specified points
    u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
    
    h_amp = np.abs(h_re+1j*h_im)
    h_phs = np.angle(h_re+1j*h_im)
    u_amp = np.abs(u_re+1j*u_im)
    u_phs = np.angle(u_re+1j*u_im)
    v_amp = np.abs(v_re+1j*v_im)
    v_phs = np.angle(v_re+1j*v_im)
        
    # Initialise the output arrays
    sz = lon.shape
    nx = np.prod(sz)
    nt = time.shape[0]

    h=np.zeros((nt,nx))
    u=np.zeros((nt,nx))
    v=np.zeros((nt,nx))
    
    # Rebuild the time series
    #tsec=TS_harm.tsec - TS_harm.tsec[0]
    tsec = othertime.SecondsSince(time,basetime=time[0])
    print(tsec[0])
    for nn,om in enumerate(omega):
        for ii in range(0,nx):
            h[:,ii] += damp[nn]*h_amp[nn,ii] * np.cos(om*tsec - (h_phs[nn,ii] + dphs[nn]))
            u[:,ii] += damp[nn]*u_amp[nn,ii] * np.cos(om*tsec - (u_phs[nn,ii] + dphs[nn]))
            v[:,ii] += damp[nn]*v_amp[nn,ii] * np.cos(om*tsec - (v_phs[nn,ii] + dphs[nn]))
            
    szo = (nt,)+sz
    return h.reshape(szo), u.reshape(szo), v.reshape(szo), residual
Example #17
def harmonic_fit(t, X, frq, mask=None, axis=0, phsbase=None):
    """
    Least-squares harmonic fit on an array, X, with frequencies, frq. 
    
    X - vector [Nt] or array [Nt, (size)]
    t - vector [Nt]
    frq - vector [Ncon]
    mask - array [(size non-time X)]
    phsbase - phase offset
    
    where, dimension with Nt should correspond to axis = axis.
    """

    t = np.asarray(t)

    # Reshape the array sizes
    X = X.swapaxes(0, axis)
    sz = X.shape
    lenX = np.prod(sz[1:])

    if not len(t) == sz[0]:
        raise ValueError('length of t (%d) must equal dimension of X (%s)' %
                         (len(t), sz[0]))

    X = np.reshape(X, (sz[0], lenX))

    if mask is not None:
        mask = np.reshape(mask, (lenX, ))
    else:
        mask = np.ones((lenX, ))

    frq = np.array(frq)
    Nfrq = frq.shape[0]

    def buildA(t, frq):
        """
        Construct matrix A
        """
        nt = t.shape[0]
        nf = frq.shape[0]
        nff = nf * 2 + 1
        A = np.ones((nt, nff))
        for ff in range(0, nf):
            A[:, ff * 2 + 1] = np.cos(frq[ff] * t)
            A[:, ff * 2 + 2] = np.sin(frq[ff] * t)

        return A

    def lstsqnumpy(A, y):
        """    
        Solve the least square problem
        
        Return:
            the complex amplitude 
            the mean
        """
        N = A.shape[1]
        b = np.linalg.lstsq(A, y)
        A = b[0][1::2]
        B = b[0][2::2]

        return A + 1j * B, b[0][0::N]

    def phsamp(C):
        return np.abs(C), np.angle(C)

    # Least-squares matrix approach
    A = buildA(t, frq)
    C, C0 = lstsqnumpy(A, X)  # This works on all columns of X!!
    Amp, Phs = phsamp(C)

    # Reference the phase to some time
    if phsbase is not None:
        base = othertime.SecondsSince(phsbase)
        phsoff = phase_offset(frq, t[0], base)
        phsoff = np.repeat(phsoff.reshape((phsoff.shape[0], 1)), lenX, axis=1)
        Phs = np.mod(Phs + phsoff, 2 * np.pi)  # apply the offset to the returned phases

    # Non-vectorized method (~20x slower)
    #    Amp = np.zeros((Nfrq,lenX))
    #    Phs = np.zeros((Nfrq,lenX))
    #    for ii in range(0,lenX):
    #        if mask[ii]==True:
    #            C = lstsqnumpy(A,X[:,ii])
    #            # Calculate the phase and amplitude
    #            am, ph= phsamp(C)
    #            Amp[:,ii] = am; Phs[:,ii] = ph

    # reshape the array
    Amp = np.reshape(Amp, (Nfrq, ) + sz[1:])
    Phs = np.reshape(Phs, (Nfrq, ) + sz[1:])
    C0 = np.reshape(C0, sz[1:])

    # Output back along the original axis
    return Amp.swapaxes(axis, 0), Phs.swapaxes(axis, 0), C0.swapaxes(axis, 0)
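A small self-check for harmonic_fit on a synthetic record; the M2 frequency value and noise level are illustrative only.

import numpy as np

# 30 days of hourly samples (time in seconds), two columns of data
t = np.arange(0.0, 30 * 86400.0, 3600.0)
om = 2 * np.pi / (12.42 * 3600.0)        # M2 angular frequency [rad/s]
X = 0.8 * np.cos(om * t - 0.4)[:, None] + 0.05 * np.random.randn(t.size, 2)

Amp, Phs, Mean = harmonic_fit(t, X, [om])
print(Amp)   # both columns recover ~0.8
print(Phs)   # both columns recover ~0.4 rad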