Code Example #1
    def testfeq_equal(self):
        """feq should return true when they are equal"""
        val1 = 1.1234
        val2 = 1.1235
        self.assertTrue(tb.feq(val1, val2, 0.0001))
        numpy.testing.assert_array_equal(
            [False, True, False, False],
            tb.feq([1., 2., 3., 4.],
                   [1.25, 2.05, 2.2, 500.1],
                   0.1)
            )
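
From these tests, tb.feq evidently returns True where two values differ by no more than the supplied tolerance, elementwise for array inputs. A minimal sketch of that comparison (a hypothetical stand-in inferred from the tests above, not spacepy's actual source):

import numpy as np

def feq_sketch(x, y, precision):
    """Fuzzy equality: True where |x - y| <= precision, elementwise."""
    return np.abs(np.asarray(x) - np.asarray(y)) <= precision

# matches the behaviour exercised by the tests above
assert feq_sketch(1.1234, 1.1235, 0.0001)
assert list(feq_sketch([1., 2., 3., 4.],
                       [1.25, 2.05, 2.2, 500.1],
                       0.1)) == [False, True, False, False]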
Code Example #2
File: data_assimilation.py Project: yugidoge/spacepy
def addmodelerror_old(dd, A, y, L):
    """
    this routine will add a standard error to the ensemble states
    """
    Lgrid = dd['model']['Lgrid']
    dL = Lgrid[1] - Lgrid[0]
    nens = int(dd['kalman']['nens'])
    #print(y, L)

    radius = 1

    for Lcenter, yval in zip(L, y):
        L1 = np.max((Lcenter - radius, Lgrid[0]))
        L2 = np.min((Lcenter + radius, Lgrid[-1]))
        #print Lcenter, Lcenter+radius, Lgrid[-1], min((Lcenter+radius,Lgrid[-1])), (L2-L1)/dL
        NLs = int(round((L2 - L1) / dL) + 1)
        for Lpos in np.linspace(L1, L2, NLs):
            #print dL, L1, L2, NLs
            index = np.where(feq(Lpos, Lgrid))  # use floating-point comparison
            stdev = 0.5 * np.abs(yval - np.mean(A[index, :]))
            #print Lpos
            center = np.reshape(A[index, :], (nens,))
            A[index, :] = np.random.normal(center, scale=stdev)
            # now check if any below 1e-99 and repeat
            for i in range(nens):
                icnt = 0
                while A[index, i] < 1e-99:
                    #A[index,i] = np.random.normal( center[i], scale=stdev)
                    A[index, i] = 10**np.random.normal(np.log10(center[i]),
                                                       scale=0.001)
                    icnt += 1
                    if icnt > 1000:
                        print('too many iterations')
                        break
    return A
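
The inner while loop redraws in log10 space so that resampled ensemble values stay strictly positive. The same idea as a standalone sketch (the function name and defaults here are illustrative, not part of spacepy):

import numpy as np

def perturb_positive(center, log_scale=0.001, floor=1e-99, max_iter=1000):
    """Redraw a value in log10 space until it clears `floor`."""
    for _ in range(max_iter):
        val = 10 ** np.random.normal(np.log10(center), scale=log_scale)
        if val >= floor:
            return val
    raise RuntimeError('too many iterations')

print(perturb_positive(3.5e-7))  # close to 3.5e-7 and strictly positive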
Code Example #3
File: data_assimilation.py Project: yugidoge/spacepy
def addmodelerror_old2(dd, A, y, L):
    """
    this routine will add a standard error to the ensemble states
    """
    from numpy import where, random, mean, reshape, abs, max, min, linspace

    Lgrid = dd['model']['Lgrid']
    dL = Lgrid[1] - Lgrid[0]
    nens = int(dd['kalman']['nens'])
    #print y, L

    radius = 1

    for Lcenter, yval in zip(L, y):
        L1 = max((Lcenter - radius, Lgrid[0]))
        L2 = min((Lcenter + radius, Lgrid[-1]))
        #print Lcenter, Lcenter+radius, Lgrid[-1], min((Lcenter+radius,Lgrid[-1])), (L2-L1)/dL
        NLs = int(round((L2 - L1) / dL) + 1)
        for Lpos in linspace(L1, L2, NLs):
            #print dL, L1, L2, NLs
            index = where(feq(Lpos, Lgrid))  # use floating-point comparison
            stdev = 0.5 * abs(yval - mean(A[index, :]))
            #print Lpos
            center = reshape(A[index, :], (nens,))
            A[index, :] = random.normal(center, scale=stdev)
            # now check if any below 1e-99 and repeat
            for i in range(nens):
                icnt = 0
                while A[index, i] < 1e-99:
                    A[index, i] = random.normal(center[i], scale=stdev)
                    icnt += 1
                    if icnt > 1000:
                        print('too many iterations')
                        break
    return A
Code Example #4
File: data_assimilation.py Project: yugidoge/spacepy
def average_window(PSDdata, Lgrid):
    """
    combine observations on same L shell in

    Parameters
    ==========
    model :

    PSDdata :

    HAp :

    Returns
    =======
    out :

    Examples
    ========
    """
    # sort observations first in L
    idx = PSDdata['Lstar'].argsort()
    Lobs = PSDdata['Lstar'][idx]
    y = PSDdata['PSD'][idx]

    # map Lobs onto closest grid point
    for i in range(len(Lobs)):
        Lobs[i] = Lgrid[np.argmin(np.abs(Lobs[i] - Lgrid))]

    # identify unique grid-points of Lobs
    tmpLobs = np.unique(Lobs)

    # declare average observation array
    tmpy = np.zeros_like(tmpLobs)

    # run through all unique grid-points and compute average observation
    for i, iL in enumerate(tmpLobs):
        # identify index of each unique grid-point
        idx = np.where(feq(iL, Lobs))
        # compute average observation for each unique grid-point
        tmpy[i] = np.average(y[idx])

    # walk through all grid points and find how many obs available
    #for i, iL in enumerate(Lgrid):
    #   idx = np.where( iL == Lobs[:])[0]

    # assign Lobs and y
    Lobs = tmpLobs
    y = tmpy
    return Lobs, y
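
A quick illustration with made-up data (assuming numpy as np and this average_window are in scope): two observations near L*=4.1 map to the same grid point and are averaged.

import numpy as np

Lgrid = np.arange(1.0, 7.25, 0.25)
PSDdata = {'Lstar': np.array([4.1, 4.12, 5.3]),
           'PSD': np.array([2e-7, 4e-7, 1e-7])}
Lobs, y = average_window(PSDdata, Lgrid)
# Lobs -> [4.0, 5.25]; y -> [3e-7, 1e-7]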
Code Example #5
def fill_gaps(data,
              fillval=9999999,
              sigma=5,
              winsor=0.05,
              noise=False,
              constrain=False):
    '''Fill gaps in input data series, using interpolation plus noise

    The noise approach is based on Owens et al. (Space Weather, 2014).

    data - input numpy ndarray-like
    fillval - value marking fill in the time series
    sigma - width of gaussian filter for finding fluctuation CDF
    winsor - winsorization threshold: values above p=1-winsor and below
             p=winsor are capped
    noise - Boolean; if True add noise to the interpolated region, if False
            use linear interpolation only
    constrain - Boolean; if True (and noise is True), cap filled values at the
                lowest measured value, for quantities that must stay strictly
                positive (e.g. number density)
    '''
    # identify sequences of fill in data series
    gaps = np.zeros((len(data), 2), dtype=int)
    k = 0
    for i in range(1, len(data) - 1):
        # Single space gap/fillval
        if (tb.feq(data[i], fillval)) and (~tb.feq(data[i + 1], fillval)) and (
                ~tb.feq(data[i - 1], fillval)):
            gaps[k][0] = i
            gaps[k][1] = i
            k += 1
        # Start of multispace gap/fillval
        elif (tb.feq(data[i], fillval)) and (~tb.feq(data[i - 1], fillval)):
            gaps[k][0] = i
        # End of multispace gap/fillval
        elif (tb.feq(data[i], fillval)) and (~tb.feq(data[i + 1], fillval)):
            gaps[k][1] = i
            k += 1
    gaps = gaps[:k]

    #if no gaps detected
    if k == 0:
        return data

    # fill gaps with linear interpolation
    for gap in gaps:
        a = data[gap[0] - 1]
        b = data[gap[1] + 1]
        dx = (b - a) / (gap[1] - gap[0] + 2)
        for i in range(gap[1] - gap[0] + 1):
            data[gap[0] + i] = a + dx * (i + 1)

    if noise:
        # generate CDF from delta var
        series = data.copy()
        smooth = gaussian_filter(series, sigma)
        dx = series - smooth
        dx.sort()
        p = np.linspace(0, 1, len(dx))
        # "Winsorize" - all delta-Var above/below threshold at capped at threshold
        dx[:p.searchsorted(0. + winsor)] = dx[p.searchsorted(0. + winsor) + 1]
        dx[p.searchsorted(1. - winsor):] = dx[p.searchsorted(1. - winsor) - 1]

        # draw fluctuations from CDF and apply to linearly filled gaps
        for gap in gaps:
            for i in range(gap[1] - gap[0] + 1):
                data[gap[0] + i] += dx[p.searchsorted(random.random())]

        # cap variable if it should be strictly positive (e.g. number density)
        # use lowest measured value as floor
        if constrain and series.min() > 0.0:
            data[data < series.min()] = series.min()

    return data
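
A minimal usage sketch with a synthetic series (assumes fill_gaps and the imports its body relies on — numpy as np, spacepy.toolbox as tb, scipy's gaussian_filter, random — are in scope; with the default noise=False the result is pure linear interpolation):

import numpy as np

series = np.array([1.0, 2.0, 9999999., 9999999., 5.0, 6.0])
filled = fill_gaps(series.copy(), fillval=9999999)
print(filled)  # -> [1. 2. 3. 4. 5. 6.]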
Code Example #6
    def testfeq_notequal(self):
        """feq should return false when they are not equal"""
        val1 = 1.1234
        val2 = 1.1235
        self.assertFalse(tb.feq(val1, val2, 0.000005))
Code Example #7
File: radbelt.py Project: seiyu32/spacepy
    def assimilate(self, method='EnKF', inflation=0):
        """
        Assimilates data for the radiation belt model using the Ensemble
        Kalman Filter. The algorithm used is the SVD method presented by
        Evensen in 2003 (Evensen, G., Ocean dynamics, 53, pp.343--367, 2003).
        To compensate for model errors, three inflation algorithms are
        implemented. The inflation methodology is specified by the
        'inflation' argument, and the options are the following:

            inflation == 0: Add model error (perturbation for the ensemble)
            around model state values only where observations are available
            (DEFAULT).

            inflation == 1: Add model error (perturbation for the ensemble)
            around observation values only where observations are available.

            inflation == 2: Inflate around ensemble average for EnKF.

        Prior to assimilation, a set of data values has to be specified by
        setting the start and end dates, and time step, using the setup_ticks
        function of the radiation belt model:

        >>> import spacepy
        >>> import datetime
        >>> from spacepy import radbelt

        >>> rmod = radbelt.RBmodel()
        >>> start = datetime.datetime(2002,10,23)
        >>> end = datetime.datetime(2002,11,4)
        >>> delta = datetime.timedelta(hours=0.5)
        >>> rmod.setup_ticks(start, end, delta, dtype='UTC')

        Once the dates and time step are specified, the data is added using the
        add_PSD function:

        >>> rmod.add_PSD()

        The observations are averaged over the time windows, whose interval is
        given by the time step.

        Once the dates and data are set, the assimilation is performed using the
        'assimilate' function:

        >>> rmod.assimilate(inflation=1)

        This function will add the PSDa values, which are the analysis state of
        the radiation belt using the observations within the dates. To plot the
        analysis simply use the plot function:

        >>> rmod.plot(values=rmod.PSDa,clims=[-10,-6],Lmax=False,Kp=False,Dst=False)

        """
        import spacepy.data_assimilation
        import spacepy.sandbox.PSDdata as PD
        import copy as c

        # add PSD observations with add_PSD,
        # this has to be done to the class
        # module when running the RBmodel.
        # add_PSD will add the PSDdata to the class
        # which is a dictionary for the data that has been added

        # debugging command
        #pdb.set_trace()

        # setup method
        assert method in [
            'EnKF', 'insert'
        ], 'data assimilation method=' + method + ' not implemented'

        nTAI = len(self.ticks)

        # enKF method
        if method == 'EnKF':
            da = spacepy.data_assimilation.ensemble()

            # initialize A with initial condition
            # the initial condition is ones, have to change this to initialize
            # the ensemble with perturbed reference state
            A = np.ones((self.NL, da.Nens)) * self.PSDinit[:, np.newaxis]

            self.PSDf = np.zeros((self.PSDinit.shape[0], nTAI))
            self.PSDa = np.zeros((self.PSDinit.shape[0], nTAI))
            self.PSDa[:, 0] = self.PSDinit
            self.PSDf[:, 0] = self.PSDinit

            # diagnostic tools:
            # observations-minus-background
            self.PSD_omb = [''] * (nTAI)
            # observations-minus-analysis
            self.PSD_oma = [''] * (nTAI)
            # analysis-minus-background
            self.PSD_amb = np.zeros((self.PSDinit.shape[0], nTAI))

            # add model error (perturbation) in the ensemble initial condition.
            #A = da.add_model_error(self, A, self.PSDdata[0])
            std = 0.35
            normal = np.random.randn(da.Nens)
            for iens in np.arange(da.Nens):
                A[:, iens] = A[:, iens] + std * normal[iens] * A[:, iens]
            A[np.where(A < self.MIN_PSD)] = self.MIN_PSD

            # ==========================================
            # DEBUG
            #np.savetxt('ensemble_IC.dat',A)
            #np.savetxt('model_IC.dat',self.PSDinit)
            #np.savetxt('model_grid_IC.dat',self.Lgrid)
            # ==========================================

            # create temporary RB class instance
            rbtemp = c.copy(self)

            # time loop
            for i, Tnow, Tfut in zip(
                    np.arange(nTAI - 1) + 1, self.ticks[:-1], self.ticks[1:]):

                # make forecast and add model error
                # make forecast using all ensembles in A
                iens = 0
                for f in A.T:
                    rbtemp.ticks = st.Ticktock([Tnow.UTC[0], Tfut.UTC[0]],
                                               'UTC')
                    rbtemp.PSDinit = f.copy()
                    rbtemp.evolve()
                    #rbtemp.PSDdata = self.PSDdata[i-1]
                    A[:, iens] = rbtemp.PSD[:, 1].copy()
                    iens += 1

                # save result in ff
                Tnow = Tfut
                self.PSDf[:, i] = np.mean(A, axis=1)

                # verify that there are data points within the interval, if
                # there are data points then extract average observations
                # within the window, if not return empty observation array y
                # and Lobs.

                if len(self.PSDdata[i - 1]) > 0:
                    # get observations for time window ]Tnow-Twindow,Tnow]
                    Lobs, y = spacepy.data_assimilation.average_window(
                        self.PSDdata[i - 1], self.Lgrid)
                else:
                    y = np.array([])
                    Lobs = np.array([])

                print(Lobs)
                print(y)

                # ==========================================
                # DEBUG
                #np.savetxt('obs_location_IC.dat',Lobs)
                #np.savetxt('obs_IC.dat',y)
                #pdb.set_trace()
                # ==========================================

                # then assimilate, otherwise do another forecast
                if len(y) > 0:

                    ### check for minimum PSD values
                    ### A[np.where(A<self.MIN_PSD)] = self.MIN_PSD
                    ### # insert observations directly
                    ### A = da.add_model_error_obs(self, A, Lobs, y)
                    ### # dictionary
                    ### self.PSDa[:,i] = np.mean(A, axis=1)

                    # INFLATION SCHEMES
                    if inflation == 0:
                        print(
                            'inflation around model state values at observation locations'
                        )
                        # Add model error (perturbation for the ensemble) around model
                        # state values.  This acts as an inflation scheme for EnKF
                        A = da.add_model_error(self, A, self.PSDdata[i - 1])
                    elif inflation == 1:
                        print(
                            'inflation around observation values at observation locations'
                        )
                        # Add model error (perturbation for the ensemble) around
                        # observation values. This acts as an inflation scheme for EnKF
                        A = da.add_model_error_obs(self, A, Lobs, y)
                    elif inflation == 2:
                        print('inflation around ensemble average')
                        # Inflate around ensemble average for EnKF
                        # ensemble average
                        ens_avg = np.mean(A, axis=1)

                        # inflation factor
                        inflation_factor = 1.8

                        # loop over ensemble members and inflate
                        iens = 0
                        for ens in A.T:
                            # inflate ensemble
                            A[:, iens] = inflation_factor * (ens -
                                                             ens_avg) + ens_avg
                            iens += 1
                    A[np.where(A < self.MIN_PSD)] = self.MIN_PSD

                    # prepare assimilation analysis
                    # project ensemble states to obs. grid
                    HA = da.getHA(self, Lobs, A)

                    # measurement perturbations ensemble
                    Psi = da.getperturb(self, y)

                    # ensemble of innovation vectors
                    Inn = da.getInnovation(y, Psi, HA)

                    # calculate ensemble perturbation HA' = HA-HA_mean
                    HAp = da.getHAprime(HA)

                    # calculate prior diagnostics
                    # observation minus background
                    omb = y - np.average(HA, axis=1)
                    self.PSD_omb[i] = {
                        'Lobs': Lobs,
                        'y': y,
                        'omb': omb,
                    }
                    xf_avg = np.average(A, axis=1)

                    # now call the main analysis routine
                    if len(y) == 1:
                        A = da.EnKF_oneobs(A, Psi, Inn, HAp)
                    else:
                        A = da.EnKF(A, Psi, Inn, HAp)

                    # check for minimum PSD values
                    A[np.where(A < self.MIN_PSD)] = self.MIN_PSD

                    # average A from analysis step and save in results
                    # dictionary
                    self.PSDa[:, i] = np.mean(A, axis=1)

                    # calculate posterior diagnostics
                    # observation minus analysis
                    Hanalysis = da.getHA(self, Lobs, A)
                    oma = y - np.average(Hanalysis, axis=1)
                    self.PSD_oma[i] = {
                        'Lobs': Lobs,
                        'y': y,
                        'oma': oma,
                    }
                    # analysis minus background
                    xa_avg = np.average(A, axis=1)
                    self.PSD_amb[:, i] = xa_avg - xf_avg
                    #pdb.set_trace()

                    # print assimilated result
                    Hx = np.zeros_like(y)
                    for iL, Lstar in enumerate(Lobs):
                        idx = np.where(tb.feq(self.Lgrid, Lstar))
                        Hx[iL] = self.PSDa[idx, i]
                    print(Hx)
                    print(HA)

                elif len(y) == 0:
                    print('no observations within this window')
                    self.PSDa[:, i] = self.PSDf[:, i]
                    continue

                # print message
                print('Tnow: ', self.ticks[i].ISO)
        # insert observations for data assimilation
        elif method == 'insert':
            da = spacepy.data_assimilation.ensemble()

            A = np.ones((self.NL, 1)) * self.PSDinit[:, np.newaxis]

            self.PSDf = np.zeros((self.PSDinit.shape[0], nTAI))
            self.PSDa = np.zeros((self.PSDinit.shape[0], nTAI))
            self.PSDa[:, 0] = self.PSDinit
            self.PSDf[:, 0] = self.PSDinit

            # add model error (perturbation) in the ensemble initial condition.
            std = 0.15
            normal = np.random.randn(self.NL)
            A[:, 0] = (1.0 + std * normal) * A[:, 0]
            A[np.where(A < self.MIN_PSD)] = self.MIN_PSD

            rbtemp = c.copy(self)

            # time loop
            for i, Tnow, Tfut in zip(
                    np.arange(nTAI - 1) + 1, self.ticks[:-1], self.ticks[1:]):
                # evolve solution
                rbtemp.ticks = st.Ticktock([Tnow.UTC[0], Tfut.UTC[0]], 'UTC')
                rbtemp.PSDinit = A[:, 0]
                rbtemp.evolve()
                A[:, 0] = rbtemp.PSD[:, 1]

                # save result in ff
                Tnow = Tfut
                self.PSDf[:, i] = A[:, 0]

                # verify that there are data points within the interval, if
                # there are data points then extract average observations
                # within the window, if not return empty observation array y
                # and Lobs.
                if len(self.PSDdata[i - 1]) > 0:
                    Lobs, y = spacepy.data_assimilation.average_window(
                        self.PSDdata[i - 1], self.Lgrid)
                else:
                    y = np.array([])
                    Lobs = np.array([])

                print(Lobs)
                print(y)

                #pdb.set_trace()
                # then assimilate, otherwise do another forecast
                if len(y) > 0:
                    # check for minimum PSD values
                    A[np.where(A < self.MIN_PSD)] = self.MIN_PSD

                    # create index of obs location
                    idx = np.array([], dtype=int)
                    for Lval in Lobs:
                        Lidx = np.where(tb.feq(Lval, self.Lgrid))[0]
                        idx = np.append(idx, np.array([int(Lidx)]))

                    # insert observations directly
                    A[idx, 0] = y

                    #A = da.add_model_error_obs(self, A, Lobs, y)
                    # dictionary
                    self.PSDa[:, i] = A[:, 0]

                elif len(y) == 0:
                    print('no observations within this window')
                    self.PSDa[:, i] = self.PSDf[:, i]
                    continue

                # print message
                print('Tnow: ', self.ticks[i].ISO)
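
The inflation == 2 branch above scales each member's deviation from the ensemble mean by a fixed factor, leaving the mean itself unchanged. A standalone numpy sketch of that scheme (illustrative only, names made up):

import numpy as np

def inflate(A, factor=1.8):
    """Inflate ensemble spread about the ensemble mean.

    A has shape (nstates, nens); deviations from the ensemble mean are
    scaled by `factor`, so the ensemble mean is preserved.
    """
    ens_avg = A.mean(axis=1, keepdims=True)
    return factor * (A - ens_avg) + ens_avg

A = np.random.rand(10, 5)
assert np.allclose(inflate(A).mean(axis=1), A.mean(axis=1))  # mean preserved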
Code Example #8
File: radbelt.py Project: seiyu32/spacepy
    def add_PSD_obs(self, time=None, PSD=None, Lstar=None, satlist=None):
        """
        add PSD observations

        Parameters
        ----------

        time : Ticktock datetime array
            array of observation times

        PSD : list of numpy arrays
            PSD observational data for each time. Each entry in the list is a
            numpy array with the observations for the corresponding time

        Lstar : list of numpy arrays
            Lstar location of each PSD observations. Each entry in the list is
            a numpy array with the location of the observations for the
            corresponding time

        satlist : list of satellite names

        Returns
        -------
        out : list of dicts
            Information of the observational data, where each entry
            contains the observations and locations of observations for each
            time specified in the time array. Each list entry is a dictionary
            with the following information:

        Ticks : Ticktock array
            time of observations
        Lstar : numpy array
            location of observations
        PSD   : numpy array
            PSD observation values
        sat   : list of strings
            satellite names
        MU    : scalar value
            Mu value for the observations
        K     : scalar value
            K value for the observations
        """

        import pdb

        assert 'ticks' in self.__dict__ , \
            "Provide tick range with 'setup_ticks'"
        Tgrid = self.ticks
        nTAI = len(Tgrid)

        # initialize PSDdata list
        self.PSDdata = [''] * (nTAI - 1)

        if PSD is None:
            # PSD data not provided,
            # extract from database
            import spacepy.sandbox.PSDdata as PD

            for i, Tnow, Tfut in zip(np.arange(nTAI - 1), Tgrid[:-1],
                                     Tgrid[1:]):
                start_end = spacepy.time.Ticktock([Tnow.UTC[0], Tfut.UTC[0]],
                                                  'UTC')
                self.PSDdata[i] = PD.get_PSD(start_end, self.MU, self.K,
                                             satlist)

        else:
            # PSD data arrays provided

            # model grid
            Lgrid = self.Lgrid

            itime = 0
            # loop over time defined for the model integration
            for i, Tnow, Tfut in zip(np.arange(nTAI - 1), Tgrid[:-1],
                                     Tgrid[1:]):

                # empty array
                lstar = np.array([], dtype=float)
                time_idx = np.array([], dtype=int)

                # loop over observation time
                for itime in np.arange(len(time)):

                    if Tnow <= time[itime] <= Tfut:
                        #print 'match!! '
                        #print itime
                        #print i
                        #print time[itime]
                        #print Tnow
                        #print Tfut

                        # concatenate to lstar
                        lstar = np.concatenate((lstar, Lstar[itime]))
                        lstar = np.unique(lstar)
                        #idx = lstar.argsort()

                        #pdb.set_trace()
                        # add time index
                        time_idx = np.append(time_idx, itime)
                #end loop

                #pdb.set_trace()
                if (time_idx.shape[0] > 0):
                    # initialize PSD array
                    psd = np.zeros_like(lstar)

                    # initialize number of obs array
                    num_obs = np.zeros_like(lstar)

                    # sort time index
                    time_idx = np.unique(time_idx)

                    # loop over time index
                    for itime in time_idx:
                        # sort observations
                        idx = Lstar[itime].argsort()
                        tmplstar = Lstar[itime][idx]
                        tmppsd = PSD[itime][idx]

                        # run through all unique grid-points and compute
                        # average observation
                        for j, iL in enumerate(lstar):
                            # identify index of grid-point
                            idx = np.where(tb.feq(iL, tmplstar))
                            # assign observation for grid-point
                            psd[j] = psd[j] + tmppsd[idx]
                            # add for number of observations
                            num_obs[j] = num_obs[j] + 1.0

                    psd = psd / num_obs

                    # assign time for observations
                    #Ticks = time[itime]
                    Ticks = Tfut
                    # determine position of observations
                    #lstar = Lstar[itime]
                    # determine observations PSD
                    #psd = PSD[itime]
                    # provide MU
                    MU = self.MU * np.ones_like(lstar)
                    # provide K
                    K = self.K * np.ones_like(lstar)
                    # empty satellite name
                    sat = ['']

                    # add to dictionary
                    self.PSDdata[i] = {'Ticks':Ticks, 'Lstar':lstar, \
                                       'PSD':psd, 'sat':sat, \
                                       'MU':MU, 'K':K}

        # adjust initial conditions to these PSD values
        mval = np.mean(self.PSDdata[0]['PSD'])
        self.PSDinit = mval * np.exp(-(self.Lgrid - 5.5)**2 / 0.8)

        return
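
A hedged usage sketch with user-supplied arrays (rmod is assumed to be an RBmodel with ticks already set via setup_ticks, as in Code Example #7; the data values are made up):

>>> import numpy as np
>>> import spacepy.time as spt
>>> t = spt.Ticktock(['2002-10-23T12:00:00'], 'ISO')
>>> psd = [np.array([2e-7, 4e-7])]
>>> lstar = [np.array([4.1, 4.2])]
>>> rmod.add_PSD_obs(time=t, PSD=psd, Lstar=lstar)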
Code Example #9
File: data_assimilation.py Project: yugidoge/spacepy
def assimilate_JK(dd):
    """
    this version is currently not working
    main function to assimilate all data provided in init

    Parameters
    ==========
    model :

    PSDdata :

    HAp :

    Returns
    =======
    out :

    Examples
    ========
    """
    np.random.seed(123)

    Lgrid = dd['model']['Lgrid']
    dd['model']['initPSD'] = np.array(4e-7 * np.exp(-(Lgrid - 5)**2 / 2))

    nens = dd['kalman']['nens']
    NL = dd['model']['ngrid']
    f0 = np.array(dd['model']['initPSD'])
    Tgrid = dd['model']['Tgrid']

    dd['results'] = {}
    dd['results']['fa'] = {}
    dd['results']['ff'] = {}

    # --- initialize some stuff
    # broadcast all f0 into A and randomize
    A = np.ones((NL, nens)) * f0[:, np.newaxis]
    #A = 10**np.random.normal(np.log10(A),0.3)  # watch out for negative values

    dd['results']['fa']['Tgrid'] = np.zeros(len(Tgrid))
    dd['results']['ff']['Tgrid'] = np.zeros(len(Tgrid))
    dd['results']['fa']['PSD'] = np.zeros((f0.shape[0], Tgrid.shape[0]))
    dd['results']['fa']['PSD'][:, 0] = f0
    dd['results']['fa']['Tgrid'][0] = Tgrid[0]
    dd['results']['ff']['PSD'] = np.zeros((f0.shape[0], Tgrid.shape[0]))
    dd['results']['ff']['PSD'][:, 0] = f0
    dd['results']['ff']['Tgrid'][0] = Tgrid[1]

    minPSD = np.min(A) / 100

    # time loop to assimilate everything
    for Tnow, tcnt in zip(Tgrid, range(len(Tgrid) - 1)):

        # make forecast using all ensembles in A
        icnt = 0
        for f in A.T:
            fnew = forecast(dd, f, Tnow)
            A[:, icnt] = fnew[:]
            icnt += 1

        #print(np.log10(np.mean(A,axis=1)))
        Tnow = Tnow + dd['model']['Twindow']
        tcnt = tcnt + 1
        dd['results']['ff']['PSD'][:, tcnt] = np.mean(A, axis=1)
        dd['results']['ff']['Tgrid'][tcnt] = Tnow

        # get observations for time window ]Tnow-Twindow,Tnow]
        dd, L, y = getobs4window(dd, Tnow)

        # check if len(y) ==0
        if len(y) == 0:
            dd['results']['fa']['PSD'][:, tcnt] = np.mean(A, axis=1)
            dd['results']['fa']['Tgrid'][tcnt] = Tnow
            continue

        # perturb observations so that ensemble get sufficient spread
    # project ensemble states to obs. grid: HA(nobs,nens)
    HA = getHA(dd, L, A)
        e = np.zeros((len(y), nens))  # this is Psi in Evensen 2003
        D = np.zeros((len(y), nens))
        err_z = 0.3
    for iobs, yval in enumerate(y):
            relstd = yval * err_z
            rnd = np.random.normal(yval * np.ones(nens), relstd)
            rnd[np.where(rnd < minPSD)] = minPSD
            e[iobs, :] = rnd - yval
            D[iobs, :] = yval + e[iobs, :]

        # add model error
        relstd = np.zeros(nens)
    for iobs, yval in enumerate(y):
            idx = np.where(feq(L[iobs], Lgrid))
            relstd[:] = 0.5 * (yval - HA[iobs, :])
            A[idx, :] = np.random.normal(HA[iobs, :], np.abs(relstd))
            idx2 = np.where(A[idx, :] < minPSD)
            A[idx, idx2] = minPSD  # add min flux here

        # get residual or innovation in the classical sense
        Res = D - HA  # dim = (nobs,nens)

        # error of covariant matrix
        ffmean = np.mean(A, axis=1)
        V = np.zeros((NL, nens))
        for iens in range(nens):
            V[:, iens] = A[:, iens] - ffmean

        Pfxx = np.dot(V, V.T) / float(nens)
        Pfyy = np.dot(e, e.T) / float(nens)

        # get Kalman denominator (H*Pfxx*HT + Pfyy)^-1 by inversion
        PH, HPH = getHPH(dd, L, Pfxx)
        Rinv = np.linalg.inv(HPH + Pfyy)
        Adj = np.dot(np.dot(PH, Rinv), Res)

        # update all ensemble members
        A = A + Adj

        # check for negative fluxes
        idx = np.where(A < minPSD)
        A[idx] = minPSD

        #
        # average A from analysis step and save in results dictionary
        #print y, np.mean(HA,axis=1), np.mean(getHA(init,L,A),axis=1)
        dd['results']['fa']['PSD'][:, tcnt] = np.mean(A, axis=1)
        dd['results']['fa']['Tgrid'][tcnt] = Tnow

        # print message
        print('Tnow: ', rbtools.TAInum2ISOdate(Tnow))

        #print np.log10(np.mean(A,axis=1))[30]

    # copy some results into ['kalman'] key
    dd['kalman']['fa'] = dd['results']['fa']
    dd['kalman']['ff'] = dd['results']['ff']
    return dd
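
For reference, the analysis step above is the classical stochastic EnKF update: perturbed observations D, residual D - HA, and a gain built from the ensemble-estimated covariances PH = Pf H^T and HPH = H Pf H^T. A compact, self-contained numpy sketch, assuming the observation operator simply picks observed grid indices (all names here are illustrative, not spacepy's API):

import numpy as np

def enkf_update(A, y, obs_idx, obs_err_std):
    """Stochastic EnKF analysis step (direct inversion, Evensen 2003 style).

    A           : (nstates, nens) forecast ensemble
    y           : (nobs,) observations
    obs_idx     : (nobs,) observed state indices (rows picked by H)
    obs_err_std : (nobs,) observation error standard deviations
    """
    nens = A.shape[1]
    HA = A[obs_idx, :]                               # project ensemble to obs
    e = np.random.normal(0.0, obs_err_std[:, None],
                         size=(len(y), nens))        # obs perturbations (Psi)
    D = y[:, None] + e                               # perturbed observations
    V = A - A.mean(axis=1, keepdims=True)            # state deviations
    HV = HA - HA.mean(axis=1, keepdims=True)
    PH = V @ HV.T / nens                             # Pf H^T
    HPH = HV @ HV.T / nens                           # H Pf H^T
    R = e @ e.T / nens                               # sampled obs covariance
    K = PH @ np.linalg.inv(HPH + R)                  # Kalman gain
    return A + K @ (D - HA)                          # updated ensemble

A = np.random.rand(40, 30) * 1e-7
A = enkf_update(A, np.array([3e-7]), np.array([10]), np.array([3e-8]))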
Code Example #10
File: data_assimilation.py Project: yugidoge/spacepy
    def add_model_error_obs(self, model, A, Lobs, y):
        """
        this routine will add a standard error to the ensemble states

        Parameters
        ==========
        model :

        A :

        Lobs :

        y :

        Returns
        =======
        out :

        Examples
        ========
        """
        # debugging command
        #pdb.set_trace()

        Lgrid = model.Lgrid
        dL = Lgrid[1] - Lgrid[0]
        nens = A.shape[1]
        #nens = int(self.Nens)
        #print y, L

        # in units of L (should not be smaller than dL)
        # NOTE: radius is retained from an older scheme and is unused below
        radius = dL * 0.9
        radius = 1

        # create L vector where random noise will be added
        #erridx = np.array([False]*len(Lgrid))
        erridx = np.array([], dtype=int)
        for Lval in Lobs:
            Lidx = np.where(feq(Lval, Lgrid))[0]
            erridx = np.append(erridx, np.array([int(Lidx)]))

        # compute residual
        # calculate the rel. average uncertainty based on (obs -
        # ensemble)/obs
        rst = np.zeros((len(Lobs), nens))
        for iens, member in enumerate(A.T):
            rst[:, iens] = np.abs(y - member[erridx])

        # perturb with normal distribution
        # around observation values
        stdev = np.std(rst, axis=1)

        # debug output files
        #np.savetxt('Lobs.dat',Lobs)
        #np.savetxt('y.dat',y)
        #np.savetxt('Lgrid.dat',Lgrid)
        #np.savetxt('A.dat',A)
        #np.savetxt('erridx.dat',erridx)
        #np.savetxt('rst.dat',rst)
        #np.savetxt('stdev.dat',stdev)

        pert_obs = (1.0 + 0.30 * np.random.normal(size=y.shape[0])) * y

        for i, idx in enumerate(erridx):
            if (stdev[i] > 0.0):
                A[idx, :] = np.random.normal(y[i], stdev[i], nens)
                #A[idx,:] = np.random.normal( pert_obs[i], stdev[i], nens )
        print('done')
        return A
Code Example #11
File: data_assimilation.py Project: yugidoge/spacepy
    def add_model_error(self, model, A, PSDdata):
        """
        this routine will add a standard error to the ensemble states

        Parameters
        ==========
        model :

        A :

        PSDdata :

        Returns
        =======
        out :

        Examples
        ========
        """
        Lgrid = model.Lgrid
        dL = Lgrid[1] - Lgrid[0]
        nens = int(self.Nens)
        #print y, L
        # in units of L (should not be smaller than dL)
        radius = dL * 0.9
        radius = 1

        # create L vector where random noise will be added
        #       erridx = np.array([False]*len(Lgrid))
        #       for Lval in PSDdata['Lstar']:
        #           L1 = np.max((Lval-radius, Lgrid[0]))
        #           L2 = np.min((Lval+radius, Lgrid[-1]))
        #           Lidx = np.where( (Lgrid > L1) & (L2 > Lgrid) )[0]
        #           erridx[Lidx] = True

        # calculate the rel. average uncertainty based on (obs -
        # modelfcst)/obs
        #       fcst = np.mean(A, axis=1)
        #       rst = np.zeros( len(PSDdata['Lstar']) )
        #       for Lval, yval, i in zip(PSDdata['Lstar'], PSDdata['PSD'], range(len(PSDdata['Lstar'])) ):
        #           idx = np.argmin(np.abs(Lval - Lgrid))
        #           rst[i] = np.abs( yval - fcst[idx] )

        #       stdev = np.mean(rst)
        #       for iens in range(nens):
        #           f = A[erridx, iens]
        #           A[erridx, iens] = np.random.normal( f, scale = stdev )

        ##TODO: Check why above code was commented... this block was unindented for consistency, but should it be here?
        # create L vector where random noise will be added
        erridx = np.array([], dtype=int)
        for Lval in PSDdata['Lstar']:
            Lidx = np.where(feq(Lval, Lgrid))[0]
            erridx = np.append(erridx, np.array([int(Lidx)]))

        # calculate the rel. average uncertainty based on (obs -
        # modelfcst)/obs
        fcst = np.mean(A, axis=1)
        rst = np.zeros((len(PSDdata['Lstar']), nens))
        for iens, member in enumerate(A.T):
            rst[:, iens] = np.abs(PSDdata['PSD'] - member[erridx])

        stdev = 2.0 * np.std(rst, axis=1)

        # debug output files
        #np.savetxt('Lobs_init.dat',PSDdata['Lstar'])
        #np.savetxt('y_init.dat',PSDdata['PSD'])
        #np.savetxt('Lgrid_init.dat',Lgrid)
        #np.savetxt('A_init.dat',A)
        #np.savetxt('erridx_init.dat',erridx)
        #np.savetxt('rst_init.dat',rst)
        #np.savetxt('stdev_init.dat',stdev)

        for i, idx in enumerate(erridx):
            if (stdev[i] > 0.0):
                A[idx, :] = np.random.normal(fcst[i], stdev[i], nens)
        return A