Example #1
    def test_run(self):

        # input
        MatchUpData = MatchUp()
        MatchUpData.values = array(
            [1.0, 4.0, 5.0, 6.0, 4.0, 7.0, 3.0, 3.0, 2.0, 1.0])
        MatchUpData.ks = array([1.0, 4.0, 3.0, 3.0, 3.0])
        MatchUpData.idx = {
            'N_var': [2, 2, 3, 3],
            'Nm': [2, 3],
            'cNm': [0, 2, 5],
            'idx': [0, 2, 4, 7, 10],
            'another': [1, 2, 3, 4]
        }
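        # N.B. - keys mirror the MatchUp idx convention used elsewhere: 'N_var' gives the number of
        #        variables per data block, 'idx' the cumulative variable block boundaries, 'Nm' the
        #        number of match-ups per series and 'cNm' their cumulative totals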
        MatchUpData.a = 'test'
        MatchUpData.sensor_model = 'test'
        MatchUpData.sensor_model_constant = 'test'
        MatchUpData.adjustment_model = 'test'
        MatchUpData._original_idx = 'test'

        n_samples_mu = 5

        # expected output
        SimulateMatchUpOp = SimulateMatchUp()
        idx_sim = SimulateMatchUpOp.return_simulation_idx(
            MatchUpData.idx, n_samples_mu)
        values_sim = SimulateMatchUpOp.return_simulation_x(
            MatchUpData.values, MatchUpData.idx['idx'], idx_sim['idx'])
        ks_sim = SimulateMatchUpOp.return_simulation_x(MatchUpData.ks,
                                                       MatchUpData.idx['cNm'],
                                                       idx_sim['cNm'])
        a_sim = MatchUpData.a
        sensor_model_sim = MatchUpData.sensor_model
        sensor_model_constant_sim = MatchUpData.sensor_model_constant
        adjustment_model_sim = MatchUpData.adjustment_model
        _original_idx_sim = MatchUpData._original_idx

        # run test
        MatchUpSimulation = SimulateMatchUpOp.run(MatchUpData, n_samples_mu)

        # check output

        for x_sim_test_i, x_sim_i in zip(values_sim, MatchUpSimulation.values):
            self.assertEqual(x_sim_test_i, x_sim_i)

        for x_sim_test_i, x_sim_i in zip(ks_sim, MatchUpSimulation.ks):
            self.assertEqual(x_sim_test_i, x_sim_i)

        for key in idx_sim.keys():
            self.assertSequenceEqual(idx_sim[key], MatchUpSimulation.idx[key],
                                     key)

        self.assertEqual(MatchUpSimulation.a, a_sim)
        self.assertEqual(MatchUpSimulation.sensor_model, sensor_model_sim)
        self.assertEqual(MatchUpSimulation.sensor_model_constant,
                         sensor_model_constant_sim)
        self.assertEqual(MatchUpSimulation.adjustment_model,
                         adjustment_model_sim)
        self.assertEqual(MatchUpSimulation._original_idx, _original_idx_sim)
Example #2
    def run(self, MatchUpData, sf=1.0, n_sample_max=100000, show=False):
        """
        Return instance of ``eopy.matchup.matchupIO.MatchUp`` for which the only data correlations arise from systematic
        effects

        :type MatchUpData: *eopy.matchup.matchupIO.MatchUp*
        :param MatchUpData: Input match-up data for sampling

        :type sf: float
        :param sf: Sampling factor

        :type n_sample_max: int
        :param n_sample_max: Maximum number of match-ups to include in the sample

        :type show: bool
        :param show: Option to print sampling information

        :return:
            :MatchUpSample: *eopy.matchup.matchupIO.MatchUp*

            Sampled harmonisation data
        """

        # initialise parameters
        mcxyz = MatchUpData.idx['idx']  # cumulative total of variables data block
        mc = MatchUpData.idx['cNm']  # cumulative total of match-ups by series

        if mc[-1] * sf > n_sample_max:
            sf = float(n_sample_max) / float(mc[-1])
        print "Minimum Sampling Factor:", sf

        ################################################################################################################
        # 1. Determine Match Ups to Include in Sample
        ################################################################################################################

        # Choose a sample of match ups such that data is left with independent errors.
        #
        # N.B. - This is not possible for a fully systematic effect, but data with error correlation structures defined
        #        by w matrices can be sampled to achieve this
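        #
        #        e.g. (illustrative) - if a w matrix averages pairs of adjacent scanlines, choosing only
        #        match-ups whose w rows share no scanline columns leaves the sampled errors independent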

        sampling_idxs = {}  # initialise dictionary of sampling indices per match up series

        n_mus = set(MatchUpData.idx['n_mu'])

        # Find sampling indices by match-up series
        for n_mu in n_mus:

            # total number of match-ups in series (should be the same for each variable so take the first one)
            mu_total = [
                MatchUpData.idx['N_var'][i]
                for i, n_mu_i in enumerate(MatchUpData.idx['n_mu'])
                if n_mu_i == n_mu
            ][0]
            mu_samples = int(mu_total * sf)  # required number of sample match-ups (determined by the sampling factor)

            # Find w_matrices indices of any w matrices used to describe error correlation structure of match up data
            mu_block_idxs = [
                i for i, n_mu_i in enumerate(MatchUpData.idx['n_mu'])
                if n_mu_i == n_mu
            ]
            w_indices_mu = [
                MatchUpData.unc[mu_block_idx].w_i
                for mu_block_idx in mu_block_idxs
                if (MatchUpData.unc[mu_block_idx].typeID == 3) or (
                    MatchUpData.unc[mu_block_idx].typeID == 4)
            ]
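            # N.B. - typeID 3 (structured) and typeID 4 (structured+systematic) mark blocks whose error
            #        correlation is defined by a w matrix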

            # a. If w matrices present select sample such that errors independent of each other for all w matrices -----
            if w_indices_mu != []:

                idxs = get_sample_idxs(asarray(w_indices_mu,
                                               dtype=int32), mu_samples,
                                       [w for w in MatchUpData.w_matrices])

                # If more match ups sampled than maximum randomly reduce to required amount
                if len(idxs) > mu_samples:
                    idxs = sorted(sample(idxs, mu_samples))
            # ----------------------------------------------------------------------------------------------------------

            # b. If no w matrices present free to sample randomly ------------------------------------------------------
            else:
                idxs = sorted(sample(arange(mu_total), mu_samples))
            # ----------------------------------------------------------------------------------------------------------

            sampling_idxs[n_mu] = idxs  # add match up sample indices to dictionary

        ################################################################################################################
        # 2. Sample Data
        ################################################################################################################

        # a. Initialise sampled harmonisation data product -------------------------------------------------------------
        MatchUpSample = MatchUp()
        MatchUpSample.a = MatchUpData.a[:]
        MatchUpSample.sensor_model = MatchUpData.sensor_model
        MatchUpSample.sensor_model_constant = MatchUpData.sensor_model_constant
        MatchUpSample.adjustment_model = MatchUpData.adjustment_model
        # --------------------------------------------------------------------------------------------------------------

        # b. Update idx attribute of MatchUpSample to describe structure of sampled data --------------------------------

        # Start with copy of full dataset idx dictionary attribute
        # N.B. - deepcopy required so that lists nested in the dict are copied rather than referenced
        MatchUpSample.idx = deepcopy(MatchUpData.idx)

        # Formulate required replacement idx entries (several can remain the same as the full dataset)
        idxs = [0]
        total = 0
        for i, n_mu in enumerate(MatchUpData.idx['n_mu']):
            block_samples = len(sampling_idxs[n_mu])
            MatchUpSample.idx['N_var'][i] = block_samples
            total += block_samples
            idxs.append(int(total))
        MatchUpSample.idx['idx'] = idxs

        cNm = [0]
        total = 0
        for i, n_mu in enumerate(n_mus):
            n_mu_sample = len(sampling_idxs[n_mu])
            MatchUpSample.idx['Nm'][i] = n_mu_sample
            total += n_mu_sample
            cNm.append(total)
        MatchUpSample.idx['cNm'] = cNm

        if show:
            print "Initial Size: ", MatchUpData.idx['Nm']
            print "Sample Size: ", MatchUpSample.idx['Nm']
        # --------------------------------------------------------------------------------------------------------------

        # c. Sample variables and respective uncertainty ---------------------------------------------------------------

        # Initialise data arrays
        MatchUpSample.values = zeros(MatchUpSample.idx['idx'][-1])
        MatchUpSample.unc = [0] * len(MatchUpSample.idx['n_cov'])

        #  Sample data by data block
        for i, block_unc in enumerate(MatchUpData.unc):
            istart = mcxyz[i]  # start of full dataset values data block
            iend = mcxyz[i + 1]  # end of full dataset values data block
            istart_s = MatchUpSample.idx['idx'][i]  # start of sampled values data block
            iend_s = MatchUpSample.idx['idx'][i + 1]  # end of sampled values data block
            s_idx = sampling_idxs[MatchUpData.idx['n_mu'][i]]  # indices of match ups to sample within values data block

            # i. Sample values
            MatchUpSample.values[istart_s:iend_s] = MatchUpData.values[
                istart:iend][s_idx]

            # ii. Sample values uncertainty data

            if (block_unc.typeID == 3) or (block_unc.typeID == 4):
                # If block error correlation form was defined by a w matrix
                # - now simplified to random error correlation by sampling choice

                # Initialise uncertainty data array
                MatchUpSample.unc[i] = Uncertainty(1, zeros(len(s_idx)))

                # Retrieve required w and u matrix and hence determine new random uncertainties
                w = MatchUpData.w_matrices[block_unc.w_i]
                u = MatchUpData.u_matrices[block_unc.u_i]
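                # For each sampled match-up, combine the raw uncertainties spanned by its w row into a
                # single random uncertainty: the root-sum-square of the u entries divided by the square
                # root of the number of contributing values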
                for j, s_i in enumerate(s_idx):
                    col_start = w.indices[w.indptr[s_i]]
                    col_end = w.indices[w.indptr[s_i + 1] - 1] + 1
                    MatchUpSample.unc[i].uR[j] = npsum(
                        u[col_start:col_end]**
                        2)**0.5 / (col_end - col_start)**0.5
            else:
                # If block error correlation form random or random and systematic simplify to random and sample
                MatchUpSample.unc[i] = Uncertainty(
                    1, deepcopy(block_unc.uR[s_idx]))
        # --------------------------------------------------------------------------------------------------------------

        # d. sample k --------------------------------------------------------------------------------------------------

        # Initialise k data arrays
        MatchUpSample.ks = zeros(MatchUpSample.idx['cNm'][-1])
        MatchUpSample.unck = [0] * len(MatchUpSample.idx['Nm'])

        # Sample k and respective uncertainty data by match-up series
        for i, mu_unck in enumerate(MatchUpData.unck):
            istart = mc[i]  # start of full dataset k data block
            iend = mc[i + 1]  # end of full dataset k data block
            istart_s = MatchUpSample.idx['cNm'][i]  # start of sampled dataset k data block
            iend_s = MatchUpSample.idx['cNm'][i + 1]  # end of sampled dataset k data block
            s_idx = sampling_idxs[i + 1]  # indices of match ups to sample within k data block

            # i. Sample data
            MatchUpSample.ks[istart_s:iend_s] = MatchUpData.ks[istart:iend][
                s_idx]

            # ii. Sample uncertainties
            MatchUpSample.unck[i] = Uncertainty(1, deepcopy(mu_unck.uR[s_idx]))
        # --------------------------------------------------------------------------------------------------------------

        # e. sample per match-up data ----------------------------------------------------------------------------------

        # Initialise data arrays
        MatchUpSample.ks = zeros(MatchUpSample.idx['cNm'][-1])
        MatchUpSample.unck = [0] * len(MatchUpSample.idx['Nm'])
        MatchUpSample.time1 = zeros(MatchUpSample.idx['cNm'][-1],
                                    dtype=datetime)
        MatchUpSample.time2 = zeros(MatchUpSample.idx['cNm'][-1],
                                    dtype=datetime)

        if MatchUpData.across_track_index1 is not None:
            MatchUpSample.across_track_index1 = zeros(
                MatchUpSample.idx['cNm'][-1])

        if MatchUpData.across_track_index2 is not None:
            MatchUpSample.across_track_index2 = zeros(
                MatchUpSample.idx['cNm'][-1])

        if MatchUpData.along_track_index1 is not None:
            MatchUpSample.along_track_index1 = zeros(
                MatchUpSample.idx['cNm'][-1])

        if MatchUpData.along_track_index2 is not None:
            MatchUpSample.along_track_index2 = zeros(
                MatchUpSample.idx['cNm'][-1])

        # Sample by match-up series
        for i, mu_unck in enumerate(MatchUpData.unck):
            istart = mc[i]  # start of full dataset k data block
            iend = mc[i + 1]  # end of full dataset k data block
            istart_s = MatchUpSample.idx['cNm'][i]  # start of sampled dataset k data block
            iend_s = MatchUpSample.idx['cNm'][i + 1]  # end of sampled dataset k data block
            s_idx = sampling_idxs[i + 1]  # indices of match ups to sample within k data block

            # i. Sample k data
            MatchUpSample.ks[istart_s:iend_s] = MatchUpData.ks[istart:iend][
                s_idx]

            # ii. Sample k uncertainties
            MatchUpSample.unck[i] = Uncertainty(1, deepcopy(mu_unck.uR[s_idx]))

            # iii. Sample indices
            if MatchUpData.across_track_index1 is not None:
                MatchUpSample.across_track_index1[
                    istart_s:iend_s] = MatchUpData.across_track_index1[
                        istart:iend][s_idx]

            if MatchUpData.across_track_index2 is not None:
                MatchUpSample.across_track_index2[
                    istart_s:iend_s] = MatchUpData.across_track_index2[
                        istart:iend][s_idx]

            if MatchUpData.along_track_index1 is not None:
                MatchUpSample.along_track_index1[
                    istart_s:iend_s] = MatchUpData.along_track_index1[
                        istart:iend][s_idx]

            if MatchUpData.along_track_index2 is not None:
                MatchUpSample.along_track_index2[
                    istart_s:iend_s] = MatchUpData.along_track_index2[
                        istart:iend][s_idx]

            # iv. Sample time
            MatchUpSample.time1[istart_s:iend_s] = MatchUpData.time1[
                istart:iend][s_idx]
            MatchUpSample.time2[istart_s:iend_s] = MatchUpData.time2[
                istart:iend][s_idx]

        # --------------------------------------------------------------------------------------------------------------

        # f. sample additional variables -------------------------------------------------------------------------------

        # todo - write sampling of additional variables

        # --------------------------------------------------------------------------------------------------------------

        return MatchUpSample
Example #3
def return_MatchUpTest():
    """
    Return a MatchUp dataset object for testing

    :return:
        :MatchUpTest: *eopy.matchup.matchupIO.MatchUp*

        Test match-up dataset
    """

    ####################################################################################################################
    # 1. Define test sensor function
    ####################################################################################################################

    def test_reference_function(X, a, sensor_c, sensor_xt_i, sensor_at_i,
                                sensor_t):
        return X[:, 0], None

    def test_sensor_function(X, a, sensor_c, sensor_xt_i, sensor_at_i,
                             sensor_t):
        """
        Arbitary sensor function

        :type a: numpy.ndarray
        :param a: calibration parameters

        :type X: list
        :param X: List of arrays of sensor observed parameters

        :return:
            :measurand: *numpy.ndarray*

            Computed sensor measurand
        """

        # Extract observed parameters
        X1 = X[:, 0]
        X2 = X[:, 1]
        X3 = X[:, 2]
        M = len(X1)

        # Evaluate measurand
        parameter_derivatives = vstack((ones(M), X1 * X2 / X3, X1**2)).T
        parameter = [a[0], a[1], a[2]]
        measurand = dot(parameter_derivatives, parameter)
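        # i.e. measurand = a[0] + a[1]*X1*X2/X3 + a[2]*X1**2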

        # Evaluate derivatives
        # In the following order:
        # > d(measurand)/dX1
        # > d(measurand)/dX2
        # > d(measurand)/dX3
        # > d(measurand)/da0
        # > d(measurand)/da1
        # > d(measurand)/da2

        derivatives = column_stack(
            (parameter[1] * X2 / X3 + 2 * parameter[2] * X1,
             parameter[1] * X1 / X3, -parameter[1] * X2 * X1 / X3**2,
             parameter_derivatives))

        return measurand, derivatives

    def test_adjustment_model(measurand):
        """
        Arbitrary sensor adjustment function to sample sensor 1

        :type measurand: numpy.ndarray
        :param measurand: measurand data

        :return:
            :adjusted_measurand: *numpy.ndarray*

            Adjusted sensor measurand

            :adjusted_measurand_derivatives: *numpy.ndarray*

            Adjusted sensor measurand derivatives
        """

        adjusted_measurand = 2 * measurand
        adjusted_measurand_derivatives = ones(len(adjusted_measurand))
        return adjusted_measurand, adjusted_measurand_derivatives

    ####################################################################################################################
    # 2. Initialise test data
    ####################################################################################################################

    values = array([
        470.5,
        720.56,
        450.9,
        295.6,
        315.23,
        70.5,
        70.6,
        70.3,
        70.7,
        70.5,
        71.5,
        71.6,
        71.3,
        71.7,
        80.5,
        80.6,
        80.3,
        80.7,
        150.5,
        151.1,
        149.8,
        150.2,
        151.4,
        140.5,
        141.1,
        139.8,
        140.2,
        160.5,
        161.1,
        169.8,
        160.2,
        30.2,
        20.4,
        28.2,
        50.7,
        45.6,
        29.2,
        37.4,
        28.2,
        50.7,
        28.2,
        32.4,
        22.2,
        53.7,
    ])
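    # 44 values in total, split into data blocks of 5, 5, 4, 4, ... variables as given by idx['N_var']
    # and idx['idx'] below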
    unc = [
        Uncertainty("r", array([1.6, 1.5, 1.5, 1.3, 1.5])),
        Uncertainty("r", array([3.1, 3.2, 3.2, 3.1, 3.0])),
        Uncertainty("r", array([3.3, 3.4, 3.1, 3.2])),
        Uncertainty("r", array([2.1, 2.2, 2.2, 2.1])),
        Uncertainty("r", array([5.0, 4.7, 5.1, 5.2, 5.3])),
        Uncertainty("r", array([4.2, 4.3, 4.4, 4.3])),
        Uncertainty("r", array([4.0, 3.7, 4.4, 4.7])),
        Uncertainty("r", array([2.2, 1.7, 2.0, 4.3, 2.6])),
        Uncertainty("r", array([2.3, 1.2, 2.3, 4.4])),
        Uncertainty("r", array([3.2, 2.7, 3.0, 5.3]))
    ]
    ks = array([1.2, 1.7, 1.3, 1.4, 1.3, 3.2, 3.7, 3.3, 3.4])
    unck = [
        Uncertainty("r", array([0.25, 0.25, 0.25, 0.25, 0.25])),
        Uncertainty("r", array([0.2644, 0.2644, 0.2644, 0.2644]))
    ]
    idx = {
        "Nm": [5, 4],
        "cNm": [0, 5, 9],
        "Im": [[0, 1], [1, 2]],
        "sensors": [-1, 1, 2],
        "sensor_m": [1, 3, 3],
        "n_sensor": [0, 1, 1, 2, 1, 1, 2, 1, 1, 2],
        "n_mu": [1, 1, 2, 2, 1, 2, 2, 1, 2, 2],
        "n_cov": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
        "N_var": [5, 5, 4, 4, 5, 4, 4, 5, 4, 4],
        "idx": [0, 5, 10, 14, 18, 23, 27, 31, 36, 40, 44],
        "parameter_sensor": [1, 1, 1, 2, 2, 2],
        "sensor_model_constant_sensor": [],
        "sensor_model_contant": None
    }
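    # "Nm"/"cNm" give the number of match-ups per series and their cumulative totals, "N_var"/"idx" the
    # number of variables per data block and the cumulative block boundaries, "n_mu" and "n_sensor" the
    # match-up series and sensor index associated with each block, and "Im" the sensor index pairs of
    # each match-up series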
    a = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])

    ####################################################################################################################
    # 3. Initialise MatchUp object
    ####################################################################################################################

    MatchUpTest = MatchUp()
    MatchUpTest.values = values
    MatchUpTest.unc = unc
    MatchUpTest.ks = ks
    MatchUpTest.unck = unck
    MatchUpTest.idx = idx
    MatchUpTest.a = a
    MatchUpTest.sensor_model = [
        test_reference_function, test_sensor_function, test_sensor_function
    ]
    MatchUpTest.adjustment_model = [
        test_adjustment_model, test_adjustment_model, test_adjustment_model
    ]

    return MatchUpTest
Example #4
def return_MatchUpTest_r__():
    """
    Return a MatchUp dataset object for testing

    :return:
        :MatchUpTest: *eopy.matchup.matchupIO.MatchUp*

        Test match-up dataset
    """

    ####################################################################################################################
    # 1. Initialise test data
    ####################################################################################################################

    values = array([
        470.5,
        720.56,
        450.9,
        295.6,
        315.23,
        70.5,
        70.6,
        70.3,
        70.7,
        70.5,
        71.5,
        71.6,
        71.3,
        71.7,
        80.5,
        80.6,
        80.3,
        80.7,
        150.5,
        151.1,
        149.8,
        150.2,
        151.4,
        140.5,
        141.1,
        139.8,
        140.2,
        160.5,
        161.1,
        169.8,
        160.2,
        30.2,
        20.4,
        28.2,
        50.7,
        45.6,
        29.2,
        37.4,
        28.2,
        50.7,
        28.2,
        32.4,
        22.2,
        53.7,
    ])
    unc = [
        Uncertainty(1, array([1.6, 1.5, 1.5, 1.3, 1.5])),
        Uncertainty(1, array([3.1, 3.2, 3.2, 3.1, 3.0])),
        Uncertainty(1, array([3.3, 3.4, 3.1, 3.2])),
        Uncertainty(1, array([2.1, 2.2, 2.2, 2.1])),
        Uncertainty(1, array([5.0, 4.7, 5.1, 5.2, 5.3])),
        Uncertainty(1, array([4.2, 4.3, 4.4, 4.3])),
        Uncertainty(1, array([4.0, 3.7, 4.4, 4.7])),
        Uncertainty(1, array([2.2, 1.7, 2.0, 4.3, 2.6])),
        Uncertainty(1, array([2.3, 1.2, 2.3, 4.4])),
        Uncertainty(1, array([3.2, 2.7, 3.0, 5.3]))
    ]
    ks = array([1.2, 1.7, 1.3, 1.4, 1.3, 3.2, 3.7, 3.3, 3.4])
    unck = [
        Uncertainty(1, array([0.25, 0.25, 0.25, 0.25, 0.25])),
        Uncertainty(1, array([0.2644, 0.2644, 0.2644, 0.2644]))
    ]
    idx = {
        "Nm": [5, 4],
        "cNm": [0, 5, 9],
        "Im": [[0, 1], [1, 2]],
        "sensors": [-1, 1, 2],
        "sensor_ms": [1, 3, 3],
        "n_sensor": [0, 1, 1, 2, 1, 1, 2, 1, 1, 2],
        "n_mu": [1, 1, 2, 2, 1, 2, 2, 1, 2, 2],
        "n_cov": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
        "N_var": [5, 5, 4, 4, 5, 4, 4, 5, 4, 4],
        "idx": [0, 5, 10, 14, 18, 23, 27, 31, 36, 40, 44],
        "Ia": [1, 1, 1, 2, 2, 2]
    }
    a = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])

    ####################################################################################################################
    # 2. Initialise MatchUp object
    ####################################################################################################################

    MatchUpTest = MatchUp()
    MatchUpTest.values = values
    MatchUpTest.unc = unc
    MatchUpTest.ks = ks
    MatchUpTest.unck = unck
    MatchUpTest.idx = idx
    MatchUpTest.a = a

    return MatchUpTest
Example #5
def return_MatchUpTest_rsw():
    """
    Return a MatchUp dataset object for testing

    :return:
        :MatchUpTest: *eopy.matchup.matchupIO.MatchUp*

        Test match-up dataset
    """

    ####################################################################################################################
    # 1. Initialise test data
    ####################################################################################################################

    w2_matchup1 = array([[0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0],
                         [0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0],
                         [0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0],
                         [0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0],
                         [0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5]])
    w1_matchup2 = array([[0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0],
                         [0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0],
                         [0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0],
                         [0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5]])
    w2_matchup2 = array([[0.5, 0.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.5, 0.5, 0.0],
                         [0.0, 0.0, 0.5, 0.5, 0.0], [0.0, 0.0, 0.0, 0.5, 0.5]])
    u2_matchup1 = array([1.0, 0.9, 0.78, 1.0, 0.9, 0.68, 1.0])
    u1_matchup2 = array([1.0, 0.9, 0.9, 0.5, 0.9, 0.58, 1.1])
    u2_matchup2 = array([1.0, 0.9, 0.9, 0.5, 0.8])
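    # The w matrices above define averaging error correlation structures (typeID 3); the (i, j) tuples
    # passed to Uncertainty(3, ...) below appear to index the corresponding entries of w_matrices and
    # u_matrices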

    values = array([
        470.5, 720.56, 450.9, 295.6, 315.23, 70.5, 70.6, 70.3, 70.7, 70.5,
        71.5, 71.6, 71.3, 71.7, 80.5, 80.6, 80.3, 80.7, 150.5, 151.1, 149.8,
        150.2, 151.4, 140.5, 141.1, 139.8, 140.2, 160.5, 161.1, 169.8, 160.2,
        20.2, 21.0, 19.7, 19.7, 20.7, 13.1, 13.2, 11.8, 15.2, 12.6, 13.7, 13.7,
        11.3
    ])
    unc = [
        Uncertainty(1, array([1.6, 1.5, 1.5, 1.3, 1.5])),
        Uncertainty(1, array([3.1, 3.2, 3.2, 3.1, 3.0])),
        Uncertainty(1, array([3.3, 3.4, 3.1, 3.2])),
        Uncertainty(1, array([2.1, 2.2, 2.2, 2.1])),
        Uncertainty(1, array([5.0, 4.7, 5.1, 5.2, 5.3])),
        Uncertainty(1, array([4.2, 4.3, 4.4, 4.3])),
        Uncertainty(1, array([4.0, 3.7, 4.4, 4.7])),
        Uncertainty(3, (0, 0)),
        Uncertainty(3, (1, 1)),
        Uncertainty(3, (2, 2))
    ]
    ks = array([1.2, 1.7, 1.3, 1.4, 1.3, 3.2, 3.7, 3.3, 3.4])
    unck = [
        Uncertainty(1, array([0.25, 0.25, 0.25, 0.25, 0.25])),
        Uncertainty(1, array([0.2644, 0.2644, 0.2644, 0.2644]))
    ]
    idx = {
        "Nm": [5, 4],
        "cNm": [0, 5, 9],
        "Im": [[0, 1], [1, 2]],
        "sensors": [-1, 1, 2],
        "sensor_ms": [1, 3, 3],
        "n_sensor": [0, 1, 1, 2, 1, 1, 2, 1, 1, 2],
        "n_mu": [1, 1, 2, 2, 1, 2, 2, 1, 2, 2],
        "n_cov": [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
        "N_var": [5, 5, 4, 4, 5, 4, 4, 5, 4, 4],
        "idx": [0, 5, 10, 14, 18, 23, 27, 31, 36, 40, 44],
        "Ia": [1, 1, 1, 2, 2, 2]
    }
    a = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])
    w_matrices = [
        csr_matrix(w2_matchup1),
        csr_matrix(w1_matchup2),
        csr_matrix(w2_matchup2)
    ]
    u_matrices = [u2_matchup1, u1_matchup2, u2_matchup2]

    ####################################################################################################################
    # 2. Initialise MatchUp object
    ####################################################################################################################

    MatchUpTest = MatchUp()
    MatchUpTest.values = values
    MatchUpTest.unc = unc
    MatchUpTest.ks = ks
    MatchUpTest.unck = unck
    MatchUpTest.idx = idx
    MatchUpTest.a = a
    MatchUpTest.w_matrices = w_matrices
    MatchUpTest.u_matrices = u_matrices

    return MatchUpTest
Example #6
    def calc_f(self, xyza, HData):
        """
        Return value for f, an array containing the residuals between the current and original estimates of radiances,
        variables, and ks

        :type xyza: numpy.ndarray
        :param xyza: array containing the current estimates of variables and parameters

        :type HData: *eopy.matchup.matchupIO.MatchUp*
        :param HData: Input harmonisation match-up data

        :return:
            :f: *numpy.ndarray*

            array containing the differences between original values of *R*s, *X*s, and *K*s with the current estimates,
            structured as:
                    *f = [ dR | dX | dK ]*
        """

        # initialise parameters
        mc = HData.idx['cNm']  # cumulative total of match-ups by series
        N_mu = HData.idx['cNm'][-1]  # total match-ups (= number of ks)
        N_var = HData.idx['idx'][-1]  # total variables

        # initialise f (length number of variables + number of ks)
        f = zeros(N_var + N_mu, dtype=float32)
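        # first N_var entries hold the variable residuals, the remaining N_mu entries the k residuals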

        ################################################################################################################
        # 1. Calculate f for values
        ################################################################################################################

        f[0:N_var] = xyza[0:N_var] - HData.values[0:N_var]

        ################################################################################################################
        # 2. Calculate f for ks
        ################################################################################################################

        # Estimate of k, k_est, determined as,
        #
        # k_est =  B(R_2) - B(R_1),
        #
        # where:
        # - R_1/2 - Radiances from sensor 1 and sensor 2 respectively
        # - B - adjustment model

        MatchUpEstimate_NormInd = MatchUp()
        MatchUpEstimate_NormInd.idx = self.HData.idx
        MatchUpEstimate_NormInd._original_idx = self.HData._original_idx
        MatchUpEstimate_NormInd.a = deepcopy(xyza[N_var:])
        MatchUpEstimate_NormInd.unc = self.HData.unc
        MatchUpEstimate_NormInd.unck = self.HData.unck
        MatchUpEstimate_NormInd.w_matrices = self.HData.w_matrices
        MatchUpEstimate_NormInd.u_matrices = self.HData.u_matrices
        MatchUpEstimate_NormInd.sensor_model = self.HData.sensor_model
        MatchUpEstimate_NormInd.sensor_model_constant = self.HData.sensor_model_constant
        MatchUpEstimate_NormInd.adjustment_model = self.HData.adjustment_model
        MatchUpEstimate_NormInd.values = deepcopy(xyza[:N_var])
        MatchUpEstimate_NormInd.ks = zeros(N_mu, dtype=float32)

        MatchUpEstimate = self.Transform2NormIndOp.reverse(
            MatchUpEstimate_NormInd)
        MatchUpEstimate.ks = evaluate_K(MatchUpEstimate)

        # Normalise by uncertainty
        for i in xrange(len(MatchUpEstimate.idx['Im'])):
            istart = MatchUpEstimate.idx['cNm'][i]
            iend = MatchUpEstimate.idx['cNm'][i + 1]

            MatchUpEstimate.ks[istart:iend] /= MatchUpEstimate.unck[i].uR

        f[N_var:] = MatchUpEstimate.ks - HData.ks

        return f
Example #7
    def run(self, MatchUpData, sf=1, samples=None, show=False):
        """
        Return a randomly sampled instance of ``eopy.matchup.matchupIO.MatchUp``

        :type MatchUpData: *eopy.matchup.matchupIO.MatchUp*
        :param MatchUpData: Input match-up data for sampling

        :type sf: int
        :param sf: Sampling factor

        :type samples: int
        :param samples: Number of samples to include per match-up series

        :type show: bool
        :param show: Option to print sampling information

        :return:
            :MatchUpSample: *eopy.matchup.matchupIO.MatchUp*

            Sampled harmonisation data
        """

        # initialise parameters
        mcxyz = MatchUpData.idx['idx']  # cumulative total of variables data block
        mc = MatchUpData.idx['cNm']  # cumulative total of match-ups by series

        ################################################################################################################
        # 1. Determine Match Ups to Include in Sample
        ################################################################################################################

        # Choose a sample of match ups such that data is left with independent errors.
        #
        # N.B. - This is not possible for a fully systematic effect, but data with error correlation structures defined
        #        by w matrices can be sampled to achieve this

        sampling_idxs = {}  # initialise dictionary of sampling indices per match up series

        n_mus = set(MatchUpData.idx['n_mu'])

        # Find sampling indices by match-up series
        for n_mu in n_mus:

            # total number of match-ups in series (should be the same for each variable so take the first one)
            mu_total = [
                MatchUpData.idx['N_var'][i]
                for i, n_mu_i in enumerate(MatchUpData.idx['n_mu'])
                if n_mu_i == n_mu
            ][0]
            mu_samples = mu_total / sf  # required number of sample match-ups (determined by the sampling factor)

            idxs = sorted(sample(arange(mu_total), mu_samples))
            sampling_idxs[n_mu] = idxs  # add match up sample indices to dictionary

        ################################################################################################################
        # 2. Sample Data
        ################################################################################################################

        # a. Initialise sampled harmonisation data product -------------------------------------------------------------
        MatchUpSample = MatchUp()
        MatchUpSample.a = MatchUpData.a[:]
        MatchUpSample.sensor_model = MatchUpData.sensor_model
        MatchUpSample.sensor_model_constant = MatchUpData.sensor_model_constant
        MatchUpSample.adjustment_model = MatchUpData.adjustment_model
        # --------------------------------------------------------------------------------------------------------------

        # b. Update idx attribute of MatchUpSample to describe structure of sampled data -------------------------------

        # Start with copy of full dataset idx dictionary attribute
        # N.B. - deepcopy required so that lists nested in the dict are copied rather than referenced
        MatchUpSample.idx = deepcopy(MatchUpData.idx)

        # Formulate required replacement idx entries (several can remain the same as the full dataset)
        idxs = [0]
        total = 0
        for i, n_mu in enumerate(MatchUpData.idx['n_mu']):
            block_samples = len(sampling_idxs[n_mu])
            MatchUpSample.idx['N_var'][i] = block_samples
            total += block_samples
            idxs.append(int(total))
        MatchUpSample.idx['idx'] = idxs

        cNm = [0]
        total = 0
        for i, n_mu in enumerate(n_mus):
            n_mu_sample = len(sampling_idxs[n_mu])
            MatchUpSample.idx['Nm'][i] = n_mu_sample
            total += n_mu_sample
            cNm.append(total)
        MatchUpSample.idx['cNm'] = cNm

        if show:
            print "Sample Size: ", MatchUpSample.idx['Nm']
        # --------------------------------------------------------------------------------------------------------------

        # c. Sample variables and respective uncertainty ---------------------------------------------------------------

        # Initialise data arrays
        MatchUpSample.values = zeros(MatchUpSample.idx['idx'][-1])
        MatchUpSample.unc = [0] * len(MatchUpSample.idx['n_cov'])

        #  Sample data by data block
        for i, block_unc in enumerate(MatchUpData.unc):
            istart = mcxyz[i]  # start of full dataset values data block
            iend = mcxyz[i + 1]  # end of full dataset values data block
            istart_s = MatchUpSample.idx['idx'][i]  # start of sampled values data block
            iend_s = MatchUpSample.idx['idx'][i + 1]  # end of sampled values data block
            s_idx = sampling_idxs[MatchUpData.idx['n_mu'][i]]  # indices of match ups to sample within values data block

            # i. Sample values
            MatchUpSample.values[istart_s:iend_s] = MatchUpData.values[
                istart:iend][s_idx]

            # ii. Sample values uncertainty data

            if block_unc.form == 'ave':
                # If block error correlation form was defined by a w matrix
                # - now simplified to random error correlation by sampling choice

                # Initialise uncertainty data array
                MatchUpSample.unc[i] = Uncertainty("r", zeros(len(s_idx)))

                # Retrieve required W matrix and uncertainty vector and hence determine new random uncertainties
                w = MatchUpData.w_matrices[block_unc.w_i]
                u = MatchUpData.uncertainty_vectors[block_unc.u_i]
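                # For each sampled match-up, combine the raw uncertainties spanned by its w row into a
                # single random uncertainty (root-sum-square of the corresponding u entries)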
                for j, s_i in enumerate(s_idx):
                    col_start = w.indices[w.indptr[s_i]]
                    col_end = w.indices[w.indptr[s_i + 1] - 1] + 1
                    MatchUpSample.unc[i].uR[j] = npsum(
                        u[col_start:col_end]**2)**0.5

            else:
                # If block error correlation form random or random and systematic simplify to random and sample
                MatchUpSample.unc[i] = Uncertainty(
                    "r", deepcopy(block_unc.uR[s_idx]))
        # --------------------------------------------------------------------------------------------------------------

        # d. sample k --------------------------------------------------------------------------------------------------

        # Initialise k data arrays
        MatchUpSample.ks = zeros(MatchUpSample.idx['cNm'][-1])
        MatchUpSample.unck = [0] * len(MatchUpSample.idx['Nm'])

        # Sample k and respective uncertainty data by match-up series
        for i, mu_unck in enumerate(MatchUpData.unck):
            istart = mc[i]  # start of full dataset k data block
            iend = mc[i + 1]  # end of full dataset k data block
            istart_s = MatchUpSample.idx['cNm'][i]  # start of sampled dataset k data block
            iend_s = MatchUpSample.idx['cNm'][i + 1]  # end of sampled dataset k data block
            s_idx = sampling_idxs[i + 1]  # indices of match ups to sample within k data block

            # i. Sample data
            MatchUpSample.ks[istart_s:iend_s] = MatchUpData.ks[istart:iend][
                s_idx]

            # ii. Sample uncertainties
            MatchUpSample.unck[i] = Uncertainty("r",
                                                deepcopy(mu_unck.uR[s_idx]))
        # --------------------------------------------------------------------------------------------------------------

        # e. sample times ----------------------------------------------------------------------------------------------

        # todo - write sampling of times

        # --------------------------------------------------------------------------------------------------------------

        # f. sample additional variables -------------------------------------------------------------------------------

        # todo - write sampling of additional variables

        # --------------------------------------------------------------------------------------------------------------

        return MatchUpSample
Example #8
    def run(self,
            tol=1e-6,
            tolA=1e-8,
            tolB=1e8,
            tolU=1e-8,
            show=False,
            return_covariance=True):
        """
        Run Gauss-Newton Algorithm to perform harmonisation

        :type tol: float
        :param tol: Tolerance for convergence of GN algorithm

        :type tolA: float
        :param tolA: tolerance tolA for LSMR in GN algorithm

        :type tolB: float
        :param tolB: tolerance tolB for LSMR in GN algorithm

        :type tolU: float
        :param tolU: tolerance for uncertainty calculation convergence (rtol in Minres)

        :type show: bool
        :param show: boolean to decide whether to print algorithm progress to stdout

        :type return_covariance: bool
        :param return_covariance: boolean to decide whether to evaluate the parameter covariance matrix

        :return:
            :a: *numpy.ndarray*

            Estimate of parameters

            :Va: *numpy.ndarray*

            Covariance matrix for the parameter estimates
        """

        if show:
            print "Initial Parameter Estimates:"
            print self.HData.a
            print "Determining Parameters..."

        # Useful parameters
        N_mu = self.HData.idx['cNm'][-1]  # total match-ups
        N_var = self.HData.idx['idx'][-1]  # total number of variables
        N_a = len(self.HData.a)  # number calibration parameters

        ################################################################################################################
        # 1. Gauss Newton Solver
        ################################################################################################################

        # a. Preparation -----------------------------------------------------------------------------------------------
        # i. Iteration parameters
        niter = 0  # counter of iterations
        mxiter = ceil(N_var)  # max number of iterations of GN
        mxiter_lsmr = ceil(N_var)  # max number of iterations of LSMR
        conv = False  # convergence boolean
        GNlog = []

        # ii. Initialise Operators
        # - J LinearOperator
        J = LinearOperator((N_var + N_mu, N_var + N_a),
                           matvec=self.get_JPx,
                           rmatvec=self.get_JPTx)
        # - LSMR Operators
        LSMROp = LSMRFramework(J)

        # Calculate initial cost
        residuals = self.calc_f(self.xyza, self.HData)
        cost_previous = norm(residuals)**2
        # --------------------------------------------------------------------------------------------------------------

        # b. Gauss Newton Iterations -----------------------------------------------------------------------------------
        while (conv is False) and (niter < mxiter):
            # i. GN Step ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

            niter += 1  # Update iteration counter

            # Determine Gauss-Newton step d as solution to linear least-squares
            # problem J*d = -f with the pre-conditioner applied.
            LSMROp.solve(-residuals,
                         damp=0,
                         atol=tolA,
                         btol=tolA,
                         conlim=tolB,
                         itnlim=mxiter_lsmr,
                         show=show)
            d = self.calc_Px(LSMROp.x)

            # Update parameter estimates, as well as residuals, cost and gradient
            self.xyza += d
            residuals = self.calc_f(self.xyza, self.HData)
            cost = norm(residuals)**2
            gradient = 2 * self.get_JPTx(residuals)
            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

            # ii. Test convergence ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            cost_reduction = cost_previous - cost
            cost_reduction_tol = tol * (1 + cost)
            norm_d = norm(d, inf)
            norm_d_tol = (tol**0.5) * (1 + norm(self.xyza, inf))
            norm_g = norm(gradient, inf)
            norm_g_tol = (tol**(1. / 3.)) * (1 + cost)
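            # Convergence (tested below) requires a small non-negative cost reduction together with a
            # small step size and a small gradient norm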

            # Store cost at this iteration
            cost_previous = cost

            # Check for convergence
            if (cost_reduction >= 0) and (cost_reduction < cost_reduction_tol)\
                    and (norm_d < norm_d_tol) and (norm_g <= norm_g_tol):
                conv = True

            # Write log
            GNlog.append([
                niter, cost_reduction, cost_reduction_tol, norm_d, norm_d_tol,
                norm_g, norm_g_tol
            ])
            if show:
                print "\n\t\t\t\tGNlog"
                print "niter\tU1\t\ttol1\t\tU2\t\ttol2\t\tU3\t\ttol3"
                for GN in GNlog:
                    print "{0:2d}\t{1:.2e}\t{2:.2e}\t{3:.2e}\t{4:.2e}\t{5:.2e}\t{6:.2e}"\
                          .format(GN[0], GN[1], GN[2], GN[3], GN[4], GN[5], GN[6])
            # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # --------------------------------------------------------------------------------------------------------------

        # Unpack solution
        a = self.xyza[N_var:]

        if show:
            print "Determined Parameter Estimates:"
            print a

        ################################################################################################################
        # 2. Uncertainty evaluation
        ################################################################################################################

        # Uncertainty evaluation
        parameter_covariance_matrix = zeros((len(a), len(a)))
        if return_covariance:
            if show:
                print 'Determining uncertainty...'

            parameter_covariance_matrix = self.calculate_parameter_covariance_matrix(
                tolU=tolU, show=show)

            if show:
                print "Determined Parameter Covariance Matrix:"
                print parameter_covariance_matrix

        ################################################################################################################
        # 3. Prepare Solution
        ################################################################################################################

        if show:
            print 'Preparing output...'

        MatchUpRes = MatchUp()
        MatchUpRes.idx = self.HData.idx
        MatchUpRes._original_idx = self.HData._original_idx
        MatchUpRes.unc = self.HData.unc
        MatchUpRes.unck = self.HData.unck
        MatchUpRes.w_matrices = self.HData.w_matrices
        MatchUpRes.u_matrices = self.HData.u_matrices
        MatchUpRes.values = residuals[:N_var]
        MatchUpRes.ks = residuals[N_var:]
        MatchUpRes = self.Transform2NormIndOp.reverse(MatchUpRes)

        cost_dof = N_var - N_mu - N_a
        cost_p_value = 0

        # Return fitted systematic errors
        n_uS = max([0] + [
            unc_i.uS_i for unc_i in self.HData.unc
            if (unc_i.typeID == 2) or (unc_i.typeID == 4)
        ])
        systematic_errors = None
        systematic_error_sensors = None
        if n_uS != 0:
            systematic_errors = self.xyza[N_var - n_uS:N_var]

            n_uSs = [
                unc_i.uS_i if (unc_i.typeID == 2) or (unc_i.typeID == 4) else 0
                for unc_i in self.HData.unc
            ]
            systematic_error_sensors = [
                self.HData.idx['sensors'][self.HData.idx['n_sensor'][
                    n_uSs.index(i)]] for i in range(1, n_uS + 1)
            ]

        return a, parameter_covariance_matrix, cost, cost_dof, cost_p_value, MatchUpRes.values, MatchUpRes.ks, \
               systematic_errors, systematic_error_sensors
Example #9
    def run(self, MatchUpData):
        """
        Return a reparameterisation of the input data such that output data are independent quantities with
        uncertainties of unity

        :type MatchUpData: *eopy.matchup.matchupIO.MatchUp*
        :param MatchUpData: Input match-up data for transformation

        :return:
            :MatchUpData: *eopy.matchup.matchupIO.MatchUp*

            Transformed input data
        """

        # Convert data depending on its correlation form to remove correlation:
        # 1. Random Form
        #    No correlation, so no action required. Scale by uncertainty.
        #
        # 2. Random+Systematic Form
        #    Separate random and systematic components:
        #    > random component - scale data by random uncertainty
        #    > systematic component - add 0 value for each block to the end of the final
        #                             block of the covariate
        #
        # 3. Average Form
        #    Simulate raw data used to compute averages (results in n_mu + n - 1 variables
        #    per block, where n_mu is the number of match-ups in the block and n is the size of
        #    the averaging window)
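        #
        #    e.g. (illustrative) - in the average form a w row [0.5, 0.5] maps two raw values x_a, x_b
        #    to the match-up value 0.5*x_a + 0.5*x_b; the transformation below recovers normalised
        #    estimates of x_a and x_b from such averages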

        ################################################################################################################
        # 1. Determine idx for transformed data
        ################################################################################################################

        # Initialise copy of harmonisation idx to update for converted data
        # N.B. deepcopy() required to ensure copy of nested lists in dict
        independent_idx = deepcopy(MatchUpData.idx)

        # a. determine new number of variables per block ---------------------------------------------------------------
        for i, block_unc in enumerate(MatchUpData.unc):

            # i. number of variables remains the same for a block already with independent errors
            if block_unc.typeID == 1:
                pass

            # ii. number of variables for a block with error correlation defined by a w matrix changes to the number
            #     of independent variables it transforms to, i.e. the length of the corresponding u matrix
            elif (block_unc.typeID == 3) or (block_unc.typeID == 4):
                independent_idx['N_var'][i] = MatchUpData.u_matrices[block_unc.u_i].shape[0]

        # iii. Number of systematic error variables to introduce
        n_uS = max([0]+[unc_i.uS_i for unc_i in MatchUpData.unc if (unc_i.typeID == 2) or (unc_i.typeID == 4)])

        # --------------------------------------------------------------------------------------------------------------

        # b. determine new data block indices --------------------------------------------------------------------------
        idxs = [0]
        total = 0
        for N in independent_idx['N_var']:
            total += N
            idxs.append(int(total))

        # Add systematic error variables
        idxs[-1] += n_uS

        independent_idx['idx'] = idxs
        # --------------------------------------------------------------------------------------------------------------

        ################################################################################################################
        # 2. Determine transformed independent normalised data
        ################################################################################################################

        # Initialise transformed data product
        MatchUpNormInd = MatchUp()
        MatchUpNormInd.values = zeros(independent_idx['idx'][-1], dtype=float32)
        MatchUpNormInd.ks = zeros(independent_idx['cNm'][-1])
        MatchUpNormInd.a = MatchUpData.a[:]
        MatchUpNormInd.sensor_model = MatchUpData.sensor_model
        MatchUpNormInd.sensor_model_constant = MatchUpData.sensor_model_constant
        MatchUpNormInd.adjustment_model = MatchUpData.adjustment_model
        MatchUpNormInd.idx = independent_idx
        MatchUpNormInd._original_idx = MatchUpData._original_idx
        MatchUpNormInd.unc = MatchUpData.unc
        MatchUpNormInd.unck = MatchUpData.unck
        MatchUpNormInd.w_matrices = MatchUpData.w_matrices
        MatchUpNormInd.u_matrices = MatchUpData.u_matrices
        MatchUpNormInd.across_track_index1 = MatchUpData.across_track_index1
        MatchUpNormInd.across_track_index2 = MatchUpData.across_track_index2
        MatchUpNormInd.along_track_index1 = MatchUpData.along_track_index1
        MatchUpNormInd.along_track_index2 = MatchUpData.along_track_index2

        # Convert data block by block depending on correlation form
        for i, block_unc in enumerate(MatchUpData.unc):
            istart = MatchUpData.idx['idx'][i]                        # start of original dataset values data block
            iend = istart + int(MatchUpData.idx['N_var'][i])          # end of original dataset values data block
            istart_i = independent_idx['idx'][i]                      # start of transformed dataset values data block
            iend_i = istart_i + int(MatchUpNormInd.idx['N_var'][i])   # end of transformed dataset values data block

            # a. independent type correlation --------------------------------------------------------------------------
            if (block_unc.typeID == 1) or (block_unc.typeID == 2):
                # scale data by uncertainty
                MatchUpNormInd.values[istart_i:iend_i] = MatchUpData.values[istart:iend]/block_unc.uR

            # c. structured type correlation ---------------------------------------------------------------------------
            elif (block_unc.typeID == 3) or (block_unc.typeID == 4):
                # Simulate independent data, X_ind, by determining solution to,
                #       X = W X_ind,
                # where W is the W matrix and X is the original data
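                # Each W row combines a small set of independent raw values, so X_ind is recovered row by
                # row, dividing by the u matrix entries so the recovered values have unit uncertainty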

                # Retrieve required W matrix and u matrix
                w_matrix = MatchUpData.w_matrices[block_unc.w_i]
                u_matrix = MatchUpData.u_matrices[block_unc.u_i]

                encountered_cols = zeros(w_matrix.shape[1], dtype=bool_)

                for i_row, i_values in enumerate(xrange(istart, iend)):

                    row_cols = w_matrix.indices[w_matrix.indptr[i_row]:w_matrix.indptr[i_row+1]]
                    row_encountered_cols = [bool(encountered_cols[row_col]) for row_col in row_cols]
                    row_unencountered_cols_num = row_encountered_cols.count(False)

                    w_row = w_matrix.data[w_matrix.indptr[i_row]:w_matrix.indptr[i_row+1]]
                    n_w = len(row_cols)

                    if row_unencountered_cols_num == n_w:
                        # averaged values
                        for col in row_cols:
                            MatchUpNormInd.values[istart_i+col] = MatchUpData.values[i_values] / u_matrix[col]
                            encountered_cols[col] = True
                        pass

                    elif 0 < row_unencountered_cols_num < n_w:

                        row_idx_e = [(i, col) for i, col in enumerate(row_cols) if encountered_cols[col] == True]
                        row_idx_une = [(i, col) for i, col in enumerate(row_cols) if encountered_cols[col] == False]

                        average_sofar = sum([w_row[col[0]] * MatchUpNormInd.values[istart_i+col[1]] *
                                             u_matrix[col[1]] for col in row_idx_e])
                        weight_remaining = sum([w_row[col[0]] for col in row_idx_une])

                        for col in row_idx_une:
                            MatchUpNormInd.values[istart_i+col[1]] = (MatchUpData.values[i_values] - average_sofar) / weight_remaining / u_matrix[col[1]]
                            encountered_cols[col[1]] = True
                        pass

                    elif row_unencountered_cols_num == 0:
                        pass

            # ----------------------------------------------------------------------------------------------------------

        # d. scale ks --------------------------------------------------------------------------------------------------
        for i in xrange(len(MatchUpData.idx['Im'])):
            istart = MatchUpData.idx['cNm'][i]
            iend = MatchUpData.idx['cNm'][i + 1]

            MatchUpNormInd.ks[istart:iend] = MatchUpData.ks[istart:iend] / MatchUpData.unck[i].uR
        # --------------------------------------------------------------------------------------------------------------

        return MatchUpNormInd
Example #10
    def reverse(self, MatchUpNormInd):
        """
        Return reparameterised input match-up data in its original parameterisation

        :type MatchUpNormInd: *eopy.matchup.matchupIO.MatchUp*
        :param MatchUpNormInd: Transformed match-up data

        :return:
            :MatchUpData: *eopy.matchup.matchupIO.MatchUp*

            Input data with transformation reversed
        """

        # Initialise untransformed data product
        MatchUpData = MatchUp()
        MatchUpData.values = zeros(MatchUpNormInd._original_idx['idx'][-1])
        MatchUpData.ks = zeros(MatchUpNormInd._original_idx['cNm'][-1])
        MatchUpData.a = MatchUpNormInd.a[:]
        MatchUpData.sensor_model = MatchUpNormInd.sensor_model
        MatchUpData.sensor_model_constant = MatchUpNormInd.sensor_model_constant
        MatchUpData.adjustment_model = MatchUpNormInd.adjustment_model
        MatchUpData.idx = MatchUpNormInd._original_idx
        MatchUpData._original_idx = MatchUpNormInd._original_idx

        # todo - review how to better use memory here
        MatchUpData.unc = MatchUpNormInd.unc
        MatchUpData.unck = MatchUpNormInd.unck
        MatchUpData.w_matrices = MatchUpNormInd.w_matrices
        MatchUpData.u_matrices = MatchUpNormInd.u_matrices
        MatchUpData.across_track_index1 = MatchUpNormInd.across_track_index1
        MatchUpData.across_track_index2 = MatchUpNormInd.across_track_index2
        MatchUpData.along_track_index1 = MatchUpNormInd.along_track_index1
        MatchUpData.along_track_index2 = MatchUpNormInd.along_track_index2

        # Required to find systematic errors
        n_uS = max([0]+[unc_i.uS_i for unc_i in MatchUpData.unc if (unc_i.typeID == 2) or (unc_i.typeID == 4)])

        for i, block_unc in enumerate(MatchUpData.unc):

            istart = MatchUpData.idx['idx'][i]  # start of original dataset values data block
            iend = istart + int(MatchUpData.idx['N_var'][i])  # end of original dataset values data block
            istart_i = MatchUpNormInd.idx['idx'][i]  # start of transformed dataset values data block
            iend_i = istart_i + int(MatchUpNormInd.idx['N_var'][i])  # end of transformed dataset values data block

            # a. random correlation - rescale and add to covariate list
            if block_unc.typeID == 1:
                MatchUpData.values[istart:iend] = MatchUpNormInd.values[istart_i:iend_i] * block_unc.uR

            # b. random+systematic correlation - rescale components and recombine
            if block_unc.typeID == 2:

                # get index of required systematic value
                isys = MatchUpNormInd.idx['idx'][-1] - n_uS - 1 + block_unc.uS_i

                MatchUpData.values[istart:iend] = MatchUpNormInd.values[istart_i:iend_i]*block_unc.uR
                MatchUpData.values[istart:iend] += MatchUpNormInd.values[isys]*block_unc.uS

            # c. structured correlation - transform from independent to original variables
            if block_unc.typeID == 3:

                # Retrieve required W matrix and u matrix
                w = MatchUpData.w_matrices[block_unc.w_i]
                u = MatchUpData.u_matrices[block_unc.u_i]

                MatchUpData.values[istart:iend] = w.dot(u*MatchUpNormInd.values[istart_i:iend_i])

            # d. structured+systematic correlation - add sys error, then transform from independent to original variables
            if block_unc.typeID == 4:
                # Retrieve required W matrix and u matrix
                w = MatchUpData.w_matrices[block_unc.w_i]
                u = MatchUpData.u_matrices[block_unc.u_i]
                isys = MatchUpNormInd.idx['idx'][-1] - n_uS - 1 + block_unc.uS_i

                MatchUpData.values[istart:iend] = w.dot(MatchUpNormInd.values[istart_i:iend_i] * u
                                                        + MatchUpNormInd.values[isys] * block_unc.uS)

        # e. rescale ks ------------------------------------------------------------------------------------------------
        for i in xrange(len(MatchUpData.idx['Im'])):
            istart = MatchUpData.idx['cNm'][i]
            iend = MatchUpData.idx['cNm'][i + 1]

            MatchUpData.ks[istart:iend] = MatchUpNormInd.ks[istart:iend] * MatchUpData.unck[i].uR

        return MatchUpData
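A minimal shape sketch of case c above (structured correlation), with hypothetical numbers rather than values from the module: the transformed block holds one independent value per column of W, and the reverse maps it back to one value per row via w.dot(u * transformed_block).

from numpy import array, ones
from scipy.sparse import csr_matrix

# hypothetical 3 x 4 averaging W matrix and per-column uncertainties u
w = csr_matrix(array([[0.5, 0.5, 0.0, 0.0],
                      [0.0, 0.5, 0.5, 0.0],
                      [0.0, 0.0, 0.5, 0.5]]))
u = array([1.0, 2.0, 1.0, 2.0])

values_norm = ones(4)               # transformed (independent, unit-uncertainty) block
values = w.dot(u * values_norm)     # reverse of case c -> array([1.5, 1.5, 1.5])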
Exemple #11
0
    def run(self, MatchUpData, n_samples_mu=20, verbose=False):
        """
        Returning a space filling simulation of the match-ups based on the data ranges an input dataset

        :type MatchUpData: *eopy.matchup.matchupIO.MatchUp*
        :param MatchUpData: Input match-up data

        :type n_samples_mu: int
        :param n_samples_mu: Number of simulation samples per matchup series

        :type

        :return:
            :MatchUpData: *eopy.matchup.matchupIO.MatchUp*

            Simulated matchup data
        """

        # Initialise transformed data product
        MatchUpSimulation = MatchUp()
        MatchUpSimulation.idx = self.return_simulation_idx(
            MatchUpData.idx, n_samples_mu)
        MatchUpSimulation.values = self.return_simulation_x(
            MatchUpData.values, MatchUpData.idx['idx'],
            MatchUpSimulation.idx['idx'])
        MatchUpSimulation.ks = self.return_simulation_x(
            MatchUpData.ks, MatchUpData.idx['cNm'],
            MatchUpSimulation.idx['cNm'])
        MatchUpSimulation.a = MatchUpData.a[:]
        MatchUpSimulation.sensor_model = MatchUpData.sensor_model
        MatchUpSimulation.sensor_model_constant = MatchUpData.sensor_model_constant
        MatchUpSimulation.adjustment_model = MatchUpData.adjustment_model
        MatchUpSimulation._original_idx = MatchUpData._original_idx

        if verbose:
            sensors = MatchUpSimulation.idx['sensors']
            plotted_sensors = []

            i_mu = 0
            while set(plotted_sensors) != set(sensors):

                pair = MatchUpSimulation.idx['Im'][i_mu]
                for i_pair, n_sensor in enumerate(pair):
                    sensor_name = sensors[n_sensor]
                    print sensor_name
                    print "Variable\tMin\t\t\tMax"
                    for cov in range(
                            1,
                            MatchUpSimulation.idx['sensor_ms'][n_sensor] + 1):
                        maximum = max(
                            MatchUpSimulation.getVariableData(
                                cov, sensor_name, i_mu + 1))
                        minimum = min(
                            MatchUpSimulation.getVariableData(
                                cov, sensor_name, i_mu + 1))
                        print str(cov) + "\t\t\t" + str(
                            minimum) + "\t\t" + str(maximum)
                    print "\n"
                    plotted_sensors.append(sensor_name)
                i_mu += 1

        # todo - reduce simulation simplifications: unc/unck, w_matrices/u_matrices and across/along_track_index1/2 are not yet handled
        return MatchUpSimulation
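The helpers return_simulation_idx and return_simulation_x are not shown in this excerpt. As a rough, hypothetical sketch of the space-filling idea described in the docstring, each original data block could be replaced by values evenly spanning that block's range; the function and index layout below are illustrative assumptions, not the library's implementation.

from numpy import array, linspace, zeros

def simulate_block_ranges(values, idx, idx_sim):
    # Illustrative sketch only: fill each simulated block with evenly spaced
    # values spanning the corresponding original block's min..max range
    values_sim = zeros(idx_sim[-1])
    for i in range(len(idx) - 1):
        block = values[idx[i]:idx[i + 1]]
        n_sim = idx_sim[i + 1] - idx_sim[i]
        values_sim[idx_sim[i]:idx_sim[i + 1]] = linspace(block.min(), block.max(), n_sim)
    return values_sim

values = array([5.0, 3.0, 3.0, 2.5, 6.0, 3.0, 2.0, 4.0, 3.0, 4.0])
values_sim = simulate_block_ranges(values, [0, 5, 10], [0, 3, 6])
# first simulated block spans 2.5..6.0, second spans 2.0..4.0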
Exemple #12
0
def return_MatchUpTest___w():
    """
    Return a MatchUp dataset object for testing

    :return:
        :MatchUpTest: *eopy.matchup.matchupIO.MatchUp*

        Test match-up dataset
    """

    ####################################################################################################################
    # 1. Initialise test data
    ####################################################################################################################

    w1 = array([[0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25]])
    w2 = array([[0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
                [0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25, 0.00, 0.00, 0.00, 0.00],
                [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.25, 0.25, 0.25, 0.25]])

    u1 = array(
        [1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0])
    u2 = array(
        [2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0])

    values = array([5.0, 3.0, 3.0, 2.5, 6.0, 3.0, 2.0, 4.0, 3.0, 4.0])
    unc = [Uncertainty(3, (0, 0)), Uncertainty(3, (1, 1))]
    ks = array([1.2, 1.7, 1.3, 1.4, 1.3])
    unck = [Uncertainty(1, array([0.25, 0.25, 0.25, 0.25, 0.25]))]
    idx = {
        "Nm": [5],
        "cNm": [0, 5, 10],
        "Im": [[1, 2]],
        "sensors": [1, 2],
        "sensor_ms": [1],
        "n_sensor": [1, 2],
        "n_mu": [1, 1],
        "n_cov": [1, 1],
        "N_var": [5, 5],
        "idx": [0, 5, 10],
        "Ia": [1, 1, 1, 2, 2, 2]
    }
    a = array([1., 1.3, 0.002, 0.5, 1.1, 0.0005])
    w_matrices = [csr_matrix(w1), csr_matrix(w2)]
    u_matrices = [u1, u2]

    ####################################################################################################################
    # 2. Initialise MatchUp object
    ####################################################################################################################

    MatchUpTest = MatchUp()
    MatchUpTest.values = values
    MatchUpTest.unc = unc
    MatchUpTest.ks = ks
    MatchUpTest.unck = unck
    MatchUpTest.idx = idx
    MatchUpTest.a = a
    MatchUpTest.w_matrices = w_matrices
    MatchUpTest.u_matrices = u_matrices

    return MatchUpTest
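A quick consistency check on the fixture above (hypothetical usage, assuming the test module's own imports of numpy, scipy.sparse, MatchUp and Uncertainty are available): each W matrix maps 13 independent values onto the 5 values of its data block, matching idx['N_var'].

from numpy import ones

MatchUpTest = return_MatchUpTest___w()

w = MatchUpTest.w_matrices[0]   # 5 x 13 sparse averaging matrix for the first data block
u = MatchUpTest.u_matrices[0]   # 13 uncertainties, one per independent value

assert w.shape == (5, 13)
assert len(u) == 13
assert w.dot(u * ones(13)).shape[0] == MatchUpTest.idx['N_var'][0]   # block length 5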