Example #1
    def deform(self, mesh, anchors):

        #t_start = time.time()
        delta = np.array(self.L.dot(mesh.points()))
        #t_end = time.time()
        #print("delta computation time is %.5f seconds." % (t_end - t_start))

        #t_start = time.time()
        # augment delta solution matrix with weighted anchors
        for i in range(self.k):
            delta[self.n + i, :] = self.weight * anchors[i, :]
        #t_end = time.time()
        #print("give anchor value computation time is %.5f seconds." % (t_end - t_start))

        #t_start = time.time()
        # update mesh vertices with least-squares solution
        for i in range(3):
            mesh.points()[:, i] = sparseqr.solve(self.L,
                                                 delta[:, i],
                                                 tolerance=1e-8)
            #mesh.points()[:, i] = lsqr(self.L, delta[:, i])[0]
        #t_end = time.time()
        #print("sparse lsqr time is %.5f seconds." % (t_end - t_start))

        return mesh
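Example #1 above (like #2 and #3 below) follows one recipe: an (n + k) x n system whose top block is the mesh Laplacian and whose bottom k rows pin the anchors, solved once per coordinate with sparseqr.solve. The sketch below reproduces that recipe on a tiny hand-built path-graph Laplacian so it runs without a mesh library; the sizes, anchor indices and weight are made up for illustration.

# Minimal, self-contained sketch of the anchored-Laplacian least-squares pattern.
import numpy as np
import scipy.sparse as sparse
import sparseqr

n, k, weight = 5, 2, 1.0                      # made-up problem size and anchor weight
anchor_idx = np.array([0, 4])                 # hypothetical anchor vertices
points = np.random.rand(n, 3)                 # hypothetical vertex positions

# Uniform (umbrella) Laplacian of a 5-vertex path graph ...
deg = np.array([1.0, 2.0, 2.0, 2.0, 1.0])
adj = sparse.diags([1.0, 1.0], offsets=[-1, 1], shape=(n, n))
L_top = sparse.diags(deg) - adj

# ... plus k weighted anchor rows appended at the bottom.
anchor_rows = sparse.coo_matrix(
    (np.full(k, weight), (np.arange(k), anchor_idx)), shape=(k, n))
L = sparse.vstack([L_top, anchor_rows]).tocsr()          # (n + k) x n

# Right-hand side: differential coordinates, then weighted anchor targets.
delta = np.array(L.dot(points))
delta[n:, :] = weight * points[anchor_idx, :]            # keep the anchors in place

# Solve the over-determined system once per coordinate, as in the examples above.
deformed = np.empty_like(points)
for i in range(3):
    deformed[:, i] = sparseqr.solve(L, delta[:, i], tolerance=1e-8)
print(deformed.shape)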
Example #2
    def deform(self, verts, achr_verts):

        self.L = sparse.coo_matrix((self.V, (self.I, self.J)),
                                   shape=(self.n + self.k, self.n)).tocsr()

        delta = np.array(self.L.dot(verts))

        # augment delta solution matrix with weighted anchors
        for i in range(self.k):
            delta[self.n + i, :] = self.weight * achr_verts[i, :]

        # update mesh vertices with least-squares solution
        deformed_verts = np.zeros(verts.shape)
        for i in range(3):
            deformed_verts[:, i] = sparseqr.solve(self.L,
                                                  delta[:, i],
                                                  tolerance=1e-8)

        return deformed_verts
Example #3
def solveLaplacianMesh(mesh, anchors, anchorsIdx, cotangent=True):
    n = mesh.n_vertices()
    k = anchorsIdx.shape[0]

    operator = (getLaplacianMatrixUmbrella, getLaplacianMatrixCotangent)

    L = operator[1](mesh, anchorsIdx) if cotangent else operator[0](mesh,
                                                                    anchorsIdx)
    delta = np.array(L.dot(mesh.points()))

    # augment delta solution matrix with weighted anchors
    for i in range(k):
        delta[n + i, :] = WEIGHT * anchors[i, :]

    # update mesh vertices with least-squares solution
    for i in range(3):
        #mesh.points()[:, i] = lsqr(L, delta[:, i])[0]
        mesh.points()[:, i] = sparseqr.solve(L, delta[:, i], tolerance=1e-8)

    return mesh
Example #4
    def run(self):
        self.get_patches()
        self.poisson_b = []
        cols_all = []
        vals_all = []
        rows_all = []

        row_global = 0
        for i in self.valid_index:
            [colidx, colvals,
             bvals] = self.build_patch_for_poisson(self.mask_patches[i],
                                                   self.data_patches[i],
                                                   self.index_1d_patches[i])
            self.poisson_b.append(bvals)
            cols_all.append(colidx)
            vals_all.append(colvals)
            rows_all.append(np.ones_like(colidx) * row_global)
            row_global += 1

        rows_all_flat = list(itertools.chain.from_iterable(rows_all))
        cols_all_flat = list(itertools.chain.from_iterable(cols_all))
        vals_all_flat = list(itertools.chain.from_iterable(vals_all))

        self.poisson_A = sparse.coo_matrix(
            (vals_all_flat, (rows_all_flat, cols_all_flat)),
            shape=(row_global, self.valid_num))
        self.poisson_b = np.array(self.poisson_b)

        # depth fusion
        if self.depth_A is not None:
            self.poisson_A = sparse.vstack((self.poisson_A, self.depth_A))
            self.poisson_b = np.hstack((self.poisson_b, self.depth_b))

        depth = sparseqr.solve(self.poisson_A, self.poisson_b, tolerance=0)
        self.depth.reshape(-1)[self.valid_index] = depth
        return self.depth
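The run() method above boils down to: assemble the Poisson matrix from per-patch triplets, optionally stack an extra constraint block underneath it, and hand the result to sparseqr.solve. A rough standalone sketch of just that assembly step, with made-up triplets standing in for build_patch_for_poisson and a single extra row standing in for depth_A / depth_b:

# Rough sketch (made-up triplets): COO assembly + optional stacked constraints.
import numpy as np
import scipy.sparse as sparse
import sparseqr

rows = np.array([0, 0, 1, 1, 2])
cols = np.array([0, 1, 1, 2, 2])
vals = np.array([1.0, -1.0, 1.0, -1.0, 1.0])
b = np.array([0.5, -0.2, 1.0])
A = sparse.coo_matrix((vals, (rows, cols)), shape=(3, 3))

# Optional extra rows (standing in for the depth-fusion block) stacked below.
extra_A = sparse.coo_matrix(([1.0], ([0], [0])), shape=(1, 3))
extra_b = np.array([2.0])
A = sparse.vstack((A, extra_A))
b = np.hstack((b, extra_b))

# tolerance=0 mirrors the call above; see the sparseqr docs for its exact meaning.
x = sparseqr.solve(A, b, tolerance=0)
print(x)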
Example #5
        print "Average time:", total_time / float(n_trials), "sec."
        print "Average error:", total_error / float(n_trials)

    # PySPQR
    if PySPQR == 1:
        total_time = 0
        total_error = 0
        for i in range(n_trials):
            A = scipy.sparse.rand(matrix_size[0],
                                  matrix_size[1],
                                  density=matrix_density)
            x_true = np.random.random(matrix_size[1])
            b = A * x_true
            start = time.perf_counter()
            A = A.tocsr()
            x = sparseqr.solve(A, b, tolerance=1e-4)
            end = time.perf_counter()
            total_time += end - start
            total_error += np.linalg.norm(x - x_true, ord=2)
        print "Average time:", total_time / float(n_trials), "sec."
        print "Average error:", total_error / float(n_trials)

    # CVXPY
    if CVXPY_SCS == 1:
        total_time = 0
        total_error = 0
        for i in range(n_trials):
            A = scipy.sparse.rand(matrix_size[0],
                                  matrix_size[1],
                                  density=matrix_density)
            x_true = np.random.random(matrix_size[1])
Example #6
def smooth_xyt_fit(**kwargs):
    required_fields = ('data', 'W', 'ctr', 'spacing', 'E_RMS')
    args = {
        'reference_epoch': 0,
        'W_ctr': 1e4,
        'mask_file': None,
        'mask_scale': None,
        'compute_E': False,
        'max_iterations': 10,
        'srs_WKT': None,
        'N_subset': None,
        'bias_params': None,
        'repeat_res': None,
        'repeat_dt': 1,
        'Edit_only': False,
        'dzdt_lags': [1, 4],
        'VERBOSE': True
    }
    args.update(kwargs)
    for field in required_fields:
        if field not in kwargs:
            raise ValueError("%s must be defined", field)
    valid_data = np.ones_like(args['data'].x, dtype=bool)
    timing = dict()

    if args['N_subset'] is not None:
        tic = time()
        valid_data = edit_data_by_subset_fit(args['N_subset'], args)
        timing['edit_by_subset'] = time() - tic
        if args['Edit_only']:
            return {
                'timing': timing,
                'data': args['data'].copy().subset(valid_data)
            }
    m = dict()
    E = dict()

    # define the grids
    tic = time()
    bds = {
        coord: args['ctr'][coord] + np.array([-0.5, 0.5]) * args['W'][coord]
        for coord in ('x', 'y', 't')
    }
    grids = dict()
    grids['z0'] = fd_grid([bds['y'], bds['x']],
                          args['spacing']['z0'] * np.ones(2),
                          name='z0',
                          srs_WKT=args['srs_WKT'],
                          mask_file=args['mask_file'])
    grids['dz'] = fd_grid([bds['y'], bds['x'], bds['t']],
                          [args['spacing']['dz'], args['spacing']['dz'],
                           args['spacing']['dt']],
                          col_0=grids['z0'].N_nodes,
                          name='dz',
                          srs_WKT=args['srs_WKT'],
                          mask_file=args['mask_file'])
    grids['z0'].col_N = grids['dz'].col_N
    grids['t'] = fd_grid([bds['t']], [args['spacing']['dt']], name='t')

    # select only the data points that are within the grid bounds
    valid_z0 = grids['z0'].validate_pts((args['data'].coords()[0:2]))
    valid_dz = grids['dz'].validate_pts((args['data'].coords()))
    valid_data = valid_data & valid_dz & valid_z0

    # if repeat_res is given, resample the data to include only repeat data (to within a spatial tolerance of repeat_res)
    if args['repeat_res'] is not None:
        valid_data[valid_data]=valid_data[valid_data] & \
            select_repeat_data(args['data'].copy().subset(valid_data), grids, args['repeat_dt'], args['repeat_res'])

    # subset the data based on the valid mask
    data = args['data'].copy().subset(valid_data)

    # if we have a mask file, use it to subset the data
    # needs to be done after the valid subset because otherwise the interp_mtx for the mask file fails.
    if args['mask_file'] is not None:
        temp = fd_grid([bds['y'], bds['x']],
                       [args['spacing']['z0'], args['spacing']['z0']],
                       name='z0',
                       srs_WKT=args['srs_WKT'],
                       mask_file=args['mask_file'])
        data_mask = lin_op(temp, name='interp_z').interp_mtx(
            data.coords()[0:2]).toCSR().dot(grids['z0'].mask.ravel())
        data_mask[~np.isfinite(data_mask)] = 0
        if np.any(data_mask == 0):
            data.subset(~(data_mask == 0))
            valid_data[valid_data] = ~(data_mask == 0)

    # define the interpolation operator, equal to the sum of the dz and z0 operators
    G_data = lin_op(grids['z0'],
                    name='interp_z').interp_mtx(data.coords()[0:2])
    G_data.add(lin_op(grids['dz'], name='interp_dz').interp_mtx(data.coords()))

    # define the smoothness constraints
    grad2_z0 = lin_op(grids['z0'], name='grad2_z0').grad2(DOF='z0')
    grad2_dz = lin_op(grids['dz'], name='grad2_dzdt').grad2_dzdt(DOF='z',
                                                                 t_lag=1)
    grad_dzdt = lin_op(grids['dz'], name='grad_dzdt').grad_dzdt(DOF='z',
                                                                t_lag=1)
    constraint_op_list = [grad2_z0, grad2_dz, grad_dzdt]
    if 'd2z_dt2' in args['E_RMS'] and args['E_RMS']['d2z_dt2'] is not None:
        d2z_dt2 = lin_op(grids['dz'], name='d2z_dt2').d2z_dt2(DOF='z')
        constraint_op_list.append(d2z_dt2)

    # if bias params are given, create a set of parameters to estimate them
    if args['bias_params'] is not None:
        data, bias_model = assign_bias_ID(data, args['bias_params'])
        G_bias, Gc_bias, Cvals_bias, bias_model = param_bias_matrix(
            data,
            bias_model,
            bias_param_name='bias_ID',
            col_0=grids['dz'].col_N)
        G_data.add(G_bias)
        constraint_op_list.append(Gc_bias)

    # put the equations together
    Gc = lin_op(None, name='constraints').vstack(constraint_op_list)
    N_eq = G_data.N_eq + Gc.N_eq

    # put together all the errors
    Ec = np.zeros(Gc.N_eq)
    root_delta_V_dz = np.sqrt(np.prod(grids['dz'].delta))
    root_delta_A_z0 = np.sqrt(np.prod(grids['z0'].delta))
    Ec[Gc.TOC['rows']['grad2_z0']] = args['E_RMS'][
        'd2z0_dx2'] / root_delta_A_z0 * grad2_z0.mask_for_ind0(
            args['mask_scale'])
    Ec[Gc.TOC['rows']['grad2_dzdt']] = args['E_RMS'][
        'd3z_dx2dt'] / root_delta_V_dz * grad2_dz.mask_for_ind0(
            args['mask_scale'])
    Ec[Gc.TOC['rows']['grad_dzdt']] = args['E_RMS'][
        'd2z_dxdt'] / root_delta_V_dz * grad_dzdt.mask_for_ind0(
            args['mask_scale'])
    if 'd2z_dt2' in args['E_RMS'] and args['E_RMS']['d2z_dt2'] is not None:
        Ec[Gc.TOC['rows']
           ['d2z_dt2']] = args['E_RMS']['d2z_dt2'] / root_delta_V_dz
    if args['bias_params'] is not None:
        Ec[Gc.TOC['rows'][Gc_bias.name]] = Cvals_bias
    Ed = data.sigma.ravel()
    # calculate the inverse square root of the data covariance matrix
    TCinv = sp.dia_matrix((1. / np.concatenate((Ed, Ec)), 0),
                          shape=(N_eq, N_eq))

    # define the right hand side of the equation
    rhs = np.zeros([N_eq])
    rhs[0:data.size] = data.z.ravel()

    # put the fit and constraint matrices together
    Gcoo = sp.vstack([G_data.toCSR(), Gc.toCSR()]).tocoo()
    cov_rows = G_data.N_eq + np.arange(Gc.N_eq)

    # define the matrix that sets dz[reference_epoch]=0 by removing columns from the solution:
    # Identify the rows and columns that match the reference epoch
    temp_r, temp_c = np.meshgrid(np.arange(0, grids['dz'].shape[0]),
                                 np.arange(0, grids['dz'].shape[1]))
    z02_mask = grids['dz'].global_ind([
        temp_r.transpose().ravel(),
        temp_c.transpose().ravel(),
        args['reference_epoch'] + np.zeros_like(temp_r).ravel()
    ])

    # Identify all of the DOFs that do not include the reference epoch
    cols = np.arange(G_data.col_N, dtype='int')
    include_cols = np.setdiff1d(cols, z02_mask)
    # Generate a matrix that has diagonal elements corresponding to all DOFs except the reference epoch.
    # Multiplying this by a matrix with columns for all model parameters yields a matrix with no columns
    # corresponding to the reference epoch.
    Ip_c = sp.coo_matrix((np.ones_like(include_cols),
                          (include_cols, np.arange(include_cols.size))),
                         shape=(Gc.col_N, include_cols.size)).tocsc()

    # eliminate the columns for the model variables that are set to zero
    Gcoo = Gcoo.dot(Ip_c)
    timing['setup'] = time() - tic

    if np.any(data.z > 2500):
        print('outlier!')
    # initialize the book-keeping matrices for the inversion
    m0 = np.zeros(Ip_c.shape[0])
    if "three_sigma_edit" in data.list_of_fields:
        inTSE = np.where(data.three_sigma_edit)[0]
    else:
        inTSE = np.arange(G_data.N_eq, dtype=int)
    if args['VERBOSE']:
        print("initial: %d:" % G_data.r.max())
    tic_iteration = time()
    for iteration in range(args['max_iterations']):
        # build the parsing matrix that removes invalid rows
        Ip_r = sp.coo_matrix(
            (np.ones(Gc.N_eq + inTSE.size),
             (np.arange(Gc.N_eq + inTSE.size), np.concatenate(
                 (inTSE, cov_rows)))),
            shape=(Gc.N_eq + inTSE.size, Gcoo.shape[0])).tocsc()

        m0_last = m0
        if args['VERBOSE']:
            print("starting qr solve for iteration %d" % iteration)
        # solve the equations
        tic = time()
        m0 = Ip_c.dot(
            sparseqr.solve(Ip_r.dot(TCinv.dot(Gcoo)),
                           Ip_r.dot(TCinv.dot(rhs))))
        timing['sparseqr_solve'] = time() - tic

        # quit if the solution is too similar to the previous solution
        if (np.max(np.abs(
            (m0_last - m0)[Gc.TOC['cols']['dz']])) < 0.05) and (iteration > 2):
            break

        # calculate the full data residual
        rs_data = (data.z - G_data.toCSR().dot(m0)) / data.sigma
        # calculate the robust standard deviation of the scaled residuals for the selected data
        sigma_hat = RDE(rs_data[inTSE])
        inTSE_last = inTSE
        # select the data that are within 3*sigma of the solution
        inTSE = np.where(np.abs(rs_data) < 3.0 * np.maximum(1, sigma_hat))[0]
        if args['VERBOSE']:
            print('found %d in TSE, sigma_hat=%3.3f' % (inTSE.size, sigma_hat))
        if (sigma_hat <= 1 or
            (inTSE.size == inTSE_last.size
             and np.all(inTSE_last == inTSE))) and (iteration > 2):
            if args['VERBOSE']:
                print("sigma_hat LT 1, exiting")
            break
    timing['iteration'] = time() - tic_iteration
    inTSE = inTSE_last
    valid_data[valid_data] = (np.abs(rs_data) < 3.0 * np.maximum(1, sigma_hat))
    data.assign(
        {'three_sigma_edit': np.abs(rs_data) < 3.0 * np.maximum(1, sigma_hat)})
    # report the model-based estimate of the data points
    data.assign({'z_est': np.reshape(G_data.toCSR().dot(m0), data.shape)})

    # reshape the components of m to the grid shapes
    m['z0'] = np.reshape(m0[Gc.TOC['cols']['z0']], grids['z0'].shape)
    m['dz'] = np.reshape(m0[Gc.TOC['cols']['dz']], grids['dz'].shape)

    # calculate height rates
    for lag in args['dzdt_lags']:
        this_name = 'dzdt_lag%d' % lag
        m[this_name] = lin_op(grids['dz'], name='dzdt',
                              col_N=G_data.col_N).dzdt(lag=lag).grid_prod(m0)

    # build a matrix that takes the average of the central 20 km of the delta-z grid
    XR = np.mean(grids['z0'].bds[0]) + np.array([-1., 1.]) * args['W_ctr'] / 2.
    YR = np.mean(grids['z0'].bds[1]) + np.array([-1., 1.]) * args['W_ctr'] / 2.
    center_dzbar = lin_op(grids['dz'], name='center_dzbar',
                          col_N=G_data.col_N).vstack([
                              lin_op(grids['dz']).mean_of_bounds(
                                  (XR, YR, [season, season]))
                              for season in grids['dz'].ctrs[2]
                          ])
    G_dzbar = center_dzbar.toCSR()
    # calculate the grid mean of dz
    m['dz_bar'] = G_dzbar.dot(m0)

    # build a matrix that takes the lagged temporal derivative of dzbar (e.g. quarterly dzdt, annual dzdt)
    for lag in args['dzdt_lags']:
        this_name = 'dzdt_bar_lag%d' % lag
        this_op = lin_op(grids['t'], name=this_name).diff(lag=lag).toCSR()
        # calculate the grid mean of dz/dt
        m[this_name] = this_op.dot(m['dz_bar'].ravel())

    # report the parameter biases.  Sorted in order of the parameter bias arguments
    #???
    if args['bias_params'] is not None:
        m['bias'] = parse_biases(m0, bias_model['bias_ID_dict'],
                                 args['bias_params'])

    # report the entire model vector, just in case we want it.

    m['all'] = m0

    # report the geolocation of the output map
    m['extent'] = np.concatenate((grids['z0'].bds[1], grids['z0'].bds[0]))

    # parse the residuals to assess the contributions of the total error:
    # Make the C matrix for the constraints
    TCinv_cov = sp.dia_matrix((1. / Ec, 0), shape=(Gc.N_eq, Gc.N_eq))
    rc = TCinv_cov.dot(Gc.toCSR().dot(m0))
    ru = Gc.toCSR().dot(m0)
    R = dict()
    RMS = dict()
    for eq_type in ['d2z_dt2', 'grad2_z0', 'grad2_dzdt']:
        if eq_type in Gc.TOC['rows']:
            R[eq_type] = np.sum(rc[Gc.TOC['rows'][eq_type]]**2)
            RMS[eq_type] = np.sqrt(np.mean(ru[Gc.TOC['rows'][eq_type]]**2))
    R['data'] = np.sum(((data.z_est - data.z) / data.sigma)**2)
    RMS['data'] = np.sqrt(np.mean((data.z_est - data.z)**2))

    # if we need to compute the errors in the solution, continue
    if args['compute_E']:
        tic = time()
        # take the QZ transform of Gcoo
        z, R, perm, rank = sparseqr.qz(Ip_r.dot(TCinv.dot(Gcoo)),
                                       Ip_r.dot(TCinv.dot(rhs)))
        z = z.ravel()
        R = R.tocsr()
        R.sort_indices()
        R.eliminate_zeros()
        timing['decompose_qz'] = time() - tic

        E0 = np.zeros(R.shape[0])

        # compute Rinv for use in propagating errors.
        # what should the tolerance be?  We will eventually square Rinv and take its
        # row-wise sum.  We care about errors at the cm level, so
        # size(Rinv)*tol^2 = 0.01 -> tol=sqrt(0.01/size(Rinv))~ 1E-4
        tic = time()
        RR, CC, VV, status = inv_tr_upper(R, int(np.prod(R.shape) / 4),
                                          1.e-5)
        # save Rinv as a sparse array.  The syntax perm[RR] undoes the permutation from QZ
        Rinv = sp.coo_matrix((VV, (perm[RR], CC)), shape=R.shape).tocsr()
        timing['Rinv_cython'] = time() - tic
        tic = time()
        E0 = np.sqrt(Rinv.power(2).sum(axis=1))
        timing['propagate_errors'] = time() - tic

        # generate the full E vector.  E0 appears to be an ndarray.
        E0 = np.array(Ip_c.dot(E0)).ravel()
        E['z0'] = np.reshape(E0[Gc.TOC['cols']['z0']], grids['z0'].shape)
        E['dz'] = np.reshape(E0[Gc.TOC['cols']['dz']], grids['dz'].shape)

        # generate the lagged dz errors:

        for lag in args['dzdt_lags']:
            this_name = 'dzdt_lag%d' % lag
            E[this_name] = lin_op(grids['dz'],
                                  name=this_name,
                                  col_N=G_data.col_N).dzdt(lag=lag).grid_error(
                                      Ip_c.dot(Rinv))

            this_name = 'dzdt_bar_lag%d' % lag
            this_op = lin_op(grids['t'], name=this_name).diff(lag=lag).toCSR()
            E[this_name] = np.sqrt(
                (this_op.dot(Ip_c).dot(Rinv)).power(2).sum(axis=1))
        # calculate the grid mean of dz/dt

        # generate the season-to-season errors
        #E['dzdt_qyr']=lin_op(grids['dz'], name='dzdt_1yr', col_N=G_data.col_N).dzdt().grid_error(Ip_c.dot(Rinv))

        # generate the annual errors
        #E['dzdt_1yr']=lin_op(grids['dz'], name='dzdt_1yr', col_N=G_data.col_N).dzdt(lag=4).grid_error(Ip_c.dot(Rinv))

        # generate the grid-mean error
        E['dz_bar'] = np.sqrt(
            (G_dzbar.dot(Ip_c).dot(Rinv)).power(2).sum(axis=1))

        # generate the grid-mean quarterly dzdt error
        #E['dzdt_bar_qyr']=np.sqrt((ddt_qyr.dot(G_dzbar).dot(Ip_c).dot(Rinv)).power(2).sum(axis=1))

        # generate the grid-mean annual dzdt error
        #E['dzdt_bar_1yr']=np.sqrt((ddt_1yr.dot(G_dzbar).dot(Ip_c).dot(Rinv)).power(2).sum(axis=1))

        # report the RGT bias errors.  Sorted by RGT, then by cycle
        if args['bias_params'] is not None:
            E['bias'] = parse_biases(E0, bias_model['bias_ID_dict'],
                                     args['bias_params'])

    TOC = Gc.TOC
    return {
        'm': m,
        'E': E,
        'data': data,
        'grids': grids,
        'valid_data': valid_data,
        'TOC': TOC,
        'R': R,
        'RMS': RMS,
        'timing': timing,
        'E_RMS': args['E_RMS']
    }
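One detail of smooth_xyt_fit that is easy to miss is how dz[reference_epoch] = 0 is enforced: rather than adding constraint rows, the columns belonging to the reference epoch are dropped by multiplying with a tall selection matrix Ip_c, and the reduced solution is mapped back with Ip_c.dot(m0). A toy sketch of that column-elimination trick, with made-up sizes and column indices:

# Toy sketch of the Ip_c column-elimination trick (made-up sizes).
import numpy as np
import scipy.sparse as sp

col_N = 6                                   # total number of model parameters
ref_cols = np.array([2, 3])                 # hypothetical reference-epoch columns
include_cols = np.setdiff1d(np.arange(col_N), ref_cols)

# One column per retained DOF, with a unit entry on the retained row.
Ip_c = sp.coo_matrix((np.ones(include_cols.size),
                      (include_cols, np.arange(include_cols.size))),
                     shape=(col_N, include_cols.size)).tocsc()

G = sp.random(8, col_N, density=0.5, format='csr')
G_reduced = G.dot(Ip_c)                     # same rows, reference-epoch columns removed
print(G_reduced.shape)                      # (8, 4)

# After solving the reduced system, Ip_c maps the solution back to full size,
# re-inserting zeros in the reference-epoch slots.
m_reduced = np.ones(include_cols.size)
m_full = Ip_c.dot(m_reduced)
print(m_full)                               # zeros at columns 2 and 3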
Example #7
def sparsesolve(A, b, eps_w=0, eps_laplace=0):
    A_mod, b_mod = modify_system(A, b, eps_w, eps_laplace)
    return sparseqr.solve(A_mod, b_mod)
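modify_system is not shown in this example; one common reading of the eps_w / eps_laplace arguments is Tikhonov-style damping, i.e. appending scaled identity (or Laplacian) rows to A and matching zeros to b before the QR solve. The sketch below is a hypothetical stand-in built on that assumption, not the project's actual modify_system:

# Hypothetical stand-in for modify_system(): Tikhonov damping by row augmentation.
import numpy as np
import scipy.sparse as sparse
import sparseqr

def modify_system_sketch(A, b, eps_w=0.0):
    """Append sqrt(eps_w) * I rows to A and zeros to b."""
    if eps_w <= 0:
        return A, b
    n = A.shape[1]
    A_aug = sparse.vstack([A, np.sqrt(eps_w) * sparse.identity(n)])
    b_aug = np.concatenate([np.asarray(b).ravel(), np.zeros(n)])
    return A_aug, b_aug

A = sparse.rand(30, 10, density=0.3)
b = np.random.random(30)
A_mod, b_mod = modify_system_sketch(A, b, eps_w=1e-3)
x = sparseqr.solve(A_mod, b_mod)            # damped least-squares solution
print(x.shape)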
Example #8
 def sparsesolve_qr(A, b, eps=0, R=None):
     if eps > 0:
         A, b = modify_system(A, b, eps, R)
     return scipy.sparse.csc_matrix(sparseqr.solve(A, b))
def main():
    ######################
    # Config
    ######################

    # Choose least-squares solver to use:
    #
    #    lsq_solver = "dense"  # LAPACK DGELSD, direct, good for small problems
    #    lsq_solver = "sparse"  # SciPy LSQR, iterative, asymptotically faster, good for large problems
    #    lsq_solver = "optimize"  # general nonlinear optimizer using Trust Region Reflective (trf) algorithm
    #    lsq_solver = "qr"
    #    lsq_solver = "cholesky"
    #    lsq_solver = "sparse_qr"
    lsq_solver = "sparse_qr_solve"

    ######################
    # Load multiscale data
    ######################

    print("Loading measurement data...")

    # measurements are provided on a meshgrid over (Hx, sigxx)

    # data2.mat contains virtual measurements, generated from a multiscale model.

    #    data2 = scipy.io.loadmat("data2.mat")
    #    Hx    = np.squeeze(data2["Hx"])     # 1D array, (M,)
    #    sigxx = np.squeeze(data2["sigxx"])  # 1D array, (N,)
    #    Bx    = data2["Bx"]                 # 2D array, (M, N)
    #    lamxx = data2["lamxx"]              #       --"--
    ##    lamyy = data2["lamyy"]              #       --"--
    ##    lamzz = data2["lamzz"]              #       --"--

    data2 = scipy.io.loadmat("umair_gal_denoised.mat")
    sigxx = -1e6 * np.array([
        0, 1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80
    ][::-1],
                            dtype=np.float64)
    assert sigxx.shape[0] == 18
    Hx = data2["Hval"][0, :]  # same for all sigma, just take the first row
    Bx = data2["Bval"].T
    lamxx = data2["LHval"].T * 1e-6
    Bx = Bx[::-1, :]
    lamxx = lamxx[::-1, :]

    # HACK, fix later (must decouple number of knots from number of data sites)
    ii = np.arange(Hx.shape[0])
    n_newi = 401
    newii = np.linspace(0, ii[-1], n_newi)
    nsigma = sigxx.shape[0]
    fH = scipy.interpolate.interp1d(ii, Hx)
    newB = np.empty((n_newi, Bx.shape[1]), dtype=np.float64)
    newlam = np.empty((n_newi, lamxx.shape[1]), dtype=np.float64)
    for j in range(nsigma):
        fB = scipy.interpolate.interp1d(ii, Bx[:, j])
        newB[:, j] = fB(newii)

        flam = scipy.interpolate.interp1d(ii, lamxx[:, j])
        newlam[:, j] = flam(newii)
    Hx = fH(newii)
    Bx = newB
    lamxx = newlam

    # Order of spline (as-is! 3 = cubic)
    ordr = 3

    # Auxiliary variables (H, sig_xx, sig_xy)
    Hscale = np.max(Hx)
    sscale = np.max(np.abs(sigxx))
    x = Hx / Hscale
    y = sigxx / sscale
    nx = x.shape[0]  # number of grid points, x axis
    ny = y.shape[0]  # number of grid points, y axis

    # Partial derivatives (B, lam_xx, lam_xy) from multiscale model
    #
    # In the magnetostriction components, the multiscale model produces nonzero lamxx at zero stress.
    # We normalize this away for purposes of performing the curve fit.
    #
    dpsi_dx = Bx * Hscale
    dpsi_dy = (lamxx - lamxx[0, :]) * sscale

    ######################
    # Set up splines
    ######################

    print("Setting up splines...")

    # The evaluation algorithm used in bspline.py uses half-open intervals  t_i <= x < t_{i+1}.
    #
    # This causes havoc for evaluation at the end of each interval, because it is actually the start
    # of the next interval.
    #
    # Especially, the end of the last interval is the start of the next (non-existent) interval.
    #
    # We work around this by using a small epsilon to avoid evaluation exactly at t_{i+1} (for the last interval).
    #
    def marginize_end(x):
        out = x.copy()
        out[-1] += 1e-10 * (x[-1] - x[0])
        return out

    # create knots and spline basis
    xknots = splinelab.aptknt(marginize_end(x), ordr)
    yknots = splinelab.aptknt(marginize_end(y), ordr)
    splx = bspline.Bspline(xknots, ordr)
    sply = bspline.Bspline(yknots, ordr)

    # get number of basis functions (perform dummy evaluation and count)
    nxb = len(splx(0.))
    nyb = len(sply(0.))

    # TODO Check if we need to convert input Bx and sigxx to u,v (what is actually stored in the data files?)

    # Create collocation matrices:
    #
    #   A[i,j] = d**deriv_order B_j(tau[i])
    #
    # where d denotes differentiation and B_j is the jth basis function.
    #
    # We place the collocation sites at the points where we have measurements.
    #
    Au = splx.collmat(x)
    Av = sply.collmat(y)
    Du = splx.collmat(x, deriv_order=1)
    Dv = sply.collmat(y, deriv_order=1)

    ######################
    # Assemble system
    ######################

    print("Assembling system...")

    # Assemble the equation system for fitting against data on the partial derivatives of psi.
    #
    # By writing psi in the spline basis,
    #
    #   psi_{ij}       = A^{u}_{ik} A^{v}_{jl} c_{kl}
    #
    # the quantities to be fitted, which are the partial derivatives of psi, become
    #
    #   B_{ij}         = D^{u}_{ik} A^{v}_{jl} c_{kl}
    #   lambda_{xx,ij} = A^{u}_{ik} D^{v}_{jl} c_{kl}
    #
    # Repeated indices are summed over.
    #
    # Column: kl converted to linear index (k = 0,1,...,nxb-1,  l = 0,1,...,nyb-1)
    # Row:    ij converted to linear index (i = 0,1,...,nx-1,   j = 0,1,...,ny-1)
    #
    # (Paavo's notes, Stresses4.pdf)

    nf = 2  # number of unknown fields
    nr = nx * ny  # equation system rows per unknown field
    A = np.empty((nf * nr, nxb * nyb), dtype=np.float64)  # global matrix
    b = np.empty((nf * nr), dtype=np.float64)  # global RHS

    # zero array element detection tolerance
    tol = 1e-6

    I, J, IJ = util.index.genidx((nx, ny))
    K, L, KL = util.index.genidx((nxb, nyb))

    # loop only over rows of the equation system
    for i, j, ij in zip(I, J, IJ):
        A[nf * ij, KL] = Du[i, K] * Av[j, L]
        A[nf * ij + 1, KL] = Au[i, K] * Dv[j, L]

    b[nf * IJ] = dpsi_dx[I, J]  # RHS for B_x
    b[nf * IJ + 1] = dpsi_dy[I, J]  # RHS for lambda_xx

    #    # the above is equivalent to this much slower version:
    #    #
    #    # equation system row
    #    for j in range(ny):
    #        for i in range(nx):
    #            ij = np.ravel_multi_index( (i,j), (nx,ny) )
    #
    #            # equation system column
    #            for l in range(nyb):
    #                for k in range(nxb):
    #                    kl = np.ravel_multi_index( (k,l), (nxb,nyb) )
    #                    A[nf*ij,  kl] = Du[i,k] * Av[j,l]
    #                    A[nf*ij+1,kl] = Au[i,k] * Dv[j,l]
    #
    #            b[nf*ij]   = dpsi_dx[i,j] if abs(dpsi_dx[i,j]) > tol else 0.  # RHS for B_x
    #            b[nf*ij+1] = dpsi_dy[i,j] if abs(dpsi_dy[i,j]) > tol else 0.  # RHS for lambda_xx

    ######################
    # Solve
    ######################

    # Solve the optimal coefficients.

    # Note that we are constructing a potential function from partial derivatives only,
    # so the solution is unique only up to a global additive shift term.
    #
    # Under the hood, numpy.linalg.lstsq uses LAPACK DGELSD:
    #
    #   http://stackoverflow.com/questions/29372559/what-is-the-difference-between-numpy-linalg-lstsq-and-scipy-linalg-lstsq
    #
    # DGELSD accepts also rank-deficient input (rank(A) < min(nrows,ncols)), returning  arg min( ||x||_2 ) ,
    # so we don't need to do anything special to account for this.
    #
    # Same goes for the sparse LSQR.

    # equilibrate row and column norms
    #
    # See documentation of  scipy.sparse.linalg.lsqr,  it requires this to work properly.
    #
    # https://github.com/Technologicat/python-wlsqm
    #
    print("Equilibrating...")
    S = A.copy(order='F')  # the rescaler requires Fortran memory layout
    A = scipy.sparse.csr_matrix(A)  # save memory (dense "A" no longer needed)

    #    eps = 7./3. - 4./3. - 1  # http://stackoverflow.com/questions/19141432/python-numpy-machine-epsilon
    #    print( S.max() * max(S.shape) * eps )  # default zero singular value detection tolerance in np.linalg.matrix_rank()

    #    import wlsqm.utils.lapackdrivers as wul
    #    rs,cs = wul.do_rescale( S, wul.ScalingAlgo.ALGO_DGEEQU )

    #    # row scaling only (for weighting)
    #    with np.errstate(divide='ignore', invalid='ignore'):
    #        rs = np.where( np.abs(b) > tol, 1./b, 1. )
    #    for i in range(S.shape[0]):
    #        S[i,:] *= rs[i]
    #    cs = 1.

    # scale rows corresponding to Bx
    #
    rs = np.ones_like(b)
    rs[nf * IJ] = 2
    for i in range(S.shape[0]):
        S[i, :] *= rs[i]
    cs = 1.

    #    # It seems this is not needed in the 2D problem (fitting error is slightly smaller without it).
    #
    #    # Additional row scaling.
    #    #
    #    # This equilibrates equation weights, but deteriorates the condition number of the matrix.
    #    #
    #    # Note that in a least-squares problem the row weighting *does* matter, because it affects
    #    # the fitting error contribution from the rows.
    #    #
    #    with np.errstate(divide='ignore', invalid='ignore'):
    #        rs2 = np.where( np.abs(b) > tol, 1./b, 1. )
    #    for i in range(S.shape[0]):
    #        S[i,:] *= rs2[i]
    #    rs *= rs2

    #    a = np.abs(rs2)
    #    print( np.min(a), np.mean(a), np.max(a) )

    #    rs = np.asanyarray(rs)
    #    cs = np.asanyarray(cs)
    #    a = np.abs(rs)
    #    print( np.min(a), np.mean(a), np.max(a) )

    b *= rs  # scale RHS accordingly

    #    colnorms = np.linalg.norm(S, ord=np.inf, axis=0)  # sum over rows    -> column norms
    #    rownorms = np.linalg.norm(S, ord=np.inf, axis=1)  # sum over columns -> row    norms
    #    print( "    rescaled column norms min = %g, avg = %g, max = %g" % (np.min(colnorms), np.mean(colnorms), np.max(colnorms)) )
    #    print( "    rescaled row    norms min = %g, avg = %g, max = %g" % (np.min(rownorms), np.mean(rownorms), np.max(rownorms)) )

    print("Solving with algorithm = '%s'..." % (lsq_solver))
    if lsq_solver == "dense":
        print("    matrix shape %s = %d elements" %
              (S.shape, np.prod(S.shape)))
        ret = numpy.linalg.lstsq(S, b)  # c,residuals,rank,singvals
        c = ret[0]

    elif lsq_solver == "sparse":
        S = scipy.sparse.coo_matrix(S)
        print("    matrix shape %s = %d elements; %d nonzeros (%g%%)" %
              (S.shape, np.prod(
                  S.shape), S.nnz, 100. * S.nnz / np.prod(S.shape)))

        ret = scipy.sparse.linalg.lsqr(S, b)
        c, exit_reason, iters = ret[:3]
        if exit_reason != 2:  # 2 = least-squares solution found
            print("WARNING: solver did not converge (exit_reason = %d)" %
                  (exit_reason))
        print("    sparse solver iterations taken: %d" % (iters))

    elif lsq_solver == "optimize":
        # make sparse matrix (faster for dot products)
        S = scipy.sparse.coo_matrix(S)
        print("    matrix shape %s = %d elements; %d nonzeros (%g%%)" %
              (S.shape, np.prod(
                  S.shape), S.nnz, 100. * S.nnz / np.prod(S.shape)))

        def fitting_error(c):
            return S.dot(c) - b

        ret = scipy.optimize.least_squares(fitting_error,
                                           np.ones(S.shape[1],
                                                   dtype=np.float64),
                                           method="trf",
                                           loss="linear")

        c = ret.x
        if ret.status < 1:
            # status codes: https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.optimize.least_squares.html
            print("WARNING: solver did not converge (status = %d)" %
                  (ret.status))

    elif lsq_solver == "qr":
        print("    matrix shape %s = %d elements" %
              (S.shape, np.prod(S.shape)))
        # http://glowingpython.blogspot.fi/2012/03/solving-overdetermined-systems-with-qr.html
        Q, R = np.linalg.qr(S)  # qr decomposition of A
        Qb = (Q.T).dot(b)  # computing Q^T*b (project b onto the range of A)
        #        c = np.linalg.solve(R,Qb) # solving R*x = Q^T*b
        c = scipy.linalg.solve_triangular(R, Qb, check_finite=False)

    elif lsq_solver == "cholesky":
        # S is rank-deficient by one, because we are solving a potential based on data on its partial derivatives.
        #
        # Before solving, force S to have full rank by fixing one coefficient.
        #
        S[0, :] = 0.
        S[0, 0] = 1.
        b[0] = 1.
        rs[0] = 1.
        S = scipy.sparse.csr_matrix(S)
        print("    matrix shape %s = %d elements; %d nonzeros (%g%%)" %
              (S.shape, np.prod(
                  S.shape), S.nnz, 100. * S.nnz / np.prod(S.shape)))

        # Be sure to use the new sksparse from
        #
        #   https://github.com/scikit-sparse/scikit-sparse
        #
        # instead of the old scikits.sparse (which will fail with an error).
        #
        # Requires libsuitesparse-dev for CHOLMOD headers.
        #
        from sksparse.cholmod import cholesky_AAt
        # Notice that CHOLMOD computes AA' and we want M'M, so we must set A = M'!
        factor = cholesky_AAt(S.T)
        c = factor.solve_A(S.T * b)

    elif lsq_solver == "sparse_qr":
        # S is rank-deficient by one, because we are solving a potential based on data on its partial derivatives.
        #
        # Before solving, force S to have full rank by fixing one coefficient;
        # otherwise the linear solve step will fail because R will be exactly singular.
        #
        S[0, :] = 0.
        S[0, 0] = 1.
        b[0] = 1.
        rs[0] = 1.
        S = scipy.sparse.coo_matrix(S)
        print("    matrix shape %s = %d elements; %d nonzeros (%g%%)" %
              (S.shape, np.prod(
                  S.shape), S.nnz, 100. * S.nnz / np.prod(S.shape)))

        # pip install sparseqr
        # or https://github.com/yig/PySPQR
        #
        # Works like MATLAB's [Q,R,e] = qr(...):
        #
        # https://se.mathworks.com/help/matlab/ref/qr.html
        #
        # [Q,R,E] = qr(A) or [Q,R,E] = qr(A,'matrix') produces unitary Q, upper triangular R and a permutation matrix E
        # so that A*E = Q*R. The column permutation E is chosen to reduce fill-in in R.
        #
        # [Q,R,e] = qr(A,'vector') returns the permutation information as a vector instead of a matrix.
        # That is, e is a row vector such that A(:,e) = Q*R.
        #
        import sparseqr
        print("    performing sparse QR decomposition...")
        Q, R, E, rank = sparseqr.qr(S)

        # produce reduced QR (for least-squares fitting)
        #
        # - cut away bottom part of R (zeros!)
        # - cut away the corresponding far-right part of Q
        #
        # see
        #    np.linalg.qr
        #    https://andreask.cs.illinois.edu/cs357-s15/public/demos/06-qr-applications/Solving%20Least-Squares%20Problems.html
        #
        #        # inefficient way:
        #        k = min(S.shape)
        #        R = scipy.sparse.csr_matrix( R.A[:k,:] )
        #        Q = scipy.sparse.csr_matrix( Q.A[:,:k] )

        print("    reducing matrices...")
        # somewhat more efficient way:
        k = min(S.shape)
        R = R.tocsr()[:k, :]
        Q = Q.tocsc()[:, :k]

        #        # maybe somewhat efficient way: manipulate data vectors, create new coo matrix
        #        #
        #        # (incomplete, needs work; need to shift indices of rows/cols after the removed ones)
        #        #
        #        k    = min(S.shape)
        #        mask = np.nonzero( R.row < k )[0]
        #        R = scipy.sparse.coo_matrix( ( R.data[mask], (R.row[mask], R.col[mask]) ), shape=(k,k) )
        #        mask = np.nonzero( Q.col < k )[0]
        #        Q = scipy.sparse.coo_matrix( ( Q.data[mask], (Q.row[mask], Q.col[mask]) ), shape=(k,k) )

        print("    solving...")
        Qb = (Q.T).dot(b)
        x = scipy.sparse.linalg.spsolve(R, Qb)
        c = np.empty_like(x)
        c[E] = x[:]  # apply inverse permutation

    elif lsq_solver == "sparse_qr_solve":
        S[0, :] = 0.
        S[0, 0] = 1.
        b[0] = 1.
        rs[0] = 1.
        S = scipy.sparse.coo_matrix(S)
        print("    matrix shape %s = %d elements; %d nonzeros (%g%%)" %
              (S.shape, np.prod(
                  S.shape), S.nnz, 100. * S.nnz / np.prod(S.shape)))

        import sparseqr
        c = sparseqr.solve(S, b)

    else:
        raise ValueError("unknown solver '%s'; valid: 'dense', 'sparse'" %
                         (lsq_solver))

    c *= cs  # undo column scaling in solution

    # now c contains the spline coefficients, c_{kl}, where kl has been raveled into a linear index.

    ######################
    # Save
    ######################

    filename = "tmp_s2d.mat"
    L = locals()
    data = {
        key: L[key]
        for key in ["ordr", "xknots", "yknots", "c", "Hscale", "sscale"]
    }
    scipy.io.savemat(filename, data, format='5', oned_as='row')

    ######################
    # Plot
    ######################

    print("Visualizing...")

    # unpack results onto meshgrid
    #
    fitted = A.dot(
        c
    )  # function values corresponding to each row in the global equation system
    X, Y = np.meshgrid(
        Hx, sigxx,
        indexing='ij')  # indexed like X[i,j]  (i is x index, j is y index)
    Z_Bx = np.empty_like(X)
    Z_lamxx = np.empty_like(X)

    Z_Bx[I, J] = fitted[nf * IJ]
    Z_lamxx[I, J] = fitted[nf * IJ + 1]

    #    # the above is equivalent to:
    #    for ij in range(nr):
    #        i,j = np.unravel_index( ij, (nx,ny) )
    #        Z_Bx[i,j]    = fitted[nf*ij]
    #        Z_lamxx[i,j] = fitted[nf*ij+1]

    data_Bx = {
        "x": (X, r"$H_{x}$"),
        "y": (Y, r"$\sigma_{xx}$"),
        "z": (Z_Bx / Hscale, r"$B_{x}$")
    }

    data_lamxx = {
        "x": (X, r"$H_{x}$"),
        "y": (Y, r"$\sigma_{xx}$"),
        "z": (Z_lamxx / sscale, r"$\lambda_{xx}$")
    }

    def relerr(data, refdata):
        refdata_linview = refdata.reshape(-1)
        return 100. * np.linalg.norm(refdata_linview - data.reshape(-1)
                                     ) / np.linalg.norm(refdata_linview)

    plt.figure(1)
    plt.clf()
    ax = util.plot.plot_wireframe(data_Bx, legend_label="Spline", figno=1)
    ax.plot_wireframe(X, Y, dpsi_dx / Hscale, label="Multiscale", color="r")
    plt.legend(loc="best")
    print("B_x relative error %g%%" % (relerr(Z_Bx, dpsi_dx)))

    plt.figure(2)
    plt.clf()
    ax = util.plot.plot_wireframe(data_lamxx, legend_label="Spline", figno=2)
    ax.plot_wireframe(X, Y, dpsi_dy / sscale, label="Multiscale", color="r")
    plt.legend(loc="best")
    print("lambda_{xx} relative error %g%%" % (relerr(Z_lamxx, dpsi_dy)))

    # match the grid point numbering used in MATLAB version of this script
    #
    def t(A):
        return np.transpose(A, [1, 0])

    dpsi_dx = t(dpsi_dx)
    Z_Bx = t(Z_Bx)
    dpsi_dy = t(dpsi_dy)
    Z_lamxx = t(Z_lamxx)

    plt.figure(3)
    plt.clf()
    ax = plt.subplot(1, 1, 1)
    ax.plot(dpsi_dx.reshape(-1) / Hscale,
            'ro',
            markersize='2',
            label="Multiscale")
    ax.plot(Z_Bx.reshape(-1) / Hscale, 'ko', markersize='2', label="Spline")
    ax.set_xlabel("Grid point number")
    ax.set_ylabel(r"$B_{x}$")
    plt.legend(loc="best")

    plt.figure(4)
    plt.clf()
    ax = plt.subplot(1, 1, 1)
    ax.plot(dpsi_dy.reshape(-1) / sscale,
            'ro',
            markersize='2',
            label="Multiscale")
    ax.plot(Z_lamxx.reshape(-1) / sscale, 'ko', markersize='2', label="Spline")
    ax.set_xlabel("Grid point number")
    ax.set_ylabel(r"$\lambda_{xx}$")
    plt.legend(loc="best")

    print("All done.")
Example #10
 def solve(self, vector):
     return sparseqr.solve(self.matrix, vector)
Example #11
def iterate_fit(data, Gcoo, rhs, TCinv, G_data, Gc, in_TSE, Ip_c, timing, args,\
                bias_model=None):
    cov_rows = G_data.N_eq + np.arange(Gc.N_eq)

    #print(f"iterate_fit: G.shape={Gcoo.shape}, G.nnz={Gcoo.nnz}, data.shape={data.shape}", flush=True)
    in_TSE_original = np.zeros(data.shape, dtype=bool)
    in_TSE_original[in_TSE] = True

    min_tse_iterations = 2
    if args['bias_nsigma_iteration'] is not None:
        min_tse_iterations = np.max(
            [min_tse_iterations, args['bias_nsigma_iteration'] + 1])

    for iteration in range(args['max_iterations']):
        # build the parsing matrix that removes invalid rows
        Ip_r=sp.coo_matrix((np.ones(Gc.N_eq+in_TSE.size), \
                            (np.arange(Gc.N_eq+in_TSE.size), \
                             np.concatenate((in_TSE, cov_rows)))), \
                           shape=(Gc.N_eq+in_TSE.size, Gcoo.shape[0])).tocsc()

        m0_last = np.zeros(Ip_c.shape[0])

        if args['VERBOSE']:
            print("starting qr solve for iteration %d at %s" %
                  (iteration, ctime()),
                  flush=True)
        # solve the equations
        tic = time()
        m0 = Ip_c.dot(
            sparseqr.solve(Ip_r.dot(TCinv.dot(Gcoo)),
                           Ip_r.dot(TCinv.dot(rhs))))
        timing['sparseqr_solve'] = time() - tic

        # calculate the full data residual
        rs_data = (data.z - G_data.toCSR().dot(m0)) / data.sigma
        # calculate the robust standard deviation of the scaled residuals for the selected data
        sigma_hat = RDE(rs_data[in_TSE])

        # select the data that have scaled residuals < 3 *max(1, sigma_hat)
        in_TSE_last = in_TSE
        ###testing
        in_TSE = (np.abs(rs_data) < 3.0 * np.maximum(1, sigma_hat))

        # if bias_nsigma_edit is specified, check for biases that are more than
        # args['bias_nsigma_edit'] times their expected values.
        if args['bias_nsigma_edit'] is not None and iteration >= args[
                'bias_nsigma_iteration']:
            if 'edited' not in bias_model['bias_param_dict']:
                bias_model['bias_param_dict']['edited'] = np.zeros_like(
                    bias_model['bias_param_dict']['ID'], dtype=bool)
            bias_dict, slope_bias_dict = parse_biases(m0, bias_model,
                                                      args['bias_params'])
            bad_bias_IDs=np.array(bias_dict['ID'])\
                [(np.abs(bias_dict['val']) > args['bias_nsigma_edit'] * np.array(bias_dict['expected'])\
                                                                      * np.maximum(1, sigma_hat)) \
                 | bias_model['bias_param_dict']['edited']]
            print(bad_bias_IDs)
            for ID in bad_bias_IDs:
                mask = np.ones(data.size, dtype=bool)
                #Mark the ID as edited (because it will have a bias estimate of zero in subsequent iterations)
                bias_model['bias_param_dict']['edited'][
                    bias_model['bias_param_dict']['ID'].index(ID)] = True
                # mark all data associated with the ID as invalid
                for field, field_val in bias_model['bias_ID_dict'][ID].items():
                    if field in data.fields:
                        mask &= (getattr(data, field).ravel() == field_val)
                in_TSE[mask == 1] = 0
        if 'editable' in data.fields:
            in_TSE[data.editable == 0] = in_TSE_original[data.editable == 0]
        in_TSE = np.flatnonzero(in_TSE)

        if args['DEM_tol'] is not None:
            in_TSE = check_data_against_DEM(in_TSE, data, m0, G_data,
                                            args['DEM_tol'])

        # quit if the solution is too similar to the previous solution
        if (np.max(np.abs((m0_last - m0)[Gc.TOC['cols']['dz']])) <
                args['converge_tol_dz']) and (iteration > 2):
            if args['VERBOSE']:
                print(
                    "Solution identical to previous iteration with tolerance %3.1f, exiting after iteration %d"
                    % (args['converge_tol_dz'], iteration))
            break
        # select the data that are within 3*sigma of the solution
        if args['VERBOSE']:
            print('found %d in TSE, sigma_hat=%3.3f, dt=%3.0f' %
                  (in_TSE.size, sigma_hat, timing['sparseqr_solve']),
                  flush=True)
        if iteration > 0:
            if in_TSE.size == in_TSE_last.size and np.all(
                    in_TSE_last == in_TSE):
                if args['VERBOSE']:
                    print("filtering unchanged, exiting after iteration %d" %
                          iteration)
                break
        if iteration >= min_tse_iterations:
            if sigma_hat <= 1:
                if args['VERBOSE']:
                    print("sigma_hat LT 1, exiting after iteration %d" %
                          iteration,
                          flush=True)
                break
    return m0, sigma_hat, in_TSE, in_TSE_last, rs_data
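iterate_fit (and the smooth_xyt_fit loops above) implements an iteratively re-edited least squares: solve, compute scaled residuals, estimate a robust spread, keep only the data with |residual| < 3 * max(1, sigma_hat), and stop when the kept set stops changing. The stripped-down sketch below uses synthetic data and a simple percentile-based spread estimate as a stand-in for the RDE() function used in the source:

# Stripped-down sketch of the three-sigma editing loop (synthetic data;
# robust_spread() is a stand-in for RDE()).
import numpy as np
import scipy.sparse
import sparseqr

rng = np.random.default_rng(0)
n_data, n_par = 200, 5
G = scipy.sparse.csr_matrix(rng.normal(size=(n_data, n_par)))
m_true = rng.normal(size=n_par)
sigma = np.full(n_data, 0.1)
z = G.dot(m_true) + sigma * rng.normal(size=n_data)
z[:10] += 5.0                              # a few gross outliers

def robust_spread(r):                      # half the 16th-84th percentile range
    lo, hi = np.percentile(r, [16, 84])
    return 0.5 * (hi - lo)

keep = np.arange(n_data)
for iteration in range(10):
    W = scipy.sparse.diags(1.0 / sigma[keep])
    m0 = sparseqr.solve(W.dot(G[keep, :]), W.dot(z[keep]), tolerance=1e-8)
    rs = (z - G.dot(m0)) / sigma           # scaled residuals for *all* data
    sigma_hat = robust_spread(rs[keep])
    keep_last = keep
    keep = np.flatnonzero(np.abs(rs) < 3.0 * np.maximum(1, sigma_hat))
    if keep.size == keep_last.size and np.all(keep == keep_last):
        break

print(iteration, keep.size, np.abs(m0 - m_true).max())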
Example #12
def smooth_xyt_fit(**kwargs):
    required_fields=('data','W','ctr','spacing','E_RMS')
    args={'reference_epoch':0,
    'W_ctr':1e4,
    'mask_file':None,
    'mask_scale':None,
    'compute_E':False,
    'max_iterations':10,
    'srs_proj4': None,
    'N_subset': None,
    'bias_params': None,
    'repeat_res':None,
    'converge_tol_dz':0.05,
    'repeat_dt': 1,
    'Edit_only': False,
    'dzdt_lags':[1, 4],
    'data_slope_sensors':None,
    'E_slope':0.05,
    'VERBOSE': True}
    args.update(kwargs)
    for field in required_fields:
        if field not in kwargs:
            raise ValueError("%s must be defined", field)
    valid_data = np.isfinite(args['data'].z) & np.isfinite(args['data'].sigma)
    timing=dict()

    if args['N_subset'] is not None:
        tic=time()
        valid_data &= edit_data_by_subset_fit(args['N_subset'], args)
        timing['edit_by_subset']=time()-tic
        if args['Edit_only']:
            return {'timing':timing, 'data':args['data'].copy()[valid_data]}
    m={}
    E={}
    R={}
    RMS={}

    # define the grids
    tic=time()
    bds={coord:args['ctr'][coord]+np.array([-0.5, 0.5])*args['W'][coord] for coord in ('x','y','t')}
    grids=dict()
    grids['z0']=fd_grid( [bds['y'], bds['x']], args['spacing']['z0']*np.ones(2),\
         name='z0', srs_proj4=args['srs_proj4'], mask_file=args['mask_file'])
    grids['dz']=fd_grid( [bds['y'], bds['x'], bds['t']], \
        [args['spacing']['dz'], args['spacing']['dz'], args['spacing']['dt']], \
         name='dz', col_0=grids['z0'].N_nodes, srs_proj4=args['srs_proj4'], \
        mask_file=args['mask_file'])
    grids['z0'].col_N=grids['dz'].col_N
    grids['t']=fd_grid([bds['t']], [args['spacing']['dt']], name='t')

    # select only the data points that are within the grid bounds
    valid_z0=grids['z0'].validate_pts((args['data'].coords()[0:2]))
    valid_dz=grids['dz'].validate_pts((args['data'].coords()))
    valid_data=valid_data & valid_dz & valid_z0
    
    if not np.any(valid_data):
        return {'m':m, 'E':E, 'data':None, 'grids':grids, 'valid_data': valid_data, 'TOC':{},'R':{}, 'RMS':{}, 'timing':timing,'E_RMS':args['E_RMS']}

    # if repeat_res is given, resample the data to include only repeat data (to within a spatial tolerance of repeat_res)
    if args['repeat_res'] is not None:
        N_before_repeat=np.sum(valid_data)   
        valid_data[valid_data]=valid_data[valid_data] & \
            select_repeat_data(args['data'].copy_subset(valid_data), grids, args['repeat_dt'], args['repeat_res'], reference_time=grids['t'].ctrs[0][args['reference_epoch']])
        if args['VERBOSE']:
            print("before repeat editing found %d data" % N_before_repeat)
            print("after repeat editing found %d data" % valid_data.sum())

    # subset the data based on the valid mask
    data=args['data'].copy_subset(valid_data)

    # if we have a mask file, use it to subset the data
    # needs to be done after the valid subset because otherwise the interp_mtx for the mask file fails.
    if args['mask_file'] is not None:
        temp=fd_grid( [bds['y'], bds['x']], [args['spacing']['z0'], args['spacing']['z0']], name='z0', srs_proj4=args['srs_proj4'], mask_file=args['mask_file'])
        data_mask=lin_op(temp, name='interp_z').interp_mtx(data.coords()[0:2]).toCSR().dot(grids['z0'].mask.ravel())
        data_mask[~np.isfinite(data_mask)]=0
        if np.any(data_mask==0):
            data.index(~(data_mask==0))
            valid_data[valid_data]= ~(data_mask==0)

    # Check if we have any data.  If not, quit
    if data.size==0:
        return {'m':m, 'E':E, 'data':data, 'grids':grids, 'valid_data': valid_data, 'TOC':{},'R':{}, 'RMS':{}, 'timing':timing,'E_RMS':args['E_RMS']}

    # define the interpolation operator, equal to the sum of the dz and z0 operators
    G_data=lin_op(grids['z0'], name='interp_z').interp_mtx(data.coords()[0:2])
    G_data.add(lin_op(grids['dz'], name='interp_dz').interp_mtx(data.coords()))

     # define the smoothness constraints
    grad2_z0=lin_op(grids['z0'], name='grad2_z0').grad2(DOF='z0')
    grad2_dz=lin_op(grids['dz'], name='grad2_dzdt').grad2_dzdt(DOF='z', t_lag=1)
    grad_dzdt=lin_op(grids['dz'], name='grad_dzdt').grad_dzdt(DOF='z', t_lag=1)
    constraint_op_list=[grad2_z0, grad2_dz, grad_dzdt]
    if 'd2z_dt2' in args['E_RMS'] and args['E_RMS']['d2z_dt2'] is not None:
        d2z_dt2=lin_op(grids['dz'], name='d2z_dt2').d2z_dt2(DOF='z')
        constraint_op_list.append(d2z_dt2)

    # if bias params are given, create a set of parameters to estimate them
    if args['bias_params'] is not None:
        data, bias_model=assign_bias_ID(data, args['bias_params'])
        G_bias, Gc_bias, Cvals_bias, bias_model=\
            param_bias_matrix(data, bias_model, bias_param_name='bias_ID', 
                              col_0=grids['dz'].col_N)
        G_data.add(G_bias)
        constraint_op_list.append(Gc_bias)

    if args['data_slope_sensors'] is not None:
        bias_model['E_slope']=args['E_slope']
        G_slope_bias, Gc_slope_bias, Cvals_slope_bias, bias_model= data_slope_bias(data,  bias_model, sensors=args['data_slope_sensors'],  col_0=G_data.col_N)
        G_data.add(G_slope_bias)
        constraint_op_list.append(Gc_slope_bias)
    # put the equations together
    Gc=lin_op(None, name='constraints').vstack(constraint_op_list)
    N_eq=G_data.N_eq+Gc.N_eq

    # put together all the errors
    Ec=np.zeros(Gc.N_eq)
    root_delta_V_dz=np.sqrt(np.prod(grids['dz'].delta))
    root_delta_A_z0=np.sqrt(np.prod(grids['z0'].delta))
    Ec[Gc.TOC['rows']['grad2_z0']]=args['E_RMS']['d2z0_dx2']/root_delta_A_z0*grad2_z0.mask_for_ind0(args['mask_scale'])
    Ec[Gc.TOC['rows']['grad2_dzdt']]=args['E_RMS']['d3z_dx2dt']/root_delta_V_dz*grad2_dz.mask_for_ind0(args['mask_scale'])
    Ec[Gc.TOC['rows']['grad_dzdt']]=args['E_RMS']['d2z_dxdt']/root_delta_V_dz*grad_dzdt.mask_for_ind0(args['mask_scale'])
    if 'd2z_dt2' in args['E_RMS'] and args['E_RMS']['d2z_dt2'] is not None:
        Ec[Gc.TOC['rows']['d2z_dt2']]=args['E_RMS']['d2z_dt2']/root_delta_V_dz
    if args['bias_params'] is not None:
        Ec[Gc.TOC['rows'][Gc_bias.name]] = Cvals_bias
    if args['data_slope_sensors'] is not None:
        Ec[Gc.TOC['rows'][Gc_slope_bias.name]] = Cvals_slope_bias
    Ed=data.sigma.ravel()
    # calculate the inverse square root of the data covariance matrix
    TCinv=sp.dia_matrix((1./np.concatenate((Ed, Ec)), 0), shape=(N_eq, N_eq))

    # define the right hand side of the equation
    rhs=np.zeros([N_eq])
    rhs[0:data.size]=data.z.ravel()

    # put the fit and constraint matrices together
    Gcoo=sp.vstack([G_data.toCSR(), Gc.toCSR()]).tocoo()
    cov_rows=G_data.N_eq+np.arange(Gc.N_eq)
     
    # build a matrix that takes the average of the center of the delta-z grid
    # this gets used both in the averaging and error-calculation codes
    XR=np.mean(grids['z0'].bds[0])+np.array([-1., 1.])*args['W_ctr']/2.
    YR=np.mean(grids['z0'].bds[1])+np.array([-1., 1.])*args['W_ctr']/2.
    center_dzbar=lin_op(grids['dz'], name='center_dzbar', col_N=G_data.col_N).vstack([lin_op(grids['dz']).mean_of_bounds((XR, YR, [season, season] )) for season in grids['dz'].ctrs[2]])
    G_dzbar=center_dzbar.toCSR()

    # define the matrix that sets dz[reference_epoch]=0 by removing columns from the solution:
    # Find the rows and columns that match the reference epoch
    temp_r, temp_c=np.meshgrid(np.arange(0, grids['dz'].shape[0]), np.arange(0, grids['dz'].shape[1]))
    z02_mask=grids['dz'].global_ind([temp_r.transpose().ravel(), temp_c.transpose().ravel(),\
                  args['reference_epoch']+np.zeros_like(temp_r).ravel()])

    # Identify all of the DOFs that do not include the reference epoch
    cols=np.arange(G_data.col_N, dtype='int')
    include_cols=np.setdiff1d(cols, z02_mask)
    # Generate a matrix that has diagonal elements corresponding to all DOFs except the reference epoch.
    # Multiplying this by a matrix with columns for all model parameters yields a matrix with no columns
    # corresponding to the reference epoch.
    Ip_c=sp.coo_matrix((np.ones_like(include_cols), (include_cols, np.arange(include_cols.size))), \
                       shape=(Gc.col_N, include_cols.size)).tocsc()

    # eliminate the columns for the model variables that are set to zero
    Gcoo=Gcoo.dot(Ip_c)
    timing['setup']=time()-tic

    # initialize the book-keeping matrices for the inversion
    m0=np.zeros(Ip_c.shape[0])
    if "three_sigma_edit" in data.fields:
        inTSE=np.flatnonzero(data.three_sigma_edit)
    else:
        inTSE=np.arange(G_data.N_eq, dtype=int)
    inTSE_last = np.zeros([0])
    if args['VERBOSE']:
        print("initial: %d:" % G_data.r.max())
    tic_iteration=time()
    for iteration in range(args['max_iterations']):
        # build the parsing matrix that removes invalid rows
        Ip_r=sp.coo_matrix((np.ones(Gc.N_eq+inTSE.size), (np.arange(Gc.N_eq+inTSE.size), np.concatenate((inTSE, cov_rows)))), \
                           shape=(Gc.N_eq+inTSE.size, Gcoo.shape[0])).tocsc()

        m0_last=m0
        if args['VERBOSE']:
            print("starting qr solve for iteration %d" % iteration)
        # solve the equations
        tic=time(); 
        m0=Ip_c.dot(sparseqr.solve(Ip_r.dot(TCinv.dot(Gcoo)), Ip_r.dot(TCinv.dot(rhs)))); 
        timing['sparseqr_solve']=time()-tic

        # calculate the full data residual
        rs_data=(data.z-G_data.toCSR().dot(m0))/data.sigma
        # calculate the robust standard deviation of the scaled residuals for the selected data
        sigma_hat=RDE(rs_data[inTSE])
        
        # select the data that have scaled residuals < 3 *max(1, sigma_hat)
        inTSE_last=inTSE
        inTSE = np.flatnonzero(np.abs(rs_data) < 3.0 * np.maximum(1, sigma_hat))
        
        # quit if the solution is too similar to the previous solution
        if (np.max(np.abs((m0_last-m0)[Gc.TOC['cols']['dz']])) < args['converge_tol_dz']) and (iteration > 2):
            if args['VERBOSE']:
                print("Solution identical to previous iteration with tolerance %3.1f, exiting after iteration %d" % (args['converge_tol_dz'], iteration))
            break
        # select the data that are within 3*sigma of the solution
        if args['VERBOSE']:
            print('found %d in TSE, sigma_hat=%3.3f' % ( inTSE.size, sigma_hat ))
        if iteration > 0:
            if inTSE.size == inTSE_last.size and np.all( inTSE_last == inTSE ):
                if args['VERBOSE']:
                    print("filtering unchanged, exiting after iteration %d" % iteration)
                break 
        if iteration >= 2:
            if sigma_hat <= 1:
                if args['VERBOSE']:
                    print("sigma_hat LT 1, exiting after iteration %d" % iteration)
                break             

    # if we've done any iterations, parse the model and the data residuals
    if args['max_iterations'] > 0:
        timing['iteration']=time()-tic_iteration
        inTSE=inTSE_last
        valid_data[valid_data]=(np.abs(rs_data)<3.0*np.maximum(1, sigma_hat))
        data.assign({'three_sigma_edit':np.abs(rs_data)<3.0*np.maximum(1, sigma_hat)})
        # report the model-based estimate of the data points
        data.assign({'z_est':np.reshape(G_data.toCSR().dot(m0), data.shape)})
        parse_model(m, m0, G_data, G_dzbar, Gc.TOC, grids, args['bias_params'], bias_model, dzdt_lags=args['dzdt_lags'])
        # parse the residuals to assess the contributions of the total error:
        # Make the C matrix for the constraints
        TCinv_cov=sp.dia_matrix((1./Ec, 0), shape=(Gc.N_eq, Gc.N_eq))
        rc=TCinv_cov.dot(Gc.toCSR().dot(m0))
        ru=Gc.toCSR().dot(m0)
        for eq_type in ['d2z_dt2','grad2_z0','grad2_dzdt']:
            if eq_type in Gc.TOC['rows']:
                R[eq_type]=np.sum(rc[Gc.TOC['rows'][eq_type]]**2)
                RMS[eq_type]=np.sqrt(np.mean(ru[Gc.TOC['rows'][eq_type]]**2))
    R['data']=np.sum((((data.z_est[data.three_sigma_edit==1]-data.z[data.three_sigma_edit==1])/data.sigma[data.three_sigma_edit==1])**2))
    RMS['data']=np.sqrt(np.mean((data.z_est[data.three_sigma_edit==1]-data.z[data.three_sigma_edit==1])**2))

    # Compute the error in the solution if requested
    if args['compute_E']:
        # We have generally not done any iterations at this point, so need to make the Ip_r matrix
        Ip_r=sp.coo_matrix((np.ones(Gc.N_eq+inTSE.size), (np.arange(Gc.N_eq+inTSE.size), np.concatenate((inTSE, cov_rows)))), \
                           shape=(Gc.N_eq+inTSE.size, Gcoo.shape[0])).tocsc()
        parse_errors(E, Gcoo, TCinv, rhs, Ip_c, Ip_r, grids, G_data, Gc, G_dzbar, \
                         bias_model, args['bias_params'], dzdt_lags=args['dzdt_lags'], timing=timing)

 

    TOC=Gc.TOC
    return {'m':m, 'E':E, 'data':data, 'grids':grids, 'valid_data': valid_data, 'TOC':TOC,'R':R, 'RMS':RMS, 'timing':timing,'E_RMS':args['E_RMS'], 'dzdt_lags':args['dzdt_lags']}
Example #13
File: test.py  Project: yustiks/PySPQR
import numpy
import scipy.sparse.linalg
import sparseqr

# QR decompose a sparse matrix M such that  Q R = M E
#
M = scipy.sparse.rand(10, 10, density=0.1)
Q, R, E, rank = sparseqr.qr(M)
print(abs(Q * R - M * sparseqr.permutation_vector_to_matrix(E)).sum()
      )  # should be approximately zero

# Solve many linear systems "M x = b for b in columns(B)"
#
B = scipy.sparse.rand(
    10, 5, density=0.1
)  # many RHS, sparse (could also have just one RHS with shape (10,))
x = sparseqr.solve(M, B, tolerance=0)

# Solve an overdetermined linear system  A x = b  in the least-squares sense
#
# The same routine also works for the usual non-overdetermined case.
#
A = scipy.sparse.rand(20, 10, density=0.1)  # 20 equations, 10 unknowns
b = numpy.random.random(
    20)  # one RHS, dense, but could also have many (in shape (20,k))
x = sparseqr.solve(A, b, tolerance=0)

# Solve a linear system  M x = B  via QR decomposition
#
# This approach is slow due to the explicit construction of Q, but may be
# useful if a large number of systems need to be solved with the same M.
#
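# The example is truncated here.  A possible continuation (not the original
# code), following the same reduced-QR pattern as the earlier examples on this
# page, might look like this; the matrix is nudged to full rank so that R is
# invertible.
M2 = scipy.sparse.rand(10, 10, density=0.1) + scipy.sparse.identity(10)
b2 = numpy.random.random(10)

Q, R, E, rank = sparseqr.qr(M2)            # Q R = M2 P, P from the permutation vector E
y = scipy.sparse.linalg.spsolve(R.tocsr(), Q.T.dot(b2))   # solve R y = Q^T b2
x = numpy.empty_like(y)
x[E] = y                                   # undo the column permutation
print(abs(M2.dot(x) - b2).max())           # should be approximately zero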