def estimate_m_matrix(self):
    # One 3x10 lighting matrix per albedo cluster: for each color channel,
    # fit the 10 coefficients of a quadratic function of the surface normal.
    if self.m_matrix is None:
        self.m_matrix = np.zeros(shape=(len(self.albedos), 3, 10))
    counter = 0
    for k, pixel_list in self.rgb_clusters.items():
        if self.albedos[counter][0] > 0 and self.albedos[counter][1] > 0 and self.albedos[counter][2] > 0:
            A = []
            bs = []
            for pixel in pixel_list:
                i, j = pixel
                n1, n2, n3 = self.depth_normals[i][j]
                if n1 != 0 or n2 != 0 or n3 != 0:
                    # target: albedo-normalized color; row: quadratic monomials
                    bs.append(self.color_map[i][j] / self.albedos[counter])
                    A.append([n1 * n1, 2 * n1 * n2, 2 * n1 * n3, n1,
                                       n2 * n2, 2 * n2 * n3, n2,
                                                    n3 * n3, n3,
                                                              1])
            if not bs:
                counter += 1
                continue
            A = np.array(A)
            bs = np.array(bs)
            # one unconstrained least-squares solve per color channel
            red_lighting = lsq_linear(A, bs[:, 0])
            self.m_matrix[counter][0] = red_lighting.x
            green_lighting = lsq_linear(A, bs[:, 1])
            self.m_matrix[counter][1] = green_lighting.x
            blue_lighting = lsq_linear(A, bs[:, 2])
            self.m_matrix[counter][2] = blue_lighting.x
        counter += 1
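
The solve above fits, per albedo cluster and per color channel, the 10 coefficients of a quadratic lighting model in the surface normal. A minimal self-contained sketch of that solve, using synthetic unit normals and a hypothetical ground-truth coefficient vector:

import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(0)
normals = rng.normal(size=(200, 3))
normals /= np.linalg.norm(normals, axis=1, keepdims=True)

def quad_features(n):
    n1, n2, n3 = n
    return [n1 * n1, 2 * n1 * n2, 2 * n1 * n3, n1,
            n2 * n2, 2 * n2 * n3, n2,
            n3 * n3, n3,
            1.0]

A = np.array([quad_features(n) for n in normals])
m_true = rng.normal(size=10)                  # hypothetical channel lighting
b = A @ m_true + 0.01 * rng.normal(size=len(A))

m_est = lsq_linear(A, b).x
print(np.round(m_est - m_true, 2))            # estimation errors near zero
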
Example #2
    def test_dense_rank_deficient(self):
        A = np.array([[-0.307, -0.184]])
        b = np.array([0.773])
        lb = [-0.1, -0.1]
        ub = [0.1, 0.1]
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A,
                             b, (lb, ub),
                             method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, [-0.1, -0.1])

        A = np.array([
            [0.334, 0.668],
            [-0.516, -1.032],
            [0.192, 0.384],
        ])
        b = np.array([-1.436, 0.135, 0.909])
        lb = [0, -1]
        ub = [1, -0.5]
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A,
                             b, (lb, ub),
                             method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.optimality, 0, atol=1e-11)
Example #3
    def AICSP(self):
        #A CSP approach to the problem... empty board, make first move.
        global matrixOne
        global vectorSolutions
        global next_Button_Open
        global outcome
        global terminate

        if self.newBoard():
            vectorSolutions = np.array([])
            self.open_button(numRows // 2, numCols // 2)
            #set up matrix for first time...
            for row in range(0, numRows):
                for col in range(0, numCols):
                    if (
                            but[row][col]['state'] == 'disabled'
                            and but[row][col]["text"] != ""
                    ):  #a button that is opened and is not useless (numbered)
                        vectorSolutions = np.append(
                            vectorSolutions, [int(but[row][col]["text"])])
                        temp = np.zeros(numRows * numCols)
                        temp = self.add_3by3_constraint(temp, row, col)
                        matrixOne = np.append(matrixOne,
                                              np.array([temp]),
                                              axis=0)
            myAns = lsq_linear(matrixOne,
                               vectorSolutions,
                               bounds=(-0.0000, 1.0001)).x
            myAns = np.reshape(myAns, (numRows, numCols))
            next_Button_Open = self.findCurrMin(myAns)
            but[next_Button_Open[0][0]][next_Button_Open[0][1]]["bg"] = "blue"
        #append... solve... display... find min/solve... click...
        # click...
        else:
            self.open_button(next_Button_Open[0][0], next_Button_Open[0][1])
            if terminate:
                return
            for row in range(0, numRows):
                for col in range(0, numCols):
                    if (but[row][col]['state'] == 'disabled'
                            and but[row][col]["text"] != ""):
                        vectorSolutions = np.append(
                            vectorSolutions, [int(but[row][col]["text"])])
                        temp = np.zeros(numRows * numCols)
                        temp = self.add_3by3_constraint(temp, row, col)
                        matrixOne = np.append(matrixOne,
                                              np.array([temp]),
                                              axis=0)
            myAns = lsq_linear(matrixOne,
                               vectorSolutions,
                               bounds=(-0.0000, 1.0001)).x
            myAns = np.reshape(myAns, (numRows, numCols))
            next_Button_Open = self.findCurrMin(myAns)
            if (next_Button_Open == [-1, -1]):
                next_Button_Open = self.moveLeftIssue(myAns)
            but[next_Button_Open[0][0]][next_Button_Open[0][1]]["bg"] = "blue"
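
The Minesweeper solver above encodes each revealed number as a linear constraint over unknown 0/1 mine indicators and reads the box-constrained solution as per-cell mine likelihoods. A minimal sketch of that idea on a hand-built two-constraint board (the cell layout is hypothetical):

import numpy as np
from scipy.optimize import lsq_linear

# four unknown cells, two revealed numbers
A = np.array([[1., 1., 0., 0.],    # cells 0,1 border a revealed "1"
              [0., 1., 1., 1.]])   # cells 1,2,3 border a revealed "2"
b = np.array([1., 2.])

probs = lsq_linear(A, b, bounds=(0.0, 1.0)).x
print(probs, '-> safest next click: cell', int(np.argmin(probs)))
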
Example #4
    def test_option_lsmr_maxiter(self):
        # Should work with positive integers or None
        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1)
        _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None)

        # Should raise error with 0 or negative max iter
        err_message = "`lsmr_maxiter` must be None or positive integer."
        with pytest.raises(ValueError, match=err_message):
            _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=0)
        with pytest.raises(ValueError, match=err_message):
            _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=-1)
Example #5
    def test_sparse_and_LinearOperator(self):
        m = 5000
        n = 1000
        A = rand(m, n, random_state=0)
        b = self.rnd.randn(m)
        res = lsq_linear(A, b)
        assert_allclose(res.optimality, 0, atol=1e-6)

        A = aslinearoperator(A)
        res = lsq_linear(A, b)
        assert_allclose(res.optimality, 0, atol=1e-6)
Example #7
def update_weight_map(bone_transforms, rest_bones_t, poses, rest_pose, sparseness):
    """
    Update the bone-vertex weight map W by fixing bone transformations and using a least squares
    solver subject to non-negativity constraint, affinity constraint, and sparseness constraint.

    inputs: bone_transforms |num_bones| x |num_poses| x 4 x 3 matrix representing the stacked 
                                Rotation and Translation for each pose, for each bone.
            rest_bones_t    |num_bones| x 3 matrix representing the translations of the rest bones
            poses           |num_poses| x |num_verts| x 3 matrix representing coordinates of vertices of each pose
            rest_pose       |num_verts| x 3 numpy matrix representing the coordinates of vertices in rest pose
            sparseness      Maximum number of bones allowed to influence a particular vertex

    return: A |num_verts| x |num_bones| weight map representing the influence of the jth bone on the ith vertex
    """
    num_verts = rest_pose.shape[0]
    num_poses = poses.shape[0]
    num_bones = bone_transforms.shape[0]

    W = np.empty((num_verts, num_bones))

    for v in range(num_verts):
        # For every vertex, solve a least squares problem
        Rp = np.empty((num_bones, num_poses, 3))
        for bone in range(num_bones):
            Rp[bone] = bone_transforms[bone,:,:3,:].dot(rest_pose[v] - rest_bones_t[bone]) # |num_bones| x |num_poses| x 3
        # R * p + T
        Rp_T = Rp + bone_transforms[:, :, 3, :] # |num_bones| x |num_poses| x 3
        A = Rp_T.transpose((1, 2, 0)).reshape((3 * num_poses, num_bones)) # 3 * |num_poses| x |num_bones|
        b = poses[:, v, :].reshape(3 * num_poses) # 3 * |num_poses| x 1

        # Bounds ensure non-negativity constraint and kind of affinity constraint
        w = lsq_linear(A, b, bounds=(0, 1), method='bvls').x  # |num_bones| x 1
        w /= np.sum(w) # Ensure that w sums to 1 (affinity constraint)

        # Remove |B| - |K| bone weights with the least "effect"
        effect = np.linalg.norm((A * w).reshape(num_poses, 3, num_bones), axis=1) # |num_poses| x |num_bones|
        effect = np.sum(effect, axis=0) # |num_bones| x 1
        num_discarded = max(num_bones - sparseness, 0)
        effective = np.argpartition(effect, num_discarded)[num_discarded:] # |sparseness| x 1

        # Run least squares again, but only use the most effective bones
        A_reduced = A[:, effective] # 3 * |num_poses| x |sparseness|
        w_reduced = lsq_linear(A_reduced, b, bounds=(0, 1), method='bvls').x # |sparseness| x 1
        w_reduced /= np.sum(w_reduced) # Ensure that w sums to 1 (affinity constraint)

        w_sparse = np.zeros(num_bones)
        w_sparse[effective] = w_reduced
        w_sparse /= np.sum(w_sparse) # Ensure that w_sparse sums to 1 (affinity constraint)

        W[v] = w_sparse

    return W
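
The core trick above is a box-constrained BVLS solve followed by renormalization so the bone weights form a convex combination. A minimal sketch with random data and a hypothetical ground-truth weight vector:

import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(0)
A = rng.normal(size=(30, 5))                 # hypothetical per-bone columns
w_true = np.array([0.7, 0.3, 0.0, 0.0, 0.0])
b = A @ w_true

w = lsq_linear(A, b, bounds=(0, 1), method='bvls').x
w /= w.sum()                                 # enforce the affinity constraint
print(np.round(w, 3))                        # recovers [0.7, 0.3, 0, 0, 0]
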
Example #8
    def fit_linear_nnls(self, X, y, sample_weight=None):
        if not isinstance(self.model, LinearRegression):
            raise ValueError(
                'Model is not LinearRegression, cannot call fit for linear NNLS'
            )
        n_jobs_ = self.model.n_jobs
        self.model.coef_ = []
        X, y = check_X_y(X,
                         y,
                         accept_sparse=['csr', 'csc', 'coo'],
                         y_numeric=True,
                         multi_output=True)

        if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")

        X, y, X_offset, y_offset, X_scale = self.model._preprocess_data(
            X,
            y,
            fit_intercept=self.model.fit_intercept,
            normalize=self.model.normalize,
            copy=self.model.copy_X,
            sample_weight=sample_weight)

        if sample_weight is not None:
            # Sample weight can be implemented via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)

        if sp.issparse(X):
            if y.ndim < 2:
                # out = sparse_lsqr(X, y)
                # lsq_linear returns an OptimizeResult, not an lsqr-style tuple
                out = lsq_linear(X, y, bounds=(0, np.inf))
                self.model.coef_ = out.x
                self.model._residues = out.cost  # 0.5 * ||Ax - b||^2
            else:
                # lsq_linear cannot handle y with shape (M, K): solve per column
                outs = Parallel(n_jobs=n_jobs_)(
                    delayed(lsq_linear)(X, y[:, j].ravel(), bounds=(0, np.inf))
                    for j in range(y.shape[1]))
                self.model.coef_ = np.vstack([out.x for out in outs])
                self.model._residues = np.vstack([out.cost for out in outs])
        else:
            # self.model.coef_, self.model.cost_, self.model.fun_, self.model.optimality_, self.model.active_mask_,
            # self.model.nit_, self.model.status_, self.model.message_, self.model.success_\
            out = lsq_linear(X, y, bounds=(0, np.inf))
            self.model.coef_ = out.x
            self.model.coef_ = self.model.coef_.T

        if y.ndim == 1:
            self.model.coef_ = np.ravel(self.model.coef_)
        self.model._set_intercept(X_offset, y_offset, X_scale)
        return self.model
Example #9
    def update_weight_map(self, bone_transforms, rest_bones_t, poses,
                          rest_pose, sparseness):

        num_verts = rest_pose.shape[0]
        num_poses = poses.shape[0]
        num_bones = bone_transforms.shape[0]

        W = np.empty((num_verts, num_bones))

        for v in range(num_verts):
            # For every vertex, solve a least squares problem
            Rp = np.empty((num_bones, num_poses, 3))
            for bone in range(num_bones):
                Rp[bone] = bone_transforms[bone, :, :3, :].dot(
                    rest_pose[v] -
                    rest_bones_t[bone])  # |num_bones| x |num_poses| x 3
            # R * p + T
            Rp_T = Rp + bone_transforms[:, :,
                                        3, :]  # |num_bones| x |num_poses| x 3
            A = Rp_T.transpose((1, 2, 0)).reshape(
                (3 * num_poses, num_bones))  # 3 * |num_poses| x |num_bones|
            b = poses[:, v, :].reshape(3 * num_poses)  # 3 * |num_poses| x 1

            # Bounds ensure non-negativity constraint and kind of affinity constraint
            w = lsq_linear(A, b, bounds=(0, 1),
                           method='bvls').x  # |num_bones| x 1
            w /= np.sum(w)  # Ensure that w sums to 1 (affinity constraint)

            # Remove |B| - |K| bone weights with the least "effect"
            effect = np.linalg.norm((A * w).reshape(num_poses, 3, num_bones),
                                    axis=1)  # |num_poses| x |num_bones|
            effect = np.sum(effect, axis=0)  # |num_bones| x 1
            num_discarded = max(num_bones - sparseness, 0)
            effective = np.argpartition(
                effect, num_discarded)[num_discarded:]  # |sparseness| x 1

            # Run least squares again, but only use the most effective bones
            A_reduced = A[:, effective]  # 3 * |num_poses| x |sparseness|
            w_reduced = lsq_linear(A_reduced, b, bounds=(0, 1),
                                   method='bvls').x  # |sparseness| x 1
            w_reduced /= np.sum(
                w_reduced)  # Ensure that w sums to 1 (affinity constraint)

            w_sparse = np.zeros(num_bones)
            w_sparse[effective] = w_reduced
            w_sparse /= np.sum(
                w_sparse
            )  # Ensure that w_sparse sums to 1 (affinity constraint)

            W[v] = w_sparse

        return W
Example #10
    def test_convergence_small_matrix(self):
        A = np.array([[49.0, 41.0, -32.0], [-19.0, -32.0, -8.0],
                      [-13.0, 10.0, 69.0]])
        b = np.array([-41.0, -90.0, 47.0])
        bounds = np.array([[31.0, -44.0, 26.0], [54.0, -32.0, 28.0]])

        x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x
        x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x

        cost_bvls = np.sum((A @ x_bvls - b)**2)
        cost_trf = np.sum((A @ x_trf - b)**2)

        assert_(abs(cost_bvls - cost_trf) < cost_trf * 1e-10)
Example #11
def SparsifyDynamics(Theta, dxdt, llambda):
    # Theta = [ x(0) x(1) ... x(n)
    #           y(0) y(1) ... y(n)
    #           z(0) z(1) ... z(n) ]
    xdim = len(dxdt)
    par_num = len(Theta[0])

    opt_num = 0  # [0: lsq_linear, 1: leastsq]

    # Initial solution
    for i in range(xdim):
        # Choice of optimization method
        if opt_num == 0:
            xi_i = lsq_linear(Theta.T, dxdt[i])
            xi_i = xi_i.x
        elif opt_num == 1:
            xi_i = leastsq_for_matrix(Theta, dxdt[i])
            xi_i = xi_i[0]
            print(xi_i)

        if i == 0:
            Xi = np.array([xi_i])
        else:
            Xi = np.append(Xi, np.array([xi_i]), axis=0)

    # Ignore coefficients below the threshold, then regress again on the rest
    for _ in range(10):  # run 10 refinement passes
        for i in range(xdim):
            useID = [n for n in range(len(Xi[i])) if abs(Xi[i][n]) > llambda]
            zeroID = [n for n in range(len(Xi[i])) if abs(Xi[i][n]) <= llambda]

            #   Zero out the discarded entries of Xi
            for j in zeroID:
                Xi[i][j] = 0
            #   Keep only the rows of Theta that are still active
            Theta_tmp = np.array([Theta[n] for n in useID])
            print(Theta_tmp.shape, dxdt[i].shape)
            #   Regress once more
            if opt_num == 0:
                xi_i = lsq_linear(Theta_tmp.T, dxdt[i])
                xi_i = xi_i.x
            if opt_num == 1:
                xi_i = leastsq_for_matrix(Theta_tmp, dxdt[i])
                xi_i = xi_i[0]
                print(xi_i)
            #   Write the refit coefficients back into Xi
            for n, xi_n in enumerate(useID):
                Xi[i][xi_n] = xi_i[n]
    return Xi
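
This is the sequential thresholded least-squares regression used in SINDy: solve, zero out small coefficients, and re-solve on the surviving columns. A compact sketch on a synthetic sparse system (note the library below is already samples x features, whereas the function above stores Theta transposed):

import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(0)
Theta = rng.normal(size=(200, 8))            # candidate-function library
xi_true = np.array([0., 1.5, 0., 0., -2., 0., 0., 0.])
dxdt = Theta @ xi_true

xi = lsq_linear(Theta, dxdt).x               # initial dense solution
for _ in range(10):
    small = np.abs(xi) < 0.1                 # threshold (llambda)
    xi[small] = 0.0
    keep = ~small
    xi[keep] = lsq_linear(Theta[:, keep], dxdt).x
print(np.round(xi, 3))                       # recovers the sparse xi_true
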
Example #12
    def test_sparse_bounds(self):
        m = 5000
        n = 1000
        A = rand(m, n, random_state=0)
        b = self.rnd.randn(m)
        lb = self.rnd.randn(n)
        ub = lb + 1
        res = lsq_linear(A, b, (lb, ub))
        assert_allclose(res.optimality, 0.0, atol=1e-6)

        res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13)
        assert_allclose(res.optimality, 0.0, atol=1e-6)

        res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
        assert_allclose(res.optimality, 0.0, atol=1e-6)
Example #13
    def test_sparse_bounds(self):
        m = 5000
        n = 1000
        A = rand(m, n, random_state=0)
        b = self.rnd.randn(m)
        lb = self.rnd.randn(n)
        ub = lb + 1
        res = lsq_linear(A, b, (lb, ub))
        assert_allclose(res.optimality, 0.0, atol=1e-8)

        res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13)
        assert_allclose(res.optimality, 0.0, atol=1e-8)

        res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
        assert_allclose(res.optimality, 0.0, atol=1e-8)
Example #14
    def _step(self, use_cached_xw):
        #search for FW vertex and compute line search
        f = self._search()

        #check to make sure value to add is not in the current set (error should be ortho to current subspace)
        if self.wts[f] > 0:
            warnings.warn(self.alg_name +
                          '.run(): search selected a nonzero weight to update')

        #run least squares optimal weight update
        active_idcs = self.wts > 0
        active_idcs[f] = True
        X = self.x[active_idcs, :]
        res = lsq_linear(X.T,
                         self.snorm * self.xs,
                         bounds=(0., np.inf),
                         max_iter=max(1000, 10 * self.xs.shape[0]))

        #if the optimizer failed or our cost increased, stop
        prev_cost = self.error()
        if not res.success or np.sqrt(2. * res.cost) >= prev_cost:
            self.reached_numeric_limit = True
            return False

        #update weights, xw, and prev_cost
        self.wts[active_idcs] = res.x
        self.xw = self.wts.dot(self.x)

        return True
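
The weight update above restricts the problem to the active columns and re-solves with a non-negativity bound. A minimal sketch of that refit with random data (the target construction is an assumption for the demo):

import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(0)
x = rng.normal(size=(6, 20))              # candidate vectors, one per row
target = 2.0 * x[0] + 0.5 * x[3]          # hypothetical target in their span

active = np.array([True, False, False, True, False, False])
res = lsq_linear(x[active].T, target, bounds=(0., np.inf), max_iter=1000)
print(res.success, np.round(res.x, 3))    # recovers weights [2.0, 0.5]
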
Example #15
    def infer(self, Ms, ys, scale_factors=None):
        ''' Either:
            1) Ms is a single M and ys is a single y 
               (scale_factors ignored) or
            2) Ms and ys are lists of M matrices and y vectors
               and scale_factors is a list of the same length.
        '''
        A, y = self._apply_scales(Ms, ys, scale_factors)

        if self.method == 'AS':
            assert isinstance(
                A, numpy.ndarray), "method 'AS' only works with dense matrices"
            x_est, _ = optimize.nnls(A, y)
        elif self.method == 'LB':
            if self.lasso is None:
                x_est, info = nls_lbfgs_b(A, y)
            if self.lasso:
                lasso = max(
                    0,
                    lsmr(A, y)[0].sum()) if self.lasso is True else self.lasso
                x_est = nls_slsqp(A, y, lasso)
        elif self.method == 'TRF':
            x_est = optimize.lsq_linear(A, y, bounds=(0, numpy.inf),
                                        tol=1e-3)['x']

        elif self.method == 'new':
            x_est, info = nnls(A, y, 1e-6, 1e-6)

        x_est = x_est.reshape(A.shape[1])  # reshape to match shape of x

        return x_est
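
infer() dispatches between several non-negative solvers. A minimal sketch comparing two of them, the active-set optimize.nnls and the bounded lsq_linear, which should agree closely on a dense, well-posed problem (all data here are synthetic):

import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
A = rng.random((50, 4))
x_true = np.array([0.0, 1.0, 0.5, 0.0])
y = A @ x_true

x_as, _ = optimize.nnls(A, y)                             # active set
x_trf = optimize.lsq_linear(A, y, bounds=(0, np.inf)).x   # bounded TRF
print(np.allclose(x_as, x_trf, atol=1e-4))
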
Example #16
    def enforce_consistency(self):
        """Make the tree consistent by solving a least square optimization problem.

    See 'Answering Range Queries Under Local Differential Privacy. Graham
    Cormode, Tejas Kulkarni, Divesh Srivastava' for details. Improve the
    accuracy of range queries. When this function is invoked, `use_efficient`
    will be automatically set to `False` because using both optimizations does
    not further improve the accuracy.
    """

        # As consistency enforcement and Honaker trick together does not further
        # improve the query accuracy. Honaker trick will be automatically disabled
        # if this function is called.
        self._use_efficient = False

        if self._check_consistency():
            return

        ls_matrix = self._construct_matrix()
        ls_rhs = self._flatten_hierarhical_hist()
        consistent_hist = optimize.lsq_linear(ls_matrix,
                                              ls_rhs,
                                              bounds=(0, np.inf)).x
        self._hierarchical_histogram = build_tree_from_leaf.create_hierarchical_histogram(
            consistent_hist, self._arity)
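
The consistency step solves for non-negative leaf counts that best explain every noisy node of the hierarchy. A minimal sketch with a hypothetical two-leaf tree, where the third row states that the root equals the sum of the leaves:

import numpy as np
from scipy import optimize

# rows: leaf 0, leaf 1, root = leaf 0 + leaf 1
ls_matrix = np.array([[1., 0.],
                      [0., 1.],
                      [1., 1.]])
noisy_counts = np.array([4.2, 6.1, 9.5])   # hypothetical noisy histogram

leaves = optimize.lsq_linear(ls_matrix, noisy_counts, bounds=(0, np.inf)).x
print(leaves, 'root =', leaves.sum())
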
Example #17
def NLSQ(T10, cp, signal):
    '''
    Implementation of the linear least-squares (LLS) method of "Murase K:
    Efficient method for calculating kinetic parameters using T1-weighted
    dynamic contrast-enhanced magnetic resonance imaging. Magnetic Resonance
    in Medicine 2004; 51:858-862."
    :param T10: baseline T1 value
    :param cp: plasma concentration curve (AIF)
    :param signal: measured signal-time curve
    :return: np.array([ktrans, vp, ve])
    '''
    R10 = 1 / T10
    s0 = (1 - np.exp(-TR * R10)) * np.sin(alpha) / (
        1 - np.exp(-TR * R10) * np.cos(alpha))
    M = np.mean(signal[:6]) / s0
    R1t = -np.log((signal - M * np.sin(alpha)) /
                  (signal * np.cos(alpha) - M * np.sin(alpha))) / TR
    ctis = (R1t - R10) / r1
    cp_integral = np.zeros_like(cp, dtype=float)
    ctis_integral = np.zeros_like(cp, dtype=float)
    cp_length = np.size(cp)
    for t in range(cp_length):
        cp_integral[t] = np.sum(cp[:t + 1]) * deltt
    for t in range(cp_length):
        ctis_integral[t] = np.sum(ctis[:t + 1]) * deltt
    matrixA = np.concatenate((cp_integral.reshape(
        (cp_length, 1)), -ctis_integral.reshape(
            (cp_length, 1)), cp.reshape((cp_length, 1))),
                             axis=1)
    matrixC = ctis
    bounds = [(1e-5, 1e-5, 0.0005), (0.7, 5, 0.1)]
    matrixB = lsq_linear(matrixA, matrixC, bounds=bounds).x
    vp = matrixB[2]
    k2 = matrixB[1]
    ktrans = matrixB[0] - k2 * vp
    ve = ktrans / k2
    return np.array([ktrans, vp, ve])
Example #18
def perform_regr_L2(indiv, comb, val_cur):
    """ Perform bounded linear regression using lsq_linear.
    """
    indiv, comb = indiv.copy(), comb.copy()
    indiv = indiv.reset_index()

    a = []
    b = []

    ### create systems of equations
    # condition that total funds stay the same
    a.append(list(np.ones(len(indiv))))
    b.append(indiv.val.sum())

    # relating individual currencies to their total target val
    for cur_id in comb.index:
        # skip bitcoin to prevent regression from overshooting targets
        # if allocation is unbalanceable given exchange distribution
        if cur_id == val_cur:
            continue
        # a is list of ilocs for each currency
        a.append((indiv.cur_id == cur_id).astype(int).tolist())
        # b is currency total target val
        b.append(comb.val_tgt.loc[cur_id])

    # regression
    a = np.array(a)
    b = np.array(b)
    s = lsq_linear(a, b, bounds=(0, np.inf))

    s = [s['x']]

    indiv['val_nnls'] = s[0]
    return indiv
Example #19
def estimate_fluxes(S, AeqCons=None, beqCons=None, bndCons=None):
    '''
	Parameters
	S: df, stoichiometric matrix, balanced metabolites in rows, total reactions in columns
	AeqCons: df, A of equality constraints
	beqCons: ser, b of equality constraints
	bndCons: df, boundary constraints of flux
	
	Returns
	V: ser, net flux distribution
	'''

    # prepare A and b
    A = pd.concat((S, AeqCons), sort=False)
    A = A[S.columns]
    A.replace(np.nan, 0, inplace=True)

    b = pd.concat((pd.Series(np.zeros(S.shape[0]),
                             index=S.index,
                             dtype=float), beqCons))

    if bndCons is None:
        # default: non-negative, unbounded flux for every reaction
        bndCons = pd.DataFrame(np.full((S.shape[1], 2), [0, np.inf]),
                               index=S.columns,
                               columns=['lb', 'ub'])

    bnds = (bndCons['lb'].values, bndCons['ub'].values)

    res = lsq_linear(A, b, bounds=bnds).x

    V = pd.Series(res, index=S.columns, dtype=float)

    return V
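
estimate_fluxes stacks the stoichiometric balance rows on top of user equality constraints and solves for non-negative fluxes. A toy sketch with one balanced metabolite and one pinned flux (the network is hypothetical):

import numpy as np
import pandas as pd
from scipy.optimize import lsq_linear

rxns = ['v1', 'v2', 'v3']
S = pd.DataFrame([[1., -1., -1.]], index=['M'], columns=rxns)   # balance of M
AeqCons = pd.DataFrame([[1., 0., 0.]], columns=rxns)            # pin v1 = 10
beqCons = pd.Series([10.])

A = pd.concat((S, AeqCons), sort=False)[rxns].fillna(0)
b = pd.concat((pd.Series(np.zeros(S.shape[0]), index=S.index), beqCons))

V = lsq_linear(A.values, b.values, bounds=(0, np.inf)).x
print(dict(zip(rxns, np.round(V, 3))))
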
Example #20
    def _fit_rc(idx: int, td: np.ndarray, rs: np.ndarray, c_in: float,
                cl: np.ndarray, res: np.ndarray, cs: np.ndarray,
                cd: np.ndarray, r_unit: float, c_unit: float,
                t_unit: float) -> None:
        c_min = 1.0e-18

        rs_flat = rs.flatten()
        cl_flat = cl.flatten()
        a = np.empty((rs_flat.size, 3))
        a[:, 0] = rs_flat * c_unit / t_unit
        a[:, 1] = cl_flat * r_unit / t_unit
        a[:, 2] = 1

        b = (td.flatten() - rs_flat * (cl_flat + c_in)) / t_unit
        x = lstsq(a, b)[0]
        rp = x[1] * r_unit
        cd0 = x[2] * t_unit / rp
        cs0 = x[0] * c_unit - cd0
        if cs0 < 0 or cd0 < 0:
            # we got negative capacitance, which causes problems.
            # Now, assume the rp we got is correct, do another least square fit to enforce
            # cs and cd are positive
            a2 = np.empty((rs_flat.size, 2))
            a2[:, 0] = a[:, 0]
            a2[:, 1] = a[:, 0] + (rp * c_unit / t_unit)
            b -= rp * cl_flat / t_unit
            # noinspection PyUnresolvedReferences
            opt_res = lsq_linear(a2, b, bounds=(c_min, float('inf'))).x
            cs0 = opt_res[0] * c_unit
            cd0 = opt_res[1] * c_unit

        res[idx] = rp
        cs[idx] = cs0
        cd[idx] = cd0
Example #21
def fromPhaseCurve(phiObs,
                   flux,
                   nSlices,
                   phiIn=None,
                   priorStd=1e3,
                   G=None,
                   brightnessMin=0,
                   brightnessMax=np.inf,
                   fullOutput=False):
    '''Computes the slice brightnesses from a given phase curve and prior uncertainty.'''
    # Compute G if it was not provided
    if G is None:
        G = getG(phiObs, nSlices, phiIn)
    # Compute with bounded ridge regression using the pseudo-observations method.
    X = np.vstack([G, np.eye(nSlices) / priorStd])
    y = np.concatenate(
        [flux, np.mean(flux) * np.ones(nSlices) / (2 * priorStd)])
    f = lsq_linear(X, y, bounds=[brightnessMin, brightnessMax])
    if not fullOutput:
        return f.x
    # Compute the log posterior probability
    sigma = np.sqrt(2. * f.cost / (len(flux) - nSlices))
    logLike = -len(flux) * np.log(
        2 * np.pi) / 2. - len(flux) * sigma - f.cost / sigma**2
    # Get the uncertainties on the brightness
    errors = sigma**2 * np.linalg.inv(np.matmul(X.T, X))
    return {
        'brightness': f.x,
        'brightnessCov': errors,
        'logLike': logLike,
        'residStd': sigma
    }
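
The pseudo-observations trick above turns bounded ridge regression into a plain bounded least-squares problem by appending one row per unknown, scaled by 1/priorStd. A minimal sketch with a random design matrix (all data here are synthetic):

import numpy as np
from scipy.optimize import lsq_linear

rng = np.random.default_rng(0)
G = rng.random((40, 6))                    # hypothetical design matrix
flux = G @ rng.random(6) + 0.01 * rng.normal(size=40)
priorStd = 1e3

X = np.vstack([G, np.eye(6) / priorStd])   # pseudo-observation rows
y = np.concatenate([flux, flux.mean() * np.ones(6) / (2 * priorStd)])
f = lsq_linear(X, y, bounds=(0, np.inf))
print(np.round(f.x, 3))
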
Example #22
def get_model(A, y, lamb=0, regularization='ridge', bounds=False, verbose=0):
    n_col = A.shape[1]

    if isinstance(regularization, str):
        if regularization == 'ridge':
            # identity matrix gives standard ridge (Tikhonov) regularization
            L = np.identity(n_col)
        elif regularization == 'smooth':
            # fill with a finite difference matrix
            L = np.identity(n_col) * 2 + np.diag(
                -np.ones(n_col - 1), k=1) + np.diag(-np.ones(n_col - 1), k=-1)
    else:
        L = regularization

    if not bounds:
        return linalg.solve(A.T.dot(A) + lamb * L, A.T.dot(y))
    else:
        # NOTE: n_unknowns, nev and n_stations are assumed to be module-level
        # globals describing the problem layout.
        lower_bnd = np.ones(n_unknowns)
        upper_bnd = np.ones_like(lower_bnd)
        lower_bnd[:nev + n_stations] *= -np.inf
        lower_bnd[nev + n_stations:] = 0
        upper_bnd *= np.inf

        return lsq_linear(A.T.dot(A) + lamb * L,
                          A.T.dot(y), (lower_bnd, upper_bnd),
                          verbose=verbose)
Example #23
    def get_phase_compositions(self, point, simplex_id=None):
        """Compute phase contributions given a composition
        
        input:
        ------
            point : composition as a numpy array (dim,)
            
        output:
        -------
            x. : Phase compositions as a numpy array of shape (dim, )
            vertices  : Compositions of coexisting phases. Each row correspond to 
                        an entry in x with the same index
        """
        from scipy.optimize import lsq_linear

        if not self.is_solved:
            raise RuntimeError(
                'Phase diagram is not computed\n'
                'Use .compute() before requesting phase compositions')

        assert len(point) == self.dimension, \
            'Expected {}-component composition, got {}'.format(
                self.dimension, len(point))

        if self.is_boundary_point(point):
            raise RuntimeError(
                'Boundary points are not considered in the computation.')

        if simplex_id is None:
            inside = np.asarray([
                self.in_simplex(point, s)
                for s in self.simplices[~self.coplanar]
            ],
                                dtype=bool)
            in_simplices_ids = np.where(inside)[0]
        else:
            in_simplices_ids = [simplex_id]

        for i in in_simplices_ids:
            simplex = self.simplices[i]
            num_comps = np.asarray(self.num_comps)[i]
            vertices = self.grid[:, simplex]
            energies = np.asarray([self.energy_func(x) for x in vertices.T])

            if num_comps == 1:
                # no-phase splits if the simplex is labelled 1-phase
                continue
            A = np.vstack(
                (vertices.T[:-1, :], energies, np.ones(self.dimension)))
            B = np.hstack((point[:-1], self.energy_func(point), 1))
            lb = np.zeros(self.dimension)
            ub = np.ones(self.dimension)
            res = lsq_linear(A, B, bounds=(lb, ub), lsmr_tol='auto', verbose=0)
            x = res.x
            if not (x < 0).any():
                # stop at the first simplex whose solution is non-negative
                break

        return x, vertices.T, num_comps
Example #24
def gauss_newton(f, x0, J, atol: float = 1e-4, max_iter: int = 100):
    """Implements the Gauss-Newton method for NLLS

    :param f: function to compute the residual vector
    :param x0: array corresponding to initial guess
    :param J: function to compute the jacobian of f
    :param atol: stopping criterion on the root mean square
    of the gradient of the least-squares cost
    :param max_iter: maximum number of iterations to run before
    terminating
    """
    iterates = [
        x0,
    ]
    rms = lambda x: np.sqrt(np.mean(np.square(x)))
    costs = [
        rms(f(x0)),
    ]
    cnt = 0
    grad_rms = np.inf

    while cnt < max_iter and grad_rms > atol:
        x_k = iterates[-1]
        A = J(x_k)
        b = A @ x_k - f(x_k)
        result = lsq_linear(A, b)
        iterates.append(result.x)
        costs.append(rms(f(result.x)))
        grad_rms = rms(A.T @ f(x_k))  # gradient of 0.5*||f||^2 is J^T f
        cnt += 1

    return iterates, np.asarray(costs)
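
A short usage sketch for gauss_newton, fitting y = exp(a*t) with a one-parameter residual; the residual and Jacobian definitions below are assumptions for the demo (gauss_newton and lsq_linear are assumed to be in scope):

import numpy as np

t = np.linspace(0.0, 1.0, 20)
y = np.exp(0.8 * t)                                # synthetic data

f = lambda x: np.exp(x[0] * t) - y                 # residual vector
J = lambda x: (t * np.exp(x[0] * t))[:, None]      # 20 x 1 Jacobian

iterates, costs = gauss_newton(f, np.array([0.1]), J)
print(iterates[-1], costs[-1])                     # converges near a = 0.8
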
Example #25
def perform_regr(indiv, comb, val_cur):
    """ Perform bounded linear regression using lsq_linear.
    """
    indiv, comb = indiv.copy(), comb.copy()
    # determine exchange val totals
    ex_tot = indiv.reset_index().groupby(['ex_id'])[['val']].sum()
    a = []
    b = []
    indiv = indiv.reset_index()
    # create systems of equations
    # relating exchange currencies to exchange total
    for ex_id in ex_tot.index:
        # a is a list of ilocs for each exchange
        a.append((indiv.ex_id == ex_id).astype(int).tolist())
        # b is exchange total
        b.append(ex_tot.val.loc[ex_id])
    # relating individual currencies to their total target val
    for cur_id in comb.index:
        # skip bitcoin to prevent regression from overshooting targets
        # if allocation is unbalanceable given exchange distribution
        if cur_id == val_cur:
            continue
        # a is list of ilocs for each currency
        a.append((indiv.cur_id == cur_id).astype(int).tolist())
        # b is currency total target val
        b.append(comb.val_tgt.loc[cur_id])
    # regression
    a = np.array(a)
    b = np.array(b)
    s = lsq_linear(a, b, bounds=(0, np.inf))
    # pprint(s)
    s = [s['x']]
    # print(['%.8f' % v for v in s[0]])
    indiv['val_nnls'] = s[0]
    return indiv
Example #26
    def calcN_Waq(self):
        res = self.res
        pvt = self.pvt
        aquif = self.aquif
        # Create dataframe to be returned
        r = pd.DataFrame(columns=['WeCalc', 'Pb', 'So', 'Sg', 'Sw', 'Vp', \
               'Bo', 'Bg', 'Bw', 'Rs', 'Eo', 'Eg', 'Efw', 'N'])
        r = pd.concat([self.hist.data.dropna(subset=['p']), r], axis=1)
        # Solve MBE for VOIP and aquifer size using a least-squares method
        A = []
        B = []
        for index, row in r.iterrows():
            Pb = self.updatePb(row['Np'], row['Gp'], row['Gi'])
            p = row['p']
            E = self.Eo(p, Pb) + res.m * self.Eg(p) + self.Efw(p)
            Fi = row['Wi'] * pvt.Bw(p) + row['Gi'] * pvt.Bg(p)  # Fi - Bw2*We
            Fp = row['Np']*(pvt.Bo(p, Pb)+(row['Gp']/row['Np']-pvt.Rs(p, Pb)) \
                    *pvt.Bg(p))+row['Wp']*pvt.Bw(p)

            A.append([E, (aquif.cr + pvt.cw) * (res.p0 - p)])
            B.append(Fp - Fi)

        Waqmin = r.tail(1)['Wp'] / (aquif.cr + pvt.cw) / (res.p0 -
                                                          r.tail(1)['p'])

        return sp.lsq_linear(np.array(A),
                             np.array(B),
                             bounds=[(0., Waqmin), (np.inf, np.inf)]), A, B
Example #27
 def test_np_matrix(self):
     # gh-10711
     with np.testing.suppress_warnings() as sup:
         sup.filter(PendingDeprecationWarning)
         A = np.matrix([[20, -4, 0, 2, 3], [10, -2, 1, 0, -1]])
     k = np.array([20, 15])
     s_t = lsq_linear(A, k)
Example #28
def edit_fibermodel(Felist, Fiblist, header_tup, B, avgB, A, args):
    nifu = len(Fiblist)
    wave = np.arange(header_tup[0]) * header_tup[2] + header_tup[1]
    for i in range(nifu):
        F = FiberModel(Fiblist[i])
        nfib, nw = Felist[i][0].data.shape
        through = A[i] * B[i, :] / avgB
        for j in range(nfib):
            if args.debug:
                t1 = time.time()
                print("Working on Fibermodel, fiber: %i, %i" % (i, j))
            mask = np.where(np.isfinite(through[j, :]))[0]
            basis = np.vstack(
                [F.amplitudes[j].get_basis(F._scal_w(w)) for w in wave])
            x = through[j, mask]
            x[-1] = x[-2]
            y = basis[mask, :]
            ax, ay = y.shape
            lb = -1. * np.ones((ay, ))
            lb[-1] = -10.
            ub = 1. * np.ones((ay, ))
            ub[-1] = 10.
            sol = lsq_linear(np.array(basis[mask, :]),
                             np.array(x),
                             bounds=(lb, ub))
            F.amplitudes[j].A = sol['x'][:-1]
            F.amplitudes[j].mean = sol['x'][-1]
        filename = Fiblist[i][:-6] + 'adjpy_' + Fiblist[i][-6] + '.fmod'
        if args.debug:
            print("Writing out %s" % filename)
        F.writeto(op.join(args.outfolder, filename))
        if args.debug:
            t2 = time.time()
            print("Time Taken to reset amplitudes for IFU %i: %0.2f" %
                  (i, t2 - t1))
Example #29
 def _rank_pass(self, teams, passN=0, prev_rank=[] ):
   '''Returns list of rankings using linear chi2.
   For pass 0, set passN = 0; all weights are 1.
   For all others, set passN = n and send prev_rank.'''
   A   = []  # g x t array (games x teams)
   b   = []  # g list of game results R_g
   N_g = self._calc_Ng_list(teams)  # list of games with home/away ids
   # error of game measurement w = 1/sig^2
   if passN == 0:
     sig_g = np.ones(len(N_g)) # equal weight first iteration
   else:
     sig_g = self._calc_sig_g(prev_rank, N_g)
   # Loop over games & home/away team ids
   for g, h_a in enumerate(N_g):
     row_g = [] # row for game g in A matrix
     # loop over teams 
     for team in teams:
       if team.teamId == h_a[0]:
         r_tg = 1./sig_g[g] # home team
       elif team.teamId == h_a[1]:
         r_tg = -1./sig_g[g] #away
       else:
         r_tg = 0 #not in game
       row_g.append(r_tg)
     # add row to A matrix
     R_g = self._calc_Rg(h_a[2], h_a[3]) # send( score_home, score_away)
     b.append(R_g/sig_g[g])
     A.append(row_g)
   # Calculate the ranks
   rank = lsq_linear(A, b, bounds=(30, 130))  # ,lsq_solver='exact'
   if not rank.success:
     print('WARNING:\t%s' % rank.message)
   for i,r in enumerate(rank.x):
     teams[i].rank.lsq = r
   return rank.x
Example #30
    def _find_conservative_equilibrium(self, guess, tolerance=1e-9, max_iter=10):
        y = guess
        lb = np.array([-np.pi, -np.pi, -1 * y[2], -1 * y[3]])
        ub = np.array([np.pi, np.pi, np.inf, np.inf])
        fun = self.H_flow
        jac = self.H_flow_jac
        f = fun(y)[:-1]
        J = jac(y)[:-1, :-1]
        it = 0
        # Newton method iteration
        while np.linalg.norm(f) > tolerance and it < max_iter:
            # Note -- using constrained least squares to avoid setting the
            # actions to negative values.

            # The lower bounds ensure that I1 and I2 stay positive quantities
            lb[2:] = -1 * y[2:-1]
            dy = lsq_linear(J, -f, bounds=(lb, ub)).x
            y[:-1] = y[:-1] + dy
            f = fun(y)[:-1]
            J = jac(y)[:-1, :-1]
            it += 1
            if it == max_iter:
                raise RuntimeError("Max iterations reached!")
        return y
Example #31
def solve_linear_system(emplproj_list, cost_list, skills_list, idx_selected, totals):
	'''
	'idx_selected' is a tuple with the row and column index of the selected (empl, proj).
	'totals' is a tuple containing two tuples, one for emplTotals and one for projTotals.
	'''

	# Check that employee has skill
	if not skills_list[idx_selected[0]][idx_selected[1]]:
		status_msg = "NO_SKILL"
		return status_msg

	# User-supplied data in array form (#empl x #proj)
	emplproj_arr = np.array(emplproj_list, np.dtype('d'))
	nEmpl, nProj = emplproj_arr.shape
	N = nEmpl*nProj
	cost_arr = np.array(cost_list, np.dtype('d'))
	skills_mask = np.array(skills_list, np.dtype('?'))

	# Flatten arrays into vectors, taking only allowed values based on skills_mask
	emplproj_vec = emplproj_arr[skills_mask]
	cost_vec = cost_arr[skills_mask]
	skills_mask_ind = np.nonzero(skills_mask.flatten())[0]

	# Adjust index of selected [empl,proj] after flattening and skill filtering
	idx_mat = np.arange(N).reshape(nEmpl,nProj)
	idx_selec_mat = idx_mat[idx_selected[0]][idx_selected[1]]
	idx_vec	= idx_mat[skills_mask]
	idx_selec_flat = np.where(np.isin(idx_vec, idx_selec_mat))[0][0]

	# Create design matrix for fixed marginal totals, excluding user-selected element
	A = create_design_matrix(emplproj_arr, skills_mask, excluded_elem=[idx_selected])
	m, n = A.shape

	# Create right-hand-side vector.
	# Must subtract the user-supplied value (constant) from the totals!
	selec_value = emplproj_vec[idx_selec_flat]
	tot_empl = list(totals[0])
	tot_empl[idx_selected[0]] = totals[0][idx_selected[0]] - selec_value
	tot_proj = list(totals[1])
	tot_proj[idx_selected[1]] = totals[1][idx_selected[1]] - selec_value
	b = tot_empl + tot_proj[:-1]

	# Compute least-squares solution of linear matrix equation
	print(A)
	print(b)
	lb = np.ones(n)
	ub = np.full(n, np.inf)
	x = optimize.lsq_linear(A, b, bounds=(lb, ub))['x']

	# Check that solution satisfies equations
	if not np.allclose(np.dot(A, x), b):
		status_msg = "NO_SOLUTION"
		return status_msg

	# Add user-selected element back to x vector
	x = np.insert(x, idx_selec_flat, selec_value)

	# Put all values back in matrix
	new_mat = put_solution(emplproj_arr, x, skills_mask_ind)
	return new_mat.tolist()
Example #32
def down_sample_SchWMA_model(rx, dcv, num_slow_samps):
    ry = np.zeros(2 * num_slow_samps)

    A = np.zeros((len(ry), len(ry)))
    C = np.zeros(len(ry))

    for L in range(1, len(ry) + 1):
        vec = np.concatenate([[L], 2 * (L - np.arange(1, L))])
        A[L - 1, :len(vec)] = vec

        vec = np.concatenate([[L * dcv],
                              2 * (L * dcv - np.arange(1, L * dcv))])
        vec = vec[:np.minimum(len(vec), len(rx))]
        C[L - 1] = np.sum(vec * rx[:len(vec)])

    ry = opt.lsq_linear(A, C)['x']

    # x0 = np.zeros(len(ry))
    # x0[0] = np.sqrt(ry[0]+2*np.sum(ry[1:]))

    # out = opt.minimize(lambda x: np.sum((acv_from_b(x)-ry)**2),x0)

    # return out['x']

    Ry = np.real(np.fft.fft(np.concatenate([ry, ry[1::][::-1]])))
    Ry[Ry <= 0] = np.min(Ry[Ry > 0]) / 10.
    alpha = .5 * np.log(Ry)
    phi = np.imag(si.hilbert(alpha))
    Bmp = np.exp(alpha + 1j * phi)
    bmp = np.fft.fftshift(np.fft.ifft(Bmp))
    return np.abs(bmp)
Example #33
    def test_almost_singular(self):
        A = np.array(
            [[0.8854232310355122, 0.0365312146937765, 0.0365312146836789],
             [0.3742460132129041, 0.0130523214078376, 0.0130523214077873],
             [0.9680633871281361, 0.0319366128718639, 0.0319366128718388]])

        b = np.array(
            [0.0055029366538097, 0.0026677442422208, 0.0066612514782381])

        result = lsq_linear(A, b, method=self.method)
        assert_(result.cost < 1.1e-8)
Example #34
    def test_dense_rank_deficient(self):
        A = np.array([[-0.307, -0.184]])
        b = np.array([0.773])
        lb = [-0.1, -0.1]
        ub = [0.1, 0.1]
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, [-0.1, -0.1])

        A = np.array([
            [0.334, 0.668],
            [-0.516, -1.032],
            [0.192, 0.384],
        ])
        b = np.array([-1.436, 0.135, 0.909])
        lb = [0, -1]
        ub = [1, -0.5]
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.optimality, 0, atol=1e-11)
Example #35
File: base.py  Project: eso/xtool
    def solve_design_matrix(self, dmatrix, order, solver='lsmr',
                            solver_dict={}):
        b = order.data.compressed() / order.uncertainty.compressed()
        if solver == 'lsmr':
            result = sparse.linalg.lsmr(dmatrix.tobsr(), b, **solver_dict)
        elif solver == 'lsq':
            lsq_dict = dict(bounds=(0, np.inf), lsmr_tol='auto', verbose=1)
            lsq_dict.update(solver_dict)
            result = lsq_linear(dmatrix, b,  **lsq_dict)
            result = [result.x, result]

        else:
            raise NotImplementedError('Solver {0} is not implemented'.format(
                solver))
        return result
Example #36
    def test_full_result(self):
        lb = np.array([0, -4])
        ub = np.array([1, 0])
        res = lsq_linear(A, b, (lb, ub), method=self.method)

        assert_allclose(res.x, [0.005236663400791, -4])

        r = A.dot(res.x) - b
        assert_allclose(res.cost, 0.5 * np.dot(r, r))
        assert_allclose(res.fun, r)

        assert_allclose(res.optimality, 0.0, atol=1e-12)
        assert_equal(res.active_mask, [0, -1])
        assert_(res.nit < 15)
        assert_(res.status == 1 or res.status == 3)
        assert_(isinstance(res.message, str))
        assert_(res.success)
Example #37
def invert_STF(st_data, st_synth, method='bound_lsq', len_stf=None, eps=1e-3):
    # print('Using %d stations for STF inversion' % len(st_data))
    # for tr in st_data:
    #     fig = plt.figure()
    #     ax = fig.add_subplot(111)
    #     ax.plot(tr.data, label='data')
    #     ax.plot(st_synth.select(station=tr.stats.station,
    #                             network=tr.stats.network,
    #                             location=tr.stats.location)[0].data,
    #             label='synth')
    #     ax.legend()
    #     fig.savefig('%s.png' % tr.stats.station)
    #     plt.close(fig)

    # Calculate number of samples for STF:
    if len_stf:
        npts_stf = int(len_stf * st_data[0].stats.delta)
    else:
        npts_stf = st_data[0].stats.npts

    d, G = _create_matrix_STF_inversion(st_data, st_synth, npts_stf)

    if method == 'bound_lsq':
        m = lsq_linear(G, d, (-0.1, 1.1))
        half = (len(m.x) - 1) // 2  # integer division for Python 3 indexing
        stf = np.r_[m.x[half:], m.x[0:half]]

    elif method == 'lsq':
        stf, residual, rank, s = np.linalg.lstsq(G, d, rcond=None)

    elif method == 'dampened':
        # J from eq.3 in Sigloch (2006) to dampen later part of STFs
        GTG = np.matmul(G.T, G)
        diagGmean = np.mean(np.diag(GTG))
        J = np.diag(np.linspace(0, diagGmean, G.shape[1]))
        Ginv = np.matmul(np.linalg.inv(GTG + eps * J), G.T)
        stf = np.matmul(Ginv, d)

    else:
        raise ValueError('method %s unknown' % method)

    return stf
Example #38
    def test_dense_bounds(self):
        # Solutions for comparison are taken from MATLAB.
        lb = np.array([-1, -10])
        ub = np.array([1, 0])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, lstsq(A, b)[0])

        lb = np.array([0.0, -np.inf])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, np.inf), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
                            atol=1e-6)

        lb = np.array([-1, 0])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, np.inf), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([0.448427311733504, 0]),
                            atol=1e-15)

        ub = np.array([np.inf, -5])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([-0.105560998682388, -5]))

        ub = np.array([-1, np.inf])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([-1, -4.181102129483254]))

        lb = np.array([0, -4])
        ub = np.array([1, 0])
        for lsq_solver in self.lsq_solvers:
            res = lsq_linear(A, b, (lb, ub), method=self.method,
                             lsq_solver=lsq_solver)
            assert_allclose(res.x, np.array([0.005236663400791, -4]))
Example #39
	def solve_SAT2(self,cnf,number_of_variables,number_of_clauses):
		#Solves CNFSAT by a Polynomial Time Approximation scheme:
		#	- Encode each clause as a linear equation in n variables: missing variables and negated variables are 0, others are 1
		#	- Solve previous system of equations by least squares algorithm to fit a line
		#	- Variable value above 0.5 is set to 1 and less than 0.5 is set to 0
		#	- Rounded of assignment array satisfies the CNFSAT with high probability
		#Returns: a tuple with set of satisfying assignments
		satass=[]
		x=[]
		self.solve_SAT(cnf,number_of_variables,number_of_clauses)
		for clause in self.cnfparsed:
			equation=[]
			for n in xrange(number_of_variables):
				equation.append(0)
			#print "clause:",clause
			for literal in clause:
				if literal[0] != "!":
					equation[int(literal[1:])-1]=1
				else:
					equation[int(literal[2:])-1]=0
			self.equationsA.append(equation)
		for n in xrange(number_of_clauses):
			self.equationsB.append(1)
		a = np.array(self.equationsA)
		b = np.array(self.equationsB)
		init_guess = []
		for n in xrange(number_of_variables):
			init_guess.append(0.00000000001)
		initial_guess = np.array(init_guess)
		self.A=a
		self.B=b
		#print "a:",a
		#print "b:",b
                #print "a.shape:",a.shape
                #print "b.shape:",b.shape
		matrixa=matrix(a,tc='d')
		matrixb=matrix(b,tc='d')

		x=None
		if number_of_variables == number_of_clauses:
			if self.Algorithm=="lsqr()":
				#x = np.dot(np.linalg.inv(a),b)
				#x = gmres(a,b) 
				#x = lgmres(a,b) 
				#x = minres(a,b) 
				#x = bicg(a,b) 
				#x = cg(a,b) 
				#x = cgs(a,b) 
				#x = bicgstab(a,b)
				x = lsqr(a,b,atol=0,btol=0,conlim=0,show=True)
			if self.Algorithm=="lapack()":
				try:
					x = gesv(matrixa,matrixb)
					#x = gels(matrixa,matrixb)
					#x = sysv(matrixa,matrixb)
					#x = getrs(matrixa,matrixb)
					x = [matrixb]
				except:
					print "Exception:",sys.exc_info()
					return None 
			if self.Algorithm=="l1regls()":
				l1x = l1regls(matrixa,matrixb)
				ass=[]
				x=[]
				for n in l1x:
					ass.append(n)
				x.append(ass)
			if self.Algorithm=="lsmr()":
				x = lsmr(a,b,atol=0,btol=0,conlim=0,show=True,x0=initial_guess)
		else:
			if self.Algorithm=="solve()":
				x = solve(a,b)
			if self.Algorithm=="lstsq()":
				x = lstsq(a,b,lapack_driver='gelsy')
			if self.Algorithm=="lsqr()":
				x = lsqr(a,b,atol=0,btol=0,conlim=0,show=True)
			if self.Algorithm=="lsmr()":
				#x = lsmr(a,b,atol=0.1,btol=0.1,maxiter=5,conlim=10,show=True)
				x = lsmr(a,b,atol=0,btol=0,conlim=0,show=True,x0=initial_guess)
			if self.Algorithm=="spsolve()":
				x = dsolve.spsolve(csc_matrix(a),b)
			if self.Algorithm=="pinv2()":
				x=[]
				#pseudoinverse_a=pinv(a)
				pseudoinverse_a=pinv2(a,check_finite=False)
				x.append(matmul(pseudoinverse_a,b))
			if self.Algorithm=="lsq_linear()":
				x = lsq_linear(a,b,lsq_solver='exact')
			if self.Algorithm=="lapack()":
				try:
					#x = gesv(matrixa,matrixb)
					x = gels(matrixa,matrixb)
					#x = sysv(matrixa,matrixb)
					#x = getrs(matrixa,matrixb)
					x = [matrixb]
				except:
					print "Exception:",sys.exc_info()
					return None 
			if self.Algorithm=="l1regls()":
				l1x = l1regls(matrixa,matrixb)
				ass=[]
				x=[]
				for n in l1x:
					ass.append(n)
				x.append(ass)

		print "solve_SAT2(): ",self.Algorithm,": x:",x
		if x is None:
			return None
		cnt=0
		binary_parity=0
		real_parity=0.0
		if rounding_threshold == "Randomized":
			randomized_rounding_threshold=float(random.randint(1,100000))/100000.0
		else:
			min_assignment=min(x[0])
			max_assignment=max(x[0])
			randomized_rounding_threshold=(min_assignment + max_assignment)/2
		print "randomized_rounding_threshold = ", randomized_rounding_threshold
		print "approximate assignment :",x[0]
		for e in x[0]:
			if e > randomized_rounding_threshold:
				satass.append(1)
				binary_parity += 1
			else:
				satass.append(0)
				binary_parity += 0
			real_parity += e
			cnt+=1
		print "solve_SAT2(): real_parity = ",real_parity
		print "solve_SAT2(): binary_parity = ",binary_parity
		return (satass,real_parity,binary_parity,x[0])
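
A compact Python 3 sketch of the relaxation described above: positive literals get coefficient 1, negated or missing literals get 0, the right-hand side is all ones, and the least-squares solution is rounded at a threshold (fixed at 0.5 here):

import numpy as np
from scipy.optimize import lsq_linear

# clauses over (x1, x2): (x1 or x2) and (!x1 or x2)
a = np.array([[1., 1.],
              [0., 1.]])
b = np.ones(2)

x = lsq_linear(a, b, lsq_solver='exact').x
assignment = (x > 0.5).astype(int)
print(x, assignment)                     # the rounding satisfies both clauses
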
Example #40
 def test_dense_no_bounds(self):
     for lsq_solver in self.lsq_solvers:
         res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
         assert_allclose(res.x, lstsq(A, b)[0])