Example #1
    def _exponential_euler(self, __x__, dt, *args, **kwargs):
        """
        Exponential Euler evaluation method.

        **Notes**

        See ``evaluate`` method for parameters.
        Only available for equation of the form dy/dt = A + B*y
        """
        A = float(self.__A__(*args, **kwargs))
        B = float(self.__B__(*args, **kwargs))
        AB = A / B
        E = np.exp(-B * dt)

        if self._out is not None:
            np.multiply(__x__, E, out=self._out)
            __x__ = self._out
        elif self._in_out is not None:
            __x__ *= E
        else:
            __x__ = __x__ * E
        __x__ += AB
        AB *= E
        __x__ -= AB
        return __x__
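For reference, a minimal standalone sketch of the same update with scalar coefficients (as written, with E = exp(-B*dt), the step y*E + (A/B)*(1 - E) is the exact one-step flow of dy/dt = A - B*y, so the method is exact while A and B stay constant):

import numpy as np

def exp_euler_step(y, A, B, dt):
    # exact one-step flow of dy/dt = A - B*y:
    # y(t+dt) = y(t)*exp(-B*dt) + (A/B)*(1 - exp(-B*dt))
    E = np.exp(-B * dt)
    return y * E + (A / B) * (1.0 - E)

y = 0.0
for _ in range(100):
    y = exp_euler_step(y, A=2.0, B=0.5, dt=0.1)
print(y)  # approaches the fixed point A/B = 4.0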
Example #2
File: mosaic.py Project: folkol/mosaic
def generate_mosaic(path, w, h):
    im = Image.open(path)
    im.thumbnail((w, h), Image.ANTIALIAS)
    pixel_data = im.load()
    mosaic = Image.new("RGB", tuple(mul(FRAGMENT_SIZE, (w, h))))
    for x, y in cartesian(im.size):
        sys.stdout.write('.')
        sys.stdout.flush()
        color = pixel_data[x, y]
        _, image = closest_color(color, colors)
        mosaic.paste(image, tuple(mul(FRAGMENT_SIZE, (x, y))))
    return mosaic
Example #3
    def getGramMatrix(self, A, B, K2=None, w=None):
        Q = asmatrix(diagflat(1.0 / self.bw2))

        AQ = A * Q
        K = mul(AQ, A).sum(1) + mul(B * Q, B).sum(1).T
        K -= 2.0 * AQ * B.T

        if K2 is not None:
            K = w * K + (1 - w) * K2

        K = ev('exp(-0.5 * K)')

        return asmatrix(K)
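The method above appears to build a squared-exponential (RBF) Gram matrix via the expansion ||a - b||^2_Q = a Q a^T + b Q b^T - 2 a Q b^T, with mul acting elementwise and ev presumably numexpr.evaluate. A plain-numpy sketch of the same computation, under those assumptions:

import numpy as np

def rbf_gram(A, B, bw2):
    # squared-exponential kernel with per-dimension bandwidths bw2,
    # using ||a - b||^2_Q = a Q a^T + b Q b^T - 2 a Q b^T row-wise
    Q = np.diag(1.0 / np.asarray(bw2, dtype=float))
    AQ = A @ Q
    D = (AQ * A).sum(1)[:, None] + ((B @ Q) * B).sum(1)[None, :] \
        - 2.0 * AQ @ B.T
    return np.exp(-0.5 * D)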
Example #4
def conjugateGradient(A, b, x, max_iteration=100, epsilon=10**(-10), w=1):
    r = b - mul(A, x)
    p = r
    iterations = [x]
    for k in range(max_iteration):
        A_p = mul(A, p)
        alpha = iner(r, r) / iner(p, A_p)
        iterations.append(iterations[k] + alpha * p)
        new_r = r - alpha * A_p
        if LA.norm(new_r) < epsilon:
            break
        beta = iner(new_r, new_r) / iner(r, r)
        p = new_r + beta * p
        r = new_r
    return iterations
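A small usage sketch, assuming mul is numpy.matmul, iner is the inner-product helper defined later in this collection, and LA is numpy.linalg (the solver expects a symmetric positive definite A):

import numpy as np
from numpy import linalg as LA

mul = np.matmul
def iner(a, b):
    return mul(a.transpose(), b)

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
iters = conjugateGradient(A, b, x=np.zeros(2))
print(iters[-1])                       # ~ [0.0909, 0.6364]
print(LA.norm(mul(A, iters[-1]) - b))  # residual near zero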
Example #5
def GPRfit(xs, k1, k2, sig):
    Ky = sqexp(x, None, k1, k2**0.5)[0] + (sig**2) * np.identity(n)
    Ks = sqexp(xs, x, k1, k2**0.5)
    Kss = sqexp(xs, None, k1, k2**0.5)[0]
    L = cholesky(Ky)
    al = solve(T(L), solve(L, y))
    fmst = mul(Ks, al)
    varfmst = np.empty([n, 1])
    for i in range(np.size(xs)):
        v = solve(L, T(Ks[:, i]))
        varfmst[i] = Kss[i, i] - mul(T(v), v) + sig**2
    # sum of the log-diagonal of L equals 0.5*log|Ky|; this avoids taking
    # the log of the zero off-diagonal entries as np.trace(np.log(L)) would
    lmlopt = -0.5 * mul(T(y), al) - np.sum(np.log(np.diag(L))) \
        - 0.5 * n * np.log(2 * np.pi)
    #return fmst, varfmst[::-1], lmlopt
    return fmst, varfmst, lmlopt
Example #6
def weightedLS(A, Y, w):
    At = A.transpose()
    w = np.diag(w)
    At_w_A = reduce(mul, [At, w, A])
    x_weighted = reduce(mul, [inv(At_w_A), At, w, Y])
    r = mul(A, x_weighted) - Y
    return x_weighted
Example #7
    def computeWeighting(self, Q, PHI_S):
        self.numSamples, self.numFeatures = PHI_S.shape

        self.Q = Q
        self.PHI_S = PHI_S
        self.PHI_HAT = PHI_S.mean(0)

        # initial params
        theta = asmatrix(zeros((self.numFeatures, 1)))
        eta = max(1.0, std(Q) * 0.1)

        bestDiv = Inf
        lastDiv = Inf
        withoutImprovement = 0

        returnWeighting = ones((self.numSamples, 1)) / self.numSamples

        for i in range(40):
            theta, eta = self._optimizeDualFunction(theta, eta)

            weighting = self._computeWeightingFromThetaAndEta(theta, eta)
            divKL = self._getKLDivergence(weighting)

            if divKL > 3 or isnan(divKL):
                print('divKL warning')

            stateFeatureDifference = self.PHI_HAT - mul(PHI_S, weighting).sum(0)

            featureError = abs(stateFeatureDifference).max()
            print('Feature Error: {:f}, KL: {:f}'.format(featureError, divKL))

            if not isinf(bestDiv) and i >= 10 and featureError >= bestDiv:
                withoutImprovement = withoutImprovement + 1
            if withoutImprovement >= 3:
                print('No improvement within the last 3 iterations.')
                break

            if abs(divKL - self.epsilonAction) < 0.05 \
                and featureError < 0.01 \
                and featureError < bestDiv:

                print('Accepted solution.')
                withoutImprovement = 0
                returnWeighting = weighting

                bestDiv = featureError

                if abs(divKL - self.epsilonAction) < 0.05 \
                    and featureError < 0.001:
                    print('Found sufficient solution.')
                    break

            if (abs(stateFeatureDifference) - lastDiv).max() > -0.000001:
                print('Solution unchanged or degrading, restart from new point')
                theta = random.random(theta.shape) * 2.0 - 1.0
                lastDiv = Inf
            else:
                lastDiv = featureError

        return returnWeighting
Example #8
def gaussSeidel(A, b, x, max_iteration=100, w=1, epsilon=10**(-10)):
    iterations = [x]

    D = np.diag(np.diag(A))
    L = np.tril(A, -1)
    l_and_d = inv(L + D)

    for k in range(max_iteration):
        x = iterations[k]
        x = x + mul(l_and_d, b - mul(A, x))
        iterations.append(x)
        r = mul(A, x) - b
        norm = LA.norm(r)
        if norm <= epsilon:
            break
    return iterations
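The update x <- x + (L + D)^-1 (b - A x) is the standard Gauss-Seidel splitting, with L the strict lower triangle and D the diagonal of A. A usage sketch under the same assumptions as above (mul = numpy.matmul, inv = numpy.linalg.inv):

import numpy as np
from numpy import linalg as LA
from numpy.linalg import inv

mul = np.matmul

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
iters = gaussSeidel(A, b, x=np.zeros(2))
print(iters[-1])  # converges toward the same solution as conjugateGradient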
Example #9
def task_2b(l=1, eps=0.001):
    x_1 = np.arange(0.0, 5.1, 0.1)
    x_1 = x_1.reshape((51, 1))
    x_i = x_1
    n = x_i.shape[0]
    w = np.ones(n)
    A = np.identity(n)
    G = G_matrix(n)
    for i in range(0, 10):
        print("w:  {0}\n".format(w))
        Y = y_func(x_i)
        print("Y:  {0}\n".format(Y))
        x_i = weightedLS(A, Y, w)
        print("x_i:  {0}\n".format(x_i))
        G_x_i = mul(G, x_i)
        print("G_x_i:  {0}\n".format(G_x_i))
        for j in range(0, n - 1):
            w[j] = 1 / (math.fabs(G_x_i[j]) + eps)
    # plt.plot(x_1, y_func(x), 'r')
    # plt.plot(x_1.reshape(x_1.shape[0],1, f(x_1), 'b'))
    # plt.plot(x_i.reshape(x_i.shape[0],1, f(x_i), 'r'))
    # plt.title("task 3b")
    # plt.show()
    # plt.plot(x_i, x_i, 'b')
    # plt.plot(x_1, y_func(x_1), 'r')
    plt.plot(x_1, f(x_1), 'b')
    plt.plot(x_1, x_i, 'r')
    plt.title("task 2b")
    plt.show()
Example #10
    def sampleActions(self, S):
        if not self.trained:
            return self._getRandomActions(S.shape[0])

        actionDim = self.alpha.shape[1]

        kVec = self.GPPriorVariance * self.kernel.getGramMatrix(self.Ssub, S).T
        meanGP = dot(kVec, self.alpha)

        temp = solve(self.cholKy.T, kVec.T)
        temp = square(temp.T)
        sigmaGP = temp.sum(1)

        kernelSelf = self.GPPriorVariance * self.kernel.getGramDiag(S)
        sigmaGP = kernelSelf.squeeze() - sigmaGP.squeeze()
        sigmaGP = asarray(sigmaGP).squeeze()

        if sigmaGP.shape == (): # single number
            sigmaGP = matrix([sigmaGP])

        sigmaGP[sigmaGP < 0] = 0

        sigmaGP = tile(sqrt(sigmaGP)[:, newaxis], (1, actionDim))

        if self.UseGPBug:
            sigmaGP += sqrt(self.GPRegularizer)
        else:
            sigmaGP = sqrt(square(sigmaGP) + self.GPRegularizer)
        sigmaGP[sigmaGP < self.GPMinVariance] = self.GPMinVariance

        N = random.normal(0.0, 1.0, (S.shape[0], actionDim))
        A = mul(N, sigmaGP) + meanGP

        return A
Example #11
def svd():
    u, s, vt = np.linalg.svd(A, full_matrices=False)
    s = np.diag(s)
    x_svd = reduce(mul, [vt.transpose(), inv(s), u.transpose(), b])
    r = mul(A, x_svd) - b
    print("least squares via SVD\n", x_svd)
    print("r:\n", r)
Example #12
    def _dualFunction(self, params):
        theta = asmatrix(params[0:self.numFeatures]).T
        eta = params[-1]
        epsilon = self.epsilonAction

        V = self.PHI_S * theta
        VHat = self.PHI_HAT * theta

        advantage = self.Q - V
        maxAdvantage = advantage.max()
        QNorm = self.Q - maxAdvantage
        advantage = (QNorm - V) / eta

        g = 0
        gD = zeros((self.numFeatures + 1,))

        if advantage.max() > 500:
            g = 1e30 - eta
            gD[-2] = -1
            return g, gD

        expAdvantage = ev('exp(advantage)')
        sumExpAdvantage = expAdvantage.sum()

        realmin = finfo(double).tiny
        if sumExpAdvantage < realmin:
            sumExpAdvantage = realmin

        gLogPart = (1.0 / self.numSamples) * sumExpAdvantage

        g += eta * log(gLogPart) + VHat + maxAdvantage
        g += eta * epsilon + self.alphaL2ThetaPunishment * (theta.T * theta)

        # gradient
        if (eta * sumExpAdvantage) == 0:
            gDEta = 1e100
        else:
            gDEta = epsilon + log(gLogPart) - \
                    mul(expAdvantage, QNorm - V).sum() / (eta * sumExpAdvantage)
        gD[-1] = gDEta

        gDTheta = self.PHI_HAT + mul(-self.PHI_S, expAdvantage).sum(0) / \
            sumExpAdvantage + 2 * self.alphaL2ThetaPunishment * theta.T
        gD[0:self.numFeatures] = gDTheta

        return g, 0.5 * gD
Example #13
def armijo(max_iter, x, f, gradient, direction, alpha=1, b=0.5, c=10**-5):
    while max_iter > 0:
        objective = f(x + alpha * direction)
        limit = f(x) + c * alpha * mul(gradient.transpose(), direction)
        if objective <= limit:
            break
        alpha = alpha * b
        max_iter = max_iter - 1
    return alpha
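This is standard backtracking line search: start from step alpha and shrink it by the factor b until the Armijo sufficient-decrease condition f(x + alpha*d) <= f(x) + c*alpha*grad(f)(x)^T d holds. A usage sketch, assuming mul = numpy.matmul:

import numpy as np

mul = np.matmul

f = lambda v: float(v @ v)      # simple convex quadratic
x = np.array([1.0, 1.0])
gradient = 2 * x                # gradient of f at x
direction = -gradient           # steepest-descent direction
alpha = armijo(50, x, f, gradient, direction)
print(alpha)                    # accepted step size (0.5 here)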
Example #14
def findNearest(y, y_train, w, test):
    _min = 1e8
    pre_label = 0
    y_pre = mul(w.T, test)  # continuous value
    for i in range(len(y)):
        if abs(y_pre - y[i]) < _min:
            _min = abs(y_pre - y[i])
            pre_label = y_train[i]
    return pre_label
Example #15
def weighted_least_squares():
    At = A.transpose()
    w = np.array([1000, 1, 1, 1])
    w = np.diag(w)
    At_w_A = reduce(mul, [At, w, A])
    x_weighted = reduce(mul, [inv(At_w_A), At, w, b])
    r = mul(A, x_weighted) - b
    print("X weighted least squares\n", x_weighted)
    print(r)
    print(r[0][0])
    print(abs(r[0][0]) < 1/1000)
Example #16
def ass_a():
    x = np.zeros(10)

    iterations = [x]
    D = np.diag(np.diag(L))
    inv_D = inv(D)
    norm_list = []
    for k in range(100):
        x = x + mul(inv_D, b - mul(L, x))
        iterations.append(x)
        r = mul(L, x) - b
        norm = LA.norm(r)
        norm_list.append(norm)
        if norm <= 10**-5:
            break

    print("iteration number of 4a:", k)
    plot.semilogy(norm_list, label='jacobi')
    plot.title("question 4a")
    plot.show()
Example #17
    def rcost(y, u):
        """
        Running cost (a.k.a. utility, reward, instantaneous cost etc.)

        See class documentation
        """
        chi = np.concatenate([y, u])
        R1 = np.diag([1, 100, 1, 0, 0, 0, 0])  # self.rcost_pars[0] -> R1
        r = chi @ R1 @ chi  # quadratic form chi^T R1 chi

        return r
Example #18
def newton(max_iter, x_k, alpha, epsilon, f, df):
    output = []
    first_alpha = alpha
    while max_iter > 0:
        output = output + [x_k]

        f_theta = f(x_k)
        J = df(x_k)
        Jt_J = mul(J.transpose(), J)
        gradient = mul(J.transpose(), f_theta - data)
        d_LM = mul(inv(Jt_J), -gradient)

        alpha = armijo(20, x_k, f, gradient, d_LM, first_alpha)
        next_x = np.array(x_k) + alpha * d_LM

        # if stop(x_k, next_x, epsilon):
        #     break

        x_k = next_x
        max_iter = max_iter - 1
    output = output + [x_k]
    return output
Example #19
    def learnLSTD(self, stateActionFeatures, nextStateActionFeatures, reward):
        phi = asmatrix(stateActionFeatures)
        phi_ = asmatrix(nextStateActionFeatures)

        A_ = phi.T * (phi - self.discountFactor * phi_)
        b_ = mul(phi, reward).sum(0).T

        n = phi.shape[1]
        C = phi * inv(phi.T * phi + self.lstdRegularizationFactor * eye(n))
        X = C * (A_ + self.lstdRegularizationFactor * eye(n))
        y = C * b_

        return solve(X.T * X + self.lstdProjectionRegularizationFactor * eye(n),
                X.T * y)
Example #20
def ass_b():
    m1 = L[:3, :3]
    m2 = L[3:, 3:]
    inv_m1 = inv(m1)
    inv_m2 = inv(m2)
    inv_m = np.zeros(100).reshape(10, 10)
    inv_m[:3, :3] = inv_m1
    inv_m[3:, 3:] = inv_m2
    x = np.zeros(10)

    iterations = [x]
    for k in range(100):
        x = x + mul(0.7 * inv_m, b - mul(L, x))
        iterations.append(x)
        r = mul(L, x) - b
        norm = LA.norm(r)
        if norm <= 10**-5:
            break

    print("iteration number of 4b:", k)
    x_axis = list(map(lambda u: LA.norm(mul(L, u) - b), iterations))
    plot.semilogy(x_axis, label='jacobi')
    plot.title("question 4b")
    plot.show()
Example #21
def computeGuellrichDomain2D(DIMS, REFS, hx, dhdx):
    # Get data from DIMS and REFS
    ZH = DIMS[2]
    NX = DIMS[3] + 1
    NZ = DIMS[4]

    # input REFS = [x, z, HFM, whf, CPM, wcp]
    x = REFS[0]
    z = REFS[1]

    # Compute the flat XZ mesh
    HTZL, dummy = np.meshgrid(hx, z)
    XL, ZL = np.meshgrid(x, z)

    # High Order Improved Guellrich coordinate 3 parameter function
    xi = 1.0 / ZH * ZL
    ang = 0.5 * mt.pi * xi
    AR = 1.0E-3
    p = 20
    q = 5

    expdec = np.exp(-p / q * xi)
    cosvar = np.power(np.cos(ang), p)
    cosvard = np.power(np.cos(ang), p - 1)
    fxi1 = mul(expdec, cosvar)
    fxi2 = AR * mul(xi, (1.0 - xi))
    fxi = np.add(fxi1, fxi2)

    dfdxi1 = -p / q * mul(expdec, cosvar)
    dfdxi2 = -(0.5 * p) * mt.pi * mul(mul(expdec, np.sin(ang)), cosvard)
    dfdxi3 = -AR * (1.0 - 2.0 * xi)
    dfdxi = np.add(np.add(dfdxi1, dfdxi2), dfdxi3)

    dzdh = fxi
    dxidz = ZH + mul(HTZL, np.add(dfdxi, -fxi))
    sigma = ZH * np.power(dxidz, -1.0)

    # Make the global array of terrain height and slope features
    ZTL = np.zeros((NZ, NX))
    DZT = np.zeros((NZ, NX))

    for rr in range(NZ):
        ZTL[rr, :] = np.add(mul(dzdh[rr, :], hx), ZL[rr, :])
        DZT[rr, :] = mul(dzdh[rr, :], dhdx)

    return XL, ZTL, DZT, sigma
Example #22
def chebpolym(NM, xi):
    # Compute Chebyshev pols (first kind) into a matrix transformation
    # Functions need to be arranged bottom to top!
    NX = len(xi)
    CTM = np.zeros((NX, NM + 1))

    CTM[:, 0] = np.ones(NX)
    CTM[:, 1] = xi

    # 3 Term recursion
    for ii in range(2, NM + 1):
        CTM[:, ii] = 2.0 * mul(xi, CTM[:, ii - 1]) - CTM[:, ii - 2]

    return CTM
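The loop is the three-term recursion T_k(x) = 2x*T_{k-1}(x) - T_{k-2}(x). A quick check against numpy's Chebyshev pseudo-Vandermonde builder, assuming mul is elementwise multiplication:

import numpy as np

mul = np.multiply  # elementwise, as the recursion requires

xi = np.linspace(-1.0, 1.0, 5)
CTM = chebpolym(3, xi)
ref = np.polynomial.chebyshev.chebvander(xi, 3)  # columns T_0..T_3
print(np.allclose(CTM, ref))  # True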
Example #23
    def train(self, S, A, w, Ssub):
        self.Ssub = Ssub

        # kernel matrix on subset of samples
        K = self.GPPriorVariance * \
            self.kernel.getGramMatrix(self.Ssub, self.Ssub)

        w /= w.max()

        GPRegularizerEffective = self.GPRegularizer
        counter = 1
        while True:
            Ky = K + eye(K.shape[0]) * GPRegularizerEffective
            try:
                self.cholKy = chol(Ky)
                break
            except LinAlgError:
                GPRegularizerEffective *= 2

            counter += 1
            assert counter < 100, 'SparseGPPolicy: chol failed'

        kernelVectors = self.GPPriorVariance * \
            self.kernel.getGramMatrix(self.Ssub, S).T

        cholKyInvReg = 0
        while True:
            try:
                cholKyInv = pinv(self.cholKy + cholKyInvReg * eye(self.cholKy.shape[0]))
                cholKyInvT = pinv(self.cholKy.T + cholKyInvReg * eye(self.cholKy.shape[0]))
                break
            except LinAlgError:
                if cholKyInvReg == 0:
                    cholKyInvReg = 1e-10
                else:
                    cholKyInvReg *= 2

        featureVectors = dot(dot(kernelVectors, cholKyInv), cholKyInvT)
        featureVectorsW = mul(featureVectors, w)

        X = dot(featureVectorsW.T, featureVectors)
        X += eye(featureVectors.shape[1]) * self.SparseGPInducingOutputRegularization
        y = dot(solve(X, featureVectorsW.T), A)

        self.alpha = solve(self.cholKy, solve(self.cholKy.T, y))

        self.trained = True
Example #24
def exact_Newton(f, jacobian, gradient, x_0, eps=10**-3, max_iterations=100):
    x_k = np.clip(x_0, -1, 1)
    output = []
    for i in range(0, max_iterations):
        d_n = -mul(np.linalg.inv(jacobian(x_k)), gradient(x_k))
        d_n = np.reshape(d_n, d_n.size)
        a = armijo(x_k, -d_n, f, gradient)
        # a=1
        next_x = x_k + a * d_n
        next_x = np.clip(next_x, -1, 1)
        if (np.linalg.norm(x_k) != 0):
            if np.linalg.norm(next_x - x_k) / np.linalg.norm(x_k) < eps:
                output = output + [next_x]
                return output
        output = output + [next_x]
        x_k = next_x
    return output
Example #25
def parseExpr(expr):
    if expr.data == "add":
        return add(parseExpr(expr.children[0]), parseExpr(expr.children[1]))
    elif expr.data == "sub":
        return sub(parseExpr(expr.children[0]), parseExpr(expr.children[1]))
    elif expr.data == "mul":
        return mul(parseExpr(expr.children[0]), parseExpr(expr.children[1]))
    elif expr.data == "div":
        return div(parseExpr(expr.children[0]), parseExpr(expr.children[1]))
    elif expr.data == "neg":
        return neg(parseExpr(expr.children[0]))
    elif expr.data == "word":
        word = expr.children[0].value.lower()
        if word in w2idx:
            return mat[w2idx[word]]
        else:
            return np.zeros(100)
Example #26
def ConjugateGradient2(A,
                       b,
                       naught=None,
                       ShowProgress=True,
                       tol=1e-8,
                       lo=0,
                       hi=0,
                       max_iter=20000,
                       dt=float64):
    #
    #   preamble
    #
    if naught is None:
        naught = [uniform(lo, hi) for w in range(len(b))]


#    if isinstance(A, Matrix):                   # only for compatibility with the above, otherwise can be omitted
#        A = arr(A.body, dtype=dt)
    A = A.astype(dt)
    x = arr([naught], dtype=dt).T  # convert list into numpy column vector
    b = arr([b], dtype=dt).T
    r = b - mul(A, x)  # residual
    v = arr([j for j in r], dtype=dt)
    Measure = norm((mul(A, x) - b).T[0], ord=inf)
    modr = mul(r.T, r)[0][0]
    #
    #   iterate
    #
    k = 0
    while (k < max_iter and Measure > tol * max(x.max(), x.min())):
        Av = mul(A, v)
        # modulus (norm) of v w.r.t. the inner product (x, y) := x^T A y
        # (an inner product when A is symmetric)
        modv = mul(v.T, Av)[0][0]
        alpha = float(modr) / modv
        x += alpha * v
        r -= alpha * Av
        mod_old_r = modr
        modr = mul(r.T, r)[0][0]  # ||b-alphaAv||^2, not ||residual||^2
        alpha = float(modr) / mod_old_r  # ||new.r||^2 / ||old.r||^2
        v = r + alpha * v
        k += 1
        Measure = norm((mul(A, x) - b).T[0], ord=inf)
        if ShowProgress:
            print('k=' + str(k) + '; ' + str(Measure))
    #
    #   postamble
    #
    return {'soln': x, 'times': k}
Example #27
    def Intersect(self, i):

        # Transform ray into object space
        o = PointMul(self.inv_xform, i.ray.o)
        dir = VecMul(self.inv_xform, i.ray.dir)
        dir_n = Normalize(dir)

        # Assume sphere is centered at origin with radius 1.0 in object space
        b = 2.0 * dot(dir_n, o)
        c = dot(o, o) - 1

        # Use quadratic formula to solve for t
        delta = b * b - 4 * c

        if (delta < -EPSILON):  # no intersection if the discriminant is negative
            return (False)

        t = 0
        if (delta < EPSILON):  # intersects only once (on tangent)
            t = -b / 2.0
        else:
            sqrt_delta = sqrt(delta)
            t0 = (-b - sqrt_delta) / 2.0
            t1 = (-b + sqrt_delta) / 2.0
            if (t0 > EPSILON and t1 > EPSILON):
                t = min(t0, t1)  # first intersection along ray
            elif (t0 > EPSILON):
                t = t0  # t1 is behind ray
            elif (t1 > EPSILON):
                t = t1  # t0 is behind ray
            else:
                return (False)  # ray intersection are both behind ray

        t_dir = mul(t, dir_n)  # scaled dir vec in global space
        t_dir_glob = VecMul(self.xform, t_dir)  # in global space
        i.dist = LA.norm(t_dir_glob)
        i.p = add(i.ray.o, t_dir_glob)
        p_obj = add(o, t_dir)
        i.n = VecMul(self.xform, p_obj)
        i.uv = (
            (atan2(p_obj[2], p_obj[0]) + pi) / (2.0 * pi),  # longitude angle 
            acos(p_obj[1]) / pi)  # latitude angle

        return True
Example #28
def gramSchmidtQR(a):
    m, n = a.shape
    # initiation
    r = np.zeros(shape=(n, n))
    q = np.zeros(shape=(m, n))
    a1 = a[:, 0]
    r[0][0] = LA.norm(a1, ord=2)
    q[:, 0] = a1 / r[0][0]

    for i in range(1, n):
        ai = a[:, i]
        q[:, i] = ai
        for j in range(0, i):
            qj = q[:, j]
            r[j][i] = mul(qj.transpose(), ai)
            q[:, i] = q[:, i] - r[j][i] * qj
        r[i][i] = LA.norm(q[:, i], ord=2)
        q[:, i] = q[:, i] / r[i][i]
    return q, r
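A verification sketch, assuming mul = numpy.matmul: Q should have orthonormal columns and Q @ R should reproduce the input. (Note the projection coefficients use the original column ai, so this is classical rather than modified Gram-Schmidt and can lose orthogonality on ill-conditioned input.)

import numpy as np

mul = np.matmul

a = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
q, r = gramSchmidtQR(a)
print(np.allclose(mul(q, r), a))            # Q @ R reproduces A
print(np.allclose(mul(q.T, q), np.eye(2)))  # columns of Q are orthonormal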
Example #29
def generate_2QB_Cliffords(_index):
    seq_QB1 = []
    seq_QB2 = []
    sequence_rb.add_twoQ_clifford(_index, seq_QB1, seq_QB2)
    m2QBClifford = np.identity(4, dtype=complex)
    for i in range(len(seq_QB1)):
        _mGate = np.matrix([1])
        if (seq_QB1[i] == gates.CZ
                or seq_QB2[i] == gates.CZ):  # two qubit gates
            _mGate = np.kron(dict_m2QBGate['CZ'], _mGate)
        else:  # 1QB gates
            for g in [seq_QB2[i], seq_QB1[i]]:
                if (g == gates.I):
                    _mGate = np.kron(dict_m1QBGate['I'], _mGate)
                elif (g == gates.Xp):
                    _mGate = np.kron(dict_m1QBGate['Xp'], _mGate)
                elif (g == gates.Xm):
                    _mGate = np.kron(dict_m1QBGate['Xm'], _mGate)
                elif (g == gates.X2p):
                    _mGate = np.kron(dict_m1QBGate['X2p'], _mGate)
                elif (g == gates.X2m):
                    _mGate = np.kron(dict_m1QBGate['X2m'], _mGate)
                elif (g == gates.Yp):
                    _mGate = np.kron(dict_m1QBGate['Yp'], _mGate)
                elif (g == gates.Ym):
                    _mGate = np.kron(dict_m1QBGate['Ym'], _mGate)
                elif (g == gates.Y2p):
                    _mGate = np.kron(dict_m1QBGate['Y2p'], _mGate)
                elif (g == gates.Y2m):
                    _mGate = np.kron(dict_m1QBGate['Y2m'], _mGate)
                elif (g == gates.Zp):
                    _mGate = np.kron(dict_m1QBGate['Zp'], _mGate)
                elif (g == gates.Zm):
                    _mGate = np.kron(dict_m1QBGate['Zm'], _mGate)
                elif (g == gates.Z2p):
                    _mGate = np.kron(dict_m1QBGate['Z2p'], _mGate)
                elif (g == gates.Z2m):
                    _mGate = np.kron(dict_m1QBGate['Z2m'], _mGate)
        m2QBClifford = mul(_mGate, m2QBClifford)
    return (m2QBClifford)
Example #30
def desent(max_iter, x_k, alpha, epsilon, f, df):
    output = [x_k]

    while max_iter > 0:
        f_theta = f(x_k)
        J = df(x_k)

        curr_gradient = mul(J.transpose(), f_theta - data)
        gradient_norm = LA.norm(curr_gradient)
        normal_gradient = curr_gradient / gradient_norm

        curr_alpha = armijo(30, x_k, f, normal_gradient, -normal_gradient, alpha)
        D = curr_alpha * normal_gradient
        next_x = x_k + D

        # if stop(x_k, next_x, epsilon):
        #     output = output + [x_k]
        #     break

        x_k = next_x
        output = output + [x_k]
        max_iter = max_iter - 1
    return output
Example #31
    def Shade(self, i):
        i.color = self.texture.Sample(i.uv)

        n_norm = Normalize(i.n)
        e_norm = Normalize(i.ray.dir)
        e_dot_n = dot(e_norm, n_norm)
        r = Normalize(add(e_norm, mul(-2.0 * e_dot_n, n_norm)))

        diffuse = (0.0, 0.0, 0.0)
        specular = (0.0, 0.0, 0.0)
        for light in i.scene.lights:
            light_info = light.GetLightSampleInfo(i)
            light_dist = LA.norm(light_info.dir)
            l_norm = mul(light_info.dir, 1.0 / light_dist)

            # Trace shadows
            if (i.next != None):
                i.next.ray.Set(i.p, l_norm, True)
                if (i.next.Trace(False, True)):
                    if (i.next.dist < light_dist):
                        continue

                l_dot_n = dot(l_norm, n_norm)
                if (l_dot_n > 0.0):
                    diffuse = add(diffuse, mul(l_dot_n, light_info.emission))

                    spec = l_dot_n * pow(dot(r, l_norm), self.spec_exp)
                    specular = add(specular, mul(spec, light_info.emission))

        i.color = add(mul(i.color, diffuse), mul(self.ks, specular))
        i.opacity = [1, 1, 1]

        # Trace reflection
        if (i.next != None):
            if (GreaterThan3(self.kr, .01)):
                i.next.ray.Set(i.p, r, True)
                if (i.next.Trace(True)):
                    i.color = add(i.color, mul(i.next.color, self.kr))
Example #32
def add_graph_line_first(fun, A, b, x, w, label, max_iterations=100):
    ans = fun(A, b, x, max_iterations, w=w)
    x_axis = list(map(lambda u: LA.norm(mul(A, u) - b), ans))
    plot.semilogy(x_axis, label=label)
Example #33
def iner(a, b):
    # inner product <a, b> via matrix multiplication
    return mul(a.transpose(), b)
Example #34
import numpy as np
import pandas as pd
from pandas import read_csv
from numpy import transpose as tp
from numpy import matmul as mul

x = read_csv('./NN_Datasets/face_x.txt', header=None, delimiter=' ').values
y = read_csv('./NN_Datasets/face_y.txt', header=None, delimiter=' ').values

# random linear model: w is a 1 x d weight row, b a scalar bias
w = tp(2 * np.random.rand(x.shape[1], 1) - 1)
b = 2 * np.random.rand(1, 1) - 1

# predictions f = x w^T - b for every sample
f = tp(mul(w, tp(x))) - b

# count misclassified samples: y * f <= 0 means prediction and label disagree
err = np.sum(y * f <= 0)
print(err)
Example #35
    def __mask2mat(self, mask):
        k, h, w = mask.shape
        v = mask.reshape(k, h * w)
        mat = np.matmul(v.T, v)  # (h*w, h*w) co-activation matrix of the masks
        return mat
Example #36
def muls(dst, one, two):
    with stats['muls']:
        np.multiply(one.np, two, out=dst.np)
Example #37
def mul(dst, one, two):
    with stats['mul']:
        np.multiply(one.np, two.np, out=dst.np)
Example #38
def vifSelect(m, xs, ys):
    w = 0.5
    deltaw = .05

    vcxs = np.matrix(xs).T

    center(vcxs)
    ys = np.array(ys)
    mys = np.matrix(ys)
    subsample = sample(range(len(xs[0])), m)
    f = 0
    rmse = np.std(ys)
    residuals = np.matrix([y - ys.mean() for y in ys])
    model = np.matrix([1] * m).T

    results = np.matrix([1] * len(xs[0])).T

    whichVars = []

    for j in range(len(xs)):
        alpha = w / (1 + j - f)
        gammahat = residuals.dot(vcxs[:, j]) / norm(vcxs[:, j])
        xsub = []
        for i in subsample:
            xsub.append(vcxs[i, j])
        xsub = np.matrix(xsub).T

        matrixOfFun = mul(
            mul(mul(xsub.T, model), np.linalg.inv(mul(model.T, model))),
            mul(model.T, xsub))
        rsquared = matrixOfFun[0, 0] / (norm(xsub)**2)
        t = gammahat / (rmse * np.sqrt(1 - rsquared))

        cdfval = 2 * normal.cdf(-abs(t))
        if cdfval < alpha:
            model = np.hstack((model, xsub))
            results = np.hstack((results, vcxs[:, j]))

            whichVars.append(j)

            residuals = mys - mul(
                mul(mul(results, np.linalg.inv(mul(results.T, results))),
                    results.T), ys)
            rmse = norm(residuals) / np.sqrt(len(xs[0]) - 1 - model.shape[1])
            w = w + deltaw
            f = j
        else:
            w = (w - alpha) / (1 - alpha)
    coeffs = mul(mul(np.linalg.inv(mul(results.T, results)), results.T), ys)
    return coeffs, whichVars
Example #39
def qr():
    Q, R = np.linalg.qr(A)
    x_qr = reduce(mul, [inv(R), Q.transpose(), b])
    r = mul(A, x_qr) - b
    print("least squares via QR factorization:\n", x_qr)
    print("r:\n", r)
Example #40
def F_gradient(teta, f, df):
    J = df(teta)
    return mul(J.transpose(), f(teta) - data)