Example 1
 def _energy_gradss(self,argnum,max_scf=301,max_d=300,printguess=None,name='Output.molden',output=False,order='first'):
   """This function returns the gradient of args"""
 ## For the moment it returns one value at a time
   ## This is used only by testing functions.
   eigen = True
   rguess = False
   log = True  # NOTE: assumed flag (undefined in the original snippet) indicating that alpha is passed as log(alpha)
   args=[np.log(self.sys.alpha),self.sys.coef,self.sys.xyz,self.sys.l,self.sys.charges,self.sys.atom,self.sys.natoms,self.sys.nbasis,
           self.sys.list_contr,self.sys.ne,
           max_scf,max_d,log,eigen,None,None,
           name,output,self.sys.alpha] # Last term is only used for Algopy
   if self.verbose:
         self.tape.write(' \n Grad point ...\n')
         self.tape.write(' ---Start--- \n')
         self.tape.write(' Initial parameters \n')
         self.tape.write(' Maximum number of SCF: %d\n'%max_scf)
         self.tape.write(' Default SCF tolerance: %f\n'%1e-8)
         self.tape.write(' Initial density matrix: %s\n'%str(rguess))
         self.sys.printcurrentgeombasis(self.tape)
   grad_fun =[]
   for i in argnum:
       var = UTPM.init_jacobian(args[i])
       diff_args = list(args)              # We are making a copy of args
       diff_args[i] = var
       diff_args[-1]= var
       t0 = time.clock()
       grad = UTPM.extract_jacobian(rhfenergy(*(diff_args)))
       timer = time.clock() - t0
       self.sys.grad = grad
       self.tape.write(' ---End--- \n')
       self.tape.write(' Time %3.7f :\n'%timer)
   return grad
Example 2
    def test_expm(self):

        def f(x):
            x = x.reshape((2,2))
            return sum(expm(x))

        x = numpy.random.random(2*2)


        # forward mode

        ax = UTPM.init_jacobian(x)
        ay = f(ax)
        g1  = UTPM.extract_jacobian(ay)

        # reverse mode

        cg = CGraph()
        ax = Function(x)
        ay = f(ax)
        cg.independentFunctionList = [ax]
        cg.dependentFunctionList = [ay]

        g2 = cg.gradient(x)

        assert_array_almost_equal(g1, g2)
Example 3
    def __test_kineticmatrix__(self, Mol):
        ''' Computes the kinetic-energy matrix
        and compares it with the benchmark file '''

        tool = 1E-7
        epsilon = 1e-5

        T = kineticmatrix(Mol.alpha, Mol.coef, Mol.xyz, Mol.l, Mol.nbasis,
                          Mol.list_contr, np.float64(1.0))

        f = open(Mol.tape + '_kinetics_pyquante.out', 'r')
        f_lines = f.read().split('\n')
        line = 0
        for i in range(Mol.nbasis):
            for j in range(Mol.nbasis):
                self.assertAlmostEqual(T[i, j],
                                       float(f_lines[line].split()[2]),
                                       msg="Error: Test Kinetic " +
                                       Mol.mol_name,
                                       places=5)
                line += 1
        f.close()

        grad_algo_alpha = UTPM.extract_jacobian(
            kineticmatrix(Mol.alpha_algopy, Mol.coef, Mol.xyz, Mol.l,
                          Mol.nbasis, Mol.list_contr, Mol.alpha_algopy))
        ### Testing grad alpha:
        for i in range(len(Mol.alpha)):
            for j in range(len(Mol.alpha)):
                alpha_epsilon = np.copy(Mol.alpha)
                alpha_epsilon[i] = Mol.alpha[i] + epsilon
                Tij_epsilon = kineticmatrix(alpha_epsilon, Mol.coef, Mol.xyz,
                                            Mol.l, Mol.nbasis, Mol.list_contr,
                                            np.float64(1.0))
                dTij_da = (Tij_epsilon - T) / epsilon
                np.testing.assert_almost_equal(
                    dTij_da,
                    grad_algo_alpha[:, :, i],
                    decimal=3,
                    verbose=True,
                    err_msg='Error: Test Kinetic Grad (alpha)')

        grad_algo_coef = UTPM.extract_jacobian(
            kineticmatrix(Mol.alpha, Mol.coef_algopy, Mol.xyz, Mol.l,
                          Mol.nbasis, Mol.list_contr, Mol.coef_algopy))
        for i in range(len(Mol.alpha)):
            for j in range(len(Mol.alpha)):
                coef_epsilon = np.copy(Mol.coef)
                coef_epsilon[i] = Mol.coef[i] + epsilon
                Tij_epsilon = kineticmatrix(Mol.alpha, coef_epsilon, Mol.xyz,
                                            Mol.l, Mol.nbasis, Mol.list_contr,
                                            np.float64(1.0))
                dTij_dc = (Tij_epsilon - T) / epsilon
                np.testing.assert_almost_equal(
                    dTij_dc,
                    grad_algo_coef[:, :, i],
                    decimal=3,
                    verbose=True,
                    err_msg='Error: Test Kinetic Grad (coef)')
Example 4
def nuclear_gradient(mol,center = np.array([0.0,0.0,0.0])):
        ''' Computes the geometric derivatives of the nuclear-attraction matrix '''
        '''
        grad_algo_xyz = np.zeros((mol.nbasis,mol.nbasis,3))
        #for i in range(len(mol.alpha)):
        for i in range(1):
            xyz = np.array(mol.xyz[i] - center)
            print xyz
            xyz_algopy = UTPM.init_jacobian(xyz)
            charges = [1]
            ncharges = 1
            grad_algo_xyz = grad_algo_xyz + UTPM.extract_jacobian(nuclearmatrix(mol.alpha,mol.coef,mol.xyz,mol.l,mol.nbasis,charges,xyz_algopy,ncharges,xyz_algopy))
        '''
         
        xyz = np.array(center)
        xyz_algopy = UTPM.init_jacobian(xyz)
        charges = [1]
        ncharges = 1
        grad_algo_xyz = UTPM.extract_jacobian(nuclearmatrix(mol.alpha,mol.coef,mol.xyz,mol.l,mol.nbasis,charges,xyz_algopy,ncharges,xyz_algopy))
        print(mol.atom)
        print(grad_algo_xyz[:, :, 0])
        print(grad_algo_xyz[:, :, 1])
        print(grad_algo_xyz[:, :, 2])
        print(grad_algo_xyz.shape)
        
        return grad_algo_xyz
Example 5
import algopy
from algopy import UTPM, exp

def d_f(x):
    """function"""
    return x[0] * x[1] * x[2] + exp(x[0]) * x[1]

# forward AD without building a computational graph
x = UTPM.init_jacobian([3, 5, 7])
y = d_f(x)
algopy_jacobian = UTPM.extract_jacobian(y)
print('jacobian = ', algopy_jacobian)

# reverse mode using a computational graph
# Step 1/2 - trace the evaluation function
cg = algopy.CGraph()
x = algopy.Function([1, 2, 3])
y = d_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]

# Step 2/2 - use the graph to evaluate derivatives
print('gradient =', cg.gradient([3., 5, 7]))
# Jacobian: the matrix of first-order partial derivatives of f with respect to x
print('Jacobian =', cg.jacobian([3., 5, 7]))
# Hessian: the (square) matrix of second-order partial derivatives, useful e.g.
# for classifying critical points in optimisation
print('Hessian =', cg.hessian([3., 5., 7.]))
print('Hessian vector product =', cg.hess_vec([3., 5., 7.], [4, 5, 6]))
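As a quick sanity check, both modes should agree: the gradient of f at [3, 5, 7] is [x1*x2 + exp(x0)*x1, x0*x2 + exp(x0), x0*x1] = [35 + 5*e**3, 21 + e**3, 15] ≈ [135.43, 41.09, 15.0].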
Example 6
def probconv(p, x):
    """1-component BD(NBD)"""
    #    def BDNBD(p, x):
    #        return ((p[1] * (((1 - p[3]) / (1 - (p[3] * x))) ** p[2])) + (1. - p[1])) ** p[0]
    """1-component BD(NBD) X NBD"""
    #    def BDNBD(p, x):
    #        return (((p[1] * (((1. - p[3]) / (1. - (p[3] * x))) ** p[2])) + (1. - p[1])) ** p[0]) * (((1. - p[5]) / (1. - (p[5] * x))) ** p[4])
    """2-component BD(NBD) """
    #    def BDNBD(p, x):
    #        return (p[8] * (((p[1] * (((1 - p[3]) / (1 - (p[3] * x))) ** p[2])) + (1. - p[1])) ** p[0])) + ((1. - p[8]) * ((p[5] * (((1. - p[7]) / (1. - (p[7] * x))) ** p[6]) + (1. - p[5])) ** p[4]))
    """2-component BD(NBD) X NBD"""
    def BDNBD(p, x):
        return (p[12] *
                ((((p[1] * (((1. - p[3]) / (1. - (p[3] * x)))**p[2])) +
                   (1. - p[1]))**p[0]) *
                 (((1. - p[5]) /
                   (1. - (p[5] * x)))**p[4]))) + ((1. - p[12]) * ((
                       ((p[7] * (((1. - p[9]) / (1. - (p[9] * x)))**p[8])) +
                        (1. - p[7]))**p[6]) * (((1. - p[11]) /
                                                (1. - (p[11] * x)))**p[10])))

    D = len(x)
    P = 1
    xderv = UTPM(np.zeros((D, P)))
    xderv.data[0, 0] = 0  #the value to evaluate Cj at corresponding to z=0
    xderv.data[1, 0] = 1
    derv = BDNBD(p, xderv)
    prob = derv.data[:, 0]
    return prob
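The same Taylor-coefficient trick can be checked against a distribution with a known answer. A minimal sketch, assuming only numpy and algopy, using the geometric PGF G(z) = (1 - q) / (1 - q*z), whose coefficients are P(X = k) = (1 - q) * q**k:

import numpy as np
from algopy import UTPM

def geometric_pgf(q, z):
    return (1. - q) / (1. - q * z)

D, P = 6, 1                        # number of Taylor coefficients, one direction
z = UTPM(np.zeros((D, P)))
z.data[0, 0] = 0.0                 # expand around z = 0
z.data[1, 0] = 1.0                 # z plays the role of the Taylor variable
probs = geometric_pgf(0.3, z).data[:, 0]
print(probs)                       # approximately [0.7, 0.21, 0.063, 0.0189, ...]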
Example 7
 def algo_jaco(*args, **kwargs):
     # `narg` is a closure variable from the enclosing scope: the index of the
     # argument to differentiate with respect to.
     var = UTPM.init_hessian(args[narg])
     diff_args = list(args)              # We are making a copy of args
     diff_args[narg] = var
     diff_args[-1] = var
     diff_args = tuple(diff_args)
     # UTPM.extract_hessian expects the problem dimension as its first argument
     return UTPM.extract_hessian(len(args[narg]), rhfenergy(*diff_args))
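The init_hessian / extract_hessian pattern above can be exercised on its own. A minimal self-contained sketch, assuming only numpy and algopy, with a toy function standing in for rhfenergy:

import numpy as np
from algopy import UTPM

def toy_energy(x):
    return x[0] * x[1] + x[1] ** 2

x0 = np.array([3., 5.])
ax = UTPM.init_hessian(x0)
H = UTPM.extract_hessian(len(x0), toy_energy(ax))
print(H)    # [[0. 1.]
            #  [1. 2.]]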
Example 8
def magic5():
    # `derp` (the seed array for UTPM) and `algo_f` are assumed to be defined
    # at module level elsewhere in the original project.
    x = UTPM(derp)
    x.data[1,0] = 1

    y = algo_f(x)    
    #print len(y.data)
    #print y.data[100], 
    return y.data[100][0]
Example 9
    def test_utpm_logdet_trace_expm(self):
        D, P, N = 3, 5, 4

        x = 0.1 * UTPM(numpy.random.randn(D, P, N, N))
        x = UTPM.dot(x.T, x)
        observed_logdet = UTPM.logdet(expm(x))
        desired_logdet = UTPM.trace(x)
        assert_allclose(observed_logdet.data, desired_logdet.data)
Example 10
def alg_grad(x, func, args=()):
    if not algopy_avail:
        raise ImportError(
            "Algopy not installed. Please install algopy or use diff_grad")
    x = UTPM.init_jacobian(x)
    y = func(x, *args)
    #print("Y", y)
    return UTPM.extract_jacobian(y)
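A minimal usage sketch, assuming algopy is installed (so algopy_avail is True) and using a stand-in objective:

import numpy as np

def sum_of_squares(x):
    return x[0] ** 2 + x[1] ** 2 + x[2] ** 2

g = alg_grad(np.array([3., 5., 7.]), sum_of_squares)
print(g)    # [ 6. 10. 14.]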
Example 11
def gradient(f, theta, x):

    a = UTPM.init_hessian(x)
    y = f(theta, a)
    res = UTPM.extract_hessian(2, y)  #UTPM.extract_jacobian(y)
    out = res.diagonal(1)

    return out[0]
Example 13
 def J_fcn(x_new, x, t_new, t, p):
     """ computes the Jacobian of F_fcn
     all inputs are double arrays
     """
     y = UTPM(numpy.zeros((D, N, N)))
     y.data[0, :] = x_new
     y.data[1, :, :] = numpy.eye(N)
     F = F_fcn(y, x, t_new, t, p)
     return F.data[1, :, :].T
Example 15
    def __call__(self, z0=0):
        z = np.atleast_1d(z0).ravel()
        x = UTPM(np.zeros((self.n, 1, z.size), dtype=z.dtype))

        x.data[0, 0, :] = z
        x.data[1, 0, :] = 1

        y = self.fun(x)
        coefs = np.squeeze(y.data)
        return coefs
Example 16
 def __init__(self,mol,shifted=False):
       self.mol = select_geom.get(mol)
       System_mol.__init__(self,self.mol,select_basis.get(mol),select_e.get(mol),mol)
       self.tape = "./test/"+mol
       self.energy = select_energy.get(mol)
       self.coef = normalization(np.array(self.alpha),self.coef,self.l,self.list_contr)
       #coef = normalization(np.array(self.alpha),np.copy(self.coef),self.xyz,self.l,self.list_contr)
       self.alpha_algopy = UTPM.init_jacobian(self.alpha)
       self.coef_algopy = UTPM.init_jacobian(self.coef)
       return 
Example 17
    def _forward(self, x, *args, **kwds):
        x0 = np.asarray(x)
        shape = x0.shape
        P = 1
        x = UTPM(np.zeros((self.n + 1, P) + shape))
        x.data[0, 0] = x0
        x.data[1, 0] = 1
        z = self.fun(x, *args, **kwds)
        y = UTPM.as_utpm(z)

        return y.data[self.n, 0] * special.factorial(self.n)
Example 18
    def _forward(self, x, *args, **kwds):
        d, n = 2+1, x.size
        p = n
        y = UTPM(np.zeros((d, p, n)))

        y.data[0, :] = x.ravel()
        y.data[1, :] = np.eye(n)
        z0 = self.fun(y, *args, **kwds)
        z = UTPM.as_utpm(z0)
        H = z.data[2, ...] * 2
        return H
Example 19
def _example_taylor():
    def f(x):
        return x*x*x*x  # np.sin(np.cos(x) + np.sin(x))
    D = 5
    P = 1
    x = UTPM(np.zeros((D, P)))
    x.data[0, 0] = 1.0
    x.data[1, 0] = 1

    y = f(x)
    print('coefficients of y =', y.data[:, 0])
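Since the expansion point is x0 = 1 with x = 1 + t, the printed coefficients should be those of (1 + t)**4, i.e. [1., 4., 6., 4., 1.].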
Example 20
    def _forward(self, x, *args, **kwds):
        x0 = np.asarray(x)
        shape = x0.shape
        P = 1
        x = UTPM(np.zeros((self.n + 1, P) + shape))
        x.data[0, 0] = x0
        x.data[1, 0] = 1
        z = self.f(x, *args, **kwds)
        y = UTPM.as_utpm(z)

        return y.data[self.n, 0] * misc.factorial(self.n)
Example 22
    def _forward(self, x, *args, **kwds):
        # return np.diag(super(Hessdiag, self)._forward(x, *args, **kwds))
        D, Nm = 2+1, x.size
        P = Nm
        y = UTPM(np.zeros((D, P, Nm)))

        y.data[0, :] = x.ravel()
        y.data[1, :] = np.eye(Nm)
        z0 = self.f(y, *args, **kwds)
        z = UTPM.as_utpm(z0)
        H = z.data[2, ...] * 2
        return H
Example 23
def alg_jac(x, func, args=()):
    try:
        func(x, *args)[0]
    except IndexError:
        # If func returns a scalar, the jacobian is just the gradient
        return alg_grad(x, func, args)
    if not algopy_avail:
        raise ImportError(
            "Algopy not installed. Please install algopy or use diff_jac")
    x = UTPM.init_jacobian(x)
    # func_part is assumed to be a module-level helper (defined elsewhere in the
    # original project) that evaluates func(x, *args).
    y = func_part(x, func, args)
    return UTPM.extract_jacobian(y)
Example 25
    def _jacobian_forward(self, x, *args, **kwds):
        x = np.asarray(x, dtype=float)
        # shape = x.shape
        D, Nm = 2, x.size
        P = Nm
        y = UTPM(np.zeros((D, P, Nm)))

        y.data[0, :] = x.ravel()
        y.data[1, :] = np.eye(Nm)
        z0 = self.f(y, *args, **kwds)
        z = UTPM.as_utpm(z0)
        J = z.data[1, :, :, 0]
        return J
Example 26
def init_UTPM_jacobian(x):
    # print 'type(x)=', type(x)
    if isinstance(x, Function):
        return x.init_UTPM_jacobian()

    elif isinstance(x, numpy.ndarray):
        return UTPM.init_jacobian(x)

    elif isinstance(x, UTPM):
        # print x.data.shape
        return UTPM.init_UTPM_jacobian(x.data[0,0])

    else:
        raise ValueError('don\'t know what to do with this input!')
Example 27
    def test_expm_jacobian_vector_product(self):
        n = 4
        x = numpy.random.randn(n, n)
        E = numpy.random.randn(n, n)

        # use algopy to get the jacobian vector product
        ax = UTPM.init_jac_vec(x.flatten(), E.flatten())
        ay = expm(ax.reshape((n, n))).reshape((n * n, ))
        g1 = UTPM.extract_jac_vec(ay)

        # compute the jacobian vector product directly using expm_frechet
        M = expm_frechet(x, E, compute_expm=False).flatten()

        assert_allclose(g1, M, rtol=1e-6)
Example 29
    def algopy_fprime(xk, *args):
        """ Evaluates the gradient of the function 
        Parameters:

         xk : array_like
             The coordinate vector at which to determine the gradient of `f`.

        Returns:
          grad : ndarray
              The partial derivatives of `f` with respect to `xk`.
        """
        var = UTPM.init_jacobian(xk)
        grad = UTPM.extract_jacobian(function(*(tuple([var])+args)))
        return grad
Example 31
def ones(shape, dtype=float, order='C'):
    """
    generic implementation of numpy.ones
    """

    if numpy.isscalar(shape):
        shape = (shape, )

    if isinstance(dtype, type):
        return numpy.ones(shape, dtype=dtype, order=order)

    elif isinstance(dtype, numpy.ndarray):
        return numpy.ones(shape, dtype=dtype.dtype, order=order)

    elif isinstance(dtype, UTPM):
        D, P = dtype.data.shape[:2]
        tmp = numpy.zeros((D, P) + shape, dtype=dtype.data.dtype)
        tmp[0, ...] = 1.
        return UTPM(tmp)

    elif isinstance(dtype, Function):
        return dtype.pushforward(ones, [shape, dtype, order])

    else:
        return numpy.ones(shape, dtype=type(dtype), order=order)
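A quick sketch of how the dispatch above behaves, assuming the ones function defined here is in scope together with numpy and algopy:

import numpy
from algopy import UTPM

print(ones(3))                        # plain numpy array: [1. 1. 1.]
seed = UTPM(numpy.zeros((2, 1, 4)))   # any UTPM instance; only D, P and the dtype are used
o = ones((2, 2), dtype=seed)
print(type(o), o.data.shape)          # UTPM with data of shape (2, 1, 2, 2)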
Example 32
def hessian_forward():
    def f(x, *args, **kwds):
        return x[0] + x[1] ** 2 + x[2] ** 3
    x = np.asarray([1, 2, 3], dtype=float)
    # shape = x.shape
    D, Nm = 2+1, x.size
    P = Nm
    y = UTPM(np.zeros((D, P, Nm)))

    y.data[0, :] = x.ravel()
    y.data[1, :] = np.eye(Nm)

    z0 = f(y)
    z = UTPM.as_utpm(z0)
    # Each of the Nm directions is a unit vector e_i, so the second-order Taylor
    # coefficient in direction i equals e_i^T H e_i / 2; multiplying by 2 therefore
    # yields the diagonal of the Hessian rather than the full matrix.
    H = z.data[2, ...] * 2
    return H
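For f(x) = x[0] + x[1]**2 + x[2]**3 evaluated at x = [1, 2, 3], this should return the Hessian diagonal [0., 2., 18.].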
Example 33
def extract_UTPM_jacobian(x):
    if isinstance(x, Function):
        return x.extract_UTPM_jacobian()

    elif isinstance(x, UTPM):
        return UTPM.extract_UTPM_jacobian(x)
    else:
        raise ValueError('don\'t know what to do with this input!')
Example 34
    def test_expm_jacobian(self):
        n = 4
        x = numpy.random.randn(n, n)

        # use algopy to get the jacobian
        ax = UTPM.init_jacobian(x)
        ay = expm(ax)
        g1 = UTPM.extract_jacobian(ay)

        # compute the jacobian directly using expm_frechet
        M = numpy.zeros((n, n, n*n))
        ident = numpy.identity(n*n)
        for i in range(n*n):
            E = ident[i].reshape(n, n)
            M[:, :, i] = expm_frechet(x, E, compute_expm=False)

        assert_allclose(g1, M, rtol=1e-6)
Example 35
def gradient(x):

    out = []
    if isinstance(x, float):
        i = UTPM.init_jacobian([x])
        y = f(i)
        algopy_jacobian = UTPM.extract_jacobian(y)
        out = algopy_jacobian
    else:
        for i in x:
            i = UTPM.init_jacobian([i])
            y = f(i)
            algopy_jacobian = UTPM.extract_jacobian(y)
            #     print('jacobian = ',algopy_jacobian)
            out.append(algopy_jacobian[0])

    return np.array(out)
Example 38
def eigh1(A):
    """
    generic implementation of eigh1
    """

    if isinstance(A, UTPM):
        return UTPM.eigh1(A)

    elif isinstance(A, Function):
        return Function.eigh1(A)

    elif isinstance(A, numpy.ndarray):
        A = UTPM(A.reshape((1,1) + A.shape))
        retval = UTPM.eigh1(A)
        return retval[0].data[0,0], retval[1].data[0,0],retval[2]

    else:
        raise NotImplementedError('don\'t know what to do with this instance')
Example 40
 def test_interpolation(self):
     def f(x):
         return x[0] + x[1] + 3.*x[0]*x[1] + 7.*x[1]*x[1] + 17.*x[0]*x[0]*x[0]
     
     N = 2
     D = 5
     deg_list = [0,1,2,3,4]
     coeff_list = []
     for n,deg in enumerate(deg_list):
         Gamma, rays = generate_Gamma_and_rays(N,deg)
         x = UTPM(numpy.zeros((D,) + rays.shape))
         x.data[1,:,:] = rays
         y = f(x)
         coeff_list.append(numpy.dot(Gamma, y.data[deg]))
         
     assert_array_almost_equal([0], coeff_list[0])
     assert_array_almost_equal([1,1], coeff_list[1])
     assert_array_almost_equal([0,3,7], coeff_list[2])
     assert_array_almost_equal([17,0,0,0], coeff_list[3])        
Example 41
def vecsym(v):
    if isinstance(v, UTPM):
        return UTPM.vecsym(v)

    elif isinstance(v, Function):
        return Function.vecsym(v)

    elif isinstance(v, numpy.ndarray):
        return utils.vecsym(v)

    else:
        raise NotImplementedError('don\'t know what to do with this instance')
Example 42
def dot(a,b):
    """
    Same as NumPy dot but in UTP arithmetic
    """
    if isinstance(a,Function) or isinstance(b,Function):
        return Function.dot(a,b)

    elif isinstance(a,UTPM) or isinstance(b,UTPM):
        return UTPM.dot(a,b)

    else:
        return numpy.dot(a,b)
Example 43
def outer(a, b):
    """
    Same as NumPy outer but in UTP arithmetic
    """
    if isinstance(a, Function) or isinstance(b, Function):
        return Function.outer(a, b)

    elif isinstance(a, UTPM) or isinstance(b, UTPM):
        return UTPM.outer(a, b)

    else:
        return numpy.outer(a, b)
Example 45
def symvec(A, UPLO='F'):
    if isinstance(A, UTPM):
        return UTPM.symvec(A, UPLO=UPLO)

    elif isinstance(A, Function):
        return Function.symvec(A, UPLO=UPLO)

    elif isinstance(A, numpy.ndarray):
        return utils.symvec(A, UPLO=UPLO)

    else:
        raise NotImplementedError('don\'t know what to do with this instance')
Example 47
def IV_algopy_jac (Ee, Tc, Rs, Rsh, Isat1_0, Isat2, Isc0, alpha_Isc, Eg, Vd):
    """
    Calculate Jacobian of IV curve using AlgoPy

    :param Ee: [suns] effective irradiance
    :param Tc: [C] cell temperature
    :param Rs: [ohms] series resistance
    :param Rsh: [ohms] shunt resistance
    :param Isat1_0: [A] saturation current of first diode at STC
    :param Isat2: [A] saturation current of second diode
    :param Isc0: [A] short circuit current at STC
    :param alpha_Isc: [1/K] short circuit current temperature coefficient
    :param Eg: [eV] band gap
    :param Vd: [V] diode voltages
    :return: Jacobian :math:`\\frac{\\partial f_i}{\\partial x_{j,k}}`
        where :math:`k` are independent observations of :math:`x`
    """
    x = UTPM.init_jacobian([
        Ee, Tc, Rs, Rsh, Isat1_0, Isat2, Isc0, alpha_Isc, Eg
    ])
    return UTPM.extract_jacobian(IV_algopy(x, Vd))
Example 51
File: fft.py Project: b45ch1/algopy
def fft(a, n=None, axis=-1):
    """
     
    equivalent to numpy.fft.fft(a, n=None, axis=-1)

    """

    if isinstance(a, UTPM):
        return UTPM.fft(a, n=n, axis=axis)

    elif isinstance(a, Function):
        return Function.fft(a, n=n, axis=axis)

    elif isinstance(a, numpy.ndarray):
        return numpy.fft.fft(a, n=n, axis=axis)

    else:
        raise NotImplementedError('don\'t know what to do with this instance')
Example 52
def qr_full(A):
    """
    Q,R = qr_full(A)

    This function is merely a wrapper of
    UTPM.qr_full,  Function.qr_full, scipy.linalg.qr

    Parameters
    ----------

    A:      algopy.UTPM or algopy.Function or numpy.ndarray
            A.shape = (M,N),  M >= N

    Returns
    --------

    Q:      same type as A
            Q.shape = (M,M)

    R:      same type as A
            R.shape = (M,N)


    """

    if isinstance(A, UTPM):
        return UTPM.qr_full(A)

    elif isinstance(A, Function):
        return Function.qr_full(A)

    elif isinstance(A, numpy.ndarray):
        return scipy.linalg.qr(A)

    else:
        raise NotImplementedError('don\'t know what to do with this instance')
# compute Taylor series
#
#  Jx( 1. + 2.*t + 3.*t**2 + 4.*t**3 + 5.*t**5,
#      6. + 7.*t + 8.*t**2 + 9.*t**3 + 10.*t**5 )
#  Jy( 1. + 2.*t + 3.*t**2 + 4.*t**3 + 5.*t**5,
#      6. + 7.*t + 8.*t**2 + 9.*t**3 + 10.*t**5 )
#
# where
#
# Jx = dg/dx
# Jy = dg/dy


# setup input Taylor polynomials
D,P = 5, 3  # order D=5, number of directions P
ax = UTPM(numpy.zeros((D, P)))
ay = UTPM(numpy.zeros((D, P)))
ax.data[:, :] = numpy.array([1., 2. ,3., 4. ,5.]).reshape((5,1))  # input Taylor polynomial
ay.data[:, :] = numpy.array([6., 7. ,8., 9. ,10.]).reshape((5,1))  # input Taylor polynomial

# forward sweep
cg.pushforward([ax, ay])

azbar = UTPM(numpy.zeros((D, P, 3)))
azbar.data[0, ...] = numpy.eye(3)

# reverse sweep
cg.pullback([azbar])

# get results
Jx = cg.independentFunctionList[0].xbar
import numpy
from algopy import CGraph, Function, UTPM, dot, qr, eigh, inv, solve

# first order derivatives, one directional derivative
# D - 1 is the degree of the Taylor polynomial
# P directional derivatives at once
# M number of rows of A
# N number of cols of A
D,P,M,N = 2,1,5,2

# generate badly conditioned matrix A
A = UTPM(numpy.zeros((D,P,M,N)))
x = UTPM(numpy.zeros((D,P,M,1)))
y = UTPM(numpy.zeros((D,P,M,1)))

x.data[0,0,:,0] = [1,1,1,1,1]
x.data[1,0,:,0] = [1,1,1,1,1]

y.data[0,0,:,0] = [1,2,1,2,1]
y.data[1,0,:,0] = [1,2,1,2,1]

alpha = 10**-5
A = dot(x,x.T) + alpha*dot(y,y.T)

A = A[:,:2]


# Method 1: Naive approach
Apinv = dot(inv(dot(A.T,A)),A.T)

print('naive approach: A Apinv A - A = 0 \n', dot(dot(A, Apinv),A) - A)
Example 55
import numpy; from numpy import sin,cos; from algopy import UTPM, zeros
D,P = 4,1
x = UTPM(numpy.zeros((D,P,2)))
x.data[0,:,0] = 1
p = UTPM(numpy.zeros((D,P)))
p.data[0,:] = 3; p.data[1,:] = 1

def f(t, x, p):
    retval = x.copy()
    retval[0] = x[1]
    retval[1] = -p* x[0]
    return retval
    
def implicit_euler(f_fcn, x0, ts, p):
    """ implicit euler with fixed stepsizes, using Newton's method to solve
    the occurring implicit system of nonlinear equations
    """
    
    def F_fcn(x_new, x, t_new, t, p):
        """ implicit function to solve:  0 = F(x_new, x, t_new, t_old)"""
        return (t_new - t) * f_fcn(t_new, x_new, p) - x_new + x
        
    def J_fcn(x_new, x, t_new, t, p):
        """ computes the Jacobian of F_fcn
        all inputs are double arrays
        """
        y = UTPM(numpy.zeros((D,N,N)))
        y.data[0,:]   = x_new
        y.data[1,:,:] = numpy.eye(N)
        F = F_fcn(y, x, t_new, t, p)
        return F.data[1,:,:].T
print(eval_jac_g_forward(x))
print(eval_g(x))

def f(x):
    nobs = x.shape[1:]
    f0 = x[0]**2 * sin(x[1])**2
    f1 = x[0]**2 * cos(x[1])**2
    out = zeros((2,) + nobs, dtype=x)
    out[0,:] = f0
    out[1,:] = f1
    return out

x = np.array([(1, 2, 3, 4),(5, 6, 7, 8)],dtype=float)
y = f(x)

xj = UTPM.init_jacobian(x)
j = UTPM.extract_jacobian(f(xj))

print "x =\n%r\n" % x
print "f =\n%r\n" % y
print "j =\n%r\n" % j

# time it (`nda` is assumed to be an alias for numdifftools.nd_algopy; %timeit is an IPython magic)
jaca = nda.Jacobian(f)
x = np.array([np.arange(100), np.random.rand(100)])
%timeit jaca(x)

# x =
# array([[ 1.,  2.,  3.,  4.],
       # [ 5.,  6.,  7.,  8.]])
Example 57
import numpy; from algopy import UTPM

# symmetric eigenvalue decomposition, forward UTPM
D,P,M,N = 3,1,4,4
Q,R = UTPM.qr(UTPM(numpy.random.rand(D,P,M,N)))
l = UTPM(numpy.random.rand(*(D,P,N)))
l.data[0,0,:4] = [1,1,2,3]
l.data[1,0,:4] = [0,0,3,4]
l.data[2,0,:4] = [1,2,5,6]
L = UTPM.diag(l)
B = UTPM.dot(Q,UTPM.dot(L,Q.T))

print('B = \n', B)
l2,Q2 = UTPM.eigh(B)
print('l2 - l =\n',l2 - l)
Example 58
# In the reverse mode of AD one computes M adjoint derivatives, i.e. Q = M.

import numpy
from algopy import CGraph, Function, UTPM, dot, qr, eigh, inv, zeros

def f(y):
    retval = zeros((3,1),dtype=y)
    retval[0,0] = numpy.log(dot(y.T,y))
    retval[1,0] = numpy.exp(dot(y.T,y))
    retval[2,0] = numpy.exp(dot(y.T,y)) -  numpy.log(dot(y.T,y))
    return retval
    
D,Nm = 2,40
P = Nm
y = UTPM(numpy.zeros((2,P,Nm)))

y.data[0,:] = numpy.random.rand(Nm)
y.data[1,:] = numpy.eye(Nm)


# print f(y)
J = f(y).data[1,:,:,0]
print('Jacobian J(y) = \n', J)

C_epsilon = 0.3*numpy.eye(Nm)

print(J.shape)

C = dot(J.T, dot(C_epsilon,J))
def eval_jac_g_forward(x):
    x = UTPM.init_jacobian(x)
    return UTPM.extract_jacobian(eval_g(x))