Example #1
def __init__(self, mol, shifted=False):
    self.mol = select_geom.get(mol)
    System_mol.__init__(self, self.mol, select_basis.get(mol), select_e.get(mol), mol)
    self.tape = "./test/" + mol
    self.energy = select_energy.get(mol)
    self.coef = normalization(np.array(self.alpha), self.coef, self.l, self.list_contr)
    #coef = normalization(np.array(self.alpha),np.copy(self.coef),self.xyz,self.l,self.list_contr)
    self.alpha_algopy = UTPM.init_jacobian(self.alpha)
    self.coef_algopy = UTPM.init_jacobian(self.coef)
    return
Example #2
    def test_expm(self):

        def f(x):
            x = x.reshape((2,2))
            return sum(expm(x))

        x = numpy.random.random(2*2)


        # forward mode

        ax = UTPM.init_jacobian(x)
        ay = f(ax)
        g1  = UTPM.extract_jacobian(ay)

        # reverse mode

        cg = CGraph()
        ax = Function(x)
        ay = f(ax)
        cg.independentFunctionList = [ax]
        cg.dependentFunctionList = [ay]

        g2 = cg.gradient(x)

        assert_array_almost_equal(g1, g2)
Example #3
def algo_jaco(*args, **kwargs):
    var = UTPM.init_jacobian(args[narg])
    diff_args = list(args)              # make a copy of args
    diff_args[narg] = var
    diff_args[-1] = var
    diff_args = tuple(diff_args)
    return UTPM.extract_jacobian(rhfenergy(*diff_args))
Example #4
def _energy_gradss(self, argnum, max_scf=301, max_d=300, printguess=None, name='Output.molden', output=False, order='first'):
    """This function returns the gradient of args."""
    ## For the moment it returns one value at a time.
    ## This is used only by testing functions.
    eigen = True
    rguess = False
    args = [np.log(self.sys.alpha), self.sys.coef, self.sys.xyz, self.sys.l, self.sys.charges, self.sys.atom, self.sys.natoms, self.sys.nbasis,
            self.sys.list_contr, self.sys.ne,
            max_scf, max_d, log, eigen, None, None,
            name, output, self.sys.alpha]  # the last term is only used by AlgoPy
    if self.verbose:
        self.tape.write(' \n Grad point ...\n')
        self.tape.write(' ---Start--- \n')
        self.tape.write(' Initial parameters \n')
        self.tape.write(' Maximum number of SCF: %d\n' % max_scf)
        self.tape.write(' Default SCF tolerance: %f\n' % 1e-8)
        self.tape.write(' Initial density matrix: %s\n' % str(rguess))
        self.sys.printcurrentgeombasis(self.tape)
    grad_fun = []
    for i in argnum:
        var = UTPM.init_jacobian(args[i])
        diff_args = list(args)              # make a copy of args
        diff_args[i] = var
        diff_args[-1] = var
        t0 = time.clock()
        grad = UTPM.extract_jacobian(rhfenergy(*diff_args))
        timer = time.clock() - t0
        self.sys.grad = grad
        self.tape.write(' ---End--- \n')
        self.tape.write(' Time %3.7f :\n' % timer)
    return grad
Example #5
def nuclear_gradient(mol, center=np.array([0.0, 0.0, 0.0])):
    ''' Computes the geometric derivatives of the nuclear attraction matrix '''
    '''
    grad_algo_xyz = np.zeros((mol.nbasis,mol.nbasis,3))
    #for i in range(len(mol.alpha)):
    for i in range(1):
        xyz = np.array(mol.xyz[i] - center)
        print(xyz)
        xyz_algopy = UTPM.init_jacobian(xyz)
        charges = [1]
        ncharges = 1
        grad_algo_xyz = grad_algo_xyz + UTPM.extract_jacobian(nuclearmatrix(mol.alpha,mol.coef,mol.xyz,mol.l,mol.nbasis,charges,xyz_algopy,ncharges,xyz_algopy))
    '''

    xyz = np.array(center)
    xyz_algopy = UTPM.init_jacobian(xyz)
    charges = [1]
    ncharges = 1
    grad_algo_xyz = UTPM.extract_jacobian(nuclearmatrix(mol.alpha, mol.coef, mol.xyz, mol.l, mol.nbasis, charges, xyz_algopy, ncharges, xyz_algopy))
    print(mol.atom)
    print(grad_algo_xyz[:, :, 0])
    print(grad_algo_xyz[:, :, 1])
    print(grad_algo_xyz[:, :, 2])
    print(grad_algo_xyz.shape)

    return grad_algo_xyz
Example #6
import algopy
from algopy import UTPM, exp


def d_f(x):
    """function"""
    return x[0] * x[1] * x[2] + exp(x[0]) * x[1]


# forward AD without building a computational graph
x = UTPM.init_jacobian([3, 5, 7])
y = d_f(x)
algopy_jacobian = UTPM.extract_jacobian(y)
print('jacobian = ', algopy_jacobian)

# reverse mode using a computational graph
# Step 1/2 - trace the evaluation function
cg = algopy.CGraph()
x = algopy.Function([1, 2, 3])
y = d_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]

# Step 2/2 - use the graph to evaluate derivatives
print('gradient =', cg.gradient([3., 5, 7]))
# Jacobian: the matrix of first-order partial derivatives of f with respect to x
print('Jacobian =', cg.jacobian([3., 5, 7]))
# Hessian: the square matrix of second-order partial derivatives; useful when
# optimising for local minima/maxima/saddle points
print('Hessian =', cg.hessian([3., 5., 7.]))
print('Hessian vector product =', cg.hess_vec([3., 5., 7.], [4, 5, 6]))
Example #7
def gradient(x):

    out = []
    if type(x) == float:
        i = UTPM.init_jacobian([x])
        y = f(i)
        algopy_jacobian = UTPM.extract_jacobian(y)
        out = algopy_jacobian
    else:
        for i in x:
            i = UTPM.init_jacobian([i])
            y = f(i)
            algopy_jacobian = UTPM.extract_jacobian(y)
            #     print('jacobian = ',algopy_jacobian)
            out.append(algopy_jacobian[0])

    return np.array(out)
Example #8
def alg_grad(x, func, args=()):
    if not algopy_avail:
        raise ImportError(
            "Algopy not installed. Please install algopy or use diff_grad")
    x = UTPM.init_jacobian(x)
    y = func(x, *args)
    #print("Y", y)
    return UTPM.extract_jacobian(y)
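
A minimal usage sketch for alg_grad (the test function and input below are assumptions for illustration, not part of the original snippet; it also assumes algopy is installed so that algopy_avail is True):

import numpy as np

def sphere(x):
    # simple differentiable test function: f(x) = x0^2 + x1^2 + x2^2
    return x[0]**2 + x[1]**2 + x[2]**2

g = alg_grad(np.array([1.0, 2.0, 3.0]), sphere)
print(g)  # expected gradient: [2. 4. 6.]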
Example #9
def alg_jac(x, func, args=()):
    try:
        func(x, *args)[0]
    except IndexError:
        # If func returns a scalar, the jacobian is just the gradient
        return alg_grad(x, func, args)
    if not algopy_avail:
        raise ImportError(
            "Algopy not installed. Please install algopy or use diff_jac")
    x = UTPM.init_jacobian(x)
    y = func_part(x, func, args)
    return UTPM.extract_jacobian(y)
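
The snippet above relies on a func_part helper defined elsewhere in its source project; a plausible minimal stand-in (purely a hypothetical assumption, not the project's actual helper) would simply forward the call:

def func_part(x, func, args=()):
    # hypothetical stand-in: evaluate func at x with the extra positional args
    return func(x, *args)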
Example #10
def init_UTPM_jacobian(x):
    # print 'type(x)=', type(x)
    if isinstance(x, Function):
        return x.init_UTPM_jacobian()

    elif isinstance(x, numpy.ndarray):
        return UTPM.init_jacobian(x)

    elif isinstance(x, UTPM):
        # print x.data.shape
        return UTPM.init_UTPM_jacobian(x.data[0,0])

    else:
        raise ValueError('don\'t know what to do with this input!')
Example #12
    def algopy_fprime(xk, *args):
        """ Evaluates the gradient of the function.

        Parameters:
          xk : array_like
              The coordinate vector at which to determine the gradient of `f`.

        Returns:
          grad : ndarray
              The partial derivatives of `f` with respect to `xk`.
        """
        var = UTPM.init_jacobian(xk)
        grad = UTPM.extract_jacobian(function(*(tuple([var]) + args)))
        return grad
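
For context, algopy_fprime closes over an enclosing function; a minimal sketch of the same pattern wrapped in a factory and applied to a toy function (both the factory and the test function are assumptions, not part of the original snippet):

import numpy as np
from algopy import UTPM

def make_fprime(function):
    # hypothetical factory: build a gradient routine for `function`
    def algopy_fprime(xk, *args):
        var = UTPM.init_jacobian(xk)
        return UTPM.extract_jacobian(function(*(tuple([var]) + args)))
    return algopy_fprime

fprime = make_fprime(lambda x, a: a * (x[0]**2 + x[1]**2 + x[2]**2))
print(fprime(np.array([1.0, 2.0, 3.0]), 2.0))  # expected: [ 4.  8. 12.]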
Example #13
    def test_expm_jacobian(self):
        n = 4
        x = numpy.random.randn(n, n)

        # use algopy to get the jacobian
        ax = UTPM.init_jacobian(x)
        ay = expm(ax)
        g1 = UTPM.extract_jacobian(ay)

        # compute the jacobian directly using expm_frechet
        M = numpy.zeros((n, n, n*n))
        ident = numpy.identity(n*n)
        for i in range(n*n):
            E = ident[i].reshape(n, n)
            M[:, :, i] = expm_frechet(x, E, compute_expm=False)

        assert_allclose(g1, M, rtol=1e-6)
Example #15
def IV_algopy_jac(Ee, Tc, Rs, Rsh, Isat1_0, Isat2, Isc0, alpha_Isc, Eg, Vd):
    """
    Calculate Jacobian of IV curve using AlgoPy

    :param Ee: [suns] effective irradiance
    :param Tc: [C] cell temperature
    :param Rs: [ohms] series resistance
    :param Rsh: [ohms] shunt resistance
    :param Isat1_0: [A] saturation current of first diode at STC
    :param Isat2: [A] saturation current of second diode
    :param Isc0: [A] short circuit current at STC
    :param alpha_Isc: [1/K] short circuit current temperature coefficient
    :param Eg: [eV] band gap
    :param Vd: [V] diode voltages
    :return: Jacobian :math:`\\frac{\\partial f_i}{\\partial x_{j,k}}`
        where :math:`k` are independent observations of :math:`x`
    """
    x = UTPM.init_jacobian([
        Ee, Tc, Rs, Rsh, Isat1_0, Isat2, Isc0, alpha_Isc, Eg
    ])
    return UTPM.extract_jacobian(IV_algopy(x, Vd))
Example #16
    def test_expm(self):
        def f(x):
            x = x.reshape((2, 2))
            return sum(expm(x))

        x = numpy.random.random(2 * 2)

        # forward mode

        ax = UTPM.init_jacobian(x)
        ay = f(ax)
        g1 = UTPM.extract_jacobian(ay)

        # reverse mode

        cg = CGraph()
        ax = Function(x)
        ay = f(ax)
        cg.independentFunctionList = [ax]
        cg.dependentFunctionList = [ay]

        g2 = cg.gradient(x)

        assert_array_almost_equal(g1, g2)
Example #17
y = F(x)

print('y0 = ', y0)
print('y  = ', y)
print('y.shape =', y.shape)
print('y.data.shape =', y.data.shape)
print('dF/dx(x0) * x1 =', y.data[1, 0])

import numpy
from numpy import log, exp, sin, cos, abs
import algopy
from algopy import UTPM, dot, inv, zeros


def f(x):
    A = zeros((2, 2), dtype=x)
    A[0, 0] = numpy.log(x[0] * x[1])
    A[0, 1] = numpy.log(x[1]) + exp(x[0])
    A[1, 0] = sin(x[0])**2 + abs(cos(x[0]))**3.1
    A[1, 1] = x[0]**cos(x[1])
    return log(dot(x.T, dot(inv(A), x)))


x = numpy.array([3., 7.])
x = UTPM.init_jacobian(x)

y = f(x)

print('normal function evaluation f(x) = ', y.data[0, 0])
print('Jacobian df/dx = ', UTPM.extract_jacobian(y))
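
As a sanity check (not part of the original example), the extracted Jacobian can be compared against central finite differences; this assumes f also accepts a plain NumPy array, which algopy's generic zeros/dot/inv are designed to allow:

eps = 1e-6
x0 = numpy.array([3., 7.])
fd = numpy.zeros_like(x0)
for i in range(x0.size):
    e = numpy.zeros_like(x0)
    e[i] = eps
    # central difference approximation of df/dx_i
    fd[i] = (f(x0 + e) - f(x0 - e)) / (2 * eps)
print('finite-difference check of the Jacobian =', fd)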
Example #18
    #define parameters
    params = np.array([r_0, alpha])

    #define values that we want to take a derivative with respect to
    xin = np.array([x_r[3], x_r[6], y_r[3], y_r[6]])

    #Finite Differencing
    step = 1e-6
    p1 = np.array([step, 0, 0, 0])
    p2 = np.array([0, step, 0, 0])
    p3 = np.array([0, 0, step, 0])
    p4 = np.array([0, 0, 0, step])
    p = np.array([p1, p2, p3, p4])

    derivative_FD = np.zeros(4)
    for i in range(len(p)):
        derivative_FD[i] = (overlap(xin + p[i], params) -
                            overlap(xin, params)) / step

    print "FD: ", derivative_FD

    #Automatic Differentiation

    x_algopy = UTPM.init_jacobian(xin)

    overlap_fraction = overlap(x_algopy, params)

    derivative_auto = UTPM.extract_jacobian(overlap_fraction)

    print "Automatic: ", derivative_auto
Example #19
import numpy, algopy
from algopy import UTPM, exp


def eval_f(x):
    """ some function """
    return x[0] * x[1] * x[2] + exp(x[0]) * x[1]


# forward mode without building the computational graph
# -----------------------------------------------------
x = UTPM.init_jacobian([3, 5, 7])
y = eval_f(x)
algopy_jacobian = UTPM.extract_jacobian(y)
print('jacobian = ', algopy_jacobian)

# reverse mode using a computational graph
# ----------------------------------------

# STEP 1: trace the function evaluation
cg = algopy.CGraph()
x = algopy.Function([1, 2, 3])
y = eval_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]

# STEP 2: use the computational graph to evaluate derivatives
print('gradient =', cg.gradient([3., 5, 7]))
print('Jacobian =', cg.jacobian([3., 5, 7]))
print('Hessian =', cg.hessian([3., 5., 7.]))
Example #20
def eval_jac_g_forward(x):
    x = UTPM.init_jacobian(x)
    return UTPM.extract_jacobian(eval_g(x))

print(eval_jac_g_forward(x))
print(eval_g(x))

Example #21
def f(x):
    nobs = x.shape[1:]
    f0 = x[0]**2 * sin(x[1])**2
    f1 = x[0]**2 * cos(x[1])**2
    out = zeros((2,) + nobs, dtype=x)
    out[0,:] = f0
    out[1,:] = f1
    return out

x = np.array([(1, 2, 3, 4),(5, 6, 7, 8)],dtype=float)
y = f(x)

xj = UTPM.init_jacobian(x)
j = UTPM.extract_jacobian(f(xj))

print "x =\n%r\n" % x
print "f =\n%r\n" % y
print "j =\n%r\n" % j

# time it
jaca = nda.Jacobian(f)
x = np.array([np.arange(100),np.random.rand(100)])
%timeit jaca(x)

# x =
# array([[ 1.,  2.,  3.,  4.],
       # [ 5.,  6.,  7.,  8.]])
Example #22
def ANN_Model(X, Y, test_X, test_y):
    """----------测试集原数据作图----------------"""
    # plt.figure(0)  # 创建图表1
    # plt.title('observe')
    # plt.scatter([_ for _ in range(test_y.shape[0])], test_y)

    # 训练次数
    # epochs = input('输入训练批次:\n')

    # loss_func = input('loss函数('
    #                   'mae[mean_absolute_error]\n'
    #                   'mse[mean_squared_error]\n'
    #                   'msle[mean_squared_logarithmic_error]\n'
    #                   'squared_hinge[squared_hinge]\n'
    #                   'logcosh[logcosh]\n'
    #                   '):\n')
    loss_func = 'mse'
    """----------配置网络模型----------------"""
    # 配置网络结构
    model = Sequential()

    # hidden_units = input('Number of hidden-layer units:\n')
    hidden_units = 20
    # configuration of the first hidden layer: 17 inputs, 20 outputs
    if layers_num == 1:
        model.add(
            Dense(hidden_units,
                  input_dim=len(InputIndex),
                  activation='sigmoid'))
        model.add(Dense(1, activation='sigmoid'))
    else:
        hidden_units1 = 20
        hidden_units2 = 16
        model.add(
            Dense(hidden_units1,
                  input_dim=len(InputIndex),
                  activation='sigmoid'))
        model.add(Dense(hidden_units2, activation='sigmoid'))
        model.add(Dense(1))

    # compile the model, specifying the loss function and the update method
    Ada = optimizers.Adagrad(lr=0.018, epsilon=1e-06)
    model.compile(loss=loss_func, optimizer=Ada, metrics=[loss_func])
    """----------训练模型--------------------"""
    print("training starts.....")
    model.fit(X, Y, epochs=epochs, verbose=1, batch_size=256)
    """----------评估模型--------------------"""
    # 用测试集去评估模型的准确度
    cost = model.evaluate(test_X, test_y)
    print('\nTest accuracy:', cost)
    """----------模型存储--------------------"""
    save_model(model, weight_file_path)

    # de-normalize the data
    trueTestYv = org_teY
    temp = model.predict(test_X).reshape(-1, 1)
    predTestYv = (temp.T * npscale.reshape(-1, 1)[-1, :] +
                  npminthred.reshape(-1, 1)[-1, :]).T

    save_data = {
        'Test': list(trueTestYv.T[0]),
        'Predict': list(predTestYv.T[0])
    }
    predict_predYv = pd.DataFrame(save_data)
    predict_predYv.to_csv('data/predict_test_value.csv')
    """----------计算R^2--------------------"""
    testYv = test_y.values.flatten()
    predYv = model.predict(test_X).flatten()
    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(
        testYv, predYv)
    print('R square is: ', r_value**2)

    # de-normalize the data
    trueAllYv = org_target.values
    temp = model.predict(train).reshape(-1, 1)
    predAllYv = (temp.T * npscale.reshape(-1, 1)[-1, :] +
                 npminthred.reshape(-1, 1)[-1, :]).T

    save_data = {
        'TrueData': list(trueAllYv.T[0]),
        'PredictData': list(predAllYv.T[0])
    }
    predict_AllYv = pd.DataFrame(save_data)
    predict_AllYv.to_csv('data/predict_all_value.csv')

    # compute the partial derivatives, then plot them directly

    # get the weights of each layer
    weights = {}
    for layer in model.layers:
        weight = layer.get_weights()
        info = layer.get_config()
        weights[info['name']] = weight
        if info['name'] == 'dense_1':
            df_weights = pd.DataFrame(weight[0].T, columns=InputIndex)
        else:
            df_weights = pd.DataFrame(weight[0].T)

        df_bias = pd.DataFrame(weight[1].T, columns=['bias'])
        df = pd.concat([df_weights, df_bias], axis=1)
        df.to_csv('weights/' + info['name'] + '.csv')

    res = []
    for RawNo in range(train.shape[0]):
        TargetValue = list(train.loc[RawNo].values)
        x = UTPM.init_jacobian(TargetValue)
        # derivative function, chosen by the number of hidden layers:
        # 1 layer:  DerivativeExpression_jac1
        # 2 layers: DerivativeExpression_jac2
        if layers_num == 1:
            y = DerivativeExpression_jac1(x, weights)
        else:
            y = DerivativeExpression_jac2(x, weights)
        algopy_jacobian = UTPM.extract_jacobian(y)
        # insert NEE as the last column
        res.append(list(algopy_jacobian))
    res = np.array(res)
    save_data = {
        'TrueNEE': list(trueAllYv.T[0]),
        'PredNEE': list(predAllYv.T[0])
    }
    predict_AllYv = pd.DataFrame(save_data)

    deriColumns = ['d' + str(_) for _ in train.columns.tolist()]
    result = pd.DataFrame(res, columns=deriColumns)
    result = pd.concat([result, original_data, predict_AllYv], axis=1)
    result.to_csv('data/result_jacobian.csv')

    result.dropna(inplace=True)

    for i in range(len(InputIndex)):
        plt.figure(i)  # create a new figure
        IndexName = InputIndex[i]

        # result = result[(result['d'+IndexName] > -5000) & (result['d'+IndexName] < 5000)]

        y = abs(result['d' + IndexName].values *
                scale[IndexName]) / result.shape[0]
        x = result[IndexName].values
        plt.xlabel(IndexName)
        plt.ylabel("NEE-" + IndexName)
        plt.scatter(x, y, s=1)
        plt.savefig("res/" + IndexName + ".png")
    plt.show()
Example #23
            y = f(i)
            algopy_jacobian = UTPM.extract_jacobian(y)
            #     print('jacobian = ',algopy_jacobian)
            out.append(algopy_jacobian[0])

    return np.array(out)


def eval_f(x):
    """ some function """
    return x[0]**2 * x[1]**2  #x[1]*x[2] + np.exp(x[0])*x[1]


# forward mode without building the computational graph
# -----------------------------------------------------
x = UTPM.init_jacobian([10, 10])
y = eval_f(x)
algopy_jacobian = UTPM.extract_jacobian(y)

# xax = np.linspace(-8,8,100)
# gradient(xax)

# right
a = np.linspace(1, 10, 1000)

# plt.plot(a, f([a]))
# xopt = spo.brenth(lambda x: gradient(x), 0, 20, xtol = 10e-7, full_output = True)

h = gradient(a)
plt.plot(a, h)
Example #24
import numpy, algopy
from algopy import UTPM, exp

def eval_f(x):
    """ some function """
    return x[0]*x[1]*x[2] + exp(x[0])*x[1]

# forward mode without building the computational graph
# -----------------------------------------------------
x = UTPM.init_jacobian([3,5,7])
y = eval_f(x)
algopy_jacobian = UTPM.extract_jacobian(y)
print('jacobian = ',algopy_jacobian)

# reverse mode using a computational graph
# ----------------------------------------

# STEP 1: trace the function evaluation
cg = algopy.CGraph()
x = algopy.Function([1,2,3])
y = eval_f(x)
cg.trace_off()
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]

# STEP 2: use the computational graph to evaluate derivatives
print('gradient =', cg.gradient([3.,5,7]))
print('Jacobian =', cg.jacobian([3.,5,7]))
print('Hessian =', cg.hessian([3.,5.,7.]))
print('Hessian vector product =', cg.hess_vec([3.,5.,7.],[4,5,6]))
Example #25
    def _forward(self, x, *args, **kwds):
        # forward mode without building the computational graph

        tmp = UTPM.init_jacobian(x)
        y = self.fun(tmp, *args, **kwds)
        return UTPM.extract_jacobian(y)
    a = 1./3.
    Cp = 4.*a*(1-a)**2.
    A = r_0**2*np.pi
    V = (1-loss)*U_velocity
    "Calculate Power from a single turbine"
    P = 0.5*rho*A*Cp*V**3
    return P

if __name__ == '__main__':

    rho = 1.1716
    U_velocity = 8.
    r_0 = 40
    loss = 0.5
    params = np.array([r_0, rho, U_velocity])
    xin = loss
    x_algopy = UTPM.init_jacobian(xin)
    power = jensen_power(x_algopy,params)
    derivative_automatic = UTPM.extract_jacobian(power)
    print "Automatic Differentiation Derivative: ", derivative_automatic
    h = 1e-6
    xin_h = xin + h
    power_h = jensen_power(xin_h,params)
    power_normal = jensen_power(loss,params)
    derivative_finite = (power_h - power_normal)/h
    print(derivative_finite)