Example #1
def PyAdolc_dvLJ(x):
    # retape the potential Adolc_vLJ on tape 0, then evaluate its gradient at x
    adolc.trace_on(0)
    ad_x = adolc.adouble(np.zeros(np.shape(np.ravel(x)), dtype=float))
    adolc.independent(ad_x)
    ad_y = Adolc_vLJ(ad_x)
    adolc.dependent(ad_y)
    adolc.trace_off()
    # the gradient is evaluated at the flattened input
    return adolc.gradient(0, np.ravel(x))
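Example #1 retapes Adolc_vLJ on every call. A common alternative is to trace once and reuse the tape for every gradient evaluation; the following is only a sketch of that pattern, with a stand-in quadratic objective in place of Adolc_vLJ:

import numpy as np
import adolc

def stand_in_objective(ax):
    # placeholder for Adolc_vLJ: any scalar function of the adouble array works
    return np.sum(ax * ax)

x0 = np.zeros(6)

# trace the objective once on tape 0
adolc.trace_on(0)
ad_x = adolc.adouble(x0)
adolc.independent(ad_x)
ad_y = stand_in_objective(ad_x)
adolc.dependent(ad_y)
adolc.trace_off()

def grad(x):
    # every later gradient evaluation just replays the stored tape
    return adolc.gradient(0, np.ravel(x))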
Example #2
    def pCostpW_adolc(self, W, p_target):

        tag = self.pCostpW_tag
        if not self.pCostpW_traced:
            aW = adouble(W.flatten(order='F'))
            ap = adouble(p_target)

            adolc.trace_on(tag)
            adolc.independent(aW)
            aW3 = np.reshape(aW, W.shape, order='F')
            acost = cost.inverse_pressure_design(aW3, ap)
            adolc.dependent(acost)
            adolc.trace_off()
            # record that tape `tag` now holds the trace so the function is taped only once
            self.pCostpW_traced = True

        return adolc.gradient(self.pCostpW_tag, W.flatten(order='F'))
Example #3
def PyAdolc_dvLJ_for_Optimize(x):
    return adolc.gradient(0, x)
Example #4
    t0 = 0.0
    tf = 1.0
    conp = [ 1.0, 1.0 ]
    adolc.trace_on(tag_full)
    con = adolc.as_adouble(conp)
    for c in con:
        c.declareIndependent()
    y = con
    ynew = adolc.as_adouble([ 0.0, 0.0 ])
    for i in range(steps):
        euler_step_act(n,y,m,ynew)
        y = ynew
    f = y[0] + y[1]
    f.declareDependent()
    adolc.trace_off()
    grad = adolc.gradient(tag_full,conp)
    print(" full taping:\n gradient=(",grad[0],", ",grad[1],")\n\n")

    # now taping external function
    adolc.trace_on(tag_ext_fct)
    con = adolc.as_adouble(conp)
    for c in con:
        c.declareIndependent()
    y = con
    ynew = adolc.as_adouble([ 0.0, 0.0 ])
    euler_step_act(2,y,2,ynew)
    for c in ynew:
        c.declareDependent()
    adolc.trace_off()

    edf = euler_step_edf(tag_ext_fct)    
Example #5
 def gradient(self,x):
     return adolc.gradient(self.tape_number,
                           np.ravel(x)).reshape(x.shape)
Example #6
 def A_gradA_taped(self, XP):
     return adolc.function(self.adolcID,
                           XP), adolc.gradient(self.adolcID, XP)
Example #7
 def __call__(self, x):
     return adolc.gradient(self.id, x)
Example #8
adolc.independent(x)
y = f(x)
adolc.dependent(y)
adolc.trace_off()


# point at which the derivatives should be evaluated
x = random((N,D))

print('\n\n')
print('Sympy function = function  check (should be almost zero)')
print(f(x) - sym_f(x))

print('\n\n')
print('Sympy vs Hand Derived Gradient check (should be almost zero)')
print(df(x) - sym_df(x))

print('Sympy vs Ad Derived Gradient check (should be almost zero)')
print(adolc.gradient(0, numpy.ravel(x)).reshape(x.shape) - sym_df(x))

print('\n\n')
print('Sympy vs Hand Derived Hessian check (should be almost zero)')
print(ddf(x) - sym_ddf(x))

print('Sympy vs Ad Derived Hessian check (should be almost zero)')
print(adolc.hessian(0, numpy.ravel(x)).reshape(x.shape + x.shape) - sym_ddf(x))




Example #9
def eval_grad_f(x, user_data=None):
    return adolc.gradient(fID,x)
Example #10
def grLLadolc(x):
    return adolc.gradient(1, x)
Example #11
    ## PyADOLC taping
    start_time = time()
    ax = numpy.array([adolc.adouble(0.0) for i in range(N)])
    adolc.trace_on(0)
    for n in range(N):
        ax[n].is_independent(x[n])
    ay = f(ax)
    adolc.depends_on(ay)
    adolc.trace_off()
    end_time = time()
    adolc_tape_times.append(end_time - start_time)

    ## PyADOLC gradient
    start_time = time()
    adolc_g = adolc.gradient(0, x)
    end_time = time()
    adolc_gradient_times.append(end_time - start_time)

    ### check that both derivatives give the same result
    # print 'difference between forward and reverse gradient computation', numpy.linalg.norm(g_forward - g_reverse)
    # print 'difference between forward and reverse gradient2 computation', numpy.linalg.norm(g_forward - g_reverse2)
    # print 'difference between Algopy and PyAdolc', numpy.linalg.norm(adolc_g - g_reverse2)


import pylab

function_plot = pylab.loglog(Ns, function_eval_times, "r.")
forward_plot = pylab.loglog(Ns, forward_eval_times, "b.")
taperev_plot = pylab.loglog(Ns, tape_rev_eval_times, "r^")
tape_plot = pylab.loglog(Ns, tape_eval_times, "b^")
Example #12
def alglib_func(x,grad,p):
    grad[:] = adolc.gradient(adolcID,x)    
    return  adolc.function(adolcID,x)
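Example #12 pairs adolc.function and adolc.gradient from the same tape inside an ALGLIB callback. The same pairing works for SciPy; the sketch below assumes a hypothetical tape number adolcID and a small stand-in objective, and uses scipy.optimize.minimize with jac=True so the callable may return the value and the gradient together:

import numpy as np
import adolc
from scipy.optimize import minimize

adolcID = 7                      # hypothetical tape number
x0 = np.array([1.0, 2.0])        # hypothetical starting point

# record a small stand-in objective on the tape
adolc.trace_on(adolcID)
ax = adolc.adouble(x0)
adolc.independent(ax)
ay = (ax[0] - 3.0) * (ax[0] - 3.0) + (ax[1] + 1.0) * (ax[1] + 1.0)
adolc.dependent(ay)
adolc.trace_off()

def func_and_grad(x):
    # value and gradient from the same tape, as in alglib_func above
    f = float(np.ravel(adolc.function(adolcID, x))[0])
    return f, adolc.gradient(adolcID, x)

res = minimize(func_and_grad, x0, jac=True, method='BFGS')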
Example #13
 def _adolc_grad(self, x, **kwargs):
     "Evaluate the objective gradient from the ADOL-C tape."
     return adolc.gradient(self._obj_trace_id, x)
Example #14
 def gradA_taped(self, XP):
     return adolc.gradient(self.adolcID, XP)
Example #15
def eval_grad_f_adolc(x, user_data=None):
    return adolc.gradient(1, x)
Example #16
 def evaluateCostGradient(self, grad_f, x):
     agrad_f = adolc.gradient(1, x)
     np.copyto(grad_f, agrad_f)
Example #17
 def _adolc_grad(self, x, **kwargs):
     """Evaluate the objective gradient."""
     return adolc.gradient(self._obj_trace_id, x)
Example #18
 def gradA_taped(self, XP, user_data=None):
     return adolc.gradient(self.adolcID, XP)
Example #19
 def gradient(self, x):
     return adolc.gradient(0,x)
Example #20
    ## PyADOLC taping
    start_time = time()
    ax = numpy.array([adolc.adouble(0.) for i in range(N)])
    adolc.trace_on(0)
    for n in range(N):
        ax[n].is_independent(x[n])
    ay = f(ax)
    adolc.depends_on(ay)
    adolc.trace_off()
    end_time = time()
    adolc_tape_times.append(end_time - start_time)

    ## PyADOLC gradient
    start_time = time()
    adolc_g = adolc.gradient(0, x)
    end_time = time()
    adolc_gradient_times.append(end_time - start_time)

    ### check that both derivatives give the same result
    #print 'difference between forward and reverse gradient computation', numpy.linalg.norm(g_forward - g_reverse)
    #print 'difference between forward and reverse gradient2 computation', numpy.linalg.norm(g_forward - g_reverse2)
    #print 'difference between Algopy and PyAdolc', numpy.linalg.norm(adolc_g - g_reverse2)

import pylab

function_plot = pylab.loglog(Ns, function_eval_times, 'r.')
forward_plot = pylab.loglog(Ns, forward_eval_times, 'b.')
taperev_plot = pylab.loglog(Ns, tape_rev_eval_times, 'r^')
tape_plot = pylab.loglog(Ns, tape_eval_times, 'b^')
rev_plot = pylab.loglog(Ns, rev_eval_times, 'cs')
Example #21
import numpy
import math
import adolc

# tape a function evaluation
ax = numpy.array([adolc.adouble(0.) for n in range(2)])
# ay = adolc.adouble(0)
adolc.trace_on(13)
adolc.independent(ax)
ay = numpy.sin(ax[0] + ax[1]*ax[0])
adolc.dependent(ay)
adolc.trace_off()

x = numpy.array([3., 7.])
y = numpy.zeros(1)
adolc.tape_to_latex(13, x, y)

y = adolc.function(13, x)
g = adolc.gradient(13, x)
J = adolc.jacobian(13, x)

print('function y=', y)
print('gradient g=', g)
print('Jacobian J=', J)
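A quick sanity check on a tape like this one is to compare the taped gradient against central finite differences, using only calls that already appear in Example #21; the helper f13 below is an added convenience for illustration, not part of the original script:

def f13(v):
    # scalar value of the function recorded on tape 13
    return float(numpy.ravel(adolc.function(13, v))[0])

eps = 1e-6
fd = numpy.zeros_like(x)
for i in range(x.size):
    xp, xm = x.copy(), x.copy()
    xp[i] += eps
    xm[i] -= eps
    fd[i] = (f13(xp) - f13(xm)) / (2.0 * eps)

print('max |taped gradient - finite differences| =', numpy.max(numpy.abs(g - fd)))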


Example #22
adolc.independent(x)
y = f(x)
adolc.dependent(y)
adolc.trace_off()


# point at which the derivatives should be evaluated
x = random((N,D))

print '\n\n'
print 'Sympy function = function  check (should be almost zero)'
print f(x) - sym_f(x)

print '\n\n'
print 'Sympy vs Hand Derived Gradient check (should be almost zero)'
print df(x) - sym_df(x)

print 'Sympy vs Ad Derived Gradient check (should be almost zero)'
print adolc.gradient(0, numpy.ravel(x)).reshape(x.shape) - sym_df(x)

print '\n\n'
print 'Sympy vs Hand Derived Hessian check (should be almost zero)'
print ddf(x) - sym_ddf(x)

print 'Sympy vs Ad Derived Hessian check (should be almost zero)'
print adolc.hessian(0, numpy.ravel(x)).reshape(x.shape + x.shape) - sym_ddf(x)




Example #23
def eval_grad_f_adolc(x, user_data=None):
    return adolc.gradient(1, x)
Example #24
def grFun(x):
    return adolc.gradient(1, x)
Example #25
 def gradient(self, x):
     return adolc.gradient(0, x)