Example #1
0
    def test_exp_log(self):
        """Check that exp(log(p)) recovers p on a multivariate gdual."""
        from pyaudi import gdual_double as gdual
        from pyaudi import exp, log

        a = gdual(2.3, "x", 5)
        b = gdual(1.5, "y", 5)
        poly = a * a * b - a * b * a * a * a + 3 * b * b * b * b * a * b * a
        residual = exp(log(poly)) - poly
        self.assertTrue(residual.is_zero(1e-10))
Example #2
0
 def test_function_methods(self):
     """Free functions and the matching gdual methods must agree."""
     from pyaudi import gdual_double as gdual
     from pyaudi import exp, log, sin, cos

     g = gdual(0.5, "x", 11)
     # Same four equality checks as before, in the same order.
     for free_fn, method in ((exp, g.exp), (log, g.log),
                             (sin, g.sin), (cos, g.cos)):
         self.assertEqual(free_fn(g), method())
Example #3
0
    def test_exp_log(self):
        """exp and log must be exact inverses on a polynomial gdual."""
        from pyaudi import gdual_double as gdual
        from pyaudi import exp, log

        u = gdual(2.3, "x", 5)
        v = gdual(1.5, "y", 5)
        poly = u * u * v - u * v * u * u * u + 3 * v * v * v * v * u * v * u
        self.assertTrue((exp(log(poly)) - poly).is_zero(1e-10))
Example #4
0
def some_complex_irrational_f(x, y, z):
    """Combine a battery of pyaudi irrational functions of f = (x+y+z)/10.

    x, y, z: gduals (or floats) summed and scaled to form the argument f.
    Returns the accumulated sum of exp, log, power, roots, trig, inverse
    trig, hyperbolic and inverse hyperbolic evaluations of f.
    """
    # Fixed: cos/sin/tan were listed twice in one import, and the unused
    # `abs as gd_abs` import is dropped.
    from pyaudi import (exp, log, sqrt, cbrt,
                        cos, sin, tan, acos, asin, atan,
                        cosh, sinh, tanh, acosh, asinh, atanh)
    from pyaudi import sin_and_cos, sinh_and_cosh

    f = (x + y + z) / 10.
    retval = exp(f) + log(f) + f**2 + sqrt(f) + cbrt(f) + cos(f) + sin(f)
    retval += tan(f) + acos(f) + asin(f) + atan(f) + cosh(f) + sinh(f)
    retval += tanh(f) + acosh(f) + asinh(f) + atanh(f)
    # The *_and_* helpers return indexable pairs; fold both members in.
    a = sin_and_cos(f)
    b = sinh_and_cosh(f)
    retval += a[0] + a[1] + b[0] + b[1]
    return retval
Example #5
0
def some_complex_irrational_f(x, y, z):
    """Accumulate many pyaudi irrational functions evaluated at (x+y+z)/10."""
    from pyaudi import exp, log, cos, sin, tan, sqrt, cbrt, cos, sin, tan, acos, asin, atan, cosh, sinh, tanh, acosh, asinh, atanh
    from pyaudi import abs as gd_abs
    from pyaudi import sin_and_cos, sinh_and_cosh

    f = (x + y + z) / 10.
    # Grouping mirrors the original accumulation order exactly, so float
    # rounding is bit-for-bit identical.
    retval = exp(f) + log(f) + f ** 2 + sqrt(f) + cbrt(f) + cos(f) + sin(f)
    retval = retval + (tan(f) + acos(f) + asin(f) + atan(f) + cosh(f) + sinh(f))
    retval = retval + (tanh(f) + acosh(f) + asinh(f) + atanh(f))
    sc = sin_and_cos(f)
    shc = sinh_and_cosh(f)
    retval = retval + (sc[0] + sc[1] + shc[0] + shc[1])
    return retval
 def nn_predict(self, model_input):
     """Forward pass through the Dense/Activation stack in self.config.

     model_input: input vector for the first layer (numpy array; entries
     are floats or gduals — presumably, confirm against callers).
     Returns the output of the last configured layer.
     """
     from pyaudi import gdual_double
     vector = model_input
     dense_layer_count = 0
     for layer_config in self.config:
         if layer_config['class_name'] == 'Dense':
             # Weights are stored flat as [W0, b0, W1, b1, ...].
             wgts, biases = self.weights[dense_layer_count *
                                         2:(dense_layer_count + 1) * 2]
             vector = wgts.T.dot(vector) + biases
             dense_layer_count += 1
         elif layer_config['class_name'] == 'Activation':
             if layer_config['config']['activation'] == 'relu':
                 vector[convert_gdual_to_float(vector) < 0] = 0
             elif layer_config['config']['activation'] == 'tanh':
                 vector = np.vectorize(lambda x: tanh(x))(vector)
             elif layer_config['config']['activation'] == 'softplus':
                 # Fixed: log(exp(x) + 1) overflows in exp for large x.
                 # For x >= 30, |log(1 + exp(x)) - x| < 1e-10, so large
                 # arguments pass straight through (same guard as the
                 # sibling implementation of this method).
                 def _softplus(x):
                     x0 = x.constant_cf if isinstance(x, gdual_double) else x
                     return x if x0 > 30.0 else log(exp(x) + 1)
                 vector = np.vectorize(_softplus)(vector)
     return vector
Example #7
0
 def nn_predict(self, model_input):
     """Forward pass through the Dense/Activation stack in self.config.

     model_input: input vector for the first layer (numpy array; entries
     are floats or gduals — presumably, confirm against callers).
     Returns the output of the last configured layer.
     """
     vector = model_input
     dense_layer_count = 0
     for layer_config in self.config:
         if layer_config['class_name'] == 'Dense':
             # Weights are stored flat as [W0, b0, W1, b1, ...].
             wgts, biases = self.weights[dense_layer_count *
                                         2:(dense_layer_count + 1) * 2]
             vector = wgts.T.dot(vector) + biases
             dense_layer_count += 1
         elif layer_config['class_name'] == 'Activation':
             if layer_config['config']['activation'] == 'relu':
                 vector[convert_gdual_to_float(vector) < 0] = 0
             elif layer_config['config']['activation'] == 'tanh':
                 vector = np.vectorize(lambda x: tanh(x))(vector)
             elif layer_config['config']['activation'] == 'softplus':
                 # Avoid overflow in exp: |log(1+exp(x)) - x| < 1e-10
                 # for x >= 30, so large arguments pass through unchanged.
                 def _floatize(x):
                     # isinstance instead of type() ==; floats pass through.
                     return x.constant_cf if isinstance(x, gdual_double) else x

                 def _softplus(x):
                     return x if _floatize(x) > 30.0 else log(exp(x) + 1)

                 vector = np.vectorize(_softplus)(vector)
     return vector
Example #8
0
 def do(self, x1, x2):
     """gdual.log2 must equal log(x2) / log(2) built from free functions."""
     import pyaudi as pd
     lhs = x2.log2()
     rhs = pd.log(x2) / np.log(2.0)
     assert lhs == rhs
Example #9
0
 def do(self, x1, x2):
     """gdual.log1p must equal log(x2 + 1) built from the free function."""
     import pyaudi as pd
     lhs = x2.log1p()
     assert lhs == pd.log(x2 + 1.0)

def __atan2_helper(x1, x2):
    return atan(x1 / x2) + (float(x2) < 0) * pi


# gdual_double Definitions
try:
    gdual_double.__float__ = lambda self: self.constant_cf
    # gdual_double.__int__  = lambda self: int(self.constant_cf)
    gdual_double.__abs__ = pyaudi.abs
    gdual_double.sqrt = pyaudi.sqrt
    gdual_double.exp = pyaudi.exp
    gdual_double.expm1 = lambda self: pyaudi.exp(self) - 1.0
    gdual_double.log = pyaudi.log
    gdual_double.log10 = lambda self: pyaudi.log(self) / pyaudi.log(
        gdual_double(10.0))
    gdual_double.log1p = lambda self: pyaudi.log(self + 1.0)
    gdual_double.log2 = lambda self: pyaudi.log(self) / pyaudi.log(
        gdual_double(2.0))
    gdual_double.cos = pyaudi.cos
    gdual_double.sin = pyaudi.sin
    gdual_double.tan = pyaudi.tan
    gdual_double.cosh = pyaudi.cosh
    gdual_double.sinh = pyaudi.sinh
    gdual_double.tanh = pyaudi.tanh
    gdual_double.arccos = pyaudi.acos
    gdual_double.arcsin = pyaudi.asin
    gdual_double.arctan = pyaudi.atan
    gdual_double.arctan2 = lambda self, x2: pyaudi.__atan2_helper(self, x2)
    gdual_double.erf = pyaudi.erf
Example #11
0
from pyaudi import gdual_double as gdual
from pyaudi import exp, log, cbrt

# Goal: the Taylor expansion of f (and therefore every derivative) at
# x = 2, y = 3.
# 1 - Declare the generalized dual numbers; 7 is the truncation order,
#     i.e. the highest derivative order that will be available.

x = gdual(2, "x", 7)
y = gdual(3, "y", 7)

# 2 - Build the function with ordinary arithmetic.
f = exp(x * x + cbrt(y) / log(x * y))

# 3 - Inspect the results (no further computation happens here).
print(f"Taylor polynomial: {f}")  # Taylor expansion truncated at order 7
print(f"Derivative value [1,0]: {f.get_derivative([1, 0])}")  # d / dx
print(f"Derivative value [4,3]: {f.get_derivative([4, 3])}")  # d^7 / dx^4 dy^3

# 4 - Dictionary interface (note the "d" prefixed to every variable name).
print(f"Derivative value [1,0]: {f.get_derivative({'dx': 1})}")  # d / dx
print(f"Derivative value [4,3]: {f.get_derivative({'dx': 4, 'dy': 3})}")  # d^7 / dx^4 dy^3
Example #12
0
from pyaudi import gdual_double as gdual
from pyaudi import exp, log, cbrt

# Taylor expansion of f (hence all derivatives) at x = 2, y = 3.
# 1 - Declare the generalized dual numbers; 7 is the truncation order,
#     i.e. the maximum derivative order we will need.

x = gdual(2, "x", 7)
y = gdual(3, "y", 7)

# 2 - Compute the function with ordinary arithmetic.
f = exp(x * x + cbrt(y) / log(x * y))

# 3 - Inspect the results (this requires no further computation).
print("Taylor polynomial: " + str(f))  # Taylor expansion, truncated at order 7
print("Derivative value [1,0]: " + str(f.get_derivative([1, 0])))  # d / dx
print("Derivative value [4,3]: " + str(f.get_derivative([4, 3])))  # d^7 / dx^4 dy^3

# 4 - Dictionary interface (note the "d" prefixed to every variable name).
print("Derivative value [1,0]: " + str(f.get_derivative({"dx": 1})))  # d / dx
print("Derivative value [4,3]: " + str(f.get_derivative({"dx": 4, "dy": 3})))  # d^7 / dx^4 dy^3