def test_inverse_functions(self):
    """Round-trip check: f(f_inv(p)) == p and f_inv(f(p)) == p to 1e-12.

    Covers the three trigonometric and three hyperbolic function pairs on a
    6th-order two-variable gdual.
    """
    from pyaudi import gdual_double as gdual
    from pyaudi import sin, cos, tan, asin, acos, atan
    from pyaudi import sinh, cosh, tanh, asinh, acosh, atanh

    x = gdual(1.1, "x", 6)
    y = gdual(1.2, "y", 6)
    p1 = 1. / (x + y)

    # (forward, inverse) pairs checked in both composition orders.
    pairs = [
        (cos, acos),
        (sin, asin),
        (tan, atan),
        (cosh, acosh),
        (sinh, asinh),
        (tanh, atanh),
    ]
    for forward, inverse in pairs:
        self.assertTrue((forward(inverse(p1)) - p1).is_zero(1e-12))
        self.assertTrue((inverse(forward(p1)) - p1).is_zero(1e-12))
def test_inverse_functions(self):
    """Verify trig/hyperbolic functions and their inverses cancel to 1e-12."""
    from pyaudi import gdual_double as gdual
    from pyaudi import sinh, cosh, tanh
    from pyaudi import asinh, acosh, atanh
    from pyaudi import sin, cos, tan
    from pyaudi import asin, acos, atan

    x = gdual(1.1, "x", 6)
    y = gdual(1.2, "y", 6)
    p1 = 1. / (x + y)

    def check(f, f_inv):
        # Both composition orders must reproduce p1 within tolerance.
        self.assertTrue((f(f_inv(p1)) - p1).is_zero(1e-12))
        self.assertTrue((f_inv(f(p1)) - p1).is_zero(1e-12))

    check(cos, acos)
    check(sin, asin)
    check(tan, atan)
    check(cosh, acosh)
    check(sinh, asinh)
    check(tanh, atanh)
def test_tanh(self):
    """tanh must equal sinh/cosh to 1e-12 on a 10th-order gdual."""
    from pyaudi import gdual_double as gdual
    from pyaudi import sinh, cosh, tanh

    p1 = gdual(2.3, "x", 10) + gdual(1.5, "y", 10)
    residual = tanh(p1) - sinh(p1) / cosh(p1)
    self.assertTrue(residual.is_zero(1e-12))
def test_tanh(self):
    """Check the identity tanh(p) == sinh(p) / cosh(p) on a gdual."""
    from pyaudi import gdual_double as gdual
    from pyaudi import sinh, cosh, tanh

    x = gdual(2.3, "x", 10)
    y = gdual(1.5, "y", 10)
    p1 = x + y
    # The two formulations should agree to within the series tolerance.
    self.assertTrue((tanh(p1) - sinh(p1) / cosh(p1)).is_zero(1e-12))
def some_complex_irrational_f(x, y, z):
    """Combine every irrational pyaudi function into one expression.

    Scales the sum of the three inputs by 1/10 and accumulates the result of
    applying exp, log, power, roots, all trig/hyperbolic functions and their
    inverses, plus the paired sin_and_cos / sinh_and_cosh helpers.

    Args:
        x, y, z: gdual (or compatible) operands.

    Returns:
        The accumulated expression, same type as produced by pyaudi operations.
    """
    # Fix: the original import listed cos, sin, tan twice and pulled in an
    # unused `abs as gd_abs` alias; both removed with no behavior change.
    from pyaudi import (exp, log, sqrt, cbrt,
                        cos, sin, tan, acos, asin, atan,
                        cosh, sinh, tanh, acosh, asinh, atanh)
    from pyaudi import sin_and_cos, sinh_and_cosh

    f = (x + y + z) / 10.
    retval = exp(f) + log(f) + f**2 + sqrt(f) + cbrt(f) + cos(f) + sin(f)
    retval += tan(f) + acos(f) + asin(f) + atan(f) + cosh(f) + sinh(f)
    retval += tanh(f) + acosh(f) + asinh(f) + atanh(f)
    # sin_and_cos / sinh_and_cosh return both values of each pair at once.
    a = sin_and_cos(f)
    b = sinh_and_cosh(f)
    retval += a[0] + a[1] + b[0] + b[1]
    return retval
def some_complex_irrational_f(x, y, z):
    """Exercise all irrational pyaudi functions on (x + y + z) / 10.

    Args:
        x, y, z: gdual (or compatible) operands.

    Returns:
        Sum of exp, log, square, roots, every trig/hyperbolic function and
        inverse, and the outputs of sin_and_cos / sinh_and_cosh applied to f.
    """
    # Fix: deduplicated the import list (cos, sin, tan appeared twice) and
    # dropped the unused `abs as gd_abs` alias; no behavior change.
    from pyaudi import (exp, log, sqrt, cbrt,
                        cos, sin, tan, acos, asin, atan,
                        cosh, sinh, tanh, acosh, asinh, atanh)
    from pyaudi import sin_and_cos, sinh_and_cosh

    f = (x + y + z) / 10.
    retval = exp(f) + log(f) + f**2 + sqrt(f) + cbrt(f) + cos(f) + sin(f)
    retval += tan(f) + acos(f) + asin(f) + atan(f) + cosh(f) + sinh(f)
    retval += tanh(f) + acosh(f) + asinh(f) + atanh(f)
    a = sin_and_cos(f)
    b = sinh_and_cosh(f)
    retval += a[0] + a[1] + b[0] + b[1]
    return retval
def nn_predict(self, model_input):
    """Run a forward pass of the stored network over `model_input`.

    Walks `self.config` (Keras-style layer dicts): 'Dense' layers apply
    `W.T @ v + b` using the flat weight/bias list in `self.weights`;
    'Activation' layers apply relu, tanh or softplus elementwise.

    Args:
        model_input: input vector (array of floats or gduals —
            assumed compatible with np.dot; TODO confirm against callers).

    Returns:
        The activation vector produced by the final configured layer.
    """
    vector = model_input
    dense_layer_count = 0
    for layer_config in self.config:
        if layer_config['class_name'] == 'Dense':
            # Weights and biases are stored pairwise per dense layer.
            wgts, biases = self.weights[dense_layer_count * 2:(dense_layer_count + 1) * 2]
            vector = wgts.T.dot(vector) + biases
            dense_layer_count += 1
        elif layer_config['class_name'] == 'Activation':
            if layer_config['config']['activation'] == 'relu':
                vector[convert_gdual_to_float(vector) < 0] = 0
            elif layer_config['config']['activation'] == 'tanh':
                vector = np.vectorize(lambda x: tanh(x))(vector)
            elif layer_config['config']['activation'] == 'softplus':
                # Fix: computing log(exp(x) + 1) directly overflows exp for
                # large x. For x >= 30, |log(1 + exp(x)) - x| < 1e-10, so the
                # identity is returned instead (matches the sibling
                # nn_predict implementation in this file).
                floatize = lambda x: x.constant_cf if type(
                    x) == gdual_double else x
                softplus = lambda x: x if floatize(x) > 30.0 else log(
                    exp(x) + 1)
                vector = np.vectorize(softplus)(vector)
    return vector
def nn_predict(self, model_input):
    """Forward pass through the Keras-style layer list in `self.config`.

    'Dense' layers consume weight/bias pairs from `self.weights`;
    'Activation' layers apply relu, tanh or an overflow-safe softplus.
    Returns the final activation vector.
    """
    activations = model_input
    dense_idx = 0
    for layer in self.config:
        kind = layer['class_name']
        if kind == 'Dense':
            # Each dense layer owns two consecutive entries: weights, biases.
            w, b = self.weights[2 * dense_idx:2 * dense_idx + 2]
            activations = w.T.dot(activations) + b
            dense_idx += 1
        elif kind == 'Activation':
            name = layer['config']['activation']
            if name == 'relu':
                activations[convert_gdual_to_float(activations) < 0] = 0
            elif name == 'tanh':
                activations = np.vectorize(tanh)(activations)
            elif name == 'softplus':
                # avoiding overflow with exp; | log(1+exp(x)) - x | < 1e-10 for x>=30
                def _as_float(v):
                    return v.constant_cf if type(v) == gdual_double else v

                def _softplus(v):
                    return v if _as_float(v) > 30.0 else log(exp(v) + 1)

                activations = np.vectorize(_softplus)(activations)
    return activations
def do(self, x1, x2):
    """Check that the tanh method agrees with the free function pyaudi.tanh."""
    import pyaudi as pd
    method_result = x1.tanh()
    # Method form and module-level function must produce the same gdual.
    assert (method_result == pd.tanh(x1))