def test_sub_vector_results():
    """Subtraction of two 2-variable vector AutoDiff objects."""
    a = AutoDiff([3, 1], [2, 1], 2, 1)
    b = AutoDiff([2, -3], [3, 2], 2, 2)
    diff = a - b
    expected_val = np.array([[1], [4]])
    expected_der = np.array([[2, -3], [1, -2]])
    expected_jac = np.array([[1, -1], [1, -1]])
    assert np.all(diff.val == expected_val)
    assert np.all(diff.der == expected_der)
    assert np.all(diff.jacobian == expected_jac)
def test_tan_ad_results():
    """tan() of an AutoDiff object: defined wherever cos(val) != 0."""
    # Positive real input
    pos = AutoDiff(0.5, 2.0)
    out = ef.tan(pos)
    assert out.val == np.array([[np.tan(0.5)]])
    assert out.der == np.array([[2.0 / (np.cos(0.5) ** 2)]])
    assert out.jacobian == np.array([[1.0 / (np.cos(0.5) ** 2)]])
    # Negative real input
    neg = AutoDiff(-0.5, 2.0)
    out = ef.tan(neg)
    assert out.val == np.array([[np.tan(-0.5)]])
    assert out.der == np.array([[2.0 / (np.cos(-0.5) ** 2)]])
    assert out.jacobian == np.array([[1.0 / (np.cos(-0.5) ** 2)]])
    # Zero input
    zero = AutoDiff(0.0, 2.0)
    out = ef.tan(zero)
    assert out.val == np.array([[np.tan(0)]])
    assert out.der == np.array([[2.0]])
    assert out.jacobian == np.array([[1.0]])

    # cos(val) == 0: value and derivative are NaN and a RuntimeWarning fires
    with pytest.warns(RuntimeWarning):
        singular = AutoDiff(np.pi / 2, 1.0)
        out = ef.tan(singular)
        assert np.isnan(out.val)
        assert np.isnan(out.der)
        assert np.isnan(out.jacobian)
def test_arctanh_ad_results():
    """arctanh() of an AutoDiff object: defined on the open interval (-1, 1)."""
    # A point comfortably inside the domain
    inside = AutoDiff(0.5, 2)
    out = ef.arctanh(inside)
    assert out.val == np.arctanh(0.5)
    assert out.der == np.array([[2 / (1 - 0.5 ** 2)]])
    assert out.jacobian == np.array([[1 / (1 - 0.5 ** 2)]])

    # A point very close to the -1 boundary
    near_edge = AutoDiff(-0.99999, 2)
    out = ef.arctanh(near_edge)
    assert out.val == np.arctanh(-0.99999)
    assert out.der == np.array([[2 / (1 - (-0.99999) ** 2)]])
    assert out.jacobian == np.array([[1 / (1 - (-0.99999) ** 2)]])
    # Exactly on the boundary: infinite value/derivative plus a warning
    with pytest.warns(RuntimeWarning):
        boundary = AutoDiff(-1, 2)
        out = ef.arctanh(boundary)
        assert np.isinf(out.val)
        assert np.isinf(out.der)
        assert np.isinf(out.jacobian)
    # Outside the domain: NaN value, finite (negative) derivative, warning
    with pytest.warns(RuntimeWarning):
        outside = AutoDiff(10, 2)
        out = ef.arctanh(outside)
        assert np.isnan(out.val)
        assert out.der == np.array([[2 / (1 - 10 ** 2)]])
        assert out.jacobian == np.array([[1 / (1 - 10 ** 2)]])
def test_arccos_ad_results():
    """arccos() of an AutoDiff object: defined on [-1, 1], derivative on (-1, 1)."""
    # A point inside the domain
    inside = AutoDiff(0.5, 2)
    out = ef.arccos(inside)
    assert out.val == np.array([[np.arccos(0.5)]])
    assert out.der == np.array([[-2 / np.sqrt(1 - 0.5 ** 2)]])
    assert out.jacobian == np.array([[-1 / np.sqrt(1 - 0.5 ** 2)]])

    # Outside the domain: sqrt of a negative number -> NaN plus a warning
    with pytest.warns(RuntimeWarning):
        outside = AutoDiff(-2, 2)
        out = ef.arccos(outside)
        assert np.isnan(out.val[0][0])
        assert np.isnan(out.der[0][0])
        assert np.isnan(out.jacobian[0][0])

    # On the boundary: value is defined but the derivative divides by zero
    with pytest.warns(RuntimeWarning):
        boundary = AutoDiff(1, 2)
        out = ef.arccos(boundary)
        assert out.val == np.array([[np.arccos(1)]])
        assert np.isneginf(out.der[0][0])
        assert np.isneginf(out.jacobian[0][0])

    # Zero input
    zero = AutoDiff(0, 2)
    out = ef.arccos(zero)
    assert out.val == np.array([[np.arccos(0)]])
    assert out.der == np.array([[-2 / np.sqrt(1 - 0 ** 2)]])
    assert out.jacobian == np.array([[-1 / np.sqrt(1 - 0 ** 2)]])
def test_arcsin_ad_results():
    """arcsin() of an AutoDiff object: defined on [-1, 1], derivative on (-1, 1)."""
    # A point inside the domain
    inside = AutoDiff(0.5, 2)
    out = ef.arcsin(inside)
    assert out.val == np.array([[np.arcsin(0.5)]])
    assert out.der == np.array([[2 / np.sqrt(1 - 0.5 ** 2)]])
    assert out.jacobian == np.array([[1 / np.sqrt(1 - 0.5 ** 2)]])

    # Outside the domain: sqrt of a negative number -> NaN plus a warning
    with pytest.warns(RuntimeWarning):
        outside = AutoDiff(-2, 2)
        out = ef.arcsin(outside)
        assert np.isnan(out.val[0][0])
        assert np.isnan(out.der[0][0])
        assert np.isnan(out.jacobian[0][0])

    # On the boundary: value is defined but the derivative divides by zero
    with pytest.warns(RuntimeWarning):
        boundary = AutoDiff(1, 2)
        out = ef.arcsin(boundary)
        assert out.val == np.array([[np.arcsin(1)]])
        assert np.isinf(out.der[0][0])
        assert np.isinf(out.jacobian[0][0])

    # Zero input
    zero = AutoDiff(0, 2)
    out = ef.arcsin(zero)
    assert out.val == np.array([[0.0]])
    assert out.der == np.array([[2.0]])
    assert out.jacobian == np.array([[1.0]])
def test_mul_ad_results():
    """Multiplication of a scalar AutoDiff object by itself (f = x*x)."""
    # Positive seed value: d(x*x)/dx = 2x, scaled by the seed derivative
    a = AutoDiff(5, 2)
    prod = a * a
    assert prod.val == 25
    assert prod.der == 20
    assert prod.jacobian == 10
    # Negative seed value: derivative flips sign with x
    a = AutoDiff(-5, 2)
    prod = a * a
    assert prod.val == 25
    assert prod.der == -20
    assert prod.jacobian == -10
def test_sub_ad_results():
    """Subtraction of a scalar AutoDiff object from itself (f = x - x)."""
    # Positive seed value: everything cancels to zero
    a = AutoDiff(5, 2)
    diff = a - a
    assert diff.val == 0
    assert diff.der == 0
    assert diff.jacobian == 0
    # Negative seed value: same cancellation
    b = AutoDiff(-5, 2)
    diff = b - b
    assert diff.val == 0
    assert diff.der == 0
    assert diff.jacobian == 0
# ---- Example #8 ----
def logistic(x):
	''' Compute logistic function for AutoDiff or Dual object.
	
	INPUTS
	======
	x: an AutoDiff object or Dual object
	
	RETURNS
	=======
	A new AutoDiff or Dual object with calculated value and derivative.
	
	
	'''
	try:
		f_l = (1/(1+np.exp(-x.val)))
		new_val = f_l
		new_der = (1 - f_l)*f_l*x.der
		new_jacobian = (1 - f_l)*f_l*x.jacobian
		return AutoDiff(new_val, new_der, x.n, 0, new_jacobian)
	except AttributeError:
		try:
			f_l = (1/(1 + np.exp(-x.Real)))
			return Dual(f_l, (1 - f_l)*f_l*x.Dual)		
		except AttributeError:
			try:
				return Dual(logistic(x.Real), (1 - logistic(x.Real))*logistic(x.Real)*x.Dual)
			except AttributeError:
			# Constant
				return_val = (1/(1+np.exp(-x)))
				return return_val
# ---- Example #9 ----
def tanh(X):
	''' Compute the tanh of an AutoDiff object and its derivative.
	INPUTS
	======
	X: an AutoDiff object
	RETURNS
	=======
	A new AutoDiff object with calculated value and derivative.
	EXAMPLES
	========
	>>> X = AutoDiff(0.5, 2, 1)
	>>> tanhAutoDiff = tanh(X)
	>>> tanhAutoDiff.val
	0.46211715726000974
	>>> tanhAutoDiff.der
	1.572895465931855
	>>>tanhAutoDiff.jacobian
	0.7864477329659275
	'''
	try:
		val = np.tanh(X.val)
		der = 1/(np.cosh(X.val)**2)*X.der
		jacobian = 1/(np.cosh(X.val)**2)*X.jacobian
		return AutoDiff(val, der, X.n, 0, jacobian)
	except AttributeError:
		try:
			return Dual(np.tanh(X.Real), X.Dual/(np.cosh(X.Real)**2))		
		except AttributeError:
			try:
				X.Real
				return sinh(X)/cosh(X)
			except AttributeError:
			# Constant
				return_val = np.tanh(X)
				return return_val
# ---- Example #10 ----
def cosh(X):
	''' Compute the cosh of an AutoDiff object and its derivative.
	INPUTS
	======
	X: an AutoDiff object
	RETURNS
	=======
	A new AutoDiff object with calculated value and derivative.
	EXAMPLES
	========
	>>> X = AutoDiff(0.5, 2, 1)
	>>> coshAutoDiff = cosh(X)
	>>> coshAutoDiff.val
	1.1276259652063807
	>>> coshAutoDiff.der
	1.0421906109874948
	>>> coshAutoDiff.jacobian
	0.5210953054937474
	'''
	try:
		val = np.cosh(X.val)
		der = np.sinh(X.val)*X.der
		jacobian = np.sinh(X.val)*X.jacobian
		return AutoDiff(val, der, X.n, 0, jacobian)
	except AttributeError:
		try:
			return Dual(np.cosh(X.Real), X.Dual*np.sinh(X.Real))		
		except AttributeError:
			try:
				return Dual(cosh(X.Real), X.Dual*sinh(X.Real))
			except AttributeError:
			# Constant
				return_val = np.cosh(X)
				return return_val
# ---- Example #11 ----
def sinh(X):
	''' Compute the sinh of an AutoDiff object and its derivative.
	INPUTS
	======
	X: an AutoDiff object
	RETURNS
	=======
	A new AutoDiff object with calculated value and derivative.
	EXAMPLES
	========
	>>> X = AutoDiff(0.5, 2, 1)
	>>> sinhAutoDiff = sinh(X)
	>>> sinhAutoDiff.val
	0.5210953054937474
	>>> sinhAutoDiff.der
	2.2552519304127614
	>>> sinhAutoDiff.jacobian
	1.1276259652063807
	'''
	try:
		val = np.sinh(X.val)
		der = np.cosh(X.val)*X.der
		jacobian = np.cosh(X.val)*X.jacobian
		return AutoDiff(val, der, X.n, 0, jacobian)
	except AttributeError:
		try:
			return Dual(np.sinh(X.Real), X.Dual*np.cosh(X.Real))		
		except AttributeError:
			try:
				return Dual(sinh(X.Real), X.Dual*cosh(X.Real))
			except AttributeError:
			# Constant
				return_val = np.sinh(X)
				return return_val