def test_basic_der():
    # Decorator function maker that can be used to create function variables
    def fm(f):
        def fun(x):
            return f(x)
        return fun

    value = 0.5
    assert gd.trace(fm(sin), value) == np.cos(value)
    assert gd.trace(fm(cos), value) == -np.sin(value)
    assert gd.trace(fm(tan), value) == 1 / (np.cos(value) * np.cos(value))
def test_basic_reverse():
    # Decorator function maker that can be used to create function variables
    def fm(f):
        def fun(x):
            return f(x)
        return fun

    value = 0.5
    assert gd.trace(fm(sin), value, mode='reverse') == np.cos(value)
    assert gd.trace(fm(cos), value, mode='reverse') == -np.sin(value)
    assert gd.trace(fm(tan), value, mode='reverse') == 1 / (np.cos(value) * np.cos(value))

    # An unrecognized mode string should be rejected
    with pytest.raises(ValueError):
        gd.trace(fm(sin), value, mode='test')
def find_decreasing(function, xmin, xmax, n_pts=100, verbose=False):
    '''
    Locate the region in a given interval where the function is decreasing.

    Inputs:
        function [function]: the function on which to locate decreasing regions (one scalar input)
        xmin: lower bound of the interval
        xmax: upper bound of the interval
        n_pts [int] (Default: 100): how many points to use when evaluating the derivative
            (more = better resolution but slower)
        verbose [bool] (Default: False): pass-through flag for gd.trace

    Outputs:
        xs: the x values where the function is decreasing
        ys: the derivative values at those x values
    '''
    xs = np.linspace(xmin, xmax, num=n_pts)
    # The derivative comes back as a matrix because we pass a vectorized input to a
    # single-variable function; only the diagonal entries are nonzero, so extract those.
    f_ = gd.trace(function, xs, verbose=verbose)
    y_ders = np.array([f_[i, i] for i in range(n_pts)])
    idx = np.where(y_ders < 0)[0].astype(int)
    if len(idx) == 0:
        print(f'No decreasing values located in the interval {xmin} to {xmax}')
        return None
    return xs[idx], y_ders[idx]
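# Usage sketch (illustrative, not part of the original module): relies on the module-level
# `np` and the package's elementary `sin` used in the tests above, plus find_decreasing
# defined directly above. The helper name is hypothetical.
def _find_decreasing_example():
    def f(x):
        return sin(x)

    result = find_decreasing(f, 0, 2 * np.pi, n_pts=200)
    if result is not None:
        xs_dec, ders_dec = result
        # sin is decreasing on (pi/2, 3*pi/2), so these should be roughly 1.57 and 4.71
        print(xs_dec.min(), xs_dec.max())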
def test_RMtoR():
    def f(v):
        return v[0] + exp(v[1]) + 6 * v[2]**2

    x = gd.trace(f, [1, 2, 4], verbose=True)
    assert x[0][0] == 1.0
    assert x[0][1] == pytest.approx(np.exp(2))
    assert x[0][2] == 48.0
def test_composite_reverse():
    def f(x):
        return cos(x) * tan(x) + exp(x)

    value = 0.5
    der = gd.trace(f, value, mode='reverse')
    assert der[0] == -1 * np.sin(value) * np.tan(value) + 1 / np.cos(value) + np.exp(value)
def test_composition_der():
    def f(x):
        return cos(x) * tan(x) + exp(x)

    value = 0.5
    der = gd.trace(f, value)
    assert der[0] == -1 * np.sin(value) * np.tan(value) + 1 / np.cos(value) + np.exp(value)
def hessian_demo():
    seed1 = [1, 2, 3, 4]

    def f1(x, y, z, w):
        return 2 * x * y + w * z / y

    f_, f__ = gd.trace(f1, seed1, return_second_deriv=True, verbose=True)
    print(f_)
    print(f__)
def test_RMtoRN():
    def f(v):
        return [v[0] + v[1], v[1] - v[2], cos(v[2]), exp(v[3]) * sin(v[2])]

    x = gd.trace(f, [1, 2, 3, 4])
    assert x[0][0] == 1.0
    assert x[0][1] == 1.0
    assert x[1][1] == 1.0
    assert x[1][2] == -1.0
    assert x[2][2] == pytest.approx(-0.14112001)
    assert x[3][2] == pytest.approx(-54.05175886)
    assert x[3][3] == pytest.approx(7.70489137)
def test_multiple_inputs():
    seed1 = [1, 2, 3, 4]

    def f1(x, y, z, w):
        return 2 * x * y + w * z / y

    x1_, x1__ = gd.trace(f1, seed1, return_second_deriv=True)
    assert x1_[0][0] == pytest.approx(4)
    assert x1_[0][1] == pytest.approx(-1)
    assert x1__[1][1] == pytest.approx(3)
    assert x1__[3][2] == pytest.approx(0.5)

    seed2 = [np.pi] * 4

    def trig(w, x, y, z):
        return sin(x) + cos(z) * y + sin(w)

    x2_, x2__ = gd.trace(trig, seed2, return_second_deriv=True)
    assert x2_[0][1] == pytest.approx(-1)
    assert x2_[0][3] == pytest.approx(0)
    assert x2__[2][2] == pytest.approx(0)
    assert x2__[3][3] == pytest.approx(np.pi)
def find_extrema_firstorder(function, xmin, xmax, n_pts=100, tolerance=1e-10, verbose=False):
    '''
    Locate the point where the derivative is closest to zero on the given interval.

    Inputs:
        function [function]: the function whose extrema you want to locate (one scalar input)
        xmin: lower bound on which to calculate the derivative
        xmax: upper bound on which to calculate the derivative
        n_pts [int] (Default: 100): how many points to use when evaluating the derivative
            (more = better resolution but slower)
        tolerance [float] (Default: 1e-10): how close to zero a derivative must be before
            the point is considered an extremum
        verbose [bool] (Default: False): pass-through flag for gd.trace

    Outputs:
        If an extremum is located exactly, returns only the x value(s) of the extremum.
        Otherwise returns:
            xs: a tuple containing the two x values between which the extremum is located
    '''
    xs = np.linspace(xmin, xmax, num=n_pts)
    # The derivative comes back as a matrix because we pass a vectorized input to a
    # single-variable function; only the diagonal entries are nonzero, so extract those.
    f_ = gd.trace(function, xs, verbose=verbose)
    y_ders = np.array([f_[i, i] for i in range(n_pts)])
    zeroidx = np.where(np.abs(y_ders) < tolerance)[0].astype(int)
    if len(zeroidx) != 0:
        return xs[zeroidx]
    else:
        decreasingIDX = np.where(y_ders < tolerance)[0].astype(int)
        increasingIDX = np.where(y_ders > tolerance)[0].astype(int)
        if len(decreasingIDX) == 0 or len(increasingIDX) == 0:
            print(f'No extrema located in the interval {xmin} to {xmax}.')
            return None
        if decreasingIDX[-1] == increasingIDX[0] - 1:  # Function goes from decreasing -> increasing
            print(f'Extrema located between x={xs[decreasingIDX[-1]]} and {xs[increasingIDX[0]]}')
            return (xs[decreasingIDX[-1]], xs[increasingIDX[0]])
        elif increasingIDX[-1] == decreasingIDX[0] - 1:  # Function goes from increasing -> decreasing
            print(f'Extrema located between x={xs[increasingIDX[-1]]} and {xs[decreasingIDX[0]]}')
            return (xs[increasingIDX[-1]], xs[decreasingIDX[0]])
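# Usage sketch (illustrative, not part of the original module): same assumptions as the
# sketch above (module-level `np`, the package's elementary `sin`); helper name hypothetical.
def _find_extrema_example():
    def f(x):
        return sin(x)

    # sin has a single maximum at pi/2 on [0, pi]. Depending on floating-point round-off
    # versus the tolerance, this returns either the grid point at ~pi/2 or the pair of
    # grid points bracketing it.
    result = find_extrema_firstorder(f, 0, np.pi, n_pts=201)
    print(result)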
def test_basic():
    def poly(x):
        return 2 * x**4

    def trig(x):
        return sin(x)

    x1_, x1__ = gd.trace(poly, 2, return_second_deriv=True)
    assert x1_[0][0] == pytest.approx(64)
    assert x1__[0][0] == pytest.approx(96)

    x2_, x2__ = gd.trace(trig, np.pi, return_second_deriv=True)
    assert x2_[0][0] == pytest.approx(-1)
    assert x2__[0][0] == pytest.approx(0)

    non_scalar = [0, np.pi / 2, np.pi, 3 * np.pi / 2, 2 * np.pi]
    with pytest.raises(ValueError):
        x1_, x1__ = gd.trace(trig, non_scalar, return_second_deriv=True)
    with pytest.raises(ValueError):
        x1_, x1__ = gd.trace(trig, non_scalar, return_second_deriv=True, mode='forward')

    x3_, x3__ = gd.trace(poly, 2, return_second_deriv=True, verbose=True)
    assert x3_[0][0] == pytest.approx(64)
    assert x3__[0][0] == pytest.approx(96)
def plot_with_tangent_line(function, xtangent, xmin, xmax, n_pts=100, figsize=(6, 6),
                           xlabel='x', ylabel='y',
                           plotTitle='Function with tangent line', verbose=False):
    '''
    Plot a function between xmin and xmax, with a tangent line at xtangent,
    using n_pts linearly spaced points to evaluate it.

    Inputs:
        function [function]: the function you'd like to plot (one scalar input)
        xtangent: x value at which the tangent line touches the function
        xmin: lower bound of the plotting interval
        xmax: upper bound of the plotting interval
        n_pts [int] (Default: 100): how many points to use when plotting the function
            (more = better resolution but slower)
        figsize [tuple] (Default: (6, 6)): figure size in inches (see matplotlib documentation)
        xlabel [string] (Default: 'x'): label for the x axis of the plot
        ylabel [string] (Default: 'y'): label for the y axis of the plot
        plotTitle [string] (Default: 'Function with tangent line'): title of the plot
        verbose [bool] (Default: False): pass-through flag for gd.trace

    Outputs:
        xs: the array of linearly spaced x values between xmin and xmax
        values: the function evaluated at the values in xs
    '''
    deriv = gd.trace(function, xtangent, verbose=verbose)
    xs = np.linspace(xmin, xmax, num=n_pts)
    values = function(xs)
    ytangent = function(xtangent)
    plt.figure(figsize=figsize)
    derivativevalue = deriv[0, 0]
    print(f'At the point x={xtangent}, the function has a slope of {derivativevalue}')
    plt.plot(xs, values)
    plt.plot(xs, derivativevalue * (xs - xtangent) + ytangent)
    plt.title(plotTitle)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xlim(xmin, xmax)
    plt.show()
    return xs, values
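# Usage sketch (illustrative, not part of the original module; helper name hypothetical):
# the polynomial works elementwise on numpy arrays, which the plotting helper requires.
def _tangent_line_example():
    def poly(x):
        return 2 * x**4

    # d/dx 2*x**4 at x = 1 is 8, so the printed slope should be 8
    return plot_with_tangent_line(poly, xtangent=1, xmin=-2, xmax=2, n_pts=200)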
def plot_derivative(function, xmin, xmax, n_pts=100, figsize=(6, 6), xlabel='x',
                    ylabel='y', plotTitle='Derivative', verbose=False):
    '''
    Plot the derivative of a function between xmin and xmax, using n_pts
    linearly spaced points to evaluate it.

    Inputs:
        function [function]: the function whose derivative you'd like to plot
            (must take a single scalar input)
        xmin: lower bound on which to calculate the derivative
        xmax: upper bound on which to calculate the derivative
        n_pts [int] (Default: 100): how many points to use when evaluating the derivative
            (more = better resolution but slower)
        figsize [tuple] (Default: (6, 6)): figure size in inches (see matplotlib documentation)
        xlabel [string] (Default: 'x'): label for the x axis of the plot
        ylabel [string] (Default: 'y'): label for the y axis of the plot
        plotTitle [string] (Default: 'Derivative'): title of the plot
        verbose [bool] (Default: False): pass-through flag for gd.trace

    Outputs:
        xs: the array of linearly spaced x values between xmin and xmax
        y_ders: the derivative evaluated at the values in xs
    '''
    xs = np.linspace(xmin, xmax, num=n_pts)
    # The derivative comes back as a matrix because we pass a vectorized input to a
    # single-variable function; only the diagonal entries are nonzero, so extract those.
    f_ = gd.trace(function, xs, verbose=verbose)
    y_ders = [f_[i, i] for i in range(n_pts)]
    plt.figure(figsize=figsize)
    plt.plot(xs, y_ders, color='red', label='f\'(x)')
    plt.title(plotTitle)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.xlim(xmin, xmax)
    plt.legend()
    plt.show()
    return xs, y_ders
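# Usage sketch (illustrative, not part of the original module; helper name hypothetical):
# the plotted curve should trace cos(x) over [0, 2*pi].
def _plot_derivative_example():
    def f(x):
        return sin(x)

    return plot_derivative(f, 0, 2 * np.pi, n_pts=150, plotTitle="Derivative of sin(x)")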
def run_demos():
    # fs and seeds are expected to be module-level lists of demo functions
    # and their corresponding input seeds.
    for i, f in enumerate(fs):
        print('demo', i)
        f_ = gd.trace(f, seeds[i])
        print(f_)
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')