def test_lagfit(self):
    def f(x):
        return x*(x - 1)*(x - 2)

    # Test exceptions
    assert_raises(ValueError, lag.lagfit, [1], [1], -1)
    assert_raises(TypeError, lag.lagfit, [[1]], [1], 0)
    assert_raises(TypeError, lag.lagfit, [], [1], 0)
    assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0)
    assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0)
    assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0)
    assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = lag.lagfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(lag.lagval(x, coef3), y)
    #
    coef4 = lag.lagfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(lag.lagval(x, coef4), y)
    #
    coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)

    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = lag.lagfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
def mainPricing(I, M, df):
    # External names assumed at module level: GenS (path simulation), IV
    # (inner value), reg (regression degree), a (numpy.polynomial.laguerre),
    # and numpy's where/sum via a star import.
    S = GenS(I, M)  # generate stock price paths
    h = IV(S)  # inner value matrix
    V = IV(S)  # value matrix
    for t in range(M - 1, -1, -1):
        # rg = polyfit(S[t, :], V[t + 1, :] * df, reg)  # regression at time t
        rg = a.lagfit(S[t, :], V[t + 1, :] * df, reg)
        C = a.lagval(S[t, :], rg, True)
        # C = polyval(rg, S[t, :])  # continuation values
        V[t, :] = where(h[t, :] > C, h[t, :], V[t + 1, :] * df)  # exercise decision
    V0 = sum(V[0, :]) / I  # LSM estimator
    return V0
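mainPricing relies on helpers that are not shown here: GenS for path simulation, IV for the inner value, plus module-level reg, df, and a (an alias for numpy.polynomial.laguerre). Below is a minimal sketch of what those pieces might look like for an American put under geometric Brownian motion; the helper bodies and all parameter values are assumptions for illustration, not the original code.

import numpy as np
import numpy.polynomial.laguerre as a  # matches the alias used above

# Hypothetical market and simulation parameters
S0, K, r, sigma, T = 36.0, 40.0, 0.06, 0.2, 1.0
M, I, reg = 50, 25000, 5
dt = T / M
df = np.exp(-r * dt)

def GenS(I, M):
    """Simulate geometric Brownian motion paths, shape (M + 1, I)."""
    z = np.random.standard_normal((M + 1, I))
    paths = S0 * np.exp(np.cumsum((r - 0.5 * sigma ** 2) * dt
                                  + sigma * np.sqrt(dt) * z, axis=0))
    paths[0] = S0
    return paths

def IV(S):
    """Inner (exercise) value of an American put."""
    return np.maximum(K - S, 0.0)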
def lagfit(xs, ys, deg):
    coeffs = lag.lagfit(xs, ys, deg)
    p = lag.Laguerre(coeffs)
    return mkseries(xs, ys, p)
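The wrapper above depends on a mkseries helper from its surrounding codebase, so it is not runnable on its own. The core NumPy calls can be exercised in isolation; a self-contained sketch of the same fit-and-wrap round trip, with the sample data invented for illustration:

import numpy as np
import numpy.polynomial.laguerre as lag

xs = np.linspace(0.0, 2.0, 20)
ys = xs * (xs - 1.0) * (xs - 2.0)

coeffs = lag.lagfit(xs, ys, 3)   # least-squares Laguerre coefficients
p = lag.Laguerre(coeffs)         # wrap as a callable series object
print(np.allclose(p(xs), ys))    # cubic data, degree-3 fit -> True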
def test_lagfit(self):
    def f(x):
        return x*(x - 1)*(x - 2)

    # Test exceptions
    assert_raises(ValueError, lag.lagfit, [1], [1], -1)
    assert_raises(TypeError, lag.lagfit, [[1]], [1], 0)
    assert_raises(TypeError, lag.lagfit, [], [1], 0)
    assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0)
    assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0)
    assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0)
    assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1])
    assert_raises(ValueError, lag.lagfit, [1], [1], [-1,])
    assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6])
    assert_raises(TypeError, lag.lagfit, [1], [1], [])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = lag.lagfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(lag.lagval(x, coef3), y)
    coef3 = lag.lagfit(x, y, [0, 1, 2, 3])
    assert_equal(len(coef3), 4)
    assert_almost_equal(lag.lagval(x, coef3), y)
    #
    coef4 = lag.lagfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(lag.lagval(x, coef4), y)
    coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4])
    assert_equal(len(coef4), 5)
    assert_almost_equal(lag.lagval(x, coef4), y)
    #
    coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
    coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3])
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)

    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = lag.lagfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
    wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)

    # test scaling with complex values x points whose square
    # is zero when summed.
    x = [1, 1j, -1, -1j]
    assert_almost_equal(lag.lagfit(x, x, 1), [1, -1])
    assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
# Convergence test: LSM pricing error versus the order of the Laguerre basis.
# Assumes S0, K, r, sigma, T, Vbsm, the list `error`, and `exp` are defined
# above, and that numpy.polynomial.laguerre is imported as `laguerre`.
for m in range(1, 10):
    # Simulation Parameters
    I = 25000
    M = 50
    dt = T / M
    df = exp(-r * dt)

    # Stock Price Paths
    S = S0 * np.exp(np.cumsum((r - 0.5 * sigma ** 2) * dt +
                              sigma * math.sqrt(dt) *
                              np.random.standard_normal((M + 1, I)), axis=0))
    S[0] = S0

    # Inner Values
    h = np.maximum(K - S, 0)

    # Present Value Vector (Initialization)
    V = h[-1]

    # American Option Valuation by Backwards Induction
    for t in range(M - 1, 0, -1):
        rg = laguerre.lagfit(S[t], V * df, m)
        C = laguerre.lagval(S[t], rg)  # continuation values
        V = np.where(h[t] > C, h[t], V * df)  # exercise decision
    V0 = df * np.sum(V) / I  # LSM estimator
    error.append(abs(V0 - Vbsm))

print(error)
plt.xlabel('Order of Polynomial for LS regression')
plt.ylabel('Errors')
plt.plot(range(1, 10), error)
plt.show()
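The loop above measures each LSM estimate against a benchmark Vbsm that is computed before the snippet starts. A common choice for such a sanity check is the Black-Scholes-Merton European put value; a sketch of how it could be computed follows (that the original uses exactly this benchmark, and the parameter values shown, are assumptions).

import math

def norm_cdf(x):
    """Standard normal CDF via the error function."""
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

def bsm_put(S0, K, T, r, sigma):
    """Black-Scholes-Merton value of a European put."""
    d1 = (math.log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * math.sqrt(T))
    d2 = d1 - sigma * math.sqrt(T)
    return K * math.exp(-r * T) * norm_cdf(-d2) - S0 * norm_cdf(-d1)

# Hypothetical parameters for illustration
Vbsm = bsm_put(S0=36.0, K=40.0, T=1.0, r=0.06, sigma=0.2)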
def test_lagfit(self):
    def f(x):
        return x * (x - 1) * (x - 2)

    # Test exceptions
    assert_raises(ValueError, lag.lagfit, [1], [1], -1)
    assert_raises(TypeError, lag.lagfit, [[1]], [1], 0)
    assert_raises(TypeError, lag.lagfit, [], [1], 0)
    assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0)
    assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0)
    assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0)
    assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]])
    assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1])

    # Test fit
    x = np.linspace(0, 2)
    y = f(x)
    #
    coef3 = lag.lagfit(x, y, 3)
    assert_equal(len(coef3), 4)
    assert_almost_equal(lag.lagval(x, coef3), y)
    #
    coef4 = lag.lagfit(x, y, 4)
    assert_equal(len(coef4), 5)
    assert_almost_equal(lag.lagval(x, coef4), y)
    #
    coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
    assert_almost_equal(coef2d, np.array([coef3, coef3]).T)

    # test weighting
    w = np.zeros_like(x)
    yw = y.copy()
    w[1::2] = 1
    y[0::2] = 0
    wcoef3 = lag.lagfit(x, yw, 3, w=w)
    assert_almost_equal(wcoef3, coef3)
    #
    wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
    assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)

    # test NA
    y = f(x)
    y[10] = 100

    xm = x.view(maskna=1)
    xm[10] = np.NA
    res = lag.lagfit(xm, y, 3)
    assert_almost_equal(res, coef3)

    ym = y.view(maskna=1)
    ym[10] = np.NA
    res = lag.lagfit(x, ym, 3)
    assert_almost_equal(res, coef3)

    y2 = np.vstack((y, y)).T
    y2[10, 0] = 100
    y2[15, 1] = 100
    y2m = y2.view(maskna=1)
    y2m[10, 0] = np.NA
    y2m[15, 1] = np.NA
    res = lag.lagfit(x, y2m, 3).T
    assert_almost_equal(res[0], coef3)
    assert_almost_equal(res[1], coef3)

    wm = np.ones_like(x, maskna=1)
    wm[10] = np.NA
    res = lag.lagfit(x, y, 3, w=wm)
    assert_almost_equal(res, coef3)
def apply_fitting_fields(plotgui):
    """
    Apply the set fitting parameters from the window.

    This routine reads the fitting parameters from the window and then
    applies the fitting.  Each time it is called a new data set should
    be generated, unless the fit order is too large for the number of
    points.  The fit order must be at most 1 less than the number of
    points, otherwise the routine just shows an error message pop-up
    and returns.

    Parameters
    ----------
    plotgui: by assumption a matplotlib_user_interface object

    Returns
    -------
    None
    """
    fit_type = plotgui.set_fitting_fields[0].get()
    if 'Cubic Spline' in fit_type:
        fit_order = float(plotgui.set_fitting_fields[1].get())
    else:
        try:
            fit_order = int(plotgui.set_fitting_fields[1].get())
        except ValueError:
            str1 = 'Error: bad fit order (%s).  Setting to 4.' % (
                plotgui.set_fitting_fields[1].get())
            plotgui.fit_text.insert(Tk.END, str1)
            plotgui.fit_text.see(Tk.END)
            fit_order = 4
    set_number = plotgui.set_fitting_list_area.current()
    fit_flag = plotgui.fit_option.get()
    if fit_flag == 0:
        xvalues = numpy.copy(plotgui.xdata[set_number]['values'])
        yvalues = numpy.copy(plotgui.ydata[set_number]['values'])
    else:
        xvalues = numpy.copy(plotgui.ydata[set_number]['values'])
        yvalues = numpy.copy(plotgui.xdata[set_number]['values'])
    inds = numpy.argsort(xvalues)
    xvalues = xvalues[inds]
    yvalues = yvalues[inds]
    npoints = len(xvalues)
    if ('Spline' not in fit_type) and ('Internal' not in fit_type):
        if npoints + 1 <= fit_order:
            tkinter.messagebox.showinfo(
                'Error',
                'The number of points is too few for the fit order.'
                + '  Please check your inputs.')
            return
    xmin = numpy.min(xvalues)
    xmax = numpy.max(xvalues)
    delx = xmax - xmin
    xstep = 1.2 * delx / 1201.
    xout = numpy.arange(xmin - delx / 10., xmax + delx / 10., xstep)
    if 'Internal' in fit_type:
        if npoints < 2:
            tkinter.messagebox.showinfo(
                'Error',
                'The number of points is too few for a linear fit.'
                + '  Please check your inputs.')
            return
        if fit_flag == 0:
            yerrors = (plotgui.ydata[set_number]['lowerror'] +
                       plotgui.ydata[set_number]['higherror']) / 2.
        else:
            yerrors = (plotgui.xdata[set_number]['lowerror'] +
                       plotgui.xdata[set_number]['higherror']) / 2.
        if (numpy.min(yerrors) == 0.) and (numpy.max(yerrors) == 0.):
            yerrors = yerrors + 1.
        slope, intercept, slope_error, intercept_error, covariance, \
            correlation = general_utilities.slope_calculation(
                xvalues, yvalues, yerrors)
        if slope is None:
            tkinter.messagebox.showinfo(
                'Error', 'Error in the standard slope fit.\n')
            return
        yfit = intercept + xvalues * slope
        yout = intercept + xout * slope
        errorterm1 = xout * 0. + intercept_error
        errorterm2 = xout * slope_error
        youterror = numpy.sqrt(errorterm1 * errorterm1 +
                               errorterm2 * errorterm2)
        labelstring = 'Standard linear fit'
        rms = numpy.sqrt(numpy.mean((yvalues - yfit) * (yvalues - yfit)))
        str1 = 'Regression calculation results:\n'
        str1 = str1 + 'Slope: %g +/- %g\n' % (slope, slope_error)
        str1 = str1 + 'Intercept: %g +/- %g\n' % (intercept, intercept_error)
        str1 = str1 + 'Covariance: %f\n' % (covariance)
        str1 = str1 + 'Correlation: %f\n' % (correlation)
        str1 = str1 + 'RMS deviation: %f\n' % (rms)
        tkinter.messagebox.showinfo('Information', str1)
        outfile = open('fit_values.txt', 'a')
        print(str1, file=outfile)
        print(' ', file=outfile)
        outfile.close()
    if fit_type == 'Polynomial':
        fitpars = polynomial.polyfit(xvalues, yvalues, fit_order)
        yout = polynomial.polyval(xout, fitpars)
        yfit = polynomial.polyval(xvalues, fitpars)
        labelstring = 'Order %d polynomial fit' % (fit_order)
        general_utilities.list_polynomial_fitpars(fit_type, fit_order,
                                                  fitpars)
    if fit_type == 'Legendre':
        fitpars = legendre.legfit(xvalues, yvalues, fit_order)
        yout = legendre.legval(xout, fitpars)
        yfit = legendre.legval(xvalues, fitpars)
        labelstring = 'Order %d Legendre polynomial fit' % (fit_order)
        general_utilities.list_polynomial_fitpars(fit_type, fit_order,
                                                  fitpars)
    if fit_type == 'Laguerre':
        fitpars = laguerre.lagfit(xvalues, yvalues, fit_order)
        yout = laguerre.lagval(xout, fitpars)
        yfit = laguerre.lagval(xvalues, fitpars)
        labelstring = 'Order %d Laguerre polynomial fit' % (fit_order)
        general_utilities.list_polynomial_fitpars(fit_type, fit_order,
                                                  fitpars)
    if fit_type == 'Chebyshev':
        fitpars = chebyshev.chebfit(xvalues, yvalues, fit_order)
        yout = chebyshev.chebval(xout, fitpars)
        yfit = chebyshev.chebval(xvalues, fitpars)
        labelstring = 'Order %d Chebyshev polynomial fit' % (fit_order)
        general_utilities.list_polynomial_fitpars(fit_type, fit_order,
                                                  fitpars)
    if fit_type == 'Least-Squares Spline':
        if fit_flag == 0:
            yerrors = (plotgui.ydata[set_number]['lowerror'] +
                       plotgui.ydata[set_number]['higherror']) / 2.
        else:
            yerrors = (plotgui.xdata[set_number]['lowerror'] +
                       plotgui.xdata[set_number]['higherror']) / 2.
        if (numpy.min(yerrors) == 0.) and (numpy.max(yerrors) == 0.):
            yerrors = yerrors + 1.
        xmin1 = numpy.min(xvalues)
        xmax1 = numpy.max(xvalues)
        xrange = xmax1 - xmin1
        nknots = int(fit_order)
        if (nknots < 3) or (nknots > int(len(xvalues) / 2)):
            nknots = 3
        xstep = xrange / (nknots - 2)
        xknots = numpy.arange(numpy.min(xvalues) + xstep,
                              numpy.max(xvalues) * 0.999999999, xstep)
        k = 3  # Use cubic splines
        knotedges = numpy.r_[(xmin, ) * (k + 1), xknots, (xmax, ) * (k + 1)]
        weights = 1. / yerrors
        weights[yerrors == 0.] = 0.
        fitobject = make_lsq_spline(xvalues, yvalues, knotedges, k, w=weights)
        yout = fitobject(xout)
        yfit = fitobject(xvalues)
        labelstring = 'Least squares spline fit, sections = %d' % (nknots)
    if fit_type == 'Spline':
        fitpars = UnivariateSpline(xvalues, yvalues, k=1, s=None,
                                   bbox=[xmin - delx, xmax + delx])
        labelstring = 'Default spline fit'
        yout = fitpars(xout)
        yfit = fitpars(xvalues)
    if fit_type == 'Cubic Spline':
        if fit_order < 0.:
            str1 = ('Error: smoothing value %f (< 0) is not allowed.'
                    '  Setting to 0.0') % (fit_order)
            plotgui.fit_text.insert(Tk.END, str1)
            plotgui.fit_text.see(Tk.END)
            fit_order = 0.0
        fitpars = UnivariateSpline(xvalues, yvalues, k=3,
                                   bbox=[xmin - delx, xmax + delx],
                                   s=fit_order)
        yout = fitpars(xout)
        yfit = fitpars(xvalues)
        labelstring = 'Cubic spline fit, smoothing = %f' % (fit_order)
    rms = fit_statistics(yvalues, yfit)
    if 'Internal' in fit_type:
        if fit_flag == 0:
            xlowerror = youterror * 0.
            xhigherror = youterror * 0.
            ylowerror = youterror
            yhigherror = youterror
        else:
            xlowerror = youterror
            xhigherror = youterror
            ylowerror = youterror * 0.
            yhigherror = youterror * 0.
    else:
        xlowerror = xout * 0.
        xhigherror = xout * 0.
        ylowerror = yout * 0.
        yhigherror = yout * 0.
    xmin = numpy.min(xout)
    xmax = numpy.max(xout)
    ymin = numpy.min(yfit)
    ymax = numpy.max(yfit)
    if rms is not None:
        str1 = 'Fit: RMS = %g for %d points\n' % (rms, len(yfit))
        plotgui.fit_text.insert(Tk.END, str1)
        plotgui.fit_text.see(Tk.END)
    if fit_flag == 0:
        plotgui.xdata[plotgui.nsets] = {
            'values': xout, 'lowerror': xlowerror, 'higherror': xhigherror,
            'minimum': xmin, 'maximum': xmax, 'errors': False, 'legend': True}
        plotgui.ydata[plotgui.nsets] = {
            'values': yout, 'lowerror': ylowerror, 'higherror': yhigherror,
            'minimum': ymin, 'maximum': ymax, 'errors': True, 'legend': True}
    else:
        plotgui.xdata[plotgui.nsets] = {
            'values': yout, 'lowerror': ylowerror, 'higherror': yhigherror,
            'minimum': ymin, 'maximum': ymax, 'errors': False, 'legend': True}
        plotgui.ydata[plotgui.nsets] = {
            'values': xout, 'lowerror': xlowerror, 'higherror': xhigherror,
            'minimum': xmin, 'maximum': xmax, 'errors': False, 'legend': True}
    m = plotgui.nsets % 10
    n = int(math.floor(plotgui.nsets / 10))
    plotgui.set_properties[plotgui.nsets]['symbol'] = None
    plotgui.set_properties[plotgui.nsets]['linestyle'] = '-'
    plotgui.set_properties[plotgui.nsets]['colour'] = plotgui.colourset[m]
    plotgui.set_properties[plotgui.nsets]['symbolsize'] = 4.0 + 0.3 * n
    plotgui.set_properties[plotgui.nsets]['label'] = labelstring
    plotgui.nsets = plotgui.nsets + 1
    make_plot.make_plot(plotgui)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy.polynomial.laguerre import lagfit, lagval
import random

random.seed(1)
x = np.linspace(10, 100, 30)
y = random.sample(range(10, 100), 30)
y = np.ma.masked_less_equal(y, 50)
p = lagfit(x, y, 3)
print(p)
c = lagval(x, p)
plt.scatter(x, y, label="actual")
plt.plot(x, c, label="fit")
plt.legend()
plt.show()
from numpy.polynomial.laguerre import lagfit, lagval
from numpy.polynomial.legendre import legval, legfit
import numpy as np
import matplotlib.pyplot as plt

N = 100
X = np.linspace(0, N, N)
Y = np.sin(0.3 * X)
Y = np.sin(0.1 * (X + np.random.normal(0, 0.1, size=100)))

regression = lagfit(X, Y, 4)
regression1 = legfit(X, Y, 4)
fit = lagval(X, regression)
fit1 = legval(X, regression1)
print(fit - fit1)

plt.plot(X, Y, linewidth=1, alpha=1, label="Actual")
plt.plot(X, fit, linewidth=0.5, alpha=1, label="Laguerre")
plt.plot(X, fit1, linewidth=0.5, alpha=1, label="Legendre")
plt.legend()
plt.show()
def LSMC_Laguerre(price_matrix, K, r, paths, T, dt, type, polydegree):
    # Least-squares Monte Carlo (Longstaff-Schwartz) with a Laguerre basis.
    # Assumes numpy (as np), time, and payoff_executing are available at
    # module level.
    from numpy.polynomial.laguerre import lagfit, lagval

    # start timer
    tic = time.time()

    # total number of steps
    N = T * dt
    N = int(N)

    # adjust yearly discount factor
    r = (1 + r)**(1 / dt) - 1

    # cash flow matrix
    cf_matrix = np.zeros((N + 1, paths * 2))

    # calculated cf when executed in time T (cfs European option)
    cf_matrix[N] = payoff_executing(K, price_matrix[N], type)

    # 1 if in the money, otherwise 0
    execute = np.where(payoff_executing(K, price_matrix, type) > 0, 1, 0)
    # execute = np.ones_like(execute)  # use to consider all paths instead

    for t in range(1, N):
        # discounted cf 1 time period
        discounted_cf = cf_matrix[N - t + 1] * np.exp(-r)

        # slice matrix and make all out of the money paths = 0 by
        # multiplying with matrix "execute"
        X = price_matrix[N - t, :] * execute[N - t, :]

        # +1 here because otherwise we will lose an in the money path at T-t
        # that is out of the money in T-t+1 (and thus has payoff = 0)
        Y = (discounted_cf + 1) * execute[N - t, :]

        # mask all zero values (out of the money paths) and run regression
        # X1 = np.ma.masked_less_equal(X, 0)
        # Y1 = np.ma.masked_less_equal(Y, 0) - 1
        X1 = X[np.where(X > 0)]
        Y1 = Y[np.where(X > 0)]

        if np.count_nonzero(X1) > 0:
            regression = lagfit(X1, Y1, polydegree)
            # warnings.simplefilter('ignore', np.RankWarning)

            # calculate continuation value
            cont_value = np.zeros_like(X)
            cont_value[np.where(X > 0)] = lagval(X1, regression)

            # update cash flow matrix
            imm_ex = payoff_executing(K, X, type)
            # works because for ootm paths imm_ex = 0 and cont value = 0
            # (not true in general), but the condition works
            cf_matrix[N - t] = np.where(imm_ex > cont_value, imm_ex,
                                        cf_matrix[N - t + 1] * np.exp(-r))
            cf_matrix[N - t + 1:] = np.where(imm_ex > cont_value, 0,
                                             cf_matrix[N - t + 1:])
        else:
            # all paths are out of the money, thus never optimal to exercise
            cf_matrix[N - t] = cf_matrix[N - t + 1] * np.exp(-r)

    # obtain option value
    cf_matrix[0] = cf_matrix[1] * np.exp(-r)
    option_value = np.sum(cf_matrix[0]) / (paths * 2)

    # st dev
    st_dev = np.std(cf_matrix[0]) / np.sqrt(paths)

    # Time and print the elapsed time
    toc = time.time()
    elapsed_time = toc - tic
    print('Total running time of LSMC: {:.2f} seconds'.format(elapsed_time))
    print("Ran this with T: ", T, " and dt: ", dt, "\n")
    print("Value of this", type, "option is:", option_value)
    print("St dev of this", type, "option is:", st_dev, "\n")

    return option_value
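LSMC_Laguerre expects a payoff_executing helper and a price_matrix of shape (N + 1, 2 * paths), neither of which is shown here; note it also uses dt as the number of steps per year (N = int(T * dt)). A hypothetical sketch of both pieces and a call, with the helper body and all parameter values invented for illustration and numpy/time assumed imported alongside the function:

import numpy as np

def payoff_executing(K, prices, type):
    """Assumed payoff helper: intrinsic value of a put or call."""
    if type == "put":
        return np.maximum(K - prices, 0.0)
    return np.maximum(prices - K, 0.0)

# Hypothetical market and simulation parameters (dt = steps per year)
S0, K, r, sigma, T, dt, paths = 36.0, 40.0, 0.06, 0.2, 1.0, 50, 10000
N = int(T * dt)

# GBM paths with antithetic variates, shape (N + 1, 2 * paths)
z = np.random.standard_normal((N, paths))
z = np.concatenate([z, -z], axis=1)
log_increments = (r - 0.5 * sigma ** 2) / dt + sigma * np.sqrt(1.0 / dt) * z
price_matrix = S0 * np.exp(np.vstack([np.zeros(2 * paths),
                                      np.cumsum(log_increments, axis=0)]))

value = LSMC_Laguerre(price_matrix, K, r, paths, T, dt, "put", polydegree=4)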