def shapingTransverse(self):
    ''' Compute coefficients for the transverse (theta) shape '''
    thetaFunc = shapeFunctions(self.N, shorthand=self.thetaShape,
                               tMax=self.tofSec)
    thetaFuncFree = shapeFunctionsFree(self.N, self.thetaFreeC,
                                       shorthand=self.thetaShapeFree,
                                       tMax=self.tofSec)

    # intermediate values
    [K1, K2] = np.dot(
        np.linalg.inv([[thetaFunc.v1(0), thetaFunc.v2(0)],
                       [thetaFunc.v1(self.tofSec), thetaFunc.v2(self.tofSec)]]),
        [-thetaFunc.v3(0), -thetaFunc.v3(self.tofSec)])
    [L1, L2] = np.dot(
        np.linalg.inv([[thetaFunc.v1(0), thetaFunc.v2(0)],
                       [thetaFunc.v1(self.tofSec), thetaFunc.v2(self.tofSec)]]),
        [self.vDepCyl[1] - thetaFuncFree.v(0),
         self.vArrCyl[1] - thetaFuncFree.v(self.tofSec)])

    # cTheta3
    integrand1 = lambda t: (L1 * thetaFunc.v1(t) + L2 * thetaFunc.v2(t)
                            + thetaFuncFree.v(t)) / self.r(t)
    integrand2 = lambda t: (K1 * thetaFunc.v1(t) + K2 * thetaFunc.v2(t)
                            + thetaFunc.v3(t)) / self.r(t)
    int1 = integrate(integrand1, 0, self.tofSec, method='trapz', nSteps=25)
    int2 = integrate(integrand2, 0, self.tofSec, method='trapz', nSteps=25)
    cTheta3 = (self.thetaArr - int1) / int2

    # cTheta1 and cTheta2
    cTheta12 = cTheta3 * np.array([K1, K2]) + np.array([L1, L2])
    self.cTheta = np.array([cTheta12[0], cTheta12[1], cTheta3])

    # assemble shape
    self.tDot = lambda t: (self.cTheta[0] * thetaFunc.v1(t)
                           + self.cTheta[1] * thetaFunc.v2(t)
                           + self.cTheta[2] * thetaFunc.v3(t)
                           + thetaFuncFree.v(t))
    self.thetaDot = lambda t: self.tDot(t) / self.r(t)
    self.tDDot = lambda t: (self.cTheta[0] * thetaFunc.Dv1(t)
                            + self.cTheta[1] * thetaFunc.Dv2(t)
                            + self.cTheta[2] * thetaFunc.Dv3(t)
                            + thetaFuncFree.Dv(t))
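The two matrix solves and the ratio defining cTheta3 encode the transverse conditions of the shaping method. Writing $T$ for the time of flight and $v_{\theta,\mathrm{dep}}$, $v_{\theta,\mathrm{arr}}$ for the departure and arrival transverse velocities (vDepCyl[1], vArrCyl[1]), the coefficients are chosen so that

$$\dot t(0) = v_{\theta,\mathrm{dep}}, \qquad \dot t(T) = v_{\theta,\mathrm{arr}}, \qquad \int_0^T \frac{\dot t(\tau)}{r(\tau)}\,d\tau = \theta_{\mathrm{arr}},$$

with $\dot t(\tau) = c_{\theta 1} v_1(\tau) + c_{\theta 2} v_2(\tau) + c_{\theta 3} v_3(\tau) + v_{\mathrm{free}}(\tau)$. Substituting $[c_{\theta 1}, c_{\theta 2}] = c_{\theta 3}[K_1, K_2] + [L_1, L_2]$ turns the first two conditions into the two linear solves above, and the third into $c_{\theta 3} = (\theta_{\mathrm{arr}} - \mathrm{int1}) / \mathrm{int2}$.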
def t(self, time):
    ''' Convenience function to call the polar angle as a function of time

    Computationally inefficient due to numerical integration
    '''
    # compute theta value by integration of thetaDot
    thetaChange = integrate(self.thetaDot, 0, time, method='trapz', nSteps=25)
    thetaFinal = thetaChange + self.rDepCyl[1]

    return thetaFinal
def integrationTest1(rtol=1e-3, atol=1e-2):
    func1 = lambda x: x**2 * np.cos(x)
    x0 = 0
    x1 = 100
    nSteps = int(1e5)

    # quad is the reference solution
    intResult1 = integrate(func1, x0, x1, method='quad')

    # compute the same result using the other methods
    intResult2 = integrate(func1, x0, x1, method='trapz', nSteps=nSteps)

    test = np.allclose(intResult1, intResult2, rtol, atol)
    if test:
        print('OK\tIntegration test')
    else:
        print('ERROR\tIntegration test')
        print('\tComputed quad:\t', intResult1)
        print('\tComputed others:', intResult2)
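The `integrate` helper exercised by this test (and by the shaping methods above) is not part of this listing. A minimal sketch of a dispatcher with the same call signature, assuming 'quad' wraps scipy's adaptive quadrature and 'trapz' applies a fixed-step trapezoidal rule to a vectorised integrand, could look like this:

import numpy as np
import scipy.integrate

def integrate(func, x0, x1, method='quad', nSteps=25):
    # sketch only: the original helper may differ in detail
    if method == 'quad':
        # adaptive quadrature from scipy, used as the reference solution
        result, _ = scipy.integrate.quad(func, x0, x1)
        return result
    elif method == 'trapz':
        # composite trapezoidal rule on nSteps + 1 equally spaced points
        x = np.linspace(x0, x1, nSteps + 1)
        y = func(x)
        h = (x1 - x0) / nSteps
        return h * (0.5 * y[0] + y[1:-1].sum() + 0.5 * y[-1])
    raise ValueError(f'Unknown integration method: {method}')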
def test_integrate():
    """ integrate with a given tolerance """
    p = Harmonic(1, 0)
    x0, x1 = 0, np.pi

    for tol in 10.**np.arange(-9, -2):
        s, err = integrate(p, x0, x1, tol=tol)
        print(f'Check for tol {tol:.2e}: '
              f'res = {s-err:.6f} .. {s:.6f} .. {s+err:.6f}')

        assert err >= 0, 'estimated error should be >= 0'
        assert np.abs(p[x0, x1] - s) <= 1.1 * err, \
            'actual error should be <= estimated error + 10%'
        assert np.abs(p[x0, x1] - s) <= tol, \
            'actual error should be <= tolerance'
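Neither `Harmonic` nor the tolerance-driven `integrate` appears in this listing. Purely as an illustration of the interface the test exercises (a callable with its exact integral available via `p[x0, x1]`, and an integrator returning a value together with an error estimate), a hypothetical sketch might be:

import numpy as np

class Harmonic:
    """Hypothetical stand-in: f(x) = sin(n*x + phi) with a known antiderivative."""
    def __init__(self, n, phi):
        self.n, self.phi = n, phi

    def __call__(self, x):
        return np.sin(self.n * x + self.phi)

    def __getitem__(self, limits):
        # exact integral over (x0, x1) from the antiderivative -cos(n*x + phi)/n
        x0, x1 = limits
        return (np.cos(self.n * x0 + self.phi)
                - np.cos(self.n * x1 + self.phi)) / self.n

def integrate(f, x0, x1, tol=1e-6):
    """Composite trapezoidal rule with interval doubling;
    the error is estimated from the last refinement (Richardson)."""
    n, prev = 2, None
    while True:
        x = np.linspace(x0, x1, n + 1)
        y = f(x)
        s = (x1 - x0) / n * (0.5 * y[0] + y[1:-1].sum() + 0.5 * y[-1])
        if prev is not None:
            err = abs(s - prev) / 3.0
            if err <= tol:
                return s, err
        prev, n = s, 2 * n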
def evaluate(self, evalThrust=False, nEvalPoints=100, printTime=False):
    ''' Compute DeltaV and maximum thrust
    by numerically integrating and sampling the thrust profile.

    The number of sampling points has a serious impact on performance
    -> activate thrust evaluation only when needed
    '''
    deltaVtemp = integrate(self.fTotal, 0, self.tofSec,
                           method='trapz', nSteps=25)
    self.deltaV = deltaVtemp

    if printTime:
        time1 = time.time()

    # perform grid search at equally spaced sample points
    if evalThrust == 'Grid':
        self.maxThrust = np.max(
            self.fTotal(np.linspace(0, self.tofSec, nEvalPoints)))
    # call local optimizer from scipy (NOT RECOMMENDED as not robust)
    elif evalThrust == 'Optimize':
        maxThrustTime = sci.optimize.minimize_scalar(
            lambda t: -self.fTotal(t),
            bounds=[0, self.tofSec],
            method='bounded')
        # evaluate the thrust at the optimizer's argmax (result.x),
        # not at the returned function value
        self.maxThrust = self.fTotal(maxThrustTime.x)
    # don't look for maximum thrust value
    else:
        self.maxThrust = -1

    # print the measured time spent in this method
    if printTime:
        time2 = time.time()
        print(f'Finding maximum of thrust profile took '
              f'{(time2 - time1) * 1e3:.3f} ms')
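A hypothetical usage sketch, assuming `traj` is an instance of the shaping class these methods belong to (the constructor is not shown in this listing):

# cheap call: integrate the thrust profile for DeltaV only
traj.evaluate()

# more expensive call: grid-sample the thrust profile for its maximum and time the search
traj.evaluate(evalThrust='Grid', nEvalPoints=200, printTime=True)
print(traj.deltaV, traj.maxThrust)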
def get_ans(keyboard_position_dict, sample, threshold, L, sigma, r,
            all_words, fitts_time_dict, t, gamma):
    [shape_dist_dict, template_points_dict] = getDistanceDict(
        keyboard_position_dict, sample, threshold, L, all_words)

    new_shape_dist_dict = copy.deepcopy(shape_dist_dict)
    new_shape_dist_dict = sorted(new_shape_dist_dict.items(),
                                 key=lambda e: e[1], reverse=False)
    print(new_shape_dist_dict)
    # print(template_points_dict)
    # print(sample)
    # for item in sample:
    #     print(item)

    remain_shape_lis = []
    remain_shape_dist = []
    for i in shape_dist_dict:
        remain_shape_lis.append(i)
        remain_shape_dist.append(shape_dist_dict[i])

    word_lis = shape_dist_dict.keys()
    word_dist_lis = calc_word_dist_lis(word_lis, len(sample),
                                       template_points_dict, sample, r)
    remain_loc_lis, remain_loc_dist = calc_pruned_lis(
        sigma, word_lis, word_dist_lis, fitts_time_dict, t, gamma)

    ans_lis = integrate(remain_shape_lis, remain_shape_dist,
                        remain_loc_lis, remain_loc_dist,
                        sigma, fitts_time_dict, t, gamma)
    print(ans_lis)
    return ans_lis
def integrate(self, var):
    '''
    Integrate the expression with respect to var; used in similar fashion as d.
    '''
    return Expr(expr_tree=integration.integrate(self.tree_repr, var))
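A minimal, hypothetical usage of this method; how an Expr instance is constructed is an assumption here, since only the method itself appears in this listing:

# hypothetical: build a symbolic expression and integrate it with respect to 'x'
x = Expr('x')
antiderivative = (x * x).integrate('x')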
def on_begin_clicked(self, b=None):
    if b is None:
        return
    if self.opts is None or self.indexdata is None:
        QMessageBox.warning(self, "No parameters", "No parameters set up yet")
        return
    outfile = str(self.destfile.text())
    if len(outfile) == 0:
        QMessageBox.warning(self, "No output file", "Please set up a results file")
        return

    workdir = os.path.dirname(self.opts.indexfile)
    nfiles = len(self.indexdata)
    self.intprogress.setMaximum(len(self.indexdata))
    parser = datafile.SpecDataFile()
    errorfiles = []
    self.begin.setEnabled(False)

    # Get ranges from options
    try:
        lowerpeak = self.opts.ranges.getrange("lpeak").checkvalid()
        upperpeak = self.opts.ranges.getrange("upeak").checkvalid()
        bglower = self.opts.ranges.getrange("bglower").checkvalid()
        bgupper = self.opts.ranges.getrange("bgupper").checkvalid()
    except datarange.DataRangeError as e:
        QMessageBox.warning(self, "Range problem", e.args[0])
        return

    bg1low = bglower.lower
    bg1high = bglower.upper
    bg2low = bgupper.lower
    bg2high = bgupper.upper
    peak1low = lowerpeak.lower
    peak1high = lowerpeak.upper
    peak2low = upperpeak.lower
    peak2high = upperpeak.upper

    # Do it this way to minimise the subtractions
    bgwidth = (bg1high + bg2high) - (bg1low + bg2low)
    peak1width = peak1high - peak1low
    peak2width = peak2high - peak2low

    results = []

    for cfilenum, indentry in enumerate(self.indexdata):
        self.intprogress.setValue(cfilenum)
        try:
            dataf, jdate, modjdate, helioc = indentry
            if not os.path.isabs(dataf):
                dataf = os.path.join(workdir, dataf)
            specarray = parser.parsefile(dataf)

            # Got array, convert to x,y pairs ready for integration
            xyspecarray = xyvalue.convert_to_xy(specarray, 0, 1)

            # Make adjustments for Doppler if required
            if self.opts.apply_doppler:
                xyspecarray = doppler.apply_doppler_xy(xyspecarray, helioc)

            # Get lower background integration, both peaks, upper background
            bglower = integration.integrate(xyspecarray, bg1low, bg1high)
            bgupper = integration.integrate(xyspecarray, bg2low, bg2high)
            peak1 = integration.integrate(xyspecarray, peak1low, peak1high)
            peak2 = integration.integrate(xyspecarray, peak2low, peak2high)

            # Get average background
            averagebg = (bglower + bgupper) / bgwidth

            # Slice that off each peak result
            peak1 -= averagebg * peak1width
            peak2 -= averagebg * peak2width

            # Get result as difference over sum
            peakc = (peak1 - peak2) / (peak1 + peak2)

            # Construct result as jdate, modjdate, peakdata, averagebg
            results.append((jdate, modjdate, peakc, averagebg))
        except datafile.Datafile_error as e:
            # Cope with data file errors
            errorfiles.append((dataf, e.args[0]))
            results.append((jdate, modjdate, 0.0, 0.0))
        except integration.Integration_error as e:
            # Cope with integration errors
            errorfiles.append((dataf, e.args[0]))
            results.append((jdate, modjdate, 0.0, 0.0))

    # Set progress to 100%
    self.intprogress.setValue(nfiles)

    if len(errorfiles) != 0:
        QMessageBox.warning(self, "Errors in processing",
                            str(len(errorfiles)) + " gave errors in processing")

    try:
        outparser = datafile.IntResult()
        outparser.writefile(outfile, results)
    except datafile.Datafile_error as e:
        QMessageBox.warning(self, "Error writing results", e.args[0])
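The background correction and peak ratio computed in the loop above amount to

$$\bar b = \frac{\int_{bg_1} f + \int_{bg_2} f}{w_{bg_1} + w_{bg_2}}, \qquad P_i = \int_{\mathrm{peak}_i} f - \bar b\, w_{\mathrm{peak}_i}, \qquad \mathrm{peakc} = \frac{P_1 - P_2}{P_1 + P_2},$$

where each $w$ is the width of the corresponding wavelength range: $\bar b$ is the mean background level per unit wavelength (bgwidth sums the two background widths, using the grouped subtraction noted in the comment), and peakc is the normalised difference of the two background-subtracted peaks.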
def on_begin_clicked(self, b=None):
    if b is None:
        return
    if self.opts is None or self.indexdata is None:
        QMessageBox.warning(self, "No parameters", "No parameters set up yet")
        return
    outfile = str(self.destfile.text())
    if len(outfile) == 0:
        QMessageBox.warning(self, "No output file", "Please set up a results file")
        return

    workdir = os.path.dirname(self.opts.indexfile)
    nfiles = len(self.indexdata)
    self.intprogress.setMaximum(len(self.indexdata))
    parser = datafile.SpecDataFile()
    errorfiles = []
    self.begin.setEnabled(False)

    bg1low = self.opts.intparams.background.lower
    rangelow = self.opts.intparams.peak.lower
    rangehigh = self.opts.intparams.peak.upper
    bg2high = self.opts.intparams.background.upper
    bgwidth = (rangelow - bg1low) + (bg2high - rangehigh)
    rangewidth = rangehigh - rangelow

    results = []

    for cfilenum, indentry in enumerate(self.indexdata):
        self.intprogress.setValue(cfilenum)
        try:
            dataf, jdate, modjdate, helioc = indentry
            if not os.path.isabs(dataf):
                dataf = os.path.join(workdir, dataf)
            specarray = parser.parsefile(dataf)

            # Got array, convert to x,y pairs ready for integration
            xyspecarray = xyvalue.convert_to_xy(specarray, 0, 1)

            # Make adjustments for Doppler if required
            if self.opts.intparams.apply_doppler:
                xyspecarray = doppler.apply_doppler_xy(xyspecarray, helioc)

            # Get lower background integration, the peak itself, upper background
            bglower = integration.integrate(xyspecarray, bg1low, rangelow)
            peakdata = integration.integrate(xyspecarray, rangelow, rangehigh)
            bgupper = integration.integrate(xyspecarray, rangehigh, bg2high)

            # Get average background
            averagebg = (bglower + bgupper) / bgwidth

            # Slice that off the result
            peakdata -= averagebg * rangewidth

            # Construct result as jdate, modjdate, peakdata, averagebg
            results.append((jdate, modjdate, peakdata, averagebg))
        except datafile.Datafile_error as e:
            # Cope with data file errors
            errorfiles.append((dataf, e.args[0]))
            results.append((jdate, modjdate, 0.0, 0.0))
        except integration.Integration_error as e:
            # Cope with integration errors
            errorfiles.append((dataf, e.args[0]))
            results.append((jdate, modjdate, 0.0, 0.0))

    # Set progress to 100%
    self.intprogress.setValue(nfiles)

    if len(errorfiles) != 0:
        QMessageBox.warning(self, "Errors in processing",
                            str(len(errorfiles)) + " gave errors in processing")

    try:
        outparser = datafile.IntResult()
        outparser.writefile(outfile, results)
    except datafile.Datafile_error as e:
        QMessageBox.warning(self, "Error writing results", e.args[0])
def F(curr_p):
    return coeff - 2 * integrate(0, 1, lambda z: Nt(T(z), curr_p) * z)
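If F is meant as a residual for a scalar root solve (an assumption; coeff, Nt, and T are defined elsewhere in the source), a usage sketch would be:

from scipy.optimize import brentq

# hypothetical call: solve F(p) = 0 for p inside an assumed bracket [p_lo, p_hi]
p_lo, p_hi = 0.1, 10.0
p_star = brentq(F, p_lo, p_hi)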
def integrate(self, initial, tmin, tmax, **kwargs):
    return integrate(initial, self, tmin, tmax, giveTime=True, **kwargs)
L = 1
g = 9.806
T_0 = (2 * m.pi) * (m.sqrt(L / g))
xmin = 0
xmax = m.pi / 2

# Initializing function
def T(beta, parameters):
    alpha = parameters[0]
    T_0 = parameters[1]
    alpha_rad = alpha * m.pi / 180
    k = m.sin(alpha_rad / 2)
    T = (2 * T_0) / (m.pi * m.sqrt(1 - ((k ** 2) * (m.sin(beta)) ** 2)))
    return T

# Calculating the integral to find the relative period for each step of alpha
period_plot = []
domain = np.arange(0, alpha_deg + step, step)
for degree in domain:
    period = integration.integrate(T, xmin, xmax, N, seed, degree, T_0)
    period_plot.append(period / T_0)

# Setting up the figure
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(domain, period_plot, c='purple')
ax.set_xlim([0, alpha_deg])
ax.set_title(r'Period ratio $T/T_0$ as function of $\alpha$', fontsize=13)
ax.set_xlabel(r'Maximum swing angle $\alpha$', fontsize=11)
ax.set_ylabel(r'$\frac{T(\alpha)}{T_0}$', rotation=0, labelpad=12, fontsize=14)
plt.show()
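The integrand is the complete elliptic integral of the first kind for the pendulum period, $T(\alpha)/T_0 = \tfrac{2}{\pi} K(k)$ with $k = \sin(\alpha/2)$, so the curve can be cross-checked against scipy. A sketch, noting that scipy.special.ellipk takes the parameter $m = k^2$ rather than the modulus $k$:

import numpy as np
from scipy.special import ellipk

alpha = np.radians(domain)                 # swing amplitudes in radians
k = np.sin(alpha / 2)                      # modulus of the elliptic integral
ratio_exact = (2 / np.pi) * ellipk(k**2)   # T(alpha)/T_0 = (2/pi) K(k)

# largest deviation of the numerically integrated curve from the closed form
print(np.max(np.abs(ratio_exact - np.array(period_plot))))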
plt.subplot(121)
plt.plot(x_lb, y_lb, label='Interpolated')
plt.plot(x_lb, diff_function(x_lb), label='Actual')
plt.xlabel(r'$x$', fontsize='18')
plt.ylabel(r'$f(x)$', fontsize='18')
plt.legend()

plt.subplot(122)
plt.plot(x_lb, l_lb, label='Error')
plt.scatter(x0_lb, np.zeros(order + 1), label='Sampling points')
plt.legend()
plt.xlabel(r'$x$', fontsize='18')
plt.ylabel(r'$e(x)$', fontsize='18')
plt.xlim([-1, 1])
plt.show()

x0_lg = lg.root(order)
x0_lb = lb.root(order)
y0_lg = function(x0_lg)
y0_lb = function(x0_lb)

weight_lg = ig.legendre_weight(order)
weight_lb = ig.lobatto_weight(order)

int_lg = ig.integrate(y0_lg, weight_lg)
int_lb = ig.integrate(y0_lb, weight_lb)

act = 4.0 / np.pi

print('Exact integral is', act)
print('Legendre integral is', int_lg)
print('Lobatto integral is', int_lb)
print('DONE')
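As a cross-check of the Gauss-Legendre branch, numpy ships an equivalent node/weight generator for the interval [-1, 1]. A sketch, with `function` and `order` taken from the surrounding script:

import numpy as np

# numpy's Gauss-Legendre nodes and weights on [-1, 1]
nodes, weights = np.polynomial.legendre.leggauss(order)
int_np = np.sum(weights * function(nodes))
print('numpy Gauss-Legendre integral is', int_np)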
""" This script is the main part of a program written to to and model the solar core. This project is the first term project in AST3310 at UiO """ # Importing packages import matplotlib.pyplot as plt import integration import plots if __name__ == "__main__": # Calling the integration script t, m, r, l, p, rho, epsilon, T0, M0, R0, \ L0, P0, initial_rho, initial_epsilon, i_break = integration.integrate() # Calling the plotting script plots.plot_l(m, l, M0, L0, i_break) plots.plot_p(m, p, M0, P0, i_break) plots.plot_r(m, r, M0, R0, i_break) plots.plot_t(m, t, M0, T0, i_break) plots.plot_rho(m, rho, M0, initial_rho, i_break) plots.plot_epsilon(m, epsilon, M0, initial_epsilon, i_break) # Show plots plt.show() # Info __author__ = 'Tommy Ryan' __email__ = '*****@*****.**'