def get_footprint_xyz(self, re=1.0):
    if self.trace_n is None:
        if self.trace_s is not None:
            if algc.mag(self.trace_s[0]) < self.safe_boundry:
                return self.trace_s[0]
        return [np.NaN, np.NaN, np.NaN]

    rd = algc.mag(self.trace_n)
    # If the trace never reaches the requested radius, fall back to its closest approach.
    if not np.any(rd < re):
        re = np.min(rd)
    # new_pos = sint.spline(xk=rd, yk=self.trace_n, xnew=re, order=1)
    new_pos = algc.lin_interp(v=re, X=rd, F=self.trace_n)

    if new_pos is None:
        if self.m_trace is not None and self.m_trace_re[0] < self.safe_boundry:
            return self.m_trace[0]
        return [np.NaN, np.NaN, np.NaN]
    return new_pos.flatten()
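# A standalone sketch of the interpolation performed above, assuming the radial
# distance decreases monotonically along the trace; np.interp stands in for
# ghostpy's internal algc.lin_interp, and the helper name is hypothetical.
import numpy as np

def _footprint_xyz_sketch(trace, re=1.0):
    rd = np.linalg.norm(trace, axis=1)   # radial distance at each trace point
    if not np.any(rd < re):              # trace never reaches the requested radius
        re = np.min(rd)
    # np.interp needs increasing x, so flip the (decreasing) radial distances
    return np.array([np.interp(re, rd[::-1], trace[::-1, i]) for i in range(3)])

# straight radial "trace" from 4 RE down to 1 RE along x:
# _footprint_xyz_sketch(np.linspace([4., 0., 0.], [1., 0., 0.], 50))  # -> [1., 0., 0.]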
def __min_B__(self):
    """
    Returns the minimum |B| for the northern and southern hemispheres.

    :return: 2 x np.array: min_n, min_s of the form [index, value].
             Cast the index to int prior to use.
    """
    assert self.trace_n is not None and self.trace_s is not None, \
        "ERROR: Field Line Trace is of None value. Starting Location: {}".format(self.start)
    assert self.trace_data_n is not None and self.trace_data_s is not None, \
        "ERROR: No field line trace data. Starting Location: {}".format(self.start)

    if len(self.trace_n) == 0 or len(self.trace_data_n) == 0:
        # Zero-length trace produced; nothing to search.
        return None, None

    b_mag_n = algc.mag(self.trace_data_n)
    b_mag_s = algc.mag(self.trace_data_s)

    min_n_i = np.argmin(b_mag_n)
    min_s_i = np.argmin(b_mag_s)
    return np.array([min_n_i, b_mag_n[min_n_i]]), np.array([min_s_i, b_mag_s[min_s_i]])
def __repr__(self):
    return "-------------------\n" \
           "LShell Object:\n" \
           "Data Trace Boundary: {}\n" \
           "Data Calculation Boundary: {}\n" \
           "Starting Location: {}\n" \
           "Start RE: {}\n" \
           "- - - - - -\n" \
           "Drift Path (r, lambda, phi):\n{}\n" \
           "Number of Lines: {}\n" \
           "K: {}\n" \
           "B_mirror: {}\n" \
           "L*: {}\n" \
           "Is Valid?: {}\n" \
           "Retaining Lines: {}\n" \
           "-------------------".format(self.data.get_trace_boundary(),
                                        self.data.get_calc_boundary(),
                                        self.start,
                                        algc.mag(self.start),
                                        self.__ordered_path__(),
                                        len(self.path),
                                        self.k,
                                        self.b,
                                        self.l_star(res=1000),
                                        self.valid,
                                        isinstance(self.lines, dict))
def __k_dir_loc__(self, loc_data, b_data, idx):
    bm = self.m_trace_b_mirror[idx]
    try:
        # First index past idx where |B| exceeds the mirror value.
        bidx = np.argmax(b_data[idx + 1:] > bm) + idx + 1
    except ValueError:
        return np.NaN, bm

    int_list = b_data[idx:(bidx + 1)]
    loc_list = loc_data[idx:(bidx + 1)]

    # Linearly interpolate the final point so the segment ends exactly at b_mirror.
    den = int_list[-1] - int_list[-2]
    if den == 0:
        lw = 0
    else:
        lw = (bm - int_list[-2]) / den
    bend = int_list[-2] + ((int_list[-1] - int_list[-2]) * lw)
    lend = loc_list[-2] + ((loc_list[-1] - loc_list[-2]) * lw)
    loc_list[-1] = lend
    int_list[-1] = bend

    # K = sum over the segment of sqrt(B_m - B) ds
    ds = algc.mag(np.concatenate([np.diff(loc_list, axis=0), np.array([[0, 0, 0]])]))
    k = np.nansum(np.sqrt(int_list[0] - int_list) * ds)
    return k, bm
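# A self-contained sketch of the integral accumulated above:
# K = integral of sqrt(B_m - B(s)) ds between the two crossings of the mirror
# field B_m, approximated as a sum over sampled trace points, mirroring the ds
# construction used in __k_dir_loc__. The helper name and synthetic |B| well
# are illustrative only.
import numpy as np

def _k_integral_sketch(b, s):
    bm = b[0]                                 # mirror value at the segment start
    ds = np.concatenate([np.diff(s), [0.0]])  # per-point arc length, 0 at the end
    return np.nansum(np.sqrt(np.clip(bm - b, 0.0, None)) * ds)

# synthetic parabolic |B| well, mirroring at b[0]:
# s = np.linspace(0.0, 2.0, 201); _k_integral_sketch(100 + 50 * (s - 1) ** 2, s)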
def get_footprint_rlp(self, re=1.0):
    if self.trace_n is None:
        if self.trace_s is not None:
            if algc.mag(self.trace_s[0]) < self.safe_boundry:
                return self.trace_s[0]
        return [np.NaN, np.NaN, np.NaN]
    assert self.trace_data_n is not None, "ERROR: Cannot Find North Trace Data."
    xyzf = self.get_footprint_xyz(re=re)
    return np.array(algc.cart_to_sphere(xyzf))
def get_xyz(self, xyz):
    if algc.mag(xyz) < self.get_trace_boundary():
        return np.array([np.NaN, np.NaN, np.NaN])
    cell_coord = self.get_nhood(xyz)
    value = self.valuate_point(xyz, locA=cell_coord)
    if np.any(np.isnan(value)):
        return np.array([np.NaN, np.NaN, np.NaN])
    return value  # avoid evaluating the point a second time
def __bisection_model(self):
    import scipy.interpolate as interp
    import ghostpy.algorithms.common as algc
    from ghostpy.algorithms import DipoleField as dpf

    # Sample the equatorial dipole |B| profile from L = 1 to L = 55 RE.
    Ls = np.linspace(start=1.0, stop=55, num=2000)
    B = []
    for L in Ls:
        loc = algc.sphere_to_cart(r=L, lam=0, phi=0)
        B.append(algc.mag(dpf.dipole_field(x=loc[0], y=loc[1], z=loc[2])))
    B = np.array(B, dtype=np.float_)
    # Invert the relationship: given |B|, return the dipole L.
    return interp.interp1d(B, Ls, kind='cubic')
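# A standalone sketch of the same inversion: the equatorial dipole field falls
# off as B = B0 / L^3, so sampling that profile and swapping the axes in
# interp1d yields a |B| -> L lookup. B0 here is an assumed equatorial surface
# strength in nT; substitute your model's value.
import numpy as np
import scipy.interpolate as interp

B0 = 31100.0
L = np.linspace(1.0, 55.0, 2000)
b_to_l = interp.interp1d(B0 / L ** 3, L, kind='cubic')
# b_to_l(B0 / 6.6 ** 3) recovers L ~ 6.6 (geosynchronous)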
def newell_surf_normal(poly):
    # This algorithm is from the Newell's method pseudo-code found at:
    # https://www.khronos.org/opengl/wiki/Calculating_a_Surface_Normal
    old_settings = np.geterr()
    np.seterr(invalid='ignore')
    norm = np.array([0.0, 0.0, 0.0])
    for i in range(len(poly)):
        v_curr = poly[i]
        v_next = poly[(i + 1) % len(poly)]
        norm[0] += (v_curr[1] - v_next[1]) * (v_curr[2] + v_next[2])
        norm[1] += (v_curr[2] - v_next[2]) * (v_curr[0] + v_next[0])
        norm[2] += (v_curr[0] - v_next[0]) * (v_curr[1] + v_next[1])
    mag_norm = algc.mag(norm)
    unit_norm = norm / mag_norm
    np.seterr(invalid=old_settings['invalid'])
    return unit_norm
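# Quick check of the routine above on a known case: a counter-clockwise unit
# square in the xy-plane should produce a +z normal. Unlike a single cross
# product at one vertex, Newell's method degrades gracefully for slightly
# non-planar or concave polygons.
#
#   square = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
#   newell_surf_normal(square)  # -> array([0., 0., 1.])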
def cross_surf_norm(p1, p2, p3):
    # Unit normal of the triangle (p1, p2, p3) via the cross product of two edges.
    l1 = vector_between(p1, p2)
    l2 = vector_between(p1, p3)
    cp = np.cross(l1, l2)
    mcp = algc.mag(cp)
    return cp / mcp
def main():
    timings = {}
    if rank == 0:
        print("Output is being re-directed to rank-dependent files. Please see these files for output.")
    sys.stdout = open("stdout_rank{}.txt".format(rank), "w+", buffering=1)
    sys.stderr = open("stderr_rank{}.txt".format(rank), "w+", buffering=1)
    print("rank {}".format(rank))
    print("rank {}".format(rank), file=sys.stderr)

    if rank == 0:
        print("Starting efficiency test")

    # get the arguments from the command line
    usage = "System Usage: {} -f <filename> -v <vector> -u <unitSize> -t <convergence tolerance>".format(sys.argv[0])
    valid_args = {'f': "filename", 'v': "vector", 'u': "unitSize", 't': "tol"}
    arg_dict = lsArgs.lsArgs(valid_args=valid_args).arg_dict
    assert len(arg_dict) > 0, usage
    tol = float(arg_dict['tol'])

    if rank == 0:
        print("loading Data")
    dtimeS = time.time()
    data = vtkd.VtkData(filename=arg_dict['filename'], vector=arg_dict['vector'])
    timings['data'] = time.time() - dtimeS

    # keep only the points below 9.0 RE
    dtimeS = time.time()
    points = np.array(data.points)
    pRE = algc.mag(points)
    points = points[np.where(pRE < 9.0)]
    if rank == 0:
        print("Binning Points")
    pt_bins, pct = binData(points)
    timings['binning'] = time.time() - dtimeS

    # divide the work: repeatedly pop a point from the smallest remaining bin
    # and hand it to the next rank in round-robin order
    dtimeS = time.time()
    work_unit = {}
    ctwu = 0
    while ctwu < pct:
        for rdx in range(size):
            bin_keys = list(pt_bins.keys())  # list() so the keys are indexable
            if len(pt_bins) == 0:
                if rank == 0:
                    print("ctwu: {}".format(ctwu))
                break
            len_bins = [len(pt_bins[k]) for k in pt_bins]
            if rank == 0:
                print(np.asarray(len_bins))
            bin = np.argmin(len_bins)
            pt = pt_bins[bin_keys[bin]].pop()
            try:
                work_unit[rdx].append(pt)
            except KeyError:
                work_unit[rdx] = [pt]
            if len(pt_bins[bin_keys[bin]]) == 0:
                x = pt_bins.pop(bin_keys[bin])
                if rank == 0:
                    print("Length x: {}".format(len(x)))
            ctwu += 1
    print("Total points assigned: {}".format(ctwu))
    print("[{}] - Work unit size: {}".format(rank, len(work_unit[rank])))
    timings['workDivs'] = time.time() - dtimeS

    dtimeS = time.time()
    workerbot = worker.LStarConvergeWorker(comm=comm)
    workerbot(conv_tol=tol, data=data, vector=arg_dict['vector'], pointlist=work_unit[rank])
    timings['dataCalc'] = time.time() - dtimeS

    dtimeS = time.time()
    data = comm.gather(workerbot.get_data(), root=0)
    timings['comms'] = time.time() - dtimeS

    dtimeS = time.time()
    if rank == 0:
        grid_dat = vtkd.VtkData(filename=arg_dict['filename'], vector='B')
        lu = ReverseLookup(data=grid_dat)
        locx = np.array([])
        locy = np.array([])
        locz = np.array([])
        lstar = np.array([])
        clstar = np.array([])
        clines = np.array([])
        kvals = np.array([])
        bvals = np.array([])
        ptime = np.array([])
        for datum in data:
            locx = np.concatenate([locx, (datum[0])[:, 0]])
            locy = np.concatenate([locy, (datum[0])[:, 1]])
            locz = np.concatenate([locz, (datum[0])[:, 2]])
            lstar = np.concatenate([lstar, datum[1]])
            clstar = np.concatenate([clstar, datum[2]])
            clines = np.concatenate([clines, datum[3]])
            kvals = np.concatenate([kvals, datum[4]])
            bvals = np.concatenate([bvals, datum[5]])
            ptime = np.concatenate([ptime, datum[6]])

        # negative K values are invalid; mask them out
        kvals[np.where(kvals < 0)] = np.NaN

        lines = __format_data_for_output__(filename=arg_dict['filename'], vector=arg_dict['vector'],
                                           locx=locx, locy=locy, locz=locz, lstar=lstar, clstar=clstar,
                                           clines=clines, ptime=ptime, tol=tol)

        # allocate NaN-filled arrays aligned with the grid points
        npts = len(grid_dat.points)
        lstar_aligned = np.full(npts, np.NaN)
        lstarc_aligned = np.full(npts, np.NaN)
        clines_aligned = np.full(npts, np.NaN)
        k_aligned = np.full(npts, np.NaN)
        k_aligned_s = np.full(npts, np.NaN)
        b_aligned = np.full(npts, np.NaN)
        t_aligned = np.full(npts, np.NaN)

        for idx in range(len(locx)):
            pt = tuple([locx[idx], locy[idx], locz[idx]])
            lstar_aligned[lu(pt)] = lstar[idx]
            lstarc_aligned[lu(pt)] = clstar[idx]
            clines_aligned[lu(pt)] = clines[idx]
            k_aligned[lu(pt)] = kvals[idx]
            k_aligned_s[lu(pt)] = kvals[idx]
            b_aligned[lu(pt)] = bvals[idx]
            t_aligned[lu(pt)] = ptime[idx]

        # give the southern-hemisphere K values a negative sign
        zloc = grid_dat.points[:, 2]
        print("zloc: {}".format(zloc))
        wzp = np.where(zloc < 0)
        print("WZP: {}".format(wzp))
        k_aligned_s[wzp] *= -1
        print(len(np.argwhere(np.isfinite(lstarc_aligned))))

        mdims = grid_dat.dims[:-1].copy()
        mdims[0] = grid_dat.dims[2]
        mdims[2] = grid_dat.dims[0]

        gx, gy, gz = build_grid(grid_dat.points, mdims)
        ldat = build_dat(lstar_aligned, mdims)
        lcdat = build_dat(lstarc_aligned, mdims)
        llines = build_dat(clines_aligned, mdims)
        kdat = build_dat(k_aligned, mdims)
        kdats = build_dat(k_aligned_s, mdims)  # note: built but not exported below
        bdat = build_dat(b_aligned, mdims)
        tdat = build_dat(t_aligned, mdims)

        from pyevtk.hl import gridToVTK
        gridToVTK("lfm_lstar_test", gx, gy, gz,
                  pointData={"L": ldat, "LC": lcdat, "Lines": llines,
                             "K": kdat, "Bm": bdat, "cTime": tdat})

        ofile = open("outdata.csv", "w+")
        ofile.writelines(lines)
    timings['io'] = time.time() - dtimeS

    full_times = comm.gather(timings, root=0)
    if rank == 0:
        pickle.dump(full_times, open("timings.pickle", "wb"))
def find_ib(self):
    # Smallest radial distance present in the grid, padded by 1%.
    gridRE = np.around(algc.mag(self.points), 3) * 1.01
    return np.nanmin(gridRE)
def main():
    if rank == 0:
        print("Output is being re-directed to rank-dependent files. Please see these files for output.")
    sys.stdout = open("stdout_rank{}.txt".format(rank), "w+", buffering=1)
    sys.stderr = open("stderr_rank{}.txt".format(rank), "w+", buffering=1)
    print("rank {}".format(rank))
    print("rank {}".format(rank), file=sys.stderr)

    # get the arguments from the command line
    usage = "System Usage: {} -f <filename> -v <vector> -p <pointsFile> -r <pointsRadius> -t <convergence tolerance>".format(sys.argv[0])
    valid_args = {'f': "filename", 'v': "vector", 'r': "radius", 'p': "pointsFile", 't': "tol"}
    arg_dict = lsArgs.lsArgs(valid_args=valid_args).arg_dict
    assert len(arg_dict) > 0, usage
    tol = float(arg_dict['tol'])

    worker_responsibility = {}
    data = vtkd.VtkData(filename=arg_dict['filename'], vector=arg_dict['vector'])
    print("inner boundary: {}".format(data.get_trace_boundary()))

    # select points between the calculation boundary and the requested radius
    points = np.array(data.points)
    bRE = float(arg_dict['radius'])
    pRE = algc.mag(points)
    pREu = pRE < bRE
    pREl = pRE > data.get_calc_boundary()
    point_list = points[np.where(np.logical_and(pREu, pREl))]

    parts = len(point_list)
    num_workers = size
    points_per_proc = float(parts) / num_workers
    if rank == 0:
        print("Total Number of Points: {}".format(parts))
        print("Maximum number of points per processor: {}".format(np.ceil(points_per_proc)))
        print("Convergence Tolerance: {}".format(tol))

    for proc in range(num_workers):
        worker_responsibility[proc] = []

    # parting out points round-robin across the workers
    for idx in range(parts):
        worker_responsibility[idx % num_workers].append(point_list[idx])

    workerbot = worker.LStarConvergeWorker(comm=comm)
    workerbot(conv_tol=tol, data=data, vector=arg_dict['vector'], pointlist=worker_responsibility[rank])
    data = comm.gather(workerbot.get_data(), root=0)

    if rank == 0:
        grid_dat = vtkd.VtkData(filename=arg_dict['filename'], vector='B')
        lu = ReverseLookup(data=grid_dat)
        locx = np.array([])
        locy = np.array([])
        locz = np.array([])
        lstar = np.array([])
        clstar = np.array([])
        clines = np.array([])
        kvals = np.array([])
        bvals = np.array([])
        for datum in data:
            locx = np.concatenate([locx, (datum[0])[:, 0]])
            locy = np.concatenate([locy, (datum[0])[:, 1]])
            locz = np.concatenate([locz, (datum[0])[:, 2]])
            lstar = np.concatenate([lstar, datum[1]])
            clstar = np.concatenate([clstar, datum[2]])
            clines = np.concatenate([clines, datum[3]])
            kvals = np.concatenate([kvals, datum[4]])
            bvals = np.concatenate([bvals, datum[5]])

        # negative K values are invalid; mask them out
        kvals[np.where(kvals < 0)] = np.NaN

        lines = __format_data_for_output__(filename=arg_dict['filename'], vector=arg_dict['vector'],
                                           locx=locx, locy=locy, locz=locz, lstar=lstar,
                                           clstar=clstar, clines=clines, tol=tol)

        # allocate NaN-filled arrays aligned with the grid points
        npts = len(grid_dat.points)
        lstar_aligned = np.full(npts, np.NaN)
        lstarc_aligned = np.full(npts, np.NaN)
        clines_aligned = np.full(npts, np.NaN)
        k_aligned = np.full(npts, np.NaN)
        k_aligned_s = np.full(npts, np.NaN)
        b_aligned = np.full(npts, np.NaN)

        for idx in range(len(locx)):
            pt = tuple([locx[idx], locy[idx], locz[idx]])
            lstar_aligned[lu(pt)] = lstar[idx]
            lstarc_aligned[lu(pt)] = clstar[idx]
            clines_aligned[lu(pt)] = clines[idx]
            k_aligned[lu(pt)] = kvals[idx]
            k_aligned_s[lu(pt)] = kvals[idx]
            b_aligned[lu(pt)] = bvals[idx]

        # give the southern-hemisphere K values a negative sign
        zloc = grid_dat.points[:, 2]
        print("zloc: {}".format(zloc))
        wzp = np.where(zloc < 0)
        print("WZP: {}".format(wzp))
        k_aligned_s[wzp] *= -1
        print(len(np.argwhere(np.isfinite(lstarc_aligned))))

        mdims = grid_dat.dims[:-1].copy()
        mdims[0] = grid_dat.dims[2]
        mdims[2] = grid_dat.dims[0]

        gx, gy, gz = build_grid(grid_dat.points, mdims)
        ldat = build_dat(lstar_aligned, mdims)
        lcdat = build_dat(lstarc_aligned, mdims)
        llines = build_dat(clines_aligned, mdims)
        kdat = build_dat(k_aligned, mdims)
        kdats = build_dat(k_aligned_s, mdims)
        bdat = build_dat(b_aligned, mdims)

        from pyevtk.hl import gridToVTK
        gridToVTK("lfm_lstar_test", gx, gy, gz,
                  pointData={"L": ldat, "LC": lcdat, "Lines": llines,
                             "K": kdat, "Ks": kdats, "Bm": bdat})

        ofile = open("outdata.csv", "w+")
        ofile.writelines(lines)
def solout(self, t, y):
    # Called by the integrator at each step; returning -1 halts integration
    # once the trace crosses the inner trace boundary.
    if algc.mag(y) <= self.trace_boundary:
        self.stop = True
        return -1
def rk45(self, inner_boundary=0.5, val_fun=None, h=1e-6, x0=None,
         max_steps=2000, error_tol=1e-3, direct="f"):
    """
    Implementation of the Runge-Kutta-Fehlberg 4(5) algorithm.

    :param inner_boundary: where to stop when approaching the origin
    :param val_fun: function that can provide a value at (x, y, z)
    :param h: starting step size
    :param x0: starting point
    :param max_steps: maximum number of steps to compute
    :param error_tol: error tolerance for calculating the step size
    :param direct: direction of integration ('f' for forward, 'b' for backward)
    :return: (path, value) arrays of positions and field values along the trace
    """
    if x0 is None:
        x0 = [6.0, 0.0, 0.0]
    d = -1 if direct == 'b' else 1

    x = x0
    RE = algc.mag(x0)
    dpv = val_fun(x)
    path = [tuple(x)]
    value = [tuple(dpv)]
    steps = 0

    while RE > inner_boundary:
        # Fehlberg coefficients: six stages yield both a 4th- and a 5th-order estimate.
        k1 = h * (val_fun(x) * d)
        k2 = h * (val_fun(x + 1. / 4. * k1) * d)
        k3 = h * (val_fun(x + 3. / 32. * k1 + 9. / 32. * k2) * d)
        k4 = h * (val_fun(x + 1932. / 2197. * k1 - 7200. / 2197. * k2 + 7296. / 2197. * k3) * d)
        k5 = h * (val_fun(x + 439. / 216. * k1 - 8. * k2 + 3680. / 513. * k3 - 845. / 4104. * k4) * d)
        k6 = h * (val_fun(x - 8. / 27. * k1 + 2. * k2 - 3544. / 2565. * k3 + 1859. / 4104. * k4 - 11. / 40. * k5) * d)

        x1 = x + 25. / 216. * k1 + 1408. / 2565. * k3 + 2197. / 4104. * k4 - 1. / 5. * k5
        x2 = x + 16. / 135. * k1 + 6656. / 12825. * k3 + 28561. / 56430. * k4 - 9. / 50. * k5 + 2. / 55. * k6

        # Step-size controller: scale h by the 4th/5th-order disagreement.
        s = 0.84 * (error_tol * h / (algc.mag(x2 - x1))) ** 0.25
        if np.isnan(s) or np.isinf(s):
            s = 1.0
        h *= s

        x = x1
        RE = algc.mag(x)
        dpv = val_fun(x)
        steps += 1
        if steps > max_steps:
            print("Truncating on number of steps.\nConsider increasing the number of steps.")
            break
        path.append(tuple(x1))
        value.append(tuple(dpv))

    return np.array(path), np.array(value)
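# A usage sketch for the tracer above, assuming it is reachable as a free
# function (in ghostpy it is a method, so adapt the call to your instance).
# Only the field *direction* is fed in here: for a field-line geometry trace
# the direction is what matters, and it keeps the adaptive steps well scaled.
# The helper name is hypothetical.
import numpy as np

def _dipole_unit_sketch(x):
    # Unit direction of a point dipole with moment m along -z:
    # B is proportional to (3 (m . rhat) rhat - m) / r^3, normalized before returning.
    x = np.asarray(x, dtype=float)
    r = np.linalg.norm(x)
    rhat = x / r
    m = np.array([0.0, 0.0, -1.0])
    b = (3.0 * np.dot(m, rhat) * rhat - m) / r ** 3
    return b / np.linalg.norm(b)

# path, value = rk45(inner_boundary=1.0, val_fun=_dipole_unit_sketch,
#                    x0=[4.0, 0.0, 0.0], direct='f')
# path[0] starts at 4 RE on the x-axis; path[-1] lands near the 1 RE boundary.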
def __search_B_adaptive__(self, outerRE=30, innerRE=None, phi_0=0, ref_r=None, debug=False, tol=1e-4):
    ol = algc.mag(self.start)
    # ol = self.l(res=10)
    ib = self.data.get_trace_boundary()
    dob = 1.85 * ol
    dib = 0.65 * ol
    if dib < ib:
        dib = ib
    print("================")

    # initial values
    phi_in = phi_out = phi_c = self.normalize_phi(phi_0)
    eps = np.finfo(np.float32).eps
    newfp = self.get_raw_path()
    lam_0 = 0
    # if len(newfp) > 0:
    #     lam_0 = 0.75 * newfp[0][1]
    # else:
    #     return None, None
    print(lam_0)

    if ref_r is None:
        r_0 = r_c = self.l_star(res=100)
    else:
        r_0 = ref_r

    # populated when debug=True
    stats = {'tau_in': [], 'tau_out': [], 'tau': [], 'loc_t': []}

    r_in = dib
    r_out = dob
    tau_in = None
    tau_out = None
    gap_out = None
    gap_in = None
    r_gap = r_out - r_in
    initcount = 0

    # Phase 1: bracket the drift shell with one line whose B_mirror gap is
    # positive and one whose gap is negative.
    while tau_in is None or (tau_out is None and r_gap > eps):
        loc_t = algc.sphere_to_cart(r=r_c, lam=lam_0, phi=phi_c)
        tau = self.__trace_from_location__(loc_t)

        # if we don't have a returning trace, try again
        if len(tau.trace_n) == 0 or not algc.mag(tau.trace_n[-1]) < tau.safe_boundry:
            if tau_out is not None:
                r_in = r_c
            else:
                r_out = r_c
            r_c = (r_in + r_out) / 2
            initcount += 1
            if initcount > 90:
                print("Out on init Count 1")
                break
            continue

        # check for a valid footprint
        tau_fp = tau.get_footprint_rlp(re=self.calc_boundary)
        if tau_fp is None or np.any(np.isnan(tau_fp)):
            return None, None

        # check phi_tau from both directions
        phi_gap, phi_tau = self.get_phi_gap(phi_0, tau_fp)
        if False:  # disabled phi refinement: not np.isclose(phi_gap, 0.0, rtol=0, atol=tol)
            if phi_gap > 0:
                phi_g_u = self.normalize_phi(phi_tau + phi_gap)
                phi_g_l = self.normalize_phi(phi_tau - phi_gap)
            else:
                phi_g_l = self.normalize_phi(phi_tau + phi_gap)
                phi_g_u = self.normalize_phi(phi_tau - phi_gap)
            pcount = 0
            cthresh = 90
            while not np.isclose(phi_gap, 0.0, rtol=0, atol=tol) and pcount < cthresh:
                phi_c = self.normalize_phi((phi_g_l + phi_g_u) / 2)
                loc_t = algc.sphere_to_cart(r=r_c, lam=lam_0, phi=phi_c)
                tau = self.__trace_from_location__(loc=loc_t)
                tau_fp = tau.get_footprint_rlp(re=self.calc_boundary)
                phi_gap, phi_tau = self.get_phi_gap(phi_0, tau_fp)
                if phi_gap < 0:
                    phi_g_l = self.normalize_phi(phi_g_l + phi_gap)
                else:
                    phi_g_u = self.normalize_phi(phi_g_u + phi_gap)
                pcount += 1
            if pcount > cthresh:
                print("Maximum Iterations on Initial Phi Search")
                break
        else:
            b_gap_tau = tau.__get_min_b_gap__(k=self.k, b=self.b)
            if b_gap_tau is not None and np.isclose(b_gap_tau, 0.0, rtol=0, atol=self.error_tol):
                print("Out with match.")
                return tau, tau

            if b_gap_tau is None:
                # no usable gap from this line: shrink the window
                if gap_out is not None:
                    r_in = r_c
                else:
                    r_out = r_c
            elif b_gap_tau > 0:
                if r_out >= dob:
                    # expand the outer boundary and retry
                    dob *= 1.5
                    r_out = dob
                    r_in = 0.95 * r_c
                    if r_in < ib:
                        r_in = ib
                    r_c = (r_in + r_out) / 2
                    print("Moving outer boundary out")
                    continue
                r_in = r_c
                tau_in = tau
                gap_in = b_gap_tau
            else:
                if r_in <= dib:
                    # contract the inner boundary and retry
                    dib *= 0.5
                    r_in = dib
                    r_out = 1.1 * r_c
                    if r_in < ib:
                        r_in = ib
                    r_c = (r_in + r_out) / 2
                    print("Moving inner boundary in")
                    continue
                r_out = r_c
                tau_out = tau
                gap_out = b_gap_tau

        r_c = (r_in + r_out) / 2
        r_gap = r_out - r_in
        initcount += 1
        if r_gap < eps:
            print("Out on EPS convergence")
            return None, None

    r_out = r_out  # * 2.5 (scaling disabled)
    r_gap = r_out - r_in
    conv_count = 0
    nan_count = 0

    # Phase 2: bisect between the bracketing lines until the B_mirror gap closes.
    while r_gap > eps:
        r_c = (r_out + r_in) / 2
        loc_t = algc.sphere_to_cart(r=r_c, lam=lam_0, phi=phi_c)
        tau = self.__trace_from_location__(loc=loc_t)
        tau_fp = tau.get_footprint_rlp(re=self.calc_boundary)
        phi_gap, phi_tau = self.get_phi_gap(phi_0, tau_fp)

        if False:  # disabled phi refinement: not np.isclose(phi_gap, 0.0, rtol=0, atol=tol)
            if phi_gap > 0:
                phi_g_u = self.normalize_phi(phi_tau + phi_gap)
                phi_g_l = self.normalize_phi(phi_tau - phi_gap)
            else:
                phi_g_l = self.normalize_phi(phi_tau + phi_gap)
                phi_g_u = self.normalize_phi(phi_tau - phi_gap)
            pcount = 0
            while not np.isclose(phi_gap, 0.0, rtol=0, atol=tol):
                phi_c = self.normalize_phi((phi_g_l + phi_g_u) / 2)
                loc_t = algc.sphere_to_cart(r=r_c, lam=lam_0, phi=phi_c)
                tau = self.__trace_from_location__(loc=loc_t)
                tau_fp = tau.get_footprint_rlp(re=self.calc_boundary)
                phi_gap, phi_tau = self.get_phi_gap(phi_0, tau_fp)
                if phi_gap < 0:
                    phi_g_l = self.normalize_phi(phi_g_l + (0.6 * phi_gap))
                else:
                    phi_g_u = self.normalize_phi(phi_g_u + (0.6 * phi_gap))
                pcount += 1
                if pcount > 90:
                    break

        b_gap_tau = tau.__get_min_b_gap__(k=self.k, b=self.b)
        if b_gap_tau is not None and np.isclose(b_gap_tau, 0.0, atol=self.error_tol):
            return tau, tau

        if b_gap_tau is None or np.isnan(b_gap_tau):
            print("Bad Tau. Moving in.")
            r_out = (r_in + r_out) / 2
            conv_count += 1
            nan_count += 1
            if nan_count > 50:
                break
            continue

        if b_gap_tau >= 0:
            # gap > 0: this line becomes the new inner bound;
            # keep the best (smallest positive) gap seen so far
            if b_gap_tau < gap_in:
                tau_in = tau
                gap_in = b_gap_tau
                if debug:
                    stats['tau'].append(tau)
            r_in = r_c
        else:
            # gap < 0: this line becomes the new outer bound;
            # keep the best (largest negative) gap seen so far
            if b_gap_tau > gap_out:
                tau_out = tau
                gap_out = b_gap_tau
                if debug:
                    stats['tau'].append(tau)
            r_out = r_c

        r_gap = r_out - r_in
        conv_count += 1
        if debug:
            stats['tau_in'].append(tau_in)
            stats['tau_out'].append(tau_out)
        if conv_count > 150:
            print("Out on conv count")
            break

    if debug:
        return stats
    print("Relative B Error (IN): {}".format(gap_in))
    print("Relative B Error (OUT): {}".format(gap_out))
    return tau_in, tau_out
def __init__(self, k=None, b_mirror=None, start_loc=None, alpha=None, data=None,
             save_lines=False, error_tol=1e-6, pre_converge=True):
    """
    Creates an LShell object instance, allowing several different L* manipulations.

    :param k: starting K for the calculation. Only required when starting from a
              K/location point or performing a K/B search.
    :param b_mirror: starting B_mirror for the particle. Only required for a
              B/location starting point or a K/B search.
    :param start_loc: starting location for the initial trace. Required unless
              performing a K/B search.
    :param alpha: particle pitch angle in radians; defaults to pi/2 when omitted.
    :param data: required for all modes. Must be of type data.GpData or one of its subclasses.
    :param save_lines: True = retain all lines for plotting; False = discard lines, keeping only the trajectory.
    :param error_tol: changes the error tolerance for several algorithms. Default is 1e-6.
    :param pre_converge: True = immediately calculate a close approximation with 4 field line traces.
              False = only calculate the first field line; L* may be invalid for anything but
              dipole fields until LShell.converge_lstar() is called. This allows more
              fine-grained manipulation, such as adding individual lines for experimentation.
    """
    np.seterr(invalid='ignore')
    assert ((k is not None and start_loc is not None) or
            (b_mirror is not None and start_loc is not None) or
            (b_mirror is None and k is None and start_loc is not None)), \
        "LShell was not called properly.\n\n" \
        "LShell must be called with one of the following three forms:\n\n" \
        "LShell(start_loc=[x,y,z], k=?, data=gp.data)\n" \
        "LShell(start_loc=[x,y,z], b_mirror=?, data=dp.data)\n" \
        "LShell(start_loc=[x,y,z], data=dp.data)\n\n" \
        "Specifying only a starting location will result in utilizing the location B value for b_mirror.\n\n"
    assert isinstance(data, gpd.data), "The supplied data structure is of an incorrect type. " \
                                       "All data structures must inherit ghostpy.data.GpData"

    self.k = k
    self.b = b_mirror
    self.start_l = None
    self.data = data
    self.BL = self.data.get_bisection_model()
    self.calc_boundary = self.data.get_calc_boundary()
    self.start = start_loc
    self.error_tol = error_tol
    self.valid = True

    # path tracking
    self.path = dict()
    self.conv_path = []
    self.dipole = dpd.DipoleData()
    self.pre_converge = pre_converge

    self.alpha = np.pi / 2 if alpha is None else alpha

    # line tracking
    self.save_lines = save_lines
    self.lines = dict() if self.save_lines else None

    if self.b is not None and start_loc is not None:
        # Initialize with a B/location trace at phi = 0
        assert False, "Not yet implemented"
    elif start_loc is not None and self.b is None and self.k is None:
        print("\n\n\nVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV")
        print("Calculating L*, K for point {}".format(self.start))
        newFL = self.__trace_from_location__(loc=start_loc)
        if newFL.valid and len(newFL.trace_data_n) > 0:
            self.k, self.b = newFL.__get_kb_for_start__(alpha=self.alpha)
            if self.k is None:
                self.valid = False
                return
            print("B_mirror = {}".format(self.b))
            print("K = {}".format(self.k))
        else:
            print("Invalid line initiation")
            print("Starting Location: {}".format(newFL.start))
            self.valid = False
            return
    else:
        # Initialize with location/K and find B_mirror
        newFL = self.__trace_from_location__(loc=start_loc)
        assert isinstance(newFL, fl.FieldLine), "ERROR: Incorrect type for field line."
        self.b = newFL.get_b(k=self.k)
        if self.b < 0:
            self.valid = False
        if self.b > 0 and self.k >= 0:
            fp = newFL.get_footprint_rlp(re=self.calc_boundary)
            if fp is not None:
                self.path[algc.cart_to_sphere(self.start)[2]] = fp
        else:
            print("vvvvvvvvvvvvvvvvvvvvvvvv")
            print("INITIALIZATION OF LINE")
            print("Origin: {}".format(self.start))
            print("B: {}".format(self.b))
            print("K: {}".format(self.k))
            if self.k < 0:
                newFL.__all_k__(debug=True)
                print("INVALID K")
            print("NewLine all K: {}".format(newFL.K))
            print("B_Mirror: {}".format(newFL.__get_b_mirror__(self.k)))
            print("B_North: {}".format(algc.mag(newFL.trace_data_n[0:10])))
            print("B_South: {}".format(algc.mag(newFL.trace_data_s[0:10])))
            print("RE: {}".format(algc.mag(newFL.trace_n[0:10])))
            self.valid = False
            print("^^^^^^^^^^^^^^^^^^^^^^^^")

    # if we are saving lines, save this one
    if self.lines is not None and self.valid:
        self.lines[algc.cart_to_sphere(self.start)[2]] = [newFL, newFL]

    start_phi = algc.cart_to_sphere(self.start)[2]
    print("Start Phi: {}".format(start_phi))
    self.conv_path.append(start_phi)

    # initial convergence
    if self.pre_converge:
        self.valid = self.__4_line_conv__()
    print("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
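# Hypothetical construction examples covering the calling forms the assertion
# above permits; `data` stands for any ghostpy.data.GpData subclass instance
# (e.g. VtkData). Note the b_mirror/location form currently stops on
# "Not yet implemented".
#
#   shell_k = LShell(start_loc=[6.6, 0.0, 0.0], k=0.0, data=data)  # location + K
#   shell   = LShell(start_loc=[6.6, 0.0, 0.0], data=data)         # location only:
#                                                                  # local B becomes B_mirror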
def __trace_line__(self):
    """
    Internal method to perform the line trace.
    :return: None -- internal method
    """
    self.trace_n, self.trace_data_n = self.integrator.integrate(x0=self.start, direct='f', error_tol=self.error_tol)
    self.trace_s, self.trace_data_s = self.integrator.integrate(x0=self.start, direct='b', error_tol=self.error_tol)

    if self.trace_n is None or self.trace_s is None:
        self.valid = False
        self.valid_code = -1
        return

    re_n = algc.mag(self.trace_n)
    re_s = algc.mag(self.trace_s)

    if len(re_n) > 0:
        re_n_1 = re_n[0]
        re_n_2 = re_n[-1]
        re_n_max = np.nanmax(re_n)
    else:
        re_n_1 = 1000.
        re_n_2 = 1000.
        re_n_max = np.nan

    if len(re_s) > 0:
        re_s_1 = re_s[0]
        re_s_2 = re_s[-1]
        re_s_max = np.nanmax(re_s)
    else:
        re_s_1 = 1000.
        re_s_2 = 1000.
        re_s_max = np.nan

    if re_n_1 <= self.safe_boundry and re_n_2 <= self.safe_boundry and re_n_max > re_s_max:
        # Full trace in the north... must flip so the line runs north to south
        print("Full Trace North: {}".format(len(self.trace_n)))
        self.start_idx = len(self.trace_n) - 1
        self.m_trace = np.flipud(self.trace_n)
        self.m_trace_data = np.flipud(self.trace_data_n)
        self.m_trace_b_mirror = algc.mag(self.m_trace_data)
        self.m_trace_re = algc.mag(self.m_trace)
        print("Full Trace RE:\n{}".format(self.m_trace_re))
    elif re_s_1 <= self.safe_boundry and re_s_2 <= self.safe_boundry and re_s_max > re_n_max:
        print("Full Trace South")
        # Full trace in the south... no flip needed
        self.start_idx = 0
        self.m_trace = self.trace_s
        self.m_trace_data = self.trace_data_s
        self.m_trace_b_mirror = algc.mag(self.m_trace_data)
        self.m_trace_re = algc.mag(self.m_trace)
    elif re_n_2 <= self.safe_boundry and re_s_2 <= self.safe_boundry:
        # Combined trace: join the flipped north half to the south half
        self.start_idx = len(self.trace_n) - 1
        data_array = np.delete(self.trace_data_s, 0, axis=0)
        data_array = np.concatenate([np.flipud(self.trace_data_n), data_array], axis=0)
        # Combine north and south location arrays;
        # values should move from north to south along the line
        loc_array = np.delete(self.trace_s, 0, axis=0)
        loc_array = np.concatenate([np.flipud(self.trace_n), loc_array], axis=0)
        self.m_trace = loc_array
        self.m_trace_data = data_array
        self.m_trace_b_mirror = algc.mag(data_array)
        self.m_trace_re = algc.mag(loc_array)
    else:
        self.valid = False
        self.valid_code = -2
        return

    if self.smooth > 0:
        try:
            self.m_trace_b_mirror = savgol_filter(self.m_trace_b_mirror, self.smooth, 2)
        except TypeError:
            pass
def main():
    if rank == 0:
        print("Output is being re-directed to rank-dependent files. Please see these files for output.")
    sys.stdout = open("stdout_rank{}.txt".format(rank), "w+", buffering=1)
    sys.stderr = open("stderr_rank{}.txt".format(rank), "w+", buffering=1)

    if rank == 0:
        ###################
        #   MASTER RANK   #
        ###################
        # master-dependent modules
        import lsArgs
        import master
        from ghostpy.data import VtkData as gpd
        from ghostpy.algorithms import common as algc
        import numpy as np

        # get the arguments from the command line
        usage = "System Usage: {} -f <filename> -v <vector> -p <pointsFile> -r <pointsRadius>".format(sys.argv[0])
        valid_args = {'f': "filename", 'v': "vector", 'r': "radius", 'p': "pointsFile"}
        arg_dict = lsArgs.lsArgs(valid_args=valid_args).arg_dict
        assert len(arg_dict) > 0, usage
        point_list = None

        # load the master processor
        masterProc = master.LstarMaster(comm=comm)
        if arg_dict['filename'] is not None:
            # load the filename to the master process
            # TODO: Need to adjust this to work on a list of files
            masterProc.add_file_time_pair(tuple((arg_dict['filename'], 0, arg_dict['vector'])))
        else:
            # we need a file
            print("Cannot continue without a filename")
            assert False

        if arg_dict['radius'] is not None:
            # get the list of points within the requested RE distance
            # TODO: Need to work on list of file names
            data = gpd.VtkData(filename=arg_dict['filename'], vector=arg_dict['vector'])
            points = np.array(data.points)
            bRE = float(arg_dict['radius'])
            pRE = algc.mag(points)
            pREu = pRE < bRE
            pREl = pRE > data.get_calc_boundary()
            point_list = points[np.where(np.logical_and(pREu, pREl))]
            master_list = []
            for p in point_list:
                pdata = np.concatenate([p, [0]])  # append a time of 0 to each point
                master_list.append(pdata)
        elif arg_dict['pointsFile'] is not None:
            # get the list of points/times from a file
            print("Getting Points list from file...")
            assert False, "File Reading not yet implemented."
        else:
            print("Must have a list of points to process, or a radius to process")
            assert False

        # load points to the master process
        for pair in master_list:
            masterProc.add_point_time_pair(pair)
        print("Number of points to process: {}".format(masterProc.get_number_of_points()))

        # start the master process
        masterProc()
    else:
        ####################
        #   WORKER RANKS   #
        ####################
        # worker-dependent modules
        import worker
        workerProc = worker.LstarWorker(comm=comm)
        workerProc()
def __k_mod__(self, mdata, mtrace):
    assert mdata is not None and mtrace is not None

    bdata = mdata.copy()
    bdata2 = bdata.copy()
    mtrace2 = mtrace.copy()

    re_north = algc.mag(mtrace2[0])
    re_south = algc.mag(mtrace2[-1])
    bm_north = bdata[0]
    bm_south = bdata[-1]
    calc_bound = self.data.get_calc_boundary() * 1.5
    len_data = len(mtrace2)

    if (not np.isclose(re_north, calc_bound) and re_north > calc_bound) or \
            (not np.isclose(re_south, calc_bound) and re_south > calc_bound):
        # Field line through the starting point does not return to the boundary.
        self.valid_code = -2
        self.valid = False
        return None, None

    # lists for K and B
    klist = []
    blist = []

    # Walk past leading points where the northern mirror value exceeds the
    # southern endpoint's; K is undefined (NaN) there.
    start = 0
    while bm_north > bm_south:
        bm_north = bdata[start]
        klist.append(np.NaN)
        blist.append(bdata2[start])
        start += 1

    for idx in np.arange(start=start, stop=len_data):
        cb = bdata2[idx]
        # Find the bounding point index; if it is the same as idx, no bounding point exists.
        try:
            bidx = np.argmax(bdata[idx + 1:] > cb) + idx + 1
        except ValueError:
            klist.append(np.NaN)
            blist.append(bdata2[idx])
            break

        int_list = bdata[idx:(bidx + 1)]
        loc_array = mtrace2[idx:(bidx + 1)]

        # Interpolate the last value so the segment ends exactly at b_mirror.
        bm = cb
        den = int_list[-1] - int_list[-2]
        if den == 0:
            print("Points contain equivalent b_mirrors")
            print("B_mirror: {}".format(bm))
            print("Setting weight to 0")
            lw = 0
        else:
            lw = (bm - int_list[-2]) / den
        bend = int_list[-2] + ((int_list[-1] - int_list[-2]) * lw)
        lend = loc_array[-2] + ((loc_array[-1] - loc_array[-2]) * lw)
        loc_array[-1] = lend
        int_list[-1] = bend

        # K = sum over the segment of sqrt(B_m - B) ds
        dtrace = algc.mag(np.concatenate([np.diff(loc_array, axis=0), np.array([[0, 0, 0]])]))
        k = np.nansum(np.sqrt(int_list[0] - int_list) * dtrace)
        klist.append(k)
        blist.append(cb)

    return np.array(blist), np.array(klist)