def draw(self, label_fmt=None):
    "Visualize points with interactive scatter plot browser."
    if not self.points:
        print('[warn] ConvexHull is empty.')
        return
    import scipy.spatial
    points = np.array([(p.x, p.y) for p in self.points])
    hull = scipy.spatial.ConvexHull(points)
    for simplex in hull.simplices:
        pl.plot(points[simplex, 0], points[simplex, 1], 'k-', zorder=-1, alpha=0.5, lw=.5)
    for p in self:
        pl.scatter(p.x, p.y, c='r', alpha=0.5, zorder=-1)
    pl.box(False)
    pl.xticks([p.x for p in self], rotation='vertical')
    pl.yticks([p.y for p in self])
    for p in self:
        pl.text(x=p.x, y=p.y, s=str(p.d) if label_fmt is None else label_fmt(p))
def fitzpatrick(input, wavelength=True, plot=False):
    if wavelength:
        x = (input / 10**4.)**-1.  # convert from angstroms to micrometers
    else:
        x = input

    ''' for R_v = 3.1 '''
    wavelength = [0.000, 0.377, 0.820, 1.667, 1.828, 2.141, 2.433, 3.704, 3.846]
    ratio = [0.000, 0.265, 0.829, 2.688, 3.055, 3.806, 4.315, 6.265, 6.591]
    #wavelength = [1.667,1.828,2.141,2.433]
    #ratio = [2.688,3.055,3.806,4.315]

    import scipy
    from scipy import interpolate
    fitzSpline = scipy.interpolate.interp1d(wavelength, ratio, kind='cubic')  #, bounds_error=False)

    if plot:
        import pylab, scipy
        pylab.clf()
        x_range = scipy.arange(wavelength[0], wavelength[-1], 0.01)
        pylab.plot(x_range, fitzSpline(x_range))
        pylab.scatter(wavelength, ratio)
        pylab.box()
        pylab.xlim([0, 4])
        pylab.savefig('/Users/pkelly/Dropbox/spline.png')

    ''' normalized so that A_1 um = 1 mag '''
    A = fitzSpline(x) / fitzSpline(1.)
    return A
def gui_repr(self):
    """Generate a GUI to represent the sentence alignments
    """
    if __pylab_loaded__:
        fig_width = max(len(self.sent1), len(self.sent2)) + 1
        fig_height = 3
        pylab.figure(figsize=(fig_width*0.8, fig_height*0.8), facecolor='w')
        pylab.box(on=False)
        pylab.subplots_adjust(left=0, right=1, bottom=0, top=1)
        pylab.xlim(-1, fig_width - 1)
        pylab.ylim(0, fig_height)
        pylab.xticks([])
        pylab.yticks([])
        for i in xrange(len(self.sent1)):
            pylab.text(i, 2.5, self.sent1[i], ha='center', va='center', rotation=30)
            if len(self.sent1_index[i]) > 0:
                pylab.arrow(i, 2.5, 0, -0.5, color='r', alpha=0.3, lw=2)
                for j in self.sent1_index[i]:
                    pylab.arrow(i, 2, j - i, -1, color='r')
        for i in xrange(len(self.sent2)):
            pylab.text(i, 0.5, self.sent2[i], ha='center', va='center', rotation=30)
            if len(self.sent2_index[i]) > 0:
                pylab.arrow(i, 0.5, 0, 0.5, color='r', alpha=0.3, lw=2)
        pylab.draw()
def gui_repr(self):
    """Generate a GUI to represent the sentence alignments
    """
    if __pylab_loaded__:
        fig_width = max(len(self.text_e), len(self.text_f)) + 1
        fig_height = 3
        pylab.figure(figsize=(fig_width*0.8, fig_height*0.8), facecolor='w')
        pylab.box(on=False)
        pylab.subplots_adjust(left=0, right=1, bottom=0, top=1)
        pylab.xlim(-1, fig_width - 1)
        pylab.ylim(0, fig_height)
        pylab.xticks([])
        pylab.yticks([])
        e = [0 for _ in xrange(len(self.text_e))]
        f = [0 for _ in xrange(len(self.text_f))]
        for (i, j) in self.align:
            e[i] = 1
            f[j] = 1
            # draw the middle line
            pylab.arrow(i, 2, j - i, -1, color='r')
        for i in xrange(len(e)):
            # draw e side line
            pylab.text(i, 2.5, self.text_e[i], ha='center', va='center', rotation=30)
            if e[i] == 1:
                pylab.arrow(i, 2.5, 0, -0.5, color='r', alpha=0.3, lw=2)
        for i in xrange(len(f)):
            # draw f side line
            pylab.text(i, 0.5, self.text_f[i], ha='center', va='center', rotation=30)
            if f[i] == 1:
                pylab.arrow(i, 0.5, 0, 0.5, color='r', alpha=0.3, lw=2)
        pylab.draw()
def PlotNumberOfTags(corpus):
    word_tag_dict = defaultdict(set)
    for (word, tag) in corpus:
        word_tag_dict[word].add(tag)
    # using Counter for efficiency (leaner than FreqDist)
    C = Counter(len(val) for val in word_tag_dict.itervalues())

    pylab.subplot(211)
    pylab.plot(C.keys(), C.values(), '-go', label='Linear Scale')
    pylab.suptitle('Word Ambiguity:')
    pylab.title('Number of Words by Possible Tag Number')
    pylab.box('off')  # for better appearance
    pylab.grid('on')  # for better appearance
    pylab.ylabel('Words With This Number of Tags (Linear)')
    pylab.legend(loc=0)
    # add value tags
    for x, y in zip(C.keys(), C.values()):
        pylab.annotate(str(y), (x, y + 0.5))

    pylab.subplot(212)
    pylab.plot(C.keys(), C.values(), '-bo', label='Logarithmic Scale')
    pylab.yscale('log')  # to make the graph more readable, for the log graph version
    pylab.box('off')  # for better appearance
    pylab.grid('on')  # for better appearance
    pylab.xlabel('Number of Tags per Word')
    pylab.ylabel('Words With This Number of Tags (Log)')
    pylab.legend(loc=0)
    # add value tags
    for x, y in zip(C.keys(), C.values()):
        pylab.annotate(str(y), (x, y + 0.5))
    pylab.show()
def vis_result(images, image_t_syn, flow, filename='./result.png'):
    '''
    Visualize estimated results
    images: input images to deep-voxel-flow, should be stack of 2 or 3 images shape(2or3, h, w, 3)
    image_t_syn: synthesized image at time slice t, shape(h, w, 3)
    flow: sub-estimated optical flow, shape(h, w, 2)
    '''
    if len(images) == 3:
        num_contents = 6
        is_triplet = True
    elif len(images) == 2:
        num_contents = 4
        is_triplet = False
    else:
        raise ValueError('Invalid number of images')
    n_cols = num_contents // 2  # integer division so subplot indices stay ints

    tick_config = {
        'labelbottom': False,
        'bottom': False,
        'labelleft': False,
        'left': False
    }

    fig = plt.figure(figsize=(8 * 2, 8 * (n_cols / 2)))
    for i, image in enumerate(images):
        plt.subplot(2, n_cols, i + 1)
        plt.imshow(image)
        plt.title(f'Image at time slice {["0", "t", "1"][i]}')
        plt.tick_params(**tick_config)
        plt.xticks([])
        box(False)

    plt.subplot(2, n_cols, n_cols + 1)
    plt.imshow(vis_flow(flow))
    plt.title('Estimated optical flow')
    plt.tick_params(**tick_config)
    plt.xticks([])
    box(False)

    plt.subplot(2, n_cols, n_cols + 2)
    plt.imshow(image_t_syn)
    plt.title('Synthesized image at time slice t')
    plt.tick_params(**tick_config)
    plt.xticks([])
    box(False)

    if is_triplet:
        image_diff = np.mean(np.abs(images[1] - image_t_syn), axis=-1)
        plt.subplot(2, n_cols, n_cols + 3)
        plt.imshow(image_diff)
        plt.title('Diff-map between GT and synthesized frame')
        plt.tick_params(**tick_config)
        plt.xticks([])
        box(False)

    plt.tight_layout()
    plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
    plt.close()
def show_graph(src):
    img = plt.imread(src)
    xpixels, ypixels = img.shape[0], img.shape[1]
    dpi = 100
    margin = 0.01
    figsize = (1 + margin) * ypixels / dpi, (1 + margin) * xpixels / dpi
    fig = plt.figure(figsize=figsize, dpi=dpi)
    ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])
    ax.tick_params(labelbottom="off", bottom="off")
    ax.tick_params(labelleft="off", left="off")
    ax.imshow(img, interpolation='none')
    box("off")
    plt.show()
def computeInputResistance(segment, Irange, dur, delay, dt=0.005, plot=False):
    if plot:
        import pylab as p
    stim = makeIclamp(segment, dur, 0, delay)
    rec = makeRecorders(segment, {'v': '_ref_v'})
    ap = makeAPcount(segment)
    I = []
    V = []
    if plot:
        p.figure()
        p.subplot(1, 2, 1)
    for k, i in enumerate(np.arange(Irange[0], Irange[1], Irange[2])):
        ap.n = 0
        stim.amp = i
        run(2*delay + dur, dt)
        t = np.array(rec['t'])
        v = np.array(rec['v'])
        if ap.n == 0:
            idx = np.intersect1d(np.nonzero(t > delay + 0.75*dur)[0],
                                 np.nonzero(t < delay + dur)[0])
            I.append(i)
            V.append(np.mean(v[idx]))
        else:
            print('The neuron emitted spikes at I = %g pA' % (stim.amp*1e3))
        if plot:
            p.plot(1e-3*t, v)
    V = np.array(V)*1e-3
    I = np.array(I)*1e-9
    poly = np.polyfit(I, V, 1)
    if plot:
        ymin, ymax = p.ylim()
        p.plot([1e-3*(delay + 0.75*dur), 1e-3*(delay + 0.75*dur)], [ymin, ymax], 'r--')
        p.plot([1e-3*(delay + dur), 1e-3*(delay + dur)], [ymin, ymax], 'r--')
        p.xlabel('t (s)')
        p.ylabel('V (mV)')
        p.box(True)
        p.grid(False)
        p.subplot(1, 2, 2)
        x = np.linspace(I[0], I[-1], 100)
        y = np.polyval(poly, x)
        p.plot(1e12*x, 1e3*y, 'k--')
        p.plot(1e12*I, 1e3*V, 'bo')
        p.xlabel('I (pA)')
        p.ylabel('V (mV)')
        p.show()
    return poly[0]
def NumberLine(gt, pred, path, label="auto"):
    """
    Check how the predicted occurrence years are distributed.
    Args
        gt: ground-truth t, list [nk, tnk, tk]
        pred: predicted t, [particles, cells]
    """
    for cell in np.arange(3):
        #pdb.set_trace()
        # predicted year [particles,]
        x = gt[cell]
        xhat = pred[:, cell]
        y = [0] * 1  # y = 0
        yhat = [0] * xhat.shape[0]

        # number line -----------------------------------------------------
        fig, ax = plt2.subplots(figsize=(10, 10))  # figure size
        fig.set_figheight(1)  # adjust height
        ax.tick_params(labelbottom=True, bottom=False)  # x-axis settings
        ax.tick_params(labelleft=False, left=False)  # y-axis settings
        # -----------------------------------------------------------------

        # figure appearance -----------------------------------------------
        #xMin, xMax = np.min(np.append(x, xhat)), np.max(np.append(x, xhat))
        xMin, xMax = 0, 1400
        plt2.tight_layout()  # auto-adjust layout
        plt2.hlines(y=0, xmin=xMin, xmax=xMax, color="silver")  # horizontal axis
        pylab.box(False)  # remove the frame
        # -----------------------------------------------------------------

        # scatter plot ----------------------------------------------------
        plt2.scatter(xhat, yhat, c='skyblue')  # predictions
        plt2.scatter(x, y[0], c='coral')  # ground truth
        plt2.title(f"min:{int(np.min(xhat))} max:{int(np.max(xhat))}")
        # -----------------------------------------------------------------

        myData.isDirectory(path)
        plt2.savefig(os.path.join(path, f"{label}_{cellname[cell]}.png"), bbox_inches="tight")
        plt2.close()
def vis_flow_pyramid(flow_pyramid, flow_gt=None, images=None, filename='./flow.png'):
    num_contents = len(flow_pyramid) + int(flow_gt is not None) + int(images is not None) * 2
    fig = plt.figure(figsize=(12, 15 * num_contents))
    fig_id = 1

    if images is not None:
        plt.subplot(1, num_contents, fig_id)
        plt.imshow(images[0])
        plt.tick_params(labelbottom=False, bottom=False)
        plt.tick_params(labelleft=False, left=False)
        plt.xticks([])
        box(False)
        fig_id += 1

        plt.subplot(1, num_contents, num_contents)
        plt.imshow(images[1])
        plt.tick_params(labelbottom=False, bottom=False)
        plt.tick_params(labelleft=False, left=False)
        plt.xticks([])
        box(False)

    for flow in flow_pyramid:
        plt.subplot(1, num_contents, fig_id)
        plt.imshow(vis_flow(flow))
        plt.tick_params(labelbottom=False, bottom=False)
        plt.tick_params(labelleft=False, left=False)
        plt.xticks([])
        box(False)
        fig_id += 1

    if flow_gt is not None:
        plt.subplot(1, num_contents, fig_id)
        plt.imshow(vis_flow(flow_gt))
        plt.tick_params(labelbottom=False, bottom=False)
        plt.tick_params(labelleft=False, left=False)
        plt.xticks([])
        box(False)

    plt.tight_layout()
    plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
    plt.close()
def adv_convergence(width=2e-2, delta=1e-2, relp=1, Nadapt=10, use_adapt=True,
                    problem=3, outname='', use_reform=False, CGorderL=[2, 3],
                    noplot=False, Lx=3.):
    ### SETUP SOLUTION
    sy = Symbol('sy')
    width_ = Symbol('ww')
    if problem == 3:
        stepfunc = 0.5 + 165./104./width_*sy - 20./13./width_**3*sy**3 - 102./13./width_**5*sy**5 + 240./13./width_**7*sy**7
    elif problem == 2:
        stepfunc = 0.5 + 15./8./width_*sy - 5./width_**3*sy**3 + 6./width_**5*sy**5
    elif problem == 1:
        stepfunc = 0.5 + 1.5/width_*sy - 2/width_**3*sy**3
    stepfunc = str(stepfunc).replace('sy', 'x[1]').replace('x[1]**2', '(x[1]*x[1])')
    #REPLACE ** with pow
    stepfunc = stepfunc.replace('x[1]**3', 'pow(x[1],3.)')
    stepfunc = stepfunc.replace('x[1]**5', 'pow(x[1],5.)')
    stepfunc = stepfunc.replace('x[1]**7', 'pow(x[1],7.)')
    testsol = '(-ww/2 < x[1] && x[1] < ww/2 ? ' + stepfunc + ' : 0) + (ww/2<x[1] ? 1 : 0)'
    testsol = testsol.replace('ww**2', '(ww*ww)').replace('ww**3', 'pow(ww,3.)').replace('ww**5', 'pow(ww,5.)').replace('ww**7', 'pow(ww,7.)')
    testsol = testsol.replace('ww', str(width))

    dp = Constant(1.)
    fac = Constant(1 + 2.*delta)
    delta = Constant(delta)

    def left(x, on_boundary):
        return x[0] + Lx/2. < DOLFIN_EPS

    def right(x, on_boundary):
        return x[0] - Lx/2. > -DOLFIN_EPS

    def top_bottom(x, on_boundary):
        return x[1] - 0.5 > -DOLFIN_EPS or x[1] + 0.5 < DOLFIN_EPS

    class Inletbnd(SubDomain):
        def inside(self, x, on_boundary):
            return x[0] + Lx/2. < DOLFIN_EPS

    for CGorder in [2]:  #CGorderL:
        dofs = []
        L2errors = []
        for eta in 0.04*pyexp2(-array(range(9))*pylog(2)/2):
            ### SETUP MESH
            meshsz = int(round(80*0.005/(eta*(bool(use_adapt) == False) + 0.05*(bool(use_adapt) == True))))
            if not bool(use_adapt) and meshsz > 80:
                continue

            mesh = RectangleMesh(-Lx/2., -0.5, Lx/2., 0.5, meshsz, meshsz, "left/right")
            # PERFORM TEN ADAPTATION ITERATIONS
            for iii in range(Nadapt):
                V = VectorFunctionSpace(mesh, "CG", CGorder)
                Q = FunctionSpace(mesh, "CG", CGorder - 1)
                W = V*Q
                (u, p) = TrialFunctions(W)
                (v, q) = TestFunctions(W)
                alpha = Expression("-0.25<x[0] && x[0]<0.25 && 0. < x[1] ? 1e4 : 0")

                boundaries = FacetFunction("size_t", mesh)
                #outletbnd = Outletbnd()
                inletbnd = Inletbnd()
                boundaries.set_all(0)
                #outletbnd.mark(boundaries, 1)
                inletbnd.mark(boundaries, 1)
                ds = Measure("ds")[boundaries]

                bc0 = DirichletBC(W.sub(0), Constant((0., 0.)), top_bottom)
                bc1 = DirichletBC(W.sub(1), dp, left)
                bc2 = DirichletBC(W.sub(1), Constant(0), right)
                bc3 = DirichletBC(W.sub(0).sub(1), Constant(0.), left)
                bc4 = DirichletBC(W.sub(0).sub(1), Constant(0.), right)
                bcs = [bc0, bc1, bc2, bc3, bc4]

                bndterm = dp*dot(v, Constant((-1., 0.)))*ds(1)
                a = eta*inner(grad(u), grad(v))*dx - div(v)*p*dx + q*div(u)*dx + alpha*dot(u, v)*dx  #+bndterm
                L = inner(Constant((0., 0.)), v)*dx - bndterm
                U = Function(W)
                solve(a == L, U, bcs)
                u, ps = U.split()

                #SOLVE CONCENTRATION
                mm = mesh_metric2(mesh)
                vdir = u/sqrt(inner(u, u) + DOLFIN_EPS)
                if iii == 0 or use_reform == False:
                    Q2 = FunctionSpace(mesh, 'CG', 2)
                    c = Function(Q2)
                q = TestFunction(Q2)
                p = TrialFunction(Q2)
                newq = (q + dot(vdir, dot(mm, vdir))*inner(grad(q), vdir))  #SUPG
                if use_reform:
                    F = newq*(fac/((1 + exp(-c))**2)*exp(-c))*inner(grad(c), u)*dx
                    J = derivative(F, c)
                    bc = DirichletBC(Q2, Expression("-log(" + str(float(fac)) + "/(" + testsol + "+" + str(float(delta)) + ")-1)"), left)
                    #bc = DirichletBC(Q, -ln(fac/(Expression(testsol)+delta)-1), left)
                    problem = NonlinearVariationalProblem(F, c, bc, J)
                    solver = NonlinearVariationalSolver(problem)
                    solver.parameters["newton_solver"]["relaxation_parameter"] = relp
                    solver.solve()
                else:
                    a2 = newq*inner(grad(p), u)*dx
                    bc = DirichletBC(Q2, Expression(testsol), left)
                    L2 = Constant(0.)*q*dx
                    solve(a2 == L2, c, bc)

                if (not bool(use_adapt)) or iii == Nadapt - 1:
                    break
                um = project(sqrt(inner(u, u)), FunctionSpace(mesh, 'CG', 2))
                H = metric_pnorm(um, eta, max_edge_ratio=1 + 49*(use_adapt != 2), p=2)
                H2 = metric_pnorm(c, eta, max_edge_ratio=1 + 49*(use_adapt != 2), p=2)
                #H3 = metric_pnorm(ps, eta, max_edge_ratio=1+49*(use_adapt!=2), p=2)
                H4 = metric_ellipse(H, H2)
                #H5 = metric_ellipse(H3,H4,mesh)
                mesh = adapt(H4)
                if use_reform:
                    Q2 = FunctionSpace(mesh, 'CG', 2)
                    c = interpolate(c, Q2)

            if use_reform:
                c = project(fac/(1 + exp(-c)) - delta, FunctionSpace(mesh, 'CG', 2))
            L2error = bnderror(c, Expression(testsol), ds)
            dofs.append(len(c.vector().array()) + len(U.vector().array()))
            L2errors.append(L2error)
            #fid = open("DOFS_L2errors_mesh_c_CG"+str(CGorder)+outname+".mpy",'w')
            #pickle.dump([dofs[0],L2errors[0],c.vector().array().min(),c.vector().array().max()-1,mesh.cells(),mesh.coordinates(),c.vector().array()],fid)
            #fid.close();
            log(INFO + 1,
                "%1dX ADAPT<->SOLVE complete: DOF=%5d, error=%0.0e, min(c)=%0.0e,max(c)-1=%0.0e"
                % (Nadapt, dofs[len(dofs) - 1], L2error, c.vector().array().min(), c.vector().array().max() - 1))

        # PLOT MESH + solution
        figure()
        testf = interpolate(c, FunctionSpace(mesh, 'CG', 1))
        testfe = interpolate(Expression(testsol), FunctionSpace(mesh, 'CG', 1))
        vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG", 1))
        zz = testf.vector().array()[vtx2dof]
        zz[zz == 1] -= 1e-16
        hh = tricontourf(mesh.coordinates()[:, 0], mesh.coordinates()[:, 1], mesh.cells(), zz, 100, cmap=get_cmap('binary'))
        colorbar(hh)
        hold('on')
        triplot(mesh.coordinates()[:, 0], mesh.coordinates()[:, 1], mesh.cells(), color='r', linewidth=0.5)
        hold('off')
        axis('equal')
        box('off')
        #savefig(outname+'final_mesh_CG2.png',dpi=300) #; savefig('outname+final_mesh_CG2.eps',dpi=300)

        #PLOT ERROR
        figure()
        xe = interpolate(Expression("x[0]"), FunctionSpace(mesh, 'CG', 1)).vector().array()
        ye = interpolate(Expression("x[1]"), FunctionSpace(mesh, 'CG', 1)).vector().array()
        I = xe - Lx/2 > -DOLFIN_EPS
        I2 = ye[I].argsort()
        pyplot(ye[I][I2], testf.vector().array()[I][I2] - testfe.vector().array()[I][I2], '-b')
        ylabel('error')

        # PLOT L2error graph
        figure()
        pyloglog(dofs, L2errors, '-b.', linewidth=2, markersize=16)
        xlabel('Degree of freedoms')
        ylabel('L2 error')

        # SAVE SOLUTION
        dofs = array(dofs)
        L2errors = array(L2errors)
        fid = open("DOFS_L2errors_CG" + str(CGorder) + outname + ".mpy", 'w')
        pickle.dump([dofs, L2errors], fid)
        fid.close()

        # #show()
        # #LOAD SAVED SOLUTIONS
        # fid = open("DOFS_L2errors_CG2"+outname+".mpy",'r')
        # [dofs,L2errors] = pickle.load(fid)
        # fid.close()
        #
        # PERFORM FITS ON LAST THREE POINTS
        NfitP = 5
        I = array(range(len(dofs) - NfitP, len(dofs)))
        slope, ints = polyfit(pylog(dofs[I]), pylog(L2errors[I]), 1)
        fid = open("DOFS_L2errors_CG2_fit" + outname + ".mpy", 'w')
        pickle.dump([dofs, L2errors, slope, ints], fid)
        fid.close()

        #PLOT THEM TOGETHER
        if CGorderL != [2]:
            fid = open("DOFS_L2errors_CG3.mpy", 'r')
            [dofs_old, L2errors_old] = pickle.load(fid)
            fid.close()
            slope2, ints2 = polyfit(pylog(dofs_old[I]), pylog(L2errors_old[I]), 1)
            figure()
            pyloglog(dofs, L2errors, '-b.', dofs_old, L2errors_old, '--b.', linewidth=2, markersize=16)
            hold('on')
            pyloglog(dofs, pyexp2(ints)*dofs**slope, '-r', dofs_old, pyexp2(ints2)*dofs_old**slope2, '--r', linewidth=1)
            hold('off')
            xlabel('Degree of freedoms')
            ylabel('L2 error')
            legend(['CG2', 'CG3', "%0.2f*log(DOFs)" % slope, "%0.2f*log(DOFs)" % slope2])  #legend(['new data','old_data'])
            #savefig('comparison.png',dpi=300) #savefig('comparison.eps');

    if not noplot:
        show()
def __call__(self, inputs):
    from pylab import box, gca
    box(self.get_input('on'))
    gca().get_figure().canvas.draw()
    return self.get_input('axes')
def makeOmniPlot(koiList, name, npix, useHardCodedFigSize=False, useHalfBox=2.0, ncols=4,
                 scalebarLabel="", noLabels=False, figWidth=12, useFilts=["J", "Ks"],
                 useInstr=["ARIES", "PHARO"], plotExt=".pdf", plotContourList=False,
                 extraScalebar=False):
    if (npix % ncols) == 0:
        nrows = npix/ncols
    else:
        nrows = npix/ncols + 1
    if ncols > 3:
        fig = pylab.figure(0, figsize=(figWidth, 3.5*nrows))
    elif ncols == 3:
        fig = pylab.figure(0, figsize=(figWidth, 4.25*nrows))
    elif useHardCodedFigSize != False:
        fig = pylab.figure(0, figsize=(useHardCodedFigSize[0], useHardCodedFigSize[1]))
    else:
        fig = pylab.figure(0, figsize=(4, 2.5*nrows))
    ii = 0
    for koi in koiList:
        if koi == "blank":
            print "leaving blank square"
            ii = ii + 1
        elif koi == "scalebar":
            ax = pylab.subplot(nrows, ncols, ii+1)
            pylab.plot([0, 1], [1, 1], color="k", linewidth=2.0)
            pylab.text(0.5, 1.05, scalebarLabel, color="k", fontsize=18, horizontalalignment='center')
            pylab.xlim(xmin=0, xmax=1)
            pylab.ylim(ymin=0, ymax=1.15)
            pylab.box(on=False)
            ax.set_xticks([])
            ax.set_yticks([])
            # pylab.title(scalebarLabel)
            ii = ii + 1
        else:
            for filt in useFilts:
                for instr in useInstr:
                    # print "Looking for object:", ao.koiFilterDir(koi,filt,instr)
                    if os.path.isdir(ao.koiFilterDir(koi, filt, instr)):
                        print koi, instr, filt, ii
                        myKoi = koiPlusFilter.koiPlusFilter(koi, filt, instr)
                        if plotContourList == False:
                            useContours = False
                        else:
                            useContours = plotContourList[ii]
                        if ii == 0:
                            ## only plot the (optional) scale bar on the first image
                            finderPlots.zoomedInSubPlot(myKoi, nrows, ncols, ii+1, useColorMap=colorScheme,
                                                        plotContours=useContours, plotLowerScalebar=extraScalebar,
                                                        scalebarLabel=scalebarLabel)
                        else:
                            finderPlots.zoomedInSubPlot(myKoi, nrows, ncols, ii+1, useColorMap=colorScheme,
                                                        plotContours=useContours, plotLowerScalebar=False)
                        if noLabels == False:
                            if (len(useFilts) == 1) and (len(useInstr) == 1):
                                pylab.title(koi)
                            else:
                                # pylab.title(koi+"\n"+instr+" "+filt)
                                pylab.title(koi+" "+" "+filt+" ("+scalebarLabel+")")
                        ii = ii + 1
    # Finish and export
    pylab.subplots_adjust(wspace=0.1, hspace=0.1)
    binaryOutfile = ao.plotDir+"allBinaries_"+name+plotExt
    if plotExt == ".eps":
        pylab.savefig(binaryOutfile, format='eps')
    else:
        pylab.savefig(binaryOutfile)
    pylab.close()
def skewtlogp(olga, si):
    """ Get directory of script for saving arrays """
    tmpPath = os.path.dirname(os.path.realpath(__file__)) + '/tmp/'
    if(not os.path.isdir(tmpPath)):
        os.mkdir(tmpPath)

    """ Settings for high and low sounding top """
    if(si.stype == 0):
        pbottom = 105000.       # highest pressure in diagram (bottom)
        ptop = 20000.           # lowest pressure in diagram (top)
        ptop_thd = 40000        # top of dry adiabats
        ptop_mxr = 60000        # top of mixing ratio lines
        pbottom_thw = pbottom   # bottom of moist adiabats
        dp = 100.               # pressure interval used in some calculations
        Tleft = -35.            # lowest temperature (@pb) in diagram (left)
        Tright = 35.            # highest temperature (@pb) in diagram (right)
        dp_label = 7000

        isotherms     = np.arange(-100, 30.001, 10)
        isobars       = np.array([1050., 1000., 850., 700., 500., 400., 300., 250., 200., 150., 100., 50]) * 100.
        dryadiabats   = np.arange(-30, 50.001, 10)
        moistadiabats = np.array([28., 24., 20., 16., 12., 8., 4., 0.])
        mixrat        = np.array([20., 12., 8., 5., 3., 2., 1.])
    elif(si.stype == 1):
        pbottom = 105000.       # highest pressure in diagram (bottom)
        ptop = 50000.           # lowest pressure in diagram (top)
        ptop_thd = 70000        # top of dry adiabats
        ptop_mxr = 70000        # top of mixing ratio lines
        pbottom_thw = pbottom   # bottom of moist adiabats
        dp = 100.               # pressure interval used in some calculations
        Tleft = -11.            # lowest temperature (@pb) in diagram (left)
        Tright = 35.            # highest temperature (@pb) in diagram (right)
        dp_label = 2500         # spacing (in pressure coords) of some label placement

        isotherms     = np.arange(-100, 60.001, 10)
        isobars       = np.array([1050., 1000., 900., 850., 700., 600., 500.]) * 100.
        dryadiabats   = np.arange(-10, 30.001, 5)
        moistadiabats = np.array([28., 24., 20., 16., 12., 8., 4., 0.])
        mixrat        = np.array([20., 12., 8., 5., 3., 2., 1.])
    else:
        sys.exit('stype=%i not supported!' % si.stype)

    """ Setup figure """
    if(olga != -1):
        fig = pl.figure(figsize=(olga.fig_width_px / float(olga.fig_dpi), olga.fig_width_px / float(olga.fig_dpi)))
    else:
        fig = pl.figure(figsize=(8, 8))

    #                     L     B    R     T    ws   hs
    fig.subplots_adjust(0.08, 0.10, 1.0, 0.93, 0.2, 0.08)
    pl.subplot(111)

    # Calculate bounds in figure coordinates
    y00 = skewty(pbottom)
    y11 = skewty(ptop)
    x00 = skewtx(Tleft, y00)
    x11 = skewtx(Tright, y00)

    # Spacing for some labels
    hs = np.abs(x11 - x00) / 200.
    vs = np.abs(y11 - y00) / 200.

    """ 1. Create isotherms """
    for T in isotherms:
        y = np.array([skewty(pbottom), skewty(ptop)])
        x = np.array([skewtx(T, y[0]), skewtx(T, y[1])])

        # Check if partially out of bounds
        lloc = 0
        if(x[0] < x00):
            x[0] = x00
            y[0] = iskewtxT(x00, T)
        if(x[1] > x11):
            x[1] = x11
            y[1] = iskewtxT(x11, T)

        if(x[1] >= x00 and y[1] >= y00):
            pl.plot(x, y, color=c2, alpha=a1)
            if(x[0] > x00 and x[0] < x11):
                pl.text(x[0], y[0] - 2*hs, int(T), ha='center', va='top', color=c2)
            else:
                pl.text(x[1] - 5*hs, y[1] - 5*vs, int(T), ha='center', va='center', color=c2, alpha=a2)

    """ 2. Create isobars """
    for p in isobars:
        # Check if out of bounds
        if((p >= ptop) and (p <= pbottom)):
            y = skewty(p)
            pl.plot([x00, x11], [y, y], color=c2, alpha=a1)
            pl.text(x00 - hs, y, int(p / 100.), ha='right', va='center', color=c2)

    """ 3. Create dry adiabats """
    # Try loading the data from the tmp directory. If not available, calculate
    # and save the arrays
    try:
        p = np.load(tmpPath + 'theta_p_%i.arr' % si.stype)
        y = np.load(tmpPath + 'theta_y_%i.arr' % si.stype)
        x = np.load(tmpPath + 'theta_x_%i.arr' % si.stype)
    except:
        p = np.arange(pbottom, (np.max(([ptop, ptop_thd])) - dp), -dp)
        x = np.zeros((dryadiabats.size, p.size))
        y = np.zeros(p.size)

        for k in range(p.size):
            y[k] = skewty(p[k])

        for i in range(dryadiabats.size):
            x[i, :] = 0.
            for k in range(p.size):
                xtmp = skewtx(((dryadiabats[i] + T0) * exner(p[k])) - T0, y[k])
                if(xtmp >= x00 and xtmp <= x11):
                    x[i, k] = xtmp
                else:
                    x[i, k] = -9999

        p.dump(tmpPath + 'theta_p_%i.arr' % si.stype)
        y.dump(tmpPath + 'theta_y_%i.arr' % si.stype)
        x.dump(tmpPath + 'theta_x_%i.arr' % si.stype)

    # Plot the dry adiabats
    for i in range(dryadiabats.size):
        doPlot = np.where(x[i, :] != -9999)[0]
        if(doPlot[0].size > 0):
            lloc = max(0, np.size(x[i, doPlot]) - int(5000. / dp))
            pl.plot(x[i, doPlot], y[doPlot], color=c1)
            pl.text(x[i, doPlot][lloc], y[doPlot][lloc], int(dryadiabats[i]), color=c1, ha='center', va='center', backgroundcolor='w')

    """ 4. Create moist adiabats """
    # Try loading the data from the tmp directory. If not available, calculate
    # and save the arrays
    try:
        p = np.load(tmpPath + 'thetam_p_%i.arr' % si.stype)
        y = np.load(tmpPath + 'thetam_y_%i.arr' % si.stype)
        x = np.load(tmpPath + 'thetam_x_%i.arr' % si.stype)
    except:
        p = np.arange(np.min(([pbottom, pbottom_thw])), ptop - dp, -dp)
        x = np.zeros((moistadiabats.size, p.size))
        y = np.zeros(p.size)

        for k in range(p.size):
            y[k] = skewty(p[k])

        for i in range(moistadiabats.size):
            for k in range(p.size):
                thw = dsatlftskewt(moistadiabats[i], p[k])
                xtmp = skewtx(thw, y[k])
                if(xtmp >= x00 and xtmp <= x11):
                    x[i, k] = xtmp
                else:
                    x[i, k] = -9999

        p.dump(tmpPath + 'thetam_p_%i.arr' % si.stype)
        y.dump(tmpPath + 'thetam_y_%i.arr' % si.stype)
        x.dump(tmpPath + 'thetam_x_%i.arr' % si.stype)

    # Plot the moist adiabats
    for i in range(moistadiabats.size):
        doPlot = np.where(x[i, :] != -9999)[0]
        if(doPlot[0].size > 0):
            lloc = max(0, np.size(x[i, doPlot]) - int(8000. / dp))
            pl.plot(x[i, doPlot], y[doPlot], '--', color=c3)
            pl.text(x[i, doPlot][lloc], y[doPlot][lloc], int(moistadiabats[i]), color=c3, ha='center', va='center', backgroundcolor='w')

    """ 5. Create isohumes / mixing ratio lines """
    # Try loading the data from the tmp directory. If not available, calculate
    # and save the arrays
    try:
        p = np.load(tmpPath + 'mixr_p_%i.arr' % si.stype)
        y = np.load(tmpPath + 'mixr_y_%i.arr' % si.stype)
        x = np.load(tmpPath + 'mixr_x_%i.arr' % si.stype)
    except:
        p = np.arange(pbottom, (np.max(([ptop, ptop_mxr])) - dp), -dp)
        x = np.zeros((mixrat.size, p.size))
        y = np.zeros(p.size)

        for k in range(p.size):
            y[k] = skewty(p[k])

        for i in range(mixrat.size):
            for k in range(p.size):
                mix = Td(mixrat[i] / 1000., p[k]) - T0
                xtmp = skewtx(mix, y[k])
                if(xtmp >= x00 and xtmp <= x11):
                    x[i, k] = xtmp
                else:
                    x[i, k] = -9999

        p.dump(tmpPath + 'mixr_p_%i.arr' % si.stype)
        y.dump(tmpPath + 'mixr_y_%i.arr' % si.stype)
        x.dump(tmpPath + 'mixr_x_%i.arr' % si.stype)

    # Plot the mixing ratio lines
    for i in range(mixrat.size):
        doPlot = np.where(x[i, :] != -9999)[0]
        if(doPlot[0].size > 0):
            pl.plot(x[i, doPlot], y[doPlot], color=c5, dashes=[3, 2])
            pl.text(x[i, doPlot][-1], y[doPlot][-1] + vs, int(mixrat[i]), color=c5, ha='center', va='bottom', backgroundcolor='w')

    """ 6. Add sounding data """
    # 6.1 Temperature and dewpoint temperature
    if(si.T.size > 0):
        y = skewty(si.p)
        x1 = skewtx(si.T - T0, y)
        x2 = skewtx(si.Td - T0, y)

        pl.plot(x1, y, '-', color=cT, linewidth=2)
        pl.plot(x2, y, '-', color=cTd, linewidth=2)

    # 6.2 Add height labels to axis
    if(si.z.size > 0 and si.z.size == si.p.size):
        y = skewty(si.p)
        p_last = 1e9
        for k in range(si.z.size):
            if(y[k] <= y11 and np.abs(si.p[k] - p_last) > dp_label):
                pl.text(x11 + hs, y[k], str(int(si.z[k])) + 'm', color=c2, ha='right', va='center', size=7, backgroundcolor='w')
                p_last = si.p[k]

    # 6.3 Wind barbs
    if(si.u.size > 0):
        y = skewty(si.p)
        u = si.u * 1.95
        v = si.v * 1.95
        xb = x11 + 9*hs
        p_last = 1e9
        for k in range(si.z.size):
            if(y[k] <= y11 and np.abs(si.p[k] - p_last) > dp_label):
                pl.barbs(xb, y[k], u[k], v[k], length=5.2, linewidth=0.5, pivot='middle')
                p_last = si.p[k]

    """ 6.4 Try to add measured data """
    # Temperature and dewpoint temperature
    if(si.Tm.size > 0):
        y = skewty(si.pm)
        x1 = skewtx(si.Tm, y)
        x2 = skewtx(si.Tdm, y)

        pl.plot(x1, y, '--', color=cT, linewidth=1)
        pl.plot(x2, y, '--', color=cTd, linewidth=1)

    """ 7. Launch parcel """
    if(si.parcel == True):
        # Plot starting position:
        p0s = skewty(si.ps)
        T0s = skewtx(si.Ts - T0, p0s)
        Td0s = skewtx(Td(si.rs, si.ps) - T0, p0s)
        pl.scatter(T0s, p0s, facecolor='none')
        pl.scatter(Td0s, p0s, facecolor='none')

        # Lists to hold parcel pressure, temp and dewpoint temp during ascent
        pp = [si.ps]
        Tp = [si.Ts]
        Tdp = [Td(si.rs, si.ps)]

        # Launch parcel
        n = 0
        while(Tp[-1] > Tdp[-1] and n < 1000):
            n += 1
            dp2 = max(1, (Tp[-1] - Tdp[-1]) * 300)  # bit weird, but fast
            pp.append(pp[-1] - dp2)
            Tp.append(si.Ts * exner(pp[-1], si.ps))
            Tdp.append(Td(si.rs, pp[-1]))

        # Plot lines from surface --> LCL
        ps = skewty(np.array(pp))
        Tps = skewtx(np.array(Tp) - T0, ps)
        Tdps = skewtx(np.array(Tdp) - T0, ps)
        pl.plot(Tps, ps, 'k', linewidth=1.5, dashes=[4, 2])
        pl.plot(Tdps, ps, 'k', linewidth=1.5, dashes=[4, 2])
        pl.scatter(Tps[-1], ps[-1], facecolor='none')

        # Iteratively find the moist adiabat starting at p0, which goes through the temperature at LCL
        Ths = si.Ts / exner(si.ps, p0)                  # potential temperature surface (@p0)
        ThwsLCL = dsatlftskewt(Ths - T0, pp[-1]) + T0   # Temp moist adiabat at plcl, through Ths
        thw0 = Ths - (ThwsLCL - Tp[-1])                 # First estimate of moist adiabat passing through Tlcl
        ThwLCL = dsatlftskewt(thw0 - T0, pp[-1]) + T0
        while(np.abs(ThwLCL - Tp[-1]) > 0.1):
            thw0 -= ThwLCL - Tp[-1]
            ThwLCL = dsatlftskewt(thw0 - T0, pp[-1]) + T0

        # Plot moist adiabat from LCL upwards
        p = np.arange(pp[-1], ptop, -dp)
        x = np.zeros(p.size)
        y = np.zeros(p.size)
        for k in range(p.size):
            thw = dsatlftskewt(thw0 - T0, p[k])
            y[k] = skewty(p[k])
            x[k] = skewtx(thw, y[k])
        pl.plot(x, y, 'k', linewidth=1.5, dashes=[4, 2])

    """ Add info from TEMF boundary layer scheme """
    if(si.Tu.size > 0):
        # Cloud fraction
        dw = (x11 - x00)  # width of diagram
        y = skewty(si.p)
        x = x00 + si.cfru * 0.2 * (x11 - x00)
        pl.plot(x, y, '-', linewidth=1.5, color=c6)  # cloud cover

        cfr_pos = np.where(si.cfru > 0.001)[0]
        if(np.size(cfr_pos) > 1):
            pl.text(x00, y[cfr_pos[0] - 1], '- %im' % (si.z[cfr_pos[0] - 1]), ha='left', va='center', size=8, color=c6)
            pl.text(x00, y[cfr_pos[-1]], '- %im' % (si.z[cfr_pos[-1]]), ha='left', va='center', size=8, color=c6)
            kmax = np.where(si.cfru == si.cfru.max())[0][0]
            pl.text(x.max(), y[kmax], '- %i%%' % (si.cfru[kmax] * 100.), ha='left', va='center', size=8, color=c6)

    """ 8. Finish diagram """
    pl.xticks([])
    pl.yticks([])
    pl.box('off')
    pl.xlim(x00 - 0.1, x11 + 16*hs)
    pl.ylim(y00 - 0.1, y11 + 0.1)

    # draw axis
    pl.plot([x00, x11], [y00, y00], 'k-')
    pl.plot([x00, x00], [y00, y11], 'k-')

    pl.figtext(0.5, 0.05, 'Temperature (C)', ha='center')
    pl.figtext(0.015, 0.52, 'Pressure (hPa)', rotation=90)
    label = 'Skew-T log-P, %s, %s UTC' % (si.name, si.time)
    pl.figtext(0.5, 0.97, label, ha='center')

    if(olga != -1):
        img = image.imread(olga.olgaRoot + 'include/olga_left.png')
        pl.figimage(img, 7, 5)
        #pl.figimage(img,10,olga.fig_width_px-45)
        pl.figtext(0.99, 0.011, '%s' % olga.map_desc[0], size=7, ha='right')

    return fig
def minimal_example(width=2e-2, Nadapt=10, eta=0.01):
    ### CONSTANTS
    meshsz = 40
    hd = Constant(width)
    ### SETUP MESH
    mesh = RectangleMesh(Point(-0.5, -0.5), Point(0.5, 0.5), 1*meshsz, 1*meshsz, "left/right")
    ### DERIVE FORCING TERM
    angle = pi/8  #rand*pi/2
    sx = Symbol('sx')
    sy = Symbol('sy')
    width_ = Symbol('ww')
    aa = Symbol('aa')
    testsol = pytanh((sx*pycos(aa) + sy*pysin(aa))/width_)
    ddtestsol = str(diff(testsol, sx, sx) + diff(testsol, sy, sy)).replace('sx', 'x[0]').replace('sy', 'x[1]')
    #replace ** with pow
    ddtestsol = ddtestsol.replace('tanh((x[0]*sin(aa) + x[1]*cos(aa))/ww)**2',
                                  'pow(tanh((x[0]*sin(aa) + x[1]*cos(aa))/ww),2.)')
    ddtestsol = ddtestsol.replace('cos(aa)**2', 'pow(cos(aa),2.)').replace('sin(aa)**2', 'pow(sin(aa),2.)').replace('ww**2', '(ww*ww)')
    #insert values
    ddtestsol = ddtestsol.replace('aa', str(angle)).replace('ww', str(width))
    testsol = str(testsol).replace('sx', 'x[0]').replace('sy', 'x[1]').replace('aa', str(angle)).replace('ww', str(width))
    ddtestsol = "-(" + ddtestsol + ")"

    def boundary(x):
        return x[0]-mesh.coordinates()[:,0].min() < DOLFIN_EPS or mesh.coordinates()[:,0].max()-x[0] < DOLFIN_EPS \
            or mesh.coordinates()[:,1].min()+0.5 < DOLFIN_EPS or mesh.coordinates()[:,1].max()-x[1] < DOLFIN_EPS

    # PERFORM TEN ADAPTATION ITERATIONS
    for iii in range(Nadapt):
        V = FunctionSpace(mesh, "CG", 2)
        dis = TrialFunction(V)
        dus = TestFunction(V)
        u = Function(V)
        a = inner(grad(dis), grad(dus))*dx
        L = Expression(ddtestsol)*dus*dx
        bc = DirichletBC(V, Expression(testsol), boundary)
        solve(a == L, u, bc)
        startTime = time()
        H = metric_pnorm(u, eta, max_edge_length=3., max_edge_ratio=None)
        H = logproject(H)
        if iii != Nadapt - 1:
            mesh = adapt(H)
        L2error = errornorm(Expression(testsol), u, degree_rise=4, norm_type='L2')
        log(INFO + 1,
            "total (adapt+metric) time was %0.1fs, L2error=%0.0e, nodes: %0.0f"
            % (time() - startTime, L2error, mesh.num_vertices()))

    # # PLOT MESH
    # figure()
    coords = mesh.coordinates().transpose()
    # triplot(coords[0],coords[1],mesh.cells(),linewidth=0.1)
    # #savefig('mesh.png',dpi=300) #savefig('mesh.eps');

    figure()  #solution
    testf = interpolate(Expression(testsol), FunctionSpace(mesh, 'CG', 1))
    vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG", 1))
    zz = testf.vector().array()[vtx2dof]
    hh = tricontourf(coords[0], coords[1], mesh.cells(), zz, 100)
    colorbar(hh)
    #savefig('solution.png',dpi=300) #savefig('solution.eps');

    figure()  #analytical solution
    testfe = interpolate(u, FunctionSpace(mesh, 'CG', 1))
    zz = testfe.vector().array()[vtx2dof]
    hh = tricontourf(coords[0], coords[1], mesh.cells(), zz, 100)
    colorbar(hh)
    #savefig('analyt.png',dpi=300) #savefig('analyt.eps');

    figure()  #error
    zz -= testf.vector().array()[vtx2dof]
    zz[zz == 1] -= 1e-16
    hh = tricontourf(mesh.coordinates()[:, 0], mesh.coordinates()[:, 1], mesh.cells(), zz, 100, cmap=get_cmap('binary'))
    colorbar(hh)
    hold('on')
    triplot(mesh.coordinates()[:, 0], mesh.coordinates()[:, 1], mesh.cells(), color='r', linewidth=0.5)
    hold('off')
    axis('equal')
    box('off')
    title('error')
    show()
pylab.hold('on')
# xi = np.linspace(x.min(), x.max(), 10)
xi = np.linspace(lim[0], lim[1], 10)
yi = slope * xi + intercept
pylab.plot(xi, yi, 'k-')
pylab.title(titles[i])
pylab.text(xi.mean() * 1.05, yi.mean() * 0.6,
           '$R^2$ = ' + str.format('{0:.2f}', np.round(r**2, 2)),
           fontsize=fontSize + 1)
# pylab.xlabel(r'MODIS $\rho_t$')
# pylab.ylabel(r'DMC $\rho_t$')
pylab.xlabel(r'MODIS surface reflectance')
pylab.ylabel(r'DMC surface reflectance')
pylab.grid('off')
pylab.box('on')
# for j in range(asterFnF.__len__()):
#     pylab.text(x[j]+.2, y[j], asterFnF[j])
pylab.tight_layout()
pylab.axis(lim + lim)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
pyplot.locator_params(
    axis='y',
def main():
    usage = "Usage: %prog [options] <images>\n"
    parser = OptionParser(usage=usage)
    parser.add_option('-o', '--out', default='mosaic', dest='outroot',
                      help='Root name for output')
    parser.add_option('-t', '--threshold', dest='threshold', default=0.05, type='float',
                      help='Beam threshold [default=%default]')
    parser.add_option('-v', '--verbose', action="store_true", dest="verbose", default=False,
                      help="Increase verbosity of output")
    (options, args) = parser.parse_args()
    images = args
    newbeam = [re.sub('_regrid(\S+).fits', '_beamI_regrid\\1.fits', i) for i in images]

    ff0 = pyfits.open('MWATS_201404_I_MOL.fits')
    mask = ff0[1].data <= 2

    f = pyfits.open(images[0])
    D = numpy.zeros(f[0].data.shape)
    W = numpy.zeros(f[0].data.shape)
    N = numpy.zeros(f[0].data.shape)

    pylab.clf()
    f = pylab.gcf()
    f.set_figwidth(12)
    f.set_figheight(6)

    for i in xrange(len(images)):
        #imagelist=' '.join(images[:i+1])
        #command='python /localhome/kaplan/python/mosaic_images.py -i %s -t %f -o %s_%03d.fits %s' % (
        #    images[0],
        #    options.threshold,
        #    options.outroot,
        #    i,
        #    imagelist)
        #print command
        fi = pyfits.open(images[i])
        try:
            fb = pyfits.open(newbeam[i])
        except:
            #newbeam[i]=newbeam[i].replace('-v_','-i_')
            newbeam[i] = newbeam[i].replace(Vstring, Istring)
            fb = pyfits.open(newbeam[i])
        beam = (fb[0].data)
        beam[0, 0][numpy.isnan(fi[0].data[0, 0])] = 0
        beam[0, 0][beam[0, 0] <= options.threshold * beam[0, 0].max()] = 0
        fi[0].data[0, 0][numpy.isnan(fi[0].data[0, 0])] = 0
        D[0, 0] += fi[0].data[0, 0] * beam[0, 0]**2
        W[0, 0] += beam[0, 0]**2
        N[0, 0] += beam[0, 0]
        DD = D / W
        DD[0, 0][W[0, 0] == 0] = numpy.nan
        DD[mask] = numpy.nan
        fi[0].data = DD
        if os.path.exists('temp.fits'):
            os.remove('temp.fits')
        fi.writeto('temp.fits')
        w = wcs.WCS(fi[0].header, naxis=(1, 2))

        f.clf()
        gc = aplpy.FITSFigure('temp.fits', figure=f)
        gc.show_colorscale(vmin=-0.1, vmax=0.1, cmap=pylab.cm.copper)
        gc.recenter(14 * 15, -22.5, width=160, height=95)
        gc.axis_labels.hide_x()
        gc.axis_labels.hide_y()
        gc.tick_labels.hide()
        gc.add_label(0.1, 0.9, '%03d' % i, relative=True, size=16)

        ra = numpy.linspace(8, 20, 50) * 15
        for dec in xrange(-80, 30, 10):
            x, y = w.wcs_world2pix(ra, ra * 0 + dec, 0)
            pylab.plot(x, y, 'k')
            if dec < 20 and dec > -80:
                gc.add_label(15 * 16, dec, '$\\delta=%d^\\circ$' % dec, size=12,
                             verticalalignment='bottom')
        dec = numpy.linspace(-80, 20, 50)
        for ra in xrange(8, 22, 2):
            ra_show = ra
            if ra_show < 0:
                ra_show += 24
            x, y = w.wcs_world2pix(0 * dec + ra_show * 15, dec, 0)
            pylab.plot(x, y, 'k')
            gc.add_label(ra * 15, -45, '$\\alpha=%d^h$' % ra_show, size=12,
                         verticalalignment='bottom', horizontalalignment='left')

        ax = pylab.gca()
        ax.axis('off')
        pylab.box('off')
        pylab.savefig('MWATS_201404_I_MOL_%03d.png' % i)
        print 'MWATS_201404_I_MOL_%03d.png' % i
        if os.path.exists('temp.fits'):
            os.remove('temp.fits')
def plotEvent3(clump, rawData=None, fitRes=None):
    from scipy import ndimage
    dt = 18.27

    pl.figure()
    pl.subplot(211)
    c = clump

    if not fitRes is None:
        t0 = fitRes['t0']
    else:
        t0 = 0

    i1 = abs(c['fitError_Ag']) < 500
    t = c['t'][i1]
    I = c['Ag'][i1]
    t = np.hstack([t[0] - 1, t])
    I = np.hstack([0, I])

    pl.plot((t - t0) * dt, ndimage.median_filter(I, 3), lw=4)
    pl.plot([0, 0], pl.ylim(), 'k:')
    pl.yticks([])
    pl.xlim(-20 * dt, 100 * dt)
    pl.xticks([])
    #pl.axes()
    pl.box()

    pl.subplot(212)
    i2 = (abs(c['fitError_Ar'] / c['fitResults_Ar']) < 2) * (c['fitResults_sigmag'] > 100)
    pl.plot((c['t'][i2] - t0) * dt, ndimage.median_filter(c['Ar'][i2], 3), 'r', lw=4)
    pl.xlim(-20 * dt, 100 * dt)
    pl.yticks([])
    pl.plot([0, 0], pl.ylim(), 'k:')

    ysb = .2 * np.mean(pl.ylim())
    pl.plot([1500, 1700], [ysb, ysb], 'k', lw=8)
    pl.text(1500, ysb + .6 * ysb, '200 ms')
    pl.xticks([])
    #pl.axes()
    pl.box()

    pl.figure()
    tvals = np.array([-20, -5, 1, 5, 10, 20, 50, 100]) + int(t0)

    vs = 1e3 * clump.pipeline.mdh['voxelsize.x']
    xp = int(clump['x'].mean() / vs)
    yp = int(clump['y'].mean() / vs)

    xp1 = int((clump['x'].mean() - clump.pipeline.mdh['chroma.dx'](0, 0)) / vs)
    yp1 = int((clump['y'].mean() - clump.pipeline.mdh['chroma.dy'](0, 0)) / vs) + 256

    lMax = 1.0 * rawData[max(xp - 10, 0):(xp + 10), max(yp - 10, 0):(yp + 10), tvals[3]].max()
    lMin = 1.0 * rawData[max(xp - 10, 0):(xp + 10), max(yp - 10, 0):(yp + 10), tvals[0]].min()

    cMax = 1.0 * rawData[max(xp1 - 10, 0):(xp1 + 10), max(yp1 - 10, 0):(yp1 + 10), tvals[4]].max()
    cMin = 1.0 * rawData[max(xp1 - 10, 0):(xp1 + 10), max(yp1 - 10, 0):(yp1 + 10), tvals[0]].min()

    for i, t_i in enumerate(tvals):
        pl.subplot(2, 8, i + 1)
        frame = rawData[max(xp - 10, 0):(xp + 10), max(yp - 10, 0):(yp + 10), t_i].squeeze()
        #pl.imshow(frame, interpolation='nearest', cmap='hot', clim=(lMin, lMax))
        fr = 1.0 - np.clip(((frame - lMin) / float(lMax - lMin)), 0, 1)[:, :, None] * np.array([1, 1, .5])[None, None, :]
        pl.imshow(fr, interpolation='nearest')
        pl.xticks([])
        pl.yticks([])

        pl.subplot(2, 8, i + 8 + 1)
        frame = rawData[max(xp1 - 10, 0):(xp1 + 10), max(yp1 - 10, 0):(yp1 + 10), t_i].squeeze()
        #pl.imshow(frame, interpolation='nearest', cmap='hot', clim=(cMin, cMax))
        fr = 1.0 - np.clip(((frame - cMin) / float(cMax - cMin)), 0, 1)[:, :, None] * np.array([.5, 1, 1])[None, None, :]
        pl.imshow(fr, interpolation='nearest')
        pl.xticks([])
        pl.yticks([])

    pl.tight_layout(pad=.5)
def skewtlogp(olga, si): """ Get directory of script for saving arrays """ tmpPath = os.path.dirname(os.path.realpath(__file__))+'/tmp/' if(not os.path.isdir(tmpPath)): os.mkdir(tmpPath) """ Settings for high and low sounding top """ if(si.stype==0): pbottom = 105000. # highest pressure in diagram (bottom) ptop = 20000. # lowest pressue in diagram (top) ptop_thd = 40000 # top of dry adiabats ptop_mxr = 60000 # top of mixing ratio lines pbottom_thw = pbottom # bottom of moist adiabats dp = 100. # pressure interval used in some calculations Tleft = -35. # lowest temperature (@pb) in diagram (left) Tright = 35. # highest temperature (@pb) in diagram (right) dp_label = 7000 isotherms = np.arange(-100,30.001,10) isobars = np.array([1050.,1000.,850.,700.,500.,400.,300.,250.,200.,150.,100.,50])*100. dryadiabats = np.arange(-30,50.001,10) moistadiabats = np.array([28.,24.,20.,16.,12.,8.,4.,0.]) mixrat = np.array([20.,12.,8.,5.,3.,2.,1.]) elif(si.stype==1): pbottom = 105000. # highest pressure in diagram (bottom) ptop = 50000. # lowest pressue in diagram (top) ptop_thd = 70000 # top of dry adiabats ptop_mxr = 70000 # top of mixing ratio lines pbottom_thw = pbottom # bottom of moist adiabats dp = 100. # pressure interval used in some calculations Tleft = -11. # lowest temperature (@pb) in diagram (left) Tright = 35. # highest temperature (@pb) in diagram (right) dp_label = 2500 # spacing (in pressure coords) of some label placement isotherms = np.arange(-100,60.001,10) isobars = np.array([1050.,1000.,900.,850.,700.,600.,500.])*100. dryadiabats = np.arange(-10,30.001,5) moistadiabats = np.array([28.,24.,20.,16.,12.,8.,4.,0.]) mixrat = np.array([20.,12.,8.,5.,3.,2.,1.]) else: sys.exit('stype=%i not supported!'%stype) """ Setup figure """ if(olga != -1): fig = pl.figure(figsize=(olga.fig_width_px/float(olga.fig_dpi), olga.fig_width_px/float(olga.fig_dpi))) else: fig = pl.figure(figsize=(8,8)) # L B R T ws hs fig.subplots_adjust(0.08,0.10,1.0,0.93,0.2,0.08) pl.subplot(111) # Calculate bounds in figure coordinates y00 = skewty(pbottom) y11 = skewty(ptop) x00 = skewtx(Tleft,y00) x11 = skewtx(Tright,y00) # Spacing for some labels hs = np.abs(x11-x00)/200. vs = np.abs(y11-y00)/200. """ 1. Create isotherms """ for T in isotherms: y = np.array([skewty(pbottom),skewty(ptop)]) x = np.array([skewtx(T,y[0]),skewtx(T,y[1])]) # Check if partially out of bounds lloc = 0 if(x[0] < x00): x[0] = x00 y[0] = iskewtxT(x00,T) if(x[1] > x11): x[1] = x11 y[1] = iskewtxT(x11,T) if(x[1] >= x00 and y[1] >= y00): pl.plot(x,y,color=c2,alpha=a1) if(x[0] > x00 and x[0] < x11): pl.text(x[0],y[0]-2*hs,int(T),ha='center',va='top',color=c2) else: pl.text(x[1]-5*hs,y[1]-5*vs,int(T),ha='center',va='center',color=c2,alpha=a2) """ 2. Create isobars """ for p in isobars: # Check if out of bounds if((p >= ptop) and (p <= pbottom)): y = skewty(p) pl.plot([x00,x11],[y,y],color=c2,alpha=a1) pl.text(x00-hs,y,int(p/100.),ha='right',va='center',color=c2) """ 3. Create dry adiabats """ # Try loading the data from the tmp directory. If not available, calculate # and save the arrays try: p = np.load(tmpPath+'theta_p_%i.arr'%si.stype) y = np.load(tmpPath+'theta_y_%i.arr'%si.stype) x = np.load(tmpPath+'theta_x_%i.arr'%si.stype) except: p = np.arange(pbottom,(np.max(([ptop,ptop_thd]))-dp),-dp) x = np.zeros((dryadiabats.size, p.size)) y = np.zeros(p.size) for k in range(p.size): y[k] = skewty(p[k]) for i in range(dryadiabats.size): x[i,:] = 0. 
for k in range(p.size): xtmp = skewtx(((dryadiabats[i]+T0) * exner(p[k]))-T0, y[k]) if(xtmp >= x00 and xtmp <= x11): x[i,k] = xtmp else: x[i,k] = -9999 p.dump(tmpPath+'theta_p_%i.arr'%si.stype) y.dump(tmpPath+'theta_y_%i.arr'%si.stype) x.dump(tmpPath+'theta_x_%i.arr'%si.stype) # Plot the dry adiabats for i in range(dryadiabats.size): doPlot = np.where(x[i,:] != -9999)[0] if(doPlot[0].size > 0): lloc = max(0,np.size(x[i,doPlot])-int(5000./dp)) pl.plot(x[i,doPlot],y[doPlot],color=c1) pl.text(x[i,doPlot][lloc],y[doPlot][lloc],int(dryadiabats[i]),color=c1,ha='center',va='center',backgroundcolor='w') """ 4. Create moist adiabats """ # Try loading the data from the tmp directory. If not available, calculate # and save the arrays try: p = np.load(tmpPath+'thetam_p_%i.arr'%si.stype) y = np.load(tmpPath+'thetam_y_%i.arr'%si.stype) x = np.load(tmpPath+'thetam_x_%i.arr'%si.stype) except: p = np.arange(np.min(([pbottom,pbottom_thw])),ptop-dp,-dp) x = np.zeros((moistadiabats.size, p.size)) y = np.zeros(p.size) for k in range(p.size): y[k] = skewty(p[k]) for i in range(moistadiabats.size): for k in range(p.size): thw = dsatlftskewt(moistadiabats[i], p[k]) xtmp = skewtx(thw, y[k]) if(xtmp >= x00 and xtmp <= x11): x[i,k] = xtmp else: x[i,k] = -9999 p.dump(tmpPath+'thetam_p_%i.arr'%si.stype) y.dump(tmpPath+'thetam_y_%i.arr'%si.stype) x.dump(tmpPath+'thetam_x_%i.arr'%si.stype) # Plot the moist adiabats for i in range(moistadiabats.size): doPlot = np.where(x[i,:] != -9999)[0] if(doPlot[0].size > 0): lloc = max(0,np.size(x[i,doPlot])-int(8000./dp)) pl.plot(x[i,doPlot],y[doPlot],'--',color=c3) pl.text(x[i,doPlot][lloc],y[doPlot][lloc],int(moistadiabats[i]),color=c3,ha='center',va='center',backgroundcolor='w') """ 5. Create isohumes / mixing ratio lines """ # Try loading the data from the tmp directory. If not available, calculate # and save the arrays try: p = np.load(tmpPath+'mixr_p_%i.arr'%si.stype) y = np.load(tmpPath+'mixr_y_%i.arr'%si.stype) x = np.load(tmpPath+'mixr_x_%i.arr'%si.stype) except: p = np.arange(pbottom,(np.max(([ptop,ptop_mxr]))-dp),-dp) x = np.zeros((mixrat.size, p.size)) y = np.zeros(p.size) for k in range(p.size): y[k] = skewty(p[k]) for i in range(mixrat.size): for k in range(p.size): mix = Td(mixrat[i]/1000.,p[k])-T0 xtmp = skewtx(mix,y[k]) if(xtmp >= x00 and xtmp <= x11): x[i,k] = xtmp else: x[i,k] = -9999 p.dump(tmpPath+'mixr_p_%i.arr'%si.stype) y.dump(tmpPath+'mixr_y_%i.arr'%si.stype) x.dump(tmpPath+'mixr_x_%i.arr'%si.stype) # Plot the mixing ratio lines for i in range(mixrat.size): doPlot = np.where(x[i,:] != -9999)[0] if(doPlot[0].size > 0): pl.plot(x[i,doPlot],y[doPlot],color=c5,dashes=[3,2]) pl.text(x[i,doPlot][-1],y[doPlot][-1]+vs,int(mixrat[i]),color=c5,ha='center',va='bottom',backgroundcolor='w') """ 6. 
Add sounding data """ # 6.1 Temperature and dewpoint temperature if(si.T.size > 0): y = skewty(si.p) x1 = skewtx(si.T-T0,y) x2 = skewtx(si.Td-T0,y) pl.plot(x1,y,'-',color=cT,linewidth=2) pl.plot(x2,y,'-',color=cTd,linewidth=2) # 6.2 Add height labels to axis if(si.z.size > 0 and si.z.size==si.p.size): y = skewty(si.p) p_last = 1e9 for k in range(si.z.size): if(y[k] <= y11 and np.abs(si.p[k]-p_last) > dp_label): pl.text(x11+hs,y[k],str(int(si.z[k]))+'m',color=c2,ha='right',va='center',size=7,backgroundcolor='w') p_last = si.p[k] # 6.2 Wind barbs if(si.u.size > 0): y = skewty(si.p) u = si.u * 1.95 v = si.v * 1.95 xb = x11 + 9*hs p_last = 1e9 for k in range(si.z.size): if(y[k] <= y11 and np.abs(si.p[k]-p_last) > dp_label): pl.barbs(xb,y[k],u[k],v[k],length=5.2,linewidth=0.5,pivot='middle') p_last = si.p[k] """ 6.1 :) Try to add measured data """ # 6.1 Temperature and dewpoint temperature if(si.Tm.size > 0): y = skewty(si.pm) x1 = skewtx(si.Tm,y) x2 = skewtx(si.Tdm,y) pl.plot(x1,y,'--',color=cT,linewidth=1) pl.plot(x2,y,'--',color=cTd,linewidth=1) """ 7. Lauch parcel """ if(si.parcel == True): # Plot starting position: p0s = skewty(si.ps) T0s = skewtx(si.Ts-T0, p0s) Td0s = skewtx(Td(si.rs,si.ps)-T0, p0s) pl.scatter(T0s, p0s, facecolor='none') pl.scatter(Td0s,p0s, facecolor='none') # Lists to hold parcel pressure, temp and dewpoint temp during ascent pp = [si.ps] Tp = [si.Ts] Tdp = [Td(si.rs, si.ps)] # Launch parcel n = 0 while(Tp[-1] > Tdp[-1] and n<1000): n += 1 dp2 = max(1,(Tp[-1] - Tdp[-1])*300) # bit weird, but fast pp. append(pp[-1] - dp2) Tp. append(si.Ts*exner(pp[-1], si.ps)) Tdp.append(Td(si.rs, pp[-1])) # Plot lines from surface --> LCL ps = skewty(np.array(pp)) Tps = skewtx(np.array(Tp)-T0, ps) Tdps = skewtx(np.array(Tdp)-T0, ps) pl.plot(Tps, ps, 'k', linewidth=1.5, dashes=[4,2]) pl.plot(Tdps, ps, 'k', linewidth=1.5, dashes=[4,2]) pl.scatter(Tps[-1], ps[-1], facecolor='none') # Iteratively find the moist adiabat starting at p0, which goes through the temperature at LCL Ths = si.Ts / exner(si.ps, p0) # potential temperature surface (@p0) ThwsLCL = dsatlftskewt(Ths-T0, pp[-1])+T0 # Temp moist adiabat at plcl, through Ths thw0 = Ths - (ThwsLCL - Tp[-1]) # First estimate of moist adiabat passing through Tlcl ThwLCL = dsatlftskewt(thw0-T0, pp[-1])+T0 while(np.abs(ThwLCL-Tp[-1]) > 0.1): thw0 -= ThwLCL-Tp[-1] ThwLCL = dsatlftskewt(thw0-T0, pp[-1])+T0 # Plot moist adiabat from LCL upwards p = np.arange(pp[-1], ptop, -dp) x = np.zeros(p.size) y = np.zeros(p.size) for k in range(p.size): thw = dsatlftskewt(thw0-T0,p[k]) y[k] = skewty(p[k]) x[k] = skewtx(thw,y[k]) pl.plot(x,y,'k', linewidth=1.5, dashes=[4,2]) """ Add info from TEMF boundary layer scheme """ if(si.Tu.size > 0): # Cloud fraction dw = (x11-x00) # width of diagram y = skewty(si.p) x = x00 + si.cfru * 0.2 * (x11-x00) pl.plot(x,y,'-',linewidth=1.5,color=c6) # cloud cover cfr_pos = np.where(si.cfru > 0.001)[0] if(np.size(cfr_pos)>1): pl.text(x00,y[cfr_pos[0]-1],'- %im'%(si.z[cfr_pos[0]-1]),ha='left',va='center',size=8,color=c6) pl.text(x00,y[cfr_pos[-1]],'- %im'%(si.z[cfr_pos[-1]]),ha='left',va='center',size=8,color=c6) kmax=np.where(si.cfru == si.cfru.max())[0][0] pl.text(x.max(),y[kmax],'- %i%%'%(si.cfru[kmax]*100.),ha='left',va='center',size=8,color=c6) """ 6. 
Finish diagram """ pl.xticks([]) pl.yticks([]) pl.box('off') pl.xlim(x00-0.1,x11+16*hs) pl.ylim(y00-0.1,y11+0.1) # draw axis pl.plot([x00,x11],[y00,y00],'k-') pl.plot([x00,x00],[y00,y11],'k-') pl.figtext(0.5,0.05,'Temperature (C)',ha='center') pl.figtext(0.015,0.52,'Pressure (hPa)',rotation=90) label = 'Skew-T log-P, %s, %s UTC'%(si.name,si.time) pl.figtext(0.5,0.97,label,ha='center') if(olga != -1): img = image.imread(olga.olgaRoot+'include/olga_left.png') pl.figimage(img,7,5) #pl.figimage(img,10,olga.fig_width_px-45) pl.figtext(0.99,0.011,'%s'%olga.map_desc[0],size=7,ha='right') return fig
def plot_vwp(data, times, parameters, fname=None, add_hodo=False, fixed=False, web=False, archive=False): img_title = "%s VWP valid ending %s" % ( data[0].rid, times[0].strftime("%d %b %Y %H%M UTC")) if fname is not None: img_file_name = fname else: img_file_name = "%s_vwp.png" % data[0].rid sat_age = 6 * 3600 now = datetime.utcnow() img_age = now - times[0] age_cstop = min(_total_seconds(img_age) / sat_age, 1) * 0.4 age_color = mpl.cm.get_cmap('hot')(age_cstop)[:-1] age_str = "Image created on %s (%s old)" % ( now.strftime("%d %b %Y %H%M UTC"), _fmt_timedelta(img_age)) fig_aspect = 2.5714 fig_wid = 24 fig_hght = fig_wid / fig_aspect pylab.figure(figsize=(fig_wid, fig_hght), dpi=200) axes_left = 0.01 axes_bot = 0.02 axes_hght = 0.94 axes_wid = axes_hght / fig_aspect pylab.axes((axes_left, axes_bot, 0.99, axes_hght)) _plot_vwp_background(times) _plot_vwp_data(data) #_plot_param_table(parameters, web=web) pylab.xlim(0, 1.) pylab.ylim(0, 1.) pylab.xticks([]) pylab.yticks([]) pylab.box(False) if not archive: pylab.title(img_title, color=age_color) pylab.text(x_start, 1.03, age_str, transform=pylab.gca().transAxes, ha='left', va='top', fontsize=9, color=age_color) else: pylab.title(img_title) if web: web_brand = "http://www.autumnsky.us/vad/" pylab.text(1.0, -0.01, web_brand, transform=pylab.gca().transAxes, ha='right', va='top', fontsize=9) if add_hodo: inset_ax = inset_axes(pylab.gca(), width="30%", height="55%", loc='upper left', bbox_to_anchor=(0.63, 0, 0.85, 1), bbox_transform=pylab.gca().transAxes) u, v = vec2comp(data[0]['wind_dir'], data[0]['wind_spd']) if fixed or len(u) == 0: ctr_u, ctr_v = 20, 20 size = 120 else: ctr_u = u.mean() ctr_v = v.mean() size = max(u.max() - u.min(), v.max() - v.min()) + 20 size = max(120, size) min_u = ctr_u - size / 2 max_u = ctr_u + size / 2 min_v = ctr_v - size / 2 max_v = ctr_v + size / 2 _plot_background(min_u, max_u, min_v, max_v) _plot_data(data[0], parameters) _plot_param_table(parameters, web=web) inset_ax.set_xlim(min_u, max_u) inset_ax.set_ylim(min_v, max_v) inset_ax.set_xticks([]) inset_ax.set_yticks([]) pylab.savefig(img_file_name, dpi=pylab.gcf().dpi) pylab.close() if web: bounds = { 'min_u': min_u, 'max_u': max_u, 'min_v': min_v, 'max_v': max_v } print(json.dumps(bounds))
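# plot_vwp() above calls vec2comp() to turn wind direction/speed pairs into u/v components for
# the hodograph inset. A plausible implementation under the usual meteorological convention
# (direction = where the wind blows FROM, degrees clockwise from north); this is a sketch,
# not necessarily the project's own vec2comp:
import numpy as np

def vec2comp_sketch(wdir, wspd):
    wdir_rad = np.radians(wdir)
    u = -wspd * np.sin(wdir_rad)   # zonal (eastward) component
    v = -wspd * np.cos(wdir_rad)   # meridional (northward) component
    return u, v

# a 10 kt wind from due west (270 deg) gives u = +10, v ~ 0
print(vec2comp_sketch(270.0, 10.0))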
def main(): print("Loading data...") #args parser = argparse.ArgumentParser() parser.add_argument("--input_file", type=str) parser.add_argument("--epochs", type=int) parser.add_argument("--load_npz", type=str) args = parser.parse_args() task_params = eval(args.input_file.split(".csv")[0] + '_params') ALL_TIME = time.time() traindata, valdata, testdata = load_data( task_params['data_file'], (task_params['train'], task_params['val'], task_params['test']), input_name='smiles', target_name=task_params['target_name']) x_trains, y_trains = traindata x_vals, y_vals = valdata x_tests, y_tests = testdata x_trains = np.reshape(x_trains, (task_params['train'], 1)) y_trains = np.reshape(y_trains, (task_params['train'], 1)) x_vals = np.reshape(x_vals, (task_params['val'], 1)) y_vals = np.reshape(y_vals, (task_params['val'], 1)) x_tests = np.reshape(x_tests, (task_params['test'], 1)) y_tests = np.reshape(y_tests, (task_params['test'], 1)).astype(np.float32) def run_conv_experiment(): '''Initialize model''' NNFP = Main(model_params) optimizer = optimizers.Adam() optimizer.setup(NNFP) #gpu_device = 0 #cuda.get_device(gpu_device).use() #NNFP.to_gpu(gpu_device) '''Learn''' trained_NNFP, conv_training_curve, undo_norm = \ train_nn(NNFP, x_trains, y_trains, args.epochs, validation_smiles=x_vals, validation_raw_targets=y_vals) save_name = "input-attention-ecfp-cep-top-remove.npz" serializers.save_npz(save_name, trained_NNFP) mse, _ = trained_NNFP.mse(x_tests, y_tests, undo_norm) return math.sqrt(mse._data[0]), conv_training_curve def load_model_experiment(): '''Initialize model''' trained_NNFP = Main(model_params) serializers.load_npz(args.load_npz, trained_NNFP) _, undo_norm = normalize_array(y_tests) mse, input_attention = trained_NNFP.mse(x_tests, y_tests, undo_norm) return math.sqrt(mse._data[0]), input_attention print("Starting neural fingerprint experiment...") if args.load_npz == None: test_loss_neural, conv_training_curve = run_conv_experiment() else: test_loss_neural, input_attention = load_model_experiment() x_ecfp = input_attention._data[0] y = [0] * len(x_ecfp) attentions = np.split(x_ecfp, 5, 1) xmin, xmax = 0, 1 for i in range(len(attentions)): fig, ax = plt.subplots(figsize=(10, 10)) fig.set_figheight(1) plt.tight_layout() plt.tick_params(labelbottom=True, bottom=False) plt.tick_params(labelleft=False, left=False) plt.scatter(attentions[i], y, c="red", marker="o", alpha=0.3) plt.hlines(y=0, xmin=xmin, xmax=xmax) plt.vlines(x=[i for i in range(xmin, xmax + 1, 1)], ymin=-0.04, ymax=0.04) plt.vlines(x=[i / 10 for i in range(xmin * 10, xmax * 10 + 1, 1)], ymin=-0.02, ymax=0.02) line_width = 0.1 plt.xticks(np.arange(xmin, xmax + line_width, line_width)) pylab.box(False) #plt.savefig(args.input_file + '_attention_ecfp_' + str(i) + '.png') plt.savefig("test.png") #plt.show() print("Neural test RMSE", test_loss_neural) print("time : ", time.time() - ALL_TIME)
pl.title("Right Parietal", fontsize="large") elif grp == "Occipital": pl.subplot(3, 4, 11) pl.title("Right Occipital", fontsize="large") ####Reading the Evoked data structure for (cfname, l) in zip(condEveFiles, colorList): evoked = mne.fiff.Evoked(cfname, setno="epochs_TaggedWord", baseline=(None, 0)) # evoked = mne.fiff.Evoked(fname, setno = c , baseline = (None, 0)) ##Use this if you are using condition numbers badChanSet = set(evoked.info["bads"]) # print c good_chan = list(set(chan_list)) sel = mne.fiff.pick_types(evoked.info, meg=False, eeg=False, include=good_chan) print sel data = evoked.data[sel] ###Computing the MEG RMS from the evoked data for the specified condition times = evoked.times * 1000 square = np.power(data, 2) meanSquare = np.mean(square, 0) rms = np.power(meanSquare, 0.5) ###Plotting the MEG rms value for the current condition pl.plot(times, rms * 1e13, color=l, linewidth=2) pl.ylim([ymin, ymax]) pl.xlim([xmin, xmax]) pl.box("off") pl.tick_params(axis="both", right="off", top="off") pl.yticks(np.array([0.0, 4.0, 8.0, 12.0, 16.0, 20.0, 24.0, 28.0, 32.0])) pl.xticks(np.array([0, 200, 400, 600])) pl.savefig(out_fname) pl.show()
def check_metric_ellipse(width=2e-2, eta=0.02, Nadapt=6): set_log_level(WARNING) parameters["allow_extrapolation"] = True ### CONSTANTS meshsz = 40 hd = Constant(width) ### SETUP MESH mesh = RectangleMesh(-0.5, -0.5, 0.5, 0.5, 1 * meshsz, 1 * meshsz, "left/right") ### SETUP SOLUTION angle = pi / 8 #rand()*pi/2 #testsol = 'tanh(x[0]/' + str(float(hd)) + ')' #tanh(x[0]/hd) testsol = 'tanh((' + str(cos(angle)) + '*x[0]+' + str( sin(angle)) + '*x[1])/' + str(float(hd)) + ')' #tanh(x[0]/hd) ddtestsol = str(cos(angle) + sin(angle) ) + '*2*' + testsol + '*(1-pow(' + testsol + ',2))/' + str( float(hd)**2) #testsol2 = 'tanh(x[1]/' + str(float(hd)) + ')' #tanh(x[0]/hd) testsol2 = 'tanh((' + str(cos(angle)) + '*x[1]-' + str( sin(angle)) + '*x[0])/' + str(float(hd)) + ')' #tanh(x[0]/hd) ddtestsol2 = str(cos(angle) - sin( angle)) + '*2*' + testsol2 + '*(1-pow(' + testsol2 + ',2))/' + str( float(hd)**2) def boundary(x): return x[0]-mesh.coordinates()[:,0].min() < DOLFIN_EPS or mesh.coordinates()[:,0].max()-x[0] < DOLFIN_EPS \ or mesh.coordinates()[:,1].min()+0.5 < DOLFIN_EPS or mesh.coordinates()[:,1].max()-x[1] < DOLFIN_EPS # PERFORM ONE ADAPTATION ITERATION for iii in range(Nadapt): V = FunctionSpace(mesh, "CG", 2) dis = TrialFunction(V) dus = TestFunction(V) u = Function(V) u2 = Function(V) bc = DirichletBC(V, Expression(testsol), boundary) bc2 = DirichletBC(V, Expression(testsol2), boundary) R = interpolate(Expression(ddtestsol), V) R2 = interpolate(Expression(ddtestsol2), V) a = inner(grad(dis), grad(dus)) * dx L = R * dus * dx L2 = R2 * dus * dx solve(a == L, u, bc) solve(a == L2, u2, bc2) H = metric_pnorm(u, eta, max_edge_length=1., max_edge_ratio=50) #Mp = project(H, TensorFunctionSpace(mesh, "CG", 1)) H2 = metric_pnorm(u2, eta, max_edge_length=1., max_edge_ratio=50) #Mp2 = project(H2, TensorFunctionSpace(mesh, "CG", 1)) H3 = metric_ellipse(H, H2) Mp3 = project(H3, TensorFunctionSpace(mesh, "CG", 1)) print("H11: %0.0f, H22: %0.0f, V: %0.0f,E: %0.0f" % (assemble(abs(H3[0, 0]) * dx), assemble( abs(H3[1, 1]) * dx), mesh.num_vertices(), mesh.num_cells())) startTime = time() if iii != 6: # mesh2 = Mesh(adapt(Mp2)) mesh = Mesh(adapt(Mp3)) # mesh3 = adapt(Mp) print("total time was %0.1fs" % (time() - startTime)) # PLOT MESH figure(1) triplot(mesh.coordinates()[:, 0], mesh.coordinates()[:, 1], mesh.cells()) axis('equal') axis('off') box('off') #figure(2); triplot(mesh2.coordinates()[:,0],mesh2.coordinates()[:,1],mesh2.cells()) #mesh = mesh2 #figure(3); triplot(mesh3.coordinates()[:,0],mesh3.coordinates()[:,1],mesh3.cells()) #mesh = mesh3 figure(4) testf = interpolate(u2, FunctionSpace(mesh, 'CG', 1)) vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG", 1)) zz = testf.vector().array()[vtx2dof] zz[zz == 1] -= 1e-16 tricontourf(mesh.coordinates()[:, 0], mesh.coordinates()[:, 1], mesh.cells(), zz, 100) show()
def main(): usage="Usage: %prog [options] <images>\n" parser = OptionParser(usage=usage) parser.add_option('-o','--out',default='mosaic', dest='outroot', help='Root name for output') parser.add_option('-t','--threshold',dest='threshold',default=0.05, type='float', help='Beam threshold [default=%default]') parser.add_option('-v','--verbose',action="store_true", dest="verbose",default=False, help="Increase verbosity of output") (options, args) = parser.parse_args() images=args newbeam=[re.sub('_regrid(\S+).fits','_beamI_regrid\\1.fits',i) for i in images] ff0=pyfits.open('MWATS_201404_I_MOL.fits') mask=ff0[1].data<=2 f=pyfits.open(images[0]) D=numpy.zeros(f[0].data.shape) W=numpy.zeros(f[0].data.shape) N=numpy.zeros(f[0].data.shape) pylab.clf() f=pylab.gcf() f.set_figwidth(12) f.set_figheight(6) for i in xrange(len(images)): #imagelist=' '.join(images[:i+1]) #command='python /localhome/kaplan/python/mosaic_images.py -i %s -t %f -o %s_%03d.fits %s' % ( #images[0], # options.threshold, # options.outroot, # i, # imagelist) #print command fi=pyfits.open(images[i]) try: fb=pyfits.open(newbeam[i]) except: #newbeam[i]=newbeam[i].replace('-v_','-i_') newbeam[i]=newbeam[i].replace(Vstring,Istring) fb=pyfits.open(newbeam[i]) beam=(fb[0].data) beam[0,0][numpy.isnan(fi[0].data[0,0])]=0 beam[0,0][beam[0,0]<=options.threshold*beam[0,0].max()]=0 fi[0].data[0,0][numpy.isnan(fi[0].data[0,0])]=0 D[0,0]+=fi[0].data[0,0]*beam[0,0]**2 W[0,0]+=beam[0,0]**2 N[0,0]+=beam[0,0] DD=D/W DD[0,0][W[0,0]==0]=numpy.nan DD[mask]=numpy.nan fi[0].data=DD if os.path.exists('temp.fits'): os.remove('temp.fits') fi.writeto('temp.fits') w=wcs.WCS(fi[0].header,naxis=(1,2)) f.clf() gc=aplpy.FITSFigure('temp.fits',figure=f) gc.show_colorscale(vmin=-0.1,vmax=0.1,cmap=pylab.cm.copper) gc.recenter(14*15,-22.5, width=160, height=95) gc.axis_labels.hide_x() gc.axis_labels.hide_y() gc.tick_labels.hide() gc.add_label(0.1,0.9,'%03d' % i, relative=True, size=16) ra=numpy.linspace(8,20,50)*15 for dec in xrange(-80,30,10): x,y=w.wcs_world2pix(ra,ra*0+dec,0) pylab.plot(x,y,'k') if dec < 20 and dec>-80: gc.add_label(15*16,dec,'$\\delta=%d^\\ocirc$' % dec, size=12,verticalalignment='bottom') dec=numpy.linspace(-80,20,50) for ra in xrange(8,22,2): ra_show=ra if ra_show < 0: ra_show+=24 x,y=w.wcs_world2pix(0*dec+ra_show*15,dec,0) pylab.plot(x,y,'k') gc.add_label(ra*15,-45,'$\\alpha=%d^h$' % ra_show, size=12,verticalalignment='bottom', horizontalalignment='left') ax=pylab.gca() ax.axis('off') pylab.box('off') pylab.savefig('MWATS_201404_I_MOL_%03d.png' % i) print 'MWATS_201404_I_MOL_%03d.png' % i if os.path.exists('temp.fits'): os.remove('temp.fits')
#tkin
ax = pl.subplot(projection=cube_wcs.celestial)
ax.set(xlim=(50, 270), ylim=(60, 240))
ax.coords[0].set_ticks_visible(False)
ax.coords[1].set_ticks_visible(False)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
ax.set_axis_off()  # method call; the bare attribute access was a silent no-op
pl.imshow(cube_hdu.data[0, :, :], origin='lower', cmap='plasma', vmin=20, vmax=140)
cbar = pl.colorbar()
#cbar.ax.set_ylabel('K', rotation=0)
pl.box(on=None)
scalebar = ScaleBar(1.53)
pl.gca().add_artist(scalebar)
pl.title('Kinematic Temperature (K)', fontsize=18)
pl.savefig('tkin.pdf')
pl.close()

#tex
ax = pl.subplot(projection=cube_wcs.celestial)
ax.set(xlim=(50, 270), ylim=(60, 240))
ax.coords[0].set_ticks_visible(False)
ax.coords[1].set_ticks_visible(False)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
ax.set_axis_off()  # same fix as above
pl.imshow(cube_hdu.data[1, :, :], origin='lower', cmap='bone')
FOS.sort(key=lambda x: len(x), reverse=True) for element in FOS: # if len(element) < 4: # continue count += 1 for xi in element: for i in range(size_of_fos): matrix[i + ((count - 1) * size_of_fos)][xi] = len(FOS) - count print(count) current_cmap = pylab.matplotlib.cm.get_cmap() current_cmap.set_bad(color='white') pylab.imshow(matrix[:size_of_fos * count]) pylab.box(False) pylab.xlim(xmin=0, xmax=20) pylab.xticks([x for x in range(0, 22, 2)]) pylab.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on') # pylab.axes().xaxis.set_visible(True) if full == "": pylab.title(f"Sparse FOS of CEC 2013 f{problem}\n ") elif full == "_overlap": pylab.title(f"Manual FOS of CEC 2013 f{problem}\n ") else: pylab.title(f"FOS of FBMP on OSoREB \n ")
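# set_bad(color='white') above only has a visible effect when the plotted array contains NaN or
# masked entries; those cells are then painted with the "bad" colour instead of being colormapped.
# A tiny illustration, independent of the FOS data:
import copy
import numpy as np
import pylab

data = np.arange(16, dtype=float).reshape(4, 4)
data[data % 5 == 0] = np.nan                  # cells to leave blank
cmap = copy.copy(pylab.cm.viridis)            # copy so the shared colormap is left untouched
cmap.set_bad(color='white')                   # NaN / masked cells drawn in white
pylab.imshow(data, cmap=cmap)
pylab.box(False)
pylab.show()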
def main(): print("Loading data...") #args parser = argparse.ArgumentParser() parser.add_argument("--input_file", type=str) parser.add_argument("--epochs", type=int) parser.add_argument("--fp_length", type=int) parser.add_argument("--i", type=int) parser.add_argument("--load_npz", type=str) args = parser.parse_args() model_params['fp_length'] = args.fp_length task_params = eval(args.input_file.split(".csv")[0] + '_params') ALL_TIME = time.time() traindata, valdata, testdata = load_data( task_params['data_file'], (task_params['train'], task_params['val'], task_params['test']), input_name='smiles', target_name=task_params['target_name']) x_trains, y_trains = traindata x_vals, y_vals = valdata x_tests, y_tests = testdata x_trains = np.reshape(x_trains, (task_params['train'], 1)) y_trains = np.reshape(y_trains, (task_params['train'], 1)) x_vals = np.reshape(x_vals, (task_params['val'], 1)) y_vals = np.reshape(y_vals, (task_params['val'], 1)) x_tests = np.reshape(x_tests, (task_params['test'], 1)) y_tests = np.reshape(y_tests, (task_params['test'], 1)).astype(np.float32) def run_conv_experiment(): '''Initialize model''' NNFP = Main(model_params) optimizer = optimizers.Adam() optimizer.setup(NNFP) '''Learn''' trained_NNFP, conv_training_curve, undo_norm = \ train_nn(NNFP, x_trains, y_trains, args.epochs, validation_smiles=x_vals, validation_raw_targets=y_vals) #save_name = "fp_concat_" + args.input_file + "_fp_length_" + str(args.fp_length) + "_" + str(args.i) + ".npz" save_name = "test_cep.npz" serializers.save_npz(save_name, trained_NNFP) mse, _, _ = trained_NNFP.mse(x_tests, y_tests, undo_norm) return math.sqrt(mse._data[0]), conv_training_curve def load_model_experiment(): '''Initialize model''' trained_NNFP = Main(model_params) serializers.load_npz(args.load_npz, trained_NNFP) _, undo_norm = normalize_array(y_tests) mse, attention_ecfp, attention_fcfp = trained_NNFP.mse( x_tests, y_tests, undo_norm) return math.sqrt(mse._data[0]), attention_ecfp, attention_fcfp print("Starting neural fingerprint experiment...") if args.load_npz == None: test_loss_neural, conv_training_curve = run_conv_experiment() else: test_loss_neural, attention_ecfp, attention_fcfp = load_model_experiment( ) x_ecfp = attention_ecfp._data[0] x_fcfp = attention_fcfp._data[0] print(x_fcfp, x_ecfp) np.savetxt('weight_delaney.txt', x_ecfp, delimiter=' ') y = [0] * len(x_ecfp) fig, ax = plt.subplots(figsize=(10, 10)) fig.set_figheight(1) ax.tick_params(labelbottom=True, bottom=False) ax.tick_params(labelleft=False, left=False) xmin, xmax = 0, 1 plt.tight_layout() plt.scatter(x_ecfp, y, c="red", marker="o", alpha=0.3, label="Input Representation 1") plt.scatter(x_fcfp, y, c="blue", marker="o", alpha=0.3, label="Input Representation 2") plt.hlines(y=0, xmin=xmin, xmax=xmax) plt.vlines(x=[i for i in range(xmin, xmax + 1, 1)], ymin=-0.04, ymax=0.04) plt.vlines(x=[i / 10 for i in range(xmin * 10, xmax * 10 + 1, 1)], ymin=-0.02, ymax=0.02) line_width = 0.1 plt.xticks(np.arange(xmin, xmax + line_width, line_width)) pylab.box(False) #plt.legend(loc='upper right', bbox_to_anchor=(0.2,1,0.15,0), borderaxespad=0.) #plt.show() #plt.savefig("fp_scalar_" + args.input_file + ".png") #plt.savefig("test.png") print("Neural test RMSE", test_loss_neural) print("time : ", time.time() - ALL_TIME)
def check_metric_ellipse(width=2e-2, eta = 0.02, Nadapt=6): set_log_level(WARNING) parameters["allow_extrapolation"] = True ### CONSTANTS meshsz = 40 hd = Constant(width) ### SETUP MESH mesh = RectangleMesh(Point(-0.5,-0.5),Point(0.5,0.5),1*meshsz,1*meshsz,"left/right") ### SETUP SOLUTION angle = pi/8#rand()*pi/2 #testsol = 'tanh(x[0]/' + str(float(hd)) + ')' #tanh(x[0]/hd) testsol = 'tanh((' + str(cos(angle)) + '*x[0]+'+ str(sin(angle)) + '*x[1])/' + str(float(hd)) + ')' #tanh(x[0]/hd) ddtestsol = str(cos(angle)+sin(angle))+'*2*'+testsol+'*(1-pow('+testsol+',2))/'+str(float(hd)**2) #testsol2 = 'tanh(x[1]/' + str(float(hd)) + ')' #tanh(x[0]/hd) testsol2 = 'tanh((' + str(cos(angle)) + '*x[1]-'+ str(sin(angle)) + '*x[0])/' + str(float(hd)) + ')' #tanh(x[0]/hd) ddtestsol2 = str(cos(angle)-sin(angle))+'*2*'+testsol2+'*(1-pow('+testsol2+',2))/'+str(float(hd)**2) def boundary(x): return x[0]-mesh.coordinates()[:,0].min() < DOLFIN_EPS or mesh.coordinates()[:,0].max()-x[0] < DOLFIN_EPS \ or mesh.coordinates()[:,1].min()+0.5 < DOLFIN_EPS or mesh.coordinates()[:,1].max()-x[1] < DOLFIN_EPS # PERFORM ONE ADAPTATION ITERATION for iii in range(Nadapt): V = FunctionSpace(mesh, "CG" ,2); dis = TrialFunction(V); dus = TestFunction(V); u = Function(V); u2 = Function(V) bc = DirichletBC(V, Expression(testsol), boundary) bc2 = DirichletBC(V, Expression(testsol2), boundary) R = interpolate(Expression(ddtestsol),V) R2 = interpolate(Expression(ddtestsol2),V) a = inner(grad(dis), grad(dus))*dx L = R*dus*dx L2 = R2*dus*dx solve(a == L, u, bc) solve(a == L2, u2, bc2) H = metric_pnorm(u , eta, max_edge_length=1., max_edge_ratio=50); #Mp = project(H, TensorFunctionSpace(mesh, "CG", 1)) H2 = metric_pnorm(u2, eta, max_edge_length=1., max_edge_ratio=50); #Mp2 = project(H2, TensorFunctionSpace(mesh, "CG", 1)) H3 = metric_ellipse(H,H2); Mp3 = project(H3, TensorFunctionSpace(mesh, "CG", 1)) print("H11: %0.0f, H22: %0.0f, V: %0.0f,E: %0.0f" % (assemble(abs(H3[0,0])*dx),assemble(abs(H3[1,1])*dx),mesh.num_vertices(),mesh.num_cells())) startTime = time() if iii != 6: # mesh2 = Mesh(adapt(Mp2)) mesh = Mesh(adapt(Mp3)) # mesh3 = adapt(Mp) print("total time was %0.1fs" % (time()-startTime)) # PLOT MESH figure(1); triplot(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells()); axis('equal'); axis('off'); box('off') #figure(2); triplot(mesh2.coordinates()[:,0],mesh2.coordinates()[:,1],mesh2.cells()) #mesh = mesh2 #figure(3); triplot(mesh3.coordinates()[:,0],mesh3.coordinates()[:,1],mesh3.cells()) #mesh = mesh3 figure(4); testf = interpolate(u2,FunctionSpace(mesh,'CG',1)) vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG" ,1)) zz = testf.vector().array()[vtx2dof]; zz[zz==1] -= 1e-16 tricontourf(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells(),zz,100) show()
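# Both check_metric_ellipse variants write the forcing term as a hand-built string, while the
# maximal_example routine further down derives it symbolically with sympy before patching the
# string for the Expression parser. The symbolic step on its own, for the rotated tanh profile
# used here (a sketch; the exact printed form depends on sympy's simplifier):
from sympy import symbols, tanh, cos, sin, diff, simplify

x, y, h, a = symbols('x y h a')
u = tanh((cos(a) * x + sin(a) * y) / h)
laplacian = simplify(diff(u, x, 2) + diff(u, y, 2))
# expect something proportional to -2*u*(1 - u**2)/h**2, with the cos(a)**2 + sin(a)**2 factor
# ideally collapsing to 1
print(laplacian)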
def vis_flow_image_final(flow_pyramid, flow_gt_pyramid, images_list, gray_images_list, filename='./flow.png'): num_contents = len(flow_pyramid) + len(flow_gt_pyramid) + len( images_list) + len(gray_images_list) nums_list = [ len(flow_pyramid), len(flow_gt_pyramid), len(images_list), len(gray_images_list) ] nums_list.sort() cols = nums_list[-2] if cols <= 3: cols = nums_list[-1] cols = 4 rows = math.ceil(num_contents / cols) fig_dpi = 200 plt.rcParams['savefig.dpi'] = fig_dpi plt.rcParams['figure.dpi'] = fig_dpi fig = plt.figure() fig_id = 1 for image in images_list: plt.subplot(rows, cols, fig_id) plt.imshow(image) plt.tick_params(labelbottom=False, bottom=False) plt.tick_params(labelleft=False, left=False) plt.xticks([]) box(False) fig_id += 1 for image in gray_images_list: plt.subplot(rows, cols, fig_id) plt.imshow(image, cmap='gray') plt.tick_params(labelbottom=False, bottom=False) plt.tick_params(labelleft=False, left=False) plt.xticks([]) box(False) fig_id += 1 for flow_gt in flow_gt_pyramid: plt.subplot(rows, cols, fig_id) plt.imshow(vis_flow(flow_gt)) plt.tick_params(labelbottom=False, bottom=False) plt.tick_params(labelleft=False, left=False) plt.xticks([]) box(False) fig_id += 1 for flow in flow_pyramid: plt.subplot(rows, cols, fig_id) plt.imshow(vis_flow(flow)) plt.tick_params(labelbottom=False, bottom=False) plt.tick_params(labelleft=False, left=False) plt.xticks([]) box(False) fig_id += 1 plt.tight_layout() plt.savefig(filename, bbox_inches='tight', pad_inches=0.1, dpi=fig_dpi) plt.close()
def maximal_example(eta_list = array([0.001]), Nadapt=5, timet=1., period=2*pi): ### CONSTANTS ### SETUP SOLUTION #testsol = '0.1*sin(50*x+2*pi*t/T)+atan(-0.1/(2*x - sin(5*y+2*pi*t/T)))'; sx = Symbol('sx'); sy = Symbol('sy'); sT = Symbol('sT'); st = Symbol('st'); spi = Symbol('spi') testsol = 0.1*pysin(50*sx+2*spi*st/sT)+pyatan(-0.1,2*sx - pysin(5*sy+2*spi*st/sT)) ddtestsol = str(diff(testsol,sx,sx)+diff(testsol,sy,sy)).replace('sx','x[0]').replace('sy','x[1]').replace('spi','pi') # replacing **P with pow(,P) ddtestsol = ddtestsol.replace("(2*x[0] - sin(5*x[1] + 2*pi*st/sT))**2","pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.)") ddtestsol = ddtestsol.replace("cos(5*x[1] + 2*pi*st/sT)**2","pow(cos(5*x[1] + 2*pi*st/sT),2.)") ddtestsol = ddtestsol.replace("(pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.) + 0.01)**2","pow((pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.) + 0.01),2.)") ddtestsol = ddtestsol.replace("(1 + 0.01/pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.))**2","pow(1 + 0.01/pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.),2.)") ddtestsol = ddtestsol.replace("(2*x[0] - sin(5*x[1] + 2*pi*st/sT))**5","pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),5.)") #insert values ddtestsol = ddtestsol.replace('sT',str(period)).replace('st',str(timet)) testsol = str(testsol).replace('sx','x[0]').replace('sy','x[1]').replace('spi','pi').replace('sT',str(period)).replace('st',str(timet)) ddtestsol = "-("+ddtestsol+")" error_list = []; dof_list = [] for eta in eta_list: meshsz = 40 ### SETUP MESH # mesh = RectangleMesh(0.4,-0.1,0.6,0.3,1*meshsz,1*meshsz,"left/right") #shock # mesh = RectangleMesh(-0.75,-0.3,-0.3,0.5,1*meshsz,1*meshsz,"left/right") #waves mesh = RectangleMesh(-1.5,-0.25,0.5,0.75,1*meshsz,1*meshsz,"left/right") #shock+waves def boundary(x): return near(x[0],mesh.coordinates()[:,0].min()) or near(x[0],mesh.coordinates()[:,0].max()) \ or near(x[1],mesh.coordinates()[:,1].min()) or near(x[1],mesh.coordinates()[:,1].max()) # PERFORM ONE ADAPTATION ITERATION for iii in range(Nadapt): startTime = time() V = FunctionSpace(mesh, "CG" ,2); dis = TrialFunction(V); dus = TestFunction(V); u = Function(V) # R = interpolate(Expression(ddtestsol),V) a = inner(grad(dis), grad(dus))*dx L = Expression(ddtestsol)*dus*dx # bc = DirichletBC(V, Expression(testsol), boundary) solve(a == L, u, bc) soltime = time()-startTime startTime = time() H = metric_pnorm(u, eta, max_edge_ratio=50, CG0H=3, p=4) metricTime = time()-startTime if iii != Nadapt-1: mesh = adapt(H) TadaptTime = time()-startTime L2error = errornorm(Expression(testsol), u, degree_rise=4, norm_type='L2') printstr = "%5.0f elements, %0.0e L2error, adapt took %0.0f %% of the total time, (%0.0f %% of which was the metric calculation)" \ % (mesh.num_cells(),L2error,TadaptTime/(TadaptTime+soltime)*100,metricTime/TadaptTime*100) if len(eta_list) == 1: print(printstr) else: error_list.append(L2error); dof_list.append(len(u.vector().array())) print(printstr) if len(dof_list) > 1: dof_list = array(dof_list); error_list = array(error_list) figure() loglog(dof_list,error_list,'.b-',linewidth=2,markersize=16); xlabel('Degree of freedoms'); ylabel('L2 error') # # PLOT MESH # figure() coords = mesh.coordinates().transpose() # triplot(coords[0],coords[1],mesh.cells(),linewidth=0.1) # #savefig('mesh.png',dpi=300) #savefig('mesh.eps'); figure() #solution testf = interpolate(Expression(testsol),FunctionSpace(mesh,'CG',1)) vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG" ,1)) zz = testf.vector().array()[vtx2dof] hh=tricontourf(coords[0],coords[1],mesh.cells(),zz,100) colorbar(hh) 
#savefig('solution.png',dpi=300) #savefig('solution.eps'); figure() #analytical solution testfe = interpolate(u,FunctionSpace(mesh,'CG',1)) zz = testfe.vector().array()[vtx2dof] hh=tricontourf(coords[0],coords[1],mesh.cells(),zz,100) colorbar(hh) #savefig('analyt.png',dpi=300) #savefig('analyt.eps'); figure() #error zz -= testf.vector().array()[vtx2dof]; zz[zz==1] -= 1e-16 hh=tricontourf(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells(),zz,100,cmap=get_cmap('binary')) colorbar(hh) hold('on'); triplot(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells(),color='r',linewidth=0.5); hold('off') axis('equal'); box('off'); title('error') show()
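# When eta_list holds more than one tolerance, maximal_example collects (degrees of freedom,
# L2 error) pairs and plots them on log-log axes. The observed convergence rate between
# successive refinements can be read off the same arrays (illustrative numbers only):
import numpy as np

dof_list = np.array([1.0e3, 4.0e3, 1.6e4])
error_list = np.array([2.0e-2, 5.0e-3, 1.3e-3])
# slope of the log-log error-vs-dof curve; in 2-D, dof ~ h**-2, so a slope near -1
# corresponds to roughly second-order accuracy in the mesh size h
rates = np.diff(np.log(error_list)) / np.diff(np.log(dof_list))
print(rates)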
pl.title("P2") ##pl.title(1.0, 1.0, 'C6', fontsize='medium', verticalalignment='top') sel = fiff.pick_types(evoked.info,meg=False,eeg=False,include=[chan], exclude ="bads") print sel data = evoked.data[sel]*1e6 ##Mean region_mean = np.mean(data,0) ###plotting commands pl.plot(times,region_mean,color=colorList[c],linewidth=lWidth) #plot the data #pl.title(chan) pl.ylim([eegymin,eegymax]) #set the y limits pl.xlim([eegxmin,eegxmax]) #set the x limits pl.box('off') # turn off the box frame pl.axhline(y=0,xmin=0,xmax=1,color='k',linewidth=2) #draw a thicker horizontal line at 0 yfactor = abs(eegymax)+abs(eegymin) pl.axvline(x=0,ymin=(.5-(vertScaleBar/float(yfactor))),ymax=(.5+(vertScaleBar/float(yfactor))),color='k',linewidth=2) #draw a vertical line at 0 that goes 1/8 of the range in each direction from the middle (e.g., if the range is -8:8, =16, 1/8 of 16=2, so -2:2). pl.yticks(np.array([])) #turn off the y tick labels pl.xticks(np.array([])) #turn off the x tick labels pl.tick_params(axis='both',right='off',left='off',bottom='off',top='off') #turn off all the tick marks #draw vertical lines every hundred ms pl.axvline(x=100,ymin=.48, ymax=.52, color='k',linewidth=2) pl.axvline(x=200,ymin=.48, ymax=.52, color='k',linewidth=2) pl.axvline(x=300,ymin=.48, ymax=.52, color='k',linewidth=2) pl.axvline(x=400,ymin=.48, ymax=.52, color='k',linewidth=2) pl.axvline(x=500,ymin=.48, ymax=.52, color='k',linewidth=2) pl.axvline(x=600,ymin=.48, ymax=.52, color='k',linewidth=2)
def simanalyze( project=None, image=None, # if image==False: imagename=None, skymodel=None, # else: vis=None, modelimage=None, imsize=None, imdirection=None, cell=None, interactive=None, niter=None, threshold=None, weighting=None, mask=None, outertaper=None, pbcor=None, stokes=None, featherimage=None, # endif analyze=None, showuv=None, showpsf=None, showmodel=None, showconvolved=None, showclean=None, showresidual=None, showdifference=None, showfidelity=None, graphics=None, verbose=None, overwrite=None, dryrun=False, logfile=None ): #def simanalyze(project='sim', image=True, imagename='default', skymodel='', vis='default', modelimage='', imsize=[128, 128], imdirection='', cell='', interactive=False, niter=0, threshold='0.1mJy', weighting='natural', mask=[], outertaper=[''], stokes='I', featherimage='', analyze=False, showuv=True, showpsf=True, showmodel=True, showconvolved=False, showclean=True, showresidual=False, showdifference=True, showfidelity=True, graphics='both', verbose=False, overwrite=True, dryrun=False): # Collect a list of parameter values to save inputs in_params = locals() import re import glob casalog.origin('simanalyze') if verbose: casalog.filter(level="DEBUG2") a = inspect.stack() stacklevel = 0 for k in range(len(a)): if (string.find(a[k][1], 'ipython console') > 0): stacklevel = k myf = sys._getframe(stacklevel).f_globals # create the utility object: myutil = simutil() if logfile: myutil.reportfile=logfile myutil.openreport() if verbose: myutil.verbose = True msg = myutil.msg from simutil import is_array_type # put output in directory called "project" fileroot = project if not os.path.exists(fileroot): msg(fileroot+" directory doesn't exist - the task expects to find results from creating the datasets there, like the skymodel.",priority="error") # msg should raise an exception for priority=error saveinputs = myf['saveinputs'] saveinputs('simanalyze',fileroot+"/"+project+".simanalyze.last") # myparams=in_params) if (not image) and (not analyze): casalog.post("No operation to be done. Exiting from task.", "WARN") return True grscreen = False grfile = False if graphics == "both": grscreen = True grfile = True if graphics == "screen": grscreen = True if graphics == "file": grfile = True try: # Predefined parameters pbcoeff = 1.13 ## PB defined as pbcoeff*lambda/d # handle '$project' in modelimage modelimage = modelimage.replace('$project',project) featherimage = featherimage.replace('$project',project) #========================= # things we need: model_cell, model_direction if user doesn't specify - # so find those first, and get information using util.modifymodel # with skymodel=newmodel # we need to parse either the mslist or imagename (if image=False) # first, so that we can pick the appropriate skymodel, # if there are several. skymodel_searchstring="NOT SPECIFIED" if not (image or dryrun): user_imagename=imagename if user_imagename=="default" or len(user_imagename)<=0: images= glob.glob(fileroot+"/*image") if len(images)<1: msg("can't find any image in project directory",priority="error") return False if len(images)>1: msg("found multiple images in project directory",priority="warn") msg("using "+images[0],priority="warn") imagename=images[0] # trim .image suffix: imagename= imagename.replace(".image","") # if the user hasn't specified a sky model image, we can try to # see if their imagename contains something like the project and # configuration, as it would if simobserve created it. 
user_skymodel=skymodel if not os.path.exists(user_skymodel): if os.path.exists(fileroot+"/"+user_skymodel): user_skymodel=fileroot+"/"+user_skymodel elif len(user_skymodel)>0: raise Exception,"Can't find your specified skymodel "+user_skymodel # try to strip a searchable identifier tmpstring=user_skymodel.split("/")[-1] skymodel_searchstring=tmpstring.replace(".image","") if image: # check for default measurement sets: default_mslist = glob.glob(fileroot+"/*ms") n_default=len(default_mslist) # is the user requesting this ms? default_requested=[] for i in range(n_default): default_requested.append(False) # parse ms parameter and check for existance; # initial ms list if vis=="default" or len(vis)==0: mslist0=default_mslist else: mslist0 = vis.split(',') # verified found ms list mslist = [] mstype = [] mstoimage=[] tpmstoimage=None for ms0 in mslist0: if not len(ms0): continue ms1 = ms0.replace('$project',project) # MSes in fileroot/ have priority if os.path.exists(fileroot+"/"+ms1): ms1 = fileroot + "/" + ms1 if os.path.exists(ms1)or dryrun: mslist.append(ms1) # mark as requested if default_mslist.count(ms1): i=default_mslist.index(ms1) default_requested[i]=True # check if noisy in name if re.search('noisy.',ms1): ms1_raw=str.join("",re.split('noisy.',ms1)) if default_mslist.count(ms1_raw): i=default_mslist.index(ms1_raw) default_requested[i]=True else: # not noisy if ms1.endswith(".sd.ms"): ms1_noisy=re.split('.sd.ms',ms1)[0]+'.noisy.sd.ms' else: ms1_noisy=re.split('.ms',ms1)[0]+'.noisy.ms' if default_mslist.count(ms1_noisy): i=default_mslist.index(ms1_noisy) default_requested[i]=True if vis == "default": continue msg("You requested "+ms1+" but there is a noisy version of the ms in your project directory - if your intent is to model noisy data you may want to check inputs",priority="warn") # check if the ms is tp data or not. if dryrun: # HACK mstype.append('INT') mstoimage.append(ms1) elif myutil.ismstp(ms1,halt=False): mstype.append('TP') tpmstoimage = ms1 # XXX TODO more than one TP ms will not be handled # correctly msg("Found a total power measurement set, %s." % ms1,origin='simanalyze') else: mstype.append('INT') mstoimage.append(ms1) msg("Found a synthesis measurement set, %s." % ms1,origin='simanalyze') else: msg("measurement set "+ms1+" not found -- removing from imaging list") # check default mslist for unrequested ms: for i in range(n_default): if not default_requested[i]: msg("Project directory contains "+default_mslist[i]+" but you have not requested to include it in your simulated image.") if not mstoimage and len(tpmstoimage) == 0: raise Exception,"No MS found to image" # now try to parse the mslist for an identifier string that # we can use to find the right skymodel if there are several if len(mstoimage) == 0 and len(tpmstoimage) > 0: tmpstring = tpmstoimage.split("/")[-1] else: tmpstring=(mstoimage[0]).split("/")[-1] skymodel_searchstring=tmpstring.replace(".ms","") # more than one to image? 
if len(mstoimage) > 1: msg("Multiple interferometric ms found:",priority="info",origin='simanalyze') for i in range(len(mstoimage)): msg(" "+mstoimage[i],priority="info",origin='simanalyze') msg(" will be concated and simultaneously deconvolved; if something else is desired, please specify vis, or image manually and use image=F",priority="info",origin='simanalyze') concatms=project+"/"+project+".concat.ms" from concat import concat weights = get_concatweights(mstoimage) msg(" concat("+str(mstoimage)+",concatvis='"+concatms+"',visweightscale="+str(weights)+")",origin='simanalyze') if not dryrun: concat(mstoimage,concatvis=concatms,visweightscale=weights) mstoimage=[concatms] #======================================================== # now we can search for skymodel, and if there are several, # pick the one that is closest to either the imagename, # or the first MS if there are several MS to image. components_only=False # first look for skymodel, if not then compskymodel skymodels=glob.glob(fileroot+"/"+project+"*.skymodel")+glob.glob(fileroot+"/"+project+"*.newmodel") nmodels=len(skymodels) skymodel_index=0 if nmodels>1: msg("Found %i sky model images:" % nmodels,origin='simanalyze') # use the skymodel_searchstring to try to pick the right one # print them out for the user while we're at it. for i in range(nmodels): msg(" "+skymodels[i]) if skymodels[i].count(skymodel_searchstring)>0: skymodel_index=i msg("Using skymodel "+skymodels[skymodel_index],origin='simanalyze') if nmodels>=1: skymodel=skymodels[skymodel_index] else: skymodel="" if os.path.exists(skymodel): msg("Sky model image "+skymodel+" found.",origin='simanalyze') else: skymodels=glob.glob(fileroot+"/"+project+"*.compskymodel") nmodels=len(skymodels) if nmodels>1: msg("Found %i sky model images:" % nmodels,origin='simanalyze') for ff in skymodels: msg(" "+ff) msg("Using "+skymodels[0],origin='simanalyze') if nmodels>=1: skymodel=skymodels[0] else: skymodel="" if os.path.exists(skymodel): msg("Sky model image "+skymodel+" found.",origin='simanalyze') components_only=True elif not dryrun: msg("Can't find a model image in your project directory, named skymodel or compskymodel - output image will be created, but comparison with the input model is not possible.",priority="warn",origin='simanalyze') analyze=False modelflat = skymodel+".flat" if os.path.exists(skymodel): if not (os.path.exists(modelflat) or dryrun): myutil.flatimage(skymodel,verbose=verbose) # modifymodel just collects info if skymodel==newmodel (model_refdir,model_cell,model_size, model_nchan,model_center,model_width, model_stokes) = myutil.modifymodel(skymodel,skymodel, "","","","","",-1, flatimage=False) cell_asec=qa.convert(model_cell[0],'arcsec')['value'] ##################################################################### # clean if desired, use noisy image for further calculation if present # todo suggest a cell size from psf? 
##################################################################### if image: # make sure cell is defined if is_array_type(cell): if len(cell) > 0: cell0 = cell[0] else: cell0 = "" else: cell0 = cell if len(cell0)<=0: cell = model_cell if is_array_type(cell): if len(cell) == 1: cell = [cell[0],cell[0]] else: cell = [cell,cell] # cells are positive by convention cell = [qa.abs(cell[0]),qa.abs(cell[1])] # and imsize if is_array_type(imsize): if len(imsize) > 0: imsize0 = imsize[0] if len(imsize) > 1: imsize1 = imsize[1] else: imsize1 = imsize0 else: imsize0 = -1 else: imsize0 = imsize if imsize0 <= 0: imsize = [int(pl.ceil(qa.convert(qa.div(model_size[0],cell[0]),"")['value'])), int(pl.ceil(qa.convert(qa.div(model_size[1],cell[1]),"")['value']))] else: imsize=[imsize0,imsize1] if len(mstoimage) == 0: if tpmstoimage: sd_only = True else: msg("no measurement sets found to image",priority="error",origin='simanalyze') else: sd_only = False # get some quantities from the interferometric ms # TODO use something like aU.baselineStats for this, and the 90% baseline maxbase=0. if len(mstoimage)>1 and dryrun: msg("imaging multiple ms not possible in dryrun mode",priority="warn",origin="simanalyze") # TODO make work better for multiple MS for msfile in mstoimage: if os.path.exists(msfile): tb.open(msfile) rawdata = tb.getcol("UVW") tb.done() maxbase = max([max(rawdata[0,]),max(rawdata[1,])]) # in m psfsize = 0.3/qa.convert(qa.quantity(model_center),'GHz')['value']/maxbase*3600.*180/pl.pi # lambda/b converted to arcsec minimsize = 8* int(psfsize/cell_asec) elif dryrun: minimsize = min(imsize) psfsize = qa.mul(cell[0],3) # HACK else: raise Exception,mstoimage+" not found." if imsize[0] < minimsize: msg("The number of image pixel in x-axis, %d, is small to cover 8 x PSF. Setting x pixel number, %d." % (imsize[0], minimsize), priority='warn',origin='simanalyze') imsize[0] = minimsize if imsize[1] < minimsize: msg("The number of image pixel in y-axis, %d, is small to cover 8 x PSF. Setting y pixel number, %d" % (imsize[1], minimsize), priority='warn',origin='simanalyze') imsize[1] = minimsize tpimage=None # Do single dish imaging first if tpmstoimage exists. if tpmstoimage and os.path.exists(tpmstoimage): msg('creating image from ms: '+tpmstoimage,origin='simanalyze') #if len(mstoimage): # tpimage = project + '.sd.image' #else: # tpimage = project + '.image' tpimage = project + '.sd.image' tpimage = fileroot + "/" + tpimage if len(mstoimage): if len(modelimage) and tpimage != modelimage and \ tpimage != fileroot+"/"+modelimage: msg("modelimage parameter set to "+modelimage+" but also creating a new total power image "+tpimage,priority="warn",origin='simanalyze') msg("assuming you know what you want, and using modelimage="+modelimage+" in deconvolution",priority="warn",origin='simanalyze') elif len(featherimage) and tpimage != featherimage and \ tpimage != fileroot+"/"+featherimage: msg("featherimage parameter set to "+featherimage+" but also creating a new total power image "+tpimage,priority="warn",origin='simanalyze') msg("assuming you know what you want, and using featherimage="+featherimage+" in feather",priority="warn",origin='simanalyze') # Get PB size of TP Antenna # !! 
aveant will only be set if modifymodel or setpointings and in # any case it will the the aveant of the INTERFM array - we want the SD if os.path.exists(tpmstoimage): # antenna diameter tb.open(tpmstoimage+"/ANTENNA") diams = tb.getcol("DISH_DIAMETER") tb.close() aveant = pl.mean(diams) # theoretical antenna beam size import sdbeamutil pb_asec = sdbeamutil.primaryBeamArcsec(qa.tos(qa.convert(qa.quantity(model_center),'GHz')),aveant,(0.75 if aveant==12.0 else 0.0),10.0) elif dryrun: aveant = 12.0 pb_asec = pbcoeff*0.29979/qa.convert(qa.quantity(model_center),'GHz')['value']/aveant*3600.*180/pl.pi else: raise Exception, tpmstoimage+" not found." # default PSF from PB of antenna imbeam = {'major': qa.quantity(pb_asec,'arcsec'), 'minor': qa.quantity(pb_asec,'arcsec'), 'positionangle': qa.quantity(0.0,'deg')} # Common imaging parameters sdim_param = dict(infiles=[tpmstoimage], overwrite=overwrite, phasecenter=model_refdir, mode='channel', nchan=model_nchan, start=0, width=1) if True: #SF gridding msg("Generating TP image using 'SF' kernel.",origin='simanalyze') beamsamp = 9 sfcell_asec = pb_asec/beamsamp sfcell = qa.tos(qa.quantity(sfcell_asec, "arcsec")) cell_asec = [qa.convert(cell[0],"arcsec")['value'], qa.convert(cell[1],"arcsec")['value']] if cell_asec[0] > sfcell_asec or \ cell_asec[1] > sfcell_asec: # imregrid() may not work properly for regrid of # small to large cell msg("The requested cell size is too large to invoke SF gridding. Please set cell size <= %f arcsec or grid TP MS '%s' manually" % (sfcell_asec, tpmstoimage),priority="error",origin='simanalyze') sfsupport = 6 temp_out = tpimage+"0" temp_cell = [sfcell, sfcell] # too small - is imsize too small to start with? # needs to cover all pointings. temp_imsize = [int(pl.ceil(cell_asec[0]/sfcell_asec*imsize[0])), int(pl.ceil(cell_asec[1]/sfcell_asec*imsize[1]))] msg("Using predefined algorithm to define grid parameters.",origin='simanalyze') msg("SF gridding summary",origin='simanalyze') msg("- Antenna primary beam: %f arcsec" % pb_asec,origin='simanalyze') msg("- Image pixels per antenna PB (predefined): %f" % beamsamp,origin='simanalyze') msg("- Cell size (arcsec): [%s, %s]" % (temp_cell[0], temp_cell[1]),origin='simanalyze') msg("- Imsize to cover final TP image area: [%d, %d] (type: %s)" % (temp_imsize[0], temp_imsize[1], type(temp_imsize[0])),origin='simanalyze') msg("- convolution support: %d" % sfsupport,origin='simanalyze') # kernel specific imaging parameters sdim_param['gridfunction'] = 'SF' sdim_param['convsupport'] = sfsupport sdim_param['outfile'] = temp_out sdim_param['imsize'] = temp_imsize sdim_param['cell'] = temp_cell msg(get_taskstr('sdimaging', sdim_param), priority="info") if not dryrun: sdimaging(**sdim_param) if not os.path.exists(temp_out): raise RuntimeError, "TP imaging failed." # Scale image by convolved beam / antenna primary beam ia.open(temp_out) imbeam = ia.restoringbeam() ia.close() beam_area_ratio = qa.getvalue(qa.convert(imbeam['major'], "arcsec")) \ * qa.getvalue(qa.convert(imbeam['minor'], "arcsec")) \ / pb_asec**2 msg("Scaling TP image intensity by %f." % (beam_area_ratio),origin='simanalyze') temp_in = temp_out temp_out = temp_out + ".scaled" immath(imagename=temp_in, mode='evalexpr', expr="IM0*%f" % (beam_area_ratio), outfile=temp_out) if not os.path.exists(temp_out): raise RuntimeError, "TP image scaling failed." 
# Regrid TP image to final resolution msg("Regridding TP image to final resolution",origin='simanalyze') msg("- cell size (arecsec): [%s, %s]" % (cell[0], cell[1]),origin='simanalyze') msg("- imsize: [%d, %d]" % (imsize[0], imsize[1]),origin='simanalyze') if not dryrun: ia.open(temp_out) newcsys = ia.coordsys() ia.close() dir_idx = newcsys.findcoordinate("direction")['world'] newcsys.setreferencepixel([imsize[0]/2., imsize[1]/2.], type="direction") incr = newcsys.increment(type='direction')['numeric'] newincr = [incr[0]*cell_asec[0]/sfcell_asec, incr[1]*cell_asec[1]/sfcell_asec,] newcsys.setincrement(newincr, type="direction") # sdtemplate = imregrid(imagename=temp_out, template="get") sdtemplate['csys'] = newcsys.torecord() for idx in range(len(dir_idx)): sdtemplate['shap'][ dir_idx[idx] ] = imsize[idx] imregrid(imagename=temp_out, interpolation="cubic", template=sdtemplate, output=tpimage, overwrite=overwrite) del newcsys, sdtemplate, incr, newincr, dir_idx del temp_out, temp_cell, temp_imsize, sfcell_asec, cell_asec else: #PB grid msg("Generating TP image using 'PB' kernel.",origin='simanalyze') # Final TP cell and image size. # imsize and cell are already int and quantum arrays sdimsize = imsize sdcell = [qa.tos(cell[0]), qa.tos(cell[1])] ### TODO: need to set phasecenter properly based on imdirection # kernel specific imaging parameters sdim_param['gridfunction'] = 'PB' sdim_param['outfile'] = tpimage sdim_param['imsize'] = sdimsize sdim_param['cell'] = sdcell msg(get_taskstr('sdimaging', sdim_param), priority="info") if not dryrun: sdimaging(**sdim_param) del sdimsize, sdcell # TODO: Define PSF of image here # for now use default # get image beam size form TP image if os.path.exists(tpimage): ia.open(tpimage) beam = ia.restoringbeam() ia.close() if sd_only: bmarea = beam['major']['value']*beam['minor']['value']*1.1331 #arcsec2 bmarea = bmarea/(cell[0]['value']*cell[1]['value']) # bm area in pix else: del beam #del beam msg('generation of total power image '+tpimage+' complete.',origin='simanalyze') # update TP ms name the for following steps sdmsfile = tpmstoimage sd_any = True imagename = re.split('.image$',tpimage)[0] # End of single dish imaging part outflat_current = False convsky_current = False if image and len(mstoimage) > 0: # for reruns foo=mstoimage[0] foo=foo.replace(".ms","") foo=foo.replace(project,"") foo=foo.replace("/","") project=project+foo imagename = fileroot + "/" + project # get nfld, sourcefieldlist, from (interfm) ms if it was not just created # TODO make work better for multiple mstoimage (figures below) if os.path.exists(mstoimage[0]): tb.open(mstoimage[0]+"/SOURCE") code = tb.getcol("CODE") sourcefieldlist = pl.where(code=='OBJ')[0] nfld = len(sourcefieldlist) tb.done() elif dryrun: nfld=1 # HACK msfile = mstoimage[0] # set cleanmode automatically (for interfm) if nfld == 1: cleanmode = "csclean" else: cleanmode = "mosaic" # clean insists on using an existing model if its present if os.path.exists(imagename+".image"): shutil.rmtree(imagename+".image") if os.path.exists(imagename+".model"): shutil.rmtree(imagename+".model") # An image in fileroot/ has priority if len(modelimage) > 0 and os.path.exists(fileroot+"/"+modelimage): modelimage = fileroot + "/" + modelimage msg("Found modelimage, %s." 
% modelimage,origin='simanalyze') # in simdata we use imdirection instead of model_refdir if not myutil.isdirection(imdirection,halt=False): imdirection=model_refdir myutil.imclean(mstoimage,imagename, cleanmode,cell,imsize,imdirection, interactive,niter,threshold,weighting, outertaper,pbcor,stokes, #sourcefieldlist=sourcefieldlist, modelimage=modelimage,mask=mask,dryrun=dryrun) # create imagename.flat and imagename.residual.flat: if not dryrun: myutil.flatimage(imagename+".image",verbose=verbose) myutil.flatimage(imagename+".residual",verbose=verbose) outflat_current = True # feather if featherimage: if not os.path.exists(featherimage): raise Exception,"Could not find featherimage "+featherimage else: featherimage="" if tpimage: # if you set modelimage, then it won't force tpimage into # featherimage. this could be hard to explain # to the user. if os.path.exists(tpimage) and not os.path.exists(modelimage): featherimage=tpimage if os.path.exists(featherimage): msg("feathering the interfermetric image "+imagename+".image with "+featherimage,origin='simanalyze',priority="info") from feather import feather # TODO call with params? msg("feather('"+imagename+".feather.image','"+imagename+".image','"+featherimage+"')",priority="info") if not dryrun: feather(imagename+".feather.image",imagename+".image",featherimage) # copy residual flat image shutil.copytree(imagename+".residual.flat",imagename+".feather.residual.flat") imagename=imagename+".feather" # but replace combined flat image myutil.flatimage(imagename+".image",verbose=verbose) if verbose: msg(" ") msg("done inverting and cleaning",origin='simanalyze') if not is_array_type(cell): cell = [cell,cell] if len(cell) <= 1: cell = [qa.quantity(cell[0]),qa.quantity(cell[0])] else: cell = [qa.quantity(cell[0]),qa.quantity(cell[1])] cell = [qa.abs(cell[0]),qa.abs(cell[0])] # get beam from output clean image if verbose: msg("getting beam from "+imagename+".image",origin='simanalyze') if os.path.exists(imagename+".image"): ia.open(imagename+".image") beam = ia.restoringbeam() ia.close() # model has units of Jy/pix - calculate beam area from clean image # (even if we are not plotting graphics) bmarea = beam['major']['value']*beam['minor']['value']*1.1331 #arcsec2 bmarea = bmarea/(cell[0]['value']*cell[1]['value']) # bm area in pix msg("synthesized beam area in output pixels = %f" % bmarea,origin='simanalyze') if image: # show model, convolved model, clean image, and residual if grfile: file = fileroot + "/" + project + ".image.png" else: file = "" else: mslist=[] if dryrun: grscreen=False grfile=False analyze=False if image and len(mstoimage) > 0: if grscreen or grfile: myutil.newfig(multi=[2,2,1],show=grscreen) # create regridded and convolved sky model image myutil.convimage(modelflat,imagename+".image.flat") convsky_current = True # don't remake this for analysis in this run disprange = [] # passing empty list causes return of disprange # original sky regridded to output pixels but not convolved with beam discard = myutil.statim(modelflat+".regrid",disprange=disprange,showstats=False) myutil.nextfig() # convolved sky model - units of Jy/bm disprange = [] discard = myutil.statim(modelflat+".regrid.conv",disprange=disprange) myutil.nextfig() # clean image - also in Jy/beam # although because of DC offset, better to reset disprange disprange = [] discard = myutil.statim(imagename+".image.flat",disprange=disprange) myutil.nextfig() if len(mstoimage) > 0: myutil.nextfig() # clean residual image - Jy/bm discard = 
myutil.statim(imagename+".residual.flat",disprange=disprange) myutil.endfig(show=grscreen,filename=file) ##################################################################### # analysis if analyze: if not os.path.exists(imagename+".image"): if os.path.exists(fileroot+"/"+imagename+".image"): imagename=fileroot+"/"+imagename else: msg("Can't find a simulated image - expecting "+imagename,priority="error") return False # we should have skymodel.flat created above if not image: if not os.path.exists(imagename+".image"): msg("you must image before analyzing.",priority="error") return False # get beam from output clean image if verbose: msg("getting beam from "+imagename+".image",origin="analysis") ia.open(imagename+".image") beam = ia.restoringbeam() ia.close() # model has units of Jy/pix - calculate beam area from clean image cell = myutil.cellsize(imagename+".image") cell= [ qa.convert(cell[0],'arcsec'), qa.convert(cell[1],'arcsec') ] # (even if we are not plotting graphics) bmarea = beam['major']['value']*beam['minor']['value']*1.1331 #arcsec2 bmarea = bmarea/(cell[0]['value']*cell[1]['value']) # bm area in pix msg("synthesized beam area in output pixels = %f" % bmarea) # flat output:? if the user manually cleaned, this may not exist outflat = imagename + ".image.flat" if (not outflat_current) or (not os.path.exists(outflat)): # create imagename.flat and imagename.residual.flat myutil.flatimage(imagename+".image",verbose=verbose) if os.path.exists(imagename+".residual"): myutil.flatimage(imagename+".residual",verbose=verbose) else: if showresidual: msg(imagename+".residual not found -- residual will not be plotted",priority="warn") showresidual = False outflat_current = True # regridded and convolved input:? if not convsky_current: myutil.convimage(modelflat,imagename+".image.flat") convsky_current = True # now should have all the flat, convolved etc even if didn't run "image" # make difference image. # immath does Jy/bm if image but only if ia.setbrightnessunit("Jy/beam") in convimage() convolved = modelflat + ".regrid.conv" difference = imagename + '.diff' diff_ia = ia.imagecalc(difference, "'%s' - '%s'" % (convolved, outflat), overwrite=True) diff_ia.setbrightnessunit("Jy/beam") # get rms of difference image for fidelity calculation #ia.open(difference) diffstats = diff_ia.statistics(robust=True, verbose=False,list=False) diff_ia.close() del diff_ia maxdiff = diffstats['medabsdevmed'] if maxdiff != maxdiff: maxdiff = 0. if type(maxdiff) != type(0.): if maxdiff.__len__() > 0: maxdiff = maxdiff[0] else: maxdiff = 0. # Make fidelity image. absdiff = imagename + '.absdiff' calc_ia = ia.imagecalc(absdiff, "max(abs('%s'), %f)" % (difference, maxdiff/pl.sqrt(2.0)), overwrite=True) calc_ia.close() fidelityim = imagename + '.fidelity' calc_ia = ia.imagecalc(fidelityim, "abs('%s') / '%s'" % (convolved, absdiff), overwrite=True) calc_ia.close() msg("fidelity image calculated",origin="analysis") # scalar fidelity absconv = imagename + '.absconv' calc_ia = ia.imagecalc(absconv, "abs('%s')" % convolved, overwrite=True) if ia.isopen(): ia.close() #probably not necessary calc_ia.close() del calc_ia ia.open(absconv) modelstats = ia.statistics(robust=True, verbose=False,list=False) maxmodel = modelstats['max'] if maxmodel != maxmodel: maxmodel = 0. if type(maxmodel) != type(0.): if maxmodel.__len__() > 0: maxmodel = maxmodel[0] else: maxmodel = 0. 
ia.close() scalarfidel = maxmodel/maxdiff msg("fidelity range (max model / rms difference) = "+str(scalarfidel),origin="analysis") # now, what does the user want to actually display? # need MS for showuv and showpsf if not image: msfile = fileroot + "/" + project + ".ms" elif sd_only: # imaged and single dish only msfile = tpmstoimage # psf is not available for SD only sim if os.path.exists(msfile) and myutil.ismstp(msfile,halt=False): if showpsf: msg("single dish simulation -- psf will not be plotted",priority='warn') showpsf = False if (not image) and (not os.path.exists(msfile)): if showpsf or showuv: msg("No image is generated in this run. Default MS, '%s', does not exist -- uv and psf will not be plotted" % msfile,priority='warn') showpsf = False showuv = False # if the order in the task input changes, change it here too figs = [showuv,showpsf,showmodel,showconvolved,showclean,showresidual,showdifference,showfidelity] nfig = figs.count(True) if nfig > 6: msg("only displaying first 6 selected panels in graphic output",priority="warn") if nfig <= 0: return True if nfig < 4: multi = [1,nfig,1] else: if nfig == 4: multi = [2,2,1] else: multi = [2,3,1] if grfile: file = fileroot + "/" + project + ".analysis.png" else: file = "" if grscreen or grfile: myutil.newfig(multi=multi,show=grscreen) # if order in task parameters changes, change here too if showuv: # TODO loop over all ms - show all UV including zero if len(mslist)>1: msg("Using only "+msfile+" for uv plot",priority="warn",origin='simanalyze') tb.open(msfile) rawdata = tb.getcol("UVW") tb.done() pl.box() maxbase = max([max(rawdata[0,]),max(rawdata[1,])]) # in m klam_m = 300/qa.convert(model_center,'GHz')['value'] pl.plot(rawdata[0,]/klam_m,rawdata[1,]/klam_m,'b,') pl.plot(-rawdata[0,]/klam_m,-rawdata[1,]/klam_m,'b,') ax = pl.gca() ax.yaxis.LABELPAD = -4 pl.xlabel('u[klambda]',fontsize='x-small') pl.ylabel('v[klambda]',fontsize='x-small') pl.axis('equal') # Add zero-spacing (single dish) if not yet plotted # TODO make this a check over all ms # if predict_sd and not myutil.ismstp(msfile,halt=False): # pl.plot([0.],[0.],'r,') myutil.nextfig() if showpsf: if image: psfim = imagename + ".psf" else: psfim = project + ".quick.psf" if not os.path.exists(psfim): if len(mslist)>1: msg("Using only "+msfile+" for psf generation",priority="warn") im.open(msfile) # TODO spectral parms im.defineimage(cellx=qa.tos(model_cell[0]),nx=max([minimsize,128])) if os.path.exists(psfim): shutil.rmtree(psfim) im.approximatepsf(psf=psfim) # beam is set above (even in "analyze" only) # note that if image, beam has fields 'major' whereas if not, it # has fields like 'bmaj'. 
# beam=im.fitpsf(psf=psfim) im.done() ia.open(psfim) beamcs = ia.coordsys() beam_array = ia.getchunk(axes=[beamcs.findcoordinate("spectral")['pixel'][0],beamcs.findcoordinate("stokes")['pixel'][0]],dropdeg=True) nn = beam_array.shape xextent = nn[0]*cell_asec*0.5 xextent = [xextent,-xextent] yextent = nn[1]*cell_asec*0.5 yextent = [-yextent,yextent] flipped_array = beam_array.transpose() ttrans_array = flipped_array.tolist() ttrans_array.reverse() pl.imshow(ttrans_array,interpolation='bilinear',cmap=pl.cm.jet,extent=xextent+yextent,origin="bottom") psfim.replace(project+"/","") pl.title(psfim,fontsize="x-small") b = qa.convert(beam['major'],'arcsec')['value'] pl.xlim([-3*b,3*b]) pl.ylim([-3*b,3*b]) ax = pl.gca() pl.text(0.05,0.95,"bmaj=%7.1e\nbmin=%7.1e" % (beam['major']['value'],beam['minor']['value']),transform = ax.transAxes,bbox=dict(facecolor='white', alpha=0.7),size="x-small",verticalalignment="top") ia.close() myutil.nextfig() disprange = [] # first plot will define range if showmodel: discard = myutil.statim(modelflat+".regrid",incell=cell,disprange=disprange,showstats=False) myutil.nextfig() disprange = [] if showconvolved: discard = myutil.statim(modelflat+".regrid.conv") # if disprange gets set here, it'll be Jy/bm myutil.nextfig() if showclean: # own scaling because of DC/zero spacing offset discard = myutil.statim(imagename+".image.flat") myutil.nextfig() if showresidual: # it gets its own scaling discard = myutil.statim(imagename+".residual.flat") myutil.nextfig() if showdifference: # it gets its own scaling. discard = myutil.statim(imagename+".diff") myutil.nextfig() if showfidelity: # it gets its own scaling. discard = myutil.statim(imagename+".fidelity",showstats=False) myutil.nextfig() myutil.endfig(show=grscreen,filename=file) sim_min,sim_max,sim_rms,sim_units = myutil.statim(imagename+".image.flat",plot=False) # if not displaying still print stats: # 20100505 ia.stats changed to return Jy/bm: msg('Simulation rms: '+str(sim_rms/bmarea)+" Jy/pix = "+ str(sim_rms)+" Jy/bm",origin="analysis") msg('Simulation max: '+str(sim_max/bmarea)+" Jy/pix = "+ str(sim_max)+" Jy/bm",origin="analysis") #msg('Simulation rms: '+str(sim_rms)+" Jy/pix = "+ # str(sim_rms*bmarea)+" Jy/bm",origin="analysis") #msg('Simulation max: '+str(sim_max)+" Jy/pix = "+ # str(sim_max*bmarea)+" Jy/bm",origin="analysis") msg('Beam bmaj: '+str(beam['major']['value'])+' bmin: '+str(beam['minor']['value'])+' bpa: '+str(beam['positionangle']['value']),origin="analysis") # cleanup - delete newmodel, newmodel.flat etc # flat kept by user request CAS-5509 # if os.path.exists(imagename+".image.flat"): # shutil.rmtree(imagename+".image.flat") if os.path.exists(imagename+".residual.flat"): shutil.rmtree(imagename+".residual.flat") # .flux.pbcoverage is nessesary for feather. #if os.path.exists(imagename+".flux.pbcoverage"): # shutil.rmtree(imagename+".flux.pbcoverage") absdiff = imagename + '.absdiff' if os.path.exists(absdiff): shutil.rmtree(absdiff) absconv = imagename + '.absconv' if os.path.exists(absconv): shutil.rmtree(absconv) # if os.path.exists(imagename+".diff"): # shutil.rmtree(imagename+".diff") if os.path.exists(imagename+".quick.psf") and os.path.exists(imagename+".psf"): shutil.rmtree(imagename+".quick.psf") finalize_tools() if myutil.isreport(): myutil.closereport() except TypeError, e: finalize_tools() #msg("simanalyze -- TypeError: %s" % e,priority="error") casalog.post("simanalyze -- TypeError: %s" % e, priority="ERROR") raise TypeError, e return
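# simanalyze converts the restored beam into an area in output pixels via the factor 1.1331,
# i.e. pi/(4 ln 2) for an elliptical Gaussian, so that Jy/beam statistics can be restated per
# pixel. That conversion in isolation (a sketch, not a CASA call):
def beam_area_pixels(bmaj_asec, bmin_asec, cellx_asec, celly_asec):
    """Gaussian beam area in output pixels; 1.1331 ~= pi/(4*ln 2)."""
    bmarea_asec2 = bmaj_asec * bmin_asec * 1.1331
    return bmarea_asec2 / (cellx_asec * celly_asec)

# e.g. a 1.2" x 0.9" beam on 0.2" pixels covers about 30.6 pixels
print(beam_area_pixels(1.2, 0.9, 0.2, 0.2))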
# Taken from PySparse # Made by Dominique Orban import pylab # original PySparse logo #irow = [0, 1, 1, 1, 2, 2, 3, 3, 0] #jcol = [2, 0, 1, 3, 1, 2, 0, 2, 3] irow = [0, 0, 0, 1, 2, 2, 3, 3] jcol = [0, 1, 3, 2, 0, 3, 1, 3] nrow = ncol = 4 ms = 60 fig = pylab.figure() ax = fig.gca() ax.plot(jcol, irow, 'ks', markersize=ms, linestyle='None') ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') ax.set_xticks([]) ax.set_yticks([]) ax.set_xlim(xmin=-1, xmax=ncol) ax.set_ylim(ymin=nrow, ymax=-1) ax.set_aspect('equal') pylab.box(on=False) pylab.show()
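A hedged alternative to the hand-placed markers above is matplotlib's spy, which draws the sparsity pattern of a sparse matrix directly; the row/column pattern is reused from the snippet, and scipy is an extra dependency assumed here:

import numpy as np
import scipy.sparse as sp
import pylab

irow = [0, 0, 0, 1, 2, 2, 3, 3]
jcol = [0, 1, 3, 2, 0, 3, 1, 3]
A = sp.coo_matrix((np.ones(len(irow)), (irow, jcol)), shape=(4, 4))

pylab.spy(A, markersize=20)   # one square marker per stored nonzero
pylab.xticks([])
pylab.yticks([])
pylab.box(on=False)
pylab.show()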
def ellipse_convergence(asp=2,width=1e-2, Nadapt=10, eta_list=0.04*pyexp2(-array(range(15))*pylog(2)/2), \ use_adapt=True, problem=2, outname='', CGorderL = [2, 3], noplot=False, octaveimpl=False): ### SETUP SOLUTION sx = Symbol('sx'); sy = Symbol('sy'); width_ = Symbol('ww'); asp_ = Symbol('a') rrpy = pysqrt(sx*sx/asp_+sy*sy*asp_) if problem == 2: stepfunc = 0.5+165./104./width_*(rrpy-0.25)-20./13./width_**3*(rrpy-0.25)**3-102./13./width_**5*(rrpy-0.25)**5+240./13./width_**7*(rrpy-0.25)**7 elif problem == 1: stepfunc = 0.5+15./8./width_*(rrpy-0.25)-5./width_**3*(rrpy-0.25)**3+6./width_**5*(rrpy-0.25)**5 else: stepfunc = 0.5+1.5/width_*(rrpy-0.25)-2/width_**3*(rrpy-0.25)**3 ddstepfunc = str(diff(stepfunc,sx,sx)+diff(stepfunc,sy,sy)).replace('sx','x[0]').replace('sy','x[1]').replace('x[0]**2','(x[0]*x[0])').replace('x[1]**2','(x[1]*x[1])') stepfunc = str(stepfunc).replace('sx','x[0]').replace('sy','x[1]').replace('x[0]**2','(x[0]*x[0])').replace('x[1]**2','(x[1]*x[1])') #REPLACE ** with pow stepfunc = stepfunc.replace('(a*(x[1]*x[1]) + (x[0]*x[0])/a)**(1/2)','sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a)') ddstepfunc = ddstepfunc.replace('(a*(x[1]*x[1]) + (x[0]*x[0])/a)**(1/2)','sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a)') ddstepfunc = ddstepfunc.replace('(a*(x[1]*x[1]) + (x[0]*x[0])/a)**(3/2)','pow(a*x[1]*x[1]+x[0]*x[0]/a,1.5)') ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**2','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,2.)') ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**3','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,3.)') ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**4','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,4.)') ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**5','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,5.)') ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**6','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,6.)') stepfunc = stepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**3','pow(sqrt(a*x[1]*x[1] + x[0]*x[0]/a) - 0.25,3.)') stepfunc = stepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**5','pow(sqrt(a*x[1]*x[1] + x[0]*x[0]/a) - 0.25,5.)') stepfunc = stepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**7','pow(sqrt(a*x[1]*x[1] + x[0]*x[0]/a) - 0.25,7.)') testsol = '(0.25-ww/2<sqrt(x[0]*x[0]/a+x[1]*x[1]*a) && sqrt(x[0]*x[0]/a+x[1]*x[1]*a) < 0.25+ww/2 ? (' + stepfunc + ') : 0) + (0.25+ww/2<sqrt(x[0]*x[0]/a+x[1]*x[1]*a) ? 1 : 0)' # testsol = '(0.25-ww/2<sqrt(x[0]*x[0]+x[1]*x[1]) && sqrt(x[0]*x[0]+x[1]*x[1]) < 0.25+ww/2 ? (' + stepfunc + ')) : (0.25<sqrt(x[0]*x[0]+x[1]*x[1]) ? 1 : 0)' ddtestsol = '0.25-ww/2<sqrt(x[0]*x[0]/a+x[1]*x[1]*a) && sqrt(x[0]*x[0]/a+x[1]*x[1]*a) < 0.25+ww/2 ? 
(' + ddstepfunc + ') : 0' # A = array([[0.5,0.5**3,0.5**5],[1,3*0.5**2,5*0.5**4],[0,6*0.5,20*0.5**3]]); b = array([0.5,0,0]) # from numpy.linalg import solve as pysolve #15/8,-5,6 # X = pysolve(A,b); from numpy import linspace; xx = linspace(-0.5,0.5,100) # from pylab import plot as pyplot; pyplot(xx,X[0]*xx+X[1]*xx**3+X[2]*xx**5,'-b') # rrpy = pysqrt(sx*sx+sy*sy) # # ddstepfunc = str(diff(stepfunc,sx,sx)+diff(stepfunc,sy,sy)).replace('sx','x[0]').replace('sy','x[1]').replace('x[0]**2','(x[0]*x[0])').replace('x[1]**2','(x[1]*x[1])') # stepfunc = str(stepfunc).replace('sx','x[0]').replace('sy','x[1]').replace('x[0]**2','(x[0]*x[0])').replace('x[1]**2','(x[1]*x[1])') # #REPLACE ** with pow # ddstepfunc = ddstepfunc.replace('(a*(x[1]*x[1]) + (x[0]*x[0])/a)**(3/2)','pow(a*x[1]*x[1]+x[0]*x[0]/a,1.5)') # ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**2','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,2.)') # ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**3','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,3.)') # ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**4','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,4.)') # stepfunc = stepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**3','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,3.)') # stepfunc = stepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**5','pow(sqrt(a*x[1]*x[1]+x[0]*x[0]/a) - 0.25,5.)') # testsol = '(0.25-ww/2<sqrt(x[0]*x[0]+x[1]*x[1]) && sqrt(x[0]*x[0]+x[1]*x[1]) < 0.25+ww/2 ? (' + stepfunc + ') : 0) + (0.25+ww/2<sqrt(x[0]*x[0]+x[1]*x[1]) ? 1 : 0)' ## testsol = '(0.25-ww/2<sqrt(x[0]*x[0]+x[1]*x[1]) && sqrt(x[0]*x[0]+x[1]*x[1]) < 0.25+ww/2 ? (' + stepfunc + ')) : (0.25<sqrt(x[0]*x[0]+x[1]*x[1]) ? 1 : 0)' # ddtestsol = '0.25-ww/2<sqrt(x[0]*x[0]+x[1]*x[1]) && sqrt(x[0]*x[0]+x[1]*x[1]) < 0.25+ww/2 ? (' + ddstepfunc + ') : 0' # else: # problem == 0: # rrpy = pysqrt(sx*sx+sy*sy) # #'if(t<2*WeMax,0,if(t<4*WeMax,0.5+3/2/(2*WeMax)*(t-3*WeMax)-2/(2*WeMax)^3*(t-3*WeMax)^3,1))'; %0.5+3/2/dx*(x-xc)-2/dx^3*(x-xc)^3 # ddstepfunc = str(diff(stepfunc,sx,sx)+diff(stepfunc,sy,sy)).replace('sx','x[0]').replace('sy','x[1]').replace('x[0]**2','(x[0]*x[0])').replace('x[1]**2','(x[1]*x[1])') # stepfunc = str(stepfunc).replace('sx','x[0]').replace('sy','x[1]').replace('x[0]**2','(x[0]*x[0])').replace('x[1]**2','(x[1]*x[1])') # #REPLACE ** with pow # ddstepfunc = ddstepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**2','pow(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25,2.)') # ddstepfunc = ddstepfunc.replace('(a*(x[1]*x[1]) + (x[0]*x[0])/a)**(3/2)','pow(a*(x[1]*x[1]) + (x[0]*x[0])/a,1.5)') # stepfunc = stepfunc.replace('(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25)**3','pow(sqrt(a*(x[1]*x[1]) + (x[0]*x[0])/a) - 0.25,3.)') # testsol = '0.25-ww/2<sqrt(x[0]*x[0]+x[1]*x[1]) && sqrt(x[0]*x[0]+x[1]*x[1]) < 0.25+ww/2 ? (' + stepfunc + ') : (0.25<sqrt(x[0]*x[0]+x[1]*x[1]) ? 1 : 0)' # ddtestsol = '0.25-ww/2<sqrt(x[0]*x[0]+x[1]*x[1]) && sqrt(x[0]*x[0]+x[1]*x[1]) < 0.25+ww/2 ? 
(' + ddstepfunc + ') : 0' ddtestsol = ddtestsol.replace('a**2','(a*a)').replace('ww**2','(ww*ww)').replace('ww**3','pow(ww,3.)').replace('ww**4','pow(ww,4.)').replace('ww**5','pow(ww,5.)').replace('ww**6','pow(ww,6.)').replace('ww**7','pow(ww,7.)') testsol = testsol.replace('ww**2','(ww*ww)').replace('ww**3','pow(ww,3.)').replace('ww**5','pow(ww,5.)').replace('ww**7','pow(ww,7.)') ddtestsol = ddtestsol.replace('ww',str(width)).replace('a',str(asp)) testsol = testsol.replace('ww',str(width)).replace('a',str(asp)) ddtestsol = '-('+ddtestsol+')' def boundary(x): return x[0]+0.5 < DOLFIN_EPS or 0.5-x[0] < DOLFIN_EPS or x[1]+0.5 < DOLFIN_EPS or 0.5-x[1] < DOLFIN_EPS for CGorder in CGorderL: dofs = [] L2errors = [] #for eta in [0.16, 0.08, 0.04, 0.02, 0.01, 0.005, 0.0025] #, 0.0025/2, 0.0025/4, 0.0025/8]: # for eta in eta_list: ### SETUP MESH meshsz = int(round(80*0.005/(eta*(bool(use_adapt)==False)+0.05*(bool(use_adapt)==True)))) if (not bool(use_adapt)) and meshsz > 80: continue mesh = RectangleMesh(-0.0,-0.0,0.5*sqrt(asp),0.5/sqrt(asp),meshsz,meshsz,"left/right") # PERFORM TEN ADAPTATION ITERATIONS for iii in range(Nadapt): V = FunctionSpace(mesh, "CG", CGorder); dis = TrialFunction(V); dus = TestFunction(V); u = Function(V) #V2 = FunctionSpace(mesh, "CG", CGorder+2) R = Expression(ddtestsol) #interpolate(Expression(ddtestsol),V2) a = inner(grad(dis), grad(dus))*dx L = R*dus*dx bc = DirichletBC(V, Expression(testsol), boundary) #Constant(0.) solve(a == L, u, bc) if not bool(use_adapt): break H = metric_pnorm(u, eta, max_edge_ratio=1+49*(use_adapt!=2), p=2) H = logproject(H) if iii != Nadapt-1: mesh = adapt(H, octaveimpl=octaveimpl, debugon=False) L2error = errornorm(Expression(testsol), u, degree_rise=CGorder+2, norm_type='L2') dofs.append(len(u.vector().array())) L2errors.append(L2error) log(INFO+1,"%1dX ADAPT<->SOLVE complete: DOF=%5d, error=%0.0e" % (Nadapt, dofs[len(dofs)-1], L2error)) # PLOT MESH + solution figure() testf = interpolate(u ,FunctionSpace(mesh,'CG',1)) testfe = interpolate(Expression(testsol),FunctionSpace(mesh,'CG',1)) vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG" ,1)) zz = testf.vector().array()[vtx2dof]; zz[zz==1] -= 1e-16 hh=tricontourf(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells(),zz,100,cmap=get_cmap('binary')) colorbar(hh) axis('equal'); axis('off'); box('off'); xlim([0,0.5*sqrt(asp)]); ylim([0, 0.5/sqrt(asp)]); savefig('solution.png',dpi=300) figure() hold('on'); triplot(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells(),color='r',linewidth=0.5); hold('off') axis('equal'); box('off') axis('off'); xlim([0,0.5*sqrt(asp)]); ylim([0, 0.5/sqrt(asp)]) savefig(outname+'final_mesh_CG2.png',dpi=300) #; savefig('outname+final_mesh_CG2.eps',dpi=300) #PLOT ERROR figure() zz = pyabs(testf.vector().array()-testfe.vector().array())[vtx2dof]; zz[zz==1] -= 1e-16 hh=tricontourf(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells(),zz,100,cmap=get_cmap('binary')) colorbar(hh); axis('equal'); box('off'); title('error') # PLOT L2error graph figure() pyloglog(dofs,L2errors,'-b.',linewidth=2,markersize=16); xlabel('Degree of freedoms'); ylabel('L2 error') # SAVE SOLUTION dofs = array(dofs); L2errors = array(L2errors) fid = open("DOFS_L2errors_CG"+str(CGorder)+outname+".mpy",'w') pickle.dump([dofs,L2errors],fid) fid.close(); #LOAD SAVED SOLUTIONS fid = open("DOFS_L2errors_CG2"+outname+".mpy",'r') [dofs,L2errors] = pickle.load(fid) fid.close() # PERFORM FITS ON LAST THREE POINTS NfitP = 9 I = array(range(len(dofs)-NfitP,len(dofs))) slope,ints = 
polyfit(pylog(dofs[I]), pylog(L2errors[I]), 1) if slope < -0.7: fid = open("DOFS_L2errors_CG2_fit"+outname+".mpy",'w') pickle.dump([dofs,L2errors,slope,ints],fid) fid.close() log(INFO+1,'succes') else: os.system('rm '+outname+'.lock') log(INFO+1,'fail') #PLOT THEM TOGETHER if CGorderL != [2]: fid = open("DOFS_L2errors_CG3.mpy",'r') [dofs_old,L2errors_old] = pickle.load(fid) fid.close() slope2,ints2 = polyfit(pylog(dofs_old[I]), pylog(L2errors_old[I]), 1) figure() pyloglog(dofs,L2errors,'-b.',dofs_old,L2errors_old,'--b.',linewidth=2,markersize=16) hold('on'); pyloglog(dofs,pyexp2(ints)*dofs**slope,'-r',dofs_old,pyexp2(ints2)*dofs_old**slope2,'--r',linewidth=1); hold('off') xlabel('Degree of freedoms'); ylabel('L2 error') legend(['CG2','CG3',"%0.2f*log(DOFs)" % slope, "%0.2f*log(DOFs)" % slope2]) #legend(['new data','old_data']) # savefig('comparison.png',dpi=300) #savefig('comparison.eps'); if not noplot: show()
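The fit at the end of the convergence study regresses log(L2 error) on log(DOFs); the slope is the observed convergence rate and the intercept fixes the prefactor used for the reference lines. A standalone sketch of that step with made-up numbers (the arrays below are illustrative, not results from the run):

import numpy as np

dofs = np.array([1e3, 4e3, 1.6e4, 6.4e4])          # hypothetical DOF counts
l2errors = np.array([1e-2, 2.5e-3, 6e-4, 1.6e-4])  # hypothetical L2 errors

slope, intercept = np.polyfit(np.log(dofs), np.log(l2errors), 1)
print("observed convergence rate: %.2f" % slope)   # negative: error decays under refinement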
elif grp == 'Occipital': pl.subplot(3,4,11) pl.title('Right Occipital', fontsize = 'large') ####Reading the Evoked data structure for (c,l) in zip(condName, colorList): evoked = mne.fiff.Evoked(fname, setno = 'epochs_'+c , baseline = (None, 0)) #evoked = mne.fiff.Evoked(fname, setno = c , baseline = (None, 0)) ##Use this if you are using condition numbers badChanSet = set(evoked.info['bads']) print c good_chan=list(set(chan_list)) sel = mne.fiff.pick_types(evoked.info,meg=False, eeg=False, include = good_chan) data = evoked.data[sel] ###Computing the MEG RMS from the evoked data for the specified condition times = evoked.times*1000 square = np.power(data, 2) meanSquare = np.mean(square, 0) rms = np.power(meanSquare, 0.5) ###Plotting the MEG rms value for the current condition pl.plot(times, rms*1e13, color = l, linewidth=2) pl.ylim([ymin,ymax]) pl.xlim([xmin,xmax]) pl.box('off') pl.tick_params(axis='both',right='off',top='off') pl.yticks(np.array([0.,4.,8.,12.,16.,20.,24.,28.,32.])) pl.xticks(np.array([0, 200, 400, 600])) pl.savefig(out_fname) pl.show()
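The trace plotted above is the RMS over the selected channels at each time sample: square the data, average across channels, then take the square root. A minimal sketch of that computation on a dummy (channels x times) array:

import numpy as np

data = np.random.randn(20, 600) * 1e-13     # fake evoked data: 20 channels, 600 samples
rms = np.sqrt(np.mean(data ** 2, axis=0))   # average over channels, then square root
print(rms.shape)                            # one RMS value per time sample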
def minimal_example(width=2e-2, Nadapt=10, eta = 0.01): ### CONSTANTS meshsz = 40 hd = Constant(width) ### SETUP MESH mesh = RectangleMesh(Point(-0.5,-0.5),Point(0.5,0.5),1*meshsz,1*meshsz,"left/right") ### DERIVE FORCING TERM angle = pi/8 #rand*pi/2 sx = Symbol('sx'); sy = Symbol('sy'); width_ = Symbol('ww'); aa = Symbol('aa') testsol = pytanh((sx*pycos(aa)+sy*pysin(aa))/width_) ddtestsol = str(diff(testsol,sx,sx)+diff(testsol,sy,sy)).replace('sx','x[0]').replace('sy','x[1]') #replace ** with pow ddtestsol = ddtestsol.replace('tanh((x[0]*sin(aa) + x[1]*cos(aa))/ww)**2','pow(tanh((x[0]*sin(aa) + x[1]*cos(aa))/ww),2.)') ddtestsol = ddtestsol.replace('cos(aa)**2','pow(cos(aa),2.)').replace('sin(aa)**2','pow(sin(aa),2.)').replace('ww**2','(ww*ww)') #insert vaulues ddtestsol = ddtestsol.replace('aa',str(angle)).replace('ww',str(width)) testsol = str(testsol).replace('sx','x[0]').replace('sy','x[1]').replace('aa',str(angle)).replace('ww',str(width)) ddtestsol = "-("+ddtestsol+")" def boundary(x): return x[0]-mesh.coordinates()[:,0].min() < DOLFIN_EPS or mesh.coordinates()[:,0].max()-x[0] < DOLFIN_EPS \ or mesh.coordinates()[:,1].min()+0.5 < DOLFIN_EPS or mesh.coordinates()[:,1].max()-x[1] < DOLFIN_EPS # PERFORM TEN ADAPTATION ITERATIONS for iii in range(Nadapt): V = FunctionSpace(mesh, "CG" ,2); dis = TrialFunction(V); dus = TestFunction(V); u = Function(V) a = inner(grad(dis), grad(dus))*dx L = Expression(ddtestsol)*dus*dx bc = DirichletBC(V, Expression(testsol), boundary) solve(a == L, u, bc) startTime = time() H = metric_pnorm(u, eta, max_edge_length=3., max_edge_ratio=None) H = logproject(H) if iii != Nadapt-1: mesh = adapt(H) L2error = errornorm(Expression(testsol), u, degree_rise=4, norm_type='L2') log(INFO+1,"total (adapt+metric) time was %0.1fs, L2error=%0.0e, nodes: %0.0f" % (time()-startTime,L2error,mesh.num_vertices())) # # PLOT MESH # figure() coords = mesh.coordinates().transpose() # triplot(coords[0],coords[1],mesh.cells(),linewidth=0.1) # #savefig('mesh.png',dpi=300) #savefig('mesh.eps'); figure() #solution testf = interpolate(Expression(testsol),FunctionSpace(mesh,'CG',1)) vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG" ,1)) zz = testf.vector().array()[vtx2dof] hh=tricontourf(coords[0],coords[1],mesh.cells(),zz,100) colorbar(hh) # savefig('solution.png',dpi=300) #savefig('solution.eps'); figure() #analytical solution testfe = interpolate(u,FunctionSpace(mesh,'CG',1)) zz = testfe.vector().array()[vtx2dof] hh=tricontourf(coords[0],coords[1],mesh.cells(),zz,100) colorbar(hh) #savefig('analyt.png',dpi=300) #savefig('analyt.eps'); figure() #error zz -= testf.vector().array()[vtx2dof]; zz[zz==1] -= 1e-16 hh=tricontourf(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells(),zz,100,cmap=get_cmap('binary')) colorbar(hh) hold('on'); triplot(mesh.coordinates()[:,0],mesh.coordinates()[:,1],mesh.cells(),color='r',linewidth=0.5); hold('off') axis('equal'); box('off'); title('error') show()
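The forcing term above is obtained symbolically: the analytic solution is differentiated twice in each direction and the negated Laplacian becomes the right-hand side. A small sketch of the same derivation with SymPy, using lambdify instead of the string substitutions (the width and angle values mirror the defaults above):

import sympy as sp

x, y = sp.symbols('x y')
width = 2e-2
angle = sp.pi / 8
u = sp.tanh((x * sp.cos(angle) + y * sp.sin(angle)) / width)   # analytic solution
f = -(sp.diff(u, x, 2) + sp.diff(u, y, 2))                     # forcing term -Laplacian(u)

f_num = sp.lambdify((x, y), f, 'numpy')   # numeric callable for the right-hand side
print(f_num(0.1, 0.1))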
def maximal_example(eta_list=array([0.001]), Nadapt=5, timet=1., period=2 * pi): ### CONSTANTS ### SETUP SOLUTION #testsol = '0.1*sin(50*x+2*pi*t/T)+atan(-0.1/(2*x - sin(5*y+2*pi*t/T)))'; sx = Symbol('sx') sy = Symbol('sy') sT = Symbol('sT') st = Symbol('st') spi = Symbol('spi') testsol = 0.1 * pysin(50 * sx + 2 * spi * st / sT) + pyatan( -0.1, 2 * sx - pysin(5 * sy + 2 * spi * st / sT)) ddtestsol = str(diff(testsol, sx, sx) + diff(testsol, sy, sy)).replace( 'sx', 'x[0]').replace('sy', 'x[1]').replace('spi', 'pi') # replacing **P with pow(,P) ddtestsol = ddtestsol.replace("(2*x[0] - sin(5*x[1] + 2*pi*st/sT))**2", "pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.)") ddtestsol = ddtestsol.replace("cos(5*x[1] + 2*pi*st/sT)**2", "pow(cos(5*x[1] + 2*pi*st/sT),2.)") ddtestsol = ddtestsol.replace( "(pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.) + 0.01)**2", "pow((pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.) + 0.01),2.)") ddtestsol = ddtestsol.replace( "(1 + 0.01/pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.))**2", "pow(1 + 0.01/pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.),2.)") ddtestsol = ddtestsol.replace("(2*x[0] - sin(5*x[1] + 2*pi*st/sT))**5", "pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),5.)") #insert values ddtestsol = ddtestsol.replace('sT', str(period)).replace('st', str(timet)) testsol = str(testsol).replace('sx', 'x[0]').replace('sy', 'x[1]').replace( 'spi', 'pi').replace('sT', str(period)).replace('st', str(timet)) ddtestsol = "-(" + ddtestsol + ")" error_list = [] dof_list = [] for eta in eta_list: meshsz = 40 ### SETUP MESH # mesh = RectangleMesh(0.4,-0.1,0.6,0.3,1*meshsz,1*meshsz,"left/right") #shock # mesh = RectangleMesh(-0.75,-0.3,-0.3,0.5,1*meshsz,1*meshsz,"left/right") #waves mesh = RectangleMesh(-1.5, -0.25, 0.5, 0.75, 1 * meshsz, 1 * meshsz, "left/right") #shock+waves def boundary(x): return near(x[0],mesh.coordinates()[:,0].min()) or near(x[0],mesh.coordinates()[:,0].max()) \ or near(x[1],mesh.coordinates()[:,1].min()) or near(x[1],mesh.coordinates()[:,1].max()) # PERFORM ONE ADAPTATION ITERATION for iii in range(Nadapt): startTime = time() V = FunctionSpace(mesh, "CG", 2) dis = TrialFunction(V) dus = TestFunction(V) u = Function(V) # R = interpolate(Expression(ddtestsol),V) a = inner(grad(dis), grad(dus)) * dx L = Expression(ddtestsol) * dus * dx # bc = DirichletBC(V, Expression(testsol), boundary) solve(a == L, u, bc) soltime = time() - startTime startTime = time() H = metric_pnorm(u, eta, max_edge_ratio=50, CG0H=3, p=4) metricTime = time() - startTime if iii != Nadapt - 1: mesh = adapt(H) TadaptTime = time() - startTime L2error = errornorm(Expression(testsol), u, degree_rise=4, norm_type='L2') printstr = "%5.0f elements, %0.0e L2error, adapt took %0.0f %% of the total time, (%0.0f %% of which was the metric calculation)" \ % (mesh.num_cells(),L2error,TadaptTime/(TadaptTime+soltime)*100,metricTime/TadaptTime*100) if len(eta_list) == 1: print(printstr) else: error_list.append(L2error) dof_list.append(len(u.vector().array())) print(printstr) if len(dof_list) > 1: dof_list = array(dof_list) error_list = array(error_list) figure() loglog(dof_list, error_list, '.b-', linewidth=2, markersize=16) xlabel('Degree of freedoms') ylabel('L2 error') # # PLOT MESH # figure() coords = mesh.coordinates().transpose() # triplot(coords[0],coords[1],mesh.cells(),linewidth=0.1) # #savefig('mesh.png',dpi=300) #savefig('mesh.eps'); figure() #solution testf = interpolate(Expression(testsol), FunctionSpace(mesh, 'CG', 1)) vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG", 1)) zz = testf.vector().array()[vtx2dof] hh = 
tricontourf(coords[0], coords[1], mesh.cells(), zz, 100) colorbar(hh) #savefig('solution.png',dpi=300) #savefig('solution.eps'); figure() #analytical solution testfe = interpolate(u, FunctionSpace(mesh, 'CG', 1)) zz = testfe.vector().array()[vtx2dof] hh = tricontourf(coords[0], coords[1], mesh.cells(), zz, 100) colorbar(hh) #savefig('analyt.png',dpi=300) #savefig('analyt.eps'); figure() #error zz -= testf.vector().array()[vtx2dof] zz[zz == 1] -= 1e-16 hh = tricontourf(mesh.coordinates()[:, 0], mesh.coordinates()[:, 1], mesh.cells(), zz, 100, cmap=get_cmap('binary')) colorbar(hh) hold('on') triplot(mesh.coordinates()[:, 0], mesh.coordinates()[:, 1], mesh.cells(), color='r', linewidth=0.5) hold('off') axis('equal') box('off') title('error') show()
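Both adaptation examples render nodal fields with tricontourf straight from the mesh coordinates and cell connectivity. The same call works on any triangulation; a minimal sketch with a hand-built matplotlib Triangulation and a synthetic field (no FEniCS involved):

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as mtri

xs, ys = np.meshgrid(np.linspace(0, 1, 30), np.linspace(0, 1, 30))
tri = mtri.Triangulation(xs.ravel(), ys.ravel())   # Delaunay triangulation of the grid points
zz = np.sin(4 * np.pi * xs.ravel()) * np.cos(4 * np.pi * ys.ravel())

hh = plt.tricontourf(tri, zz, 100, cmap=plt.get_cmap('binary'))
plt.colorbar(hh)
plt.axis('equal')
plt.axis('off')
plt.box(on=False)
plt.show()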
def main(): with serial.Serial('COM6', 115200, timeout=0) as ser: # initialization i = 0 x = np.zeros(50) y = np.zeros(50) ratio_array = np.zeros(50) eeg_array = np.zeros(50) status = False sampling_data_set = 50 int_data = 0 # number line fig, ax = plt.subplots(figsize=(10, 10)) # figure size fig.set_figheight(1) # adjust height ax.tick_params(labelbottom=True, bottom=False) # x-axis settings ax.tick_params(labelleft=False, left=False) # y-axis settings # display the values on the number line while True: try: rri_data = ser.read_all() rri_data_str = rri_data.decode('utf-8') if rri_data_str != '': int_data = int(rri_data_str) i = i + 1 if (50 < int_data) and (int_data < 300): # treat the arrays as queues: append the new element and drop the oldest x = np.append(x, i) x = np.delete(x, 0) y = np.append(y, int_data) y = np.delete(y, 0) if i > 50: sdnn_sigma = 0 rmssd_sigma = 0 sdnn = 0 rmssd = 0 ratio = 0 # compute the mean and variance of the RRI s = sum(y) N = len(y) ave_rri = s / N for index in range(sampling_data_set): sdnn_sigma += (y[index] - ave_rri)**2 for index in range(sampling_data_set - 1): rmssd_sigma += (y[index] - y[index + 1])**2 sdnn = math.sqrt(sdnn_sigma / 50) rmssd = math.sqrt(rmssd_sigma / (50 - 1)) ratio = sdnn / rmssd ratio_array = np.append(ratio_array, ratio) ratio_array = np.delete(ratio_array, 0) xmin = 0 # minimum of the number line xmax = max(ratio_array) # maximum of the number line plt.tight_layout() # auto-adjust the layout plt.scatter(ratio_array, eeg_array, s=10, c='r') # scatter plot #plt.hlines(y=0, xmin=xmin, xmax=xmax) # horizontal axis #plt.vlines(x=[i for i in range(xmin, xmax + 1, 1)], ymin=-0.04, ymax=0.04) # major tick lines #plt.vlines(x=[i / 10 for i in range(xmin * 10, xmax * 10 + 1, 1)], ymin=-0.02, # ymax=0.02) # minor tick lines line_width = 10 # step between tick labels plt.xticks( np.arange(xmin, xmax + line_width, line_width)) # tick labels pylab.box(False) # remove the frame plt.pause(.01) if rmssd < 150: print('y:', y) print('s:', s) print('N:', N) print('ave:', ave_rri) print('rmssd_sigma:', rmssd_sigma) print('rmssd:', rmssd) print('-------------') elif i == 5: print('Please wait a moment') elif i == 40: print('Only a few steps remaining') elif (45 < i) and (i <= 50): print('Remaining:', (51 - i), 'steps') except KeyboardInterrupt: plt.close() pylab.close() ser.close()
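The inner loop above computes two standard heart-rate-variability statistics over the last 50 RR intervals: SDNN (standard deviation of the intervals) and RMSSD (root mean square of successive differences), plus their ratio. A vectorised sketch of the same formulas on a dummy RR array:

import numpy as np

rri = np.random.uniform(60, 290, size=50)           # dummy RR values in the 50-300 range accepted above
sdnn = np.sqrt(np.mean((rri - rri.mean()) ** 2))    # population std of the intervals (divides by N)
rmssd = np.sqrt(np.mean(np.diff(rri) ** 2))         # RMS of successive differences (divides by N-1)
ratio = sdnn / rmssd
print("SDNN=%.1f RMSSD=%.1f ratio=%.2f" % (sdnn, rmssd, ratio))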
# In[] from pylab import box def show_graph(src): img = plt.imread(src) xpixels,ypixels = img.shape[0],img.shape[1] dpi = 100 margin = 0.01 figsize = (1+margin)*ypixels / dpi, (1+margin)*xpixels / dpi fig = plt.figure(figsize=figsize, dpi=dpi) ax = fig.add_axes([margin,margin,1-2*margin,1-2*margin]) ax.tick_params(labelbottom='off',bottom='off') ax.tick_params(labelleft='off',left='off') ax.imshow(img, interpolation='none') box('off') plt.show() show_graph('result/loss.png') show_graph('result/accuracy.png') # In[] # define a function that uses the model to make predictions def predict(model, X): # if there is only a single sample, add a batch dimension if len(X.shape) == 1: pred = model.predictor(X[None, ...]).data.argmax() # two or more samples are used as-is else: pred = model.predictor(X).data.argmax(axis=1) return pred
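The predict helper above adds a batch dimension for a single sample and then takes the argmax over the class scores. The branching can be exercised without the model itself; a small NumPy-only sketch of the argmax step (the score arrays are made up):

import numpy as np

def argmax_predict(scores):
    # single sample: 1-D score vector -> scalar class index
    if scores.ndim == 1:
        return scores.argmax()
    # batch: one class index per row
    return scores.argmax(axis=1)

print(argmax_predict(np.array([0.1, 0.7, 0.2])))                        # -> 1
print(argmax_predict(np.array([[0.1, 0.7, 0.2], [0.9, 0.05, 0.05]])))   # -> [1 0]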