def plotScores(inputPath, outPath, mapping):
    with open(inputPath) as f:
        data = f.readlines()
    headers = data[0].split("\t")[1::3]
    distances = []
    scores = [[] for i in range(len(headers))]
    for line in data[1:]:
        tokens = line.strip().split("\t")
        distances.append(int(tokens[0]))
        scoreTokens = tokens[1::3]
        for i in range(len(scoreTokens)):
            # store scores as floats so plot() receives numeric data
            scores[i].append(float(scoreTokens[i]))
    # set colors
    cm = plt.get_cmap('rainbow')
    ax = plt.gca()
    ax.set_color_cycle([cm(1. * i / len(headers)) for i in range(len(headers))])
    for i in range(len(headers)):
        if i % 2 == 0:
            plt.plot(distances, scores[i], linewidth=2)
        else:
            plt.plot(distances, scores[i], linewidth=2, linestyle="--")
    # map header to factor names
    factors = []
    for head in headers:
        factors.append(mapping[head.replace("\\", "/").split("/")[-1].split("-")[0]])
    ax.legend(factors)
    plt.ylabel("ChIP Coverage (per bp per peak)")
    plt.xlabel("Distance from Peak Center")
    plt.savefig(outPath)
    plt.close()
def planck_cmap(ncolors=256):
    """
    Returns a color map similar to the one used for the "Planck CMB Map".

    Parameters
    ----------
    ncolors : int, *optional*
        Number of color segments (default: 256).

    Returns
    -------
    cmap : matplotlib.colors.LinearSegmentedColormap instance
        Linear segmented color map.
    """
    from matplotlib.colors import LinearSegmentedColormap as cm
    segmentdata = {"red":   [(0.0, 0.00, 0.00), (0.1, 0.00, 0.00),
                             (0.2, 0.00, 0.00), (0.3, 0.00, 0.00),
                             (0.4, 0.00, 0.00), (0.5, 1.00, 1.00),
                             (0.6, 1.00, 1.00), (0.7, 1.00, 1.00),
                             (0.8, 0.83, 0.83), (0.9, 0.67, 0.67),
                             (1.0, 0.50, 0.50)],
                   "green": [(0.0, 0.00, 0.00), (0.1, 0.00, 0.00),
                             (0.2, 0.00, 0.00), (0.3, 0.30, 0.30),
                             (0.4, 0.70, 0.70), (0.5, 1.00, 1.00),
                             (0.6, 0.70, 0.70), (0.7, 0.30, 0.30),
                             (0.8, 0.00, 0.00), (0.9, 0.00, 0.00),
                             (1.0, 0.00, 0.00)],
                   "blue":  [(0.0, 0.50, 0.50), (0.1, 0.67, 0.67),
                             (0.2, 0.83, 0.83), (0.3, 1.00, 1.00),
                             (0.4, 1.00, 1.00), (0.5, 1.00, 1.00),
                             (0.6, 0.00, 0.00), (0.7, 0.00, 0.00),
                             (0.8, 0.00, 0.00), (0.9, 0.00, 0.00),
                             (1.0, 0.00, 0.00)]}
    return cm("Planck-like", segmentdata, N=int(ncolors), gamma=1.0)
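# A minimal usage sketch for planck_cmap (assumes matplotlib and numpy are
# available; the data array below is made up purely for illustration).
import numpy as np
import matplotlib.pyplot as plt

cmap = planck_cmap(ncolors=256)
demo = np.random.randn(64, 64)          # stand-in for a CMB-like map
plt.imshow(demo, cmap=cmap)
plt.colorbar()
plt.show()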
def gmmClustering(X, k=2, maxiter=3):
    my_io.startLog(__name__)
    logger = logging.getLogger(__name__)
    X, r = kmeans.kmeansClustering(X, 2, 1)
    (N, D) = np.shape(X)
    pi_k_old = [np.divide(len(np.where(r == kth)[0]), float(N)) for kth in range(k)]
    # mu_old = compute_muk(X, k, r)
    cova_old, mu_old = compute_cova(k, X, r)
    for i in range(maxiter):
        # if i == 1:
        #     pdb.set_trace()
        logger.info('ite: %d loss: %f', i, loss(X, mu_old, pi_k_old, cova_old))
        p = gmm_Esteps(X, pi_k_old, k, cova_old, mu_old)
        mu_new, cova_new, pi_k_new = gmm_Msteps(mu_old, X, p)
        pi_k_old = pi_k_new
        mu_old = mu_new
        cova_old = cova_new
    # matplotlib.cm = get_cmap("jet")
    cm = plt.get_cmap('jet')
    ax = plt.gca()
    # colors = ['r' if i == 0 else 'g' for i in ?]
    # colors = 'r'
    # ax.scatter(X[:, 0], X[:, 1], c=colors, alpha=0.8)
    # plt.show()
    for j in range(N):
        likelihood = p[1][j]
        color = cm(likelihood)
        plt.plot(X[j, 0], X[j, 1], "o", color=color)
    plt.show()
    for j in range(N):
        likelihood = p[0][j]
        color = cm(likelihood)
        plt.plot(X[j, 0], X[j, 1], "o", color=color)
    plt.show()
def gen_color3(N):
    import matplotlib.cm
    import random
    cm = matplotlib.cm.get_cmap('Accent')
    N = float(N)
    # Scale the 0-1 RGBA channels to integers; 255 (not 256) keeps the values
    # inside the valid 8-bit range.
    colors = [[int(x * 255) for x in cm(i / N)] for i in range(int(N))]
    random.shuffle(colors)
    for i in range(int(N)):
        yield colors[i]
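# A minimal usage sketch for gen_color3: draw five shuffled colors from the
# 'Accent' colormap (output order is random; the value shown is only an example).
for rgba in gen_color3(5):
    print(rgba)   # e.g. [127, 201, 127, 255]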
def colors_list(self):
    '''
    Returns a list of matplotlib color tuples ordered by the index of the
    clonofilter
    '''
    import pylab
    cm = pylab.get_cmap('gist_rainbow')
    clonofilters = sorted(self.clonofilters_all())
    returnable = [cm(1. * i / len(clonofilters))
                  for i in range(len(clonofilters))]
    return returnable
def colors_dict(self):
    '''
    Returns a dict with clonofilters as keys and matplotlib colors as values
    '''
    import pylab
    returnable = {}
    clonofilters = sorted(self.clonofilters_all())
    cm = pylab.get_cmap('gist_rainbow')
    for index, clonofilter in enumerate(clonofilters):
        returnable[clonofilter] = cm(1. * index / len(clonofilters))
    return returnable
def plot_connectivity_legend(num_lines=5, vmin=0, vmax=1, ax=P, cm=cm.jet):
    """Plot a legend that explains arrows in connectivity plot"""
    # TODO: Make it handle symmetric measures as well
    # Note: 'ax' is expected to be the pyplot module (P); 'fig' below is the
    # current axes returned by gca().
    vals = N.linspace(vmin, vmax, num_lines)
    pvals = N.linspace(0, 1, num_lines)
    fig = ax.gca()
    fig.axis('off')
    for i in range(num_lines):
        # ax.xlim([0, 1])
        ax.ylim([0, 1])
        ax.xlim([0, 1])
        arr1 = ax.Arrow(0.1, i * 0.1, 0.3, 0, width=(pvals[i] * 5 + 1) / 50,
                        ec=cm(pvals[i]), fc=cm(pvals[i]))
        fig.add_patch(arr1)
        ax.text(0.5, i * 0.1, "%.3f" % vals[i], va="center")
def export_colormap(cm, filename, entries=20):
    """
    Export a matplotlib colormap to a JSON file.

    The colormap is exported as a list of ``entries`` RGBA tuples for linearly
    spaced indices between 0 and 1 (inclusive). The list is part of a
    dictionary that also contains the minimum and maximum rent.
    """
    data = {
        'min': MIN_RENT,
        'max': MAX_RENT,
        'colors': cm(np.linspace(0, 1, entries)).tolist(),
    }
    with codecs.open(filename, 'w', encoding='utf8') as f:
        json.dump(data, f)
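# A minimal usage sketch for export_colormap. It assumes the module-level
# MIN_RENT/MAX_RENT constants (and codecs/json/np imports) are defined; the
# output filename is made up for illustration.
import matplotlib.pyplot as plt

export_colormap(plt.get_cmap('viridis'), 'viridis_ramp.json', entries=20)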
def get_line_style(default='o-', label=None, label_lookup=None,
                   i=None, i_max=None, i_markers=None, colormap=None):
    """
    colormap: str, default None
        colormap name. Try gist_rainbow. Requires i and i_max
    """
    # ls, marker, color
    kwargs = {}
    style = default
    # Do an initial update to get starting values of kwargs for use in
    # future configuration.
    if label and label in label_lookup:
        kwargs.update(label_lookup[label])
    else:
        print(label)
        # print label, label_lookup
    # Custom config of this function through kwargs
    config_vars = ['i', 'i_max', 'colormap']
    if 'i' in kwargs:
        i = kwargs.pop('i')
    if 'i_max' in kwargs:
        i_max = kwargs.pop('i_max')
    if 'colormap' in kwargs:
        colormap = kwargs.pop('colormap')
    # print colormap, i, i_max
    if i is not None:
        if colormap:
            import pylab as plt
            import numpy
            cm = getattr(plt.cm, colormap)
            colors = cm(numpy.linspace(0, 1, i_max))
            color = colors[i % i_max]
            kwargs['color'] = color
        if i_markers is None:
            kwargs['marker'] = 'o+x<>*^vsh.'[i % 11]
        else:
            kwargs['marker'] = i_markers[i % len(i_markers)]
    # This overrides anything else we may set.
    if label and label in label_lookup:
        kwargs.update(label_lookup[label])
    # Remove function-local configuration again:
    for var in config_vars:
        if var in kwargs:
            kwargs.pop(var)
    return kwargs
def powerHist():
    """ Plots number of hits occurred at each power level """
    # Get DETPOW/MEANPOW
    relpow = [row[0] / row[1] for table in data for row in table]
    # Plot histogram
    plt.figure()
    plt.clf()
    cm = plt.cm.get_cmap('jet')
    Y, X = np.histogram(relpow, bins=1000)  # arbitrary bin amount (must be an int)
    # Y, X = np.histogram(relpow, bins=100)  # arbitrary bin amount
    x_span = X.max() - X.min()
    C = [cm(((x - X.min()) / x_span)) for x in X]
    plt.bar(X[:-1], Y, color=C, width=X[1] - X[0], edgecolor="none")
    # plt.colorbar(orientation='vertical')
    plt.title('Power Histogram')
    plt.xlabel('Relative Power (DETPOW/MEANPOW)')
    plt.ylabel('Number of Hits')
    plt.autoscale(enable=True, axis='x', tight=True)
    plt.yscale('log', nonposy='clip')
def to_rgba(self):
    data = self.raster_data
    if self.is_enumerated:
        if self.enumeration_colors:
            cmap, norm = self.colormap()
            data2 = norm(data)  # np.clip((data - MIN) / (MAX - MIN), 0, 1)
            rgba = (cmap(data2) * 255).astype(int)
            rgba[:, :, 3] = np.logical_not(data.mask).astype(int) * 255
            return rgba
        else:
            raise NotImplementedError()
    # Not enumerated...
    if self.scalar_c_lims:
        MIN, MAX = self.scalar_c_lims
    else:
        MIN, MAX = data.min(), data.max()
    cm = self.colormap()
    data2 = np.clip((data - MIN) / (MAX - MIN), 0, 1)
    rgba = (cm(data2) * 255).astype(int)
    rgba[:, :, 3] = np.logical_not(data.mask).astype(int) * 255
    return rgba
def gray_scale_to_color_ramp(gray_scale, colormap, min_colormap_cut=None, max_colormap_cut=None,
                             alpha=False, output_8bit=True):
    """
    Turns normalized gray scale np.array to rgba (np.array of 4 np.arrays r, g, b, a).

    Parameters
    ----------
    gray_scale : np.array (2D)
        Normalized gray_scale img as np.array (0-1)
    colormap : str
        Colormap from matplotlib (https://matplotlib.org/3.3.2/tutorials/colors/colormaps.html)
    min_colormap_cut : float
        What lower part of colormap to cut to select part of colormap.
        Valid values are between 0 and 1, if 0.2 it cuts off (deletes) 20% of lower colors in colormap.
        If None cut is not applied.
    max_colormap_cut : float
        What upper part of colormap to cut to select part of colormap.
        Valid values are between 0 and 1, if 0.8 it cuts off (deletes) 20% of upper colors in colormap.
        If None cut is not applied.
    alpha : bool
        If True outputs 4D array RGBA, if False outputs 3D array RGB
    output_8bit : bool
        If true output values will be int 0-255 instead of normalized values.

    Returns
    -------
    rgba_out : np.array
        If alpha is False: np.array (3D: red 0-255, green 0-255, blue 0-255)
        If alpha is True: np.array (4D: red 0-255, green 0-255, blue 0-255, alpha 0-255)
    """
    cm = mpl.cm.get_cmap(colormap)
    if min_colormap_cut is not None or max_colormap_cut is not None:
        if min_colormap_cut is None:
            min_colormap_cut = 0.0
        if max_colormap_cut is None:
            max_colormap_cut = 1.0
        if min_colormap_cut > 1 or min_colormap_cut < 0 or max_colormap_cut > 1 or max_colormap_cut < 0:
            raise Exception(
                "rvt.blend_funct.gray_scale_to_color_ramp: min_colormap_cut and max_colormap_cut must be"
                " between 0 and 1!")
        if min_colormap_cut >= max_colormap_cut:
            raise Exception(
                "rvt.blend_funct.gray_scale_to_color_ramp: min_colormap_cut has to be smaller than"
                " max_colormap_cut!")
        cm = truncate_colormap(cmap=cm, minval=min_colormap_cut, maxval=max_colormap_cut)

    rgba_mtpl_out = cm(gray_scale)  # normalized rgb
    if output_8bit:
        rgba_mtpl_out = np.uint8(rgba_mtpl_out * 255)  # 0-1 scale to 0-255 and change type to uint8
    if not alpha:
        rgba_out = np.array([rgba_mtpl_out[:, :, 0],
                             rgba_mtpl_out[:, :, 1],
                             rgba_mtpl_out[:, :, 2]])
    else:
        rgba_out = np.array([rgba_mtpl_out[:, :, 0],
                             rgba_mtpl_out[:, :, 1],
                             rgba_mtpl_out[:, :, 2],
                             rgba_mtpl_out[:, :, 3]])
    return rgba_out
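# A minimal usage sketch for gray_scale_to_color_ramp: color a normalized
# array with the middle 60% of 'viridis'. It assumes the module's
# truncate_colormap helper and the mpl/np imports are available; the input
# array here is synthetic and only for illustration.
import numpy as np

shade = np.random.rand(10, 10)  # values already normalized to 0-1
rgb = gray_scale_to_color_ramp(shade, "viridis",
                               min_colormap_cut=0.2, max_colormap_cut=0.8,
                               alpha=False, output_8bit=True)
print(rgb.shape)  # (3, 10, 10): one 0-255 array per channel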
import random
import matplotlib.cm as cm
import matplotlib.colors as colors

NUM_COLORS = 2
cm = plt.get_cmap('Paired')
file_list = sys.argv[1:]
fig = plt.figure()
# plt.suptitle(r'Pdf of collision times as a function of $\eta$', fontsize=16)
ax = fig.add_subplot(111)
ax.grid(True)
plt.xlabel(r'$t_c$', fontsize='15')
plt.ylabel(r'$P(t_c)$', fontsize='15')
ax.set_color_cycle([cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)])
i = 0
Markers = ['x', '+', '*', 's', 'd', 'v', '^', '<', '>', 'p', 'h', '.', '+',
           '*', 'o', 'x', '^', '<', 'h', '.', '>', 'p', 's', 'd', 'v', 'o',
           'x', '+', '*', 's', 'd', 'v', '^', '<', '>', 'p', 'h', '.']
col = ['r', 'b']
# labels for comparison between fit and mean
labels = ['fit', 'computed']
for f in file_list:
    # eta_temp = float(f[-13:-4])
    eta, mfp = np.loadtxt(f, unpack=True, usecols=(0, 1))
    width = 0.7 * (eta[1] - eta[0])
    color = cm(1. * i / NUM_COLORS)
    # plt.bar(eta, mfp, width=width, label=r'$\eta=%.5lf$' % (eta_temp),
    #         alpha=0.8, color=color, linewidth=0)
def plot_minmax_levels(var_list, ID, times):
    print('computing min/max at each level')
    cm = plt.cm.get_cmap('coolwarm')
    zrange = dx[2] * krange
    minmax = {}
    minmax['time'] = times
    for var_name in var_list:
        minmax[var_name] = {}
        minmax[var_name]['max'] = np.zeros((len(times), kmax), dtype=np.double)
        minmax[var_name]['min'] = np.zeros((len(times), kmax), dtype=np.double)
    # # # minmax['w'] = {}
    # # # minmax['s'] = {}
    # # # minmax['temp'] = {}
    for var_name in var_list:
        print('')
        print('variable: ' + var_name)
        fig, (ax1, ax2) = plt.subplots(1, 2, sharey='all', figsize=(12, 6))
        for it, t0 in enumerate(times):
            count_color = np.double(it) / len(times)
            if var_name == 'theta':
                s_var = read_in_netcdf_fields('s', os.path.join(path_fields, str(t0) + '.nc'))
                var = theta_s(s_var)
            else:
                var = read_in_netcdf_fields(var_name, os.path.join(path_fields, str(t0) + '.nc'))
            for k in range(kmax):
                minmax[var_name]['max'][it, k] = np.amax(var[:, :, k])
                minmax[var_name]['min'][it, k] = np.amin(var[:, :, k])
            del var
            minn = ax1.plot(minmax[var_name]['min'][it, :], zrange, '-',
                            color=cm(count_color), label='t=' + str(it) + 's')
            maxx = ax2.plot(minmax[var_name]['max'][it, :], zrange, '-',
                            color=cm(count_color), label='t=' + str(it) + 's')
        ax1.legend(loc='upper center', bbox_to_anchor=(2.75, 0.75),
                   fancybox=True, shadow=True, ncol=2, fontsize=10)
        fig.subplots_adjust(bottom=0.12, right=.75, left=0.1, top=0.95, wspace=0.25)
        ax1.set_xlim(np.amin(minmax[var_name]['min']), np.amax(minmax[var_name]['min']))
        ax2.set_xlim(np.amin(minmax[var_name]['max']), np.amax(minmax[var_name]['max']))
        ax1.set_xlabel('min(' + var_name + ') [m/s]')
        ax2.set_xlabel('max(' + var_name + ') [m/s]')
        ax1.set_ylabel('height z [m]')
        fig.suptitle(ID)
        fig_name = var_name + '_' + str(ID) + '_minmax_levels.png'
        fig.savefig(os.path.join(path_out_figs, fig_name))
        plt.close(fig)
        print('')
    return minmax
data_types = args.data.split(",")
bound_types = args.bounds.split(",")

fig = plt.figure(figsize=(11, 3.6), dpi=120)
ax = fig.add_subplot()

max_bound = 0
data = {}
for data_type in sorted(data_types):
    lines = load_data(data_type)
    bounds = create_real(lines, args.N, args.d)
    data[data_type] = lines
    ax.step(range(args.d), bounds, label=data_type + " (sample)",
            where="post", linestyle=":", color=cm(0 / 10))  # + f" ({sum(bounds)}b)"
    if max(bounds) > max_bound:
        max_bound = max(bounds)

for data_type in sorted(data_types):
    for bound_type in sorted(bound_types):
        bound_name = "geom" if bound_type == "geomN" else bound_type
        bounds = create_bounds(data_type, bound_type, args.N, args.d)
        mean_err, std_err = estimate_errors(data[data_type], args.N, args.d, bounds)
        residuals = estimate_residuals(data[data_type], args.N, args.d, bounds)
        ax.boxplot(residuals, manage_ticks=False, notch=True, bootstrap=1000)
def plotTrace(data, stage=True):
    mxstage = 5
    cm = plt.get_cmap('rainbow')
    cdat = cm(data[:, -1] / mxstage)
    plt.scatter(data[:, 0], data[:, 1], c=cdat)
def plot_unrealisticness(self, curves, file_name, time=None, show=True, save=True, title=False, timeseries=False, evaluation=None, eval_label='MSE'): fig = plt.figure(figsize=(5, 4)) ax = fig.add_subplot(111) ax.grid(True) if timeseries: print("curves.shape", curves.shape, curves.shape[1]) NUM_COLORS = curves.shape[1] cm = plt.get_cmap('coolwarm') ax.set_prop_cycle(color=[ cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)[::-1] ]) if time is None: ax.plot(curves, linestyle='None', marker='o') else: ax.plot(time, curves, linestyle='None', marker='o') plt.xticks(rotation=20) custom_lines = [ Line2D([0], [0], color=cm(1.), lw=4), Line2D([0], [0], color=cm(0.), lw=4) ] # Shrink current axis's height by 10% on the bottom box = ax.get_position() ax.set_position([ box.x0, box.y0 - box.height * 0.1, box.width, box.height * 0.9 ]) # Put a legend below current axis ax.legend(custom_lines, ['Short End', 'Long End'], loc='upper center', bbox_to_anchor=(0.5, 1.15), fancybox=True, shadow=False, ncol=5) fig.subplots_adjust(bottom=0.2) # ax.legend(custom_lines, ['Short End', 'Long End']) else: if time is None: ax.plot(curves, linestyle='None', marker='o') ax.set_xlabel('Time (days)') else: ax.plot(time, curves, linestyle='None', marker='o') plt.xticks(rotation=20) ax.set_xlabel('Date') ax.set_ylabel('Latent Value') if evaluation is not None: ax2 = ax.twinx() ax2.plot(evaluation, 'k', label='eval', alpha=1) # ax2.bar(np.arange(len(evaluation)), evaluation, color='k', alpha=0.5) ax.set_ylabel("Price ($\$$)") ax2.set_ylabel(eval_label) box2 = ax2.get_position() ax2.set_position([ box2.x0, box2.y0 - box2.height * 0.1, box2.width, box2.height * 0.9 ]) if title: plt.title(file_name) if save: plt.savefig(self.config.get_filepath_img(file_name), dpi=300, bbox_inches='tight') plt.savefig(self.config.get_filepath_pgf(file_name), dpi=300, transparent=True) # , bbox_inches='tight' if show: plt.show() plt.close()
nonzeroE = np.where(magE > eps_plot)
zeroE = np.where(magE <= eps_plot)
E[nonzeroE, 0] = E[nonzeroE, 0] / magE[nonzeroE]
E[nonzeroE, 1] = E[nonzeroE, 1] / magE[nonzeroE]
magE[nonzeroE] = np.log10(magE[nonzeroE])
magE[np.where(magE < 0)] = 0
magE[zeroE] = 0

norm = colors.Normalize()
norm.autoscale(magE)
cm = cm.get_cmap("turbo")  # note: rebinds the matplotlib.cm module name to a colormap
sm = plt.cm.ScalarMappable(cmap=cm, norm=norm)
sm.set_array([])

fig1, ax1 = plt.subplots(dpi=400)
ax1.set_aspect('equal')
quiv_scale = np.max(E) * 75
plt.quiver(C[:, 0], C[:, 1], E[:, 0], E[:, 1], color=cm(magE),
           scale=quiv_scale, headwidth=2, headlength=3)
ax1.set_title('Electric Field Lines')
cb = fig1.colorbar(sm)
cb.ax.set_ylabel("log(|E [V/um]|), saturated at 0", rotation=90)
plt.show()
def plot_tree(ax, tree, cm='cividis', median=None, ignore=None):
    def maybe_expand(x):
        a = np.asanyarray(x)
        return a if len(a.shape) else a[np.newaxis]

    tree = tree.map(maybe_expand)
    min_v, max_v, depth = tree.min(), tree.max(), tree.depth()
    extra_ticks = [min_v, max_v]
    min_v, max_v = get_value_range(median, min_v, max_v)

    # Normalize tree values to [0, 1]
    rng = max_v - min_v
    # if rng one of 0, +inf, -inf, or nan
    if rng == 0 or rng == float('inf') or rng == float('-inf') or rng != rng:
        rng = float('nan')
    tree = tree.map(lambda xs: [(x - min_v) / rng for x in xs])

    acc = []
    acc.append([0.5, 0, 1, 0, 2 * math.pi, 0])  # makes sure 2pi = one revolution
    draw_tree(acc, tree, 1, range_theta[0], range_theta[1], depth, False, False, ignore=ignore)
    acc = sorted(acc, key=lambda x: x[-1])  # sort by depth
    df = pd.DataFrame(acc, columns=['v', 'r', 'r0', 'theta', 'dtheta', 'depth'])

    cm = plt.cm.get_cmap(cm) if isinstance(cm, str) else cm
    plot = ax.bar(df['theta'], df['r'], width=df['dtheta'], bottom=df['r0'],
                  color=cm(df['v']), align='edge')
    sm = plt.cm.ScalarMappable(cmap=cm, norm=plt.Normalize(min_v, max_v))
    sm.set_array(df['v'])
    if display_colorbar:
        cbar = plt.colorbar(sm, ax=ax, shrink=0.8, orientation='horizontal', pad=0.0)
        cbar.ax.tick_params(labelsize='small')
        cbar.ax.ticklabel_format(style='sci', axis='x', scilimits=(-3, 3))
    else:
        cbar = None

    ax.set_thetamin(range_theta[0] / 2 / math.pi * 360)
    ax.set_thetamax(range_theta[1] / 2 / math.pi * 360)
    ax.set_thetagrids([])
    ax.set_rgrids([])
    ax.grid(False)
    ax.set_axis_off()
    return plot, cbar
def getColors():
    COLORS = []
    cm = plt.cm.get_cmap('hsv', n)
    for i in np.arange(n):
        COLORS.append(cm(i))
    return COLORS
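# A minimal usage sketch for getColors: it reads the module-level `n` (number
# of colors), so define it before calling (the value 6 is arbitrary).
n = 6
palette = getColors()
print(len(palette))  # 6 RGBA tuples sampled from the 'hsv' colormap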
# reshape so it can be a rectangle
kpagecount_data = np.append(kpagecount, np.zeros((WIDTH - kpagecount.size % WIDTH,), dtype=np.int64))
kpagecount = np.reshape(kpagecount_data, (-1, WIDTH))
hilbert_curve(kpagecount_data, t=kpagecount, N=256)

kpageflags_data = np.append(kpageflags, np.zeros((WIDTH - kpageflags.size % WIDTH,), dtype=np.uint64))
kpageflags = np.reshape(kpageflags_data, (-1, WIDTH))
hilbert_curve(kpageflags_data, t=kpageflags, N=256)
# hilbert_curve(kpageflags_data, t=kpageflags, N=256, offset=65536, t_offset=[0, 256])

kpageflags_bounds = np.unique(kpageflags)
kpageflags_bounds = np.append(kpageflags_bounds, kpageflags_bounds[-1] + np.uint64(1))
kpageflags_colors = ['black']
cm = plt.get_cmap('gist_rainbow')
for i in range(kpageflags_bounds.size):
    color = cm(1. * i / (kpageflags_bounds.size))
    kpageflags_colors.append(color)
# np.random.shuffle(kpageflags_colors)
kpageflags_cm = colors.ListedColormap(kpageflags_colors)
kpageflags_norm = colors.BoundaryNorm(kpageflags_bounds, kpageflags_bounds.size)

kpageflags_str = []
for b in np.nditer(kpageflags_bounds):
    s = ""
    for i in range(64):
        if b & np.uint64(1 << i):
            s += "LERUDlASWIBMasbHTGuXnxtrmdPpOhcIPAE______________________________"[i]
    kpageflags_str.append(s)

kpagecount_bounds = np.unique(kpagecount)
kpagecount_bounds = np.append(kpagecount_bounds, kpagecount_bounds[-1] + 1)
kpagecount_colors = ['white', 'black', 'yellow']
def get_features(): """ scanpy examples: http://127.0.0.1:8000/features?db_name=1_scanpy_10xpbmc&feature=louvain http://127.0.0.1:8000/features?db_name=1_scanpy_10xpbmc&feature=expression&gene=SUMO3 seurat examples: http://127.0.0.1:8000/features?db_name=4_seurat_10xpbmc&feature=expression&gene=SUMO3 http://127.0.0.1:8000/features?db_name=4_seurat_10xpbmc&feature=expression&gene=SUMO3 velocity examples: http://127.0.0.1:8000/features?db_name=3_velocity_pancrease&feature=clusters http://127.0.0.1:8000/features?db_name=3_velocity_pancrease&feature=expression&gene=Rbbp7 http://127.0.0.1:8000/features?db_name=3_velocity_pancrease&feature=velocity&embed=umap&time=None http://127.0.0.1:8000/features?db_name=3_velocity_pancrease&feature=velocity&embed=umap&time=1 http://127.0.0.1:8000/features?db_name=3_velocity_pancrease&feature=velocity&embed=umap&time=10 velocity grid examples: http://127.0.0.1:8000/features?db_name=3_velocity_pancrease&feature=velocity_grid&embed=umap&time=10 http://127.0.0.1:8000/features?db_name=3_velocity_pancrease&feature=velocity_grid&embed=umap&time=100 """ database = request.args.get("db_name") feature = request.args.get("feature") filename = glob(os.path.join(DATASET_DIRECTORY, f"{database}.*"))[0] db_type = get_dataset_type_adata(filename) if feature.lower() == "velocity": embed = request.args.get("embed") try: del adata except: pass if get_dataset_type_adata(database).lower() in [ "scanpy", "velocity", "seurat", "paga" ]: adata = sc.read(filename) else: adata = st.read(filename, file_format="pkl", workdir="./") list_metadata = [] if feature in get_available_annotations_adata(adata): # cluster columns adata.obs[feature] = adata.obs[feature].astype('category') if f"{feature}_colors" in adata.uns.keys(): dict_colors = { feature: dict( zip(adata.obs[feature].cat.categories, adata.uns[f"{feature}_colors"])) } else: dict_colors = { feature: dict( zip(adata.obs[feature].cat.categories, get_colors(adata, feature))) } for i in range(adata.shape[0]): dict_metadata = dict() dict_metadata["cell_id"] = adata.obs_names[i] dict_metadata["label"] = adata.obs[feature].tolist()[i] dict_metadata["clusters"] = adata.obs[feature].tolist()[i] dict_metadata["clusters_color"] = dict_colors[feature][ dict_metadata["clusters"]] list_metadata.append(dict_metadata) elif feature in ["expression", "rna"]: # pseudotime or latent_time columns gene = request.args.get("gene") if gene not in adata.var_names: return jsonify({}) else: if "time" in feature: values = adata.obs[feature] else: if db_type == "seurat": values = (adata[:, gene].layers["norm_data"].toarray()[:, 0] if isspmatrix(adata.layers["norm_data"]) else adata[:, gene].layers["norm_data"][:, 0]) else: values = (adata[:, gene].X.toarray()[:, 0] if isspmatrix( adata.X) else adata[:, gene].X[:, 0]) cm = mpl.cm.get_cmap("viridis", 512) norm = mpl.colors.Normalize(vmin=0, vmax=max(values), clip=True) list_metadata = [] for i, x in enumerate(adata.obs_names): dict_genes = dict() dict_genes["cell_id"] = x dict_genes["color"] = mpl.colors.to_hex(cm(norm(values[i]))) list_metadata.append(dict_genes) elif feature == "velocity": list_metadata = [] time = request.args.get("time") for i in range(adata.shape[0]): dict_coord_cells = dict() if isinstance(adata.obs_names[i], bytes): dict_coord_cells["cell_id"] = adata.obs_names[i].decode( "utf-8") else: dict_coord_cells["cell_id"] = adata.obs_names[i] dict_coord_cells["x0"] = str(adata.obsm[f"X_{embed}"][i, 0]) dict_coord_cells["y0"] = str(adata.obsm[f"X_{embed}"][i, 1]) dict_coord_cells["z0"] = 
str(adata.obsm[f"X_{embed}"][i, 2]) if time == "None": dict_coord_cells["x1"] = str( adata.obsm[f"velocity_{embed}"][i, 0]) dict_coord_cells["y1"] = str( adata.obsm[f"velocity_{embed}"][i, 1]) dict_coord_cells["z1"] = str( adata.obsm[f"velocity_{embed}"][i, 2]) elif time in list(map(str, [0.01, 0.1, 1, 5, 10, 20, 30, 50, 100])): dict_coord_cells["x1"] = str( adata.obsm[f"absolute_velocity_{embed}_{time}s"][i, 0]) dict_coord_cells["y1"] = str( adata.obsm[f"absolute_velocity_{embed}_{time}s"][i, 1]) dict_coord_cells["z1"] = str( adata.obsm[f"absolute_velocity_{embed}_{time}s"][i, 2]) else: return jsonify({}) list_metadata.append(dict_coord_cells) elif feature == "velocity_grid": list_metadata = [] time = request.args.get("time") p_mass = adata.uns['p_mass'] for i in np.where(p_mass >= 1)[0]: dict_coord_cells = dict() if time == "None": dict_coord_cells["x0"] = str(adata.uns[f"X_grid"][i, 0]) dict_coord_cells["y0"] = str(adata.uns[f"X_grid"][i, 1]) dict_coord_cells["z0"] = str(adata.uns[f"X_grid"][i, 2]) dict_coord_cells["x1"] = str(adata.uns[f"V_grid"][i, 0]) dict_coord_cells["y1"] = str(adata.uns[f"V_grid"][i, 1]) dict_coord_cells["z1"] = str(adata.uns[f"V_grid"][i, 2]) elif time in list(map(str, [0.01, 0.1, 1, 5, 10, 20, 50, 80, 100])): dict_coord_cells["x0"] = str(adata.uns[f"X_grid_{time}"][i, 0]) dict_coord_cells["y0"] = str(adata.uns[f"X_grid_{time}"][i, 1]) dict_coord_cells["z0"] = str(adata.uns[f"X_grid_{time}"][i, 2]) dict_coord_cells["x1"] = str(adata.uns[f"V_grid_{time}"][i, 0]) dict_coord_cells["y1"] = str(adata.uns[f"V_grid_{time}"][i, 1]) dict_coord_cells["z1"] = str(adata.uns[f"V_grid_{time}"][i, 2]) else: return jsonify({}) list_metadata.append(dict_coord_cells) elif feature == "paga": G = nx.from_numpy_matrix(adata.uns["paga"]["connectivities"].toarray()) adata.uns["paga"]["pos"] = get_paga3d_pos(adata) ## output coordinates of paga graph list_lines = [] for edge_i in G.edges(): dict_coord_lines = dict() dict_coord_lines["branch_id"] = [[str(edge_i[0]), str(edge_i[1])]] dict_coord_lines["xyz"] = [{ "x": pos[0], "y": pos[1], "z": pos[2] } for pos in adata.uns["paga"]["pos"][[edge_i[0], edge_i[1]], :]] list_lines.append(dict_coord_lines) ## output topology of paga graph dict_nodes = dict() list_edges = [] dict_nodename = { i: adata.obs[adata.uns["paga"]["groups"]].cat.categories[i] for i in G.nodes() } for node_i in G.nodes(): dict_nodes_i = dict() dict_nodes_i["node_name"] = dict_nodename[node_i] dict_nodes_i["xyz"] = { "x": adata.uns["paga"]["pos"][:, 0][node_i], "y": adata.uns["paga"]["pos"][:, 1][node_i], "z": adata.uns["paga"]["pos"][:, 2][node_i], } dict_nodes[node_i] = dict_nodes_i for edge_i in G.edges(): dict_edges = dict() dict_edges["nodes"] = [str(edge_i[0]), str(edge_i[1])] dict_edges["weight"] = adata.uns["paga"]["connectivities"][ edge_i[0], edge_i[1]] list_edges.append(dict_edges) list_metadata = {"nodes": dict_nodes, "edges": list_edges} elif feature == "curves": flat_tree = adata.uns['flat_tree'] epg = adata.uns['epg'] epg_node_pos = nx.get_node_attributes(epg, 'pos') ft_node_label = nx.get_node_attributes(flat_tree, 'label') ft_node_pos = nx.get_node_attributes(flat_tree, 'pos') list_curves = [] for edge_i in flat_tree.edges(): branch_i_pos = np.array( [epg_node_pos[i] for i in flat_tree.edges[edge_i]['nodes']]) df_coord_curve_i = pd.DataFrame(branch_i_pos) dict_coord_curves = dict() dict_coord_curves['branch_id'] = ft_node_label[ edge_i[0]] + '_' + ft_node_label[edge_i[1]] dict_coord_curves['xyz'] = [{ 'x': df_coord_curve_i.iloc[j, 0], 'y': 
df_coord_curve_i.iloc[j, 1], 'z': df_coord_curve_i.iloc[j, 2] } for j in range(df_coord_curve_i.shape[0])] list_curves.append(dict_coord_curves) list_metadata = list_curves del adata gc.collect() return jsonify({feature: list_metadata})
        return np.nan
    else:
        return dt.datetime(y, m, d).timetuple().tm_yday / dt.datetime(
            y, 12, 31).timetuple().tm_yday * 2 * np.pi

full_df['degree'] = full_df.apply(year_frac2, axis=1)
full_df = full_df.reset_index()
# full_df.to_csv('/workspace/Shared/Users/jschroder/TMP/full_df.csv')
# full_df = pd.read_csv('/workspace/Shared/Users/jschroder/TMP/full_df.csv', index_col=0)
cm = plt.get_cmap('rainbow')
# cm = sns.color_palette("cubehelix", 8)
length = full_df.degree.__len__()
color = [cm(i / (length - 1)) for i in range(length)]

fig = plt.figure()
ax = plt.subplot(111, projection='polar')
period = 1
for i in range(length):
    print(period)
    ax.plot(full_df.degree[i * period:((i + 1) * period + 1)],
            full_df.anomalies[i * period:((i + 1) * period + 1)],
            color=color[i * period], alpha=0.4, linewidth=0.8)

ax.set_thetagrids(np.linspace(360 / 24, 360 * 23 / 24, 12))
ax.set_theta_direction('clockwise')
ax.set_theta_offset(np.pi / 2)
ax.xaxis.set_ticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
def plot_2d(self, curves, file_name, curve2=None, time=None, show=True, save=True, title=False, timeseries=False, evaluation=None): fig = plt.figure(figsize=(5, 4)) ax = fig.add_subplot(111) ax.grid(True) if timeseries: print("curves.shape", curves.shape, curves.shape[1]) NUM_COLORS = curves.shape[1] cm = plt.get_cmap('coolwarm') ax.set_prop_cycle(color=[ cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)[::-1] ]) if time is None: ax.plot(curves) else: ax.plot(time, curves) plt.xticks(rotation=20) custom_lines = [ Line2D([0], [0], color=cm(1.), lw=4), Line2D([0], [0], color=cm(0.), lw=4) ] ax.legend(custom_lines, ['Short End', 'Long End']) ax.set_ylabel("Price ($\$$)") else: if isinstance(curves, pd.DataFrame): ax.plot(curves) plt.xticks(rotation=20) ax.set_xlabel('Date') elif time is None: ax.plot(curves) ax.set_xlabel('Time (days)') else: ax.plot(time, curves) plt.xticks(rotation=20) ax.set_xlabel('Date') if curve2 is not None: ax.plot(curve2, color='tab:orange') ax.set_ylabel('Latent Value') if evaluation is not None: # if isinstance(evaluation, pd.DataFrame): # _evaluation = evaluation # else: # _evaluation = pd.DataFrame(evaluation) ax2 = ax.twinx() ax2.plot(curves.index.values, evaluation, 'k', label='eval', alpha=0.5) ax2.set_ylabel("SMAPE") if title: plt.title(file_name) if save: plt.savefig(self.config.get_filepath_img(file_name), dpi=300, bbox_inches='tight') plt.savefig(self.config.get_filepath_pgf(file_name), dpi=300, transparent=True) # , bbox_inches='tight' if show: plt.show() plt.close()
if (plot_flow_flag) or (plot_lobes_flag):
    # Create plot
    fig = plt.figure()
    ax = fig.add_subplot(111)

    if (len(shape_name) > 0):
        # Read the shapefile
        sf = shapefile.Reader(shape_name)
        recs = sf.records()
        shapes = sf.shapes()
        Nshp = len(shapes)
        cm = plt.get_cmap('Dark2')
        cccol = cm(1. * np.arange(Nshp) / Nshp)
        for nshp in xrange(Nshp):
            ptchs = []
            pts = np.array(shapes[nshp].points)
            prt = shapes[nshp].parts
            par = list(prt) + [pts.shape[0]]
            for pij in xrange(len(prt)):
                ptchs.append(Polygon(pts[par[pij]:par[pij + 1]]))
            ax.add_collection(PatchCollection(ptchs, facecolor=cccol[nshp, :],
                                              edgecolor='k', linewidths=.1))
        print('')
def measure_lum_funcs(catl, path_to_mocks): volume = temp_dict.get('volume') mag_cen_arr = [] mag_n_arr = [] mag_err_arr = [] # box_id_arr = np.linspace(5001,5008,8) box_id_arr = np.array([0, 1, 2, 3]) mock_dir_arr = glob.glob(path_to_mocks + 'ECO_mvir*') for mock_dir in mock_dir_arr: for num in range(temp_dict.get('num_mocks')): filename = mock_dir + '/{0}_cat_{1}_Planck_memb_cat.hdf5'.\ format(temp_dict.get('mock_name'), num) mock_pd = reading_catls(filename) # Using the same survey definition as in mcmc smf # i.e excluding the buffer mock_pd = mock_pd.loc[ (mock_pd.cz.values >= temp_dict.get('min_cz')) & (mock_pd.cz.values <= temp_dict.get('max_cz')) & (mock_pd.M_r.values <= temp_dict.get('mag_limit')) & (mock_pd.logmstar.values >= temp_dict.get('mstar_limit'))] mag_cen, mag_edg, mag_n, mag_err, bw = cumu_num_dens( mock_pd.M_r.values, None, volume, True) mag_cen_arr.append(mag_cen) mag_n_arr.append(mag_n) mag_err_arr.append(mag_err) mag_cen_arr = np.array(mag_cen_arr) mag_n_arr = np.array(mag_n_arr) mag_err_arr = np.array(mag_err_arr) # Added this max cut so that it matches the data luminosity function used # for abundance matching in Victor's mock-making script catl = catl.loc[catl.absrmag.values >= -23.5] mag_cen, mag_edg, mag_n, mag_err, bw = cumu_num_dens( catl.absrmag.values, None, volume, True) cm = plt.get_cmap('Spectral') n_catls = temp_dict.get('num_mocks') * len(box_id_arr) col_arr = [cm(idx / float(n_catls)) for idx in range(n_catls)] fig6 = plt.figure(figsize=(10, 10)) for idx in range(n_catls): plt.errorbar(mag_cen_arr[idx], mag_n_arr[idx], yerr=mag_err_arr[idx], color=col_arr[idx], marker='o', markersize=4, capsize=5, capthick=0.5) plt.errorbar(mag_cen, mag_n, yerr=mag_err, markersize=4, capsize=5, capthick=2, marker='o', label='data', color='k', fmt='-s', ecolor='k', linewidth=2, zorder=10) plt.yscale('log') plt.gca().invert_xaxis() plt.xlabel(r'\boldmath $M_{r}$') plt.ylabel(r'\boldmath $\mathrm{n(< M_{r})} [\mathrm{h}^{3}' r'\mathrm{Mpc}^{-3}]$') if survey == 'resolvea' or survey == 'resolveb': plt.title(r'RESOLVE-{0} Luminosity Function'.\ format(temp_dict.get('mock_name'))) else: plt.title(r'{0} Luminosity Function'.format( temp_dict.get('mock_name'))) plt.legend(loc='lower left', prop={'size': 20}) plt.show()
def relax_label_two(orf, print_layers, tkagg=False, view_axis=False): """ Perform relaxation labeling of a two-class orientation field 'orf' """ assert orf.shape == print_layers.shape do_update_view = tkagg and view_axis # Used to determine convergence of labels EPSILON = 0.1 olpd_blks = print_layers[:,:,0] & print_layers[:,:,1] any_prnt_blks = print_layers[:,:,0] | print_layers[:,:,1] no_print_i, no_print_j = np.nonzero(~any_prnt_blks) sorted_coords = _sort_overlapped_coords(olpd_blks) n_overlapped = len(sorted_coords) p0 = print_layers[:,:,0] & (~olpd_blks) p0 = p0.astype(np.float) p0[olpd_blks] = 0.5 p1 = print_layers[:,:,1] & (~ olpd_blks) p1 = p1.astype(np.float) p1[olpd_blks] = 0.5 pk_next = np.dstack((p0, p1)) def unpack_and_zip_data(varg): """ Helper function which zips all data from the sorted overlapped pixels into one convenient iterable """ cntr, idx = varg imin = max(0, cntr[0] - 2) jmin = max(0, cntr[1] - 2) imax = min(orf.shape[0], cntr[0] + 3) jmax = min(orf.shape[1], cntr[1] + 3) nbri, nbrj = np.meshgrid(np.arange(imin,imax), \ np.arange(jmin, jmax), \ indexing="ij") not_cntr = (nbri.flat != cntr[0]) | (nbrj.flat != cntr[1]) has_print_inf = any_prnt_blks[nbri.flat, nbrj.flat] nbri = nbri.flat[not_cntr & has_print_inf] nbrj = nbrj.flat[not_cntr & has_print_inf] offsti = cntr[0] - nbri offstj = cntr[1] - nbrj epnt = -0.5 * (offsti ** 2 + offstj ** 2) wt = np.exp(epnt) wt = wt / np.sum(wt) return (cntr, (nbri, nbrj), wt, idx) ovlp_data = map(unpack_and_zip_data, \ zip(sorted_coords, range(n_overlapped))) def compat_func(lab_cnt, lab_nbr, cnt_coords, nbr_coords): """ Returns compatability between a center pixel and a neighbor. Used as a helper function. """ thta_cnt = orf[cnt_coords] thta_nbr = orf[nbr_coords] is_nbr_olpd = olpd_blks[nbr_coords] if not is_nbr_olpd: thta_diff = thta_cnt[lab_cnt] - thta_nbr[lab_cnt] r = 2 * abs(cos(thta_diff)) - 1.0 if not ( -1 <= r <= 1 ): print "BAD R!" raise RuntimeError if lab_cnt == lab_nbr: thta_diff = thta_cnt - thta_nbr[::1] else: thta_diff = thta_cnt - thta_nbr[::-1] abs_cos_thta = np.abs(np.cos(thta_diff)) r = np.sum(abs_cos_thta) - 1 if not (-1 <= r <= 1): print "BAD R!" 
raise RuntimeError return r def get_sop_rp(nbr_crds, cnt_crds, lab_cnt, pk): """Helper function which returns the sum of products term of the support update function """ r0 = compat_func(lab_cnt=lab_cnt, lab_nbr=0, \ cnt_coords=cnt_crds, nbr_coords=nbr_crds) sop0 = r0 * pk[nbr_crds][lab_cnt] r1 = compat_func(lab_cnt=lab_cnt, lab_nbr=1, \ cnt_coords=cnt_crds, nbr_coords=nbr_crds) sop1 = r1 * pk[nbr_crds][lab_cnt] return (r0 * sop0) + (r1 + sop1) k = 0 did_converge = False while (not did_converge) and k < 50: k = k + 1 pk = pk_next.copy() q = np.zeros((n_overlapped, 2)) if do_update_view: try: pkshow = np.copy(pk_next[:,:,0]) minpk = np.min(pkshow) maxpk = np.max(pkshow) pkshow = (pkshow-minpk)/(maxpk-minpk) cm = matplotlib.cm.get_cmap('rainbow') pkrgb = cm(pkshow) pkrgb = pkrgb[:,:,0:3] pkrgb[no_print_i, no_print_j] = 1.0 view_axis.imshow(pkrgb, interpolation="nearest") tkagg.show() except IndexError as err: print "pkshow.shape:" + str(pkshow.shape) print "cm:" + str(cm) print "pkrgb.shape:" + str(pkrgb.shape) print "any_prnt_blks:" + str(any_prnt_blks) raise err for crds, nbrs, wts, idx in ovlp_data: assert olpd_blks[crds] for lbl in [0, 1]: get_rp = partial(get_sop_rp, cnt_crds=crds, lab_cnt=lbl, pk=pk) sums_prod_rp = map(get_rp, zip(*nbrs)) wtd_sop = wts * sums_prod_rp q_i = np.sum(wtd_sop) q[idx, lbl] = q_i pk_i_nxt_unnorm = pk[crds] * (1 + q[idx]) pk_i_nxt = pk_i_nxt_unnorm / np.sum(pk_i_nxt_unnorm) pk_next[crds] = pk_i_nxt p_diff = np.sum(np.abs(pk - pk_next)) did_converge = p_diff < EPSILON print "p_diff:" + str(p_diff) if did_converge: print "CONVERGED!!" # extract labeled orientations from overlapped area p0_gt_p1 = pk[:,:,0] >= pk[:,:,1] print_0_orf = np.where(p0_gt_p1, orf[:,:,0], orf[:,:,1]) print_1_orf = np.where(~p0_gt_p1, orf[:,:,0], orf[:,:,1]) # extract primary orientation into known print blocks p0_only = print_layers[:,:,0] & (~olpd_blks) p1_only = print_layers[:,:,1] & (~olpd_blks) print_0_orf[p0_only] = orf[:,:,0][p0_only] print_1_orf[p1_only] = orf[:,:,1][p1_only] # set all other blocks (those without a part of a print) to NaN no_info = (~print_layers[:,:,0]) & (~print_layers[:,:,1]) print_0_orf[no_info] = np.nan print_1_orf[no_info] = np.nan return np.dstack((print_0_orf, print_1_orf))
def binary_diagonal(method_names, true_prevs, estim_prevs, pos_class=1, title=None, show_std=True,
                    legend=True, train_prev=None, savepath=None, method_order=None):
    """
    The diagonal plot displays the predicted prevalence values (along the y-axis) as a function of the true
    prevalence values (along the x-axis). The optimal quantifier is described by the diagonal (0,0)-(1,1) of the plot
    (hence the name). It is convenient for binary quantification problems, though it can be used for multiclass
    problems by indicating which class is to be taken as the positive class. (For multiclass quantification problems,
    other plots like the :meth:`error_by_drift` might be preferable though).

    :param method_names: array-like with the method names for each experiment
    :param true_prevs: array-like with the true prevalence values (each being a ndarray with n_classes components)
        for each experiment
    :param estim_prevs: array-like with the estimated prevalence values (each being a ndarray with n_classes
        components) for each experiment
    :param pos_class: index of the positive class
    :param title: the title to be displayed in the plot
    :param show_std: whether or not to show standard deviations (represented by color bands). This might be
        inconvenient for cases in which many methods are compared, or when the standard deviations are high
        (default True)
    :param legend: whether or not to display the legend (default True)
    :param train_prev: if indicated (default is None), the training prevalence (for the positive class) is
        highlighted in the plot. This is convenient when all the experiments have been conducted in the same dataset.
    :param savepath: path where to save the plot. If not indicated (as default), the plot is shown.
    :param method_order: if indicated (default is None), imposes the order in which the methods are processed (i.e.,
        listed in the legend and associated with matplotlib colors).
    """
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    ax.grid()
    ax.plot([0, 1], [0, 1], '--k', label='ideal', zorder=1)

    method_names, true_prevs, estim_prevs = _merge(method_names, true_prevs, estim_prevs)

    order = list(zip(method_names, true_prevs, estim_prevs))
    if method_order is not None:
        table = {method_name: [true_prev, estim_prev]
                 for method_name, true_prev, estim_prev in order}
        order = [(method_name, *table[method_name]) for method_name in method_order]

    cm = plt.get_cmap('tab20')
    NUM_COLORS = len(method_names)
    ax.set_prop_cycle(color=[cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)])
    for method, true_prev, estim_prev in order:
        true_prev = true_prev[:, pos_class]
        estim_prev = estim_prev[:, pos_class]

        x_ticks = np.unique(true_prev)
        x_ticks.sort()
        y_ave = np.asarray([estim_prev[true_prev == x].mean() for x in x_ticks])
        y_std = np.asarray([estim_prev[true_prev == x].std() for x in x_ticks])

        ax.errorbar(x_ticks, y_ave, fmt='-', marker='o', label=method, markersize=3, zorder=2)
        if show_std:
            ax.fill_between(x_ticks, y_ave - y_std, y_ave + y_std, alpha=0.25)

    if train_prev is not None:
        train_prev = train_prev[pos_class]
        ax.scatter(train_prev, train_prev, c='c', label='tr-prev',
                   linewidth=2, edgecolor='k', s=100, zorder=3)

    ax.set(xlabel='true prevalence', ylabel='estimated prevalence', title=title)
    ax.set_ylim(0, 1)
    ax.set_xlim(0, 1)

    if legend:
        # box = ax.get_position()
        # ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        # ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        # ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        ax.legend(loc='lower center',
                  bbox_to_anchor=(1, -0.5),
                  ncol=(len(method_names) + 1) // 2)

    _save_or_show(savepath)
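# A minimal usage sketch for binary_diagonal with two made-up quantification
# methods on a grid of true prevalence values. It assumes the module-level
# helpers _merge and _save_or_show are available; all numbers are synthetic
# and only illustrate the expected shapes (n_samples, n_classes).
import numpy as np

true_grid = np.repeat(np.linspace(0, 1, 11), 10)
true_prevs = [np.column_stack([1 - true_grid, true_grid])] * 2
estim_prevs = [np.clip(tp + np.random.normal(0, 0.05, tp.shape), 0, 1)
               for tp in true_prevs]
binary_diagonal(['method-A', 'method-B'], true_prevs, estim_prevs,
                pos_class=1, title='toy example')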
def main_routine (wd="./",cfg="./python/parameter.cfg",gammaCnt=11,bloch=0,myMap="custom",project=0): ### read config file ### print ("load from config file: " + cfg) configParser = cp.ConfigParser() configParser.read(cfg) print (configParser.sections()) cfg=configParser.__dict__['_sections'].copy() #for src, target in cfg['NVSETUP'].items(): # print(src + " : " + target) omega_c = float(cfg['NVSETUP']['{omega_c}']) nWrite=int(cfg['OCFourier']['{write_harmonic}']) nRead =int(cfg['OCFourier']['{read_harmonic}']) nStore=int(cfg['MEFourier']['{storage_harmonic}']) nDown =nRead+nWrite nUp =nDown+nWrite ### read config file ### ### read data ### cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg) time =IOHelper.functionaltimes_readwrite(**cfg) time['write'][:] *= 1e9 time['read'][:] *= 1e9 ti = int(time['idx_ti']) tf = int(time['idx_tf']) functime = time['read'][ti:tf] dt = float(time['delta_t']) filename =IOHelper.getVectorOverlap(**cfg) reGamma,imGamma=sp.loadtxt(filename).T alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead] alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown] alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp] ### read data ### ### plotting Reg1Up = sp.dot(alphaU.conj(),cavityWrite) Reg1Down = sp.dot(alphaD.conj(),cavityWrite) Reg2Down = sp.dot(alphaD.conj(),cavityMemo) Reg2Up = sp.dot(alphaU.conj(),cavityMemo) Reg2Read = sp.dot(alphaR.conj(),cavityRead) Reg2DownRead = Reg2Down + Reg2Read Reg2UpRead = Reg2Up + Reg2Read myDensity = sp.zeros([functime.size,gammaCnt]) FuncCavity = sp.zeros([functime.size,gammaCnt],complex) spos = sp.zeros([2,gammaCnt]) FuncInfoOlap = sp.zeros([2,gammaCnt],complex) I00 = cumtrapz( sp.real(Reg2DownRead[ti:tf] * Reg2DownRead[ti:tf].conj()), x=None, dx=dt )[-1] I11 = cumtrapz( sp.real(Reg2UpRead[ti:tf] * Reg2UpRead[ti:tf].conj()), x=None, dx=dt )[-1] for g in sp.arange(0.0,gammaCnt): gamma = g/(gammaCnt-1.0) myDensity[:,g]=1.0-gamma spos[0,g] = gamma spos[1,g] = 1.0-gamma FuncCavity[:,g] = spos[0,g]*Reg2Down[ti:tf] + spos[1,g]*Reg2Up[ti:tf] FuncCavity[:,g] += Reg2Read [ti:tf] FuncInfoOlap [0,g] = cumtrapz( (FuncCavity[:,g] * Reg2DownRead[ti:tf].conj()).real, x=None, dx=dt )[-1] FuncInfoOlap [0,g] += 1j*cumtrapz( (FuncCavity[:,g] * Reg2DownRead[ti:tf].conj()).imag, x=None, dx=dt )[-1] FuncInfoOlap [0,g] /= I00 FuncInfoOlap [0,g] = sp.absolute(FuncInfoOlap [0,g]) FuncInfoOlap [1,g] = cumtrapz( (FuncCavity[:,g] * Reg2UpRead[ti:tf].conj()).real, x=None, dx=dt )[-1] FuncInfoOlap [1,g] += 1j*cumtrapz( (FuncCavity[:,g] * Reg2UpRead[ti:tf].conj()).imag, x=None, dx=dt )[-1] FuncInfoOlap [1,g] /= I11 FuncInfoOlap [1,g] = sp.absolute(FuncInfoOlap [1,g]) # FuncInfoOlap [0,g] = cumtrapz( sp.absolute(FuncCavity[:,g]) * sp.absolute(Reg2DownRead[ti:tf].conj()), x=None, dx=dt )[-1] # FuncInfoOlap [0,g] /= I00 # FuncInfoOlap [1,g] = cumtrapz( sp.absolute(FuncCavity[:,g]) * sp.absolute(Reg2UpRead[ti:tf].conj()), x=None, dx=dt )[-1] # FuncInfoOlap [1,g] /= I11 fig = plt.figure() fs = 22 label_size = 20 plt.rcParams['xtick.labelsize'] = label_size plt.rcParams['ytick.labelsize'] = label_size xx, yy = sp.meshgrid(sp.linspace(0.0,1.0,gammaCnt),functime) zmax = (sp.absolute(FuncCavity)**2).max() c = mcolors.ColorConverter().to_rgb if myMap =="custom" : cm = make_colormap( [c('blue'), c('purple'), c('red')]) else: cm = plt.cm.get_cmap(myMap) myColors = cm(myDensity) fig1 = fig.add_subplot(111, projection='3d') fig1.plot(spos[0,:], FuncInfoOlap [0,:].real, zs=functime[ project], zdir='y',lw=2.5, color="blue",label="$\gamma^{\prime}$",zorder=0.1) 
fig1.plot(spos[0,:], FuncInfoOlap [1,:].real, zs=functime[ project], zdir='y',lw=2.5, color="red" ,label="$\delta^{\prime}$",zorder=0.1) fig1.plot_surface(xx, yy, sp.absolute(FuncCavity)**2/zmax,rstride=1, cstride=1,alpha=1,facecolors=myColors, antialiased=False) fig1.plot_wireframe(xx, yy, sp.absolute(FuncCavity)**2/zmax, rstride=15, cstride=3,alpha=1,linewidth=1,color="black") fig1.legend(fontsize=fs) fig1.set_zlim(0,1.1) fig1.set_ylim(functime[0],functime[-1]) fig1.set_xticks([0,0.5,1]) fig1.set_yticks([55,75,95]) fig1.set_zticks([0,0.5,1]) fig1.set_xlim(0,1) fig1.set_zlabel("$|A(t)|^2$",fontsize=fs) fig1.set_ylabel("$t$ in ns",fontsize=fs) fig1.set_xlabel("$\gamma$",fontsize=fs) fig1.xaxis._axinfo['label']['space_factor'] = 2.0 fig1.yaxis._axinfo['label']['space_factor'] = 2.0 fig1.zaxis._axinfo['label']['space_factor'] = 2.0 # m.set_array(myDensity) # plt.colorbar(m,shrink=0.5,aspect=7) plt.show()
def _set_colors(ax, n_methods):
    NUM_COLORS = n_methods
    cm = plt.get_cmap('tab20')
    ax.set_prop_cycle(color=[cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)])
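# A minimal usage sketch for _set_colors: cycle 5 methods through 'tab20'
# before plotting (the data below is made up for illustration).
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
_set_colors(ax, n_methods=5)
for _ in range(5):
    ax.plot(np.random.rand(10))   # each line picks the next cycled color
plt.show()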
def plot(self, outputDirectory): # Skip this step if matplotlib is not installed try: import pylab except ImportError: return import matplotlib.cm cm = matplotlib.cm.jet Nreac = self.network.Nisom + self.network.Nreac Nprod = Nreac + self.network.Nprod Tlist = self.Tlist.value_si Plist = self.Plist.value_si Tcount = Tlist.shape[0] Pcount = Plist.shape[0] K = self.K count = 0 for prod in range(Nprod): for reac in range(Nreac): if reac == prod: continue reaction = self.network.netReactions[count] count += 1 reaction_str = '{0} {1} {2}'.format( ' + '.join([reactant.label for reactant in reaction.reactants]), '<=>' if prod < Nreac else '-->', ' + '.join([product.label for product in reaction.products]), ) fig = pylab.figure(figsize=(10,6)) K2 = numpy.zeros((Tcount, Pcount)) if reaction.kinetics is not None: for t in range(Tcount): for p in range(Pcount): K2[t,p] = reaction.kinetics.getRateCoefficient(Tlist[t], Plist[p]) K = self.K[:,:,prod,reac].copy() order = len(reaction.reactants) K *= 1e6 ** (order-1) K2 *= 1e6 ** (order-1) kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order] pylab.subplot(1,2,1) for p in range(Pcount): pylab.semilogy(1000.0 / Tlist, K[:,p], color=cm(1.*p/(Pcount-1)), marker='o', linestyle='') if reaction.kinetics is not None: pylab.semilogy(1000.0 / Tlist, K2[:,p], color=cm(1.*p/(Pcount-1)), marker='', linestyle='-') pylab.xlabel('1000 / Temperature (1000/K)') pylab.ylabel('Rate coefficient ({0})'.format(kunits)) pylab.title(reaction_str) pylab.subplot(1,2,2) for t in range(Tcount): pylab.loglog(Plist*1e-5, K[t,:], color=cm(1.*t/(Tcount-1)), marker='o', linestyle='') pylab.loglog(Plist*1e-5, K2[t,:], color=cm(1.*t/(Tcount-1)), marker='', linestyle='-') pylab.xlabel('Pressure (bar)') pylab.ylabel('Rate coefficient ({0})'.format(kunits)) pylab.title(reaction_str) fig.subplots_adjust(left=0.10, bottom=0.13, right=0.95, top=0.92, wspace=0.3, hspace=0.3) pylab.savefig(os.path.join(outputDirectory, 'kinetics_{0:d}.pdf'.format(count))) pylab.close()
def make_plot(X, Y, Z, Z_init, h_min, h_max, simtime, cr_angle, run_name, iter, plot_show_flag): from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib import colors from matplotlib.ticker import LinearLocator, FormatStrFormatter import matplotlib.pyplot as plt import numpy as np import imp try: imp.find_module('mayavi') found = True except ImportError: found = False if found: from mayavi import mlab plt.ion() plt.rcParams.update({'font.size': 8}) plt.close('all') Z_diff = Z - Z_init fig = plt.figure() fig.set_size_inches(11, 7) ax1 = fig.add_subplot(2, 2, 1, projection='3d') ax2 = fig.add_subplot(2, 2, 2) ax3 = fig.add_subplot(2, 2, 3) ax4 = fig.add_subplot(2, 2, 4) plt.tight_layout(pad=4, w_pad=4, h_pad=4) time_text = ax3.text(0, 0, 'time =' + "{:8.2f}".format(simtime) + 's') delta_x = X[0, 1] - X[0, 0] delta_y = Y[1, 0] - Y[0, 0] Z_x, Z_y = np.gradient(Z, delta_x, delta_y) grad_Z = np.sqrt(Z_x**2 + Z_y**2) slope = (np.arctan(grad_Z) * 180.0 / np.pi) my_col = cm.jet(slope / cr_angle) norm = colors.Normalize(vmin=0.0, vmax=cr_angle) ax1.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=my_col, linewidth=0, antialiased=False, alpha=0.7) #m = cm.ScalarMappable(cmap=plt.cm.jet, norm=norm) #m.set_array([]) #plt.colorbar(m,ax=ax1,location='bottom') # clb1.set_label('Slope [°]') nx = X.shape[1] ny = X.shape[0] idx1 = int(np.floor(nx / 4)) idx2 = int(np.floor(nx / 2)) idx3 = int(np.floor(3 * nx / 4)) dh = h_max - h_min ax1.plot3D(X[idx1, :], Y[idx1, :], Z[idx1, :] + 0.01 * dh, 'blue') ax1.plot3D(X[idx2, :], Y[idx2, :], Z[idx2, :] + 0.01 * dh, 'red') ax1.plot3D(X[idx3, :], Y[idx3, :], Z[idx3, :] + 0.01 * dh, 'green') if (np.min(Z_diff) == np.max(Z_diff)): Z_diff[0, 0] = 0.001 Z_diff[-1, -1] = -0.001 ax1.set_xlabel('x [m]') ax1.set_xlim(np.amin(X), np.amax(X)) ax1.set_ylabel('y [m]') ax1.set_ylim(np.amin(Y), np.amax(Y)) ax1.set_zlabel('z [m]') ax1.set_zlim(h_min, h_max) extent = [np.amin(X), np.amax(X), np.amin(Y), np.amax(Y)] z_range = np.amax(np.abs(Z_diff)) cnt = ax2.imshow(Z_diff, cmap='seismic', extent=extent, vmin=-z_range, vmax=z_range) clb = plt.colorbar(cnt, ax=ax2) clb.set_label('Delta h [m]') # colorbar(cnt); ax2.set_xlabel('x [m]') ax2.set_xlim(np.amin(X), np.amax(X)) ax2.set_ylabel('y [m]') ax2.set_ylim(np.amin(Y), np.amax(Y)) l1, = ax3.plot(X[idx1, :], Z[idx1, :], 'b-') l2, = ax3.plot(X[idx2, :], Z[idx2, :], 'r-') l3, = ax3.plot(X[idx3, :], Z[idx3, :], 'g-') ax3.legend((l1, l2, l3), ("y="+str(Y[idx1,0])+' m',"y="+str(Y[idx2,0])+' m', \ "y="+str(Y[idx3,0])+' m'), loc='upper right', shadow=True) ax3.plot(X[idx1, :], Z_init[idx1, :], 'b--') ax3.plot(X[idx2, :], Z_init[idx2, :], 'r--') ax3.plot(X[idx3, :], Z_init[idx3, :], 'g--') ax3.set_xlabel('x [m]') ax3.set_ylabel('z [m]') x_min, x_max = ax3.get_xlim() y_min, y_max = ax3.get_ylim() time_text.set_position( (x_min + 0.05 * (x_max - x_min), y_min + 0.9 * (y_max - y_min))) time_text.set_text('time =' + "{:8.2f}".format(simtime) + 's') slope_flat = slope.flatten() """ n, bins, patches = ax4.hist(slope_flat, weights=100.0*np.ones(len(slope_flat)) / len(slope_flat),\ histtype='stepfilled', alpha=0.2) bin_centers = 0.5 * (bins[:-1] + bins[1:]) # scale values to interval [0,1] col = bin_centers - min(bin_centers) col /= max(col) cm = plt.cm.get_cmap('RdYlBu_r') for c, p in zip(col, patches): plt.setp(p, 'facecolor', cm(c)) """ # Get the histogramp cm = plt.cm.get_cmap('jet') Yh, Xh = np.histogram(slope_flat, density=True) x_span = Xh.max() - Xh.min() C = [cm(x / cr_angle) for x in Xh] ax4.bar(Xh[:-1] + 0.5 * 
(Xh[1] - Xh[0]), Yh, color=C, width=Xh[1] - Xh[0]) ax4b = ax4.twinx() # plot the cumulative histogram n_bins = 50 ax4b.hist(slope_flat, n_bins, density=True, histtype='step', cumulative=True, label='Empirical') ax4.set_xlabel('slope [degrees]') ax4.set_ylabel('probability density function') ax4b.set_ylabel('cumulative distribution function') frame_name = run_name + '_{0:03}'.format(iter) + '.png' plt.savefig(frame_name, dpi=200) frame_name = run_name + '_{0:03}'.format(iter) + '.pdf' plt.savefig(frame_name) if plot_show_flag: plt.show() plt.pause(0.01) return time_text
def make_tsne(session_data, data, labels, fig_filename='tsne.svg', batch_size=128, image_size=32): data_to_use = [x for x in data if x[1] in labels] data_generator = get_batches(data_to_use, batch_size, image_size, word2vec=True, send_raw_str=True) sess = session_data['session'] best_model = session_data['best_model'] model_output = session_data['model_output'] x = session_data['x'] restorer = tf.train.Saver() restorer.restore(sess, best_model) points = [] p_labels = [] for batch_x, batch_y, batch_labels in data_generator: output = sess.run(model_output, {x: batch_x}) for i, o in enumerate(output): label = normalize_label(batch_labels[i]) points.append(o) p_labels.append(label) labels = [normalize_label(L) for L in labels] label_points = [find_word_vec(L) for L in labels] for i, L in enumerate(labels): points.append(label_points[i]) p_labels.append('LABEL-' + normalize_label(L)) print('RUNNING TSNE') manifold = TSNE(n_components=2, metric='cosine').fit_transform(points) print('DONE') output_points = [ a for i, a in enumerate(manifold) if 'LABEL' not in p_labels[i] ] class_points = [[a, p_labels[i]] for i, a in enumerate(manifold) if 'LABEL' in p_labels[i]] output_labels = [L for L in p_labels if 'LABEL' not in L] output_labels = [labels.index(normalize_label(L)) for L in output_labels] x_output = [a[0] for a in output_points] y_output = [a[1] for a in output_points] x_class_points = [a[0][0] for a in class_points] y_class_points = [a[0][1] for a in class_points] l_class_points = [a[1].split('-')[1] for a in class_points] fig, ax = plt.subplots() num_colors = len(labels) cm = plt.get_cmap('rainbow') ax.set_color_cycle([cm(1. * i / num_colors) for i in range(num_colors)]) for c in range(len(labels)): x_L = [x for i, x in enumerate(x_output) if output_labels[i] == c] y_L = [y for i, y in enumerate(y_output) if output_labels[i] == c] ax.scatter(x_L, y_L, marker='x', label=labels[c]) lgd = ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5)) font0 = FontProperties() font0.set_weight('bold') font0.set_size(7) for i, L in enumerate(l_class_points): ax.text(x_class_points[i], y_class_points[i], L, fontproperties=font0, bbox=dict(facecolor='white', edgecolor='black', alpha=0.8)) ax.set_xlim([-50, 50]) fig.savefig(fig_filename, bbox_extra_artists=(lgd, ), bbox_inches='tight', format='svg') plt.show(block=False)
COLOR = 'blue'
# RESFACT = 10
MAP = 'jet'  # choose carefully, or color transitions will not appear smooth

# create random data
np.random.seed(101)
x = y2009_class['NDVI']
y = y2009_class['TRMM']
z = y2009_class['LST']

%matplotlib inline
fig = plt.figure(figsize=(8, 8))
ax = plt.axes(projection='3d')
cm = plt.get_cmap(MAP)
ax.set_color_cycle([cm(1. * i / (NPOINTS - 1)) for i in range(NPOINTS - 1)])
for i in range(NPOINTS - 1):
    ax.plot(x[i:i + 2], y[i:i + 2], z[i:i + 2])
    # ax.scatter(x[i:i+2], y[i:i+2], z[i:i+2])
# ax.plot(x, y, z)
ax.scatter(x, y, z, c=np.linspace(0, 1, NPOINTS), s=40)
# ax.text(.05, 1.05, 'Reg. Res - Color Map')
ax.set_xlim(0, 5)
ax.set_ylim(0, 5)
ax.set_zlim(0, 5)

qtls_NDVI_lst = qtls_NDVI.tolist()
qtls_NDVI_lst.insert(0, ' ')
def plot_minmax_levels_binned(var_list, ID, times, time_bins, it_bins, ic, jc, di, dj): print('computing binned min/max at each level') cm = plt.cm.get_cmap('coolwarm') cm2 = plt.cm.get_cmap('bwr') zrange = dx[2] * krange c1 = -1. c2 = -1. c3 = -1. minmax = {} minmax['time'] = times max_single = np.zeros((kmax), dtype=np.double) max_double = np.zeros((kmax), dtype=np.double) max_triple = np.zeros( (2, kmax), dtype=np.double ) # 0: total domain, (2*di)x(2*dj) gridpoints around collision point for var_name in var_list: minmax[var_name] = {} minmax[var_name]['max'] = np.zeros((len(times), kmax), dtype=np.double) minmax[var_name]['min'] = np.zeros((len(times), kmax), dtype=np.double) # # # minmax['w'] = {} # # # minmax['s'] = {} # # # minmax['temp'] = {} for var_name in var_list: print('') print('variable: ' + var_name) fig2, axes2 = plt.subplots(3, 2, sharey='all', figsize=(10, 15)) for it, t0 in enumerate(times): count_color = np.double(it) / len(times) if var_name == 'theta': s_var = read_in_netcdf_fields( 's', os.path.join(path_fields, str(t0) + '.nc')) var = theta_s(s_var) else: var = read_in_netcdf_fields( var_name, os.path.join(path_fields, str(t0) + '.nc')) for k in range(kmax): minmax[var_name]['max'][it, k] = np.amax(var[:, :, k]) minmax[var_name]['min'][it, k] = np.amin(var[:, :, k]) if t0 < time_bins[1]: ax = axes2[0, :] c1 += 1. c = c1 / (it_bins[1] - it_bins[0]) max_single += np.amax(np.amax(var[:, :, :kmax], axis=0), axis=0) elif t0 < time_bins[2]: ax = axes2[1, :] c2 += 1. c = c2 / (it_bins[2] - it_bins[1]) max_double += np.amax(np.amax(var[:, :, :kmax], axis=0), axis=0) elif t0 < time_bins[3]: ax = axes2[2, :] c3 += 1. c = c3 / (it_bins[3] - it_bins[2]) max_triple[0, :] += np.amax(np.amax(var[:, :, :kmax], axis=0), axis=0) max_triple[1, :] += np.amax(np.amax(var[ic - di:ic + di, jc - dj:jc + dj, :kmax], axis=0), axis=0) else: continue del var ax[0].plot(minmax[var_name]['min'][it, :], zrange, '-', color=cm(c), label='t=' + str(it) + 's') ax[1].plot(minmax[var_name]['max'][it, :], zrange, '-', color=cm(c), label='t=' + str(it) + 's') max_single /= c1 max_double /= c2 max_triple /= c3 fig2.subplots_adjust(bottom=0.05, right=.85, left=0.1, top=.95, wspace=0.25) for i in range(3): axes2[i, 0].set_xlim(np.amin(minmax[var_name]['min']), np.amax(minmax[var_name]['min'])) axes2[i, 1].set_xlim(np.amin(minmax[var_name]['max']), np.amax(minmax[var_name]['max'])) axes2[i, 0].set_ylabel('height z [m]') axes2[i, 1].set_title('t=' + str(time_bins[i]) + ' - ' + str(time_bins[i + 1]) + 's', fontsize=18) axes2[0, 0].set_title('single CP', fontsize=18) axes2[1, 0].set_title('2-CP collision', fontsize=18) axes2[2, 0].set_title('3-CP collision', fontsize=18) for i in range(2): axes2[2, i].set_xlabel('min(' + var_name + ') [m/s]') axes2[2, i].set_xlabel('max(' + var_name + ') [m/s]') fig2.suptitle('min/max of ' + var_name, fontsize=24) fig_name2 = var_name + '_' + str(ID) + '_minmax_binned.png' fig2.savefig(os.path.join(path_out_figs, fig_name2)) plt.close(fig2) print('') fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey='none', figsize=(15, 5)) k0 = 25 # s_var = read_in_netcdf_fields('s', os.path.join(path_fields, str(1500) + '.nc')) # var = theta_s(s_var)[:,:,22] # del s_var w_var = read_in_netcdf_fields( 'w', os.path.join(path_fields, str(1200) + '.nc')) ax1.contourf(w_var[:, :, k0].T) w_var = read_in_netcdf_fields( 'w', os.path.join(path_fields, str(1500) + '.nc')) aux = np.amax(np.abs(w_var[:, :, k0])) ax2.contourf(w_var[:, :, 20].T, levels=np.linspace(-aux, aux, 1e2), cmap=cm2) rect_double = 
mpatches.Rectangle((ic - 50, jc - 50), 2 * 50, 2 * 50, linewidth=1, edgecolor='grey', facecolor='none') rect = mpatches.Rectangle((ic - di, jc - dj), 2 * di, 2 * dj, linewidth=1, edgecolor='k', facecolor='none') ax1.add_patch(rect) ax1.add_patch(rect_double) ax2.add_patch(rect) ax2.add_patch(rect_double) ax3.plot(max_single, zrange, label='single') ax3.plot(max_double, zrange, label='double') ax3.plot(max_triple[0, :], zrange, label='triple') ax3.plot(max_triple[1, :], zrange, label='triple subdomain') ax3.legend() ax1.set_xlabel('min(' + var_name + ') [m/s]') ax3.set_xlabel('max(' + var_name + ') [m/s]') ax1.set_ylabel('height z [m]') fig.suptitle('min/max of ' + var_name, fontsize=24) fig_name = var_name + '_' + str(ID) + '_max_binned.png' fig.savefig(os.path.join(path_out_figs, fig_name)) plt.close(fig) return
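# Hedged sketch of the colour-by-time pattern used in plot_minmax_levels_binned:
# the loop index is mapped into [0, 1] and fed to the colormap so later profiles
# shade from one end of 'coolwarm' to the other. All data here are synthetic.
import numpy as np
import matplotlib.pyplot as plt

cmap = plt.get_cmap('coolwarm')
z = np.linspace(0.0, 2000.0, 60)          # height levels [m] (illustrative)
times = np.arange(100, 1300, 100)         # output times [s] (illustrative)
fig, ax = plt.subplots()
for it, t0 in enumerate(times):
    profile = np.exp(-z / (300.0 + t0))   # stand-in for a max(w) profile
    ax.plot(profile, z, '-', color=cmap(it / (len(times) - 1)), label='t=%ds' % t0)
ax.set_xlabel('max(w) [m/s]')
ax.set_ylabel('height z [m]')
ax.legend(fontsize=6)
plt.show()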
motion_inx = 0 for motion in os.listdir(args.bag_dir): if os.path.isdir(os.path.join(args.bag_dir, motion)): bag_dir = os.path.join(args.bag_dir, motion) for fov in os.listdir(bag_dir): if os.path.isdir(os.path.join(bag_dir, fov)): chi, alpha, fl = parse('chi{:f}_alpha{:f}_fl{:f}', fov) print "Motion type {}, chi={}, alpha={}, focal_length={}".format( motion, chi, alpha, fl) ax = fig.add_subplot( num_rows, num_cols, motion_inx * num_cols + fov_dict[fov] + 1) cm = plt.get_cmap('nipy_spectral') ax.set_prop_cycle(color=[ cm(1. * i / len(descriptor_list)) for i in range(len(descriptor_list)) ]) if args.detector == '': handles = [] for det, desc in d_list: print "Detector+Descriptor {}".format(det + '+' + desc) matcher = Matcher(args.image_topic, args.depth_image_topic, args.pose_topic, det, desc, fl, chi,
default="jet", help="Colormap used to pick a color.") args = parser.parse_args() cm = colors.cm_mapper(0, 1, args.colormap) fig, axs = get_axs() fnames = args.INPUT_DIRECTORIES for i, fname in enumerate(fnames): hackfactor = None if fname == "run-transient4": hackfactor = 2.0 if args.colors is None: if len(fnames) > 1: c = cm(float(i) / (len(fnames) - 1)) else: c = 'b' else: if type(args.colors[i]) is float: c = cm(args.colors[i]) else: c = args.colors[i] ktd = parse_ats.readATS(fname, "visdump_surface_data.h5") plot_surface_balance(ktd, axs, c, label=fname, hackfactor=hackfactor) ktd[2].close() plt.tight_layout() axs[0].legend(bbox_to_anchor=(0., 1., 3.6, .05), loc=3,
def sample_colours_from_colourmap(n_colours, colour_map):
    import matplotlib.pyplot as plt
    cm = plt.get_cmap(colour_map)
    return [cm(1. * i / n_colours)[:3] for i in range(n_colours)]
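# Example use of sample_colours_from_colourmap: three RGB triples from
# 'viridis' handed to a bar plot (the bar values are placeholders).
import matplotlib.pyplot as plt
colours = sample_colours_from_colourmap(3, 'viridis')
plt.bar([0, 1, 2], [3.0, 1.0, 2.0], color=colours)
plt.show()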
def plot(self, outputDirectory): # Skip this step if matplotlib is not installed try: import matplotlib.pyplot as plt except ImportError: return import matplotlib.cm cm = matplotlib.cm.jet Nreac = self.network.Nisom + self.network.Nreac Nprod = Nreac + self.network.Nprod Tlist = self.Tlist.value_si Plist = self.Plist.value_si Tcount = Tlist.shape[0] Pcount = Plist.shape[0] K = self.K count = 0 for prod in range(Nprod): for reac in range(Nreac): if reac == prod: continue reaction = self.network.netReactions[count] count += 1 reaction_str = '{0} {1} {2}'.format( ' + '.join([reactant.label for reactant in reaction.reactants]), '<=>' if prod < Nreac else '-->', ' + '.join([product.label for product in reaction.products]), ) fig = plt.figure(figsize=(10,6)) K2 = numpy.zeros((Tcount, Pcount)) if reaction.kinetics is not None: for t in range(Tcount): for p in range(Pcount): K2[t,p] = reaction.kinetics.getRateCoefficient(Tlist[t], Plist[p]) K = self.K[:,:,prod,reac].copy() order = len(reaction.reactants) K *= 1e6 ** (order-1) K2 *= 1e6 ** (order-1) kunits = {1: 's^-1', 2: 'cm^3/(mol*s)', 3: 'cm^6/(mol^2*s)'}[order] plt.subplot(1,2,1) for p in xrange(Pcount): plt.semilogy(1000.0 / Tlist, K[:,p], color=cm(1.*p/(Pcount-1)), marker='o', linestyle='', label=str('%.2e' % (Plist[p]/1e+5)) + ' bar') if reaction.kinetics is not None: plt.semilogy(1000.0 / Tlist, K2[:,p], color=cm(1.*p/(Pcount-1)), marker='', linestyle='-') plt.xlabel('1000 / Temperature (1000/K)') plt.ylabel('Rate coefficient ({0})'.format(kunits)) plt.title(reaction_str) plt.legend() plt.subplot(1,2,2) for t in xrange(Tcount): plt.loglog(Plist*1e-5, K[t,:], color=cm(1.*t/(Tcount-1)), marker='o', linestyle='', label=str('%.0d' % Tlist[t]) + ' K') plt.loglog(Plist*1e-5, K2[t,:], color=cm(1.*t/(Tcount-1)), marker='', linestyle='-') plt.xlabel('Pressure (bar)') plt.ylabel('Rate coefficient ({0})'.format(kunits)) plt.title(reaction_str) plt.legend() fig.subplots_adjust(left=0.10, bottom=0.13, right=0.95, top=0.92, wspace=0.3, hspace=0.3) if not os.path.exists('plots'): os.mkdir('plots') plt.savefig(os.path.join(outputDirectory, 'plots/kinetics_{0:d}.pdf'.format(count))) plt.close()
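# A stripped-down sketch of the colouring scheme in plot() above: one curve per
# pressure, coloured by its fractional position along the colormap. The rate
# expression is a toy Arrhenius-like stand-in, not RMG output.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm

cmap = matplotlib.cm.jet
Tlist = np.linspace(300.0, 2000.0, 30)
Plist = np.logspace(3, 7, 5)                       # illustrative pressures [Pa]
for p, P in enumerate(Plist):
    k = 1e10 * np.exp(-15000.0 / Tlist) * (P / 1e5) ** 0.3
    plt.semilogy(1000.0 / Tlist, k, color=cmap(1. * p / (len(Plist) - 1)),
                 marker='o', linestyle='', label='%.2e bar' % (P / 1e5))
plt.xlabel('1000 / Temperature (1000/K)')
plt.ylabel('Rate coefficient (s^-1)')
plt.legend()
plt.show()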
def plot_head_connectivity(connectivity, sensorlocations, plotsensors=False, vmin=None, vmax=None, cm=cm.jet, plothead=True, plothead_kwargs=None, ax=P, view='top', **kwargs): """Plot connectivity on a head surface, derived from some sensor locations. The sensor locations are first projected onto the best fitting sphere and finally projected onto a circle (by simply ignoring the z-axis). :param connectivity: Connectivity matrix :type connectivity: matrix :param sensorlocations: array (nsensors x 3), 3D coordinates of each sensor. The order of the sensors has to match with the `connectivity` matrix. :param plotsensors: bool; if True, sensor will be plotted on their projected coordinates. No sensors are shown otherwise. :param plothead: bool; If True, a head outline is plotted. :param plothead_kwargs: Additional keyword arguments passed to `plot_head_outline()`. :param vmin,vmax: Minimum and maximum value to be used for graphics. :param ax: matplotlib axes to plot to. Standard is pylab. :param view: one of 'top' and 'rear'; Defines from where the head is viewed. :param kwargs: All additional arguments will be passed to `P.imshow()`. :returns: (map, head, sensors) The corresponding matplotlib objects are returned if plotted, i.e., if plothead is set to `False`, head will be `None`. map The colormap that makes the actual plot, a matplotlib.image.AxesImage instance. head What is returned by :py:meth:`plot_head_outline`. sensors The dots marking the electrodes, a matplotlib.lines.Line2d instance. ..seealso: :py:meth:`plot_head_topography` """ #Some assertions: assert len(connectivity.shape)==2 and connectivity.shape[0]==connectivity.shape[1], "connectivity must be a quadratic matrix" assert connectivity.shape[0] == sensorlocations.shape[0], "connectivity and sensorlocations must have same length" if vmin!=None and vmax != None: assert vmin<vmax, "vmin(=%f) must be smaller than vmax(=%f)" % (vmin,vmax) # give sane defaults if plothead_kwargs is None: plothead_kwargs = {} #assert sensorlocations is an numpy-arrays and swap x and y coordinates #swapping is necessary as the eeglab .ced files have another interpretation of this sensorlocations = N.array(sensorlocations) tmp = sensorlocations[:,1].copy() sensorlocations[:,1] = sensorlocations[:,0] sensorlocations[:,0]=tmp[:] sensorlocations[:,0]*=-1 del tmp # error function to fit the sensor locations to a sphere def err(params): r, cx, cy, cz = params #print r,cx,cy,cz rv = (sensorlocations[:, 0] - cx) ** 2 + (sensorlocations[:, 1] - cy) ** 2 + (sensorlocations[:, 2] - cz) ** 2 - r ** 2 rv = abs(rv.sum()) #print "rv: ",rv return rv # initial guess of sphere parameters (radius and center) params = N.array([1.0, 0.0, 0.0, 0.0]) # do fit r, cx, cy, cz = fmin(err,params,disp=0)#leastsq(err, params)# #print "Results of fit:", r, cx,cy,cz # project the sensor locations onto the sphere sphere_center = N.array((cx, cy, cz)) sproj = sensorlocations - sphere_center sproj = r * sproj / N.c_[N.sqrt(N.sum(sproj ** 2, axis=1))] sproj += sphere_center #print "sproj.shape:",sproj.shape #vmin, vmax: give sane defaults #first, make copy of connectivity and set diagonal elements to zero conn = connectivity.copy() conn *= N.ones((conn.shape[0]),"d")-N.diag(N.ones((conn.shape[0]),"d")) if vmin==None: vmin=conn.min() if vmax==None: vmax=conn.max() #Now transform values of conn to be between 0 and 1 conn = (conn-vmin)/(vmax-vmin) conn[conn>1] = N.ones(conn[conn>1].shape,"d") conn[conn<0] = N.zeros(conn[conn<0].shape,"d") if view == 'top': # fig = ax.gca() for i in 
range(connectivity.shape[0]): for j in range(i): dx=sproj[j,0]-sproj[i,0] dy=sproj[j,1]-sproj[i,1] if conn[i,j] != conn[j,i]: if conn[i,j]>0.0: #ax.arrow(sproj[i,0],sproj[i,1],dx,dy,lw=conn[i,j]*5+1,ec=cm(conn[i,j]),head_width=N.sqrt(dx**2+dy**2)*conn[i,j]*0.03,zorder=100-conn[i,j]) arr1 = P.Arrow(sproj[i,0], sproj[i,1], dx, dy, width=(conn[i,j]*5+1)/30,ec=cm(conn[i,j]),fc=cm(conn[i,j]),zorder=100+conn[i,j]) fig.add_patch(arr1) if conn[j,i]>0.0: #ax.arrow(sproj[i,0],sproj[i,1],dx,dy,lw=conn[j,i]*5+1,ec=cm(conn[j,i]),head_width=N.sqrt(dx**2+dy**2)*conn[j,i]*0.03,zorder=100-conn[j,i]) arr1 = P.Arrow(sproj[i,0], sproj[i,1], dx, dy, width=(conn[j,i]*5+1)/30,ec=cm(conn[j,i]),fc=cm(conn[j,i]),zorder=100+conn[j,i]) fig.add_patch(arr1) else: if conn[i,j]>0.0: ax.arrow(sproj[i,0],sproj[i,1],sproj[j,0]-sproj[i,0],sproj[j,1]-sproj[i,1],lw=conn[i,j]*5+1,ec=cm(conn[i,j]),zorder=100-conn[i,j]) #elif view=='rear': # pass else: raise ValueError("view must be one of 'top' and 'rear'") # show surface #map = ax.imshow(topo, origin="lower", extent=(-r, r, -r, r), **kwargs) ax.axis('off') ax.axis('equal') if plothead: # plot scaled head outline head = plot_head_outline(scale=r, shift=(cx, cy), view=view, **plothead_kwargs) else: head = None if plotsensors: sensors = plot_sensors(sproj,ax,"wo",view=view) else: sensors = None if view == 'top': ax.xlim((cx-(r*1.2),cx+(r*1.2))) ax.ylim((cy-(r*1.2),cy+(r*1.2))) elif view=='rear': ax.xlim((cx-(r*1.2),cx+(r*1.2))) ax.ylim((cz-(r*0.4),cz+(r*1.2))) return map, head, sensors
chosen_pic_depth_image = Image.fromarray(numpy.uint8(chosen_pic_depth_image_array)) # chosen_pic_depth_image = chosen_pic_depth_image.convert('P', palette = Image.ADAPTIVE, colors = 2) # chosen_pic_depth_image.show() # Making the images smaller for faster processing. chosen_pic_image.thumbnail((350, 350), Image.ANTIALIAS) chosen_pic_depth_image.thumbnail((350, 350), Image.ANTIALIAS) # When the colormap mode is enabled, we use the colormapped depth data as a texture. chosen_pic_photo_image_buffer = io.BytesIO() chosen_pic_colormap_image_buffer = io.BytesIO() arrx = numpy.array(chosen_pic_depth_image.convert('L')).astype(int) pre_cmap_array = (255*(arrx - numpy.min(arrx))/numpy.ptp(arrx)).astype(int) cm = matplotlib.cm.get_cmap('jet') post_cmap_array = numpy.uint8(numpy.rint(cm(pre_cmap_array)*255))[:, :, :3] cmap_img = Image.fromarray(post_cmap_array) cmap_img.save(chosen_pic_colormap_image_buffer, format = 'PNG') chosen_pic_image.save(chosen_pic_photo_image_buffer, format = 'PNG') rgbData = chosen_pic_photo_image_buffer.getvalue() chosen_pic_depth_image_buffer = io.BytesIO() chosen_pic_depth_image.save(chosen_pic_depth_image_buffer, format = 'PNG') depthData = chosen_pic_depth_image_buffer.getvalue() s = Server() s.start_server() modeSelector = ui.SegmentedControl(alpha = 0, corner_radius = 5) modeSelector.segments = ('Mesh' , 'Wireframe', 'Point Cloud') modeSelector.selected_index = 0
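# Minimal sketch of the depth-to-colormap step above, with a synthetic grayscale
# ramp standing in for the real depth image; the output file name is illustrative.
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

gray = np.tile(np.arange(256, dtype=np.uint8), (64, 1))   # fake 64x256 depth ramp
cmap = plt.get_cmap('jet')
rgba = cmap(gray)                      # integer input indexes the 256-entry LUT
rgb = np.uint8(np.rint(rgba * 255))[:, :, :3]
Image.fromarray(rgb).save('depth_colormapped_example.png')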
def plot_2D_SWC(tree=None, file_name=None, cs=None, synapses=None, locs=None, syn_cs=None, outN=None, draw_cbar=True, draw_scale=True, color_scale=None, num_ticks=5, no_axon=True, my_color='k', special_syn=None, syn_labels=None, cbar_orientation='horizontal', cbar_label='Vm', lwidth_factor=1, show_axes=False, radial_projection=False, alpha=1.): ''' Colors can be None: uniform/default matplotlib color Any color code: uniform but specified color array of values: colormap? ''' import matplotlib.patheffects as PathEffects xlim = [0,0] ylim = [0,0] frame1 = plt.gca() if file_name != None: # read the SWC into a dictionary: key=index, value=(x,y,z,d,parent) x = open(file_name,'r') SWC = {} for line in x : if(not line.startswith('#')) : splits = line.split() index = int(splits[0]) n_type = int(splits[1]) x = float(splits[2]) y = float(splits[3]) z = float(splits[4]) r = float(splits[5]) parent = int(splits[-1]) SWC[index] = (x,y,z,r,parent,n_type) if x > xlim[1]: xlim[1] = x if x < xlim[0]: xlim[0] = x if y > ylim[1]: ylim[1] = y if y < ylim[0]: xlim[0] = y elif tree != None : SWC = {} nodes = tree.get_nodes() for node in nodes: p3d = node.get_content()['p3d'] index = p3d.index n_type = p3d.type x = p3d.x y = p3d.y z = p3d.z r = p3d.radius parent = p3d.parent_index SWC[index] = (x,y,z,r,parent,n_type) if x > xlim[1]: xlim[1] = x if x < xlim[0]: xlim[0] = x if y > ylim[1]: ylim[1] = y if y < ylim[0]: xlim[0] = y else: print 'Error: input is either \'tree\' or \'filename\'' exit(1) if locs != None: # reshape location list loc_dict = {} for loc in locs: loc_dict[loc['node']] = loc['x'] #if use_colors: #my_color_list = ['r','g','b','c','m','y','r--','b--','g--', 'y--'] #else: #my_color_list = ['k','k','k','k','k','k','k--','k--','k--', 'k--'] # for color scale plotting if cs == None: pass elif color_scale != None: max_cs = color_scale[1] min_cs = color_scale[0] norm_cs = (max_cs - min_cs) * (1. + 1./100.) elif isinstance(cs, np.ndarray): max_cs = np.max(cs) min_cs = np.min(cs) norm_cs = (max_cs - min_cs) * (1. + 1./100.) elif isinstance(cs, list): max_cs = max(cs) min_cs = min(cs) norm_cs = (max_cs - min_cs) * (1. + 1./100.) elif isinstance(cs, dict): arr = np.array([cs[key] for key in cs.keys()]) max_cs = np.max(arr) min_cs = np.min(arr) norm_cs = (max_cs - min_cs) * (1. + 1./100.) else: raise Exception('cs type is invalid') if cs != None: cm = plt.get_cmap('jet') Z = [[0,0],[0,0]] levels = np.linspace(min_cs, max_cs, 100) CS3 = plt.contourf(Z, levels, cmap=cm) min_y = 100000.0 for index in SWC.keys() : # not ordered but that has little importance here # draw a line segment from parent to current point current_SWC = SWC[index] #print 'index: ', index, ' -> ', current_SWC c_x = current_SWC[0] c_y = current_SWC[1] c_z = current_SWC[2] c_r = current_SWC[3]*2. 
parent_index = current_SWC[4] if(c_y < min_y) : min_y = c_y if(index <= 3) : print 'do not draw the soma and its CNG, 2 point descriptions' else : if (not no_axon) or (current_SWC[5] !=2): parent_SWC = SWC[parent_index] p_x = parent_SWC[0] p_y = parent_SWC[1] p_z = parent_SWC[2] p_r = parent_SWC[3] if(p_y < min_y) : min_y= p_y # print 'index:', index, ', len(cs)=', len(cs) if(cs == None) : if radial_projection: pl = plt.plot([np.sqrt(p_x**2+p_z**2), np.sqrt(c_x**2+c_z**2)], [p_y,c_y], c=my_color, linewidth=c_r*lwidth_factor, alpha=alpha) else: pl = plt.plot([p_x,c_x], [p_y,c_y], c=my_color, linewidth=c_r*lwidth_factor, alpha=alpha) else : if radial_projection: pl = plt.plot([np.sqrt(p_x**2+p_z**2), np.sqrt(c_x**2+c_z**2)], [p_y,c_y], c=cm((cs[index]-min_cs)/norm_cs), linewidth=c_r*lwidth_factor, alpha=alpha) else: pl = plt.plot([p_x,c_x], [p_y,c_y], c=cm((cs[index]-min_cs)/norm_cs), linewidth=c_r*lwidth_factor, alpha=alpha) # add the synapses if synapses != None: if index in synapses: # plot synapse marker if syn_cs == None: if radial_projection: plt.plot(np.sqrt(c_x**2+c_z**2),c_y,'ro', markersize=5*lwidth_factor) else: plt.plot(c_x,c_y,'ro', markersize=5*lwidth_factor) else: if radial_projection: plt.plot(np.sqrt(c_x**2+c_z**2),c_y, 'o', mfc=syn_cs[index], markersize=5*lwidth_factor) else: plt.plot(c_x,c_y, 'o', mfc=syn_cs[index], markersize=5*lwidth_factor) # plot synapse label if syn_labels != None and index in syn_labels.keys(): txt = frame1.annotate(syn_labels[index], xy=(c_x, c_y), xycoords='data', xytext=(5,5), textcoords='offset points', fontsize='large') txt.set_path_effects([PathEffects.withStroke(foreground="w", linewidth=2)]) if locs != None: if index in loc_dict.keys(): # plot synapse marker p_x = SWC[parent_index][0] p_y = SWC[parent_index][1] p_z = SWC[parent_index][2] if radial_projection: x_plot = p_x + (c_x - p_x) * loc_dict[index] z_plot = p_z + (c_z - p_z) * loc_dict[index] point_plot = np.sqrt(x_plot**2 + z_plot**2) else: point_plot = p_x + (c_x - p_x) * loc_dict[index] y_plot = p_y + (c_y - p_y) * loc_dict[index] if syn_cs == None: plt.plot(point_plot, y_plot, 'ro', markersize=5*lwidth_factor) else: plt.plot(point_plot, y_plot, 'o', mfc=syn_cs[index], markersize=5*lwidth_factor) # plot synapse label if syn_labels != None and index in syn_labels.keys(): txt = frame1.annotate(syn_labels[index], xy=(x_plot, y_plot), xycoords='data', xytext=(5,5), textcoords='offset points', fontsize='large') txt.set_path_effects([PathEffects.withStroke(foreground="w", linewidth=2)]) if not show_axes: frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) frame1.set_xlabel('X') frame1.set_ylabel('Y') frame1.axes.get_xaxis().set_visible(show_axes) frame1.axes.get_yaxis().set_visible(show_axes) frame1.axison = show_axes # draw a scale bar if draw_scale: scale = 100 plt.plot([0,scale],[min_y*1.1,min_y*1.1],'k',linewidth=5) # 250 for MN, 100 for granule txt = frame1.annotate(r'' + str(scale) + ' $\mu$m', xy=(scale/2., min_y*1.1), xycoords='data', xytext=(-28,8), textcoords='offset points', fontsize='medium') txt.set_path_effects([PathEffects.withStroke(foreground="w", linewidth=2)]) #~ frame1.text(, min_y+(ylim[1]-ylim[0])/30, str(scale) + ' um') if(cs != None and draw_cbar) : # so that colorbar works with tight_layout from mpl_toolkits.axes_grid1 import make_axes_locatable divider = make_axes_locatable(frame1) if cbar_orientation=='horizontal': cax = divider.append_axes("bottom", "5%", pad="3%") else: cax = divider.append_axes("right", "5%", pad="3%") cb = 
plt.colorbar(None, cax=cax, orientation=cbar_orientation) ticks_f = np.round(np.linspace(min_cs, max_cs, num_ticks+2), decimals=1) #~ print ticks_f ticks_i = ticks_f cb.set_ticks(ticks_i) if cbar_orientation=='horizontal': cb.ax.xaxis.set_ticks_position('bottom') cb.set_label(r'$V_m$ (mV)') if(outN != None) : plt.savefig(outN) #~ if ax != None: #~ ax = frame1 return frame1
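# An alternative sketch for the colorbar handling in plot_2D_SWC: rather than the
# dummy contourf, a ScalarMappable with an explicit Normalize can back the
# colorbar for manually coloured line segments. The Vm range here is illustrative.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.cm import ScalarMappable

cmap = plt.get_cmap('jet')
norm = Normalize(vmin=-80.0, vmax=20.0)
fig, ax = plt.subplots()
for vm, y in zip(np.linspace(-80, 20, 11), np.linspace(0, 100, 11)):
    ax.plot([0, 1], [y, y], c=cmap(norm(vm)), lw=3)
sm = ScalarMappable(norm=norm, cmap=cmap)
sm.set_array([])
fig.colorbar(sm, ax=ax, orientation='horizontal', label=r'$V_m$ (mV)')
plt.show()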
parser.add_argument("--colors", "-c", type=colors.float_list_type, default=None, help="List of color indices to use, of the form: --colors=[0,0.1,1], where the doubles are in the range (0,1) and are mapped to a color using the colormap.") parser.add_argument("--colormap", "-m", type=str, default="jet", help="Colormap used to pick a color.") args = parser.parse_args() cm = colors.cm_mapper(0,1,args.colormap) fig, axs = get_axs() fnames = args.INPUT_DIRECTORIES for i,fname in enumerate(fnames): if args.colors is None: if len(fnames) > 1: c = cm(float(i)/(len(fnames)-1)) else: c = 'b' else: if type(args.colors[i]) is float: c = cm(args.colors[i]) else: c = args.colors[i] times,td = thaw_depth(fname) plot_thaw_depth(times, td, axs, color=c, label=fname) plt.tight_layout() axs.legend() plt.show()
import numpy as np import pandas as pd import matplotlib.pyplot as pylab # NOTE: Either freefall_Omukai.py or freefall_Omukai_Temperature.py must be run # first to generate the data for this script. from matplotlib.colors import ListedColormap, LinearSegmentedColormap from matplotlib import cm cm = cm.get_cmap('coolwarm', 4) cols = [cm(0.2), cm(0.4), cm(0.6), cm(0.8)] # Set colors colors = ["blue", "brown", "magenta", "purple"] # Set metallicity logZ = [-6., -4., -2., 0.] for i in range(len(logZ)): filenamefH2 = 'logfH2_v_logN_[Z]=%d_2005.csv' % logZ[i] n = [] fH2 = [] with open(filenamefH2, 'r') as datafH2: lines = datafH2.readlines() for line in lines[1:]: p = line.split(',') n.append(float(p[0])) fH2.append(float(p[1])) pylab.plot(n, fH2,
def main_routine (wd="./",cfg="./python/parameter.cfg",cnt=11,sptype=1,myCmap="RdYlBu"): ### read config file ### print ("load from config file: " + cfg) configParser = cp.ConfigParser() configParser.read(cfg) print (configParser.sections()) cfg=configParser.__dict__['_sections'].copy() #for src, target in cfg['NVSETUP'].items(): # print(src + " : " + target) omega_c = float(cfg['NVSETUP']['{omega_c}']) nWrite=int(cfg['OCFourier']['{write_harmonic}']) nRead =int(cfg['OCFourier']['{read_harmonic}']) nStore=int(cfg['MEFourier']['{storage_harmonic}']) nDown =nRead+nWrite nUp =nDown+nWrite ### read config file ### ### read data ### cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg) time =IOHelper.functionaltimes_readwrite(**cfg) time['write'][:] *= 1e9 time['read'][:] *= 1e9 ti = int(time['idx_ti']) tf = int(time['idx_tf']) functime = time['read'][ti:tf] dt = float(time['delta_t']) filename =IOHelper.getVectorOverlap(**cfg) reGamma,imGamma=sp.loadtxt(filename).T alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead] alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown] alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp] ### read data ### ### plotting Reg1Up = sp.dot(alphaU.conj(),cavityWrite) Reg1Down = sp.dot(alphaD.conj(),cavityWrite) Reg2Down = sp.dot(alphaD.conj(),cavityMemo) Reg2Up = sp.dot(alphaU.conj(),cavityMemo) Reg2Read = sp.dot(alphaR.conj(),cavityRead) Reg2DownRead = Reg2Down + Reg2Read Reg2UpRead = Reg2Up + Reg2Read FuncInfo = sp.zeros([cnt,2]) FuncInfoPhase = sp.zeros([cnt,2]) FuncSuperRead = sp.zeros([cnt,functime.size],complex) denom = sp.zeros([2]) denom[0] = cumtrapz( sp.absolute(Reg2DownRead[ti:tf])**2, x=None, dx=dt )[-1] denom[1] = cumtrapz( sp.absolute(Reg2UpRead[ti:tf])**2, x=None, dx=dt )[-1] for i in sp.arange(0.0,cnt): phi = i*2.0/(cnt-1.0) Reg1Super = superimpose(Reg1Down,Reg1Up,phi,sptype) Reg2Super = superimpose(Reg2Down,Reg2Up,phi,sptype) Reg2SuperRead = Reg2Super + Reg2Read FuncSuperRead[i,:] = Reg2SuperRead[ti:tf] FuncInfoIntegrand = sp.absolute(FuncSuperRead[i,:]) * sp.absolute(Reg2DownRead[ti:tf]) FuncInfo[i,0] = cumtrapz( FuncInfoIntegrand, x=None, dx=dt )[-1]/denom[0] FuncInfoIntegrand = sp.absolute(FuncSuperRead[i,:]) * sp.absolute(Reg2UpRead[ti:tf]) FuncInfo[i,1] = cumtrapz( FuncInfoIntegrand, x=None, dx=dt )[-1]/denom[1] FuncInfoIntegrand = FuncSuperRead[i,:].conj() * Reg2UpRead[ti:tf] FuncInfoPhase[i,0] = cumtrapz( FuncInfoIntegrand.real, x=None, dx=dt )[-1]/denom[1] FuncInfoPhase[i,1] = cumtrapz( FuncInfoIntegrand.imag, x=None, dx=dt )[-1]/denom[1] xx, yy = sp.meshgrid(functime,sp.linspace(0.0,2.0,cnt)) zzR = FuncSuperRead.real/FuncSuperRead.real.max() zzI = FuncSuperRead.imag/FuncSuperRead.imag.max() zzA = sp.absolute(FuncSuperRead)**2/((sp.absolute(FuncSuperRead)**2).max()) zmin = -1.5 zmax = +1.0 fs = 20 cm = plt.cm.get_cmap(myCmap) myColors = cm(zzR) fig = plt.figure() fig0 = fig.add_subplot(231, projection='3d') fig0.plot_surface(xx, yy, zzA, rstride=10, cstride=5, cmap=cm, alpha=0.5,zorder=11.0,vmin=-1, vmax=1) fig0.contourf(xx, yy, zzA, zdir='z', offset=zmin, cmap=cm, vmin=-1, vmax=1,zorder=1.0) fig0.set_zlim(zmin,zmax) fig0.set_title("a) normalized $|A_0(t;\phi_0)|^2$",fontsize=fs) fig0.set_xlabel("$t$ in ns",fontsize=fs) fig0.set_ylabel("$\phi_0/\pi$",fontsize=fs) fig1 = fig.add_subplot(232, projection='3d') fig1.plot_surface(xx, yy, zzR, rstride=10, cstride=5, cmap=cm, alpha=0.5,zorder=11.0,vmin=-1, vmax=1) fig1.contourf(xx, yy, zzR, zdir='z', offset=zmin, cmap=cm, vmin=-1, vmax=1,zorder=1.0) fig1.set_zlim(zmin,zmax) fig1.set_title("b) 
normalized $Re[\,A_0(t;\phi_0)\,]$",fontsize=fs) fig1.set_xlabel("$t$ in ns",fontsize=fs) fig1.set_ylabel("$\phi_0/\pi$",fontsize=fs) fig2 = fig.add_subplot(233, projection='3d') fig2.plot_surface(xx, yy, zzI, rstride=10, cstride=5, cmap=cm, alpha=0.5,zorder=11.0,vmin=-1, vmax=1) fig2.contourf(xx, yy, zzI, zdir='z', offset=zmin, cmap=cm, vmin=-1, vmax=1,zorder=1.0) fig2.set_zlim(zmin,zmax) fig2.set_title("c) normalized $Im[\,A_0(t;\phi_0)\,]$",fontsize=fs) fig2.set_xlabel("$t$ in ns",fontsize=fs) fig2.set_ylabel("$\phi_0/\pi$",fontsize=fs) plt.subplot2grid((2,3),(1,0),colspan=1,rowspan=1) plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfo[:,0],label="overlap with $i=$'$0$'",linewidth="2",color="blue") plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfo[:,1],label="overlap with $i=$'$1$'",linewidth="2",color="red") plt.ylim(0,1.1) plt.xlabel("$\phi_0/\pi$",fontsize=fs) plt.legend(bbox_to_anchor=(0.52, 0.0), loc=3, borderaxespad=0.) plt.title("d) classical overlap, $O_{\mathbb{R}}(i)$",fontsize=fs) # plt.title("$\\frac{1}{N}\int_{T_{{\cal F}1}}^{T_{{\cal F}2}}dt\,|A_0(t;\phi_0)|\cdot|A_i(t)|$",fontsize=fs) # FuncInfoPhase[:,0]-=min(FuncInfoPhase[:,0]) # FuncInfoPhase[:,0]/=0.5*max(sp.absolute(FuncInfoPhase[:,0])) # FuncInfoPhase[:,0]-=1.0 # FuncInfoPhase[:,1]-=min(FuncInfoPhase[:,1]) # FuncInfoPhase[:,1]/=0.5*max(sp.absolute(FuncInfoPhase[:,1])) # FuncInfoPhase[:,1]-=1.0 plt.subplot2grid((2,3),(1,1),colspan=1,rowspan=1) plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,0],label="Re[ $O_{\mathbb{C}}($'$1$'$)$ ]",linewidth="2",color="red") plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,1],label="Im[ $O_{\mathbb{C}}($'$1$'$)$ ]",linewidth="2",color="magenta") plt.title("e) complex overlap, $O_{\mathbb{C}}($'$1$'$)$",fontsize=fs) # plt.title("$\\frac{1}{N}\int_{T_{{\cal F}1}}^{T_{{\cal F}2}}dt\,A_0(t;\phi_0)^*\cdot A_1(t)$",fontsize=fs) plt.xlabel("$\phi_0/\pi$",fontsize=fs) plt.legend(bbox_to_anchor=(0.66, 0.0), loc=3, borderaxespad=0.) plt.ylim(-1,1.1) FuncInfoPhase[:,0]-=min(FuncInfoPhase[:,0]) FuncInfoPhase[:,0]/=0.5*max(sp.absolute(FuncInfoPhase[:,0])) FuncInfoPhase[:,0]-=1.0 FuncInfoPhase[:,1]-=min(FuncInfoPhase[:,1]) FuncInfoPhase[:,1]/=0.5*max(sp.absolute(FuncInfoPhase[:,1])) FuncInfoPhase[:,1]-=1.0 # plt.subplot2grid((2,3),(1,2),colspan=1,rowspan=1) # plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,0],linewidth="2",color="red") # plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,1],linewidth="2",color="magenta") # plt.title("f) $O_{\mathbb{C}}$ scaled & translated, $(O_{\mathbb{C}}($'$1$'$)-I_R)/I_0$",fontsize=fs) # plt.xlabel("$\phi_0/\pi$",fontsize=fs) # plt.ylim(-1,1) plt.show()
def get_colors(self, num, cmap='jet'):
    import matplotlib.cm as ccmm
    from numpy import linspace
    cm = getattr(ccmm, cmap)
    return cm(linspace(0, 1, num))
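# Example use of get_colors: assuming it is a method on some plotting helper,
# self.get_colors(4, cmap='viridis') returns four RGBA rows sampled evenly from
# the map; the equivalent one-liner also works standalone:
import numpy as np
import matplotlib.cm as ccmm
four_colors = ccmm.viridis(np.linspace(0, 1, 4))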
import matplotlib.cm as cm
import matplotlib.colors as colors

NUM_COLORS = 2
cm = plt.get_cmap('Paired')
file_list = sys.argv[1:]
fig = plt.figure()
#plt.suptitle(r'Pdf of the collision times as a function of $\eta$', fontsize=16)
ax = fig.add_subplot(111)
ax.grid(True)
plt.xlabel(r'$t_c$', fontsize='15')
plt.ylabel(r'$P(t_c)$', fontsize='15')
ax.set_prop_cycle(color=[cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)])
i = 0
Markers = ['x', '+', '*', 's', 'd', 'v', '^', '<', '>', 'p', 'h', '.', '+', '*', 'o', 'x', '^', '<', 'h', '.', '>', 'p', 's', 'd', 'v', 'o', 'x', '+', '*', 's', 'd', 'v', '^', '<', '>', 'p', 'h', '.']
col = ['r', 'b']
# labels comparing the fit against the computed mean
labels = ['fit', 'computed']
for f in file_list:
    #eta_temp = float(f[-13:-4])
    eta, mfp = np.loadtxt(f, unpack=True, usecols=(0, 1))
    width = 0.7 * (eta[1] - eta[0])
    color = cm(1. * i / NUM_COLORS)
    #plt.bar(eta, mfp, width=width, label=r'$\eta=%.5lf$' % (eta_temp), alpha=(0.8), color=color, linewidth=0)
    plt.plot(eta, mfp, Markers[i], label=labels[i], color=col[i])
    i += 1
mean = 0
if (calc_distance): ax1.set_title('Electric Field Magnitude \n Mean Point Distance %0.4e mm' % mean_point_distance) else: ax1.set_title('Electric Field Magnitude') plt.show() else: eps_plot = 1e-10 nonzeroE = np.where(magE > eps_plot) zeroE = np.where(magE <= eps_plot) E[nonzeroE, 0] = E[nonzeroE, 0] / magE[nonzeroE] E[nonzeroE, 1] = E[nonzeroE, 1] / magE[nonzeroE] magE[nonzeroE] = np.log10(magE[nonzeroE]) magE[np.where(magE<0)] = 0 magE[zeroE] = 0 norm = colors.Normalize() norm.autoscale(magE) cm = cm.get_cmap("turbo") sm = plt.cm.ScalarMappable(cmap=cm, norm=norm) sm.set_array([]) fig1, ax1 = plt.subplots(dpi=400) ax1.set_aspect('equal') quiv_scale = np.max(E)*75 plt.quiver(C[:, 0], C[:, 1], E[:, 0], E[:, 1], color=cm(magE), scale=quiv_scale, headwidth=2, headlength=3) ax1.set_title('Electric Field Lines') cb = fig1.colorbar(sm) cb.ax.set_ylabel("log(|E [V/mm]|), saturated at 0", rotation=90) plt.show()
def show_distance_plot(stream, event, inventory, starttime, endtime, plot_travel_times=True): """ Plots distance dependent seismogramm sections. :param stream: The waveforms. :type stream: :class:`obspy.core.stream.Stream` :param event: The event. :type event: :class:`obspy.core.event.Event` :param inventory: The station information. :type inventory: :class:`obspy.station.inventory.Inventory` :param starttime: starttime of traces to be plotted :type starttime: UTCDateTime :param endttime: endttime of traces to be plotted :type endttime: UTCDateTime :param plot_travel_times: flag whether phases are marked as traveltime plots in the section obspy.taup is used to calculate the phases :type pot_travel_times: bool """ stream = stream.slice(starttime=starttime, endtime=endtime).copy() event_depth_in_km = event.origins[0].depth / 1000.0 event_time = event.origins[0].time attach_coordinates_to_traces(stream, inventory, event=event) cm = plt.cm.jet stream.traces = sorted(stream.traces, key=lambda x: x.stats.distance)[::-1] # One color for each trace. colors = [cm(_i) for _i in np.linspace(0, 1, len(stream))] # Relative event times. times_array = stream[0].times() + (stream[0].stats.starttime - event_time) distances = [tr.stats.distance for tr in stream] min_distance = min(distances) max_distance = max(distances) distance_range = max_distance - min_distance stream_range = distance_range / 10.0 # Normalize data and "shift to distance". stream.normalize() for tr in stream: tr.data *= stream_range tr.data += tr.stats.distance plt.figure(figsize=(18, 10)) for _i, tr in enumerate(stream): plt.plot(times_array, tr.data, label="%s.%s" % (tr.stats.network, tr.stats.station), color=colors[_i]) plt.grid() plt.ylabel("Distance in degree to event") plt.xlabel("Time in seconds since event") plt.legend() dist_min, dist_max = plt.ylim() if plot_travel_times: distances = defaultdict(list) ttimes = defaultdict(list) for i in np.linspace(dist_min, dist_max, 1000): tts = getTravelTimes(i, event_depth_in_km, "ak135") for phase in tts: name = phase["phase_name"] distances[name].append(i) ttimes[name].append(phase["time"]) for key in distances.iterkeys(): min_distance = min(distances[key]) max_distance = max(distances[key]) min_tt_time = min(ttimes[key]) max_tt_time = max(ttimes[key]) if min_tt_time >= times_array[-1] or \ max_tt_time <= times_array[0] or \ (max_distance - min_distance) < 0.8 * (dist_max - dist_min): continue ttime = ttimes[key] dist = distances[key] if max(ttime) > times_array[0] + 0.9 * times_array.ptp(): continue plt.scatter(ttime, dist, s=0.5, zorder=-10, color="black", alpha=0.8) plt.text(max(ttime) + 0.005 * times_array.ptp(), dist_max - 0.02 * (dist_max - dist_min), key) plt.ylim(dist_min, dist_max) plt.xlim(times_array[0], times_array[-1]) plt.title(event.short_str()) plt.show()
#print(df) fig, ax = plt.subplots(1, 1, figsize=(10, 7)) x = list(range(2009, 2020)) univs = df.university.unique() cm = plt.cm.get_cmap('tab20') print(len(univs)) for i, univ in enumerate(sorted(univs)): y = [ len(df[np.logical_and(df.university == univ, df.year == year)]) for year in x ] ax.plot(x, y, label=univ, alpha=0.95, lw=5 if univ == 'bucknell' else 2, color=cm(i / len(univs))) ax.xaxis.set_major_locator(MultipleLocator(1)) ax.set_ylabel('dblp count') ax.set_xlabel('year') ax.set_ylim(bottom=0) ax.legend() plt.tight_layout() plt.show()
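# Related sketch: with a qualitative map such as 'tab20' it is often clearer to
# index colours by integer (i % cmap.N) rather than by fraction, so neighbouring
# categories stay visually distinct; the category names below are made up.
import matplotlib.pyplot as plt

cmap = plt.get_cmap('tab20')
fig, ax = plt.subplots()
for i, name in enumerate(['alpha', 'beta', 'gamma', 'delta']):
    ax.plot([2009, 2019], [i, i + 2], color=cmap(i % cmap.N), label=name)
ax.legend()
plt.show()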
def main_routine (wd="./",cfg="./python/parameter.cfg",thetaCnt=11,phiCnt=11,bloch=0,myMap="jet",minOlap=0,fortranCheck=0,gridR=5,gridC=5): ### read config file ### print ("load from config file: " + cfg) cfg=IOHelper.loadCfg(wd,cfg) #for src, target in cfg['NVSETUP'].items(): # print(src + " : " + target) omega_c = float(cfg['NVSETUP']['{omega_c}']) nWrite=int(cfg['OCFourier']['{write_harmonic}']) nRead =int(cfg['OCFourier']['{read_harmonic}']) nStore=int(cfg['MEFourier']['{storage_harmonic}']) nDown =nRead+nWrite nUp =nDown+nWrite ### read config file ### ### read data ### cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg) time =IOHelper.functionaltimes_readwrite(**cfg) time['write'][:] *= 1e9 time['read'][:] *= 1e9 ti = int(time['idx_ti']) tf = int(time['idx_tf']) functime = time['read'][ti:tf] dt = float(time['delta_t']) filename =IOHelper.getVectorOverlap(**cfg) reGamma,imGamma=sp.loadtxt(filename).T alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead] alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown] alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp] ### read data ### ### plotting Reg1Up = sp.dot(alphaU.conj(),cavityWrite) Reg1Down = sp.dot(alphaD.conj(),cavityWrite) Reg2Down = sp.dot(alphaD.conj(),cavityMemo) Reg2Up = sp.dot(alphaU.conj(),cavityMemo) Reg2Read = sp.dot(alphaR.conj(),cavityRead) Reg2DownRead = Reg2Down + Reg2Read Reg2UpRead = Reg2Up + Reg2Read spos = sp.zeros([2,thetaCnt,phiCnt],complex) FuncInfoOlap = sp.zeros([2,thetaCnt,phiCnt],complex) gamma = sp.zeros([thetaCnt,phiCnt],complex) delta = sp.zeros([thetaCnt,phiCnt],complex) # I00 = 1j*cumtrapz( (Reg2Down[ti:tf].conj() * Reg2Down[ti:tf]).imag, x=None, dx=dt )[-1] I00 = cumtrapz( (Reg2Down[ti:tf] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1] I01 = 1j*cumtrapz( (Reg2Up [ti:tf] * Reg2Down[ti:tf].conj()).imag, x=None, dx=dt )[-1] I01 += cumtrapz( (Reg2Up [ti:tf] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1] I0R = 1j*cumtrapz( (Reg2Read[ti:tf] * Reg2Down[ti:tf].conj()).imag, x=None, dx=dt )[-1] I0R += cumtrapz( (Reg2Read[ti:tf] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1] # I11 = 1j*cumtrapz( (Reg2Up [ti:tf].conj() * Reg2Up [ti:tf]).imag, x=None, dx=dt )[-1] I11 = cumtrapz( (Reg2Up [ti:tf] * Reg2Up [ti:tf].conj()).real, x=None, dx=dt )[-1] I10 = 1j*cumtrapz( (Reg2Down[ti:tf] * Reg2Up [ti:tf].conj()).imag, x=None, dx=dt )[-1] I10 += cumtrapz( (Reg2Down[ti:tf] * Reg2Up [ti:tf].conj()).real, x=None, dx=dt )[-1] I1R = 1j*cumtrapz( (Reg2Read[ti:tf] * Reg2Up [ti:tf].conj()).imag, x=None, dx=dt )[-1] I1R += cumtrapz( (Reg2Read[ti:tf] * Reg2Up [ti:tf].conj()).real, x=None, dx=dt )[-1] for i in sp.arange(0.0,thetaCnt): theta = i/(thetaCnt-1.0) # from 0 to 1 * pi for j in sp.arange(0.0,phiCnt): phi = j/(phiCnt-1.0)*2.0 # from 0 to 2 * pi spos[0,i,j] = sp.cos(theta*sp.pi/2.0) spos[1,i,j] = sp.sin(theta*sp.pi/2.0)*sp.exp(1j*phi*sp.pi) if fortranCheck == 1: cmd = "./scripts/ifort-checkBloch.sh " + wd print ("compile fortran routines: "+cmd) call(cmd.split()) print ("### call checkBloch") cmd=wd+"checkBloch" generateSuperposition = Popen(cmd.split(), stdin=PIPE) # run fortran program with piped standard input cmd = "echo {:}".format(thetaCnt) # communication with fortran-routine: chose superposition parameter generateInput = Popen(cmd.split(), stdout=generateSuperposition.stdin) # send action to fortran program cmd = "echo {:}".format(phiCnt) # communication with fortran-routine: chose superposition parameter generateInput = Popen(cmd.split(), stdout=generateSuperposition.stdin) # send action to 
fortran program output = generateSuperposition.communicate()[0] generateInput.wait() FuncInfoOlap [0,:,:],FuncInfoOlap [1,:,:]=IOHelper.read_MtrxProjection(thetaCnt,phiCnt,**cfg['FILES']) else: for i in sp.arange(0.0,thetaCnt): for j in sp.arange(0.0,phiCnt): FuncCavity = spos[0,i,j]*Reg2Down[ti:tf] + spos[1,i,j]*Reg2Up[ti:tf] FuncCavity[:] += Reg2Read [ti:tf] FuncInfoOlap [0,i,j] = cumtrapz( (FuncCavity[:] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1] FuncInfoOlap [0,i,j] += 1j*cumtrapz( (FuncCavity[:] * Reg2Down[ti:tf].conj()).imag, x=None, dx=dt )[-1] FuncInfoOlap [1,i,j] = cumtrapz( (FuncCavity[:] * Reg2Up[ti:tf].conj()).real, x=None, dx=dt )[-1] FuncInfoOlap [1,i,j] += 1j*cumtrapz( (FuncCavity[:] * Reg2Up[ti:tf].conj()).imag, x=None, dx=dt )[-1] if minOlap==0: gamma[:,:]=((FuncInfoOlap [0,:,:]-I0R)*I11+(I1R-FuncInfoOlap [1,:,:])*I01)/(I11*I00-I01*I10) delta[:,:]=((FuncInfoOlap [1,:,:]-I1R)*I00+(I0R-FuncInfoOlap [0,:,:])*I10)/(I11*I00-I01*I10) else : gamma[:,:]= (FuncInfoOlap [0,:,:]-I0R)/I00 delta[:,:]= (FuncInfoOlap [1,:,:]-I1R)/I11 fs = 22 label_size = 12 plt.rcParams['xtick.labelsize'] = label_size plt.rcParams['ytick.labelsize'] = label_size plt.rcParams['xtick.major.pad']='20' plt.rcParams['ytick.major.pad']='20' fig = plt.figure() if bloch == 0: xx, yy = sp.meshgrid(sp.linspace(0.0,2.0,phiCnt),sp.linspace(0.0,1.0,thetaCnt)) zmin = 0.0 zmax = +1.0 zzOlapR0 = gamma [:,:].real zzOlapI0 = gamma [:,:].imag zzErr0 = sp.absolute(gamma[:,:]-spos[0,:,:]) zzOlapR1 = delta [:,:].real zzOlapI1 = delta [:,:].imag zzErr1 = sp.absolute(delta[:,:]-spos[1,:,:]) fig1 = fig.add_subplot(321, projection='3d') fig1.plot_surface(xx, yy, zzOlapR0, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapR0.min(), vmax=zzOlapR0.max()) fig1.set_zlim(0,1) # fig1.set_title("Re$[\,\gamma_R\,]\\approx\cos(\\theta/2)$",fontsize=fs) fig1.set_title("Re$[\,\gamma_R\,]$",fontsize=fs) fig1.set_ylabel("$\\theta_W/\pi$",fontsize=fs) fig1.set_xlabel("$\phi_W / \pi$",fontsize=fs) fig1 = fig.add_subplot(323, projection='3d') fig1.plot_surface(xx, yy, zzOlapI0, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapI0.min(), vmax=zzOlapI0.max()) # fig1.set_zlim(-0.01,0.01) # fig1.set_title("Im$[\,\gamma_R\,]\\approx\cos(\\theta/2)$",fontsize=fs) fig1.set_title("Im$[\,\gamma_R\,]$",fontsize=fs) fig1.set_ylabel("$\\theta_W/\pi$",fontsize=fs) fig1.set_xlabel("$\phi_W/\pi$",fontsize=fs) fig1 = fig.add_subplot(322, projection='3d') fig1.plot_surface(xx, yy, zzOlapR1, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapR1.min(), vmax=zzOlapR1.max()) fig1.set_zlim(-1,1) # fig1.set_title("Re$[\,\delta_R\,]\\approx\sin(\\theta/2)\cos(\phi)$",fontsize=fs) fig1.set_title("Re$[\,\delta_R\,]$",fontsize=fs) fig1.set_ylabel("$\\theta_W/\pi$",fontsize=fs) fig1.set_xlabel("$\phi_W / \pi$",fontsize=fs) fig1 = fig.add_subplot(324, projection='3d') fig1.plot_surface(xx, yy, zzOlapI1, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapI1.min(), vmax=zzOlapI1.max()) fig1.set_zlim(-1,1) # fig1.set_title("Im$[\,\delta_R\,]\\approx\sin(\\theta/2)\sin(\phi)$",fontsize=fs) fig1.set_title("Im$[\,\delta_R\,]$",fontsize=fs) fig1.set_ylabel("$\\theta_W/\pi$",fontsize=fs) fig1.set_xlabel("$\phi_W/\pi$",fontsize=fs) fig1 = fig.add_subplot(325, projection='3d') fig1.plot_surface(xx, yy, zzErr0, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzErr0.min(), vmax=zzErr0.max()) # fig1.set_zlim(-1,1) # 
fig1.set_title("Re$[\,\delta_R\,]\\approx\sin(\\theta/2)\cos(\phi)$",fontsize=fs) fig1.set_title("$\epsilon_\gamma$",fontsize=fs) fig1.set_ylabel("$\\theta_W/\pi$",fontsize=fs) fig1.set_xlabel("$\phi_W / \pi$",fontsize=fs) fig1 = fig.add_subplot(326, projection='3d') fig1.plot_surface(xx, yy, zzErr1, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzErr1.min(), vmax=zzErr1.max()) # fig1.set_zlim(-1,1) # fig1.set_title("Re$[\,\delta_R\,]\\approx\sin(\\theta/2)\cos(\phi)$",fontsize=fs) fig1.set_title("$\epsilon_\delta$",fontsize=fs) fig1.set_ylabel("$\\theta_W/\pi$",fontsize=fs) fig1.set_xlabel("$\phi_W / \pi$",fontsize=fs) else: # x_expect = 2e0*sp.real(FuncInfoOlap [0,:,:]*FuncInfoOlap [1,:,:]) # x = 2e0*sp.real(spos[0,:,:].conj()*spos [1,:,:]) y = 2e0*sp.real(spos[0,:,:].conj()*spos [1,:,:]/1.0j) z = sp.absolute(spos[0,:,:])**2 -sp.absolute(spos [1,:,:])**2 xprime = 2e0*sp.real(gamma [:,:].conj()*delta [:,:]) yprime = 2e0*sp.real(gamma [:,:].conj()*delta [:,:]/1.0j) zprime = sp.absolute(gamma [:,:])**2 -sp.absolute(delta [:,:])**2 myDensity = sp.sqrt(sp.absolute(x-xprime)**2 + sp.absolute(y-yprime)**2 + sp.absolute(z-zprime)**2) # myDensity = 1.0 - (x*xprime + y*yprime + z*zprime) cm = plt.cm.get_cmap(myMap) myMin = myDensity.min() myMax = max(sp.absolute(myDensity.max()),sp.absolute(myDensity.min())) myColors = cm(myDensity/myMax) m = plt.cm.ScalarMappable(cmap=myMap) fig3D = fig.add_subplot(1, 1, 1, projection='3d') surf = fig3D.plot_surface(xprime, yprime, zprime, rstride=1, cstride=1, linewidth=1, color="black", facecolors=myColors,shade=False,antialiased=True, vmin=myMin, vmax=myMax) # fig3D.plot_wireframe(x*1.01, y*1.01, z*1.01, rstride=1, cstride=1,alpha=1,linewidth=1,color="black") tick2=(myMin+myMax)/2.0 tick1=round_sig(myMin+(myMax-myMin)*0.1,2) tick3=round_sig(myMax-(myMax-myMin)*0.1,2) tick2=round_sig(tick2,2) print "### error boundaries:", tick1,tick2,tick3 m.set_array(myDensity) cb= plt.colorbar(m,shrink=0.5,aspect=7,ticks=([tick1,tick2,tick3])) cb.formatter.set_scientific(True) cb.formatter.set_powerlimits((0, 0)) cb.update_ticks() fig3D.set_xlabel("$\langle \sigma_x(\gamma^\prime,\delta^\prime) \\rangle$", fontsize=fs) fig3D.set_ylabel("$\langle \sigma_y(\gamma^\prime,\delta^\prime) \\rangle$", fontsize=fs) fig3D.set_zlabel("$\langle \sigma_z(\gamma^\prime,\delta^\prime) \\rangle$", fontsize=fs) fig3D.set_xticks([-1,0,1]) fig3D.set_xlim([-1.01,1.01]) fig3D.xaxis._axinfo['label']['space_factor'] = 2.0 fig3D.set_yticks([-1,0,1]) fig3D.set_ylim([-1.01,1.01]) fig3D.yaxis._axinfo['label']['space_factor'] = 2.0 fig3D.set_zticks([-1,0,1]) fig3D.set_zlim([-1.01,1.01]) fig3D.zaxis._axinfo['label']['space_factor'] = 2.0 plt.show()
def get_color(i, n):
    spaces = np.linspace(0, 1 - 1 / n, n)
    cm = plt.get_cmap('hsv')
    return cm(spaces[i])
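# Example use of get_color: the 3rd of 10 evenly spaced hues; stopping the
# linspace at 1 - 1/n avoids the 'hsv' wrap-around where 0.0 and 1.0 are both red.
third_of_ten = get_color(2, 10)   # an RGBA tuple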
def main(): # starting position (x, y) start1 = [3, 3] start2 = [1, 3] start3 = [1, 2] # goal point (x, y) goal1 = [1, 7] goal2 = [3, 7] goal3 = [7, 8] if not os.path.exists('Output'): os.makedirs('Output') # safe distance s = 0.5 # Initial Planning rrt_star1 = RRTStar(start1, goal1, s=s) traj1 = rrt_star1.plan("Output/plan1.txt", "Output/explored1.png") rrt_star2 = RRTStar(start2, goal2, s=s) traj2 = rrt_star2.plan("Output/plan2.txt", "Output/explored2.png") rrt_star3 = RRTStar(start3, goal3, s=s) traj3 = rrt_star3.plan("Output/plan3.txt", "Output/explored3.png") # Plot planned trajectories fig, ax = plt.subplots() ax.set_xlim([0, 10]) ax.set_ylim([0, 10]) ax.plot(traj1[0], traj1[1], color='b', linewidth=1) ax.plot(traj2[0], traj2[1], color='r', linewidth=1) ax.plot(traj3[0], traj3[1], color='g', linewidth=1) plt.savefig("Output/Plan0.png") replan = [True, True, True] traj_all = [traj1, traj2, traj3] rrt_star = [rrt_star1, rrt_star2, rrt_star3] # replan2 = True # replan3 = True flag = True for i in range(2): col, t = check_for_replanning(traj1, traj2, traj3, s, flag) flag = False print("Collision: ", col) pd1 = float('inf') pd2 = float('inf') pd3 = float('inf') if col[0] and replan[0]: new_traj1 = rrt_star1.replan( [traj2, traj3], t[0], "Output/replanned" + str(i) + "_1.txt", "Output/re_explored" + str(i) + "_1.png") pd1 = (len(new_traj1[0]) - len(traj1[0])) / float(len( traj1[0])) * 100 if col[1] and replan[1]: new_traj2 = rrt_star2.replan( [traj1, traj3], t[1], "Output/replanned" + str(i) + "_2.txt", "Output/re_explored" + str(i) + "_2.png") pd2 = (len(new_traj2[0]) - len(traj2[0])) / float(len( traj2[0])) * 100 if col[2] and replan[2]: new_traj3 = rrt_star3.replan( [traj1, traj2], t[2], "Output/replanned" + str(i) + "_3.txt", "Output/re_explored" + str(i) + "_3.png") pd3 = (len(new_traj3[0]) - len(traj3[0])) / float(len( traj3[0])) * 100 m = min(pd1, pd2, pd3) if m == float('inf'): print("Final trajectories found at iteration: " + str(i + 1)) break if m == pd1: print("Trajectory 1 changed at iteration " + str(i + 1)) traj1 = new_traj1 replan[0] = False elif m == pd2: print("Trajectory 2 changed at iteration " + str(i + 1)) traj2 = new_traj2 replan[1] = False else: print("Trajectory 3 changed at iteration " + str(i + 1)) traj3 = new_traj3 replan[2] = False fig6, ax6 = plt.subplots() ax6.set_xlim([0, 10]) ax6.set_ylim([0, 10]) ax6.plot(traj1[0], traj1[1], color='b', linewidth=1) ax6.plot(traj2[0], traj2[1], color='r', linewidth=1) ax6.plot(traj3[0], traj3[1], color='g', linewidth=1) plt.savefig("Output/Plan" + str(i + 1) + ".png") fig2, ax2 = plt.subplots() ax2.set_xlim([0, 10]) ax2.set_ylim([0, 10]) obs = plt.Rectangle((4, 3.5), 2, 3, fill=True, color='k') ax2.add_patch(obs) L1 = len(traj1[0]) L2 = len(traj2[0]) L3 = len(traj3[0]) L = max(L1, L2, L3) cm = plt.get_cmap('plasma') ax2.set_color_cycle([cm(1. * i / (L)) for i in range(L)]) for i in range(L1 - 1): ax2.plot(traj1[0][i:i + 2], traj1[1][i:i + 2]) cm = plt.get_cmap('plasma') ax2.set_color_cycle([cm(1. * i / (L)) for i in range(L)]) for i in range(L2 - 1): ax2.plot(traj2[0][i:i + 2], traj2[1][i:i + 2]) cm = plt.get_cmap('plasma') ax2.set_color_cycle([cm(1. 
* i / (L)) for i in range(L)]) for i in range(L3 - 1): ax2.plot(traj3[0][i:i + 2], traj3[1][i:i + 2]) tr = [traj1, traj2, traj3] # for i in range(3): # print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") # print("For iteration: " + str(i+3)) # l2 = min(len(tr[i][0]), len(tr[(i+1)%3][0])) # for j in range(l2): # dist = np.sqrt((tr[i][0][j]-tr[(i+1)%3][0][j])**2 + (tr[i][1][j]-tr[(i+1)%3][1][j])**2) # # print(dist) # if dist < s: # print("Collision detected at: " + str(i)) # print(dist) # save data in txt file out1 = traj1.T if os.path.exists("Output/final_path1.txt"): os.remove("Output/final_path1.txt") final1 = open("Output/final_path1.txt", "a") for i in range(len(out1)): np.savetxt(final1, out1[i], fmt="%s", newline=' ') final1.write("\n") out2 = traj2.T if os.path.exists("Output/final_path2.txt"): os.remove("Output/final_path2.txt") final2 = open("Output/final_path2.txt", "a") for i in range(len(out2)): np.savetxt(final2, out2[i], fmt="%s", newline=' ') final2.write("\n") out3 = traj3.T if os.path.exists("Output/final_path3.txt"): os.remove("Output/final_path3.txt") final3 = open("Output/final_path3.txt", "a") for i in range(len(out3)): np.savetxt(final3, out3[i], fmt="%s", newline=' ') final3.write("\n") plt.savefig("Output/final_path.png") plt.show() plt.pause(15) plt.close()
def colorize(value, normalize=True, vmin=None, vmax=None, cmap=None, vals=255): """ A utility function for TensorFlow that maps a grayscale image to a matplotlib colormap for use with TensorBoard image summaries. By default it will normalize the input value to the range 0..1 before mapping to a grayscale colormap. Arguments: - value: 2D Tensor of shape [height, width] or 3D Tensor of shape [height, width, 1]. - vmin: the minimum value of the range used for normalization. (Default: value minimum) - vmax: the maximum value of the range used for normalization. (Default: value maximum) - cmap: a valid cmap named for use with matplotlib's `get_cmap`. (Default: 'gray') - vals: the number of values in the cmap minus one Example usage: ``` output = tf.random_uniform(shape=[256, 256, 1]) output_color = colorize(output, vmin=0.0, vmax=1.0, cmap='viridis') tf.summary.image('output', output_color) ``` Returns a 3D tensor of shape [height, width, 3]. """ value = tf.squeeze(value, axis=3) if normalize: vmin = tf.reduce_min(value) if vmin is None else vmin vmax = tf.reduce_max(value) if vmax is None else vmax value = (value - vmin) / (vmax - vmin) # vmin..vmax # dma = tf.reduce_max(value) # dma = tf.Print(dma, [dma], 'dma', summarize=16) # tf.summary.histogram('dma', dma) # just so tf.Print works # quantize indices = tf.to_int32(tf.round(value * float(vals))) else: # quantize indices = tf.to_int32(value) # 00 Unknown 0 0 0 # 01 Terrain 210 0 200 # 02 Sky 90 200 255 # 03 Tree 0 199 0 # 04 Vegetation 90 240 0 # 05 Building 140 140 140 # 06 Road 100 60 100 # 07 GuardRail 255 100 255 # 08 TrafficSign 255 255 0 # 09 TrafficLight 200 200 0 # 10 Pole 255 130 0 # 11 Misc 80 80 80 # 12 Truck 160 60 60 # 13 Car:0 200 200 200 if cmap == 'vkitti': colors = np.array([ 0, 0, 0, 210, 0, 200, 90, 200, 255, 0, 199, 0, 90, 240, 0, 140, 140, 140, 100, 60, 100, 255, 100, 255, 255, 255, 0, 200, 200, 0, 255, 130, 0, 80, 80, 80, 160, 60, 60, 200, 200, 200, 230, 208, 202 ]) colors = np.reshape(colors, [15, 3]).astype(np.float32) / 255.0 colors = tf.constant(colors) else: # gather cm = matplotlib.cm.get_cmap(cmap if cmap is not None else 'gray') if cmap == 'RdBu' or cmap == 'RdYlGn': colors = cm(np.arange(256))[:, :3] else: colors = cm.colors colors = np.array(colors).astype(np.float32) colors = np.reshape(colors, [-1, 3]) colors = tf.constant(colors, dtype=tf.float32) value = tf.gather(colors, indices) # value is float32, in [0,1] return value
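# Plain-NumPy sketch of the gather step in colorize(): build a lookup table from
# the colormap once, then index it with the quantized values; array shapes here
# are arbitrary.
import numpy as np
import matplotlib.pyplot as plt

cmap = plt.get_cmap('viridis')
lut = np.asarray(cmap(np.arange(cmap.N)))[:, :3].astype(np.float32)  # (N, 3) table
value = np.random.rand(64, 64).astype(np.float32)                    # toy "image"
indices = np.round(value * (cmap.N - 1)).astype(np.int32)
rgb = lut[indices]                                                   # (64, 64, 3) in [0, 1]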
def chroma_fill_between(x, y1, y2=None, c=None, cm=cm.gist_rainbow, axes=None):
    """Plot y1 vs. x but fill in between y1 and y2 with a colormap.

    Arguments
    ---------
    x, y1, y2 -- curves to plot between
    c -- array congruent to x and y indicating which color in the color map
         to fill in at that point. The range should be from 0.0 to 1.0

    Keyword Arguments
    -----------------
    cm -- Matplotlib Colormap Object. The c array above picks colors from this
          array. (default matplotlib.cm.gist_rainbow, which covers roygbiv with
          red at c=0.0 and violet at c=1.0)
    axes -- Matplotlib Axes object to use to make the plot.
    """
    if axes is None:
        axes = plt.gca()
    if y2 is None:
        y2 = np.zeros(len(y1), dtype=float)
    elif isinstance(y2, (int, float)):
        y2 *= np.ones(len(y1), dtype=float)
    # clip the colour-index array into [0, 1]
    cc = c.copy()
    cc[cc > 1.0] = 1.0
    cc[cc < 0.0] = 0.0
    for i in xrange(cm.N):
        cmin = 1.0 * i / cm.N
        cmax = 1.0 * (i + 1) / cm.N
        # mask of points whose colour index falls into this colormap bin
        w = np.int_(np.logical_and(cc >= cmin, cc <= cmax))
        if not w.any():
            continue
        starts = np.nonzero(np.diff(w) == 1)[0]
        if w[0] == True:
            starts = np.insert(starts, 0, 0)
        ends = np.nonzero(np.diff(w) == -1)[0]
        if w[-1] == True:
            # the last run extends to the end of the array, so close it there
            ends = np.append(ends, len(cc) - 1)
        for start, end in zip(starts, ends):
            axes.fill_between(x[start:end + 1], y1[start:end + 1],
                              y2[start:end + 1], color=cm(i))
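# Example call for chroma_fill_between (Python 2, matching its use of xrange):
# colour the area under a sine according to its x position.
import numpy as np
x = np.linspace(0, 2 * np.pi, 500)
chroma_fill_between(x, np.sin(x), 0, c=x / x.max())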
def plot_snapshots(stg, var, cvar, times, varname='', varunit='', fvar=lambda v: '%g' % v, cvarname='', cvarunit='', fcvar=lambda v: '%g' % v): fig, axs = plt.subplots(1, len(times)) fig.subplots_adjust(wspace=0.1, hspace=0.05) cm = plt.get_cmap('tab10') colors = [cm(i) for i in (0, 4, 6, 9)] uv = array(sorted(stg[var].unique())) ucv = array(sorted(stg[cvar].unique())) csp = linspace(-0.075 * len(ucv), 0.075 * len(ucv), len(ucv)) legh, legl = [], [] for i, v in enumerate(uv): for j, t in enumerate(times): axs[j].set_yscale('log') for k, cv in enumerate(ucv): #for rmax in [0,1]: sl = stg[(stg[var] == v) & (stg[cvar] == cv)] #sl = sl[sl['Rmax'] == rmax] es = [Evap(tdir, gdir) for tdir in sl.index] e = array([interp(t, e.tf, e.evapf, right=nan) for e in es]) e = e[~isnan(e)] x = array([i + csp[k]] * len(e)) y = lake_evap(e, L_lake, A_lake) * yrsec if len(y) > 0: #axs[j].scatter(x+0.1, y, s=2.5, c=colors[k]) r = axs[j].violinplot(y, positions=[x[0]]) r['bodies'][0].set_alpha(0) for s in ['cmins', 'cmaxes', 'cbars']: r[s].set_color(colors[k]) r[s].set_linewidth(1.5) if i == 0 and j == 0: label = '%s = %s %s' % (cvarname, fcvar(cv), cvarunit) legh.append(r['cbars']) legl.append(label) ymin = min([ax.get_ylim()[0] for ax in axs]) ymax = max([ax.get_ylim()[1] for ax in axs]) for j in range(len(axs)): axs[j].set_ylim(ymin, ymax) axs[j].set_title('{:,g} {}'.format(times[j] / TU, TL)) axs[j].grid(False, axis='x') axs[j].set_xticks(range(len(var))) axs[j].tick_params(axis='x', length=0) axs[j].set_xticklabels([fvar(v) for v in uv]) axs[j].set_xlim(-0.2 * len(uv), (len(uv) - 1) + 0.2 * len(uv)) axs[j].spines['top'].set_visible(True) axs[j].spines['right'].set_visible(True) for i in range(len(uv) - 1): axs[j].plot([i + 1 / 2] * 2, [ymin, ymax], 'k', alpha=0.5) for j in range(1, len(axs)): axs[j].set_yticklabels([]) axs[j].tick_params(axis='y', length=0) axs[0].set_ylabel('Balancing Evaporation Rate (m/yr)\n4000 km$^2$ surface') fig.text(0.5, 0.04, '%s (%s)' % (varname, varunit), ha='center') axs[0].legend(legh, legl, loc='upper left')
def sample_colours_from_colourmap(n_colours, colour_map):
    import matplotlib.pyplot as plt
    cm = plt.get_cmap(colour_map)
    return [cm(1.*i/n_colours)[:3] for i in range(n_colours)]
p.new(title='Trajectories', aspect='equal',xlabel='x-coordinate',ylabel='y-coordinate') traj -= np.floor(traj/L)*L p.plot([0,L,L,0,0],[0,0,L,L,0],'b-', lw=2) p.plot(traj[-1,0,:],traj[-1,1,:],'wo', alpha=0.1 ,ms=7, mew = 2) p.plot(traj[0,0,:],traj[0,1,:],'+', c=[0.8,0.8,0.8], alpha=0.1) i = range(traj.shape[2]) np.random.shuffle(i) tpart = np.array(traj[:,:,i[:3]]) for n in range(1,tpart.shape[0]): i = (tpart[n-1,0,:] - tpart[n,0,:])**2+(tpart[n-1,1,:] - tpart[n,1,:])**2 > 50 tpart[n,:,i] = [None,None,None] nmax = tpart.shape[2] cm = cm.get_cmap('Dark2') colors=[cm(1.*i/nmax) for i in range(nmax)] for n in range(nmax): p.plot(tpart[:,0,n],tpart[:,1,n],'-', c = colors[n], alpha=0.8) p.plot(tpart[-1,0,n],tpart[-1,1,n],'o', c = colors[n], alpha=0.8 ,ms=7, mew = 2) # Energies p.new(title='Energies (from t=0 to t=10)',xlabel='time',ylabel='energy') i = ts < 10 p.plot(ts[i],Ekins[i], label='Ekin') p.plot(ts[i ],Es[i], label='Eges') p.plot(ts[i ],Epots[i], label='Epot') # Energies p.new(title='Energies (from t=10 to t=100)',xlabel='time',ylabel='energy') i = np.all([ts < 100, ts > 10], axis=0) p.plot(ts[i],Ekins[i], label='Ekin')