# If you want to highlight the weights of the training point to the test location
# highlight_x_star, toggle should_highlight to True. You can change which test
# location to highlight by changing the value of highlight_x_star.
should_highlight = False
highlight_x_star = 16  # index of the test point whose weights are visualized

if should_highlight:
    # Mark the chosen test location on the posterior mean.
    plt.scatter(X_star_few[highlight_x_star], post_m_few[highlight_x_star],
                s=[400], marker='.', color='red')
    # Weights of every training point w.r.t. the highlighted test point.
    weights_few = weights_few[highlight_x_star]
    min_w = min(weights_few)
    max_w = max(weights_few)
    # BUG FIX: min-max normalize into [0, 1].  The original divided by
    # max_w alone, which only reaches 1 when min_w == 0.  Also guard the
    # degenerate case where all weights are equal (zero span).
    span = max_w - min_w
    weights_few = (weights_few - min_w) / span if span else weights_few * 0
    # Scale the training-point markers by their normalized weight.
    marker_size = weights_few * 300 + 30
else:
    marker_size = 40

# Training data; marker size encodes the weights when highlighting.
plt.scatter(X, Y, marker='x', s=marker_size, color='blue')
ax = plt.gca()
ax.legend()
plt.xlim(min(X[0], X_star[0]), max(X[-1], X_star[-1]))
# Label the x axis in multiples of pi (multiple_formatter renders the labels).
ax.xaxis.set_major_locator(plt.MultipleLocator(np.pi / 2))
ax.xaxis.set_minor_locator(plt.MultipleLocator(np.pi / 12))
ax.xaxis.set_major_formatter(plt.FuncFormatter(multiple_formatter()))
plt.show()
# NOTE(review): loop-body code — `thick`, `deaths`, `series`, `i`, `sel_b`,
# `t`, `axarr`, `type_labels` and `fig` come from the enclosing scope.
thick -= 0.2  # thinner line for each subsequent series
deaths = [x for x in deaths if x <= 8 * 60]  # Filter out timeouts
x_lookup_max = len(series) - 1  # Special case for last data point
# y value for each death: clamp the index so it never runs past the series.
deaths_y = [series[min(x, x_lookup_max)] for x in deaths]
# Mark each death as a black 'x' on the current subplot.
axarr[i, sel_b].plot(deaths,
                     deaths_y,
                     linestyle='None',
                     marker='x',
                     color='black',
                     markersize=8,
                     markeredgewidth=0.8)
axarr[i, 0].set_ylabel(type_labels[t], fontsize=14)
# Hourly major ticks across the 8-hour window (tick values are in minutes).
ticks = [(x + 1) * 60 for x in range(8)]
axarr[1, sel_b].xaxis.set_major_locator(ticker.FixedLocator(ticks))
axarr[1, sel_b].xaxis.set_major_formatter(
    plt.FuncFormatter(lambda x, pos: '%.0f' % (x / 60)))  # convert minutes into hours
# Trim the fat
axarr[0, 0].set_ylim([500, 2500])
axarr[1, 0].set_ylim([1800, 2300])
axarr[0, 1].set_ylim([0, 135])
axarr[1, 1].set_ylim([2000, 15000])
fig.subplots_adjust(hspace=0.08)
# axarr[1].legend(loc='lower right', shadow=True)
# One shared legend centered underneath the whole figure.
plt.legend(bbox_to_anchor=(0.5, 0),
           loc="upper center",
           bbox_transform=fig.transFigure,
           ncol=3,
           labelspacing=0,
           handletextpad=0)
def main():
    """Run the routing benchmark for every non-skipped router and report results.

    Parses the CLI flags, builds the benchmark fixtures, times each router
    (falcon, kua, routes, sanic, xrtr) against the "minimal" and "full" URI
    sets with 0-3 simple/complex variables, prints a compact summary and
    optionally plots the timings with matplotlib.
    """
    # ------------------------------------------------------------------- #
    # creating arg parser
    # ------------------------------------------------------------------- #
    parser = argparse.ArgumentParser(
        description="Performs the benchmark of several routing systems in "
        "Python, both from frameworks or standalone solutions")
    # One --skip-<framework> flag per benchmarked router.
    for flag, display in (("falcon", "Falcon"), ("kua", "kua"),
                          ("routes", "Routes"), ("sanic", "Sanic"),
                          ("xrtr", "xrtr")):
        parser.add_argument(
            "--skip-" + flag,
            action="store_true",
            help="skip {} from the benchmark".format(display),
            dest="skip_" + flag,
            default=False,
        )
    parser.add_argument(
        "--plot",
        action="store_true",
        help="plot the results using matplotlib",
        dest="plot",
        # BUG FIX: with action="store_true" a default of True made the flag
        # a no-op (plotting always happened and could never be disabled).
        default=False,
    )
    parser.add_argument(
        "--iters",
        type=int,
        help="number of iterations on each test",
        dest="total_iter",
        default=100000,
    )
    try:
        args = parser.parse_args()
    except SystemExit as exc:
        # BUG FIX: argparse raises SystemExit (not Exception) on bad
        # arguments, so the original `except Exception` never fired.
        if exc.code == 0:
            raise  # normal exit, e.g. --help
        parser.print_help()
        sys.exit(1)

    # ------------------------------------------------------------------- #
    # start testing
    # ------------------------------------------------------------------- #
    print("\n==========================================")
    print("THIS TEST CAN TAKE A WHILE ...")
    print("==========================================\n")

    # res[framework][route-set][complexity][num-vars] ->
    #   (seconds, iterations/sec) pair as returned by measure_router.
    frameworks = ("falcon", "kua", "routes", "sanic", "xrtr")
    res = {
        frwk: {size: {"simple": {}, "complex": {}} for size in ("min", "full")}
        for frwk in frameworks
    }

    # The timed statements reference the routers and URI tables by *name*
    # (gen_stmt builds statement strings), so they must live in this
    # module's globals — same mechanism as the original `global` statements.
    global minimal_uris
    global lots_of_uris
    minimal_uris, lots_of_uris = create_bench_data()

    num_vars = ["ZERO", "ONE", "TWO", "THREE"]

    def bench(frwk, size, router_name, router, method, uris_name, **gen_kwargs):
        """Time one router against one URI set for 0-3 simple/complex vars."""
        globals()[router_name] = router  # expose to the timed statement
        label = "MINIMAL" if size == "min" else "FULL"
        for i, k in enumerate(num_vars):
            for complexity, is_complex in (("simple", False),
                                           ("complex", True)):
                print_type_of_test(
                    "{}, {} VARIABLE, {} STRING, NO-REPEAT".format(
                        label, k, complexity.upper()))
                run_stmt = gen_stmt("{}.{}".format(router_name, method),
                                    uris_name, i, is_complex, **gen_kwargs)
                res[frwk][size][complexity][k] = measure_router(
                    router_name, run_stmt, args.total_iter)

    # ------------------------------------------------------------------- #
    if not args.skip_falcon:
        bench("falcon", "min", "falcon_compiled_router_minimal",
              create_falcon_router(minimal_uris), "find", "minimal_uris")
        bench("falcon", "full", "falcon_compiled_router_full",
              create_falcon_router(lots_of_uris), "find", "lots_of_uris")

    # ------------------------------------------------------------------- #
    if not args.skip_kua:
        bench("kua", "min", "kua_router_minimal",
              create_kua_router(minimal_uris), "match", "minimal_uris")
        bench("kua", "full", "kua_router_full",
              create_kua_router(lots_of_uris), "match", "lots_of_uris")

    # ------------------------------------------------------------------- #
    if not args.skip_routes:
        bench("routes", "min", "routes_router_minimal",
              create_routes_router(minimal_uris), "match", "minimal_uris")
        # Building the full Routes router is slow.
        bench("routes", "full", "routes_router_full",
              create_routes_router(lots_of_uris), "match", "lots_of_uris")

    # ------------------------------------------------------------------- #
    if not args.skip_sanic:
        bench("sanic", "min", "sanic_router_minimal",
              create_sanic_router(minimal_uris), "_get", "minimal_uris",
              is_sanic=True)
        bench("sanic", "full", "sanic_router_full",
              create_sanic_router(lots_of_uris), "_get", "lots_of_uris",
              is_sanic=True)

    # ------------------------------------------------------------------- #
    if not args.skip_xrtr:
        bench("xrtr", "min", "xrtr_router_minimal",
              create_xrtr_router(minimal_uris), "get", "minimal_uris",
              is_xrtr=True)
        bench("xrtr", "full", "xrtr_router_full",
              create_xrtr_router(lots_of_uris), "get", "lots_of_uris",
              is_xrtr=True)

    # ------------------------------------------------------------------- #
    # skip helper
    def should_skip(frwk):
        """True when the CLI asked to skip this framework."""
        return getattr(args, "skip_" + frwk)

    # ------------------------------------------------------------------- #
    # compact result
    print("\n\n-------------------------------------")
    print("COMPACT RESULT")
    print("-------------------------------------\n")
    for k in res:
        if should_skip(k):
            continue
        print(">> {}".format(k))
        for size in ("min", "full"):
            for complexity in ("simple", "complex"):
                print(" - zero var, {} routes: {:.6f} (~{:.2f} iter/sec)".format(
                    size, *res[k][size][complexity]["ZERO"]))
                for count, key in (("1", "ONE"), ("2", "TWO"), ("3", "THREE")):
                    print(" - {} {} var, {} routes: {:.6f} (~{:.2f} iter/sec)".format(
                        count, complexity, size, *res[k][size][complexity][key]))
                print("")

    if args.plot:
        # One figure per (route-set, variable-complexity) combination.
        for tt in ("min", "full"):
            for vc in ("simple", "complex"):
                fig, ax = plt.subplots()
                for f in res:
                    if should_skip(f):
                        continue
                    series = [res[f][tt][vc][n][0] for n in num_vars]
                    ax.plot(series, label=f, marker="o")
                    for a, b in enumerate(series):
                        ax.text(a, b + 0.007, "{:.3f}s".format(b))
                ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
                plt.title("{} routes, {} variables, {:,} iterations".format(
                    tt, vc, args.total_iter))
                plt.legend(loc="upper left")
                plt.ylabel("Time (in seconds)")
                plt.xlabel("Number of variables")
                plt.show()
fte_graph.tick_params(axis='both', which='major', labelsize=18) # Customizing the tick labels of the y-axis # Adding a title and a subtitle plt.title("Comparison of School Enrollment", fontsize=20, weight='bold', alpha=.75) # add cool legend fte_graph.legend(fontsize=11, frameon=True).get_frame().set_edgecolor('blue') fte_graph.set_xlabel('Year', fontsize=20, weight='bold', alpha=.75) fte_graph.set_ylabel('Gross', fontsize=20, weight='bold', alpha=.75) fte_graph.legend(frameon=False, loc='upper left', fontsize=15) fte_graph.yaxis.set_major_formatter(plt.FuncFormatter('{:.0f}%'.format)) plt.tight_layout() plt.savefig("schoolcomparison.png") # In[60]: bins = [1973, 1978, 1983, 1988, 1993, 1998, 2003, 2008, 2013, 2018] s = df[[ 'School enrollment, primary (% gross)', 'School enrollment, secondary (% gross)', 'School enrollment, tertiary (% gross)' ]] #s.set_index(['1973-1977','1978-1982','1983-1987','1988-1992','1993-1997','1998-2002','2003-2007','2008-2012','2013-2018'], inplace = True) s = s.groupby(s.index // 5 * 5).sum() s.index = [
# Thicker y ticks; minor tick marks hidden (length 0) but width kept.
ax.yaxis.set_tick_params('minor', width=2, length=0)
ax.yaxis.set_tick_params('major', width=2, length=4)
# Color the axis labels and tick labels with the shared color `cc`.
ax.yaxis.label.set_color(cc)
ax.xaxis.label.set_color(cc)
ax.tick_params(axis='x', colors=cc)
ax.tick_params(axis='y', colors=cc)
# End Defaults

# Column 0 -> y values (c/cH), column 1 -> x values (d/dH).
c, d = np.loadtxt("marecc.dat", usecols=(0, 1), unpack=True)
cH, dH = np.loadtxt("highcc.dat", usecols=(0, 1), unpack=True)

ax.set_yscale('log')
# NOTE(review): the `basex` keyword was removed in matplotlib 3.5; newer
# versions require base=2 instead.
ax.set_xscale('log', basex=2)
formatter = plt.FuncFormatter(log_10_product)
ax.xaxis.set_major_formatter(formatter)

# Data points: "marecc" in red, "highcc" in blue.
plt.plot(d, c, 'o', markersize=15, color='r')
plt.plot(dH, cH, 'o', markersize=15, color='b')

# Two reference power-law curves proportional to x^-2.
xx = np.arange(0.01, 1000, 0.1)
yy = 0.0025 * xx**-2.0
zz = 0.05 * xx**-2.0
plt.plot(xx, yy, marker='None', color='g')
plt.plot(xx, zz, marker='None', color='g')
plt.xlim(1.0, 800)
plt.ylim(5e-8, 2e-3)
def __init__(self) -> None:
    """Build the model configuration, the stratigraphy figure and the sliders."""
    # initial conditions
    config = utils.Config()

    # model run params
    config.dt = 100  # timestep in yrs
    self._paused = False  # run/pause state of the animation

    # setup params
    config.Cf = 0.004  # friction coeff
    config.D50 = 300*1e-6  # median grain size (presumably meters, i.e. 300 um)
    config.Beta = 1.5  # exponent to avulsion function
    config.Df = 0.6  # dampening factor to lateral migration rate change
    config.dxdtstd = 1  # stdev of lateral migration dist, [m/yr]?

    # constants
    config.conR = 1.65
    config.cong = 9.81
    config.conrhof = 1000
    config.connu = 1.004e-6
    config.Rep = geom.Repfun(config.D50, config.conR,
                             config.cong, config.connu)  # particle Reynolds num

    # water discharge slider params
    config.Qw = config.Qwinit = 1000
    config.Qwmin = 200
    config.Qwmax = 4000
    config.Qwstep = 100

    # subsidence slider params
    config.sig = config.siginit = 2
    config.sigmin = 0
    config.sigmax = 5
    config.sigstep = 0.2

    # avulsion timescale slider params
    config.Ta = config.Tainit = 500
    config.Tamin = config.dt
    config.Tamax = 1500
    config.Tastep = 10

    # yView slider params
    config.yView = config.yViewinit = 100
    config.yViewmin = 25
    config.yViewmax = 250
    config.yViewstep = 25

    # basin width slider params
    # NOTE(review): Bbinit is 4000 (m) but the slider range is 1-10 —
    # presumably the slider operates in km; confirm against SliderManager.
    config.Bb = config.Bbinit = 4000  # width of belt (m)
    config.Bbmin = 1
    config.Bbmax = 10
    config.Bbstep = 0.5

    # additional initializations
    config.Bast = 0  # Basin top level

    # setup the figure
    plt.rcParams['toolbar'] = 'None'
    plt.rcParams['figure.figsize'] = 8, 6
    self.fig, self.strat_ax = plt.subplots()
    # NOTE(review): canvas.set_window_title is deprecated since matplotlib
    # 3.4 in favor of canvas.manager.set_window_title.
    self.fig.canvas.set_window_title('SedEdu -- rivers2stratigraphy')
    # leave room on the right-hand side of the figure for the sliders
    plt.subplots_adjust(left=0.085, bottom=0.1, top=0.95, right=0.5)
    self.strat_ax.set_xlabel("channel belt (km)")
    self.strat_ax.set_ylabel("stratigraphy (m)")
    plt.ylim(-config.yView, 0.1*config.yView)
    plt.xlim(-config.Bb/2, config.Bb/2)
    # show x in km; note .format('%0.0f') is a no-op on the built string
    self.strat_ax.xaxis.set_major_formatter(
        plt.FuncFormatter(
            lambda v, x: str(v / 1000).format('%0.0f'))
    )

    # add sliders
    self.config = config
    self.sm = SliderManager(self)
def axes_rank(self, rect): """ rect : sequence of float The dimensions [left, bottom, width, height] of the new axes. All quantities are in fractions of figure width and height. """ # Ranked Metric Scores Plot ax1 = self.fig.add_axes(rect, sharex=self.ax) if self.module == 'ssgsea': ax1.fill_between(self._x, y1=np.log(self.rankings), y2=0, color='#C9D3DB') ax1.set_ylabel("log ranked metric", fontsize=14) else: ax1.fill_between(self._x, y1=self.rankings, y2=0, color='#C9D3DB') ax1.set_ylabel("Ranked list metric", fontsize=14) ax1.text(.05, .9, self._pos_label, color='red', horizontalalignment='left', verticalalignment='top', transform=ax1.transAxes) ax1.text(.95, .05, self._neg_label, color='Blue', horizontalalignment='right', verticalalignment='bottom', transform=ax1.transAxes) # the x coords of this transformation are data, and the y coord are axes trans1 = transforms.blended_transform_factory(ax1.transData, ax1.transAxes) ax1.vlines(self._zero_score_ind, 0, 1, linewidth=.5, transform=trans1, linestyles='--', color='grey') hap = self._zero_score_ind / max(self._x) if hap < 0.25: ha = 'left' elif hap > 0.75: ha = 'right' else: ha = 'center' ax1.text(hap, 0.5, self._z_score_label, horizontalalignment=ha, verticalalignment='center', transform=ax1.transAxes) ax1.set_xlabel("Rank in Ordered Dataset", fontsize=14) ax1.spines['top'].set_visible(False) ax1.tick_params(axis='both', which='both', top=False, right=False, left=False) ax1.locator_params(axis='y', nbins=5) ax1.yaxis.set_major_formatter( plt.FuncFormatter( lambda tick_loc, tick_num: '{:.1f}'.format(tick_loc)))
def compute_Wavelet():
    """Convolve a noisy spike signal with a family of Gaussian-envelope
    complex wavelets via the continuous Fourier transform, and plot the
    signal, an example wavelet, and the resulting wavelet PSD."""
    from astroML.fourier import FT_continuous, IFT_continuous

    #----------------------------------------------------------------------
    # This function adjusts matplotlib settings for a uniform feel in the textbook.
    # Note that with usetex=True, fonts are rendered with LaTeX. This may
    # result in an error if LaTeX is not installed on your system. In that case,
    # you can set usetex to False.
    from astroML.plotting import setup_text_plots
    setup_text_plots(fontsize=8, usetex=False)

    def wavelet(t, t0, f0, Q):
        # Gaussian envelope times a complex exponential, centered at t0.
        return (np.exp(-(f0 / Q * (t - t0)) ** 2)
                * np.exp(2j * np.pi * f0 * (t - t0)))

    def wavelet_FT(f, t0, f0, Q):
        # this is its fourier transform using
        # H(f) = integral[ h(t) exp(-2pi i f t) dt]
        return (np.sqrt(np.pi) * Q / f0
                * np.exp(-2j * np.pi * f * t0)
                * np.exp(-(np.pi * (f - f0) * Q / f0) ** 2))

    def check_funcs(t0=1, f0=2, Q=3):
        # Sanity check that wavelet_FT really is the FT of wavelet.
        t = np.linspace(-5, 5, 10000)
        h = wavelet(t, t0, f0, Q)
        f, H = FT_continuous(t, h)
        assert np.allclose(H, wavelet_FT(f, t0, f0, Q))

    #------------------------------------------------------------
    # Create the simulated dataset
    np.random.seed(5)
    t = np.linspace(-40, 40, 2001)[:-1]
    h = np.exp(-0.5 * ((t - 20.) / 1.0) ** 2)  # Gaussian spike at t = 20
    hN = h + np.random.normal(0, 0.5, size=h.shape)  # added Gaussian noise

    #------------------------------------------------------------
    # Compute the convolution via the continuous Fourier transform
    # This is more exact than using the discrete transform, because
    # we have an analytic expression for the FT of the wavelet.
    Q = 0.3
    f0 = 2 ** np.linspace(-3, -1, 100)  # 100 wavelet center frequencies
    f, H = FT_continuous(t, hN)
    # Broadcast over all center frequencies at once (f0 as a column).
    W = np.conj(wavelet_FT(f, 0, f0[:, None], Q))
    t, HW = IFT_continuous(f, H * W)

    #------------------------------------------------------------
    # Plot the results
    fig = plt.figure(figsize=(5, 5))
    fig.subplots_adjust(hspace=0.05, left=0.12, right=0.95,
                        bottom=0.08, top=0.95)

    # First panel: the signal
    ax = fig.add_subplot(311)
    ax.plot(t, hN, '-k', lw=1)
    ax.text(0.02, 0.95, ("Input Signal:\n"
                         "Localized spike plus noise"),
            ha='left', va='top', transform=ax.transAxes)
    ax.set_xlim(-40, 40)
    ax.set_ylim(-1.2, 2.2)
    ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.set_ylabel('$h(t)$')

    # Second panel: the wavelet
    ax = fig.add_subplot(312)
    W = wavelet(t, 0, 0.125, Q)
    ax.plot(t, W.real, '-k', label='real part', lw=1)
    ax.plot(t, W.imag, '--k', label='imag part', lw=1)
    ax.legend(loc=1)
    ax.text(0.02, 0.95, ("Example Wavelet\n"
                         "$t_0 = 0$, $f_0=1/8$, $Q=0.3$"),
            ha='left', va='top', transform=ax.transAxes)
    ax.text(0.98, 0.05,
            (r"$w(t; t_0, f_0, Q) = e^{-[f_0 (t - t_0) / Q]^2}"
             "e^{2 \pi i f_0 (t - t_0)}$"),
            ha='right', va='bottom', transform=ax.transAxes)
    ax.set_xlim(-40, 40)
    ax.set_ylim(-1.4, 1.4)
    ax.set_ylabel('$w(t; t_0, f_0, Q)$')
    ax.xaxis.set_major_formatter(plt.NullFormatter())

    # Third panel: the spectrogram
    ax = fig.add_subplot(313)
    ax.imshow(abs(HW) ** 2, origin='lower', aspect='auto',
              cmap=plt.cm.binary,
              extent=[t[0], t[-1], np.log2(f0)[0], np.log2(f0)[-1]])
    ax.set_xlim(-40, 40)
    ax.text(0.02, 0.95, ("Wavelet PSD"), color='w',
            ha='left', va='top', transform=ax.transAxes)
    ax.set_ylim(np.log2(f0)[0], np.log2(f0)[-1])
    ax.set_xlabel('$t$')
    ax.set_ylabel('$f_0$')
    # The frequency axis is log2-spaced; label ticks as 1/8, 1/4, 1/2, ...
    ax.yaxis.set_major_locator(plt.MultipleLocator(1))
    ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, *args:
                                                   ("1/%i" % (2 ** -x))))
    plt.show()
def genericplot(df, column, outfile, config, device_name):
    """Plot one column of `df` against its datetime index and save it.

    Sizing, marker and style options come from the optional
    config["plotting"] section; x-axis tick density/format is chosen from
    the plotted time span; y-axis precision from the data's magnitude.
    """
    logger = logging.getLogger(__name__)
    timeframe = config["timeframe"]
    outfile = outfile.replace(":", ".")  # ":" is not safe in all filenames
    logging.info(f"creating {outfile} for column {column}")

    # Plot defaults; may be overridden by the config below.
    dim = (16, 6)
    markersize = 1
    style = "-"
    marker = ""
    colormapName = "Set1"
    plt.style.use("seaborn-whitegrid")
    palette = plt.get_cmap(colormapName)
    colour = palette(1)

    # Is this a numeric column?
    try:
        column_type = str(df[column].dtype)
        # logger.info(f"Column type: {column_type}")
    except AttributeError:
        column_type = "unknown"
    if column_type == "float64" or column_type == "int64":
        pass
    else:
        # TryEuropean: convert decimal commas ("1,5") to dots, then to float.
        df[column] = [x.replace(",", ".") for x in df[column]]
        df[column] = df[column].astype(float)

    # Optional overrides from the config file (each key may be absent).
    try:
        dim = parse_tuple("(" + config["plotting"]["dim"] + ")")
    except KeyError:
        pass
    try:
        markersize = float(config["plotting"]["markersize"])
    except KeyError:
        pass
    try:
        style = config["plotting"]["style"]
    except KeyError:
        pass
    if style == "":
        marker = "o"  # no line style configured: fall back to point markers

    # Defaults or override with config file
    fig, ax = plt.subplots(figsize=dim, dpi=80, facecolor="w",
                           edgecolor="dimgrey")
    if timeframe is not None:
        # Restrict the plot to the "start,end" window from the config.
        ax.plot(
            df[column][timeframe.split(",")[0] : timeframe.split(",")[1]],
            alpha=0.7,
            color=colour,
            linestyle=style,
            markersize=markersize,
            marker=marker,
        )
    else:
        ax.plot(
            df[column],
            alpha=0.7,
            color=colour,
            linestyle=style,
            markersize=markersize,
            marker=marker,
        )
    plt.grid(which="both", axis="both", linestyle="--")

    # vmstat make chart top "100"
    if column == "us" or column == "sy" or column == "wa" or column == "Total CPU":
        ax.set_ylim(top=100)
    if "%" in column:
        ax.set_ylim(top=100)

    # y axis
    ax.get_yaxis().set_major_formatter(
        plt.FuncFormatter(lambda x, loc: "{:,}".format(float(x)))
    )
    ax.set_ylim(bottom=0)  # Always zero start
    # print('max {:,.4f}'.format(df[column].max()))
    # Choose y-axis decimal precision from the data's magnitude.
    if df[column].max() > 10 or "%" in column:
        ax.yaxis.set_major_formatter(
            matplotlib.ticker.StrMethodFormatter("{x:,.0f}"))
    elif df[column].max() < 0.002:
        ax.yaxis.set_major_formatter(
            matplotlib.ticker.StrMethodFormatter("{x:,.4f}"))
    else:
        ax.yaxis.set_major_formatter(
            matplotlib.ticker.StrMethodFormatter("{x:,.3f}"))
    # if df[column].max() > 999:
    #     ax.yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter("{x:,.0f}"))
    # else:
    #     ax.yaxis.set_major_formatter(ScalarFormatter(useOffset=None))
    #     ax.get_yaxis().get_major_formatter().set_scientific(False)

    # Try to be smarter with the x axis. more to come
    if timeframe is not None and timeframe != "":
        # Small values inside the window get extra y precision.
        if df[column][timeframe.split(",")[0] : timeframe.split(",")[1]].max() <= 10:
            ax.yaxis.set_major_formatter(
                matplotlib.ticker.StrMethodFormatter("{x:,.3f}")
            )
        StartTime = datetime.strptime(timeframe.split(",")[0],
                                      "%Y-%m-%d %H:%M:%S")
        EndTime = datetime.strptime(timeframe.split(",")[-1],
                                    "%Y-%m-%d %H:%M:%S")
        TotalMinutes = (EndTime - StartTime).total_seconds() / 60
        logging.debug("TF Minutes: " + str(TotalMinutes))
    else:
        StartTime = df.index[0]
        EndTime = df.index[-1]
        # if the data wraps around (usually a few minutes where we have created the datetime artificially)
        if StartTime > EndTime:
            logger.debug(f"Wrapping dates {str(StartTime)} {str(EndTime)}")
            df = df.sort_index()
            StartTime = df.index[0]
            EndTime = df.index[-1]
            logger.debug(f"Sorted dates {str(StartTime)} {str(EndTime)}")
            # Compare to previous value
            # logger.info(f"{df.iloc[:,[0]].lt(df.iloc[:,[0]].shift())}")
        TotalMinutes = (df.index[-1] - df.index[0]).total_seconds() / 60
        # logging.info("All Minutes: " + str(TotalMinutes))

    # Tick locator/format buckets keyed on the plotted span length.
    if TotalMinutes <= 15:
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
        ax.xaxis.set_major_locator(
            mdates.SecondLocator(interval=int((TotalMinutes * 60) / 10))
        )
    elif TotalMinutes <= 180:
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
        ax.xaxis.set_major_locator(
            mdates.MinuteLocator(interval=int(TotalMinutes / 10))
        )
    elif TotalMinutes <= 1500:
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
        ax.xaxis.set_major_locator(mdates.HourLocator())
    elif TotalMinutes <= 3000:
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%d-%H:%M"))
    else:
        ax.xaxis.set_major_formatter(mdates.DateFormatter("%a %m/%d - %H:%M"))

    StartTimeStr = datetime.strftime(StartTime, "%a %Y-%m-%d %H:%M:%S")
    EndTimeStr = datetime.strftime(EndTime, "%a %Y-%m-%d %H:%M:%S")
    if device_name == "":
        plt.title(
            column + " between " + StartTimeStr + " and " + EndTimeStr,
            fontsize=12
        )
    else:
        plt.title(
            device_name
            + " : "
            + column
            + " between "
            + StartTimeStr
            + " and "
            + EndTimeStr,
            fontsize=12,
        )
    # plt.xlabel("Time", fontsize=10)
    plt.tick_params(labelsize=10)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right")
    plt.tight_layout()
    plt.savefig(outfile, bbox_inches="tight")
    plt.close()
def plotAndSave(data, dataName, metric, savePath, year, xTrainA, yTrainA,
                xTrainB, yTrainB, cmap, mask=None, vmin=None, vmax=None,
                intervals=51, saveDataFile=False, elevLabel=""):
    '''Create a filled-contour density plot of `metric` and save it as a png.

    Parameters
    ----------
    data : DataFrame with 'X_Round'/'Y_Round' columns and the `metric` column.
    dataName, year : used to build the plot title and output file names.
    metric : column of `data` pivoted into the gridded z values.
    savePath : output directory prefix (concatenated with the title).
    xTrainA, yTrainA, xTrainB, yTrainB : training-path coordinates; pass
        empty sequences for the B path to omit it.
    cmap : matplotlib colormap for contours and colorbar.
    mask : optional 2-D mask; grid cells where the vertically-flipped mask
        is 0 are masked out.  Defaults to None (no masking).
        BUG FIX: the original default was a mutable [] and the test
        `mask != []` misbehaves for numpy arrays.
    vmin, vmax : color limits; both must be given (the colorbar step math
        below requires them despite the None defaults).
    intervals : number of contour levels / colorbar steps.
    saveDataFile : when True, also dump the gridded z values to csv.
    elevLabel : extra text appended to the colorbar label.
    '''
    # Get x,y,z values
    piv = data.pivot(index='Y_Round', columns='X_Round', values=metric)
    xi = piv.columns.values
    yi = piv.index.values
    zi = piv.values

    # Apply mask (explicit None/emptiness test works for lists and arrays).
    if mask is not None and len(mask) != 0:
        zi = np.ma.masked_where(1 - np.flip(mask, axis=0), zi)

    # create contour map
    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(111)
    ax.contourf(xi, yi, zi, intervals, cmap=cmap, vmin=vmin, vmax=vmax)

    # Plot Training paths
    ax.scatter(xTrainA, yTrainA, color='k', marker='.', s=7)
    if len(xTrainB) != 0:
        ax.scatter(xTrainB, yTrainB, color='g', marker='.', s=7)

    # Define colourbar spanning [vmin, vmax] in `intervals` steps.
    m = plt.cm.ScalarMappable(cmap=cmap)
    m.set_array(zi)
    m.set_clim(vmin, vmax)
    msize = (vmax - vmin) / intervals
    cbar = fig.colorbar(m, boundaries=np.arange(vmin, vmax + .1, msize))
    cbar.ax.set_ylabel('Count {}'.format(elevLabel))

    # Format x axis — thousands separators on both axes.
    ax.get_yaxis().set_major_formatter(
        plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
    ax.get_xaxis().set_major_formatter(
        plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
    # NOTE(review): hard-coded x window for this particular survey area.
    ax.set_xlim(-170000, -138000)
    plt.ylabel('y', figure=fig)
    plt.xlabel('x', figure=fig)

    # Add title
    title = '{}-Density({})'.format(year, dataName)
    plt.title(title)

    # Save figure
    fig.savefig(savePath + title + '.png', format='png')
    if saveDataFile:
        dataOut = pd.DataFrame(data=zi, index=yi, columns=xi)
        dataOut.to_csv(savePath + title + '-WithRowColHeaders.csv')
# Parse the runway table: column 1 holds the runway length [m] and column 2
# the share of airports [%], both with decimal commas.
# SECURITY FIX: parse with float() instead of eval() — eval() executes
# arbitrary code contained in the data file.
for row in runway:
    y.append(float(row[1].replace(',', '.')))
    x.append(float(row[2].replace(',', '.')))

# NOTE(review): 'normal' is not a real font family name; matplotlib warns
# and falls back to the default font.
font = {'family': 'normal', 'weight': 'normal', 'size': 14}
ax = plt.subplot(111)
box = ax.get_position()
plt.rc('font', **font)
plt.axis((0, 100, 0, 4500))
plt.plot(x, y, label="runway length")

# Reference aircraft data points.
a = [[31.45, 2275]]
plt.plot(*zip(*a), marker='o', markersize=8, color='fuchsia',
         label="reference aircraft")  # A321neo
b = [[34.15, 2320]]
plt.plot(*zip(*b), marker='o', markersize=8, color='fuchsia')  # A321neo

# Thousands separators on the y axis.
ax.get_yaxis().set_major_formatter(
    plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
plt.xlabel("% of airports")
plt.ylabel("runway length [m]")
# Dashed guide lines: the 2000 m length and its corresponding airport share.
plt.axhline(2000, linestyle="dashed", color="Red")
plt.axvline(23.73, linestyle="dashed", color="Red")
# Shrink the axes to leave room for the legend on the right-hand side.
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
def plot(stage_to_timestamps, stage_to_counts, points, chorus_ts, pipeline,
         plot_name, folder, zoom):
    """Draw a two-panel accumulation plot (zoomed view on top, full-run
    'ripple' view below) of per-stage counts over time and save it as PNG.

    stage_to_timestamps / stage_to_counts: dict keyed by stage index, each
    holding parallel lists of (timestamp, count) samples.
    chorus_ts: timestamps drawn as red dots at the running maximum count.
    pipeline: list of stage descriptors; stage index == len(pipeline) is
    labelled "Total".
    zoom: x-limits for the top (zoomed) panel.
    NOTE(review): `points` is unused in this body — presumably legacy.
    """
    # NOTE(review): this adjusts whatever figure is current *before* fig1 is
    # created below — likely a no-op for fig1; confirm intent.
    plt.subplots_adjust(hspace=0, wspace=0)
    font_size = 16
    fig1 = plt.figure()
    gs1 = gridspec.GridSpec(2, 1)
    zoom_axis = plt.subplot(gs1[0])
    zoom_axis.locator_params(axis='x', nbins=5)
    ripple_axis = plt.subplot(gs1[1])
    ripple_axis.locator_params(axis='x', nbins=5)
    colors = ["blue", "cyan", "orange", "purple", "magenta", "black"]
    legends = []
    # Cosmetic framing: hide top/right spines, thicken left/bottom.
    for axis in [zoom_axis, ripple_axis]:
        axis.margins(x=0, y=0)
        for side in ["right", "top"]:
            axis.spines[side].set_visible(False)
        for side in ["left", "bottom"]:
            axis.spines[side].set_linewidth(3)
    # One legend patch per pipeline stage plus a final "Total" entry.
    for stage in range(len(pipeline) + 1):
        color = colors[stage % len(colors)]
        label = pipeline[stage]["name"] if stage < len(pipeline) else "Total"
        patch = mpatches.Patch(facecolor=color,
                               edgecolor="black",
                               label=label,
                               linewidth=1,
                               linestyle="solid")
        legends.append(patch)
    regions = {}
    max_y = 0
    max_x = 0
    increment = 5  # gap-filling step (seconds): hold last value across gaps
    for stage in stage_to_timestamps:
        min_x = sys.maxsize
        px = []
        py = []
        color = colors[stage % len(colors)]
        for i in range(len(stage_to_timestamps[stage])):
            lx = stage_to_timestamps[stage][i]
            ly = stage_to_counts[stage][i]
            # If samples are more than `increment` apart, repeat the last
            # count so the line stays flat instead of interpolating.
            if len(py) > 0 and lx - px[-1] > increment:
                r = range(int(px[-1]) + increment, int(lx), increment)
                for dx in r:
                    px.append(dx)
                    py.append(py[-1])
            if ly > 0:  # zero counts are dropped entirely
                px.append(lx)
                py.append(ly)
        if len(py) > 0:
            max_y = max(max_y, max(py))
            min_x = min(min_x, min(px))
            max_x = max(max_x, max(px))
        regions[stage] = [min_x, max_x]
        zoom_axis.plot(px, py, color=color)
        ripple_axis.plot(px, py, color=color)
    # Chorus events drawn at the overall maximum height on both panels.
    chorus_y_points = list(map(lambda x: max_y, chorus_ts))
    for axis in [ripple_axis, zoom_axis]:
        axis.scatter(chorus_ts, chorus_y_points, color="red", s=1)
    zoom_axis.set_xlim(zoom)
    zoom_axis.xaxis.set_major_formatter(plt.FuncFormatter(timestamp_label))
    ripple_axis.set_xlim([0, max_x])
    ripple_axis.xaxis.set_major_formatter(plt.FuncFormatter(timestamp_label))
    plot_name = "{0:s}/{1:s}.png".format(folder, plot_name)
    plt.xlabel("Runtime (seconds)", size=font_size)
    print(plot_name)
    gs1.tight_layout(fig1)
    fig1.savefig(plot_name)
    print("Accumulation plot", plot_name)
    plt.close()
if MULTIPLE_TRANSPARENCIES: for l, t in enumerate(MULTIPLE_TRANSPARENCIES): p = params.valuesdict() # p[f't_j1_{i}'] = t p[f't_j2_{i}'] = t model = eval_squid_current(field, i, j, k, p) axes[-k - 1, j].plot(phase, model, color=f'C{l+1}', label=f't={t}') else: model = eval_squid_current(field, i, j, k, params.valuesdict()) axes[-k - 1, j].plot(phase, model, color='C1') axes[-k - 1, j].xaxis.set_major_formatter( plt.FuncFormatter(format_phase)) axes[-k - 1, j].tick_params(direction='in', width=1.5) axes[-k - 1, j].legend(title=f'Vg{j+1} = {g} V', loc=1) axes[-1, j].set_xlabel('SQUID phase') axes[len(g_dataset) // 2, j].set_ylabel('Bias current (µA)') if PLOT_FITS == 'color': cb = plt.colorbar(im, ax=axes[len(g_dataset) // 2, j]) cb.set_label('Resistance (Ω)') if ANALYSIS_PATH: fig.savefig(os.path.join(ANALYSIS_PATH, f'fit_f_{f}.pdf')) plt.show() # Build result arrays from the fitted parameters results = {} results['field'] = np.array(list(datasets))
# Fit the final SVM with the best hyperparameters found earlier
# (best_kernel / best_C_value come from the preceding search) and report
# its held-out test accuracy and predictions.
print(
    "Using a training and test set to determine the best parameters for the SVM \n"
)
clf = svm.SVC(kernel=best_kernel, C=best_C_value).fit(X_train, y_train)
final_test_score = clf.score(X_test, y_test)
print("The best score comes from using Kernel=", best_kernel, " and C=",
      best_C_value, ": ", final_test_score, "\n")
print("Predicted activity for the test data \n")
print(clf.predict(X_test), "\n")
print("Actual activity for the test data \n")
print(y_test)

# Colorbar formatter: map integer class values to activity-name labels.
formatter = plt.FuncFormatter(lambda i, *args: activity_labels[int(i)])

# Project the test set to 2D with PCA and plot true vs. predicted labels.
plt.figure(figsize=(8, 6))
pca = PCA(n_components=2)
proj = pca.fit_transform(X_test)
plt.title("Real activity for data")
plt.scatter(proj[:, 0], proj[:, 1], c=y_test, cmap="Paired")
plt.colorbar(ticks=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], format=formatter)
plt.xlabel('Principle Component 1')
plt.ylabel('Principle Component 2')

plt.figure(figsize=(8, 6))
plt.title("Predicted activity for data")
plt.scatter(proj[:, 0], proj[:, 1], c=clf.predict(X_test), cmap="Paired")
plt.colorbar(ticks=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], format=formatter)
# Finish the wavelet panel (ax/fig/t/f0/HW are defined earlier in the file):
# hide its x tick labels since the spectrogram below shares the time axis.
ax.set_xlim(-40, 40)
ax.set_ylim(-1.4, 1.4)
ax.set_ylabel('$w(t; t_0, f_0, Q)$')
ax.xaxis.set_major_formatter(plt.NullFormatter())

# Third panel: the spectrogram.  Power |HW|^2 on a log2 frequency axis.
ax = fig.add_subplot(313)
ax.imshow(abs(HW)**2,
          origin='lower',
          aspect='auto',
          cmap=plt.cm.binary,
          extent=[t[0], t[-1], np.log2(f0)[0], np.log2(f0)[-1]])
ax.set_xlim(-40, 40)
ax.text(0.02, 0.95, ("Wavelet PSD"), color='w', ha='left', va='top',
        transform=ax.transAxes)
ax.set_ylim(np.log2(f0)[0], np.log2(f0)[-1])
ax.set_xlabel('$t$')
ax.set_ylabel('$f_0$')
# Ticks every octave; label them as periods "1/2^k" (axis holds log2(f0)).
ax.yaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_formatter(
    plt.FuncFormatter(lambda x, *args: ("1/%i" % (2**-x))))
plt.show()
sub.xaxis.set_label_coords(0.5, 0.05) #No. points ticks sub.xaxis.set_tick_params(which='major', labelbottom=True, labeltop=False, bottom=True, top=False) #the following is for tick formatting (just for fun) def format_fn(tick_val, tick_pos): return tick_val sub.xaxis.set_major_formatter(plt.FuncFormatter(format_fn)) from matplotlib import ticker sub.xaxis.set_major_locator(ticker.FixedLocator(_points)) sub.xaxis.set_minor_locator(ticker.FixedLocator([])) #No. bins ticks _xx = sub.twiny() _xx.scatter(_points, means, color='') _xx.xaxis.set_tick_params(labelbottom=False, labeltop=True, bottom=False, top=True) #the following is for tick formatting (just for fun) def format_fn(tick_val, tick_pos):
'weight' : 'normal', 'size' : 14} plt.rc('font', **font) ###plt.plot(xlst,set_threshold, label="May '16") ##plt.plot(xlst1,set_threshold1, label="August '16") ###plt.plot(xlst2,set_threshold2, label="December '16") ###plt.plot(xlst3,set_threshold3, label="Februari '17") ##plt.axvline(90, linestyle="dashed", color="Red",label="Threshold") ##plt.xlabel("% of flights") ##plt.ylabel("range [km]") ##ax = plt.subplot(111) ##box = ax.get_position() ##ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) ##ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) ##plt.grid() ##plt.show() ax = plt.subplot(111) plt.hist(dis1,bins=80) plt.xlabel("range [km]") plt.ylabel("number of flights [-]") ax.get_xaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x)))) plt.grid() plt.show() #lst = [1879.3,1952.9,1761.6,1854,1789.6,2003,1541.4,1537.9,1617.7]
# Print a quick overview of the iris dataset (`data` is the sklearn Bunch
# loaded earlier in the file), then scatter two feature pairs with a
# species-name colourbar.
print("\t\t\t\t\tIris Flower dataset prediction\n")
print('\nKeys:', data.keys())  # Key values for the dictionary data structures
print('-' * 20)
print('Data Shape:', data.data.shape)  # no. of rows & columns
print('-' * 20)
print('Features:', data.feature_names)  # features of the flower
print('-' * 20)

# Feature columns 1 vs 2 (sepal width vs petal length per sklearn ordering)
plt.scatter(data.data[:, 1],
            data.data[:, 2],
            c=data.target,
            cmap=plt.cm.get_cmap('Set1', 3))
plt.xlabel(data.feature_names[1])
plt.ylabel(data.feature_names[2])
# Colourbar formatter: replace the integer class index with the species name.
color_bar_formating = plt.FuncFormatter(
    lambda i, *args: data.target_names[int(i)])
plt.colorbar(ticks=[0, 1, 2], format=color_bar_formating)

# Feature columns 2 vs 3 (petal length vs petal width)
plt.scatter(data.data[:, 2],
            data.data[:, 3],
            c=data.target,
            cmap=plt.cm.get_cmap('Set1', 3))
plt.xlabel(data.feature_names[2])
plt.ylabel(data.feature_names[3])
color_bar_formating = plt.FuncFormatter(
    lambda i, *args: data.target_names[int(i)])
plt.colorbar(ticks=[0, 1, 2], format=color_bar_formating)

# where X = measurements and y = species
X, y = data.data, data.target
#define the model
# Load the data
from sklearn.datasets import load_iris
iris = load_iris()
from matplotlib import pyplot as plt

# Colourbar tick formatter: turn the integer class index into its species name.
formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])

plt.figure(figsize=(50, 40))
# All unordered pairs of the four iris features, in the same order the old
# nested loops produced them; subplot slots are numbered 1..6 in that order.
feature_pairs = [(i, j) for i in [0, 1, 2] for j in range(i + 1, 4)]
for index, (i, j) in enumerate(feature_pairs, start=1):
    plt.subplot(2, 3, index)
    plt.scatter(iris.data[:, i], iris.data[:, j], c=iris.target)
    plt.colorbar(ticks=[0, 1, 2], format=formatter)
    plt.xlabel(iris.feature_names[i])
    plt.ylabel(iris.feature_names[j])
#plt.tight_layout()
plt.show()
def PODEigPlotter(savename, Array, PODArray, EigenValues, PODEigenValues,
                  EddyCurrentTest):
    """Plot real and imaginary eigenvalue curves (full sweep as lines, POD
    snapshots as markers) against frequency, saving two PDFs.

    Array / EigenValues: frequencies and eigenvalue matrix of the full sweep.
    PODArray / PODEigenValues: the same for the POD snapshot set.
    EddyCurrentTest: if a float, draws a dashed red line at the frequency
    where the eddy-current approximation stops being valid.
    Returns the `Show` flag from PlotterSettings() so the caller can decide
    whether to display the figures.
    """
    #Create a way to reference xkcd colours
    PYCOL = [
        '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
        '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'
    ]
    #Retrieve the settings for the plot (ETP = eigenvalues to plot)
    Title, Show, ETP, _, MLS, MMS, SLS, SMS, _, _, ECL = PlotterSettings()
    #Plot the real graph
    fig, ax = plt.subplots()
    #Plot the mainlines (first iteration creates the `lines` list)
    for i, line in enumerate(ETP):
        if i == 0:
            lines = ax.plot(Array,
                            EigenValues[:, line - 1].real,
                            MLS,
                            markersize=MMS,
                            color=PYCOL[i])
        else:
            lines += ax.plot(Array,
                             EigenValues[:, line - 1].real,
                             MLS,
                             markersize=MMS,
                             color=PYCOL[i])
    #Plot the snapshots
    for i, line in enumerate(ETP):
        lines += ax.plot(PODArray,
                         PODEigenValues[:, line - 1].real,
                         SLS,
                         markersize=SMS,
                         color=PYCOL[i])
    ymin, ymax = ax.get_ylim()
    #Format the axes
    plt.xscale('log')
    plt.ylim(ymin, ymax)
    ax.grid(True)
    ax.yaxis.set_major_formatter(plt.FuncFormatter(TickFormatter))
    plt.subplots_adjust(wspace=0.6,
                        hspace=0.6,
                        left=0.15,
                        bottom=0.1,
                        right=0.94,
                        top=0.90)
    #Label the axes
    plt.xlabel("Frequency (rad/s)")
    plt.ylabel(r"$\lambda(\mathcal{N}^0+\mathcal{R})$")
    #Title
    if Title == True:
        plt.title(r"Eigenvalues of $\mathcal{N}^0+\mathcal{R}$")
    #Create the legend (POD entries first, then snapshots — must match the
    #order lines were appended above)
    names = []
    for i, number in enumerate(ETP):
        names.append(r"$\lambda_{" + str(number) +
                     "}(\mathcal{N}^0+\mathcal{R})$ (POD)")
    for i, number in enumerate(ETP):
        names.append(r"$\lambda_{" + str(number) +
                     "}(\mathcal{N}^0+\mathcal{R})$ (Snapshot)")
    #Show where the eddy-current breaks down (if applicable)
    if isinstance(EddyCurrentTest, float):
        if ECL == True:
            x = np.ones(10) * EddyCurrentTest
            y = np.linspace(ymin, ymax, 10)
            lines += ax.plot(x, y, '--r')
            names.append(r"eddy-current model valid")
    #Make the legend
    ax.legend(lines, names)
    #Save the graph
    plt.savefig(savename + "RealEigenvalues.pdf")
    #Plot the imaginary graph (same structure as the real one above)
    fig, ax = plt.subplots()
    #Plot the mainlines
    for i, line in enumerate(ETP):
        if i == 0:
            lines = ax.plot(Array,
                            EigenValues[:, line - 1].imag,
                            MLS,
                            markersize=MMS,
                            color=PYCOL[i])
        else:
            lines += ax.plot(Array,
                             EigenValues[:, line - 1].imag,
                             MLS,
                             markersize=MMS,
                             color=PYCOL[i])
    #Plot the snapshots
    for i, line in enumerate(ETP):
        lines += ax.plot(PODArray,
                         PODEigenValues[:, line - 1].imag,
                         SLS,
                         markersize=SMS,
                         color=PYCOL[i])
    ymin, ymax = ax.get_ylim()
    #Format the axes
    plt.xscale('log')
    plt.ylim(ymin, ymax)
    ax.grid(True)
    ax.yaxis.set_major_formatter(plt.FuncFormatter(TickFormatter))
    plt.subplots_adjust(wspace=0.6,
                        hspace=0.6,
                        left=0.15,
                        bottom=0.1,
                        right=0.94,
                        top=0.90)
    #Label the axes
    plt.xlabel("Frequency (rad/s)")
    plt.ylabel(r"$\lambda(\mathcal{I})$")
    #Title
    if Title == True:
        plt.title(r"Eigenvalues of $\mathcal{I}$")
    #Create the legend
    names = []
    for i, number in enumerate(ETP):
        names.append(r"$\lambda_{" + str(number) + "}(\mathcal{I})$ (POD)")
    for i, number in enumerate(ETP):
        names.append(r"$\lambda_{" + str(number) + "}(\mathcal{I})$ (Snapshot)")
    #Show where the eddy-current breaks down (if applicable)
    if isinstance(EddyCurrentTest, float):
        if ECL == True:
            x = np.ones(10) * EddyCurrentTest
            y = np.linspace(ymin, ymax, 10)
            lines += ax.plot(x, y, '--r')
            names.append(r"eddy-current model valid")
    #Make the legend
    ax.legend(lines, names)
    #Save the graph
    plt.savefig(savename + "ImaginaryEigenvalues.pdf")
    return Show
def train_n_plot(training, training_label, test, test_label, Dataframe, ax, a):
    """Train four classifiers (NN, Fisher/LDA, SVM, XGBoost), plot their
    prediction histograms and ROC curves, and print summary statistics.

    ax / a: main ROC axes and an inset axes; curves are added to both and
    the (possibly modified) pair is returned.
    NOTE(review): `opt` is read from enclosing/global scope and toggles the
    "Optimized" labelling plus the feature-importance figure — confirm it
    is always defined before this is called.
    NOTE(review): `roc_curve` here returns (fpr, tpr, area) — this is a
    project helper, not sklearn's roc_curve (which returns thresholds).
    """
    # NN
    model_NN, X_train, X_test, y_train, y_test = NN_tensorflow.NN_clf(
        training, training_label, test, test_label, Dataframe)
    # Per-class probability of the positive class (column 1)
    y_pred_0n = model_NN.predict(X_test[y_test == 0])[:, 1]
    y_pred_1n = model_NN.predict(X_test[y_test == 1])[:, 1]
    fpr_NN, tpr_NN, area_NN = roc_curve(y_pred_0n, y_pred_1n)
    # Fisher
    clf = fisher.fisher_LDA(training, training_label, test, test_label,
                            Dataframe)
    y_pred_0l = clf.predict_proba(test[test_label == 0])[:, 1]
    y_pred_1l = clf.predict_proba(test[test_label == 1])[:, 1]
    fpr_fis, tpr_fis, area_fis = roc_curve(y_pred_0l, y_pred_1l)
    # SVM
    model_svm = SVM.SVM_clf(training, training_label, test, test_label)
    y_pred_0s = model_svm.predict_proba(test[test_label == 0])[:, 1]
    y_pred_1s = model_svm.predict_proba(test[test_label == 1])[:, 1]
    fpr_svm, tpr_svm, area_svm = roc_curve(y_pred_0s, y_pred_1s)
    # XGboost
    model = xgboost_dec_tree.xgb_clf(training, training_label, test,
                                     test_label)
    y_pred_0x = model.predict_proba(test[test_label == 0])[:, 1]
    y_pred_1x = model.predict_proba(test[test_label == 1])[:, 1]
    fpr_XG, tpr_XG, area_XG = roc_curve(y_pred_0x, y_pred_1x)
    # =========================================================================
    # Prediction plot (2x2) of all models
    # =========================================================================
    bins = 150
    figp, ((axp1, axp2), (axp3, axp4)) = plt.subplots(2, 2, figsize=(12, 12),
                                                      sharex='col',
                                                      sharey='row')
    mpl.rcParams['font.size'] = 16
    # LDA
    _ = axp1.hist(y_pred_0l, bins=bins, label='Signals', color='darkgreen',
                  histtype='step')
    _ = axp1.hist(y_pred_1l, bins=bins, label='Afterpulses', color='darkred',
                  histtype='step')
    axp1.grid(True, color='black', linestyle='--', linewidth=0.5, alpha=0.25)
    axp1.legend()
    axp1.set(  #xlabel='prediction',
        ylabel='Frequency',
        yscale='log',
        title='LDA')
    # SVM
    _ = axp2.hist(y_pred_0s, bins=bins, label='Signals', color='darkgreen',
                  histtype='step')
    _ = axp2.hist(y_pred_1s, bins=bins, label='Afterpulses', color='darkred',
                  histtype='step')
    axp2.grid(True, color='black', linestyle='--', linewidth=0.5, alpha=0.25)
    axp2.legend()
    axp2.set(  #xlabel='prediction',
        #ylabel='Frequency',
        yscale='log',
        title='SVM')
    # NN
    _ = axp3.hist(y_pred_0n, bins=bins, label='Signals', color='darkgreen',
                  histtype='step')
    _ = axp3.hist(y_pred_1n, bins=bins, label='Afterpulses', color='darkred',
                  histtype='step')
    axp3.grid(True, color='black', linestyle='--', linewidth=0.5, alpha=0.25)
    axp3.legend()
    axp3.set(xlabel='prediction',
             ylabel='Frequency',
             yscale='log',
             title='NN')
    # XGB
    _ = axp4.hist(y_pred_0x, bins=bins, label='Signals', color='darkgreen',
                  histtype='step')
    _ = axp4.hist(y_pred_1x, bins=bins, label='Afterpulses', color='darkred',
                  histtype='step')
    axp4.grid(True, color='black', linestyle='--', linewidth=0.5, alpha=0.25)
    axp4.legend()
    axp4.set(xlabel='prediction',
             #ylabel='Frequency',
             yscale='log',
             title='XGB')
    mpl.rcParams['font.size'] = 13
    figp.savefig("prediction_models.pdf")
    # =========================================================================
    # Adding to ROC curve
    # =========================================================================
    round_n = 4  # decimal places for the AUC shown in the legend
    if opt:
        ax.plot(fpr_fis, tpr_fis,
                label="Optimized Fisher's discriminant with area = " +
                str(round(area_fis, round_n)),
                linestyle='-.', color='green')
        ax.plot(fpr_NN, tpr_NN,
                label="Optimized neural network classifier with area = " +
                str(round(area_NN, round_n)),
                linestyle='--', color='darkorange', marker='*')
        ax.plot(fpr_svm, tpr_svm, marker='x',
                label="Optimized SVM classifier with area = " +
                str(round(area_svm, round_n)),
                linestyle='-', color='darkred')
        ax.plot(fpr_XG, tpr_XG,
                label="Optimized XGBoost classifier with area = " +
                str(round(area_XG, round_n)),
                linestyle=':', color='darkblue')
        # this is an inset axes over the main axes
        a.plot(fpr_fis, tpr_fis, linestyle='-.', color='green')
        a.plot(fpr_NN, tpr_NN, linestyle='-', color='darkorange', marker='*')
        a.plot(fpr_svm, tpr_svm, linestyle='-', color='darkred', marker='x')
        a.plot(fpr_XG, tpr_XG, linestyle=':', color='darkblue',)
    else:
        ax.plot(fpr_fis, tpr_fis,
                label="Fisher's discriminant with area = " +
                str(round(area_fis, round_n)),
                linestyle='-', color='green')
        ax.plot(fpr_NN, tpr_NN,
                label="Neural network classifier with area = " +
                str(round(area_NN, round_n)),
                linestyle='--', color='darkorange')
        ax.plot(fpr_svm, tpr_svm,
                label="SVM classifier with area = " +
                str(round(area_svm, round_n)),
                linestyle='-', color='darkred')
        ax.plot(fpr_XG, tpr_XG,
                label="XGBoost classifier with area = " +
                str(round(area_XG, round_n)),
                linestyle='-', color='darkblue')
        # this is an inset axes over the main axes
        a.plot(fpr_fis, tpr_fis, linestyle='-', color='green')
        a.plot(fpr_NN, tpr_NN, linestyle='--', color='darkorange')
        a.plot(fpr_svm, tpr_svm, linestyle='-', color='darkred')
        a.plot(fpr_XG, tpr_XG, linestyle='-', color='darkblue')
    # Separation: distance between class means in pooled-std units.
    print("Separation in standard deviation:")
    print("LDA: ", (np.mean(y_pred_1l) - np.mean(y_pred_0l)) /
          np.sqrt(np.std(y_pred_1l)**2 + np.std(y_pred_0l)**2))
    print("SVM: ", (np.mean(y_pred_1s) - np.mean(y_pred_0s)) /
          np.sqrt(np.std(y_pred_1s)**2 + np.std(y_pred_0s)**2))
    print("NN: ", (np.mean(y_pred_1n) - np.mean(y_pred_0n)) /
          np.sqrt(np.std(y_pred_1n)**2 + np.std(y_pred_0n)**2))
    print("XGB: ", (np.mean(y_pred_1x) - np.mean(y_pred_0x)) /
          np.sqrt(np.std(y_pred_1x)**2 + np.std(y_pred_0x)**2))
    print()
    print("AUC:")
    print("LDA: ", area_fis)
    print("SVM: ", area_svm)
    print("NN: ", area_NN)
    print("XGB: ", area_XG)
    print()
    print("Afterpulse discriminated at 99% correct classifying signals [%]:")
    print("LDA: ", min(tpr_fis[fpr_fis > 0.01]) * 100)
    print("SVM: ", min(tpr_svm[fpr_svm > 0.01]) * 100)
    print("NN: ", min(tpr_NN[fpr_NN > 0.01]) * 100)
    print("XGB: ", min(tpr_XG[fpr_XG > 0.01]) * 100)
    print()
    if not opt:
        # Normalised |coefficient| weights for the linear models.
        def get_weights(arb_model):
            _coef = [abs(i) for i in list(arb_model.coef_[0])]
            _wei = [i / sum(_coef) for i in _coef]
            return _wei
        clf_wei = get_weights(clf)
        svm_wei = get_weights(model_svm)
        figi, axi = plt.subplots(figsize=(16, 10))
        width = 0.5
        df = pd.DataFrame(dict(graph=list(training.keys()),
                               XGB=list(model.feature_importances_),
                               LDA=clf_wei,
                               SVM=svm_wei))
        df = df.iloc[::-1]  # reverse so the first feature ends up on top
        # Background bands grouping the features into three categories.
        corr_start, energy_end = 0 - width * 10, len(df) * 2
        corr_form, form_energy = 7 * width, len(df) * 2 - 21 * width
        axi.axhspan(corr_start, corr_form, facecolor='purple', alpha=0.4)
        axi.axhspan(corr_form, form_energy, facecolor='yellow', alpha=0.4)
        axi.axhspan(form_energy, energy_end, facecolor='cyan', alpha=0.4)
        axi.text(0.5, (corr_form - corr_start) / 4, "Correlation parameters",
                 color='purple', alpha=0.6, fontsize=18)
        axi.text(0.5, (form_energy - corr_form) / 1.5,
                 "Shape of pulse parameters",
                 color='orange', alpha=0.8, fontsize=18)
        axi.text(0.5, energy_end - (energy_end - form_energy) / 1.5,
                 "Energy of pulse parameters",
                 color='blue', alpha=0.6, fontsize=18)
        ind = np.arange(len(df)) * 2  # one slot of height 2 per feature
        # x tick labels as percentages
        def format_func(value, tick_number):
            return str(round(value * 100)) + "%"
        axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
        axi.barh(ind + width, df.LDA, width, label='LDA', alpha=0.8,
                 color='darkgreen', edgecolor='darkgreen');
        axi.barh(ind + 2 * width, df.SVM, width, label='SVM', alpha=0.9,
                 color='darkred', edgecolor='darkred');
        axi.barh(ind, df.XGB, width, label='XGB', alpha=0.8,
                 color='darkblue', edgecolor='darkblue');
        axi.set(yticks=ind + width, yticklabels=df.graph,
                ylim=[2 * width - 2, len(df) * 2],
                xlabel='Parameter importance')
        axi.legend(loc="center", framealpha=0.2, title="Models", fontsize=14, )
        # Annotate every bar with its percentage value.
        for i in range(len(ind)):
            v = list(df.XGB)[i]
            axi.text(list(df.XGB)[i] + 0.001, ind[i] - 0.21,
                     str(round(list(df.XGB)[i] * 100, 3)) + "%",
                     color='k', fontweight='bold', fontsize=9)
        for i in range(len(ind)):
            axi.text(list(df.LDA)[i] + 0.001, ind[i] - 0.21 + width,
                     str(round(list(df.LDA)[i] * 100, 3)) + "%",
                     color='k', fontweight='bold', fontsize=9)
        for i in range(len(ind)):
            axi.text(list(df.SVM)[i] + 0.001, ind[i] - 0.21 + width * 2,
                     str(round(list(df.SVM)[i] * 100, 3)) + "%",
                     color='k', fontweight='bold', fontsize=9)
        figi.savefig("Feature_importance2.pdf")
    return ax, a
def ErrorPlotter(savename, Array, Values, Errors, EddyCurrentTest):
    """Plot real and imaginary tensor coefficients against frequency with
    certificate error bounds, saving two PDFs.

    Array: frequencies; Values: complex coefficient matrix; Errors: bound
    widths added/subtracted around each coefficient.
    EddyCurrentTest: if a float, marks the frequency where the eddy-current
    model stops being valid with a dashed red line.
    Returns the `Show` flag from PlotterSettings().
    """
    #Create a way to reference xkcd colours
    PYCOL = [
        '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
        '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'
    ]
    #Retrieve the settings for the plot (TTP = tensor coefficients to plot)
    Title, Show, _, TTP, MLS, MMS, _, _, EBLS, EBMS, ECL = PlotterSettings()
    #Plot the graph
    fig, ax = plt.subplots()
    #Plot the mainlines (first iteration creates the `lines` list)
    for i, line in enumerate(TTP):
        if i == 0:
            #Plot main lines
            lines = ax.plot(Array,
                            Values[:, line - 1].real,
                            MLS,
                            markersize=MMS,
                            color=PYCOL[i])
            #Plot bottom error bars
            lines += ax.plot(Array,
                             Values[:, line - 1].real - Errors[:, line - 1],
                             EBLS,
                             markersize=EBMS,
                             color=PYCOL[i])
        else:
            #Plot main lines
            lines += ax.plot(Array,
                             Values[:, line - 1].real,
                             MLS,
                             markersize=MMS,
                             color=PYCOL[i])
            #Plot bottom error bars
            lines += ax.plot(Array,
                             Values[:, line - 1].real - Errors[:, line - 1],
                             EBLS,
                             markersize=EBMS,
                             color=PYCOL[i])
    #Calculate the limits (pad 5% beyond the data range)
    ymin = np.amin(Values.real)
    ymax = np.amax(Values.real)
    y_range = ymax - ymin
    ymin -= 0.05 * y_range
    ymax += 0.05 * y_range
    #Show where the eddy-current breaks down (if applicable)
    if isinstance(EddyCurrentTest, float):
        if ECL == True:
            x = np.ones(10) * EddyCurrentTest
            y = np.linspace(ymin, ymax, 10)
            lines += ax.plot(x, y, '--r')
    #Plot the top error bars (plotted separately for the legend order)
    for i, line in enumerate(TTP):
        lines += ax.plot(Array,
                         Values[:, line - 1].real + Errors[:, line - 1],
                         EBLS,
                         markersize=EBMS,
                         color=PYCOL[i])
    #Format the axes
    plt.xscale('log')
    plt.ylim(ymin, ymax)
    ax.grid(True)
    ax.yaxis.set_major_formatter(plt.FuncFormatter(TickFormatter))
    plt.subplots_adjust(wspace=0.6,
                        hspace=0.6,
                        left=0.15,
                        bottom=0.1,
                        right=0.94,
                        top=0.90)
    #Label the axes
    plt.xlabel("Frequency (rad/s)")
    plt.ylabel(r"$\mathcal{N}^0_{ij}+\mathcal{R}_{ij}$")
    if Title == True:
        plt.title(r"Tensor coefficients of $\mathcal{N}^0+\mathcal{R}$")
    #Create the legend.  CoefficientRef maps a 1-based coefficient number to
    #its "ij" subscript; entries 7+ are the symmetric partners ("_" is an
    #unused placeholder so that number+4 indexing lines up).
    names = []
    CoefficientRef = [
        "11", "12", "13", "22", "23", "33", "21", "31", "_", "32"
    ]
    for i, number in enumerate(TTP):
        #Diagonal coefficients (11, 22, 33) have no symmetric partner
        if number == 1 or number == 4 or number == 6:
            names.append(r"Re($\mathcal{M}_{" + CoefficientRef[number - 1] +
                         "}(\omega)$)")
            names.append(r"Re($\mathcal{M}_{" + CoefficientRef[number - 1] +
                         "}(\omega)$) (Certificate Bound)")
        else:
            names.append(r"Re($\mathcal{M}_{" + CoefficientRef[number - 1] +
                         "}(\omega)$)=Re($\mathcal{M}_{" +
                         CoefficientRef[number + 4] + "}(\omega)$)")
            names.append(r"Re($\mathcal{M}_{" + CoefficientRef[number - 1] +
                         "}(\omega)$)=Re($\mathcal{M}_{" +
                         CoefficientRef[number + 4] +
                         "}(\omega)$) (Certificate Bound)")
    #Show where the eddy-current breaks down (if applicable)
    if isinstance(EddyCurrentTest, float):
        if ECL == True:
            names.append(r"eddy-current model valid")
    #Shrink the size of the legend if there are too many lines
    if len(names) >= 13:
        ax.legend(lines, names, prop={'size': 6})
    elif len(names) >= 7:
        ax.legend(lines, names, prop={'size': 8})
    else:
        ax.legend(lines, names)
    #Save the graph
    plt.savefig(savename + "RealTensorCoeficients.pdf")
    #Plot the imaginary graph (same structure as the real one above)
    fig, ax = plt.subplots()
    #Plot the mainlines
    for i, line in enumerate(TTP):
        if i == 0:
            #Plot the main lines
            lines = ax.plot(Array,
                            Values[:, line - 1].imag,
                            MLS,
                            markersize=MMS,
                            color=PYCOL[i])
            #Plot bottom error bars
            lines += ax.plot(Array,
                             Values[:, line - 1].imag - Errors[:, line - 1],
                             EBLS,
                             markersize=EBMS,
                             color=PYCOL[i])
        else:
            #Plot the main lines
            lines += ax.plot(Array,
                             Values[:, line - 1].imag,
                             MLS,
                             markersize=MMS,
                             color=PYCOL[i])
            #Plot bottom error bars
            lines += ax.plot(Array,
                             Values[:, line - 1].imag - Errors[:, line - 1],
                             EBLS,
                             markersize=EBMS,
                             color=PYCOL[i])
    #Calculate the limits
    ymin = np.amin(Values.imag)
    ymax = np.amax(Values.imag)
    y_range = ymax - ymin
    ymin -= 0.05 * y_range
    ymax += 0.05 * y_range
    #Show where the eddy-current breaks down (if applicable)
    if isinstance(EddyCurrentTest, float):
        if ECL == True:
            x = np.ones(10) * EddyCurrentTest
            y = np.linspace(ymin, ymax, 10)
            lines += ax.plot(x, y, '--r')
    #Plot the top error bars (plotted separately for the legend order)
    for i, line in enumerate(TTP):
        lines += ax.plot(Array,
                         Values[:, line - 1].imag + Errors[:, line - 1],
                         EBLS,
                         markersize=EBMS,
                         color=PYCOL[i])
    #Format the axes
    plt.xscale('log')
    plt.ylim(ymin, ymax)
    ax.grid(True)
    ax.yaxis.set_major_formatter(plt.FuncFormatter(TickFormatter))
    plt.subplots_adjust(wspace=0.6,
                        hspace=0.6,
                        left=0.15,
                        bottom=0.1,
                        right=0.94,
                        top=0.90)
    #Label the axes
    plt.xlabel("Frequency (rad/s)")
    plt.ylabel(r"$\mathcal{I}_{ij}$")
    if Title == True:
        plt.title(r"Tensor coefficients of $\mathcal{I}$")
    #Create the legend
    names = []
    for i, number in enumerate(TTP):
        if number == 1 or number == 4 or number == 6:
            names.append(r"Im($\mathcal{M}_{" + CoefficientRef[number - 1] +
                         "}(\omega)$)")
            names.append(r"Im($\mathcal{M}_{" + CoefficientRef[number - 1] +
                         "}(\omega)$) (Certificate Bound)")
        else:
            names.append(r"Im($\mathcal{M}_{" + CoefficientRef[number - 1] +
                         "}(\omega)$)=Im($\mathcal{M}_{" +
                         CoefficientRef[number + 4] + "}(\omega)$)")
            names.append(r"Im($\mathcal{M}_{" + CoefficientRef[number - 1] +
                         "}(\omega)$)=Im($\mathcal{M}_{" +
                         CoefficientRef[number + 4] +
                         "}(\omega)$) (Certificate Bound)")
    #Show where the eddy-current breaks down (if applicable)
    if isinstance(EddyCurrentTest, float):
        if ECL == True:
            names.append(r"eddy-current model valid")
    #Shrink the size of the legend if there are too many lines
    if len(names) >= 13:
        ax.legend(lines, names, prop={'size': 6})
    elif len(names) >= 7:
        ax.legend(lines, names, prop={'size': 8})
    else:
        ax.legend(lines, names)
    #Save the graph
    plt.savefig(savename + "ImaginaryTensorCoeficients.pdf")
    return Show
# Preview each per-sample dataframe before tagging.
for frame in dfl:
    print(frame.head())

# Tag every dataframe with its sample name, previewing again after the tag.
# (Indexing `samples[idx]` keeps the original behaviour: a missing sample
# name raises IndexError rather than being silently skipped.)
for idx, frame in enumerate(dfl):
    frame['sample'] = samples[idx]
    print(frame.head())

# Stack all samples into one long dataframe for seaborn.
dfc = pd.concat(dfl, axis=0)
print(dfc)
print('')

# Insert-size distribution, one line per sample.
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot(1, 1, 1)
plot = sns.lineplot(x='insert_size', y='Reads', hue='sample', data=dfc)
sns.despine()
plot.set(xlim=(0, 1250))
plot.set(ylim=(0, None))
# Retitle the legend's first entry and push the legend outside the axes.
plot.legend(loc='center right', bbox_to_anchor=(1.5, 0.5),
            framealpha=1).texts[0].set_text('Sample')


def commas(x, pos):
    """Tick formatter: integer read counts with thousands separators."""
    return f'{int(x):,}'


ax.get_yaxis().set_major_formatter(plt.FuncFormatter(commas))
plt.savefig('insert_size.png', format='png', dpi=500, bbox_inches='tight')
print('finished')
def bar_plot(total_cans_shift_lst):
    """Draw three bar charts (average, total, and percentage output per
    shift) from a (n_shifts, n_samples) array of can counts.

    NOTE(review): the statistics computed from `total_cans_shift_lst` below
    are immediately overwritten by hard-coded values — presumably a
    debugging/reporting leftover.  Confirm whether the computed values
    should be used instead; behaviour is preserved here.
    """
    # Compute per-shift averages and totals from the input array …
    avg_shift_output = []
    total_shift_output = []
    for i in range(total_cans_shift_lst.shape[0]):
        mean_output = np.mean(total_cans_shift_lst[i])
        avg_shift_output.append(mean_output)
        total_ouput = np.sum(total_cans_shift_lst[i])
        total_shift_output.append(total_ouput)
    all_shift_output = np.sum(total_shift_output)
    percentage_of_total_output = []
    for i in range(total_cans_shift_lst.shape[0]):
        percentage_of_total_output.append(
            (total_shift_output[i] / all_shift_output) * 100)
    X = [1, 2, 3]
    # … then discard them in favour of these hard-coded three-shift figures
    # (see NOTE above).
    total_shift_output = [2525553, 3089514, 2623086]
    avg_shift_output = [420926, 514919, 437181]
    percentage_of_total_output = []
    for i in range(3):
        percentage_of_total_output.append(
            (total_shift_output[i] / sum(total_shift_output)) * 100)
    X = [1, 2, 3]
    X_labels = ['Shift 1', 'Shift 2', 'Shift 3']
    # Chart 1: average output per shift
    fig, ax = plt.subplots()
    ax.bar(X, avg_shift_output, align='center')
    ax.set_xticklabels(X_labels)
    ax.set_xticks(X)
    plt.xlabel('Shift #')
    # plt.ylim(0,55)
    plt.grid(True)
    plt.ylabel('Average Output (# of Cans)')
    plt.title('Average Output per Shift')
    # Thousands separators on the count axis
    ax.get_yaxis().set_major_formatter(
        plt.FuncFormatter(lambda X, loc: "{:,}".format(int(X))))
    # Value labels slightly above each bar (x offset tuned by eye)
    for i in range(3):
        v = avg_shift_output[i] + 3000
        ax.text(i + .9,
                v,
                "{:,.0f}".format(avg_shift_output[i]),
                color='black',
                va='center',
                fontweight='bold',
                fontsize=10)
    # Chart 2: total output per shift
    fig, ax = plt.subplots()
    ax.bar(X, total_shift_output, align='center')
    ax.set_xticklabels(X_labels)
    ax.set_xticks(X)
    plt.xlabel('Shift #')
    # plt.ylim(0,55)
    plt.grid(True)
    plt.ylabel('Total Output (# of Cans)')
    plt.title('Total Output per Shift')
    for i in range(3):
        v = total_shift_output[i] + 60000
        ax.text(i + .9,
                v,
                "{:,.0f}".format(total_shift_output[i]),
                color='black',
                va='center',
                fontweight='bold',
                fontsize=10)
    ax.get_yaxis().set_major_formatter(
        plt.FuncFormatter(lambda X, loc: "{:,}".format(int(X))))
    # Chart 3: each shift's share of total output
    fig, ax = plt.subplots()
    ax.bar(X, percentage_of_total_output, align='center')
    ax.set_xticklabels(X_labels)
    ax.set_xticks(X)
    plt.xlabel('Shift #')
    plt.ylim(0, 50)
    plt.grid(True)
    plt.ylabel('Percentage of Total Output (%)')
    plt.title('Output Distribution Per Shift')
    for i in range(3):
        v = percentage_of_total_output[i] + 5
        ax.text(i + .95,
                v,
                "{:.1f}%".format(percentage_of_total_output[i]),
                color='black',
                va='center',
                fontweight='bold',
                fontsize=10)
    ax.get_yaxis().set_major_formatter(
        plt.FuncFormatter(lambda X, loc: "{:,}".format(int(X))))
    plt.show()
# Ensure that the axis ticks only show up on the bottom and left of the plot. # Ticks on the right and top of the plot are generally unnecessary. ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() fig.subplots_adjust(left=.06, right=.75, bottom=.02, top=.94) # Limit the range of the plot to only where the data is. # Avoid unnecessary whitespace. ax.set_xlim(1969.5, 2011.1) ax.set_ylim(-0.25, 90) # Make sure your axis ticks are large enough to be easily read. # You don't want your viewers squinting to read your plot. plt.xticks(range(1970, 2011, 10), fontsize=14) plt.yticks(range(0, 91, 10), fontsize=14) ax.xaxis.set_major_formatter(plt.FuncFormatter('{:.0f}'.format)) ax.yaxis.set_major_formatter(plt.FuncFormatter('{:.0f}%'.format)) # Provide tick lines across the plot to help your viewers trace along # the axis ticks. Make sure that the lines are light and small so they # don't obscure the primary data lines. plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.3) # Remove the tick marks; they are unnecessary with the tick lines we just # plotted. plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='on', left='off',
############################################################################### # Plot of the results and computation time ############################################################################### df_results = (pd.DataFrame({'Balanced model': cv_results_balanced, 'Imbalanced model': cv_results_imbalanced}) .unstack().reset_index()) df_time = (pd.DataFrame({'Balanced model': cv_time_balanced, 'Imbalanced model': cv_time_imbalanced}) .unstack().reset_index()) import seaborn as sns import matplotlib.pyplot as plt plt.figure() sns.boxplot(y='level_0', x=0, data=df_time) sns.despine(top=True, right=True, left=True) plt.xlabel('time [s]') plt.ylabel('') plt.title('Computation time difference using a random under-sampling') plt.figure() sns.boxplot(y='level_0', x=0, data=df_results, whis=10.0) sns.despine(top=True, right=True, left=True) ax = plt.gca() ax.xaxis.set_major_formatter( plt.FuncFormatter(lambda x, pos: "%i%%" % (100 * x))) plt.xlabel('ROC-AUC') plt.ylabel('') plt.title('Difference in terms of ROC-AUC using a random under-sampling')
def plot_results(model_preds: xr.Dataset, tci_path: Path, savepath: Path,
                 prefix: str = "") -> None:
    """Render a three-panel figure for one prediction tile — true-colour
    image, prediction mask, and the mask overlaid on the image — and save
    it under *savepath*.

    model_preds: per-class prediction Dataset; more than one data variable
    means multi-class output (argmax mask), otherwise a single probability
    map thresholded at 0.5 for the overlay.
    tci_path: GeoTIFF used to build the true-colour backdrop.
    prefix: prepended to the output filename.
    """
    multi_output = len(model_preds.data_vars) > 1
    # Last timestep of the true-colour imagery for this tile.
    tci = sentinel_as_tci(
        BaseEngineer.load_tif(tci_path,
                              start_date=datetime(2020, 1, 1),
                              days_per_timestep=30),
        scale=False,
    ).isel(time=-1)
    # Sort both datasets so image and predictions line up spatially.
    tci = tci.sortby("x").sortby("y")
    model_preds = model_preds.sortby("lat").sortby("lon")
    plt.clf()
    fig, ax = plt.subplots(1, 3, figsize=(20, 7.5),
                           subplot_kw={"projection": ccrs.PlateCarree()})
    fig.suptitle(
        f"Model results for tile with bottom left corner:"
        f"\nat latitude {float(model_preds.lat.min())}"
        f"\n and longitude {float(model_preds.lon.min())}",
        fontsize=15,
    )
    # ax 1 - original
    img_extent_1 = (tci.x.min(), tci.x.max(), tci.y.min(), tci.y.max())
    img = np.clip(np.moveaxis(tci.values, 0, -1), 0, 1)
    ax[0].set_title("True colour image")
    ax[0].imshow(img,
                 origin="upper",
                 extent=img_extent_1,
                 transform=ccrs.PlateCarree())
    # Shared imshow kwargs reused for the mask panels.
    args_dict = {
        "origin": "upper",
        "extent": img_extent_1,
        "transform": ccrs.PlateCarree(),
    }
    if multi_output:
        # Hard class assignment: index of the most probable class per pixel.
        mask = np.argmax(model_preds.to_array().values, axis=0)
        # currently, we have 10 classes (at most). It seems unlikely we will
        # go above 20
        args_dict["cmap"] = plt.cm.get_cmap("tab20",
                                            len(model_preds.data_vars))
    else:
        mask = model_preds.prediction_0
        args_dict.update({"vmin": 0, "vmax": 1})
    # ax 2 - mask
    ax[1].set_title("Mask")
    im = ax[1].imshow(mask, **args_dict)
    # finally, all together
    ax[2].set_title("Mask on top of the true colour image")
    ax[2].imshow(img,
                 origin="upper",
                 extent=img_extent_1,
                 transform=ccrs.PlateCarree())
    args_dict["alpha"] = 0.3
    if not multi_output:
        # Threshold the probability map for the overlay only.
        mask = mask > 0.5
    ax[2].imshow(mask, **args_dict)
    colorbar_args = {
        "ax": ax.ravel().tolist(),
    }
    if multi_output:
        # This function formatter will replace integers with target names
        formatter = plt.FuncFormatter(
            lambda val, loc: list(model_preds.data_vars)[val])
        colorbar_args.update({
            "ticks": range(len(model_preds.data_vars)),
            "format": formatter
        })
        # We must be sure to specify the ticks matching our target names
    fig.colorbar(im, **colorbar_args)
    plt.savefig(savepath / f"results_{prefix}{tci_path.name}.png",
                bbox_inches="tight",
                dpi=300)
    plt.close()
def make_chart(tbl_name, tbl_data, flg, itm, y1d, y2d):
    """Build a time-series chart for one item and save it under ./chart/.

    Args:
        tbl_name: source table name ('shisan', 'syouhin' or 'ginkou');
            selects how the Y-series are computed.
        tbl_data: table rows (sequences indexed by column number).
        flg: screen-display flag — 'ON' shows the figure fullscreen after
            saving, otherwise it is only written to disk.
        itm: [item name, column index]; a column index of -1 means "use all
            rows" (and skip the second Y axis).
        y1d: [Y1-axis label, column index].
        y2d: [Y2-axis label, column index].

    Side effects: writes ./data/{item}.csv with the selected rows and
    ./chart/img_{item}.png with the rendered figure.
    """
    #----------------------------------------------------------------------------
    dsp_msg('チャート作成', itm[0], 2)
    #----------------------------------------------------------------------------
    if itm[1] != -1:
        # Keep only the rows whose column itm[1] matches the item name.
        # (-1 appears to be used for the ginkou table — TODO confirm)
        tbl = []
        for x in tbl_data:
            if x[itm[1]] == itm[0]:
                tbl.append(x)
    else:
        tbl = tbl_data
    # Dump the selected rows as CSV alongside the chart.
    with open('./data/{0}.csv'.format(itm[0]), 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(tbl)
    fig = plt.figure(figsize=(12, 8), facecolor='skyblue', tight_layout=True)
    if flg == 'OFF':
        # Interactive mode so later plt.show()-style calls don't block when
        # the chart is not meant to be displayed.
        plt.ion()
    xFmt = mdates.DateFormatter('%Y-%m-%d')
    # Thousands-separated integer labels on the Y axes.
    yFmt = plt.FuncFormatter(lambda y, loc: '{:,}'.format(int(y)))
    x = [r[1] for r in tbl]  # column 1 holds the date — TODO confirm
    if tbl_name == 'shisan':
        y1 = [r[y1d[1]] for r in tbl]
        y2 = [r[y2d[1]] for r in tbl]
    elif tbl_name == 'syouhin':
        y1 = [r[y1d[1]] for r in tbl]
        y2 = []
        for r in tbl:
            if r[2] == '国内株式':
                # holdings * day-over-day change
                y2.append(r[6] * r[14])
            elif r[2] == '米国株式':
                # holdings * day-over-day change * USD rate
                # (market value / current price / holdings)
                y2.append(r[6] * r[14] * (r[16] / r[10] / r[6]))
            elif r[2] == '投資信託':
                # market value / current price * day-over-day change
                y2.append(r[16] / r[10] * r[14])
    elif tbl_name == 'ginkou':
        # NOTE(review): y2 is never assigned for 'ginkou'; if itm[1] != -1
        # the ax2.plot(x, y2, ...) below would raise NameError — presumably
        # callers always pass itm[1] == -1 for this table. Verify.
        y1 = [r[y1d[1]] for r in tbl]
    ax1 = fig.add_subplot(111, title=itm[0])
    ax1.plot(x, y1, marker='o', color='blue', linestyle='-', label=y1d[0])
    if itm[1] != -1:
        # Second series on a twin Y axis; merge both legends onto ax1.
        ax2 = ax1.twinx()
        ax2.plot(x, y2, marker='o', color='green', linestyle=':', label=y2d[0])
        handler1, label1 = ax1.get_legend_handles_labels()
        handler2, label2 = ax2.get_legend_handles_labels()
        ax1.legend(handler1 + handler2, label1 + label2)
        ax1.tick_params(axis='x', colors='black', direction='in')
        ax1.tick_params(axis='y', colors='blue', direction='in')
        ax2.tick_params(axis='y', colors='green', direction='in')
        ax1.yaxis.set_major_formatter(yFmt)
        ax2.yaxis.set_major_formatter(yFmt)
    else:
        handler1, label1 = ax1.get_legend_handles_labels()
        ax1.legend(handler1, label1)
        ax1.tick_params(axis='x', colors='black', direction='in')
        ax1.tick_params(axis='y', colors='blue', direction='in')
        ax1.yaxis.set_major_formatter(yFmt)
    # Timestamp the figure near its bottom-right corner.
    ax_pos = ax1.get_position()
    fig.text(ax_pos.x1 - 0.05, ax_pos.y0, datetime.datetime.now().strftime('%Y-%m-%d %H:%M'), color='red', fontsize=8)
    ax1.xaxis.set_major_formatter(xFmt)
    ax1.grid(True)
    fig.autofmt_xdate()
    plt.tight_layout()
    fig.savefig('./chart/img_{0}.png'.format(itm[0]))
    if flg == 'ON':
        plt_set_fullscreen()
        plt.show()
    else:
        plt.ioff()
        # (continuation of format_func — its def line is above this chunk;
        # x appears to be an integer tick value spelled as a multiple of 'h')
        return '0'
    elif x == 1:
        return 'h'
    elif x == -1:
        return '-h'
    else:
        # any other integer multiple, e.g. '3h' or '-2h'
        return '%ih' % x


# Draw every available KDE kernel shape on its own subplot.
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
                            'exponential', 'linear', 'cosine']):
    axi = ax.ravel()[i]
    # Fit a KDE with this kernel to the source points and evaluate the
    # log-density over the plotting grid.
    log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
    axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
    axi.text(-2.6, 0.95, kernel)  # kernel name in the top-left corner
    axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
    axi.xaxis.set_major_locator(plt.MultipleLocator(1))
    axi.yaxis.set_major_locator(plt.NullLocator())  # hide Y ticks
    axi.set_ylim(0, 1.05)
    axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')

# ----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
# Bimodal sample as a column vector: 30% from N(0, 1), 70% from N(5, 1).
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
]
# Append the human-readable class name to each label string.
all_labels = [
    all_labels[i] + ": " + names[i] for i in range(0, len(all_labels))
]
scores = np.unique(classed)  # distinct score values actually present
labels = [all_labels[s] for s in scores]
num_scores = len(scores)


def rescore(c):
    """
    rescore values from original score values (0-59) to values
    ranging from 0 to num_scores-1
    """
    return np.where(scores == c)[0][0]


# Vectorise so rescore applies element-wise to the whole classed array.
rescore = np.vectorize(rescore)
painted = rescore(classed)

# Left: original image; right: the rescored class map, stretched to the
# image's aspect ratio.
plt.subplot(1, 2, 1), plt.imshow(image)
plt.subplot(1, 2, 2), plt.imshow(painted,
                                 cmap=plt.cm.get_cmap('jet', num_scores),
                                 aspect=float(image.shape[0]) / float(image.shape[1]))
#plt.imshow(painted, cmap=plt.cm.get_cmap('jet', num_scores))

# setup legend: colour-bar ticks show the label text instead of the index
formatter = plt.FuncFormatter(lambda val, loc: labels[val])
plt.colorbar(ticks=range(0, num_scores), format=formatter)
# Centre each colour band on its integer class value.
plt.clim(-0.5, num_scores - 0.5)
plt.show()