fileUz.close()
dataUz = np.genfromtxt("Uz_" + log, skiprows=2)
line3, = pl.plot(datatime, dataUz, '', label="Uz", linewidth=1)

if os.path.isfile("p_" + log):
    filep.close()
    datap = np.genfromtxt("p_" + log, skiprows=2)
    line4, = pl.plot(datatime, datap, '', label="p", linewidth=1)

if os.path.isfile("k_" + log):
    filek.close()
    datak = np.genfromtxt("k_" + log, skiprows=2)
    line5, = pl.plot(datatime, datak, '', label="k", linewidth=1)

if os.path.isfile("w_" + log):
    filew.close()
    dataw = np.genfromtxt("w_" + log, skiprows=2)
    line6, = pl.plot(datatime, dataw, '', label="w", linewidth=1)

if os.path.isfile("e_" + log):
    filee.close()
    datae = np.genfromtxt("e_" + log, skiprows=2)
    line7, = pl.plot(datatime, datae, '', label="e", linewidth=1)

# plot graphs
pl.legend(handler_map={line1: HandlerLine2D(numpoints=6)})  # plots the legend
pl.title(log + " OpenFOAM - Convergence Plot")
pl.xlabel("Time (s)")
pl.ylabel("Convergence Residual (Log Scale)")
pl.yscale('log')
pl.show()
def analysis_k1(elem, cur_k, k1val, k1mval, k2val, k3val, k3mval): if cur_k == 0: k1mval = elem if cur_k == 1: k3mval = elem fl = 0 fl_delta = 0 fl_di = 0 bifurcation_point = [] S_A = lambdify((x, y, k1, k1m, k2, k3, k3m), traceA) delta_A = lambdify((x, y, k1, k1m, k2, k3, k3m), detA) for i in range(N): cur_y = y_func(X[i], k1val, k1mval, k3val, k3mval) cur_k1 = k1_func(X[i], cur_y, k1mval, k2val, k3mval, k3val) k1_f[i] = cur_k1 y_f[i] = cur_y sa = S_A(X[i], cur_y, k1val, k1mval, k2val, k3mval, k3val) deltaa = delta_A(X[i], cur_y, k1val, k1mval, k2val, k3mval, k3val) di = sa * sa - 4 * deltaa if sa < 0: if fl != 0 and fl == 1: #print(X[i], cur_y) bifurcation_point.append([X[i], cur_y]) plt.plot(k1_f[i], X[i], 'r', marker='o', label="hopf_node") plt.plot(k1_f[i], y_f[i], 'r', marker='o') fl = -1 #print ('меньше нуля',i, sa, cur_y, cur_k2, fl) else: if fl != 0 and fl == -1: #print(X[i], cur_y) bifurcation_point.append([X[i], cur_y]) plt.plot(k1_f[i], X[i], 'r', marker='o', label="hopf_node") plt.plot(k1_f[i], y_f[i], 'r', marker='o') fl = 1 #print('больше нуля',i, sa, cur_y, cur_k2, fl) if deltaa < 0: if fl_delta != 0 and fl_delta == 1: #print ('определитель меньше нуля',i, deltaa, cur_y, cur_k2, fl) plt.plot(k1_f[i], X[i], 'k*', marker='o', label="saddle_node") plt.plot(k1_f[i], y_f[i], 'k*', marker='o') fl_delta = -1 else: if fl_delta != 0 and fl_delta == -1: #print('определитель больше нуля',i, deltaa, cur_y, cur_k2, fl) plt.plot(k1_f[i], X[i], 'k*', marker='o', label="saddle_node") plt.plot(k1_f[i], y_f[i], 'k*', marker='o') fl_delta = 1 line1, = plt.plot(k1_f, X, 'b--', label="x") line2, = plt.plot(k1_f, y_f, 'k', label="y") plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)}) if cur_k == 0: plt.title( 'Однопараметрический анализ. Зависимость стационарных решений от параметра k1, k_1 = ' + str(k1mval)) else: plt.title( 'Однопараметрический анализ. Зависимость стационарных решений от параметра k1, k_3 = ' + str(k3mval)) plt.plot(k1_f, X, 'b--') plt.plot(k1_f, y_f, 'k') plt.xlabel('k1') plt.ylabel('x,y') plt.show()
# All of this flexibility means that we have the necessary hooks to implement
# custom handlers for our own type of legend key.
#
# The simplest example of using custom handlers is to instantiate one of the
# existing :class:`~matplotlib.legend_handler.HandlerBase` subclasses. For the
# sake of simplicity, let's choose :class:`matplotlib.legend_handler.HandlerLine2D`
# which accepts a ``numpoints`` argument (note numpoints is a keyword
# on the :func:`legend` function for convenience). We can then pass the mapping
# of instance to Handler as a keyword to legend.

from matplotlib.legend_handler import HandlerLine2D

line1, = plt.plot([3, 2, 1], marker='o', label='Line 1')
line2, = plt.plot([1, 2, 3], marker='o', label='Line 2')

plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)})

###############################################################################
# As you can see, "Line 1" now has 4 marker points, where "Line 2" has 2 (the
# default). Try the above code, only change the map's key from ``line1`` to
# ``type(line1)``. Notice how now both :class:`~matplotlib.lines.Line2D` instances
# get 4 markers.
#
# Along with handlers for complex plot types such as errorbars, stem plots
# and histograms, the default ``handler_map`` has a special ``tuple`` handler
# (:class:`~matplotlib.legend_handler.HandlerTuple`) which simply plots
# the handles on top of one another for each item in the given tuple. The
# following example demonstrates combining two legend keys on top of one another:

from numpy.random import randn
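###############################################################################
# The tuple-handler example is cut off above right after the ``randn`` import;
# the lines below are a hedged reconstruction in the same spirit (the exact data
# and label strings are illustrative, and ``plt`` is assumed to be imported as in
# the earlier snippet): two handles passed as a tuple share a single legend key.

z = randn(10)

red_dot, = plt.plot(z, "ro", markersize=15)
# Put a white cross over some of the data.
white_cross, = plt.plot(z[:5], "w+", markeredgewidth=3, markersize=15)

# The default HandlerTuple draws both handles on top of one another for the
# second legend entry.
plt.legend([red_dot, (red_dot, white_cross)], ["Attr A", "Attr A+B"])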
def historical(overpass, save_name, ylim=None, grid=False, wind_sources=["MERRA", "ECMWF", "GEM"], steps=False): """Plots times series of wind direction and windspeed before and after an overpass. Gets wind data for 18 hours before and 6 hours after an overpass for all wind fields in wind_sources. Option "steps" will plot MERRA horizontally if True, else will connect MERRA wind speed and direction points. """ # absolute times: t_min = overpass.time.round(_round) + dt.timedelta(hours=-_hours_back) t_max = overpass.time.round(_round) + dt.timedelta(hours=_hours_forward) # these are the times you open a file for best_ecmwf_times = [ (t_min + dt.timedelta(hours=x * _ECMWF_best_step)) for x in numpy.arange( float(_hours_back + _hours_forward) / _ECMWF_best_step + 0.1) ] ecmwf_times = [(t_min + dt.timedelta(hours=x * _ECMWF_step)) for x in numpy.arange( float(_hours_back + _hours_forward) / _ECMWF_step + 1)] merra_times = [(t_min + dt.timedelta(hours=(x * _MERRA_step + 1.5))) for x in numpy.arange( float(_hours_back + _hours_forward) / _MERRA_step)] gem_times = [(t_min + dt.timedelta(hours=x * _GEM_step)) for x in numpy.arange( float(_hours_back + _hours_forward) / _GEM_step + 1)] xticks = numpy.linspace(-_hours_back, _hours_forward, _num_x_ticks) legend_handles = [] legend_labels = [] handle_map = {} fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True) if "ECMWF" in wind_sources: ecmwf_handle = mlines.Line2D([], [], color=_ecmwf_col, marker=_marker, label='ECMWF') legend_handles.append(ecmwf_handle) legend_labels.append('ECMWF') # handle_map.update({ecmwf_handle: HandlerLine2D(numpoints=2)}) try: ecmwf_speeds = [] ecmwf_bearings = [] ecmwf_x = [] for time in best_ecmwf_times: w = ReadWinds.get_new_ecmwf(time, interp=False, return_stability=False)[0] ecmwf_speeds.append(w.speed) ecmwf_bearings.append(w.bearing) ecmwf_x.append(dhour(overpass.time, time)) except Exception as exc: ecmwf_speeds = [] ecmwf_bearings = [] ecmwf_x = [] for time in ecmwf_times: w = ReadWinds.get_ecmwf(time, interp=False, return_stability=False) ecmwf_speeds.append(w.speed) ecmwf_bearings.append(w.bearing) ecmwf_x.append(dhour(overpass.time, time)) print "Using old ECMWF" print "Exception:" print exc ax1.plot(ecmwf_x, ecmwf_speeds, marker=_marker, color=_ecmwf_col) ax2.plot(ecmwf_x, ecmwf_bearings, marker=_marker, color=_ecmwf_col) if "MERRA" in wind_sources: merra_handle = mlines.Line2D([], [], color=_merra_col, marker=_marker, label='MERRA') legend_handles.append(merra_handle) legend_labels.append('MERRA') merra_speeds = [] merra_bearings = [] merra_x = [dhour(overpass.time, time) for time in merra_times] for i, time in enumerate(merra_times): x = merra_x[i] w = ReadWinds.get_merra(time) merra_speeds.append(w.speed) merra_bearings.append(w.bearing) if steps: ax1.plot([x - 1.5, x + 1.5], [w.speed, w.speed], marker=_cap, color=_merra_col) ax2.plot([x - 1.5, x + 1.5], [w.bearing, w.bearing], marker=_cap, color=_merra_col) ls = '' if steps else '-' if steps: handle_map.update({merra_handle: HandlerLine2D(numpoints=1)}) ax1.plot(merra_x, merra_speeds, marker=_marker, color=_merra_col, ls=ls) ax2.plot(merra_x, merra_bearings, marker=_marker, color=_merra_col, ls=ls) if "GEM" in wind_sources: gem_handle = mlines.Line2D([], [], color=_gem_col, marker=_marker, label='GEM') legend_handles.append(gem_handle) legend_labels.append('GEM') # handle_map.update({gem_handle: HandlerLine2D(numpoints=2)}) gem_speeds = [] gem_bearings = [] gem_x = [] for time in gem_times: try: w = ReadWinds.get_gem(time, interp=False) 
gem_speeds.append(w.speed) gem_bearings.append(w.bearing) gem_x.append(dhour(overpass.time, time)) except Exception as e: print "Exception:", e ax1.plot(gem_x, gem_speeds, marker=_marker, color=_gem_col) ax2.plot(gem_x, gem_bearings, marker=_marker, color=_gem_col) ax1.set_xticks(xticks) Formats.set_tickfont(ax1) Formats.set_tickfont(ax2) ax1.set_xlim(-_hours_back, _hours_forward) ax1.axvline(color=_time_indicator_colour, lw=_time_indicator_lw) time_indicator_handle = mlines.Line2D([], [], color=_time_indicator_colour, lw=_time_indicator_lw, label='Overpass Time') legend_handles.append(time_indicator_handle) legend_labels.append('Overpass Time') leg = plt.legend(legend_handles, legend_labels, handler_map=handle_map, bbox_to_anchor=(_leg_loc), loc=3, ncol=4, mode="expand", borderaxespad=0.1, prop=_leg_prop) ax1.set_ylabel(_ylabel_speed, fontsize=_labelfont, fontname=Formats.globalfont) ax1.grid(grid) ax2.set_xticks(xticks) ax2.set_xlim(-_hours_back, _hours_forward) ax2.axvline(color=_time_indicator_colour, lw=_time_indicator_lw) ax2.set_ylabel(_ylabel_dir, fontsize=_labelfont, fontname=Formats.globalfont) ax2.set_xlabel(_xlabel, fontsize=_labelfont, fontname=Formats.globalfont) if ylim: ax2.set_ylim(ylim) ax2.grid(grid) # set legend font to Formats.globalfont legtext = leg.get_texts() plt.setp(legtext, fontname=Formats.globalfont) plt.suptitle("Wind speed and direction for {0}".format(overpass.info), fontsize=_maintitle, fontname=Formats.globalfont) plt.subplots_adjust(**_adjust_params) plt.savefig(save_name, dpi=_dpi)
                     markersize=7, linewidth=2.0)
speaker2, = plt.plot(vector_2, label='Speaker 2', linestyle='-', marker='o',
                     markersize=7, linewidth=2.0)
speaker3, = plt.plot(vector_3, label='Speaker 3', linestyle='-', marker='o',
                     markersize=7, linewidth=2.0)
plt.legend(handler_map={speaker1: HandlerLine2D(numpoints=1)},
           prop={'size': 9}, loc='center right')

plt.subplot(212)
plt.title('Multi-speaker model with 3 Scottish accent speakers', fontsize=14)
plt.xlabel('Epoch')
plt.ylabel('RMSE')
plt.grid(True)

speaker1, = plt.plot(vector_4, label='Speaker 1', linestyle='-', marker='^',
                     markersize=7, linewidth=2.0)
speaker2, = plt.plot(vector_5,
# make plots
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))

[line1] = ax1.plot(expreq, source_sum[:, 0], c='r', marker='o', label='Fibre 1')
[line2] = ax1.plot(expreq, source_sum[:, 1], c='g', marker='x', label='Fibre 2')
[line3] = ax1.plot(expreq, source_sum[:, 2], c='b', marker='*', label='Fibre 3')
ax1.set_title('intensity - expreq')
ax1.set_xlabel('Exposure Time /s')
ax1.set_ylabel('Source Intensity Sum / ADU')
ax1.set_xlim(0, 6)
ax1.set_ylim(0, 1e7)
ax1.legend(handler_map={line1: HandlerLine2D(numpoints=3),
                        line2: HandlerLine2D(numpoints=3),
                        line3: HandlerLine2D(numpoints=3)}, loc=4)

[line1] = ax2.plot(expreq, flux[:, 0], c='r', marker='o', label='Fibre 1')
[line2] = ax2.plot(expreq, flux[:, 1], c='g', marker='x', label='Fibre 2')
[line3] = ax2.plot(expreq, flux[:, 2], c='b', marker='*', label='Fibre 3')
ax2.set_title('flux - expreq')
ax2.set_xlabel('Exposure Time /s')
ax2.set_ylabel(r'Source Flux / ' + r'$\mathrm{} \cdot \mathrm{} ^{}$'.format(
    '{ADU}', '{s}', '{-1}'))
def create_cumulative_graph(grouped_data, OUTDIR, ATTR): print('Processing data ...') # processing data graph_data = {} for domain, problems in grouped_data.items(): graph_data[domain] = {} for problem, algos in problems.items(): graph_data[domain][problem] = {} unsorted = [] for algo, val_list in algos.items(): for val in val_list: unsorted.append((val[ATTR], algo, 0)) sorted_data = sorted(unsorted, key=lambda pair: pair[0]) for algo in algos: graph_data[domain][problem][algo] = {'x': [], 'y': []} i = len(sorted_data) - 1 while i >= 0: j = i - 1 while j >= 0: if sorted_data[j][0] != sorted_data[i][0]: break j -= 1 for k in range(j + 1, i + 1): sorted_data[k] = (sorted_data[k][0], sorted_data[k][1], i + 1) i = j for (x, algo, y) in sorted_data: graph_data[domain][problem][algo]['x'].append(x) graph_data[domain][problem][algo]['y'].append(y) print('Creating graphs ...') # plotting fontP = FontProperties() fontP.set_size('small') for domain, problems in graph_data.items(): if not os.path.exists(OUTDIR + '/' + domain): os.makedirs(OUTDIR + '/' + domain) for problem, algos in problems.items(): fig = plt.figure() subplt = fig.add_subplot(111) subplt.set_xlabel(ATTR) subplt.set_ylabel('Cumulative count') for algo, xy_data in algos.items(): x = xy_data['x'] y = xy_data['y'] x = [x[0]] + x y = [0] + y if algo == 'base_unsat': subplt.plot(x, y, label=algo, linewidth=2) else: subplt.plot(x, y, label=algo) if len(str(x[0])) > 5: plt.setp(subplt.get_xticklabels(), rotation=30, horizontalalignment='right') subplt.grid(b=True, which='major', color='gray', linestyle='-') subplt.minorticks_on() subplt.grid(b=True, which='minor', color='gray') subplt.get_xaxis().get_major_formatter().set_useOffset(False) subplt.get_xaxis().get_major_formatter().set_scientific(False) subplt.margins(0.05) plt.legend(handler_map={subplt: HandlerLine2D(numpoints=1)}, prop=fontP, loc='upper left') plt.savefig(OUTDIR + '/' + domain + '/' + problem + '.png') plt.close() print('Created graph for ' + domain + ' ' + problem) return
        - sp.y0(h)*sp.j1(h))/((h**2)*((sp.j1(h)**2)+(sp.y1(h)**2)))), 0, m.inf)

    # Temperature at the borehole wall
    T_b_uss[i] = T_g + q/(k_g*(np.pi**2))*G[i]

#------------------------------------------------------------------------------
# Temperature profile at the borehole wall for the different models
#------------------------------------------------------------------------------
line1, = ax1.plot(tid_skala, T_b_pyg, 'k-', lw=1.5, label='Pygfunction')
line2, = ax1.plot(tid_skala, T_b_uls[:, 0], 'r-', lw=1.5, label='u_linjesluk')
line3, = ax1.plot(tid_skala, T_b_uss[:, 0], 'y-', lw=1.5, label='u_sylindersluk')

plt.legend(handler_map={line3: HandlerLine2D(numpoints=4)}, loc=4)
plt.show()

#------------------------------------------------------------------------------
# Temperature difference at the borehole wall for the different models
#------------------------------------------------------------------------------
plt.rc('figure')
fig = plt.figure()
ax2 = fig.add_subplot(111)

# Title ("Temperature difference at the borehole wall")
ax2.set_title('Temperaturdifferanse ved borehullveggen')

# Axis labels
ax2.set_xlabel(r'$ln(t/t_s)$')
ax2.set_ylabel(r'Temperatur [($^\circ$C)]')

# Axis limits
def analysis_k2(elem, k3a): global k3_value global alpha_value if k3a == 0: k3_value = elem if k3a == 1: alpha_value = elem fl = 0 fl_delta = 0 fl_di = 0 for i in range(N): cur_y = y_func(x_range[i], k1_value, k_1_value, k3_value, alpha_value) cur_k2 = k2_func(x_range[i], cur_y, k1_value, k_1_value, k_2_value, k3_value, alpha_value) k2_f[i] = cur_k2 y_f[i] = cur_y sa = S_A(x_range[i], cur_y, k1_value, k_1_value, k2_value, k_2_value, k3_value, alpha_value) deltaa = delta_A(x_range[i], cur_y, k1_value, k_1_value, k2_value, k_2_value, k3_value, alpha_value) di = sa * sa - 4 * deltaa if sa < 0: if fl != 0 and fl == 1: #print(x_range[i], cur_y) bifurcation_point.append([x_range[i], cur_y]) plt.plot(k2_f[i], x_range[i], 'r', marker='o', label="hopf_node") plt.plot(k2_f[i], y_f[i], 'r', marker='o') fl = -1 #print ('меньше нуля',i, sa, cur_y, cur_k2, fl) else: if fl != 0 and fl == -1: #print(x_range[i], cur_y) bifurcation_point.append([x_range[i], cur_y]) plt.plot(k2_f[i], x_range[i], 'r', marker='o', label="hopf_node") plt.plot(k2_f[i], y_f[i], 'r', marker='o') fl = 1 #print('больше нуля',i, sa, cur_y, cur_k2, fl) if deltaa < 0: if fl_delta != 0 and fl_delta == 1: #print ('определитель меньше нуля',i, deltaa, cur_y, cur_k2, fl) plt.plot(k2_f[i], x_range[i], 'k*', marker='o', label="saddle_node") plt.plot(k2_f[i], y_f[i], 'k*', marker='o') fl_delta = -1 else: if fl_delta != 0 and fl_delta == -1: #print('определитель больше нуля',i, deltaa, cur_y, cur_k2, fl) plt.plot(k2_f[i], x_range[i], 'k*', marker='o', label="saddle_node") plt.plot(k2_f[i], y_f[i], 'k*', marker='o') fl_delta = 1 line1, = plt.plot(k2_f, x_range, 'b--', label="x") line2, = plt.plot(k2_f, y_f, 'k', label="y") plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)}) if k3a == 0: plt.title( 'Однопараметрический анализ. Завсимость стационарных решений от параметра k2, k3 = ' + str(k3_value)) else: plt.title( 'Однопараметрический анализ. Завсимость стационарных решений от параметра k2, alpha = ' + str(alpha_value)) plt.plot(k2_f, x_range, 'b--') plt.plot(k2_f, y_f, 'k') plt.xlabel('k2') plt.ylabel('x,y') plt.show()
def main(parameters_file, list_of_pairs_of_files=[], image_output_file=None): """ Plot the results of the previously run design space exploration. """ show_samples = False filename, file_extension = os.path.splitext(parameters_file) if file_extension != ".json": print( "Error: invalid file name. \nThe input file has to be a .json file not a %s" % file_extension) exit(1) with open(parameters_file, 'r') as f: config = json.load(f) json_schema_file = 'scripts/schema.json' with open(json_schema_file, 'r') as f: schema = json.load(f) DefaultValidatingDraft4Validator = extend_with_default(Draft4Validator) DefaultValidatingDraft4Validator(schema).validate(config) application_name = config["application_name"] optimization_metrics = config["optimization_objectives"] feasible_output = config["feasible_output"] feasible_output_name = feasible_output["name"] run_directory = config["run_directory"] xlog = config["output_image"]["image_xlog"] ylog = config["output_image"]["image_ylog"] if "optimization_objectives_labels_image_pdf" in config["output_image"]: optimization_objectives_labels_image_pdf = config["output_image"][ "optimization_objectives_labels_image_pdf"] else: optimization_objectives_labels_image_pdf = optimization_metrics # Only consider the files in the json file if there are no input files. if list_of_pairs_of_files == []: output_pareto_file = config["output_pareto_file"] if output_pareto_file == "output_pareto.csv": output_pareto_file = application_name + "_" + output_pareto_file output_data_file = config["output_data_file"] if output_data_file == "output_samples.csv": output_data_file = application_name + "_" + output_data_file list_of_pairs_of_files.append( (deal_with_relative_and_absolute_path(run_directory, output_pareto_file), deal_with_relative_and_absolute_path(run_directory, output_data_file))) if image_output_file != None: output_image_pdf_file = image_output_file filename = os.path.basename(image_output_file) path = os.path.dirname(image_output_file) if path == "": output_image_pdf_file_with_all_samples = "all_" + filename else: output_image_pdf_file_with_all_samples = path + "/" + "all_" + filename else: tmp_file_name = config["output_image"]["output_image_pdf_file"] if tmp_file_name == "output_pareto.pdf": tmp_file_name = application_name + "_" + tmp_file_name output_image_pdf_file = deal_with_relative_and_absolute_path( run_directory, tmp_file_name) if tmp_file_name[0] == "/": filename = os.path.basename(output_image_pdf_file) path = os.path.dirname(output_image_pdf_file) output_image_pdf_file_with_all_samples = path + "/" + "all_" + filename else: output_image_pdf_file_with_all_samples = str(run_directory + "/" + "all_" + tmp_file_name) str_files = "" for e in list_of_pairs_of_files: str_files += str(e[0] + " " + e[1] + " ") print("######### plot_dse.py ##########################") print("### Parameters file is %s" % parameters_file) print("### The Pareto and DSE data files are: %s" % str_files) print("### The first output pdf image is %s" % output_image_pdf_file) print("### The second output pdf image is %s" % output_image_pdf_file_with_all_samples) print("################################################") param_space = space.Space(config) xelem = optimization_metrics[0] yelem = optimization_metrics[1] handler_map_for_legend = {} xlabel = optimization_objectives_labels_image_pdf[0] ylabel = optimization_objectives_labels_image_pdf[1] x_max = float("-inf") x_min = float("inf") y_max = float("-inf") y_min = float("inf") print_legend = True fig = plt.figure() ax1 = 
plt.subplot(1, 1, 1) if xlog: ax1.set_xscale('log') if ylog: ax1.set_yscale('log') objective_1_max = objective_2_max = 1 objective_1_is_percentage = objective_2_is_percentage = False if "objective_1_max" in config["output_image"]: objective_1_max = config["output_image"]["objective_1_max"] objective_1_is_percentage = True if "objective_2_max" in config["output_image"]: objective_2_max = config["output_image"]["objective_2_max"] objective_2_is_percentage = True input_data_array = {} fast_addressing_of_data_array = {} non_valid_optimization_obj_1 = defaultdict(list) non_valid_optimization_obj_2 = defaultdict(list) for file_pair in list_of_pairs_of_files: # file_pair is tuple containing: (pareto file, DSE file) next_color = get_next_color() ############################################################################# ###### Load data from files and do preprocessing on the data before plotting. ############################################################################# for file in file_pair: print(("Loading data from %s ..." % file)) input_data_array[file], fast_addressing_of_data_array[ file] = param_space.load_data_file(file, debug) if input_data_array[file] == None: print("Error: no data found in input data file: %s. \n" % file_pair[1]) exit(1) if (xelem not in input_data_array[file]) or ( yelem not in input_data_array[file]): print( "Error: the optimization variables have not been found in input data file %s. \n" % file) exit(1) print(("Parameters are " + str(list(input_data_array[file].keys())) + "\n")) input_data_array[file][xelem] = [ float(input_data_array[file][xelem][i]) / objective_1_max for i in range(len(input_data_array[file][xelem])) ] input_data_array[file][yelem] = [ float(input_data_array[file][yelem][i]) / objective_2_max for i in range(len(input_data_array[file][yelem])) ] if objective_1_is_percentage: input_data_array[file][xelem] = [ input_data_array[file][xelem][i] * 100 for i in range(len(input_data_array[file][xelem])) ] if objective_2_is_percentage: input_data_array[file][yelem] = [ input_data_array[file][yelem][i] * 100 for i in range(len(input_data_array[file][yelem])) ] x_max, x_min, y_max, y_min = compute_min_max_samples( input_data_array[file], x_max, x_min, xelem, y_max, y_min, yelem) input_data_array_size = len(input_data_array[file][list( input_data_array[file].keys())[0]]) print("Size of the data file %s is %d" % (file, input_data_array_size)) file_pareto = file_pair[0] # This is the Pareto file file_search = file_pair[1] # This is the DSE file ###################################################################################################### ###### Compute invalid samples to be plot in a different color (and remove them from the data arrays). 
###################################################################################################### if show_samples: i = 0 for ind in range(len(input_data_array[file][yelem])): if input_data_array[file][feasible_output_name][i] == False: non_valid_optimization_obj_2[file_search].append( input_data_array[file][yelem][i]) non_valid_optimization_obj_1[file_search].append( input_data_array[file][xelem][i]) for key in list(input_data_array[file].keys()): del input_data_array[file][key][i] else: i += 1 label_is = get_last_dir_and_file_names(file_pareto) all_samples, = plt.plot(input_data_array[file_search][xelem], input_data_array[file_search][yelem], color=next_color, linestyle='None', marker='.', mew=0.5, markersize=3, fillstyle="none", label=label_is) plt.plot(input_data_array[file_pareto][xelem], input_data_array[file_pareto][yelem], linestyle='None', marker='.', mew=0.5, markersize=3, fillstyle="none") handler_map_for_legend[all_samples] = HandlerLine2D(numpoints=1) ################################################################################################################ ##### Create a straight Pareto plot: we need to add one point for each point of the data in paretoX and paretoY. ##### We also need to reorder the points on the x axis first. ################################################################################################################ straight_pareto_x = list() straight_pareto_y = list() if len(input_data_array[file_pareto][xelem]) != 0: data_array_pareto_x, data_array_pareto_y = (list(t) for t in zip( *sorted( zip(input_data_array[file_pareto][xelem], input_data_array[file_pareto][yelem])))) for j in range(len(data_array_pareto_x)): straight_pareto_x.append(data_array_pareto_x[j]) straight_pareto_x.append(data_array_pareto_x[j]) straight_pareto_y.append(data_array_pareto_y[j]) straight_pareto_y.append(data_array_pareto_y[j]) straight_pareto_x.append( x_max) # Just insert the max on the x axis straight_pareto_y.insert( 0, y_max) # Just insert the max on the y axis label_is = "Pareto - " + get_last_dir_and_file_names(file_pareto) pareto_front, = plt.plot(straight_pareto_x, straight_pareto_y, label=label_is, linewidth=1, color=next_color) handler_map_for_legend[pareto_front] = HandlerLine2D(numpoints=1) label_is = "Invalid Samples - " + get_last_dir_and_file_names( file_search) if show_samples: non_valid, = plt.plot(non_valid_optimization_obj_1[file_search], non_valid_optimization_obj_2[file_search], linestyle='None', marker='.', mew=0.5, markersize=3, fillstyle="none", label=label_is) handler_map_for_legend[non_valid] = HandlerLine2D(numpoints=1) plt.ylabel(ylabel, fontsize=16) plt.xlabel(xlabel, fontsize=16) for tick in ax1.xaxis.get_major_ticks(): tick.label.set_fontsize( 14) # Set the fontsize of the label on the ticks of the x axis for tick in ax1.yaxis.get_major_ticks(): tick.label.set_fontsize( 14) # Set the fontsize of the label on the ticks of the y axis # Add the legend with some customizations if print_legend: lgd = ax1.legend(handler_map=handler_map_for_legend, loc='best', bbox_to_anchor=(1, 1), fancybox=True, shadow=True, ncol=1, prop={'size': 14}) # Display legend. font = {'size': 16} matplotlib.rc('font', **font) fig.savefig(output_image_pdf_file_with_all_samples, dpi=120, bbox_inches='tight') if objective_1_is_percentage: plt.xlim(0, 100) if objective_2_is_percentage: plt.ylim(0, 100) fig.savefig(output_image_pdf_file, dpi=120, bbox_inches='tight')
    Error[i] = np.power((y - D.item(i)), 2)
    e.append(Error.item(i))
    w1p.append(w1)
    w2p.append(w2)
    w3p.append(w3)
    # average = np.matrix.mean(Error)
    # AvgError.append(average)

    for i in range(0, 4):
        y = y_function(X.item(i, 0), X.item(i, 1), X.item(i, 2), w1, w2, w3)
        Error[i] = np.power((y - D.item(i)), 2)
    average = np.matrix.mean(Error)
    AvgError.append(average)

# Next lines are all about the plots required
plt.figure(1)

plt.subplot(311)
plt.plot(AvgError, '*r-')  # error plot
plt.ylabel('Average Error')
plt.xlabel('Iterations')
# plt.show()

plt.subplot(312)
w1_plot, = plt.plot(w1p, '*r-', label='W1')  # weights plot
w2_plot, = plt.plot(w2p, 'xb-', label='W2')
w3_plot, = plt.plot(w3p, 'og-', label='W3')
plt.legend(handler_map={w1_plot: HandlerLine2D(numpoints=4)})
plt.ylabel('Weight')
plt.xlabel('Iterations')

plt.subplot(313)
plt.plot(e, '+r-')
plt.ylabel('Error')
plt.xlabel('Iterations')

plt.show()
def plotSplitMetric(complete_train_loss_history, complete_val_loss_history, new_dir_path, metricName, iterNum, complete_test_loss_history=None): """ # lengthOfTrainLossHistory = len( complete_train_loss_history ) # lengthOfValLossHistory = len( complete_val_loss_history ) # lengthOfTestLossHistory = len( complete_test_loss_history ) """ train_loss_x = [] train_loss_y = [] val_loss_x = [] val_loss_y = [] if complete_test_loss_history is not None: test_loss_x = [] test_loss_y = [] for idx in xrange(len(complete_train_loss_history)): train_loss_x.append(complete_train_loss_history[idx][0]) train_loss_y.append(complete_train_loss_history[idx][1]) for idx in xrange(len(complete_val_loss_history)): val_loss_x.append(complete_val_loss_history[idx][0]) val_loss_y.append(complete_val_loss_history[idx][1]) if complete_test_loss_history is not None: for idx in xrange(len(complete_test_loss_history)): test_loss_x.append(complete_test_loss_history[idx][0]) test_loss_y.append(complete_test_loss_history[idx][1]) plt.subplot(1, 1, 1) plt.title( '%s vs. Number of Iterations\n RED=TRAINING, BLUE=VALIDATION, GREEN=TEST' % (metricName)) line1, = plt.plot(train_loss_x, train_loss_y, '-ro', label='Training %s' % (metricName)) line2, = plt.plot(val_loss_x, val_loss_y, '-bo', label='Val %s' % (metricName)) if complete_test_loss_history is not None: line3, = plt.plot(test_loss_x, test_loss_y, '-go', label='Test %s' % (metricName)) plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)}) plt.xlabel('Iteration') plt.ylabel(metricName) if metricName == 'Accuracy': plt.ylim([0.0, 1.0]) elif metricName == 'Loss': plt.ylim([0, 7]) plt.gcf().set_size_inches(15, 12) lossPlotFileName = './%s/YOLO%sHistory_%d.png' % (new_dir_path, metricName, iterNum) plt.savefig(lossPlotFileName) plt.clf()
line3, = plt.plot(angle, Ftran0, '-b', linewidth=2, label=r'$F_{\perp}$ for B=0')
line4, = plt.plot(angle, FtranB, '--b', linewidth=2, label=r'$F_{\perp}$ for B=$\infty$')
plt.title('Friction Force for Proton', color='m', fontsize=20)
plt.xlabel('Angle Relative to Beam Axis, rad', color='m', fontsize=16)
plt.ylabel('Value Proportional to Friction Force, rel.units', color='m', fontsize=16)
plt.legend(handler_map={line1: HandlerLine2D()}, loc=4)
plt.grid(True)

#
# Verifying Parkhomchuk's formulae from
# Bruhwiler et al "Direct simulation ..." (Fig. 3):
#
n_e = 2.0e9          # cm^-3
Bmag = 5.0e4         # Gs
q_e = 4.8e-10        # CGSE
m_e = 9.1e-28        # g
m_i = 67.1e-24       # g
c_light = 2.99e10    # cm/s
Ve_tranRMS = 8.e8    # cm/s
Ve_longRMS = 1.e7    # cm/s
    cat, snap = paths[alpha_key]

    radial_distance, densities, _, _, M500, R500 = profile_3d_particles(
        path_to_snap=snap,
        path_to_catalogue=cat,
    )

    ax.plot(radial_distance[::10], densities[::10],
            marker=',', lw=0, linestyle="",
            c=cmap(float(alpha_key)), alpha=0.5,
            label=f"Alpha_max = {alpha_key}")

ax.set_ylabel(r'$\rho_{gas} / \rho_{crit}$')
ax.set_ylim([1, 1e3])

plt.legend(handler_map={plt.Line2D: HandlerLine2D(update_func=update_prop)})

fig.suptitle(
    (f"Aperture = 2 $r_{{500}}$\t\t"
     f"$z = 0$\n"
     f"VR18_-8res_MinimumDistance_fixedAGNdT8.5_Nheat1_alpha*_no_adiabatic\n"
     f"Central FoF group only\n"
     f"Top row: shell-averaged, bottom row: particle-dots ([::20])"),
    fontsize=7)

plt.show()
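# Note: `update_prop` above is defined elsewhere in this script and is not shown
# here. The function below is a hypothetical sketch of such an update_func hook,
# included for illustration only: HandlerLine2D calls it with the legend's proxy
# artist and the original handle, so it can copy the line's properties and then
# override the ones (alpha, marker size) that make a faint pixel-marker line
# unreadable in the legend.
def update_prop_sketch(legend_handle, orig_handle):
    legend_handle.update_from(orig_handle)   # copy colour, linestyle, etc.
    legend_handle.set_alpha(1.0)              # full opacity in the legend
    legend_handle.set_marker('o')             # visible marker instead of a pixel marker
    legend_handle.set_markersize(6)

# Hypothetical usage, mirroring the call above:
# plt.legend(handler_map={plt.Line2D: HandlerLine2D(update_func=update_prop_sketch)})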
def Analys_K2(k_1, k_m_1, k_m_2, k_3_0, Alpha, Tolerance): # задаем переменные y = numpy.linspace(0.001, 0.987, 1000) N = numpy.size(y) x = numpy.zeros(N) z = numpy.zeros(N) phi = numpy.zeros(N) phi_m = numpy.zeros(N) k_2 = numpy.zeros(N) sled = numpy.zeros(N) Det_A = numpy.zeros(N) # заданное значение для pfi phi[0] = pow((1 - y[0]), Alpha) phi_m[0] = pow((1 - y[0]), Alpha - 1) # выражаем x через y для нулевого элемента сетки x[0] = k_1 * (1 - y[0]) / (k_1 + k_m_1 + k_3_0 * phi[0] * y[0]) # выражаем значение k_2 через остальные параметры и переменную y для нулевого элемента сетки k_2[0] = ( k_m_2 * pow(y[0], 2) * pow((k_1 + k_m_1 + k_3_0 * phi[0] * y[0]), 2) + (k_1 + k_m_1 + k_3_0 * phi[0] * y[0]) * k_1 * k_3_0 * phi[0] * y[0] * (1 - y[0])) / (pow((1 - y[0]), 2) * pow( (k_m_1 + k_3_0 * phi[0] * y[0]), 2)) # выпишем элементы матрицы Якоби для нулевого элемента сетки a11 = -k_1 - k_m_1 - k_3_0 * phi[0] * y[0] a12 = -k_1 - k_3_0 * x[0] + k_3_0 * Alpha * phi_m[0] * y[0] * x[0] a21 = -2 * k_2[0] * z[0] - k_3_0 * phi[0] * y[0] a22 = -2 * k_2[0] * z[0] - 2 * k_m_2 * y[0] - k_3_0 * phi[0] * x[ 0] + k_3_0 * Alpha * phi_m[0] * y[0] * x[0] # вычисляем след матрицы Якоби sled[0] = a11 + a22 # вычисляем определитель матрицы Якоби Det_A[0] = a11 * a22 - a12 * a21 for i in range(1, N): # заданное значение для pfi phi[i] = pow((1 - y[i]), Alpha) phi_m[i] = pow((1 - y[i]), Alpha - 1) # выражаем x через y для различных элементов сетки x[i] = k_1 * (1 - y[i]) / (k_1 + k_m_1 + k_3_0 * phi[i] * y[i]) # выражаем значение k_2 через остальные параметры и переменную y для различных элементов сетки k_2[i] = (k_m_2 * pow(y[i], 2) * pow( (k_1 + k_m_1 + k_3_0 * phi[i] * y[i]), 2) + (k_1 + k_m_1 + k_3_0 * phi[i] * y[i]) * k_1 * k_3_0 * phi[i] * y[i] * (1 - y[i])) / (pow((1 - y[i]), 2) * pow( (k_m_1 + k_3_0 * phi[i] * y[i]), 2)) # выпишем элементы матрицы Якоби для различных элементов сетки a11 = -k_1 - k_m_1 - k_3_0 * phi[i] * y[i] a12 = -k_1 - k_3_0 * phi[i] * x[i] + k_3_0 * Alpha * phi_m[i] * y[ i] * x[i] a21 = -2 * k_2[i] * z[i] - k_3_0 * phi[i] * y[i] a22 = -2 * k_2[i] * z[i] - 2 * k_m_2 * y[i] - k_3_0 * phi[i] * x[ i] + k_3_0 * Alpha * phi_m[i] * y[i] * x[i] # вычисляем след матрицы Якоби sled[i] = a11 + a22 # вычисляем определитель матрицы Якоби Det_A[i] = a11 * a22 - a12 * a21 # отрисовка графиков и точек бифуркации # седло-узловая бифуркация if (Det_A[i] * Det_A[i - 1] < Tolerance): y_new = y[i - 1] - Det_A[i - 1] * (y[i] - y[i - 1]) / ( Det_A[i] - Det_A[i - 1]) x_new = k_1 * (1 - y_new) / (k_1 + k_m_1 + k_3_0 * pow( (1 - y_new), Alpha) * y_new) k_2_new = (k_m_2 * pow(y_new, 2) * pow((k_1 + k_m_1 + k_3_0 * pow( (1 - y_new), Alpha) * y_new), 2) + (k_1 + k_m_1 + k_3_0 * pow( (1 - y_new), Alpha) * y_new) * k_1 * k_3_0 * pow( (1 - y_new), Alpha) * y_new * (1 - y_new)) / (pow( (1 - y_new), 2) * pow((k_m_1 + k_3_0 * pow( (1 - y_new), Alpha) * y_new), 2)) matplot.plot(k_2_new, x_new, 'g*', marker='o') matplot.plot(k_2_new, y_new, 'g*', marker='o') # бифуркация Хопфа if (sled[i] * sled[i - 1] < Tolerance): y_new = y[i - 1] - sled[i - 1] * (y[i] - y[i - 1]) / (sled[i] - sled[i - 1]) x_new = k_1 * (1 - y_new) / (k_1 + k_m_1 + k_3_0 * pow( (1 - y_new), Alpha) * y_new) k_2_new = (k_m_2 * pow(y_new, 2) * pow((k_1 + k_m_1 + k_3_0 * pow( (1 - y_new), Alpha) * y_new), 2) + (k_1 + k_m_1 + k_3_0 * pow( (1 - y_new), Alpha) * y_new) * k_1 * k_3_0 * pow( (1 - y_new), Alpha) * y_new * (1 - y_new)) / (pow( (1 - y_new), 2) * pow((k_m_1 + k_3_0 * pow( (1 - y_new), Alpha) * y_new), 2)) matplot.plot(k_2_new, x_new, 'k*', marker='o') 
            matplot.plot(k_2_new, y_new, 'k*', marker='o')

    # Title: "One-parameter analysis. Dependence of the steady-state solutions on the parameter k_2"
    matplot.title(
        'Однопараметрический анализ. Завсимость стационарных решений от параметра k_2'
    )
    line1, = matplot.plot(k_2, x, 'b--', label="x")
    line2, = matplot.plot(k_2, y, 'r', label="y")
    matplot.legend(handler_map={line1: HandlerLine2D(numpoints=4)})
    matplot.xlim((0, 0.7))
    matplot.xlabel('k_2')
    matplot.ylabel('x,y')
    matplot.grid(True)
    matplot.show()
    return
def train_frac(X, y, p, k): train_accu1 = [] test_accu1 = [] train_accu2 = [] test_accu2 = [] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20) scaler = StandardScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) train_size = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] for size in train_size: X1_train, X2_test, y1_train, y1_test = train_test_split( X_train, y_train, train_size=size, test_size=0.2, random_state=1) classifier1 = KNeighborsClassifier(n_neighbors=k, weights='uniform', algorithm='auto', p=p) classifier2 = KNeighborsClassifier(n_neighbors=k, weights='distance', algorithm='auto', p=p) start_time = time.time() classifier1.fit(X1_train, y1_train) print("--- %s seconds ---" % (time.time() - start_time)) start_time = time.time() classifier2.fit(X1_train, y1_train) print("--- %s seconds ---" % (time.time() - start_time)) accu_train1 = classifier1.score(X1_train, y1_train) accu_test1 = classifier1.score(X_test, y_test) accu_train2 = classifier2.score(X1_train, y1_train) accu_test2 = classifier2.score(X_test, y_test) train_accu1.append(accu_train1) test_accu1.append(accu_test1) train_accu2.append(accu_train2) test_accu2.append(accu_test2) train_accuracy = np.asarray(train_accu1) test_accuracy = np.asarray(test_accu1) train_accuracy2 = np.asarray(train_accu2) test_accuracy2 = np.asarray(test_accu2) train_size = np.asarray(train_size) #print(k_arrayprint() print(test_accuracy) print(train_accuracy) print(test_accuracy2) print(train_accuracy2) line1, = plt.plot(train_size, train_accuracy, color='r', label='train_accuracy_uniform') line2, = plt.plot(train_size, test_accuracy, color='b', label='test_accuracy_uniform') line3, = plt.plot(train_size, train_accuracy2, color='g', label='train_accuracy_distance') line4, = plt.plot(train_size, test_accuracy2, color='k', label='test_accuracy_distance') plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)}) plt.ylabel('Accuracy_score') plt.xlabel('Train size fraction') plt.show() return None
def main(): w = [[0.1, 1, -0.1]] M = [[1, 1, 0]] H = [[0, 0, 0]] I = np.matrix([[2, 0, 0], [0, 2, 0], [0, 0, 2]]) zero = [0] h = float(input("Enter interval \n")) time = [] wx = [] wy = [] wz = [] energy = [] energy.append(0.5 * I.item(0) * (w[0][0]) * (w[0][0]) + 0.5 * I.item(4) * (w[0][1]) * (w[0][1]) + 0.5 * I.item(8) * (w[0][2]) * (w[0][2])) wx.append(w[0][0]) wy.append(w[0][1]) wz.append(w[0][2]) H.append(matrixvectprod(I, w[0])) t0 = float(input("Enter t0 \n")) deltaT = float(input("Enter DeltaT : simulation t -> t + deltaT \n")) time.append(t0) for i in range(int(deltaT / h)): k1 = scalarvectprod(h, fdot(t0 + i * h, w[i], I, M[0])) k2 = scalarvectprod( h, fdot(t0 + i * h + h / 2, addvectors(w[i], scalarvectprod(0.5, k1)), I, M[0])) k3 = scalarvectprod( h, fdot(t0 + i * h + h / 2, addvectors(w[i], scalarvectprod(0.5, k2)), I, M[0])) k4 = scalarvectprod( h, fdot(t0 + i * h + h, addvectors(w[i], k3), I, M[0])) ksum = addvectors(addvectors(k1, k4), scalarvectprod(2, addvectors(k2, k3))) w.append(addvectors(w[i], scalarvectprod(1 / 6, ksum))) wx.append(w[i + 1][0]) wy.append(w[i + 1][1]) wz.append(w[i + 1][2]) energy.append(0.5 * I.item(0) * (w[i + 1][0]) * (w[i + 1][0]) + 0.5 * I.item(4) * (w[i + 1][1]) * (w[i + 1][1]) + 0.5 * I.item(8) * (w[i + 1][2]) * (w[i + 1][2])) # print (mod(w[i]), " ") time.append(t0 + (i + 1) * h) zero.append(0) H.append(matrixvectprod(I, w[i + 1])) # plt.plot( time, H[0:], label = 'line') # plt.plot( time, energy, label = "energy") # plt.plot.Axes.set_ybounds(-1.6, 1.6) # line1, = plt.plot( time, energy, 'r', label = "Energy(J)") line1, = plt.plot(time, wx, label="wx") # line2, = plt.plot( time, wy, label = "wy") # line3, = plt.plot( time, wz, label = "wz") # print(energy) # line5, = plt.plot( time, zero, 'w') # plt.title('Angular velocity v/s Time plot') plt.title('Energy v/s Time plot') plt.xlabel('Time (seconds)') # plt.ylabel('Angular velocity(Hz)') plt.ylabel('Energy (J)') plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)})
def neighbors(k, X, y, p): train_accu1 = [] test_accu1 = [] train_accu2 = [] test_accu2 = [] k_array = [] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20) X_train1 = X_train.iloc[0:40000] y_train1 = y_train.iloc[0:40000] scaler = StandardScaler() scaler.fit(X_train) X_train1 = scaler.transform(X_train1) X_test = scaler.transform(X_test) for k in range(3, k): classifier1 = KNeighborsClassifier(n_neighbors=k, weights='uniform', algorithm='auto', p=p) classifier2 = KNeighborsClassifier(n_neighbors=k, weights='distance', algorithm='auto', p=p) start_time = time.time() classifier1.fit(X_train1, y_train1) print("--- %s seconds ---" % (time.time() - start_time)) start_time = time.time() classifier2.fit(X_train, y_train) print("--- %s seconds ---" % (time.time() - start_time)) accu_train1 = classifier1.score(X_train1, y_train1) accu_test1 = classifier1.score(X_test, y_test) accu_train2 = classifier2.score(X_train1, y_train1) accu_test2 = classifier2.score(X_test, y_test) train_accu1.append(accu_train1) test_accu1.append(accu_test1) train_accu2.append(accu_train2) test_accu2.append(accu_test2) k_array.append(k) train_accuracy = np.asarray(train_accu1) test_accuracy = np.asarray(test_accu1) train_accuracy2 = np.asarray(train_accu2) test_accuracy2 = np.asarray(test_accu2) k_array = np.asarray(k_array) #print(k_arrayprint() print(test_accuracy) print(train_accuracy) print(test_accuracy2) print(train_accuracy2) line1, = plt.plot(k_array, train_accuracy, color='r', label='train_accuracy_uniform') line2, = plt.plot(k_array, test_accuracy, color='b', label='test_accuracy_uniform') line3, = plt.plot(k_array, train_accuracy2, color='g', label='train_accuracy_distance') line4, = plt.plot(k_array, test_accuracy2, color='k', label='test_accuracy_distance') plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)}) plt.ylabel('Accuracy_score') plt.xlabel('Number of nearest neighbors') plt.show() return None
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D

# LEARNING RATE
indexes = [0.01, 0.05, 0.1, 0.2, 0.25, 0.4, 0.5, 0.6, 0.75, 0.8, 1]
a = [0.59760789249756063, 0.61590204604838972, 0.62757908862988399,
     0.63959779094959845, 0.64272750653770394, 0.64971642333153923,
     0.65122895394966451, 0.65193148793999756, 0.65490149968264333,
     0.6561804636314692, 0.65722494391977826]
b = [0.59514008004574037, 0.61461586296735105, 0.62833533742403613,
     0.63866244628742197, 0.64024250971099828, 0.64528220839006678,
     0.64356347957301074, 0.64709193176660595, 0.64695223485585274,
     0.64540156967457107, 0.64650684043311935]

l1, = plt.plot(indexes, a, label='Train AUC')
l2, = plt.plot(indexes, b, color='red', label='Test AUC')
plt.legend(handler_map={l1: HandlerLine2D(numpoints=2)})
plt.ylabel('AUC')
plt.xlabel('Learning Rate')
plt.axis([0, 1, 0.50, 0.70])
plt.axvline(x=0.6, color='g')
plt.show()

# ESTIMATORS
indexes = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 600, 750, 1024]
a = [0.58118881814468415, 0.59697523951503584, 0.60794758253318737,
     0.6155907916647998, 0.63258273990606018, 0.64410367428474147,
     0.65129479310034966, 0.66134878988257828, 0.67047839265347253,
     0.68087061045685537, 0.68385042254313955, 0.68677558631947677,
     0.69252039984102975]
b = [0.58092400370167341, 0.59178698578863909, 0.60729879518782459,
     0.61640731375218816, 0.62944245017771572, 0.6398747001231927,
     0.64266340707208247, 0.64276213801111681, 0.64665095813218754,
     0.64453656876094145, 0.64414761306902912, 0.64210358791179645,
     0.64470256936216863]

l1, = plt.plot(indexes, a, label='Train AUC')
l2, = plt.plot(indexes, b, color='red', label='Test AUC')
plt.legend(handler_map={l1: HandlerLine2D(numpoints=2)})
'r--', linewidth=lwidth, label='Train Set -' + label2) line2, = pyplot.plot(train_inds, error_analysis['train_size']['test'][-1, :, 1], 'r', linewidth=lwidth, label='Test Set -' + label2) pyplot.ylim(.75, .85) pyplot.xlabel('Train Set Size (batches of ' + str(batch_size) + ')', fontsize=axis_fontsize) pyplot.ylabel('Proportion Correct', fontsize=axis_fontsize) pyplot.title('Performance', fontsize=title_fontsize) pyplot.legend(fontsize=20, handler_map={line1: HandlerLine2D()}, loc='center left', bbox_to_anchor=(1, 0.5), bbox_transform=pyplot.gcf().transFigure) savefig('TrainSizeEffects.png', bbox_inches='tight') pyplot.show() ''' ####Test effects of regularizer values on performance########################## ''' for reg_val_idx, reg_val in enumerate(regularizer_vals): input_data = {} target_data = {} input_data['train'] = all_input_data['train'] target_data['train'] = all_target_data['train']
def graphPlot():
    # plotting
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Positive wavefunction data:
    ax.scatter(low_prob_positive[0], low_prob_positive[1], low_prob_positive[2],
               alpha=0.05, s=2, color='cornflowerblue',
               label=f'Between {low*100}% and {middle*100}%')
    ax.scatter(middle_prob_positive[0], middle_prob_positive[1], middle_prob_positive[2],
               alpha=0.05, s=2, color='blue',
               label=f'Between {middle*100}% and {high*100}%')
    ax.scatter(high_prob_positive[0], high_prob_positive[1], high_prob_positive[2],
               alpha=0.05, s=2, color='red', label=f'More than {high*100}%')

    # Negative wavefunction data:
    ax.scatter(low_prob_negative[0], low_prob_negative[1], low_prob_negative[2],
               alpha=0.05, s=2, color='cornflowerblue')
    ax.scatter(middle_prob_negative[0], middle_prob_negative[1], middle_prob_negative[2],
               alpha=0.05, s=2, color='blue')
    ax.scatter(high_prob_negative[0], high_prob_negative[1], high_prob_negative[2],
               alpha=0.05, s=2, color='red')

    # Axes
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    # Title
    ax.set_title(f'Hydrogen Orbital (n = {n}, l = {l}, m = {m})')

    # Legend
    plt.legend(handler_map={PathCollection: HandlerPathCollection(update_func=update),
                            plt.Line2D: HandlerLine2D(update_func=update)},
               markerscale=4)

    # save
    plt.savefig(destination)
y_pos = np.arange(len(objects))
plt.xticks(y_pos, objects)
plt.ylabel('Rho *')
plt.xlabel('c')

S0.reverse()
S1.reverse()
S2.reverse()
S3.reverse()
S4.reverse()

ax1, = plt.plot(S0, label='So = 0.5U')
ax2, = plt.plot(S1, label='So = 0.6U')
ax3, = plt.plot(S2, label='So = 0.7U')
ax4, = plt.plot(S3, label='So = 0.8U')
ax5, = plt.plot(S4, label='So = 0.9U')

# One legend call with all five handles in the handler_map; calling plt.legend()
# repeatedly replaces the previous legend each time, so only the last mapping
# would take effect.
plt.legend(handler_map={ax1: HandlerLine2D(numpoints=1),
                        ax2: HandlerLine2D(numpoints=1),
                        ax3: HandlerLine2D(numpoints=1),
                        ax4: HandlerLine2D(numpoints=1),
                        ax5: HandlerLine2D(numpoints=1)})

# In[64]:

len(plot_1)

# In[69]:

print plot_1[0]

# In[92]:
print "biases3 =", biases3
print "avgdvalues1 =", avgdvalues1
print "avgdvalues2 =", avgdvalues2
print "avgdvalues3 =", avgdvalues3

plt.figure()
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.10, box.width, box.height * .90])
fontP = FontProperties()
fontP.set_size('small')
plt.xlim(0, 60)

line1, = plt.plot(samplepercents100, biases1, 'bo-', label='Random node sampling')
line2, = plt.plot(samplepercents100, biases2, 'rs-', label='Modified random node sampling')
line3, = plt.plot(samplepercents100, biases3, 'c^-', label='Node sampling via random walk')

# A single legend call covering all three lines; repeated plt.legend() calls
# overwrite each other, leaving only the last handler_map in effect.
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.12), fancybox=True,
           shadow=True, ncol=2,
           handler_map={line1: HandlerLine2D(numpoints=2),
                        line2: HandlerLine2D(numpoints=2),
                        line3: HandlerLine2D(numpoints=2)},
           prop=fontP)

plt.suptitle('Comparison of Average Clustering Coefficient Bias vs Random node sample % plot till year ' + str(curyear), fontsize=12)
plt.xlabel('Random node sample %')
plt.ylabel('Average Clustering Coefficient Bias')
plt.savefig('../graphs/Comparison of Average Clustering Coefficient Bias vs Random node sample % plot till year ' + str(curyear) + '.png')
plt.close()

plt.figure()
plt.xlim(0, 60)
ax = plt.subplot(111)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.10, box.width, box.height * .90])
fontP = FontProperties()
fontP.set_size('small')
def execute(self): """ Execute the simulation and display the result on graph """ # VARIABLES x, y = [], [] var = self._params[0] # starting value plt.xlabel(self._label) plt.ylabel('Power output (Watts)') df = self._data # Select the study variable while var < self._params[1]: # _params[3] = end if self._type == 'w': df[0] = var elif self._type == 'r': df[1] = var elif self._type == 'a': df[2] = var elif self._type == 't': df[3] = var elif self._type == 'h': df[4] = var elif self._type == 'c': df[5] = var x.append(var) if self._density is None: y.append( power.turbine_power(df[0], df[1], df[2], df[3], df[4], df[5])) else: y.append( power.turbine_power(df[0], df[1], df[2], df[3], df[4], df[5], self._density)) var += self._params[2] # self._params[2] = incrementation value text = "" if self._type == 'r': text = "Constants: v = %sm/s; alt = %sm; t = %sC; RH = %s%%; Cp = %s" % ( df[0], df[2], df[3], df[4], df[5]) plt.title('Variable Radius Study') elif self._type == 'w': text = "Constants: r = %sm; alt = %sm; t = %sC; RH = %s%%; Cp = %s" % ( df[1], df[2], df[3], df[4], df[5]) plt.title('Variable Wind Speed Study') elif self._type == 'a': text = "Constants: v = %sm/s; r = %sm; t = %sC; RH = %s%%; Cp = %s" % ( df[0], df[1], df[3], df[4], df[5]) plt.title('Variable Altitude Study') elif self._type == 't': text = "Constants: v = %sm/s; r = %sm; a = %sm; RH = %s%%; Cp = %s" % ( df[0], df[1], df[2], df[4], df[5]) plt.title('Variable Temperature Study') elif self._type == 'h': text = "Constants: v = %sm/s; r = %sm; a = %sm; t = %sC; Cp = %s" % ( df[0], df[1], df[2], df[3], df[5]) plt.title('Variable Humidity Study') elif self._type == 'c': text = "Constants: v = %sm/s; r = %sm; a = %sm; t = %sC; RH = %s%%" % ( df[0], df[1], df[2], df[3], df[4]) plt.title('Variable Coefficient of Power Study') line1, = plt.plot(x, y, label=text, linewidth=2) if legend: plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)}) return plt
def plot_outer_legend(plot_data, description, xlabel, ylabel, xscale, yscale, file_name_without_ext, style): # print("(plot_helpers.plot_outer_legend)xlabel:", xlabel) # print("(plot_helpers.plot_outer_legend)plot_data:", plot_data) rc('font', **FONT) plt.clf() plt.subplot(111) for_plotlib = [list(), list()] for label, line_data in plot_data.items(): for_plotlib[0].append(label) for_plotlib[1].append(line_data) for_plotlib = synchronous_sort(for_plotlib, 0, lambda_func=lambda x: float(x) if isnumber(x) else x) lines = list() labels = list() if style['no_line']: linestyle = 'None' else: linestyle = 'solid' # print("(plot_helpers.plot_outer_legend)linestyle:", linestyle) for idx, (label, line_data) in enumerate(zip(*for_plotlib)): # if idx == 0: # print("(plot_helpers.plot_outer_legend)line_data:", line_data) if label is None or label == 'None': label = '' labels.append(label) if idx > len(COLORS) - 1: color = [ random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1) ] else: color = COLORS[idx] if len(line_data) > 2: errors = line_data[2] errors = [0. if e is None else e for e in errors] else: errors = None if style['error'] == 'fill': yerr = None ym = [y - e for y, e in zip(line_data[1], errors)] yp = [y + e for y, e in zip(line_data[1], errors)] plt.fill_between( line_data[0], ym, yp, alpha=.4, color=color, ) elif style['error'] == 'bar': yerr = errors else: yerr = None # print("(plot_helpers.plot_outer_legend)yerr:", yerr) # print("(plot_helpers.plot_outer_legend)line_data:", line_data) lines.append( plt.errorbar( line_data[0], line_data[1], yerr=yerr, marker=style['marker'], color=color, label=label, ls=linestyle, )[0]) # print("(plot_helpers.plot_outer_legend)labels:", labels) plt.xlabel(xlabel) plt.ylabel(ylabel) scale_kwargs = dict() if xscale == 'symlog': linthreshx = get_linthreshx(for_plotlib[1], ) scale_kwargs['linthreshx'] = linthreshx plt.xscale(xscale, **scale_kwargs) plt.yscale(yscale) there_is_labels = False for label in labels: if len(label) > 0: there_is_labels = there_is_labels or True if there_is_labels: handler_map = dict( list( zip(lines, [HandlerLine2D(numpoints=1) for _ in range(len(lines))]))) # print("(plot_helpers.plot_outer_legend)handler_map:", handler_map) ax = plt.gca() handles, labels = ax.get_legend_handles_labels() handles = [ h[0] if isinstance(h, container.ErrorbarContainer) else h for h in handles ] lgd = ax.legend( handles, labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handler_map=handler_map, ) bbox_extra_artists = [lgd] else: bbox_extra_artists = () # lgd = plt.legend( # bbox_to_anchor=(1.05, 1), # loc=2, # borderaxespad=0., # handler_map=handler_map, # ) for format in FORMATS: if format == 'pdf': fig_path = os.path.join(file_name_without_ext + '.pdf') elif format == 'png': fig_path = os.path.join(file_name_without_ext + '.png') else: fig_path = None create_path(fig_path, file_name_is_in_path=True) r = plt.savefig(fig_path, bbox_extra_artists=bbox_extra_artists, bbox_inches='tight') # print("%s %s %s %s:" % (pupil_name, res_type, regime, format), r) if description is not None: description_file = os.path.join(file_name_without_ext + '.txt') with open(description_file, 'w') as f: f.write(description)
def variable_study(study, default=None, legend=True, density=None): """ study [study type, label title, start, end , increment] default [wind speed, radius, altitude, temperature, humidity, power coefficient] """ # VARIABLES x, y = [], [] var = study[2] # starting value plt.xlabel(study[1]) plt.ylabel('Power output (Watts)') if default is None: df = [10, 0.5, 100, 19, 80, 0.4] else: df = default while var < study[3]: # study[3] = end if study[0] == 'w': df[0] = var elif study[0] == 'r': df[1] = var elif study[0] == 'a': df[2] = var elif study[0] == 't': df[3] = var elif study[0] == 'h': df[4] = var elif study[0] == 'c': df[5] = var else: pass x.append(var) if density is None: y.append( power.turbine_power(df[0], df[1], df[2], df[3], df[4], df[5])) else: y.append( power.turbine_power(df[0], df[1], df[2], df[3], df[4], df[5], density)) var += study[4] # study[4] = increment if study[0] == 'r': text = "Constants: v = %sm/s; alt = %sm; t = %sC; RH = %s%%; Cp = %s" % ( df[0], df[2], df[3], df[4], df[5]) plt.title('Variable Radius Study') elif study[0] == 'w': text = "Constants: r = %sm; alt = %sm; t = %sC; RH = %s%%; Cp = %s" % ( df[1], df[2], df[3], df[4], df[5]) plt.title('Variable Wind Speed Study') elif study[0] == 'a': text = "Constants: v = %sm/s; r = %sm; t = %sC; RH = %s%%; Cp = %s" % ( df[0], df[1], df[3], df[4], df[5]) plt.title('Variable Altitude Study') elif study[0] == 't': text = "Constants: v = %sm/s; r = %sm; a = %sm; RH = %s%%; Cp = %s" % ( df[0], df[1], df[2], df[4], df[5]) plt.title('Variable Temperature Study') elif study[0] == 'h': text = "Constants: v = %sm/s; r = %sm; a = %sm; t = %sC; Cp = %s" % ( df[0], df[1], df[2], df[3], df[5]) plt.title('Variable Humidity Study') elif study[0] == 'c': text = "Constants: v = %sm/s; r = %sm; a = %sm; t = %sC; RH = %s%%" % ( df[0], df[1], df[2], df[3], df[4]) plt.title('Variable Coefficient of Power Study') line1, = plt.plot(x, y, label=text, linewidth=2) if legend: plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)}) return plt
def main(): np.random.seed(5) num_elements_one_input = steps * num_input # use pandas to read csv file # result is a 2D data structure with labels data = pd.read_csv('./data/' + symbol + '.csv') open2 = scale_data(data.Open) high = scale_data(data.High) low = scale_data(data.Low) volume = scale_data(data.Volume) adjclose = scale_data(data.AdjClose) dates = data.Date # process the data to input and output # X is input, 1D with n * steps * num_input elements # y is output, 1D with n elements #X, y= processData(adjclose, steps) X, y, z = processData5(open2, high, low, volume, adjclose, steps, dates) # split the data into 90% for train and testing # split_point has to be an integer so use // splity = int(len(y) * split_ratio) # remember that for each output element there are num_elements_one_input splitx = int(splity * num_elements_one_input) # :splitx = [0, splitx), splitx: = [splitx, len(X)) X_train, X_test = X[:splitx], X[splitx:] y_train, y_test = y[:splity], y[splity:] z_test = z[splity:] #print first data slice print(X_train.shape[0]) print(X_test.shape[0]) print(y_train.shape[0]) print(y_test.shape[0]) print(X_train[0]) print(X_test[0]) #Build the model model = Sequential() # Add one layer of LSTM with 256 tensors model.add(GRU(256, input_shape=(steps, num_input))) # Dense is for output layer model.add(Dense(1)) # optimizer is the gradient descent algorithm # mse is the most popular loss function model.compile(optimizer='adam', loss='mse') # reshape input from 1D array to 3D so model can use X_train = X_train.reshape( (len(X_train) // num_elements_one_input, steps, num_input)) X_test = X_test.reshape( (len(X_test) // num_elements_one_input, steps, num_input)) # train the model model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test), shuffle=False) # train completed, let's do predict y_predicted = model.predict(X_test) # how is predicted comapred with actual? # we also want to see first half and second half test score testScore, testScore2, testScore3 = mean_absolute_percentage_error( y_test, y_predicted) print('Test Score: %.2f MAPE' % (testScore)) # Root Mean Square Error, print('Test Score 2: %.2f MAPE' % (testScore2)) print('Test Score 3: %.2f MAPE' % (testScore3)) # draw it # y_test and y_predicted are still in 0 - 1 so we need to call inverse_transform # to change them into real prices # as before, scl needs to work on 2D, # reshape(-1, 1) will do the trick, -1 means it will calculate for us line1, = plt.plot(scl.inverse_transform(y_test.reshape(-1, 1)), marker='d', label='Actual') line2, = plt.plot(scl.inverse_transform(y_predicted.reshape(-1, 1)), marker='o', label='Predicted') plt.legend(handler_map={line1: HandlerLine2D(numpoints=4)}) plt.title(symbol + "(" + z_test[0] + " to " + z_test[len(z_test) - 1] + ")") plt.show()
def train(self, epochs, batch_size, auto_save=True): assert (epochs > 0 and batch_size > 0) loss_train = [] accuracy_train = [] loss_val = [] accuracy_val = [] loss_test = [] accuracy_test = [] E = [] num_examples = len(self.train_data) print('Training the model . . .') with tf.Session() as session: session.run(tf.global_variables_initializer()) total_steps = trange(epochs) for epoch in total_steps: E.append(epoch+1) ll = 0 cc = 0 self.train_data, self.train_labels = shuffle(self.train_data, self.train_labels) for offset in range(0, num_examples, batch_size): end = offset + batch_size X_batch, y_batch = self.train_data[offset:end], self.train_labels[offset:end] _, acc, cross, loss_ = session.run([self.training_step, self.accuracy_operation, self.cross_entropy, self.loss_op], feed_dict={self.X: X_batch, self.y: y_batch}) ll += loss_ * batch_size cc += acc * batch_size # loss_t, train_accuracy = self.evaluate(self.train_data, self.train_labels, batch_size) loss_train.append(ll/num_examples) accuracy_train.append(cc/num_examples) print("Training set: Epoch: ", epoch+1, cc/num_examples, ll/num_examples) if self.validation_data is not None: loss_v, validation_accuracy = self.evaluate(self.validation_data, self.validation_labels, batch_size) loss_val.append(loss_v) accuracy_val.append(validation_accuracy) print("Valiation set: Epoch: ", epoch+1, validation_accuracy, loss_v) # loss_t, train_accuracy = self.evaluate(self.train_data, self.train_labels, batch_size) # print("Training set: Epoch: ", epoch+1, train_accuracy, loss_t) loss_tt, test_accuracy = self.evaluate(self.test_data, self.test_labels, batch_size=batch_size) print("Testing set: Epoch: ", epoch+1, test_accuracy, loss_tt) # total_steps.set_description( # "Epoch {} - validation accuracy {:.3f} ".format(epoch + 1, validation_accuracy)) # # print("Epoch {} - validation accuracy {:.3f} ".format(epoch+1,validation_accuracy)) # tcotal_steps.set_description( # "Epoch {} - validation accuracy {:.3f} ".format(epoch + 1, validation_accuracy)) loss_test.append(loss_tt) accuracy_test.append(test_accuracy) if auto_save and (epoch % 10 == 0): save_path = self.saver.save(session, 'tmp/model.ckpt'.format(epoch)) _, test_accuracy = self.evaluate(self.test_data, self.test_labels, batch_size=batch_size) line1, = plt.plot(E, loss_train, color='b', label = "train set loss", lw=2) line2, = plt.plot(E, loss_val, color='r', label = "validation set loss", lw=2) line3, = plt.plot(E, loss_test, color='g', label = "test set loss", lw=2) #plt.plot(stoch_points, mean - int_conf, '--', color='r',lw=2) plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)}, prop={'size':15}) #plt.show() plt.savefig("loss.pdf", bbox_inches='tight') plt.clf() line4, = plt.plot(E, accuracy_train, color='b', label = "train set accuracy", lw=2) line5, = plt.plot(E, accuracy_val, color='r', label = "validation set accuracy", lw=2) line6, = plt.plot(E, accuracy_test, color='g', label = "test set accuracy", lw=2) #plt.plot(stoch_points, mean - int_conf, '--', color='r',lw=2) plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)}, prop={'size':15}) plt.savefig("acc.pdf", bbox_inches='tight') return test_accuracy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D

ln1, = plt.plot([3, 2.5, 1], marker='o', label='line 1')
ln2, = plt.plot([1, 1.5, 3], marker='o', label='line 2')

plt.legend(handler_map={ln1: HandlerLine2D(numpoints=3),
                        ln2: HandlerLine2D(numpoints=2)})
plt.show()
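# A small variation on the script above (an illustrative sketch, not part of the
# original file): keying the handler_map on the Line2D *type* instead of an
# individual handle applies the same handler to every line in the legend.
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.legend_handler import HandlerLine2D

ln1, = plt.plot([3, 2.5, 1], marker='o', label='line 1')
ln2, = plt.plot([1, 1.5, 3], marker='o', label='line 2')

# Both legend entries now get 3 marker points.
plt.legend(handler_map={Line2D: HandlerLine2D(numpoints=3)})
plt.show()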
def plot_results(results, save=False): """Plot a result graph. Params: results (dict or string): the result dictionary or the json file to parse Note: results: { "title": ..., "x_label": ..., "y_label": ... } """ MARKERS = ['o', '^', '*', 's', '+', 'v'] COLORS = [ "#000000", "#999999", "#222222", "#555555", "#AAAAAA", "#CCCCCC" ] ALPHA = [ 0.9, 1.0, 1.0, 1.0, 1.0, 1.0 ] LINESTYLE = [":", "--", "-", "-.", "steps", ":"] if type(results) != dict: with open(results) as result_file: from json import load as js_load results = js_load(result_file) fig = plt.figure() fig.suptitle(results.get('title', ''), fontsize=14, fontweight='bold') data = results.get('results') labels = [] if results.get("sorted", False): all_data = enumerate(sorted(data.items(), key=lambda elm: int(elm[0]))) else: all_data = enumerate(data.items()) for idx, (type_, obj) in all_data: if results.get("max_step", False): _y_ = obj.get('values')[:results.get("max_step")] else: _y_ = obj.get('values') _x_ = range(len(_y_)) gen_step = results.get("gen_step", 1) tot_gen = (len(_y_) - 1) * gen_step x_real = range(tot_gen) y_real = [] for _n_, val in enumerate(_y_[:-1]): next_ = _y_[_n_ + 1] y_real.append(val) for cur_step in range(gen_step - 1): ## # Cos interpolation alpha = float((cur_step + 1.) / gen_step) alpha2 = (1 - cos(alpha * pi)) / 2 new_point = (val * (1 - alpha2) + next_ * alpha2) y_real.append( new_point ) ## # Do lines and point cur_plot = plt.plot( x_real, y_real, marker=obj.get('marker', MARKERS[idx]), markersize=obj.get('markersize', None), color=obj.get('color', COLORS[idx]), linewidth=obj.get('linewidth', 1), # ls=LINESTYLE[idx], alpha=obj.get('alpha', ALPHA[idx]), label=obj.get('label'), markevery=results.get( "markevery", [int(elm * gen_step) for elm in _x_[:-1]]) ) labels.append(cur_plot[0]) plt.legend( handler_map=dict( [ (label, HandlerLine2D(numpoints=1))for label in labels ] ), bbox_to_anchor=results.get("legend_ancor", (1.0, 1.0)), fontsize=18 ) plt.tick_params(axis='both', which='major', labelsize=14) plt.tick_params(axis='both', which='minor', labelsize=14) plt.axis((0, tot_gen, 0, 1)) plt.xlabel(results.get('x_label', 'Generations'), fontsize=14) plt.ylabel(results.get('y_label', 'Accuracy'), fontsize=14) plt.grid(True) if save: plt.savefig("{}.png".format(save), dpi=600, bbox_inches='tight') print("+ out file -> {}.png".format(save)) plt.show() plt.close()