def plot_per_iteration(self):
    iterations = list(range(len(self.best_per_iter)))
    print("Best allowed per iteration", self.best_allowed_per_iter)
    Plotter().multiline_plot(iterations, self.best_allowed_per_iter,
                             self.avg_allwowed_per_iter,
                             self.worst_allowed_per_iter,
                             x_axis_name="Iteration", y_axis_name="Rating",
                             legend=["Best", "Average", "Worst"],
                             plot_title="Allowed ratings")
    Plotter().multiline_plot(iterations, self.best_per_iter,
                             self.avg_per_iter, self.worst_per_iter,
                             x_axis_name="Iteration", y_axis_name="Rating",
                             legend=["Best", "Average", "Worst"],
                             plot_title="All ratings")
def make_full_report(list_output_results):
    list_ultimate_points = []
    list_yield_points = []
    list_stiffness = []
    # Create the force-displacement curve for each result
    for i in range(len(list_output_results)):
        plots = Plotter(list_output_results[i], "plot" + str(i))
        plots.plot_xy_data(r"Rotation $\theta$ ($deg$)", r"Torque ($Nm$)")
        # store the ultimate (maximum) points, offset yields and stiffnesses
        list_ultimate_points.append(list_output_results[i].ultimate_point.y)
        list_yield_points.append(list_output_results[i].yield_point.y)
        list_stiffness.append(list_output_results[i].stiffness)
    mean_ultimate = np.mean(np.asarray(list_ultimate_points))
    mean_yield = np.mean(np.asarray(list_yield_points))
    std_ultimate = np.std(np.asarray(list_ultimate_points))
    std_yield = np.std(np.asarray(list_yield_points))
    print("mean of ultimates = ", mean_ultimate)
    print("mean of yields = ", mean_yield)
    return "done"
def run_premade_map():
    # Set up RL
    data = Data(ROWS, COLS, load_data=False, epsilon=0.25)
    gen = Generator(ROWS, COLS, resolution, size)
    vis = Plotter(data, cell_edge, ROWS, COLS)
    data.load_racetrack(directory='Sensitivity_Analysis')
    data.get_start_line()
    data.get_finish_line()
    vis.visualize_racetrack()
    # Finish setting up RL
    env = Environment(data, gen, ROWS, COLS)
    mcc = Monte_Carlo_Control(data, ROWS, COLS)
    agent = Agent()
    print("Saving data")
    # Save arrays
    data.save_racetrack(directory='Sensitivity_Analysis_2')
    data.save_Q_vals(directory='Sensitivity_Analysis_2')
    data.save_C_vals(directory='Sensitivity_Analysis_2')
    data.save_rewards(directory='Sensitivity_Analysis_2')
    data.save_pi(directory='Sensitivity_Analysis_2')
    # Train agent
    train_agent(mcc, env, agent, data, plot=True,
                image_name='sensitivity_epsilon=0.25')
    get_gif('epsilon=0.25', data, vis)
def test_map_making():
    data = Data(ROWS, COLS, load_data=False)
    gen = Generator(ROWS, COLS, resolution, size)
    vis = Plotter(data, cell_edge, ROWS, COLS)
    data.racetrack = gen.generate_map()
    vis.visualize_racetrack()
def _plot(self, series, labels, colors, title, lines=None, today=None,
          stacked=False, loc=0, lines_y='bottom', today_y='bottom', **kwargs):
    p = Plotter(fill=stacked, **kwargs)
    p.plots(zip(labels, series), stacked=stacked, colors=colors)
    self._lines(p, series, lines, lines_y, today, today_y)
    plt.legend(loc=loc)
    plt.title('%s, stacked' % title if stacked else title)
def _plot(self, series, labels, colors, title, lines=None, today=None,
          stacked=False, loc=0, lines_y='bottom', today_y='bottom',
          stamp=False, **kwargs):
    p = Plotter(fill=stacked, stamp=stamp)
    p.plots(zip(labels, series), stacked=stacked, colors=colors)
    self._lines(p, series, lines, lines_y, today, today_y)
    plt.legend(loc=loc)
    plt.title('%s, stacked' % title if stacked else title)
def bode(tf, plot=False):
    print("==================================================================")
    gm, pm, wg, wp = ctrl.margin(tf)
    print("Gain Margin: ", gm, " dB in ", wg, " rad/s")
    print("Phase Margin: ", pm, " deg in ", wp, " rad/s")
    mag, pha, w = ctrl.bode_plot(tf)
    if plot:
        p = Plotter({'type': 'log', 'grid': True})
        p.subplot([(w, 20 * np.log10(mag)), (w, 180 * pha / np.pi)],
                  ["Gain (dB)", "Phase (deg)"])
    return gm, pm, wg, wp
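# Example usage (a sketch, not from the source): build a second-order
# transfer function with the python-control package, which the function
# above already uses as `ctrl`, and inspect its margins. The coefficients
# are illustrative, G(s) = 1 / (s^2 + 2s + 1).
import control as ctrl

tf_example = ctrl.tf([1.0], [1.0, 2.0, 1.0])
gm, pm, wg, wp = bode(tf_example, plot=False)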
def run_tests(times: int = 1):
    mea = MultiInstanceEAlgorithm(set_specimen=True)
    mea.execute_algorithm(times=times)
    best, worst, avg, std = mea.get_ratings()
    best_len, worst_len, avg_len, std_len = mea.get_ratings_lengths()
    case_names = [case_naming(name) for name in mea.cases_names]
    map_names = [map_naming(name) for name in mea.maps_names]
    y_data = []
    # Each map contributes a block of 7 consecutive ratings, one per case
    for iteration, map_name in enumerate(map_names):
        lo, hi = iteration * 7, iteration * 7 + 7
        Plotter().barplot_threeway(best[lo:hi], avg[lo:hi], worst[lo:hi],
                                   case_names, "Rating", map_name,
                                   ["Best", "Avg", "Worst"])
        y_data.append(best[lo:hi])
        print(iteration, "barplot values", best[lo:hi])
def maps_test():
    mea = MultiInstanceEAlgorithm()
    mea.load_multiple_points()
    x, y, z = [], [], []
    for _map in mea.maps:
        for d in _map.map:
            x.append(d["x"])
            y.append(d["y"])
            z.append(d["z"])
            print(".", end=" ")
        print("#")
        print(x)
        print(y)
        Plotter.scatter_plot(x, y)
        x, y, z = [], [], []
def run_random_map(plot=False, directory='Random_Map_Data',
                   image_name='random_reward_graph', epsilon=0.1):
    # Set up RL
    data = Data(ROWS, COLS, load_data=False, epsilon=epsilon)
    gen = Generator(ROWS, COLS, resolution, size)
    vis = Plotter(data, cell_edge, ROWS, COLS)
    # TODO: Must be able to plot map without needing space bar
    # Display maps until we get one we like
    while True:
        data.racetrack = gen.generate_map()
        data.get_start_line()
        data.get_finish_line()
        vis.visualize_racetrack()
        response = input("Is this map suitable for our test? (y/n): ")
        if response == 'y':
            break
    # Finish setting up RL
    env = Environment(data, gen, ROWS, COLS)
    mcc = Monte_Carlo_Control(data, ROWS, COLS)
    agent = Agent()
    print("Saving data")
    # Save arrays
    data.save_racetrack(directory=directory)
    data.save_Q_vals(directory=directory)
    data.save_C_vals(directory=directory)
    data.save_rewards(directory=directory)
    data.save_pi(directory=directory)
    # Train agent
    train_agent(mcc, env, agent, data, plot=plot, image_name=image_name)
    if plot:
        get_gif(image_name, data, vis)
def test_dynamic_plotting(self):
    plotter = Plotter()
    n_values = 3000  # renamed from `max` to avoid shadowing the builtin
    for i in range(n_values):
        plotter.add_values([("loss", (n_values - i) / n_values),
                            ("evaluation score", i / n_values / 2),
                            ("second score", 0.3)])
    plotter.plot("DynamicTestPlot").savefig("DynamicTestPlot")
    self.assertTrue(os.path.exists("DynamicTestPlot.png"))
def time(tf, method='step', plot=False):
    print("==================================================================")
    print("Poles: ", ctrl.pole(tf))
    print("Zeros: ", ctrl.zero(tf))
    dc = ctrl.dcgain(tf)
    print("DC gain: ", dc)
    if method == 'step':
        t, y = ctrl.step_response(tf)
    if method == 'impulse':
        t, y = ctrl.impulse_response(tf)
    # settling time: first sample that reaches 98% of the DC gain
    ys = [l for l in y if l >= 0.98 * dc]
    i = np.ndarray.tolist(y).index(min(ys))
    print("Ts: ", t[i])
    print("Overshoot: ", (max(y) / dc) - 1)
    i = np.ndarray.tolist(y).index(max(y))
    print("Tr: ", t[i])
    if plot:
        p = Plotter({'grid': True})
        p.plot([(t, y)])
    return t, y
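# Example usage (a sketch, not from the source): step-response metrics for
# the same illustrative transfer function defined after bode() above.
t, y = time(tf_example, method='step', plot=False)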
def __init__(self, train_data, test_data, split_size, split_type, data_type,
             classifier, threads, iter=-1, clean_stuff=None):
    self.data_handler = MultiClassDataHandler(train_data, test_data,
                                              split_size, split_type, iter,
                                              clean_stuff)
    self.classifier = classifier
    self.threads = threads
    self.data_type = data_type
    self.plotter = Plotter(style="ggplot")
    self.decisions = []
    self.infos = []
def run_test_percentage(times: int = 1):
    mea = MultiInstanceEAlgorithm(set_specimen=True)
    mea.execute_precengage_values(times)
    best, worst, avg, std = mea.get_ratings()
    case_names = [case_naming(name) for name in mea.cases_names]
    map_names = [map_naming(name) for name in mea.maps_names]
    # Percentage sweep of the default parameter values: 50% to 140%
    x_data = [10 * x for x in range(5, 15)]
    print(len(best) / 11)
    for iteration, map_name in enumerate(map_names):
        Plotter().multiline_plot(
            x_data,
            best[11 * iteration:11 * iteration + 10],
            best[10 * iteration + 10:10 * iteration + 20],
            best[10 * iteration + 20:10 * iteration + 30],
            x_axis_name="Default parameter percentage",
            y_axis_name="Rating",
            plot_title=map_name,
            legend=["alpha", "mutation_probability", "parent_group_size"])
class GA_Agent:
    hparams = {}
    random_seed = 0
    eval_interval = -1
    display_moves = False
    should_log = False
    random_util = RandomUtil(0)
    plotter = Plotter('none')
    base_path = 'none'
    episode_count = 0

    def __init__(self, hyperparameters, should_log=False, eval_interval=-1,
                 display_moves=False):
        self.hparams = hyperparameters
        self.random_seed = round(time.time())
        self.should_log = should_log
        self.display_moves = display_moves
        self.eval_interval = eval_interval
        self.random_util = RandomUtil(self.random_seed)
        self.base_path = ('results/GA-'
                          + self.random_util.generate_random_string(6) + '/')
        self.plotter = Plotter(self.base_path)
        self.episode_count = 0

    def __evaluate_ga(self, blue_coeffs, red_coeffs, eval_py_env,
                      display_moves=False, no_tests=10):
        if display_moves:
            episode_path = 'game-drawn/episode-' + str(self.episode_count)
            if not os.path.exists(self.base_path + episode_path):
                os.makedirs(self.base_path + episode_path)
            time_step = eval_py_env.reset()
            picture_count = 0
            while not time_step.is_last():
                observation = time_step.observation
                self.plotter.plot_state(
                    observation,
                    episode_path + '/' + str(picture_count) + '.jpeg')
                picture_count += 1
                time_step = eval_py_env.step()
            picture_count = 0
            self.episode_count += 1
        total_return = 0.0
        for _ in range(no_tests):
            time_step = eval_py_env.reset()
            episode_return = 0.0
            while not time_step.is_last():
                time_step = eval_py_env.step()
                episode_return += time_step.reward
            total_return += episode_return
        return total_return / no_tests

    def __plot_coefs(self, single_organism_coefs, file_name):
        x = 0.0
        x_values = []
        function_values = []

        def evaluate_function(x):
            # Evaluate the organism's polynomial at x
            value = 0.0
            x_pow = 1
            for coef in single_organism_coefs:
                value += coef * x_pow
                x_pow *= x
            return value

        step = 0.03
        while x < 10:
            function_values.append(evaluate_function(x))
            x_values.append(x)
            x += step
        self.plotter.plot_simple_values(x=x_values, y=function_values,
                                        directory=file_name)

    def train(self, eval_game_params):
        hparams = self.hparams
        max_degree = hparams['max_parameter_degree']
        no_blues = hparams['no_blue_organisms']
        no_reds = hparams['no_red_organisms']
        no_parameters = hparams['no_parameters']
        coef_count = np.power(max_degree + 1, no_parameters)
        self.ga_util = GaUtil(self.random_util, coef_count)
        blue_coeffs = self.random_util.get_random_matrix(
            no_blues, coef_count, [-1000, 1000])
        red_coeffs = self.random_util.get_random_matrix(
            no_reds, coef_count, [-1000, 1000])
        returns = []
        # Train the genetic algorithm
        no_random = hparams['no_random_start'] * 1.0
        random_step = (hparams['no_random_final']
                       - no_random) / hparams['no_random_anneal_time']
        mutation_factor_range = np.array(
            hparams['mutation_factor_range_start'])
        mutation_factor_range_final = np.array(
            hparams['mutation_factor_range_final'])
        mutation_factor_range_step = (
            (mutation_factor_range_final - mutation_factor_range)
            / hparams['mutation_factor_range_anneal_time'])
        for generation_number in range(hparams['no_generations']):
            prev_blue_organisms = []
            prev_red_organisms = []
            if generation_number > 0:
                prev_blue_organisms = env.dead_blue_organisms
                prev_red_organisms = env.dead_red_organisms
            env = GameEnv(blue_coeffs, red_coeffs, max_degree,
                          hparams['food_count'], hparams['board_size'])
            # Evaluate the GA
            if (self.eval_interval > 0
                    and (generation_number + 1) % self.eval_interval == 0):
                eval_blue_coeffs = self.ga_util.get_coeffs_from_best(
                    prev_blue_organisms,
                    eval_game_params['no_blue_organisms'],
                    eval_game_params['no_blue_organisms'], 0, [0, 0])
                eval_red_coeffs = self.ga_util.get_coeffs_from_best(
                    prev_red_organisms,
                    eval_game_params['no_red_organisms'],
                    eval_game_params['no_red_organisms'], 0, [0, 0])
                eval_py_env = GameEnv(eval_blue_coeffs, eval_red_coeffs,
                                      max_degree,
                                      eval_game_params['food_count'],
                                      eval_game_params['board_size'])
                avg_return = self.__evaluate_ga(blue_coeffs, red_coeffs,
                                                eval_py_env,
                                                self.display_moves)
                returns.append(avg_return)
                if self.should_log:
                    print(avg_return)
            # Play the game
            time_step = env.reset()
            while not time_step.is_last():
                time_step = env.step()
            # Pick best genomes for the next generation
            blue_organisms = env.dead_blue_organisms
            blue_coeffs = self.ga_util.get_coeffs_from_best(
                blue_organisms, no_blues, hparams['no_best'],
                round(no_random), mutation_factor_range)
            red_organisms = env.dead_red_organisms
            red_coeffs = self.ga_util.get_coeffs_from_best(
                red_organisms, no_reds, hparams['no_best'],
                round(no_random), mutation_factor_range)
            # Reduce the number of random organisms and mutation_factor_range
            no_random += random_step
            mutation_factor_range += mutation_factor_range_step
        self.plotter.plot_simple_values(y=list(returns),
                                        directory='score.jpeg')
        for single_organism_coefs in red_coeffs:
            self.__plot_coefs(single_organism_coefs, 'red-coeffs.jpeg')
        for single_organism_coefs in blue_coeffs:
            self.__plot_coefs(single_organism_coefs, 'blue-coeffs.jpeg')
        hparams['random_seed'] = self.random_seed
        self.plotter.dump_to_json(hparams, 'hparams.json')
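# Example usage (a sketch, not from the source): the hyperparameter keys are
# exactly the ones read in train() above; the values are illustrative guesses.
example_hparams = {
    'max_parameter_degree': 2,
    'no_parameters': 3,
    'no_blue_organisms': 20,
    'no_red_organisms': 20,
    'no_generations': 50,
    'no_best': 5,
    'no_random_start': 5,
    'no_random_final': 0,
    'no_random_anneal_time': 40,
    'mutation_factor_range_start': [0.5, 1.5],
    'mutation_factor_range_final': [0.9, 1.1],
    'mutation_factor_range_anneal_time': 40,
    'food_count': 30,
    'board_size': 20,
}
example_eval_params = {
    'no_blue_organisms': 10,
    'no_red_organisms': 10,
    'food_count': 30,
    'board_size': 20,
}
ga_agent = GA_Agent(example_hparams, should_log=True, eval_interval=10)
ga_agent.train(example_eval_params)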
_summaryPlots = [
    _summary,
    _summaryN,
]
_summaryPlotsHp = [
    _summaryHp,
    _summaryNHp,
]
_packedCandidatePlots = [
    _packedCandidateFlow,
    _packedCandidateParam1,
    _packedCandidateParam2,
    _packedCandidateMomVert,
    _packedCandidateHits,
]

plotter = Plotter()

def _appendTrackingPlots(lastDirName, name, algoPlots, onlyForPileup=False):
    # to keep backward compatibility, this set of plots has an empty name
    plotter.append(name, _trackingFolders(lastDirName),
                   TrackingPlotFolder(*algoPlots, onlyForPileup=onlyForPileup,
                                      purpose=PlotPurpose.TrackingIteration))
    summaryName = ""
    if name != "":
        summaryName += name + "_"
    summaryName += "summary"
    plotter.append(summaryName, _trackingFolders(lastDirName),
                   PlotFolder(*_summaryPlots, loopSubFolders=False,
                              onlyForPileup=onlyForPileup,
                              purpose=PlotPurpose.TrackingSummary,
                              page="summary", section=name))
    # backward compatibility for release validation: the HP plots used to be
    # in the same directory as the all-track plots
    plotter.append(summaryName + "_highPurity", _trackingFolders(lastDirName),
                   PlotFolder(*_summaryPlotsHp, loopSubFolders=False,
                              onlyForPileup=onlyForPileup,
                              purpose=PlotPurpose.TrackingSummary,
                              page="summary",
                              section=name + "_highPurity" if name != "" else "highPurity"),
                   fallbackNames=[summaryName])
else:
    a_type_dict = {}
if not (a_type_dict or f_type_dict):
    print('You have not specified any knowledge records!')
    sys.exit()

######################## Frame type data parsing ##########################
frame_KRs = user_specs['knowledge_record_subkeys']['frame_types']
for key, value in frame_KRs.items():
    sub_plot_list = []
    print(key, value)
    if not value:
        plt2 = Plotter(reader, '.gams.frames.' + key)
        continue
    if 'reference_frame' in value.keys():
        reference_frames = frame_KRs[key]['reference_frame']
        for subkey in value.keys():
            if 'plot' in subkey:
                sub_plot_list.append(frame_KRs[key][subkey].items())
        plt7 = Plotter(reader, '.gams.frames.' + key,
                       frames_of_choice=reference_frames,
                       subkeys=sub_plot_list, points_per_plot=10)
    else:
        plt4 = Plotter(reader, '.gams.frames.' + key,
                       frames_of_choice=['geo', 'p1_base_footprint'])
def sensitivity_analysis():
    # Sensitivity analysis settings for epsilon=0.1
    directory = 'Sensitivity_Analysis'
    image_name = 'epsilon=0.1'
    epsilon = 0.1
    # Set up RL
    data = Data(ROWS, COLS, load_data=False, epsilon=epsilon)
    gen = Generator(ROWS, COLS, resolution, size)
    vis = Plotter(data, cell_edge, ROWS, COLS)
    # Display maps until we get one we like
    while True:
        data.racetrack = gen.generate_map()
        data.get_start_line()
        data.get_finish_line()
        vis.visualize_racetrack()
        response = input("Is this map suitable for our test? (y/n): ")
        if response == 'y':
            break
    # Finish setting up RL
    env = Environment(data, gen, ROWS, COLS)
    mcc = Monte_Carlo_Control(data, ROWS, COLS)
    agent = Agent()
    print("Saving data")
    # Save arrays
    data.save_racetrack(directory=directory)
    data.save_Q_vals(directory=directory)
    data.save_C_vals(directory=directory)
    data.save_rewards(directory=directory)
    data.save_pi(directory=directory)
    # Train agent
    train_agent(mcc, env, agent, data, plot=True, image_name=image_name)

    # Sensitivity analysis settings for epsilon=0.01
    image_name = 'epsilon=0.01'
    epsilon = 0.01
    gif_name = 'epsilon=0.01'
    # Set up RL
    data2 = Data(ROWS, COLS, load_data=False, epsilon=epsilon)
    vis = Plotter(data2, cell_edge, ROWS, COLS)
    # Get old map
    data2.racetrack = data.racetrack
    data2.get_start_line()
    data2.get_finish_line()
    # Finish setting up RL
    env = Environment(data2, gen, ROWS, COLS)
    mcc = Monte_Carlo_Control(data2, ROWS, COLS)
    print("Saving data")
    # Save arrays
    data2.save_racetrack(directory=directory)
    data2.save_Q_vals(directory=directory)
    data2.save_C_vals(directory=directory)
    data2.save_rewards(directory=directory)
    data2.save_pi(directory=directory)
    # Train agent
    train_agent(mcc, env, agent, data2, plot=True, image_name=image_name)
    get_gif(gif_name, data2, vis)
import numpy as np
from plotting import Plotter

a = 11E-9
L = 1000  # Length of wet region
P_cap = 2 * 72E-3 / a  # Capillary pressure from surface tension and gap width
x = np.linspace(0, L, 100)
print(x)
sigma_0 = P_cap * (x / L)
# r_tip =

plt = Plotter(subplot=(2, 1), sharey=True)
plt.add_plot(x, sigma_0, marker='r-')
plt.show_figure()
def __init__(self):
    self.experiment_name = self.__class__.__name__
    self.__plotter__ = Plotter()
    self.last_plot = None
    self.path = (self.config.TIC_TAC_TOE_DIR
                 + "/experiments/artifacts/%s/" % self.experiment_name)
def main(model=None, output_dir=None, n_iter=20, n_texts=2000,
         init_tok2vec=None, plot=False):
    if output_dir is not None:
        output_dir = Path(output_dir)
        if not output_dir.exists():
            output_dir.mkdir()
    if model is not None:
        nlp = spacy.load(model)  # load existing spaCy model
        print("Loaded model '%s'" % model)
    else:
        nlp = spacy.blank("en")  # create blank Language class
        print("Created blank 'en' model")

    # add the text classifier to the pipeline if it doesn't exist
    # nlp.create_pipe works for built-ins that are registered with spaCy
    if "textcat" not in nlp.pipe_names:
        textcat = nlp.create_pipe(
            "textcat",
            config={"exclusive_classes": True, "architecture": "simple_cnn"}
        )
        nlp.add_pipe(textcat, last=True)
    # otherwise, get it, so we can add labels to it
    else:
        textcat = nlp.get_pipe("textcat")

    # add labels to text classifier
    textcat.add_label("POSITIVE")
    textcat.add_label("NEGATIVE")

    # load the IMDB dataset
    print("Loading IMDB data...")
    (train_texts, train_cats), (dev_texts, dev_cats) = load_data()
    train_texts = train_texts[:n_texts]
    train_cats = train_cats[:n_texts]
    print(
        "Using {} examples ({} training, {} evaluation)".format(
            n_texts, len(train_texts), len(dev_texts)
        )
    )
    train_data = list(zip(train_texts, [{"cats": cats} for cats in train_cats]))
    dev_data = list(zip(dev_texts, [{"cats": cats} for cats in dev_cats]))

    # get names of other pipes to disable them during training
    pipe_exceptions = ["textcat", "trf_wordpiecer", "trf_tok2vec"]
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions]
    with nlp.disable_pipes(*other_pipes):  # only train textcat
        optimizer = nlp.begin_training()
        if init_tok2vec is not None:
            with init_tok2vec.open("rb") as file_:
                textcat.model.tok2vec.from_bytes(file_.read())
        print("Training the model...")
        print("{:^5}\t{:^5}\t{:^5}\t{:^5}\t{:^5}".format(
            "LOSS", "DEV_LOSS", "P", "R", "F"))
        # Set up the plotter:
        if plot:
            plotter = Plotter(
                title='IMDB Text categorisation training',
                ylabels=["Train-loss", "Dev-loss", "Precision", "Recall",
                         "F-score"],
                iterations=n_iter, figsize=(8, 10))
        batch_sizes = compounding(4.0, 32.0, 1.001)
        for i in range(n_iter):
            losses = {}
            # batch up the examples using spaCy's minibatch
            random.shuffle(train_data)
            batches = minibatch(train_data, size=batch_sizes)
            for batch in batches:
                texts, annotations = zip(*batch)
                nlp.update(texts, annotations, sgd=optimizer, drop=0.2,
                           losses=losses)
            with textcat.model.use_params(optimizer.averages):
                # evaluate on the dev data split off in load_data()
                scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)
                dev_losses = {}
                random.shuffle(dev_data)
                batches = minibatch(dev_data, size=batch_sizes)
                for batch in batches:
                    texts, annotations = zip(*batch)
                    nlp.update(texts, annotations, sgd=None,
                               losses=dev_losses)
            print(
                "{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}\t{4:.3f}".format(
                    losses["textcat"],
                    dev_losses["textcat"],
                    scores["textcat_p"],
                    scores["textcat_r"],
                    scores["textcat_f"],
                )
            )
            # Update the plot:
            if plot:
                plotter.update(y=[
                    losses["textcat"],
                    dev_losses["textcat"],
                    scores["textcat_p"],
                    scores["textcat_r"],
                    scores["textcat_f"],
                ])

    # test the trained model
    test_text = "This movie sucked"
    doc = nlp(test_text)
    print(test_text, doc.cats)

    if output_dir is not None:
        with nlp.use_params(optimizer.averages):
            nlp.to_disk(output_dir)
        print("Saved model to", output_dir)

        # test the saved model
        print("Loading from", output_dir)
        nlp2 = spacy.load(output_dir)
        doc2 = nlp2(test_text)
        print(test_text, doc2.cats)

    # Keep showing the plot until the plotting window is closed.
    if plot:
        plotter.keep()
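# Example invocation (a sketch, not from the source; the original spaCy
# example script may instead be driven by a CLI wrapper, and the output
# directory name is hypothetical):
if __name__ == "__main__":
    main(model=None, output_dir="textcat_model", n_iter=5, n_texts=500,
         plot=True)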
def create_plotters_from_config(file_path):
    user_specs = yaml_loader(file_path)

    # 2. Grab schema file
    if user_specs[source_key][capnp_schemas_location]:
        schemas = user_specs[source_key][capnp_schemas_location]
    else:
        sys.exit('You have not specified schema files')

    # 3. Grab the source from which to read data (.stk file or live
    #    transport). Checks which source is specified (for now cannot be both)
    if stk_file_source in user_specs[source_key]:
        stk = user_specs[source_key][stk_file_source]
        reader = DataReaderFromFile(schemas, stk)
    elif user_specs[source_key][kb_tranport_settings_key][kb_transport_type_key]:
        transport_settings = user_specs[source_key][kb_tranport_settings_key]
        transport_type = transport_settings[kb_transport_type_key]
        hosts = transport_settings[kb_transport_hosts_key]
        kb_name = transport_settings[kb_name_key]
        queue_lenght = None
        thread_hertz = None
        read_threads = None
        if queue_lenght_key in transport_settings:
            queue_lenght = transport_settings[queue_lenght_key]
        if read_threads_key in transport_settings:
            read_threads = transport_settings[read_threads_key]
        if thread_hertz_key in transport_settings:
            thread_hertz = transport_settings[thread_hertz_key]
        creator = KnowledgeBaseCreator(kb_name, transport_type, hosts,
                                       queue_lenght, read_threads,
                                       thread_hertz)
        kb = creator.get_knowledge_base()
        reader = DataReaderFromKB(schemas, kb)
    else:
        print('You have not specified a data source to read from '
              'or it is incomplete!')
        sys.exit()

    # TODO: maybe merge types into one?
    ################################# Plot Data ################################
    # 1. Create the lists of KRs the user would like plotted, with separate
    #    lists for Any-type and Frame-type data.
    if user_specs[subkeys_key][frames_key]:
        frame_KRs = user_specs[subkeys_key][frames_key]
    else:
        frame_KRs = {}
    if user_specs[subkeys_key][any_type_key]:
        any_KRs = user_specs[subkeys_key][any_type_key]
    else:
        any_KRs = {}

    has_other_key = False
    plot_dict = {}
    for key in user_specs[subkeys_key]:
        if key != frames_key and key != any_type_key:
            has_other_key = True
            value = user_specs[subkeys_key][key]
            if value is not None:
                sub_plot_list = []
                for subkey in value.keys():
                    if 'plot' in subkey:
                        sub_plot_list.append(value[subkey].items())
                plot_dict[key] = Plotter(reader, key, subkeys=sub_plot_list)
            else:
                plot_dict[key] = Plotter(reader, key)

    if len(any_KRs) == 0 and len(frame_KRs) == 0 and not has_other_key:
        print('You have not specified any knowledge records!')
        sys.exit()

    # 2. Frame type data plotting: collect whichever optional Plotter
    #    arguments the config enables and pass them in a single call (the
    #    original enumerated all 16 on/off flag combinations explicitly).
    for key, value in frame_KRs.items():
        sub_plot_list = []
        kwargs = {}
        try:
            if '3D' in value.keys() and value['3D']:
                kwargs['plot_to_3d'] = True
            if 'points_per_plot' in value.keys():
                kwargs['points_per_plot'] = value['points_per_plot']
            if 'reference_frame' in value.keys():
                kwargs['frames_of_choice'] = value['reference_frame']
            # check whether any subkey carries a 'plot_' prefix
            if any('plot_' in s for s in value.keys()):
                for subkey in value.keys():
                    if 'plot_' in subkey:
                        sub_plot_list.append(frame_KRs[key][subkey].items())
                kwargs['subkeys'] = sub_plot_list
        except Exception:
            pass
        plot_dict[key] = Plotter(
            reader, data_reader_interface.frames_prefix + '.' + key, **kwargs)

    # 3. Any type data plotting
    # TODO: Allow for handling of just specifying a key and plotting its
    # subkeys automatically
    for key, value in any_KRs.items():
        sub_plot_list = []
        kwargs = {}
        try:
            if '3D' in value.keys() and value['3D']:
                kwargs['plot_to_3d'] = True
            if 'points_per_plot' in value.keys():
                kwargs['points_per_plot'] = value['points_per_plot']
            if any('plot_' in s for s in value.keys()):
                for subkey, subvalue in value.items():
                    if 'plot_' in subkey:
                        try:
                            sub_plot_list.append(
                                sorted(any_KRs[key][subkey].items()))
                        except Exception:
                            sub_plot_list.append(subvalue)
                kwargs['subkeys'] = sub_plot_list
        except Exception:
            pass
        # As in the original flag table, an Any-type plotter is only created
        # when at least one 'plot_' subkey is present.
        if 'subkeys' in kwargs:
            plot_dict[key] = Plotter(reader, key, **kwargs)
    return plot_dict
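# Example usage (a sketch; 'plot_config.yaml' is a hypothetical config path):
plotters = create_plotters_from_config('plot_config.yaml')
for record_name in plotters:
    print('Configured plotter for knowledge record:', record_name)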
trn.create_Gs(elements, num_radial_etas, num_angular_etas, num_zetas,
              angular_type)

trjbd = TrajectoryBuilder()
calc = EMT()
train_atoms = trjbd.build_atoms(system, size, temp, calc)
calc = EMT()
test_atoms = trjbd.build_atoms(system, size, temp, calc)
steps, train_traj = trjbd.integrate_atoms(
    train_atoms, train_traj, n_train, save_interval
)
steps, test_traj = trjbd.integrate_atoms(
    test_atoms, test_traj, n_test, save_interval
)

plter = Plotter()
energy_noforcetrain = "energy_noforcetrain.png"
force_noforcetrain = "force_noforcetrain.png"
energy_forcetrain = "energy_forcetrain.png"
force_forcetrain = "force_forcetrain.png"

convergence = {"energy_rmse": 1e-16, "force_rmse": None,
               "max_steps": max_steps}
force_coefficient = None
trn.convergence = convergence
trn.force_coefficient = force_coefficient

label = "energy"
dblabel = label + "-train"
calc = trn.create_calc(label=label, dblabel=dblabel)
ann = Annealer(
    calc=calc, images=train_traj, Tmax=20, Tmin=1, steps=2000,
    train_forces=False
)
from datamodel import Model
from servercomm import URL
from parameterui import ViewController
from excelprinter import ExcelPrinter
from plotting import Plotter
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-p", "--plot", action="store_true", dest="do_plot",
                  default=False,
                  help="plot the data instead of writing it to an Excel file")

if __name__ == "__main__":
    (options, args) = parser.parse_args()
    print(options)
    if options.do_plot:
        printer = Plotter()
    else:
        printer = ExcelPrinter('data.xlsx')
    m = Model(URL, printer)
    c = ViewController(m)
assert b1 in range(1, 9)
b2 = int(input('Choose action:\n1)Visualize Bigrams\n2)Visualize Terms\n'
               '3)Run model grid param search\n4)Run all models kfold\n> '))
assert b2 in range(1, 5)
b3 = int(input('Perform clean file parsing or use pickles?:\n'
               '1)Use pickles\n2)Perform clean file parsing\n> '))
assert b3 in range(1, 3)
plotter = Plotter(threads=b1, ignore_pickles=True, strict=(b3 == 2))
while b2 != 5:
    if b2 == 1:
        plotter.visualize_bigrams(40)
    elif b2 == 2:
        plotter.visualize_descriptive_terms(200)
    elif b2 == 3:
        start = time.time()
import yahoo
from plotting import Plotter
import market
import argparse

LOCAL_DATASOURCE = "yahoo.db3"
TEST_SYMBOL = "ENI.MI"

if __name__ == '__main__':
    ## plotting ##
    source = yahoo.LocalSource(LOCAL_DATASOURCE)
    symbol = market.Symbol(source, TEST_SYMBOL, None, None, matplotlib=True)

    p = Plotter('Simple')
    p.draw_simple(symbol)
    p.run()

    p = Plotter('Candlestick')
    p.draw_candlestick(symbol)
    p.run()

    p = Plotter('Simple with volume')
    p.draw_simple_with_volume(symbol)
    p.run()

    p = Plotter('Simple with volume and OBV')
    p.draw_simple_with_volume_obv(symbol)
    p.run()
trn.create_Gs(elements, num_radial_etas, num_angular_etas, num_zetas,
              angular_type)
symm_funcs["Selected"] = trn.Gs

G2 = make_symmetry_functions(elements=elements, type="G2",
                             etas=[0.05, 0.23, 1.0, 5.0],
                             centers=np.zeros(4))
G4 = make_symmetry_functions(
    elements=elements,
    type="G4",
    etas=0.005 * np.ones(1),
    zetas=[1.0, 4.0],
    gammas=[1.0, -1.0],
)
symm_funcs["Default"] = G2 + G4

anl = Analyzer()
plter = Plotter()
r, rdf = anl.calculate_rdf(train_traj, r_max=cutoff.Rc)

for label, symm_func in symm_funcs.items():
    plter.plot_symmetry_functions(
        label + "_rad.png",
        label + "_ang.png",
        symm_func,
        rij=r,
        rdf=rdf,
        cutoff=cutoff,
    )
Plot("dzres_vs_pt_Sigma", ytitle="#sigma(#delta z_{0}) [cm]", ymin=0.0009, ymax=0.1, **_common), Plot("ptres_vs_pt_Sigma", ytitle="#sigma(#delta p_{t}/p_{t})", ymin=0.003, ymax=2.2, **_common), ], legendDy=-0.02, legendDh=-0.01 ) plotter = Plotter([ "DQMData/Run 1/Tracking/Run summary/Track", "DQMData/Tracking/Track", "DQMData/Run 1/RecoTrackV/Run summary/Track", "DQMData/RecoTrackV/Track", ],[ _effandfake1, _effandfake2, _dupandfake1, _dupandfake2, _effvspos, _dedx, _chargemisid, _hitsAndPt, _ntracks, _tuning, _pulls, _resolutionsEta, _resolutionsPt, ]) import collections _iterModuleMap = collections.OrderedDict([ ("initialStepPreSplitting", ["initialStepSeedLayersPreSplitting", "initialStepSeedsPreSplitting", "initialStepTrackCandidatesPreSplitting",
if __name__ == "__main__": system = "silicon" size = (2, 2, 2) temp = 300 timestep = 1.0 n_test = int(5e3) save_interval = 10 legend = ["SW", "AMP"] energy_log = "energy-trained-log.txt" force_log = "force-trained-log.txt" energy_plot = system + "_" + "energy_log.png" force_plot = system + "_" + "force_log.png" plter = Plotter() plter.plot_trainlog(energy_log, energy_plot) plter.plot_trainlog(force_log, force_plot) trjbd = TrajectoryBuilder() calc = OpenKIMcalculator("SW_StillingerWeber_1985_Si__MO_405512056662_005") test_atoms = trjbd.build_atoms(system, size, temp, calc, seed=0) calc = Amp.load("calcs/force-trained.amp") amp_test_atoms = trjbd.build_atoms(system, size, temp, calc, seed=0) test_traj = "test.traj" steps, test_traj = trjbd.integrate_atoms( test_atoms, test_traj, n_test, save_interval, timestep=timestep, convert=True ) amp_test_traj = "amp_test.traj"
import os
import sys

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

sys.path.insert(1, "../tools")
from analysis import Analyzer
from plotting import Plotter
from training import Trainer

if __name__ == "__main__":
    sns.set()
    plot_dir = "plots"
    plot_file = os.path.join(plot_dir, "rdf.png")
    if not os.path.exists(plot_dir):
        os.mkdir(plot_dir)

    anl = Analyzer()
    plter = Plotter()

    r_cut = 6.0
    r, rdf = anl.calculate_rdf("trajs/training.traj", r_max=r_cut)
    rdf[np.nonzero(rdf)] /= max(rdf)
    cutoff = plter.polynomial(r, r_cut, gamma=5.0)

    plt.plot(r, rdf, label="Radial distribution function")
    plt.plot(r, cutoff, label="Polynomial cutoff, gamma=5.0")
    plt.legend()
    plt.title("Copper radial distribution function")
    plt.xlabel("Radial distance [Angstrom]")
    plt.ylabel("Radial distribution function (normalized to 1)")
    plt.savefig(plot_file)
class Handler():
    def __init__(self, train_data, test_data, positive_classes, split_size,
                 split_type, data_type, classifier, threads, iteration=-1,
                 clean_stuff=None):
        self.data_handler = DataHandler(train_data, test_data,
                                        positive_classes, split_size,
                                        split_type, iteration, clean_stuff)
        self.classifier = classifier
        self.threads = threads
        self.iteration = iteration
        self.data_type = data_type
        self.positive_classes = positive_classes
        self.plotter = Plotter(style="ggplot")
        self.decisions = []
        self.infos = []

    def cross_validate(self):
        self.results = self.classifier.classify(
            self.threads, self.data_handler.cross_validation(self.data_type),
            "CV")
        # each tuple is (classification value, information about said
        # classification)
        for res_tuple in self.results:
            if len(res_tuple) == 0:
                continue
            self.decisions.append(res_tuple[0])
            self.infos.append(res_tuple[1][0])

    def optimize_C(self, C_values, threshold=1):
        accuracies = []
        for c_index, c_value in enumerate(C_values):
            if len(accuracies) == 0:
                best_acc, best_c = "-", "-"
            else:
                best_acc = max(accuracies)
                best_c = C_values[np.argsort(accuracies)[-1]]
            print("Testing C: {}\tIteration: {}/{} "
                  "Current best Accuracy and C: {} {}"
                  .format(c_value, c_index, len(C_values), best_acc, best_c))
            self.classifier.change_C(c_value)
            corrects = 0
            results = self.classifier.classify(
                self.threads,
                self.data_handler.cross_validation(self.data_type), "Opt")
            for result in results:
                if result[2] == True:
                    corrects += 1
            accuracies.append(corrects / len(results))
            if corrects / len(results) >= threshold:
                break
        sorted_indexes = np.argsort(accuracies)
        print("\nOptimal C:{}".format(C_values[sorted_indexes[-1]]))
        self.classifier.change_C(C_values[sorted_indexes[-1]])

    def optimize_cnn(self, parameters):
        print("Optimizing CNN parameters..")
        self.classifier.optimize_parameters(
            parameters, self.data_handler.true_data(self.data_type))

    def attribute_testdata(self, keep_test_seperate=False):
        print()
        self.keep_test_seperate = keep_test_seperate
        results = self.classifier.classify(
            self.threads, self.data_handler.true_data(self.data_type), "test")
        for result in results:
            self.decisions.append(result[0])
            self.infos.append(result[1][0])

    def plot_values(self, scale=False, title=" "):
        self.plotter.scale = scale
        self.aggregate_results()
        authors = self.get_authors()
        mapping = {}
        for author in authors:
            if author not in self.positive_classes:
                mapping[author] = "blue"
            else:
                mapping[author] = "red"
        self.plotter.plot(self.decisions, self.infos, mapping, title)

    def aggregate_results(self):
        data = OrderedDict()
        for i in range(0, len(self.decisions)):
            author = self.infos[i][0]
            book = self.infos[i][1]
            name = author + "_" + book
            data[name] = data.get(name, [[], []])
            data[name][0].append(self.decisions[i])
            data[name][1].append((author, book))
        new_decisions = []
        new_info = []
        for key, value in data.items():
            if "test" in key and self.keep_test_seperate == True:
                for val_i, val in enumerate(value[0]):
                    new_decisions.append(val)
                    new_info.append([value[1][0][0],
                                     value[1][0][1] + "_" + str(val_i),
                                     "full"])
            new_decision = sum(value[0]) / len(value[0])
            new_decisions.append(new_decision)
            new_info.append([value[1][0][0], value[1][0][1], "full"])
        self.decisions = new_decisions
        self.infos = new_info

    def get_authors(self):
        authors = []
        for author in self.data_handler.train_data:
            authors.append(author)
        return authors

    def get_best_features(self, num_feats=50):
        X_train, y_train, _, _, _, _, _, _ = next(
            self.data_handler.true_data(self.data_type))
        features = self.classifier.get_best_features(X_train, y_train,
                                                     num_feats)
        print("Positive features:\n")
        self.print_feat(features["POS"])
        print("\nNegative features:\n")
        self.print_feat(features["NEG"])

    def get_best_features_nsampling(self, text_percentage, sampling_count):
        X_train, y_train, _, _, _, _, _, _ = next(
            self.data_handler.true_data(self.data_type))
        features = self.classifier.get_best_features_nsampling(
            X_train, y_train, text_percentage, sampling_count)
        print("Positive features: \n")
        self.print_feat(features["POS"])
        print("\nNegative features:\n")
        self.print_feat(features["NEG"])

    def print_feat(self, feats):
        if len(feats[0]) == 2:  # normal feature
            for feat in feats:
                print("{}\t{}".format(feat[0], round(feat[1], 3)))
        elif len(feats[0]) == 3:  # n-sampled feature
            for feat in feats:
                print("{}\t{}\t{}".format(feat[0], feat[1],
                                          round(feat[2], 3)))

    def get_results(self):
        pass

    def norm_value(self, old_value, old_min, old_max, new_min, new_max):
        return ((old_value - old_min) / (old_max - old_min)) \
            * (new_max - new_min) + new_min

    def print_results(self, normalize=False):
        self.aggregate_results()
        min_val, max_val = min(self.decisions), max(self.decisions)
        res = sorted(zip(self.decisions, self.infos), key=itemgetter(0),
                     reverse=True)
        for val in res:
            if normalize:
                value = self.norm_value(val[0], min_val, max_val, -1, 1)
            else:
                value = val[0]
            value = math.floor(value[0] * 1000) / 1000.0
            print("Author: {}\tBook: {}\tDecision: {}".format(
                val[1][0], val[1][1], value))

    def load_iteration_results(self, result_folder):
        files = os.listdir(result_folder)
        for file_i, filename in enumerate(files):
            with open(result_folder + "/" + filename, "rb") as pklf:
                res = pickle.load(pklf)
            for i in range(len(res[0][0])):
                self.decisions.append(res[0][0][i])
                info = res[0][1][i]
                self.infos.append(info)
            print("Read: {}".format(file_i), end="\r")
        print()
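# Example usage (a sketch; `train_data`, `test_data` and `classifier` are
# assumed to be provided by the surrounding project, and the split/data-type
# values are illustrative guesses, not taken from the source):
handler = Handler(train_data, test_data, positive_classes=["AuthorA"],
                  split_size=5000, split_type="word", data_type="single",
                  classifier=classifier, threads=4)
handler.cross_validate()
handler.print_results(normalize=True)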