def train(env):
    """Train DDPG actor/critic networks on the environment's training data.

    Builds online and target Critic/Actor networks, hard-syncs the targets
    (soft_tau=1.0), then runs up to 100 epochs of ddpg() updates, logging
    train/test losses through a Plotter every `plot_every` steps and stopping
    after 1500 update steps.

    NOTE(review): relies on module-level names `Critic`, `Actor`, `softUpdate`,
    `ddpg`, `run_tests`, `Plotter`, `optimizer`, `params`, `device`, `nn`.
    """
    value_net = Critic(1290, 128, 256, params['critic_weight_init']).to(device)
    policy_net = Actor(1290, 128, 256, params['actor_weight_init']).to(device)
    target_value_net = Critic(1290, 128, 256).to(device)
    target_policy_net = Actor(1290, 128, 256).to(device)

    # Switching off dropout layers: targets are used for inference only.
    target_value_net.eval()
    target_policy_net.eval()

    # Hard copy: soft_tau=1.0 makes each target identical to its online net.
    softUpdate(value_net, target_value_net, soft_tau=1.0)
    softUpdate(policy_net, target_policy_net, soft_tau=1.0)

    value_optimizer = optimizer.Ranger(value_net.parameters(),
                                       lr=params['value_lr'],
                                       weight_decay=1e-2)
    policy_optimizer = optimizer.Ranger(policy_net.parameters(),
                                        lr=params['policy_lr'],
                                        weight_decay=1e-5)
    value_criterion = nn.MSELoss()  # kept: presumably consumed inside ddpg()

    # Loss history; the Plotter keeps a reference to this dict.
    loss = {
        'test': {'value': [], 'policy': [], 'step': []},
        'train': {'value': [], 'policy': [], 'step': []},
    }
    plotter = Plotter(
        loss,
        [['value', 'policy']],
    )

    step = 0
    plot_every = 10
    for epoch in range(100):
        print("Epoch: {}".format(epoch + 1))
        for batch in env.train_dataloader:
            # Renamed from `loss` so the per-batch result does not shadow the
            # loss-history dict held by the plotter.
            batch_loss, value_net, policy_net, target_value_net, \
                target_policy_net, value_optimizer, policy_optimizer = ddpg(
                    value_net, policy_net, target_value_net, target_policy_net,
                    value_optimizer, policy_optimizer, batch, params, step=step)
            plotter.log_losses(batch_loss)
            step += 1
            if step % plot_every == 0:
                print('step', step)
                test_loss = run_tests(env, step, value_net, policy_net,
                                      target_value_net, target_policy_net,
                                      value_optimizer, policy_optimizer,
                                      plotter)
                plotter.log_losses(test_loss, test=True)
                plotter.plot_loss()
            # BUG FIX: the original used `assert False` to stop training,
            # which raises AssertionError and is stripped under `python -O`.
            # Return normally once the step budget is exhausted.
            if step > 1500:
                return
def observe(self):
    """Collect y-data for every sufficiently full packet bin and plot it.

    NOTE(review): `dict.iteritems()` is Python 2 only — confirm the target
    interpreter before porting.
    """
    y_data_list = []
    addrs = []
    for addr, packet_bin in self.bins.iteritems():
        # Only plot bins holding more than VALID_PACKET_COUNT_THRESHOLD packets.
        if len(packet_bin) > VALID_PACKET_COUNT_THRESHOLD:
            y_data_list.append(packet_bin.generate_y_data(self.observer))
            addrs.append(addr)
    plotter = Plotter(range(self.size), y_data_list)
    # Output path: plot name with whitespace collapsed to underscores.
    plotter.output_file = PLOT_DIR + '_'.join(self.plot_name.split()) + '.pdf'
    plotter.x_label = 'Packet Sequence Number'
    plotter.y_label = addrs
    plotter.plot()
def __init__(self, parent, controller, manager):
    # wx panel embedding a live Plotter fed by the force sensor's reading
    # callable; a wx.Timer drives periodic plot updates.
    self.parent = parent
    super().__init__(self.parent)
    self.manager = manager
    # NOTE(review): the parent.parent.controller chain is fragile — confirm
    # which object actually owns force_sensor.
    self.plt = Plotter(self, parent.parent.controller.force_sensor.getreading, controller.experiment.exp_str)
    self.sizer = wx.BoxSizer(wx.VERTICAL)
    self.sizer.Add(self.plt, 0, wx.ALL | wx.EXPAND, 0)
    # Timer events are routed to the plotter's update handler; the timer is
    # presumably started elsewhere (no Start() call here).
    self.timer = wx.Timer(self, wx.ID_ANY)
    self.Bind(wx.EVT_TIMER, self.plt.update)
    self.SetSizerAndFit(self.sizer)
def plot(self):
    """Launch a background Plotter worker over the selected date range."""
    self.image_count = 0
    QtGui.QPixmapCache.clear()  # clear qt image cache
    # Lock out further plot/animate requests while the worker runs.
    self.stop_button.setEnabled(True)
    self.plot_button.setEnabled(False)
    self.animate_button.setEnabled(False)
    # send dates in decimal format to worker: the month combo index is mapped
    # to the midpoint of a two-month bucket — (1 + index*2) / 24 of a year.
    start_date = self.start_year.value() + (1 + self.start_month.currentIndex() * 2) / 24
    end_date = self.end_year.value() + (1 + self.end_month.currentIndex() * 2) / 24
    self.worker = Plotter(start_date, end_date, self.plot_step, self.color_map, self)
    # Wire worker signals back into this widget (Qt queues them to the UI thread).
    self.worker.image_increment_signal.connect(self.add_image)
    self.worker.finished.connect(self.del_worker)
    self.worker.status_signal.connect(self.set_status)
    self.worker.start()
def train(config, dataset, model):
    """Checkpoint-aware training driver: restore, then train/evaluate per epoch.

    NOTE(review): `criterion` and `optimizer` are deliberately left as None —
    these are TODO slots in a course skeleton; _train_epoch will fail until
    they are filled in.
    """
    # Data loaders
    train_loader, val_loader = dataset.train_loader, dataset.val_loader
    if 'use_weighted' not in config:
        # TODO (part c): define loss function
        criterion = None
    else:
        # TODO (part e): define weighted loss function
        criterion = None
    # TODO (part c): define optimizer
    learning_rate = config['learning_rate']
    optimizer = None

    # Attempts to restore the latest checkpoint if exists
    print('Loading model...')
    force = config['ckpt_force'] if 'ckpt_force' in config else False
    model, start_epoch, stats = checkpoint.restore_checkpoint(
        model, config['ckpt_path'], force=force)

    # Create plotter (continues the restored stats history).
    plot_name = config['plot_name'] if 'plot_name' in config else 'CNN'
    plotter = Plotter(stats, plot_name)

    # Evaluate the model once before any (further) training.
    _evaluate_epoch(plotter, train_loader, val_loader, model, criterion,
                    start_epoch)

    # Loop over the entire dataset multiple times
    for epoch in range(start_epoch, config['num_epoch']):
        # Train model on training set
        _train_epoch(train_loader, model, criterion, optimizer)
        # Evaluate model on training and validation set
        _evaluate_epoch(plotter, train_loader, val_loader, model, criterion,
                        epoch + 1)
        # Save model parameters
        checkpoint.save_checkpoint(model, epoch + 1, config['ckpt_path'],
                                   plotter.stats)
    print('Finished Training')

    # Save figure and keep plot open
    plotter.save_cnn_training_plot()
    plotter.hold_training_plot()
def simulation(x, test_range, step_size, file, n = 100, runs = 1000, dim = 2, learn_rate = 1):
    """Sweep one perceptron hyper-parameter and record convergence cost.

    The controlled variable `x` ('n' = point count, 'dim' = dimensionality,
    'c' = learning rate) is stepped from the low end of `test_range` to the
    high end by `step_size`; the other two parameters keep their defaults.
    At each setting the perceptron (zero-initialised weights disabled) is
    trained `runs` times on freshly generated linearly separable data, and
    the iteration count needed to converge is recorded.  All rows are saved
    to `file` as CSV with columns ['n features', 'dimensions', 'c',
    'iterations'].

    :param x: variable to control for, must be 'n', 'dim', or 'c'
    :param test_range: (low, high) range of the controlled variable
    :param step_size: increment applied to the controlled variable
    :param file: CSV output path
    """
    # Reject unknown control variables up front.
    if x not in ['n', 'c', 'dim']:
        raise ValueError('Invalid parameter x')

    low, high = test_range
    val = low
    rows = []
    plot = Plotter()

    while val < high:
        # Bind the swept value to whichever parameter is under test.
        if x == 'n':
            n = val
        elif x == 'c':
            learn_rate = val
        else:  # x == 'dim' (validated above)
            dim = val

        # Repeat the experiment on fresh random data for each setting.
        for _ in range(runs):
            features = plot.generate_points(n, dim)
            labels = plot.generate_labels_linear(features)
            model = Perceptron(dim, zeros=False)
            iterations = model.train(features, labels, c=learn_rate)
            rows.append([n, dim, learn_rate, iterations])

        val += step_size

    # Persist all runs as a single CSV table.
    results = pd.DataFrame(
        rows, columns=['n features', 'dimensions', 'c', 'iterations'])
    results.to_csv(file, sep=',', index=False)
async def main():
    """Fetch transaction data for the 100 most recent blocks and plot it.

    NOTE(review): the node credentials in the URL are redacted in this listing.
    """
    transactionCounts99 = {}
    plotter = Plotter()
    infoGetter = InfoGetter(
        "https://*****:*****@nd-806-802-183.p2pify.com"
    )
    # Latest block number (hex string per the JSON-RPC convention).
    latestBlock = infoGetter.getLatestTransactions()
    tasks = []
    async with aiohttp.ClientSession() as session:
        # Fan out one concurrent request per block over the last 100 blocks.
        for selectedBlock in range(
                int(latestBlock, 16) - 100, int(latestBlock, 16)):
            task = asyncio.ensure_future(
                infoGetter.getTransactions(session, hex(selectedBlock)))
            tasks.append(task)
        responses = await asyncio.gather(*tasks)
        for response in responses:
            # Each response is a single-entry dict: {block_id: tx_data}.
            valuesAndKey = next(iter(response.items()))
            transactionCounts99[valuesAndKey[0]] = valuesAndKey[1]
    #we've completed the request, so now we can plot
    plotter.plot(transactionCounts99)
def test_for_error_n(test_range, step_size, file, learn_rate = 1, dim = 2, runs = 100):
    """Measure perceptron held-out error as the number of points n grows.

    For each n from test_range[0] up to (but excluding) test_range[1],
    stepped by `step_size`, runs `runs` trials: generate n linearly
    separable points, hold out 25% for testing, train a zero-initialised
    perceptron on the rest, and record the test error.  Results are written
    to `file` as CSV with columns ['n', 'error'].

    :param test_range: (low, high) range of n to sweep
    :param step_size: increment applied to n after each batch of runs
    :param file: CSV output path
    :param learn_rate: perceptron learning rate c
    :param dim: dimensionality of the generated points
    :param runs: trials per value of n
    """
    (low, high) = test_range
    n = low
    data = []
    plot = Plotter()
    while n < high:
        for i in range(runs):
            features = plot.generate_points(n, dim)
            labels = plot.generate_labels_linear(features)
            # Fresh frame per trial: the original reused one DataFrame, so
            # columns from a previous, differently sized trial could collide.
            trial_df = pd.DataFrame()
            trial_df['features'] = features.tolist()
            trial_df['labels'] = labels.tolist()
            train, testing = train_test_split(trial_df, test_size=.25)
            model = Perceptron(dim, zeros=True)
            model.train(to_array(train['features']), to_array(train['labels']),
                        c=learn_rate)
            error = model.test_error(to_array(testing['features']),
                                     to_array(testing['labels']))
            data.append([n, error])
        # BUG FIX: the original never incremented n, so the while loop
        # never terminated.  Advance by step_size, mirroring simulation().
        n += step_size
    df = pd.DataFrame(data, columns=['n', 'error'])
    df.to_csv(file, sep=',', index=False)
from visualizer import visualizer
from plot import Plotter
from audio import AudioConnection
from config import Config

# Single shared configuration object for all components.
CONFIG = Config()

if __name__ == "__main__":
    # Wire a plotter and an audio stream into the visualizer main loop.
    plotter = Plotter(CONFIG)
    audio_conn = AudioConnection(CONFIG)
    visualizer(plotter, audio_conn, CONFIG)
}  # NOTE(review): stray closing brace — this chunk begins mid-file; the
   # construct it closes is outside the visible region.
# Loss history consumed by the Plotter; log_losses appends into these lists.
loss = {
    'test': {
        'value': [],
        'policy': [],
        'step': []
    },
    'train': {
        'value': [],
        'policy': [],
        'step': []
    }
}
plotter = Plotter(
    loss,
    [['value', 'policy']],
)
step = 0
for epoch in range(n_epochs):
    for batch in env.train_dataloader:
        # One REINFORCE update per batch; returns a loss dict, or a falsy
        # value when no gradient step happened at this step.
        loss = reinforce_update(batch, params, nets, optimizer,
                                writer=writer, device=device,
                                debug=debug, learn=True, step=step)
        if loss:
            plotter.log_losses(loss)
# CLI arguments: year range, model NetCDF variable, observation file pattern
# and observation variable.  NOTE(review): `xrange` => Python 2 script.
beg_yr = int(sys.argv[2])
end_yr = int(sys.argv[3])
nc_var = sys.argv[4]
obs_pattern = sys.argv[5]
obs_nc_var = sys.argv[6]
for yr in xrange(beg_yr, end_yr):
    # All model NetCDF files for this year.
    pattern = os.path.join(nc_dir, '*' + str(yr) + '*.nc')
    # for
    r = RegCMReader(pattern)
    value = r.get_value(nc_var).mean()
    time_limits = value.get_limits('time')
    crd_limits = value.get_latlonlimits()
    # Read observations clipped to the model's time and lat/lon extents.
    obs_r = CRUReader(obs_pattern)
    obs_value = obs_r.get_value(obs_nc_var, imposed_limits={
        'time': time_limits
    }, latlon_limits=crd_limits).mean()
    if obs_nc_var == "TMP":
        # Convert observed temperature to Kelvin to match the model output.
        obs_value.to_K()
    # Put the model field on the observation grid before differencing.
    value.regrid(obs_value.latlon)
    diff = obs_value - value
    plt = Plotter(diff)
    plt.plot(levels=(-5, 5))
    plt.show()
    plt.save('image', format='png')
    plt.close()
def main():
    """Train a word-vector GAN on the Gutenberg corpus, plotting losses."""
    print("Loading wordvecs...")
    # Load GloVe vectors, restricting to the Gutenberg vocabulary on first run.
    if utils.exists("glove", "glove.840B.300d.txt", "gutenberg"):
        words, wordvecs = utils.load_glove("glove", "glove.840B.300d.txt",
                                           "gutenberg")
    else:
        words, wordvecs = utils.load_glove("glove", "glove.840B.300d.txt",
                                           "gutenberg",
                                           set(map(clean_word, gutenberg.words())))
    # L2-normalise every word vector (row-wise).
    wordvecs_norm = wordvecs / np.linalg.norm(wordvecs, axis=1).reshape(-1, 1)

    print("Loading corpus...")
    # Convert corpus into normed wordvecs, replacing any words not in vocab
    # with zero vector
    sentences = [[wordvecs_norm[words[clean_word(word)]]
                  if clean_word(word) in words.keys() else np.zeros(WORD_DIM)
                  for word in sentence]
                 for sentence in gutenberg.sents()]

    print("Processing corpus...")
    # Pad sentences shorter than SEQUENCE_LENGTH with zero vectors and
    # truncate sentences longer than SEQUENCE_LENGTH
    s_train = list(map(pad_or_truncate, sentences))
    np.random.shuffle(s_train)
    # Truncate to multiple of BATCH_SIZE
    s_train = s_train[:int(len(s_train) / BATCH_SIZE) * BATCH_SIZE]
    s_train_idxs = np.arange(len(s_train))

    print("Generating graph...")
    network = NlpGan(learning_rate=LEARNING_RATE, d_dim_state=D_DIM_STATE,
                     g_dim_state=G_DIM_STATE, dim_in=WORD_DIM,
                     sequence_length=SEQUENCE_LENGTH)
    # Two panes: losses (discriminator/generator) and accuracy.
    plotter = Plotter([2, 1], "Loss", "Accuracy")
    plotter.plot(0, 0, 0, 0)
    plotter.plot(0, 0, 0, 1)
    plotter.plot(0, 0, 1, 0)
    plotter.plot(0, 1, 1, 0)
    #d_vars = [var for var in tf.trainable_variables() if 'discriminator' in var.name]
    saver = tf.train.Saver()
    with tf.Session() as sess:
        #eval(sess, network, words, wordvecs_norm, saver)
        sess.run(tf.global_variables_initializer())
        #resume(sess, saver, plotter, "GAN_9_SEQUENCELENGTH_10", 59)
        d_loss, g_loss = 0.0, 0.0
        for epoch in range(0, 10000000):
            print("Epoch %d" % epoch)
            np.random.shuffle(s_train_idxs)
            for batch in range(int(len(s_train_idxs) / BATCH_SIZE)):
                # select next random batch of sentences
                # FIXME(review): the slice starts at `batch`, not
                # `batch * BATCH_SIZE`, so consecutive batches overlap by
                # BATCH_SIZE-1 sentences and most of s_train is never used.
                s_batch_real = [s_train[x]
                                for x in s_train_idxs[batch:batch + BATCH_SIZE]]
                # shape (BATCH_SIZE, SEQUENCE_LENGTH, WORD_DIM)
                # reshape to (SEQUENCE_LENGTH, BATCH_SIZE, WORD_DIM) while
                # preserving sentence order
                s_batch_real = np.array(s_batch_real).swapaxes(0, 1)
                # Dead branches: `and False` disables the loss-balancing paths.
                if d_loss - g_loss > MAX_LOSS_DIFF and False:
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'd_train', 'g_loss'),
                        network.get_feed_dict(inputs=s_batch_real,
                                              input_dropout=D_KEEP_PROB)
                    )
                elif g_loss - d_loss > MAX_LOSS_DIFF and False:
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'g_loss', 'g_train'),
                        network.get_feed_dict(inputs=s_batch_real,
                                              input_dropout=D_KEEP_PROB)
                    )
                else:
                    output_dict = sess.run(
                        network.get_fetch_dict('d_loss', 'd_train', 'g_loss',
                                               'g_train'),
                        network.get_feed_dict(inputs=s_batch_real,
                                              input_dropout=D_KEEP_PROB,
                                              instance_variance=INSTANCE_VARIANCE)
                    )
                d_loss, g_loss = output_dict['d_loss'], output_dict['g_loss']
                if batch % 10 == 0:
                    print("Finished training batch %d / %d"
                          % (batch, int(len(s_train) / BATCH_SIZE)))
                    print("Discriminator Loss: %f" % output_dict['d_loss'])
                    print("Generator Loss: %f" % output_dict['g_loss'])
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)),
                                 d_loss, 0, 0)
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)),
                                 g_loss, 0, 1)
                if batch % 100 == 0:
                    # NOTE(review): `eval` shadows the builtin of that name.
                    eval = sess.run(
                        network.get_fetch_dict('g_outputs', 'd_accuracy'),
                        network.get_feed_dict(inputs=s_batch_real,
                                              input_dropout=1.0,
                                              instance_variance=INSTANCE_VARIANCE)
                    )
                    # reshape g_outputs to (BATCH_SIZE, SEQUENCE_LENGTH,
                    # WORD_DIM) while preserving sentence order
                    generated = eval['g_outputs'].swapaxes(0, 1)
                    # Decode the first few generated sentences via
                    # nearest-neighbour lookup in the normed vocabulary.
                    for sentence in generated[:3]:
                        for wordvec in sentence:
                            norm = np.linalg.norm(wordvec)
                            word, similarity = nearest_neighbor(
                                words, wordvecs_norm, wordvec / norm)
                            print("{}({:4.2f})".format(word, similarity))
                        print('\n---------')
                    print("Total Accuracy: %f" % eval['d_accuracy'])
                    plotter.plot(epoch + (batch / int(len(s_train) / BATCH_SIZE)),
                                 eval['d_accuracy'], 1, 0)
            # Checkpoint model and plots once per epoch.
            saver.save(sess, './checkpoints/{}.ckpt'.format(SAVE_NAME),
                       global_step=epoch)
            plotter.save(SAVE_NAME)
from link import link
from measures import mu, sigma, p_correlation
from plot import Plotter
from copy import deepcopy
import networkx as nx

# Graph definition: 500 random geometric nodes, edges within radius 0.05.
n = 500
G = nx.random_geometric_graph(n, 0.05)
adj = list(G.adjacency())

# Derived structural information: per-node means, variances, and pairwise
# Pearson-style similarities.
means = [mu(node_adj, n) for node_adj in adj]
variances = sigma(adj, means)
similarities = p_correlation(adj, means, variances)

# Hierarchical clustering: single-link (max) vs complete-link (min) merges.
# deepcopy because link() consumes/mutates the similarity structure.
stree = link(deepcopy(similarities), max)
ctree = link(deepcopy(similarities), min)

splotter = Plotter(G, stree, "slink.html")
cplotter = Plotter(G, ctree, "clink.html")

# Cut both dendrograms at the same level and render each to its HTML file.
level = 450
splotter.plot(level)
cplotter.plot(level)

if __name__ == '__main__':
    pass
# Measure format is '[HR] [BC]' where HR is heart rate and BC is beat count heartRate = int(measure.split()[0]) beatCount = int(measure.split()[1]) print("HR: {0} BC: {1}".format(heartRate, beatCount)) self.heartRate = heartRate def processProblem(self, problem): print("Problem:", problem) if __name__ == '__main__': handler = HRMHandler() server = sensorserver.SensorTCPServer() print("Starting server and waiting for connection") server.startServer(('', 4004), handler) server.waitForConnection() print("Starting loop") server_thread = threading.Thread(target=server.loop) server_thread.daemon = True server_thread.start() plotter = Plotter(handler) plotter.animate(20) plotter.getPlot().show() # Execution continues after user closes the window print("Shutting down") server.shutdown()
# Resolve the requested workload/limiter/proxy models and run the simulation.
workload = workload_model.predefined_workloads[args.workload]
limiter = getattr(proxy_model, limiter_name)
proxy = getattr(proxy_model, args.proxy)(args.duration, ratekeeper, workload,
                                         limiter)
proxy.run()
for priority in workload.priorities():
    # Flatten per-transaction latency lists into one sorted sample.
    latencies = sorted(
        [p for t in proxy.results.latencies[priority].values() for p in t])
    total_started = sum(proxy.results.started[priority].values())
    still_queued = sum(
        [r.count for r in proxy.request_queue if r.priority == priority])
    if len(latencies) > 0:
        print('\n%s: %d requests in %d seconds (rate=%f). %d still queued.' %
              (priority, total_started, proxy.time,
               float(total_started) / proxy.time, still_queued))
        # Percentiles by index into the sorted latency sample.
        print(' Median latency: %f' % latencies[len(latencies) // 2])
        print(' 90%% latency: %f' % latencies[int(0.9 * len(latencies))])
        print(' 99%% latency: %f' % latencies[int(0.99 * len(latencies))])
        print(' 99.9%% latency: %f' % latencies[int(0.999 * len(latencies))])
        print(' Max latency: %f' % latencies[-1])
        print('')
if not args.no_graph:
    plotter = Plotter(proxy.results)
    plotter.display()
def plot_calendar(
    *,
    zip_path,
    year,
    plot_size=1,
    n_cols=4,
    month_gap=0,
    col_gap=0.5,
    sport="running",
    label=None,
):
    """Render one year of Strava activities as a calendar of route plots.

    Parameters
    ----------
    zip_path : str
        Path to the .zip archive exported from Strava.
    year : int
        Year of data to plot.  Every file in the archive must be unzipped
        and read to determine its year, which can take several minutes.
    plot_size : float (default=1)
        Scale factor on the automatically chosen figure size.
    n_cols : int (default=4)
        Number of columns the months are split into; should divide 12
        evenly since the split happens on month boundaries.
    month_gap : float (default=0)
        Vertical gap between months, in calendar-square units (each day
        square is 1 x 1).
    col_gap : float (default=0.5)
        Horizontal gap between columns, in calendar-square units.
    sport : str (default="running")
        Which sport's routes to draw.  Only tested with running.
    label : str or None
        Text placed in the top-left corner.  None means the year; pass ""
        to suppress the label entirely.

    Returns
    -------
    figure, axis
        The matplotlib figure and axis, for further customization.
    """
    # Restrict the archive to activities falling inside the calendar year.
    window_start = datetime.datetime(year, 1, 1)
    window_end = datetime.datetime(year + 1, 1, 1)
    data = get_data(zip_path, sport, window_start, window_end)

    plotter = Plotter(data)

    # Figure size scales with the column layout; plot_size is a multiplier.
    fig_width = plot_size * 5 * n_cols
    fig_height = plot_size * 40 / n_cols
    fig, ax = plt.subplots(figsize=(fig_width, fig_height))
    fig, ax = plotter.plot_year(year=year, fig=fig, ax=ax, n_cols=n_cols,
                                month_gap=month_gap, col_gap=col_gap)

    if label is None:
        label = str(year)
    # Faint heavy-weight label above the first row of squares.
    ax.text(0, -1, label,
            fontdict={
                "fontsize": 32,
                "fontweight": "heavy"
            },
            alpha=0.5)
    return fig, ax
from rocket import Rocket
from plot import Plotter
import matplotlib.pyplot as plt
import numpy as np
import time
from pyqtgraph.Qt import QtCore, QtGui

if __name__ == "__main__":
    # State vector: [x, y, z, theta1, theta2, xdot, ydot, zdot,
    # theta1dot, theta2dot] (per the commented labels below).
    initial_state = np.array([0., 0., 0., 0., 1e-2, 0., 0., 0., 0., 0.])
    # Last argument is presumably the gravity vector — TODO confirm against
    # the Plotter constructor.
    p = Plotter(0, 0, 0, -0.1, 0.1, np.array([0, 0, -9.8]))
    r = Rocket(*initial_state)

    def update():
        # Advance the simulation one step and redraw the plot.
        r.propagate_state()
        p.update(r.x, r.y, r.z, r.theta1, r.theta2, r.c)

    # A 10 ms Qt timer drives the simulate/redraw loop.
    timer = QtCore.QTimer()
    timer.timeout.connect(update)
    timer.start(10)
    states = np.ones((100, 10))
    p.start()
    # labels = ['x', 'y', 'z', 'theta1', 'theta2', 'xdot', 'ydot', 'zdot',
    #           'theta1dot', 'theta2dot']
    # colors = ['r', 'g', 'b', 'c', 'k', 'r', 'g', 'b', 'c', 'k']
    # alphas = [1, 1, 1, 1, 1, 0.6, 0.6, 0.6, 0.6, 0.6]
    # x = np.linspace(0, 100, 100)
    # for label, data, color, alpha in zip(labels, states.T, colors, alphas):
    #     plt.plot(x, data, label=label, c=color, alpha=alpha)
    # plt.legend()
    # plt.show()
from data_reader import Data
from datapipeline import Datapipeline
from plot import Plotter
from gbm_model import Model

# NOTE(review): this line is garbled — a credential redaction has swallowed
# the text between the BASE_URL assignment and the first route decorator
# (app creation, plotter construction, etc. are missing from this view).
BASE_URL = "http://*****:*****@app.route('/')
def index():
    # Serve the landing page, passing the API base URL to the template.
    return render_template('index.html', base_url=BASE_URL)

@app.route('/plot/get_poverty_breakdown', methods=['GET'])
def get_poverty_breakdown():
    # Render the poverty-breakdown figure to an in-memory bytes object.
    # NOTE(review): the chunk is truncated here; the response-building code
    # that consumes bytes_obj is not visible.
    bytes_obj = plotter.plot_poverty_breakdown()
def main(self):
    """Analyse QA model predictions per question type and optionally ensemble.

    CLI: --type selects the evaluation mode ('original', 'class_dev',
    'dev_on_splitted', or 'ensemble'); --percentage and --latex are passed
    through to plotting/reporting.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--type', dest='type', default='original')
    parser.add_argument('--percentage', dest='percentage', default='5')
    parser.add_argument('--latex', dest='latex', default=False)
    args = parser.parse_args()
    # NOTE(review): `type` shadows the builtin of that name.
    type = args.type
    percentage = args.percentage
    print_latex = args.latex

    # Accumulators for per-model metrics.
    stats_f1_list = []
    f1_list = []
    stats_em_list = []
    em_list = []
    names_list = []
    plotter = Plotter(type, percentage)

    if type == 'original':
        dev_pattern_file = config.ORIGINAL_CONFIG['dev_pattern_file']
        models_to_process = config.ORIGINAL_CONFIG['models_to_process']
    elif type == 'class_dev':
        # preds on 5% of training (pre-evaluation), trained with splitted
        # training
        dev_pattern_file = config.CLASS_DEV_CONFIG['dev_pattern_file']
        models_to_process = config.CLASS_DEV_CONFIG['models_to_process']
    elif type == 'dev_on_splitted':
        # preds on original dev, trained with splitted training
        dev_pattern_file = config.DEV_ON_SPLITTED_CONFIG[
            'dev_pattern_file']
        models_to_process = config.DEV_ON_SPLITTED_CONFIG[
            'models_to_process']
    elif type == 'ensemble':
        # original dev to construct id_to_type_dict
        print('\n 1. Step: original dev to construct id_to_type_dict\n')
        dev_pattern_file = config.ORIGINAL_CONFIG['dev_pattern_file']
        id_to_type = self.get_id_to_type_dict(dev_pattern_file)
        for k, v in id_to_type.items():
            self.id_to_type_dict[k] = v

        # class_dev predictions are used to derive per-type model weights.
        print('\n 2. Step: class_dev to obtain weights\n')
        dev_pattern_file = config.CLASS_DEV_CONFIG['dev_pattern_file']
        models_to_process = config.CLASS_DEV_CONFIG['models_to_process']
        self.stats.type_to_count_dict = self.count_question_types(
            dev_pattern_file, print_latex)
        self.stats.print_latex = print_latex
        for model in models_to_process:
            name = model[0]
            file = model[1]
            results = self.analyze_model(name, file, dev_pattern_file)
            stats_f1_list.append(results['f1'][0])
            f1_list.append(results['f1'][1])
            stats_em_list.append(results['em'][0])
            em_list.append(results['em'][1])
            names_list.append(name)
        # self.stats.summarize()
        plotter.plot_bar(stats_f1_list, f1_list, names_list, 'F1',
                         'class_dev')
        plotter.plot_bar(stats_em_list, em_list, names_list, 'EM',
                         'class_dev')
        weights = self.ensembler.count_weights(stats_f1_list, names_list,
                                               'F1')
        weights_updated = self.ensembler.update_undefined_type_weight(
            weights, names_list, f1_list)

        # dev_on_splitted to get candidate answers
        print('\n 3. Step: dev_on_splitted to get candidate answers\n')
        models_to_process = config.ORIGINAL_CONFIG['models_to_process']
        candidate_predictions = self.get_candidate_predictions(
            models_to_process)

        # ensemble.predict to get ensemble answers -> save to file
        print(
            '\n 4. Step: ensemble.predict to get ensemble answers -> save to file\n'
        )
        ensemble_predictions = self.ensembler.predict(
            candidate_predictions, self.id_to_type_dict, weights_updated)
        with open(config.ENSEMBLE_FILE, 'w') as f:
            json.dump(ensemble_predictions, f)

        # evaluate ensemble predictions (vs. 100% of training results)
        # ??? vs. splitted or full training
        print(
            '\n 5. Step: evaluate ensemble predictions (vs. 100% training results)\n'
        )
        dev_pattern_file = config.ORIGINAL_CONFIG['dev_pattern_file']
        models_to_process = config.ORIGINAL_CONFIG['models_to_process']
        models_to_process.append(('Ensemble', config.ENSEMBLE_FILE))
        print(models_to_process)
        # Reset accumulators: the ensemble is evaluated alongside the base
        # models from scratch.
        stats_f1_list = []
        f1_list = []
        stats_em_list = []
        em_list = []
        names_list = []
        for model in models_to_process:
            name = model[0]
            print('\nAnalysing {}...'.format(name))
            file = model[1]
            results = self.analyze_model(name, file, dev_pattern_file)
            stats_f1_list.append(results['f1'][0])
            f1_list.append(results['f1'][1])
            stats_em_list.append(results['em'][0])
            em_list.append(results['em'][1])
            names_list.append(name)
        # self.stats.summarize()
        plotter.type = 'ensemble'
        plotter.plot_bar(stats_f1_list, f1_list, names_list, 'F1', type)
        plotter.plot_bar(stats_em_list, em_list, names_list, 'EM', type)
    else:
        print(
            'type must be original, class_dev, dev_on_splitted or ensemble'
        )
        sys.exit(1)

    self.stats.type_to_count_dict = self.count_question_types(
        dev_pattern_file, print_latex)
    self.stats.print_latex = print_latex
    # Non-ensemble modes evaluate each configured model on dev_pattern_file.
    if type != 'ensemble':
        for model in models_to_process:
            name = model[0]
            print('\nAnalysing {}...'.format(name))
            file = model[1]
            results = self.analyze_model(name, file, dev_pattern_file)
            stats_f1_list.append(results['f1'][0])
            f1_list.append(results['f1'][1])
            stats_em_list.append(results['em'][0])
            em_list.append(results['em'][1])
            names_list.append(name)
        self.stats.summarize()
        plotter.plot_bar(stats_f1_list, f1_list, names_list, 'F1', type)
        plotter.plot_bar(stats_em_list, em_list, names_list, 'EM', type)
from plot import Plotter
from data import Data
from singleLayerNN import SingleLayerNeuralNetwork
from multiLayerNN import MultiLayerNeuralNetwork

# Load the dataset once and share it between both models and the plotter.
data = Data('./dataset.csv')
plotter = Plotter(data)

# Single-layer network: learning rate 0.01, 1000 iterations.
single_layer = SingleLayerNeuralNetwork(data, 0.01, 1000)
weightsSLNN, precisionSLNN = single_layer.run()

# Multi-layer network: learning rate 0.1, 10000 iterations.
multi_layer = MultiLayerNeuralNetwork(data, 0.1, 10000)
weightsMLNN, precisionMLNN = multi_layer.run()

print("\nSingle Layer Neural Net Precision:\t", precisionSLNN, "%")
print("Multi Layer Neural Net Precision: \t", precisionMLNN, "%")

# Overlay both learned boundaries on the data plot.
plotter.plot(weightsSLNN, weightsMLNN)
def calculate_emissions():
    # in this example we will calculate annual CO emissions for the 14 GFED
    # basisregions over 1997-2014. Please adjust the code to calculate emissions
    # for your own specie, region, and time period of interest. Please
    # first download the GFED4.1s files and the GFED4_Emission_Factors.txt
    # to your computer and adjust the directory where you placed them below
    # NOTE(review): Python 2 code (print statements) — keep interpreter in mind.
    directory = '.'

    """ Read in emission factors """
    species = []  # names of the different gas and aerosol species
    EFs = np.zeros((41, 6))  # 41 species, 6 sources
    k = 0
    f = open(directory+'/GFED4_Emission_Factors.txt')
    while 1:
        line = f.readline()
        if line == "":
            break
        if line[0] != '#':
            # Row format: species name followed by 6 per-source factors.
            contents = line.split()
            species.append(contents[0])
            EFs[k,:] = contents[1:]
            k += 1
    f.close()

    plotter = Plotter();
    #totals for three regionally organized tables
    regional_tables = np.zeros((20, 3, 7, 15));
    #totals for three species-organized tables
    species_tables = np.zeros((20, 3, 7, 9));

    for species_num in range(9):
        print " "
        print "Species: " + species_used[species_num]
        EF_species = EFs[species_row[species_num]];
        # One writer per output data type (emissions / SCAR / air quality).
        writers = [];
        for writer_type in range(3):
            writers.append(setup_writer(data_types[writer_type],
                                        species_used[species_num],
                                        units[writer_type]));
        #calculate and write emissions for this species for each year 1997 - 2014
        for year in range(start_year, end_year+3):
            year_to_use = year;
            start_month = 0;
            identifier = species_used[species_num] + "_" + str(year);
            #do el nino and la nina years separately -- calculate and write
            #emissions for July 1 to June 30
            if(year == end_year+1):
                year_to_use = 1997;
                start_month = 7;
                identifier = species_used[species_num] + "_1997-1998 El Nino";
            if(year == end_year+2):
                year_to_use = 1998;
                start_month = 7;
                identifier = species_used[species_num] + "_1998-1999 La Nina";
            emissions_table = calculate_species_for_year(directory, species_num,
                                                         EF_species, year_to_use,
                                                         start_month);
            # convert to $ value
            scar_table = emissions_table * scar_values[species_num] / GRAMS_PER_TON;
            aq_table = emissions_table * aq_values[species_num] / GRAMS_PER_TON;
            # convert to Tg CO
            final_emissions_table = emissions_table / 1E12;
            tables = [final_emissions_table, scar_table, aq_table];
            for data_type in range(3):
                # Accumulate into the regional totals and copy this species'
                # column (region 14 = global) into the per-species tables.
                regional_tables[year - start_year][data_type] += tables[data_type];
                species_tables[year - start_year][data_type][0:7, species_num] = tables[data_type][0:7, 14];
            plot_and_write_table(tables, writers, plotter, identifier);
        print species_used[species_num] + " done";

    #calculate total emissions by adding up the results from each species,
    #for each year
    for year in range(20):
        year_description = str(start_year+year);
        if(year + start_year == end_year+1):
            year_description = "1997-1998 El Nino";
        if(year + start_year == end_year+2):
            year_description = "1998-1999 La Nina";
        plot_regions_table(regional_tables[year], plotter,
                           year_description + " regional totals");
        plot_species_table(species_tables[year], plotter,
                           year_description + " all species");
def main(eval_mode: bool, feature_type: str, scene: str, hyper_params: dict,
         network_config: dict, eval_settings: dict, fft_params: dict) -> None:
    """
    Main function that takes hyper-parameters, creates the architecture,
    trains the model and evaluates it
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    os.makedirs('results', exist_ok=True)
    # Unique run id: timestamp + feature type + scene.
    experiment_id = datetime.now().strftime(
        "%Y%m%d-%H%M%S") + f' - {feature_type} - {scene}'
    writer = SummaryWriter(log_dir=os.path.join('tensorboard', experiment_id))
    shutil.copyfile('config.json', os.path.join(
        'results', 'config.json'))  # save current config file to results
    training_dataset = BaseDataset(feature_type, scene, hyper_params,
                                   fft_params)

    # create network
    classes = util.get_scene_classes(scene)
    plotter = Plotter(classes, hop_size=fft_params['hop_size'],
                      sampling_rate=22050)
    # finalize network config parameters: input feature count depends on the
    # chosen feature representation.
    network_config['out_features'] = len(classes)
    if feature_type == 'spec':
        network_config['n_features'] = fft_params['n_fft'] // 2 + 1
    elif feature_type == 'mfcc':
        network_config['n_features'] = fft_params['n_mfcc']
    elif feature_type == 'mels':
        network_config['n_features'] = fft_params['n_mels']
    # create network
    net = SimpleCNN(**network_config)
    # Save initial model as "best" model (will be overwritten later)
    model_path = os.path.join('results',
                              f'best_{feature_type}_{scene}_model.pt')
    if not os.path.exists(model_path):
        torch.save(net, model_path)
    else:
        # if there already exists a model, just load parameters
        print(f'reusing pre-trained model: "{model_path}"')
        net = torch.load(model_path, map_location=torch.device('cpu'))
    net.to(device)

    # get loss function
    loss_fn = torch.nn.BCELoss()
    # create adam optimizer
    optimizer = torch.optim.Adam(net.parameters(),
                                 lr=hyper_params['learning_rate'],
                                 weight_decay=hyper_params['weight_decay'])

    train_stats_at = eval_settings['train_stats_at']
    validate_at = eval_settings['validate_at']
    best_loss = np.inf  # best validation loss so far
    progress_bar = tqdm.tqdm(total=hyper_params['n_updates'],
                             desc=f"loss: {np.nan:7.5f}", position=0)
    update = 0  # current update counter
    fold_idx = 1  # one random fold (defines split into training and validation set)
    rnd_augment = hyper_params['rnd_augment']

    # create subsets and data loaders
    if eval_mode:
        # Eval mode trains on everything; no held-out validation split.
        train_subset = training_dataset
        val_loader = None
    else:
        train_subset = Subset(training_dataset,
                              training_dataset.get_fold_indices(fold_idx)[0])
        val_subset = Subset(training_dataset,
                            training_dataset.get_fold_indices(fold_idx)[1])
        val_set = ExcerptDataset(val_subset, feature_type, classes,
                                 hyper_params['excerpt_size'], fft_params,
                                 overlap_factor=1, rnd_augment=False)
        val_loader = DataLoader(val_set,
                                batch_size=hyper_params['batch_size'],
                                shuffle=False, num_workers=0)
    train_set = ExcerptDataset(
        train_subset, feature_type, classes, hyper_params['excerpt_size'],
        fft_params, overlap_factor=hyper_params['train_overlap_factor'],
        rnd_augment=rnd_augment)
    train_loader = DataLoader(train_set,
                              batch_size=hyper_params['batch_size'],
                              shuffle=True, num_workers=0)

    n_updates = hyper_params['n_updates']
    # main training loop
    while update <= n_updates:
        if rnd_augment and update > 0:
            # regenerate new excerpts (in background) but use current ones
            # for training
            train_set.generate_excerpts()
        for data in train_loader:
            inputs, targets, audio_file, idx = data
            inputs = inputs.to(device, dtype=torch.float32)
            targets = targets.to(device, dtype=torch.float32)
            # Standard supervised step: forward, BCE loss, backward, update.
            optimizer.zero_grad()
            predictions = net(inputs)
            loss = loss_fn(predictions, targets)
            loss.backward()
            optimizer.step()

            if update % train_stats_at == 0 and update > 0:
                # log training loss
                writer.add_scalar(tag="training/loss",
                                  scalar_value=loss.cpu(),
                                  global_step=update)

            if not eval_mode and update % validate_at == 0 and update > 0:
                # evaluate model on validation set, log parameters and metrics
                val_loss, metrics, metrics_pp = validate_model(
                    net, val_loader, classes, update, device, plotter)
                print(f'val_loss: {val_loss}')
                # Segment-based F-score / error rate, raw and post-processed.
                f_score = metrics['segment_based']['overall']['F']
                err_rate = metrics['segment_based']['overall']['ER']
                f_score_pp = metrics_pp['segment_based']['overall']['F']
                err_rate_pp = metrics_pp['segment_based']['overall']['ER']
                print(f'f_score: {f_score}')
                print(f'err_rate: {err_rate}')
                print(f'f_score_pp: {f_score_pp}')
                print(f'err_rate_pp: {err_rate_pp}')
                params = net.parameters()
                log_validation_params(writer, val_loss, params, metrics,
                                      metrics_pp, update)
                # Save best model for early stopping
                if val_loss < best_loss:
                    print(
                        f'{val_loss} < {best_loss}... saving as new {os.path.split(model_path)[-1]}'
                    )
                    best_loss = val_loss
                    torch.save(net, model_path)

            if eval_mode:
                # in eval mode, just compare train_loss
                train_loss = loss.cpu()
                if train_loss < best_loss:
                    print(
                        f'{train_loss} < {best_loss}... saving as new {os.path.split(model_path)[-1]}'
                    )
                    best_loss = train_loss
                    torch.save(net, model_path)

            # update progress and update-counter
            progress_bar.set_description(f"loss: {loss:7.5f}", refresh=True)
            progress_bar.update()
            update += 1
            if update >= n_updates:
                break

    progress_bar.close()
    print('finished training.')

    print('starting evaluation...')
    evaluator = evaluation.Evaluator(feature_type, scene, hyper_params,
                                     network_config, fft_params, model_path,
                                     device, writer, plotter)
    evaluator.evaluate()
    print('zipping "results" folder...')
    util.zip_folder('results', f'results_{feature_type}_{scene}')
bc = DirichletBC(FS, u_D, boundary) # Define variational problem F_1 = ((v - v_n) / k)*f_1*dx + epsilon_1*v*f_1*dx + kappa*grad(T)[0]*f_1*dx +\ A_1hat*((T - T_n) / k)*f_2*dx + a_T*grad(T)[0]*v*f_2*dx + M_s1*grad(v)[0]*f_2*dx - Q*f_2*dx F_2 = ((v - v_n) / k)*f_1*dx + epsilon_1*v*f_1*dx + beta*v*grad(v)[0]*f_1*dx + kappa*grad(T)[0]*f_1*dx +\ A_1hat*((T - T_n) / k)*f_2*dx + a_T*v*grad(T)[0]*f_2*dx + M_s1*grad(v)[0]*f_2*dx - Q*f_2*dx # Create VTK files for visualization output """vtkfile_v = File('qtcm1/velocity.pvd') vtkfile_T = File('qtcm1/temperature.pvd')""" pltr = Plotter(mesh, id_='4') # Solve the system for each time step t = 0 v_ = lambda y: v_n([y]) T_ = lambda y: T_n([y]) for n in range(num_steps): #pltr.plot(v_,'qtcm1/velocity/', n, t, quantity = 'velocity_42') pltr.plot(T_, 'qtcm1/velocity/', n, t, quantity='temp_43') t += dt # Solve variational problem for time step J = derivative(F_1, u) solve(F_1 == 0, u, bc, J=J) # Save solution to file (VTK) """_v, _T = u.split() vtkfile_v << (_v, t)
# Hard-sync targets with their online networks (soft_tau=1.0 => full copy).
soft_update(value_net1, target_value_net1, soft_tau=1.0)
soft_update(value_net2, target_value_net2, soft_tau=1.0)
soft_update(perturbator_net, target_perturbator_net, soft_tau=1.0)

# optim.Adam can be replaced with RAdam
# NOTE(review): value_optimizer2 uses params['perturbator_lr'] while
# perturbator_optimizer uses params['value_lr'] — these look swapped; confirm
# against the intended configuration.
value_optimizer1 = optimizer.Ranger(value_net1.parameters(),
                                    lr=params['value_lr'], k=10)
value_optimizer2 = optimizer.Ranger(value_net2.parameters(),
                                    lr=params['perturbator_lr'], k=10)
perturbator_optimizer = optimizer.Ranger(perturbator_net.parameters(),
                                         lr=params['value_lr'],
                                         weight_decay=1e-3, k=10)
generator_optimizer = optimizer.Ranger(generator_net.parameters(),
                                       lr=params['generator_lr'], k=10)

# Loss history consumed by the Plotter (two panes: generator alone, then
# value/perturbator together).
loss = {
    'train': {'value': [], 'perturbator': [], 'generator': [], 'step': []},
    'test': {'value': [], 'perturbator': [], 'generator': [], 'step': []},
}
plotter = Plotter(loss, [['generator'], ['value', 'perturbator']])

for epoch in range(n_epochs):
    print("Epoch: {}".format(epoch+1))
    for batch in env.train_dataloader:
        # One BCQ update per batch; returned dict is logged and plotted.
        loss = bcq_update(batch, params, writer, debug, step=step)
        plotter.log_losses(loss)
        step += 1
        print("Loss:{}".format(loss))
        if step % plot_every == 0:
            print('step', step)
            test_loss = run_tests(env, params, writer, debug)
            print(test_loss)
            plotter.log_losses(test_loss, test=True)
            plotter.plot_loss()
def _run_figure(plotter, description, generator, n, make_executor, generations,
                export_path, init_a=0, init_b=0):
    """Run one coevolution experiment and export its plot.

    Factors out the per-figure boilerplate of ``run``.

    Args:
        plotter: Plotter used to render the finished experiment.
        description: Figure title; printed and used as the plot's fig_name.
        generator: Generator that produces both populations.
        n: Population size.
        make_executor: Zero-argument callable building the Coevolution
            executor. It is invoked *after* the populations are generated so
            any RNG consumption happens in the same order as the original
            inline code (important when a fixed seed is in use).
        generations: Number of generations to evolve.
        export_path: Destination file for the exported plot.
        init_a: Initial value for population A (default 0, as in most figures).
        init_b: Initial value for population B (default 0; figure 1 uses 100).
    """
    print(description)
    pop_a = generator.population(n, init_a)
    pop_b = generator.population(n, init_b)
    executor = make_executor()
    executor.run(pop_a, pop_b, generations)
    plotter.make_plot(
        executor,
        fig_name=description,
        export_path=export_path,
    )


def run():
    """Reproduce the report's coevolution figures 1-5 and the extension sweep.

    Each enabled figure seeds the RNG (optionally with a fixed seed so runs
    are reproducible), evolves two populations against each other, and exports
    the resulting plot under ``export_folder``. The repeated per-figure
    machinery lives in ``_run_figure``.
    """
    plotter = Plotter(show_avgs=True, plot_elites=False)

    # Feature flags: set any of these to 0 to skip (re)generating that figure.
    fig_1 = 1
    fig_2 = 1
    fig_3 = 1
    fig_4 = 1
    fig_5 = 1
    extension = 1
    use_fixed_seed = 1
    enable_hof = 0
    enable_virulence = 0

    generations = 600
    export_folder = "../report/plots/"

    generator_a = Generator(100, 1)
    generator_b = Generator(10, 10)
    generator_c = Generator(50, 2)
    mutator = Mutator(mutation_rate=0.005, bit_flip=False)
    selector = FitnessProportionateSelection()
    n = 25

    hof = HOF(scorer=Scorer(sample_size=10), size=50) if enable_hof else None

    # Wrap selector with virulence handling
    if enable_virulence:
        selector = VirulenceSelector(selector, 0.75, normalise=True)

    if fig_1:
        seed = seed_random(0, use_fixed_seed)
        _run_figure(
            plotter,
            "Figure 1 : [Seed {}]".format(seed),
            generator_a,
            n,
            lambda: Coevolution(scorer=F0Scorer(), selector=selector),
            generations,
            os.path.join(export_folder, "fig1.png"),
            init_b=100,  # figure 1 starts population B at 100, not 0
        )

    if fig_2:
        seed = seed_random(1, use_fixed_seed)
        _run_figure(
            plotter,
            "Figure 2 : [Seed {}]".format(seed),
            generator_a,
            n,
            lambda: Coevolution(mutator=mutator, hof=hof, selector=selector),
            generations,
            os.path.join(export_folder, "fig2.png"),
        )

    if fig_3:
        seed = seed_random(8486058433753192762, use_fixed_seed)
        _run_figure(
            plotter,
            "Figure 3 : [Seed {}]".format(seed),
            generator_a,
            n,
            lambda: Coevolution(
                mutator=mutator,
                scorer=Scorer(sample_size=1),
                hof=hof,
                selector=selector,
            ),
            generations,
            os.path.join(export_folder, "fig3.png"),
        )

    if fig_4:
        seed = seed_random(59759543964706904, use_fixed_seed)
        _run_figure(
            plotter,
            "Figure 4 : [Seed {}]".format(seed),
            generator_b,
            n,
            lambda: Coevolution(mutator=mutator, hof=hof, selector=selector),
            generations,
            os.path.join(export_folder, "fig4.png"),
        )

    if fig_5:
        seed = seed_random(5706501168717675099, use_fixed_seed)
        _run_figure(
            plotter,
            "Figure 5 : [Seed {}]".format(seed),
            generator_c,
            n,
            lambda: Coevolution(
                mutator=mutator,
                scorer=Scorer(intransitive=True),
                hof=hof,
                selector=selector,
            ),
            generations,
            os.path.join(export_folder, "fig5.png"),
        )

    if extension:
        # The extension always uses a hall of fame and doubles the run length.
        hof = HOF(scorer=Scorer(sample_size=10), size=50)
        generations = 1200
        fp_selector = FitnessProportionateSelection()
        sus_selector = StochasticUniversalSampling()
        t_selector = TournamentSelection(3)

        # Each entry: (label, export file name, selector, use hall of fame).
        # Selectors are constructed eagerly, before any of the extension runs
        # are seeded, matching the original construction order.
        ext_cfgs = [
            (
                "Virulence [0.5]",
                "fig_5_v0.5.png",
                VirulenceSelector(fp_selector, 0.5),
                False,
            ),
            (
                "Virulence [0.75]",
                "fig_5_v0.75.png",
                VirulenceSelector(fp_selector, 0.75),
                False,
            ),
            (
                "Virulence [0.75] + SUS",
                "fig_5_v0.75_sus.png",
                VirulenceSelector(sus_selector, 0.75),
                False,
            ),
            (
                "Virulence [0.75] + TS",
                "fig_5_v0.75_ts.png",
                VirulenceSelector(t_selector, 0.75),
                False,
            ),
            (
                "Virulence [0.75] + SUS + HOF",
                "fig_5_v0.75_sus_hof.png",
                VirulenceSelector(sus_selector, 0.75),
                True,
            ),
            (
                "Virulence [0.75] + HOF",
                "fig_5_v0.75_hof.png",
                VirulenceSelector(fp_selector, 0.75),
                True,
            ),
            ("HOF", "fig_5_hof.png", fp_selector, True),
        ]

        for cfg_txt, export_name, selector, use_hof in ext_cfgs:
            # Every configuration is re-seeded identically for comparability.
            seed = seed_random(8985012493578745191, use_fixed_seed)
            _run_figure(
                plotter,
                "Figure 5 - {} : [Seed {}]".format(cfg_txt, seed),
                generator_c,
                n,
                # Lambda is called within this iteration, so the late-bound
                # loop variables (selector, use_hof) hold the current values.
                lambda: Coevolution(
                    mutator=mutator,
                    scorer=Scorer(intransitive=True),
                    selector=selector,
                    hof=hof if use_hof else None,
                ),
                generations,
                os.path.join(export_folder, export_name),
            )