def plot_fit_history(history):
    """Plot the training loss and accuracy curves as terminal graphs.

    :param history: Keras ``History`` object (the return value of
        ``model.fit``); the ``'loss'`` and ``'categorical_accuracy'``
        series are read from ``history.history``.

    Values are scaled by 1000 and truncated to ints so that termplot
    has integer magnitudes to draw. Fix: the original overwrote the
    lists inside ``history.history`` in place, destroying the caller's
    float data; we now scale into fresh lists instead.
    """
    print()
    print(" **************** ")
    print(" *** Loss *** ")
    print(" **************** ")
    # Build a scaled copy rather than mutating history.history['loss'].
    loss = [int(value * 1000) for value in history.history['loss']]
    termplot.plot(np.asarray(loss).transpose())
    print()
    print(" **************** ")
    print(" *** Accuracy *** ")
    print(" **************** ")
    accuracy = [int(value * 1000)
                for value in history.history['categorical_accuracy']]
    termplot.plot(np.asarray(accuracy).transpose())
def plot_training_log():
    """Render the games-played history stored in gameslog.txt.

    Uses matplotlib (non-blocking) when the module-level flag
    ``draw_graphics_with_matplot`` is set; otherwise falls back to a
    terminal plot, skipping scalar (0-d) data termplot cannot draw.
    """
    history = np.loadtxt('gameslog.txt')

    if draw_graphics_with_matplot:
        plt.ion()
        plt.plot(history)
        plt.show()
        plt.pause(0.01)
        return

    # np.loadtxt yields a 0-d array for a single value; only plot arrays
    # that actually have a dimension to iterate over.
    if history.ndim > 0:
        termplot.plot(history)
def train():
    """Train the DQN trading agent for EPISODES episodes.

    Each episode streams one randomly chosen CSV through the trading
    environment, lets the agent act step by step, replays experience
    once enough memory has accumulated, prints a colorized episode
    summary, and checkpoints the model to ./agents/save/dqn.h5.

    Relies on module-level configuration: ``data_csvs``, ``el``, ``hl``,
    ``bss``, ``hs``, ``ts``, ``EPISODES``, ``batch_size``.
    """
    generator = CSVStreamer(data_csvs[int(
        np.random.randint(len(data_csvs), size=1))])
    env = TradingEnv(data_generator=generator,
                     episode_length=el,
                     trading_fee=0.01,
                     time_fee=0.001,
                     history_length=hl,
                     s_c1=1,
                     s_c2=0,
                     buy_sell_scalar=bss,
                     hold_scalar=hs,
                     timeout_scalar=ts)
    state_size = env.observation_shape[1]
    action_size = env.action_space
    agent = DQNAgent(state_size, action_size)
    done = False
    total_steps = 0
    performance = []
    # Resume from a previous checkpoint when one exists; a missing or
    # incompatible file is non-fatal and training starts fresh.
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt.
    try:
        agent.load('./agents/save/dqn.h5')
        agent.epsilon = 0.01
        print('SUCCESSFULLY LOADED')
    except Exception:
        print('FAILED TO LOAD')
    for e in range(EPISODES):
        # Fresh random data source for every episode.
        generator = CSVStreamer(data_csvs[int(
            np.random.randint(len(data_csvs), size=1))])
        env.set_generator(generator)
        obs = env.reset()
        # overwrite memory
        agent.fine_dining_and_breathing(obs)
        state = agent.consider(obs)
        print("\n\nepisode: {:6}/{:6}|e: {:3.2}|source: {}".format(
            e, EPISODES, agent.epsilon, generator.filename))
        pbar = tqdm(total=generator.file_length, desc='Running episode')
        while not done:
            action = agent.act(state)
            new_obs, reward, done, _ = env.step(action)
            next_state = agent.consider(new_obs)
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            pbar.update(1)
        pbar.close()
        # Green: traded more than chance and ended in profit.
        # Red: traded more than chance and ended at a loss
        # (at exactly 1.0, the red assignment wins since it runs last).
        trade_count = env.action_history[:].count(0)
        color = "\033[0;32m" if (trade_count > agent.random_trades) and (
            env.total_value >= 1.0) else "\033[0;0m"
        color = "\033[0;31m" if (trade_count > agent.random_trades) and (
            env.total_value <= 1.0) else color
        total_steps += env.iteration
        print(
            "{}steps: {:5}|memory: {:12,}|total reward: {:10.8}|total value: {:5.3}|trade: {:2}|hold: {:5}|random: t({})/h({}) \033[0;0m"
            .format(
                color,
                env.iteration,
                len(agent.memory),
                float(env.total_reward),
                float(env.total_value),
                env.action_history[:].count(0),
                env.action_history[:].count(1),
                agent.random_trades,
                agent.random_holds))
        # Plot the relative trade locations; best-effort only, a drawing
        # failure must not abort training (was bare `except:`).
        try:
            print('\n Trades:')
            termplot.plot(list(
                np.histogram(
                    [idx for idx, x in enumerate(env.action_history)
                     if x == 0],
                    bins=100)[0]),
                plot_height=10,
                plot_char='*')
        except Exception:
            pass
        # Replay only once the buffer is warm. Fix: the original bound
        # the (unused) return value to `history`.
        if len(agent.memory) >= 10_000:
            agent.replay_all(batch_size)
            agent.resetENV()
        performance.append(env.total_reward)
        try:
            print('\n Reward:')
            termplot.plot(performance[-150:], plot_height=10, plot_char='=')
        except Exception as ex:
            print(ex)
        done = False
        # Checkpoint every episode (the original `if e % 1 == 0` guard
        # was vacuously true).
        print('\nSAVING')
        print('LAST PREDICTION', agent.predict(state))
        agent.update_epsilon(total_steps)
        agent.save('./agents/save/dqn.h5')
        print('\n')
import termplot

# NOTE(review): flat script fragment — reads 30 comma-separated sensor
# lines from a serial port, adds Gaussian noise, and plots both signals.
# `serial`, `load_model`, and `np` are imported elsewhere in the file.
ser = serial.Serial('/dev/cu.usbmodem14101', 9600)
array = []
# Model is loaded here but not used in this visible fragment —
# presumably fed `pred` further down; verify against the rest of the file.
model = load_model("model.h5")
for i in range(0, 30):
    # One CSV line from the device, e.g. "12,34,56" -> [12, 34, 56].
    v = (ser.readline()).decode('utf-8').rstrip("\n").split(",")
    v = [int(x, 10) for x in v]
    print("Clean signal is: ", v)
    termplot.plot(v)
    # Zero-mean Gaussian noise with sigma=500.
    mean, sigma = 0, 500
    # NOTE(review): noise is fixed at 8 samples — assumes each serial
    # line carries exactly 8 values; a different length would raise a
    # broadcast error on the addition below. TODO confirm.
    noise = np.random.normal(mean, sigma, [8])
    print("Noise is: ", noise)
    # list + ndarray broadcasts to an ndarray of noisy values.
    signal = v + noise
    print("Signal is: ", signal)
    termplot.plot(signal)
    # NOTE(review): `array` is replaced (not appended to) each iteration,
    # so only the last signal survives the loop — confirm this is intended.
    array = [signal]
    print("")
pred = np.array(array)
def fit(self, X, eps_0='auto', epsilon_start_factor=0.2, percent_noise=10,
        min_pts_decrease_factor=0.9, eta=0.1, verbose=0):
    """Run the hierarchical VDBSCAN clustering on feature matrix X.

    Repeatedly shrinks eps by a factor of (1 - eta) and re-runs DBSCAN
    inside each existing cluster, accepting splits via the separation
    criterion, until the structure is stable for `self.max_non_changes`
    levels or `self.max_level` is reached.

    :param X: feature matrix of shape (n_samples, n_features).
    :param eps_0: starting eps, or 'auto' to derive it from the mean
        distance of points to the dataset centroid.
    :param epsilon_start_factor: fraction of that mean distance used
        when eps_0 == 'auto'.
    :param percent_noise: noise percentage above which minPts is reduced
        and the fit restarts recursively.
    :param min_pts_decrease_factor: multiplier applied to minPts on such
        a restart.
    :param eta: per-level relative shrink rate of eps.
    :param verbose: 0 silent, higher values print progressively more.
    :return: self, with `labels_` set (cluster ids ordered by size,
        noise = -1).
    """
    if verbose:
        print('-------------------------------------------------------')
        print('VDBSCAN Algorithm')
        print('-------------------------------------------------------')
        print(' Feature Matrix -> ' + str(X.shape[0]) + 'x' + str(X.shape[1]))
        print('-------------------------------------------------------')
        print(' - Kappa = ' + str(self.kappa))
        print(' · Eps_0 = ' + str(eps_0))
        print(' · Eta_Eps = ' + str(eta))
        print('-------------------------------------------------------')
        print('Hierarchical iterations in progress...')
    self.eta = eta
    # Compute eps_0 as epsilon_start_factor (default 20%) of the mean
    # distance between the dataset centroid and all points.
    if eps_0 == 'auto':
        c = np.mean(X, axis=0)
        self.eps = (epsilon_start_factor * np.mean(self.dists_p2set(c, X)))
    else:
        self.eps = eps_0
    # Start with a single cluster holding every sample (label 0).
    self.n_clusters = 1
    self.size_dataset = X.shape[0]
    self.y = np.zeros(X.shape[0]).astype(int)
    finished = False
    current_level = 0
    non_changes = 0          # consecutive levels with unchanged labels
    ncluster_ev = [1]        # evolution of the cluster count per level
    while not(finished):
        y_new = copy(self.y)
        # Shrink the neighborhood radius for this level.
        self.eps = self.eps * (1 - self.eta)
        current_level += 1
        # NOTE(review): `elif verbose > 2` is unreachable — any
        # verbose > 2 already satisfies `verbose > 1` above, so the
        # detailed per-level report below never prints. Likely the two
        # conditions were meant in the opposite order; confirm intent.
        if verbose > 1:
            print('Current level: ' + str(current_level) + '/' + str(self.max_level) + \
                  ' - eps = ' + str(self.eps) + ' ')
        elif verbose > 2:
            print()
            print('\n############################################################')
            print('Current level: ' + str(current_level) + '/' + str(self.max_level) + \
                  ' - eps = ' + str(self.eps) + ' ')
            if non_changes == 0:
                if current_level > 1:
                    print('Structure was altered in the last level!')
            elif non_changes == 1:
                print('Structured unchanged in the last level.')
            else:
                print('Structure unchanged in the last ' + str(non_changes) + ' levels.')
            print()
        # Try to split each current cluster with a tighter DBSCAN.
        for i in range(self.n_clusters):
            if verbose > 2:
                print('Clusters analysed: ' + str(i) + '/' + \
                      str(self.n_clusters) + ' - ' + \
                      str(100 * i / self.n_clusters) + '%')
            Xcluster, this_idx = get_cluster(X=X, labels=self.y, clusterID=i)
            if self.metric == 'default':
                # Test-run DBSCAN and count the noise.
                db = DBSCAN(eps=self.eps, min_samples=self.minPts)
            else:
                db = DBSCAN(eps=self.eps, metric=self.dist_p2p, min_samples=self.minPts)
            db.fit(Xcluster)
            # Sub-cluster count, excluding the DBSCAN noise label (-1).
            this_n_clusters = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
            if this_n_clusters > 1:
                # A real split: keep it only if the separation criterion
                # accepts it (optionally isolation-aware).
                if self.isol:
                    this_labels = self.separation_criterion(X=Xcluster,
                                                            labels=sort_by_size(db.labels_),
                                                            isolation=self.isolation(i, X, self.y),
                                                            kappa=self.kappa,
                                                            verbose=verbose)
                else:
                    this_labels = self.separation_criterion(X=Xcluster,
                                                            labels=sort_by_size(db.labels_),
                                                            kappa=self.kappa,
                                                            verbose=verbose)
                # Remap sub-labels into globally unused cluster ids.
                this_labels = get_new_labels(this_labels, y_new)
                y_new[this_idx] = this_labels
                y_new = sort_by_size(y_new)
        if verbose > 2:
            print('Clusters analysed: ' + str(i+1) + '/' + str(self.n_clusters) + ' - 100%')
        n_clusters_before = len(set(y_new)) - (1 if -1 in y_new else 0)
        # Normalize ids, drop undersized clusters, normalize again.
        y_new = sort_by_size(y_new)
        y_new = prune_clusters(thisX=X, labels=y_new)
        y_new = sort_by_size(y_new)
        ## EVALUATE STOP CONDITION
        if np.array_equal(self.y, y_new):
            non_changes += 1
            if verbose > 1:
                print('Unchanged levels: ' + str(non_changes))
            if non_changes >= self.max_non_changes:
                finished = True
        else:
            # Accept the new partition and reset the stability counter.
            self.y = copy(y_new)
            self.n_clusters = len(set(self.y)) - (1 if -1 in self.y else 0)
            non_changes = 0
        # Stopping criterion (MAX LEVEL)
        if current_level >= self.max_level:
            finished = True
        if verbose:
            n_noise = np.sum(self.y == -1)
            # NOTE(review): this adaptive-minPts restart is gated on
            # `verbose`, so with verbose=0 it never fires — that looks
            # unintended; confirm. The recursive call also drops
            # verbose, percent_noise and min_pts_decrease_factor,
            # resetting them to defaults on restart.
            if 100*n_noise/self.y.shape[0] > percent_noise:
                print(percent_noise)
                print(str(100*n_noise/self.y.shape[0]))
                self.minPts = self.minPts * min_pts_decrease_factor
                print("MINPTS: " + str(self.minPts))
                return VDBSCAN.fit(self=self, X=X, eta=eta,
                                   epsilon_start_factor=epsilon_start_factor)
            ncluster_ev.append(self.n_clusters)
            if verbose > 2:
                print('\nNumber of clusters after level: ' +\
                      str(self.n_clusters) + ' // ' +\
                      str(n_clusters_before - self.n_clusters) +\
                      ' small clusters pruned.')
                print('Noise samples after level: ' + str(n_noise) +\
                      '(' + str(100*n_noise/self.y.shape[0]) + '%)')
                print('\n############################################################')
            elif verbose > 1:
                print('Nº of clusters after level: ' +\
                      str(self.n_clusters) + ' // ' +\
                      str(n_clusters_before - self.n_clusters) +\
                      ' pruned // ' +\
                      'Noise at ' + str(100*n_noise/self.y.shape[0]) + '%')
    # Set final labels (order by size and remove gaps)
    self.labels_ = sort_by_size(self.y)
    if verbose > 1:
        print('\n############################################################')
        print(' Evolution of number of clusters:')
        print('############################################################\n')
        tplt.plot(ncluster_ev, plot_height=15, plot_char='.')
        print()
    if verbose:
        print('-------------------------------------------------------')
        print('Algorithm complete!')
        print('-------------------------------------------------------')
        print('-------------------------------------------------------')
    return self
def train():
    """Train the noisy DQN trading agent for EPISODES episodes.

    Variant of the trainer that uses a temporal window and a noisy
    agent (noise_level=0.05). Each episode streams one randomly chosen
    CSV through the environment, replays experience when the buffer
    exceeds batch_size, and checkpoints every 5 episodes to
    ./agents/save/dqn.h5.

    Relies on module-level configuration: ``data_csvs``, ``el``, ``hl``,
    ``bss``, ``hs``, ``ts``, ``tws``, ``EPISODES``, ``batch_size``.
    """
    generator = CSVStreamer(data_csvs[int(np.random.randint(len(data_csvs), size=1))])
    env = TradingEnv(
        data_generator=generator,
        episode_length=el,
        trading_fee=0.01,
        time_fee=0.001,
        history_length=hl,
        s_c1=1,
        s_c2=0,
        buy_sell_scalar=bss,
        hold_scalar=hs,
        timeout_scalar=ts,
        temporal_window_size=tws
    )
    state_size = env.observation_shape[1]
    action_size = env.action_space
    agent = DQNAgent(state_size, action_size, noise_level=0.05)
    done = False
    total_steps = 0
    # Resume from a checkpoint when available; failure is non-fatal.
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt.
    try:
        agent.load('./agents/save/dqn.h5')
        agent.epsilon = 1.0
        print('SUCCESSFULLY LOADED')
    except Exception:
        print('FAILED TO LOAD')
    for e in range(EPISODES):
        # Fresh random data source for every episode.
        generator = CSVStreamer(data_csvs[int(np.random.randint(len(data_csvs), size=1))])
        env.set_generator(generator)
        state = env.reset()
        print("\n\nepisode: {:6}/{:6}|e: {:3.2}|source: {}".format(e, EPISODES, agent.epsilon, generator.filename))
        pbar = tqdm(total=generator.file_length, desc='Running episode')
        while not done:
            action = agent.act(state)
            next_state, reward, done, _ = env.step(action)
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            pbar.update(1)
        pbar.close()
        # Green: traded more than chance and ended in profit; red:
        # traded more than chance and ended at a loss (red wins at 1.0).
        trade_count = env.action_history[:].count(0)
        color = "\033[0;32m" if (trade_count > agent.random_trades) and (env.total_value >= 1.0) else "\033[0;0m"
        color = "\033[0;31m" if (trade_count > agent.random_trades) and (env.total_value <= 1.0) else color
        total_steps += env.iteration
        print(
            "{}steps: {:5}|memory: {:9,}|total reward: {:10.8}|total value: {:5.3}|trade: {:2}|hold: {:5}|random: t({})/h({}) \033[0;0m".format(
                color,
                env.iteration,
                len(agent.memory),
                float(env.total_reward),
                float(env.total_value),
                env.action_history[:].count(0),
                env.action_history[:].count(1),
                agent.random_trades,
                agent.random_holds
            )
        )
        # Plot a tanh-squashed histogram of the episode rewards;
        # best-effort only — a drawing failure must not abort training.
        try:
            termplot.plot([np.tanh(x) for x in list(np.histogram(env.reward_history, bins=100)[0])],
                          plot_height=10, plot_char='*')
        except Exception:
            # Fix: message was misspelled 'DAILURE'.
            print('FAILURE')
        if len(agent.memory) > batch_size:
            agent.replay_all(batch_size)
        done = False
        # Checkpoint every 5th episode.
        if e % 5 == 0:
            print('\nSAVING')
            print('LAST PREDICTION', agent.predict(state))
            agent.update_epsilon(total_steps)
            agent.save('./agents/save/dqn.h5')
            print('\n')
# NOTE(review): this fragment starts mid-way through what appears to be
# a cell-simulation loop (the loop header, and the definitions of
# `celldata`, `alive_cells`, `i`, `time`, `max_cells`, `alive_graph`,
# and `csv_data`, are outside this view) — the reconstructed indentation
# below is a best guess; verify against the full file.
if celldata[2]:
    # Cell divides: replace the parent with two children.
    # `celldata[2]` presumably flags "ready to split" — TODO confirm.
    alive_cells.pop()
    alive_cells.append(celllib.Cell(i, sys.argv[3], sys.argv[4]))
    alive_cells.append(celllib.Cell(i, sys.argv[3], sys.argv[4]))
i += 1
time += 1
max_cells += 2
# `end="\r"` keeps the status on a single, continuously updated line.
print(f"Alive: {len(alive_cells)} | Max: {max_cells}", end="\r")
print("The cells lasted for " + str(time) + " units of time")
# print(alive_graph)
print("Cells Over Time:")
termplot.plot(alive_graph)
# First CSV drops each row's leading column (graphing-friendly form).
print("Exporting CSV for Graphing")
with open("./csv/output-graphing.csv", "w") as outfile:
    writer = csv.writer(outfile, delimiter=',', quotechar='|',
                        quoting=csv.QUOTE_MINIMAL)
    for iteration in csv_data:
        writer.writerow(iteration[1:])
# Second CSV keeps every column.
print("Exporting Normal CSV")
with open("./csv/output.csv", "w") as outfile:
    writer = csv.writer(outfile, delimiter=',', quotechar='|',
                        quoting=csv.QUOTE_MINIMAL)
    for iteration in csv_data:
        writer.writerow(iteration)
print("Generating MatPlotLib Graph")
import matplotlib