def sim2(systems):
    """Run Simulation 2: train ten fresh SOMs on the music data for *systems*.

    Each run writes its training log to ../LogFiles/Sim2/<systems>/log<i>.json
    and reports the resulting key mapping via output_results().
    """
    keynames, keys, data = prep_data('M', systems)
    for run in range(10):
        # A brand-new 30x30 map (12 pitches, learning rate 0.25) per run.
        som = SOM(30, 30, 12, .25, systems, 'M')
        som.train(100, data, f'../LogFiles/Sim2/{systems}/log{run}.json')
        output_results(som, keynames, keys)
def main():
    """Control panel: configure and launch a single SOM training experiment."""
    # ---- control panel ------------------------------------------------------
    MODE = 'K'       # 'K' to train on musical keys, 'M' to train on music datasets
    SYSTEMS = 'H'    # Include 'W' for Western system, 'C' for Chinese system, 'H' for Hindustani system
    MODE2 = 'M'
    SYSTEMS2 = 'HC'
    width = 30
    height = 30
    iterations = 100
    learning_rate = .25
    pitch_count = 12
    # -------------------------------------------------------------------------

    # Generate the model and run the training/test cycle.
    som = SOM(width, height, pitch_count, learning_rate, SYSTEMS, MODE)
    genTest(som, iterations, MODE, SYSTEMS)
def sim3(mode1, mode2, sys1, sys2):
    """Run Simulation 3a: fine-tune pre-trained Sim1 maps on a new system.

    For each of the ten Sim1 runs for *sys1*, load the final node state,
    build a SOM seeded with those nodes, freeze its radius and shrink its
    learning rate, then continue training on the *sys2* dataset (mode *mode2*).

    Parameters
    ----------
    mode1 : str
        Mode of the original Sim1 training. NOTE(review): currently unused
        in this body; kept for interface compatibility with callers.
    mode2 : str
        Mode for the continued training ('K' or 'M' by convention elsewhere).
    sys1 : str
        System whose Sim1 logs are loaded.
    sys2 : str
        System(s) to continue training on; also names the Sim3a output dir.
    """
    for i in range(10):
        # Fix: close the log file deterministically (original leaked the
        # handle) and avoid shadowing the `json` module with a local name.
        with open(f'../LogFiles/Sim1/{sys1}/log{i}.json', 'r') as f:
            log = jlb(f)
        # Seed the new map with the final node state of the Sim1 run.
        nodes = scipy.array(log['states'][-1])
        som = SOM(30, 30, 12, .25, sys2, mode2, nodes)
        # Fix radius and learning rate so the pre-trained map is only
        # fine-tuned, not retrained from scratch.
        som.learning_rate /= 100.
        som.radius = 1.
        som.fixedR = True
        som.fixedLR = True
        # Update dataset to include the new system(s).
        keynames, keys, data = prep_data(mode2, sys2)
        som.train(100, data, f'../LogFiles/Sim3a/{sys2}/log{i}.json')
        output_results(som, keynames, keys)
def score_all():
    """Recompute scores for every Sim1 log file and rewrite each file in place.

    For each system ('W', 'C', 'H') and each log file under
    ../LogFiles/Sim1/<system>/, replay every recorded node state through a
    fresh SOM, collect the scores, and overwrite the file with both the
    original states and the new scores.
    """
    for system in 'WCH':
        log_dir = '../LogFiles/Sim1/' + system
        for file_name in os.listdir(log_dir):
            with open(log_dir + '/' + file_name, 'r+') as f:
                data = jlb(f)
                states = data['states']
                som = SOM(30, 30, 12, .25, system, 'M')
                # Replay each saved state so the SOM accumulates a score log.
                for state in states:
                    som.set_nodes(np.array(state))
                    som.score()
                # Rewrite the file in place with states plus fresh scores.
                f.seek(0)
                json.dump({'states': states, 'scores': som.log['scores']}, f)
                f.truncate()
# ---- Toyset -----------------------------------------------------------------
# Build a small random dataset, save it to disk, train a SOM on it, and plot
# the resulting weight map as colours.
feat_nr = 3  # number of features per sample (presumably RGB channels)
# Fix: use feat_nr for the feature dimension instead of a hard-coded 3,
# so the two stay consistent if the feature count is changed.
data = np.random.randint(0, 255, (100, feat_nr))
np.save("./data/toydata.npy", data)
data_path = "./data/toydata.npy"

##################
# set parameters #
##################
trial = "toytest"
log_path = "./data/"
num_steps = 100
x = 10       # map width
y = 10       # map height
gpu = None   # no GPU selected

# get the model
som = SOM(trial, x, y, num_steps, data_path, log_path, gpu=gpu, norm=True, learning_rate=0.1)
# train the model
som.train()
# get the trained map and visualise it
net = som.get_weights()
plot_colour(net)
y = dp.convertTextTarget(y, alias) #dump_result(output_path + 'accidents.csv', np.array(alias), ['accident']) print('Accident types: ', alias) # Standardize data X = ds.standardize(X) print('======== Training ============') # ============================================ # # Train SOM m = 20 n = 20 n_iter = 15000 som = SOM(m, n, len(col_name) - 1, n_iter, save=wd + 'checkpoint/', restore=wd + 'checkpoint/') som.train(X, checkpoint_len=2) # ============================================ # # Get cluster grid formed by SOM cluster_grid = som.get_centroids() cluster_grid = np.array(cluster_grid) cluster_grid = np.reshape(cluster_grid, (m * n, len(col_name) - 1)) cluster_grid = ds.inverse_transform(cluster_grid) # Dump data to plot on R dump_result(output_path + 'cluster_grid.csv', cluster_grid, col_name[:-1]) cluster_grid = np.reshape(cluster_grid, (m, n, len(col_name) - 1))
################## trial = "100x100" data_path = "./data/instances.npy" #log_path = "~/cache/tensorboard-logdir/jan" log_path = "./data/" num_steps = 100 x = 50 y = 50 gpu = None #get the model som = SOM(trial, x, y, num_steps, data_path, log_path, gpu=gpu, norm=True, learning_rate=0.1) #train the model som.train() # get the trained map #net = som.get_weights() ### save/load weights som.save_weights() #som.load_weights("./data/test.npy")