def main():
    """Train an echo state network on a sine wave and plot train/predict output.

    Relies on module-level constants (T, NUM_TIME_STEPS, AMPLITUDE, RATIO_TRAIN,
    NUM_*_NODES, LEAK_RATE) and the project InputGenerator / ReservoirNetWork.
    """
    generator = InputGenerator(0, T, NUM_TIME_STEPS)
    series = generator.generate_sin(amplitude=AMPLITUDE)

    train_len = int(len(series) * RATIO_TRAIN)
    model = ReservoirNetWork(
        inputs=series[:train_len],
        num_input_nodes=NUM_INPUT_NODES,
        num_reservoir_nodes=NUM_RESERVOIR_NODES,
        num_output_nodes=NUM_OUTPUT_NODES,
        leak_rate=LEAK_RATE,
    )
    model.train()  # fit the output weights on the training segment
    trained = model.get_train_result()

    # Free-run prediction over the remaining samples.
    predicted = model.predict(len(series) - train_len)

    t = np.linspace(0, T, NUM_TIME_STEPS)
    plt.plot(t, series, label="inputs")
    plt.plot(t[:train_len], trained, label="trained")
    plt.plot(t[train_len:], predicted, label="predicted")
    # Vertical line marks the border between training and prediction.
    plt.axvline(x=int(T * RATIO_TRAIN), label="end of train", color="green")
    plt.legend()
    plt.title("Echo State Network Sin Prediction")
    plt.xlabel("time[ms]")
    plt.ylabel("y(t)")
    plt.show()
    print(model.test())
def main():
    """Train an ESN on a logistic-map series (offline init, then online
    updates) and plot inputs, training fit, and free-run prediction.
    """
    generator = InputGenerator(0, T, NUM_TIME_STEPS)
    series = generator.generate_logistic()

    train_len = int(len(series) * RATIO_TRAIN)
    train_segment = series[:train_len]
    print("number of training is" + str(train_len))
    print(dt)

    model = ReservoirNetWork(
        inputs=train_segment,
        teacher=train_segment,
        num_input_nodes=NUM_INPUT_NODES,
        num_reservoir_nodes=NUM_RESERVOIR_NODES,
        num_output_nodes=NUM_OUTPUT_NODES,
        leak_rate=LEAK_RATE,
    )
    model.offline_training()  # initialize output weights via offline learning
    model.online_training()   # then refine the weights online
    trained = model.get_train_result()

    # Predict the part of the series that was held out of training.
    predicted = model.predict(length_of_sequence=len(series) - train_len)

    t = np.linspace(0, T, NUM_TIME_STEPS)
    plt.plot(t, series, label="inputs")
    plt.plot(t[:train_len], trained, label="trained")
    plt.plot(t[train_len:], predicted, label="predicted")
    # Border of train and prediction.
    plt.axvline(x=int(T * RATIO_TRAIN), label="end of train", color="red")
    plt.legend()
    plt.title("Echo State Network Logistic Prediction"
              + "//Number of Reservoir Nodes " + str(NUM_RESERVOIR_NODES))
    plt.xlabel("time")
    plt.ylabel("y(t)")
    plt.show()
def __init__(self, file):
    """Load PDB identifiers from *file* and precompute the dataset records.

    Parameters
    ----------
    file : str
        Path to a text file of PDB identifiers
        (e.g. '../inputs/train.txt' or '../inputs/pdb_identifiers.txt').
    """
    super(ProteinDataset, self).__init__()
    self.pdb_identifiers = Utility.get_pdb_identifiers(file)
    print(len(self.pdb_identifiers), " proteins in hand")
    self.input_generator = InputGenerator()
    # Build all input/output pairs up front.
    self.records = self.generate_input_output_sets()
def main():
    """Train an ESN online on Lorenz-system data and show a 3-D result figure.

    Uses module-level constants (T, NUM_TIME_STEPS, RATIO_TRAIN, dt,
    NUM_*_NODES, LEAK_RATE) and the project InputGenerator / ReservoirNetWork.
    """
    i_gen = InputGenerator(0, T, NUM_TIME_STEPS)
    data = i_gen.generate_lorentz()

    num_train = int(len(data) * RATIO_TRAIN)
    train_data = data[:num_train]
    print("number of training is"+ str(num_train))
    print(dt)

    model = ReservoirNetWork(
        inputs=train_data,
        teacher=train_data,
        num_input_nodes=NUM_INPUT_NODES,
        num_reservoir_nodes=NUM_RESERVOIR_NODES,
        num_output_nodes=NUM_OUTPUT_NODES,
        leak_rate=LEAK_RATE,
    )
    # model.offline_training()  # initialize output weights offline
    model.online_training()  # update weights online

    # Fetch the training result once (the original fetched it twice).
    train_result = model.get_train_result()
    num_predict = len(data) - num_train
    predict_result = model.predict(num_predict)
    # print(train_result)

    ## plot
    fig = plt.figure()
    # Axes3D(fig) no longer auto-attaches axes to the figure in modern
    # matplotlib; add the 3-D axes explicitly (matches the sibling Lorenz
    # script that already uses add_subplot(projection='3d')).
    ax = fig.add_subplot(111, projection='3d')
    # ax.plot(train_result[0,:], train_result[1,:], train_result[2,:])
    plt.show()
def processParams(self, params):
    """Assemble training components from the *params* dict.

    Returns a tuple (generator, model, loss, optimizer, callbacks).
    """
    # Directory where this experiment's artifacts are stored.
    directory = self.setupDir(params)

    # Data generator over the ground-truth source.
    generator = InputGenerator(
        self.gt,
        chunk_size=params["nb_timesteps"],
        batch_size=params["batch_size"],
    )

    # Underlying Keras model of the RNN wrapper.
    model = RNN(params).model

    # Weighted multi-output loss.
    loss = CustomLoss(
        lambda_roll=params["lambda_roll"],
        lambda_pitch=params["lambda_pitch"],
        lambda_yaw=params["lambda_yaw"],
        lambda_thrust=params["lambda_thrust"],
        loss_func=params["loss_func"],
    )

    # Shrink the learning rate when validation loss plateaus.
    decay = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2,
                              verbose=1, min_lr=1e-6)

    optimizer = Adam(
        lr=params["lr"],
        decay=params["decay"],
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-08,
    )

    # Checkpoint every epoch and print the current learning rate.
    mcp = ModelCheckpoint(
        directory + "/model.{epoch:03d}.h5",
        verbose=1,
        save_weights_only=False,
    )
    lrp = PrintLR()
    #lrs = LearningRateScheduler(step_decay)
    callbacks = [mcp, lrp, decay]

    return generator, model, loss, optimizer, callbacks
def run_tests(self): is_input_consistent_str = 'consistent' if not self.is_input_consistent: is_input_consistent_str = 'in' + is_input_consistent_str print 'Start testing for %s input...\n' % is_input_consistent_str for dimension in self.dimensions: random_no_points_per_axis = np.random.randint(2, 12, dimension).tolist() no_points_per_axis = ' '.join( [str(no) for no in random_no_points_per_axis]) no_points_per_axis_display = ', '.join( [str(no) for no in random_no_points_per_axis]) print ' Testing for dimension %d with [%s] points on axis...' % \ (dimension, no_points_per_axis_display) for epsilon in self.epsilons: print ' Testing for epsilon: ', epsilon for from_poly in [True, False]: input_gen = InputGenerator(PATH_TO_FILE, self.is_input_consistent, dimension, no_points_per_axis, rand_points_axis=True, from_poly=from_poly, eps=epsilon) input_gen.generate_test_file() # Silence the print statements to stdout. with Utils.nostdout(): cons = Consistency(PATH_TO_FILE, input_gen, False, False, True) result = cons.solve_LP_problem() if result is not self.is_input_consistent: raise RuntimeError( 'Counterexample found, aborting. See the file %s ' 'for details.' % PATH_TO_FILE) print '\n...Finished testing. No counterexamples found.\n'
def main():
    """Parse CLI options, optionally generate a random test input file, and
    run the LP-based consistency check on it."""
    (options, args) = command_line_arguments()
    validate_options(options)

    generator = None
    # Build a fresh (in)consistent test file only when explicitly requested.
    if options.gen_cons_input or options.gen_incons_input:
        generator = InputGenerator(options.input_file,
                                   options.gen_cons_input is not None,
                                   options.dimension,
                                   options.no_points_per_axis,
                                   options.rand_points_axis,
                                   options.from_poly,
                                   options.epsilon)
        generator.generate_test_file()

    checker = Consistency(options.input_file, generator,
                          options.plot_surfaces, options.plot_rand_heights,
                          options.verbose)
    # Run the LP algorithm to decide consistency.
    checker.solve_LP_problem()
def main():
    """Train an ESN to map a sine wave to its sign and plot the prediction."""
    print(NUM_TIME_STEPS)
    generator = InputGenerator(0, T, NUM_TIME_STEPS)
    train_data = generator.generate_sin()
    num_train = int(len(train_data) * RATIO_TRAIN)
    print(train_data)

    # Teacher signal: the sign of the sine input (a square wave).
    target_data = np.sign(train_data)
    num_target = int(len(target_data) * RATIO_TRAIN)
    print(target_data)

    model = ReservoirNetWork(inputs=train_data,  # see ReservoirNetwork.py
                             outputs_target=target_data,
                             num_input_nodes=NUM_INPUT_NODES,
                             num_reservoir_nodes=NUM_RESERVOIR_NODES,
                             num_output_nodes=NUM_OUTPUT_NODES,
                             leak_rate=LEAK_RATE)
    model.train()
    train_result = model.get_train_result()

    # Free-run prediction over the held-out tail of the series.
    predict_result = model.predict(len(train_data) - num_target)

    t = np.linspace(0, T, NUM_TIME_STEPS)
    plt.plot(t, train_data, label="inputs")
    plt.plot(t[:num_train], train_result[:num_train], label="trained")
    plt.plot(t[num_target:], predict_result, label="predicted")
    # Border of train and prediction.
    plt.axvline(x=int(T * RATIO_TRAIN), label="end of train", color="green")
    plt.legend()
    plt.title("Echo State Network TEST Prediction")
    plt.xlabel("time[ms]")
    plt.ylabel("y(t)")
    plt.show()
def main():
    """Train an ESN on Lorenz-attractor data and scatter-plot the attractor.

    Uses module-level constants (T, dt, RATIO_TRAIN, NUM_*_NODES, LEAK_RATE)
    and the project InputGenerator / ReservoirNetWork classes.
    """
    i_gen = InputGenerator(0, T, dt)
    # presumably data is (3, n_steps) with x/y/z rows, given data[0..2] and
    # the .T below — verify against InputGenerator.generate_lorenz
    data = i_gen.generate_lorenz()

    # Train on the leading fraction of the time-major (transposed) series.
    num_train = int(len(data.T) * RATIO_TRAIN)
    train_data = data.T[:num_train]
    print(f"train_data_shape: {train_data.shape}")

    model = ReservoirNetWork(inputs=train_data,
                             num_input_nodes=NUM_INPUT_NODES,
                             num_reservoir_nodes=NUM_RESERVOIR_NODES,
                             num_output_nodes=NUM_OUTPUT_NODES,
                             leak_rate=LEAK_RATE)
    model.train()
    train_result = model.get_train_result()  # computed but not plotted yet

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(data[0], data[1], data[2], marker='o')
    # Fixed copy-pasted title: this figure shows Lorenz data, not a sine wave.
    ax.set_title("Echo State Network Lorenz Prediction")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    plt.show()
number_of_files = 10 base_var_num = 20 var_num = base_var_num clause_num = base_var_num * 3 loop_exp_factor = 1.3 num_of_loops = 5 for i in range(1, num_of_loops + 1): input_folder_name = 'inputs' + str(i) output_folder_name = 'outputs' + str(i) InputGenerator(num_of_var=var_num, num_of_clause=clause_num).generate_and_write_inputs( input_folder_name, number_of_files) data_hash = {} for file_num in range(number_of_files): execute(input_folder_name, output_folder_name, file_num, data_hash) lt = list() lt.append(str(i) + ': ') lt.append('var_num ' + str(var_num) + ' +- ' + str(var_num / 2)) lt.append('clause_num ' + str(clause_num) + ' +- ' + str(clause_num / 2)) lt.append('for ' + str(number_of_files) + ' number_of_files\n') for function_name in data_hash.keys():
def setInputGenerator(self, nb_timesteps, batch_size):
    """Attach an InputGenerator over self.gt with the given chunking.

    Parameters
    ----------
    nb_timesteps : length of each generated chunk.
    batch_size : number of chunks per batch (also stored on self).
    """
    self.batch_size = batch_size
    self.generator = InputGenerator(
        self.gt,
        chunk_size=nb_timesteps,
        batch_size=batch_size,
    )