def main():
    i_gen = InputGenerator(0, T, NUM_TIME_STEPS)
    data = i_gen.generate_logistic()
    num_train = int(len(data) * RATIO_TRAIN)
    train_data = data[:num_train]
    print("number of training samples is " + str(num_train))
    print(dt)
    model = ReservoirNetWork(inputs=train_data,
                             teacher=train_data,
                             num_input_nodes=NUM_INPUT_NODES,
                             num_reservoir_nodes=NUM_RESERVOIR_NODES,
                             num_output_nodes=NUM_OUTPUT_NODES,
                             leak_rate=LEAK_RATE)
    model.offline_training()  # initialize the output weights via offline training
    model.online_training()   # then update the weights online
    train_result = model.get_train_result()  # fetch the training result
    num_predict = int(len(data[num_train:]))
    predict_result = model.predict(length_of_sequence=num_predict)
    t = np.linspace(0, T, NUM_TIME_STEPS)

    # plot
    plt.plot(t, data, label="inputs")
    plt.plot(t[:num_train], train_result, label="trained")
    plt.plot(t[num_train:], predict_result, label="predicted")
    plt.axvline(x=int(T * RATIO_TRAIN), label="end of train", color="red")  # border between training and prediction
    plt.legend()
    plt.title("Echo State Network Logistic Prediction"
              " // Number of Reservoir Nodes: " + str(NUM_RESERVOIR_NODES))
    plt.xlabel("time")
    plt.ylabel("y(t)")
    plt.show()
def main():
    i_gen = InputGenerator(0, T, NUM_TIME_STEPS)
    data = i_gen.generate_lorentz()
    num_train = int(len(data) * RATIO_TRAIN)
    train_data = data[:num_train]
    print("number of training samples is " + str(num_train))
    print(dt)
    model = ReservoirNetWork(inputs=train_data,
                             teacher=train_data,
                             num_input_nodes=NUM_INPUT_NODES,
                             num_reservoir_nodes=NUM_RESERVOIR_NODES,
                             num_output_nodes=NUM_OUTPUT_NODES,
                             leak_rate=LEAK_RATE)
    # model.offline_training()  # initialize the output weights via offline training
    model.online_training()  # update the weights online
    train_result = model.get_train_result()  # fetch the training result
    num_predict = int(len(data[num_train:]))
    predict_result = model.predict(num_predict)
    # print(train_result)

    # plot
    fig = plt.figure()
    ax = Axes3D(fig)
    # ax.plot(train_result[0, :], train_result[1, :], train_result[2, :])
    plt.show()
def main():
    i_gen = InputGenerator(0, T, NUM_TIME_STEPS)
    data = i_gen.generate_sin(amplitude=AMPLITUDE)
    num_train = int(len(data) * RATIO_TRAIN)
    train_data = data[:num_train]
    model = ReservoirNetWork(inputs=train_data,
                             num_input_nodes=NUM_INPUT_NODES,
                             num_reservoir_nodes=NUM_RESERVOIR_NODES,
                             num_output_nodes=NUM_OUTPUT_NODES,
                             leak_rate=LEAK_RATE)
    model.train()  # train
    train_result = model.get_train_result()  # fetch the training result
    num_predict = int(len(data[num_train:]))
    predict_result = model.predict(num_predict)
    t = np.linspace(0, T, NUM_TIME_STEPS)

    # plot
    plt.plot(t, data, label="inputs")
    plt.plot(t[:num_train], train_result, label="trained")
    plt.plot(t[num_train:], predict_result, label="predicted")
    plt.axvline(x=int(T * RATIO_TRAIN), label="end of train", color="green")  # border between training and prediction
    plt.legend()
    plt.title("Echo State Network Sin Prediction")
    plt.xlabel("time[ms]")
    plt.ylabel("y(t)")
    plt.show()
    print(model.test())
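# For context, a minimal sketch of what the InputGenerator used by these demos
# might look like. This is an assumption reconstructed solely from the call
# sites above (constructor taking (start, end, num_time_steps), plus
# generate_sin(amplitude=...) and generate_logistic()), not the actual
# implementation; the logistic-map defaults r and x0 are illustrative.
import numpy as np

class InputGenerator:
    def __init__(self, start_time, end_time, num_time_steps):
        self.start_time = start_time
        self.end_time = end_time
        self.num_time_steps = num_time_steps

    def generate_sin(self, amplitude=1.0):
        # Sine wave sampled on a uniform grid over [start_time, end_time].
        t = np.linspace(self.start_time, self.end_time, self.num_time_steps)
        return amplitude * np.sin(t)

    def generate_logistic(self, r=3.9, x0=0.2):
        # Logistic-map iterates x_{n+1} = r * x_n * (1 - x_n).
        xs = np.empty(self.num_time_steps)
        xs[0] = x0
        for n in range(self.num_time_steps - 1):
            xs[n + 1] = r * xs[n] * (1.0 - xs[n])
        return xs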
class ProteinDataset(Dataset):
    """Dataset of input/output pairs generated from a list of PDB identifiers."""

    def __init__(self, file):
        super(ProteinDataset, self).__init__()
        pdb_identifiers_file = file  # e.g. '../inputs/train.txt' or '../inputs/pdb_identifiers.txt'
        self.pdb_identifiers = Utility.get_pdb_identifiers(pdb_identifiers_file)
        print(len(self.pdb_identifiers), "proteins in hand")
        self.input_generator = InputGenerator()
        self.records = self.generate_input_output_sets()

    def __len__(self):
        return len(self.records)

    def __getitem__(self, i):
        # each record is an (x, y) input/output pair
        return self.records[i]

    def n_proteins(self):
        return len(self.pdb_identifiers)

    def generate_input_output_sets(self):
        """Flatten the input/output pairs of all proteins into a single list."""
        records = []
        for pdb_code in self.pdb_identifiers:
            inp_out_pairs = self.input_generator.get_input_output(pdb_code)
            records.extend(inp_out_pairs)
        return records

    def generate_input_output_sets_per_protein(self):
        """Keep the input/output pairs grouped per protein."""
        per_protein_records = []
        for pdb_code in self.pdb_identifiers:
            inp_out_pairs = self.input_generator.get_input_output(pdb_code)
            per_protein_records.append(inp_out_pairs)
        return per_protein_records
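# A hedged usage sketch: since ProteinDataset implements __len__/__getitem__,
# it can be consumed by a standard torch DataLoader. The file path and batch
# size below are illustrative assumptions, not values from the source.
from torch.utils.data import DataLoader

dataset = ProteinDataset('../inputs/train.txt')
loader = DataLoader(dataset, batch_size=32, shuffle=True)
for x, y in loader:
    # each batch collates the (input, output) pairs returned by __getitem__
    pass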
def run_tests(self):
    is_input_consistent_str = 'consistent'
    if not self.is_input_consistent:
        is_input_consistent_str = 'in' + is_input_consistent_str
    print 'Start testing for %s input...\n' % is_input_consistent_str

    for dimension in self.dimensions:
        random_no_points_per_axis = np.random.randint(2, 12, dimension).tolist()
        no_points_per_axis = ' '.join([str(no) for no in random_no_points_per_axis])
        no_points_per_axis_display = ', '.join([str(no) for no in random_no_points_per_axis])
        print '    Testing for dimension %d with [%s] points on axis...' % \
            (dimension, no_points_per_axis_display)
        for epsilon in self.epsilons:
            print '        Testing for epsilon:', epsilon
            for from_poly in [True, False]:
                input_gen = InputGenerator(PATH_TO_FILE, self.is_input_consistent, dimension,
                                           no_points_per_axis, rand_points_axis=True,
                                           from_poly=from_poly, eps=epsilon)
                input_gen.generate_test_file()

                # Silence the print statements to stdout.
                with Utils.nostdout():
                    cons = Consistency(PATH_TO_FILE, input_gen, False, False, True)
                    result = cons.solve_LP_problem()

                # Compare with != rather than identity, so non-bool truthy
                # results are handled correctly.
                if result != self.is_input_consistent:
                    raise RuntimeError('Counterexample found, aborting. See the file %s '
                                       'for details.' % PATH_TO_FILE)

    print '\n...Finished testing. No counterexamples found.\n'
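# Hedged usage sketch for run_tests: the tester object is assumed to carry the
# attributes read above (is_input_consistent, dimensions, epsilons). The class
# name ConsistencyTester and the concrete values are illustrative assumptions.
tester = ConsistencyTester()
tester.is_input_consistent = True
tester.dimensions = [2, 3, 4]
tester.epsilons = [0.1, 0.01]
tester.run_tests()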
def main():
    (options, args) = command_line_arguments()
    validate_options(options)

    input_generator = None
    if options.gen_cons_input or options.gen_incons_input:
        input_generator = InputGenerator(options.input_file,
                                         options.gen_cons_input is not None,
                                         options.dimension,
                                         options.no_points_per_axis,
                                         options.rand_points_axis,
                                         options.from_poly,
                                         options.epsilon)
        input_generator.generate_test_file()

    cons = Consistency(options.input_file, input_generator, options.plot_surfaces,
                       options.plot_rand_heights, options.verbose)

    # Run the LP algorithm to decide consistency.
    cons.solve_LP_problem()
def main():
    print(NUM_TIME_STEPS)
    ing = InputGenerator(0, T, NUM_TIME_STEPS)
    train_data = ing.generate_sin()
    num_train = int(len(train_data) * RATIO_TRAIN)
    print(train_data)
    target_data = np.sign(train_data)
    num_target = int(len(target_data) * RATIO_TRAIN)
    print(target_data)
    model = ReservoirNetWork(inputs=train_data,  # see ReservoirNetwork.py
                             outputs_target=target_data,
                             num_input_nodes=NUM_INPUT_NODES,
                             num_reservoir_nodes=NUM_RESERVOIR_NODES,
                             num_output_nodes=NUM_OUTPUT_NODES,
                             leak_rate=LEAK_RATE)
    model.train()  # train
    train_result = model.get_train_result()  # fetch the training result
    num_predict = int(len(train_data[num_target:]))
    predict_result = model.predict(num_predict)
    t = np.linspace(0, T, NUM_TIME_STEPS)

    # plot
    plt.plot(t, train_data, label="inputs")
    plt.plot(t[:num_train], train_result[:num_train], label="trained")
    plt.plot(t[num_target:], predict_result, label="predicted")
    plt.axvline(x=int(T * RATIO_TRAIN), label="end of train", color="green")  # border between training and prediction
    plt.legend()
    plt.title("Echo State Network TEST Prediction")
    plt.xlabel("time[ms]")
    plt.ylabel("y(t)")
    plt.show()
def main():
    i_gen = InputGenerator(0, T, dt)
    data = i_gen.generate_lorenz()
    num_train = int(len(data.T) * RATIO_TRAIN)
    train_data = data.T[:num_train]
    print(f"train_data_shape: {train_data.shape}")
    model = ReservoirNetWork(inputs=train_data,
                             num_input_nodes=NUM_INPUT_NODES,
                             num_reservoir_nodes=NUM_RESERVOIR_NODES,
                             num_output_nodes=NUM_OUTPUT_NODES,
                             leak_rate=LEAK_RATE)
    model.train()
    train_result = model.get_train_result()  # fetch the training result

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(data[0], data[1], data[2], marker='o')
    ax.set_title("Echo State Network Lorenz Prediction")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    plt.show()
def processParams(self, params):
    # Directory for the experiment
    directory = self.setupDir(params)

    # Set up generator
    generator = InputGenerator(
        self.gt,
        chunk_size=params["nb_timesteps"],
        batch_size=params["batch_size"],
    )

    # Set up model
    model = RNN(params)
    model = model.model

    # Set up loss
    loss = CustomLoss(
        lambda_roll=params["lambda_roll"],
        lambda_pitch=params["lambda_pitch"],
        lambda_yaw=params["lambda_yaw"],
        lambda_thrust=params["lambda_thrust"],
        loss_func=params["loss_func"],
    )

    decay = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2,
                              verbose=1, min_lr=1e-6)

    # Set up optimizer
    optimizer = Adam(
        lr=params["lr"],
        decay=params["decay"],
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-08,
    )

    # Set up callbacks
    mcp = ModelCheckpoint(
        directory + "/model.{epoch:03d}.h5",
        verbose=1,
        save_weights_only=False,
    )
    lrp = PrintLR()
    # lrs = LearningRateScheduler(step_decay)
    callbacks = [mcp, lrp, decay]

    # Return all configured components
    return generator, model, loss, optimizer, callbacks
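# A minimal sketch of how processParams might be driven. The params keys mirror
# the lookups above; the concrete values, the `manager` object, and any extra
# keys consumed by setupDir/RNN are assumptions, not taken from the source.
params = {
    "nb_timesteps": 64,
    "batch_size": 32,
    "lambda_roll": 1.0, "lambda_pitch": 1.0,
    "lambda_yaw": 1.0, "lambda_thrust": 1.0,
    "loss_func": "mse",
    "lr": 1e-3, "decay": 0.0,
}
generator, model, loss, optimizer, callbacks = manager.processParams(params)
model.compile(optimizer=optimizer, loss=loss)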
class SupervisedManager(object):
    """Wires together model, data generator, loss, optimizer and callbacks
    for supervised training."""

    def __init__(self, experimentname=None):
        # class attributes
        self._i = 0
        self.experimentname = experimentname
        self.model = None
        self.weightpath = None
        self.loss = None
        self.callbacks = []

    def setWeights(self, model, weightpath=None, multi_gpu=False):
        self.model = model
        self.weightpath = weightpath
        if weightpath:
            print("[*] Loading weights...")
            self.model.load_weights(weightpath, by_name=True)
            print("Done\n")

    def setModel(self, modelpath=None):
        self.model = load_model(modelpath)

    def setFreeze(self, freeze):
        self.freeze = freeze
        for layer in self.model.layers:
            layer.trainable = layer.name not in freeze

    def setLoss(self, loss):
        self.loss = loss

    def setOptimizer(self, optimizer):
        self.optimizer = optimizer

    def compile(self):
        self.model.compile(optimizer=self.optimizer, loss=self.loss)

    def setGroundTruth(self, datapath, ratio_valid=0.1):
        self.gt = GroundTruth(path=datapath, split=ratio_valid)

    def setInputGenerator(self, nb_timesteps, batch_size):
        self.batch_size = batch_size
        self.generator = InputGenerator(self.gt,
                                        chunk_size=nb_timesteps,
                                        batch_size=batch_size)

    def setCallbacks(self, callbacks):
        self.callbacks.append(callbacks)

    def train(self, epochs=15, initial_epoch=0):
        history = self.model.fit_generator(
            self.generator.generate(set="train"),
            steps_per_epoch=self.generator.steps_per_epoch_train,
            validation_data=self.generator.generate(set="valid"),
            validation_steps=self.generator.steps_per_epoch_valid,
            epochs=epochs,
            callbacks=self.callbacks,
            max_queue_size=10,
            workers=1,
            verbose=1,
            initial_epoch=initial_epoch,
        )
        return history
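# A hedged end-to-end usage sketch for SupervisedManager. Every path and
# hyperparameter here is an illustrative assumption; only the method names
# come from the class above.
manager = SupervisedManager(experimentname="exp01")
manager.setGroundTruth(datapath="../resources", ratio_valid=0.1)
manager.setInputGenerator(nb_timesteps=64, batch_size=32)
manager.setModel(modelpath="model.h5")  # or setWeights(model, weightpath=...)
manager.setLoss("mse")
manager.setOptimizer("adam")
manager.compile()
history = manager.train(epochs=15)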
number_of_files = 10
base_var_num = 20
var_num = base_var_num
clause_num = base_var_num * 3
loop_exp_factor = 1.3
num_of_loops = 5

for i in range(1, num_of_loops + 1):
    input_folder_name = 'inputs' + str(i)
    output_folder_name = 'outputs' + str(i)
    InputGenerator(num_of_var=var_num,
                   num_of_clause=clause_num).generate_and_write_inputs(input_folder_name,
                                                                       number_of_files)
    data_hash = {}
    for file_num in range(number_of_files):
        execute(input_folder_name, output_folder_name, file_num, data_hash)

    lt = list()
    lt.append(str(i) + ': ')
    lt.append('var_num ' + str(var_num) + ' +- ' + str(var_num / 2))
    lt.append('clause_num ' + str(clause_num) + ' +- ' + str(clause_num / 2))
    lt.append('for ' + str(number_of_files) + ' number_of_files\n')
    for function_name in data_hash.keys():
class ApiRandomCaller:
    def __init__(self, api, methods, seed=None, chaos_monkey=True):
        self.api = api
        self.methods = methods
        self.methods_list = methods.values()
        self.ig = InputGenerator(seed, chaos_monkey)

    def call(self, method, inputs=None):
        try:
            event = method.call(self.api, params=inputs)
        except requests.exceptions.ConnectionError:
            print "[E] Couldn't call %s/%s" % (method.base_url, method.url)
            method.enabled = False
            return None

        # Remove resources that result in a 404
        if event.code == 404 and 'url_input' in inputs:
            for input_name, input_value in inputs['url_input'].items():
                self.ig.resource_remove(input_name, input_value)

        # Remove resources that were deleted
        if method.http_method == 'DELETE' and 200 <= event.code < 300:
            for input_name, input_value in inputs['url_input'].items():
                self.ig.resource_remove(input_name, input_value)

        # Add outputs to resources
        self.ig.resources_add(event.outputs)
        return event

    def step(self, ask_before_call=False):
        random.shuffle(self.methods_list)

        # Pick a callable method
        for method in self.methods_list:
            if not method.enabled:
                continue
            # Try to avoid DELETE and list methods
            if (method.http_method != 'DELETE' and
                    not method.name.endswith('_list')) or self.ig.once_every(100):
                break

        if not method.enabled:
            print "Couldn't find a working method, abort"
            exit(1)

        # Generate inputs
        inputs = self.ig.generate_inputs(method.inputs)

        if ask_before_call:
            raw_input("\nPress enter to call %s (%s/%s) -d '%s' " %
                      (method.name, method.base_url, method.url, inputs))
        return self.call(method, inputs)

    def sync_resources(self):
        # Refresh the internal resources list
        tenant_id = self.ig.resources.setdefault('tenant_id', None)
        self.ig.resources_clear()
        if tenant_id:
            self.ig.resources["tenant_id"] = tenant_id
        for name, method in self.methods.items():
            if not name.endswith("_list"):
                continue
            try:
                event = method.call(self.api)
            except requests.exceptions.ConnectionError:
                print "[E] Couldn't call %s/%s" % (method.base_url, method.url)
                continue
            # Add outputs to resources
            self.ig.resources_add(event.outputs)
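# A minimal driver sketch for ApiRandomCaller, assuming `api` and `methods`
# were built elsewhere; the seed and iteration count are illustrative.
caller = ApiRandomCaller(api, methods, seed=42)
caller.sync_resources()  # seed the resource pool from the *_list methods
for _ in range(1000):
    caller.step()        # pick a random method, generate inputs, call it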