import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

# Alias the TF1-style optimizer (tf must be imported before this alias is made).
GradientDescentOptimizer = tf.compat.v1.train.GradientDescentOptimizer


# In[ ]:

from tensorflow.compat.v1.keras.backend import set_session

# Let GPU memory allocation grow on demand instead of grabbing it all upfront.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.compat.v1.Session(config=config))


# ## Flags Definition

# In[ ]:

class Flags(object):
    """Hyperparameters for (DP-)SGD training."""

    def __init__(self, **kwargs):
        self.dpsgd = True               # use differentially private SGD
        self.learning_rate = 0.15
        self.noise_multiplier = 1.1     # Gaussian noise scale for DP-SGD
        self.l2_norm_clip = 1.0         # per-example gradient clipping norm
        self.batch_size = 250
        self.epochs = 60
        # Allow callers to override any of the defaults above.
        self.__dict__.update(kwargs)
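# The Flags values above mirror the hyperparameters of a DP-SGD setup. A
# minimal usage sketch, not from the original source: it assumes the
# tensorflow_privacy package is installed and uses one microbatch per example.

# In[ ]:

from tensorflow_privacy.privacy.optimizers.dp_optimizer import (
    DPGradientDescentGaussianOptimizer)

flags = Flags()
if flags.dpsgd:
    # Clip per-example gradients to l2_norm_clip, then add Gaussian noise.
    optimizer = DPGradientDescentGaussianOptimizer(
        l2_norm_clip=flags.l2_norm_clip,
        noise_multiplier=flags.noise_multiplier,
        num_microbatches=flags.batch_size,  # assumption: one example per microbatch
        learning_rate=flags.learning_rate)
else:
    optimizer = GradientDescentOptimizer(learning_rate=flags.learning_rate)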
def get_rewards(self, model_fn, actions):
    '''
    Creates a subnetwork given the actions predicted by the controller RNN,
    trains it on the provided dataset, and then returns a reward.

    Args:
        model_fn: a function which accepts one argument, a list of
            parsed actions, obtained via an inverse mapping from the
            StateSpace.
        actions: a list of parsed actions obtained via an inverse mapping
            from the StateSpace. It is in a specific order as given below:

            Consider 4 states were added to the StateSpace via the
            `add_state` method. Then the `actions` array will be of length 4,
            with the values of those states in the order that they were added.

            If the number of layers is greater than one, then the `actions`
            array will be of length `4 * number of layers` (in the above
            scenario). The indices [0:4] will be for layer 0, [4:8] for
            layer 1, etc. for the number of layers.

            These action values are for direct use in the construction of
            models.

    Returns:
        a reward for training a model with the given actions
    '''
    with tf.Session(graph=tf.Graph()) as network_sess:
        K.set_session(network_sess)

        # generate a submodel given the predicted actions
        model = model_fn(actions)  # type: Model
        model.compile('adam', 'categorical_crossentropy', metrics=['accuracy'])

        # unpack the dataset
        X_train, y_train, X_val, y_val = self.dataset

        # train the model using Keras methods
        model.fit(X_train, y_train,
                  batch_size=self.batchsize,
                  epochs=self.epochs,
                  verbose=1,
                  validation_data=(X_val, y_val),
                  callbacks=[
                      ModelCheckpoint('weights/temp_network.h5',
                                      monitor='val_acc',
                                      verbose=1,
                                      save_best_only=True,
                                      save_weights_only=True)
                  ])

        # load the best-performing epoch from this training session
        model.load_weights('weights/temp_network.h5')

        # evaluate the model
        loss, acc = model.evaluate(X_val, y_val, batch_size=self.batchsize)

        # compute the reward as the gain over the moving-average accuracy
        reward = (acc - self.moving_acc)

        # if rewards are clipped, clip them to the range [-0.05, 0.05]
        if self.clip_rewards:
            reward = np.clip(reward, -0.05, 0.05)

        # update the moving accuracy, with bias correction on the 1st update
        if 0.0 < self.beta < 1.0:
            self.moving_acc = self.beta * self.moving_acc + (1 - self.beta) * acc
            self.moving_acc = self.moving_acc / (1 - self.beta_bias)
            self.beta_bias = 0

            reward = np.clip(reward, -0.1, 0.1)

        print()
        print("Manager: EWA Accuracy = ", self.moving_acc)

    # clean up resources and GPU memory
    network_sess.close()

    return reward, acc
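# A small illustration, not from the original source, of the `actions` layout
# described in the docstring above: with 4 states registered per layer, a
# model_fn can slice out each layer's action values like this.
def example_model_fn(actions, states_per_layer=4):
    # Split the flat action list into per-layer chunks.
    per_layer = [actions[i:i + states_per_layer]
                 for i in range(0, len(actions), states_per_layer)]
    # per_layer[0] holds the values for layer 0, per_layer[1] for layer 1, ...
    # A real model_fn would construct and return a Keras Model from these.
    return per_layer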
import sys
from optparse import OptionParser

import tensorflow as tf
from keras_frcnn import config
#from keras import backend as K
from keras.layers import Input
from keras.models import Model
#from keras.backend import set_session
from tensorflow.compat.v1.keras import backend as K
from keras_frcnn import roi_helpers

sys.setrecursionlimit(40000)

# Use a distinct name for the TF session config so it does not shadow the
# keras_frcnn `config` module imported above.
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_config.log_device_placement = True
sess = tf.compat.v1.Session(config=tf_config)
K.set_session(sess)

parser = OptionParser()

parser.add_option("-p", "--path", dest="test_path", help="Path to test data.")
parser.add_option(
    "-n", "--num_rois",
    type="int",
    dest="num_rois",
    help="Number of ROIs per iteration. Higher means more memory use.",
    default=32)
parser.add_option(
    "--config_filename",
    dest="config_filename",
    help=
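# Example invocation of the options defined above (the script name and paths
# are illustrative only):
#
#   python test_frcnn.py -p ./test_images --num_rois 32 \
#       --config_filename config.pickle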
import os

import cv2
import tensorflow as tf
from tqdm import tqdm
from tensorflow.compat.v1.keras.backend import set_session
from tensorflow.keras.applications import ResNet50  # pretrained model
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
config.log_device_placement = True  # log device placement (which device each op runs on)
sess = tf.compat.v1.Session(config=config)
set_session(sess)

TRAIN_PATH = '../train'
TEST_PATH = '../test'
IMGSIZE = 224
BATCH_SIZE = 64
EPOCHS = 10

is_dog = lambda category: int(category == 'dog')


def create_data(path, is_Train=True):
    data = []  # 2-D, for a pandas structure
    img_list = os.listdir(path)
    for name in tqdm(img_list):
        img_addr = os.path.join(path, name)
        img = cv2.imread(img_addr, cv2.IMREAD_COLOR)
def __init__(self,
             X_train=None, y_train=None, X_val=None, y_val=None,
             hidden_neurons=512, epochs=1, batch_size=32, verbose=1,
             max_len=50, n_tags=17, load_f=False, loadFile="tmp/model.h5"):
    '''
    load_f : flag to load a saved model (set to True to load a model)
    '''
    tf.disable_eager_execution()
    self.X_train = X_train
    self.y_train = y_train
    self.val = (X_val, y_val)
    self.hidden_neurons = hidden_neurons
    self.epochs = epochs
    self.batch_size = batch_size
    self.verbose = verbose

    # session
    sess = tf.Session()
    K.set_session(sess)

    # elmo_model = hub.Module("https://tfhub.dev/google/elmo/3", trainable=True)
    # sess.run(tf.global_variables_initializer())
    # sess.run(tf.tables_initializer())

    # def ElmoEmbedding(x):
    #     return elmo_model(inputs={
    #         "tokens": tf.squeeze(tf.cast(x, tf.string)),
    #         "sequence_len": tf.constant(batch_size*[max_len])
    #     },
    #         signature="tokens",
    #         as_dict=True)["elmo"]

    if load_f:
        self.model = load_model(loadFile)
    else:
        input_text = Input(shape=(max_len,), dtype=tf.string)
        # embedding = Lambda(ElmoEmbedding, output_shape=(None, 1024))(input_text)
        embedding = ElmoLayer()(input_text)
        # The ELMo layer adds TF-hub variables and lookup tables to the graph,
        # so the graph has to be (re)initialized after the layer is built.
        sess.run(tf.global_variables_initializer())
        sess.run(tf.tables_initializer())
        x = Bidirectional(
            LSTM(units=hidden_neurons,
                 return_sequences=True,
                 recurrent_dropout=0.2,
                 dropout=0.2))(embedding)
        x_rnn = Bidirectional(
            LSTM(units=hidden_neurons,
                 return_sequences=True,
                 recurrent_dropout=0.2,
                 dropout=0.2))(x)
        x = add([x, x_rnn])  # residual connection to the first biLSTM
        out = TimeDistributed(Dense(n_tags, activation="softmax"))(x)
        self.model = Model(input_text, out)
        self.model.compile(optimizer="adam",
                           loss="sparse_categorical_crossentropy",
                           metrics=["accuracy"])
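# `ElmoLayer` used above is defined elsewhere in this project. A minimal
# sketch of such a layer, reconstructed from the commented-out ElmoEmbedding
# Lambda: the hub URL, inputs, and signature come from that comment, while the
# fixed ELMO_BATCH_SIZE / ELMO_MAX_LEN constants and the class name are
# assumptions for illustration (TF1 graph mode with tensorflow_hub installed).
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.layers import Layer

ELMO_BATCH_SIZE, ELMO_MAX_LEN = 32, 50  # assumed to match batch_size/max_len


class ElmoLayerSketch(Layer):
    def build(self, input_shape):
        # The hub module's variables and lookup tables join the current graph,
        # which is why the caller re-runs the initializers afterwards.
        self.elmo = hub.Module("https://tfhub.dev/google/elmo/3",
                               trainable=True, name="elmo_module")
        super().build(input_shape)

    def call(self, x):
        # Token strings in, 1024-dim contextual "elmo" embeddings out.
        return self.elmo(
            inputs={"tokens": tf.squeeze(tf.cast(x, tf.string)),
                    "sequence_len": tf.constant(ELMO_BATCH_SIZE * [ELMO_MAX_LEN])},
            signature="tokens", as_dict=True)["elmo"]

    def compute_output_shape(self, input_shape):
        return (input_shape[0], ELMO_MAX_LEN, 1024)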
y_tr = pad_sequences(y_tr, maxlen=max_len_sen, padding='post')
y_val = pad_sequences(y_val, maxlen=max_len_sen, padding='post')

y_voc_size = len(y_tokenizer.word_index) + 1

#%%
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.compat.v1.keras.backend import set_session
import tensorflow as tf

tf.compat.v1.keras.backend.clear_session()  # for easy reset of notebook state

# Turn off grappler's arithmetic optimization for this graph.
config_proto = tf.compat.v1.ConfigProto()
off = rewriter_config_pb2.RewriterConfig.OFF
config_proto.graph_options.rewrite_options.arithmetic_optimization = off
session = tf.compat.v1.Session(config=config_proto)
set_session(session)

#from keras import backend as K
#K.clear_session()

emb_dim = 128
latent_dim = 200

# Input & embedding
encoder_inputs = Input(shape=(max_len_event,), name='enc_input')
enc_emb = Embedding(x_voc_size,
                    emb_dim,
                    trainable=True,
                    name='enc_embedding')(encoder_inputs)

# LSTM
encoder_lstm = LSTM(latent_dim,
                    return_state=True,
parser.add_argument('--data', type=str, default='MI',
                    help='name of data, ERN or MI or P300')
parser.add_argument('--model', type=str, default='EEGNet',
                    help='name of model, EEGNet or DeepCNN')
opt = parser.parse_args()

os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_n)
config = tf.ConfigProto()
# "Best-fit with coalescing": an allocation algorithm simplified from a
# version of dlmalloc.
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.per_process_gpu_memory_fraction = 0.25
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))

random_seed = None
subject_numbers = {'ERN': 16, 'MI': 14, 'P300': 8}
amplitudes = {'ERN': 0.3, 'MI': 1.5, 'P300': 0.01}

data_name = opt.data    # 'ERN' or 'MI' or 'P300'
model_used = opt.model  # 'EEGNet' or 'DeepCNN'

npp_params = [amplitudes[data_name], 5, 0.1]
subject_number = subject_numbers[data_name]

batch_size = 64
epoches = 1600
repeat = 10
poison_rates = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]
save_dir = 'runs/influence_of_poisoning_number'

raccs = []
from keras.models import *
from keras.optimizers import *
from keras.layers import *
from keras.metrics import *
from keras.regularizers import *
from keras.callbacks import *
from tensorflow.compat.v1 import ConfigProto, Session
from tensorflow.compat.v1.keras import backend as K

config = ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.95
K.set_session(Session(config=config))

if not retrain:
    # word2vec model to be trained
    input_target = Input((1,), name='target_in')
    input_context = Input((1,), name='context_in')

    embedding = Embedding(nterms,
                          vector_dim,
                          input_length=1,
                          name='embedding',
                          embeddings_initializer='glorot_uniform')

    target = embedding(input_target)
    target = Reshape((vector_dim, 1), name='target')(target)
    context = embedding(input_context)
    context = Reshape((vector_dim, 1), name='context')(context)

    dot_product = dot([target, context], axes=1, normalize=False)
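    # A typical way to finish this skip-gram setup (a sketch, not the original
    # code): collapse the target/context dot product to a scalar similarity
    # and train it as a binary classifier over (target, context) pairs.
    # `w2v_model` is an illustrative name.
    dot_product = Reshape((1,))(dot_product)
    output = Dense(1, activation='sigmoid', name='similarity')(dot_product)
    w2v_model = Model(inputs=[input_target, input_context], outputs=output)
    w2v_model.compile(loss='binary_crossentropy', optimizer='rmsprop')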
def simulation():
    # record the reward trend
    reward_list = []
    test_reward_list = []

    # save paths:
    # actor_checkpoint = rospy.get_param("/rl_client/actor_checkpoint")
    # critic_checkpoint = rospy.get_param("/rl_client/critic_checkpoint")
    # fig_path = rospy.get_param("/rl_client/figure_path")
    # result_path = rospy.get_param("/rl_client/result_path")
    # test_result_path = rospy.get_param("/rl_client/test_result_path")

    # init
    obs_dim = 12
    action_dim = 36
    sess = tf.compat.v1.Session()
    K.set_session(sess)
    actor_critic = ActorCritic(sess)

    # read the saved replay buffers
    path = "/home/yunke/prl_proj/panda_ws/src/franka_cal_sim/python/replay_buffers/replay_buffer_imu_final.txt"
    read_replay(path, actor_critic, obs_dim, action_dim)
    path = "/home/yunke/prl_proj/panda_ws/src/franka_cal_sim/python/replay_buffer_last.txt"
    read_replay(path, actor_critic, obs_dim, action_dim)
    path = "/home/yunke/prl_proj/panda_ws/src/franka_cal_sim/python/replay_buffers/replay_buffer_tiny.txt"
    read_replay(path, actor_critic, obs_dim, action_dim)

    num_trials = 10000
    trial_len = 3

    for i in range(num_trials):
        # reset()
        # cur_state = np.ones(obs_dim)*500
        # reward_sum = 0
        # obs_list = []
        # act_list = []
        # cur_state = cur_state.reshape((1,obs_dim))
        # obs_list.append(normalize(cur_state))
        # act_list.append(normalize(0.01*np.ones((1,action_dim))))
        # for j in range(trial_len):
        #     #env.render()
        #     print("trial:" + str(i))
        #     print("step:" + str(j))
        #     obs_seq = np.asarray(obs_list)
        #     print("obs_seq"+str(obs_seq))
        #     act_seq = np.asarray(act_list)
        #     obs_seq = obs_seq.reshape((1, -1, obs_dim))
        #     act_seq = act_seq.reshape((1, -1, action_dim))
        #     action = actor_critic.act(obs_seq,act_seq)
        #     action = action.reshape((action_dim))
        #     cur_state = cur_state.reshape(obs_dim)
        #     new_state, reward, done = step(cur_state,action)
        #     reward_sum += reward
        #     rospy.loginfo(rospy.get_caller_id() + 'got reward %s',reward)
        #     if j == (trial_len - 1):
        #         done = True

        # train from the pre-loaded replay buffers (no new rollouts)
        actor_critic.train(i)
        actor_critic.update_target()

        obs_list = []
        act_list = []
        cur_state = np.zeros(obs_dim)
        cur_state = cur_state.reshape((1, obs_dim))
        obs_list.append(cur_state)
        act_list.append(0.01 * np.ones((1, action_dim)))
        obs_seq = np.asarray(obs_list)
        act_seq = np.asarray(act_list)
        obs_seq = obs_seq.reshape((1, -1, obs_dim))
        act_seq = act_seq.reshape((1, -1, action_dim))
        action = actor_critic.actor_model.predict([obs_seq, act_seq]) * 0.02

        # periodically print the current policy output and critic values
        if i % 200 == 0:
            print(action[0])
            print(np.linalg.norm(action[0]))
            action = np.ones((1, action_dim))
            print(actor_critic.critic_model.predict([obs_seq, act_seq, action]))
            action = np.ones((1, action_dim)) * 0.05
            act_seq = np.ones((1, 1, action_dim)) * 0.001
            print(actor_critic.critic_model.predict([obs_seq, act_seq, action]))

        # new_state = np.asarray(new_state).reshape((1,obs_dim))
        # action = action.reshape((1,action_dim))
        # obs_list.append(normalize(new_state))
        # act_list.append(normalize(action))
        # next_obs_seq = np.asarray(obs_list)
        # next_act_seq = np.asarray(act_list)
        # next_obs_seq = next_obs_seq.reshape((1, -1, obs_dim))
        # next_act_seq = next_act_seq.reshape((1, -1, action_dim))
        # # padding
        # pad_width = trial_len - np.size(obs_seq, 1)
        # rospy.loginfo(rospy.get_caller_id() + 'obs_shape %s', obs_seq.shape)
        # obs_seq = np.pad(obs_seq, ((0,0),(pad_width,0),(0,0)), 'constant')
        # next_obs_seq = np.pad(next_obs_seq, ((0,0),(pad_width,0),(0,0)), 'constant')
        # act_seq = np.pad(act_seq, ((0,0),(pad_width,0),(0,0)), 'constant')
        # next_act_seq = np.pad(next_act_seq, ((0,0),(pad_width,0),(0,0)), 'constant')
        # #print(obs_seq.shape)
        # #print(next_obs_seq.shape)
        # actor_critic.remember(obs_seq, act_seq, action,
        #                       reward, next_obs_seq, next_act_seq, done)
        # cur_state = new_state
        # if done:
        #     rospy.loginfo(rospy.get_caller_id() + 'got total reward %s', reward_sum)
        #     reward_list.append(reward_sum)
        #     break

        # if i % 5 == 0:
        #     actor_critic.actor_model.save_weights(actor_checkpoint)
        #     actor_critic.critic_model.save_weights(critic_checkpoint)

    # fig, ax = plt.subplots()
    # ax.plot(reward_list)
    # fig.savefig(fig_path)
    # np.savetxt(result_path, reward_list, fmt='%f')

    # draw a graph of critic Q-values over a sweep of actions
    obs_list = []
    act_list = []
    cur_state = np.zeros(obs_dim)
    cur_state = cur_state.reshape((1, obs_dim))
    obs_list.append(cur_state)
    act_list.append(0.01 * np.ones((1, action_dim)))
    obs_seq = np.asarray(obs_list)
    act_seq = np.asarray(act_list)
    obs_seq = obs_seq.reshape((1, -1, obs_dim))
    act_seq = act_seq.reshape((1, -1, action_dim))

    q = []
    t_q = []
    for i in range(100):
        action = 0.015 * np.ones((1, action_dim)) * i
        q_value = actor_critic.critic_model.predict([obs_seq, act_seq, action])
        t_q_value = actor_critic.target_critic_model.predict([obs_seq, act_seq, action])
        t_q.append(t_q_value.reshape((-1,)))
        q.append(q_value.reshape((-1,)))
    plt.plot(q)
    plt.show()
    plt.plot(t_q)
    plt.show()
def FCNN(data, num_layer, num_neuron, average_time, act_f, initializer, reg,
         batch_normalization, learning_rate, iteration_step, batch_size,
         n_input, n_output, n_Fold, project_name, device, avg_rmse, std_rmse):
    """
    data : dataframe
    num_layer : int
    num_neuron : int
    average_time : int
    act_f : str; 'elu' or 'sigmoid'
    initializer : str; 'glorot_uniform', 'random_normal', or 'he_normal'
    reg : list, e.g. ['L2', 0.01]
    batch_normalization : bool; True or False
    iteration_step : int
    batch_size : int
    device : str; '/gpu:0' or '/gpu:1'
    avg_rmse : manager.dict
    std_rmse : manager.dict
    """

    def get_session(gpu_fraction=0.3):
        num_threads = os.environ.get('OMP_NUM_THREADS')
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_fraction)
        if num_threads:
            return tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options,
                intra_op_parallelism_threads=num_threads))
        else:
            return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    KK.set_session(get_session())

    with tf.device(device):
        model = models.Sequential()
        model.add(
            layers.Dense(num_neuron,
                         activation=act_f,
                         input_shape=(n_input,),
                         kernel_initializer=initializer))
        if batch_normalization:
            model.add(layers.BatchNormalization())
        for i in range(num_layer - 1):
            model.add(
                layers.Dense(num_neuron,
                             activation=act_f,
                             kernel_initializer=initializer))
            if batch_normalization:
                model.add(layers.BatchNormalization())
        model.add(layers.Dense(n_output))

        opt = tf.keras.optimizers.Adam(lr=learning_rate)
        model.compile(optimizer=opt,
                      loss='mse',
                      metrics=[tf.keras.metrics.RootMeanSquaredError()])

        data = data.sample(frac=1).reset_index(drop=True)
        cv = KFold(n_splits=n_Fold)
        val_rmse = []
        for i, (t, v) in enumerate(cv.split(data)):
            train = data.iloc[t]
            val = data.iloc[v]
            for kk in range(average_time):
                hist = model.fit(train[train.columns[:-n_output]],
                                 train[train.columns[-n_output:]],
                                 validation_data=(val[val.columns[:-n_output]],
                                                  val[val.columns[-n_output:]]),
                                 epochs=iteration_step,
                                 verbose=0,
                                 batch_size=batch_size)
                aa = pd.DataFrame()
                aa['epoch'] = [(i + 1) for i in range(len(hist.history['loss']))]
                aa['rmse'] = hist.history['root_mean_squared_error']
                aa['val_rmse'] = hist.history['val_root_mean_squared_error']
                aa.to_csv(r'./{}/history_log/{}L_{}N_{}_{}Fold_{}.csv'.format(
                    project_name, num_layer, num_neuron, i + 1, n_Fold, kk + 1),
                    header=True, index=False)
                model.save(r'./{}/model/{}L_{}N_{}_{}Fold_{}.h5'.format(
                    project_name, num_layer, num_neuron, i + 1, n_Fold, kk + 1))
                val_rmse.append(hist.history['val_root_mean_squared_error'][-1])

    tf.keras.backend.clear_session()
    print('mean : ', np.mean(val_rmse), 'std : ', np.std(val_rmse))
    avg_rmse['{}L_{}N'.format(num_layer, num_neuron)] = np.mean(val_rmse)
    std_rmse['{}L_{}N'.format(num_layer, num_neuron)] = np.std(val_rmse)
    return 0
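# A hypothetical invocation of FCNN (all names and values illustrative; `df`
# stands in for a pandas DataFrame whose last n_output columns are the
# targets, and the two result dicts come from a multiprocessing Manager, as
# the docstring notes):
from multiprocessing import Manager

manager = Manager()
avg_rmse, std_rmse = manager.dict(), manager.dict()
FCNN(df, num_layer=3, num_neuron=64, average_time=1, act_f='elu',
     initializer='he_normal', reg=['L2', 0.01], batch_normalization=True,
     learning_rate=1e-3, iteration_step=200, batch_size=32, n_input=10,
     n_output=1, n_Fold=5, project_name='exp1', device='/gpu:0',
     avg_rmse=avg_rmse, std_rmse=std_rmse)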
if __name__ == '__main__':
    FPS = 60

    # For stats
    ep_rewards = [-200]

    # For more repeatable results
    random.seed(1)
    np.random.seed(1)
    tf.set_random_seed(1)

    # Memory fraction, used mostly when training multiple agents
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=MEMORY_FRACTION)
    backend.set_session(
        tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))

    # Create models folder
    if not os.path.isdir('models'):
        os.makedirs('models')

    # Create agent and environment
    agent = DQNAgent()
    env = CarEnv()

    # Start training thread and wait for training to be initialized
    trainer_thread = Thread(target=agent.train_in_loop, daemon=True)
    trainer_thread.start()
    while not agent.training_initialized:
        time.sleep(0.01)
def __init__(self, add_pdb=False, add_bkg=False, add_aa_comp=False,
             add_aa_ref=False, n_models=5, serial=False, diag=0.4,
             pssm_design=False, msa_design=False, feat_drop=0, eps=1e-8,
             sample=False, DB_DIR=".", lid=0.3, lid_scale=18.0):

    self.sample, self.serial = sample, serial
    self.feat_drop = feat_drop

    # reset graph
    K.clear_session()
    K1.set_session(tf1.Session(config=config))

    # configure inputs
    self.in_label, inputs = [], []

    def add_input(shape, label, dtype=tf.float32):
        inputs.append(Input(shape, batch_size=1, dtype=dtype))
        self.in_label.append(label)
        return inputs[-1]

    I = add_input((None, None, 20), "I")
    if add_pdb:
        pdb = add_input((None, None, 100), "pdb")
    if add_bkg:
        bkg = add_input((None, None, 100), "bkg")
    loss_weights = add_input((None,), "loss_weights")
    train = add_input([], "train", tf.bool)[0]

    ################################
    # input features
    ################################

    def add_gap(x):
        return tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, 1]])

    def argmax(y):
        if sample:
            # ref: https://blog.evjang.com/2016/11/tutorial-categorical-variational.html
            U = tf.random.uniform(tf.shape(y), minval=0, maxval=1)
            y_pssm_sampled = tf.nn.softmax(
                y - tf.math.log(-tf.math.log(U + 1e-8) + 1e-8))
            y_pssm = K.switch(train, y_pssm_sampled, tf.nn.softmax(y, -1))
        else:
            y_pssm = tf.nn.softmax(y, -1)
        y_seq = tf.one_hot(tf.argmax(y_pssm, -1), 20)  # argmax
        y_seq = tf.stop_gradient(y_seq - y_pssm) + y_pssm  # gradient bypass
        return y_pssm, y_seq

    I_pssm, I_seq = argmax(I)

    # configure the input features
    if msa_design:
        print("mode: msa design")
        I_feat = MRF(lid=lid, lid_scale=lid_scale)(add_gap(I_seq))
    elif pssm_design:
        print("mode: pssm design")
        I_feat = PSSM(diag=diag)([I_seq, add_gap(I_pssm)])
    else:
        print("mode: single sequence design")
        I_feat = PSSM(diag=diag)([I_seq, add_gap(I_seq)])

    # add dropout to features
    if self.feat_drop > 0:
        e = tf.eye(tf.shape(I_feat)[1])[None, :, :, None]
        I_feat_drop = tf.nn.dropout(I_feat, rate=self.feat_drop)
        # exclude dropout at the diagonal
        I_feat_drop = e * I_feat + (1 - e) * I_feat_drop
        I_feat = K.switch(train, I_feat_drop, I_feat)

    ################################
    # output features
    ################################
    self.models = []
    for token in ["xaa", "xab", "xac", "xad", "xae"][:n_models]:
        # load weights (for serial mode) or all models (for parallel mode)
        print(f"loading model: {token}")
        weights = load_weights(f"{DB_DIR}/models/model_{token}.npy")
        if self.serial:
            self.models.append(weights)
        else:
            self.models.append(RESNET(weights=weights, mode="TrR")(I_feat))

    if self.serial:
        O_feat = RESNET(mode="TrR")(I_feat)
    else:
        O_feat = tf.reduce_mean(self.models, 0)

    ################################
    # define loss
    ################################
    self.loss_label, loss = [], []

    def add_loss(term, label):
        loss.append(term)
        self.loss_label.append(label)

    # cross-entropy loss for fixed-backbone design
    if add_pdb:
        pdb_loss = -0.25 * K.sum(pdb * K.log(O_feat + eps), -1)
        add_loss(K.mean(pdb_loss, [-1, -2]), "pdb")

    # KL loss for hallucination
    if add_bkg:
        bkg_loss = -0.25 * K.sum(
            O_feat * K.log(O_feat / (bkg + eps) + eps), -1)
        add_loss(K.mean(bkg_loss, [-1, -2]), "bkg")

    # amino-acid composition loss
    if add_aa_ref:
        aa = tf.constant(AA_REF, dtype=tf.float32)
        I_soft = tf.nn.softmax(I, axis=-1)
        aa_loss = K.sum(K.mean(I_soft * aa, [-2, -3]), -1)
        add_loss(aa_loss, "aa")
    elif add_aa_comp:
        aa = tf.constant(AA_COMP, dtype=tf.float32)
        I_soft = tf.nn.softmax(I, axis=-1)
        aa_loss = K.sum(I_soft * K.log(I_soft / (aa + eps) + eps), -1)
        add_loss(K.mean(aa_loss, [-1, -2]), "aa")

    ################################
    # define gradients
    ################################
    print(f"The loss function is composed of the following: {self.loss_label}")
    loss = tf.stack(loss, -1) * loss_weights
    grad = Lambda(lambda x: tf.gradients(x[0], x[1])[0])([loss, I])

    ################################
    # define model
    ################################
    self.out_label = ["grad", "loss", "feat", "pssm"]
    outputs = [grad, loss, O_feat, I_pssm]
    self.model = Model(inputs, outputs)
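# The "gradient bypass" line inside argmax() above is the straight-through
# estimator: the forward pass uses the hard one-hot y_seq, while gradients
# flow through the soft y_pssm as if the argmax were the identity. A
# self-contained illustration, separate from the class above (TF2 eager mode):
import tensorflow as tf

logits = tf.Variable(tf.random.normal([1, 5]))
with tf.GradientTape() as tape:
    soft = tf.nn.softmax(logits, -1)
    hard = tf.one_hot(tf.argmax(soft, -1), 5)
    # Same value as `hard`, but the gradient path runs through `soft`.
    straight_through = tf.stop_gradient(hard - soft) + soft
    out = tf.reduce_sum(straight_through * tf.range(5.0))
print(tape.gradient(out, logits))  # non-None despite the argmax/one_hot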