def train_epoch(data_path, sess, model, train_fetches, valid_fetches,
                train_writer, test_writer, logger):
    global last_score12
    global lr
    time0 = time.time()
    batch_indexs = np.random.permutation(n_tr_batches)
    for batch in tqdm(range(n_tr_batches)):
        global_step = sess.run(model.global_step)
        if 0 == (global_step + 1) % FLAGS.valid_step:
            f1_micro, f1_macro, score12 = valid_epoch(data_valid_path, sess, model)
            print('Global_step=%d: f1_micro=%g, f1_macro=%g, score12=%g, time=%g s' %
                  (global_step, f1_micro, f1_macro, score12, time.time() - time0))
            logger.info('END:Global_step={}: f1_micro={}, f1_macro={}, score12={}'.format(
                sess.run(model.global_step), f1_micro, f1_macro, score12))
            time0 = time.time()
            if score12 > last_score12:
                last_score12 = score12
                saving_path = model.saver.save(sess, model_path, global_step + 1)
                print('saved new model to %s ' % saving_path)
        # training
        batch_id = batch_indexs[batch]
        [X_batch, y_batch, sent_len, length] = get_batch(data_path, batch_id)
        y_batch = to_categorical(y_batch)
        _batch_size = len(y_batch)
        feed_dict = {
            model.X_inputs: X_batch,
            model.wNum: sent_len,
            model.sNum: length,
            model.y_inputs: y_batch,
            model.batch_size: _batch_size,
            model.tst: False,
            model.is_train: True
        }
        summary, _cost, _, _ = sess.run(
            train_fetches, feed_dict)  # the cost is the mean cost of one batch
        # valid per 500 steps
        if 0 == (global_step + 1) % 500:
            train_writer.add_summary(summary, global_step)
            batch_id = np.random.randint(0, n_va_batches)  # pick a random validation batch
            [X_batch, y_batch, sent_len, length] = get_batch(data_valid_path, batch_id)
            y_batch = to_categorical(y_batch)
            _batch_size = len(y_batch)
            feed_dict = {
                model.X_inputs: X_batch,
                model.wNum: sent_len,
                model.sNum: length,
                model.y_inputs: y_batch,
                model.batch_size: _batch_size,
                model.tst: True,
                model.is_train: False
            }
            summary, _cost = sess.run(valid_fetches, feed_dict)
            test_writer.add_summary(summary, global_step)
def train_epoch(data_path, sess, model, train_fetches, valid_fetches,
                train_writer, test_writer, logger):
    global last_score12
    global lr
    time0 = time.time()
    batch_indexs = np.random.permutation(n_tr_batches)
    for batch in tqdm(range(n_tr_batches)):
        global_step = sess.run(model.global_step)
        if 0 == (global_step + 1) % FLAGS.valid_step:
            cost, score = valid_epoch(data_valid_path, sess, model)
            logger.info('END:Global_step={}: cost={}, score={}'.format(
                sess.run(model.global_step), cost, score))
            time0 = time.time()
            if score > last_score12:
                last_score12 = score
                saving_path = model.saver.save(sess, model_path, global_step + 1)
                print('saved new model to %s ' % saving_path)
        # training
        batch_id = batch_indexs[batch]
        [X_batch, y_batch] = get_batch(data_path, batch_id)
        y_batch = to_categorical(y_batch, settings.n_class)
        _batch_size = len(y_batch)
        feed_dict = {
            model.X_inputs: X_batch,
            model.y_inputs: y_batch,
            model.batch_size: _batch_size,
            model.tst: False,
            model.keep_prob: FLAGS.keep_prob
        }
        summary, _cost, _, _ = sess.run(
            train_fetches, feed_dict)  # the cost is the mean cost of one batch
        # valid per 500 steps
        if 0 == (global_step + 1) % 500:
            train_writer.add_summary(summary, global_step)
            batch_id = np.random.randint(0, n_va_batches)  # pick a random validation batch
            [X_batch, y_batch] = get_batch(data_valid_path, batch_id)
            y_batch = to_categorical(y_batch, settings.n_class)
            _batch_size = len(y_batch)
            feed_dict = {
                model.X_inputs: X_batch,
                model.y_inputs: y_batch,
                model.batch_size: _batch_size,
                model.tst: True,
                model.keep_prob: 1.0
            }
            summary, _cost = sess.run(valid_fetches, feed_dict)
            test_writer.add_summary(summary, global_step)
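# Note: the two train_epoch variants above unpack four values from train_fetches and two
# from valid_fetches. The helper below is only an illustrative sketch (not part of the
# original scripts) of one way those fetch lists could be assembled in a TF1-style graph;
# the attribute names (model.train_op, etc.) are assumptions and the real code may fetch
# different ops.
def build_fetches(model, merged_summary):
    """Hypothetical helper: pair a merged summary op with the model's loss/train ops."""
    train_fetches = [merged_summary, model.loss, model.train_op, model.global_step]
    valid_fetches = [merged_summary, model.loss]
    return train_fetches, valid_fetches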
def valid_epoch(data_path, sess, model):
    _costs = 0.0
    predict_labels_list = list()
    marked_labels_list = list()
    # cnnout = None
    for i in range(n_va_batches):
        [X_batch, y_batch] = get_batch(data_path, i)
        marked_labels_list.extend(y_batch)
        y_batch = to_categorical(y_batch, settings.n_class)
        _batch_size = len(y_batch)
        if _batch_size != batch_size:  # skip the incomplete final batch
            continue
        fetches = [model.loss, model.y_pred, model.logits]
        feed_dict = {
            model.X_inputs: X_batch,
            model.y_inputs: y_batch,
            model.batch_size: _batch_size,
            model.tst: True,
            model.keep_prob: 1.0
        }
        _cost, predict_labels, logits = sess.run(fetches, feed_dict)
        _costs += _cost
        predict_labels_list.extend(predict_labels)
    # debug: inspect the first prediction vector and its logits
    print(max(predict_labels_list[0]), min(predict_labels_list[0]))
    print(predict_labels_list[0])
    print(logits[0])
    f1_micro, f1_macro, score12 = cail_evaluator(predict_labels_list,
                                                 marked_labels_list)
    return _costs, f1_micro, f1_macro, score12
def valid_epoch(data_path, sess, model):
    va_batches = os.listdir(data_path)
    n_va_batches = len(va_batches)
    _costs = 0.0
    predict_labels_list = list()
    marked_labels_list = list()
    for i in range(n_va_batches):
        [X_batch, y_batch] = get_batch(data_path, i)
        marked_labels_list.extend(y_batch)
        y_batch = to_categorical(y_batch)
        _batch_size = len(y_batch)
        fetches = [model.loss, model.y_pred]
        feed_dict = {
            model.X_inputs: X_batch,
            model.y_inputs: y_batch,
            model.batch_size: _batch_size,
            model.tst: True,
            model.keep_prob: 1.0
        }
        _cost, predict_labels = sess.run(fetches, feed_dict)
        _costs += _cost
        predict_labels_list.extend(predict_labels)
    f1_micro, f1_macro, score12 = cail_evaluator(predict_labels_list,
                                                 marked_labels_list)
    return f1_micro, f1_macro, score12
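# The CAIL snippets above rely on a to_categorical helper that is not shown here. Below is
# a minimal multi-hot sketch written as an assumption (the actual helper may differ, e.g.
# it could come from Keras or emit strict one-hot rows); the name to_categorical_multihot
# is hypothetical and only illustrates the encoding the feed dicts expect.
import numpy as np

def to_categorical_multihot(labels_batch, n_class):
    """Turn a batch of label ids (or lists of label ids) into 0/1 indicator rows."""
    out = np.zeros((len(labels_batch), n_class), dtype=np.float32)
    for row, labels in enumerate(labels_batch):
        out[row, np.atleast_1d(labels)] = 1.0
    return out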
def gen_number_sequence(nums):
    """ Method which generates sequence of numbers """
    X = np.zeros([nums, 10, 20], dtype=float)
    y = np.zeros([nums, 10, 20], dtype=float)
    for i in range(nums):
        start = np.random.randint(0, 10)
        num_seq = np.arange(start, start + 10)
        X[i] = to_categorical(num_seq, n_col=20)
        y[i] = np.roll(X[i], -1, axis=0)
    y[:, -1, 1] = 1  # Mark endpoint as 1
    return X, y
def gen_multiplication_series(nums):
    """ Method which generates multiplication series """
    X = np.zeros([nums, 10, 61], dtype=float)
    y = np.zeros([nums, 10, 61], dtype=float)
    for i in range(nums):
        start = np.random.randint(2, 7)
        mult_ser = np.linspace(start, start * 10, num=10, dtype=int)
        X[i] = to_categorical(mult_ser, n_col=61)
        y[i] = np.roll(X[i], -1, axis=0)
    y[:, -1, 1] = 1  # Mark endpoint as 1
    return X, y
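# Quick usage sketch for the two toy generators above (not part of the original code):
# checks the output shapes and inverts the one-hot encoding with argmax to show the
# generated series. The printed values in the comments are examples, not fixed outputs.
if __name__ == '__main__':
    X, y = gen_number_sequence(5)
    print(X.shape, y.shape)            # (5, 10, 20) (5, 10, 20)
    print(np.argmax(X[0], axis=-1))    # e.g. [3 4 5 6 7 8 9 10 11 12]
    X, y = gen_multiplication_series(5)
    print(X.shape, y.shape)            # (5, 10, 61) (5, 10, 61)
    print(np.argmax(X[0], axis=-1))    # e.g. [4 8 12 16 20 24 28 32 36 40]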
def valid_epoch(data_path, sess, model):
    va_batches = os.listdir(data_path)
    n_va_batches = len(va_batches)
    _costs = 0.0
    predict_labels_list = list()
    marked_labels_list = list()
    for i in range(n_va_batches):
        [X_batch, y_batch] = get_batch(data_path, i)
        marked_labels_list.extend(y_batch)
        y_batch = to_categorical(y_batch, cfg.n_class)
        _batch_size = len(y_batch)
        fetches = [model.loss, model.y_pred]
        feed_dict = {
            model.token_seq: X_batch,
            model.gold_label: y_batch,
            model.is_train: False
        }
        _cost, predict_labels = sess.run(fetches, feed_dict)
        _costs += _cost
        predict_labels_list.extend(predict_labels)
    f1_micro, f1_macro, score12 = cail_evaluator(predict_labels_list,
                                                 marked_labels_list)
    return f1_micro, f1_macro, score12
def valid_epoch(data_path, sess, model):
    _costs = 0.0
    predict_labels_list = list()
    marked_labels_list = list()
    for i in range(n_va_batches):
        [X_batch, y_batch] = get_batch(data_path, i)
        marked_labels_list.extend(y_batch)
        y_batch = to_categorical(y_batch, settings.n_class)
        _batch_size = len(y_batch)
        fetches = [model.loss, model.labels]
        feed_dict = {
            model.X_inputs: X_batch,
            model.y_inputs: y_batch,
            model.batch_size: _batch_size,
            model.tst: True,
            model.keep_prob: 1.0
        }
        _cost, predict_labels = sess.run(fetches, feed_dict)
        _costs += _cost
        predict_labels_list.extend(predict_labels)
    score = cail_imprisonment_evaluator(predict_labels_list, marked_labels_list)
    return _costs, score
def train_epoch(data_path, sess, model, bilm, elmo_context_input,
                context_token_ids, sess_bilm, train_fetches, valid_fetches,
                train_writer, test_writer, logger):
    global last_score12
    global lr
    time0 = time.time()
    batch_indexs = np.random.permutation(n_tr_batches)
    for batch in tqdm(range(n_tr_batches)):
        global_step = sess.run(model.global_step)
        if 0 == (global_step + 1) % FLAGS.valid_step:
            f1_micro, f1_macro, score12 = valid_epoch(
                data_valid_path, sess, model, bilm, elmo_context_input,
                context_token_ids, sess_bilm)
            print('Global_step=%d: f1_micro=%g, f1_macro=%g, score12=%g, time=%g s' %
                  (global_step, f1_micro, f1_macro, score12, time.time() - time0))
            logger.info('END:Global_step={}: f1_micro={}, f1_macro={}, score12={}'.format(
                sess.run(model.global_step), f1_micro, f1_macro, score12))
            time0 = time.time()
            if score12 > last_score12:
                last_score12 = score12
                saving_path = model.saver.save(sess, model_path, global_step + 1)
                print('saved new model to %s ' % saving_path)
        # training
        batch_id = batch_indexs[batch]
        [X_batch, y_batch] = get_batch(data_path, batch_id)
        elmo_context_vector = get_elmo(X_batch, bilm, elmo_context_input,
                                       context_token_ids, sess_bilm)
        # print('elmo', elmo_context_vector.shape)
        # exit(0)
        y_batch = to_categorical(y_batch, settings.n_class)
        _batch_size = len(y_batch)
        feed_dict = {
            model.X_inputs: elmo_context_vector,
            model.y_inputs: y_batch,
            model.batch_size: _batch_size,
            model.tst: False,
            model.keep_prob: FLAGS.keep_prob
        }
        summary, _cost, _, _ = sess.run(
            train_fetches, feed_dict)  # the cost is the mean cost of one batch
        # valid per 500 steps
        if 0 == (global_step + 1) % 500:
            train_writer.add_summary(summary, global_step)
            batch_id = np.random.randint(0, n_va_batches)  # pick a random validation batch
            [X_batch, y_batch] = get_batch(data_valid_path, batch_id)
            # the model consumes ELMo vectors, so embed the validation batch as well
            elmo_context_vector = get_elmo(X_batch, bilm, elmo_context_input,
                                           context_token_ids, sess_bilm)
            y_batch = to_categorical(y_batch, settings.n_class)
            _batch_size = len(y_batch)
            feed_dict = {
                model.X_inputs: elmo_context_vector,
                model.y_inputs: y_batch,
                model.batch_size: _batch_size,
                model.tst: True,
                model.keep_prob: 1.0
            }
            summary, _cost = sess.run(valid_fetches, feed_dict)
            test_writer.add_summary(summary, global_step)
def valid_epoch(data_path, sess, model, logger, global_step):
    va_batches = os.listdir(data_path)
    n_va_batches = len(va_batches)
    _costs = 0.0
    acc_predict_onehot = list()
    acc_marked_labels = list()
    law_marked_labels = list()
    law_predict_onehot = list()
    death_marked_labels = list()
    death_predict_onehot = list()
    imp_marked_labels = list()
    imp_predict_onehot = list()
    lif_marked_labels = list()
    lif_predict_onehot = list()
    for i in range(n_va_batches):
        [X_batch, acc, law, death, imp, lif] = get_batch(data_path, i)
        acc_marked_labels.extend(acc)
        law_marked_labels.extend(law)
        death_marked_labels.extend(death)
        imp_marked_labels.extend(imp)
        lif_marked_labels.extend(lif)
        acc_batch = to_categorical(acc, 202)
        law_batch = to_categorical(law, 183)
        death_batch = to_categorical_death(death, 2)
        imp_batch = np.expand_dims(imp, axis=1)
        lif_batch = to_categorical_death(lif, 2)
        _batch_size = len(acc_batch)
        fetches = [
            model.loss, model.accu_pred, model.law_pred, model.death_pred,
            model.imp_pred, model.lif_pred
        ]
        feed_dict = {
            model.X_inputs: X_batch,
            model.acc_y: acc_batch,
            model.article_y: law_batch,
            model.death_y: death_batch,
            model.imp_y: imp_batch,
            model.lif_y: lif_batch,
            model.batch_size: _batch_size,
            model.tst: True,
            model.keep_prob: 1.0
        }
        _cost, accu_pred, law_pred, death_pred, imp_pred, lif_pred = sess.run(
            fetches, feed_dict)
        _costs += _cost
        acc_predict_onehot.extend(accu_pred)
        law_predict_onehot.extend(law_pred)
        death_predict_onehot.extend(death_pred)
        imp_predict_onehot.extend(imp_pred)
        lif_predict_onehot.extend(lif_pred)
    accu_f1_micro, accu_f1_macro, accu_score12 = cail_evaluator(
        acc_predict_onehot, acc_marked_labels)
    law_f1_micro, law_f1_macro, law_score12 = cail_evaluator(
        law_predict_onehot, law_marked_labels)
    imp_score = cail_imprisonment_evaluator(
        death_predict_onehot, death_marked_labels, imp_predict_onehot,
        imp_marked_labels, lif_predict_onehot, lif_marked_labels)
    # score12 = accu_score12 + law_score12 + imp_score
    score12 = accu_score12  # only the accusation score is used for model selection
    logger.info('Global_step={}'.format(global_step))
    logger.info('accu: f1_micro={}, f1_macro={}, score12={}'.format(
        accu_f1_micro, accu_f1_macro, accu_score12))
    logger.info('law: f1_micro={}, f1_macro={}, score12={}'.format(
        law_f1_micro, law_f1_macro, law_score12))
    logger.info('imprisonment: score={}'.format(imp_score))
    return score12
def train_epoch(data_path, sess, model, train_fetches, valid_fetches,
                train_writer, test_writer, logger):
    global last_score12
    global lr
    batch_indexs = np.random.permutation(n_tr_batches)
    for batch in tqdm(range(n_tr_batches)):
        global_step = sess.run(model.global_step)
        if 0 == (global_step + 1) % FLAGS.valid_step:
            score12 = valid_epoch(data_valid_path, sess, model, logger, global_step)
            if score12 > last_score12:
                last_score12 = score12
                saving_path = model.saver.save(sess, model_path, global_step + 1)
                print('saved new model to %s ' % saving_path)
        # training
        batch_id = batch_indexs[batch]
        [X_batch, acc, law, death, imp, lif] = get_batch(data_path, batch_id)
        acc_batch = to_categorical(acc, 202)
        law_batch = to_categorical(law, 183)
        death_batch = to_categorical_death(death, 2)
        imp_batch = np.expand_dims(imp, axis=1)
        lif_batch = to_categorical_death(lif, 2)
        _batch_size = len(acc_batch)
        feed_dict = {
            model.X_inputs: X_batch,
            model.acc_y: acc_batch,
            model.article_y: law_batch,
            model.death_y: death_batch,
            model.imp_y: imp_batch,
            model.lif_y: lif_batch,
            model.batch_size: _batch_size,
            model.tst: False,  # training mode
            model.keep_prob: FLAGS.keep_prob
        }
        summary, _cost, _, _ = sess.run(
            train_fetches, feed_dict)  # the cost is the mean cost of one batch
        # valid per 500 steps
        if 0 == (global_step + 1) % 500:
            train_writer.add_summary(summary, global_step)
            batch_id = np.random.randint(0, n_va_batches)  # pick a random validation batch
            [X_batch, acc, law, death, imp, lif] = get_batch(data_valid_path, batch_id)
            acc_batch = to_categorical(acc, 202)
            law_batch = to_categorical(law, 183)
            death_batch = to_categorical_death(death, 2)
            imp_batch = np.expand_dims(imp, axis=1)
            lif_batch = to_categorical_death(lif, 2)
            _batch_size = len(acc_batch)
            feed_dict = {
                model.X_inputs: X_batch,
                model.acc_y: acc_batch,
                model.article_y: law_batch,
                model.death_y: death_batch,
                model.imp_y: imp_batch,
                model.lif_y: lif_batch,
                model.batch_size: _batch_size,
                model.tst: True,
                model.keep_prob: 1.0
            }
            summary, _cost = sess.run(valid_fetches, feed_dict)
            test_writer.add_summary(summary, global_step)
from DeepLearning.layers import Dense, Dropout, Conv2D, Flatten, Activation, MaxPooling2D
from DeepLearning.layers import AveragePooling2D, ZeroPadding2D, BatchNormalization, RNN

if __name__ == "__main__":
    ''' 2-D Convolutional Neural Network '''
    optimizer = Adam()

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    # Reshape X to (n_samples, channels, height, width)
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(