Code example #1
def evaluate(model, session, data, global_steps=None, summary_writer=None):
    correct_num = 0
    total_num = len(data[0])
    for step, (x, y, mask_x) in enumerate(data_helper.batch_iter(data, batch_size=FLAGS.batch_size)):
        fetches = model.correct_num
        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.target] = y
        feed_dict[model.mask_x] = mask_x
        model.assign_new_batch_size(session, len(x))
        state = session.run(model._initial_state)
        for i, (c, h) in enumerate(model._initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        count = session.run(fetches, feed_dict)
        correct_num += count

    accuracy = float(correct_num) / total_num
    dev_summary = tf.summary.scalar('dev_accuracy', accuracy)
    dev_summary = session.run(dev_summary)
    if summary_writer:
        summary_writer.add_summary(dev_summary,global_steps)
        summary_writer.flush()
    return accuracy
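
Examples #1, #8, #13, and #32 all assume a data_helper.batch_iter(data, batch_size=...) generator that yields (x, y, mask_x) triples and lets the last batch be smaller (hence the model.assign_new_batch_size call). The helper itself is not reproduced on this page; a minimal sketch under that assumption, not the projects' actual implementation, could look like this:

def batch_iter(data, batch_size):
    # `data` is assumed to be an (x, y, mask_x) triple of aligned arrays,
    # matching the way evaluate() above uses len(data[0]) as the example count.
    x, y, mask_x = data
    num_examples = len(x)
    for start in range(0, num_examples, batch_size):
        end = min(start + batch_size, num_examples)
        # The slicing axis for mask_x may differ if the mask is stored time-major.
        yield x[start:end], y[start:end], mask_x[start:end]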
Code example #2
def run_epoch(model, session, data, global_steps, valid_model, valid_data, train_summary_writer, valid_summary_writer=None):
    # for step, (x, y, mask_x) in enumerate(data_helper.batch_iter(data, batch_size=FLAGS.batch_size)):
    for step, (x, y, seq_length) in enumerate(data_helper.batch_iter(FLAGS.max_len, data, batch_size=FLAGS.batch_size, usemydata=FLAGS.using_mydata)):
        feed_dict = {}
        feed_dict[model.input_data] = x  # input data goes here
        # feed_dict[model.seq_length] = seq_length  # lengths prepared in advance
        if FLAGS.using_mydata:
            feed_dict[model.seq_length] = seq_length
        else:
            feed_dict[model.mask_x] = seq_length
        feed_dict[model.target] = y
        model.assign_new_batch_size(session, len(x))  # the batch length can differ each time
        fetches = [model.cost, model.accuracy, model.train_op, model.summary]

        state = session.run(model._initial_state)
        for i, (c, h) in enumerate(model._initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        cost, accuracy, _, summary = session.run(fetches, feed_dict)
        train_summary_writer.add_summary(summary, global_steps)
        train_summary_writer.flush()

        valid_accuracy = evaluate(valid_model, session, valid_data, global_steps, valid_summary_writer)
        if global_steps % 1 == 0:
            print("the %i step, train cost is: %f and the train accuracy is %f and the valid accuracy is %f"
                  % (global_steps, cost, accuracy, valid_accuracy))
        global_steps += 1

    return global_steps
Code example #3
    def run_one_epoch(self, sess, train, dev, tag2label, epoch, saver):

        num_batches = (len(train) + self.batch_size - 1) // self.batch_size
        start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        batches = batch_iter(train, self.batch_size, shuffle=self.shuffle)
        step = -1
        for batch in batches:
            step+=1
            seqs, labels = zip(*batch)
            if step % 20 == 0:
                print(' processing: {} batch / {} batches.'.format(step + 1, num_batches) + '\r')
            step_num = epoch * num_batches + step + 1
            feed_dict, _ = self.get_feed_dict(seqs, labels, self.lr, self.dropout_keep_prob)  #
            _, loss_train, summary, step_num_ = sess.run([self.train_op, self.loss, self.merged, self.global_step],
                                                         feed_dict=feed_dict)
            if step + 1 == 1 or (step + 1) % 300 == 0 or step + 1 == num_batches:
                print('{} epoch {}, step {}, loss: {:.4}, global_step: {}'.format(
                    start_time, epoch + 1, step + 1,
                    loss_train, step_num))

            self.file_writer.add_summary(summary, step_num)

            if step + 1 == num_batches:
                saver.save(sess, self.model_path, global_step=step_num)

        label_list_dev, seq_len_list_dev = self.dev_one_epoch(sess, dev)
        self.evaluate(label_list_dev, seq_len_list_dev, dev, epoch)
Code example #4
def predict(model_file, vocab_processor, params, labels, text_list):
    x_test = np.array(list(vocab_processor.transform(text_list)))
    batches = data_helper.batch_iter(list(x_test),
                                     params['batch_size'],
                                     1,
                                     shuffle=False)
    all_predictions = []
    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=False)
        sess = tf.Session(config=session_conf)

        with sess.as_default():
            saver = tf.train.import_meta_graph("{}.meta".format(model_file))
            saver.restore(sess, model_file)
            input_x = graph.get_operation_by_name("input_x").outputs[0]
            dropout_keep_prob = graph.get_operation_by_name(
                "dropout_keep_prob").outputs[0]
            predictions = graph.get_operation_by_name(
                "output/predictions").outputs[0]

            for x_test_batch in batches:
                batch_predictions = sess.run(predictions, {
                    input_x: x_test_batch,
                    dropout_keep_prob: 1.0
                })
                all_predictions = np.concatenate(
                    [all_predictions, batch_predictions])
    to_return = []
    for inc in all_predictions:
        to_return.append(labels[int(inc)])
    return to_return
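
Example #4 above, and most of the CNN prediction and training examples below (#17, #18, #25, #34, #36, #37), use the other common signature, data_helper.batch_iter(data, batch_size, num_epochs, shuffle=...). A typical implementation of that generator, assumed here since the helper is not shown on this page, is roughly:

import numpy as np

def batch_iter(data, batch_size, num_epochs, shuffle=True):
    # Yield one mini-batch of `data` at a time for `num_epochs` passes,
    # optionally reshuffling the examples at the start of every epoch.
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for epoch in range(num_epochs):
        shuffled = data[np.random.permutation(data_size)] if shuffle else data
        for batch_num in range(num_batches_per_epoch):
            start = batch_num * batch_size
            end = min(start + batch_size, data_size)
            yield shuffled[start:end]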
Code example #5
def train_graph(sess, textRNN, x_train, y_train, batch_size, dropout_prob, trainMode=True):
    """
    Train model on training set
    """
    loss_list, acc_list = [], []
    loss, acc, ct = 0, 0, 0

    batches = data_helper.batch_iter(list(zip(x_train, y_train)), batch_size)
    for batch in batches:
        x_batch, y_batch = zip(*batch)
        feed = {
            textRNN.x: x_batch,
            textRNN.y: y_batch,
            textRNN.dropout_keep_prob: dropout_prob
        }
        if trainMode:
            sess.run([textRNN.train_step], feed_dict=feed)
        curr_loss, curr_acc = sess.run([textRNN.loss, textRNN.accuracy], feed_dict=feed)


        # Contain loss/acc per batch
        loss_list.append(curr_loss)
        acc_list.append(curr_acc)
        # Contain loss/acc per epoch
        loss, acc, ct = loss + curr_loss, acc + curr_acc, ct + 1

    return loss/float(ct), acc/float(ct), loss_list, acc_list
Code example #6
def run(train_x,
        visible_size,
        hidden_size=30,
        num_epoch=100,
        batch_size=16,
        lr=1e-3,
        test_rate=0.1):
    print("training begin")
    rbm = RBM.RBM_Model(visible_size=visible_size,
                        hidden_size=hidden_size,
                        lr=lr)
    if os.path.exists(W_path):
        rbm.W = load_param(W_path)
        rbm.b_v = load_param(b_v_path)
        rbm.b_h = load_param(b_h_path)
    iter_time = 0
    for data in dh.batch_iter(train_x, num_epoch, batch_size):
        #rbm.lr = lr*math.pow(10,-1*(iter_time/5000))
        iter_time += 1
        rbm.train(data, iter_time)
    print("saving parameters...")
    np.save(W_path, rbm.W)
    np.save(b_v_path, rbm.b_v)
    np.save(b_h_path, rbm.b_h)
    print("save done!")
    return rbm
Code example #7
def valid_model(sess, lstm, valid_ori_quests, valid_cand_quests, labels,
                results, test_cat_ids):
    total_loss, idx = 0, 0
    total_ori_cand = []
    #total_right, total_wrong, step = 0, 0, 0, 0
    for ori_valid, cand_valid, neg_valid, cat_ids_test in batch_iter(
            valid_ori_quests,
            valid_cand_quests,
            test_cat_ids,
            FLAGS.batch_size,
            1,
            isvalid=True):
        loss, ori_cand = run_step(sess, ori_valid, cand_valid, cand_valid,
                                  cat_ids_test, lstm, FLAGS.dropout, False,
                                  False)
        total_loss += loss
        total_ori_cand.extend(ori_cand)
        #total_right += right
        #total_wrong += wrong
        idx += 1

    acc, MAP, MRR = cal_acc(labels, results, total_ori_cand)
    logger.info("evaluation acc:%s" % (acc))
    logger.info("evaluation MAP:%s" % (MAP))
    logger.info("evaluation MRR:%s" % (MRR))

    return acc, MAP, MRR
Code example #8
def evaluate(model, session, data, global_steps=None, summary_writer=None):
    correct_num = 0
    total_num = len(data[0])
    for step, (x, y, mask_x) in enumerate(
            data_helper.batch_iter(data, batch_size=FLAGS.batch_size)):

        fetches = model.correct_num
        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.target] = y
        feed_dict[model.mask_x] = mask_x
        model.assign_new_batch_size(session, len(x))
        state = session.run(model._initial_state)
        for i, (c, h) in enumerate(model._initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        count = session.run(fetches, feed_dict)
        correct_num += count

    accuracy = float(correct_num) / total_num
    dev_summary = tf.summary.scalar('dev_accuracy', accuracy)
    dev_summary = session.run(dev_summary)
    if summary_writer:
        summary_writer.add_summary(dev_summary, global_steps)
        summary_writer.flush()
    return accuracy
Code example #9
def validate_model(sess, cnn, valid_x, valid_y):
    start_time = time.time()
    batches = batch_iter(zip(valid_x, valid_y),
                         FLAGS.batch_size,
                         shuffle=False)
    total_loss, total_acc, total_elapsed_time = 0, 0, 0
    idx = 0
    pred_labels = list()
    act_labels = list()
    for batch in batches:
        batch_x, batch_y = zip(*batch)
        step, cur_loss, cur_acc, predicts, elapsed_time = valid_step(
            sess, cnn, batch_x, batch_y, is_optimizer=False)
        total_loss += cur_loss
        total_acc += cur_acc
        total_elapsed_time += elapsed_time
        idx += 1
        pred_labels.extend(predicts)
        act_labels.extend(batch_y)

    aver_loss = 1. * total_loss / idx
    aver_acc = 1. * total_acc / idx
    aver_elapsed_time = 1. * total_elapsed_time / idx
    validate_elapsed_time = time.time() - start_time
    logger.info(
        "validation loss:%s, acc:%s, %6.7f secs/batch_size, total elapsed time: %6.7f"
        % (aver_loss, aver_acc, aver_elapsed_time, validate_elapsed_time))
    return pred_labels, act_labels
Code example #10
def run_epoch(model, session, data, global_steps, valid_model, valid_data,
              train_summary_writer):
    for step, batch in enumerate(batch_iter(data,
                                            batch_size=FLAGS.batch_size)):
        x, y, mask_x = zip(*batch)
        feed_dict = {
            model.input_data: x,
            model.target: y,
            model.mask_x: np.transpose(mask_x)
        }

        fetches = [model.cost, model.accuracy, model.train_op, model.summary]
        cost, accuracy, _, summary = session.run(fetches, feed_dict)

        train_summary_writer.add_summary(summary, global_steps)
        train_summary_writer.flush()

        timestr = datetime.datetime.now().isoformat()
        logging.info(
            "%s, the %i step, train cost is:%f and the train accuracy is %6.7f"
            % (timestr, global_steps, cost, accuracy))
        if (global_steps % FLAGS.evaluate_every == 0):
            valid_accuracy = evaluate(valid_model, session, valid_data,
                                      global_steps)
            logging.info("%s, the valid accuracy is %f" %
                         (timestr, valid_accuracy))

        global_steps += 1

    return global_steps
Code example #11
def evaluate(model, session, data, global_steps=None, summary_writer=None):
    correct_num = 0
    if FLAGS.using_mydata:
        total_num = len(data[0][0])  # the relevant dimension is different for my data
    else:
        total_num = len(data[0])
    for step, (x, y, seq_length) in enumerate(data_helper.batch_iter(FLAGS.max_len, data, batch_size=FLAGS.batch_size, usemydata=FLAGS.using_mydata)):

        fetches = [model.correct_num, model.accuracy]
        feed_dict = {}
        feed_dict[model.input_data] = x
        # feed_dict[model.seq_length] = seq_length  # lengths prepared in advance
        if FLAGS.using_mydata:
            feed_dict[model.seq_length] = seq_length
        else:
            feed_dict[model.mask_x] = seq_length
        feed_dict[model.target] = y
        model.assign_new_batch_size(session, len(x))
        state = session.run(model._initial_state)
        for i, (c, h) in enumerate(model._initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        count, acc = session.run(fetches, feed_dict)
        correct_num += count
        # print('step:', step, 'count:', count, 'correct_num:', correct_num, 'acc:', acc)

    accuracy = float(correct_num) / total_num
    dev_summary = tf.summary.scalar('dev_accuracy', accuracy)
    dev_summary = session.run(dev_summary)
    if summary_writer:
        summary_writer.add_summary(dev_summary, global_steps)
        summary_writer.flush()
    return accuracy
Code example #12
def run():
    # training parameters
    batch_size = 128
    num_epochs = 100
    maxlen = 8
    step = 1
    next_n = 1

    # model parameters
    num_units = 300
    num_rnn_layers = 1
    vocab_size = 10000

    model = RNN(batch_size, maxlen, num_units, num_rnn_layers, vocab_size)

    texts = batch_iter("poetry.txt", batch_size, num_epochs, maxlen, vocab_size, step, next_n)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        i = 0
        for text in texts:
            x_batch, y_batch = zip(*text)
            # lr = 0.5 * (0.99 ** i)
            lr = 0.01
            sess.run(tf.assign(model.learning_rate, lr))

            train_loss, _ = sess.run([model.loss, model.train_op],
                                     feed_dict={model.xs: x_batch, model.ys: y_batch})
            i += 1
            if i % 500 == 0:
                print("Epoch: %s, loss: %s" % (i, train_loss))
                saver.save(sess, 'model/rnn.ckpt', global_step=i)
Code example #13
def run_epoch(model,
              session,
              data,
              global_steps,
              valid_model,
              valid_data,
              train_summary_writer,
              valid_summary_writer=None):
    for step, (x, y, mask_x) in enumerate(
            data_helper.batch_iter(data, batch_size=FLAGS.batch_size)):

        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.target] = y
        feed_dict[model.mask_x] = mask_x
        model.assign_new_batch_size(session, len(x))
        fetches = [model.cost, model.accuracy, model.train_op, model.summary]
        state = session.run(model._initial_state)
        for i, (c, h) in enumerate(model._initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        cost, accuracy, _, summary = session.run(fetches, feed_dict)
        train_summary_writer.add_summary(summary, global_steps)
        train_summary_writer.flush()
        valid_accuracy = evaluate(valid_model, session, valid_data,
                                  global_steps, valid_summary_writer)
        if global_steps % 100 == 0:
            print(
                "the %i step, train cost is: %f and the train accuracy is %f and the valid accuracy is %f"
                % (global_steps, cost, accuracy, valid_accuracy))
        global_steps += 1

    return global_steps
Code example #14
File: predict.py Project: person-lee/H-CNN-GRU
def evaluate(session, test_x, test_y, corpus, global_steps=None):
    total_correct_num=0
    total_busi_num = 0
    total_busi_correct_num = 0
    total_other_num = 0
    total_other_correct_num = 0
    data = list(zip(test_x, test_y, corpus))
    total_num = len(data)
    for step, batch in enumerate(batch_iter(data, batch_size=FLAGS.batch_size, shuffle=False)):
        x, input_y, batch_corpus = zip(*batch)
        input_x, sess_len, sent_len = format_input_x(x)
        fetches = [model_prediction, model_correct_num, model_accuracy]
        feed_dict={
            model.input_data:input_x,
            model.target:input_y,
            model.session_lengths:sess_len,
            model.sent_lengths:sent_len,
            model.dropout_ratio:1.0
        }
        
        prediction, correct_num, acc = session.run(fetches, feed_dict)
        other_correct_num, other_num, busi_correct_num, busi_num = cal_detail_acc(prediction, correct_num, input_y, batch_corpus)
        total_correct_num += correct_num
        total_busi_num += busi_num
        total_busi_correct_num += busi_correct_num
        total_other_num += other_num
        total_other_correct_num += other_correct_num

    accuracy=float(total_correct_num)/total_num
    busi_acc = float(total_busi_correct_num) / total_busi_num
    other_acc = float(total_other_correct_num) / total_other_num
    logger.info("validation success")

    return accuracy, busi_acc, other_acc
Code example #15
def train():
    x = tf.placeholder(tf.int32, [None, None], name='x')
    y = tf.placeholder(tf.int32, [None], name='y')
    lr = TextCNN.INIT_LEARNING_RATE
    embedding = tf.Variable(embedding_table, dtype=tf.float32, trainable=False)
    # embedding = tf.Variable(tf.random_uniform([TextCNN.VOCAB_SIZE, TextCNN.EMBED_FEATURE], -1.0, 1.0))
    input = tf.nn.embedding_lookup(embedding, x)

    model = TextCNN.TextCNN()
    logits_train = model.inference(input, Training=True)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_train,
                                                          labels=y,
                                                          name='loss')
    loss_ = tf.reduce_mean(loss) + tf.nn.l2_loss(model.fc.get_weights()[0])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss_)

    logits = model.inference(input)
    correct_pred = tf.equal(tf.argmax(logits, axis=1), tf.cast(y, tf.int64))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))

    sum_correct_pred = tf.reduce_sum(tf.cast(correct_pred, dtype=tf.float32))

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        for epoch in range(TextCNN.EPOCH):
            for step, (x_, y_) in enumerate(
                    data_helper.batch_iter(x_train, y_train,
                                           TextCNN.BATCH_SIZE)):
                # print(sess.run(input , feed_dict={x:x_}))
                _ = sess.run(train_op, feed_dict={x: x_, y: y_})
                if step % 64 == 0:
                    # print(y_)
                    # print(sess.run(tf.argmax(logits, axis=1), feed_dict={x: x_}))
                    print('epoch :', epoch, 'step :', step, ' train_acc = ',
                          sess.run(accuracy, feed_dict={
                              x: x_,
                              y: y_
                          }))
            sum_ = 0
            for (x__, y__) in data_helper.batch_iter(x_test, y_test,
                                                     TextCNN.BATCH_SIZE):
                tmp = sess.run(sum_correct_pred, feed_dict={x: x__, y: y__})
                sum_ += tmp
            print('epoch ', epoch, 'acc = ', sum_ / len(y_test))
            if epoch % 30 == 0:
                # Note: halving this Python float does not change the AdamOptimizer
                # built above, which captured the initial lr at graph-construction time;
                # a learning-rate placeholder or tf.Variable would be needed for decay.
                lr /= 2
Code example #16
def test(test_x, rbm, batch_size=16, topK=30):
    test_recomendation = []
    test_s = []
    for test_data in dh.batch_iter(test_x, num_epoch=1, batch_size=batch_size):
        v1_state = rbm.recomendation(test_data, topK)
        test_s.extend(test_data)
        test_recomendation.extend(v1_state)
    return test_s, test_recomendation
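
The RBM examples (#6 and #16) call yet another variant, dh.batch_iter(data, num_epoch, batch_size), which takes the epoch count before the batch size and yields plain slices with no labels. A sketch consistent with that call pattern, again an assumption since dh is not shown, would be:

def batch_iter(data, num_epoch, batch_size):
    # Iterate over `data` for num_epoch passes, yielding one slice per mini-batch.
    data_size = len(data)
    for _ in range(num_epoch):
        for start in range(0, data_size, batch_size):
            yield data[start:start + batch_size]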
Code example #17
def predict_unseen_data():
	"""Step 0: load trained model and parameters"""
	params = json.loads(open('./parameters.json').read())
	checkpoint_dir = sys.argv[1]
	if not checkpoint_dir.endswith('/'):
		checkpoint_dir += '/'
	checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir + 'checkpoints')
	logging.critical('Loaded the trained model: {}'.format(checkpoint_file))

	"""Step 1: load data for prediction"""
	test_file = sys.argv[2]
	x_test, y_test, revs, labels = data_helper.load_data_and_labels(test_file)
	logging.info('The number of x_test: {}'.format(len(x_test)))
	logging.info('The number of y_test: {}'.format(len(y_test)))

	vocab_path = os.path.join(checkpoint_dir, "vocab.pickle")
	vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
	x_raw = x_test  # keep the raw sentences for the human-readable output below
	x_test = np.array(list(vocab_processor.transform(x_test)))

	"""Step 2: compute the predictions"""
	graph = tf.Graph()
	with graph.as_default():
		# configure and initialize the session
		session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
		sess = tf.Session(config=session_conf)

		with sess.as_default():
			saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
			saver.restore(sess, checkpoint_file)

			# Get the placeholders from the graph by name
			input_x = graph.get_operation_by_name("input_x").outputs[0]
			dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

			# Tensors we want to evaluate
			predictions = graph.get_operation_by_name("output/predictions").outputs[0]

			# Generate batches for one epoch
			batches = data_helper.batch_iter(list(x_test), params['batch_size'], 1, shuffle=False)
			
			# Collect the predictions here
			all_predictions = []
			for x_test_batch in batches:
				batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
				all_predictions = np.concatenate([all_predictions, batch_predictions])

	# Print accuracy if y_test is defined
	if y_test is not None:
		y_test = np.argmax(y_test, axis=1)
		correct_predictions = sum(all_predictions == y_test)
		logging.critical('The accuracy is: {}'.format(correct_predictions / float(len(y_test))))

	# Save the evaluation to a csv
	predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
	out_path = os.path.join(checkpoint_dir, "..", "prediction.csv")
	logging.critical("Saving evaluation to {0}".format(out_path))
	with open(out_path, 'w') as f:
		csv.writer(f).writerows(predictions_human_readable)
Code example #18
def predict_new_data():
	"""Step 0: load trained model and parameters"""
	params = json.loads(open('./parameters.json').read())
	checkpoint_dir = sys.argv[1]
	if not checkpoint_dir.endswith('/'):
		checkpoint_dir += '/'
	checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir + 'checkpoints')
	logging.critical('Loaded the trained model: {}'.format(checkpoint_file))

	"""Step 1: load data for prediction"""
	test_file = sys.argv[2]
	test_examples = json.loads(open(test_file).read())

	# labels.json was saved during training, and it has to be loaded during prediction
	labels = json.loads(open('./labels.json').read())
	one_hot = np.zeros((len(labels), len(labels)), int)
	np.fill_diagonal(one_hot, 1)
	label_dict = dict(zip(labels, one_hot))

	x_raw = [example['consumer_complaint_narrative'] for example in test_examples]
	x_test = [data_helper.clean_str(x) for x in x_raw]
	logging.info('The number of x_test: {}'.format(len(x_test)))

	y_test = None
	if 'product' in test_examples[0]:
		y_raw = [example['product'] for example in test_examples]
		y_test = [label_dict[y] for y in y_raw]
		logging.info('The number of y_test: {}'.format(len(y_test)))

	vocab_path = os.path.join(checkpoint_dir, "vocab.pickle")
	vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
	x_test = np.array(list(vocab_processor.transform(x_test)))

	"""Step 2: compute the predictions"""
	graph = tf.Graph()
	with graph.as_default():
		session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
		sess = tf.Session(config=session_conf)

		with sess.as_default():
			saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
			saver.restore(sess, checkpoint_file)

			input_x = graph.get_operation_by_name("input_x").outputs[0]
			dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
			predictions = graph.get_operation_by_name("output/predictions").outputs[0]

			batches = data_helper.batch_iter(list(x_test), params['batch_size'], 1, shuffle=False)
			all_predictions = []
			for x_test_batch in batches:
				batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
				all_predictions = np.concatenate([all_predictions, batch_predictions])

	if y_test is not None:
		y_test = np.argmax(y_test, axis=1)
		correct_predictions = sum(all_predictions == y_test)
		logging.critical('The accuracy is: {}'.format(correct_predictions / float(len(y_test))))
Code example #19
def train():
    x_train, x_test, y_train, y_test = data_process()
    print("x_train's shape:", x_train.shape)
    with tf.device('/gpu:0'):
        with tf.Graph().as_default():
            session_conf = tf.ConfigProto(
                allow_soft_placement = allow_soft_placement,
                log_device_placement = log_device_placement
            )
            sess = tf.Session(config=session_conf)
            with sess.as_default():
                cnn = TextCNN(
                    sequence_length = x_train.shape[1],
                    num_classes = y_train.shape[1],
                    filter_sizes = filter_sizes,
                    num_filters = num_filters,
                    l2_reg_lambda = l2_reg_lambda,
                    embedding_size=embedding_dim)

                global_step = 0

                train_op = tf.train.AdamOptimizer(learn_rate).minimize(cnn.loss)

                timestamp = str(int(time.time()))
                out_dir = os.path.abspath(os.path.join(os.path.curdir,timestamp))
                print('Writing to {}'.format(out_dir))

                checkpoint_dir = os.path.abspath(os.path.join(out_dir,'checkpoints'))

                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                saver = tf.train.Saver()

                # initialize all variables
                # 全局初始化
                sess.run(tf.global_variables_initializer())

                batches = data_helper.batch_iter(
                    list(zip(x_train,y_train)),
                    batch_size,
                    num_epochs
                )

                best_acc = 0
                current_acc = 0
                for batch in batches:
                    global_step += 1
                    x_batch, y_batch = zip(*batch)
                    train_step(sess, cnn, x_batch, y_batch, train_op, global_step)
                    if global_step % evaluate_every == 0:
                        print('\n Evaluation:')
                        current_acc = dev_step(sess, cnn, x_test, y_test, global_step)
                        print('')
                    if global_step % checkpoint_every == 0 and current_acc > best_acc:
                        path = saver.save(sess, checkpoint_dir + '/save_net.ckpt')
                        best_acc = current_acc
                        print('Saved model checkpoint to {}\n'.format(path))
Code example #20
def valid_model(sess, lstm, valid_ori_quests, valid_cand_quests, labels, results):
    logger.info("start to validate model")
    total_ori_cand = []
    for ori_valid, cand_valid, neg_valid in batch_iter(valid_ori_quests, valid_cand_quests, FLAGS.batch_size, 1, is_valid=True):
        ori_cand = valid_run_step(sess, ori_valid, cand_valid, lstm)
        total_ori_cand.extend(ori_cand)

    data_len = len(total_ori_cand)
    acc = cal_acc(labels[:data_len], results[:data_len], total_ori_cand)
    timestr = datetime.datetime.now().isoformat()
    logger.info("%s, evaluation acc:%s"%(timestr, acc))
Code example #21
def valid_model_train_format(sess, cnn, valid_ori_quests, valid_cand_quests,
                             valid_neg_quests, test_cat_ids):

    for ori_valid, cand_valid, neg_valid, cat_ids_test in batch_iter(
            valid_ori_quests,
            valid_cand_quests,
            test_cat_ids,
            FLAGS.batch_size,
            1,
            neg_quests=valid_neg_quests):
        run_step(sess, ori_valid, cand_valid, neg_valid, cat_ids_test, cnn,
                 FLAGS.dropout, False)
Code example #22
File: cnn_deep.py Project: eqiihuu/dialog_classifier
 def train(self, dropout, check_step, save_step, batch_size, epoch_num,
           model_name, train_word, train_vds, train_reg, train_y, dev_word,
           dev_vds, dev_reg, dev_y, test_word, test_vds, test_reg, test_y,
           id2label):
     root_path = './save/%d_%d_%d/' % (self.word, self.vds, time.time())
     print 'ROOT_PATH: %s' % root_path
     os.mkdir(root_path)
     curr_step = 0
     batches = dh.batch_iter(
         list(zip(train_word, train_vds, train_reg, train_y)), batch_size,
         epoch_num, True)
     dev_feed_dict = {
         self.x_word: dev_word,
         self.x_vds: dev_vds,
         self.x_reg: dev_reg,
         self.y: dev_y,
         self.dropout_keep: 1.0
     }
     sess = tf.InteractiveSession()
     sess.run(self.init)
     max_devacc = 0
     step_max_devacc = 0
     # Training
     for batch in batches:
         if len(batch) == 0:
             continue
         word_batch, vds_batch, reg_batch, y_batch = zip(*batch)
         train_feed_dict = {
             self.x_word: word_batch,
             self.x_vds: vds_batch,
             self.x_reg: reg_batch,
             self.y: y_batch,
             self.dropout_keep: dropout
         }
         self.train_step.run(feed_dict=train_feed_dict)
         # print 'Step %d, %s' % (curr_step, time.time())
         curr_step += 1
         if curr_step % check_step == 0:
             dev_acc = self.accuracy.eval(dev_feed_dict)
             train_acc = self.accuracy.eval(train_feed_dict)
             print 'Step %d, Train: %.03f' % (curr_step, train_acc)
             print '          Dev Accuracy: %.03f' % dev_acc
             if curr_step % save_step == 0:
                 save_model_path = os.path.join(
                     root_path,
                     "model_%d_devacc_%.3f" % (curr_step, dev_acc))
                 saver = tf.train.Saver(tf.global_variables())
                 saver.save(sess, save_model_path)
                 if dev_acc > max_devacc:
                     step_max_devacc = curr_step
                     max_devacc = dev_acc
     return max_devacc, step_max_devacc, root_path
Code example #23
 def dev_one_epoch(self, sess, dev):
     """
     Run prediction over the whole dev set, one batch at a time.

     :param sess: active tf.Session
     :param dev: dev data, a list of (sequence, tags) pairs
     :return: (label_list, seq_len_list) collected over all dev batches
     """
     label_list, seq_len_list = [], []
     for batch in batch_iter(dev, self.batch_size, shuffle=False):
         seqs, labels  = zip(*batch)
         label_list_, seq_len_list_ = self.predict_one_batch(sess, seqs)
         label_list.extend(label_list_)
         seq_len_list.extend(seq_len_list_)
     return label_list, seq_len_list
Code example #24
File: run_RNN.py Project: Ivy99999/PinyinClassify
def evaluate(sess, x_, y_):
    """评估在某一数据上的准确率和损失"""
    data_len = len(x_)
    batch_eval = batch_iter(x_, y_, 128)
    total_loss = 0.0
    total_acc = 0.0
    for x_batch, y_batch in batch_eval:
        batch_len = len(x_batch)
        feed_dict = feed_data(x_batch, y_batch, 1.0)
        loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)
        total_loss += loss * batch_len
        total_acc += acc * batch_len

    return total_loss / data_len, total_acc / data_len
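
Example #24 depends on module-level feed_data and model objects that are not part of the excerpt. A hypothetical sketch of the feed_data helper it assumes (the attribute names input_x, input_y, and keep_prob are guesses based on the similar examples on this page) might be:

def feed_data(x_batch, y_batch, keep_prob):
    # Build the feed dictionary expected by the module-level `model`.
    return {
        model.input_x: x_batch,
        model.input_y: y_batch,
        model.keep_prob: keep_prob,
    }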
Code example #25
File: predict_merge.py Project: sidrai97/DarwinBot
def predict_unseen_data(userMessage):
	os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
	#logging.getLogger().setLevel(logging.INFO)
	"""Step 0: load trained model and parameters"""
	mainDir = "C:/Users/sid/Desktop/Darwin/DarwinBot/process_message/intentClassifier/"
	params = json.loads(open(mainDir+'parameters.json').read())
	#checkpoint_dir = mainDir+"trained_model_1519274258/"
	checkpoint_dir = mainDir+"trained_model_1522994422"
	if not checkpoint_dir.endswith('/'):
		checkpoint_dir += '/'
	checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir + 'checkpoints')
	#logging.critical('Loaded the trained model: {}'.format(checkpoint_file))

	"""Step 1: load data for prediction"""
	
	# labels.json was saved during training, and it has to be loaded during prediction
	labels = json.loads(open(mainDir+'labels.json').read())
	one_hot = np.zeros((len(labels), len(labels)), int)
	np.fill_diagonal(one_hot, 1)
	label_dict = dict(zip(labels, one_hot))

	x_raw = userMessage
	x_test = [data_helper.clean_str(x_raw)]
	#logging.info('The number of x_test: {}'.format(len(x_test)))
	
	vocab_path = os.path.join(checkpoint_dir, "vocab.pickle")
	vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
	x_test = np.array(list(vocab_processor.transform(x_test)))

	"""Step 2: compute the predictions"""
	graph = tf.Graph()
	with graph.as_default():
		session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
		sess = tf.Session(config=session_conf)

		with sess.as_default():
			saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
			saver.restore(sess, checkpoint_file)

			input_x = graph.get_operation_by_name("input_x").outputs[0]
			dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
			predictions = graph.get_operation_by_name("output/predictions").outputs[0]

			batches = data_helper.batch_iter(list(x_test), params['batch_size'], 1, shuffle=False)
			all_predictions = []
			for x_test_batch in batches:
				batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
				all_predictions = np.concatenate([all_predictions, batch_predictions])
				all_predictions = all_predictions.tolist()
	return labels[int(all_predictions[0])]
Code example #26
def valid_model(sess, cnn, valid_ori_quests, valid_cand_quests, labels, results):
    total_loss, idx = 0, 0
    total_ori_cand = []
    #total_right, total_wrong, step = 0, 0, 0, 0
    for ori_valid, cand_valid, neg_valid in batch_iter(valid_ori_quests, valid_cand_quests, FLAGS.batch_size, 1, is_valid=True):
        loss, ori_cand = run_step(sess, ori_valid, cand_valid, cand_valid, cnn, FLAGS.dropout, False)
        total_loss += loss
        total_ori_cand.extend(ori_cand)
        #total_right += right
        #total_wrong += wrong
        idx += 1

    acc = cal_acc(labels, results, total_ori_cand)
    timestr = datetime.datetime.now().isoformat()
    logging.info("%s, evaluation loss:%s, acc:%s"%(timestr, total_loss/idx, acc))
Code example #27
def evaluate(rnn, sess, x, y):
    """在其他数据集上评估模型的准确率"""
    data_len = len(x)
    total_loss = 0.0
    total_acc = 0.0
    for x_batch, y_batch in batch_iter(x, y):
        batch_len = len(x_batch)
        feed_dict = {
            rnn.input_x: x_batch,
            rnn.input_y: y_batch,
            rnn.keep_prob: 1.0
        }
        loss, acc = sess.run([rnn.loss, rnn.acc], feed_dict)
        total_loss += loss * batch_len
        total_acc += acc * batch_len
    return total_loss / data_len, total_acc / data_len
Code example #28
 def prediction(x, y):
     batches = batch_iter(x, y, BATCH_SIZE, 1)
     outputs = []
     predictions = []
     logits = []
     for batch_x, batch_y in batches:
         logit, prediction = sess.run([model.logits, model.predictions],
                                      feed_dict={
                                          model.x: batch_x,
                                          model.y: batch_y,
                                          model.keep_prob: 1.0
                                      })
         logits.extend(logit)
         predictions.extend(prediction.tolist())
         outputs.extend(batch_y.tolist())
     return logits, predictions, outputs
Code example #29
def evaluate(model, session, data, global_steps=None, summary_writer=None):

    correct_num = 0
    total_num = len(data[0])
    # state = session.run(model.initial_state)
    state_fw = session.run(model.initial_state_fw)
    state_bw = session.run(model.initial_state_bw)

    for step, (x, y, mask_x) in enumerate(
            data_helper.batch_iter(data, batch_size=FLAGS.batch_size)):

        # fetches = [model.correct_num, model.final_state]
        fetches = [
            model.correct_num, model.final_state_fw, model.final_state_bw
        ]
        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.targets] = y
        feed_dict[model.mask_x] = mask_x
        # if step == 0:
        #     model.assign_new_batch_size(session, len(x))
        #     state = session.run(model.initial_state)
        # for i, (c, h) in enumerate(model.initial_state):
        #    feed_dict[c] = state[i].c
        #    feed_dict[h] = state[i].h
        for i, (c, h) in enumerate(model.initial_state_fw):
            feed_dict[c] = state_fw[i].c
            feed_dict[h] = state_fw[i].h
        for i, (c, h) in enumerate(model.initial_state_bw):
            feed_dict[c] = state_bw[i].c
            feed_dict[h] = state_bw[i].h

        # count, state = session.run(fetches, feed_dict)
        count, state_fw, state_bw = session.run(fetches, feed_dict)
        correct_num += count

    # print("-----------------------------------------")
    # print(correct_num)
    # print(total_num)

    accuracy = float(correct_num) / total_num
    dev_summary = tf.summary.scalar('dev_accuracy', accuracy)
    dev_summary = session.run(dev_summary)
    if summary_writer:
        summary_writer.add_summary(dev_summary, global_steps)
        summary_writer.flush()
    return accuracy
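
The (c, h) feeding loops in examples #1, #13, #29, and #32 rely on the model exposing its initial state as a per-layer tuple of LSTMStateTuple objects, which is what MultiRNNCell.zero_state returns in TF 1.x. A minimal sketch of how such a state might be built, assumed rather than taken from these projects, is:

import tensorflow as tf

num_layers, hidden_size, batch_size = 2, 128, 32
cells = [tf.nn.rnn_cell.LSTMCell(hidden_size) for _ in range(num_layers)]
cell_fw = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
# zero_state returns one LSTMStateTuple(c, h) per layer, so it can be iterated
# exactly like model.initial_state_fw above, and the c/h tensors can be used as
# feed_dict keys to inject a state carried over from a previous run.
initial_state_fw = cell_fw.zero_state(batch_size, tf.float32)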
Code example #30
def only_test(test_x, test_y, kernel_initializer):
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(
        per_process_gpu_memory_fraction=0.5))
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # with tf.Session() as sess:
        tf.set_random_seed(seed)
        BATCH_SIZE = args.batch_size
        model = biLSTM(max_input_length=args.max_input_len,
                       num_class=len(args.hidden_ratio),
                       input_dim=args.input_dim,
                       hidden_layer_num=args.hidden_layers,
                       bi_direction=args.bi_directional,
                       use_attention=args.use_attention,
                       attention_size=args.attention_size,
                       num_hidden=args.num_hidden,
                       fc_num_hidden=args.fc_num_hidden,
                       hidden_layer_num_bi=args.hidden_layers_bi,
                       num_hidden_bi=args.num_hidden_bi)
        # Define training procedure
        global_step = tf.Variable(0, trainable=False)

        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        # Load variables from pre-trained model
        if not args.pre_trained == "none":
            pre_trained_variables = [
                v for v in tf.global_variables() if "Adam" not in v.name
            ]
            saver_pre = tf.train.Saver(pre_trained_variables)
            ckpt = tf.train.get_checkpoint_state(args.summary_dir)
            saver_pre.restore(sess, ckpt.model_checkpoint_path)
        batches = batch_iter(test_x, test_y, BATCH_SIZE, 1)
        outputs = []
        predictions = []
        for batch_x, batch_y in batches:
            prediction, = sess.run([model.predictions],
                                   feed_dict={
                                       model.x: batch_x,
                                       model.y: batch_y,
                                       model.keep_prob: 1.0
                                   })
            predictions.extend(prediction.tolist())
            outputs.extend(batch_y.tolist())
        accuracy = sum(np.equal(predictions, outputs)) / len(outputs)
        print("test accuracy: %f" % accuracy)
Code example #31
def evaluate(model, session, data, global_steps=None):
    correct_num = 0
    total_num = len(data)
    for step, batch in enumerate(batch_iter(data, batch_size=FLAGS.batch_size)):
        x, y, mask_x = zip(*batch)
        fetches = model.correct_num
        feed_dict = {
            model.input_data: x,
            model.target: y,
            model.mask_x: np.transpose(mask_x)
        }

        count = session.run(fetches, feed_dict)
        correct_num += count

    accuracy = float(correct_num) / total_num
    return accuracy
Code example #32
def run_epoch(model, session, data, global_steps, valid_model, valid_data, train_summary_writer, valid_summary_writer=None):
    for step, (x, y, mask_x) in enumerate(data_helper.batch_iter(data, batch_size=FLAGS.batch_size)):

        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.target] = y
        feed_dict[model.mask_x] = mask_x
        model.assign_new_batch_size(session, len(x))
        fetches = [model.cost, model.accuracy, model.train_op, model.summary]
        state = session.run(model._initial_state)
        for i, (c, h) in enumerate(model._initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        cost, accuracy, _, summary = session.run(fetches, feed_dict)
        train_summary_writer.add_summary(summary, global_steps)
        train_summary_writer.flush()
        valid_accuracy = evaluate(valid_model, session, valid_data, global_steps, valid_summary_writer)
        if global_steps % 100 == 0:
            print("the %i step, train cost is: %f and the train accuracy is %f and the valid accuracy is %f"
                  % (global_steps, cost, accuracy, valid_accuracy))
        global_steps += 1

    return global_steps
Code example #33
def run_epoch(model, session, data, global_steps, valid_model, valid_data, train_summary_writer):
    for step, batch in enumerate(batch_iter(data, batch_size=FLAGS.batch_size)):
        x, y, mask_x = zip(*batch)
        feed_dict = {
            model.input_data: x,
            model.target: y,
            model.mask_x: np.transpose(mask_x)
        }

        fetches = [model.cost, model.accuracy, model.train_op, model.summary]
        cost, accuracy, _, summary = session.run(fetches, feed_dict)

        train_summary_writer.add_summary(summary, global_steps)
        train_summary_writer.flush()

        timestr = datetime.datetime.now().isoformat()
        logging.info("%s, the %i step, train cost is:%f and the train accuracy is %6.7f" % (timestr, global_steps, cost, accuracy))
        if global_steps % FLAGS.evaluate_every == 0:
            valid_accuracy = evaluate(valid_model, session, valid_data, global_steps)
            logging.info("%s, the valid accuracy is %f" % (timestr, valid_accuracy))

        global_steps += 1

    return global_steps
Code example #34
def predict_unseen_data():
	"""Step 0: load trained model and parameters"""
	params = json.loads(open('./parameters.json').read())
	checkpoint_dir = sys.argv[1]
	if not checkpoint_dir.endswith('/'):
		checkpoint_dir += '/'
	checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir + 'checkpoints')
	logging.critical('Loaded the trained model: {}'.format(checkpoint_file))

	"""Step 1: load data for prediction"""
	test_file = sys.argv[2]
	test_examples = json.loads(open(test_file).read())

	# labels.json was saved during training, and it has to be loaded during prediction
	labels = json.loads(open('./labels.json').read())
	one_hot = np.zeros((len(labels), len(labels)), int)
	np.fill_diagonal(one_hot, 1)
	label_dict = dict(zip(labels, one_hot))

	x_raw = [example['consumer_complaint_narrative'] for example in test_examples]
	x_test = [data_helper.clean_str(x) for x in x_raw]
	logging.info('The number of x_test: {}'.format(len(x_test)))

	y_test = None
	if 'product' in test_examples[0]:
		y_raw = [example['product'] for example in test_examples]
		y_test = [label_dict[y] for y in y_raw]
		logging.info('The number of y_test: {}'.format(len(y_test)))

	vocab_path = os.path.join(checkpoint_dir, "vocab.pickle")
	vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
	x_test = np.array(list(vocab_processor.transform(x_test)))

	"""Step 2: compute the predictions"""
	graph = tf.Graph()
	with graph.as_default():
		session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
		sess = tf.Session(config=session_conf)

		with sess.as_default():
			saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
			saver.restore(sess, checkpoint_file)

			input_x = graph.get_operation_by_name("input_x").outputs[0]
			dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
			predictions = graph.get_operation_by_name("output/predictions").outputs[0]

			batches = data_helper.batch_iter(list(x_test), params['batch_size'], 1, shuffle=False)
			all_predictions = []
			for x_test_batch in batches:
				batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
				all_predictions = np.concatenate([all_predictions, batch_predictions])

	if y_test is not None:
		y_test = np.argmax(y_test, axis=1)
		correct_predictions = sum(all_predictions == y_test)

		# Save the actual labels back to file
		actual_labels = [labels[int(prediction)] for prediction in all_predictions]

		for idx, example in enumerate(test_examples):
			example['new_prediction'] = actual_labels[idx]
		
		with open('./data/small_samples_prediction.json', 'w') as outfile:
			json.dump(test_examples, outfile, indent=4)

		logging.critical('The accuracy is: {}'.format(correct_predictions / float(len(y_test))))
		logging.critical('The prediction is complete')
Code example #35
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))


print "print "
# Initializing the variables
init = tf.initialize_all_variables()

print "Launch the graph"
print len(x_inp)
ma = len(x_input)
# Launch the graph
batches = data_helper.batch_iter(list(zip(x_input, y_input)), 64, 200)
Code example #36
def train_cnn():
	"""Step 0: load sentences, labels, and training parameters"""
	train_file = sys.argv[1]
	x_raw, y_raw, df, labels = data_helper.load_data_and_labels(train_file)

	parameter_file = sys.argv[2]
	params = json.loads(open(parameter_file).read())

	"""Step 1: pad each sentence to the same length and map each word to an id"""
	max_document_length = max([len(x.split(' ')) for x in x_raw])
	logging.info('The maximum length of all sentences: {}'.format(max_document_length))
	vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
	x = np.array(list(vocab_processor.fit_transform(x_raw)))
	y = np.array(y_raw)

	"""Step 2: split the original dataset into train and test sets"""
	x_, x_test, y_, y_test = train_test_split(x, y, test_size=0.1, random_state=42)

	"""Step 3: shuffle the train set and split the train set into train and dev sets"""
	shuffle_indices = np.random.permutation(np.arange(len(y_)))
	x_shuffled = x_[shuffle_indices]
	y_shuffled = y_[shuffle_indices]
	x_train, x_dev, y_train, y_dev = train_test_split(x_shuffled, y_shuffled, test_size=0.1)

	"""Step 4: save the labels into labels.json since predict.py needs it"""
	with open('./labels.json', 'w') as outfile:
		json.dump(labels, outfile, indent=4)

	logging.info('x_train: {}, x_dev: {}, x_test: {}'.format(len(x_train), len(x_dev), len(x_test)))
	logging.info('y_train: {}, y_dev: {}, y_test: {}'.format(len(y_train), len(y_dev), len(y_test)))

	"""Step 5: build a graph and cnn object"""
	graph = tf.Graph()
	with graph.as_default():
		session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
		sess = tf.Session(config=session_conf)
		with sess.as_default():
			cnn = TextCNN(
				sequence_length=x_train.shape[1],
				num_classes=y_train.shape[1],
				vocab_size=len(vocab_processor.vocabulary_),
				embedding_size=params['embedding_dim'],
				filter_sizes=list(map(int, params['filter_sizes'].split(","))),
				num_filters=params['num_filters'],
				l2_reg_lambda=params['l2_reg_lambda'])

			global_step = tf.Variable(0, name="global_step", trainable=False)
			optimizer = tf.train.AdamOptimizer(1e-3)
			grads_and_vars = optimizer.compute_gradients(cnn.loss)
			train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

			timestamp = str(int(time.time()))
			out_dir = os.path.abspath(os.path.join(os.path.curdir, "trained_model_" + timestamp))

			checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
			checkpoint_prefix = os.path.join(checkpoint_dir, "model")
			if not os.path.exists(checkpoint_dir):
				os.makedirs(checkpoint_dir)
			saver = tf.train.Saver()

			# One training step: train the model with one batch
			def train_step(x_batch, y_batch):
				feed_dict = {
					cnn.input_x: x_batch,
					cnn.input_y: y_batch,
					cnn.dropout_keep_prob: params['dropout_keep_prob']}
				_, step, loss, acc = sess.run([train_op, global_step, cnn.loss, cnn.accuracy], feed_dict)

			# One evaluation step: evaluate the model with one batch
			def dev_step(x_batch, y_batch):
				feed_dict = {cnn.input_x: x_batch, cnn.input_y: y_batch, cnn.dropout_keep_prob: 1.0}
				step, loss, acc, num_correct = sess.run([global_step, cnn.loss, cnn.accuracy, cnn.num_correct], feed_dict)
				return num_correct

			# Save the word_to_id map since predict.py needs it
			vocab_processor.save(os.path.join(out_dir, "vocab.pickle"))
			sess.run(tf.global_variables_initializer())

			# Training starts here
			train_batches = data_helper.batch_iter(list(zip(x_train, y_train)), params['batch_size'], params['num_epochs'])
			best_accuracy, best_at_step = 0, 0

			"""Step 6: train the cnn model with x_train and y_train (batch by batch)"""
			for train_batch in train_batches:
				x_train_batch, y_train_batch = zip(*train_batch)
				train_step(x_train_batch, y_train_batch)
				current_step = tf.train.global_step(sess, global_step)

				"""Step 6.1: evaluate the model with x_dev and y_dev (batch by batch)"""
				if current_step % params['evaluate_every'] == 0:
					dev_batches = data_helper.batch_iter(list(zip(x_dev, y_dev)), params['batch_size'], 1)
					total_dev_correct = 0
					for dev_batch in dev_batches:
						x_dev_batch, y_dev_batch = zip(*dev_batch)
						num_dev_correct = dev_step(x_dev_batch, y_dev_batch)
						total_dev_correct += num_dev_correct

					dev_accuracy = float(total_dev_correct) / len(y_dev)
					logging.critical('Accuracy on dev set: {}'.format(dev_accuracy))

					"""Step 6.2: save the model if it is the best based on accuracy on dev set"""
					if dev_accuracy >= best_accuracy:
						best_accuracy, best_at_step = dev_accuracy, current_step
						path = saver.save(sess, checkpoint_prefix, global_step=current_step)
						logging.critical('Saved model at {} at step {}'.format(path, best_at_step))
						logging.critical('Best accuracy is {} at step {}'.format(best_accuracy, best_at_step))

			"""Step 7: predict x_test (batch by batch)"""
			test_batches = data_helper.batch_iter(list(zip(x_test, y_test)), params['batch_size'], 1)
			total_test_correct = 0
			for test_batch in test_batches:
				x_test_batch, y_test_batch = zip(*test_batch)
				num_test_correct = dev_step(x_test_batch, y_test_batch)
				total_test_correct += num_test_correct

			test_accuracy = float(total_test_correct) / len(y_test)
			logging.critical('Accuracy on test set is {} based on the best model {}'.format(test_accuracy, path))
			logging.critical('The training is complete')
Code example #37
with graph.as_default():
    session_conf = tf.ConfigProto(
      allow_soft_placement=FLAGS.allow_soft_placement,
      log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Get the placeholders from the graph by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name("output/predictions").outputs[0]

        # Generate batches for one epoch
        batches = data_helper.batch_iter(x_test, FLAGS.batch_size, 1, shuffle=False)

        # Collect the predictions here
        all_predictions = []

        for x_test_batch in batches:
            batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])

        # Write the predictions to a csv file
        data_helper.create_submission_file(all_predictions, FLAGS.submission_filename)

Code example #38
File: rnn_train.py Project: AntNLP/opie
                Max["NN-P"] = prec
                Max["NN-R"] = rec
                Max["NN-F"] = f1
                Max["P"] = precision
                Max["R"] = recall
                Max["F"] = f1_score
                print("Max result")
                for key, value in Max.items():
                    print(key, value)
                print()
            if writer:
                writer.add_summary(summaries, step)
            return pred

        # Generate batches
        batches = data_helper.batch_iter(list(zip(X_train, y_train)),
            FLAGS.batch_size, FLAGS.num_epochs)
        # Training loop. For each batch...
        for batch in batches:
            X_batch, y_batch = zip(*batch)
            train_step(X_batch, y_batch)
            current_step = tf.train.global_step(sess, global_step)
            if current_step % FLAGS.evaluate_every == 0:
                print("\nEvaluation:")
                pred = dev_step(X_dev, y_dev,
                                writer=dev_summary_writer)
            if current_step % FLAGS.checkpoint_every == 0:
                path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                print("Saved model checkpoint to {}\n".format(path))
print("Max result")
print(domain)
for key, value in Max.items():