# Example 1
def run():
    """Train the R-NET model, writing a checkpoint after every epoch.

    Command-line flags control the learning rate, epoch count, whether a
    previous checkpoint is resumed, and the checkpoint directory.  Progress
    is appended to Results/rnet_training_result.txt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.001,
                        help='Learning Rate')
    parser.add_argument('--epochs', type=int, default=12, help='Epochs')
    # argparse's type=bool is a trap: bool('False') is True, so any value
    # given on the command line enabled the flag.  store_true is correct.
    parser.add_argument('--debug',
                        action='store_true',
                        help='print debug msgs')
    parser.add_argument('--load', action='store_true', help='load model')
    parser.add_argument('--save_dir',
                        type=str,
                        default='Models/save/',
                        help='checkpoint directory')

    args = parser.parse_args()

    # Context manager so the config file handle is not leaked.
    with open('Models/config.json', 'r') as cfg:
        modOpts = json.load(cfg)['rnet']['train']

    print('Reading data')
    dp = preprocess.read_data('train', modOpts)
    # -1 drops the final (possibly partial) batch.
    num_batches = int(np.floor(dp.num_samples / modOpts['batch_size'])) - 1

    rnet_model = model_rnet.R_NET(modOpts)
    input_tensors, loss, acc, pred_si, pred_ei = rnet_model.build_model()
    train_op = tf.train.AdadeltaOptimizer(
        1.0,
        rho=0.95,
        epsilon=1e-06,
    ).minimize(loss)

    saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=config)
    if args.load:
        # Resume from the first-epoch checkpoint inside the configured save
        # dir (was hard-coded to 'Models/save/', silently ignoring --save_dir).
        PATH = os.path.join(args.save_dir, 'rnet_model0.ckpt')
        start_epoch = 1
        saver.restore(sess, PATH)
        f = open('Results/rnet_training_result.txt', 'a')
    else:
        init = tf.global_variables_initializer()
        sess.run(init)
        f = open('Results/rnet_training_result.txt', 'w')
        start_epoch = 0

    try:
        for i in range(start_epoch, args.epochs):
            # Visit batches in a fresh random order each epoch.
            rl = random.sample(range(num_batches), num_batches)
            batch_no = 0
            LOSS = 0.0
            EM = 0.0
            while batch_no < num_batches:
                tensor_dict, idxs = dp.get_training_batch(rl[batch_no])
                feed_dict = {
                    input_tensors['p']: tensor_dict['paragraph'],
                    input_tensors['q']: tensor_dict['question'],
                    input_tensors['a_si']: tensor_dict['answer_si'],
                    input_tensors['a_ei']: tensor_dict['answer_ei'],
                }
                if modOpts['char_emb']:
                    # Character-level embedding inputs are optional per config.
                    feed_dict.update({
                        input_tensors['pc']: tensor_dict['paragraph_c'],
                        input_tensors['qc']: tensor_dict['question_c'],
                    })
                _, loss_value, accuracy, predictions_si, predictions_ei = sess.run(
                    [train_op, loss, acc, pred_si, pred_ei],
                    feed_dict=feed_dict)
                batch_no += 1
                LOSS += loss_value
                EM += accuracy
                print("{} epoch {} batch, Loss:{:.2f}, Acc:{:.2f}".format(
                    i, batch_no, loss_value, accuracy))
            save_path = saver.save(
                sess,
                os.path.join(args.save_dir, "rnet_model{}.ckpt".format(i)))
            f.write(' '.join(("Loss", str(LOSS / dp.num_samples), str(i), '\n')))
            f.write(' '.join(("EM", str(EM / num_batches), '\n')))
            f.write("---------------\n")
            f.flush()
            print("---------------")
    finally:
        # Close the results log even if training raises.
        f.close()
    save_path = saver.save(
        sess, os.path.join(args.save_dir, "rnet_model_final.ckpt"))
    print('save path:', save_path)
# Example 2
def run():
    """Run inference with a saved R-NET checkpoint and write predictions.

    Reads the 'testa' split, restores the model from --model_path, and
    writes tab-separated "<question id>\t<answer>" lines to
    Results/prediction.a.txt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        type=str,
                        default='rnet',
                        help='Model: match_lstm, bidaf, rnet')
    # store_true instead of type=bool: bool('False') is True in argparse,
    # so any explicit value used to enable the flag.
    parser.add_argument('--debug',
                        action='store_true',
                        help='print debug msgs')
    parser.add_argument('--dataset', type=str, default='testa', help='dataset')
    parser.add_argument('--model_path',
                        type=str,
                        default='Models/save/rnet_model_final.ckpt',
                        help='saved model path')

    args = parser.parse_args()
    if not args.model == 'rnet':
        raise NotImplementedError

    # Context manager so the config file handle is not leaked.
    with open('Models/config.json', 'r') as cfg:
        modOpts = json.load(cfg)[args.model]['dev']
    print('Model Configs:')
    pprint(modOpts)

    print('Reading data')
    if args.dataset == 'train':
        raise NotImplementedError
    elif args.dataset == 'testa':
        dp = prepro.read_data(args.dataset, modOpts)
    else:
        # Previously an unknown dataset fell through and later raised a
        # confusing NameError on `dp`; fail fast with a clear message.
        raise ValueError('unsupported dataset: {}'.format(args.dataset))

    model = model_rnet.R_NET(modOpts)
    input_tensors, loss, acc, pred = model.build_model()
    saved_model = args.model_path

    # ceil: the final partial batch is still evaluated.
    num_batches = int(np.ceil(dp.num_samples / modOpts['batch_size']))
    print(num_batches, 'batches')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    new_saver = tf.train.Saver()
    sess = tf.InteractiveSession(config=config)
    new_saver.restore(sess, saved_model)

    predictions = []

    for batch_no in range(num_batches):
        if args.model == 'rnet':
            paragraph, question, answer, ID, context, n = dp.get_testing_batch(
                batch_no)
            feed_dict = {
                input_tensors['p']: paragraph,
                input_tensors['q']: question,
                input_tensors['a']: answer,
            }

            pred_vec = sess.run(pred, feed_dict=feed_dict)
            # Pick the highest-scoring candidate answer for each question.
            pred_vec = np.argmax(pred_vec, axis=1)
            for q_id, prediction, candidates in zip(ID, pred_vec, context):
                prediction_answer = u''.join(candidates[prediction])
                predictions.append(str(q_id) + '\t' + prediction_answer)
    outputs = u'\n'.join(predictions)
    with codecs.open('Results/prediction.a.txt', 'w', encoding='utf-8') as f:
        f.write(outputs)
    print('done!')
# Example 3
def run():
    """Evaluate a saved R-NET checkpoint on the dev split (EM and F1).

    Restores the model from --model_path, scores every batch, prints
    running EM/F1, and dumps per-question predicted answers as JSON to
    Results/<model>_prediction.txt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='rnet',
                        help='Model: match_lstm, bidaf, rnet')
    # store_true instead of type=bool: bool('False') is True in argparse.
    parser.add_argument('--debug', action='store_true',
                        help='print debug msgs')
    parser.add_argument('--dataset', type=str, default='dev', help='dataset')
    parser.add_argument('--model_path', type=str,
                        default='Models/save/rnet_model0.ckpt',
                        help='saved model path')

    args = parser.parse_args()
    if not args.model == 'rnet':
        raise NotImplementedError

    # Context manager so the config file handle is not leaked.
    with open('Models/config.json', 'r') as cfg:
        modOpts = json.load(cfg)[args.model]['dev']
    print('Model Configs:')
    pprint(modOpts)

    print('Reading data')
    if args.dataset == 'train':
        raise NotImplementedError
    elif args.dataset == 'dev':
        dp = preprocess.read_data(args.dataset, modOpts)
    else:
        # Previously an unknown dataset fell through and later raised a
        # confusing NameError on `dp`; fail fast with a clear message.
        raise ValueError('unsupported dataset: {}'.format(args.dataset))

    model = model_rnet.R_NET(modOpts)
    input_tensors, loss, acc, pred_si, pred_ei = model.build_model()
    saved_model = args.model_path

    # ceil: the final partial batch is still evaluated.
    num_batches = int(np.ceil(dp.num_samples / modOpts['batch_size']))
    print(num_batches, 'batches')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    new_saver = tf.train.Saver()
    sess = tf.InteractiveSession(config=config)
    new_saver.restore(sess, saved_model)

    pred_data = {}

    EM = 0.0
    F1 = 0.0
    # Dummy values for the answer placeholders; the fetches below only read
    # pred_si/pred_ei, so the contents are unused.  zeros instead of
    # np.ndarray avoids feeding uninitialized memory.
    empty_answer_idx = np.zeros((modOpts['batch_size'], modOpts['p_length']))
    for batch_no in range(num_batches):
        if args.model == 'rnet':
            (context, context_original, paragraph, question, paragraph_c,
             question_c, answer_si, answer_ei, ID,
             n) = dp.get_testing_batch(batch_no)
            predictions_si, predictions_ei = sess.run(
                [pred_si, pred_ei],
                feed_dict={
                    input_tensors['p']: paragraph,
                    input_tensors['q']: question,
                    input_tensors['pc']: paragraph_c,
                    input_tensors['qc']: question_c,
                    input_tensors['a_si']: empty_answer_idx,
                    input_tensors['a_ei']: empty_answer_idx,
                })
        for i in range(n):
            parag = context[i]
            f1 = []
            p_tokens = []
            # Each question may have several ground-truth spans; score the
            # prediction against every one and keep the best match.
            for j in range(len(answer_si[i])):
                if answer_si[i][j] == answer_ei[i][j]:  # single word answer
                    truth_tokens = [parag[int(answer_si[i][j])]]
                    pred_tokens = [parag[int(predictions_si[i])]]
                else:
                    truth_tokens = parag[int(answer_si[i][j]):
                                         int(answer_ei[i][j]) + 1]
                    pred_tokens = parag[int(predictions_si[i]):
                                        int(predictions_ei[i]) + 1]
                f1.append(f1_score(pred_tokens, truth_tokens))
                p_tokens.append(pred_tokens)
            idx = np.argmax(f1)
            # Exact match only when both endpoints agree with a truth span.
            if (answer_si[i][idx] == int(predictions_si[i])
                    and answer_ei[i][idx] == int(predictions_ei[i])):
                EM += 1.0
            F1 += f1[idx]
            pred_data[ID[i]] = ' '.join(p_tokens[idx])
        print(batch_no, 'EM',
              '{:.5f}'.format(EM / (batch_no + 1) / modOpts['batch_size']),
              'F1', F1 / (batch_no + 1) / modOpts['batch_size'])
    print("---------------")
    print("EM", EM / dp.num_samples)
    print("F1", F1 / dp.num_samples)
    with open('Results/' + args.model + '_prediction.txt', 'w') as outfile:
        json.dump(pred_data, outfile)
# Example 4
            test_context_vec[i][j])])

# Map each question's token ids to their rows in the pre-trained embedding
# matrix, producing one list of embedding vectors per question.
test_question_emb = []
j = 0  # NOTE(review): dead assignment — j is rebound by the inner loop below
for i in range(len(test_question_vec)):
    test_question_emb.append([])
    for j in range(len(test_question_vec[i])):
        test_question_emb[i].append(embeddings_matrix[int(
            test_question_vec[i][j])])

# Evaluation options: the 'dev' section of the rnet config.
modOpts = json.load(open('Models/config.json', 'r'))['rnet']['dev']

num_samples = len(test_context)
# floor: any final partial batch is dropped.
num_batches = int(np.floor(num_samples / modOpts['batch_size']))

model = model_rnet.R_NET(modOpts)
input_tensors, loss, acc, pred_si, pred_ei = model.build_model()
# Checkpoint to evaluate — presumably produced by the training script
# after epoch 14; confirm the path before running.
saved_model = 'Models/save/rnet_model_14.ckpt'

config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand

new_saver = tf.train.Saver()
sess = tf.InteractiveSession(config=config)
new_saver.restore(sess, saved_model)

tensor_dict = {}
tensor_dict['paragraph'] = test_context_emb
tensor_dict['question'] = test_question_emb

# Placeholder values for the answer-index feeds.  np.ndarray returns an
# uninitialized array; presumably the contents are unused during inference —
# verify against which tensors are fetched from the session.
empty_answer_idx = np.ndarray((len(test_context), length_context))