Code Example #1
    def test_run(self):
        np.random.seed(1)
        for m_c in self.methods_configs:
            print('Testing acquisition ' + m_c['name'])
            name = m_c['name'] + '_' + 'acquisition_gradient_testfile'
            unittest_result = run_eval(self.f_obj, self.f_bounds, self.f_inits, method_config=m_c, name=name, outpath=self.outpath, time_limit=None, unittest=self.is_unittest)
            original_result = np.loadtxt(self.outpath + '/' + name + '.txt')
            self.assertTrue((abs(original_result - unittest_result) < 1e-4).all())
Code Example #2
    def test_run(self):
        for m_c in self.methods_configs:
            np.random.seed(1)
            print('Testing other GPyOpt options: ' + m_c['name'])
            name = m_c['name'] + '_' + '_testfile'
            unittest_result = run_eval(self.f_obj, self.f_bounds, self.f_inits, method_config=m_c, name=name, outpath=self.outpath, time_limit=None, unittest=self.is_unittest)
            print(unittest_result)
            original_result = np.loadtxt(self.outpath + '/' + name + '.txt')
            self.assertTrue((abs(original_result - unittest_result) < 1e-4).all())
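
Both tests above follow the same file-based regression pattern: fix NumPy's random seed, run the optimizer through run_eval, and compare the fresh result against a previously saved baseline within an absolute tolerance. A minimal, self-contained sketch of that pattern (the computation under test and the baseline file name here are hypothetical):

import unittest
import numpy as np

def noisy_computation(n):
    # Hypothetical stand-in for the routine under regression test.
    return np.random.rand(n)

class RegressionTest(unittest.TestCase):
    def test_matches_baseline(self):
        np.random.seed(1)  # pin the RNG so every run is reproducible
        result = noisy_computation(5)
        # On the very first run, store a baseline instead:
        #   np.savetxt('baseline.txt', result)
        baseline = np.loadtxt('baseline.txt')
        # Element-wise comparison within a tolerance, as in the tests above.
        self.assertTrue((abs(baseline - result) < 1e-4).all())

if __name__ == '__main__':
    unittest.main()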
Code Example #3
File: inference.py  Project: wyu-du/neutralizing-bias
                                     tagging_model=tagging_model)

if CUDA:
    joint_model = joint_model.cuda()

if ARGS.checkpoint is not None and os.path.exists(ARGS.checkpoint):
    print('LOADING FROM ' + ARGS.checkpoint)
    # TODO(rpryzant): is there a way to do this more elegantly?
    # https://pytorch.org/tutorials/beginner/saving_loading_models.html#saving-loading-model-across-devices
    if CUDA:
        joint_model.load_state_dict(torch.load(ARGS.checkpoint))
        joint_model = joint_model.cuda()
    else:
        joint_model.load_state_dict(
            torch.load(ARGS.checkpoint, map_location='cpu'))
    print('...DONE')

# # # # # # # # # # # # EVAL # # # # # # # # # # # # # #
joint_model.eval()
hits, preds, golds, srcs = joint_utils.run_eval(joint_model, eval_dataloader,
                                                tok2id, ARGS.inference_output,
                                                ARGS.max_seq_len,
                                                ARGS.beam_width)

print('eval/bleu', seq2seq_utils.get_bleu(preds, golds))
print('eval/true_hits', np.mean(hits))

with open(ARGS.working_dir + '/stats.txt', 'w') as f:
    f.write('eval/bleu %f\n' % seq2seq_utils.get_bleu(preds, golds))
    f.write('eval/true_hits %f\n' % np.mean(hits))
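
The TODO above asks for a cleaner way to load the checkpoint across devices. torch.load accepts a map_location argument that remaps the stored tensors onto the target device, which collapses the two branches into one. A minimal sketch under the same CUDA setup (the checkpoint path is illustrative):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# map_location moves the saved tensors to the right device at load time,
# so the CPU and GPU cases share a single code path.
state_dict = torch.load('model.ckpt', map_location=device)
joint_model.load_state_dict(state_dict)
joint_model = joint_model.to(device)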
Code Example #4
model_parameters = filter(lambda p: p.requires_grad, joint_model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print('NUM PARAMS: ', params)

if ARGS.freeze_tagger and ARGS.pretrain_data:
    joint_optimizer = pretrain_optim
else:
    joint_optimizer = seq2seq_utils.build_optimizer(
        debias_model if ARGS.freeze_tagger else joint_model, num_train_steps)

# train model
print('JOINT TRAINING...')
print('INITIAL EVAL...')
joint_model.eval()
hits, preds, golds, srcs = joint_utils.run_eval(
    joint_model, eval_dataloader, tok2id,
    ARGS.working_dir + '/results_initial.txt', ARGS.max_seq_len,
    ARGS.beam_width)
writer.add_scalar('eval/bleu', seq2seq_utils.get_bleu(preds, golds), 0)
writer.add_scalar('eval/true_hits', np.mean(hits), 0)

for epoch in range(ARGS.epochs):
    print('EPOCH ', epoch)
    print('TRAIN...')
    losses = joint_utils.train_for_epoch(joint_model,
                                         train_dataloader,
                                         joint_optimizer,
                                         debias_loss_fn,
                                         tagging_loss_fn,
                                         ignore_tagger=False,
                                         coverage=ARGS.coverage)
    writer.add_scalar('train/loss', np.mean(losses), epoch + 1)
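
The freeze_tagger branch implies the tagger's weights are excluded from optimization. The usual PyTorch idiom is to clear requires_grad on that submodule before building the optimizer, which is also why the parameter count above filters on requires_grad. A toy sketch (the module and attribute names are illustrative, not the project's actual ones):

import torch.nn as nn

class JointSketch(nn.Module):
    # Toy stand-in with a tagger and a debiaser submodule.
    def __init__(self):
        super().__init__()
        self.tagger = nn.Linear(8, 8)
        self.debiaser = nn.Linear(8, 8)

model = JointSketch()
# Freeze the tagger so the optimizer only ever updates the debiaser.
for p in model.tagger.parameters():
    p.requires_grad = False

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('trainable params:', trainable)  # 72: the debiaser's 8*8 weights + 8 biases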
Code Example #5
File: train.py  Project: wyu-du/neutralizing-bias
    print('SAVING DEBIASER...')
    torch.save(model.state_dict(), ARGS.working_dir + '/debiaser.ckpt')

# # # # # # # # # # # # TRAINING # # # # # # # # # # # # # #

for epoch in range(ARGS.epochs):
    print('EPOCH ', epoch)
    print('TRAIN...')
    model.train()
    losses = utils.train_for_epoch(model,
                                   train_dataloader,
                                   tok2id,
                                   optimizer,
                                   loss_fn,
                                   coverage=ARGS.coverage)
    writer.add_scalar('train/loss', np.mean(losses), epoch + 1)

    print('SAVING...')
    model.save(ARGS.working_dir + '/model_%d.ckpt' % (epoch + 1))

    print('EVAL...')
    model.eval()
    hits, preds, golds, srcs = utils.run_eval(
        model, eval_dataloader, tok2id,
        ARGS.working_dir + '/results_%d.txt' % epoch, ARGS.max_seq_len,
        ARGS.beam_width)
    # writer.add_scalar('eval/partial_bleu', utils.get_partial_bleu(preds, golds, srcs), epoch+1)
    writer.add_scalar('eval/bleu', utils.get_bleu(preds, golds), epoch + 1)
    writer.add_scalar('eval/true_hits', np.mean(hits), epoch + 1)
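
The loop above persists weights both through torch.save(model.state_dict(), ...) and a project-specific model.save helper. The standard state_dict round trip restores the weights into a freshly constructed model of the same architecture; a minimal sketch (the file name is illustrative):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)                          # stand-in for the debiaser
torch.save(model.state_dict(), 'model_1.ckpt')   # persist the weights only

restored = nn.Linear(4, 2)                       # same architecture, fresh weights
restored.load_state_dict(torch.load('model_1.ckpt'))
restored.eval()  # switch off dropout/batch-norm updates before evaluation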
Code Example #6
#!/usr/bin/env python
# coding=utf-8
import argparse
from utils import calc_metrics, run_eval

if __name__ == "__main__":
    # Define and parse program input
    parser = argparse.ArgumentParser(
        description="evaluate classification metrics",
        formatter_class=argparse.RawTextHelpFormatter,
        usage="%(prog)s [-h] [options] -file FILE -head HEAD")
    parser.add_argument("-file",
                        help="The path to the labeled file.",
                        required=True)
    parser.add_argument("-head",
                        help="Num of the former character to be ignored.",
                        default=2)
    parser.add_argument("-verbose",
                        help="Whether to print classification report.",
                        default=True)

    args = parser.parse_args()
    # Run the program.
    run_eval(args.file, args.head, args.verbose)
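
One caveat with this parser: argparse has no boolean type, so a value passed as -verbose False arrives as the truthy string 'False'. A common workaround is a small converter function; a sketch (the script name in the usage comment is illustrative):

# Typical invocation:
#   python evaluate.py -file labeled.txt -head 2
import argparse

def str2bool(s):
    # Plain type=bool would treat any non-empty string as True.
    if s.lower() in ('1', 'true', 'yes'):
        return True
    if s.lower() in ('0', 'false', 'no'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean value')

parser = argparse.ArgumentParser()
parser.add_argument('-verbose', type=str2bool, default=True)
print(parser.parse_args(['-verbose', 'false']).verbose)  # False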