def main():
    args = parse_args()
    checkpoint_path = Path(args.checkpoint)
    checkpoint_dir = checkpoint_path.parent
    params_path = checkpoint_dir / 'params.json'
    vocab_dir = checkpoint_dir / 'vocab'

    params = Params.from_file(params_path)
    train_params, model_params = params.pop('train'), params.pop('model')

    tokenizer = WordTokenizer(start_tokens=['<s>'], end_tokens=['</s>'])
    token_indexer = SingleIdTokenIndexer(lowercase_tokens=True)
    dataset_reader = SnliReader(
        tokenizer=tokenizer, token_indexers={'tokens': token_indexer})

    valid_dataset = dataset_reader.read(
        train_params.pop('valid_dataset_path'))
    if not args.test_dataset:
        test_dataset_path = train_params.pop('test_dataset_path')
    else:
        test_dataset_path = args.test_dataset
    test_dataset = dataset_reader.read(test_dataset_path)
    if args.only_label:
        test_dataset = [d for d in test_dataset
                        if d.fields['label'].label == args.only_label]
    vocab = Vocabulary.from_files(vocab_dir)
    random.shuffle(valid_dataset)

    model_params['token_embedder']['pretrained_file'] = None
    model = SNLIModel(params=model_params, vocab=vocab)
    model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'),
                          strict=False)
    model.to(args.cuda_device)
    model.eval()
    torch.set_grad_enabled(False)

    iterator = BasicIterator(batch_size=32)
    iterator.index_with(vocab)

    for dataset in (valid_dataset, test_dataset):
        generator = iterator(dataset, shuffle=False, num_epochs=1)
        model.get_metrics(reset=True)
        for batch in tqdm(generator):
            batch = move_to_device(batch, cuda_device=args.cuda_device)
            model(premise=batch['premise'],
                  hypothesis=batch['hypothesis'],
                  label=batch['label'])
        metrics = model.get_metrics()
        pprint(metrics)
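
# The evaluation entry point above relies on a parse_args() helper that is
# not shown in this section. The following is only a minimal sketch, assuming
# argparse and only the attributes actually read above (checkpoint,
# test_dataset, only_label, cuda_device); flag names and defaults are
# illustrative assumptions, not the original interface.
import argparse


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', required=True,
                        help='Model checkpoint; params.json and vocab/ are '
                             'expected in the same directory.')
    parser.add_argument('--test-dataset', default=None,
                        help='Optional test set path; if omitted, the '
                             'test_dataset_path from params.json is used.')
    parser.add_argument('--only-label', default=None,
                        help='If given, keep only test instances with this '
                             'gold label.')
    parser.add_argument('--cuda-device', type=int, default=0,
                        help='CUDA device index (assumed to be an int).')
    return parser.parse_args()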
def main():
    args = parse_args()
    checkpoint_path = Path(args.checkpoint)
    checkpoint_dir = checkpoint_path.parent
    params_path = checkpoint_dir / 'params.json'
    vocab_dir = checkpoint_dir / 'vocab'

    params = Params.from_file(params_path)
    train_params, model_params = params.pop('train'), params.pop('model')

    tokenizer = WordTokenizer(start_tokens=['<s>'], end_tokens=['</s>'])
    token_indexer = SingleIdTokenIndexer(lowercase_tokens=True)
    dataset_reader = SnliReader(
        tokenizer=tokenizer, token_indexers={'tokens': token_indexer})

    valid_dataset = dataset_reader.read(
        train_params.pop('valid_dataset_path'))
    vocab = Vocabulary.from_files(vocab_dir)
    random.shuffle(valid_dataset)

    model_params['token_embedder']['pretrained_file'] = None
    model = SNLIModel(params=model_params, vocab=vocab)
    model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'),
                          strict=False)
    model.eval()

    iterator = BasicIterator(batch_size=1)
    iterator.index_with(vocab)
    generator = iterator(valid_dataset)
    label_token_to_index = vocab.get_token_to_index_vocabulary('labels')

    for i in range(10):
        batch = next(generator)
        print('----')
        print(' '.join(
            model.convert_to_readable_text(batch['premise']['tokens'])[0]))
        for label, label_index in label_token_to_index.items():
            label_tensor = torch.tensor([label_index])
            enc_embs = model.embed(batch['premise']['tokens'])
            enc_mask = get_text_field_mask(batch['premise'])
            enc_hidden = model.encode(inputs=enc_embs,
                                      mask=enc_mask,
                                      drop_start_token=True)
            code, kld = model.sample_code_and_compute_kld(enc_hidden)
            generated = model.generate(code=code,
                                       label=label_tensor,
                                       max_length=enc_mask.sum(1) * 2,
                                       beam_size=10,
                                       lp_alpha=args.lp_alpha)
            text = model.convert_to_readable_text(generated[:, 0])[0]
            print(label)
            print(' '.join(text))
def main():
    args = parse_args()
    checkpoint_path = Path(args.checkpoint)
    checkpoint_dir = checkpoint_path.parent
    params_path = checkpoint_dir / 'params.json'
    vocab_dir = checkpoint_dir / 'vocab'

    params = Params.from_file(params_path)
    train_params, model_params = params.pop('train'), params.pop('model')

    tokenizer = WordTokenizer(start_tokens=['<s>'], end_tokens=['</s>'])
    token_indexer = SingleIdTokenIndexer(lowercase_tokens=True)
    dataset_reader = SnliReader(
        tokenizer=tokenizer, token_indexers={'tokens': token_indexer})

    valid_dataset = dataset_reader.read(
        train_params.pop('valid_dataset_path'))
    vocab = Vocabulary.from_files(vocab_dir)

    model_params['token_embedder']['pretrained_file'] = None
    model = SNLIModel(params=model_params, vocab=vocab)
    model.load_state_dict(torch.load(checkpoint_path, map_location='cpu'),
                          strict=False)
    model.to(args.device)
    model.eval()

    iterator = BasicIterator(batch_size=args.batch_size)
    iterator.index_with(vocab)
    generator = iterator(valid_dataset, num_epochs=1, shuffle=False)
    label_index_to_token = vocab.get_index_to_token_vocabulary('labels')

    out_file = open(args.out, 'w')
    for batch in tqdm(generator):
        premise_tokens = batch['premise']['tokens']
        enc_embs = model.embed(premise_tokens.to(args.device))
        enc_mask = get_text_field_mask(batch['premise']).to(args.device)
        enc_hidden = model.encode(inputs=enc_embs,
                                  mask=enc_mask,
                                  drop_start_token=True)
        code, kld = model.sample_code_and_compute_kld(enc_hidden)
        pre_text = model.convert_to_readable_text(premise_tokens[:, 1:])
        label_tensor = batch['label'].to(args.device)
        generated = model.generate(code=code,
                                   label=label_tensor,
                                   max_length=25,
                                   beam_size=10,
                                   lp_alpha=args.lp_alpha)
        text = model.convert_to_readable_text(generated[:, 0])
        for pre_text_b, text_b, label_index_b in zip(pre_text, text,
                                                     label_tensor):
            obj = {'sentence1': ' '.join(pre_text_b),
                   'sentence2': ' '.join(text_b),
                   'gold_label': label_index_to_token[label_index_b.item()]}
            out_file.write(json.dumps(obj))
            out_file.write('\n')
def evaluate(args):
    with open(args.data, 'rb') as f:
        test_dataset: SNLIDataset = pickle.load(f)
    word_vocab = test_dataset.word_vocab
    label_vocab = test_dataset.label_vocab

    model = SNLIModel(num_classes=len(label_vocab),
                      num_words=len(word_vocab),
                      word_dim=args.word_dim,
                      hidden_dim=args.hidden_dim,
                      clf_hidden_dim=args.clf_hidden_dim,
                      clf_num_layers=args.clf_num_layers,
                      use_leaf_rnn=args.leaf_rnn,
                      intra_attention=args.intra_attention,
                      use_batchnorm=args.batchnorm,
                      dropout_prob=args.dropout)
    num_params = sum(np.prod(p.size()) for p in model.parameters())
    num_embedding_params = np.prod(model.word_embedding.weight.size())
    print(f'# of parameters: {num_params}')
    print(f'# of word embedding parameters: {num_embedding_params}')
    print(f'# of parameters (excluding word embeddings): '
          f'{num_params - num_embedding_params}')
    model.load_state_dict(torch.load(args.model))
    model.eval()
    if args.gpu > -1:
        model.cuda(args.gpu)

    test_data_loader = DataLoader(dataset=test_dataset,
                                  batch_size=args.batch_size,
                                  collate_fn=test_dataset.collate)
    num_correct = 0
    num_data = len(test_dataset)
    for batch in test_data_loader:
        pre = wrap_with_variable(batch['pre'], volatile=True, gpu=args.gpu)
        hyp = wrap_with_variable(batch['hyp'], volatile=True, gpu=args.gpu)
        pre_length = wrap_with_variable(batch['pre_length'], volatile=True,
                                        gpu=args.gpu)
        hyp_length = wrap_with_variable(batch['hyp_length'], volatile=True,
                                        gpu=args.gpu)
        label = wrap_with_variable(batch['label'], volatile=True,
                                   gpu=args.gpu)
        logits = model(pre=pre, pre_length=pre_length,
                       hyp=hyp, hyp_length=hyp_length)
        label_pred = logits.max(1)[1].squeeze(1)
        num_correct_batch = torch.eq(label, label_pred).long().sum()
        num_correct_batch = unwrap_scalar_variable(num_correct_batch)
        num_correct += num_correct_batch
    print(f'# data: {num_data}')
    print(f'# correct: {num_correct}')
    print(f'Accuracy: {num_correct / num_data:.4f}')
def evaluate(args):
    with open(args.data, 'rb') as f:
        test_dataset: SNLIDataset = pickle.load(f)
    word_vocab = test_dataset.word_vocab
    label_vocab = test_dataset.label_vocab

    model = SNLIModel(num_classes=len(label_vocab),
                      num_words=len(word_vocab),
                      word_dim=args.word_dim,
                      hidden_dim=args.hidden_dim,
                      clf_hidden_dim=args.clf_hidden_dim,
                      clf_num_layers=args.clf_num_layers,
                      use_leaf_rnn=args.leaf_rnn,
                      intra_attention=args.intra_attention,
                      use_batchnorm=args.batchnorm,
                      dropout_prob=args.dropout,
                      bidirectional=args.bidirectional)
    num_params = sum(np.prod(p.size()) for p in model.parameters())
    num_embedding_params = np.prod(model.word_embedding.weight.size())
    print(f'# of parameters: {num_params}')
    print(f'# of word embedding parameters: {num_embedding_params}')
    print(f'# of parameters (excluding word embeddings): '
          f'{num_params - num_embedding_params}')
    model.load_state_dict(torch.load(args.model, map_location='cpu'))
    model.eval()
    model.to(args.device)
    torch.set_grad_enabled(False)

    test_data_loader = DataLoader(dataset=test_dataset,
                                  batch_size=args.batch_size,
                                  collate_fn=test_dataset.collate)
    num_correct = 0
    num_data = len(test_dataset)
    for batch in test_data_loader:
        pre = batch['pre'].to(args.device)
        hyp = batch['hyp'].to(args.device)
        pre_length = batch['pre_length'].to(args.device)
        hyp_length = batch['hyp_length'].to(args.device)
        label = batch['label'].to(args.device)
        logits = model(pre=pre, pre_length=pre_length,
                       hyp=hyp, hyp_length=hyp_length)
        label_pred = logits.max(1)[1]
        num_correct_batch = torch.eq(label, label_pred).long().sum()
        num_correct_batch = num_correct_batch.item()
        num_correct += num_correct_batch
    print(f'# data: {num_data}')
    print(f'# correct: {num_correct}')
    print(f'Accuracy: {num_correct / num_data:.4f}')
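
# evaluate() above expects an args namespace built elsewhere. Below is a
# minimal sketch of a possible argparse entry point, assuming only the
# attributes the function reads (data, model, word_dim, hidden_dim,
# clf_hidden_dim, clf_num_layers, leaf_rnn, intra_attention, batchnorm,
# bidirectional, dropout, batch_size, device); the flag names and defaults
# are illustrative assumptions, not the original experiment settings.
import argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', required=True,
                        help='Pickled SNLIDataset to evaluate on.')
    parser.add_argument('--model', required=True,
                        help='Path to the trained model state dict.')
    parser.add_argument('--word-dim', type=int, default=300)
    parser.add_argument('--hidden-dim', type=int, default=300)
    parser.add_argument('--clf-hidden-dim', type=int, default=1024)
    parser.add_argument('--clf-num-layers', type=int, default=1)
    parser.add_argument('--leaf-rnn', action='store_true')
    parser.add_argument('--intra-attention', action='store_true')
    parser.add_argument('--batchnorm', action='store_true')
    parser.add_argument('--bidirectional', action='store_true')
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--batch-size', type=int, default=128)
    parser.add_argument('--device', default='cpu',
                        help="torch device string, e.g. 'cpu' or 'cuda:0'.")
    evaluate(parser.parse_args())


if __name__ == '__main__':
    main()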
def main():
    args = parse_args()
    params = Params.from_file(args.params)
    save_dir = Path(args.save)
    save_dir.mkdir(parents=True)
    params.to_file(save_dir / 'params.json')

    train_params, model_params = params.pop('train'), params.pop('model')

    random_seed = train_params.pop_int('random_seed', 2019)
    torch.manual_seed(random_seed)
    random.seed(random_seed)

    log_filename = save_dir / 'stdout.log'
    sys.stdout = TeeLogger(filename=log_filename,
                           terminal=sys.stdout,
                           file_friendly_terminal_output=False)
    sys.stderr = TeeLogger(filename=log_filename,
                           terminal=sys.stderr,
                           file_friendly_terminal_output=False)

    tokenizer = WordTokenizer(start_tokens=['<s>'], end_tokens=['</s>'])
    token_indexer = SingleIdTokenIndexer(lowercase_tokens=True)
    dataset_reader = SnliReader(
        tokenizer=tokenizer, token_indexers={'tokens': token_indexer})

    train_labeled_dataset_path = train_params.pop(
        'train_labeled_dataset_path')
    train_unlabeled_dataset_path = train_params.pop(
        'train_unlabeled_dataset_path', None)
    train_labeled_dataset = dataset_reader.read(train_labeled_dataset_path)
    train_labeled_dataset = filter_dataset_by_length(
        dataset=train_labeled_dataset, max_length=30)
    if train_unlabeled_dataset_path is not None:
        train_unlabeled_dataset = dataset_reader.read(
            train_unlabeled_dataset_path)
        train_unlabeled_dataset = filter_dataset_by_length(
            dataset=train_unlabeled_dataset, max_length=30)
    else:
        train_unlabeled_dataset = []
    valid_dataset = dataset_reader.read(
        train_params.pop('valid_dataset_path'))

    vocab = Vocabulary.from_instances(
        instances=train_labeled_dataset + train_unlabeled_dataset,
        max_vocab_size=train_params.pop_int('max_vocab_size', None))
    vocab.save_to_files(save_dir / 'vocab')

    labeled_batch_size = train_params.pop_int('labeled_batch_size')
    unlabeled_batch_size = train_params.pop_int('unlabeled_batch_size')
    labeled_iterator = BasicIterator(batch_size=labeled_batch_size)
    unlabeled_iterator = BasicIterator(batch_size=unlabeled_batch_size)
    labeled_iterator.index_with(vocab)
    unlabeled_iterator.index_with(vocab)
    if not train_unlabeled_dataset:
        unlabeled_iterator = None

    pretrained_checkpoint_path = train_params.pop(
        'pretrained_checkpoint_path', None)
    model = SNLIModel(params=model_params, vocab=vocab)
    if pretrained_checkpoint_path:
        model.load_state_dict(
            torch.load(pretrained_checkpoint_path, map_location='cpu'))
    model.add_finetune_parameters(
        con_autoweight=train_params.pop_bool('con_autoweight', False),
        con_y_weight=train_params.pop_float('con_y_weight'),
        con_z_weight=train_params.pop_float('con_z_weight'),
        con_z2_weight=train_params.pop_float('con_z2_weight'))

    main_optimizer = optim.Adam(
        params=model.finetune_main_parameters(
            exclude_generator=train_params.pop_bool('exclude_generator')),
        lr=train_params.pop_float('lr', 1e-3))
    aux_optimizer = optim.Adam(
        params=model.finetune_aux_parameters(),
        lr=train_params.pop_float('aux_lr', 1e-4))

    summary_writer = SummaryWriter(log_dir=save_dir / 'log')

    kl_anneal_rate = train_params.pop_float('kl_anneal_rate', None)
    if kl_anneal_rate is None:
        kl_weight_scheduler = None
    else:
        kl_weight_scheduler = (lambda step: min(1.0, kl_anneal_rate * step))
        model.kl_weight = 0.0

    gumbel_anneal_rate = train_params.pop_float('gumbel_anneal_rate', None)
    if gumbel_anneal_rate is None:
        gumbel_temperature_scheduler = None
    else:
        gumbel_temperature_scheduler = (
            lambda step: max(0.1, 1.0 - gumbel_anneal_rate * step))
        model.gumbel_temperature = 1.0

    iters_per_epoch = train_params.pop_int(
        'iters_per_epoch', len(train_labeled_dataset) // labeled_batch_size)

    trainer = FineTuningTrainer(
        model=model,
        main_optimizer=main_optimizer,
        aux_optimizer=aux_optimizer,
        labeled_iterator=labeled_iterator,
        unlabeled_iterator=unlabeled_iterator,
        train_labeled_dataset=train_labeled_dataset,
        train_unlabeled_dataset=train_unlabeled_dataset,
        validation_dataset=valid_dataset,
        summary_writer=summary_writer,
        serialization_dir=save_dir,
        num_epochs=train_params.pop_int('num_epochs', 50),
        iters_per_epoch=iters_per_epoch,
        write_summary_every=100,
        validate_every=1000,
        patience=train_params.pop_int('patience', 5),
        clip_grad_max_norm=train_params.pop_float('grad_max_norm', 5.0),
        kl_weight_scheduler=kl_weight_scheduler,
        gumbel_temperature_scheduler=gumbel_temperature_scheduler,
        cuda_device=train_params.pop_int('cuda_device', 0),
    )
    trainer.train()