def main():
    args = parse_args()
    checkpoint_path = Path(args.checkpoint)
    checkpoint_dir = checkpoint_path.parent
    params_path = checkpoint_dir / 'params.json'
    vocab_dir = checkpoint_dir / 'vocab'

    params = Params.from_file(params_path)
    train_params, model_params = params.pop('train'), params.pop('model')

    tokenizer = WordTokenizer(start_tokens=['<s>'], end_tokens=['</s>'])
    token_indexer = SingleIdTokenIndexer(lowercase_tokens=True)
    dataset_reader = SnliReader(
        tokenizer=tokenizer, token_indexers={'tokens': token_indexer})

    valid_dataset = dataset_reader.read(
        train_params.pop('valid_dataset_path'))
    # Use the test path recorded in the training config unless one is given explicitly.
    if not args.test_dataset:
        test_dataset_path = train_params.pop('test_dataset_path')
    else:
        test_dataset_path = args.test_dataset
    test_dataset = dataset_reader.read(test_dataset_path)
    # Optionally restrict evaluation to instances with a specific gold label.
    if args.only_label:
        test_dataset = [
            d for d in test_dataset
            if d.fields['label'].label == args.only_label]
    vocab = Vocabulary.from_files(vocab_dir)
    random.shuffle(valid_dataset)

    # Skip re-reading the pretrained embedding file; weights are restored
    # from the checkpoint below.
    model_params['token_embedder']['pretrained_file'] = None
    model = SNLIModel(params=model_params, vocab=vocab)
    model.load_state_dict(
        torch.load(checkpoint_path, map_location='cpu'), strict=False)
    model.to(args.cuda_device)
    model.eval()
    torch.set_grad_enabled(False)

    iterator = BasicIterator(batch_size=32)
    iterator.index_with(vocab)

    # Run the model over the validation and test splits and report its metrics.
    for dataset in (valid_dataset, test_dataset):
        generator = iterator(dataset, shuffle=False, num_epochs=1)
        model.get_metrics(reset=True)
        for batch in tqdm(generator):
            batch = move_to_device(batch, cuda_device=args.cuda_device)
            model(premise=batch['premise'],
                  hypothesis=batch['hypothesis'],
                  label=batch['label'])
        metrics = model.get_metrics()
        pprint(metrics)
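# parse_args() for the evaluation entry point above is not shown in this
# listing. The sketch below is an assumed argparse definition reconstructed
# only from the flags that main() actually reads (--checkpoint,
# --test-dataset, --only-label, --cuda-device); defaults and help strings
# are guesses, not the script's real values.
def parse_args():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', required=True,
                        help='Path to the saved model checkpoint')
    parser.add_argument('--test-dataset', default=None,
                        help='Optional test set path; falls back to the path '
                             'recorded in params.json')
    parser.add_argument('--only-label', default=None,
                        help='If set, evaluate only instances with this gold label')
    parser.add_argument('--cuda-device', type=int, default=0,
                        help='CUDA device index')
    return parser.parse_args()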
def main():
    args = parse_args()
    checkpoint_path = Path(args.checkpoint)
    checkpoint_dir = checkpoint_path.parent
    params_path = checkpoint_dir / 'params.json'
    vocab_dir = checkpoint_dir / 'vocab'

    params = Params.from_file(params_path)
    train_params, model_params = params.pop('train'), params.pop('model')

    tokenizer = WordTokenizer(start_tokens=['<s>'], end_tokens=['</s>'])
    token_indexer = SingleIdTokenIndexer(lowercase_tokens=True)
    dataset_reader = SnliReader(
        tokenizer=tokenizer, token_indexers={'tokens': token_indexer})
    valid_dataset = dataset_reader.read(
        train_params.pop('valid_dataset_path'))
    vocab = Vocabulary.from_files(vocab_dir)

    model_params['token_embedder']['pretrained_file'] = None
    model = SNLIModel(params=model_params, vocab=vocab)
    model.load_state_dict(
        torch.load(checkpoint_path, map_location='cpu'), strict=False)
    model.to(args.device)
    model.eval()
    torch.set_grad_enabled(False)

    iterator = BasicIterator(batch_size=args.batch_size)
    iterator.index_with(vocab)
    generator = iterator(valid_dataset, num_epochs=1, shuffle=False)
    label_index_to_token = vocab.get_index_to_token_vocabulary('labels')

    # Encode each premise, sample a latent code, and decode a hypothesis
    # conditioned on the code and the gold label; write SNLI-style JSON lines.
    with open(args.out, 'w') as out_file:
        for batch in tqdm(generator):
            premise_tokens = batch['premise']['tokens']
            enc_embs = model.embed(premise_tokens.to(args.device))
            enc_mask = get_text_field_mask(batch['premise']).to(args.device)
            enc_hidden = model.encode(inputs=enc_embs, mask=enc_mask,
                                      drop_start_token=True)
            code, kld = model.sample_code_and_compute_kld(enc_hidden)
            # Skip the <s> start token when converting the premise back to text.
            pre_text = model.convert_to_readable_text(premise_tokens[:, 1:])
            label_tensor = batch['label'].to(args.device)
            generated = model.generate(code=code,
                                       label=label_tensor,
                                       max_length=25,
                                       beam_size=10,
                                       lp_alpha=args.lp_alpha)
            # Keep only the top beam hypothesis for each instance.
            text = model.convert_to_readable_text(generated[:, 0])
            for pre_text_b, text_b, label_index_b in zip(pre_text, text,
                                                         label_tensor):
                obj = {'sentence1': ' '.join(pre_text_b),
                       'sentence2': ' '.join(text_b),
                       'gold_label': label_index_to_token[label_index_b.item()]}
                out_file.write(json.dumps(obj))
                out_file.write('\n')
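# The generation loop above emits one SNLI-formatted JSON object per line
# ('sentence1', 'sentence2', 'gold_label'), so the output matches the JSONL
# layout that SnliReader expects and can also be inspected directly. A small
# example of reading the file back (the path is a placeholder for args.out):
import json

with open('generated.jsonl') as f:           # placeholder path
    for i, line in enumerate(f):
        example = json.loads(line)
        print(example['gold_label'], '|',
              example['sentence1'], '->', example['sentence2'])
        if i >= 4:                           # show only the first few lines
            break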
def evaluate(args):
    with open(args.data, 'rb') as f:
        test_dataset: SNLIDataset = pickle.load(f)
    word_vocab = test_dataset.word_vocab
    label_vocab = test_dataset.label_vocab

    model = SNLIModel(num_classes=len(label_vocab), num_words=len(word_vocab),
                      word_dim=args.word_dim, hidden_dim=args.hidden_dim,
                      clf_hidden_dim=args.clf_hidden_dim,
                      clf_num_layers=args.clf_num_layers,
                      use_leaf_rnn=args.leaf_rnn,
                      intra_attention=args.intra_attention,
                      use_batchnorm=args.batchnorm,
                      dropout_prob=args.dropout,
                      bidirectional=args.bidirectional)
    num_params = sum(np.prod(p.size()) for p in model.parameters())
    num_embedding_params = np.prod(model.word_embedding.weight.size())
    print(f'# of parameters: {num_params}')
    print(f'# of word embedding parameters: {num_embedding_params}')
    print(f'# of parameters (excluding word embeddings): '
          f'{num_params - num_embedding_params}')
    model.load_state_dict(torch.load(args.model, map_location='cpu'))
    model.eval()
    model.to(args.device)
    torch.set_grad_enabled(False)

    test_data_loader = DataLoader(dataset=test_dataset,
                                  batch_size=args.batch_size,
                                  collate_fn=test_dataset.collate)
    num_correct = 0
    num_data = len(test_dataset)
    for batch in test_data_loader:
        pre = batch['pre'].to(args.device)
        hyp = batch['hyp'].to(args.device)
        pre_length = batch['pre_length'].to(args.device)
        hyp_length = batch['hyp_length'].to(args.device)
        label = batch['label'].to(args.device)
        logits = model(pre=pre, pre_length=pre_length,
                       hyp=hyp, hyp_length=hyp_length)
        # Predicted label = argmax over the class dimension.
        label_pred = logits.max(1)[1]
        num_correct_batch = torch.eq(label, label_pred).long().sum()
        num_correct_batch = num_correct_batch.item()
        num_correct += num_correct_batch
    print(f'# data: {num_data}')
    print(f'# correct: {num_correct}')
    print(f'Accuracy: {num_correct / num_data:.4f}')
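# The prediction and counting logic used in evaluate() (and in run_iter()
# below), shown in isolation: argmax over the class dimension, then an
# exact-match count. Toy tensors only, for illustration.
import torch

logits = torch.tensor([[2.0, 0.1, -1.0],   # argmax -> class 0
                       [0.3, 0.2, 1.5],    # argmax -> class 2
                       [0.0, 0.9, 0.1]])   # argmax -> class 1
label = torch.tensor([0, 2, 2])

label_pred = logits.max(1)[1]                                   # tensor([0, 2, 1])
num_correct = torch.eq(label, label_pred).long().sum().item()   # 2
print(f'Accuracy: {num_correct / label.size(0):.4f}')           # 0.6667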
def train(args):
    with open(args.train_data, 'rb') as f:
        train_dataset: SNLIDataset = pickle.load(f)
    with open(args.valid_data, 'rb') as f:
        valid_dataset: SNLIDataset = pickle.load(f)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True, num_workers=2,
                              collate_fn=train_dataset.collate,
                              pin_memory=True)
    valid_loader = DataLoader(dataset=valid_dataset,
                              batch_size=args.batch_size,
                              shuffle=False, num_workers=2,
                              collate_fn=valid_dataset.collate,
                              pin_memory=True)
    word_vocab = train_dataset.word_vocab
    label_vocab = train_dataset.label_vocab

    model = SNLIModel(num_classes=len(label_vocab), num_words=len(word_vocab),
                      word_dim=args.word_dim, hidden_dim=args.hidden_dim,
                      clf_hidden_dim=args.clf_hidden_dim,
                      clf_num_layers=args.clf_num_layers,
                      use_leaf_rnn=args.leaf_rnn,
                      use_batchnorm=args.batchnorm,
                      intra_attention=args.intra_attention,
                      dropout_prob=args.dropout,
                      bidirectional=args.bidirectional)
    if args.glove:
        logging.info('Loading GloVe pretrained vectors...')
        glove_weight = load_glove(
            path=args.glove, vocab=word_vocab,
            init_weight=model.word_embedding.weight.data.numpy())
        # Keep the padding embedding at zero.
        glove_weight[word_vocab.pad_id] = 0
        model.word_embedding.weight.data.set_(torch.FloatTensor(glove_weight))
    if args.fix_word_embedding:
        logging.info('Will not update word embeddings')
        model.word_embedding.weight.requires_grad = False
    model.to(args.device)
    logging.info(f'Using device {args.device}')

    if args.optimizer == 'adam':
        optimizer_class = optim.Adam
    elif args.optimizer == 'adagrad':
        optimizer_class = optim.Adagrad
    elif args.optimizer == 'adadelta':
        optimizer_class = optim.Adadelta
    else:
        raise ValueError(f'Unknown optimizer: {args.optimizer}')
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optimizer_class(params=params, weight_decay=args.l2reg)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='max',
                                               factor=0.5, patience=10,
                                               verbose=True)
    criterion = nn.CrossEntropyLoss()

    train_summary_writer = SummaryWriter(
        log_dir=os.path.join(args.save_dir, 'log', 'train'))
    valid_summary_writer = SummaryWriter(
        log_dir=os.path.join(args.save_dir, 'log', 'valid'))

    def run_iter(batch, is_training):
        model.train(is_training)
        pre = batch['pre'].to(args.device)
        hyp = batch['hyp'].to(args.device)
        pre_length = batch['pre_length'].to(args.device)
        hyp_length = batch['hyp_length'].to(args.device)
        label = batch['label'].to(args.device)
        logits = model(pre=pre, pre_length=pre_length,
                       hyp=hyp, hyp_length=hyp_length)
        label_pred = logits.max(1)[1]
        accuracy = torch.eq(label, label_pred).float().mean()
        loss = criterion(input=logits, target=label)
        if is_training:
            optimizer.zero_grad()
            loss.backward()
            clip_grad_norm_(parameters=params, max_norm=5)
            optimizer.step()
        return loss, accuracy

    def add_scalar_summary(summary_writer, name, value, step):
        if torch.is_tensor(value):
            value = value.item()
        summary_writer.add_scalar(tag=name, scalar_value=value,
                                  global_step=step)

    num_train_batches = len(train_loader)
    validate_every = num_train_batches // 10
    best_valid_accuracy = 0
    iter_count = 0
    for epoch_num in range(args.max_epoch):
        logging.info(f'Epoch {epoch_num}: start')
        for batch_iter, train_batch in enumerate(train_loader):
            # Anneal the Gumbel-Softmax temperature on an exponential
            # schedule, floored at 0.5.
            if iter_count % args.anneal_temperature_every == 0:
                rate = args.anneal_temperature_rate
                new_temperature = max([0.5, math.exp(-rate * iter_count)])
                model.encoder.gumbel_temperature = new_temperature
                logging.info(f'Iter #{iter_count}: '
                             f'Set Gumbel temperature to {new_temperature:.4f}')
            train_loss, train_accuracy = run_iter(batch=train_batch,
                                                  is_training=True)
            iter_count += 1
            add_scalar_summary(summary_writer=train_summary_writer,
                               name='loss', value=train_loss, step=iter_count)
            add_scalar_summary(summary_writer=train_summary_writer,
                               name='accuracy', value=train_accuracy,
                               step=iter_count)

            # Validate roughly ten times per epoch.
            if (batch_iter + 1) % validate_every == 0:
                torch.set_grad_enabled(False)
                valid_loss_sum = valid_accuracy_sum = 0
                num_valid_batches = len(valid_loader)
                for valid_batch in valid_loader:
                    valid_loss, valid_accuracy = run_iter(batch=valid_batch,
                                                          is_training=False)
                    valid_loss_sum += valid_loss.item()
                    valid_accuracy_sum += valid_accuracy.item()
                torch.set_grad_enabled(True)
                valid_loss = valid_loss_sum / num_valid_batches
                valid_accuracy = valid_accuracy_sum / num_valid_batches
                scheduler.step(valid_accuracy)
                add_scalar_summary(summary_writer=valid_summary_writer,
                                   name='loss', value=valid_loss,
                                   step=iter_count)
                add_scalar_summary(summary_writer=valid_summary_writer,
                                   name='accuracy', value=valid_accuracy,
                                   step=iter_count)
                progress = epoch_num + batch_iter / num_train_batches
                logging.info(f'Epoch {progress:.2f}: '
                             f'valid loss = {valid_loss:.4f}, '
                             f'valid accuracy = {valid_accuracy:.4f}')
                if valid_accuracy > best_valid_accuracy:
                    best_valid_accuracy = valid_accuracy
                    model_filename = (f'model-{progress:.2f}'
                                      f'-{valid_loss:.4f}'
                                      f'-{valid_accuracy:.4f}.pkl')
                    model_path = os.path.join(args.save_dir, model_filename)
                    torch.save(model.state_dict(), model_path)
                    print(f'Saved the new best model to {model_path}')
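# The Gumbel temperature schedule used in train() is
# max(0.5, exp(-rate * iter_count)). A standalone illustration with an
# assumed rate of 1e-5 (args.anneal_temperature_rate is a command-line value,
# not fixed by the code above):
import math

rate = 1e-5
for iter_count in (0, 10_000, 50_000, 100_000, 200_000):
    temperature = max(0.5, math.exp(-rate * iter_count))
    print(f'iter {iter_count:>6}: temperature = {temperature:.4f}')
# iter      0: temperature = 1.0000
# iter  10000: temperature = 0.9048
# iter  50000: temperature = 0.6065
# iter 100000: temperature = 0.5000  (exp(-1) ~= 0.3679, floored at 0.5)
# iter 200000: temperature = 0.5000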