def __init__(self, learning_rate: float, roberta_type: str = 'roberta-base'):
        super().__init__()
        config = RobertaConfig.from_pretrained(roberta_type)
        config.num_labels = 2
        self.num_labels = config.num_labels
        self.config = config
        self.lr = learning_rate

        # load the pretrained weights directly with the customized config
        self.model = RobertaForMultipleChoice.from_pretrained(roberta_type, config=config)
Example #2
 def create_and_check_roberta_for_multiple_choice(
         self, config, input_ids, token_type_ids, input_mask,
         sequence_labels, token_labels, choice_labels):
     config.num_choices = self.num_choices
     model = RobertaForMultipleChoice(config=config)
     model.to(torch_device)
     model.eval()
     multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(
         -1, self.num_choices, -1).contiguous()
     multiple_choice_token_type_ids = token_type_ids.unsqueeze(
         1).expand(-1, self.num_choices, -1).contiguous()
     multiple_choice_input_mask = input_mask.unsqueeze(1).expand(
         -1, self.num_choices, -1).contiguous()
     # tuple unpacking assumes an older transformers API (or return_dict=False),
     # where the forward pass returns (loss, logits) rather than a ModelOutput
     loss, logits = model(
         multiple_choice_inputs_ids,
         attention_mask=multiple_choice_input_mask,
         token_type_ids=multiple_choice_token_type_ids,
         labels=choice_labels,
     )
     result = {
         "loss": loss,
         "logits": logits,
     }
     self.parent.assertListEqual(list(result["logits"].size()),
                                 [self.batch_size, self.num_choices])
     self.check_loss_output(result)
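The unsqueeze/expand calls above only simulate the (batch_size, num_choices, seq_len) layout that RobertaForMultipleChoice expects. A minimal sketch of how real inputs are usually built with the tokenizer; the model name, prompt, and choices below are placeholders, not taken from the snippets on this page:

import torch
from transformers import RobertaTokenizer, RobertaForMultipleChoice

tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
model = RobertaForMultipleChoice.from_pretrained("roberta-base")
model.eval()

prompt = "To open the jar, what should you do?"
choices = ["Twist the lid counterclockwise.", "Hit the jar with a spoon."]

# one (prompt, choice) pair per candidate, then add the batch dimension:
# input_ids ends up with shape (1, num_choices, seq_len)
enc = tokenizer([prompt] * len(choices), choices, padding=True, return_tensors="pt")
input_ids = enc["input_ids"].unsqueeze(0)
attention_mask = enc["attention_mask"].unsqueeze(0)

with torch.no_grad():
    logits = model(input_ids=input_ids, attention_mask=attention_mask).logits  # (1, num_choices)
predicted_choice = logits.argmax(dim=1).item()  # index into `choices`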
Example #3
def main(argv):
    parser = argparse.ArgumentParser(description='')
    required = parser.add_argument_group('required arguments')
    required.add_argument('-r', '--retrieval', choices=['IR', 'NSP', 'NN'], help='retrieval solver for the contexts. Options: IR, NSP or NN', required=True)
    parser.add_argument('-t', '--dataset', default='ndq', choices=['ndq', 'dq'], help='dataset to train the model with. Options: ndq or dq. Default: ndq')
    parser.add_argument('-d', '--device', default='gpu', choices=['gpu', 'cpu'], help='device to train the model with. Options: cpu or gpu. Default: gpu')
    parser.add_argument('-p', '--pretrainings', default="checkpoints/pretrainings_e4.pth", help='path to the pretrainings model. If empty, the model will be RobertaForMultipleChoice with roberta-large weights. Default: checkpoints/pretrainings_e4.pth')
    parser.add_argument('-b', '--batchsize', default=1, type=int, help='size of the batches. Default: 1')
    parser.add_argument('-x', '--maxlen', default=180, type=int, help='max sequence length. Default: 180')
    parser.add_argument('-l', '--lr', default=1e-5, type=float, help='learning rate. Default: 1e-5')
    parser.add_argument('-e', '--epochs', default=4, type=int, help='number of epochs. Default: 4')
    parser.add_argument('-s', '--save', action='store_true', help='save model at the end of the training')
    args = parser.parse_args()
    print(args)
    
    if args.pretrainings == "":
        model = RobertaForMultipleChoice.from_pretrained("roberta-large")
    else:
        model = torch.load(args.pretrainings)
    tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
    
    if args.device=="gpu":
        device = torch.device("cuda")
        model.cuda()
    if args.device=="cpu":
        device = torch.device("cpu") 
        model.cpu()
    
    model.zero_grad()
    
    batch_size = args.batchsize
    max_len = args.maxlen
    lr = args.lr
    epochs = args.epochs
    retrieval_solver = args.retrieval
    save_model = args.save
    dataset_name = args.dataset

    raw_data_train = get_data_ndq(dataset_name, "train", retrieval_solver, tokenizer, max_len)
    raw_data_val = get_data_ndq(dataset_name, "val", retrieval_solver, tokenizer, max_len)
    
    train_dataloader = process_data_ndq(raw_data_train, batch_size, "train")
    val_dataloader = process_data_ndq(raw_data_val, batch_size, "val")

    
    optimizer = AdamW(model.parameters(), lr=lr, eps=1e-8)
    total_steps = len(train_dataloader) * epochs
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
    
    training_ndq(model, train_dataloader, val_dataloader, optimizer, scheduler, epochs, retrieval_solver, device, save_model, dataset_name)
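The body of training_ndq is not shown in these snippets; a rough sketch of the per-batch step such a setup typically performs (this is an assumption about the helper, and the batch layout below is a placeholder):

model.train()
for batch in train_dataloader:
    # assumed batch layout: (input_ids, attention_mask, labels)
    input_ids, attention_mask, labels = (t.to(device) for t in batch)
    output = model(input_ids, attention_mask=attention_mask, labels=labels)
    loss = output[0] if isinstance(output, tuple) else output.loss
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    optimizer.step()
    scheduler.step()
    model.zero_grad()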
Example #4
	def __init__(self, model_name_or_path, num_labels):
		super(Model, self).__init__()
		# num_labels is passed through to the model config
		self.config = RobertaConfig.from_pretrained(
			model_name_or_path,
			num_labels=num_labels,
		)
		self.tokenizer = RobertaTokenizer.from_pretrained(
			model_name_or_path,
			do_lower_case=False,
		)
		self.model = RobertaForMultipleChoice.from_pretrained(
			model_name_or_path,
			from_tf=False,
			config=self.config,
		)
		self.model = self.model.eval().cuda() 
		self.m = nn.Softmax(1)
Example #5
 def create_and_check_for_multiple_choice(
     self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
 ):
     config.num_choices = self.num_choices
     model = RobertaForMultipleChoice(config=config)
     model.to(torch_device)
     model.eval()
     multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
     multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
     multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
     result = model(
         multiple_choice_inputs_ids,
         attention_mask=multiple_choice_input_mask,
         token_type_ids=multiple_choice_token_type_ids,
         labels=choice_labels,
     )
     self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
Example #6
 def __init__(self, config):
     super().__init__(config)
     self.config = config
     self.model = RobertaForMultipleChoice.from_pretrained("roberta-base")
class RobertaPIQA(pl.LightningModule):
    def __init__(self, learning_rate: float, roberta_type: str = 'roberta-base'):
        super().__init__()
        config = RobertaConfig.from_pretrained(roberta_type)
        config.num_labels = 2
        self.num_labels = config.num_labels
        self.config = config
        self.lr = learning_rate

        self.model = RobertaForMultipleChoice.from_pretrained(roberta_type, config=config)
        # self.model.init_weights()

    def forward(self, *args, **kwargs) -> MultipleChoiceModelOutput:
        return self.model.forward(*args, **kwargs)

    def training_step(self, batch, batch_idx):
        # unpack batch
        input = batch['input_ids']
        mask = batch['attention_mask']
        token_type = batch['token_type_ids']
        label = batch['label']
        # forward + loss
        output = self.model(
            input_ids=input,
            attention_mask=mask,
            token_type_ids=token_type,
            labels=label)

        loss = output.loss
        out = torch.argmax(output.logits, dim=1)
        correct = sum(out == label).item()
        acc = correct / len(label)

        # make so that the loss is logged
        self.log('train_loss', loss)
        self.log('train_accuracy', acc, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        # unpack batch
        input = batch['input_ids']
        mask = batch['attention_mask']
        token_type = batch['token_type_ids']
        label = batch['label']
        # forward + loss
        output = self.model(
            input_ids=input,
            attention_mask=mask,
            token_type_ids=token_type,
            labels=label)

        loss = output.loss
        out = torch.argmax(output.logits, dim=1)
        correct = sum(out == label).item()
        acc = correct / len(label)

        # make so that the loss is logged
        self.log('val_loss', loss)
        self.log('val_accuracy', acc, on_epoch=True, prog_bar=True)
        return {'loss': loss, 'logits': output.logits, 'output': out, 'correct': correct}

    def test_step(self, batch, batch_idx):
        # unpack batch
        input = batch['input_ids']
        mask = batch['attention_mask']
        token_type = batch['token_type_ids']
        # forward + loss
        output = self.model(
            input_ids=input,
            attention_mask=mask,
            token_type_ids=token_type)

        out = torch.argmax(output.logits, dim=1)

        # no labels are available at test time, so a placeholder loss is logged
        self.log('test_loss', 0.0)
        return {'loss': 0.0, 'logits': output.logits, 'output': out}

    def configure_optimizers(self):
        return torch.optim.Adam(
            self.parameters(),
            lr=self.lr,
        )
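A hedged usage sketch for this LightningModule; the dataloaders are placeholders (the class above does not define them) and are assumed to yield dicts with 'input_ids', 'attention_mask', 'token_type_ids', and 'label':

model = RobertaPIQA(learning_rate=1e-5)
trainer = pl.Trainer(max_epochs=3)  # add accelerator/device flags as needed for your PyTorch Lightning version
trainer.fit(model, train_dataloader, val_dataloader)
trainer.test(model, test_dataloader)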
parser = argparse.ArgumentParser()                # assumed: parser construction is not shown in this snippet
parser.add_argument("--mc_model_path", type=str)  # assumed: name inferred from args.mc_model_path below
parser.add_argument(
    "--dir",                                      # assumed: name inferred from args.dir below
    default=
    "/net/nfs.websail/yyv959/winogrande/outputs/roberta-large/train-l-mc-fake-medium-sym-200000-unigram-8/",
    type=str,
    help=
    "The input data dir. Should contain the .tsv files (or other data files) for the task."
)
parser.add_argument("--no_cuda",
                    action='store_true',
                    help="Avoid using CUDA when available")
args = parser.parse_args()

dir = args.dir

mc_model_path = args.mc_model_path

mc_model = RobertaForMultipleChoice.from_pretrained(mc_model_path)

mc_tokenizer = RobertaTokenizer.from_pretrained(mc_model_path)

mc_model.eval()

device = torch.device(
    "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")

sent_encoder = SentenceTransformer('roberta-base-nli-stsb-mean-tokens',
                                   device=device)
mc_model.to(device)

tagger = spacy.load("en_core_web_lg")
word_vector = gensim.models.KeyedVectors.load_word2vec_format(
    '/net/nfs.websail/yyv959/counter-fitted-vectors.txt', binary=False)
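A hedged sketch of how the auxiliary resources loaded above are typically queried; the example sentence and word are placeholders:

text = "The trophy does not fit in the suitcase."
emb = sent_encoder.encode([text], convert_to_tensor=True)  # sentence embedding from SentenceTransformer
doc = tagger(text)                                         # spaCy Doc with POS tags and parses
neighbors = word_vector.most_similar("small", topn=5)      # nearest neighbors in the counter-fitted vectors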
Example #9
def main(argv):
    parser = argparse.ArgumentParser(description='')
    parser.add_argument(
        '-d',
        '--device',
        default='gpu',
        choices=['gpu', 'cpu'],
        help='device to train the model with. Options: cpu or gpu. Default: gpu'
    )
    parser.add_argument(
        '-p',
        '--pretrainings',
        default='../checkpoints/RACE_e1.pth',
        help=
        'path to the pretrainings model. Default: ../checkpoints/RACE_e1.pth')
    parser.add_argument('-b',
                        '--batchsize',
                        default=1,
                        type=int,
                        help='size of the batches. Default: 1')
    parser.add_argument('-x',
                        '--maxlen',
                        default=256,
                        type=int,
                        help='max sequence length. Default: 256')
    parser.add_argument('-l',
                        '--lr',
                        default=1e-5,
                        type=float,
                        help='learning rate. Default: 1e-5')
    parser.add_argument('-e',
                        '--epochs',
                        default=4,
                        type=int,
                        help='number of epochs. Default: 4')
    parser.add_argument('-s',
                        '--save',
                        action='store_true',
                        help='save model at the end of the training')
    args = parser.parse_args()
    print(args)

    if args.pretrainings == "":
        model = RobertaForMultipleChoice.from_pretrained("roberta-large")
    else:
        model = torch.load(args.pretrainings)
    tokenizer = RobertaTokenizer.from_pretrained('roberta-large')

    if args.device == "gpu":
        device = torch.device("cuda")
        model.cuda()
    if args.device == "cpu":
        device = torch.device("cpu")
        model.cpu()

    model.zero_grad()

    batch_size = args.batchsize
    max_len = args.maxlen
    dataset_name = "pretrainings"
    lr = args.lr
    epochs = args.epochs
    save_model = args.save

    raw_data_train = get_data_pretrainings(dataset_name, "train", tokenizer,
                                           max_len)
    raw_data_val = get_data_pretrainings(dataset_name, "val", tokenizer,
                                         max_len)

    train_dataloader = process_data_ndq(raw_data_train, batch_size, "train")
    val_dataloader = process_data_ndq(raw_data_val, batch_size, "val")

    optimizer = AdamW(model.parameters(), lr=lr, eps=1e-8)
    total_steps = len(train_dataloader) * epochs
    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=0,
                                                num_training_steps=total_steps)

    training_ndq(model, train_dataloader, val_dataloader, optimizer, scheduler,
                 epochs, device, save_model, dataset_name)