Example #1
def make_model_and_data(args, device, new_data: bool = True):
    # The incoming device argument is overridden; the device is re-derived locally.
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")
    if new_data:
        # Synthetic path: benchmark language-modeling dataset with a fixed vocabulary.
        vocab_size = 10000
        model, criterion, optimizer, scaler = make_model(
            args, device, vocab_size)
        lm_dataset = BenchmarkLMDataset()
        lm_dataloader = DataLoader(lm_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=0,
                                   collate_fn=collate_sentences_lm)
        return {
            "model": model,
            "criterion": criterion,
            "optimizer": optimizer,
            "data": lm_dataloader,
            "vocab_size": vocab_size,
        }
    else:
        # Real-data path: get_data returns (ntokens, train, val, test) splits.
        data = get_data(device)
        ntokens, train_data, val_data, test_data = data
        model, criterion, optimizer, scaler = make_model(args, device, ntokens)
        return {
            "model": model,
            "criterion": criterion,
            "optimizer": optimizer,
            "data": data,
        }
Example #2
def get_synthetic_dataloader(args):
    """Returns dataloader for synthetic data."""

    if args.model_name == "lm":
        lm_dataset = BenchmarkLMDataset()
        lm_dataloader = DataLoader(
            lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm
        )
        return lm_dataloader
    else:
        raise RuntimeError("Unrecognized args.model_name: %s" % args.model_name)
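
For context, a minimal sketch of how get_synthetic_dataloader might be called. The args namespace below is a hypothetical stand-in for the benchmark's argparse namespace, and BenchmarkLMDataset / collate_sentences_lm are assumed to be in scope from the surrounding benchmark module.

from types import SimpleNamespace

# Hypothetical stand-in for the argparse namespace used by the benchmark.
args = SimpleNamespace(model_name="lm", batch_size=8)

dataloader = get_synthetic_dataloader(args)
batch = next(iter(dataloader))  # one synthetic batch, shaped by collate_sentences_lm
print(type(batch))
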
Example #3
def make_model_and_data(args, device):
    # As in Example #1, the passed-in device is ignored and re-derived here.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    vocab_size = 10000
    model, criterion, optimizer = make_model(args, device, vocab_size)
    lm_dataset = BenchmarkLMDataset()
    lm_dataloader = DataLoader(
        lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm
    )
    return {
        "model": model,
        "criterion": criterion,
        "optimizer": optimizer,
        "data": lm_dataloader,
        "vocab_size": vocab_size,
    }
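
A similarly hedged sketch of consuming the dictionary returned by make_model_and_data in Example #3, reusing the hypothetical args namespace from the previous sketch; make_model may require further attributes on args that are not shown above. The batch layout produced by collate_sentences_lm is also not shown, so the training step is left as a placeholder comment rather than guessed at.

parts = make_model_and_data(args, device=None)  # device is re-derived inside the function
model, criterion, optimizer = parts["model"], parts["criterion"], parts["optimizer"]

for batch in parts["data"]:
    optimizer.zero_grad()
    # The forward/backward step would go here; the exact batch fields depend on
    # collate_sentences_lm, which is defined in the benchmark module.
    break
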