Ejemplo n.º 1
0
def test_get_dataset_frame(name, split, num_frames, size, dataset_type,
                           record_set_type):
    """Smoke-test a frame dataset: iterate a handful of samples and check
    that each label is within range and each clip has the expected shape."""
    loader_kwargs = dict(
        split=split,
        num_frames=num_frames,
        size=size,
        dataset_type=dataset_type,
        record_set_type=record_set_type,
    )
    dataset = core.get_dataset(name, DATA_ROOT, **loader_kwargs)
    # (channels, time, height, width) expected for every clip.
    expected_shape = torch.Size((3, num_frames, size, size))
    for idx, (frames, label) in enumerate(dataset):
        if idx >= MAX_ITERS:
            break
        print(idx, frames.shape, label)
        assert label < cfg.num_classes_dict[name]
        assert frames.shape == expected_shape
Ejemplo n.º 2
0
def test_get_heatvol_dataset(name, split, num_frames, size, dataset_type,
                             record_set_type):
    """Smoke-test a heat-volume dataset: iterate a handful of samples,
    checking label range and clip shape for each (frames, label, heatvol,
    volmask) tuple."""
    dataset = core.get_dataset(name, DATA_ROOT, split=split,
                               num_frames=num_frames, size=size,
                               dataset_type=dataset_type,
                               record_set_type=record_set_type)
    print(f'Dataset {name}-{split} has len: {len(dataset)}')
    # Hoist loop invariants: expected clip shape and class count.
    target_shape = torch.Size((3, num_frames, size, size))
    n_classes = cfg.num_classes_dict[name]
    for step, (frames, label, heatvol, volmask) in enumerate(dataset):
        if step >= MAX_ITERS:
            break
        print(step, frames.shape, label, heatvol.shape, volmask)
        assert label < n_classes
        assert frames.shape == target_shape
Ejemplo n.º 3
0
    wandb.watch(model)

    # Select GPU when available, otherwise fall back to CPU.
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Multi-GPU: wrap the model in DataParallel when >1 GPU is present.
    # BUG FIX: the original compared `device == 'cuda'`, i.e. a torch.device
    # against a str, which is always False — DataParallel was never enabled.
    # Compare the device's `.type` attribute instead.
    print(torch.cuda.device_count())
    if device.type == 'cuda' and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model, device_ids=[0, 1])

    print("using device", device)
    model.to(device)

    # BoolQ train/validation splits; validation doubles as the test set here.
    train_dataset = get_dataset(name="boolq",
                                tokenizer=tokenizer,
                                split='train')
    test_dataset = get_dataset(name="boolq",
                               tokenizer=tokenizer,
                               split='validation')

    train_dataloader = DataLoader(train_dataset, batch_size=6, shuffle=True)
    test_dataloader = DataLoader(test_dataset, batch_size=6, shuffle=True)

    # Prepare optimizer and schedule (linear warmup and decay)
    # Parameters matched by these substrings are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params': [
            p for n, p in model.named_parameters()
            if not any(nd in n for nd in no_decay)
        ],
Ejemplo n.º 4
0
    # Hook the model into wandb gradient/parameter logging.
    wandb.watch(model)

    # setting device: GPU when available, otherwise CPU.
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Multi-GPU: wrap the model in DataParallel when >1 GPU is present.
    print(torch.cuda.device_count())
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model, device_ids=[0, 1])

    print("using device", device)
    model.to(device)

    # SNLI train/test splits (tokenized by the provided tokenizer).
    train_dataset = get_dataset(name="snli",
                                tokenizer=tokenizer,
                                split='train')
    test_dataset = get_dataset(name="snli", tokenizer=tokenizer, split='test')

    # NOTE(review): test loader uses batch_size=1 with shuffle=True —
    # presumably per-example evaluation; confirm shuffling the test set
    # is intended.
    train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True)

    # Prepare optimizer and schedule (linear warmup and decay)
    # Parameters matched by these substrings are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay":