Example #1
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# cfg (project config), CNN, Callback, loop_fn, set_config, and device are
# assumed to be defined elsewhere in the project.


def train():
    # Dataset & Dataloader
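    # Random augmentations for training; the test pipeline stays deterministic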
    train_transform = transforms.Compose([
        transforms.RandomRotation(15),
        transforms.RandomResizedCrop(cfg.CROP_SIZE, scale=(0.8, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])

    test_transform = transforms.Compose([
        transforms.Resize(70),
        transforms.CenterCrop(cfg.CROP_SIZE),
        transforms.ToTensor()
    ])

    train_set = datasets.ImageFolder(cfg.TRAIN_DIR, transform=train_transform)
    trainloader = DataLoader(train_set,
                             batch_size=cfg.BATCH_SIZE,
                             shuffle=True,
                             num_workers=4)

    test_set = datasets.ImageFolder(cfg.TEST_DIR, transform=test_transform)
    testloader = DataLoader(test_set, batch_size=cfg.BATCH_SIZE, shuffle=False)

    # Config
    config = set_config({
        "batch_size": cfg.BATCH_SIZE,
        "crop_size": cfg.CROP_SIZE
    })

    # Training Preparation
    model = CNN().to(device)
    criterion = nn.NLLLoss()
    optimizer = optim.AdamW(model.parameters(), lr=0.001)
    callback = Callback(model, config, outdir=cfg.OUTDIR)

    # Training
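    # Train indefinitely; the early-stopping callback below breaks the loop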
    while True:
        train_cost, train_score = loop_fn("train", train_set, trainloader,
                                          model, criterion, optimizer, device)
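        # Evaluation pass with gradient tracking disabled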
        with torch.no_grad():
            test_cost, test_score = loop_fn("test", test_set, testloader,
                                            model, criterion, optimizer,
                                            device)

        # Callbacks
        callback.log(train_cost, test_cost, train_score, test_score)
        callback.save_checkpoint()
        if callback.early_stopping(model, monitor="test_score"):
            break
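
# loop_fn above is assumed to be a project helper that runs one pass over a
# loader and returns (average_cost, score). A minimal sketch of what it might
# look like; illustrative only, not the project's actual implementation:
def loop_fn(mode, dataset, dataloader, model, criterion, optimizer, device):
    model.train() if mode == "train" else model.eval()
    cost = correct = 0
    for feature, target in dataloader:
        feature, target = feature.to(device), target.to(device)
        output = model(feature)
        loss = criterion(output, target)
        if mode == "train":
            loss.backward()        # accumulate gradients
            optimizer.step()       # update parameters
            optimizer.zero_grad()  # reset for the next batch
        cost += loss.item() * feature.shape[0]
        correct += (output.argmax(1) == target).sum().item()
    return cost / len(dataset), correct / len(dataset)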
Example #2
import time
from argparse import ArgumentParser
from pathlib import Path

import torch
from torchvision.transforms import Compose, Resize, ToTensor

# Dataset (custom), CNN, and print_step are assumed to be defined elsewhere
# in the project.


def main():
    parser = ArgumentParser("Load Data")
    parser.add_argument("data_path", type=Path, help="path to dataset folder")
    parser.add_argument("--limit",
                        "--l",
                        type=int,
                        help="limit of the dataset")
    args = parser.parse_args()

    transform = Compose([Resize((256, 256)), ToTensor()])

    train_dataset = Dataset(args.data_path / "train", transform, args.limit)
    val_dataset = Dataset(args.data_path / "test", transform, args.limit)

    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=8,
                                                   shuffle=True,
                                                   num_workers=8)
    val_dataloader = torch.utils.data.DataLoader(val_dataset,
                                                 batch_size=8,
                                                 shuffle=False,
                                                 num_workers=8)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = CNN((256, 256), 2).to(device)

    loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.005,
                                 weight_decay=0.005)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.998)
    max_step = len(train_dataloader)  # number of batches per epoch

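    # Fixed budget of 1000 epochs; mean loss is printed after each epoch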
    for epoch in range(1000):
        epoch_loss = 0
        for step, batch in enumerate(train_dataloader):
            start_time = time.perf_counter()
            img_batch = batch["img"].to(device).float()
            label_batch = batch["label"].to(device).long()
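            # Standard step: clear grads, forward, loss, backward, update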
            optimizer.zero_grad()
            x = model(img_batch)
            loss = loss_fn(x, label_batch)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print_step(step, max_step, loss, time.perf_counter() - start_time)
        print("")
        print(epoch_loss / max_step, flush=True)
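
# Dataset above is assumed to be a project class that yields {"img", "label"}
# dicts and honors the limit argument. A hypothetical minimal version, for
# illustration only, assuming one sub-folder per class on disk:
from PIL import Image

class Dataset(torch.utils.data.Dataset):
    def __init__(self, root, transform, limit=None):
        # Collect (path, class_index) pairs from the class sub-folders
        self.samples = [(path, idx)
                        for idx, class_dir in enumerate(sorted(Path(root).iterdir()))
                        for path in sorted(class_dir.iterdir())]
        self.samples = self.samples[:limit]  # slicing with None keeps everything
        self.transform = transform

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, i):
        path, label = self.samples[i]
        return {"img": self.transform(Image.open(path).convert("RGB")),
                "label": label}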
Example #3
        "kernel": cfg.kernel,
        "pad": cfg.pad,
        "in_size": cfg.in_size,
        "n1": cfg.n1,
        "n2": cfg.n2,
        "dropout": cfg.dropout,
        "out_size": cfg.out_size,
        "batch_norm": cfg.batch_norm,
        "author": cfg.author

})

# Training Preparation
model = CNN().to(device)
criterion = nn.NLLLoss()
optimizer = optim.AdamW(model.parameters(), lr=0.001)
callback = Callback(model, config, outdir=cfg.OUTDIR)

# Training
while True:
    train_cost, train_score = loop_fn("train", train_set, trainloader,
                                      model, criterion, optimizer, device)
    with torch.no_grad():
        test_cost, test_score = loop_fn("test", test_set, testloader,
                                        model, criterion, optimizer, device)

    # Callbacks
    callback.log(train_cost, test_cost, train_score, test_score)
    callback.save_checkpoint()
    if callback.early_stopping(model, monitor="test_score"):
        break