Example #1
import csv
import time

import torch.nn as nn
import torch.optim as optim


def main(opt):
    # Dataset, Dataloader, PointNet, train, and test are project-local helpers.
    # Note that the training loader is not shuffled here.
    train_dataset = Dataset(opt.dataroot, True)
    train_dataloader = Dataloader(train_dataset, batch_size=opt.batchSize,
                                  shuffle=False, num_workers=2)

    test_dataset = Dataset(opt.dataroot, False)
    test_dataloader = Dataloader(test_dataset, batch_size=opt.batchSize,
                                 shuffle=False, num_workers=2)

    net = PointNet(d=opt.d, feature_transform=opt.feature_transform)
    net.double()  # cast parameters to float64; the dataset presumably yields double tensors
    print(net)

    # nn.CosineSimilarity is a similarity, not a loss; train()/test() are
    # expected to turn it into a minimizable objective (e.g. 1 - cosine).
    criterion = nn.CosineSimilarity(dim=2)
    optimizer = optim.Adam(net.parameters(), lr=opt.lr)

    # Mode 'a' appends, so re-running will duplicate these header rows;
    # opening with 'w' would start each run fresh.
    with open('train.csv', 'a') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(
            ["train_loss", "train_gain", "baseline_loss", "baseline_gain"])

    with open('test.csv', 'a') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(
            ["test_loss", "test_gain", "baseline_loss", "baseline_gain"])

    start = time.time()

    for epoch in range(opt.niter):
        train(epoch, train_dataloader, net, criterion, optimizer, opt)
        test(test_dataloader, net, criterion, optimizer, opt)

    elapsed_time = time.time() - start

    with open('time.csv', 'a') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(["training_time", elapsed_time])  # label was "学習時間" (training time)
Example #2
import os

import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim

# OutputDir is used below but never defined in the original snippet;
# assume a module-level output directory.
OutputDir = './output'


def main(opt):
    # BADataset's three boolean flags appear to select the train/valid/test split.
    train_dataset = BADataset(opt.dataroot, opt.L, True, False, False)
    train_dataloader = BADataloader(train_dataset, batch_size=opt.batchSize,
                                    shuffle=True, num_workers=opt.workers, drop_last=True)

    valid_dataset = BADataset(opt.dataroot, opt.L, False, True, False)
    valid_dataloader = BADataloader(valid_dataset, batch_size=opt.batchSize,
                                    shuffle=True, num_workers=opt.workers, drop_last=True)

    test_dataset = BADataset(opt.dataroot, opt.L, False, False, True)
    test_dataloader = BADataloader(test_dataset, batch_size=opt.batchSize,
                                   shuffle=True, num_workers=opt.workers, drop_last=True)

    # All-false flags yield the full dataset, used for final inference below.
    all_dataset = BADataset(opt.dataroot, opt.L, False, False, False)
    all_dataloader = BADataloader(all_dataset, batch_size=opt.batchSize,
                                  shuffle=False, num_workers=opt.workers, drop_last=False)

    net = PointNet(d0=opt.d0,
                   d1=opt.d1,
                   d2=opt.d2,
                   d3=opt.d3,
                   d4=opt.d4,
                   d5=opt.d5,
                   d6=opt.d6)
    net.double()  # cast parameters to float64; the loaders presumably yield double tensors
    print(net)

    # As in Example #1, nn.CosineSimilarity is a similarity; train()/valid()/test()
    # are expected to convert it into a loss (e.g. 1 - cosine).
    criterion = nn.CosineSimilarity(dim=1)

    if opt.cuda:
        net.cuda()
        criterion.cuda()  # no-op for the parameter-free CosineSimilarity, but harmless

    optimizer = optim.Adam(net.parameters(), lr=opt.lr)
    early_stopping = EarlyStopping(patience=opt.patience, verbose=True)

    os.makedirs(OutputDir, exist_ok=True)
    train_loss_ls = []
    valid_loss_ls = []
    test_loss_ls = []

    for epoch in range(opt.niter):
        train_loss = train(epoch, train_dataloader, net, criterion, optimizer, opt)
        valid_loss = valid(valid_dataloader, net, criterion, opt)
        test_loss = test(test_dataloader, net, criterion, opt)

        train_loss_ls.append(train_loss)
        valid_loss_ls.append(valid_loss)
        test_loss_ls.append(test_loss)

        # EarlyStopping tracks the best validation loss and checkpoints the model.
        early_stopping(valid_loss, net, OutputDir)
        if early_stopping.early_stop:
            print("Early stopping")
            break

    df = pd.DataFrame({
        'epoch': list(range(1, len(train_loss_ls) + 1)),
        'train_loss': train_loss_ls,
        'valid_loss': valid_loss_ls,
        'test_loss': test_loss_ls
    })
    df.to_csv(OutputDir + '/loss.csv', index=False)

    # Reload the best checkpoint written by EarlyStopping, then run inference
    # over the full dataset.
    net.load_state_dict(torch.load(OutputDir + '/checkpoint.pt'))
    inference(all_dataloader, net, opt, OutputDir)
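
`EarlyStopping` is also not defined in the snippet. A minimal sketch consistent with how it is used above (constructed with `patience` and `verbose`, called as `early_stopping(valid_loss, net, OutputDir)`, exposing an `early_stop` flag, and saving `checkpoint.pt`) could look like the following; the internals are assumptions, not the project's actual implementation:

import torch

class EarlyStopping:
    """Stop training once validation loss has not improved for `patience` epochs (sketch)."""

    def __init__(self, patience=7, verbose=False):
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_loss = float('inf')
        self.early_stop = False

    def __call__(self, valid_loss, net, save_dir):
        if valid_loss < self.best_loss:
            # Improvement: checkpoint the model and reset the patience counter.
            if self.verbose:
                print(f'validation loss improved '
                      f'({self.best_loss:.6f} -> {valid_loss:.6f}); saving model')
            torch.save(net.state_dict(), save_dir + '/checkpoint.pt')
            self.best_loss = valid_loss
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True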