Example #1
0
## data loader for val set, which is completely labeled
## indices step through the data at args.val_offset spacing
val_inds = range(0, args.num_val * args.val_offset, args.val_offset)
val_set = pipeline.IntrinsicDataset(
    args.data_path,
    args.val_sets,
    args.val_intrinsics,
    inds=val_inds,
    array=args.unlabeled_array,
    size_per_dataset=args.set_size,
)
## validation batches keep a fixed order (shuffle=False)
val_loader = torch.utils.data.DataLoader(
    val_set,
    batch_size=args.batch_size,
    num_workers=args.loader_threads,
    shuffle=False,
)

## print out error plots after every epoch for every prediction
error_keys = ['recon', 'refl', 'depth', 'shape', 'lights', 'shading']
logger = pipeline.Logger(error_keys, args.save_path)

## decides at which epochs the set of updated parameters changes,
## driven by the args.transfer schedule
param_updater = pipeline.ParamUpdater(args.transfer)

for epoch in range(args.num_epochs):
    print '<Main> Epoch {}'.format(epoch)

    if param_updater.check(epoch):
        ## update which parameters are updated
        transfer = param_updater.refresh(epoch)
        print 'Updating params: ', epoch, transfer
        ## get a new trainer with different learnable parameters
        trainer = pipeline.ComposerTrainer(model,
                                           train_loader,
                                           args.lr,
                                           args.lights_mult,
                                           args.un_mult,
Example #2
0
                                           ## NOTE(review): this excerpt begins mid-call; these are the
                                           ## trailing keyword args of what appears to be a
                                           ## torch DataLoader for the training set (shuffle=True),
                                           ## judging by the val_loader construction below -- confirm
                                           ## against the full source.
                                           batch_size=args.batch_size,
                                           num_workers=args.loaders,
                                           shuffle=True)

## fully-labeled validation split
val_set = pipeline.IntrinsicDataset(
    args.data_path,
    args.val_sets,
    args.intrinsics,
    array=args.array,
    size_per_dataset=args.num_val,
)
## validation batches keep a fixed order (shuffle=False)
val_loader = torch.utils.data.DataLoader(
    val_set,
    batch_size=args.batch_size,
    num_workers=args.loaders,
    shuffle=False,
)

## plots errors for the intrinsic image predictions
prediction_keys = ['refl', 'shape', 'lights']
logger = pipeline.Logger(prediction_keys, args.save_path)

trainer = pipeline.DecomposerTrainer(
    model,
    train_loader,
    args.lr,
    args.lights_mult,
)

## per-epoch loop: checkpoint the model, then evaluate on the val set
for epoch in range(args.num_epochs):
    print('<Main> Epoch {}'.format(epoch))

    ## save model state before validating
    checkpoint_path = os.path.join(args.save_path, 'state.t7')
    torch.save(model.state_dict(), checkpoint_path)

    ## get losses and save visualization on val images
    val_losses = pipeline.visualize_decomposer(
        model, val_loader, args.save_path, epoch)
Example #3
0
## data loader for val set, which is completely labeled
## indices step through the data at args.val_offset spacing
val_inds = range(0, args.num_val * args.val_offset, args.val_offset)
val_set = pipeline.IntrinsicDataset(args.data_path,
                                    args.val_sets,
                                    args.val_intrinsics,
                                    inds=val_inds,
                                    array=args.unlabeled_array,
                                    size_per_dataset=args.num_val)
## validation batches keep a fixed order (shuffle=False)
val_loader = torch.utils.data.DataLoader(val_set,
                                         batch_size=args.batch_size,
                                         num_workers=args.loader_threads,
                                         shuffle=False)

## print out error plots after every epoch for every prediction
prediction_names = ["recon", "refl", "depth", "shape", "lights", "shading"]
logger = pipeline.Logger(prediction_names, args.save_path)

## decides at which epochs the set of updated parameters changes,
## driven by the args.transfer schedule
param_updater = pipeline.ParamUpdater(args.transfer)

## training loop: rebuild the trainer whenever the parameter-update
## schedule (param_updater) fires for this epoch
for epoch in range(args.num_epochs):
    print("<Main> Epoch {}".format(epoch))

    if param_updater.check(epoch):
        ## update which parameters are updated
        transfer = param_updater.refresh(epoch)
        print("Updating params: ", epoch, transfer)
        ## get a new trainer with different learnable parameters
        ## NOTE(review): this call is truncated in the source excerpt; the
        ## remaining arguments and the rest of the loop body are not
        ## visible here.
        trainer = pipeline.ComposerTrainer(
            model,
            train_loader,
            args.lr,