Example #1
import argparse, torch
import models, pipeline

## arguments such as --data_path, --train_sets, --val_sets, --intrinsics,
## --array, --num_train and --num_val are used below but their add_argument
## calls are not part of this excerpt
parser = argparse.ArgumentParser()
parser.add_argument('--loaders',
                    type=int,
                    default=4,
                    help='number of parallel data loading processes')
parser.add_argument('--batch_size', type=int, default=32)
args = parser.parse_args()

pipeline.initialize(args)

## load model : image --> reflectance x normals x depth x lighting
model = models.Decomposer().cuda()

## get a data loader for train and val sets
train_set = pipeline.IntrinsicDataset(args.data_path,
                                      args.train_sets,
                                      args.intrinsics,
                                      array=args.array,
                                      size_per_dataset=args.num_train)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=args.batch_size,
                                           num_workers=args.loaders,
                                           shuffle=True)

val_set = pipeline.IntrinsicDataset(args.data_path,
                                    args.val_sets,
                                    args.intrinsics,
                                    array=args.array,
                                    size_per_dataset=args.num_val)
val_loader = torch.utils.data.DataLoader(val_set,
                                         batch_size=args.batch_size,
                                         num_workers=args.loaders,
                                         shuffle=False)
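
The excerpt stops after building the loaders. Below is a minimal training-loop sketch that reuses model, train_loader, and args from this example; the batch unpacking mirrors the test loop in Example #3, while the Adam optimizer, learning rate, per-component MSE losses, and args.num_epochs are assumptions of the sketch rather than the original script.

## minimal sketch, not the original training code
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)   # assumed optimizer and lr
criterion = torch.nn.MSELoss()                              # assumed per-component loss

for epoch in range(args.num_epochs):                        # args.num_epochs assumed
    model.train()
    for tensors in train_loader:
        tensors = [t.float().cuda(non_blocking=True) for t in tensors]
        inp, mask, refl_targ, depth_targ, shape_targ, lights_targ = tensors

        refl_pred, depth_pred, shape_pred, lights_pred = model(inp, mask)
        loss = (criterion(refl_pred, refl_targ)
                + criterion(depth_pred, depth_targ)
                + criterion(shape_pred, shape_targ)
                + criterion(lights_pred, lights_targ))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
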
Example #2
train_set = pipeline.ComposerDataset(args.data_path,
                                     args.unlabeled,
                                     args.labeled,
                                     unlabeled_array=args.unlabeled_array,
                                     labeled_array=args.labeled_array,
                                     size_per_dataset=args.set_size)
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=args.batch_size // 2,  # integer division keeps batch_size an int
                                           num_workers=args.loader_threads,
                                           shuffle=True)

## data loader for val set, which is completely labeled
val_set = pipeline.IntrinsicDataset(args.data_path,
                                    args.val_sets,
                                    args.val_intrinsics,
                                    inds=range(0,
                                               args.num_val * args.val_offset,
                                               args.val_offset),
                                    array=args.unlabeled_array,
                                    size_per_dataset=args.set_size)
val_loader = torch.utils.data.DataLoader(val_set,
                                         batch_size=args.batch_size,
                                         num_workers=args.loader_threads,
                                         shuffle=False)

## print out error plots after every epoch for every prediction
logger = pipeline.Logger(
    ['recon', 'refl', 'depth', 'shape', 'lights', 'shading'], args.save_path)
param_updater = pipeline.ParamUpdater(args.transfer)

for epoch in range(args.num_epochs):
    print('<Main> Epoch {}'.format(epoch))
Example #3
shape_loss = 0
lights_loss = 0

# preparing necessary models
#decomp_path = "saved/decomposer/decomp_with_dropout/state_dropout.t7"
decomp_path = "saved/decomposer/decomp_cl/state_cl.t7"

decomposer = models.Decomposer().cuda()
checkpoint = torch.load(decomp_path)
#decomposer.load_state_dict(checkpoint['model_state_dict'])
decomposer.load_state_dict(checkpoint)
decomposer.eval()  # equivalent to train(mode=False): inference mode for dropout/batchnorm

test_set = pipeline.IntrinsicDataset(args.data_path,
                                     args.test_sets,
                                     args.intrinsics,
                                     array=args.array,
                                     size_per_dataset=args.num_test,
                                     rel_path='../intrinsics-network')
test_loader = torch.utils.data.DataLoader(test_set,
                                          batch_size=args.batch_size,
                                          num_workers=args.loaders,
                                          shuffle=False)

images = []

for ind, tensors in enumerate(test_loader):
    ## Variable comes from torch.autograd; non_blocking replaces the old async kwarg
    tensors = [Variable(t.float().cuda(non_blocking=True)) for t in tensors]
    inp, mask, refl_targ, depth_targ, shape_targ, lights_targ = tensors

    refl_pred, depth_pred, shape_pred, lights_pred = decomposer(inp, mask)
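
The loop above is cut off after the forward pass; how images is filled and written out is not shown. A self-contained sketch of one way to dump predictions next to their targets, using torchvision (an assumption of this sketch; the original script may use pipeline's own visualization helpers):

import torch
import torchvision

def save_prediction_grid(pred, targ, path, nrow=8):
    ## stack targets first, predictions second, and write one PNG grid
    grid = torch.cat([targ.detach().cpu(), pred.detach().cpu()], dim=0)
    torchvision.utils.save_image(grid, path, nrow=nrow)

## hypothetical usage inside the loop above:
## save_prediction_grid(refl_pred, refl_targ, 'saved/refl_{}.png'.format(ind))
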
Example #4
import sys, os, argparse, torch
import models, pipeline


val_set = pipeline.IntrinsicDataset('dataset/output/',
                                    'car_tarin',
                                    ['input', 'mask', 'albedo', 'depth', 'normals', 'lights'],
                                    size_per_dataset=1)
print("is", type(val_set))
val_loader = torch.utils.data.DataLoader(val_set, batch_size=32, num_workers=4, shuffle=False)
shader = models.Shader().cuda()
# pipeline.visualize_shader(shader, val_loader, "saved/a.png",save_raw=True)
for epoch in range(4):
    # use the per-epoch save_path rather than overwriting saved/a.png each time
    save_path = os.path.join('saved/shader/', str(epoch) + '.png')
    val_losses = pipeline.visualize_decomposer(shader, val_loader, save_path, 1, save_raw=True)
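
Example #4 never saves the shader it visualizes. A sketch of per-epoch checkpointing that mirrors the torch.load / load_state_dict pattern from Example #3, reusing shader from above (the paths and the .t7 suffix are illustrative):

import os
import torch

checkpoint_dir = 'saved/shader'                       # illustrative path
os.makedirs(checkpoint_dir, exist_ok=True)

for epoch in range(4):
    ## ... visualize / train for this epoch as above ...
    state_path = os.path.join(checkpoint_dir, 'state_{}.t7'.format(epoch))
    torch.save(shader.state_dict(), state_path)

## restoring later works exactly as in Example #3:
## shader.load_state_dict(torch.load(state_path))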