# Create dataloader
dataset = PanoDataset(root_dir=args.root_dir,
                      cat_list=[*args.input_cat, 'edge', 'cor'],
                      flip=False, rotate=False, gamma=False,
                      return_filenames=True)

# Prepare model
backend = args.backend.lower()
net = models[backend]()
net = nn.DataParallel(net).to(device)
net.load_state_dict(torch.load(args.ckpt))

# Start evaluation
test_losses = StatisticDict()
test_pano_losses = StatisticDict()
test_2d3d_losses = StatisticDict()
for ith, datas in enumerate(dataset):
    print('processed %d batches out of %d' % (ith, len(dataset)),
          end='\r', flush=True)
    x = torch.cat([datas[i] for i in range(len(args.input_cat))], dim=0).numpy()
    x_augmented, aug_type = augment(x, args.flip, args.rotate)
    with torch.no_grad():
        x_augmented = torch.FloatTensor(x_augmented).to(device)
        de_list = net(x_augmented)
        edg_tensor = torch.sigmoid(de_list[:, :-1])
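        # A plausible continuation of this loop (an assumption, not the
        # original code): take the remaining corner channel, then use
        # augment_undo (defined in the next snippet) to map each augmented
        # copy back to the original frame and average the copies into a
        # single prediction map.
        cor_tensor = torch.sigmoid(de_list[:, -1:])
        edg_img = augment_undo(edg_tensor.cpu().numpy(), aug_type).mean(0)
        cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type).mean(0)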
def augment_undo(x_imgs_augmented, aug_type):
    x_imgs = []
    for x_img, aug in zip(x_imgs_augmented, aug_type):
        if aug == 'flip':
            x_imgs.append(np.flip(x_img, axis=-1))
        elif aug.startswith('rotate'):
            shift = int(aug.split()[-1])
            x_imgs.append(np.roll(x_img, -shift, axis=-1))
        elif aug == '':
            x_imgs.append(x_img)
        else:
            raise NotImplementedError()
    return np.array(x_imgs)

test_losses = StatisticDict()
for ith, datas in enumerate(dataset):
    print('processed %d batches out of %d' % (ith, len(dataset)),
          end='\r', flush=True)
    x = torch.cat([datas[i] for i in range(len(args.input_cat))], dim=0).numpy()
    x_augmented, aug_type = augment(x)
    with torch.no_grad():
        x_augmented = torch.FloatTensor(x_augmented).to(device)
        en_list = encoder(x_augmented)
        edg_de_list = edg_decoder(en_list[::-1])
        cor_de_list = cor_decoder(en_list[-1:] + edg_de_list[:-1])
        cor_tensor = torch.sigmoid(cor_de_list[-1])
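# `augment` is called above but not shown in this section. A minimal sketch
# of its likely shape, inferred from what augment_undo inverts (an
# assumption, not the original implementation): stack the unmodified input
# with horizontally flipped / rolled copies, and record which transform
# produced each copy so augment_undo can reverse it.
def augment(x_img, flip=True, rotate=()):
    x_imgs_augmented = [x_img]
    aug_type = ['']
    if flip:
        # Horizontal flip is its own inverse; augment_undo flips again.
        x_imgs_augmented.append(np.flip(x_img, axis=-1))
        aug_type.append('flip')
    for shift in rotate:
        # Roll along the width axis; augment_undo rolls back by -shift.
        x_imgs_augmented.append(np.roll(x_img, shift, axis=-1))
        aug_type.append('rotate %d' % shift)
    return np.array(x_imgs_augmented), aug_type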
                    num_workers=args.num_workers,
                    pin_memory=args.device.startswith('cuda'))

# Prepare model
encoder = Encoder(args.input_channels).to(device)
edg_decoder = Decoder(skip_num=2, out_planes=3).to(device)
cor_decoder = Decoder(skip_num=3, out_planes=1).to(device)
encoder.load_state_dict(torch.load('%s_encoder.pth' % args.path_prefix))
edg_decoder.load_state_dict(torch.load('%s_edg_decoder.pth' % args.path_prefix))
cor_decoder.load_state_dict(torch.load('%s_cor_decoder.pth' % args.path_prefix))

# Start evaluation
criti = nn.BCEWithLogitsLoss(reduction='none')
test_losses = StatisticDict()
for ith, datas in enumerate(loader):
    print('processed %d batches out of %d' % (ith, len(loader)),
          end='\r', flush=True)
    with torch.no_grad():
        # Prepare data
        x = torch.cat([datas[i] for i in range(len(args.input_cat))], dim=1).to(device)
        y_edg = datas[-2].to(device)
        y_cor = datas[-1].to(device)
        b_sz = x.size(0)

        # Feedforward
        en_list = encoder(x)
        edg_de_list = edg_decoder(en_list[::-1])
        cor_de_list = cor_decoder(en_list[-1:] + edg_de_list[:-1])
        y_edg_ = edg_de_list[-1]
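        # A plausible continuation (assumption, not the original code): the
        # corner logits come from the last corner-decoder map, and `criti`
        # (BCEWithLogitsLoss with reduction='none') gives per-pixel losses
        # that reduce to batch scalars.
        y_cor_ = cor_de_list[-1]
        loss_edg = criti(y_edg_, y_edg).mean()
        loss_cor = criti(y_cor_, y_cor).mean()

        # Hypothetical accumulation: `StatisticDict` is repo-specific, so the
        # update signature below is assumed for illustration only.
        test_losses.update('edg loss', loss_edg.item(), weight=b_sz)
        test_losses.update('cor loss', loss_cor.item(), weight=b_sz)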
    optimizer = optim.Adam([
        *group_weight(encoder),
        *group_weight(edg_decoder),
        *group_weight(cor_decoder),
    ], lr=args.lr, betas=(args.beta1, 0.999), weight_decay=args.weight_decay)
else:
    raise NotImplementedError()

# Init variable
args.warmup_iters = args.warmup_epochs * len(loader_train)
args.max_iters = args.epochs * len(loader_train)
args.running_lr = args.warmup_lr if args.warmup_epochs > 0 else args.lr
args.cur_iter = 0
train_losses = StatisticDict(winsz=100)
criti = nn.BCEWithLogitsLoss(reduction='none')
print(args)
print('%d iters per epoch for train' % len(loader_train))
print('%d iters per epoch for valid' % len(loader_valid))
print(' start training '.center(80, '='))

# Start training
for ith_epoch in range(1, args.epochs + 1):
    for ith_batch, datas in enumerate(loader_train):
        # Set learning rate
        adjust_learning_rate(optimizer, args)
        args.cur_iter += 1

        # Prepare data
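# `adjust_learning_rate` is called above but not shown. A minimal sketch of a
# schedule consistent with the warmup variables initialized above (an
# assumption, not the original implementation): linear warmup from
# args.warmup_lr to args.lr over warmup_iters, then linear decay to zero by
# max_iters.
def adjust_learning_rate(optimizer, args):
    if args.cur_iter < args.warmup_iters:
        # Warmup phase: interpolate from warmup_lr up to the base lr.
        frac = args.cur_iter / args.warmup_iters
        args.running_lr = args.warmup_lr + (args.lr - args.warmup_lr) * frac
    else:
        # Decay phase: scale the base lr down linearly toward zero.
        frac = (args.cur_iter - args.warmup_iters) / (args.max_iters - args.warmup_iters)
        args.running_lr = args.lr * max(0., 1. - frac)
    for param_group in optimizer.param_groups:
        param_group['lr'] = args.running_lr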