Example #1
0
def generate_data(num_clusters, total_timesteps, num_timesteps, num_atoms,
                  edge_prob, test_size, normalization):
    """Generate synthetic time-series clusters and split them for training.

    Returns a ``(train_dataset, val_dataset, precisions)`` tuple; the
    validation dataset reuses the normalization/scaler fitted on the
    training split so both halves are scaled identically.
    """
    print(f"{time_str()} Generating synthetic data.")
    samples, labels, precisions = gen_synthetic_tseries(
        num_clusters=num_clusters,
        num_tsteps=total_timesteps,
        sample_size=num_timesteps,
        num_nodes=num_atoms,
        edge_prob=edge_prob)

    # Stratify so the cluster-label distribution is preserved in both halves.
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        samples, labels, test_size=test_size, stratify=labels)

    print(f"{time_str()} After splitting we have "
          f"{len(x_train)} samples for training and {len(x_test)} "
          f"for testing.")

    tr_dataset = SyntheticDataset(x_train, y_train, normalization=normalization)

    # Build the validation set in "val" mode, then share the training-side
    # normalization state so no statistics leak from the validation split.
    val_dataset = SyntheticDataset(x_test, y_test, normalization="val")
    val_dataset.normalization = tr_dataset.normalization
    val_dataset.scaler = tr_dataset.scaler

    return tr_dataset, val_dataset, precisions
def fit(opt):
    """Train ``Net`` on a synthetic dataset with an 80/20 train/valid split.

    Writes a checkpoint per epoch under ``checkpoints/`` and the final
    weights to ``opt.model_path``.
    """
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

    dataset = SyntheticDataset(opt.train_path, filetype=opt.filetype)
    n_train = int(0.8 * len(dataset))
    train_set, valid_set = random_split(dataset, [n_train, len(dataset) - n_train])

    train_loader = DataLoader(train_set, batch_size=opt.bs, shuffle=True,
                              num_workers=4)
    valid_loader = DataLoader(valid_set, batch_size=opt.bs, num_workers=4)

    writer = SummaryWriter()
    model = Net().to(device)

    # Optionally warm-start from a previous checkpoint.
    if opt.resume:
        print("resuming")
        model.load_state_dict(torch.load(opt.model_path))

    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    for e in range(opt.epochs):
        train_loss = train_step(model, optimizer, train_loader, device, writer)
        valid_loss = valid_step(model, valid_loader, device)
        print(f"{e}\t{train_loss:.4f}\t{valid_loss:.4f}")
        # Keep a per-epoch snapshot in addition to the final weights.
        torch.save(model.state_dict(), f"checkpoints/{e}.pt")
    torch.save(model.state_dict(), opt.model_path)
Example #3
0
def main(args):
    """Load a HomographyModel checkpoint and visualise predicted warps.

    For each of the first ``args.n`` samples of the synthetic test set,
    saves an input GIF and warped-output GIFs under ``figures/``.
    """
    if args.resume !="":
        # Explicit checkpoint path supplied on the command line.
        model = HomographyModel.load_from_checkpoint(args.resume)
    else:
        # Otherwise pick the newest lightning checkpoint.
        # NOTE(review): sorted() is lexicographic, so "version10" sorts before
        # "version2" — confirm this matches how versions are numbered.
        model_dir = 'lightning_logs/version*'
        model_dir_list = sorted(glob.glob(model_dir))
        model_dir = model_dir_list[-1]
        model_path = osp.join(model_dir, "checkpoints", "*.ckpt")
        model_path_list = sorted(glob.glob(model_path))
        if len(model_path_list) > 0:
            model_path = model_path_list[-1]
            print(model_path)
            # model = HomographyModel.load_from_checkpoint(model_path)
            model = HomographyModel() # test-only: a slimmed-down re-definition of the class is used internally
            model_old = torch.load(model_path, map_location=lambda storage, loc: storage)
            # print(model_old.keys())
            # net.load_state_dict(torch.load('path/params.pkl'))
            model.load_state_dict(model_old['state_dict'])
            print(model_path)
            print("model loaded.")
        else:
            raise ValueError(f'No load model!')  #raise Error

    model.eval()  # no training

    # NOTE(review): pic_size / patch_size are not defined in this function —
    # presumably module-level globals; verify against the surrounding file.
    test_set = SyntheticDataset(args.test_path, rho=args.rho, filetype=args.filetype,pic_size=pic_size,patch_size=patch_size)

    # clear last output
    last_output = "figures/*"
    os.system("rm "+last_output)
    print('clear last ok.')
    for i in range(args.n):
        img_a,img_b, patch_a, patch_b, corners, delta = test_set[i]
        # after unsqueeze to save
        # tensors_to_gif(patch_a, patch_b, f"figures/input_{i}.gif")
        tensors_to_gif(img_a, img_b, f"figures/input_{i}.gif")

        # add a batch dimension for the network and kornia ops
        img_a = img_a.unsqueeze(0)
        img_b = img_b.unsqueeze(0)


        ##
        patch_a = patch_a.unsqueeze(0)
        patch_b = patch_b.unsqueeze(0)
        corners = corners.unsqueeze(0)

        # Express corners relative to the first corner of each sample.
        corners = corners - corners[:, 0].view(-1, 1, 2)
        delta_hat = model(patch_a, patch_b)
        corners_hat = corners + delta_hat
        # get the homography h
        h = kornia.get_perspective_transform(corners, corners_hat)
        h_inv = torch.inverse(h)

        patch_b_hat = kornia.warp_perspective(img_a, h_inv, (patch_a.shape[-2],patch_a.shape[-1]))  # 128 was the original fixed size  # NOTE: warps img_a, not patch_a
        img_b_hat = kornia.warp_perspective(img_a, h_inv, (img_a.shape[-2],img_a.shape[-1]))
        # outputs

        tensors_to_gif(patch_b_hat[0], patch_b[0], f"figures/output_patch{i}.gif")
        tensors_to_gif(img_b_hat[0], img_b[0], f"figures/output_{i}.gif")
Example #4
0
 def val_dataloader(self):
     """Build the validation DataLoader from ``self.hparams``."""
     dataset = SyntheticDataset(self.hparams.valid_path,
                                rho=self.hparams.rho)
     # No shuffling for validation; batches are collated via safe_collate.
     return DataLoader(dataset,
                       num_workers=4,
                       batch_size=self.hparams.batch_size,
                       collate_fn=safe_collate)
Example #5
0
 def train_dataloader(self):
     """Build the shuffled training DataLoader from ``self.hparams``."""
     dataset = SyntheticDataset(self.hparams.train_path,
                                rho=self.hparams.rho)
     # Shuffle each epoch; batches are collated via safe_collate.
     return DataLoader(dataset,
                       num_workers=4,
                       batch_size=self.hparams.batch_size,
                       shuffle=True,
                       collate_fn=safe_collate)
def test_dataset(path):
    """Smoke-test SyntheticDataset: print tensor shapes and display images."""
    dataset = SyntheticDataset(path)
    img_a, img_b, patch_a, patch_b, points = dataset[0]

    # img_b is intentionally not printed, matching the original check.
    for tensor in (img_a, patch_a, patch_b, points):
        print(tensor.shape)

    import matplotlib.pyplot as plt
    from torchvision import transforms

    to_pil = transforms.ToPILImage()

    for img in (img_a, patch_a, patch_b):
        plt.imshow(to_pil(img), cmap="gray")
        plt.show()
Example #7
0
def main(args):
    """Load the newest Lightning checkpoint and visualise predicted warps.

    For each of the first ``args.n`` synthetic test samples, saves an
    input GIF and a warped-output GIF under ``figures/``.

    Raises:
        ValueError: if no ``*.ckpt`` file exists under the newest
            ``lightning_logs/version*`` directory.
    """
    # model = HomographyModel.load_from_checkpoint(args.checkpoint)
    # Pick the newest version directory, then its newest checkpoint.
    # NOTE(review): sorted() is lexicographic, so "version10" sorts before
    # "version2" — confirm this matches how versions are numbered.
    model_dir = 'lightning_logs/version*'
    model_dir_list = sorted(glob.glob(model_dir))
    model_dir = model_dir_list[-1]
    model_path = osp.join(model_dir, "checkpoints", "*.ckpt")
    model_path_list = sorted(glob.glob(model_path))
    if len(model_path_list) > 0:
        model_path = model_path_list[-1]
        model = HomographyModel.load_from_checkpoint(model_path)
        print(model_path)
        print("model loaded.")
    else:
        # Fixed: was an f-string with no placeholders (ruff F541); the
        # runtime message is unchanged.
        raise ValueError('No load model!')  # raise Error
    model.eval()
    test_set = SyntheticDataset(args.test_path,
                                rho=args.rho,
                                filetype=args.filetype)

    # Clear the output of the previous run.
    last_output = "figures/*"
    os.system("rm " + last_output)
    print('clear last ok.')
    for i in range(args.n):
        img_a, patch_a, patch_b, corners, delta = test_set[i]

        tensors_to_gif(patch_a, patch_b, f"figures/input_{i}.gif")
        # Add a batch dimension for the network and kornia ops.
        patch_a = patch_a.unsqueeze(0)
        patch_b = patch_b.unsqueeze(0)
        corners = corners.unsqueeze(0)

        # Express corners relative to the first corner of each sample.
        corners = corners - corners[:, 0].view(-1, 1, 2)

        delta_hat = model(patch_a, patch_b)
        corners_hat = corners + delta_hat
        h = kornia.get_perspective_transform(corners, corners_hat)
        h_inv = torch.inverse(h)

        patch_b_hat = kornia.warp_perspective(patch_a, h_inv, (128, 128))
        tensors_to_gif(patch_b_hat[0], patch_b[0], f"figures/output_{i}.gif")
def main(args):
    """Evaluate a checkpointed HomographyModel on synthetic pairs as GIFs."""
    model = HomographyModel.load_from_checkpoint(args.checkpoint)
    model.eval()

    test_set = SyntheticDataset(args.test_path,
                                rho=args.rho,
                                filetype=args.filetype)

    for i in range(args.n):
        img_a, patch_a, patch_b, corners, delta = test_set[i]

        tensors_to_gif(patch_a, patch_b, f"figures/input_{i}.gif")

        # Add a batch dimension for the network and kornia ops.
        patch_a = patch_a.unsqueeze(0)
        patch_b = patch_b.unsqueeze(0)
        corners = corners.unsqueeze(0)

        # Express corners relative to the first corner of each sample.
        corners = corners - corners[:, 0].view(-1, 1, 2)

        # Predict the corner displacement, recover the homography, and
        # warp patch_a back toward patch_b for visual comparison.
        delta_hat = model(patch_a, patch_b)
        corners_hat = corners + delta_hat
        h = kornia.get_perspective_transform(corners, corners_hat)
        h_inv = torch.inverse(h)
        patch_b_hat = kornia.warp_perspective(patch_a, h_inv, (128, 128))

        tensors_to_gif(patch_b_hat[0], patch_b[0], f"figures/output_{i}.gif")
Example #9
0
File: main.py  Project: NVIDIA/HugeCTR
    model = DLRM(
        metadata['vocab_sizes'],
        num_dense_features=13,
        embedding_vec_size=128,
        bottom_stack_units=[512, 256, 128],
        top_stack_units=[1024, 1024, 512, 256, 1],
        num_gpus=hvd.size(),
        use_cuda_interact=args.custom_interact,
        compress=args.compress,
    )

    if args.use_synthetic_dataset or args.data_dir is None:
        print('[Info] Using synthetic dataset')
        dataset = SyntheticDataset(
            batch_size=global_batch_size // hvd.size(),
            num_iterations=args.early_stop if args.early_stop > 0 else 30,
            vocab_sizes=metadata['vocab_sizes'],
            prefetch=20,
        )
        test_dataset = SyntheticDataset(
            batch_size=global_batch_size // hvd.size(),
            num_iterations=args.early_stop if args.early_stop > 0 else 30,
            vocab_sizes=metadata['vocab_sizes'],
            prefetch=20,
        )
    elif args.use_splited_dataset:
        print('[Info] Using splited dataset in %s' % args.data_dir)
        dataset = SplitedBinaryDataset(
            os.path.join(args.data_dir, 'train/label.bin'),
            os.path.join(args.data_dir, 'train/dense.bin'),
            [
                os.path.join(args.data_dir, 'train/category_%d.bin' % i)
Example #10
0
def main(args):
    """Distributed face-embedding training entry point (PartialFC style).

    Initializes an NCCL process group, builds the backbone and a
    margin-based softmax loss, then runs the epoch loop with logging,
    verification and checkpoint callbacks.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    cfg = get_config(args.config)

    # Prefer launcher-provided env vars (torchrun / torch.distributed.launch);
    # fall back to a single-process group on a fixed local TCP port.
    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist.init_process_group('nccl')
    except KeyError:
        world_size = 1
        rank = 0
        dist.init_process_group(backend='nccl',
                                init_method="tcp://127.0.0.1:12584",
                                rank=rank,
                                world_size=world_size)

    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)

    # Random synthetic data for benchmarking, or a real MXNet-record dataset.
    if cfg.rec == "synthetic":
        train_set = SyntheticDataset(local_rank=local_rank)
    else:
        train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=train_set,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=2,
                               pin_memory=True,
                               drop_last=True)
    backbone = get_model(cfg.network,
                         dropout=0.0,
                         fp16=cfg.fp16,
                         num_features=cfg.embedding_size).to(local_rank)
    summary(backbone, input_size=(3, 112, 112))
    # NOTE(review): this exit() makes everything below unreachable — the
    # function only prints the model summary. Presumably leftover debugging;
    # remove it to actually train.
    exit()

    # Optionally resume backbone weights; any load failure falls through to
    # a fresh initialization.
    if cfg.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth,
                           map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            if rank == 0:
                logging.info("resume fail, backbone init successfully!")

    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()
    # Pick the margin-softmax loss; 'magface' takes an extra lambda_g knob.
    if cfg.loss == 'magface':
        margin_softmax = losses.get_loss(cfg.loss, lambda_g=cfg.lambda_g)
    elif cfg.loss == 'mag_cosface':
        margin_softmax = losses.get_loss(cfg.loss)
    else:
        margin_softmax = losses.get_loss(cfg.loss,
                                         s=cfg.s,
                                         m1=cfg.m1,
                                         m2=cfg.m2,
                                         m3=cfg.m3)
    # PartialFC holds the (sampled) classification layer, sharded by rank.
    module_partial_fc = PartialFC(rank=rank,
                                  local_rank=local_rank,
                                  world_size=world_size,
                                  resume=cfg.resume,
                                  batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax,
                                  num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size,
                                  prefix=cfg.output)

    # Linear LR scaling: cfg.lr is defined per 512 samples of global batch.
    opt_backbone = torch.optim.SGD(params=[{
        'params': backbone.parameters()
    }],
                                   lr=cfg.lr / 512 * cfg.batch_size *
                                   world_size,
                                   momentum=0.9,
                                   weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{
        'params': module_partial_fc.parameters()
    }],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9,
                              weight_decay=cfg.weight_decay)

    num_image = len(train_set)
    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = num_image // total_batch_size * cfg.num_epoch

    def lr_step_func(current_step):
        # Linear warmup, then multiply by 0.1 at each decay-epoch boundary.
        cfg.decay_step = [
            x * num_image // total_batch_size for x in cfg.decay_epoch
        ]
        if current_step < cfg.warmup_step:
            return current_step / cfg.warmup_step
        else:
            return 0.1**len([m for m in cfg.decay_step if m <= current_step])

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=lr_step_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=lr_step_func)

    # Dump the full config to the log, padded for column alignment.
    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    val_target = cfg.val_targets
    callback_verification = CallBackVerification(2000, rank, val_target,
                                                 cfg.rec)
    callback_logging = CallBackLogging(50, rank, cfg.total_step,
                                       cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    start_epoch = 0
    global_step = 0
    grad_amp = MaxClipGradScaler(
        cfg.batch_size, 128 *
        cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            x = backbone(img)
            features = F.normalize(x)
            # PartialFC computes the loss internally and hands back the
            # gradient w.r.t. the features for the backbone backward pass.
            x_grad, loss_v = module_partial_fc.forward_backward(
                label, features, opt_pfc, x)
            if cfg.fp16:
                # Mixed precision: scale the incoming gradient, unscale
                # before clipping, then step through the grad scaler.
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()

            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16,
                             scheduler_backbone.get_last_lr()[0], grad_amp)
            callback_verification(global_step, backbone)
            scheduler_backbone.step()
            scheduler_pfc.step()
        callback_checkpoint(global_step, backbone, module_partial_fc)

    callback_verification('last', backbone)
    dist.destroy_process_group()
Example #11
0
def main(args):
    """Train (or resume) a HomographyModel with a photometric loss.

    Checkpoint selection:
      * ``args.resume == "none"`` — train from scratch;
      * ``args.resume`` non-empty   — load weights from that checkpoint file;
      * otherwise                   — try the newest ``HESIC_logs/version*``
        checkpoint, falling back to training from scratch.

    After each epoch a validation pass runs and the best (lowest) average
    validation loss is checkpointed via ``save_checkpoint``.
    """
    # if args.resume is not "":
    #     model = HomographyModel.load_from_checkpoint(args.resume)
    if args.resume == "none":
        hmodel = HomographyModel(hparams=args)
        best = 0
        print("None:train from none.")
    elif args.resume != "":
        # Fixed: was `args.resume is not ""` — an identity comparison with a
        # string literal (SyntaxWarning on CPython 3.8+); equality is intended.
        hmodel = HomographyModel(hparams=args)
        model_old = torch.load(args.resume,
                               map_location=lambda storage, loc: storage)
        # print(model_old.keys())
        # net.load_state_dict(torch.load('path/params.pkl'))
        hmodel.load_state_dict(model_old['state_dict'])
        best = model_old['loss']
        # model = HomographyModel.load_from_checkpoint(args.resume)
        print(args.resume)
        print("model loaded.")
    else:
        try:
            # Newest version dir / newest checkpoint (lexicographic sort).
            model_dir = 'HESIC_logs/version*'
            model_dir_list = sorted(glob.glob(model_dir))
            model_dir = model_dir_list[-1]
            model_path = osp.join(model_dir, "checkpoints", "*.ckpt")
            model_path_list = sorted(glob.glob(model_path))

            model_path = model_path_list[-1]
            hmodel = HomographyModel.load_from_checkpoint(model_path)
            # NOTE(review): subscripting the model (`hmodel['loss']`) looks
            # wrong — it likely raises and drops into the fallback below;
            # confirm whether the checkpoint dict was meant here.
            best = hmodel['loss']
            print(model_path)
            print("model loaded.")
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt.
            hmodel = HomographyModel(hparams=args)
            best = 0
            print("train from none.")
    print("history_best:", best)

    device = torch.device(
        "cuda:" + str(args.gpus) if torch.cuda.is_available() else "cpu")
    hmodel = hmodel.to(device)

    train_set = SyntheticDataset(hmodel.hparams.train_path,
                                 rho=hmodel.hparams.rho,
                                 pic_size=hmodel.hparams.picsize,
                                 patch_size=hmodel.hparams.patchsize)
    val_set = SyntheticDataset(hmodel.hparams.valid_path,
                               rho=hmodel.hparams.rho,
                               pic_size=hmodel.hparams.picsize,
                               patch_size=hmodel.hparams.patchsize)
    train_loader = DataLoader(
        train_set,
        num_workers=4,
        batch_size=hmodel.hparams.batch_size,
        shuffle=True,
        collate_fn=safe_collate,
    )
    validation_loader = DataLoader(
        val_set,
        num_workers=4,
        batch_size=hmodel.hparams.batch_size,
        collate_fn=safe_collate,
    )

    optimizer = torch.optim.Adam(hmodel.model.parameters(),
                                 lr=hmodel.hparams.learning_rate)

    for epoch in range(args.epochs):
        for train_step, train_batch in enumerate(train_loader):
            img_a, img_b, patch_a, patch_b, corners, gt = train_batch
            img_a = img_a.to(device)
            img_b = img_b.to(device)
            patch_a = patch_a.to(device)
            patch_b = patch_b.to(device)
            corners = corners.to(device)
            delta = hmodel.model(patch_a, patch_b).to(device)
            optimizer.zero_grad()
            loss = photometric_loss(delta, img_a, patch_b, corners)
            loss.backward()
            optimizer.step()
            if train_step % 100 == 0:
                print("train || epoch:", epoch, "  step:", train_step,
                      "  loss:", loss.data)

        # Validation pass (no gradients).
        with torch.no_grad():
            vali_total_loss = []
            for vali_step, validation_batch in enumerate(validation_loader):
                vali_img_a, vali_img_b, vali_patch_a, vali_patch_b, vali_corners, vali_gt = validation_batch
                vali_img_a = vali_img_a.to(device)
                vali_img_b = vali_img_b.to(device)
                vali_patch_a = vali_patch_a.to(device)
                vali_patch_b = vali_patch_b.to(device)
                vali_corners = vali_corners.to(device)
                vali_delta = hmodel.model(vali_patch_a,
                                          vali_patch_b).to(device)
                vali_loss = photometric_loss(vali_delta, vali_img_a,
                                             vali_patch_b, vali_corners)
                vali_total_loss.append({"vali_loss": vali_loss})

            avg_loss = torch.stack([x["vali_loss"]
                                    for x in vali_total_loss]).mean()
            print("test || loss:", avg_loss.data)
            # Track the lowest validation loss; best == 0 means "no history".
            if best == 0:
                best = avg_loss
                is_best = 1
            elif avg_loss <= best:
                is_best = 1
                best = avg_loss
            else:
                is_best = 0
            save_checkpoint(
                {
                    'state_dict': hmodel.state_dict(),
                    'loss': avg_loss,
                    'optimizer': optimizer.state_dict(),
                }, is_best)
            print("present best = ", best)
            torch.cuda.empty_cache()
Example #12
0
    iou = tp / (num + fp)
    miou = iou.mean()

    return miou


if __name__ == '__main__':
    # Disable OpenCV's internal threading (commonly done to avoid thread
    # oversubscription alongside DataLoader workers — confirm intent).
    cv2.setNumThreads(0)

    opt = parse_args()
    print(opt)

    Path(opt.out_path).mkdir(parents=True, exist_ok=True)

    # Train on synthetic renders; validate and test on real images.
    train_set = SyntheticDataset(opt.syn_path, opt.params_path, opt.blend, opt.channels)
    train_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True, pin_memory=True)

    val_set = RealDataset(opt.real_path, opt.channels, split='val')
    val_loader = DataLoader(dataset=val_set, num_workers=0, batch_size=1, shuffle=False)

    test_set = RealDataset(opt.real_path, opt.channels, split='test')
    test_loader = DataLoader(dataset=test_set, num_workers=0, batch_size=1, shuffle=False)

    # The class count comes from the synthetic training set.
    opt.n_classes = train_set.n_classes
    net = PowderNet(opt.arch, opt.n_channels, train_set.n_classes)
    net = net.cuda()
    optimizer = AdamW(net.parameters(), lr=opt.lr, weight_decay=opt.decay)
    scheduler = CosineLRWithRestarts(optimizer, opt.batch_size, len(train_set), opt.period, opt.t_mult)
    vis = Visualizer(server=opt.server, env=opt.env)
    start_epoch = 0
Example #13
0
def train(args):
    """Train HomographyModel on a synthetic dataset with TensorBoard logging.

    Saves intermediate whole-model snapshots every ``model_save_fre``
    iterations under ``args.model_dir``.
    """
    # Load data
    TrainDataset = SyntheticDataset(data_path=args.data_path,
                                    mode=args.mode,
                                    img_h=args.img_h,
                                    img_w=args.img_w,
                                    patch_size=args.patch_size,
                                    do_augment=args.do_augment)
    train_loader = DataLoader(TrainDataset, batch_size=args.batch_size, shuffle=True, num_workers=4)
    print('===> Train: There are totally {} training files'.format(len(TrainDataset)))

    net = HomographyModel(args.use_batch_norm)
    if args.resume:
        # NOTE(review): ckpt.state_dict() implies the checkpoint is a whole
        # pickled module (torch.save(net, ...)), not a state_dict — confirm.
        model_path = os.path.join(args.model_dir, args.model_name)
        ckpt = torch.load(model_path)
        net.load_state_dict(ckpt.state_dict())
    if torch.cuda.is_available():
        net = net.cuda()

    optimizer = optim.Adam(net.parameters(), lr=args.lr)  # default as 0.0001
    # Choose a StepLR period so that decaying by 0.96 per period takes the lr
    # from args.lr to roughly args.min_lr over args.max_epochs.
    decay_rate = 0.96
    step_size = (math.log(decay_rate) * args.max_epochs) / math.log(args.min_lr * 1.0 / args.lr)
    print('args lr:', args.lr, args.min_lr)
    print('===> Decay steps:', step_size)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=int(step_size), gamma=0.96)

    print("start training")
    writer = SummaryWriter(logdir=args.log_dir, flush_secs=60)
    score_print_fre = 100  # print running losses every N iterations
    summary_fre = 1000  # write TensorBoard scalars/images every N iterations
    model_save_fre = 4000  # checkpoint every N iterations
    glob_iter = 0
    t0 = time.time()

    for epoch in range(args.max_epochs):
        net.train()
        epoch_start = time.time()
        train_l1_loss = 0.0
        train_l1_smooth_loss = 0.0
        train_h_loss = 0.0

        for i, batch_value in enumerate(train_loader):
            # Batch layout by index: 0/1 raw pair, 2/3 augmented pair,
            # 4/5 full images, 6 corner points, 7 ground truth, 8 patch indices.
            I1_batch = batch_value[0].float()
            I2_batch = batch_value[1].float()
            I1_aug_batch = batch_value[2].float()
            I2_aug_batch = batch_value[3].float()
            I_batch = batch_value[4].float()
            I_prime_batch = batch_value[5].float()
            pts1_batch = batch_value[6].float()
            gt_batch = batch_value[7].float()
            patch_indices_batch = batch_value[8].float()

            if torch.cuda.is_available():
                I1_aug_batch = I1_aug_batch.cuda()
                I2_aug_batch = I2_aug_batch.cuda()
                I_batch = I_batch.cuda()
                pts1_batch = pts1_batch.cuda()
                gt_batch = gt_batch.cuda()
                patch_indices_batch = patch_indices_batch.cuda()

            # forward, backward, update weights
            optimizer.zero_grad()
            batch_out = net(I1_aug_batch, I2_aug_batch, I_batch, pts1_batch, gt_batch, patch_indices_batch)
            h_loss = batch_out['h_loss']
            rec_loss = batch_out['rec_loss']
            ssim_loss = batch_out['ssim_loss']
            l1_loss = batch_out['l1_loss']
            l1_smooth_loss = batch_out['l1_smooth_loss']
            ncc_loss = batch_out['ncc_loss']
            pred_I2 = batch_out['pred_I2']

            # Only the L1 loss is optimized; the other losses are logged.
            loss = l1_loss
            loss.backward()
            optimizer.step()

            train_l1_loss += loss.item()
            train_l1_smooth_loss += l1_smooth_loss.item()
            train_h_loss += h_loss.item()
            if (i + 1) % score_print_fre == 0 or (i + 1) == len(train_loader):
                # NOTE(review): scheduler.get_lr() is deprecated in newer
                # PyTorch in favor of get_last_lr().
                print(
                    "Training: Epoch[{:0>3}/{:0>3}] Iter[{:0>3}]/[{:0>3}] l1 loss: {:.4f} "
                    "l1 smooth loss: {:.4f} h loss: {:.4f} lr={:.8f}".format(
                        epoch + 1, args.max_epochs, i + 1, len(train_loader), train_l1_loss / score_print_fre,
                        train_l1_smooth_loss / score_print_fre, train_h_loss / score_print_fre, scheduler.get_lr()[0]))
                train_l1_loss = 0.0
                train_l1_smooth_loss = 0.0
                train_h_loss = 0.0

            if glob_iter % summary_fre == 0:
                writer.add_scalar('learning_rate', scheduler.get_lr()[0], glob_iter)
                writer.add_scalar('h_loss', h_loss, glob_iter)
                writer.add_scalar('rec_loss', rec_loss, glob_iter)
                writer.add_scalar('ssim_loss', ssim_loss, glob_iter)
                writer.add_scalar('l1_loss', l1_loss, glob_iter)
                writer.add_scalar('l1_smooth_loss', l1_smooth_loss, glob_iter)
                writer.add_scalar('ncc_loss', ncc_loss, glob_iter)

                # [:, :, ::-1] reverses the channel axis (presumably
                # BGR -> RGB for display — confirm the dataset's convention).
                writer.add_image('I', utils.denorm_img(I_batch[0, ...].cpu().numpy()).astype(np.uint8)[:, :, ::-1],
                                 glob_iter, dataformats='HWC')
                writer.add_image('I_prime',
                                 utils.denorm_img(I_prime_batch[0, ...].numpy()).astype(np.uint8)[:, :, ::-1],
                                 glob_iter, dataformats='HWC')

                writer.add_image('I1_aug', utils.denorm_img(I1_aug_batch[0, 0, ...].cpu().numpy()).astype(np.uint8),
                                 glob_iter, dataformats='HW')
                writer.add_image('I2_aug', utils.denorm_img(I2_aug_batch[0, 0, ...].cpu().numpy()).astype(np.uint8),
                                 glob_iter, dataformats='HW')
                writer.add_image('pred_I2',
                                 utils.denorm_img(pred_I2[0, 0, ...].cpu().detach().numpy()).astype(np.uint8),
                                 glob_iter, dataformats='HW')

                writer.add_image('I2', utils.denorm_img(I2_batch[0, 0, ...].numpy()).astype(np.uint8), glob_iter,
                                 dataformats='HW')
                writer.add_image('I1', utils.denorm_img(I1_batch[0, 0, ...].numpy()).astype(np.uint8), glob_iter,
                                 dataformats='HW')

            # save model
            # NOTE(review): torch.save(net, ...) pickles the whole module;
            # saving net.state_dict() is the more portable convention.
            if glob_iter % model_save_fre == 0 and glob_iter != 0:
                filename = 'model' + '_iter_' + str(glob_iter) + '.pth'
                model_save_path = os.path.join(args.model_dir, filename)
                torch.save(net, model_save_path)

            glob_iter += 1
        scheduler.step()
        print("Epoch: {} epoch time: {:.1f}s".format(epoch, time.time() - epoch_start))

    elapsed_time = time.time() - t0
    print("Finished Training in {:.0f}h {:.0f}m {:.0f}s.".format(
        elapsed_time // 3600, (elapsed_time % 3600) // 60, (elapsed_time % 3600) % 60))
Example #14
0
def test(args):
    """Evaluate a trained HomographyModel on the synthetic test split.

    Accumulates h/l1 losses over the whole split, optionally saves
    correspondence visualisations, and prints percentile statistics of the
    per-sample homography loss.
    """
    # Load data
    TestDataset = SyntheticDataset(data_path=args.data_path,
                                   mode=args.mode,
                                   img_h=args.img_h,
                                   img_w=args.img_w,
                                   patch_size=args.patch_size,
                                   do_augment=args.do_augment)
    test_loader = DataLoader(TestDataset, batch_size=1)
    print('===> Test: There are totally {} testing files'.format(len(TestDataset)))

    # Load model
    # NOTE(review): state.state_dict() implies the checkpoint is a whole
    # pickled module (torch.save(net, ...)), not a state_dict — confirm.
    net = HomographyModel()
    model_path = os.path.join(args.model_dir, args.model_name)
    state = torch.load(model_path)
    net.load_state_dict(state.state_dict())
    if torch.cuda.is_available():
        net = net.cuda()

    print("start testing")

    with torch.no_grad():
        net.eval()
        test_l1_loss = 0.0
        test_h_loss = 0.0
        h_losses_array = []
        for i, batch_value in enumerate(test_loader):
            # Batch layout mirrors the training loader; indices 0/1 (the raw
            # image pair) are unused here.
            I1_aug_batch = batch_value[2].float()
            I2_aug_batch = batch_value[3].float()
            I_batch = batch_value[4].float()
            I_prime_batch = batch_value[5].float()
            pts1_batch = batch_value[6].float()
            gt_batch = batch_value[7].float()
            patch_indices_batch = batch_value[8].float()

            if torch.cuda.is_available():
                I1_aug_batch = I1_aug_batch.cuda()
                I2_aug_batch = I2_aug_batch.cuda()
                I_batch = I_batch.cuda()
                pts1_batch = pts1_batch.cuda()
                gt_batch = gt_batch.cuda()
                patch_indices_batch = patch_indices_batch.cuda()

            batch_out = net(I1_aug_batch, I2_aug_batch, I_batch, pts1_batch, gt_batch, patch_indices_batch)
            h_loss = batch_out['h_loss']
            rec_loss = batch_out['rec_loss']
            ssim_loss = batch_out['ssim_loss']
            l1_loss = batch_out['l1_loss']
            pred_h4p_value = batch_out['pred_h4p']

            test_h_loss += h_loss.item()
            test_l1_loss += l1_loss.item()
            h_losses_array.append(h_loss.item())

            if args.save_visual:
                # Reshape the flat 8-vectors into 4 (x, y) corner offsets.
                I_sample = utils.denorm_img(I_batch[0].cpu().numpy()).astype(np.uint8)
                I_prime_sample = utils.denorm_img(I_prime_batch[0].numpy()).astype(np.uint8)
                pts1_sample = pts1_batch[0].cpu().numpy().reshape([4, 2]).astype(np.float32)
                gt_h4p_sample = gt_batch[0].cpu().numpy().reshape([4, 2]).astype(np.float32)

                pts2_sample = pts1_sample + gt_h4p_sample

                pred_h4p_sample = pred_h4p_value[0].cpu().numpy().reshape([4, 2]).astype(np.float32)
                pred_pts2_sample = pts1_sample + pred_h4p_sample

                # Save
                visual_file_name = ('%s' % i).zfill(4) + '.jpg'
                utils.save_correspondences_img(I_prime_sample, I_sample, pts1_sample, pts2_sample, pred_pts2_sample,
                                               args.results_dir, visual_file_name)

            print("Testing: h_loss: {:4.3f}, rec_loss: {:4.3f}, ssim_loss: {:4.3f}, l1_loss: {:4.3f}".format(
                h_loss.item(), rec_loss.item(), ssim_loss.item(), l1_loss.item()
            ))

    print('|Test size  |   h_loss   |    l1_loss   |')
    print(len(test_loader), test_h_loss / len(test_loader), test_l1_loss / len(test_loader))

    tops_list = utils.find_percentile(h_losses_array)
    print('===> Percentile Values: (20, 50, 80, 100):')
    print(tops_list)
    print('======> End! ====================================')