# Beispiel #1
def main(args, config):
    """Evaluate the CNN over ``args.nSamples`` samples and report averages.

    Runs one demo per sample, folding each sample's PSNR/SSIM into Welford
    running-statistics accumulators, then prints the overall mean and
    standard deviation when more than one sample was processed.
    """
    cuda = cnn_utils.check_cuda(config)
    model = cnn_utils.load_model_and_weights(args, config)
    if cuda:
        model = model.cuda()

    hdf5_path = os.path.join(config['PATH']['hdf5_dir'],
                             config['PATH']['hdf5_name'])
    with h5py.File(hdf5_path, mode='r', libver='latest') as hdf5_file:
        # Welford accumulator triples for the running statistics.
        psnr_stats = (0, 0, 0)
        ssim_stats = (0, 0, 0)

        for idx in range(args.nSamples):
            sample_psnr, sample_ssim = do_one_demo(
                args, config, hdf5_file, model, idx, cuda)
            psnr_stats = welford.update(psnr_stats, sample_psnr)
            ssim_stats = welford.update(ssim_stats, sample_ssim)

        if args.nSamples > 1:
            psnr_mean, psnr_var, _ = welford.finalize(psnr_stats)
            ssim_mean, ssim_var, _ = welford.finalize(ssim_stats)
            print("\nOverall cnn psnr average {:5f}, stddev {:5f}".format(
                psnr_mean, math.sqrt(psnr_var)))
            print("Overall cnn ssim average {:5f}, stddev {:5f}".format(
                ssim_mean, math.sqrt(ssim_var)))
        # Ground truth possible
def main(args, config, sample_index):
    """Run the CNN on one validation sample and save the resulting images.

    Loads the model, reads sample ``sample_index`` from the HDF5 validation
    set, runs a single forward pass, saves the output images (plus,
    optionally, ground-truth and difference images), and prints per-sample
    PSNR/SSIM statistics.

    Args:
        args: parsed CLI namespace; fields read here: ``no_eval``,
            ``get_diff`` (plus whatever ``load_model_and_weights`` uses).
        config: config mapping with a ``'PATH'`` section providing
            ``hdf5_dir``, ``hdf5_name`` and ``output_dir``.
        sample_index: index of the validation sample to process.
    """
    cuda = cnn_utils.check_cuda(config)
    model = cnn_utils.load_model_and_weights(args, config)
    if cuda:
        model = model.cuda()

    # Inference only — put dropout/batch-norm layers into eval behaviour.
    model.eval()

    # Create output directory
    base_dir = os.path.join(config['PATH']['output_dir'], 'warped')
    if not os.path.isdir(base_dir):
        pathlib.Path(base_dir).mkdir(parents=True, exist_ok=True)
    save_dir = get_sub_dir_for_saving(base_dir)

    start_time = time.time()
    file_path = os.path.join(config['PATH']['hdf5_dir'],
                             config['PATH']['hdf5_name'])
    with h5py.File(file_path, mode='r', libver='latest') as hdf5_file:
        depth_grp = hdf5_file['val']['disparity']
        SNUM = sample_index
        depth_images = torch.squeeze(
            torch.tensor(depth_grp['images'][SNUM], dtype=torch.float32))

        colour_grp = hdf5_file['val']['colour']
        colour_images = torch.tensor(colour_grp['images'][SNUM],
                                     dtype=torch.float32)

        # NOTE(review): 'grid_size' is taken from the depth tensor's first
        # axis — presumably the number of light-field views; confirm against
        # data_transform.center_normalise's expectations.
        sample = {
            'depth': depth_images,
            'colour': colour_images,
            'grid_size': depth_images.shape[0]
        }

        warped = data_transform.center_normalise(sample)
        # unsqueeze_(0) adds a batch dimension of 1, in place.
        im_input = warped['inputs'].unsqueeze_(0)

        if cuda:
            im_input = im_input.cuda()

        output = model(im_input)

        # Timing covers HDF5 read + preprocessing + the forward pass.
        time_taken = time.time() - start_time
        print("Time taken was {:4f}s".format(time_taken))
        # Hard-coded 8x8 view grid; assumes 64 output views — TODO confirm
        # this always matches sample['grid_size'] above.
        grid_size = 64

        # Welford accumulator triples for running PSNR/SSIM statistics.
        psnr_accumulator = (0, 0, 0)
        ssim_accumulator = (0, 0, 0)

        print("Saving output to", save_dir)

        # De-normalise back to pixel range, round, and move to uint8 on CPU.
        output = torch.squeeze(denormalise_lf(output))
        cpu_output = np.around(output.cpu().detach().numpy()).astype(np.uint8)

        # Ground truth is needed both for evaluation and for diff images.
        if (not args.no_eval) or args.get_diff:
            ground_truth = np.around(
                denormalise_lf(colour_images).numpy()).astype(np.uint8)

        grid_len = int(math.sqrt(grid_size))
        for i in range(grid_size):
            # Map the flat view index onto (row, col) within the view grid.
            row, col = i // grid_len, i % grid_len

            file_name = 'Colour{}{}.png'.format(row, col)
            save_location = os.path.join(save_dir, file_name)
            if i == 0:
                print("Saving images of size ", cpu_output[i].shape)
            image_warping.save_array_as_image(cpu_output[i], save_location)

            if args.get_diff:
                colour = ground_truth[i]
                diff = image_warping.get_diff_image(colour, cpu_output[i])
                #diff = get_diff_image_floatint(res, colour)
                file_name = 'Diff{}{}.png'.format(row, col)
                save_location = os.path.join(save_dir, file_name)
                image_warping.save_array_as_image(diff, save_location)

            if not args.no_eval:
                # Save the ground-truth view alongside, then score against it.
                img = ground_truth[i]
                file_name = 'GT_Colour{}{}.png'.format(row, col)
                save_location = os.path.join(save_dir, file_name)
                image_warping.save_array_as_image(img, save_location)
                psnr = evaluate.my_psnr(cpu_output[i], img)
                ssim = evaluate.ssim(cpu_output[i], img)
                psnr_accumulator = welford.update(psnr_accumulator, psnr)
                ssim_accumulator = welford.update(ssim_accumulator, ssim)

        psnr_mean, psnr_var, _ = welford.finalize(psnr_accumulator)
        ssim_mean, ssim_var, _ = welford.finalize(ssim_accumulator)
        print("For cnn, psnr average {:5f}, stddev {:5f}".format(
            psnr_mean, math.sqrt(psnr_var)))
        print("For cnn, ssim average {:5f}, stddev {:5f}".format(
            ssim_mean, math.sqrt(ssim_var)))
# Beispiel #3
def main(args, config, writer):
    """Train the model, tracking and saving the best-loss checkpoint.

    Runs the training loop for ``args.nEpochs`` epochs, keeping a deep copy
    of the best model seen, restarting the cosine LR schedule with a doubled
    period when it completes, checkpointing every epoch, and finally saving
    the best model (optionally gated by a user prompt).

    Args:
        args: parsed CLI namespace; fields read here: checkpoint,
            pretrained, frozen, start_epoch, nEpochs, clip, lr, prompt, tag.
        config: config mapping with a 'PATH' section providing
            checkpoint_dir and model_dir.
        writer: summary writer passed to train(); closed before returning.
    """
    best_loss = math.inf
    best_model, best_epoch = None, None
    cuda = cnn_utils.check_cuda(config)

    # Attempts to optimise - see
    # https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do
    torch.backends.cudnn.benchmark = True

    data_loaders = create_dataloaders(args, config)

    model, criterion, optimizer, lr_scheduler = setup_model(args)
    if cuda:  # GPU support
        model = model.cuda()
        # The below is only needed if loss fn has params
        #criterion = criterion.cuda()

    if args.checkpoint:  # Resume from a checkpoint
        best_loss = cnn_utils.load_from_checkpoint(model, optimizer, args,
                                                   config)

    if args.pretrained:  # Direct copy weights from another model
        cnn_utils.load_weights(model, args, config, frozen=args.frozen)

    # Perform training and testing
    print("Beginning training loop")
    for epoch in range(args.start_epoch, args.start_epoch + args.nEpochs):
        epoch_loss_all = train(model=model,
                               dset_loaders=data_loaders,
                               optimizer=optimizer,
                               lr_scheduler=lr_scheduler,
                               criterion=criterion,
                               epoch=epoch,
                               cuda=cuda,
                               clip=args.clip,
                               writer=writer)

        # Mean loss over the epoch (0 if train() produced no values,
        # matching the original accumulation loop's result).
        epoch_loss = (sum(epoch_loss_all) / len(epoch_loss_all)
                      if epoch_loss_all else 0)

        if epoch_loss < best_loss:
            best_loss = epoch_loss
            best_epoch = epoch
            best_model = copy.deepcopy(model)

        # Update the scheduler - warm restart: when the cosine schedule
        # completes, reset the LR and restart with a doubled period.
        if lr_scheduler.last_epoch == lr_scheduler.T_max:
            for group in optimizer.param_groups:
                group['lr'] = args.lr
            lr_scheduler = CosineAnnealingLR(optimizer,
                                             T_max=lr_scheduler.T_max * 2)

        # cnn_utils.log_all_layer_weights(model, writer, epoch)

        # Checkpoint every epoch except the first.
        # Fix: the original condition `epoch % 1 == 0 and epoch != 0` is
        # redundant — `epoch % 1 == 0` is always true.
        if epoch != 0:
            cnn_utils.save_checkpoint(model, epoch, optimizer, best_loss,
                                      config['PATH']['checkpoint_dir'],
                                      args.tag + "{}.pth".format(epoch))

        if args.prompt:
            if not helpers.prompt_user(CONTINUE_MESSAGE):
                print("Ending training")
                break

    print("Best loss was {:.5f} at epoch {}".format(best_loss, best_epoch))

    save = True
    if args.prompt:
        if not helpers.prompt_user(SAVE_MESSAGE):
            print("Not saving the model")
            save = False

    # Save the best model. Guard against best_model never being set (e.g.
    # a resumed best_loss that no epoch improved on, or nEpochs == 0).
    if save:
        if best_model is None:
            print("No epoch improved on the starting loss; nothing to save")
        else:
            cnn_utils.save_checkpoint(
                best_model, best_epoch, optimizer, best_loss,
                config['PATH']['model_dir'],
                args.tag + "_best_at{}.pth".format(best_epoch))

    parent_dir = os.path.abspath(os.pardir)
    scalar_dir = os.path.join(parent_dir, "logs", args.tag)
    # if not os.path.isdir(scalar_dir):
    #     pathlib.Path(scalar_dir).mkdir(parents=True, exist_ok=True)
    # writer.export_scalars_to_json(
    #     os.path.join(scalar_dir, "all_scalars.json"))
    writer.close()