def conditional_save_pred(self, mode, file_name, pred, epoch):
        if ("test" in mode or mode == "eval") and self.args.save_pred:

            # save images for visualization/ testing
            image_folder = os.path.join(self.output_directory,
                                        mode + "_output")
            if not os.path.exists(image_folder):
                os.makedirs(image_folder)
            img = torch.squeeze(pred.data.cpu()).numpy()
            file_path = os.path.join(image_folder, file_name)
            vis_utils.save_depth_as_uint16png(img, file_path)
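A minimal sketch of how this helper might be driven from an evaluation loop; the loader, model, and the (N, 1, H, W) prediction shape are assumptions for illustration, not confirmed by the source.

    # hypothetical caller, assuming `logger` is an instance of the class above
    for i, batch in enumerate(test_loader):
        with torch.no_grad():
            pred = model(batch)  # predicted depth, e.g. shape (N, 1, H, W)
        # writes <output_directory>/test_output/<file_name> as a 16-bit PNG
        logger.conditional_save_pred("test", "{0:010d}.png".format(i), pred, epoch)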
Example #2
    def conditional_save_pred(self, mode, i, pred, epoch):
        # if ("test" in mode or mode == "eval") and self.args.save_pred:
        if ("test" in mode
                or mode == "val"):  # and self.args.save_pred:  # my addition, 2020/02/26

            # save images for visualization/ testing
            image_folder = os.path.join(self.output_directory, mode)
            if not os.path.exists(image_folder):
                os.makedirs(image_folder)
            img = torch.squeeze(pred.data.cpu()).numpy()
            filename = os.path.join(image_folder, '{0:010d}.png'.format(i))
            vis_utils.save_depth_as_uint16png(img, filename)
Example #3
    def conditional_save_pred(self, mode, i, pred, epoch):
        if ("test" in mode or mode == "eval") and self.args.save_pred:

            # scale to [0, 1] so TensorBoard renders the (1, H, W) depth
            # map as a grayscale image
            self.writer.add_image(f"depth_{mode}",
                                  pred[0] / torch.max(pred[0]),
                                  self.current_step)

            # save images for visualization/ testing
            image_folder = os.path.join(self.output_directory,
                                        mode + "_output")
            if not os.path.exists(image_folder):
                os.makedirs(image_folder)
            img = torch.squeeze(pred.data.cpu()).numpy()
            filename = os.path.join(image_folder, '{0:010d}.png'.format(i))
            vis_utils.save_depth_as_uint16png(img, filename)
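Example #3 additionally streams each prediction to TensorBoard. A sketch of the assumed wiring for the writer and step counter it uses; the SummaryWriter construction is an assumption, not shown in the source.

    # hypothetical setup for the writer/current_step attributes used above
    from torch.utils.tensorboard import SummaryWriter

    def init_tensorboard(logger, log_dir):
        logger.writer = SummaryWriter(log_dir=log_dir)
        logger.current_step = 0  # advanced once per logged batch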
Example #4
    def conditional_save_pred_named_with_intensity(self, mode, name, pred,
                                                   pred_intensity, epoch):
        # if ("test" in mode or mode == "eval") and self.args.save_pred:
        if ("test" in mode or mode == "val") or mode == 'eval':

            # save images for visualization/ testing
            image_folder = os.path.join(self.output_directory,
                                        mode + "_output_depth")
            # print(name, image_folder, pred.shape)
            if not os.path.exists(image_folder):
                os.makedirs(image_folder)
            img = torch.squeeze(pred.data.cpu()).numpy()
            # filename = os.path.join(image_folder, '{0:010d}.png'.format(i))
            filename = os.path.join(image_folder, name)
            vis_utils.save_depth_as_uint16png(img, filename)

            image_folder = os.path.join(self.output_directory,
                                        mode + "_output_intensity")
            if not os.path.exists(image_folder):
                os.makedirs(image_folder)
            img = torch.squeeze(pred_intensity.data.cpu()).numpy()
            # filename = os.path.join(image_folder, '{0:010d}.png'.format(i))
            filename = os.path.join(image_folder, name)
            vis_utils.save_depth_as_uint16png(img, filename)
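A sketch of calling the depth-plus-intensity variant, assuming a model with two prediction heads; the unpacking below is hypothetical.

    # hypothetical caller for a model that predicts depth and intensity
    pred_depth, pred_intensity = model(batch_data)
    name = "{0:010d}.png".format(i)
    # writes to <output_directory>/eval_output_depth/<name> and
    # <output_directory>/eval_output_intensity/<name> as 16-bit PNGs
    logger.conditional_save_pred_named_with_intensity(
        "eval", name, pred_depth, pred_intensity, epoch)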
Example #5
def iterate(mode, args, loader, model, optimizer, logger, epoch):
    block_average_meter = AverageMeter()
    average_meter = AverageMeter()
    meters = [block_average_meter, average_meter]
    # switch to appropriate mode
    assert mode in ["train", "val", "eval", "test_prediction", "test_completion"], \
        "unsupported mode: {}".format(mode)
    if mode == 'train':
        model.train()
        lr = helper.adjust_learning_rate(args.lr, optimizer, epoch)
    else:
        model.eval()
        lr = 0
    torch.set_printoptions(profile="full")
    table_is = np.zeros(400)
    # defined up front so the return at the bottom is valid even if the
    # periodic summary below never runs
    avg, is_best = None, False

    # ITERATE OVER IMAGES

    for i, batch_data in enumerate(loader):

        sparse_depth_pathname = batch_data['d_path'][0]
        print(sparse_depth_pathname)
        del batch_data['d_path']
        print("i: ", i)
        print(
            f"depth (sparse) points: {len(torch.where(batch_data['d']>0)[0])}")
        print(
            f"gt depth (dense) points: {len(torch.where(batch_data['gt'] > 0)[0])}"
        )
        start = time.time()
        batch_data = {
            key: val.to(device)
            for key, val in batch_data.items() if val is not None
        }
        gt = batch_data[
            'gt'] if mode != 'test_prediction' and mode != 'test_completion' else None

        # adjust depth for features
        depth_adjust = args.depth_adjust
        adjust_features = False  # normalize the number of points in a feature
        print(depth_adjust, args.use_d)
        if depth_adjust and args.use_d:
            if args.type_feature == "sq":
                if args.use_rgb:

                    depth_new, alg_mode, feat_mode, features, shape = depth_adjustment(
                        batch_data['d'], args.test_mode, args.feature_mode,
                        args.feature_num, adjust_features, i, model_orig,
                        args.seed, batch_data['rgb'])
                else:
                    depth_new, alg_mode, feat_mode, features, shape = depth_adjustment(
                        batch_data['d'], args.test_mode, args.feature_mode,
                        args.feature_num, adjust_features, i, model_orig,
                        args.seed)
            elif args.type_feature == "lines":
                print("lines")
                depth_new, alg_mode, feat_mode, features = depth_adjustment_lines(
                    batch_data['d'], args.test_mode, args.feature_mode,
                    args.feature_num, i, sparse_depth_pathname, model_orig,
                    args.seed)
            else:
                # guard against an unbound depth_new below
                raise ValueError(
                    "unsupported type_feature: {}".format(args.type_feature))

            print("batch depth_new: ", len(np.where(depth_new > 0)[0]))
            # depth_new is an H x W numpy array; the two unsqueeze calls
            # rebuild the (1, 1, H, W) NCHW layout the model expects
            batch_data['d'] = torch.Tensor(depth_new).unsqueeze(0).unsqueeze(
                1).to(device)

        print("batch depth: ", len(torch.where(batch_data['d'] > 0)[0]))
        data_time = time.time() - start
        start = time.time()
        if mode == "train":
            pred = model(batch_data)
        else:
            with torch.no_grad():
                pred = model(batch_data)

        # im = batch_data['d'].detach().cpu().numpy()
        # im_sq = im.squeeze()
        # plt.figure()
        # plt.imshow(im_sq)
        # plt.show()
        # for i in range(im_sq.shape[0]):
        #     print(f"{i} - {np.sum(im_sq[i])}")

        # pred = pred + 9.5
        # gt = gt + 9.5

        # compute loss
        depth_loss, photometric_loss, smooth_loss, mask = 0, 0, 0, None
        if mode == 'train':
            # Loss 1: the direct depth supervision from ground truth label
            # mask=1 indicates that a pixel does not have ground truth labels
            if 'sparse' in args.train_mode:
                depth_loss = depth_criterion(pred, batch_data['d'])
                mask = (batch_data['d'] < 1e-3).float()
            elif 'dense' in args.train_mode:
                depth_loss = depth_criterion(pred, gt)
                mask = (gt < 1e-3).float()
            # Loss 2: the self-supervised photometric loss
            if args.use_pose:
                # create multi-scale pyramids
                pred_array = helper.multiscale(pred)
                rgb_curr_array = helper.multiscale(batch_data['rgb'])
                rgb_near_array = helper.multiscale(batch_data['rgb_near'])
                if mask is not None:
                    mask_array = helper.multiscale(mask)
                num_scales = len(pred_array)
                # compute photometric loss at multiple scales
                for scale in range(len(pred_array)):
                    pred_ = pred_array[scale]
                    rgb_curr_ = rgb_curr_array[scale]
                    rgb_near_ = rgb_near_array[scale]
                    mask_ = None
                    if mask is not None:
                        mask_ = mask_array[scale]
                    # compute the corresponding intrinsic parameters
                    height_, width_ = pred_.size(2), pred_.size(3)
                    intrinsics_ = kitti_intrinsics.scale(height_, width_)
                    # inverse warp from a nearby frame to the current frame
                    warped_ = homography_from(rgb_near_, pred_,
                                              batch_data['r_mat'],
                                              batch_data['t_vec'], intrinsics_)
                    # weighted by 2**(scale - num_scales): e.g. for
                    # num_scales = 5, scales 0..4 contribute 1/32, 1/16,
                    # 1/8, 1/4, 1/2 of their photometric loss
                    photometric_loss += photometric_criterion(
                        rgb_curr_, warped_, mask_) * (2**(scale - num_scales))
            # Loss 3: the depth smoothness loss
            smooth_loss = smoothness_criterion(pred) if args.w2 > 0 else 0

            # backprop
            loss = depth_loss + args.w1 * photometric_loss + args.w2 * smooth_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        gpu_time = time.time() - start

        # measure accuracy and record loss
        with torch.no_grad():
            mini_batch_size = next(iter(batch_data.values())).size(0)
            result = Result()
            if mode != 'test_prediction' and mode != 'test_completion':
                result.evaluate(pred.data, gt.data, photometric_loss)
            for m in meters:
                m.update(result, gpu_time, data_time, mini_batch_size)

            print(f"rmse: {result.rmse:,}")
            if result.rmse < 6000:
                print("good rmse")
            elif result.rmse > 12000:
                print("bad rmse")

            logger.conditional_print(mode, i, epoch, lr, len(loader),
                                     block_average_meter, average_meter)
            logger.conditional_save_img_comparison(mode, i, batch_data, pred,
                                                   epoch)
            logger.conditional_save_pred(mode, i, pred, epoch)

        # save log and checkpoint
        every = 999 if mode == "val" else 200

        if i % every == 0 and i != 0:

            print(
                f"test settings (main_orig eval): {args.type_feature} {args.test_mode} {args.feature_mode} {args.feature_num}"
            )
            avg = logger.conditional_save_info(mode, average_meter, epoch)
            is_best = logger.rank_conditional_save_best(mode, avg, epoch)
            if is_best and not (mode == "train"):
                logger.save_img_comparison_as_best(mode, epoch)
            logger.conditional_summarize(mode, avg, is_best)

            if mode != "val":
                #if 1:
                helper.save_checkpoint({  # save checkpoint
                    'epoch': epoch,
                    'model': model.module.state_dict(),
                    'best_result': logger.best_result,
                    'optimizer': optimizer.state_dict(),
                    'args': args,
                }, is_best, epoch, logger.output_directory, args.type_feature, args.test_mode, args.feature_num, args.feature_mode, args.depth_adjust, i, every, "scratch")

        # draw features
        if args.draw_features_rgb and args.evaluate and depth_adjust:
            run_info = [args.type_feature, alg_mode, feat_mode, model_orig]
            if batch_data['rgb'] is not None:
                draw(args.type_feature, batch_data['rgb'], batch_data['d'],
                     features, 65, run_info, i, result)
        if args.depth_save and args.evaluate:
            name = os.path.split(sparse_depth_pathname)[-1]
            parameters_name = args.evaluate.split(os.sep)
            if depth_adjust:
                path_depth = f"depth_predicted/{args.type_feature}/{args.feature_mode}/{args.test_mode}/{parameters_name[-2]}/{parameters_name[-1]}/"
            else:
                path_depth = f"depth_predicted/full/{parameters_name[-2]}/{parameters_name[-1]}/"
            os.makedirs(path_depth, exist_ok=True)
            #torch.save(pred, path_depth + f"{name}.pt")
            depth_predicted = pred.squeeze().detach().cpu().numpy()
            depth_pred_color = vis_utils.depth_colorize(depth_predicted)
            vis_utils.save_depth_as_uint16png(
                depth_pred_color, path_depth + f"{name}_im_color.png")

        print('\n' + '*' * 15 + '\n\n')
    return avg, is_best
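A minimal sketch of a driver around iterate; the loaders, logger, and argument set are assumed to be built elsewhere, as in a typical depth-completion main script.

    # hypothetical training driver
    for epoch in range(args.start_epoch, args.epochs):
        iterate("train", args, train_loader, model, optimizer, logger, epoch)
        # avg may be None if the periodic summary inside iterate never ran
        avg, is_best = iterate("val", args, val_loader, model, optimizer,
                               logger, epoch)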