Example #1
def linear_logistic_regression(res, x_train, y_train, x_test, y_test):
    # Create model
    model = LogisticRegression()
    # Fit model
    model.fit(x_train, y_train)
    # Predict class probabilities for the test dataset
    y_pred = model.predict_proba(x_test)[:, 1]
    # Threshold the probabilities into class labels
    y_pred = np.where(y_pred > 0.5, 1, 0)

    accuracy = accuracy_score(y_test, y_pred)
    add_row(res, ["logisticRegression", accuracy])
    return res
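The scikit-learn snippets in this collection (this example and the decision-tree and random-forest ones below) appear to share a handful of imports and an add_row helper that collects [model name, accuracy] rows. None of that scaffolding is shown, so the following is a minimal sketch under those assumptions:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

def add_row(res, row):
    # Hypothetical helper: append one [model name, accuracy] row to the result list.
    res.append(row)
    return res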
Example #2
    def save(self, input, prediction, target, to_disk=False):
        rgb = input[0, :3, :, :]
        input_depth = input[0, 3:4, :, :]
        input_conf = input[0, 4:5, :, :]
        in_gt_depth = target[0, :1, :, :]
        out_depth1 = prediction[0][0, :1, :, :]
        if prediction[1] is not None:
            out_conf1 = prediction[1][0, :1, :, :]
        else:
            out_conf1 = None

        if prediction[2] is not None:
            out_depth2 = prediction[2][0, :1, :, :]
        else:
            out_depth2 = None

        row = utils.merge_into_row_with_gt2(rgb, input_depth, input_conf,
                                            in_gt_depth, out_depth1, out_conf1,
                                            out_depth2)
        if (self.image is not None):
            self.image = utils.add_row(self.image, row)
        else:
            self.image = row

        if to_disk:
            utils.save_image(self.image, self.filename)
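Examples like this one rely on utils helpers (merge_into_row*, add_row, save_image) that are not reproduced here. As a rough, hedged sketch of what such helpers typically do in these depth-estimation codebases (colorize a depth map, tile images into a row, stack rows, write a PNG), assuming H x W x 3 uint8 arrays:

import numpy as np
import matplotlib.cm as cm
from PIL import Image

def colored_depthmap(depth, d_min=None, d_max=None):
    # Map a 2-D depth array to an RGB uint8 image with a colormap.
    d_min = depth.min() if d_min is None else d_min
    d_max = depth.max() if d_max is None else d_max
    rel = (depth - d_min) / max(d_max - d_min, 1e-8)
    return (255 * cm.viridis(rel)[:, :, :3]).astype('uint8')

def add_row(img_merge, row):
    # Stack a new row of images below the existing merged image.
    return np.vstack([img_merge, row])

def save_image(img_merge, filename):
    Image.fromarray(img_merge.astype('uint8')).save(filename)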
Example #3
def decision_tree_accuracy(res, x_train, y_train, x_test, y_test):
    # Create Decision Tree classifier object
    tree = DecisionTreeClassifier()
    # Train Decision Tree Classifier
    tree = tree.fit(x_train, y_train)
    # Predict the response for test dataset
    y_pred = tree.predict(x_test)

    accuracy = accuracy_score(y_test, y_pred)
    add_row(res, ["decisionTree", accuracy])

    col_names = [
        'percentile1', 'percentile2', 'percentile3', 'percentile4',
        'percentile5', 'persistence1', 'persistence2', 'persistence3',
        'persistence4', 'persistence5', 'tree1', 'tree2', 'tree3'
    ]
    print(dict(zip(col_names, tree.feature_importances_)))

    return res
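A hedged usage sketch for the two helpers above, on a synthetic 13-feature dataset so that the feature-importance printout lines up with the 13 column names:

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=13, random_state=0)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

res = []
res = linear_logistic_regression(res, x_train, y_train, x_test, y_test)
res = decision_tree_accuracy(res, x_train, y_train, x_test, y_test)
print(res)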
Example #4
def random_forest_accuracy(res,
                           x_train,
                           y_train,
                           x_test,
                           y_test,
                           n_estimators=28):
    # Create Random Forest classifier object
    model_rf = RandomForestClassifier(n_estimators=n_estimators,
                                      random_state=0)
    # Fit model
    model_rf.fit(x_train, y_train)
    # Predict the response for test dataset
    y_pred = model_rf.predict(x_test)

    accuracy = accuracy_score(y_test, y_pred)
    add_row(res, ["randomForest", accuracy])

    print("Tree depths: ",
          [estimator.tree_.max_depth for estimator in model_rf.estimators_])

    return res
Example #5
    def draw_images(idxs: Iterable[int]) -> np.ndarray:
        im_merge = None
        for i in idxs:
            plt.figure()
            x_rgbd, y = val_dataset_rgbd[i]
            x_rgb, y = val_dataset_rgb[i]
            x_rgb = torch.unsqueeze(x_rgb, 0)
            x_rgbd = torch.unsqueeze(x_rgbd, 0)
            y = torch.unsqueeze(y, 0)
            x_rgbd, x_rgb, y = x_rgbd.cuda(), x_rgb.cuda(), y.cuda()
            y_hat_depth = depth_model(x_rgbd  # type: ignore
                                      if depth_model_is_rgbd else x_rgb)

            y_hat_rgb = rgb_model(x_rgbd  # type: ignore
                                  if rgb_model_is_rgbd else x_rgb)
            images = [x_rgbd, y, y_hat_depth, y_hat_rgb]
            squares = None if "single-pixel" in args.depth_type else [
                val_dataset_rgbd.square
            ] * len(images)
            row = merge_ims_into_row(images, square=squares)
            im_merge = row if im_merge is None else add_row(im_merge, row)
        return im_merge
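A hedged usage sketch for draw_images; it assumes the enclosing script has already built val_dataset_rgbd, val_dataset_rgb, both models and the merge/save utilities used above, and that a CUDA device is available:

im_merge = draw_images([0, 50, 100])          # three validation indices
save_image(im_merge, "qualitative_rows.png")  # write the stacked rows to disk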
Example #6
def validate(val_loader, model, epoch, logger):
    average_meter = AverageMeter()

    model.eval()  # switch to evaluate mode

    end = time.time()

    skip = len(val_loader) // 9  # save images every skip iters

    for i, (input, target) in enumerate(val_loader):

        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)

        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)

        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        if args.dataset == 'kitti':
            rgb = input[0]
            pred = pred[0]
            target = target[0]
        else:
            rgb = input

        if i == 0:
            img_merge = utils.merge_into_row(rgb, target, pred)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = output_directory + '/comparison_' + str(epoch) + '.png'
            utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'REL={result.absrel:.2f}({average.absrel:.2f}) '
                  'Log10={result.lg10:.3f}({average.lg10:.3f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'Delta2={result.delta2:.3f}({average.delta2:.3f}) '
                  'Delta3={result.delta3:.3f}({average.delta3:.3f})'.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'Rel={average.absrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    logger.add_scalar('Test/rmse', avg.rmse, epoch)
    logger.add_scalar('Test/Rel', avg.absrel, epoch)
    logger.add_scalar('Test/log10', avg.lg10, epoch)
    logger.add_scalar('Test/Delta1', avg.delta1, epoch)
    logger.add_scalar('Test/Delta2', avg.delta2, epoch)
    logger.add_scalar('Test/Delta3', avg.delta3, epoch)
    return avg, img_merge
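A hedged usage sketch for this validate(); the names val_dataset, model and output_directory are assumptions, and logger is taken to be a TensorBoard SummaryWriter since only add_scalar is called on it:

from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=4)
logger = SummaryWriter(log_dir=output_directory)
avg, img_merge = validate(val_loader, model.cuda(), epoch=0, logger=logger)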
Example #7
def validate(val_loader, model, epoch, batch_epoch, write_to_file=True):
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    for i, (Y, Y_1_2, Y_1_4, Y_1_8, LR, LR_8, HR,
            name) in enumerate(val_loader):
        Y = Y.cuda()
        Y_1_2 = Y_1_2.cuda()
        Y_1_4 = Y_1_4.cuda()
        Y_1_8 = Y_1_8.cuda()
        LR_8 = LR_8.cuda()
        LR = LR.cuda()
        HR = HR.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end
        # compute output
        end = time.time()
        with torch.no_grad():
            #  print("I am for validation in main 342")
            if args.arch == 'VDSR_16':
                pred_HR = model(LR)
            elif args.arch == 'VDSR_16_2':
                pred_HR = model(Y, LR)
            elif args.arch == 'VDSR':
                pred_HR, residule = model(LR_8, Y)
            elif args.arch == 'ResNet_bicubic':
                pred_HR, residule = model(LR_8, Y)
            elif args.arch in ('resnet50_15_6', 'resnet50_15_11', 'resnet50_15_12'):
                pred_HR = model(Y_1_2, LR)
            elif args.arch in ('resnet50_15_2', 'resnet50_15_3', 'resnet50_15_5', 'resnet50_15_8', 'resnet50_15_9'):
                pred_HR, residule = model(Y, LR, LR_8)

            else:
                if config.use_different_size_Y == 1:
                    pred_HR, pred_thermal0 = model(Y, Y_1_2, Y_1_4, Y_1_8, LR)
                else:
                    pred_HR, pred_thermal = model(Y, LR)
        torch.cuda.synchronize()
        gpu_time = time.time() - end
        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred_HR, HR)
        average_meter.update(result, gpu_time, data_time,
                             Y.size(0))  #Y.size(0) batch_size
        end = time.time()

        # save 8 images for visualization (generate comparison images for the validation set)
        skip = config.skip
        if i == 0:
            img_merge = utils.merge_into_row_with_YT(Y, LR, pred_HR, HR)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row_with_YT(Y, LR, pred_HR, HR)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:  # save the final merged image
            filename = output_directory + '/Compair_data_epoch_' + str(
                epoch) + '_batch_eopch_' + str(batch_epoch + 1) + '.png'
            utils.save_image(img_merge, filename)
            print("生成第" + str(batch_epoch + 1) + "图片")

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))
    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'dataset epoch': epoch,
                'batch epoch': batch_epoch + 1,
                'psnr': 10 * math.log(1 / (avg.mse), 10),
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })
    return avg, img_merge
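The CSV row above derives PSNR from the average MSE as 10 * log10(1 / mse), which implicitly assumes the signal is normalized so that its peak value is 1. A small standalone sketch of that relationship:

import math

def psnr_from_mse(mse, peak=1.0):
    # PSNR in dB for a signal whose maximum possible value is `peak`.
    return 10 * math.log10(peak ** 2 / mse)

print(psnr_from_mse(0.01))  # 20.0 dB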
Example #8
def validate(val_loader, model, epoch, write_to_file=True):
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        ##############################################################
        ##             Start of PnP-Depth modification              ##
        ##############################################################
        # Original inference
        with torch.no_grad():
            ori_pred = model.pnp_forward_rear(model.pnp_forward_front(
                input))  # equivalent to `ori_pred = model(input)`

        # Inference with PnP
        sparse_target = input[:, -1:]  # NOTE: written for rgbd input
        criterion = criteria.MaskedL1Loss().cuda(
        )  # NOTE: criterion function defined here only for clarity
        pnp_iters = 5  # number of iterations
        pnp_alpha = 0.01  # update/learning rate
        pnp_z = model.pnp_forward_front(input)
        for pnp_i in range(pnp_iters):
            if pnp_i != 0:
                pnp_z = pnp_z - pnp_alpha * torch.sign(pnp_z_grad)  # iFGM
            pnp_z = Variable(pnp_z, requires_grad=True)
            pred = model.pnp_forward_rear(pnp_z)
            if pnp_i < pnp_iters - 1:
                pnp_loss = criterion(pred, sparse_target)
                pnp_z_grad = Grad([pnp_loss], [pnp_z], create_graph=True)[0]
        ##############################################################
        ##              End of PnP-Depth modification               ##
        ##############################################################
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                rgb = input[:, :3, :, :]
                depth = input[:, 3:, :, :]

            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(
                        rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target,
                                                       pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })
    return avg, img_merge
Example #9
def validate(val_loader, model, write_to_file=True):
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    print_freq = 10
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 10
        rgb = input

        if i == 0:
            import matplotlib.pyplot as plt
            plt.imsave('pred.png', np.squeeze(pred.cpu().numpy()))
            img_merge = utils.merge_into_row(rgb, target, pred)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred)
            img_merge = utils.add_row(img_merge, row)

        if (i + 1) % print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })

    return avg, img_merge
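All of these validate() loops share the same timing idiom: torch.cuda.synchronize() before and after inference so the wall-clock difference reflects GPU time rather than queued kernels. A minimal sketch of that pattern in isolation (assuming a CUDA device is available):

import time
import torch

def timed_inference(model, batch):
    torch.cuda.synchronize()          # flush pending GPU work before starting the clock
    start = time.time()
    with torch.no_grad():
        pred = model(batch)
    torch.cuda.synchronize()          # wait for inference to finish before stopping it
    return pred, time.time() - start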
Example #10
def validation(device,
               data_loader,
               model,
               ord_loss,
               output_dir,
               epoch,
               logger,
               PRINT_FREQ,
               BETA,
               GAMMA,
               ORD_NUM=80.0):
    avg80 = AverageMeter()
    avg50 = AverageMeter()
    model.eval()

    end = time.time()
    skip = 1
    img_list = []

    evalbar = tqdm(total=len(data_loader))

    for i, (_input, _sparse_depth, _dense_depth) in enumerate(data_loader):
        _input, _sparse_depth, _dense_depth = _input.to(
            device), _sparse_depth.to(device), _dense_depth.to(device)
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            _pred_prob, _pred_label = model(_input)
            loss = ord_loss(_pred_prob, _dense_depth)

        torch.cuda.synchronize()
        gpu_time = time.time() - end

        pred_depth = utils.label2depth_sid(_pred_label,
                                           K=ORD_NUM,
                                           alpha=1.0,
                                           beta=BETA,
                                           gamma=GAMMA)

        abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = compute_errors(
            _sparse_depth, pred_depth.to(device))

        # measure accuracy and record loss
        result80 = Result()
        result80.evaluate(pred_depth, _sparse_depth.data, cap=80)
        result50 = Result()
        result50.evaluate(pred_depth, _sparse_depth.data, cap=50)

        avg80.update(result80, gpu_time, data_time, _input.size(0))
        avg50.update(result50, gpu_time, data_time, _input.size(0))
        end = time.time()

        # save images for visualization
        if i == 0:
            img_merge = utils.merge_into_row(_input, _dense_depth, pred_depth)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(_input, _dense_depth, pred_depth)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = os.path.join(output_dir,
                                    'eval_{}.png'.format(int(epoch)))
            print('save validation figures at {}'.format(filename))
            utils.save_image(img_merge, filename)

        if (i + 1) % PRINT_FREQ == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'AbsRel={result.absrel:.2f}({average.absrel:.2f}) '
                  'SqRel={result.sqrel:.2f}({average.sqrel:.2f}) '
                  'Log10={result.lg10:.3f}({average.lg10:.3f}) '
                  'RMSE_log={result.rmse_log:.3f}({average.rmse_log:.3f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'Delta2={result.delta2:.3f}({average.delta2:.3f}) '
                  'Delta3={result.delta3:.3f}({average.delta3:.3f})'.format(
                      i + 1,
                      len(data_loader),
                      gpu_time=gpu_time,
                      result=result80,
                      average=avg80.average()))

        # update progress bar and show loss
        evalbar.set_postfix(
            ORD_LOSS=
            '{:.2f},RMSE/log={:.2f}/{:.2f},delta={:.2f}/{:.2f}/{:.2f},AbsRel/SqRel={:.2f}/{:.2f}'
            .format(loss, rmse, rmse_log, a1, a2, a3, abs_rel, sq_rel))
        evalbar.update(1)
        i = i + 1

    print('\n**** CAP=80 ****\n'
          'RMSE={average.rmse:.3f}\n'
          'RMSE_log={average.rmse_log:.3f}\n'
          'AbsRel={average.absrel:.3f}\n'
          'SqRel={average.sqrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          'iRMSE={average.irmse:.3f}\n'
          'iMAE={average.imae:.3f}\n'
          't_GPU={average.gpu_time:.3f}\n'.format(average=avg80.average()))

    print('\n**** CAP=50 ****\n'
          'RMSE={average.rmse:.3f}\n'
          'RMSE_log={average.rmse_log:.3f}\n'
          'AbsRel={average.absrel:.3f}\n'
          'SqRel={average.sqrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          'iRMSE={average.irmse:.3f}\n'
          'iMAE={average.imae:.3f}\n'
          't_GPU={average.gpu_time:.3f}\n'.format(average=avg50.average()))

    logger.add_scalar('VAL_CAP80/RMSE', avg80.average().rmse, epoch)
    logger.add_scalar('VAL_CAP80/RMSE_log', avg80.average().rmse_log, epoch)
    logger.add_scalar('VAL_CAP80/AbsRel', avg80.average().absrel, epoch)
    logger.add_scalar('VAL_CAP80/SqRel', avg80.average().sqrel, epoch)
    logger.add_scalar('VAL_CAP80/Delta1', avg80.average().delta1, epoch)
    logger.add_scalar('VAL_CAP80/Delta2', avg80.average().delta2, epoch)
    logger.add_scalar('VAL_CAP80/Delta3', avg80.average().delta3, epoch)

    logger.add_scalar('VAL_CAP50/RMSE', avg50.average().rmse, epoch)
    logger.add_scalar('VAL_CAP50/RMSE_log', avg50.average().rmse_log, epoch)
    logger.add_scalar('VAL_CAP50/AbsRel', avg50.average().absrel, epoch)
    logger.add_scalar('VAL_CAP50/SqRel', avg50.average().sqrel, epoch)
    logger.add_scalar('VAL_CAP50/Delta1', avg50.average().delta1, epoch)
    logger.add_scalar('VAL_CAP50/Delta2', avg50.average().delta2, epoch)
    logger.add_scalar('VAL_CAP50/Delta3', avg50.average().delta3, epoch)
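utils.label2depth_sid is not shown in this snippet. A hedged sketch of what such a helper might do, assuming the spacing-increasing discretization (SID) used by DORN-style ordinal regression (alpha, beta, K as in the call above, gamma as an optional shift):

import torch

def label2depth_sid(label, K=80.0, alpha=1.0, beta=80.0, gamma=0.0):
    # Hypothetical inverse of SID binning: bin edges t_k = alpha * (beta/alpha)**(k/K),
    # with each ordinal label mapped to the centre of its bin.
    label = label.float()
    t0 = alpha * (beta / alpha) ** (label / K)
    t1 = alpha * (beta / alpha) ** ((label + 1) / K)
    return (t0 + t1) / 2.0 + gamma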
Example #11
            rgb_input = model.rgb_image
            depth_input = model.sparse_depth
            rgb_sparse = model.sparse_rgb
            depth_target = model.depth_image
            depth_est = model.depth_est

            ### These part save image in vis/ folder
            if i % num == 0:
                img_merge = utils.merge_into_row_with_pred_visualize(
                    rgb_input, depth_input, rgb_sparse, depth_target,
                    depth_est)
            elif i % num < num - 1:
                row = utils.merge_into_row_with_pred_visualize(
                    rgb_input, depth_input, rgb_sparse, depth_target,
                    depth_est)
                img_merge = utils.add_row(img_merge, row)
            elif i % num == num - 1:
                filename = 'vis/' + str(i) + '.png'
                utils.save_image(img_merge, filename)

            i += 1

            print('test epoch {0:}, iters: {1:}/{2:} '.format(
                epoch, epoch_iter,
                len(test_dataset) * test_opt.batch_size),
                  end='\r')
            print('RMSE={result.rmse:.4f}({average.rmse:.4f}) '
                  'MSE={result.mse:.4f}({average.mse:.4f}) '
                  'MAE={result.mae:.4f}({average.mae:.4f}) '
                  'Delta1={result.delta1:.4f}({average.delta1:.4f}) '
                  'Delta2={result.delta2:.4f}({average.delta2:.4f}) '
Example #12
def validate(val_loader, model, epoch, write_to_file=True, logger=None):
    average_meter = AverageMeter()
    if args.arch in multistage_group:
        average_meter_stage1 = AverageMeter()
    
    # Include daynight info and rain condition
    avg_meter_day = AverageMeter()
    avg_meter_night = AverageMeter()

    # day, night, sun, rain combinations
    avg_meter_day_sun = AverageMeter()
    avg_meter_day_rain = AverageMeter()
    avg_meter_night_sun = AverageMeter()
    avg_meter_night_rain = AverageMeter()

    # sun and rain
    avg_meter_sun = AverageMeter()
    avg_meter_rain = AverageMeter()

    model.eval() # switch to evaluate mode
    end = time.time()

    # Save something to draw??
    if logger is None:
        import h5py
        output_path = os.path.join(output_directory, "results.h5")
        h5_writer = h5py.File(output_path, "w", libver="latest", swmr=True)

    for i, data in enumerate(val_loader):
        # Add compatibility for nuscenes
        if args.data != "nuscenes":
            inputs, target = data[0].cuda(), data[1].cuda()
        else:
            inputs, target = data["inputs"].cuda(), data["labels"].cuda()
            
        torch.cuda.synchronize()
        data_time = time.time() - end

        # Compute output
        end = time.time()
        with torch.no_grad():
            if args.arch in multistage_group:
                pred_ = model(inputs)
                pred1 = pred_["stage1"]
                pred = pred_["stage2"]
            else:
                pred = model(inputs)
                pred_ = None

        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # Record for qualitative results
        if (logger is None) and (i % 5 == 0):
            pred_np = {}
            if pred_ is None:
                pred_np = pred.cpu().numpy()
            else:
                for key in pred_.keys():
                    pred_np[key] = pred_[key][0, ...].cpu().numpy()
            res = {
                "inputs": data["inputs"][0, ...].cpu().numpy(),
                "lidar_depth": data["lidar_depth"][0, ...].cpu().numpy(),
                "radar_depth": data["radar_depth"][0, ...].cpu().numpy(),
                "pred": pred_np
            }
            file_key = "%05d"%(i)
            f_group = h5_writer.create_group(file_key)
            # Store data
            for key, output_data in res.items():
                if isinstance(output_data, dict):
                    for sub_key, data_ in output_data.items():
                        if sub_key in res.keys():
                            sub_key = sub_key + "*"
                        f_group.create_dataset(sub_key, data=data_, compression="gzip")
                elif output_data is None:
                    pass
                else:    
                    f_group.create_dataset(key, data=output_data, compression="gzip")

        # Measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, inputs.size(0))
        if args.arch in multistage_group:
            result_stage1 = Result()
            result_stage1.evaluate(pred1.data, target.data)
            average_meter_stage1.update(result_stage1, gpu_time, data_time, inputs.size(0))
        end = time.time()
        
        # Record the day, night, rain info
        assert inputs.size(0) == 1
        daynight_info = data["daynight_info"][0]
        if ("day" in daynight_info) and ("rain" in daynight_info):
            avg_meter_day_rain.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_day.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_rain.update(result, gpu_time, data_time, inputs.size(0))
        elif "day" in daynight_info:
            avg_meter_day_sun.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_day.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_sun.update(result, gpu_time, data_time, inputs.size(0))

        if ("night" in daynight_info) and ("rain" in daynight_info):
            avg_meter_night_rain.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_night.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_rain.update(result, gpu_time, data_time, inputs.size(0))
        elif "night" in daynight_info:
            avg_meter_night_sun.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_night.update(result, gpu_time, data_time, inputs.size(0))
            avg_meter_sun.update(result, gpu_time, data_time, inputs.size(0))

        
        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = inputs
            elif args.modality == 'rgbd':
                rgb = inputs[:,:3,:,:]
                depth = inputs[:,3:,:,:]

            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8*skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target, pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8*skip:
                filename = output_directory + '/comparison_' + str(epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i+1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                   i+1, len(val_loader), gpu_time=gpu_time, result=result, average=average_meter.average()))

    # Save the result to pkl file
    if logger is None:
        h5_writer.close()
    avg = average_meter.average()
    if args.arch in multistage_group:
        avg_stage1 = average_meter_stage1.average()
        if logger is not None:
            record_test_scalar_summary(avg_stage1, epoch, logger, "Test_stage1")

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'Rel={average.absrel:.3f}\n'
          'Log10={average.lg10:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          't_GPU={time:.3f}\n'.format(
        average=avg, time=avg.gpu_time))

    if logger is not None:
        # Record summaries
        record_test_scalar_summary(avg, epoch, logger, "Test")

    print('\n*\n'
        'RMSE={average.rmse:.3f}\n'
        'MAE={average.mae:.3f}\n'
        'Delta1={average.delta1:.3f}\n'
        'REL={average.absrel:.3f}\n'
        'Lg10={average.lg10:.3f}\n'
        't_GPU={time:.3f}\n'.format(
        average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel, 'lg10': avg.lg10,
                'mae': avg.mae, 'delta1': avg.delta1, 'delta2': avg.delta2, 'delta3': avg.delta3,
                'data_time': avg.data_time, 'gpu_time': avg.gpu_time})

    return avg, img_merge
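The qualitative results written above land in results.h5, with one group per saved iteration. A hedged read-back sketch (the file path and dataset names follow the writer code above; the prediction may be a single dataset or several stage outputs depending on the arch):

import h5py

with h5py.File("results.h5", "r") as f:
    for key in sorted(f.keys()):            # groups named "00000", "00005", ...
        group = f[key]
        inputs = group["inputs"][...]
        lidar_depth = group["lidar_depth"][...]
        preds = {k: group[k][...] for k in group.keys()
                 if k not in ("inputs", "lidar_depth", "radar_depth")}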
Example #13
def validate(val_loader, model, epoch, write_to_file=True):
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    eval_file = output_directory + '/evaluation.csv'
    f = open(eval_file, "w+")
    f.write("Max_Error,Depth,RMSE,GPU_TIME,Number_Of_Frame\r\n")
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        # torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        # torch.cuda.synchronize()
        gpu_time = time.time() - end

        abs_err = (target.data - pred.data).abs().cpu()
        max_err_ind = np.unravel_index(np.argmax(abs_err, axis=None),
                                       abs_err.shape)

        max_err_depth = target.data[max_err_ind]
        max_err = abs_err[max_err_ind]

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        f.write(
            f'{max_err},{max_err_depth},{result.rmse:.2f},{gpu_time},{i+1}\r\n'
        )
        # save 8 images for visualization
        skip = 50

        if args.modality == 'rgb':
            rgb = input

        if i == 0:
            img_merge = utils.merge_into_row_with_gt(rgb, target, pred,
                                                     (target - pred).abs())
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row_with_gt(rgb, target, pred,
                                               (target - pred).abs())
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = output_directory + '/comparison_' + str(epoch) + '.png'
            utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))
    f.close()
    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })
    return avg, img_merge
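The max-error bookkeeping above combines np.argmax over the flattened error tensor with np.unravel_index to recover a 4-D index. A tiny standalone illustration of that pattern:

import numpy as np
import torch

abs_err = torch.rand(1, 1, 4, 4)                  # stand-in for (target - pred).abs().cpu()
flat_idx = np.argmax(abs_err.numpy(), axis=None)  # index into the flattened array
b, c, h, w = np.unravel_index(flat_idx, abs_err.shape)
assert abs_err[b, c, h, w] == abs_err.max()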
Example #14
def validate(val_loader, model, epoch, write_to_file=True):
    average_meter = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda(
        )  # judging from the code below, target is the ground-truth depth map
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        depth_pred = model(input_var)
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        output1 = torch.index_select(depth_pred.data, 1,
                                     torch.cuda.LongTensor([0]))
        result.evaluate(output1, target)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                rgb = input[:, :3, :, :]

            if i == 0:
                img_merge = utils.merge_into_row(rgb, target, depth_pred)
            # sample one image every `skip` frames for visualization
            elif (i < 8 * skip) and (i % skip == 0):  # `and` is the equivalent of && in C++
                row = utils.merge_into_row(rgb, target, depth_pred)
                img_merge = utils.add_row(img_merge, row)  # append a row
            elif i == 8 * skip:  # keep only 8 rows; write the image once all 8 are collected
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'  # str() converts the epoch number to a string
                utils.save_image(img_merge,
                                 filename)  # tip: keep reusable helpers like this in a shared utility module

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })

    return avg, img_merge
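torch.index_select with a one-element index tensor, as used above to keep the first output channel, is equivalent to slicing with a preserved channel dimension. A small illustration (on CPU, so LongTensor instead of cuda.LongTensor):

import torch

depth_pred = torch.rand(4, 2, 8, 8)
ch0_select = torch.index_select(depth_pred, 1, torch.LongTensor([0]))
ch0_slice = depth_pred[:, 0:1]
assert torch.equal(ch0_select, ch0_slice)   # both have shape (4, 1, 8, 8)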
Example #15
def validate_coarse(val_loader, model, epoch):
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            rgb = input
            upsample_size = (rgb.size()[2], rgb.size()[3])
            upsample = torch.nn.Upsample(size=upsample_size,
                                         mode='bilinear',
                                         align_corners=True)
            target = upsample(target)
            pred = upsample(pred)

            if i == 0:
                img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/coarse_comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    return avg, img_merge
Example #16
def validate(val_loader, model, epoch, write_to_file=True):
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        # io.imshow(np.squeeze(input[:,3:,:,:].cpu().numpy()), interpolation='nearest')
        # io.show()
        #print(input.size())
        input, target = input.cuda(), target.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        if visualize:
            if args.modality == 'd':
                fig = plt.figure()
                fig.suptitle(
                    'Error Percentage ' + str(round(result.absrel * 100, 2)) +
                    ' GPU TIME ' + str(round(gpu_time, 2)) + '   FPS ' +
                    str(round(60.0 / (gpu_time + data_time), 2)),
                    fontsize=16)

                plt.subplot(131)
                plt.title("SPARSE (Input)")
                plt.axis('off')
                plt.imshow(np.squeeze(input.cpu().numpy()),
                           interpolation='nearest')

                plt.subplot(132)
                plt.title("TARGET (Ground Truth)")
                plt.axis('off')
                plt.imshow(np.squeeze(target.cpu().numpy()),
                           interpolation='nearest')
                plt.colorbar(fraction=0.1, pad=0.04)

                plt.subplot(133)
                plt.title("PREDICTED")
                plt.axis('off')
                plt.imshow(np.squeeze(pred.cpu().numpy()),
                           interpolation='nearest')
                plt.colorbar(fraction=0.1, pad=0.04)

                # plt.waitforbuttonpress(timeout=2)
                # plt.close()

                plt.waitforbuttonpress()
                plt.close()

            if args.modality == 'rgbd':
                # sparse = np.squeeze(input[:, 3:, :, :].cpu().numpy())
                # print(sparse.shape)
                # sleep(3)

                fig = plt.figure()

                fig.suptitle(
                    'Error Percentage ' + str(round(result.absrel * 100, 2)) +
                    ' GPU TIME ' + str(round(gpu_time, 2)) + '   FPS ' +
                    str(round(60.0 / (gpu_time + data_time), 2)),
                    fontsize=16)

                rgb1 = 255 * np.transpose(
                    np.squeeze(input[:, :3, :, :].cpu().numpy()),
                    (1, 2, 0))  # H, W, C
                rgb1 = Image.fromarray(rgb1.astype('uint8'))
                plt.subplot(221)
                plt.title("RGB (Input)")
                plt.axis('off')
                plt.imshow(rgb1)

                plt.subplot(222)
                plt.title("SPARSE (Input)")
                plt.axis('off')
                plt.imshow(np.squeeze(input[:, 3:, :, :].cpu().numpy()),
                           interpolation='nearest')

                plt.subplot(223)
                plt.title("TARGET (Ground Truth)")
                plt.axis('off')
                plt.imshow(np.squeeze(target.cpu().numpy()),
                           interpolation='nearest')
                plt.colorbar(fraction=0.1, pad=0.04)

                plt.subplot(224)
                plt.title("PREDICTED")
                plt.axis('off')
                plt.imshow(np.squeeze(pred.cpu().numpy()),
                           interpolation='nearest')
                plt.colorbar(fraction=0.1, pad=0.04)

                # plt.waitforbuttonpress(timeout=2)
                # plt.close()

                plt.waitforbuttonpress()
                plt.close()


        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                rgb = input[:, :3, :, :]
                depth = input[:, 3:, :, :]

            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(
                        rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target,
                                                       pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })

    return avg, img_merge
Example #17
def validate(val_loader, model, epoch):
    average_meter = AverageMeter()

    model.eval()  # switch to evaluate mode

    end = time.time()

    skip = len(val_loader) // 8  # save images every skip iters
    count_b = 0
    count = 0
    x = random.randint(0, len(val_loader))
    for i, (input, target, label, mask) in enumerate(val_loader):

        input, target = input.cuda(), target.cuda()
        input = torch.squeeze(input, 0)
        mask = mask.cuda()
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred, pred_mask, c1, c2, c3 = model(input)
            # pred,c1,c2,c3 = model(input)
        b, c, h, w = pred.size()

        # temp0 = torch.zeros_like(pred_mask)
        # temp1 = torch.ones_like(pred_mask)
        # pred_mask2 = torch.where(pred_mask>0.5,temp1,temp0)
        #pred[:,1,:,:] = pred[:,1,:,:] * pred_mask2

        temp0_2 = torch.zeros_like(c1)
        temp1_2 = torch.ones_like(c1)
        c1_2 = torch.where(c1 > 0.5, temp1_2, temp0_2)
        c2_2 = torch.where(c2 > 0.5, temp1_2, temp0_2)
        c3_2 = torch.where(c3 > 0.5, temp1_2, temp0_2)

        torch.cuda.synchronize()

        gpu_time = time.time() - end

        target = torch.squeeze(target, 1)
        # measure accuracy and record loss
        c1_2 = c1_2.cpu().numpy()
        c2_2 = c2_2.cpu().numpy()
        c3_2 = c3_2.cpu().numpy()
        l = label.numpy()
        for k in range(l.shape[0]):
            if c1_2[k] == 0 and c2_2[k] == 0 and c3_2[k] == 0 and l[k] == -1:
                count_b += 1
            if c1_2[k] == 1 and c2_2[k] == 0 and c3_2[k] == 0 and l[k] == 0:
                count_b += 1
            if c1_2[k] == 0 and c2_2[k] == 1 and c3_2[k] == 0 and l[k] == 1:
                count_b += 1
            if c1_2[k] == 0 and c2_2[k] == 0 and c3_2[k] == 1 and l[k] == 2:
                count_b += 1
        count += l.shape[0]
        result = Result()
        result.evaluate(pred, target, label)

        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        rgb = input
        # print(rgb.size(),target.size(),pred.size())
        # exit(0)

        # if i == x:
        #     img_merge = utils.merge_into_row(rgb, target, pred, pred_mask2,label)
        #     filename = output_directory + '/comparison_' + str(epoch) + '.png'
        #     utils.save_image(img_merge, filename)
        if i == 0:
            img_merge = utils.merge_into_row(
                rgb, target, pred, target,
                label)  # (rgb, target, pred, pred_mask2,label)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred, target, label)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = output_directory + '/comparison_' + str(epoch) + '.png'
            utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print("acc: %f" % (count_b * 1.0 / count))
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'MAE={result.mae:.2f}({average.mae:.2f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))
    avg = average_meter.average()

    print("epoch: %d, acc: %f" % (epoch, count_b * 1.0 / count))
    print('\n*\n'
          'MAE={average.mae:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    return avg, img_merge
Example #18
def main():
    args = parser.parse_args()
    image_shape = (192, 256)
    args.data = os.path.join(os.environ["DATASET_DIR"], "nyudepthv2")

    dataset_no_depth = NYUDataset(args.data,
                                  phase='val',
                                  modality="rgbd",
                                  num_samples=0,
                                  square_width=0,
                                  output_shape=image_shape,
                                  depth_type="square")

    dataset_square_hq = NYUDataset(args.data,
                                   phase='val',
                                   modality="rgbd",
                                   num_samples=0,
                                   square_width=50,
                                   output_shape=image_shape,
                                   depth_type="square")

    dataset_square_lq = NYUDataset(args.data,
                                   phase='val',
                                   modality="rgbd",
                                   num_samples=0,
                                   square_width=50,
                                   output_shape=image_shape,
                                   depth_type="low-quality-square")

    dataset_square_random_sample = NYUDataset(args.data,
                                              phase='val',
                                              modality="rgbd",
                                              num_samples=200,
                                              output_shape=image_shape,
                                              depth_type="full")

    dataset_single_pixel = NYUDataset(args.data,
                                      phase='val',
                                      modality="rgbd",
                                      num_samples=0,
                                      output_shape=image_shape,
                                      depth_type="single-pixel")
    dataset_single_pixel_lq = NYUDataset(args.data,
                                         phase='val',
                                         modality="rgbd",
                                         num_samples=0,
                                         output_shape=image_shape,
                                         depth_type="single-pixel-low-quality")

    images = []
    image_idx = 223

    #Create image with various possible depthmaps, not only from this work
    _, y = dataset_no_depth[image_idx]
    y = y.cuda().unsqueeze(0)
    images.append(y)
    for dataset in [
            dataset_square_hq, dataset_square_lq, dataset_square_random_sample
    ]:
        x, _ = dataset[image_idx]
        x = x.cuda().unsqueeze(0)
        images.append(x)
    square_shapes = [
        None, dataset_square_hq.square, dataset_square_hq.square, None
    ]
    im_merge_depth = merge_ims_into_row(images,
                                        rgbd_action="depth_only",
                                        square=square_shapes)

    os.makedirs(args.save_dir)
    depth_fn = os.path.join(args.save_dir, "depth.png")
    save_image(im_merge_depth, depth_fn)

    #Create image with source image
    x, y = dataset_no_depth[image_idx]
    x, y = x.cuda().unsqueeze(0), y.cuda().unsqueeze(0)
    im_merge_original = merge_ims_into_row([x, y], rgbd_action="rgb_only")
    original_fn = os.path.join(args.save_dir, "original.png")
    save_image(im_merge_original, original_fn)

    # Create an image with all the depth maps investigated in this work
    # First row: high-quality (HQ) depth inputs
    images = []
    squares = []
    x, _ = dataset_single_pixel[image_idx]
    images.append(x.cuda().unsqueeze(0))
    squares.append(None)

    for square_width in [10, 50, 100]:
        dataset_square_hq.square_width = square_width
        x, _ = dataset_square_hq[image_idx]
        images.append(x.cuda().unsqueeze(0))
        squares.append(dataset_square_hq.square)

    im_merge_depths_hq = merge_ims_into_row(images,
                                            square=squares,
                                            rgbd_action="depth_only")

    # Second row: low-quality (LQ) depth inputs
    images = []
    squares = []

    x, _ = dataset_single_pixel_lq[image_idx]
    images.append(x.cuda().unsqueeze(0))
    squares.append(None)

    for square_width in [10, 50, 100]:
        dataset_square_lq.square_width = square_width
        x, _ = dataset_square_lq[image_idx]
        images.append(x.cuda().unsqueeze(0))
        squares.append(dataset_square_lq.square)

    im_merge_depths_lq = merge_ims_into_row(images,
                                            square=squares,
                                            rgbd_action="depth_only")
    im_merge_depths = add_row(im_merge_depths_hq, im_merge_depths_lq)
    depths_fn = os.path.join(args.save_dir, "depths.png")
    save_image(im_merge_depths, depths_fn)
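
Both main() above and the validate() functions below build their comparison figures by concatenating image rows. The utils.add_row implementation itself is not shown in these examples; as a rough sketch of the behaviour the calls rely on, a vertical-stacking helper (assuming rows are H x W x 3 arrays and padding mismatched widths with black) could look like the following. The name stack_rows is illustrative and not taken from the snippets.

import numpy as np


def stack_rows(grid, row):
    # Hypothetical stand-in for utils.add_row: append an image row below an
    # existing grid. Both arguments are H x W x 3 arrays; the narrower one is
    # padded with black pixels so the widths match before stacking.
    if grid is None:
        return row
    width = max(grid.shape[1], row.shape[1])

    def pad(im):
        if im.shape[1] == width:
            return im
        fill = np.zeros((im.shape[0], width - im.shape[1], im.shape[2]),
                        dtype=im.dtype)
        return np.hstack([im, fill])

    return np.vstack([pad(grid), pad(row)])

With a helper like this, im_merge_depths above is simply the HQ row stacked on top of the LQ row before the single save_image call.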
Ejemplo n.º 19
0
def validate(val_loader, model, epoch, write_to_file=True):
    average_meter = AverageMeter()
    model.eval()  # switch to evaluate mode
    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        # torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        # print("Predicted shape ", pred.shape)  # output shape is (1, 1, 224, 224)
        # torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50

        if args.modality == 'rgb':
            rgb = input
        else:
            # assumption: for other modalities (e.g. 'rgbd') the first three
            # channels hold the colour image; without this branch, rgb would
            # be undefined below
            rgb = input[:, :3, :, :]

        # NOTE: hard-coded Google Drive path from the original Colab run
        output_dir = "/content/drive/MyDrive/Code/fast-depth/results2/"
        filename_gt = output_dir + "gt_" + str(i) + ".jpg"
        filename_predicted = output_dir + "predicted_" + str(i) + ".jpg"

        gt, predicted = utils.get_depth_map(rgb, target, pred)
        print("Predicted shape ", predicted.shape)
        # scipy.misc.toimage was deprecated and later removed from SciPy, so
        # the per-image saves below are left disabled
        # import scipy.misc
        # scipy.misc.toimage(gt).save(filename_gt)
        # scipy.misc.toimage(predicted).save(filename_predicted)

        if i == 0:
            img_merge = utils.merge_into_row(rgb, target, pred)
        elif (i < 8 * skip) and (i % skip == 0):
            row = utils.merge_into_row(rgb, target, pred)
            img_merge = utils.add_row(img_merge, row)
        elif i == 8 * skip:
            filename = output_directory + '/comparison_' + str(epoch) + '.png'
            utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })
    return avg, img_merge
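
The write_to_file branch of this validate() appends one row per call with csv.DictWriter, but test_csv and fieldnames are module-level globals that the snippet does not define. A minimal sketch of the setup the append assumes (the init_csv helper and the path are illustrative, not from the original script) might be:

import csv
import os

# Field names matching the keys written by validate() above.
fieldnames = [
    'mse', 'rmse', 'absrel', 'lg10', 'mae',
    'delta1', 'delta2', 'delta3', 'data_time', 'gpu_time'
]
test_csv = os.path.join("results", "test.csv")  # illustrative path


def init_csv(path, names):
    # Write the header once so later 'a'-mode appends only add metric rows.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if not os.path.exists(path):
        with open(path, 'w', newline='') as csvfile:
            csv.DictWriter(csvfile, fieldnames=names).writeheader()

The 'a' open mode in validate() then appends one metrics row per epoch without rewriting the header.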
Ejemplo n.º 20
0
def validate(val_loader, model, epoch, write_to_file=True):
    average_meter = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        # torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        with torch.no_grad():
            pred = model(input)
        # torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        # save 8 images for visualization
        skip = 50
        if args.modality == 'd':
            img_merge = None
        else:
            if args.modality == 'rgb':
                rgb = input
            elif args.modality == 'rgbd':
                rgb = input[:, :3, :, :]
                depth = input[:, 3:, :, :]

            if i == 0:
                if args.modality == 'rgbd':
                    img_merge = utils.merge_into_row_with_gt(
                        rgb, depth, target, pred)
                else:
                    img_merge = utils.merge_into_row(rgb, target, pred)
            elif (i < 8 * skip) and (i % skip == 0):
                if args.modality == 'rgbd':
                    row = utils.merge_into_row_with_gt(rgb, depth, target,
                                                       pred)
                else:
                    row = utils.merge_into_row(rgb, target, pred)
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)

        if (i + 1) % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f})\n\t'
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
                      i + 1,
                      len(val_loader),
                      gpu_time=gpu_time,
                      result=result,
                      average=average_meter.average()))

    avg = average_meter.average()

    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'
          't_GPU={time:.3f}\n'.format(average=avg, time=avg.gpu_time))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'absrel': avg.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'delta1': avg.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'data_time': avg.data_time,
                'gpu_time': avg.gpu_time
            })

    return avg, img_merge
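
For the 'rgbd' modality this variant splits the 4-channel network input back into its colour and depth parts before visualisation; the slicing implies the first three channels are RGB and the remainder is depth. A tiny self-contained check of that layout on a dummy batch (shapes are illustrative):

import torch

batch = torch.rand(2, 4, 192, 256)  # dummy RGBD batch: N x (RGB + depth) x H x W

rgb = batch[:, :3, :, :]    # first three channels: colour image
depth = batch[:, 3:, :, :]  # remaining channel(s): input depth

assert rgb.shape == (2, 3, 192, 256)
assert depth.shape == (2, 1, 192, 256)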
Ejemplo n.º 21
0
def validate(
    val_loader,
    square_width,
    modality,
    output_directory,
    print_freq,
    test_csv,
    model: torch.nn.Module,
    epoch: int,
    write_to_file: bool = True,
) -> typing.Tuple[Result, Result, Result, np.ndarray, typing.List[MaskedResult],
                  evaluate.Evaluator]:
    average_meter = AverageMeter()
    inside_average_meter = AverageMeter()
    outside_average_meter = AverageMeter()

    # switch to evaluate mode
    model.eval()
    evaluator = evaluate.Evaluator(val_loader.dataset.output_shape,
                                   square_width)
    end = time.time()
    results = []
    for i, (input, target) in enumerate(val_loader):
        input, target = input.cuda(), target.cuda()
        # torch.autograd.Variable is effectively a no-op on PyTorch >= 0.4
        # (Variables and Tensors were merged); kept as in the original
        input_var = torch.autograd.Variable(input)
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute output
        end = time.time()
        depth_pred = model(input_var)
        torch.cuda.synchronize()
        gpu_time = time.time() - end
        # measure accuracy and record loss
        output1 = torch.index_select(depth_pred.data, 1,
                                     torch.cuda.LongTensor([0]))
        evaluator.add_results(output1, target)
        # assume all squares are of the same size
        result = MaskedResult(val_loader.dataset.mask_inside_square)
        result.evaluate(output1, target)
        results.append(result)
        average_meter.update(result.result, gpu_time, data_time, input.size(0))
        inside_average_meter.update(result.result_inside, gpu_time, data_time,
                                    input.size(0))
        outside_average_meter.update(result.result_outside, gpu_time,
                                     data_time, input.size(0))
        end = time.time()
        # save 8 images for visualization
        skip = 50
        if modality == 'd':
            img_merge = None
        else:
            if i == 0:
                img_merge = utils.merge_ims_into_row(
                    [input, target, depth_pred], rgbd_action="both")
            elif (i < 8 * skip) and (i % skip == 0):
                row = utils.merge_ims_into_row([input, target, depth_pred],
                                               rgbd_action="both")
                img_merge = utils.add_row(img_merge, row)
            elif i == 8 * skip:
                filename = output_directory + '/comparison_' + str(
                    epoch) + '.png'
                utils.save_image(img_merge, filename)
        average = average_meter.average()
        if (i + 1) % print_freq == 0:
            #print('=> output: {}'.format(output_directory))
            def print_result(result, result_name):
                stdout.write(
                    f'Validation Epoch: {epoch} [{i + 1}/{len(val_loader)}]\t'
                    f"{result_name}: "
                    #f't_Data={data_time:.3f}({average.data_time:.3f}) '
                    #f't_GPU={gpu_time:.3f}({average.gpu_time:.3f}) '
                    f'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                    f'MAE={result.mae:.2f}({average.mae:.2f}) '
                    f'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                    #f'REL={result.absrel:.3f}({average.absrel:.3f}) '
                    #f'Lg10={result.lg10:.3f}({average.lg10:.3f}) \n'
                    '\n')

            print_result(result.result, "result")

    avg = average_meter.average()
    avg_inside = inside_average_meter.average()
    avg_outside = outside_average_meter.average()
    avg.name = "average"
    avg_inside.name = "average inside"
    avg_outside.name = "average outside"

    gpu_time = average.gpu_time
    print('\n*\n' + str(avg) + "\n" + str(avg_inside) + "\n" +
          str(avg_outside))

    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({
                'mse': avg.mse,
                'rmse': avg.rmse,
                'rmse inside': avg_inside.rmse,
                'rmse outside': avg_outside.rmse,
                'absrel': avg.absrel,
                'absrel inside': avg_inside.absrel,
                'absrel outside': avg_outside.absrel,
                'lg10': avg.lg10,
                'mae': avg.mae,
                'mae inside': avg_inside.mae,
                'mae outside': avg_outside.mae,
                'delta1': avg.delta1,
                'delta1 inside': avg_inside.delta1,
                'delta1 outside': avg_outside.delta1,
                'delta2': avg.delta2,
                'delta3': avg.delta3,
                'gpu_time': avg.gpu_time,
                'data_time': avg.data_time
            })
    evaluator.save_plot(
        os.path.join(output_directory, f"evaluation_epoch{epoch}.png"))

    return avg, avg_inside, avg_outside, img_merge, results, evaluator
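
This last variant keeps three running averages: over the whole image, inside the injected depth square, and outside it. The MaskedResult/mask_inside_square implementation is not shown; a minimal sketch of the underlying idea, with an RMSE restricted to a boolean mask and invalid (non-positive) ground-truth pixels skipped, could be the following. The helper name, the square coordinates, and the random tensors are all illustrative.

import torch


def masked_rmse(pred, target, mask):
    # RMSE over the pixels selected by `mask`, ignoring invalid targets.
    # Illustrative only; the real MaskedResult computes a full metric set
    # (MAE, REL, delta thresholds, ...) in the same masked fashion.
    valid = (target > 0) & mask
    if valid.sum() == 0:
        return float('nan')
    diff = pred[valid] - target[valid]
    return torch.sqrt(torch.mean(diff ** 2)).item()


# Usage sketch: evaluate inside and outside a centred 50 x 50 square.
h, w = 192, 256
inside = torch.zeros(h, w, dtype=torch.bool)
inside[(h - 50) // 2:(h + 50) // 2, (w - 50) // 2:(w + 50) // 2] = True

pred = torch.rand(h, w) * 10
target = torch.rand(h, w) * 10
print(masked_rmse(pred, target, inside), masked_rmse(pred, target, ~inside))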