def predict_height(depthmap_file: str, rgb_file: str,
                   calibration_file: str) -> float:

    # Load the depthmap and get the camera angle (only available for captures from new devices)
    dmap = Depthmap.create_from_zip_absolute(depthmap_file, 0,
                                             calibration_file)
    angle = dmap.get_angle_between_camera_and_floor()

    # Run segmentation
    im = Image.open(rgb_file).rotate(-90, expand=True)
    resized_im, seg_map = DEEPLAB_MODEL.run(im)
    seg_map[seg_map != PERSON_SEGMENTATION] = 0

    # Check if the child's head is fully visible
    boundary = calculate_boundary(seg_map)
    if boundary[0] <= 0:
        raise Exception(
            'Skipping because the child\'s head is not fully visible')

    # Estimate the child's distance from the camera and upscale the depthmap to the segmentation map's size
    floor = dmap.get_floor_level()
    mask = dmap.detect_floor(floor)
    depth = dmap.get_distance_of_child_from_camera(mask)
    dmap.resize(seg_map.shape[0], seg_map.shape[1])
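    # Replace all depth values with the child's estimated distance from the camera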
    dmap.depthmap_arr[:, :] = depth

    # Calculate height
    seg_map[seg_map == PERSON_SEGMENTATION] = MASK_CHILD
    highest = dmap.get_highest_point(seg_map)[1]
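    # Correction factor for the camera tilt (grows with the angle between the camera and the floor)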
    factor = 1.0 + math.sin(math.radians(angle)) * HEIGHT_SCALE_FACTOR
    height_in_cm = factor * (highest - floor) * 100.0 + HEIGHT_OFFSET_IN_CM
    return height_in_cm
Example 2
def test_blur_face():
    dmap = Depthmap.create_from_zip_absolute(DEPTHMAP_FPATH, RGB_FPATH, CALIBRATION_FPATH)

    # Find top of the object
    floor = dmap.get_floor_level()
    assert floor == pytest.approx(-0.9706086, 0.001)
    mask = dmap.segment_child(floor)
    highest_point = dmap.get_highest_point(mask)  # 3D

    # Render the color data
    output_unblurred = render_rgb(dmap)

    # Blur
    output_blurred = blur_face(output_unblurred, highest_point, dmap, CHILD_HEAD_HEIGHT_IN_METERS)

    # Assert that some pixels in the whole image changed (the images are not identical)
    all_count = dmap.width * dmap.height
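    # count_nonzero runs over all three colour channels, so divide by 3 to approximate the number of changed pixels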
    count = np.count_nonzero(output_unblurred - output_blurred) / 3
    ratio_blurred = count / all_count
    assert 0.01 < ratio_blurred < 0.9

    # Assert that blurred around object
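    # OFFSET_X_Y is the expected object position in normalized coordinates (y measured from the bottom)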
    object_x = int(dmap.width * OFFSET_X_Y[0])
    object_y = int(dmap.height * (1.0 - OFFSET_X_Y[1]))
    slice_x = slice(object_x - 2, object_x + 2)
    slice_y = slice(object_y - 2, object_y + 2)
    assert (output_unblurred[slice_x, slice_y] != output_blurred[slice_x, slice_y]).any()

    # Assert that NOT blurred around corner
    corner_x = 0
    corner_y = 0
    slice_x = slice(corner_x, corner_x + 4)
    slice_y = slice(corner_y, corner_y + 4)
    np.testing.assert_array_equal(output_unblurred[slice_x, slice_y], output_blurred[slice_x, slice_y])
Example 3
def create_layers_rgbd(depthmap_fpath: str, rgb_fpath: str,
                       should_rotate_rgb: bool) -> Tuple[np.ndarray, dict]:
    if should_rotate_rgb:
        dmap = Depthmap.create_from_zip_absolute(depthmap_fpath, rgb_fpath,
                                                 CALIBRATION_FPATH)
    else:
        dmap = rotate_and_load_depthmap_with_rgbd(depthmap_fpath, rgb_fpath,
                                                  CALIBRATION_FPATH)

    if not dmap.device_pose:
        raise InvalidDevicePoseError()

    depthmap = dmap.depthmap_arr  # shape: (longer, shorter)
    depthmap = preprocess(depthmap)  # shape (longer, shorter, 1)

    rgb = dmap.rgb_array  # shape (longer, shorter, 3)
    rgb = preprocess_rgb(rgb)  # shape (longer, shorter, 3)

    layers = np.concatenate(
        [
            depthmap,  # shape (longer, shorter, 1)
            rgb,  # shape (longer, shorter, 3)
        ],
        axis=2)  # shape (longer, shorter, 4)

    metadata = {
        'device_pose': dmap.device_pose,
        'raw_header': dmap.header,
        'angle': dmap.get_angle_between_camera_and_floor(),
    }
    return layers, metadata
Example 4
def test_depthmap():
    dmap = Depthmap.create_from_zip_absolute(DEPTHMAP_FPATH, RGB_FPATH,
                                             CALIBRATION_FILE)
    assert dmap.width == 240
    assert dmap.height == 180

    dmap_intrinsics = np.array([dmap.fx, dmap.fy, dmap.cx, dmap.cy])
    expected_intrinsics = np.array(
        [162.883128, 162.881251, 119.004372, 90.630756])
    np.testing.assert_array_almost_equal(dmap_intrinsics, expected_intrinsics)
    assert dmap.max_confidence == 7.
    assert dmap.depth_scale == 0.001

    floor = dmap.get_floor_level()
    mask = dmap.segment_child(floor)
    highest_point = dmap.get_highest_point(mask)
    child_height_in_m = highest_point[1] - floor
    assert 0 < child_height_in_m < 1.2
    assert mask.shape[:2] == dmap.rgb_array.shape[:2]

    angle_in_degrees = dmap.get_angle_between_camera_and_floor()
    assert -90 < angle_in_degrees < 90

    distance_in_m = dmap.get_distance_of_child_from_camera(mask)
    assert 0.1 < distance_in_m < 5.

    dmap.resize(640, 360)
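    # After resizing, the intrinsics scale with the resize factors: 640/240 in x and 360/180 in y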
    dmap_intrinsics = np.array([dmap.fx, dmap.fy, dmap.cx, dmap.cy])
    expected_intrinsics = np.array(
        [434.355008, 325.762502, 317.344992, 181.261512])
    np.testing.assert_array_almost_equal(dmap_intrinsics, expected_intrinsics)
Example 5
def test_get_highest_point():
    dmap = Depthmap.create_from_zip_absolute(DEPTHMAP_FPATH, RGB_FPATH,
                                             CALIBRATION_FILE)

    # Find top of the object
    floor = dmap.get_floor_level()
    mask = dmap.segment_child(floor)
    highest_point = dmap.get_highest_point(mask)  # 3D

    object_height_in_m = highest_point[1] - floor
    assert 0.3 < object_height_in_m < 0.6
Example 6
def create_layers(depthmap_fpath: str) -> Tuple[np.ndarray, dict]:
    dmap = Depthmap.create_from_zip_absolute(
        depthmap_fpath, rgb_fpath=None, calibration_fpath=CALIBRATION_FPATH)
    depthmap = dmap.depthmap_arr  # shape: (width, height)
    depthmap = preprocess(depthmap)
    layers = depthmap
    if not dmap.device_pose:
        raise InvalidDevicePoseError()
    metadata = {
        'device_pose': dmap.device_pose,
        'raw_header': dmap.header,
        'angle': dmap.get_angle_between_camera_and_floor(),
    }
    return layers, metadata
Example 7
def predict_height(depthmap_file: str, rgb_file: str,
                   calibration_file: str) -> float:

    # Check if the child is fully visible
    dmap = Depthmap.create_from_zip_absolute(depthmap_file, 0,
                                             calibration_file)
    floor = dmap.get_floor_level()
    mask = dmap.segment_child(floor)
    if not dmap.is_child_fully_visible(mask):
        raise Exception('Skipping because the child is not fully visible')

    # Calculate height
    highest_point = dmap.get_highest_point(mask)
    height_in_cm = (highest_point[1] - floor) * 100.0
    return height_in_cm
Example 8
def create_from_rgbd(cls, depthmap_fpath: str, rgb_fpath: str,
                     calibration_fpath: str) -> 'BodyPose':
    dmap = Depthmap.create_from_zip_absolute(depthmap_fpath, 0,
                                             calibration_fpath)
    return cls(dmap, rgb_fpath)
Example 9
def render_prediction_plots(depthmap_file: str, rgb_file: str,
                            calibration_file: str) -> np.array:
    dmap = Depthmap.create_from_zip_absolute(depthmap_file, rgb_file,
                                             calibration_file)
    return render_plot_debug(dmap)
Example 10
    keys = metadata.keys()
    for key_index, key in enumerate(keys):
        valid = []
        for artifact in range(len(metadata[key])):
            data = metadata[key][artifact]

            try:
                # Run segmentation
                rgb_file = (path + data[METADATA_RGB]).replace('"', '')
                im = Image.open(rgb_file).rotate(-90, expand=True)
                resized_im, seg_map = DEEPLAB_MODEL.run(im)
                seg_map[seg_map != PERSON_SEGMENTATION] = 0

                # Get upscaled depthmap
                depthmap_file = (path + data[METADATA_DEPTHMAP]).replace('"', '')
                dmap = Depthmap.create_from_zip_absolute(depthmap_file, 0, calibration_file)
                dmap.resize(seg_map.shape[0], seg_map.shape[1])

                # Fraction of the child's pixels that have a valid (> 0.1 m) depth reading
                cond1 = seg_map == PERSON_SEGMENTATION
                cond2 = dmap.depthmap_arr > 0.1
                count_child_px = len(dmap.depthmap_arr[cond1])
                count_valid_px = len(dmap.depthmap_arr[cond1 & cond2])
                valid.append(count_valid_px / count_child_px)
            except Exception:
                continue

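        # After the artifact loop, append the scan's mean validity ratio to the last processed artifact's row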
        value = 0
        if len(valid) > 0:
            value = np.mean(valid)
        data.append(value)
Example 11
def run_evaluation(path: str, metadata_file: str, calibration_file: str, method: str, one_artifact_per_scan: bool):
    """Runs evaluation process and save results into CSV files

    Args:
        path: Path where the RAW dataset is located
        metadata_file: Path to the CSV file with RAW dataset metadata preprocessed by rgbd_match.py script
        calibration_file: Path to lens calibration file of the device
        method: Method for estimation, available are depthmap_toolkit, ml_segmentation and hrnet variants
        hrnet variants are: hrnet_cv_lying, hrnet_cv_standing, hrnet_ml_lying, hrnet_ml_standing
        one_artifact_per_scan: True to return one artifact per scan (faster), False to return all artifacts (slower)
    """

    is_standing = True
    if method == 'depthmap_toolkit':
        from height_prediction_depthmap_toolkit import predict_height
    elif method == 'ml_segmentation':
        from height_prediction_with_ml_segmentation import predict_height
    elif method == 'hrnet_cv_standing':
        from height_prediction_with_hrnet import predict_height_cv_standing as predict_height
    elif method == 'hrnet_cv_lying':
        from height_prediction_with_hrnet import predict_height_cv_lying as predict_height
        is_standing = False
    elif method == 'hrnet_ml_standing':
        from height_prediction_with_hrnet import predict_height_ml_standing as predict_height
    elif method == 'hrnet_ml_lying':
        from height_prediction_with_hrnet import predict_height_ml_lying as predict_height
        is_standing = False
    else:
        raise Exception('Unimplemented method')

    metadata = filter_metadata(read_csv(metadata_file), is_standing, one_artifact_per_scan)

    output = [header]
    rejections = []
    keys = metadata.keys()
    for key_index, key in enumerate(keys):
        logger.info('Processing %d/%d', key_index + 1, len(keys))

        angles = []
        heights = []
        distances = []
        positions = []
        directions = []
        camera_heights = []
        floors = []
        last_fail = ''
        for artifact in range(len(metadata[key])):
            data = metadata[key][artifact]

            try:

                # Process prediction
                depthmap_file = (path + data[METADATA_DEPTHMAP]).replace('"', '')
                rgb_file = (path + data[METADATA_RGB]).replace('"', '')
                height = predict_height(depthmap_file, rgb_file, calibration_file)
                check_height_prediction(height, is_standing)

                # Get additional data
                dmap = Depthmap.create_from_zip_absolute(depthmap_file, 0, calibration_file)
                if is_google_tango_resolution(dmap.width, dmap.height):
                    raise Exception('Skipping because the data was not captured by a new device')
                floor = dmap.get_floor_level()
                mask = dmap.detect_floor(floor)
                distance = dmap.get_distance_of_child_from_camera(mask)
                angle = dmap.get_angle_between_camera_and_floor()
                position = dmap.device_pose[12:15]
                direction = dmap.get_camera_direction_angle()
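                # Height of the camera above the detected floor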
                camera_height = -position[1] - floor

                floors.append(floor)
                camera_heights.append(camera_height)
                directions.append(direction)
                positions.append(position)
                distances.append(distance)
                heights.append(height)
                angles.append(angle)
            except Exception as exc:
                last_fail = str(exc)
                continue

        info = update_output(
            angles,
            distances,
            heights,
            positions,
            directions,
            camera_heights,
            floors,
            last_fail,
            data,
            output,
            rejections,
            is_standing)
        log_report(generate_report(output, info, is_standing))

    write_csv('output.csv', output)
    write_csv('rejections.csv', rejections)
    write_csv('report.csv', generate_report(output, info, is_standing))
Example 12
def get_joints3d():
    dmap = Depthmap.create_from_zip_absolute(DEPTHMAP_FPATH, 0,
                                             CALIBRATION_FILE)
    floor = dmap.get_floor_level()
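    # Upscale the depthmap, presumably to match the resolution at which the 2D joints were detected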
    dmap.resize(640, 480)
    return convert_2dskeleton_to_3d(dmap, floor, JOINTS, CONFIDENCES)