Code example #1
File: zero_shot_pose.py  Project: rjean/selfsupmotion
# Assumed module-level imports for these excerpts; geo and the get_* helpers
# (get_points, get_plane, get_camera, get_intrinsics) are project-internal
# utilities from selfsupmotion.
import numpy as np
import pandas as pd
from PIL import Image


def get_match_aligned_points(idx: int,
                             match_idx: int,
                             info_df: pd.DataFrame,
                             train_info_df: pd.DataFrame,
                             ground_truth=False):
    """Align the 3D box of the k-th retrieved training sample with a query image.

    Both samples' 2D keypoints are converted to pixel coordinates, their 2D
    bounding boxes are computed, and the training sample's 3D box is aligned
    to the query (validation) bounding box.

    Returns:
        Tuple of (aligned training 3D points, reference 3D points), where the
        reference is the query's ground-truth points if ``ground_truth`` is
        True and the original training points otherwise.
    """
    assert isinstance(match_idx, int)
    points2d_train, points3d_train = get_points(train_info_df, match_idx)
    points2d_valid, points3d_valid = get_points(info_df, idx)

    valid_image = Image.open(info_df.iloc[idx]["filepath_full"])
    train_image = Image.open(train_info_df.iloc[match_idx]["filepath_full"])
    points2d_valid_px = geo.points_2d_to_points2d_px(points2d_valid,
                                                     valid_image.width,
                                                     valid_image.height)
    points2d_train_px = geo.points_2d_to_points2d_px(points2d_train,
                                                     train_image.width,
                                                     train_image.height)
    valid_bbox = geo.get_bbox(points2d_valid_px, valid_image.width,
                              valid_image.height)
    train_bbox = geo.get_bbox(points2d_train_px, train_image.width,
                              train_image.height)
    points3d_train_rotated, _ = geo.align_with_bbox_3d(points3d_train,
                                                       train_bbox, valid_bbox)
    if ground_truth:
        return np.array(points3d_train_rotated), np.array(points3d_valid)
    else:
        return np.array(points3d_train_rotated), np.array(points3d_train)
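A minimal usage sketch for this helper, under the assumption that info_df / train_info_df are the per-frame annotation dataframes used by the project, nearest_neighbor_indices holds the retrieved training index for each query frame, and compute_3d_iou is a hypothetical evaluation function (none of these are part of the excerpt above):

# Hypothetical evaluation loop: compare each query's aligned neighbor box
# against the query's ground-truth box (compute_3d_iou is assumed, not shown).
ious = []
for query_idx, match_idx in enumerate(nearest_neighbor_indices):
    aligned_pred, gt_points = get_match_aligned_points(
        query_idx, int(match_idx), info_df, train_info_df, ground_truth=True)
    ious.append(compute_3d_iou(aligned_pred, gt_points))
print(f"mean 3D IoU: {sum(ious) / len(ious):.3f}")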
Code example #2
def get_match_snapped_points(idx, match_idx, info_df, train_info_df,
                             ground_truth=False, rescale=True, align_axis=True):
    """Align the matched training 3D box with the query image and snap it onto the query's ground plane."""
    assert isinstance(match_idx, int)
    plane_center, plane_normal = get_plane(info_df, idx)
    points3d_result_rotated, points3d_result = get_match_aligned_points(
        idx, match_idx, info_df, train_info_df, ground_truth=ground_truth)
    snapped, intersect = geo.snap_box_to_plane(
        points3d_result_rotated, plane_normal, plane_center, align_axis=align_axis)

    result = snapped
    if rescale:
        # Re-project the snapped box and rescale it so that its projected 2D
        # bounding box matches the bounding box observed in the query image.
        camera = get_camera(info_df, idx)
        intrinsics = get_intrinsics(camera)
        intrinsics = geo.scale_intrinsics(intrinsics, 0.25, 0.25)
        points2d_px = geo.project_3d_to_2d(snapped, intrinsics)
        dest_bbox = geo.get_bbox(points2d_px, 360, 480)

        points2d_valid, points3d_valid = get_points(info_df, idx)
        valid_image = Image.open(info_df.iloc[idx]["filepath_full"])
        points2d_valid_px = geo.points_2d_to_points2d_px(points2d_valid,
                                                         valid_image.width,
                                                         valid_image.height)
        valid_bbox = geo.get_bbox(points2d_valid_px, valid_image.width,
                                  valid_image.height)
        scale_x, scale_y = geo.get_scale_factors(dest_bbox, valid_bbox)
        scale_factor = (scale_x + scale_y) / 2

        # Scale the box about its first keypoint, then snap it back onto the plane.
        fixed_point = snapped[0]
        snapped_scaled = (snapped.copy() - fixed_point) * scale_factor + fixed_point
        snapped_scaled, intersect = geo.snap_box_to_plane(snapped_scaled,
                                                          plane_normal, plane_center)
        result = snapped_scaled

    return result, points3d_result
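The rescaling branch above grows or shrinks the box about its first keypoint, so distances to that point are multiplied by the scale factor while the point itself stays fixed; the operation in isolation, as a plain-NumPy sketch:

import numpy as np

def scale_about_point(points, anchor, scale_factor):
    # Scale a set of 3D points about an anchor: the anchor is left unchanged,
    # every other point moves along its offset from the anchor.
    points = np.asarray(points, dtype=float)
    return (points - anchor) * scale_factor + anchor

box = np.array([[0.0, 0.0, 0.0],
                [1.0, 1.0, 1.0]])
print(scale_about_point(box, anchor=box[0], scale_factor=1.2))
# The anchor stays at the origin; the opposite corner moves to (1.2, 1.2, 1.2).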
Code example #3
def get_bounding_box(idx, match_idx, experiment, adjust_scale=False):
    """Transfer the 3D box of a matched training sample into the query frame."""
    points_2d_result, points_3d_result = experiment.get_points(match_idx, train=True)
    points_2d_px_result = geo.points_2d_to_points2d_px(points_2d_result, 360, 480)
    points_2d_query, _ = experiment.get_points(idx, train=False)
    points_2d_px_query = geo.points_2d_to_points2d_px(points_2d_query, 360, 480)
    plane_center_query, plane_normal_query = experiment.get_plane(idx, train=False)
    plane_center_result, plane_normal_result = experiment.get_plane(match_idx, train=True)
    query_intrinsics = experiment.get_intrinsics(idx, train=False)
    query_intrinsics = geo.scale_intrinsics(query_intrinsics, 0.25, 0.25)
    result_bbox = geo.get_bbox(points_2d_px_result, 360, 480)
    query_bbox = geo.get_bbox(points_2d_px_query, 360, 480)
    # Estimate where the object center lies in the query image and build the
    # corresponding viewing ray in camera coordinates.
    cx, cy, a, b = estimate_object_center_in_query_image(query_intrinsics, query_bbox,
                                                         points_2d_px_result)
    center_ray = np.array([a, b, -1])
    # Align the matched box with the query plane, then place it on that plane
    # along the center ray, keeping the box center at its original distance
    # (obj_radius) from the plane.
    points_3d_axis = align_box_with_plane(points_3d_result, plane_normal_query, plane_normal_result)
    obj_radius = geo.get_dist_from_plane(plane_normal_result, plane_center_result, points_3d_axis[0])
    points_3d_result_snapped = geo.snap_to_plane(points_3d_axis, plane_normal_query,
                                                 plane_center_query, center_ray, obj_radius)
    if adjust_scale:
        # Iteratively rescale the box so that its projection matches the query
        # bounding box, re-snapping to the plane after each adjustment.
        scale = get_smooth_scale_factor(query_bbox, points_3d_result_snapped, query_intrinsics, 2)
        points3d_scaled = points_3d_result_snapped
        for _ in range(4):
            obj_radius = geo.get_dist_from_plane(plane_normal_query, plane_center_query,
                                                 points3d_scaled[0])
            points3d_scaled = geo.snap_to_plane(geo.scale_3d_bbox(points3d_scaled, scale),
                                                plane_normal_query, plane_center_query, center_ray,
                                                obj_radius=obj_radius * scale)
            scale = get_smooth_scale_factor(query_bbox, points3d_scaled, query_intrinsics, 2)
        return points3d_scaled
    return points_3d_result_snapped
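A sketch of how this function might be driven from retrieval results, assuming experiment exposes the get_points / get_plane / get_intrinsics accessors used above and matches maps each query index to its nearest training indices (both names are placeholders, not part of the excerpt):

# Transfer the 3D box of the best training match into each query frame.
predicted_boxes = []
for query_idx, match_indices in enumerate(matches):
    box_3d = get_bounding_box(query_idx, int(match_indices[0]),
                              experiment, adjust_scale=True)
    predicted_boxes.append(box_3d)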
Code example #4
def get_scale_factor(query_bbox, points3d_scaled, intrinsics, width=360, height=480):
    """Ratio of the query 2D bounding-box area to the area of the projected 3D box."""
    points2d_px_result = geo.project_3d_to_2d(points3d_scaled, intrinsics)
    result_bbox = geo.get_bbox(points2d_px_result, width, height)
    scale = geo.get_bbox_area(query_bbox) / geo.get_bbox_area(result_bbox)
    return scale
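The value returned here is a ratio of bounding-box areas; because area grows with the square of a uniform resize, the corresponding per-axis factor is its square root. A self-contained illustration, assuming the (x_min, y_min, x_max, y_max) box layout used in the other excerpts:

import math

def bbox_area(bbox):
    # bbox = (x_min, y_min, x_max, y_max), as in the snippets above.
    return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

query_bbox = (100, 120, 300, 420)    # 200 x 300 px
result_bbox = (90, 100, 240, 325)    # 150 x 225 px
area_scale = bbox_area(query_bbox) / bbox_area(result_bbox)   # 60000 / 33750 ~ 1.78
linear_scale = math.sqrt(area_scale)                          # ~ 1.33 per axis
print(area_scale, linear_scale)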
Code example #5
def estimate_object_center_in_query_image(query_intrinsics, query_bbox, points_2d_px_result):
    """Estimate the object-center pixel in the query image and the corresponding viewing-ray components."""
    # Start from the center of the query 2D bounding box.
    cx = (query_bbox[0] + query_bbox[2]) / 2
    cy = (query_bbox[1] + query_bbox[3]) / 2
    # Shift it by the relative offset between the matched sample's projected box
    # center and its 2D bounding box, scaled to the query bounding-box size.
    result_bbox = geo.get_bbox(points_2d_px_result, 360, 480)
    projected_center = points_2d_px_result[0]
    adjust_x_rel, adjust_y_rel = get_center_ajust(result_bbox, projected_center)
    adjust_x_px = adjust_x_rel * (query_bbox[2] - query_bbox[0])
    adjust_y_px = adjust_y_rel * (query_bbox[3] - query_bbox[1])
    cx = cx + adjust_x_px
    cy = cy + adjust_y_px
    # Back-project the estimated center pixel to a viewing-ray direction. Note
    # the swapped row/column indexing into the intrinsics, which appears to
    # follow the portrait (360x480) image convention used in these excerpts.
    b = (cx - query_intrinsics[1, 2]) / query_intrinsics[1, 1]
    a = (cy - query_intrinsics[0, 2]) / query_intrinsics[0, 0]
    return cx, cy, a, b
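For reference, the standard pinhole back-projection that the last two lines perform: a pixel (u, v) maps to the ray direction ((u - c_x) / f_x, (v - c_y) / f_y, -1), the -1 indicating that the camera looks down the negative z-axis, as in the code above. A standalone sketch using the conventional intrinsics layout (the excerpt itself swaps rows and columns, presumably to match its portrait 360x480 images):

import numpy as np

def backproject_pixel(u, v, K):
    # K = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]] in the conventional layout.
    x = (u - K[0, 2]) / K[0, 0]
    y = (v - K[1, 2]) / K[1, 1]
    return np.array([x, y, -1.0])   # un-normalized viewing-ray direction

K = np.array([[500.0,   0.0, 180.0],
              [  0.0, 500.0, 240.0],
              [  0.0,   0.0,   1.0]])
print(backproject_pixel(180.0, 240.0, K))   # principal point -> [ 0.  0. -1.]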