Code Example #1
import numpy as np
from skimage import transform as trans

# image-plane control points; the road-plane counterparts follow in rdst
rsrc = \
    [[43.45456230828867, 118.00743250075844],
     [104.5055617352614, 83.46865203761757],
     [114.86050156739812, 60.83953551083698],
     [129.74572757609468, 50.48459567870026],
     [132.98164627363735, 46.38576532847949],
     [301.0336906326895, 98.16046448916306],
     [238.25686790036065, 62.56535881619311],
     [227.2547443287154, 56.30924933427718],
     [209.13359962247614, 46.817221154818526],
     [203.9561297064078, 43.5813024572758]]
rdst = \
    [[10.822125594094452, 1.42189132706374],
     [21.177065426231174, 1.5297552836484982],
     [25.275895776451954, 1.42189132706374],
     [36.062291434927694, 1.6376192402332563],
     [40.376849698318004, 1.42189132706374],
     [11.900765159942026, -2.1376192402332563],
     [22.25570499207874, -2.1376192402332563],
     [26.785991168638553, -2.029755283648498],
     [37.033067044190524, -2.029755283648498],
     [41.67121717733509, -2.029755283648498]]

tform3_img = trans.ProjectiveTransform()
tform3_img.estimate(np.array(rdst), np.array(rsrc))


def draw_vbar_on(img, bar_intensity, x_pos, color=(0, 0, 255)):
    """Draw a vertical bar of height proportional to bar_intensity."""
    bar_size = int(img.shape[1] / 6 * bar_intensity)
    initial_y_pos = img.shape[0] - img.shape[0] // 6  # keep an integer row index

    if bar_intensity > 0.0:
        for i in range(bar_size):
            y = initial_y_pos - i
            for j in range(20):  # the bar is 20 pixels wide
                img[y, x_pos + j] = color
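
A quick usage sketch (the frame and coordinates below are made up): since
tform3_img was estimated from rdst to rsrc, calling it maps road-plane
points into image pixels, and draw_vbar_on paints a 20-pixel-wide bar onto
a BGR frame.

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # hypothetical BGR frame
draw_vbar_on(frame, bar_intensity=0.5, x_pos=100)

# project a hypothetical road-plane point into pixel coordinates
px, py = tform3_img(np.array([[20.0, 0.0]]))[0]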
Code Example #2
import warnings

import numpy as np
from skimage import transform


def perspective_transform(img: np.ndarray, input_points: list,
                          output_shape: tuple) -> np.ndarray:
    """
    Applies 4 point perspective transform to an image
    and returns it as a separate image.

    Parameters
    ----------
    img : numpy.ndarray
        Original image (np.ndarray).
    input_points : list of tuples
        List of four points (x, y) arranged anticlockwise,
        starting at the top-left corner.
    output_shape : tuple
        Desired shape of transformed image (height, width).

    Returns
    -------
    numpy.ndarray
        Transformed image.

    Raises
    ------
    ValueError
        If shape of input_points is not (4,2).

    """

    # checking if input_points are in correct shape
    input_shape = np.array(input_points).shape
    if input_shape != (4, 2):
        raise ValueError("Shape of input_points expected to be (4, 2), "
                         f"got {input_shape} instead.")

    upper_left_src = (0, 0)
    lower_left_src = (0, output_shape[0])
    lower_right_src = (output_shape[1], output_shape[0])
    upper_right_src = (output_shape[1], 0)

    upper_left_dst = input_points[0]
    lower_left_dst = input_points[1]
    lower_right_dst = input_points[2]
    upper_right_dst = input_points[3]

    # checking if input_points are arranged in a correct way
    if not (upper_left_dst[1] < lower_left_dst[1]
            and lower_left_dst[0] < lower_right_dst[0]
            and lower_right_dst[1] > upper_right_dst[1]
            and upper_right_dst[0] > upper_left_dst[0]):
        warnings.warn("Suspicious arrangement of input_points, "
                      "expected anticlockwise starting at the top-left corner.")

    # transformation coordinates
    dst = np.array(input_points)
    src = np.array(
        [upper_left_src, lower_left_src, lower_right_src, upper_right_src])

    # transformation
    tform = transform.ProjectiveTransform()
    tform.estimate(src, dst)
    warped_img = transform.warp(img, tform, output_shape=output_shape)

    return warped_img
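
A usage sketch (the image and corner coordinates are made up for
illustration):

from skimage import data

img = data.checkerboard()
# four corners, anticlockwise from the top-left: TL, BL, BR, TR
corners = [(30, 20), (10, 180), (190, 190), (170, 30)]
flat = perspective_transform(img, corners, output_shape=(100, 100))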
Code Example #3
import numpy as np
from skimage import transform


def compute_homography_and_warp(image, vp1, vp2, clip=True, clip_factor=3):
    """Compute homography from vanishing points and warp the image.

    It is assumed that vp1 and vp2 correspond to horizontal and vertical
    directions, although the order is not assumed.
    First, a projective transform is computed to send the vanishing points
    to infinity, giving a fronto-parallel view. Then an affine transform is
    computed to make the axes corresponding to the vanishing points
    orthogonal. Finally, the image is translated so that none of it is lost.
    Note that the result can be very large; `clip` is provided to deal with
    this.

    Parameters
    ----------
    image: ndarray
        Image which has to be warped.
    vp1: ndarray of shape (3,)
        First vanishing point in homogeneous coordinates.
    vp2: ndarray of shape (3,)
        Second vanishing point in homogeneous coordinates.
    clip: bool, optional
        If True, the image is clipped to clip_factor.
    clip_factor: float, optional
        Proportion of the image, in multiples of the image size, to retain
        if it goes out of bounds after the homography.

    Returns
    -------
    warped_img: ndarray
        Image warped using the homography described above.
    """
    # Find Projective Transform
    vanishing_line = np.cross(vp1, vp2)
    H = np.eye(3)
    H[2] = vanishing_line / vanishing_line[2]
    H = H / H[2, 2]

    # Find directions corresponding to vanishing points
    v_post1 = np.dot(H, vp1)
    v_post2 = np.dot(H, vp2)
    v_post1 = v_post1 / np.sqrt(v_post1[0]**2 + v_post1[1]**2)
    v_post2 = v_post2 / np.sqrt(v_post2[0]**2 + v_post2[1]**2)

    directions = np.array([[v_post1[0], -v_post1[0], v_post2[0], -v_post2[0]],
                           [v_post1[1], -v_post1[1], v_post2[1], -v_post2[1]]])

    thetas = np.arctan2(directions[0], directions[1])

    # Find direction closest to horizontal axis
    h_ind = np.argmin(np.abs(thetas))

    # Find a positive angle among the rest for the vertical axis
    if h_ind // 2 == 0:
        # horizontal direction came from vp1, so pick the vertical from vp2
        v_ind = 2 + np.argmax([thetas[2], thetas[3]])
    else:
        # horizontal direction came from vp2, so pick the vertical from vp1
        v_ind = np.argmax([thetas[0], thetas[1]])

    A1 = np.array([[directions[0, v_ind], directions[0, h_ind], 0],
                   [directions[1, v_ind], directions[1, h_ind], 0],
                   [0, 0, 1]])
    # Might be a reflection. If so, remove reflection.
    if np.linalg.det(A1) < 0:
        A1[:, 0] = -A1[:, 0]

    A = np.linalg.inv(A1)

    # Translate so that whole of the image is covered
    inter_matrix = np.dot(A, H)

    cords = np.dot(inter_matrix, [[0, 0, image.shape[1], image.shape[1]],
                                  [0, image.shape[0], 0, image.shape[0]],
                                  [1, 1, 1, 1]])
    cords = cords[:2] / cords[2]

    tx = min(0, cords[0].min())
    ty = min(0, cords[1].min())

    max_x = cords[0].max() - tx
    max_y = cords[1].max() - ty

    if clip:
        # These might be too large. Clip them.
        max_offset = max(image.shape) * clip_factor / 2
        tx = max(tx, -max_offset)
        ty = max(ty, -max_offset)

        max_x = min(max_x, -tx + max_offset)
        max_y = min(max_y, -ty + max_offset)

    max_x = int(max_x)
    max_y = int(max_y)

    T = np.array([[1, 0, -tx],
                  [0, 1, -ty],
                  [0, 0, 1]])

    final_homography = np.dot(T, inter_matrix)

    inv_homography = np.linalg.inv(final_homography)

    print(inv_homography)

    tform = transform.ProjectiveTransform(inv_homography)

    warped_img = transform.warp(image, tform)
    return warped_img
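
A minimal usage sketch, assuming the vanishing points have already been
estimated (the coordinates below are hypothetical):

from skimage import data

image = data.checkerboard()
vp1 = np.array([1000.0, 100.0, 1.0])   # hypothetical horizontal VP
vp2 = np.array([150.0, -2000.0, 1.0])  # hypothetical vertical VP
warped = compute_homography_and_warp(image, vp1, vp2)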
Code Example #4
File: detect.py Project: smada2017/CS3630
import numpy as np
from skimage import color, draw, transform

# `estimate_corner_lines` and `intersection` are helper functions defined
# elsewhere in detect.py


def unwarp_marker(image,
                  left_region,
                  right_region,
                  width=320,
                  height=240,
                  margin=20):
    '''
    Using the four corners of the detected marker, estimate the inverse
    projective transform to obtain a cropped, "unwarped" view of the marker

    Input:
        - image: grayscale image that contains marker
        - left_region: regionprops corresponding to lower-left corner
        - right_region: regionprops corresponding to upper-right corner
        - width, height: output "unwarped" image dimensions
        - margin: increase the "unwarped" bounding box by the given margin
    Returns: the "unwarped" image that contains just the marker
    '''

    li = left_region.intensity_image   # intensity crops of the corner regions
    ri = right_region.intensity_image  # (not used further below)

    left_miny, left_minx, left_maxy, left_maxx = left_region.bbox
    right_miny, right_minx, right_maxy, right_maxx = right_region.bbox

    # Compute the coordinates of the corners
    top_right = (right_maxx, right_miny)
    bottom_left = (left_minx, left_maxy)

    # Compute the coordinates of the other two corners by estimating edges
    corner_bl_lines = estimate_corner_lines(left_region, 'BL', image.shape)
    corner_tr_lines = estimate_corner_lines(right_region, 'TR', image.shape)
    bottom_right = intersection(corner_bl_lines[0], corner_tr_lines[1])
    top_left = intersection(corner_bl_lines[1], corner_tr_lines[0])

    corners = (top_left, top_right, bottom_right, bottom_left)

    # Estimate the transform
    m = margin
    src = np.array([[m, m], [width - m, m], [m, height - m],
                    [width - m, height - m]])
    dst = np.array([
        top_left,  # top left     -> (m, m)
        top_right,  # top right    -> (width - m, m)
        bottom_left,  # bottom left  -> (m, height - m)
        bottom_right  # bottom right -> (width - m, height - m)
    ])

    t = transform.ProjectiveTransform()
    t.estimate(src, dst)

    unwarped = transform.warp(image,
                              t,
                              output_shape=(height, width),
                              mode='constant',
                              cval=1.0)
    cropped = np.copy(image)[right_miny:left_maxy, left_minx:right_maxx]

    # Draw the original estimated bounds atop the regular image
    marked = color.gray2rgb(np.copy(image))
    draw.set_color(
        marked,
        draw.line(top_left[1], top_left[0], bottom_left[1], bottom_left[0]),
        (1.0, 0, 0))
    draw.set_color(
        marked, draw.line(top_left[1], top_left[0], top_right[1],
                          top_right[0]), (1.0, 0, 0))
    draw.set_color(
        marked,
        draw.line(bottom_right[1], bottom_right[0], bottom_left[1],
                  bottom_left[0]), (1.0, 0, 0))
    draw.set_color(
        marked,
        draw.line(bottom_right[1], bottom_right[0], top_right[1],
                  top_right[0]), (1.0, 0, 0))
    draw.set_color(marked, draw.circle(top_left[1], top_left[0], 5),
                   (0, 1.0, 0))
    draw.set_color(marked, draw.circle(top_right[1], top_right[0], 5),
                   (0, 1.0, 0))
    draw.set_color(marked, draw.circle(bottom_left[1], bottom_left[0], 5),
                   (0, 1.0, 0))
    draw.set_color(marked, draw.circle(bottom_right[1], bottom_right[0], 5),
                   (0, 1.0, 0))

    return unwarped, cropped, marked, corners
Code Example #5
File: plot_geometric.py Project: xuq/Dash-Docset
where you have a set of control points or homologous/corresponding points in two
images.

Let's assume we want to recognize letters on a photograph that was not taken
from the front but at an angle. In the simplest case of a flat paper
surface, the letters are projectively distorted. Simple matching algorithms
would not be able to match such symbols. One solution to this problem would
be to warp the image so that the distortion is removed and then apply a
matching algorithm:
"""

import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import transform as tf

text = data.text()

src = np.array(((0, 0), (0, 50), (300, 50), (300, 0)))
dst = np.array(((155, 15), (65, 40), (260, 130), (360, 95)))

tform3 = tf.ProjectiveTransform()
tform3.estimate(src, dst)
warped = tf.warp(text, tform3, output_shape=(50, 300))

fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(8, 3))
fig.subplots_adjust(**margins)  # `margins` is a dict defined earlier in plot_geometric.py
plt.gray()
ax1.imshow(text)
ax1.plot(dst[:, 0], dst[:, 1], '.r')
ax1.axis('off')
ax2.imshow(warped)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
"""
Code Example #6
import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge, CvBridgeError
from PIL import Image as PILImage, ImageDraw, ImageFont
from sensor_msgs.msg import Image
from skimage import transform as tf
from std_msgs.msg import Float32, Float32MultiArray


class Visualization:

    # ***** get perspective transform for images *****
    rsrc = \
        [[43.45456230828867, 118.00743250075844],
         [104.5055617352614, 83.46865203761757],
         [114.86050156739812, 60.83953551083698],
         [129.74572757609468, 50.48459567870026],
         [132.98164627363735, 46.38576532847949],
         [301.0336906326895, 98.16046448916306],
         [238.25686790036065, 62.56535881619311],
         [227.2547443287154, 56.30924933427718],
         [209.13359962247614, 46.817221154818526],
         [203.9561297064078, 43.5813024572758]]
    rdst = \
        [[10.822125594094452, 1.42189132706374],
         [21.177065426231174, 1.5297552836484982],
         [25.275895776451954, 1.42189132706374],
         [36.062291434927694, 1.6376192402332563],
         [40.376849698318004, 1.42189132706374],
         [11.900765159942026, -2.1376192402332563],
         [22.25570499207874, -2.1376192402332563],
         [26.785991168638553, -2.029755283648498],
         [37.033067044190524, -2.029755283648498],
         [41.67121717733509, -2.029755283648498]]

    tform3_img = tf.ProjectiveTransform()
    tform3_img.estimate(np.array(rdst), np.array(rsrc))

    def perspective_tform(self, x, y):
        p1, p2 = self.tform3_img((x, y))[0]
        return p2, p1

    # ***** functions to draw lines *****
    def draw_pt(self, img, x, y, color, sz=2):
        row, col = self.perspective_tform(x, y)
        row = row * 2
        col = col * 2
        if 0 <= row < img.shape[0] and 0 <= col < img.shape[1]:
            img[int(row - sz):int(row + sz), int(col - sz):int(col + sz)] = color

    def draw_path(self, img, path_x, path_y, color):
        for x, y in zip(path_x, path_y):
            self.draw_pt(img, x, y, color)

    # ***** functions to draw predicted path *****

    def calc_curvature(self, v_ego, angle_steers, angle_offset=0):
        deg_to_rad = np.pi / 180.
        slip_factor = 0.0014  # slip factor obtained from real data
        steer_ratio = 15.3  # from http://www.edmunds.com/acura/ilx/2016/road-test-specs/
        wheel_base = 2.67  # from http://www.edmunds.com/acura/ilx/2016/sedan/features-specs/

        angle_steers_rad = (angle_steers - angle_offset)  # * deg_to_rad
        curvature = angle_steers_rad / (steer_ratio * wheel_base * (1. + slip_factor * v_ego ** 2))
        return curvature

    def calc_lookahead_offset(self, v_ego, angle_steers, d_lookahead, angle_offset=0):
        # *** this function returns the lateral offset given the steering angle, speed and the lookahead distance
        curvature = self.calc_curvature(v_ego, angle_steers, angle_offset)

        # clip is to avoid arcsin NaNs due to too sharp turns
        y_actual = d_lookahead * np.tan(np.arcsin(np.clip(d_lookahead * curvature, -0.999, 0.999)) / 2.)
        return y_actual, curvature

    # main methods --
    def visualize_line(self, img, speed_ms, angle_steers, color=(0, 0, 255)):

        path_x = np.arange(0, 50.1, 0.5)
        path_y, _ = self.calc_lookahead_offset(speed_ms, angle_steers, path_x)
        self.draw_path(img, path_x, path_y, color)

        return img, path_x, path_y

    def visualize_steering_wheel(self, image, angle):

        background = PILImage.fromarray(np.uint8(image))
        sw = PILImage.open("./steering/resources/sw.png")
        sw = sw.rotate(angle * 180 / np.pi)
        sw = sw.resize((80, 80), PILImage.ANTIALIAS)
        background.paste(sw, (10, 10), sw)

        draw = ImageDraw.Draw(background)
        font = ImageFont.truetype("./steering/resources/FiraMono-Medium.otf", 16)
        draw.text((80, 200), str(round(angle, 3)), (255, 255, 255), font=font)
        steering_img = cv2.resize(np.array(background), (640, 480))
        return steering_img

    def apply_accel_visualization(self, image, accel):

        background = PILImage.fromarray(np.uint8(image))
        if accel < -1:
            sw = PILImage.open("/home/neil/Workspace/self-driving-golf-cart/ros/src/autopilot/scripts/resources/stop.png")
            sw = sw.resize((80, 80), PILImage.ANTIALIAS)
            background.paste(sw, (10, 38), sw)

        draw = ImageDraw.Draw(background)
        font = ImageFont.truetype("/home/neil/Workspace/self-driving-golf-cart/ros/src/autopilot/scripts/resources/FiraMono-Medium.otf", 20)
        draw.text((40, 420), str(round(accel, 3)), (255, 255, 255), font=font)
        draw.text((10, 10), "Cruise Control Running... v1.0.0", (255, 255, 255), font=font)
        text = ""
        if accel < 0:
            text = 'Slow'
        elif accel > 0:
            text = 'Speed Up'
        draw.text((40, 380), text, (255, 255, 255), font=font)
        steering_img = cv2.resize(np.array(background), (640, 480))
        return steering_img

    # ------------------------------------------------------------------------------------------------------------------
    # cv_camera callback
    def camera_update_callback(self, data):

        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            raise e

        self.current_frame = cv_image

    def steering_cmd_callback(self, data):

        self.steering_angle = data.data

    def cruise_callback(self, data):

        self.cruise_cmds = data.data

    # ------------------------------------------------------------------------------------------------------------------

    def __init__(self):

        rospy.init_node('autopilot_visualization_node')

        self.current_frame = None
        self.bridge = CvBridge()
        self.steering_angle = 0.0
        self.cruise_cmds = 0.0

        # -----------------------------------------
        rospy.Subscriber("/zed/rgb/image_raw_color", Image,
                         callback=self.camera_update_callback, queue_size=8)
        # -----------------------------------------

        rospy.Subscriber('/vehicle/dbw/steering_cmds', Float32, callback=self.steering_cmd_callback)
        rospy.Subscriber('/vehicle/dbw/cruise_cmds', Float32, callback=self.cruise_callback)

        steering_viz_pub = rospy.Publisher('/visual/autopilot/angle_img', Image, queue_size=5)
        accel_viz_pub = rospy.Publisher('/visual/autopilot/cruise_img', Image, queue_size=5)
        steering_ios_path = rospy.Publisher('/visual/ios/steering/path', Float32MultiArray, queue_size=5)

        rate = rospy.Rate(24)

        while not rospy.is_shutdown():

            if self.current_frame is not None:

                # Apply Steering Visualization #  -0.025
                image, path_x, path_y = self.visualize_line(img=self.current_frame.copy(),
                                                            angle_steers=self.steering_angle * -0.035,
                                                            speed_ms=5)

                # Generate steering path message
                path_msg = Float32MultiArray()
                path_msg.data = np.hstack([path_x, path_y])
                steering_ios_path.publish(path_msg)

                # Generate steering image message
                img_msg = self.bridge.cv2_to_imgmsg(image, "bgr8")
                steering_viz_pub.publish(img_msg)

                # Apply Accel Visualization
                image = self.apply_accel_visualization(image=self.current_frame, accel=self.cruise_cmds)
                img_msg = self.bridge.cv2_to_imgmsg(image, "bgr8")
                accel_viz_pub.publish(img_msg)

            rate.sleep()
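
Since __init__ itself spins the publish loop, a minimal entry point
(assumed; the original snippet does not show one) is simply:

if __name__ == '__main__':
    Visualization()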
Code Example #7
import numpy as np
from skimage import transform


def get_transform_from_matching_pts(key_pts_input, key_pts_target):
    tform = transform.ProjectiveTransform()  # or AffineTransform
    tform.estimate(key_pts_input, key_pts_target)
    return tform.params
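
A usage sketch with made-up matched points (any two (N, 2) arrays of
corresponding coordinates, N >= 4, would do):

pts_in = np.array([[0, 0], [0, 10], [10, 10], [10, 0]], dtype=float)
pts_out = np.array([[1, 1], [2, 12], [13, 11], [11, 2]], dtype=float)
H = get_transform_from_matching_pts(pts_in, pts_out)  # 3x3 homography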
Code Example #8
File: utils.py Project: starasteh/lane-detection
import numpy as np
from skimage import transform as tf


def init_transform(src, tgt):
    tform = tf.ProjectiveTransform()
    tform.estimate(np.array(src), np.array(tgt))

    return tform
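
And a quick usage sketch (the corner coordinates are hypothetical):

src = [(0, 0), (0, 100), (200, 100), (200, 0)]
tgt = [(20, 10), (15, 110), (210, 105), (205, 5)]
tform = init_transform(src, tgt)
print(tform.params)  # the estimated 3x3 homography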