Example 1
import cv2 as cv


def check_shape(contour, max_point):
    # The brightest point must lie inside the contour (pointPolygonTest with
    # measureDist=False returns +1 inside, 0 on the edge, -1 outside).
    max_test = cv.pointPolygonTest(contour, max_point, False) >= 0
    box, mass_dir = get_mass_dir(contour)

    # The contour should fill only a small part of its rotated bounding box.
    area = cv.contourArea(box)
    if area == 0:
        area_test = False
    else:
        area_test = 0.23 > (cv.contourArea(contour) / area)
    ratio_test = False

    # The bounding box must be clearly elongated and large enough.
    x_t = get_dis(box[0], box[1])
    y_t = get_dis(box[1], box[2])
    if y_t != 0:
        ratio = x_t / y_t
        ratio_test = (ratio > 1.85 or ratio < 0.75) and x_t > 20 and y_t > 40

    test = (area_test and max_test and (-1 < mass_dir < 1)
            and (area > 1000) and ratio_test)
    return test, box
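A minimal driver sketch, mirroring how check_shape is invoked in target_pose_estimation further below: candidate contours come from cv.findContours on a binary mask, and the reference point is the brightest pixel of the blurred grayscale frame. gray and mask are hypothetical 8-bit single-channel images:

# Hypothetical driver; `gray` and `mask` stand in for real camera-derived images.
_, _, _, brightest = cv.minMaxLoc(cv.GaussianBlur(gray, (3, 3), 0))
contours, _ = cv.findContours(mask, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    ok, box = check_shape(cnt, brightest)
    if ok:
        # cnt passed the filter; box holds its rotated bounding-box corners
        break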
Example 2
import cv2 as cv
import numpy as np


def get_mass_dir(cnt):
    # Rotated (minimum-area) bounding rectangle of the contour, as 4 corners.
    rect = cv.minAreaRect(cnt)
    box = cv.boxPoints(rect)
    box = np.intp(box)  # integer corners (np.int0 is a deprecated alias of np.intp)
    max_num = 0
    a = 0
    # Slope of the longest edge among the first three; opposite sides of the
    # rectangle are equal, so both edge lengths are covered.
    for i in range(3):
        dis = get_dis(box[i], box[i + 1])
        if dis > max_num:
            max_num = dis
            a = get_slope(box[i], box[i + 1])
    return box, a
Example 3
    def generate_graph(self):
        self.points_array = []

        dx = self.x2 - self.x1
        dy = self.y2 - self.y1
        dz = self.z2 - self.z1

        # straight-line distance between the endpoints and the total flight time
        dis = sqrt(pow(dx, 2) + pow(dy, 2) + pow(dz, 2))
        self.max_time = dis / self.velocity

        # sample the trajectory: x and y advance linearly toward the end point,
        # while z follows the ballistic arc
        # z = z1 + d*tan(alpha) - g*d^2 / (2*v^2*cos(alpha)^2),
        # with d the horizontal distance travelled so far
        for i in range(self.points_count):
            x = self.x1 + (dx * i / self.points_count)
            y = self.y1 + (dy * i / self.points_count)
            y_temp = y - self.y1
            x_temp = x - self.x1
            dis = get_dis((0, 0), (y_temp, x_temp))
            z = (self.z1 + (dis * tan(self.alpha))
                 - ((gravity * (dis ** 2))
                    / (2 * (self.velocity ** 2) * (cos(self.alpha) ** 2))))

            self.points_array.append([x, y, z])
Example 4
from math import atan, cos, sin, sqrt


def calculate_vector(point_a, point_b):
    """
    Takes two [x, y, z] points and calculates the launch angle and velocity for
    shooting an object from point_a to point_b, ignoring air friction. The
    solution places the apex of the trajectory at point_b, so the object
    arrives there moving parallel to the floor.

    input:
    point_a = [x, y, z] (meters)
    point_b = [x, y, z] (meters)

    output:
    alpha - float, radians
    velocity - float, m/s
    """
    dh = point_b[2] - point_a[2]    # height difference
    dx = get_dis(point_a, point_b)  # distance to the target (project helper)
    # At the apex the rise is dh = dx * tan(alpha) / 2, hence:
    alpha = atan((2 * float(dh)) / float(dx))
    # v^2 = g * dx / (sin(alpha) * cos(alpha)) places the apex exactly at dx.
    velocity = sqrt((gravity * float(dx)) / (cos(alpha) * sin(alpha)))
    return alpha, velocity
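A short usage sketch with hypothetical coordinates, assuming the module-level gravity constant and the get_dis helper used above are available:

# Hypothetical values: launch from the origin toward a target 4 m away and 1 m higher.
point_a = [0.0, 0.0, 0.0]
point_b = [4.0, 0.0, 1.0]
alpha, velocity = calculate_vector(point_a, point_b)
print("launch angle (rad):", alpha, "launch speed (m/s):", velocity)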
Example 5
    def get_location_at_time(self, time):
        # clamp to the endpoints of the precomputed trajectory
        if time > self.max_time:
            return self.points_array[-1]
        if time < 0:
            return self.points_array[0]

        dx = self.x2 - self.x1
        dy = self.y2 - self.y1

        # constant velocity components in the x-y plane
        vx = dx / self.max_time
        vy = dy / self.max_time

        x = self.x1 + (time * vx)
        y = self.y1 + (time * vy)
        # horizontal distance travelled so far, then the same ballistic arc
        # used in generate_graph() for the height
        y_temp = y - self.y1
        x_temp = x - self.x1
        dis = get_dis((0, 0), (y_temp, x_temp))
        z = (self.z1 + (dis * tan(self.alpha))
             - ((gravity * (dis ** 2))
                / (2 * (self.velocity ** 2) * (cos(self.alpha) ** 2))))

        return [x, y, z]
Example 6
import cv2 as cv
import numpy as np
import rospy


def target_pose_estimation(mat,
                           model_points,
                           dist_coeffs,
                           camera_matrix,
                           is_view_image=False,
                           is_publish_image=False,
                           image_publisher=None,
                           target_pose_publisher=None,
                           rotation_publisher=None):
    """
    Find the pose of a target relatively to the camera.
    sets the rosparam '/target_in_frame' if the target appears in the mat.
    publishes the pose of the target under the name '/target_pose'.

    :param mat: RGB mat.
    :param model_points: The points of the target in real space ([ [x, y, z], [x, y, z], ... ]
    :param dist_coeffs: Distortion coeff matrix from camera calibration.
    :param camera_matrix: Camera calibration matrix.
    :param is_view_image: bool - for showing the image.
    :param is_publish_image: bool - for publishing the image.
    :param image_publisher: ROS image publisher.
    :param target_pose_publisher: ROS Target pose publisher ([x, y, z, roll, pitch, yaw])
    :param rotation_publisher: Float32 publisher for the turret rotation.
    :return: None
    """
    if mat is None:
        return None

    # declarations.
    success = False
    ROI = None
    target_pose = None
    rotation = 0.0

    # rotate mat 180 degrees
    mat = cv.rotate(mat, cv.ROTATE_180)

    # transforms the image to HSV and grayscale
    hsv = cv.cvtColor(mat, cv.COLOR_RGB2HSV)
    gray = cv.cvtColor(mat, cv.COLOR_RGB2GRAY)

    # blurs the image
    blur = cv.GaussianBlur(gray, (3, 3), 0)

    # find the brightest point on the mat
    minVal, maxVal, minLoc, maxLoc = cv.minMaxLoc(blur)

    # filters out the unwanted colors.
    lower_thresh = np.array([0, 0, 0])
    upper_thresh = np.array([255, 255, 112])
    # mask = cv.bitwise_not(cv.inRange(hsv, lower_thresh, upper_thresh))  # TODO: Calibrate mask thresholds
    mask = cv.inRange(hsv, (36, 25, 40), (100, 255, 255))

    # threshold and contours
    ret, thresh = cv.threshold(mask, 20, 255, cv.THRESH_BINARY)
    contours, hierarchy = cv.findContours(thresh, cv.RETR_LIST,
                                          cv.CHAIN_APPROX_SIMPLE)

    # finds the target using check_shape()
    for cnt in contours:
        test, box = check_shape(cnt, maxLoc)
        if test:
            cv.drawContours(mat, [cnt], -1, (255, 255, 255))
            box = order_points(box)
            offsets = [box[0][0] - 10, box[0][1] - 10]
            # build a filled mask of the contour and crop it (with a 10 px
            # margin) to the ordered bounding box
            height = np.size(mask, 0)
            width = np.size(mask, 1)
            ROI = np.zeros((height, width))
            cv.fillPoly(ROI, [cnt], (255, 255, 255))
            ROI = ROI[(box[0][1] - 10):(box[2][1] + 10),
                      (box[0][0] - 10):(box[2][0] + 10)]

            test, temp, key_points = find_key_points_2(ROI, offsets)
            if not test:
                continue

            center_point_2d = get_center_point(key_points[0], key_points[-1])
            for p in key_points:
                cv.circle(mat, (int(p[0]), int(p[1])), 3, (255, 0, 255), -1)

            (success, rotation_vector,
             translation_vector) = cv.solvePnP(model_points, key_points,
                                               camera_matrix, dist_coeffs)

            if success:
                drawn, center_point_3d = draw_axis(mat, rotation_vector,
                                                   translation_vector,
                                                   camera_matrix, dist_coeffs)
                dis = get_dis(center_point_2d, center_point_3d)
                euler = rotation_to_euler(rotation_vector)
                # normalized horizontal offset of the target, scaled for the turret
                rotation = ((center_point_2d[0] / width) - 0.5) * (-1.2)

                # only keep the drawn axes if their projected origin lands
                # close to the 2D center found from the key points
                if dis < 6:
                    mat = drawn
                target_pose = [i[0] for i in rotation_vector] + euler
                break

    if is_view_image:
        show_images([mat, thresh, ROI])

    if is_publish_image:
        publish_image(mat, image_publisher)
        rospy.set_param('target_in_frame', success)
        rotation_publisher.publish(float(rotation))

        if success:
            pose = create_pose(target_pose, '/target_pose')
            target_pose_publisher.publish(pose)
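A wiring sketch for calling this function from a ROS node. Everything project-specific here is an assumption: the topic names, the Image/Float32/PoseStamped message types (the real create_pose helper decides which pose message is actually published), the calibration values, and the hypothetical grab_frame() capture helper:

# Hypothetical ROS wiring; topic names, message types and calibration values
# are placeholders, and grab_frame() stands in for whatever supplies RGB frames.
import numpy as np
import rospy
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import Image
from std_msgs.msg import Float32

rospy.init_node('target_pose_node')
image_pub = rospy.Publisher('debug_image', Image, queue_size=1)
pose_pub = rospy.Publisher('target_pose', PoseStamped, queue_size=1)
rotation_pub = rospy.Publisher('turret_rotation', Float32, queue_size=1)

camera_matrix = np.array([[700.0, 0.0, 320.0],
                          [0.0, 700.0, 240.0],
                          [0.0, 0.0, 1.0]])    # placeholder intrinsics
dist_coeffs = np.zeros((4, 1))                 # placeholder: no lens distortion
model_points = np.array([[-0.25, -0.10, 0.0],  # placeholder target geometry
                         [0.25, -0.10, 0.0],   # (corners in meters)
                         [0.25, 0.10, 0.0],
                         [-0.25, 0.10, 0.0]])

rate = rospy.Rate(30)
while not rospy.is_shutdown():
    frame = grab_frame()  # hypothetical capture helper returning an RGB mat
    target_pose_estimation(frame, model_points, dist_coeffs, camera_matrix,
                           is_publish_image=True,
                           image_publisher=image_pub,
                           target_pose_publisher=pose_pub,
                           rotation_publisher=rotation_pub)
    rate.sleep()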