Example #1
0
def cameraCallback(frame):
    """Per-frame callback: detect the ARP marker, draw its reprojected
    image position, and collect 3D points on keypresses.

    Relies on module-level globals: ``arp`` (marker detector), ``camera``
    (intrinsics used by the reprojection), ``node`` (node wrapper with a
    ``close()`` method), ``collected_points`` and ``points_per_object``.

    Keys: 'q' closes the node; space stores the current marker position.
    """
    #⬢⬢⬢⬢⬢➤ Grabs image from Frame
    img = frame.rgb_image.copy()

    arp_pose = arp.detect(img, debug_draw=True)
    if arp_pose:
        # Project the detected 3D marker position back onto the image plane.
        img_points = cvutils.reproject3DPoint(
            arp_pose.p.x(),
            arp_pose.p.y(),
            arp_pose.p.z(),
            camera=camera
        )

        # Filled red dot at the reprojected marker position.
        cv2.circle(
            img,
            (int(img_points[0]), int(img_points[1])),
            5,
            (0, 0, 255),
            -1
        )

    #⬢⬢⬢⬢⬢➤ Show
    cv2.imshow("output", img)
    c = cv2.waitKey(1)
    if c == ord('q'):  # 'q' (113): quit
        node.close()
    # Space (32): store the current marker position. Use identity test
    # ("is not None") — the idiomatic None check, not "!= None".
    if c == ord(' ') and arp_pose is not None:
        print("New Point Added", arp_pose.p)
        collected_points.append([
            arp_pose.p.x(), arp_pose.p.y(), arp_pose.p.z()
        ])
        # Every points_per_object-th point completes one object.
        if len(collected_points) % points_per_object == 0:
            print("New Object Stored")
Example #2
0
def cameraCallback(frame):
    """Per-frame callback: detect the ARP marker, visualize it, and
    append its 3D position to ``collected_points`` on a space keypress.

    Uses module-level globals: ``arp``, ``camera``, ``node``,
    ``collected_points``, ``points_per_object``.
    """
    #⬢⬢⬢⬢⬢➤ Grabs image from Frame
    img = frame.rgb_image.copy()

    arp_pose = arp.detect(img, debug_draw=True)
    if arp_pose:
        # Reproject the marker's 3D position into pixel coordinates.
        img_points = cvutils.reproject3DPoint(arp_pose.p.x(),
                                              arp_pose.p.y(),
                                              arp_pose.p.z(),
                                              camera=camera)

        # Filled red marker dot.
        cv2.circle(img, (int(img_points[0]), int(img_points[1])), 5,
                   (0, 0, 255), -1)

    #⬢⬢⬢⬢⬢➤ Show
    cv2.imshow("output", img)
    c = cv2.waitKey(1)
    if c == ord('q'):  # 'q' (113): quit
        node.close()
    # Space (32): record the point. Identity check ("is not None") is the
    # correct None comparison, not "!= None".
    if c == ord(' ') and arp_pose is not None:
        print("New Point Added", arp_pose.p)
        collected_points.append(
            [arp_pose.p.x(), arp_pose.p.y(),
             arp_pose.p.z()])
        if len(collected_points) % points_per_object == 0:
            print("New Object Stored")
Example #3
0
File: arp_labeler.py  Project: m4nh/ars
def cameraCallback(frame):
    """Per-frame callback: detect the ARP marker and collect labeled 3D
    points, optionally transformed into world coordinates.

    Uses module-level globals: ``arp``, ``camera``, ``node``,
    ``use_world_coordinates``, ``robot_ee_tf_name``,
    ``robot_world_tf_name``, ``robot_to_camera_pose``,
    ``collected_points``, ``points_per_object``, ``saveOutput``.

    Keys: 'q' saves output and closes the node; space stores a point.
    """
    #⬢⬢⬢⬢⬢➤ Grabs image from Frame
    img = frame.rgb_image.copy()

    if use_world_coordinates:
        # End-effector pose in the world frame (latest transform: -1).
        robot_pose = node.retrieveTransform(
            robot_ee_tf_name,
            robot_world_tf_name,
            -1
        )
        # Identity test ("is None") is the idiomatic None check.
        if robot_pose is None:
            print("Robot pose not ready!")
            return
        else:
            # Camera pose = end-effector pose composed with the
            # hand-eye extrinsics.
            camera_pose = robot_pose * robot_to_camera_pose

    arp_pose = arp.detect(img, debug_draw=True)
    if arp_pose:
        # Reproject the marker's 3D position into the image.
        img_points = cvutils.reproject3DPoint(
            arp_pose.p.x(),
            arp_pose.p.y(),
            arp_pose.p.z(),
            camera=camera
        )

        # Filled red dot at the reprojected position.
        cv2.circle(
            img,
            (int(img_points[0]), int(img_points[1])),
            5,
            (0, 0, 255),
            -1
        )

    #⬢⬢⬢⬢⬢➤ Show
    cv2.imshow("output", img)
    c = cv2.waitKey(1)
    if c == ord('q'):  # 'q' (113): save and quit
        saveOutput()
        node.close()
    # Space (32): store the current marker position.
    if c == ord(' ') and arp_pose is not None:

        if use_world_coordinates:
            # Lift the camera-frame pose into world coordinates.
            arp_pose = camera_pose * arp_pose

        print("New Point Added", arp_pose.p)
        collected_points.append([
            arp_pose.p.x(), arp_pose.p.y(), arp_pose.p.z()
        ])
        if len(collected_points) % points_per_object == 0:
            print("New Object Stored")
Example #4
0
def cameraCallback(frame):
    """Per-frame callback: detect the ARP marker, draw the virtual rod/body
    objects and any collected points/boxes, and publish a "new point" event
    on a space keypress.

    Uses module-level globals: ``windows_config``, ``arp``, ``camera``,
    ``camera_extrinsics``, ``node``, ``current_arp_pose``,
    ``collected_points``, ``collected_boxes``, ``new_point_publisher``.
    """
    global current_arp_pose, collected_boxes, windows_config
    # Lazily create the display window once, on the first frame.
    if not windows_config:
        windows_config = True
        cv2.namedWindow("img", cv2.WINDOW_NORMAL)

    #⬢⬢⬢⬢⬢➤ Grabs image from Frame
    img = frame.rgb_image.copy()

    # Latest (-1) end-effector pose in the robot base frame.
    camera_pose = node.retrieveTransform(
        "/comau_smart_six/link6", "/comau_smart_six/base_link", -1)
    # Identity test ("is None") is the idiomatic None check.
    if camera_pose is None:
        print("No camera pose available")
        return

    # Compose with the hand-eye extrinsics to get the camera pose.
    camera_pose = camera_pose * camera_extrinsics

    arp_pose = arp.detect(img, debug_draw=False, contributes_output=None)
    if arp_pose:
        current_arp_pose = arp_pose

        # Virtual tool model: a thin 6 cm rod topped by a 5x5x16 cm body.
        rod_p = PyKDL.Frame(PyKDL.Vector(0, 0, 0.06))
        vob_rod = VirtualObject(
            arp_pose, size=[0.005, 0.005, rod_p.p.z()])
        vob_body = VirtualObject(
            arp_pose * rod_p, size=[0.05, 0.05, 0.22 - rod_p.p.z()])

        vob_rod.draw(img, camera=camera, color=(54, 67, 244), thickness=1)
        vob_body.draw(img, camera=camera, color=(54, 67, 244), thickness=2)

    # Draw previously collected 3D points (identity camera pose).
    for p in collected_points:
        center = cvutils.reproject3DPoint(
            p[0], p[1], p[2], camera=camera, camera_pose=PyKDL.Frame())
        cv2.circle(img, center, 5, (243, 150, 33), -1)

    for b in collected_boxes:
        vo = b.buildVirtualObject()
        vo.draw(img, camera=camera, color=(243, 150, 33))
        # print() function form for Python 3 compatibility (the original
        # used a Python 2 print statement).
        print("B", transformations.KDLtoNumpyVector(vo), b.size)

    cv2.imshow("img", img)
    c = cv2.waitKey(1)
    if c == ord(' '):  # space (32): signal that a new point was requested
        evt = Bool()
        evt.data = True
        new_point_publisher.publish(evt)
Example #5
0
    def commonPointFrames(self, x, frames):
        """Optimization cost: how tightly a candidate relative point maps
        to a single image location across all observation frames.

        :param x: parameter vector decoded by
            ``transformations.KDLFromArray(x, fmt='RPY')``; only its
            translation component (``.p``) is used here.
        :param frames: iterable of poses; each is composed with the
            candidate relative point and reprojected into the image.
        :return: the cluster error computed by ``computeClusterError``
            over all reprojected points (lower is better).

        Note: ``camera`` is read from module scope, not from ``self``.
        """
        relative_frame = transformations.KDLFromArray(x, fmt='RPY')

        cumulation_points = []
        for frame in frames:
            # Transform the candidate point into this frame's coordinates.
            computed_frame = frame * relative_frame.p

            img_points = cvutils.reproject3DPoint(
                computed_frame.x(),
                computed_frame.y(),
                computed_frame.z(),
                camera=camera
            )

            cumulation_points.append(img_points)

        error = self.computeClusterError(cumulation_points)
        if ArpBuilder.DEBUG_OPTIMIZITION_OUTPUT:
            # print() function form for Python 3 compatibility (the
            # original used a Python 2 print statement).
            print(error)
        return error
Example #6
0
File: arp_labeler.py  Project: m4nh/ars
def cameraCallback(frame):
    """Per-frame labeling callback: detect the ARP marker and collect 3D
    points, optionally converted to world coordinates.

    Uses module-level globals: ``arp``, ``camera``, ``node``,
    ``use_world_coordinates``, ``robot_ee_tf_name``,
    ``robot_world_tf_name``, ``robot_to_camera_pose``,
    ``collected_points``, ``points_per_object``, ``saveOutput``.

    Keys: 'q' saves output and closes the node; space stores a point.
    """
    #⬢⬢⬢⬢⬢➤ Grabs image from Frame
    img = frame.rgb_image.copy()

    if use_world_coordinates:
        # Latest (-1) end-effector pose in the world frame.
        robot_pose = node.retrieveTransform(robot_ee_tf_name,
                                            robot_world_tf_name, -1)
        # Identity test ("is None") is the idiomatic None check.
        if robot_pose is None:
            print("Robot pose not ready!")
            return
        else:
            # Compose with the hand-eye extrinsics.
            camera_pose = robot_pose * robot_to_camera_pose

    arp_pose = arp.detect(img, debug_draw=True)
    if arp_pose:
        # Reproject the marker's 3D position into pixel coordinates.
        img_points = cvutils.reproject3DPoint(arp_pose.p.x(),
                                              arp_pose.p.y(),
                                              arp_pose.p.z(),
                                              camera=camera)

        # Filled red marker dot.
        cv2.circle(img, (int(img_points[0]), int(img_points[1])), 5,
                   (0, 0, 255), -1)

    #⬢⬢⬢⬢⬢➤ Show
    cv2.imshow("output", img)
    c = cv2.waitKey(1)
    if c == ord('q'):  # 'q' (113): save and quit
        saveOutput()
        node.close()
    # Space (32): store the current marker position.
    if c == ord(' ') and arp_pose is not None:

        if use_world_coordinates:
            # Lift the camera-frame pose into world coordinates.
            arp_pose = camera_pose * arp_pose

        print("New Point Added", arp_pose.p)
        collected_points.append(
            [arp_pose.p.x(), arp_pose.p.y(),
             arp_pose.p.z()])
        if len(collected_points) % points_per_object == 0:
            print("New Object Stored")
Example #7
0
files = sorted(glob.glob(images_folder + '*.jpg'))

for filename in files:
    basename = os.path.basename(filename)
    base = basename.split('.')[0]

    img = cv2.imread(filename)

    contributes = []
    arp_pose = arp.detect(img, debug_draw=False, contributes_output=None)

    if arp_pose:
        print arp_pose
        img_points = cvutils.reproject3DPoint(
            arp_pose.p.x(),
            arp_pose.p.y(),
            arp_pose.p.z(),
            camera=camera
        )

        rod_p = PyKDL.Frame(PyKDL.Vector(0, 0, 0.06))

        vob_rod = VirtualObject(
            arp_pose, size=[0.005, 0.005, rod_p.p.z()])
        vob_body = VirtualObject(
            arp_pose * rod_p, size=[0.05, 0.05, 0.22 - rod_p.p.z()])

        vob_rod.draw(img, camera=camera, color=(54, 67, 244), thickness=1)
        vob_body.draw(img, camera=camera, color=(54, 67, 244), thickness=2)

        cv2.circle(
            img,