Ejemplo n.º 1
0
 def getCameraPose(self, index):
     """Return the camera pose (KDL frame) for the sample at ``index``.

     The index wraps around the dataset size; the stored robot pose is
     composed with the fixed robot-to-camera transform.
     """
     idx = index % self.size()
     base_frame = transformations.KDLFromArray(
         self.robot_poses[idx], fmt='XYZQ')
     hand_eye_frame = transformations.KDLFromArray(
         self.robot_to_camera_pose, fmt='XYZQ')
     # Chain: world -> robot end-effector -> camera.
     return base_frame * hand_eye_frame
Ejemplo n.º 2
0
 def findBestPoses(self, markers_id_list, x0=None):
     """Optimize the relative pose of each marker in ``markers_id_list``.

     Parameters
     ----------
     markers_id_list : iterable of int
         Ids of the markers whose poses are optimized.
     x0 : array-like of 6 floats, optional
         Initial optimizer guess (x, y, z, roll, pitch, yaw).
         Defaults to the zero pose.

     Returns
     -------
     dict
         ``{"markers": {id: {"pose": [6 floats], "size": marker_size}}}``.
     """
     # Fix: avoid a mutable ndarray default argument shared across calls.
     if x0 is None:
         x0 = np.array([-0.00, 0, 0.0, 0, 0, 0])
     res_map = {"markers": {}}
     for marker_id in markers_id_list:
         print("Optimizing marker:", marker_id)
         res = self.findBestPose(marker_id, x0)
         # The optimizer solves for the inverse transform; invert back.
         frame = transformations.KDLFromArray(res.x, fmt='RPY').Inverse()
         pose = transformations.KDLtoNumpyVector(frame, fmt='RPY')
         res_map["markers"][marker_id] = {
             "pose": pose.reshape(6).tolist(),
             "size": self.marker_size
         }
         print(pose)
     return res_map
Ejemplo n.º 3
0
Archivo: arp.py Proyecto: m4nh/ars
    def __init__(self, configuration_file=""):
        """Load an ARP configuration from a JSON file.

        Falls back to the built-in prototype configuration when no file
        is given. Builds per-marker lookup tables keyed by integer id.
        """
        if configuration_file == "":
            # No file supplied: use the default prototype configuration.
            self.options = ARPConfiguration.getPrototypeConfiguration()
        else:
            with open(configuration_file) as data_file:
                self.options = json.load(data_file)

        # A configuration is valid iff it contains at least one option.
        self.valid = len(self.options) > 0
        self.markers_map = {}
        self.markers_poses = {}
        if self.valid:
            for marker_id, marker_data in self.options["markers"].iteritems():
                key = int(marker_id)
                self.markers_map[key] = marker_data["size"]
                self.markers_poses[key] = transformations.KDLFromArray(
                    marker_data["pose"])
Ejemplo n.º 4
0
    def commonPointFrames(self, x, frames):
        """Cluster error of one relative 3D point reprojected through frames.

        ``x`` = (x, y, z, roll, pitch, yaw) defines a point relative to a
        marker; the point is transformed by every frame in ``frames``,
        reprojected into the image, and the spread of the resulting 2D
        points is returned as the optimization error.
        """
        relative_frame = transformations.KDLFromArray(x, fmt='RPY')
        relative_point = relative_frame.p

        projected_points = []
        for marker_frame in frames:
            world_point = marker_frame * relative_point
            # NOTE(review): relies on the module-level `camera` -- confirm.
            projected_points.append(cvutils.reproject3DPoint(
                world_point.x(),
                world_point.y(),
                world_point.z(),
                camera=camera
            ))

        error = self.computeClusterError(projected_points)
        if ArpBuilder.DEBUG_OPTIMIZITION_OUTPUT:
            print(error)
        return error
Ejemplo n.º 5
0
Archivo: arp.py Proyecto: m4nh/ars
    def detect(self, img, debug_draw=False, contributes_output=None):
        """Detect the ARP in ``img`` and return its averaged pose.

        Every detected marker yields one pose estimate: the marker pose
        composed with the inverse of that marker's configured offset.
        Estimates are averaged component-wise (position + quaternion).

        Parameters
        ----------
        img : image
            Input image; drawn on when ``debug_draw`` is True.
        debug_draw : bool
            If True, draw each detected marker on ``img``.
        contributes_output : list or None
            If a list is supplied, each per-marker contribute frame is
            appended to it.

        Returns
        -------
        KDL frame or None
            Mean pose, or None when no marker was detected or the
            averaged position is NaN.
        """
        markers = self.marker_detector.detectMarkersMap(
            img, markers_map=self.arp_configuration.markers_map)

        # Fix: with no detections the average below raised ZeroDivisionError.
        if len(markers) == 0:
            return None

        mean_pose = [0, 0, 0, 0, 0, 0, 0]

        for marker_id, marker in markers.iteritems():
            #⬢⬢⬢⬢⬢➤ Debug Draw
            if debug_draw:
                marker.draw(img)

            #⬢⬢⬢⬢⬢➤ Sum marker single contribute
            contribute = self.arp_configuration.markers_poses[
                marker_id].Inverse()
            contribute = marker * contribute
            if contributes_output is not None:
                contributes_output.append(contribute)
            mean_pose[0] += contribute.p.x()
            mean_pose[1] += contribute.p.y()
            mean_pose[2] += contribute.p.z()
            qx, qy, qz, qw = contribute.M.GetQuaternion()
            mean_pose[3] += qx
            mean_pose[4] += qy
            mean_pose[5] += qz
            mean_pose[6] += qw

        # NOTE(review): the quaternion part is a plain component-wise mean
        # (not re-normalized here); presumably KDLFromArray normalizes.
        mean_pose = np.array([mean_pose], dtype=np.float32) / float(
            len(markers))

        mean_pose = mean_pose.reshape(7)
        mean_pose = transformations.KDLFromArray(mean_pose, fmt='XYZQ')

        if not math.isnan(mean_pose.p.x()):
            return mean_pose

        return None
Ejemplo n.º 6
0
    # NOTE(review): fragment of a larger function -- `markers`, `debug`,
    # `img`, `camera`, `out_list`, `output_folder`, `base` and `basename`
    # are defined before this excerpt.
    for id, marker in markers.iteritems():
        if debug:
            # Draw the detected marker plus a small virtual box on top of it.
            marker.draw(img)
            vob = VirtualObject(marker, size=[0.04, 0.04, 0.01])
            vob.draw(img, camera=camera)

        # One row per marker: [id, x, y, z, qx, qy, qz, qw].
        out_list.append(
            np.hstack((np.array(id).reshape(1, 1), transformations.KDLtoNumpyVector(marker).reshape(1, 7))))

    s = len(out_list)
    out_list = np.array(out_list).reshape(s, 8)
    output_file = os.path.join(output_folder, base + ".txt")
    np.savetxt(output_file, out_list,
               fmt='%d %.8f %.8f %.8f %.8f %.8f %.8f %.8f')

    if debug:
        # Sanity check: reload the rows just written and overlay them in green.
        control = np.loadtxt(output_file)

        if control.shape[0] > 0:
            for c in control:
                m = transformations.KDLFromArray(c[1:8], fmt='XYZQ')
                vob2 = VirtualObject(m, size=[0.04, 0.04, 0.01])
                vob2.draw(img, camera=camera, color=(0, 255, 0))

    if debug:
        cv2.imshow("img", img)
        c = cv2.waitKey(0)
        # 1048689 == 0x100071: 'q' (0x71) with extra modifier bits -- quit key.
        if c == 1048689:
            sys.exit(0)
    print basename, len(markers)
Ejemplo n.º 7
0
# NOTE(review): script fragment -- `detections_folder`, `glob`, `np`,
# `transformations` and `ArpBuilder` come from earlier in the file.
# Hard cap on how many detection files are loaded.
max_number = 330

files = sorted(glob.glob(detections_folder + '*.txt'))

whole_data = []
for filename in files:
    basename = os.path.basename(filename)
    base = basename.split('.')[0]
    # File names look like "<prefix>_<n>.txt"; n orders the frames.
    n = int(base.split('_')[1])
    data = np.loadtxt(filename)

    # Map marker id -> KDL frame for this detection file.
    data_map = {}
    for d in data:
        try:
            id = int(d[0])
            frame = transformations.KDLFromArray(d[1:8], fmt='XYZQ')
            data_map[id] = frame
        except:
            # NOTE(review): bare except deliberately skips malformed rows
            # (e.g. 1-D rows from single-line files) -- consider narrowing
            # to (IndexError, TypeError, ValueError).
            pass

    whole_data.append(data_map)

    if n >= max_number:
        break
    print base, n

# 0.04 is presumably the marker size in meters -- TODO confirm.
arp_builder = ArpBuilder(whole_data, 0.04)

ids = [400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412]
built_cfg = arp_builder.findBestPoses(ids, [0, 0, 0, 0, 0, 0])
Ejemplo n.º 8
0
                                   "/camera/rgb/image_raw/compressed")
# NOTE(review): script fragment -- `node`, `CameraRGB`, `camera_topic` and
# `transformations` are defined before this excerpt.
camera_file = node.getFileInPackage('roars',
                                    'data/camera_calibrations/asus_xtion.yml')
camera = CameraRGB(configuration_file=camera_file,
                   rgb_topic=camera_topic,
                   compressed_image="compressed" in camera_topic)

#⬢⬢⬢⬢⬢➤ Robot pose
robot_ee_tf_name = node.setupParameter("robot_ee_tf_name",
                                       "/comau_smart_six/link6")
robot_world_tf_name = node.setupParameter("robot_world_tf_name",
                                          "/comau_smart_six/base_link")
# Fixed robot pose captured offline, presumably (x, y, z, qx, qy, qz, qw).
arp_robot_pose = transformations.KDLFromArray([
    1.171241733010316866e+00, 2.038211444054240443e-02,
    2.609647481922321988e-01, 3.170776406284797622e-01,
    -2.224684033246829545e-03, 9.483374663308693497e-01,
    -1.062405513804032947e-02
],
                                              fmt='XYZQ')
# Hand-eye calibration: end-effector -> camera transform.
robot_on_hand_transform = transformations.KDLFromArray(
    [
        0.09131591676464931, 0.023330268359173824, -0.19437327402734972,
        -0.7408449656427065, 0.7505081899194715, 0.01462135745716728,
        -0.01655531561119271
        # 0.109010740854854,	0.0194781279660590,	-0.299749509111702,
        # -0.698978847276796, 0.707540085021683, 0.0743762216760345, -0.0726895920769237
    ],
    fmt='XYZQ')
# Compose: world -> robot -> camera.
arp_camera_pose = arp_robot_pose * robot_on_hand_transform

#⬢⬢⬢⬢⬢➤ Points storage
Ejemplo n.º 9
0
# NOTE(review): script fragment -- `node`, `np` and `transformations` are
# defined before this excerpt.
# How many clicked/collected points make up one object.
points_per_object = node.setupParameter("points_per_object", 6)
collected_points = []
output_file = node.setupParameter("output_file", "/tmp/arp_objects.json")

#⬢⬢⬢⬢⬢➤ Robot pose
use_world_coordinates = node.setupParameter("use_world_coordinates", True)
if use_world_coordinates:
    robot_ee_tf_name = node.setupParameter("robot_ee_tf_name",
                                           "/comau_smart_six/link6")
    robot_world_tf_name = node.setupParameter("robot_world_tf_name",
                                              "/comau_smart_six/base_link")
    camera_extrinsics_file = node.setupParameter(
        "camera_extrinsics_file",
        "/home/daniele/Desktop/datasets/roars_2017/indust/camera_extrinsics.txt"
    )
    # Extrinsics file holds a 7-element pose (XYZ + quaternion).
    robot_to_camera_pose = transformations.KDLFromArray(
        np.loadtxt(camera_extrinsics_file), fmt='XYZQ')


#⬢⬢⬢⬢⬢➤ Splice array in sub arrays
def chunks(l, n):
    """Yield successive n-sized chunks of *l* (the last may be shorter)."""
    total = len(l)
    start = 0
    while start < total:
        yield l[start:start + n]
        start += n


#⬢⬢⬢⬢⬢➤ Save Output
def saveOutput():
    global collected_points, points_per_object
    probable_objects = list(chunks(collected_points, points_per_object))
    objects = []