Example #1
                # positions of pitch axis
                # FK over the first three joints only (wrist joints and
                # L3/L4 zeroed) gives the pitch-axis position for the
                # commanded configuration; [:3, -1] extracts the translation
                # column of the 4x4 homogeneous transform.
                pos_cmd_temp = dvrkKinematics.fk(
                    [q_cmd[0], q_cmd[1], q_cmd[2], 0, 0, 0],
                    L1=dvrkVar.L1,
                    L2=dvrkVar.L2,
                    L3=0,
                    L4=0)[:3, -1]
                # Paired sample: commanded (FK) vs. physically measured
                # position. NOTE(review): 'pt' is defined upstream, outside
                # this excerpt — presumably the camera-measured tool
                # position; confirm against the enclosing loop.
                pos_cmd_.append(pos_cmd_temp)
                pos_phy_.append(pt)
                print('index: ', len(pos_cmd_), '/', len(joint_traj))
                print('pos_des: ', pos_cmd_temp)
                print('pos_act: ', pt)
                print(' ')

            # Visualize
            cv2.imshow("images", color)
            # waitKey(1) keeps the window responsive; the key code is
            # intentionally discarded.
            cv2.waitKey(1) & 0xFF
            # cv2.waitKey(0)

# Persist the collected calibration samples to .npy files.
if use_Trc:
    # Camera-to-robot transform already known: store raw joint-space pairs.
    for fname, samples in (('q_cmd', q_cmd_), ('q_phy', q_phy_)):
        np.save(fname, samples)
else:
    # Transform unknown: store the Cartesian pairs, then estimate the
    # rigid transform (robot -> camera) from them and save it as well.
    np.save('pos_cmd', pos_cmd_)
    np.save('pos_phy', pos_phy_)
    np.save('Trc', U.get_rigid_transform(np.array(pos_phy_), np.array(pos_cmd_)))
np.save('t_stamp', time_stamp)
print("Data is successfully saved")
    def collect_data_joint(self,
                           j1,
                           j2,
                           j3,
                           j4,
                           j5,
                           j6,
                           transform='known'):  # j1, ..., j6: joint trajectory
        """Drive the dVRK arm along a joint trajectory and record data pairs.

        For each sample the arm is commanded to the desired joints, a Zivid
        3D image is captured, and the ball markers on the tool are detected
        to estimate the actual pose.

        Args:
            j1..j6: equal-length sequences of desired values for joints 1-6.
            transform: 'known'  -> camera-to-robot transform is known;
                record (q_des, q_act) joint-angle pairs.
                'unknown' -> record (pos_des, pos_act) Cartesian pairs and
                estimate the rigid transform from them on exit.

        Side effects:
            Shows a live overlay window via OpenCV and saves the collected
            data as .npy files in a finally block, so partial data survives
            interruption.

        Bug fix vs. previous revision: when ball detection failed, `pt` was
        left undefined (NameError on the first iteration) or stale from a
        previous iteration, silently polluting the 'unknown'-transform data
        set. Detection is now tracked explicitly and undetected samples are
        skipped in the 'unknown' branch.
        """
        try:
            time_st = time.time()  # experiment start time (sec)
            time_stamp = []
            q_des = []
            q_act = []
            pos_des = []
            pos_act = []
            assert len(j1) == len(j2) == len(j3) == len(j4) == len(j5) == len(
                j6)
            for qd1, qd2, qd3, qd4, qd5, qd6 in zip(j1, j2, j3, j4, j5, j6):
                joint1 = [qd1, qd2, qd3, qd4, qd5, qd6]
                self.dvrk.set_joint(joint1=joint1)

                # Capture image from Zivid and crop to the working region
                self.zivid.capture_3Dimage()
                img_color, img_depth, img_point = self.BD.img_crop(
                    self.zivid.image, self.zivid.depth, self.zivid.point)
                img_color = cv2.cvtColor(img_color, cv2.COLOR_RGB2BGR)
                img_color_org = np.copy(img_color)

                # Find ball markers
                pbs = self.BD.find_balls(img_color_org, img_depth, img_point)
                img_color = self.BD.overlay_balls(img_color, pbs)

                # Measured pitch-axis position; None until detection succeeds
                # this iteration (never reuse a stale value).
                pt = None
                if pbs[0] == [] or pbs[1] == []:
                    # Detection failed: fall back to zero joint angles for
                    # the 'known' branch (original behavior).
                    qa1 = qa2 = qa3 = qa4 = qa5 = qa6 = 0.0
                else:
                    # Find tool position, joint angles, and overlay
                    pt = self.BD.find_tool_position(
                        pbs[0], pbs[1])  # tool position of pitch axis
                    pt = np.array(pt) * 0.001  # mm -> (m)
                    if transform == 'known':
                        # Map measurement from camera frame to robot frame
                        pt = self.BD.Rrc.dot(pt) + self.BD.trc
                        qa1, qa2, qa3 = self.BD.ik_position(pt)

                        # Find tool orientation, joint angles, and overlay;
                        # need at least 3 of the 4 orientation balls.
                        count_pbs = [pbs[2], pbs[3], pbs[4], pbs[5]]
                        if count_pbs.count([]) >= 2:
                            qa4 = 0.0
                            qa5 = 0.0
                            qa6 = 0.0
                        else:
                            Rm = self.BD.find_tool_orientation(
                                pbs[2], pbs[3], pbs[4],
                                pbs[5])  # orientation of the marker
                            qa4, qa5, qa6 = self.BD.ik_orientation(
                                qa1, qa2, Rm)
                            img_color = self.BD.overlay_tool(
                                img_color, [qa1, qa2, qa3, qa4, qa5, qa6],
                                (0, 255, 0))

                # Append data pairs
                if transform == 'known':
                    # joint angles
                    q_des.append([qd1, qd2, qd3, qd4, qd5, qd6])
                    q_act.append([qa1, qa2, qa3, qa4, qa5, qa6])
                    time_stamp.append(time.time() - time_st)
                    print('index: ', len(q_des), '/', len(j1))
                    print('t_stamp: ', time.time() - time_st)
                    print('q_des: ', [qd1, qd2, qd3, qd4, qd5, qd6])
                    print('q_act: ', [qa1, qa2, qa3, qa4, qa5, qa6])
                    print(' ')
                elif transform == 'unknown':
                    # positions; skip samples without a fresh detection so
                    # the rigid-transform fit is not polluted by stale or
                    # undefined measurements.
                    if pt is None:
                        print('ball detection failed; sample skipped')
                    else:
                        pos_des_temp = self.BD.fk_position(q1=qd1,
                                                           q2=qd2,
                                                           q3=qd3,
                                                           q4=0,
                                                           q5=0,
                                                           q6=0,
                                                           L1=self.BD.L1,
                                                           L2=self.BD.L2,
                                                           L3=0,
                                                           L4=0)
                        pos_des.append(pos_des_temp)
                        pos_act.append(pt)
                        print('index: ', len(pos_des), '/', len(j1))
                        print('pos_des: ', pos_des_temp)
                        print('pos_act: ', pt)
                        print(' ')

                # Visualize (waitKey keeps the window responsive)
                cv2.imshow("images", img_color)
                cv2.waitKey(1) & 0xFF
                # cv2.waitKey(0)
        finally:
            # Save data to a file even if the loop was interrupted
            if transform == 'known':
                np.save('q_des_raw', q_des)
                np.save('q_act_raw', q_act)
            elif transform == 'unknown':
                # Get transform from robot to camera
                np.save('pos_des', pos_des)
                np.save('pos_act', pos_act)
                T = U.get_rigid_transform(np.array(pos_act), np.array(pos_des))
                np.save('Trc', T)
            np.save('t_stamp_raw', time_stamp)
            print("Data is successfully saved")
# Global matplotlib styling for the calibration plots.
plt.style.use('seaborn-whitegrid')
# alternative: plt.style.use('bmh')
for group, opts in (
        ('font', {'size': 12}),                         # default text size
        ('axes', {'titlesize': 20}),                    # axes title
        ('axes', {'labelsize': 13}),                    # x/y labels
        ('xtick', {'labelsize': 13}),                   # tick labels
        ('ytick', {'labelsize': 13}),                   # tick labels
        ('legend', {'fontsize': 17}),                   # legend text
        ('figure', {'titlesize': 10}),                  # figure title
):
    plt.rc(group, **opts)


file_path = root + 'experiment/1_rigid_transformation/'
pos_des = np.load(file_path + 'pos_cmd.npy')  # robot's position
pos_act = np.load(file_path + 'pos_phy.npy')  # position measured by camera
T = U.get_rigid_transform(np.array(pos_act), np.array(pos_des))
# T = np.load(file_path + 'Trc.npy')  # rigid transform from cam to rob
R = T[:3, :3]
t = T[:3, 3]
print(R, t)
# Bring the camera measurements into the robot frame.
pos_act = np.array([R.dot(p) + t for p in pos_act])

# RMSE error calc
RMSE = np.sqrt(np.sum((pos_des - pos_act) ** 2) / len(pos_des))
print("RMSE=", RMSE, '(m)')

# plot trajectory of des & act position
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.plot(pos_des[:, 0], pos_des[:, 1], pos_des[:, 2], 'b.-')
plt.plot(pos_act[:, 0], pos_act[:, 1], pos_act[:, 2], 'r.-')