Example #1
def convert_poses_to_trajectory(poses,
                                initial_joint_state,
                                linear_vel,
                                leeway=2.0,
                                warn_threshold=0):

    last_joints = initial_joint_state.position
    last_pos = np.array(urkin.fwd_kin(last_joints)[:3, 3]).T[0]

    traj = JointTrajectory()
    traj.header.stamp = rospy.Time.now()
    traj.joint_names = initial_joint_state.name
    traj.points.append(construct_joint_point(last_joints, 0.0))

    # Offset correction (admittedly a hack): the forward kinematics are not
    # exact, so shift every target pose by the initial FK error.

    believed_pos = point_to_array(
        urkin.fwd_kin(last_joints, o_unit='p').position)
    correction = believed_pos - point_to_array(poses[0].position)

    joints = [last_joints]

    t = 0.0
    for pose in poses:

        pose.position = Point(*point_to_array(pose.position) + correction)
        new_pos = point_to_array(pose.position)
        new_joints = urkin.inv_kin(pose, last_joints)
        dt = max(
            np.linalg.norm(new_pos - last_pos) / linear_vel * leeway, 0.01)
        t += dt
        traj.points.append(construct_joint_point(new_joints, t))

        last_joints = new_joints
        last_pos = new_pos

        joints.append(new_joints)

    if warn_threshold > 0:
        joints = np.array(joints)
        sum_abs_diff = np.abs(joints[:-1] - joints[1:]).sum(axis=1)
        if np.any(sum_abs_diff > warn_threshold):
            rospy.logwarn(
                'Detected a large joint movement! Either the path is near a '
                'singularity or the path resolution needs to be increased.')
            sys.exit(1)

    goal = FollowJointTrajectoryGoal()
    goal.trajectory = traj

    return goal
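
# A minimal usage sketch (an assumption, not part of the original snippet):
# send the goal built above through an already-connected actionlib
# SimpleActionClient for a follow_joint_trajectory server.
def send_poses(client, poses, joint_state, linear_vel=0.05):
    # warn_threshold=1.0 rad is a hypothetical choice for the jump check
    goal = convert_poses_to_trajectory(poses, joint_state, linear_vel,
                                       warn_threshold=1.0)
    client.send_goal(goal)
    client.wait_for_result()
    return client.get_result()
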
def choose_q(out_invKine, index):
    # Wrap the first five joint angles into [-pi, pi].
    out_invKine[0:5, :] = (out_invKine[0:5, :]
                           - (out_invKine[0:5, :] > math.pi) * 2 * math.pi
                           + (out_invKine[0:5, :] < -math.pi) * 2 * math.pi)
    for i in range(8):
        q = [joint[0, i] for joint in out_invKine]
        print('q %s' % q)
        print('fwd %s' % fwd_kin(q, o_unit='p'))
    return [joint[0, index] for joint in out_invKine]
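
# A hedged demo of choose_q on synthetic input, assuming invKine() returns a
# 6x8 np.matrix of joint solutions (one column per solution), which is what
# the loops above imply; the random matrix is purely illustrative.
def demo_choose_q():
    raw = np.matrix(np.random.uniform(-2 * math.pi, 2 * math.pi, (6, 8)))
    return choose_q(raw, 0)  # solution 0, first five joints wrapped
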
def main():
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        def dynamics(x, t, tau_v, g_v, q0_v, grasping_point, sess):
            position = grasping_point
            s_v = canoSystem(tau_v,t)
            feeddict = {g[0]:g_v[0],g[1]:g_v[1],g[2]:g_v[2],g[3]:g_v[3],g[4]:g_v[4],g[5]:g_v[5],
                        q[0]:x[0],q[1]:x[1],q[2]:x[2],q[3]:x[3],q[4]:x[4],q[5]:x[5],
                        qd[0]:x[6],qd[1]:x[7],qd[2]:x[8],qd[3]:x[9],qd[4]:x[10],qd[5]:x[11],
                        q0[0]:q0_v[0],q0[1]:q0_v[1],q0[2]:q0_v[2],q0[3]:q0_v[3],q0[4]:q0_v[4],q0[5]:q0_v[5],
                        tau:tau_v,s:s_v,xg:position[0],yg:position[1],zg:position[2]
                        }
            qdd1_v, qdd2_v, qdd3_v, qdd4_v, qdd5_v, qdd6_v = sess.run(
                dmpNet, feed_dict=feeddict)
            dx = [x[6], x[7], x[8], x[9], x[10], x[11],
                  qdd1_v, qdd2_v, qdd3_v, qdd4_v, qdd5_v, qdd6_v]
            return dx
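
        # For reference, the DMP rollout comes from integrating dynamics()
        # with scipy's odeint, as in the commented-out call further down:
        #   q_ode = odeint(dynamics, x0, t,
        #                  args=(tau_v, g_v, q0_v, grasping_point, sess))
        # Each row of q_ode is the state [q1..q6, qd1..qd6] at the matching
        # time in t.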

        period = 50
        t = np.linspace(2, period, 100)
        print(t)
        tau_v = float(1)/period
        # q0_v = [-0.0003235975848596695, -1.040771786366598, 1.6213598251342773, -0.34193402925600225, 1.5711277723312378, 3.141711950302124]
        q0_v = [0, -pi/2, pi/2, 0, pi/2, pi]
        v0 = [0,0,0,0,0,0]
        x0 = []
        x0.extend(q0_v)
        x0.extend(v0)
        print("Initiate Object Detection")
        STATE = 'NONE'
        _,_,_, last_capture = IntelRealsense.pingpong_detection(display = False)

        T_camera2base = IntelRealsense.transformation_camera2base()
        T_end2base = IntelRealsense.transformation_end2base()
        gripper = np.array([[0], [0], [0.1], [1]])
        Y = np.array([[0], [0], [0], [0], [0], [0]])
        X = np.array([[np.random.normal(0, sigma_x)],
                      [np.random.normal(0, sigma_y)],
                      [np.random.normal(0, sigma_d)],
                      [np.random.normal(0, sigma_v_x)],
                      [np.random.normal(0, sigma_v_y)],
                      [np.random.normal(0, sigma_v_d)]])
        X, P = init_state(X, Y, 0)
        print('First State is %s' % X)
        I = np.identity(6, dtype=float)
        H = np.identity(6, dtype=float)
        count = 0
        global client
        try:
            rospy.init_node("dmp", anonymous=True, disable_signals=True)
            client = actionlib.SimpleActionClient('follow_joint_trajectory', FollowJointTrajectoryAction)
            client.wait_for_server()
            print ("Connected to server")
            parameters = rospy.get_param(None)
            index = str(parameters).find('prefix')
            if (index > 0):
                prefix = str(parameters)[index+len("prefix': '"):(index+len("prefix': '")+str(parameters)[index+len("prefix': '"):-1].find("'"))]
                for i, name in enumerate(JOINT_NAMES):
                    JOINT_NAMES[i] = prefix + name
            book = Workbook()
            sheet = book.active
            excel_row = 1
            while(True):
                initial_time = time.time()
                pp_x, pp_y, pp_depth, capture = IntelRealsense.pingpong_detection(display = True)
                pingpong_camera = IntelRealsense.reverse_perspective_projection(
                    pp_x, pp_y, pp_depth)
                pingpong_camera[3] = 1
                processing = capture - last_capture
                timestep = processing
                pingpong_base = T_camera2base.dot(pingpong_camera)
                v_x, v_depth, v_z, a_x, a_y, a_depth, STATE = IntelRealsense.pingpong_velocity(
                    STATE, pingpong_base[0, 0], pingpong_base[1, 0],
                    pingpong_base[2, 0], capture, display=False)
                last_capture = capture

                # use [row, 0] to store scalars; the rows of pingpong_base are
                # 1-element arrays, which openpyxl cannot write to a cell
                sheet.cell(row=excel_row, column=1).value = pingpong_base[0, 0]
                sheet.cell(row=excel_row, column=2).value = pingpong_base[1, 0]
                sheet.cell(row=excel_row, column=3).value = pingpong_base[2, 0]
                sheet.cell(row=excel_row, column=4).value = v_x
                sheet.cell(row=excel_row, column=5).value = v_depth
                sheet.cell(row=excel_row, column=6).value = v_z
                sheet.cell(row=excel_row, column=7).value = a_x
                sheet.cell(row=excel_row, column=8).value = a_y
                sheet.cell(row=excel_row, column=9).value = a_depth
                excel_row += 1
                if excel_row == 10000:
                    book.save('/home/s/catkin_ws/src/camera/src/real_world_variance.xlsx')
                    print('Excel Saved')
                # print('Real World Coordinates From KF \n %s \n' %pingpong_camera)
                # print('Ping-Pong Position (Base Frame) \n %s \n' %pingpong_base)
                grasping_point = [pingpong_base[0], pingpong_base[1], pingpong_base[2]]
                # print('grasping point %s' %grasping_point)
                pingpong_base = pingpong_base - T_end2base.dot(gripper)
                inp_invKine = IntelRealsense.transformation_end2base(
                    pingpong_base[0, 0], pingpong_base[1, 0],
                    pingpong_base[2, 0])
                inv_pingpong = invKine(inp_invKine)
                # print('IK \n %s \n' %inv_pingpong)
                grasping_q = cost_func(inv_pingpong)
                # print('Grasping q \n%s\n' %grasping_q)
                g_v = grasping_q
                print('grasping_q %s' % grasping_q)
                fwd_grasping = fwd_kin(g_v, o_unit='p')
                print('fwd Grasping q %s' % fwd_grasping)
                inp = raw_input("Set Home! (Enter to continue)")
                set_home(set_duration=10)
                # inp = raw_input("Continue? y/n: ")[0]
                # if (inp == 'y'):
                # q_ode = odeint(dynamics,x0,t,args=(tau_v,g_v,q0_v,grasping_point,sess))
                q_ode = np.load('q_sample1.npy')
                # set_home(g_v, 3)
                # print('q_ode %s' %q_ode)
                t = np.linspace(0.1, 10, 100)
                move_dmp_path(q_ode,t)
                final_time = time.time()
                print("Processing Time : %s" %(final_time-initial_time))
                # else:
                    # print ("Halting program")
                    # break
                # print('\n#############################################\n')
        except KeyboardInterrupt:
            rospy.signal_shutdown("KeyboardInterrupt")
            raise
Example #4
m_rest = 100

# fontsize
fs = 40

motor_pos = np.array(range(-80, 150, 20)) - m_rest
h = np.array([
    1 + 6.0 / 16, 1 + 3.0 / 16, 1 + 1.0 / 16, 14.5 / 16, 11.5 / 16, 9.5 / 16,
    6.5 / 16, 4.0 / 16, 1.5 / 16, 0, -1 + 12.5 / 16, -1 + 10.0 / 16
])
# convert to mm
h = h * -25.4

# forward-kinematics prediction for each motor position
h_calc = np.array([k.fwd_kin(m) for m in motor_pos])

# error = calculated - physical (actual)
e = h_calc - h
# square error
se = np.power(e, 2)
# absolute error
ae = np.abs(e)
print('absolute error (mm): %s' % ae)
print('mean |error| (mm): %s' % np.mean(ae))
print('std |error| (mm): %s' % np.std(ae))
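# The squared error `se` is computed above but never reported; printing the
# RMSE alongside the mean/std is a small, hedged addition for completeness.
print('RMSE (mm): %s' % np.sqrt(np.mean(se)))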

plt.plot(motor_pos, h)
plt.plot(motor_pos, h_calc, '--')
plt.legend([r"Actual", r"Calculated"], fontsize=20)
plt.grid(True)
# axis labels (assumed units) and show() added so the script displays the
# figure when run standalone; fs is the fontsize declared above
plt.xlabel("motor position (counts from rest)", fontsize=fs)
plt.ylabel("height (mm)", fontsize=fs)
plt.show()
Example #5
def main():
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        def dynamics(x, t, tau_v, g_v, q0_v, grasping_point, sess):
            position = grasping_point
            s_v = canoSystem(tau_v, t)
            feeddict = {
                g[0]: g_v[0],
                g[1]: g_v[1],
                g[2]: g_v[2],
                g[3]: g_v[3],
                g[4]: g_v[4],
                g[5]: g_v[5],
                q[0]: x[0],
                q[1]: x[1],
                q[2]: x[2],
                q[3]: x[3],
                q[4]: x[4],
                q[5]: x[5],
                qd[0]: x[6],
                qd[1]: x[7],
                qd[2]: x[8],
                qd[3]: x[9],
                qd[4]: x[10],
                qd[5]: x[11],
                q0[0]: q0_v[0],
                q0[1]: q0_v[1],
                q0[2]: q0_v[2],
                q0[3]: q0_v[3],
                q0[4]: q0_v[4],
                q0[5]: q0_v[5],
                tau: tau_v,
                s: s_v,
                xg: position[0],
                yg: position[1],
                zg: position[2]
            }
            qdd1_v, qdd2_v, qdd3_v, qdd4_v, qdd5_v, qdd6_v = sess.run(
                dmpNet, feed_dict=feeddict)
            dx = [
                x[6], x[7], x[8], x[9], x[10], x[11], qdd1_v, qdd2_v, qdd3_v,
                qdd4_v, qdd5_v, qdd6_v
            ]
            return dx

        period = 20
        t = np.linspace(2, period, 100)
        tau_v = float(1) / period
        # q0_v = [-0.0003235975848596695, -1.040771786366598, 1.6213598251342773, -0.34193402925600225, 1.5711277723312378, 3.141711950302124]
        q0_v = [0, -pi / 2, pi / 2, 0, pi / 2, pi]
        v0 = [0, 0, 0, 0, 0, 0]
        x0 = []
        x0.extend(q0_v)
        x0.extend(v0)
        print("Initiate Object Detection")
        STATE = 'NONE'
        _, _, _, last_capture = IntelRealsense.pingpong_detection(
            display=False)
        T_camera2base = IntelRealsense.transformation_camera2base()
        T_end2base = IntelRealsense.transformation_end2base()
        gripper = np.array([[0], [0], [0.1], [1]])
        Y = np.array([[0], [0], [0], [0], [0], [0]])
        X = np.array([[np.random.normal(0, sigma_x)],
                      [np.random.normal(0, sigma_y)],
                      [np.random.normal(0, sigma_d)],
                      [np.random.normal(0, sigma_v_x)],
                      [np.random.normal(0, sigma_v_y)],
                      [np.random.normal(0, sigma_v_d)]])
        X, P, X_pred = init_state(X, Y, 0)
        I = np.identity(6, dtype=float)
        H = np.identity(6, dtype=float)
        count = 0
        global client
        try:
            rospy.init_node("dmp", anonymous=True, disable_signals=True)
            client = actionlib.SimpleActionClient('follow_joint_trajectory',
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()
            print("Connected to server")
            parameters = rospy.get_param(None)
            index = str(parameters).find('prefix')
            if (index > 0):
                prefix = str(parameters)[index + len("prefix': '"):(
                    index + len("prefix': '") +
                    str(parameters)[index + len("prefix': '"):-1].find("'"))]
                for i, name in enumerate(JOINT_NAMES):
                    JOINT_NAMES[i] = prefix + name
            straight_inp = raw_input("Set Straight y/n?")
            if straight_inp == 'y':
                set_home(straight, 5)
            set_home(set_duration=10)
            # inp = raw_input("Continue? (Enter to continue)")
            while (True):
                if STATE == 'GRASPING':
                    print('X: %s' % X)
                    dist = X[0, 0]**2 + X[1, 0]**2 + X[2, 0]**2  # squared distance
                    timestep_stack = 0
                    while (dist >= 0.81):  # 0.81 = 0.9 ** 2
                        timestep_stack += timestep
                        X = grasping_prediction(X, timestep)
                        dist = X[0, 0]**2 + X[1, 0]**2 + X[2, 0]**2
                        print(dist)
                    print('X: %s (timestep_stack %s)' % (X, timestep_stack))
                    # Map the filtered state into the base frame; the state's
                    # y/z components are swapped relative to the base axes.
                    pingpong_base[0] = X[0]
                    pingpong_base[1] = X[2]
                    pingpong_base[2] = X[1]
                    # print('Real World Coordinates From KF \n %s \n' %pingpong_camera)
                    # pingpong_base = T_camera2base.dot(pingpong_camera)
                    print('Ping-Pong Position (Base Frame) \n %s \n' %
                          pingpong_base)
                    # grasping_point = [pingpong_base[0], pingpong_base[1], pingpong_base[2]]
                    # print('grasping point %s' %grasping_point)
                    grasping_point = [
                        pingpong_base[0], pingpong_base[1], pingpong_base[2]
                    ]
                    pingpong_base = pingpong_base - T_end2base.dot(
                        gripper)  # **
                    inp_invKine = IntelRealsense.transformation_end2base(
                        pingpong_base[0, 0], pingpong_base[1, 0],
                        pingpong_base[2, 0])
                    inv_pingpong = invKine(inp_invKine)
                    # grasping_q = choose_q(inv_pingpong,0)
                    # print('IK \n %s \n' %inv_pingpong)
                    grasping_q = cost_func(inv_pingpong)
                    # print('Grasping q \n%s\n' %grasping_q)

                    #### to Kanut DMP ####
                    g_v = grasping_q
                    # print('grasping_q %s' %grasping_q)
                    fwd_grasping = fwd_kin(g_v, o_unit='p')
                    print('fwd Grasping q %s' % fwd_grasping)
                    # inp = raw_input("Set Home! (Enter to continue)")
                    # set_home(set_duration = 10)
                    # inp = raw_input("Continue? y/n: ")[0]
                    # if (inp == 'y'):
                    # q_ode = odeint(dynamics,x0,t,args=(tau_v,g_v,q0_v,grasping_point,sess))
                    #     q_ode = np.load('q.npy')
                    set_home(g_v, 3)
                    # print(q_ode)
                    # move_dmp_path(q_ode,t)
                    STATE = 'STOP'
                    time.sleep(2)
                    set_home(set_duration=10)
                    #     print('q_ode %s' %q_ode)
                    #     move_dmp_path(q_ode,t)
                    # else:
                    #     print ("Halting program")
                    #     break
                elif STATE != 'GRASPING' and STATE != 'STOP':
                    initial_time = time.time()
                    pp_x, pp_y, pp_depth, capture = IntelRealsense.pingpong_detection(
                        display=True)  # Positional Detection
                    pingpong_camera = IntelRealsense.reverse_perspective_projection(
                        pp_x, pp_y, pp_depth)
                    pingpong_camera[3] = 1  # Real World Positional Projection
                    processing = capture - last_capture
                    timestep = processing  # Define timestep values
                    # print('Distance from camera \n%s' %(pingpong_camera))
                    pingpong_base = T_camera2base.dot(
                        pingpong_camera
                    )  # changing reference frame to base frame
                    # print('Distance from base UR5 %s (%s)' %(pingpong_base, timestep))
                    v_x, v_depth, v_z, a_x, a_y, a_depth, STATE = IntelRealsense.pingpong_velocity(
                        STATE,
                        pingpong_base[0, 0],
                        pingpong_base[1, 0],
                        pingpong_base[2, 0],
                        capture,
                        display=False)  # Velocity Detection
                    last_capture = capture

                    #### Kalman Filter Supitcha Klanpradit ####
                    if STATE != 'NONE':
                        # print('(x,y,depth,v_x,v_y,v_depth) (%s,%s,%s,%s,%s,%s)' %(pp_x, pp_y, pp_depth, v_x, v_depth, v_z))
                        # 999 is the "no velocity estimate yet" sentinel; the
                        # (v != 999) factors zero those entries out.
                        Y = np.array([
                            pingpong_base[0], pingpong_base[2],
                            pingpong_base[1], [(v_x != 999) * (v_x)],
                            [(v_z != 999) * (v_z)],
                            [(v_depth != 999) * (v_depth)]
                        ])
                        # print('Y : %s,     STATE : %s' %(Y, STATE))
                        if STATE == 'VELOCITY':
                            X, P, X_pred = init_detection_state(
                                X, P, Y, timestep)
                            # print('P: %s' %P)
                        elif STATE == 'KALMAN':
                            X, P, X_pred = kalman_filter(X, Y, H, P, timestep)
                            count += 1
                            if count >= 3:
                                STATE = 'GRASPING'

                # final_time = time.time()
                # print("Processing Time : %s" %(final_time-initial_time))
        except KeyboardInterrupt:
            rospy.signal_shutdown("KeyboardInterrupt")
            raise
Example #6
def main():
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        def dynamics(x, t, tau_v, g_v, q0_v, grasping_point, sess):
            position = grasping_point
            s_v = canoSystem(tau_v, t)
            feeddict = {
                g[0]: g_v[0],
                g[1]: g_v[1],
                g[2]: g_v[2],
                g[3]: g_v[3],
                g[4]: g_v[4],
                g[5]: g_v[5],
                q[0]: x[0],
                q[1]: x[1],
                q[2]: x[2],
                q[3]: x[3],
                q[4]: x[4],
                q[5]: x[5],
                qd[0]: x[6],
                qd[1]: x[7],
                qd[2]: x[8],
                qd[3]: x[9],
                qd[4]: x[10],
                qd[5]: x[11],
                q0[0]: q0_v[0],
                q0[1]: q0_v[1],
                q0[2]: q0_v[2],
                q0[3]: q0_v[3],
                q0[4]: q0_v[4],
                q0[5]: q0_v[5],
                tau: tau_v,
                s: s_v,
                xg: position[0],
                yg: position[1],
                zg: position[2]
            }
            qdd1_v, qdd2_v, qdd3_v, qdd4_v, qdd5_v, qdd6_v = sess.run(
                dmpNet, feed_dict=feeddict)
            dx = [
                x[6], x[7], x[8], x[9], x[10], x[11], qdd1_v, qdd2_v, qdd3_v,
                qdd4_v, qdd5_v, qdd6_v
            ]
            return dx

        period = 20
        t = np.linspace(5, period, 20)
        tau_v = float(1) / period
        q0_v = [0, -pi / 2, pi / 2, 0, pi / 2, pi]
        v0 = [0, 0, 0, 0, 0, 0]
        x0 = []
        x0.extend(q0_v)
        x0.extend(v0)
        print("Initiate Object Detection")
        STATE = 'NONE'
        _, _, _, last_capture = IntelRealsense.pingpong_detection(
            display=False)

        T_camera2base = IntelRealsense.transformation_camera2base()
        T_end2base = IntelRealsense.transformation_end2base()
        gripper = np.array([[0], [0], [0.1], [1]])
        Y = np.array([[0], [0], [0], [0], [0], [0]])
        X = np.array([[np.random.normal(0, sigma_x)],
                      [np.random.normal(0, sigma_y)],
                      [np.random.normal(0, sigma_d)],
                      [np.random.normal(0, sigma_v_x)],
                      [np.random.normal(0, sigma_v_y)],
                      [np.random.normal(0, sigma_v_d)]])
        X, P, _ = init_state(X, Y, 0)
        print('First State is %s' % X)
        I = np.identity(6, dtype=float)
        H = np.identity(6, dtype=float)
        count = 0
        global client
        try:
            rospy.init_node("dmp", anonymous=True, disable_signals=True)
            client = actionlib.SimpleActionClient('follow_joint_trajectory',
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()
            print("Connected to server")
            parameters = rospy.get_param(None)
            index = str(parameters).find('prefix')
            if (index > 0):
                prefix = str(parameters)[index + len("prefix': '"):(
                    index + len("prefix': '") +
                    str(parameters)[index + len("prefix': '"):-1].find("'"))]
                for i, name in enumerate(JOINT_NAMES):
                    JOINT_NAMES[i] = prefix + name
            book = Workbook()
            sheet = book.active
            excel_row = 1
            while (True):
                initial_time = time.time()
                straight_inp = raw_input("Set Straight y/n?")
                if straight_inp == 'y':
                    set_home(straight, 5)
                pp_x, pp_y, pp_depth, capture = IntelRealsense.pingpong_detection(
                    display=True)
                pingpong_camera = IntelRealsense.reverse_perspective_projection(
                    pp_x, pp_y, pp_depth)
                pingpong_camera[3] = 1
                processing = capture - last_capture
                timestep = processing
                pingpong_base = T_camera2base.dot(pingpong_camera)
                v_x, v_depth, v_z, a_x, a_y, a_depth, STATE = IntelRealsense.pingpong_velocity(
                    STATE,
                    pingpong_base[0, 0],
                    pingpong_base[1, 0],
                    pingpong_base[2, 0],
                    capture,
                    display=False)
                last_capture = capture
                # print('Real World Coordinates From KF \n %s \n' %pingpong_camera)
                print('Ping-Pong Position (Base Frame) \n %s \n' %
                      pingpong_base)
                grasping_point = [
                    pingpong_base[0], pingpong_base[1], pingpong_base[2]
                ]
                # print('grasping point %s' %grasping_point)
                pingpong_base = pingpong_base - T_end2base.dot(gripper)
                inp_invKine = IntelRealsense.transformation_end2base(
                    pingpong_base[0, 0], pingpong_base[1, 0],
                    pingpong_base[2, 0])
                inv_pingpong = invKine(inp_invKine)
                print('IK \n %s \n' % inv_pingpong)
                # grasping_q = cost_func(inv_pingpong)
                grasping_q = choose_q(inv_pingpong, 0)
                print('Selected IK \n%s\n' % grasping_q)
                g_v = grasping_q
                fwd_grasping = fwd_kin(g_v, o_unit='p')
                print('Selected FK \n%s\n' % fwd_grasping)
                inp = raw_input("Set Home! (Enter to continue)")
                set_home(set_duration=10)
                # inp = raw_input("Continue? y/n: ")[0]
                # if (inp == 'y'):
                # q_ode = odeint(dynamics,x0,t,args=(tau_v,g_v,q0_v,grasping_point,sess))
                # q_ode = np.load('q_sample2.npy')
                set_home(g_v, 3)
                # print('q_ode %s' %q_ode)
                # move_dmp_path(q_ode,t)
                final_time = time.time()
                print("Processing Time : %s" % (final_time - initial_time))
                # else:
                # print ("Halting program")
                # break
                # print('\n#############################################\n')
        except KeyboardInterrupt:
            rospy.signal_shutdown("KeyboardInterrupt")
            raise