# These examples assume module-level imports along the following lines, plus the
# project-specific modules they reference (kinmodel, load_mocap, the tracker and
# recorder classes, MocapWrist, and constants such as FRAMERATE and GROUP_NAME),
# which are defined elsewhere in the package.
import argparse

import numpy as np
import matplotlib.pyplot as plt
import rospy


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('kinmodel_json_optimized',
                        help='The kinematic model JSON file')
    parser.add_argument('task_npz')
    parser.add_argument(
        'block_diagram',
        help='The file name of the drawn block diagram of the system')
    parser.add_argument('system_history', help='The system output pkl file')
    args = parser.parse_args()

    # Perform weight estimation over different window sizes
    window_sizes = (10, 50, 100)

    # Load the calibration sequence
    data = np.load(args.task_npz)
    # Count the number of trials in the file
    trials = 0
    while 'full_sequence_' + str(trials) in data.keys():
        trials += 1
    print('Number of trials: %d' % trials)

    # Stack all the trials, with the concatenation of every trial prepended as
    # element 0
    mocap_data = [
        data['full_sequence_' + str(trial)] for trial in range(trials)
    ]
    mocap_data.insert(0, np.concatenate(mocap_data, axis=2))

    # Initialize the tree
    kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)

    # We only want to draw the block diagram once
    block_diag = None
    histories = {}

    for i, trial in enumerate(mocap_data):
        # The first element is the combined data: change i to 'ALL' and arrange
        # to draw the block diagram for it
        if i == 0:
            i = 'ALL'
            block_diag = args.block_diagram

        print('Learning trial ' + str(i) + '...')

        # Learn the system and draw the block diagram
        trajectories = learn(trial, kin_tree, window_sizes, block_diag)

        # Set back to None so we don't redraw it for the remaining trials
        block_diag = None

        # Key by the trial number (or 'ALL') as a string
        histories[str(i)] = trajectories

    # Save the collected histories to the output file
    filename = args.system_history
    save(filename, histories)
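

# The save() helper called above is project-specific and not shown here (nor is
# learn()). A minimal sketch of save(), assuming it simply pickles the histories
# dict to the path in args.system_history (the argument help calls the output a
# pkl file); the real helper may serialize differently:
import pickle


def save(filename, histories):
    # Serialize the {trial_id: trajectories} dict for later reloading with
    # pickle.load().
    with open(filename, 'wb') as output_file:
        pickle.dump(histories, output_file)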


def setup_trackers(box_joints_topic):
    parser = argparse.ArgumentParser()
    parser.add_argument('kinmodel_json_optimized',
                        help='The kinematic model JSON file')
    # parser.add_argument('mocap_npz')
    args = parser.parse_args()

    #Load the calibration sequence
    # calib_data = np.load(args.mocap_npz)
    # ukf_mocap = load_mocap.MocapArray(calib_data['full_sequence'][:,:,:], FRAMERATE)

    # Load the mocap stream
    ukf_mocap = load_mocap.PointCloudStream('/mocap_point_cloud')
    tracker_kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)
    kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)

    # Add the external frames to track
    frame_tracker = KinematicTreeExternalFrameTracker(kin_tree, 'object_base')
    frame_tracker.attach_frame('joint2', 'trans2')
    frame_tracker.attach_frame('joint3', 'trans3')
    frame_tracker.attach_tf_frame('joint3', 'left_hand')
    frame_tracker.attach_tf_frame('joint2', 'base')

    def new_frame_callback(i, joint_angles):
        frame_tracker.set_config({
            'joint2': joint_angles[0],
            'joint3': joint_angles[1]
        })

    tracker = KinematicTreeTracker(tracker_kin_tree,
                                   ukf_mocap,
                                   joint_states_topic=box_joints_topic,
                                   object_tf_frame='/object_base',
                                   new_frame_callback=new_frame_callback)
    # ukf_output = tracker.run()

    return tracker, frame_tracker
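

# Usage sketch for setup_trackers(), assuming a ROS node context. The node and
# topic names here are illustrative only, and tracker.run() is assumed to
# process the incoming mocap point-cloud stream until shutdown.
def run_box_tracker():
    rospy.init_node('box_tracker')
    tracker, frame_tracker = setup_trackers('/box_joint_states')
    tracker.run()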


# Example 3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('kinmodel_json_optimized',
                        help='The kinematic model JSON file')
    parser.add_argument('task_npz')
    parser.add_argument('trial', help='The trial number')
    args = parser.parse_args()

    # Load the calibration sequence
    data = np.load(args.task_npz)['full_sequence_' + args.trial]
    calib_data = data[:, :, :]
    ukf_mocap = load_mocap.MocapArray(calib_data, FRAMERATE)

    tracker_kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)
    kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)

    all_frames = []
    all_covariances = []
    all_residuals = []
    all_frames1 = []
    all_covariances1 = []
    all_residuals1 = []
    all_frames2 = []
    all_covariances2 = []
    all_residuals2 = []

    def new_frame_callback(i, joint_angles, covariance, squared_residual):
        # frame_tracker.set_config({'joint2':joint_angles[0], 'joint3':joint_angles[1]})
        # print(frame_tracker.compute_jacobian('base', 'left_hand'))
        print("0: %d" % i)
        all_frames.append(joint_angles)
        all_covariances.append(covariance[:, :, None])
        all_residuals.append(squared_residual)

    object_tracker = KinematicTreeTracker(
        'object',
        tracker_kin_tree,
        ukf_mocap,
        new_frame_callback=new_frame_callback)

    ref = 0
    while np.isnan(data[:, :, ref]).any():
        ref += 1
    reference_frame = data[:, :, ref]
    raw_input("Reference Frame is frame %d" % ref)
    marker_indices_1 = {
        name: index + 16
        for index, name in enumerate(MocapWrist.names[::-1])
    }
    marker_indices_2 = {
        name: index + 24
        for index, name in enumerate(MocapWrist.names[::-1])
    }

    def new_frame_callback_1(i, joint_angles, covariance, squared_residual):
        # frame_tracker.set_config({'joint2':joint_angles[0], 'joint3':joint_angles[1]})
        print("1: %d" % i)
        all_frames1.append(joint_angles)
        all_covariances1.append(covariance[:, :, None])
        all_residuals1.append(squared_residual)

    def new_frame_callback_2(i, joint_angles, covariance, squared_residual):
        print("2: %d" % i)
        all_frames2.append(joint_angles)
        all_covariances2.append(covariance[:, :, None])
        all_residuals2.append(squared_residual)

    wrist_tracker_1 = WristTracker('wrist1',
                                   ukf_mocap,
                                   marker_indices_1,
                                   reference_frame,
                                   new_frame_callback=new_frame_callback_1)
    wrist_tracker_2 = WristTracker('wrist2',
                                   ukf_mocap,
                                   marker_indices_2,
                                   reference_frame,
                                   new_frame_callback=new_frame_callback_2)

    object_tracker.run()
    print("Tracker 0 Done!")
    wrist_tracker_1.run()
    print("Tracker 1 Done!")
    wrist_tracker_2.run()
    print("Tracker 2 Done!")

    # object_tracker.start().join()
    ukf_output = np.concatenate(all_frames, axis=1)
    ukf_covar = np.concatenate(all_covariances, axis=2)
    ukf_residual = np.array(all_residuals)

    ukf_output1 = np.concatenate(all_frames1, axis=1)
    ukf_output2 = np.concatenate(all_frames2, axis=1)

    # Figure 1 - Object tracker joint angle trajectories
    fig = plt.figure(figsize=(16, 6))
    ax = fig.add_subplot(111)
    ax.plot(ukf_output.T)  #, color='r')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('$\\theta$ (rad)')

    # Figure 2 - Wrist tracker 1 joint angle trajectories
    fig1 = plt.figure(figsize=(16, 6))
    ax1 = fig1.add_subplot(111)
    ax1.plot(ukf_output1.T)  #, color='r')
    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('$\\theta$ (rad)')

    # Figure 3 - Wrist tracker 2 joint angle trajectories
    fig2 = plt.figure(figsize=(16, 6))
    ax2 = fig2.add_subplot(111)
    ax2.plot(ukf_output2.T)  #, color='r')
    ax2.set_xlabel('Time (s)')
    ax2.set_ylabel('$\\theta$ (rad)')

    plt.show()
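

# The figures above plot against the frame index even though the x axis is
# labelled 'Time (s)'. A small sketch of building an explicit time axis from
# FRAMERATE (assumed to be the mocap frame rate in Hz), applicable to any of
# the stacked outputs:
def plot_with_time_axis(joint_trajectories, framerate):
    # joint_trajectories is expected to have shape (n_joints, n_frames), as
    # produced by np.concatenate(all_frames, axis=1) above.
    time_axis = np.arange(joint_trajectories.shape[1]) / float(framerate)
    fig = plt.figure(figsize=(16, 6))
    ax = fig.add_subplot(111)
    ax.plot(time_axis, joint_trajectories.T)
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('$\\theta$ (rad)')
    return fig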


# Example 4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('kinmodel_json_optimized',
                        help='The kinematic model JSON file')
    parser.add_argument('task_npz')
    parser.add_argument('trial', help='The trial number')
    args = parser.parse_args()

    # Load the calibration sequence
    data = np.load(args.task_npz)['full_sequence_' + args.trial]
    calib_data = data[:, :, :]
    ukf_mocap = load_mocap.MocapArray(calib_data, FRAMERATE)

    tracker_kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)
    kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)

    object_angles = {}
    wrist_1_angles = {}
    wrist_2_angles = {}

    def new_frame_callback(i, joint_angles, covariance, squared_residual):
        # frame_tracker.set_config({'joint2':joint_angles[0], 'joint3':joint_angles[1]})
        # print(frame_tracker.compute_jacobian('base', 'left_hand'))
        print("0: %d" % i)
        for joint_name in joint_angles:
            object_angles[joint_name] = object_angles.get(
                joint_name, []) + [joint_angles[joint_name]]

    object_tracker = KinematicTreeTracker(
        'object',
        tracker_kin_tree,
        ukf_mocap,
        new_frame_callback=new_frame_callback)

    ref = 0
    while np.isnan(data[:, :, ref]).any():
        ref += 1
    reference_frame = data[:, :, ref]
    raw_input("Reference Frame is frame %d" % ref)
    marker_indices_1 = {
        name: index + 16
        for index, name in enumerate(MocapWrist.names[::-1])
    }
    marker_indices_2 = {
        name: index + 24
        for index, name in enumerate(MocapWrist.names[::-1])
    }

    def new_frame_callback_1(i, joint_angles, covariance, squared_residual):
        # frame_tracker.set_config({'joint2':joint_angles[0], 'joint3':joint_angles[1]})
        print("1: %d" % i)
        for joint_name in joint_angles:
            wrist_1_angles[joint_name] = wrist_1_angles.get(
                joint_name, []) + [joint_angles[joint_name]]

    def new_frame_callback_2(i, joint_angles, covariance, squared_residual):
        print("2: %d" % i)
        for joint_name in joint_angles:
            wrist_2_angles[joint_name] = wrist_2_angles.get(
                joint_name, []) + [joint_angles[joint_name]]

    wrist_tracker_1 = WristTracker('wrist1',
                                   ukf_mocap,
                                   marker_indices_1,
                                   reference_frame,
                                   new_frame_callback=new_frame_callback_1)
    wrist_tracker_2 = WristTracker('wrist2',
                                   ukf_mocap,
                                   marker_indices_2,
                                   reference_frame,
                                   new_frame_callback=new_frame_callback_2)

    object_tracker.run()
    print("Tracker 0 Done!")
    wrist_tracker_1.run()
    print("Tracker 1 Done!")
    wrist_tracker_2.run()
    print("Tracker 2 Done!")

    # Figure 1 - Object joint angle trajectories
    fig = plt.figure(figsize=(16, 6))
    ax = fig.add_subplot(111)
    # ax.plot(ukf_output.T)#, color='r')
    for key, value in object_angles.items():
        ax.plot(value, label=key)

    ax.set_xlabel('Time (s)')
    ax.set_ylabel('Angle (rad)')
    ax.legend()

    # Figure 2 - Wrist 1 joint angle trajectories
    fig1 = plt.figure(figsize=(16, 6))
    ax1 = fig1.add_subplot(111)
    for key, value in wrist_1_angles.items():
        ax1.plot(value, label=key)
    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('Angle (rad)')
    ax1.legend()

    # Figure 3 - Wrist 2 joint angle trajectories
    fig2 = plt.figure(figsize=(16, 6))
    ax2 = fig2.add_subplot(111)
    for key, value in wrist_2_angles.items():
        ax2.plot(value, label=key)
    ax2.set_xlabel('Timestep (k)')
    ax2.set_ylabel('Angle (rad)')
    ax2.legend()

    plt.show()
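

# A small post-processing sketch for the per-joint angle lists collected by the
# callbacks above: convert them to arrays and save them for later analysis. The
# output filename is illustrative only.
def save_angle_histories(object_angles, wrist_1_angles, wrist_2_angles,
                         filename='angle_histories.npz'):
    arrays = {}
    for prefix, angles in (('object', object_angles),
                           ('wrist1', wrist_1_angles),
                           ('wrist2', wrist_2_angles)):
        for joint_name, values in angles.items():
            # One 1-D array of angles per tracked joint, keyed by tracker name
            arrays[prefix + '_' + joint_name] = np.asarray(values)
    np.savez_compressed(filename, **arrays)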


# Example 5
def main():
    plt.ion()
    parser = argparse.ArgumentParser()
    parser.add_argument('kinmodel_json_optimized',
                        help='The kinematic model JSON file')
    parser.add_argument('mocap_npz')
    args = parser.parse_args()

    #Load the calibration sequence
    calib_data = np.load(args.mocap_npz)['full_sequence'][:, :, :]
    ukf_mocap = load_mocap.MocapArray(calib_data, FRAMERATE)

    # Load the mocap stream
    # ukf_mocap = load_mocap.PointCloudStream('/mocap_point_cloud')

    tracker_kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)
    kin_tree = kinmodel.KinematicTree(
        json_filename=args.kinmodel_json_optimized)

    # Add the external frames to track
    # frame_tracker = KinematicTreeExternalFrameTracker(kin_tree, 'object_base')
    # frame_tracker.attach_frame('joint2', 'trans2')
    # frame_tracker.attach_frame('joint3', 'trans3')
    # frame_tracker.attach_tf_frame('joint3', 'left_hand')
    # frame_tracker.attach_tf_frame('joint2', 'base')

    all_frames = []
    all_covariances = []
    all_residuals = []

    def new_frame_callback(i, joint_angles, covariance, squared_residual):
        # frame_tracker.set_config({'joint2':joint_angles[0], 'joint3':joint_angles[1]})
        # print(frame_tracker.compute_jacobian('base', 'left_hand'))
        print(i)
        all_frames.append(joint_angles)
        all_covariances.append(covariance[:, :, None])
        all_residuals.append(squared_residual)

    # tracker = KinematicTreeTracker(tracker_kin_tree, ukf_mocap, joint_states_topic='/kinmodel_state',
    #         object_tf_frame='/object_base', new_frame_callback=new_frame_callback)
    tracker = KinematicTreeTracker('cardboard',
                                   tracker_kin_tree,
                                   ukf_mocap,
                                   new_frame_callback=new_frame_callback)
    tracker.run()
    # tracker.start().join()
    ukf_output = np.concatenate(all_frames, axis=1)
    ukf_covar = np.concatenate(all_covariances, axis=2)
    ukf_residual = np.array(all_residuals)

    # Plot the UKF joint angle estimates
    fig = plt.figure(figsize=(16, 6))
    ax = fig.add_subplot(111)
    ax.plot(ukf_output.T)  #, color='r')
    ax.set_xlabel('Time (s)')
    ax.set_ylabel('$\\theta$ (rad)')

    # # # Figure 1 - Box static mocap markers
    # ukf_mocap.plot_frame(0, xyz_rotation=(math.pi/2, 0, math.pi/180*120))

    # # Figure 2 - Mocap xyz trajectories
    # fig = plt.figure(figsize=(16,6))
    # time_axis = (np.array(range(calib_data.shape[2])) * (1.0 / 80))[:,None]
    # ax_x = fig.add_subplot(311)
    # ax_y = fig.add_subplot(312)
    # ax_z = fig.add_subplot(313)
    # ax_x.plot(time_axis, calib_data[:,0,:].T)#, color='r')
    # ax_x.set_ylim((0,1))
    # ax_x.set_xlabel('Time (s)')
    # ax_x.set_ylabel('X (m)')
    # ax_y.plot(time_axis, calib_data[:,1,:].T)#, color='b')
    # ax_y.set_ylim((0.5,1.5))
    # ax_y.set_xlabel('Time (s)')
    # ax_y.set_ylabel('Y (m)')
    # ax_z.plot(time_axis, calib_data[:,2,:].T)#, color='g')
    # ax_z.set_ylim((-1,0))
    # ax_z.set_xlabel('Time (s)')
    # ax_z.set_ylabel('Z (m)')

    # #UKF Output
    # fig = plt.figure(figsize=(16,7))
    # ax1 = fig.add_subplot(311)
    # ax1.plot(time_axis[:-1], ukf_output[0:1,:].T, color='b')
    # ax1.plot(time_axis[:-1], ukf_output[0,:] + ukf_covar[0,0,:], color='b', linestyle=':')
    # ax1.plot(time_axis[:-1], ukf_output[0,:] - ukf_covar[0,0,:], color='b', linestyle=':')
    # ax1.set_ylim((-0.2, 1.2))
    # ax1.set_xlabel('Time (s)')
    # ax1.set_ylabel('$\\theta_1$ (rad)')
    # ax1.legend(['$\\mu_1$', '$\\mu_2 \\pm \\sigma^2_1$'])

    # ax2 = fig.add_subplot(312)
    # ax2.plot(time_axis[:-1], ukf_output[1:2,:].T, color='g')
    # ax2.plot(time_axis[:-1], ukf_output[1,:] + ukf_covar[1,1,:], color='g', linestyle=':')
    # ax2.plot(time_axis[:-1], ukf_output[1,:] - ukf_covar[1,1,:], color='g', linestyle=':')
    # ax2.set_ylim((-0.2, 1.2))
    # ax2.set_xlabel('Time (s)')
    # ax2.set_ylabel('$\\theta_2$ (rad)')
    # ax2.legend(['$\\mu_2$', '$\\mu_2 \pm \\sigma^2_2$'])

    # ax3 = fig.add_subplot(313)
    # ax3.plot(time_axis[:-1], ukf_residual, color='r')
    # # ax3.plot(time_axis[:-1], ukf_output[1,:] + ukf_covar[1,1,:], color='g', linestyle=':')
    # # ax3.plot(time_axis[:-1], ukf_output[1,:] - ukf_covar[1,1,:], color='g', linestyle=':')
    # # ax3.set_ylim((-0.2, 1.2))
    # ax3.set_xlabel('Time (s)')
    # ax3.set_ylabel('SSE ($m^2$)')
    # # ax3.legend(['$\\mu_2$', '$\\mu_2 \pm \\sigma^2_2$'])
    # print('MSE: ' + str(np.mean(ukf_residual)))
    plt.pause(100)
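

# The commented-out block above sketches per-joint confidence bands and an MSE
# report. A compact summary along the same lines, assuming ukf_covar stacks the
# per-frame covariance matrices along axis 2 as in new_frame_callback():
def report_fit(ukf_output, ukf_covar, ukf_residual):
    # Per-joint variance over time comes from the diagonal of each covariance
    variances = np.array(
        [ukf_covar[j, j, :] for j in range(ukf_output.shape[0])])
    print('Mean per-joint variance: ' + str(np.mean(variances, axis=1)))
    print('MSE: ' + str(np.mean(ukf_residual)))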


# Example 6
def main():
    plt.ion()
    parser = argparse.ArgumentParser()
    parser.add_argument('kinmodel_json', help='The kinematic model JSON file')
    parser.add_argument('kinmodel_json_optimized',
                        help='The output JSON file for the optimized kinematic model')
    parser.add_argument('mocap_npz',
                        help='The .npz file from mocap_recorder.py')
    args = parser.parse_args()

    #Load the calibration sequence
    calib_data = np.load(args.mocap_npz)
    ukf_mocap = load_mocap.ArrayMocapSource(
        calib_data['full_sequence'][:, :, :], FRAMERATE).get_stream()

    # Get the base marker indices
    base_indices = []
    kin_tree = kinmodel.KinematicTree(json_filename=args.kinmodel_json)
    base_joint = kin_tree.get_root_joint()
    for child in base_joint.children:
        if not hasattr(child, 'children'):
            # This is a feature
            base_indices.append(int(child.name.split('_')[1]))

    # Get all the marker indices of interest
    all_marker_indices = []
    for feature_name in kin_tree.get_features():
        all_marker_indices.append(int(feature_name.split('_')[1]))

    # Set the base frame coordinate transformation
    desired = ukf_mocap.read()[0][base_indices, :, 0]
    desired = desired - np.mean(desired, axis=0)
    data_array = np.dstack(
        (calib_data['full_sequence'][:, :, 0:1],
         calib_data['full_sequence'][:, :, calib_data[GROUP_NAME]]))
    mocap = load_mocap.ArrayMocapSource(data_array, FRAMERATE).get_stream()

    #Set the coordinate frame for the mocap sequence
    mocap.set_coordinates(base_indices, desired, mode='time_varying')

    #Generate the feature observation dicts
    feature_obs = []
    for frame, timestamp in mocap:
        feature_dict = {}
        for marker_idx in all_marker_indices:
            if not np.isnan(frame[marker_idx, 0, 0]):
                obs_point = kinmodel.new_geometric_primitive(
                    np.concatenate((frame[marker_idx, :, 0], np.ones(1))))
                feature_dict['mocap_' + str(marker_idx)] = obs_point
        feature_obs.append(feature_dict)

    # Run the optimization
    kin_tree.set_features(feature_obs[0])
    final_configs, final_twists, final_features = kin_tree.fit_params(
        feature_obs,
        optimize={
            'twists': True,
            'features': False,
            'configs': True
        })
    print('Second optimization...')
    final_configs, final_twists, final_features = kin_tree.fit_params(
        feature_obs,
        configs=final_configs,
        optimize={
            'twists': True,
            'features': True,
            'configs': True
        })
    kin_tree.json(args.kinmodel_json_optimized)
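

# A quick sanity-check sketch: reload the optimized model that was just written
# and confirm it parses. This assumes kinmodel.KinematicTree can read back the
# file produced by kin_tree.json(), as the other examples here do.
def check_optimized_model(json_path):
    reloaded = kinmodel.KinematicTree(json_filename=json_path)
    print('Reloaded optimized model with features: ' +
          str(list(reloaded.get_features())))
    return reloaded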


# Example 7
def collect_model_data():
    parser = argparse.ArgumentParser()
    parser.add_argument('kinmodel_json',
                        help='The JSON file with the kinematic model data')
    parser.add_argument('output_npz', help='The output mocap data file')
    args = parser.parse_args()

    # Generate chains from the kinematic model file
    kin_tree = kinmodel.KinematicTree(json_filename=args.kinmodel_json)
    features = kin_tree.get_features()
    tree_marker_nums = []
    assignments = {}
    for feature_name in features:
        assignments[feature_name] = int(feature_name.split('_')[1])

    CHAINS = {}
    CHAINS['tree'] = assignments.keys()

    #Load the marker assignments and make a list of all markers
    all_markers = []
    for chain in CHAINS.keys():
        groups = CHAINS[chain]
        for group in groups:
            all_markers.append(assignments[group])
    all_markers = list(set(all_markers))
    print('Tracking markers:')
    print(all_markers)

    #Start recording frames
    rospy.init_node('mocap_calibrate_human')
    recorder = MocapRecorder()

    #Capture the 0-config
    raw_input('RECORDING: Press <Enter> to capture the 0-configuration: ')
    print('Waiting for all markers to be visible...')
    recorder.record()
    recorder.annotate_next_visible(all_markers, 'zero_config')

    #Capture the calibration sequence
    frame = 1
    for chain in CHAINS.keys():
        while True:
            command = raw_input(
                'RECORDING ' + chain +
                ': Press <Enter> to capture a pose or n+<Enter> to move to the next chain: '
            )
            if command == 'n':
                break
            else:
                recorder.annotate(chain)
                print('Captured frame ' + str(frame))
                frame += 1
    recorder.stop()

    #Generate the sets of training samples
    print('Extracting chains:')
    calib_frames = {chain: [] for chain in CHAINS.keys()}
    for chain in CHAINS.keys():
        groups = CHAINS[chain]
        group_markers = []
        for group in groups:
            group_markers.append(assignments[group])
        print(chain + ': ' + str(group_markers))
        group_markers = np.array(group_markers)

        #Select only the marker data for this group
        group_data = recorder.get_array()[group_markers, :, :]

        #Pull out the frame nearest to each annotation where all the group's
        #markers are visible
        for annotation in recorder.get_annotations():
            if annotation[1] == chain:
                calib_frames[chain].append(
                    get_closest_visible(group_data, int(annotation[0]))[1])
        # Remove duplicate annotations and sort the selected frame indices
        calib_frames[chain] = np.array(sorted(set(calib_frames[chain])))

    #Set the zero config as the first frame
    print('calib_frames:')
    print(calib_frames)
    first_frame = int(recorder.get_annotations()[0][0])
    for chain in CHAINS.keys():
        calib_frames[chain] = calib_frames[chain] - first_frame
    calib_frames['full_sequence'] = recorder.get_array()[:, :, first_frame:]

    #Add the full sequence and the id of the marker assignments to the dataset,
    #then write it to a file
    with open(args.output_npz, 'wb') as output_file:
        np.savez_compressed(output_file, **calib_frames)
        print('Calibration sequences saved to ' + args.output_npz)
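

# The get_closest_visible() helper used above is not defined in this snippet. A
# minimal sketch of one possible implementation, assuming it returns a
# (frame_data, frame_index) pair for the frame nearest to the annotated index in
# which every marker of the group is visible (i.e. contains no NaNs):
def get_closest_visible(group_data, annotation_index):
    num_frames = group_data.shape[2]
    for offset in range(num_frames):
        # Search outward from the annotated frame, earlier frames first
        for candidate in (annotation_index - offset, annotation_index + offset):
            in_range = 0 <= candidate < num_frames
            if in_range and not np.isnan(group_data[:, :, candidate]).any():
                return group_data[:, :, candidate], candidate
    raise ValueError('No frame found with all group markers visible')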