Example #1
1
 def histogram_of_stop_point_elbow(self, subjects, labels):
     fig1 = plt.figure(4)
     fig2 = plt.figure(5)
     labels = ['good']
     stop_locations = []
     arm_lengths = []
     for num, label in enumerate(labels):
         for subj_num, subject in enumerate(subjects):
             subject_stop_locations = []
             paramlist = rosparam.load_file(''.join([data_path, '/', subject, '/params.yaml']))
             for params, ns in paramlist:
                 rosparam.upload_params(ns, params)
             arm_length = rosparam.get_param('crook_to_fist')/100.
             # fig1 = plt.figure(2*num+1)
             ax1 = fig1.add_subplot(111)
             ax1.set_xlim(0.2, .4)
             # ax1.set_ylim(-10.0, 1.0)
             ax1.set_xlabel('Stop Position (m)', fontsize=20)
             ax1.set_ylabel('Number of trials', fontsize=20)
             ax1.set_title(''.join(['Difference between arm length and stop position at the elbow']), fontsize=20)
             ax1.tick_params(axis='x', labelsize=20)
             ax1.tick_params(axis='y', labelsize=20)
             ax2 = fig2.add_subplot(431+subj_num)
             ax2.set_xlim(0.2, .4)
             # ax1.set_ylim(-10.0, 1.0)
             ax2.set_xlabel('Position (m)')
             ax2.set_ylabel('Number of trials')
             ax2.set_title(''.join(['Stop position for "Good" outcome']), fontsize=20)
             vel = 0.1
             directory = ''.join([data_path, '/', subject, '/formatted_three/', str(vel),'mps/', label, '/'])
             force_file_list = os.listdir(directory)
             for file_name in force_file_list:
                 # print directory+file_name
                 loaded_data = load_pickle(directory+file_name)
                 stop_locations.append(np.max(loaded_data[:,1])-arm_length)
                 subject_stop_locations.append(np.max(loaded_data[:,1])-arm_length)
                 arm_lengths.append(arm_length)
             vel = 0.15
             directory = ''.join([data_path, '/', subject, '/formatted_three/', str(vel),'mps/', label, '/'])
             force_file_list = os.listdir(directory)
             for file_name in force_file_list:
                 loaded_data = load_pickle(directory+file_name)
                 stop_locations.append(np.max(loaded_data[:,1]-arm_length))
                 subject_stop_locations.append(np.max(loaded_data[:,1])-arm_length)
             ax2.hist(subject_stop_locations)
     mu = np.mean(stop_locations)
     sigma = np.std(stop_locations)
     print 'The minimum arm length is: ', np.min(arm_lengths)
     print 'The max arm length is: ', np.max(arm_lengths)
     print 'The mean arm length is: ', np.mean(arm_lengths)
     print 'The mean of the stop location is: ', mu
     print arm_lengths
     print 'The standard deviation of the stop location is: ', sigma
     n, bins, patches = ax1.hist(stop_locations, 10, color="green", alpha=0.75)
     points = np.arange(0, 10, 0.001)
     y = mlab.normpdf(points, mu, sigma)
     l = ax1.plot(points, y, 'r--', linewidth=2)
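A note on the overlay in Example #1: `mlab.normpdf` has been removed from recent matplotlib releases, and the red curve is an unscaled probability density drawn over a count histogram, so the two do not share a vertical scale. A minimal sketch of a scaled overlay, assuming SciPy is available (an illustration, not part of the original code):

import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt

def hist_with_normal_overlay(samples, n_bins=10):
    # draw a count histogram and a normal fit scaled to the same units
    samples = np.asarray(samples, dtype=float)
    mu, sigma = samples.mean(), samples.std()
    fig, ax = plt.subplots()
    counts, bins, _ = ax.hist(samples, n_bins, color='green', alpha=0.75)
    points = np.linspace(bins[0], bins[-1], 500)
    # scale the pdf by N * bin_width so it overlays trial counts
    pdf = st.norm.pdf(points, mu, sigma) * len(samples) * (bins[1] - bins[0])
    ax.plot(points, pdf, 'r--', linewidth=2)
    return mu, sigma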
Example #2
0
    def __init__(self, end_effector_length):
        # create joint limit dicts
        self.joint_lim_dict = {}
        self.joint_lim_dict["right_arm"] = {
            "max": np.radians([120.00, 122.15, 77.5, 144.0, 122.0, 45.0, 45.0]),
            "min": np.radians([-47.61, -20.0, -77.5, 0.0, -80.0, -45.0, -45.0]),
        }

        self.joint_lim_dict["left_arm"] = {
            "max": np.radians([120.00, 20.0, 77.5, 144.0, 80.0, 45.0, 45.0]),
            "min": np.radians([-47.61, -122.15, -77.5, 0.0, -122.0, -45.0, -45.0]),
        }

        end_effector_length += 0.0135 + 0.04318  # add wrist linkange and FT sensor lengths
        self.setup_kdl_mekabot(end_effector_length)
        q_guess_pkl_l = (
            os.environ["HOME"]
            + "/svn/gt-ros-pkg/hrl/hrl_arm_control/hrl_cody_arms/src/hrl_cody_arms/q_guess_left_dict.pkl"
        )
        q_guess_pkl_r = (
            os.environ["HOME"]
            + "/svn/gt-ros-pkg/hrl/hrl_arm_control/hrl_cody_arms/src/hrl_cody_arms/q_guess_right_dict.pkl"
        )

        self.q_guess_dict_left = ut.load_pickle(q_guess_pkl_l)
        self.q_guess_dict_right = ut.load_pickle(q_guess_pkl_r)
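The per-arm limit dictionaries above are typically used to keep commanded joint angles inside the hardware range. A minimal sketch of such a clamp (an illustration, not part of the original class):

import numpy as np

def clamp_to_limits(q, lim_dict):
    # element-wise clamp of a joint-angle vector to an arm's limit dict
    return np.clip(q, lim_dict['min'], lim_dict['max'])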
Example #3
0
    def display(self, succ_pickle, fail_pickle):
        # load in pickle
        print 'loading...'
        successes = ut.load_pickle(succ_pickle)
        failures = ut.load_pickle(fail_pickle)

        print 'Enter the topic number:'
        for i, k in enumerate(successes[0].keys()):
            print i, k
        topic = successes[0].keys()[int(raw_input())]

        red = np.matrix([1., 0, 0, 1.]).T
        green = np.matrix([0., 1., 0, 1.]).T
        print 'construct_pressure_marker_message(successes, topic, green)'
        #succ_marker, succ_pc = construct_pressure_marker_message(successes, topic, green)
        succ_pc = construct_pressure_marker_message(successes, topic, green)
        print 'construct_pressure_marker_message(failures, topic, red)'
        #fail_marker, fail_pc = construct_pressure_marker_message(failures, topic, red)
        fail_pc = construct_pressure_marker_message(failures, topic, red)
        print 'publishing...'
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            self.succ_pc_pub.publish(succ_pc)
            self.fail_pc_pub.publish(fail_pc)
            r.sleep()
Example #4
0
    def display(self, succ_pickle, fail_pickle):
        # load in pickle
        print 'loading...'
        successes = ut.load_pickle(succ_pickle)
        failures = ut.load_pickle(fail_pickle)

        print 'Enter the topic number:'
        for i, k in enumerate(successes[0].keys()):
            print i, k
        topic = successes[0].keys()[int(raw_input())]

        red = np.matrix([1.,0, 0, 1.]).T
        green = np.matrix([0.,1., 0, 1.]).T
        print 'construct_pressure_marker_message(successes, topic, green)'
        #succ_marker, succ_pc = construct_pressure_marker_message(successes, topic, green)
        succ_pc = construct_pressure_marker_message(successes, topic, green)
        print 'construct_pressure_marker_message(failures, topic, red)'
        #fail_marker, fail_pc = construct_pressure_marker_message(failures, topic, red)
        fail_pc = construct_pressure_marker_message(failures, topic, red)
        print 'publishing...'
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            self.succ_pc_pub.publish(succ_pc)
            self.fail_pc_pub.publish(fail_pc)
            r.sleep()
Example #5
0
    def display(self, succ_pickle, fail_pickle):
        # load in pickle
        print 'loading...'
        successes = ut.load_pickle(succ_pickle)
        failures = ut.load_pickle(fail_pickle)

        topics = ['/pressure/l_gripper_motor', '/pressure/r_gripper_motor']
        topic = topics[0]

        red = np.matrix([1., 0, 0, 1.]).T
        green = np.matrix([0., 1., 0, 1.]).T
        blue = np.matrix([0., 0, 1., 1.]).T

        #data_dict [state number] [topic] [trial number] ['t' 'left' 'right']
        succ_avg = average_reading_over_trials(successes, topic)
        fail_avg = average_reading_over_trials(failures, topic)
        diff_avg = subtract_records(succ_avg, fail_avg, topic)

        succ_marker, succ_pc = construct_pressure_marker_message(succ_avg, topic, green)
        fail_marker, fail_pc = construct_pressure_marker_message(fail_avg, topic, red)
        diff_marker, diff_pc = construct_pressure_marker_message(diff_avg, topic, blue)

        r = rospy.Rate(10)
        print 'publishing...'
        while not rospy.is_shutdown():
            self.succ_marker.publish(succ_marker)
            self.fail_marker.publish(fail_marker)

            self.succ_pc_pub.publish(succ_pc)
            self.fail_pc_pub.publish(fail_pc)

            self.diff_marker.publish(diff_marker)
            self.diff_pc_pub.publish(diff_pc)
            r.sleep()
Example #6
0
    def set_position_data_from_saved_movement_profile(self, subject, input_classes, output_classes):
        print 'Now editing files to insert position data.'
        position_profile = None
        for vel in [0.1, 0.15]:
            if vel == 0.1:
                print ''.join([self.data_path, '/position_profiles/position_combined_0_1mps.pkl'])
                position_profile = load_pickle(''.join([self.data_path, '/position_profiles/position_combined_0_1mps.pkl']))
                print 'Position profile loaded!'
            elif vel == 0.15:
                position_profile = load_pickle(''.join([self.data_path, '/data/position_profiles/position_combined_0_15mps.pkl']))
                print ''.join([self.data_path, '/position_profiles/position_combined_0_15mps.pkl'])
                print 'Position profile loaded!'
            else:
                print 'There is no saved position profile for this velocity! Something has gone wrong!'
                return None
            for class_num in xrange(len(input_classes)):
                i = 0
                while os.path.isfile(''.join([self.data_path, '/', subject, '/', str(vel), 'mps/', input_classes[class_num], '/ft_sleeve_', str(i), '.pkl'])):
                    ft_threshold_was_exceeded = False
                    print ''.join([self.data_path, '/', subject, '/', input_classes[class_num], '/ft_sleeve_', str(i), '.pkl'])
                    # current_data = np.array([map(float,line.strip().split()) for line in open(''.join([self.pkg_path, '/data/', subject, '/', input_classes[class_num], '/ft_sleeve_', str(i), '.log']))])
                    current_data = load_pickle(''.join([self.data_path, '/', subject, '/', input_classes[class_num], '/ft_sleeve_', str(i), '.pkl']))

                    # if np.max(current_data[:, 2]) >= 10. or np.max(current_data[:, 3]) >= 10. \
                    #         or np.max(current_data[:, 4]) >= 10.:
                    #     ft_threshold_was_exceeded = True

                    for j in current_data:
                        j[2] = -j[2]
                        j[3] = -j[3]
                        j[4] = -j[4]
                        if j[0] < 0.5:
                            j[1] = 0
                        else:
                            if np.max(np.abs(j[2:])) > 10. and not ft_threshold_was_exceeded:
                                time_of_stop = j[0]
                                ft_threshold_was_exceeded = True
                                for k in xrange(len(position_profile)-1):
                                    if position_profile[k, 0] <= j[0] < position_profile[k+1, 0]:
                                        position_of_stop = position_profile[k, 1] + \
                                                           (position_profile[k+1, 1] - position_profile[k, 1]) * \
                                                           (j[0]-position_profile[k, 0])/(position_profile[k+1, 0] - position_profile[k, 0])
                            if ft_threshold_was_exceeded:
                                j[1] = position_of_stop
                            else:
                                for k in xrange(len(position_profile)-1):
                                    if position_profile[k, 0] <= j[0] < position_profile[k+1, 0]:
                                        new_position = position_profile[k, 1] + \
                                                       (position_profile[k+1, 1] - position_profile[k, 1]) * \
                                                       (j[0]-position_profile[k, 0])/(position_profile[k+1, 0] - position_profile[k, 0])
                                j[1] = new_position
                    save_number = 0
                    while os.path.isfile(''.join([self.data_path, '/', subject, '/formatted/', str(vel),'mps/', output_classes[class_num], '/force_profile_', str(save_number), '.pkl'])):
                        save_number += 1
                    print 'Saving with number', save_number
                    save_pickle(current_data, ''.join([self.data_path, '/', subject, '/formatted/', str(vel),'mps/', output_classes[class_num], '/force_profile_', str(save_number), '.pkl']))
                    i += 1
        print 'Done editing files!'
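The nested loop over `position_profile` in Example #6 is doing piecewise-linear interpolation of position against time. A compact equivalent, assuming the profile is an Nx2 array of (time, position) rows sorted by time, is `np.interp`; a sketch (not the original helper):

import numpy as np

def position_at_time(t, position_profile):
    # position_profile: N x 2 array of (time, position) samples sorted by time
    return np.interp(t, position_profile[:, 0], position_profile[:, 1])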
Example #7
0
    def create_model(self, succ_pickle, fail_pickle):
        print 'creating model...'
        topic = '/pressure/l_gripper_motor'
        SEGMENT_LENGTH = 1.0
        VARIANCE_KEEP = .7

        # load in pickle
        print 'loading pickles'
        successes = ut.load_pickle(succ_pickle)
        failures = ut.load_pickle(fail_pickle)

        #chop data set into little chunks
        # data_sets[state][chunk_idx]['data', 'time'][chunk_record]
        print 'preprocess pickles'
        success_data_sets, failure_data_sets, chunk_params = self.preprocess_pickles(successes, \
                failures, topic, SEGMENT_LENGTH)

        # turn each set of records into a matrix
        combined_sets = {}
        for dset_name, datasets in zip(['success', 'failure'], [success_data_sets, failure_data_sets]):
            #merge the two matrices from mat_set
            mat_set = self.create_matrix_from_chunked_datasets(datasets)
            for state in range(len(datasets.keys())):
                if not combined_sets.has_key(state):
                    combined_sets[state] = {}
                for chunk_idx in range(len(datasets[state])):
                    if not combined_sets[state].has_key(chunk_idx):
                        combined_sets[state][chunk_idx] = {}
                    combined_sets[state][chunk_idx][dset_name] = mat_set[state][chunk_idx]['data']
                    combined_sets[state][chunk_idx]['time'] = mat_set[state][chunk_idx]['time']

        # run PCA over the entire set
        models = {}
        for state in range(len(combined_sets.keys())):
            models[state] = []
            for chunk_idx in range(len(combined_sets[state])):
                print 'building model for state', state, 'chunk idx', chunk_idx
                # pdb.set_trace()
                data_chunk = np.column_stack((combined_sets[state][chunk_idx]['success'], \
                                              combined_sets[state][chunk_idx]['failure']))
                num_pos = combined_sets[state][chunk_idx]['success'].shape[1]
                num_neg = combined_sets[state][chunk_idx]['failure'].shape[1]
                labels = np.column_stack((np.matrix(np.ones((1, num_pos))), np.matrix(np.zeros((1, num_neg)))))

                projection_basis, dmean = dimreduce.pca_vectors(data_chunk, VARIANCE_KEEP)
                print 'pca_basis: number of dimensions', projection_basis.shape[1]
                reduced_data = projection_basis.T * (data_chunk-dmean)
                models[state].append({'time':    combined_sets[state][chunk_idx]['time'],
                                      'project': projection_basis,
                                      'reduced': reduced_data,
                                      'labels':  labels,
                                      'mean':    dmean,
                                      'data':    data_chunk
                                      #'tree':    sp.KDTree(np.array(reduced_data.T))
                                      })

        self.models = {'models':models, 
                       'chunk_params': chunk_params}
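`dimreduce.pca_vectors` is project-specific, but the model-building step above only needs a mean, a projection basis that keeps a chosen fraction of the variance, and the reduced data. A stand-in sketch with plain NumPy, assuming observations are stored one per column as in `data_chunk`:

import numpy as np

def pca_vectors_sketch(data, variance_keep=0.7):
    # data: d x n array, one observation per column
    data = np.asarray(data, dtype=float)
    dmean = data.mean(axis=1, keepdims=True)
    centered = data - dmean
    U, s, _ = np.linalg.svd(centered, full_matrices=False)
    explained = (s ** 2) / np.sum(s ** 2)
    k = int(np.searchsorted(np.cumsum(explained), variance_keep)) + 1
    k = min(k, U.shape[1])
    projection_basis = U[:, :k]                        # d x k
    reduced_data = projection_basis.T.dot(centered)    # k x n
    return projection_basis, dmean, reduced_data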
Example #8
0
 def plot_all_data(self, subjects, labels):
     fig1 = plt.figure(1)
     for num, label in enumerate(labels):
         for subject in subjects:
             # fig1 = plt.figure(2*num+1)
             ax1 = fig1.add_subplot(221 + 2 * num)
             ax1.set_xlim(0., .85)
             ax1.set_ylim(-0.40, 0.40)
             ax1.set_xlabel('Position (m)')
             ax1.set_ylabel('Force_x (N)')
             ax1.set_title(''.join([
                 'Force in direction of movement vs Position for: ', label,
                 ' type'
             ]))
             # fig2 = plt.figure(2*num+2)
             ax2 = fig1.add_subplot(222 + 2 * num)
             ax2.set_xlim(0, .85)
             ax2.set_ylim(-0.40, 0.40)
             ax2.set_xlabel('Position (m)')
             ax2.set_ylabel('Force_z (N)')
             ax2.set_title(''.join([
                 'Force in upward direction vs Position for: ', label,
                 ' type'
             ]))
             vel = 0.1
             directory = ''.join([
                 data_path, '/', subject, '/formatted_three/',
                 str(vel), 'mps/', label, '/'
             ])
             force_file_list = os.listdir(directory)
             for file_name in force_file_list:
                 # print directory+file_name
                 loaded_data = load_pickle(directory + file_name)
                 X1 = loaded_data[:, 1]
                 Y1 = loaded_data[:, 2]
                 surf1 = ax1.plot(X1, Y1, color="blue", alpha=1)
                 X2 = loaded_data[:, 1]
                 Y2 = loaded_data[:, 4]
                 surf2 = ax2.plot(X2, Y2, color="green", alpha=1)
             vel = 0.15
             directory = ''.join([
                 data_path, '/', subject, '/formatted_three/',
                 str(vel), 'mps/', label, '/'
             ])
             force_file_list = os.listdir(directory)
             for file_name in force_file_list:
                 # print directory+file_name
                 loaded_data = load_pickle(directory + file_name)
                 X1 = loaded_data[:, 1]
                 Y1 = loaded_data[:, 2]
                 surf1 = ax1.plot(X1, Y1, color="blue", alpha=1)
                 X2 = loaded_data[:, 1]
                 Y2 = loaded_data[:, 4]
                 surf2 = ax2.plot(X2, Y2, color="green", alpha=1)
Example #9
0
File: analyze.py Project: wklharry/hrl
def compare(file1, file2):
    f1 = ut.load_pickle(file1 + '.pkl')
    f2 = ut.load_pickle(file2 + '.pkl')
    force1 = f1['force']
    force2 = f2['force']
    fmag1 = []
    fmag2 = []

    for i in range(len(force1)):
        fmag1.append(np.linalg.norm(force1[i]))
    for i in range(len(force2)):
        fmag2.append(np.linalg.norm(force2[i]))
    res = st.ttest_ind(fmag1, fmag2)
    print res
Example #10
0
File: analyze.py Project: gt-ros-pkg/hrl
def compare(file1,file2):
	f1=ut.load_pickle(file1+'.pkl')
	f2=ut.load_pickle(file2+'.pkl')
	force1 = f1['force']
	force2 = f2['force']
	fmag1 = []
	fmag2 = []

	for i in range(len(force1)):
		fmag1.append(np.linalg.norm(force1[i]))
	for i in range(len(force2)):
		fmag2.append(np.linalg.norm(force2[i]))
	res = st.ttest_ind(fmag1,fmag2)
	print res
Example #11
0
 def histogram_of_stop_point_fist(self, subjects, labels):
     fig1 = plt.figure(3)
     labels = ['caught_fist']
     stop_locations = []
     for num, label in enumerate(labels):
         for subject in subjects:
             # fig1 = plt.figure(2*num+1)
             ax1 = fig1.add_subplot(111)
             ax1.set_xlim(0.2, .5)
             # ax1.set_ylim(-10.0, 1.0)
             ax1.set_xlabel('Stop Position (m)', fontsize=20)
             ax1.set_ylabel('Number of trials', fontsize=20)
             ax1.tick_params(axis='x', labelsize=20)
             ax1.tick_params(axis='y', labelsize=20)
             ax1.set_title(''.join([
                 'Stop position when caught on fist, when started at tip of fist'
             ]),
                           fontsize=20)
             vel = 0.1
             directory = ''.join([
                 data_path, '/', subject, '/formatted/',
                 str(vel), 'mps/', label, '/'
             ])
             force_file_list = os.listdir(directory)
             for file_name in force_file_list:
                 # print directory+file_name
                 loaded_data = load_pickle(directory + file_name)
                 stop_locations.append(np.max(loaded_data[:, 1]))
             vel = 0.15
             directory = ''.join([
                 data_path, '/', subject, '/formatted/',
                 str(vel), 'mps/', label, '/'
             ])
             force_file_list = os.listdir(directory)
             for file_name in force_file_list:
                 loaded_data = load_pickle(directory + file_name)
                 stop_locations.append(np.max(loaded_data[:, 1]))
     mu = np.mean(stop_locations)
     sigma = np.std(stop_locations)
     print 'The mean of the stop location is: ', mu
     print 'The standard deviation of the stop location is: ', sigma
     n, bins, patches = ax1.hist(stop_locations,
                                 10,
                                 color="green",
                                 alpha=0.75)
     points = np.arange(0, 10, 0.001)
     y = mlab.normpdf(points, mu, sigma)
     l = ax1.plot(points, y, 'r--', linewidth=2)
Example #12
0
    def load_task_scores(self, task, model, type):
        # file_name = ''.join([self.pkg_path, '/data/', task, '_', model, '_subj_', str(subj), '_score_data.pkl'])
        split_task = task.split('_')
        if 'shaving' in split_task:
            file_name = ''.join([self.data_path, '/', task, '/', model, '/', task, '_', model, '_', type, '_score_data'])
        elif 'scratching' in split_task:

            if 'upper' in split_task:
                file_name = ''.join([self.data_path, '/', split_task[0], '/', model, '/', split_task[1], '_', split_task[2], '_', split_task[3], '/', task, '_', model, '_subj_0', '_score_data'])
            elif 'chest' in split_task:
                file_name = ''.join([self.data_path, '/', split_task[0], '/', model, '/', split_task[1], '/', task, '_', model, '_subj_0', '_score_data'])
            else:
                file_name = ''.join([self.data_path, '/', split_task[0], '/', model, '/', split_task[1], '_', split_task[2], '/', task, '_', model, '_subj_0', '_score_data'])
        else:
            file_name = ''.join([self.data_path, '/', task, '/', model, '/', task, '_', model, '_subj_0', '_score_data'])
        # return self.load_spickle(file_name)
        # print 'loading file with name ', file_name
        try:
            if type == 'cma' or 'shaving' in split_task:
                print 'loading file with name ', file_name+'.pkl'
                return load_pickle(file_name+'.pkl')
            else:
                print 'loading file with name ', file_name
                return joblib.load(file_name)
        except IOError:
            print 'Load failed, sorry.'
            return None
Example #13
0
def store_raw_data_from_file(filename):
    print '>> loading and storing', filename
    r = ut.load_pickle(filename)
    cur.execute('INSERT INTO raw_data (id, surface_id, surface_height, unrotated_points, normal, labels, image, intensities, points) '\
                + 'values (?, ?, ?, ?, ?, ?, ?, ?, ?)',\
                (r['id'],     r['surface_id'],        r['surface_height'], r['unrotated_points'], 
                 r['normal'], np.matrix(r['labels']), r['image_name'],     r['intensities'],    r['points']))
Example #14
0
    def delete_autobed_configuration(self, req):
        """ Is the callback from the GUI Interaction Service. This service is 
        called when a user wants to delete a saved position. This function will 
        update the autobed_config_data.yaml file by removing the data 
        corresponding to the string specified. If successful, it will return a 
        boolean value"""
        try:
            #Get Autobed pickle file from the params directory into a dictionary
            current_autobed_config_data = load_pickle(self.autobed_config_file)
        except: 
            #return zero
            return update_bed_configResponse(False)

        #Delete present Autobed positions and angles from the dictionary
        try:
            current_autobed_config_data.pop(req.config)
        except KeyError:
            update_bed_configResponse(False)
            print "Autobed Configuration Database is already empty"
        try:
            #Update param file
            save_pickle(current_autobed_config_data, self.autobed_config_file) 
            #Publish list of keys to the abdout1 topic
            self.abdout1.publish(current_autobed_config_data.keys())
            #Return success if param file correctly updated
            return update_bed_configResponse(True)
        except:
            return update_bed_configResponse(False)
Example #15
0
def interp_data_plot(task_name, processed_data_path, nSet=1, save_pdf=False):

    target_file = os.path.join(processed_data_path,
                               task_name + '_dataSet_' + str(nSet))
    if os.path.isfile(target_file) is not True:
        print "There is no saved data"
        sys.exit()

    data_dict = ut.load_pickle(target_file)
    visualization_raw_data(data_dict, save_pdf=save_pdf)

    # training set
    trainingData, param_dict = extractLocalFeature(data_dict['trainData'],
                                                   feature_list, local_range)

    # test set
    normalTestData, _ = extractLocalFeature(data_dict['normalTestData'], feature_list, local_range, \
                                            param_dict=param_dict)
    abnormalTestData, _ = extractLocalFeature(data_dict['abnormalTestData'], feature_list, local_range, \
                                            param_dict=param_dict)

    print "======================================"
    print "Training data: ", np.shape(trainingData)
    print "Normal test data: ", np.shape(normalTestData)
    print "Abnormal test data: ", np.shape(abnormalTestData)
    print "======================================"

    visualization_hmm_data(feature_list, trainingData=trainingData, \
                           normalTestData=normalTestData,\
                           abnormalTestData=abnormalTestData, save_pdf=save_pdf)
Example #16
0
def compute_mean_std(pkls, bin_size):
    c_list = []
    f_list = []
    max_config = math.radians(100.)
    typ = 'rotary'
    for pkl_nm in pkls:
        pull_dict = ut.load_pickle(pkl_nm)
        pr2_log = 'pr2' in pkl_nm
        h_config, h_ftan = force_trajectory_in_hindsight(
            pull_dict, typ, pr2_log)
        #h_config, h_ftan = online_force_with_radius(pull_dict, pr2_log)
        c_list.append(h_config)
        f_list.append(h_ftan)
        max_config = min(max_config, np.max(h_config))

    leng = int(max_config / bin_size) - 1
    ff = []
    for c, f in zip(c_list, f_list):
        #c, f = maa.bin(c, f, bin_size, max, True)
        c, f = maa.bin(c, f, bin_size, np.mean, False, empty_value=np.nan)
        f, c = truncate_to_config(f, c, max_config)
        f = np.ma.masked_array(f, np.isnan(f))
        f = f[:leng]
        c = c[:leng]
        ff.append(f)
    arr = np.array(ff)
    mean = arr.mean(0)
    std = arr.std(0)
    return mean, std, c, arr
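The masked-array bookkeeping in `compute_mean_std` exists only so that bins some trials left empty (NaN) do not skew the average. On NumPy 1.8+ the same per-bin mean and standard deviation can be had with the nan-aware reductions; a sketch under that assumption:

import numpy as np

def mean_std_ignoring_nan(binned_trials):
    # binned_trials: list of equal-length 1-D arrays that may contain NaNs
    arr = np.vstack(binned_trials)
    return np.nanmean(arr, axis=0), np.nanstd(arr, axis=0)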
Example #17
0
    def __init__(self, arm):
        HRLArmKinematics.__init__(self, n_jts=7)

        # create joint limit dicts
        if arm == 'r':
            max_lim = np.radians([120.00, 122.15, 77.5, 144., 122., 45., 45.])
            min_lim = np.radians([-47.61, -20., -77.5, 0., -80., -45., -45.])
        else:
            max_lim = np.radians([120.00, 20., 77.5, 144., 80., 45., 45.])
            min_lim = np.radians(
                [-47.61, -122.15, -77.5, 0., -122., -45., -45.])

        self.joint_lim_dict = {}
        self.joint_lim_dict['max'] = max_lim
        self.joint_lim_dict['min'] = min_lim

        wrist_stub_length = 0.0135 + 0.04318  # wrist linkange and FT sensor lengths
        self.setup_kdl_chains(arm, wrist_stub_length)

        if arm == 'r':
            pkl_nm = 'q_guess_right_dict.pkl'
        else:
            pkl_nm = 'q_guess_left_dict.pkl'

        pth = roslib.rospack.rospackexec(['find', 'hrl_cody_arms'])
        q_guess_pkl = pth + '/src/hrl_cody_arms/' + pkl_nm
        self.q_guess_dict = ut.load_pickle(q_guess_pkl)
Example #18
0
    def run_slip_trial(self, i, f_thresh, t_impulse, goal):
        self.rosbag_srv('slip_impact_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
        self.skin_record_srv('slip_impact_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
        self.ft_and_humanoid_record_srv('slip_impact_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')

        goal_ls = goal[0].A1.tolist()

        goal_str_buf = [str(goal_ls[0])+', '+str(goal_ls[1])+', '+str(goal_ls[2])]
        goal_str = ''.join(goal_str_buf)
        controller = subprocess.call(['python', 
                                      'run_controller_debug.py',
                                      '--darci',
                                      '--t_impulse='+str(t_impulse),
                                      '--f_thresh='+str(f_thresh),
                                      "--goal="+goal_str])

        time.sleep(1.0)
        
        self.rosbag_srv('')
        self.skin_record_srv('')
        self.ft_and_humanoid_record_srv('')

        data = ut.load_pickle('./result.pkl')
        self.reaching_right_results.append(data['result'])
        return data['result']
Example #19
0
def compute_mean_std(pkls, bin_size):
    c_list = []
    f_list = []
    max_config = math.radians(100.)
    typ = 'rotary'
    for pkl_nm in pkls:
        pull_dict = ut.load_pickle(pkl_nm)
        pr2_log =  'pr2' in pkl_nm
        h_config, h_ftan = force_trajectory_in_hindsight(pull_dict,
                                                       typ, pr2_log)
        #h_config, h_ftan = online_force_with_radius(pull_dict, pr2_log)
        c_list.append(h_config)
        f_list.append(h_ftan)
        max_config = min(max_config, np.max(h_config))

    leng = int (max_config / bin_size) - 1
    ff = []
    for c, f in zip(c_list, f_list):
        #c, f = maa.bin(c, f, bin_size, max, True)
        c, f = maa.bin(c, f, bin_size, np.mean, False, empty_value = np.nan)
        f, c = truncate_to_config(f, c, max_config)
        f = np.ma.masked_array(f, np.isnan(f))
        f = f[:leng]
        c = c[:leng]
        ff.append(f)
    arr = np.array(ff)
    mean = arr.mean(0)
    std = arr.std(0)
    return mean, std, c, arr
Example #20
0
    def __init__(self, name, fname, arm='right'):
        self.arm = arm
        self.name = name

        rospy.logout('TrajPlayback: Initializing (%s)' % self.name)
        try:
            rospy.init_node('traj_playback_' + self.name)
        except:
            pass

        dat = hrl_util.load_pickle(fname)
        self.q = np.mat(dat[0].T)

        # subsample
        self.q = self.q[:, 0::30]
        # smooth
        self.t = np.linspace(1.3, 1.3 + (self.q.shape[1] - 1) * 0.3,
                             self.q.shape[1])

        self.vel = self.q.copy()
        self.vel[:, 1:] -= self.q[:, 0:-1]
        self.vel[:, 0] = 0
        self.vel /= 0.3

        self.pr2 = PR2()

        self.__service = rospy.Service('traj_playback/' + name,
                                       TrajPlaybackSrv, self.process_service)

        rospy.logout('TrajPlayback: Ready for service requests (%s)' %
                     self.name)
Example #21
0
    def __init__( self, name, fname, arm = 'right' ):
        self.arm = arm
        self.name = name

        rospy.logout( 'TrajPlayback: Initializing (%s)' % self.name )
        try:
            rospy.init_node('traj_playback_'+self.name)
        except:
            pass

        dat = hrl_util.load_pickle( fname )
        self.q = np.mat( dat[0].T )

        # subsample
        self.q = self.q[:,0::30]
        # smooth
        self.t = np.linspace( 1.3, 1.3 + (self.q.shape[1]-1)*0.3, self.q.shape[1] ) 

        self.vel = self.q.copy()
        self.vel[:,1:] -= self.q[:,0:-1]
        self.vel[:,0] = 0
        self.vel /= 0.3

        self.pr2 = PR2()

        self.__service = rospy.Service( 'traj_playback/' + name,
                                        TrajPlaybackSrv,
                                        self.process_service )

        rospy.logout( 'TrajPlayback: Ready for service requests (%s)' % self.name )
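Examples #20 and #21 estimate joint velocities with a backward difference over a fixed 0.3 s step. The same computation in one call, assuming `q` is an n_joints x n_samples trajectory (a sketch, not the class code):

import numpy as np

def backward_difference_velocity(q, dt=0.3):
    q = np.asarray(q, dtype=float)
    vel = np.zeros_like(q)
    # first sample keeps zero velocity, as in TrajPlayback
    vel[:, 1:] = np.diff(q, axis=1) / dt
    return vel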
Example #22
0
 def get_traj_pkl(self, pkl_file):
     data = ut.load_pickle(pkl_file)
     self.all_trajs = data['all_trajs']
     self.aStart = data['start']
     self.aGoal = data['goal']        
     self.all_discret_trajs = self.discretize_traj(self.all_trajs)
     return data
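Every snippet on this page goes through `ut.load_pickle` / `save_pickle` (the `ut` module is presumably hrl_lib.util from the HRL stack). If those helpers are not available, a minimal stand-in built on the standard library behaves the same way for ordinary pickle files (an assumption about the helpers, not their actual source):

import pickle

def load_pickle(filename):
    # stand-in for ut.load_pickle: read one pickled object from a file
    with open(filename, 'rb') as f:
        return pickle.load(f)

def save_pickle(obj, filename):
    # stand-in for ut.save_pickle: write one object to a pickle file
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)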
Example #23
0
    def __init__(self):

        pth = os.environ[
            'HRLBASEPATH'] + '/src/projects/modeling_forces/handheld_hook/'
        blocked_thresh_dict = ut.load_pickle(
            pth +
            'blocked_thresh_dict.pkl')  # ['mean_charlie', 'mean_known_mech']

        # Get data
        self.semantic = blocked_thresh_dict[
            'mean_charlie']  # each category has (n_std, mn, std)  <= force profiles
        self.second_time = blocked_thresh_dict[
            'mean_known_mech']  # (Ms(mn_mn, var_mn, mn_std, var_std), n_std)=(tuple(4),float)

        self.force_table = None  # discrete force table
        self.force_max = 0.0
        self.force_resol = 0.5

        self.trans_size = None
        self.trans_mat = None
        self.trans_mat = None
        self.trans_prob_mat = None
        self.start_prob_vec = None

        self.means = None
        self.vars = None

        pass
Example #24
0
def robot_trial_plot(cls,
                     mech,
                     pkl_nm,
                     one_pkl_nm,
                     start_idx=None,
                     mech_idx=None,
                     class_idx=None,
                     plt_st=None,
                     plt_mech=None,
                     plt_sem=None):

    # pkl_nm    : collision pickle
    # one_pkl_nm: perfect_perception

    one_d = ut.load_pickle(one_pkl_nm)
    one_trial = np.array(one_d['vec_list'][0:1])  # ee force_profile ? 4xN
    #one_trial = one_trial.reshape(1,len(one_trial))
    dt = second_time[mech]

    # Applied force (collision)
    plot_trial(pkl_nm, math.radians(len(dt[0][0])), start_idx, mech_idx,
               class_idx, plt_st, plt_mech, plt_sem)

    # Operating 1st time
    # semantic: human and robot data in where each category has (n_std, mn, std) <= force profile
    # 'RAM_db/*.pkl' 'RAM_db/robot_trials/perfect_perception/*.pkl' 'RAM_db/robot_trials/simulate_perception/*.pkl'
    # mechanism anlyse RAM with blocked option generates semantic data
    test_known_semantic_class(semantic[cls])

    # Expected force and operating 2nd time
    test_known_mechanism(dt, one_trial)

    ## pp.title(mech)
    mpu.legend(display_mode='normal')
Example #25
0
def plot_all_angle_force(dirName, human=True):
    
    # Plot results
    fig = plt.figure()

    ax = plt.subplot(111, aspect='equal')

    plt.xlabel("$x_1$")
    plt.ylabel("$x_2$")

    lFile = getAllFiles(dirName)
    for f in lFile:
        strPath, strFile = os.path.split(f)
        if strFile.find('_new.pkl')>=0:

            try:
                data = ut.load_pickle(f)   
            except:
                continue            

            print data.keys()

            if human:
                if strFile.find('close')>=0:
                    plt.plot(data['mechanism_x']*180.0/np.pi, data['force_tan_list'], "r-")
                else:
                    plt.plot(data['mechanism_x']*180.0/np.pi, data['force_tan_list'], "b-")
            else:
                print data.keys()                
                plt.plot(data['config_list']*180.0/np.pi, data['ftan_list'], "b-")

    fig.savefig('/home/dpark/Dropbox/HRL/mech.pdf', format='pdf')                    
    plt.show()
Example #26
0
    def run_foliage_trial(self, i, f_thresh, t_impulse, goal, num_reach, record = True):
        if record == True:
            self.rosbag_srv('foliage_goal_'+str(num_reach).zfill(3)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
            self.skin_record_srv('foliage_goal_'+str(num_reach).zfill(3)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')
            self.ft_and_humanoid_record_srv('foliage_goal_'+str(num_reach).zfill(3)+'_trial_'+str(i).zfill(3)+'_f_thresh_'+str(f_thresh).zfill(2)+'_delta_t_impulse_'+str(t_impulse).zfill(3)+'_')

        goal_ls = goal

        goal_str_buf = [str(goal_ls[0])+', '+str(goal_ls[1])+', '+str(goal_ls[2])]
        goal_str = ''.join(goal_str_buf)
        controller = subprocess.call(['python', 
                                      'run_controller_debug.py',
                                      '--darci',
                                      '--t_impulse='+str(t_impulse),
                                      '--f_thresh='+str(f_thresh),
                                      "--goal="+goal_str])

        time.sleep(1.0)
        
        if record == True:
            self.rosbag_srv('')
            self.skin_record_srv('')
            self.ft_and_humanoid_record_srv('')

        data = ut.load_pickle('./result.pkl')
        return data['result']
Example #27
0
    def kinematics_test(self):
        
        success_list, failure_list = util.getSubjectFileList(self.record_root_path, [self.subject], self.task)

        for fileName in failure_list:
            d = ut.load_pickle(fileName)
            print d.keys()


            time_max = np.amax(d['kinematics_time'])
            time_min = np.amin(d['kinematics_time'])

            ee_pos   = d['kinematics_ee_pos']
            x_max = np.amax(ee_pos[0,:])
            x_min = np.amin(ee_pos[0,:])

            y_max = np.amax(ee_pos[1,:])
            y_min = np.amin(ee_pos[1,:])

            z_max = np.amax(ee_pos[2,:])
            z_min = np.amin(ee_pos[2,:])
            
            fig = plt.figure()            
            ax  = fig.add_subplot(111, projection='3d')
            ax.plot(ee_pos[0,:], ee_pos[1,:], ee_pos[2,:])
            
            plt.show()
Example #28
0
    def __init__(self, arm):
        HRLArmKinematics.__init__(self, n_jts = 7)

        # create joint limit dicts
        if arm == 'r':
            max_lim = np.radians([ 120.00, 122.15, 77.5, 144., 122.,  45.,  45.])
            min_lim = np.radians([ -47.61,  -20., -77.5,   0., -80., -45., -45.])
        else:
            max_lim = np.radians([ 120.00,   20.,  77.5, 144.,   80.,  45.,  45.])
            min_lim = np.radians([ -47.61, -122.15, -77.5,   0., -122., -45., -45.])

        self.joint_lim_dict = {}
        self.joint_lim_dict['max'] = max_lim
        self.joint_lim_dict['min'] = min_lim

        wrist_stub_length = 0.0135 + 0.04318 # wrist linkange and FT sensor lengths
        self.setup_kdl_chains(arm, wrist_stub_length)

        if arm == 'r':
            pkl_nm = 'q_guess_right_dict.pkl'
        else:
            pkl_nm = 'q_guess_left_dict.pkl'

        pth = roslib.rospack.rospackexec(['find', 'hrl_cody_arms'])
        q_guess_pkl = pth + '/src/hrl_cody_arms/'+pkl_nm
        self.q_guess_dict = ut.load_pickle(q_guess_pkl)
Example #29
0
def interp_data_plot(task_name, processed_data_path, nSet=1, save_pdf=False):    

    target_file = os.path.join(processed_data_path, task_name+'_dataSet_'+str(nSet) )                    
    if os.path.isfile(target_file) is not True: 
        print "There is no saved data"
        sys.exit()

    data_dict = ut.load_pickle(target_file)
    visualization_raw_data(data_dict, save_pdf=save_pdf)

    # training set
    trainingData, param_dict = extractLocalFeature(data_dict['trainData'], feature_list, local_range)

    # test set
    normalTestData, _ = extractLocalFeature(data_dict['normalTestData'], feature_list, local_range, \
                                            param_dict=param_dict)        
    abnormalTestData, _ = extractLocalFeature(data_dict['abnormalTestData'], feature_list, local_range, \
                                            param_dict=param_dict)

    print "======================================"
    print "Training data: ", np.shape(trainingData)
    print "Normal test data: ", np.shape(normalTestData)
    print "Abnormal test data: ", np.shape(abnormalTestData)
    print "======================================"

    visualization_hmm_data(feature_list, trainingData=trainingData, \
                           normalTestData=normalTestData,\
                           abnormalTestData=abnormalTestData, save_pdf=save_pdf)        
Example #30
0
def robot_trial_plot(cls, mech, pkl_nm, one_pkl_nm, start_idx=None,
                     mech_idx=None, class_idx=None, plt_st=None,
                     plt_mech=None, plt_sem=None):

    # pkl_nm    : collision pickle
    # one_pkl_nm: perfect_perception

    one_d = ut.load_pickle(one_pkl_nm) 
    one_trial = np.array(one_d['vec_list'][0:1]) # ee force_profile ? 4xN
    #one_trial = one_trial.reshape(1,len(one_trial))
    dt = second_time[mech] 

    # Applied force (collision)
    plot_trial(pkl_nm, math.radians(len(dt[0][0])), start_idx,
               mech_idx, class_idx, plt_st, plt_mech, plt_sem) 

    # Operating 1st time
    # semantic: human and robot data in where each category has (n_std, mn, std) <= force profile
    # 'RAM_db/*.pkl' 'RAM_db/robot_trials/perfect_perception/*.pkl' 'RAM_db/robot_trials/simulate_perception/*.pkl'
    # mechanism anlyse RAM with blocked option generates semantic data
    test_known_semantic_class(semantic[cls])
    
    # Expected force and operating 2nd time 
    test_known_mechanism(dt, one_trial)

    ## pp.title(mech)
    mpu.legend(display_mode='normal')
Example #31
0
    def __init__(self, end_effector_length):
        # create joint limit dicts
        self.joint_lim_dict = {}
        self.joint_lim_dict['right_arm'] = {'max': np.radians([ 120.00, 122.15, 77.5, 144., 122.,  45.,  45.]),
                                            'min': np.radians([ -47.61,  -20., -77.5,   0., -80., -45., -45.])}

        self.joint_lim_dict['left_arm'] = {'max': np.radians([ 120.00,   20.,  77.5, 144.,   80.,  45.,  45.]),
                                           'min': np.radians([ -47.61, -122.15, -77.5,   0., -122., -45., -45.])}

        end_effector_length += 0.0135 + 0.04318 # add wrist linkange and FT sensor lengths
        self.setup_kdl_mekabot(end_effector_length)
        q_guess_pkl_l = os.environ['HOME']+'/svn/gt-ros-pkg/hrl/equilibrium_point_control/epc_core/src/cody_arms/q_guess_left_dict.pkl'
        q_guess_pkl_r = os.environ['HOME']+'/svn/gt-ros-pkg/hrl/equilibrium_point_control/epc_core/src/cody_arms/q_guess_right_dict.pkl'

        self.q_guess_dict_left = ut.load_pickle(q_guess_pkl_l)
        self.q_guess_dict_right = ut.load_pickle(q_guess_pkl_r)
Example #32
0
    def kinematics_test(self):

        success_list, failure_list = util.getSubjectFileList(
            self.record_root_path, [self.subject], self.task)

        for fileName in failure_list:
            d = ut.load_pickle(fileName)
            print d.keys()

            time_max = np.amax(d['kinematics_time'])
            time_min = np.amin(d['kinematics_time'])

            ee_pos = d['kinematics_ee_pos']
            x_max = np.amax(ee_pos[0, :])
            x_min = np.amin(ee_pos[0, :])

            y_max = np.amax(ee_pos[1, :])
            y_min = np.amin(ee_pos[1, :])

            z_max = np.amax(ee_pos[2, :])
            z_min = np.amin(ee_pos[2, :])

            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            ax.plot(ee_pos[0, :], ee_pos[1, :], ee_pos[2, :])

            plt.show()
Example #33
0
    def audio_test(self):

        success_list, failure_list = util.getSubjectFileList(
            self.record_root_path, [self.subject], self.task)

        for fileName in failure_list:
            d = ut.load_pickle(fileName)
            print d.keys()

            time_max = np.amax(d['audio_time'])
            time_min = np.amin(d['audio_time'])

            self.azimuth_max = 90.0
            self.azimuth_min = -90.0

            power_max = np.amax(d['audio_power'])
            power_min = np.amin(d['audio_power'])

            time_list = d['audio_time']
            azimuth_list = np.arange(self.azimuth_min, self.azimuth_max, 1.0)

            audio_image = np.zeros((len(time_list), len(azimuth_list)))

            print "image size ", audio_image.shape

            for idx, p in enumerate(d['audio_power']):

                azimuth_idx = min(range(len(azimuth_list)), key=lambda i: \
                                  abs(azimuth_list[i]-d['audio_azimuth'][idx]))

                audio_image[idx][azimuth_idx] = (p - power_min) / (power_max -
                                                                   power_min)

            fig = plt.figure()
            ax1 = fig.add_subplot(211)
            ax1.imshow(audio_image.T)
            ax1.set_aspect('auto')
            ax1.set_ylabel('azimuth angle', fontsize=18)

            y = np.arange(0.0, len(azimuth_list), 30.0)
            new_y = np.arange(self.azimuth_min, self.azimuth_max, 30.0)
            plt.yticks(y, new_y)
            #------------------------------------------------------------

            n, m = np.shape(d['audio_feature'])

            last_feature = np.hstack(
                [np.zeros((n, 1)), d['audio_feature'][:, :-1]])
            feature_delta = d['audio_feature'] - last_feature

            ax2 = fig.add_subplot(212)
            ax2.imshow(feature_delta[:n / 2])
            ax2.set_aspect('auto')
            ax2.set_xlabel('time', fontsize=18)
            ax2.set_ylabel('MFCC derivative', fontsize=18)

            #------------------------------------------------------------
            plt.suptitle('Auditory features', fontsize=18)
            plt.show()
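The `min(range(...), key=...)` expression above finds the azimuth bin closest to each reading. With the arrays already in NumPy, the same lookup is a one-liner; a sketch:

import numpy as np

def nearest_index(grid, value):
    # index of the grid entry closest to value
    return int(np.argmin(np.abs(np.asarray(grid) - value)))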
Example #34
0
    def __init__(self, connect=True, right_arm_settings=None,
                 left_arm_settings=None, end_effector_length=0.12818):
        ''' connect -  connect to the robot or not.
            can do FK and IK without connecting.
            right_arm_settings, left_arm_settings: objects of class
                        MekaArmSettings
        '''
        if connect:
            self.arm_settings = {}  # dict is set in set_arm_settings
            self.initialize_joints(right_arm_settings, left_arm_settings)
            #self.initialize_gripper()
            self.left_arm_ft = {'force': np.matrix(np.zeros((3,1),dtype='float32')),
                                'torque': np.matrix(np.zeros((3,1),dtype='float32'))}
            self.right_arm_ft = {'force': np.matrix(np.zeros((3,1),dtype='float32')),
                                 'torque': np.matrix(np.zeros((3,1),dtype='float32'))}
            self.fts_bias = {'left_arm': self.left_arm_ft, 'right_arm': self.right_arm_ft}

        # create joint limit dicts
        self.joint_lim_dict = {}
        self.joint_lim_dict['right_arm'] = {'max':[math.radians(a) for a in [ 100.00, 60.,  77.5, 144., 122.,  65.,  65.]],
                                            'min':[math.radians(a) for a in [ -47.61,  0., -77.5,   0., -80., -65., -65.]]}
        self.joint_lim_dict['left_arm'] = {'max':[math.radians(a) for a in [ 100.00,   20.,  77.5, 144.,   80.,  65.,  65.]],
                                           'min':[math.radians(a) for a in [ -47.61, -122., -77.5,   0., -125., -65., -65.]]}

        self.setup_kdl_mekabot(end_effector_length)
        q_guess_pkl_l = os.environ["HRLBASEPATH"] + "/src/libraries/mekabot/q_guess_left_dict.pkl"
        q_guess_pkl_r = os.environ["HRLBASEPATH"] + "/src/libraries/mekabot/q_guess_right_dict.pkl"
        self.q_guess_dict_left = ut.load_pickle(q_guess_pkl_l)
        self.q_guess_dict_right = ut.load_pickle(q_guess_pkl_r)

        self.jep = None # see set_joint_angles

        # kalman filtering force vector. (self.step and bias_wrist_ft)
        self.Q_force, self.R_force = {}, {}
        self.xhat_force, self.P_force = {}, {}

        self.Q_force['right_arm'] = [1e-3, 1e-3, 1e-3]
        self.R_force['right_arm'] = [0.2**2, 0.2**2, 0.2**2]
        self.xhat_force['right_arm'] = [0., 0., 0.]
        self.P_force['right_arm'] = [1.0, 1.0, 1.0]

        self.Q_force['left_arm'] = [1e-3, 1e-3, 1e-3]
        self.R_force['left_arm'] = [0.2**2, 0.2**2, 0.2**2]
        self.xhat_force['left_arm'] = [0., 0., 0.]
        self.P_force['left_arm'] = [1.0, 1.0, 1.0]
Example #35
0
    def audio_test(self):
        
        success_list, failure_list = util.getSubjectFileList(self.record_root_path, [self.subject], self.task)

        for fileName in failure_list:
            d = ut.load_pickle(fileName)
            print d.keys()

            time_max = np.amax(d['audio_time'])
            time_min = np.amin(d['audio_time'])

            self.azimuth_max = 90.0
            self.azimuth_min = -90.0

            power_max = np.amax(d['audio_power'])
            power_min = np.amin(d['audio_power'])

            time_list    = d['audio_time']
            azimuth_list = np.arange(self.azimuth_min, self.azimuth_max, 1.0)
            
            audio_image = np.zeros( (len(time_list), len(azimuth_list)) )

            print "image size ", audio_image.shape

            for idx, p in enumerate(d['audio_power']):

                azimuth_idx = min(range(len(azimuth_list)), key=lambda i: \
                                  abs(azimuth_list[i]-d['audio_azimuth'][idx]))
                
                audio_image[idx][azimuth_idx] = (p - power_min)/(power_max - power_min)

                
            fig = plt.figure()            
            ax1 = fig.add_subplot(211)
            ax1.imshow(audio_image.T)
            ax1.set_aspect('auto')
            ax1.set_ylabel('azimuth angle', fontsize=18)

            y     = np.arange(0.0, len(azimuth_list), 30.0)
            new_y = np.arange(self.azimuth_min, self.azimuth_max, 30.0)
            plt.yticks(y,new_y)
            #------------------------------------------------------------

            n,m = np.shape(d['audio_feature'])

            last_feature = np.hstack([ np.zeros((n,1)), d['audio_feature'][:,:-1] ])            
            feature_delta = d['audio_feature'] - last_feature
            
            ax2 = fig.add_subplot(212)
            ax2.imshow( feature_delta[:n/2] )
            ax2.set_aspect('auto')
            ax2.set_xlabel('time', fontsize=18)
            ax2.set_ylabel('MFCC derivative', fontsize=18)

            #------------------------------------------------------------
            plt.suptitle('Auditory features', fontsize=18)            
            plt.show()
Example #36
0
def plot_trial(pkl_nm, max_ang, start_idx=None, mech_idx=None,
               class_idx=None, start=None, mech=None, sem=None):

    pull_dict = ut.load_pickle(pkl_nm)
    typ = 'rotary'
    pr2_log =  'pr2' in pkl_nm
    h_config, h_ftan = atr.force_trajectory_in_hindsight(pull_dict,
                                                   typ, pr2_log)
    h_config = np.array(h_config)
    h_ftan = np.array(h_ftan)
    h_ftan = h_ftan[h_config < max_ang]
    h_config = h_config[h_config < max_ang] # cut
    bin_size = math.radians(1.)
    h_config_degrees = np.degrees(h_config)
    ftan_raw = h_ftan

    # resampling with specific interval
    h_config, h_ftan = maa.bin(h_config, h_ftan, bin_size, np.mean, True) 
    pp.plot(np.degrees(h_config), h_ftan, 'yo-', mew=0, ms=0,
            label='applied force', linewidth=2)
    ## pp.xlabel('Angle (degrees)')
    ## pp.ylabel('Opening Force (N)')

    if start != None:
        x = start[0]
        y = start[1]
        pp.plot([x], [y], 'ko', mew=0, ms=5)
        #pp.text(x, y-1.5, '1', withdash=True) # cody_kitchen_box
        #pp.text(x-1, y+1.0, '1', withdash=True) # cody_fridge_box
        #pp.text(x, y-4., '1', withdash=True) # cody_fridge_chair
        #pp.text(x+1.0, y-0.5, '1', withdash=True) # locked_pr2
        #pp.text(x+1.0, y-0.5, '1', withdash=True) # locked_cody

        x = mech[0]
        y = mech[1]
        pp.plot([x], [y], 'ko', mew=0, ms=5)

        x = sem[0]
        y = sem[1]
        pp.plot([x], [y], 'ko', mew=0, ms=5)

    if start_idx != None:
        pp.plot([h_config_degrees[start_idx]], [ftan_raw[start_idx]],
                'ko', mew=0, ms=5)
        pp.plot([h_config_degrees[mech_idx]], [ftan_raw[mech_idx]],
                'ko', mew=0, ms=5)
        pp.plot([h_config_degrees[class_idx]], [ftan_raw[class_idx]],
                'ko', mew=0, ms=5)
        print 'Time with mechanism known:', (mech_idx-start_idx) * 100.
        print 'Time with class known:', (class_idx-start_idx) * 100.
        print ''
        print 'Force increase with known mechanism:', ftan_raw[mech_idx] - ftan_raw[start_idx]
        print 'Force increase with known class:', ftan_raw[class_idx] - ftan_raw[start_idx]
        print ''
        print 'Angle increase with known mechanism:', h_config_degrees[mech_idx] - h_config_degrees[start_idx]
        print 'Angle increase with known class:', h_config_degrees[class_idx] - h_config_degrees[start_idx]
Example #37
0
 def load(self, path, filename):
     self.filename = filename
     self.path = path
     #try:
     dict = ut.load_pickle(self.path+'/'+self.filename)
     #except:
     #    print 'loading of '+self.path+'/'+filename+' failed. WARNING: it will be overwritten on save()!'
     #    return
         
     self.datasets = dict['datasets']
Example #38
0
def get_discrete_test_from_mean_dict(pkl_file):

    data = ut.load_pickle(pkl_file)

    mean_charlie    = data['mean_charlie']
    mean_known_mech = data['mean_known_mech']

    
    print mean_charlie['ikea_cabinet_noisy_cody'] # Force profile (mean * 0.0, mean, std)
    print mean_known_mech['ikea_cabinet_noisy_cody']
Example #39
0
 def _load_database(self):
     #                           tree         dict
     #indexes of locations in tree => ids list => location data
     if not os.path.isfile(self.saved_locations_fname):
         return
     d = ut.load_pickle(self.saved_locations_fname)
     self.ids = d['ids']
     self.centers = d['centers']
     self.data = d['data']
     self.tree = sp.KDTree(np.array(self.centers).T)
Example #40
0
def test_dict(fname):
    dict = ut.load_pickle(fname)
    firenze = ar.M3HrlRobot(connect=False)

    rot = tr.rotY(math.radians(-90))
    p = np.matrix([0.4,-0.42,-0.2]).T

    c = find_good_config(p,dict)
    res = firenze.IK(p,rot,q_guess=c)
    print 'IK soln: ', [math.degrees(qi) for qi in res]
Example #41
0
    def ft_test(self):

        success_list, failure_list = util.getSubjectFileList(
            self.record_root_path, [self.subject], self.task)

        for fileName in failure_list:
            d = ut.load_pickle(fileName)
            print d.keys()

            fig = plt.figure()
Example #42
0
def test_dict(fname):
    dic = ut.load_pickle(fname)
    firenze = cak.CodyArmKinematics('r')

    rot = tr.rotY(math.radians(-90))
    p = np.matrix([0.4,-0.42,-0.2]).T

    c = find_good_config(p, dic)
    res = firenze.IK(p, rot, q_guess=c)
    print 'IK soln: ', [math.degrees(qi) for qi in res]
Example #43
0
    def ft_test(self):
        
        success_list, failure_list = util.getSubjectFileList(self.record_root_path, [self.subject], self.task)

        for fileName in failure_list:
            d = ut.load_pickle(fileName)
            print d.keys()


            fig = plt.figure()            
Example #44
0
def test_dict(fname):
    dict = ut.load_pickle(fname)
    firenze = ar.M3HrlRobot(connect=False)

    rot = tr.rotY(math.radians(-90))
    p = np.matrix([0.4, -0.42, -0.2]).T

    c = find_good_config(p, dict)
    res = firenze.IK(p, rot, q_guess=c)
    print 'IK soln: ', [math.degrees(qi) for qi in res]
Example #45
0
def create_dict(fname):
    firenze = ar.M3HrlRobot(connect=False)
    good_configs_list = ut.load_pickle(fname)
    cartesian_points_list = []
    for gc in good_configs_list:
        cartesian_points_list.append(firenze.FK('right_arm', gc).A1.tolist())

    m = np.matrix(cartesian_points_list).T
    print 'm.shape:', m.shape
    dict = {'cart_pts_mat': m, 'good_configs_list': good_configs_list}
    ut.save_pickle(dict, ut.formatted_time() + '_goodconf_dict.pkl')
Example #46
0
def density_plot(pickle_file_name):
    BIN_SIZE = 20
    #PICKLE_FOLDER = 'pickle_files'

    data_dict = ut.load_pickle(pickle_file_name)
    #pdb.set_trace()
    orig_pickle_folder, _ = pt.split(pickle_file_name)
    folder_name, img_name = pt.split(data_dict['image'])
    nimg_path = pt.join(orig_pickle_folder, img_name)
    img_obj = Image.open(nimg_path)
    w, h = img_obj.size
    pb.imshow(img_obj, origin='lower')

    data_dict['neg_pred'][1, :] = h - data_dict['neg_pred'][1, :]
    data_dict['pos_pred'][1, :] = h - data_dict['pos_pred'][1, :]

    all_pts = np.column_stack((data_dict['neg_pred'], data_dict['pos_pred']))
    Hall, xedges, yedges = np.histogram2d(all_pts[0, :].A1,
                                          all_pts[1, :].A1,
                                          bins=num_bins(all_pts, BIN_SIZE))
    Hneg, xedges, yedges = np.histogram2d(data_dict['neg_pred'][0, :].A1,
                                          data_dict['neg_pred'][1, :].A1,
                                          bins=[xedges, yedges])

    extent = [xedges[0], xedges[-1], yedges[-1], yedges[0]]
    Himage = (Hall - Hneg).T
    max_val, min_val = np.max(Himage), np.min(Himage)
    Hrgba = np.zeros((Himage.shape[0], Himage.shape[1], 4), dtype='uint8')
    Hrgba[:, :, 0] = 0
    Hrgba[:, :, 1] = 255  #Himage*80
    Hrgba[:, :, 2] = 0
    Hrgba[:, :, 3] = 255
    r, c = np.where(Himage == 0)
    Hrgba[r, c, 3] = 0

    print 'max', max_val, 'min', min_val
    pb.imshow(Hrgba,
              extent=extent,
              interpolation='spline36',
              origin='upper',
              alpha=.7)
    #pdb.set_trace()
    #pb.plot(data_dict['neg_pred'][0,:].A1, data_dict['neg_pred'][1,:].A1, 'rx')
    #pb.plot(data_dict['pos_pred'][0,:].A1, data_dict['pos_pred'][1,:].A1, 'x')
    min_x, max_x, min_y, max_y = minmax(all_pts)
    pb.axis([
        max(min_x - 100, 0),
        min(max_x + 100, w),
        max(min_y - 100, 0),
        min(max_y + 100, h)
    ])
    #pb.axis([0, w, 0, h])
    name, extension = pt.splitext(img_name)
    pb.savefig(pt.join(orig_pickle_folder, name + '_plot.png'))
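`num_bins` is not shown in Example #46; from how it is called it must return one bin count per axis for `np.histogram2d`. A hypothetical version, assuming `pts` is a 2xN matrix of pixel coordinates and `bin_size` is the desired bin width in pixels:

import numpy as np

def num_bins(pts, bin_size):
    # hypothetical helper: bins per axis from the data extent and a bin width
    pts = np.asarray(pts, dtype=float)
    extent = pts.max(axis=1) - pts.min(axis=1)
    return [max(1, int(round(extent[i] / bin_size))) for i in range(2)]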
Example #47
0
def create_dict(fname):
    firenze = cak.CodyArmKinematics('r')
    good_configs_list = ut.load_pickle(fname)
    cartesian_points_list = []
    for gc in good_configs_list:
        cartesian_points_list.append(firenze.FK(gc).A1.tolist())

    m = np.matrix(cartesian_points_list).T
    print 'm.shape:', m.shape
    dic = {'cart_pts_mat':m, 'good_configs_list':good_configs_list}
    ut.save_pickle(dic,ut.formatted_time()+'_goodconf_dict.pkl')
Example #48
0
File: test08.py Project: wklharry/hrl
def compress_pkl(surf_path_complete):
    features_list = ut.load_pickle(surf_path_complete)

    rospy.loginfo('making matrix')
    fmat = features_mat(features_list)

    rospy.loginfo('compressing')
    reduced_features = features_mat_compress(fmat, 1000)
    small_pickle_fname = gen_pkl_name(path_complete, '.surf_sm_pkl')
    ut.save_pickle(reduced_features, small_pickle_fname)
    rospy.loginfo('saved to %s' % small_pickle_fname)
Example #49
0
File: test12.py Project: wklharry/hrl
    def __init__(self, surf_name, contacts_name):
        forearm_cam_l = '/l_forearm_cam/image_rect_color'

        self.bridge = CvBridge()
        rospy.loginfo('loading %s' % surf_name)
        self.surf_data = ut.load_pickle(surf_name)
        rospy.loginfo('loading %s' % contacts_name)
        self.scene, self.contact_points = ut.load_pickle(contacts_name)
        self.surf_idx = None
        cv.NamedWindow('surf', 1)
        self.tflistener = tf.TransformListener()
        self.camera_geo = ROSCameraCalibration('/l_forearm_cam/camera_info')

        self.lmat0 = None
        self.rmat0 = None
        self.contact = False
        self.contact_stopped = False

        #rospy.Subscriber('/pressure/l_gripper_motor', pm.PressureState, self.lpress_cb)
        rospy.Subscriber(forearm_cam_l, sm.Image, self.image_cb)
        print 'ready'
Example #50
0
 def test(self, feature_data = None):
     #test on current scan:
     print getTime(), 'test on:', self.processor.scan_dataset.id    
         
     if feature_data == None:
         filename = self.processor.get_features_filename()
         dict = ut.load_pickle(filename)
     else:
         dict = feature_data
     
     baseline_labels = self.classify_baseline_code()
 
     return baseline_labels, self.test_results(dict, baseline_labels)  
Example #51
0
 def getCalibration(self, filename='bowl_frame.pkl'):
     if os.path.isfile(filename) == False: return False
     
     d = ut.load_pickle(filename)        
     self.bowl_cen_frame_off = d['frame']
     
     self.bowl_calib = True
     print "------------------------------------"
     print "Calibration complete! - bowl_cen offset"
     print "------------------------------------"
     print "P: ", self.bowl_cen_frame_off.p
     print "M: ", self.bowl_cen_frame_off.M
     print "------------------------------------"
     return True
Example #52
0
    def display(self, succ_pickle, fail_pickle):
        # load in pickle
        print 'loading...'
        successes = ut.load_pickle(succ_pickle)
        failures = ut.load_pickle(fail_pickle)

        topics = ['/pressure/l_gripper_motor', '/pressure/r_gripper_motor']
        topic = topics[0]

        red = np.matrix([1., 0, 0, 1.]).T
        green = np.matrix([0., 1., 0, 1.]).T
        blue = np.matrix([0., 0, 1., 1.]).T

        #data_dict [state number] [topic] [trial number] ['t' 'left' 'right']
        succ_avg = average_reading_over_trials(successes, topic)
        fail_avg = average_reading_over_trials(failures, topic)
        diff_avg = subtract_records(succ_avg, fail_avg, topic)

        succ_marker, succ_pc = construct_pressure_marker_message(
            succ_avg, topic, green)
        fail_marker, fail_pc = construct_pressure_marker_message(
            fail_avg, topic, red)
        diff_marker, diff_pc = construct_pressure_marker_message(
            diff_avg, topic, blue)

        r = rospy.Rate(10)
        print 'publishing...'
        while not rospy.is_shutdown():
            self.succ_marker.publish(succ_marker)
            self.fail_marker.publish(fail_marker)

            self.succ_pc_pub.publish(succ_pc)
            self.fail_pc_pub.publish(fail_pc)

            self.diff_marker.publish(diff_marker)
            self.diff_pc_pub.publish(diff_pc)
            r.sleep()
Example #53
0
 def load_state(self):
     try:
         rects = ut.load_pickle(self.save_filename)
         srects = []
         for r in rects:
             start, end, name = r
             srect = SelectRect(start, end, self.axes)
             srect.set_name(name)
             srects.append(srect)
         self.rects = srects
         self._set_active(0)
         self.state = 'TWO_POINTS'
         self.figure.canvas.draw()
     except IOError, e:
         print 'INFO: was not able to load', self.save_filename
Example #54
0
    def getCalibration(self, filename='mouth_frame.pkl'):
        if os.path.isfile(filename) == False: return False

        d = ut.load_pickle(filename)        
        print d.keys()
        self.mouth_frame_off = d['frame']
        
        self.head_calib = True
        print "------------------------------------"
        print "Calibration complete! - mouth offset"
        print "------------------------------------"
        print "P: ", self.mouth_frame_off.p
        print "M: ", self.mouth_frame_off.M
        print "------------------------------------"
        return True