Example #1
    def add_all_nodes_old(self, node_id, metadata, planning_time, check_duplicates=True):
        """Add a state node for each final pose in `metadata`, reusing existing
        nodes for equivalent poses, and connect each to `node_id`.

        Returns the edge alphas (max vertex probability per pose). Note that
        `planning_time` is unused in this older version.
        """
        if self.log:
            print(self.node_idx)
        edge_alphas = []
        for pose_ind, (pose, quality) in enumerate(zip(metadata['final_poses'], metadata['qualities'])):
            # Check whether this pose already exists in the graph
            already_exists = False
            if check_duplicates:
                for j, node in self.G.nodes(data=True):
                    if is_equivalent_pose(pose, node['pose']):
                        already_exists = True
                        break
            if not already_exists:
                self.G.add_node(self.node_idx, pose=pose, gq=quality, node_type='state')
                to_node_idx = self.node_idx
                self.node_idx += 1
            else:
                to_node_idx = j  # reuse the equivalent existing node
            if self.log:
                print('edge from {} to {}={}'.format(
                    node_id, to_node_idx,
                    np.clip(np.max(metadata['vertex_probs'][:, pose_ind]), 0, 1)))
            self.G.add_edge(node_id, to_node_idx)
            edge_alphas.append(np.clip(np.max(metadata['vertex_probs'][:, pose_ind]), 0, 1))
        return edge_alphas
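A minimal, runnable sketch of the duplicate-check pattern used above. Everything here is illustrative: is_equivalent_pose is a hypothetical stand-in that compares poses as 3-vectors, and the graph, poses, and probabilities are made up.

import numpy as np
import networkx as nx

def is_equivalent_pose(a, b, tol=1e-3):
    # Hypothetical stand-in: treats poses as 3-vectors and compares distance.
    return np.linalg.norm(np.asarray(a) - np.asarray(b)) < tol

G = nx.DiGraph()
G.add_node(0, pose=np.full(3, 5.0), node_type='state')  # root state node
root, node_idx = 0, 1

final_poses = [np.zeros(3), np.zeros(3)]            # second pose duplicates the first
qualities = [0.9, 0.4]
vertex_probs = np.array([[0.7, 0.3], [0.2, 0.8]])   # n actions x m poses

edge_alphas = []
for pose_ind, (pose, quality) in enumerate(zip(final_poses, qualities)):
    # Reuse an equivalent existing node if one is found
    to_node_idx = next((j for j, n in G.nodes(data=True)
                        if is_equivalent_pose(pose, n['pose'])), None)
    if to_node_idx is None:
        G.add_node(node_idx, pose=pose, gq=quality, node_type='state')
        to_node_idx, node_idx = node_idx, node_idx + 1
    G.add_edge(root, to_node_idx)
    edge_alphas.append(np.clip(np.max(vertex_probs[:, pose_ind]), 0, 1))

print(G.number_of_nodes(), edge_alphas)  # 2 nodes: the duplicate pose reused node 1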
Example #2
def visualize(env, datasets, obj_id_to_keys, models, model_names,
              use_sensitivities):
    for dataset, obj_id_to_key in zip(datasets, obj_id_to_keys):
        for i in range(dataset.num_datapoints):
            datapoint = dataset.datapoint(i)

            dataset_name, key = obj_id_to_key[str(
                datapoint['obj_id'])].split(KEY_SEP_TOKEN)
            obj = env._state_space._database.dataset(dataset_name)[key]
            orig_pose = to_rigid(datapoint['obj_pose'])
            obj.T_obj_world = orig_pose
            env.state.obj = obj

            # Fraction of the NUM_PER_DATAPOINT rollouts that left the original pose
            actual = 0
            for j in range(NUM_PER_DATAPOINT):
                mat = datapoint['actual_poses'][j * 4:(j + 1) * 4]
                actual_pose = to_rigid(mat)
                if not is_equivalent_pose(actual_pose, orig_pose):
                    actual += 1
            actual /= float(NUM_PER_DATAPOINT)

            probs = []
            for model, model_name, use_sensitivity in zip(
                    models, model_names, use_sensitivities):
                model.load_object(env.state)
                _, vertex_probs, _ = model.predict(
                    [datapoint['vertex']],
                    [datapoint['normal']],
                    [-datapoint['normal']],  # push dir
                    use_sensitivity=use_sensitivity)
                probs.append(1 - vertex_probs[0, 0])

            # Show cases where the fourth model's error beats the third's by > 0.1
            if abs(probs[2] - actual) - abs(probs[3] - actual) > .1:
                print('actual {} {} {} {} {} {} {} {} {}'.format(
                    actual, model_names[0], probs[0], model_names[1], probs[1],
                    model_names[2], probs[2], model_names[3], probs[3]))
                env.render_3d_scene()
                start_point = datapoint['vertex'] + .06 * datapoint['normal']
                end_point = datapoint['vertex']
                shaft_points = [start_point, end_point]
                h1 = np.array([[0.7071, -0.7071, 0], [0.7071, 0.7071, 0],
                               [0, 0, 1]]).dot(-datapoint['normal'])
                h2 = np.array([[0.7071, 0.7071, 0], [-0.7071, 0.7071, 0],
                               [0, 0, 1]]).dot(-datapoint['normal'])
                head_points = [
                    end_point - 0.02 * h2, end_point, end_point - 0.02 * h1
                ]
                vis3d.plot3d(shaft_points, color=[1, 0, 0], tube_radius=.002)
                vis3d.plot3d(head_points, color=[1, 0, 0], tube_radius=.002)
                vis3d.show()
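The arrow drawn at the end of visualize rotates the negated surface normal by plus and minus 45 degrees about the z axis to form the arrow head. A standalone sketch of just that geometry, with a made-up contact vertex and normal:

import numpy as np

vertex = np.array([0.1, 0.0, 0.05])        # illustrative contact point
normal = np.array([0.0, 0.0, 1.0])         # illustrative unit surface normal
push_dir = -normal

c = s = 0.7071                              # cos(45 deg) = sin(45 deg)
rot_p45 = np.array([[c, -s, 0], [s,  c, 0], [0, 0, 1]])   # +45 deg about z
rot_m45 = np.array([[c,  s, 0], [-s, c, 0], [0, 0, 1]])   # -45 deg about z

start_point = vertex + 0.06 * normal        # shaft starts above the surface
end_point = vertex                          # arrow tip at the contact vertex
h1, h2 = rot_p45.dot(push_dir), rot_m45.dot(push_dir)
head_points = [end_point - 0.02 * h2, end_point, end_point - 0.02 * h1]
print(np.round(head_points, 4))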
Example #3
    def combine_equivalent_poses(self, final_poses, vertex_probs):
        """
        Combines the probabilities of poses which are equivalent
        except for rotations along the z axis or translations

        Parameters
        ----------
        final_poses : :obj:`list` of :obj:`StablePose`
            list of size m of the poses the object ends up in after being toppled over each edge
        vertex_probs : nxm :obj:`numpy.ndarray`
            probability of object landing in each of the m poses for each of the n topple actions

        Returns
        -------
        :obj:`list` of :obj:`StablePose` of size o
            list of unique Stable Poses
        nxo :obj:`numpy.ndarray`
            probability of object landing in each of the o unique stable poses
            for each of the n topple actions
        """
        i = 0
        edge_inds = list(np.arange(vertex_probs.shape[1]))
        grouped_poses = []
        grouped_edges = []
        while i < len(edge_inds):
            equivalent_edges = [edge_inds[i]]
            curr_edge_ind = edge_inds[i]
            j = i + 1
            while j < len(edge_inds):
                if is_equivalent_pose(final_poses[curr_edge_ind],
                                      final_poses[edge_inds[j]]):
                    equivalent_edges.append(edge_inds[j])
                    edge_inds.pop(j)
                else:
                    j += 1
            grouped_poses.append(final_poses[curr_edge_ind])
            grouped_edges.append(equivalent_edges)
            i += 1
        vertex_probs = \
            np.hstack([np.sum(vertex_probs[:,edges], axis=1).reshape(-1,1) for edges in grouped_edges])
        return grouped_poses, vertex_probs
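A standalone sketch of the same column-merging idea, with scalar "poses" and a tolerance check standing in for StablePose and the real is_equivalent_pose:

import numpy as np

def is_equivalent_pose(a, b, tol=1e-6):
    # Hypothetical stand-in for the real pose comparison.
    return abs(a - b) < tol

def combine_equivalent_poses(final_poses, vertex_probs):
    edge_inds = list(range(vertex_probs.shape[1]))
    grouped_poses, grouped_edges = [], []
    i = 0
    while i < len(edge_inds):
        curr = edge_inds[i]
        group = [curr]
        j = i + 1
        while j < len(edge_inds):
            if is_equivalent_pose(final_poses[curr], final_poses[edge_inds[j]]):
                group.append(edge_inds.pop(j))
            else:
                j += 1
        grouped_poses.append(final_poses[curr])
        grouped_edges.append(group)
        i += 1
    # Sum the probability mass of each group of equivalent poses
    merged = np.hstack([vertex_probs[:, g].sum(axis=1, keepdims=True)
                        for g in grouped_edges])
    return grouped_poses, merged

poses = [0.0, 1.0, 0.0]                       # poses 0 and 2 are "equivalent"
probs = np.array([[0.2, 0.5, 0.3],
                  [0.6, 0.1, 0.3]])
unique, merged = combine_equivalent_poses(poses, probs)
print(unique)   # [0.0, 1.0]
print(merged)   # [[0.5 0.5]
                #  [0.9 0.1]]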
Example #4
    def add_all_nodes(self, node_id, metadata, planning_time, check_duplicates=True):
        """Add a state node for each final pose in `metadata` plus an action
        node per pose, reusing existing state nodes for equivalent poses.

        Returns the edge alphas.
        """
        self.G.nodes[node_id]['planning_time'] = planning_time
        edge_alphas = []
        vertex_probs = metadata['vertex_probs']
        metadata_to_graph_mapping = []
        for pose, quality in zip(metadata['final_poses'], metadata['qualities']):
            # Check whether this pose already exists in the graph
            already_exists = False
            if check_duplicates:
                for i, node in self.G.nodes(data=True):
                    if node['node_type'] == 'state' and is_equivalent_pose(pose, node['pose']):
                        already_exists = True
                        break
            if not already_exists:
                self.G.add_node(self.node_idx, pose=pose, gq=quality, value=quality, node_type='state')
                metadata_to_graph_mapping.append(self.node_idx)
                self.node_idx += 1
            else:
                metadata_to_graph_mapping.append(i)  # reuse the equivalent existing node

        for pose_ind in range(len(metadata['final_poses'])):
            # Probabilities over final poses for the action most likely to reach this pose
            best_action = vertex_probs[np.argmax(vertex_probs[:, pose_ind])]
            action_node_idx = str(node_id) + str(metadata_to_graph_mapping[pose_ind])
            self.G.add_node(action_node_idx, best_action=best_action, value=0, node_type='action')
            self.G.add_edge(node_id, action_node_idx)
            edge_alphas.append(1)
            for prob, next_node_id in zip(best_action, metadata_to_graph_mapping):
                if prob != 0.0:
                    self.G.add_edge(action_node_idx, next_node_id, prob=prob)
                    edge_alphas.append(np.clip(prob, 0, 1))

        return edge_alphas
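A minimal, self-contained sketch of the two-layer pattern the method builds (state node, then an action node, then edges to the next-state nodes), with made-up probabilities. One caveat worth noting: ids built by string concatenation can collide (str(1)+str(23) equals str(12)+str(3)), so a tuple id such as (node_id, target) would be unambiguous.

import numpy as np
import networkx as nx

G = nx.DiGraph()
for s in (0, 1, 2):
    G.add_node(s, node_type='state')   # 0 is the current state, 1 and 2 successors

best_action = np.array([0.7, 0.3])     # illustrative landing probabilities
mapping = [1, 2]                       # metadata pose index -> graph node id

action_node = str(0) + str(mapping[0])  # '01', built as in the snippet above
G.add_node(action_node, best_action=best_action, value=0, node_type='action')
G.add_edge(0, action_node)
for prob, next_node in zip(best_action, mapping):
    if prob != 0.0:
        G.add_edge(action_node, next_node, prob=prob)

print(list(G.edges(data=True)))  # 0 -> '01', then '01' -> 1 (0.7) and '01' -> 2 (0.3)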
Example #5
def evaluate_models(models, datasets, obj_id_to_keys, env, use_sensitivity):
    y_true, y_pred = [], []
    combined_tvs, combined_l1s = [], []
    model = models[0]  # NOTE: the body below evaluates a single model

    total_datapoints = 0
    for dataset, obj_id_to_key in zip(datasets, obj_id_to_keys):
        total_datapoints += dataset.num_datapoints
        for i in range(dataset.num_datapoints):
            datapoint = dataset.datapoint(i)

            dataset_name, key = obj_id_to_key[str(
                datapoint['obj_id'])].split(KEY_SEP_TOKEN)
            obj = env._state_space._database.dataset(dataset_name)[key]
            orig_pose = to_rigid(datapoint['obj_pose'])
            obj.T_obj_world = orig_pose
            env.state.obj = obj

            model.load_object(env.state)
            predicted_poses, vertex_probs, _ = model.predict(
                [datapoint['vertex']],
                [datapoint['normal']],
                [-datapoint['normal']],  # push dir
                use_sensitivity=use_sensitivity)
            vertex_probs = vertex_probs[0]
            y_pred.extend([1 - vertex_probs[0]] * NUM_PER_DATAPOINT)

            empirical_dist = []
            for j in range(NUM_PER_DATAPOINT):
                actual_pose_mat = datapoint['actual_poses'][j * 4:(j + 1) * 4]
                rot, trans = RigidTransform.rotation_and_translation_from_matrix(
                    actual_pose_mat)
                pose_to_add = RigidTransform(rot, trans, 'obj', 'world')

                y_true.append(
                    0 if is_equivalent_pose(pose_to_add, orig_pose) else 1)

                found = False
                for k in range(len(empirical_dist)):
                    actual_pose, actual_prob = empirical_dist[k]
                    if is_equivalent_pose(actual_pose, pose_to_add):
                        # each trial contributes 1/NUM_PER_DATAPOINT probability mass
                        empirical_dist[k][1] += 1.0 / NUM_PER_DATAPOINT
                        found = True
                        break
                if not found:
                    empirical_dist.append([pose_to_add, 1.0 / NUM_PER_DATAPOINT])

            # Per-datapoint error between empirical and predicted distributions
            total_variation, l1 = 1, []
            for empirical_pose, empirical_prob in empirical_dist:
                for predicted_pose, predicted_prob in zip(
                        predicted_poses, vertex_probs):
                    if is_equivalent_pose(empirical_pose, predicted_pose):
                        total_variation = min(
                            total_variation,
                            abs(empirical_prob - predicted_prob))
                        l1.append(abs(empirical_prob - predicted_prob))
                        break
            l1 = np.mean(l1) if len(l1) > 0 else 0
            combined_tvs.append(total_variation)
            combined_l1s.append(l1)
    avg_precision = metrics.average_precision_score(y_true, y_pred)
    return combined_tvs, combined_l1s, avg_precision, y_true, y_pred
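A standalone sketch of the empirical-distribution accumulation in the middle of the function, again with scalar "poses" and a simplified equivalence check; each of the NUM_PER_DATAPOINT trials contributes 1/NUM_PER_DATAPOINT probability mass:

NUM_PER_DATAPOINT = 10  # assumed trial count, matching the 0.1 mass per trial

def is_equivalent_pose(a, b, tol=1e-6):
    # Hypothetical stand-in: scalar "poses" for illustration.
    return abs(a - b) < tol

actual_poses = [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 0.0]

empirical_dist = []
for pose in actual_poses:
    for entry in empirical_dist:
        if is_equivalent_pose(entry[0], pose):
            entry[1] += 1.0 / NUM_PER_DATAPOINT
            break
    else:  # no equivalent pose seen yet
        empirical_dist.append([pose, 1.0 / NUM_PER_DATAPOINT])

print(empirical_dist)  # ~[[0.0, 0.7], [1.0, 0.2], [2.0, 0.1]], up to float rounding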