Example #1
    def read_datum(self, key):
        """Read in the GraspableObject3D corresponding to given key."""
        if key not in self.data_keys_:
            raise ValueError('Key %s not found in dataset %s' %
                             (key, self.name))

        file_root = os.path.join(self.dataset_root_dir_, key)
        sdf_filename = Dataset.sdf_filename(file_root)
        obj_filename = Dataset.obj_filename(file_root)
        features_filename = Dataset.features_filename(file_root)

        # read in data
        sf = sdf_file.SdfFile(sdf_filename)
        sdf = sf.read()

        of = obj_file.ObjFile(obj_filename)
        mesh = of.read()

        if os.path.exists(features_filename):
            ff = feature_file.LocalFeatureFile(features_filename)
            features = ff.read()
        else:
            features = None

        return go.GraspableObject3D(sdf,
                                    mesh=mesh,
                                    features=features,
                                    key=key,
                                    model_name=obj_filename,
                                    category=self.data_categories_[key])
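A minimal usage sketch, assuming a `Dataset` instance has already been constructed (the constructor is not part of this example) and using a hypothetical object key:

# hypothetical usage; 'pepper' is a made-up key and `dataset` must already exist
graspable = dataset.read_datum('pepper')   # raises ValueError for an unknown key
print(graspable.model_name)                # path to the underlying .obj file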
Example #2
    def extract(self, graspable, feature_file_name):
        """ Returns a set of extracted SHOT features for a graspable """
        # make OS call to extract features to disk
        shot_os_call = 'bin/shot_extractor %s %s' % (graspable.model_name,
                                                     feature_file_name)
        os.system(shot_os_call)

        # add features to graspable and return
        feature_file = ff.LocalFeatureFile(feature_file_name)
        shot_features = feature_file.read()
        graspable.features[SHOTFeatureExtractor.key] = shot_features
        return shot_features
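Both this example and the next shell out with `os.system`, which ignores a failing `bin/shot_extractor` invocation and happily reads back a stale or missing feature file. A minimal alternative sketch using `subprocess.run` (not what the library itself does) that surfaces a non-zero exit status:

import subprocess

# equivalent to the call inside extract(); check=True raises CalledProcessError
# if the extractor exits with a non-zero status
subprocess.run(['bin/shot_extractor', graspable.model_name, feature_file_name],
               check=True)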
Example #3
    def generate_shot_features(self):
        """ Extracts SHOT features """
        # extract shot features to the filesystem
        shot_os_call = 'bin/shot_extractor %s %s' % (self.obj_filename, self.shot_filename)
        os.system(shot_os_call)

        # read the features back in
        self.shot_features_ = None
        try:
            lff = feature_file.LocalFeatureFile(self.shot_filename)
            self.shot_features_ = lff.read()
        except Exception as e:
            logging.warning('Failed to load SHOT features: %s', e)
        return self.shot_features_
Example #4
def test_new_feature_matching():
    a_features_filename = "data/test/features/pepper_orig_features.txt"
    b_features_filename = "data/test/features/pepper_tf_features.txt"
    a_mesh_filename = "data/test/features/pepper_orig.obj"
    b_mesh_filename = "data/test/features/pepper_tf.obj"

    # read data
    a_feat_file = ff.LocalFeatureFile(a_features_filename)
    a_features = a_feat_file.read()

    b_feat_file = ff.LocalFeatureFile(b_features_filename)
    b_features = b_feat_file.read()

    a_obj_file = of.ObjFile(a_mesh_filename)
    a_mesh = a_obj_file.read()

    b_obj_file = of.ObjFile(b_mesh_filename)
    b_mesh = b_obj_file.read()

    # match features
    feat_matcher = fm.RawDistanceFeatureMatcher()
    ab_corrs = feat_matcher.match(a_features, b_features)
    reg_solver = reg.RigidRegistrationSolver()
    tf = reg_solver.register(ab_corrs)

    # compare with true transform
    tf_true = np.loadtxt("data/test/features/tf_true.txt", delimiter=" ")
    delta_tf = tf.dot(np.linalg.inv(tf_true))

    print('Estimated TF')
    print(tf)

    print('True TF')
    print(tf_true)

    print('Delta TF')
    print(delta_tf)
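The test only prints the three transforms. A hedged follow-up check (an addition, not part of the original test) would be to assert that `delta_tf` is close to the identity, i.e. that the estimated transform matches the ground truth up to numerical error:

# hypothetical extra assertion at the end of test_new_feature_matching();
# the tolerance is a guess and may need loosening for noisy features
assert np.allclose(delta_tf, np.eye(delta_tf.shape[0]), atol=1e-2), \
    'estimated transform deviates from the ground-truth transform'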
Example #5
def get_feature(object):
    """ Reads the local features whose filename is the first element of `object`. """
    feat_file = ffile.LocalFeatureFile(object[0])
    return feat_file.read()
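The single sequence-valued argument suggests this helper is meant to be mapped over a list of argument tuples, e.g. with a multiprocessing pool. A minimal sketch under that assumption; the pool-based driver and the two feature files (borrowed from Example #4) are illustrative, not part of the original code:

import multiprocessing as mp

# hypothetical driver: each work item is a tuple whose first element is a
# features filename, matching what get_feature expects
work = [('data/test/features/pepper_orig_features.txt',),
        ('data/test/features/pepper_tf_features.txt',)]
with mp.Pool(processes=2) as pool:
    all_features = pool.map(get_feature, work)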
Example #6
def zca_from_shot(feature_dir, num_samples_per_shape=75, num_clusters=25):
    num_shapes = 0
    feature_count = 0
    b = fs.BagOfFeatures()
    feat_filenames = []
    cat_filenames = []

    # walk through directory, adding files to feature rep
    for root, sub_folders, files in os.walk(feature_dir):
        for f in files:
            file_name = os.path.join(root, f)
            file_root, file_ext = os.path.splitext(file_name)

            if file_ext == '.cat':
                cat_filenames.append(file_name)
                file_name = file_root + '_features.txt'
                feat_filenames.append(file_name)
                print('Processing file %s (%d of %d)' % (
                    feat_filenames[-1], num_shapes, len(files) // 2))

                # read features
                feat_file = ff.LocalFeatureFile(file_name)
                features = feat_file.read()

                # get a random subset of features
                num_features = features.descriptors.shape[0]
                num_samples = min(num_samples_per_shape, num_features)
                indices = np.random.choice(num_features,
                                           num_samples,
                                           replace=False)
                descriptor_subset = features.descriptors[indices, :]

                if num_shapes == 0:
                    feature_dim = features.descriptors.shape[1]
                    mu = np.zeros(feature_dim)
                    Sigma = np.zeros([feature_dim, feature_dim])

                # update bag of features and running mean / second-moment estimates
                new_feature_count = feature_count + num_samples
                old_weight = float(feature_count) / float(new_feature_count)
                new_weight = float(num_samples) / float(new_feature_count)
                b.extend(features.feature_subset(indices))
                # blend the per-shape mean and uncentered second moment of the
                # sampled descriptors into the running estimates
                mu = old_weight * mu + new_weight * np.mean(descriptor_subset,
                                                            axis=0)
                Sigma = old_weight * Sigma + new_weight * descriptor_subset.T.dot(
                    descriptor_subset) / num_samples

                num_shapes += 1
                feature_count += num_samples


#                if num_shapes >= 200:
#                    break

    # preprocessing transform with zca whitening
    print('Learning ZCA')
    z = ZcaTransform()
    z.fit_bootstrapped(mu, Sigma)

    # learn the feature dictionary with k-means
    print('Learning dict')
    k = KMeansFeatureDictionary(num_clusters, preprocess_fn=z)
    k.fit(b)

    # loop through objects and transform
    shape_reps = np.zeros([len(feat_filenames), num_clusters])
    categories = []
    i = 0
    print('Repping shapes')
    for feat_file_name, cat_file_name in zip(feat_filenames, cat_filenames):
        print('Repping', feat_file_name)
        # read the category label for this shape
        with open(cat_file_name, 'r') as cat_file:
            cat = cat_file.readline().rstrip()

        feat_file = ff.LocalFeatureFile(feat_file_name)
        features = feat_file.read()
        shape_rep = k.transform(features)

        shape_reps[i, :] = shape_rep
        categories.append(cat)
        i += 1

    # transform everything for plotting
    cat_list = list(set(categories))
    cat_indices = [cat_list.index(c) for c in categories]

    colors = plt.get_cmap('jet')(np.linspace(0, 1.0, len(cat_list)))
    pointwise_colors = colors[cat_indices, :]

    p = sd.PCA()
    shapes_proj = p.fit_transform(shape_reps)
    shapes_tf = shapes_proj[:, :2]

    # plot all points
    patches = []
    for i in range(len(cat_list)):
        patches.append(mpatches.Patch(color=colors[i], label=cat_list[i]))

    cat_array = np.array(cat_indices)
    objs = []
    plt.figure()
    for i in range(len(cat_list)):
        cat_ind = np.where(cat_array == i)
        cat_ind = cat_ind[0]
        o = plt.scatter(shapes_tf[cat_ind, 0],
                        shapes_tf[cat_ind, 1],
                        c=colors[i])
        objs.append(o)
    plt.legend(objs, cat_list)
    plt.show()

    # persist the learned feature dictionary
    with open('shot_feature_dict.pkl', 'wb') as f:
        pkl.dump(k, f)

    # nearest neighbors queries
    train_pct = 0.75
    num_pts = len(categories)
    all_indices = np.linspace(0, num_pts - 1, num_pts)
    train_indices = np.random.choice(num_pts,
                                     int(np.floor(train_pct * num_pts)),
                                     replace=False)
    test_indices = np.setdiff1d(all_indices, train_indices)
    train_indices = train_indices.astype(np.int16)
    test_indices = test_indices.astype(np.int16)

    train_categories = []
    for i in range(train_indices.shape[0]):
        train_categories.append(categories[train_indices[i]])

    test_categories = []
    for i in range(test_indices.shape[0]):
        test_categories.append(categories[test_indices[i]])

    # nearest neighbors
    num_nearest = 5
    nn = kernels.NearPy()
    nn.train(shape_reps[train_indices, :], k=num_nearest)

    # setup confusion matrix; UNKNOWN_TAG catches queries with no neighbors
    confusion = {}
    confusion[UNKNOWN_TAG] = {}
    for query_cat in cat_list:
        confusion[query_cat] = {}
    for query_cat in confusion.keys():
        for pred_cat in cat_list + [UNKNOWN_TAG]:
            confusion[query_cat][pred_cat] = 0

    # get test confusion matrix
    for i in range(test_indices.shape[0]):
        true_category = categories[test_indices[i]]
        [indices, dists] = nn.nearest_neighbors(shape_reps[test_indices[i], :],
                                                k=num_nearest,
                                                return_indices=True)
        neighbor_cats = []
        for index in indices:
            neighbor_cats.append(categories[train_indices[index]])
        print('Shape nearest neighbors', true_category, neighbor_cats)

        if len(indices) > 0:
            confusion[true_category][neighbor_cats[0]] += 1
        else:
            confusion[true_category][UNKNOWN_TAG] += 1

    # accumulate results
    # convert the dictionary to a numpy array
    row_names = confusion.keys()
    confusion_mat = np.zeros([len(row_names), len(row_names)])
    i = 0
    for query_cat in confusion.keys():
        j = 0
        for pred_cat in confusion.keys():
            confusion_mat[i, j] = confusion[query_cat][pred_cat]
            j += 1
        i += 1

    # get true positives, etc for each category
    num_preds = test_indices.shape[0]
    tp = np.diag(confusion_mat)
    fp = np.sum(confusion_mat, axis=0) - np.diag(confusion_mat)
    fn = np.sum(confusion_mat, axis=1) - np.diag(confusion_mat)
    tn = num_preds * np.ones(tp.shape) - tp - fp - fn

    # compute useful statistics
    recall = tp / (tp + fn)
    tnr = tn / (fp + tn)
    precision = tp / (tp + fp)
    npv = tn / (tn + fn)
    fpr = fp / (fp + tn)
    accuracy = np.sum(tp) / num_preds  # correct predictions over the entire test set

    # remove nans
    recall[np.isnan(recall)] = 0
    tnr[np.isnan(tnr)] = 0
    precision[np.isnan(precision)] = 0
    npv[np.isnan(npv)] = 0
    fpr[np.isnan(fpr)] = 0

    IPython.embed()
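Neither ZcaTransform nor KMeansFeatureDictionary is shown in these examples. As a rough sketch of what `fit_bootstrapped(mu, Sigma)` plausibly does (an assumption, not the library's actual code), ZCA whitening can be built from exactly the two quantities accumulated above, a running mean and an uncentered second moment:

import numpy as np

def zca_whitening_matrix(mu, second_moment, eps=1e-5):
    """Hypothetical ZCA whitening matrix from a mean and uncentered second moment.

    Standard construction, not the actual ZcaTransform implementation.
    """
    # covariance from the uncentered second moment: E[x x^T] - mu mu^T
    cov = second_moment - np.outer(mu, mu)
    # symmetric eigendecomposition of the covariance
    eigvals, eigvecs = np.linalg.eigh(cov)
    # W = U diag(1 / sqrt(lambda + eps)) U^T; eps guards near-zero eigenvalues
    inv_sqrt = 1.0 / np.sqrt(np.maximum(eigvals, 0.0) + eps)
    return eigvecs.dot(np.diag(inv_sqrt)).dot(eigvecs.T)

# whitened descriptors would then be (descriptors - mu).dot(W), with
# W = zca_whitening_matrix(mu, Sigma)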
    """