    def render_canonical_pc(self, poses):
        all_pcs = []
        for pose in poses:
            # Render from this camera pose; the renderer also returns the
            # (possibly adjusted) pose it actually used.
            _, _, pc, pose = self.render(pose)
            # Map the camera-frame points back into the canonical object frame.
            pc = pc.dot(utils.inverse_transform(pose).T)
            all_pcs.append(pc)
        all_pcs = np.concatenate(all_pcs, 0)
        return all_pcs
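Here `utils.inverse_transform` inverts a 4x4 camera pose. A minimal sketch of such a helper, assuming the pose is a rigid homogeneous transform (this is the standard rigid-body inverse; the actual implementation in `utils` may differ):

import numpy as np

def inverse_transform(trans):
    # Illustrative sketch: invert a rigid 4x4 homogeneous transform
    # [R | t] using R^-1 = R^T, which gives [R^T | -R^T t].
    rot = trans[:3, :3]
    t = trans[:3, 3]
    output = np.eye(4, dtype=trans.dtype)
    output[:3, :3] = rot.T
    output[:3, 3] = -rot.T.dot(t)
    return output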
Example No. 2
    def __getitem__(self, index):
        path = self.paths[index]
        pos_grasps, pos_qualities, _, _, _, cad_path, cad_scale = self.read_grasp_file(
            path)
        meta = {}
        try:
            all_clusters = self.sample_grasp_indexes(
                self.opt.num_grasps_per_object, pos_grasps, pos_qualities)
        except NoPositiveGraspsException:
            if self.opt.skip_error:
                # Let the collate function filter this sample out.
                return None
            else:
                # Retry with a different, randomly chosen sample.
                return self.__getitem__(np.random.randint(0, self.size))

        pc, camera_pose, _ = self.change_object_and_render(
            cad_path,
            cad_scale,
            thread_id=torch.utils.data.get_worker_info().id
            if torch.utils.data.get_worker_info() else 0)

        output_qualities = []
        output_grasps = []
        for i in range(self.opt.num_grasps_per_object):
            selected_grasp_index = all_clusters[i]

            selected_grasp = pos_grasps[selected_grasp_index[0]][
                selected_grasp_index[1]]
            selected_quality = pos_qualities[selected_grasp_index[0]][
                selected_grasp_index[1]]
            output_qualities.append(selected_quality)
            # Express the grasp in the camera frame.
            output_grasps.append(camera_pose.dot(selected_grasp))
        gt_control_points = utils.transform_control_points_numpy(
            np.array(output_grasps), self.opt.num_grasps_per_object, mode='rt')

        # Tile the same point cloud once per grasp and keep xyz only.
        meta['pc'] = np.array([pc] * self.opt.num_grasps_per_object)[:, :, :3]
        meta['grasp_rt'] = np.array(output_grasps).reshape(
            len(output_grasps), -1)

        # Store the inverse camera pose (camera frame -> object frame).
        meta['pc_pose'] = np.array([utils.inverse_transform(camera_pose)] *
                                   self.opt.num_grasps_per_object)
        meta['cad_path'] = np.array([cad_path] *
                                    self.opt.num_grasps_per_object)
        meta['cad_scale'] = np.array([cad_scale] *
                                     self.opt.num_grasps_per_object)
        meta['quality'] = np.array(output_qualities)
        meta['target_cps'] = np.array(gt_control_points[:, :, :3])
        return meta
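Because `__getitem__` returns None when `skip_error` is set and a file has no positive grasps, the DataLoader needs a collate function that filters those entries out. A minimal sketch of that pattern (the name `collate_skip_none` is illustrative, not from the source); it would be passed as `collate_fn=collate_skip_none` when constructing the DataLoader:

import torch.utils.data

def collate_skip_none(batch):
    # Illustrative sketch: drop samples that came back as None
    # before handing the batch to the default collate.
    batch = [sample for sample in batch if sample is not None]
    return torch.utils.data.dataloader.default_collate(batch)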
Example No. 3
    def on_epoch_end(self, epoch, logs=None):
        batches = len(self.validation_data)
        total = batches * self.batch_size

        # Accumulate rounded predictions and ground truth over all batches.
        val_pred = np.zeros((total, 1))
        val_true = np.zeros(total)

        for batch in range(batches):
            xVal, yVal = self.validation_data.__getitem__(batch)
            val_pred[batch * self.batch_size:(batch + 1) *
                     self.batch_size] = np.asarray(
                         self.model.predict(xVal)).round()
            val_true[batch * self.batch_size:(batch + 1) *
                     self.batch_size] = yVal

        label_true = []
        label_pred = []
        for i in range(len(val_pred)):
            y_pred, y_true = val_pred[i], val_true[i]
            lengths = self.get_lengths(y_true)

            # Decode class indices back to labels, trimmed to the true lengths.
            y_true = inverse_transform(y_true, self.label_encoder, lengths)
            y_pred = inverse_transform(y_pred, self.label_encoder, lengths)

            label_true.extend(y_true)
            label_pred.extend(y_pred)

        score = balanced_accuracy_score(label_true, label_pred)
        print(' - BACC: {:04.2f}'.format(score * 100))
        score = f1_score(label_true, label_pred)
        print(' - f1: {:04.2f}'.format(score * 100))
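In this callback, `inverse_transform` is a label decoder rather than a geometric inverse: it trims padded predictions to their true lengths and maps integer classes back to label values. A plausible sketch, assuming a scikit-learn LabelEncoder (the real helper is not shown in the source, so the signature and behavior here are guesses):

import numpy as np

def inverse_transform(y, label_encoder, lengths):
    # Illustrative sketch: flatten, trim padding to the true length,
    # then map integer class indices back to their original labels.
    y = np.asarray(y, dtype=int).ravel()[:lengths]
    return list(label_encoder.inverse_transform(y))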
Example No. 4
    def infer(self, image, save_path, bright_diff=0, is_grayscale=True):
        # Read the image from disk if a path was given.
        if isinstance(image, str):
            img = imread(image, is_grayscale=is_grayscale)
        else:
            img = image
        # Resize to the network input size and add a channel axis.
        img = cv2.resize(img, dsize=(256, 256))
        img = np.reshape(img, newshape=(img.shape[0], img.shape[1], 1))
        # Run the A->B generator on a single-image batch.
        gen_avatar = self.sess.run(self.testB, feed_dict={self.test_A: [img]})
        if save_path is not None:
            save_images(gen_avatar + bright_diff,
                        size=[1, 1],
                        image_path=save_path)
        # Drop the batch and channel axes, then map back to display range.
        gen_avatar = np.reshape(gen_avatar,
                                newshape=list(gen_avatar.shape[1:-1]))
        gen_avatar = inverse_transform(gen_avatar)

        return gen_avatar
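Here `inverse_transform` maps generator output back to a displayable pixel range. A common definition in TensorFlow GAN utilities, assuming tanh-scaled output in [-1, 1] (an assumption, not the verified implementation from this repository):

def inverse_transform(images):
    # Illustrative sketch: map tanh-scaled pixels from [-1, 1] to [0, 1].
    return (images + 1.) / 2.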
Example No. 5
    def get_uniform_evaluator_data(self, path, verify_grasps=False):
        pos_grasps, pos_qualities, neg_grasps, neg_qualities, obj_mesh, cad_path, cad_scale = self.read_grasp_file(
            path)

        output_pcs = []
        output_grasps = []
        output_qualities = []
        output_labels = []
        output_pc_poses = []
        output_cad_paths = [cad_path] * self.opt.batch_size
        output_cad_scales = np.asarray([cad_scale] * self.opt.batch_size,
                                       np.float32)

        num_positive = int(self.opt.batch_size * self.opt.ratio_positive)
        positive_clusters = self.sample_grasp_indexes(num_positive, pos_grasps,
                                                      pos_qualities)
        num_hard_negative = int(self.opt.batch_size *
                                self.opt.ratio_hardnegative)
        num_flex_negative = self.opt.batch_size - num_positive - num_hard_negative
        negative_clusters = self.sample_grasp_indexes(num_flex_negative,
                                                      neg_grasps,
                                                      neg_qualities)
        hard_neg_candidates = []

        # Build hard-negative candidates by perturbing the sampled
        # positive and negative grasps.
        for clusters, grasps, qualities in zip(
            [positive_clusters, negative_clusters], [pos_grasps, neg_grasps],
            [pos_qualities, neg_qualities]):
            for cluster in clusters:
                selected_grasp = grasps[cluster[0]][cluster[1]]
                selected_quality = qualities[cluster[0]][cluster[1]]
                hard_neg_candidates += utils.perturb_grasp(
                    selected_grasp,
                    self.collision_hard_neg_num_perturbations,
                    self.collision_hard_neg_min_translation,
                    self.collision_hard_neg_max_translation,
                    self.collision_hard_neg_min_rotation,
                    self.collision_hard_neg_max_rotation,
                )

        # If queue does not have enough data, fill it up with hard negative examples from the positives.
        if path not in self.collision_hard_neg_queue or len(
                self.collision_hard_neg_queue[path]) < num_hard_negative:
            if path not in self.collision_hard_neg_queue:
                self.collision_hard_neg_queue[path] = []
            # hard negatives are perturbations of correct grasps.
            collisions, heuristic_qualities = utils.evaluate_grasps(
                hard_neg_candidates, obj_mesh)

            hard_neg_mask = collisions | (heuristic_qualities < 0.001)
            hard_neg_indexes = np.where(hard_neg_mask)[0].tolist()
            np.random.shuffle(hard_neg_indexes)
            for index in hard_neg_indexes:
                self.collision_hard_neg_queue[path].append(
                    (hard_neg_candidates[index], -1.0))
            random.shuffle(self.collision_hard_neg_queue[path])

        # Adding positive grasps
        for positive_cluster in positive_clusters:
            selected_grasp = pos_grasps[positive_cluster[0]][
                positive_cluster[1]]
            selected_quality = pos_qualities[positive_cluster[0]][
                positive_cluster[1]]
            output_grasps.append(selected_grasp)
            output_qualities.append(selected_quality)
            output_labels.append(1)

        # Sanity-check the sampled positive grasps against the heuristic.
        # This must run after they are appended; checking earlier would
        # iterate over an empty list.
        if verify_grasps:
            collisions, heuristic_qualities = utils.evaluate_grasps(
                output_grasps, obj_mesh)
            for computed_quality, expected_quality, g in zip(
                    heuristic_qualities, output_qualities, output_grasps):
                err = abs(computed_quality - expected_quality)
                if err > 1e-3:
                    raise ValueError(
                        'Heuristic does not match with the values from data generation {}!={}'
                        .format(computed_quality, expected_quality))

        # Adding hard neg
        for i in range(num_hard_negative):
            grasp, quality = self.collision_hard_neg_queue[path][i]
            output_grasps.append(grasp)
            output_qualities.append(quality)
            output_labels.append(0)

        self.collision_hard_neg_queue[path] = self.collision_hard_neg_queue[
            path][num_hard_negative:]

        # Adding flex neg
        if len(negative_clusters) != num_flex_negative:
            raise ValueError(
                'negative clusters should have the same length as num_flex_negative {} != {}'
                .format(len(negative_clusters), num_flex_negative))

        for negative_cluster in negative_clusters:
            selected_grasp = neg_grasps[negative_cluster[0]][
                negative_cluster[1]]
            selected_quality = neg_qualities[negative_cluster[0]][
                negative_cluster[1]]
            output_grasps.append(selected_grasp)
            output_qualities.append(selected_quality)
            output_labels.append(0)

        for i in range(self.opt.num_grasps_per_object):
            if i > 0:
                # Reuse the first rendering for every subsequent grasp.
                output_pcs.append(np.copy(output_pcs[0]))
                output_pc_poses.append(np.copy(output_pc_poses[0]))
            else:
                pc, camera_pose, _ = self.change_object_and_render(
                    cad_path,
                    cad_scale,
                    thread_id=torch.utils.data.get_worker_info().id
                    if torch.utils.data.get_worker_info() else 0)
                output_pcs.append(pc)
                output_pc_poses.append(utils.inverse_transform(camera_pose))

            # Express each grasp in the camera frame.
            output_grasps[i] = camera_pose.dot(output_grasps[i])

        output_pcs = np.asarray(output_pcs, dtype=np.float32)
        output_grasps = np.asarray(output_grasps, dtype=np.float32)
        output_labels = np.asarray(output_labels, dtype=np.int32)
        output_qualities = np.asarray(output_qualities, dtype=np.float32)
        output_pc_poses = np.asarray(output_pc_poses, dtype=np.float32)

        return output_pcs, output_grasps, output_labels, output_qualities, output_pc_poses, output_cad_paths, output_cad_scales
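The `thread_id` argument repeats the same worker-lookup expression across these methods. A small helper (an illustrative refactor, not present in the source) makes the intent explicit; `torch.utils.data.get_worker_info()` returns None in the main process and a WorkerInfo object with an `id` inside a DataLoader worker:

import torch.utils.data

def current_worker_id():
    # Illustrative helper: DataLoader workers get a unique id,
    # the main process gets 0.
    info = torch.utils.data.get_worker_info()
    return info.id if info is not None else 0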
Example No. 6
    def get_nonuniform_evaluator_data(self, path, verify_grasps=False):
        pos_grasps, pos_qualities, neg_grasps, neg_qualities, obj_mesh, cad_path, cad_scale = self.read_grasp_file(
            path)

        output_pcs = []
        output_grasps = []
        output_qualities = []
        output_labels = []
        output_pc_poses = []
        output_cad_paths = [cad_path] * self.opt.num_grasps_per_object
        output_cad_scales = np.asarray(
            [cad_scale] * self.opt.num_grasps_per_object, np.float32)

        num_positive = int(self.opt.num_grasps_per_object *
                           self.ratio_positive)
        positive_clusters = self.sample_grasp_indexes(num_positive, pos_grasps,
                                                      pos_qualities)
        num_negative = self.opt.num_grasps_per_object - num_positive
        negative_clusters = self.sample_grasp_indexes(num_negative, neg_grasps,
                                                      neg_qualities)
        hard_neg_candidates = []
        # Fill in Positive Examples.
        for positive_cluster in positive_clusters:
            selected_grasp = pos_grasps[positive_cluster[0]][
                positive_cluster[1]]
            selected_quality = pos_qualities[positive_cluster[0]][
                positive_cluster[1]]
            output_grasps.append(selected_grasp)
            output_qualities.append(selected_quality)
            output_labels.append(1)
            hard_neg_candidates += utils.perturb_grasp(
                selected_grasp,
                self.collision_hard_neg_num_perturbations,
                self.collision_hard_neg_min_translation,
                self.collision_hard_neg_max_translation,
                self.collision_hard_neg_min_rotation,
                self.collision_hard_neg_max_rotation,
            )

        if verify_grasps:
            collisions, heuristic_qualities = utils.evaluate_grasps(
                output_grasps, obj_mesh)
            for computed_quality, expected_quality, g in zip(
                    heuristic_qualities, output_qualities, output_grasps):
                err = abs(computed_quality - expected_quality)
                if err > 1e-3:
                    raise ValueError(
                        'Heuristic does not match with the values from data generation {}!={}'
                        .format(computed_quality, expected_quality))

        # If queue does not have enough data, fill it up with hard negative examples from the positives.
        if path not in self.collision_hard_neg_queue or self.collision_hard_neg_queue[
                path].qsize() < num_negative:
            if path not in self.collision_hard_neg_queue:
                self.collision_hard_neg_queue[path] = Queue()
            # hard negatives are perturbations of correct grasps.
            random_selector = np.random.rand()
            if random_selector < self.ratio_hardnegative:
                collisions, heuristic_qualities = utils.evaluate_grasps(
                    hard_neg_candidates, obj_mesh)
                hard_neg_mask = collisions | (heuristic_qualities < 0.001)
                hard_neg_indexes = np.where(hard_neg_mask)[0].tolist()
                np.random.shuffle(hard_neg_indexes)
                for index in hard_neg_indexes:
                    self.collision_hard_neg_queue[path].put(
                        (hard_neg_candidates[index], -1.0))
            if random_selector >= self.ratio_hardnegative or self.collision_hard_neg_queue[
                    path].qsize() < num_negative:
                for negative_cluster in negative_clusters:
                    selected_grasp = neg_grasps[negative_cluster[0]][
                        negative_cluster[1]]
                    selected_quality = neg_qualities[negative_cluster[0]][
                        negative_cluster[1]]
                    self.collision_hard_neg_queue[path].put(
                        (selected_grasp, selected_quality))

        # Use negative examples from queue.
        for _ in range(num_negative):
            grasp, quality = self.collision_hard_neg_queue[path].get()
            output_grasps.append(grasp)
            output_qualities.append(quality)
            output_labels.append(0)

        for i in range(self.opt.num_grasps_per_object):
            if i > 0:
                # Reuse the first rendering for every subsequent grasp.
                output_pcs.append(np.copy(output_pcs[0]))
                output_pc_poses.append(np.copy(output_pc_poses[0]))
            else:
                pc, camera_pose, _ = self.change_object_and_render(
                    cad_path,
                    cad_scale,
                    thread_id=torch.utils.data.get_worker_info().id
                    if torch.utils.data.get_worker_info() else 0)

                output_pcs.append(pc)
                output_pc_poses.append(utils.inverse_transform(camera_pose))

            # Express each grasp in the camera frame.
            output_grasps[i] = camera_pose.dot(output_grasps[i])

        output_pcs = np.asarray(output_pcs, dtype=np.float32)
        output_grasps = np.asarray(output_grasps, dtype=np.float32)
        output_labels = np.asarray(output_labels, dtype=np.int32)
        output_qualities = np.asarray(output_qualities, dtype=np.float32)
        output_pc_poses = np.asarray(output_pc_poses, dtype=np.float32)
        return output_pcs, output_grasps, output_labels, output_qualities, output_pc_poses, \
            output_cad_paths, output_cad_scales
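The returned `output_pc_poses` hold `inverse_transform(camera_pose)`, so camera-frame points can be mapped back to the object frame, matching the math in `render_canonical_pc` above. A short sketch of that round trip (the helper name is illustrative; shapes assumed: `pc` is an N x 3 array, the pose a 4 x 4 matrix):

import numpy as np

def to_object_frame(pc, pc_pose):
    # Illustrative sketch: lift N x 3 points to homogeneous coordinates,
    # apply the stored inverse camera pose, then drop the extra coordinate.
    ones = np.ones((pc.shape[0], 1), dtype=pc.dtype)
    pc_h = np.concatenate([pc, ones], axis=1)
    return pc_h.dot(pc_pose.T)[:, :3]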