Example #1
if isinstance(th_outlier[0], list):
    print("Individual outlier thresholds are applied for each object")
    dynamic_th = False
    th_outliers = np.squeeze(np.array(th_outlier))
th_inlier = cfg['inlier_th']
th_ransac = 3

dummy_run = True
MODEL_DIR = os.path.join(bop_dir, "weight_detection")
if detect_type == 'rcnn':
    # Load Mask R-CNN
    '''
    Standard inference parameters for Mask R-CNN (identical for all datasets)
    '''
    config = BopInferenceConfig(dataset=dataset,
                                num_classes=model_ids.shape[0] + 1,  # 1 + len(model_plys): background + objects
                                im_width=im_width, im_height=im_height)
    config.display()
    model = modellib.MaskRCNN(mode="inference", config=config,
                              model_dir=MODEL_DIR)
    last_path = model.find_last()
    # Load the weights from the most recent training checkpoint
    model.load_weights(last_path, by_name=True)
elif detect_type == 'retinanet':
    print("Loading weights for keras-retinanet")
    detection_weight_fn = cfg['detection_weight']
    model = models.load_model(os.path.join(MODEL_DIR, detection_weight_fn),
                              backbone_name='resnet50')


'''
Load pix2pose inference weights
'''
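For context, here is a minimal, illustrative sketch of how a detector loaded this way is typically run on a single RGB image; the `image` variable and the keras-retinanet preprocessing helpers are assumptions, not code from this repository.

# Illustrative sketch only: run the detector loaded above on one RGB image `image` (H x W x 3, uint8).
if detect_type == 'rcnn':
    # matterport Mask R-CNN returns one dict per image with rois, class_ids, scores and masks
    r = model.detect([image], verbose=0)[0]
    boxes, class_ids, scores, masks = r['rois'], r['class_ids'], r['scores'], r['masks']
elif detect_type == 'retinanet':
    # keras-retinanet expects a preprocessed, resized image batch
    from keras_retinanet.utils.image import preprocess_image, resize_image
    img, scale = resize_image(preprocess_image(image))
    boxes, scores, labels = model.predict_on_batch(np.expand_dims(img, axis=0))
    boxes /= scale  # map boxes back to the original image resolution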
Example #2
    def __init__(self, cfg):
        self.cfg = cfg
        self.rgb_topic = cfg['rgb_topic']
        self.depth_topic = cfg['depth_topic']
        self.camK = np.array(cfg['cam_K']).reshape(3, 3)
        self.im_width = int(cfg['im_width'])
        self.im_height = int(cfg['im_height'])
        self.inlier_th = float(cfg['inlier_th'])
        self.ransac_th = float(cfg['ransac_th'])
        self.pub_before_icp = False
        self.graph = tf_backend.Graph()
        # Run ICP refinement only when it is enabled in the config
        self.icp = (int(cfg['icp']) == 1)
        self.model_params = inout.load_json(cfg['norm_factor_fn'])
        self.detection_labels = cfg['obj_labels']  # labels of the corresponding detections
        n_objs = int(cfg['n_objs'])
        self.target_objs = cfg['target_obj_name']
        self.colors = np.random.randint(0, 255, (n_objs, 3))

        with self.graph.as_default():
            if (detect_type == "rcnn"):
                #Load mask r_cnn
                '''
                standard estimation parameter for Mask R-CNN (identical for all dataset)
                '''
                self.config = BopInferenceConfig(dataset="ros",
                                                 num_classes=n_objs + 1,
                                                 im_width=self.im_width,
                                                 im_height=self.im_height)
                self.config.DETECTION_MIN_CONFIDENCE = 0.3
                self.config.DETECTION_MAX_INSTANCES = 30
                self.config.DETECTION_NMS_THRESHOLD = 0.5

                self.detection_model = modellib.MaskRCNN(mode="inference",
                                                         config=self.config,
                                                         model_dir="/")
                self.detection_model.load_weights(
                    cfg['path_to_detection_weights'], by_name=True)

            self.obj_models = []
            self.obj_bboxes = []

            self.obj_pix2pose = []
            pix2pose_dir = cfg['path_to_pix2pose_weights']
            th_outlier = cfg['outlier_th']
            self.model_scale = cfg['model_scale']
            for t_id, target_obj in enumerate(self.target_objs):
                weight_fn = os.path.join(
                    pix2pose_dir, "{:02d}/inference.hdf5".format(target_obj))
                print("Load pix2pose weights from ", weight_fn)
                model_param = self.model_params['{}'.format(target_obj)]
                obj_param = bop_io.get_model_params(model_param)
                recog_temp = recog.pix2pose(weight_fn,
                                            camK=self.camK,
                                            res_x=self.im_width,
                                            res_y=self.im_height,
                                            obj_param=obj_param,
                                            th_ransac=self.ransac_th,
                                            th_outlier=th_outlier,
                                            th_inlier=self.inlier_th)
                self.obj_pix2pose.append(recog_temp)
                ply_fn = os.path.join(self.cfg['model_dir'],
                                      self.cfg['ply_files'][t_id])
                if (self.icp):
                    #for pyrender rendering
                    obj_model = trimesh.load_mesh(ply_fn)
                    obj_model.vertices = obj_model.vertices * self.model_scale
                    mesh = pyrender.Mesh.from_trimesh(obj_model)
                    self.obj_models.append(mesh)
                    self.obj_bboxes.append(
                        self.get_3d_box_points(obj_model.vertices))

                else:
                    obj_model = inout.load_ply(ply_fn)
                    self.obj_bboxes.append(
                        self.get_3d_box_points(obj_model['pts']))

        # Initialize the ROS node and create the publishers/subscribers once,
        # after all detection and pix2pose models have been loaded.
        rospy.init_node('pix2pose', anonymous=True)
        self.detect_pub = rospy.Publisher("/pix2pose/detected_object", ros_image)

        #self.pose_pub = rospy.Publisher("/pix2pose/object_pose", Pose)
        self.pose_pub = rospy.Publisher("/pix2pose/object_pose", ros_image)
        self.have_depth = False

        if self.icp:
            self.sub_depth = rospy.Subscriber(self.depth_topic,
                                              ros_image,
                                              self.callback_depth,
                                              queue_size=1)
            if self.pub_before_icp:
                self.pose_pub_noicp = rospy.Publisher(
                    "/pix2pose/object_pose_noicp", ros_image)

        self.depth_img = np.zeros((self.im_height, self.im_width))
        self.sub = rospy.Subscriber(self.rgb_topic,
                                    ros_image,
                                    self.callback,
                                    queue_size=1)
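Below is a minimal sketch of how a node built around this constructor might be launched; the class name pix2pose_ros and the "ros_config.json" path are assumptions (the snippet above only shows __init__), not code from this repository.

# Hypothetical launcher: `pix2pose_ros` and "ros_config.json" are assumed names.
import json
import rospy

if __name__ == "__main__":
    with open("ros_config.json") as f:   # assumed path to the cfg dict read above
        cfg = json.load(f)
    estimator = pix2pose_ros(cfg)        # __init__ registers the RGB (and optional depth) subscribers
    rospy.spin()                         # incoming images then drive detection and pose estimation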