# NOTE(review): this `elif` continues an if-chain on detect_type whose earlier
# branches (e.g. the Mask R-CNN case) start before this chunk and are not
# visible here.
elif(detect_type=='retinanet'):
    print("Loading weights for keras-retinanet")
    # detection weight filename, resolved relative to MODEL_DIR (defined elsewhere)
    detection_weight_fn = cfg['detection_weight']
    model = models.load_model(os.path.join(MODEL_DIR,detection_weight_fn), backbone_name='resnet50')


'''
Load pix2pose inference weights
'''
load_partial=False
obj_pix2pose=[]  # one pix2pose recognizer per object model, parallel to obj_names
obj_names=[]     # model ids, in the same order as obj_pix2pose
image_dummy=np.zeros((im_height,im_width,3),np.uint8)
for m_id,model_id in enumerate(model_ids):
    # per-object normalization parameters from the BOP model-info json
    model_param = model_params['{}'.format(model_id)]
    obj_param=bop_io.get_model_params(model_param)
    # weights are expected at <bop_dir>/pix2pose_weights/<NN>/inference.hdf5
    weight_dir = bop_dir+"/pix2pose_weights/{:02d}".format(model_id)
    #weight_dir = "/home/kiru/media/hdd/weights/tless/tless_{:02d}".format(model_id)
    weight_fn = os.path.join(weight_dir,"inference.hdf5")
    print("load pix2pose weight for obj_{} from".format(model_id),weight_fn)
    if not(dynamic_th):
        th_outlier = [th_outliers[m_id]] #provide a fixed outlier value
        print("Set outlier threshold to ",th_outlier[0])    
    # NOTE(review): when dynamic_th is truthy, th_outlier is never assigned in
    # this loop — it must already be bound earlier in the file, otherwise the
    # call below raises NameError on the first iteration. Confirm against the
    # part of the file not shown here.
    recog_temp = recog.pix2pose(weight_fn,camK= cam_K,
                                res_x=im_width,res_y=im_height,obj_param=obj_param,
                                th_ransac=th_ransac,th_outlier=th_outlier,
                                th_inlier=th_inlier)
    obj_pix2pose.append(recog_temp)
    
    obj_names.append(model_id)
# ---- Esempio n. 2 (separator left over from the code-snippet scrape) ----
# 0
    def __init__(self, cfg):
        """Build the pix2pose ROS node from a configuration dict.

        Loads the 2D detector (Mask R-CNN when the module-level detect_type
        is "rcnn"), one pix2pose estimator per target object, the object
        meshes / 3D bounding boxes, and wires up the ROS publishers and
        subscribers.

        cfg keys read here: rgb_topic, depth_topic, cam_K, im_width,
        im_height, inlier_th, ransac_th, icp, norm_factor_fn, obj_labels,
        n_objs, target_obj_name, path_to_pix2pose_weights, outlier_th,
        model_scale, model_dir, ply_files and — for the rcnn detector —
        path_to_detection_weights.
        """
        self.cfg = cfg
        self.rgb_topic = cfg['rgb_topic']
        self.depth_topic = cfg['depth_topic']
        self.camK = np.array(cfg['cam_K']).reshape(3, 3)
        self.im_width = int(cfg['im_width'])
        self.im_height = int(cfg['im_height'])
        self.inlier_th = float(cfg['inlier_th'])
        self.ransac_th = float(cfg['ransac_th'])
        self.pub_before_icp = False
        self.graph = tf_backend.Graph()
        # cfg['icp'] arrives as "0"/"1" (or int); collapse the original if/else.
        self.icp = int(cfg['icp']) == 1
        self.model_params = inout.load_json(cfg['norm_factor_fn'])
        self.detection_labels = cfg[
            'obj_labels']  #labels of corresponding detections
        n_objs = int(cfg['n_objs'])
        self.target_objs = cfg['target_obj_name']
        # one random RGB color per object, used for visualization
        self.colors = np.random.randint(0, 255, (n_objs, 3))

        with self.graph.as_default():
            if (detect_type == "rcnn"):
                #Load mask r_cnn
                '''
                standard estimation parameter for Mask R-CNN (identical for all dataset)
                '''
                self.config = BopInferenceConfig(dataset="ros",
                                                 num_classes=n_objs + 1,
                                                 im_width=self.im_width,
                                                 im_height=self.im_height)
                self.config.DETECTION_MIN_CONFIDENCE = 0.3
                self.config.DETECTION_MAX_INSTANCES = 30
                self.config.DETECTION_NMS_THRESHOLD = 0.5

                self.detection_model = modellib.MaskRCNN(mode="inference",
                                                         config=self.config,
                                                         model_dir="/")
                self.detection_model.load_weights(
                    cfg['path_to_detection_weights'], by_name=True)

            self.obj_models = []  # pyrender meshes (icp mode only)
            self.obj_bboxes = []  # 3D bounding-box corner points per object

            self.obj_pix2pose = []
            pix2pose_dir = cfg['path_to_pix2pose_weights']
            th_outlier = cfg['outlier_th']
            self.model_scale = cfg['model_scale']
            for t_id, target_obj in enumerate(self.target_objs):
                weight_fn = os.path.join(
                    pix2pose_dir, "{:02d}/inference.hdf5".format(target_obj))
                print("Load pix2pose weights from ", weight_fn)
                model_param = self.model_params['{}'.format(target_obj)]
                obj_param = bop_io.get_model_params(model_param)
                recog_temp = recog.pix2pose(weight_fn,
                                            camK=self.camK,
                                            res_x=self.im_width,
                                            res_y=self.im_height,
                                            obj_param=obj_param,
                                            th_ransac=self.ransac_th,
                                            th_outlier=th_outlier,
                                            th_inlier=self.inlier_th)
                self.obj_pix2pose.append(recog_temp)
                ply_fn = os.path.join(self.cfg['model_dir'],
                                      self.cfg['ply_files'][t_id])
                if (self.icp):
                    #for pyrender rendering
                    obj_model = trimesh.load_mesh(ply_fn)
                    obj_model.vertices = obj_model.vertices * self.model_scale
                    mesh = pyrender.Mesh.from_trimesh(obj_model)
                    self.obj_models.append(mesh)
                    self.obj_bboxes.append(
                        self.get_3d_box_points(obj_model.vertices))
                else:
                    obj_model = inout.load_ply(ply_fn)
                    self.obj_bboxes.append(
                        self.get_3d_box_points(obj_model['pts']))

            # BUG FIX: the ROS node/publisher/subscriber setup below used to
            # live inside the per-object loop above, so rospy.init_node() ran
            # once per target object — rospy raises an exception on the second
            # call, crashing any config with more than one target object, and
            # the publishers were needlessly re-created each iteration.
            # Hoisted out of the loop; single-object behavior is unchanged.
            rospy.init_node('pix2pose', anonymous=True)
            self.detect_pub = rospy.Publisher("/pix2pose/detected_object",
                                              ros_image)

            #self.pose_pub = rospy.Publisher("/pix2pose/object_pose", Pose)
            self.pose_pub = rospy.Publisher("/pix2pose/object_pose",
                                            ros_image)
            self.have_depth = False

            if (self.icp):
                self.sub_depth = rospy.Subscriber(self.depth_topic,
                                                  ros_image,
                                                  self.callback_depth,
                                                  queue_size=1)
                if (self.pub_before_icp):
                    self.pose_pub_noicp = rospy.Publisher(
                        "/pix2pose/object_pose_noicp", ros_image)

        self.depth_img = np.zeros((self.im_height, self.im_width))
        self.sub = rospy.Subscriber(self.rgb_topic,
                                    ros_image,
                                    self.callback,
                                    queue_size=1)