Example #1
def __init__(
        self,
        image_ids_path,    # text file with one image id per line
        input_seq,         # pickled input token sequences, keyed by image id
        target_seq,        # pickled target token sequences, keyed by image id
        gv_feat_path,      # pickled global features; '' disables them
        att_feats_folder,  # folder of per-image attention features; '' disables them
        seq_per_img,
        max_feat_num):
    self.max_feat_num = max_feat_num
    self.seq_per_img = seq_per_img
    self.image_ids = utils.load_lines(image_ids_path)  # a list of str
    self.att_feats_folder = att_feats_folder if len(att_feats_folder) > 0 else None
    self.gv_feat = (pickle.load(open(gv_feat_path, 'rb'), encoding='bytes')
                    if len(gv_feat_path) > 0 else None)
    if input_seq is not None and target_seq is not None:
        self.input_seq = pickle.load(open(input_seq, 'rb'), encoding='bytes')
        self.target_seq = pickle.load(open(target_seq, 'rb'), encoding='bytes')
        self.seq_len = 0  # len(self.input_seq[self.image_ids[0]][0, :])
    else:
        self.seq_len = -1
        self.input_seq = None
        self.target_seq = None
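
For reference, a minimal usage sketch of the constructor in Example #1. The enclosing class is not shown in the snippet, so the class name CocoDataset and every path and value below are placeholders; only the parameter list comes from the code above.

# Hypothetical call; CocoDataset and all arguments are illustrative only.
dataset = CocoDataset(
    image_ids_path='coco_train_image_id.txt',  # one image id per line
    input_seq='coco_train_input.pkl',
    target_seq='coco_train_target.pkl',
    gv_feat_path='',            # empty string: no global features
    att_feats_folder='feats/',  # empty string would disable attention features
    seq_per_img=5,
    max_feat_num=-1)
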
Example #2
def __init__(
    self,
    eval_ids,
    gv_feat,
    att_feats,
    eval_annfile,
    dataset_name
):
    super(Evaler, self).__init__()
    self.vocab = utils.load_vocab(cfg.INFERENCE.VOCAB)

    self.eval_ids = np.array(utils.load_lines(eval_ids))  # np.array(utils.load_ids(eval_ids))
    self.eval_loader = data_loader.load_val(eval_ids, gv_feat, att_feats, dataset_name)
    self.evaler = evaluation.create(cfg.INFERENCE.EVAL, eval_annfile)
    self.dataset_name = dataset_name
Example #3
def __init__(
    self,
    eval_ids,
    gv_feat,
    att_feats,
    dataset_name,
    eval_annfile=None
):
    super(Evaler, self).__init__()
    self.vocab = utils.load_vocab(cfg.INFERENCE.VOCAB)
    if dataset_name == 'raw':
        # raw image folder: use the file names (without extension) as image
        # ids; there are no annotations, so no metric evaluator is created
        self.eval_ids = np.array([img.split('.')[0] for img in os.listdir(eval_ids)])
        self.evaler = None
    else:
        self.eval_ids = np.array(utils.load_lines(eval_ids))  # np.array(utils.load_ids(eval_ids))
        self.evaler = evaluation.create(dataset_name, eval_annfile)
    self.eval_loader = data_loader.load_val(eval_ids, gv_feat, att_feats, dataset_name)
    self.dataset_name = dataset_name
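
A hedged usage sketch for the Evaler variant in Example #3 (Example #2 is the same except that eval_annfile is a required positional argument and the metric is taken from cfg.INFERENCE.EVAL). The class name Evaler comes from the snippets; all paths and the dataset names 'coco' and 'raw' are illustrative assumptions.

# Annotated dataset: ids come from a text file, metrics use eval_annfile.
evaler = Evaler(
    eval_ids='coco_val_image_id.txt',
    gv_feat='',
    att_feats='feats/',
    dataset_name='coco',
    eval_annfile='captions_val.json')

# Raw image folder: ids are derived from the file names and no metric
# evaluator is created (self.evaler stays None).
evaler_raw = Evaler(
    eval_ids='raw_images/',
    gv_feat='',
    att_feats='feats/',
    dataset_name='raw')
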
Example #4
def __init__(
        self,
        image_ids_path,    # /data/disk1/private/FXData/COCO/karpathy_image_ids/coco_train_image_id.txt
        input_seq,         # /data/disk1/private/FXData/COCO/sent/coco_train_input.pkl
        target_seq,        # /data/disk1/private/FXData/COCO/sent/coco_train_target.pkl
        gv_feat_path,      # ''
        att_feats_folder,  # /data/disk1/private/FXData/COCO
        seq_per_img,
        max_feat_num,
        id2name_path,      # /data/disk1/private/FXData/COCO/id2name_123287.json
        annotation_path):
    super().__init__(image_ids_path, input_seq, target_seq, gv_feat_path,
                     att_feats_folder, seq_per_img, max_feat_num)
    self.annotation_path = annotation_path
    # per-image bounding-box annotations
    with open(os.path.join(self.annotation_path, 'bboxes_coco_123287.json'),
              'r') as f:
        self.annotation_bboxes = json.load(f)
    # category labels, one per line
    self.id2category = utils.load_lines(
        os.path.join(self.annotation_path, 'id2category.txt'))
    # image id -> image file name mapping
    with open(id2name_path, 'r') as f:
        self.id2name = json.load(f)
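
Finally, a sketch of how the subclass constructor in Example #4 might be called. The subclass name AnnotatedCocoDataset is invented for illustration; the paths simply echo the inline comments above, and annotation_path is assumed to be a directory containing bboxes_coco_123287.json and id2category.txt, as the code requires.

# Hypothetical subclass name; paths echo the comments in Example #4.
dataset = AnnotatedCocoDataset(
    image_ids_path='COCO/karpathy_image_ids/coco_train_image_id.txt',
    input_seq='COCO/sent/coco_train_input.pkl',
    target_seq='COCO/sent/coco_train_target.pkl',
    gv_feat_path='',
    att_feats_folder='COCO',
    seq_per_img=5,
    max_feat_num=-1,
    id2name_path='COCO/id2name_123287.json',
    annotation_path='COCO/annotations')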