Example #1
 def extract_batch(self, imgs, blobs=["fc6", "fc7"]):
     if self.oversample:  # Each oversampled image is a batch
         return self.extract(imgs, blobs)
     feats = {}
     for blob in blobs:
         feats[blob] = []
     data = None
     for img in imgs:
         if data is None:
             data = transform_image(img, self.oversample, self.mean, self.img_dim, self.crop_dim)
         else:
             data = np.vstack((data, transform_image(img, self.oversample, self.mean, self.img_dim, self.crop_dim)))
         if data.shape[0] == self.batch_size:
             self._process_batch(data, feats, blobs)
             data = None
     if data is not None:  # Flush the remaining partial batch
         self._process_batch(data, feats, blobs)
     return feats
Example #2
 def extract_batch(self, imgs, blobs = ['fc6', 'fc7']):
   if self.oversample:   # Each oversampled image is a batch
     return self.extract(imgs, blobs)
   feats = {}
   for blob in blobs:
     feats[blob] = []
   data = None
   for img in imgs:
     if data is None:
       data = transform_image(img, self.oversample, self.mean, self.img_dim, self.crop_dim)
     else:
       data = np.vstack((data, transform_image(img, self.oversample, self.mean, self.img_dim, self.crop_dim)))
     if data.shape[0] == self.batch_size:
       self._process_batch(data, feats, blobs)
       data = None
   if data is not None:  # Flush the remaining partial batch
     self._process_batch(data, feats, blobs)
   return feats
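
Both versions of extract_batch above accumulate transformed images with np.vstack until a full batch of batch_size blobs is ready, flush it, and then flush whatever remains at the end. Below is a minimal standalone sketch of that accumulate-and-flush pattern in plain NumPy; transform, process_batch, batch_size = 4 and the 3x8x8 images are made-up stand-ins for transform_image, self._process_batch and the real inputs.

import numpy as np

batch_size = 4
processed = []          # stands in for the feats dict that _process_batch fills

def transform(img):
    # Stand-in for transform_image: yields one (1, C, H, W) blob per image
    return img.reshape(1, 3, 8, 8)

def process_batch(data):
    # Stand-in for self._process_batch: here we just record the batch shape
    processed.append(data.shape)

imgs = [np.random.rand(3, 8, 8).astype(np.float32) for _ in range(10)]

data = None
for img in imgs:
    blob = transform(img)
    data = blob if data is None else np.vstack((data, blob))
    if data.shape[0] == batch_size:   # a full batch is ready: flush it
        process_batch(data)
        data = None
if data is not None:                  # flush the final partial batch
    process_batch(data)

print(processed)   # [(4, 3, 8, 8), (4, 3, 8, 8), (2, 3, 8, 8)]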
Example #3
 def extract(self, imgs, blobs=["fc6", "fc7"]):
     feats = {}
     for blob in blobs:
         feats[blob] = []
     for img in imgs:
         data = transform_image(img, self.oversample, self.mean, self.img_dim, self.crop_dim)
         # Use forward all to do the padding
         out = self.forward_all(**{self.inputs[0]: data, "blobs": blobs})
         for blob in blobs:
             feat = out[blob]
             if self.oversample:
                 # Average the features over the oversampled crops of each image
                 feat = feat.reshape((len(feat) // self.batch_size, self.batch_size, -1))
                 feat = feat.mean(1)
             feats[blob].append(feat.flatten())
     return feats
Example #4
 def extract(self, imgs, blobs = ['fc6', 'fc7']):
   feats = {}
   for blob in blobs:
     feats[blob] = []
   for img in imgs:
     data = transform_image(img, self.oversample, self.mean, self.img_dim, self.crop_dim)
     # Use forward all to do the padding
     out = self.forward_all(**{self.inputs[0]: data, 'blobs': blobs})
     for blob in blobs:
       feat = out[blob]
       if self.oversample:
         # Average the features over the oversampled crops of each image
         feat = feat.reshape((len(feat) // self.batch_size, self.batch_size, -1))
         feat = feat.mean(1)
       feats[blob].append(feat.flatten())
   return feats
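
When oversample is enabled, extract receives one feature row per crop and averages them back to a single row per image via the reshape + mean step above. The snippet below sketches that collapse on random data, assuming (as the reshape implies) that self.batch_size equals the number of oversampled crops per image, e.g. 10 for Caffe's standard oversampling; the shapes are made up.

import numpy as np

num_images, num_crops, feat_dim = 3, 10, 4096   # 10 crops per image, as in Caffe's oversample
# Fake per-crop fc features, stacked the way forward_all would return them
feat = np.random.rand(num_images * num_crops, feat_dim).astype(np.float32)

# (num_images * num_crops, D) -> (num_images, num_crops, D) -> (num_images, D)
feat = feat.reshape((len(feat) // num_crops, num_crops, -1))
feat = feat.mean(1)
print(feat.shape)   # (3, 4096)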
import argparse
import time

import numpy as np

# transform_image, load_image and load_net_and_param are helper functions
# assumed to be defined elsewhere in this module.


def parse_args():
    """Parse the command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str, help='network definition prototxt')
    parser.add_argument('weights', type=str, help='binary caffe model')
    parser.add_argument('img_list', type=str, help='The image list file')
    parser.add_argument('--batch_num',
                        type=int,
                        default=10,
                        help='Num of images in a batch')
    args = parser.parse_args()
    return args.model, args.weights, args.img_list, args.batch_num


if __name__ == "__main__":
    model_f, weights_f, img_list_f, batch_num = parse_args()
    # Load the imgs
    print("Loading images...")
    with open(img_list_f, 'r') as f:
        img_list = [line.strip() for line in f.readlines()]
        imgs = [transform_image(load_image(name)) for name in img_list]

    net, net_param = load_net_and_param(model_f, weights_f)
    imgs_batch = [
        imgs[i:i + batch_num] for i in range(0, len(imgs), batch_num)
    ]
    for ind, batch in enumerate(imgs_batch):
        # Stack the per-image blobs into a single input batch along axis 0
        data = np.vstack(batch)
        ts = time.time()
        out = net.forward(**{net.inputs[0]: data})
        te = time.time()
        print("Time cost of batch(%d):%2.2f sec" % (ind, te - ts))
def _extract_response(
    net,
    net_param,
    io_param,
    img_batch,
    layers_name,
    sample_ratio=0.1,
    batch_size=10,
):
    """ Extract the linear response in certain blobs in the net
    @Parameters:
        img_batch: list, the image list, with len == net.batch_size
        sample_ratio: sample `ratio` column from the response with shape
                      (C, num_batch*H'*W')
        layer_name: list, the name of the layer to the extract;
    @Returns:
        reponses: dict, {blob_name:[ responses for each image with
        shape(C,num_batch*H'*W') ]}
    """
    responses = {}
    indices = {}
    data = transform_image(img_batch[0], io_param.over_sample, io_param.mean,
                           io_param.image_dim, io_param.crop_dim)
    for img in img_batch[1:]:
        data = np.vstack(
            (data,
             transform_image(img, io_param.over_sample, io_param.mean,
                             io_param.image_dim, io_param.crop_dim)))
    # Do the padding
    if len(img_batch) < batch_size:
        for i in range(0, batch_size - len(img_batch)):
            data = np.vstack(
                (data,
                 transform_image(img_batch[0], io_param.over_sample,
                                 io_param.mean, io_param.image_dim,
                                 io_param.crop_dim)))

    for ind, name in enumerate(layers_name):
        start = layers_name[ind - 1] if ind > 0 else None
        try:
            out = net.forward(**{
                net.inputs[0]: data,
                'start': start,
                'end': name
            })
        except KeyError:
            # `name` is not a key of the forward() output; fall back to reading
            # the layer's top blob directly from net.blobs
            top_dict = {l.name: l.top[0] for l in net_param.layer}
            out = {name: net.blobs[top_dict[name]].data}
        # resp with shape(batch_num, c, H',W')
        resp = out[name][0:len(img_batch)]
        # swap axis into (c, batch_num, H',W')
        resp = resp.swapaxes(0, 1)
        column_idx = list(range(resp.size // resp.shape[0]))
        # Reshape into (c, batch_num * H' * W')
        resp = resp.reshape(resp.shape[0], len(column_idx))
        # Randomly keep a `sample_ratio` fraction of the columns of `resp`
        random.shuffle(column_idx)
        column_idx = column_idx[0:int(len(column_idx) * sample_ratio)]
        column_idx.sort()
        responses[name] = resp[:, column_idx]
        indices[name] = column_idx
    return responses, indices
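
For reference, the core sampling step of _extract_response in isolation: swap the batch and channel axes, flatten the batch and spatial dimensions into columns, and keep a random sample_ratio fraction of those columns in ascending index order. The blob shape (10, 256, 6, 6) below is made up for illustration.

import random
import numpy as np

sample_ratio = 0.1
resp = np.random.rand(10, 256, 6, 6).astype(np.float32)   # fake (batch_num, C, H', W') blob

resp = resp.swapaxes(0, 1)                     # (C, batch_num, H', W')
n_cols = resp.size // resp.shape[0]            # batch_num * H' * W' = 360
resp = resp.reshape(resp.shape[0], n_cols)     # (C, batch_num * H' * W')

column_idx = list(range(n_cols))
random.shuffle(column_idx)
column_idx = sorted(column_idx[:int(n_cols * sample_ratio)])   # keep a random 10%

sampled = resp[:, column_idx]
print(sampled.shape)   # (256, 36)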