Example #1
    def __init__(self, symbol, data_names, label_names,
                 context=mx.cpu(), max_data_shapes=None,
                 provide_data=None, provide_label=None,
                 arg_params=None, aux_params=None):
        self._mod = MutableModule(symbol, data_names, label_names,
                                  context=context, max_data_shapes=max_data_shapes)
        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
Example #2
class Predictor(object):
    def __init__(self,
                 symbol,
                 data_names,
                 label_names,
                 context=mx.cpu(),
                 max_data_shapes=None,
                 provide_data=None,
                 provide_label=None,
                 arg_params=None,
                 aux_params=None):
        self._mod = MutableModule(symbol,
                                  data_names,
                                  label_names,
                                  context=context,
                                  max_data_shapes=max_data_shapes)
        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)

    def predict(self, data_batch):
        self._mod.forward(data_batch)
        # [dict(zip(self._mod.output_names, _)) for _ in zip(*self._mod.get_outputs(merge_multi_context=False))]
        return [
            dict(zip(self._mod.output_names, _))
            for _ in zip(*self._mod.get_outputs(merge_multi_context=False))
        ]

    def save(self, prefix):
        self._mod.save(prefix, 1)
Example #3
    def load_layers(self, symbol_name, bind_size=(224, 224)):
        self.cut_symbol_name = symbol_name
        self.cut_symbol = self.sym.get_internals()['%s_output' % symbol_name]
        self.mod = MutableModule(self.cut_symbol,
                                 self.data_names,
                                 self.label_names,
                                 context=self.ctx)
        self.mod.bind([(self.data_names[0], (1, 3, bind_size[0], bind_size[1]))],
                      for_training=False)
        self.mod.init_params(arg_params=self.arg_params,
                             aux_params=self.aux_params,
                             allow_missing=False)
Example #4
    def __init__(self, symbol, data_names, label_names,
                 context=mx.cpu(), max_data_shapes=None,
                 provide_data=None, provide_label=None,
                 arg_params=None, aux_params=None):
        self._mod = MutableModule(symbol, data_names, label_names,
                                  context=context, max_data_shapes=max_data_shapes)
        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
Example #5
class Predictor(object):
    def __init__(self,
                 sym_gen,
                 cfg,
                 data_names,
                 label_names,
                 context=mx.cpu(),
                 max_data_shapes=None,
                 provide_data=None,
                 provide_label=None,
                 arg_params=None,
                 aux_params=None):
        self._mod = MutableModule(sym_gen,
                                  cfg,
                                  data_names,
                                  label_names,
                                  is_train=False,
                                  context=context,
                                  max_data_shapes=max_data_shapes)
        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)

    def predict(self, data_batch):
        self._mod.forward(data_batch)
        return dict(zip(self._mod.output_names, self._mod.get_outputs()))
Example #6
class MutablePredictor(object):
    def __init__(self,
                 symbol,
                 prefix,
                 epoch,
                 provide_data,
                 provide_label=[],
                 ctx=mx.cpu(),
                 arg_params=None,
                 aux_params=None):
        data_names = [k[0] for k in provide_data]
        label_names = [k[0] for k in provide_label]
        self._mod = MutableModule(symbol, data_names, label_names, context=ctx)
        self._mod.bind(provide_data, for_training=False)
        if arg_params is None:
            arg_params, aux_params = load_param(prefix,
                                                epoch,
                                                convert=True,
                                                ctx=ctx,
                                                process=True)
        self._mod.set_params(arg_params, aux_params)

        self.symbol = symbol
        self.ctx = ctx

    def predict(self, data_batch):
        self._mod.forward(data_batch)
        return dict(zip(self._mod.output_names, self._mod.get_outputs()))
Example #7
    def __init__(self,
                 symbol,
                 data_names,
                 label_names,
                 context=mx.cpu(),
                 max_data_shapes=None,
                 provide_data=None,
                 provide_label=None,
                 arg_params_list=None,
                 aux_params_list=None):
        # assert isinstance(context, list), "the context must be a list"
        assert len(context) == len(arg_params_list), \
            "number of contexts must equal the number of arg_params"
        assert len(context) == len(aux_params_list), \
            "number of contexts must equal the number of aux_params"

        self._mod_list = []
        for ctx, arg_params, aux_params in zip(context, arg_params_list,
                                               aux_params_list):
            mod = MutableModule(symbol,
                                data_names,
                                label_names,
                                context=[ctx],
                                max_data_shapes=max_data_shapes)
            mod.bind(provide_data, provide_label, for_training=False)
            mod.init_params(arg_params=arg_params, aux_params=aux_params)
            self._mod_list.append(mod)

        self.output_names = self._mod_list[0].output_names
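
The snippet above shows only the constructor of this multi-context variant; a predict method is not part of it. Below is a minimal sketch of how the per-context modules might be driven, assuming one data batch per module (the method name and the batching scheme are assumptions, not part of the original source):

    def predict(self, data_batches):
        # Hypothetical: run one batch per module/context and collect named outputs.
        outputs = []
        for mod, batch in zip(self._mod_list, data_batches):
            mod.forward(batch)
            outputs.append(dict(zip(mod.output_names, mod.get_outputs())))
        return outputs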
Example #8
    def __init__(self,
                 symbol,
                 prefix,
                 epoch,
                 provide_data,
                 provide_label=[],
                 ctx=mx.cpu(),
                 arg_params=None,
                 aux_params=None):
        data_names = [k[0] for k in provide_data]
        label_names = [k[0] for k in provide_label]
        self._mod = MutableModule(symbol, data_names, label_names, context=ctx)
        self._mod.bind(provide_data, for_training=False)
        if arg_params is None:
            arg_params, aux_params = load_param(prefix,
                                                epoch,
                                                convert=True,
                                                ctx=ctx,
                                                process=True)
        self._mod.set_params(arg_params, aux_params)

        self.symbol = symbol
        self.ctx = ctx
Example #9
class Predictor(object):
    def __init__(self, symbol, data_names, label_names,
                 context=mx.cpu(), max_data_shapes=None,
                 provide_data=None, provide_label=None,
                 arg_params=None, aux_params=None):
        self._mod = MutableModule(symbol, data_names, label_names,
                                  context=context, max_data_shapes=max_data_shapes)
        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)

    def predict(self, data_batch):
        self._mod.forward(data_batch)
        return dict(zip(self._mod.output_names, self._mod.get_outputs()))
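
A minimal usage sketch for this Predictor, assuming a checkpoint saved with mx.model.save_checkpoint, a single 'data' input of shape (1, 3, 224, 224), and that MutableModule is importable from the surrounding repository (the prefix, shapes, and empty label list are assumptions):

import mxnet as mx

# Hypothetical checkpoint prefix and input shape.
sym, arg_params, aux_params = mx.model.load_checkpoint('model/example', 0)
provide_data = [('data', (1, 3, 224, 224))]

predictor = Predictor(sym, data_names=['data'], label_names=[],
                      context=mx.cpu(),
                      provide_data=provide_data, provide_label=[],
                      arg_params=arg_params, aux_params=aux_params)

batch = mx.io.DataBatch(data=[mx.nd.zeros((1, 3, 224, 224))],
                        label=None,
                        provide_data=provide_data,
                        provide_label=None)
outputs = predictor.predict(batch)  # dict mapping output name -> NDArray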
Example #10
class PredictorOneGPU(object):
    def __init__(self, symbol, data_names, label_names,
                 context_id=mx.cpu(), max_data_shapes=None,
                 provide_data=None, provide_label=None,
                 arg_params=None, aux_params=None):
        self._mod = MutableModule(symbol, data_names, label_names,
                                  context=context_id, max_data_shapes=max_data_shapes)

        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)

    def predict_patch(self, im_tensor, feature_stride, num_steps, output_name="softmax"):

        origin_shape = im_tensor.shape
        data_shape = (origin_shape[0], origin_shape[1], make_divisible(origin_shape[2], feature_stride),
                      make_divisible(origin_shape[3], feature_stride))

        canva_data = np.zeros(
            (data_shape[0], data_shape[1], data_shape[2] + feature_stride, data_shape[3] + feature_stride))

        sy = sx = feature_stride // 2
        canva_data[:, :, sy:sy + data_shape[2], sx:sx + data_shape[3]] = resize_batch_target(im_tensor, data_shape[3],
                                                                                             data_shape[2])

        canva_softmax = np.zeros((data_shape[0], data_shape[1],
                                  data_shape[2] // feature_stride * num_steps,
                                  data_shape[3] // feature_stride * num_steps))

        # prepare the start of the strides
        prediction_stride = feature_stride // num_steps
        sy = sx = prediction_stride // 2 + np.arange(num_steps) * prediction_stride

        for ix in xrange(num_steps):
            for iy in xrange(num_steps):
                input_data = canva_data[:, :, sy[iy]:sy[iy] + data_shape[2], sx[ix]:sx[ix] + data_shape[3]]
                data = [[mx.nd.array(input_data)]]
                data_name = "data"
                provide_data = [[(data_name, input_data.shape)]]
                batch_data = mx.io.DataBatch(data=data, provide_data=provide_data,
                                             label=None, provide_label=None)
                self._mod.forward(batch_data, is_train=False)
                # merge_multi_context=True returns one NDArray per output name
                result = dict(zip(self._mod.output_names,
                                  self._mod.get_outputs(merge_multi_context=True)))
                canva_softmax[:, :, iy::num_steps, ix::num_steps] = result[output_name].asnumpy()

        softmax_output = resize_batch_softmax_output(canva_softmax,origin_shape[2:])
        return np.squeeze(softmax_output)

    def predict(self, imarray, pixel_means, pixel_stds, crop_size=512, color_scale=-1,
                feature_ratio=2.0 / 3, num_steps=1, output_name="softmax", feature_stride=8):

        im_tensor = transform(imarray, pixel_means, color_scale=color_scale, pixel_stds=pixel_stds)
        long_size = max(im_tensor.shape[2:])
        if long_size < crop_size:
            return self.predict_patch(im_tensor, feature_stride, num_steps, output_name)
Example #11
class Predictor(object):
    def __init__(self,
                 symbol,
                 data_names,
                 label_names,
                 context=mx.cpu(),
                 max_data_shapes=None,
                 provide_data=None,
                 provide_label=None,
                 arg_params=None,
                 aux_params=None):
        self._mod = MutableModule(symbol,
                                  data_names,
                                  label_names,
                                  context=context,
                                  max_data_shapes=max_data_shapes)
        self._mod.bind(provide_data, provide_label, for_training=False)
        self._mod.init_params(arg_params=arg_params, aux_params=aux_params)

    def predict(self, data_batch):
        self._mod.forward(data_batch)
        # [dict(zip(self._mod.output_names, _)) for _ in zip(*self._mod.get_outputs(merge_multi_context=False))]
        return [
            dict(zip(self._mod.output_names, _))
            for _ in zip(*self._mod.get_outputs(merge_multi_context=False))
        ]

    def pred_raw_data(predictor,
                      data_batch,
                      data_names,
                      scales,
                      cfg,
                      scores_field='cls_prob_reshape',
                      imdb=None):
        output_all = predictor.predict(data_batch)
        data_dict_all = [
            dict(zip(data_names, idata)) for idata in data_batch.data
        ]
        scores_all = []
        pred_boxes_all = []
        for output, data_dict, scale in zip(output_all, data_dict_all, scales):
            if cfg.TEST.HAS_RPN:
                rois = output['rois_output'].asnumpy()[:, 1:]
            else:
                rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
            fc_new_1 = output['fc_new_1_relu_output'].asnumpy()
        return rois, fc_new_1
Example #12
class Viewer(object):
  """
      for vgg: mean=[123.68, 116.28, 103.53], std=1, 224 for softmax
  """
  def __init__(self, model_prefix, epoch, ctx=mx.gpu(),
               data_names=['data'], label_names=['prob_label'],
               mean=[0, 0, 0], std=1):
    self.sym, self.arg_params, self.aux_params = mx.model.load_checkpoint(model_prefix, epoch)
    self.ctx = ctx
    self.data_names = data_names
    self.label_names = label_names
    self.mean = np.array(mean)
    self.std  = std

    self.mod = None
    self.cut_symbol = None
    self.cut_symbol_name = None
    self.imgname  = None
    self.raw_img = None
    self.resized_img = None
    self.forward_img = None

    # containers for output
    self.raw_out_ = None     # raw network output
    self.raw_out = None      # post-processed output (channel-averaged map or softmax vector)
    self.resized_out = None  # map resized to the raw image size; None when the output is a vector


  def load_layers(self, symbol_name, bind_size=(224, 224)):
    self.cut_symbol_name = symbol_name
    self.cut_symbol = self.sym.get_internals()['%s_output' % symbol_name]
    self.mod = MutableModule(self.cut_symbol, self.data_names, self.label_names, context=self.ctx)
    self.mod.bind([(self.data_names[0], (1, 3, bind_size[0], bind_size[1]))], for_training=False)
    self.mod.init_params(arg_params=self.arg_params, aux_params=self.aux_params, allow_missing=False)

  def _predict(self, H0, W0, re_size_HW):
    if re_size_HW is not None: # force to resize
      self.resized_img = mx.img.imresize(self.raw_img, re_size_HW[1], re_size_HW[0])
    else:
      self.resized_img = self.raw_img
    self.resized_img = self.resized_img.asnumpy()
    self.raw_img = self.raw_img.asnumpy()

    self.forward_img = mx.nd.array((self.resized_img - self.mean) / self.std)
    self.forward_img = mx.nd.transpose(self.forward_img, axes=(2, 0, 1))
    self.forward_img = mx.nd.expand_dims(self.forward_img, axis=0)

    d = mx.io.DataBatch([self.forward_img],
                        provide_data=[(self.data_names[0], self.forward_img.shape)])
    self.mod.forward(d)

    # keepdims for mx.img.imresize
    self.raw_out_ = self.mod.get_outputs()[0]
    if len(self.raw_out_.shape)==2:  # softmax output
      self.resized_out  = None # referenced by view()
      self.raw_out = self.raw_out_[0].asnumpy()   #  ( num class, )

    elif len(self.raw_out_.shape) == 4:  # normal feature map
      # average the raw feature map over channels (the batch axis is dropped by [0])
      self.raw_out = mx.nd.mean(self.raw_out_[0], axis=0, keepdims=True).asnumpy()
      self.raw_out = self.raw_out / (self.raw_out.max() + np.exp(-8)) * 255
      self.raw_out = self.raw_out.astype(np.uint8)  # 1 x h x w
      # resize back to the original image shape
      img_tmp = mx.nd.array(self.raw_out)
      img_tmp = mx.nd.transpose(img_tmp, axes=(1, 2, 0))  # h x w x 1
      self.resized_out = mx.nd.transpose(mx.img.imresize(img_tmp, W0, H0), axes=(2, 0, 1))[0].asnumpy()  # h x w
      self.raw_out = self.raw_out[0][:]  # h x w
    else:
      # unexpected output rank
      assert 0


  def predict(self, img_path, re_size_HW=None):
    """ will not plot, use .view after this call"""

    assert os.path.isfile(img_path), '%s does not exist!'%img_path
    self.imgname = os.path.basename(img_path)
    self.raw_img = mx.img.imdecode( open(img_path, 'rb').read() )
    H0,W0 = self.raw_img.shape[:2]
    self._predict(H0, W0, re_size_HW)


  def view(self, block=False,top_k=5):
    assert self.raw_img is not None
    plt.figure()
    plt.suptitle(self.imgname)
    plt.subplot(221)
    plt.imshow(self.raw_img)
    plt.title('raw image')
    plt.subplot(223)
    plt.imshow(self.resized_img)
    plt.title('resized input')

    if self.resized_out is not None: # 1.raw 3.resized img 2. resized out 4. raw out
      plt.subplot(224)
      plt.imshow(self.raw_out)
      plt.title('raw feature map')
      plt.subplot(222)
      plt.imshow(self.resized_out)
      plt.title('resized feature map')
    else:  # stem the softmax vector
      plt.subplot(222)
      t = np.array(xrange(len(self.raw_out)))
      plt.stem(t, self.raw_out, markerfmt='o')  # markerfmt is the stem() marker argument
      plt.title('softmax distribution')
      plt.subplot(224)
      idx = np.argsort(self.raw_out)[::-1]
      idx = idx[:top_k]
      t = t[idx]
      l = self.raw_out[idx]
      plt.stem(t, l, markerfmt='o')
      plt.xlabel('most likely labels: %s' % t)
      plt.title('Top-k distribution')
    plt.show(block=block)

  def crop_predict(self,img_path, xy_tl, xy_br, re_size_HW=None):
    """ will not plot, use .view after this call"""
    hw = (xy_br[1]-xy_tl[1], xy_br[0]-xy_tl[0])
    xy = xy_tl
    assert os.path.isfile(img_path), '%s does not exist!'%img_path
    self.imgname = os.path.basename(img_path)
    self.raw_img = mx.img.imdecode( open(img_path, 'rb').read() )
    self.raw_img = mx.img.fixed_crop(self.raw_img, xy[0], xy[1], hw[1], hw[0])
    H0,W0 = self.raw_img.shape[:2]
    self._predict(H0, W0, re_size_HW)
  def save_rawfeat(self, savename):
      plt.close()
      plt.matshow(self.raw_out)
      #plt.show()
      plt.savefig(savename)
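
A minimal usage sketch for the Viewer class, assuming a VGG-style checkpoint on disk and an existing image file (the prefix, layer name, and file name are assumptions):

viewer = Viewer('model/vgg16', epoch=0, ctx=mx.cpu(),
                mean=[123.68, 116.28, 103.53], std=1)
viewer.load_layers('relu5_3', bind_size=(224, 224))  # pick an intermediate layer to visualize
viewer.predict('example.jpg', re_size_HW=(224, 224))
viewer.view(block=True, top_k=5)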