    def check_quantize_model(qdtype):
        def check_params(params, qparams, qsym=None):
            if qsym is None:
                assert len(params) == len(qparams)
                for k, v in params.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())
            else:
                qparams_ground_truth = mx.contrib.quant._quantize_params(qsym, params)
                assert len(qparams) == len(qparams_ground_truth)
                for k, v in qparams_ground_truth.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())

        def check_qsym_calibrated(qsym):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('requantize_') != -1:
                    assert 'min_calib_range' in v
                    assert 'max_calib_range' in v

        def check_qsym_qdtype(qsym, qdtype):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('_quantize') != -1:
                    assert 'out_type' in v
                    assert v['out_type'] == qdtype

        sym = get_fp32_sym()
        mod = Module(symbol=sym)
        batch_size = 4
        data_shape = (batch_size, 4, 10, 10)
        label_shape = (batch_size, 10)
        mod.bind(data_shapes=[('data', data_shape)], label_shapes=[('softmax_label', label_shape)])
        mod.init_params()
        arg_params, aux_params = mod.get_params()
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym,
                                                                         arg_params=arg_params,
                                                                         aux_params=aux_params,
                                                                         ctx=mx.current_context(),
                                                                         quantized_dtype=qdtype,
                                                                         calib_mode='none')
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)

        calib_data = mx.nd.random.uniform(shape=data_shape)
        calib_data = NDArrayIter(data=calib_data)
        calib_data = DummyIter(calib_data)
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym,
                                                                         arg_params=arg_params,
                                                                         aux_params=aux_params,
                                                                         ctx=mx.current_context(),
                                                                         quantized_dtype=qdtype,
                                                                         calib_mode='naive',
                                                                         calib_data=calib_data,
                                                                         num_calib_examples=20)
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_calibrated(qsym)
        check_qsym_qdtype(qsym, qdtype)
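
The naive-calibration run above wraps an NDArrayIter in a DummyIter, a helper that is not defined in this listing. A minimal sketch of such a wrapper, assuming all it must do is cache one real batch and replay it so the calibrator can always draw num_calib_examples samples:

import mxnet as mx

class DummyIter(mx.io.DataIter):
    """Replay one cached batch forever (sketch, not the exact test helper)."""
    def __init__(self, real_iter):
        super(DummyIter, self).__init__(real_iter.batch_size)
        self.provide_data = real_iter.provide_data
        self.provide_label = real_iter.provide_label
        self.batch = next(real_iter)  # cache a single real batch

    def next(self):
        return self.batch  # never raises StopIteration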
def check_quantize_whole_model(out_type):
    batch_size = 4
    data_shape = (batch_size, 4, 10, 10)
    data = mx.sym.Variable('data')
    conv0 = mx.sym.Convolution(data, kernel=(1, 1), num_filter=16, name='conv0')
    sym = mx.sym.Convolution(conv0, kernel=(1, 1), num_filter=16, name='conv1')
    sym_sg = sym.get_backend_symbol('MKLDNN_QUANTIZE')
    mod = Module(symbol=sym, label_names=None)
    mod.bind(for_training=False,
             data_shapes=[('data', data_shape)])

    mod.init_params(mx.init.Normal(0.5))
    arg_params, aux_params = mod.get_params()

    excluded_sym_names = []

    calib_data = mx.nd.random.uniform(shape=data_shape)
    calib_data = mx.io.NDArrayIter(data=calib_data)
    calib_data = DummyIter(calib_data)
    calib_layer = lambda name: name.endswith('_output')
    qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
                                                                     arg_params=arg_params,
                                                                     aux_params=aux_params,
                                                                     ctx=mx.current_context(),
                                                                     excluded_sym_names=excluded_sym_names,
                                                                     quantized_dtype=out_type,
                                                                     calib_mode='naive',
                                                                     calib_data=calib_data,
                                                                     calib_layer=calib_layer,
                                                                     label_names=None,
                                                                     num_calib_examples=1)
    qsym = qsym.get_backend_symbol('MKLDNN_QUANTIZE')
    check_qsym_forward(qsym, qarg_params, qaux_params, data_shape)
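
check_qsym_forward is not defined alongside this example, but later examples in this listing define close variants. A minimal version consistent with the call above (quantized symbol, no labels, one random batch):

def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape):
    # Bind the quantized graph for inference only and run one random batch.
    mod = mx.mod.Module(symbol=qsym, label_names=None, context=mx.current_context())
    mod.bind(for_training=False, data_shapes=[('data', data_shape)])
    mod.set_params(qarg_params, qaux_params)
    data = [mx.random.uniform(-1.0, 1.0, shape=shape) for _, shape in mod.data_shapes]
    mod.forward(mx.io.DataBatch(data, []), is_train=False)
    for output in mod.get_outputs():
        output.wait_to_read()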
Example #4
def check_quantize(sym, data_shape, out_type, name='conv',
                   check_calibration=True, gluon_forward=False):
  sg_pass_name = config[name][SG_PASS_NAME]
  post_sg_pass_name = config[name][POST_SG_PASS_NAME]

  fc = mx.sym.FullyConnected(data=sym, num_hidden=10, flatten=True, name='fc_softmax')
  if gluon_forward:
    sym = fc
    sym_sg = sym.get_backend_symbol(sg_pass_name)
    mod = Module(symbol=sym, label_names=[])
    mod.bind(for_training=False,
             data_shapes=[('data', data_shape)])
  else:
    sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
    sym_sg = sym.get_backend_symbol(sg_pass_name)
    label_shape = (data_shape[0], 10)
    mod = Module(symbol=sym)
    mod.bind(for_training=False,
             data_shapes=[('data', data_shape)],
             label_shapes=[('softmax_label', label_shape)])
  mod.init_params(mx.init.Normal(0.5))
  arg_params, aux_params = mod.get_params()

  data = [mx.random.uniform(-1, 1, shape=shape, ctx=mx.current_context()) for _, shape in mod.data_shapes]
  batch = mx.io.DataBatch(data, [])

  mod.forward(batch, is_train=False)
  for output in mod.get_outputs():
      output.wait_to_read()
  ref_out = mod.get_outputs()

  excluded_sym_names = []
  if mx.current_context() == mx.cpu() and gluon_forward:
    excluded_sym_names += ['sg_mkldnn_fully_connected_0']
    excluded_sym_names += ['fc_softmax']

  calib_data = mx.nd.random.uniform(shape=data_shape)
  calib_data = NDArrayIter(data=calib_data)
  calib_data = DummyIter(calib_data)
  calib_layer = lambda name: name.endswith('_output')
  qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
                                                                   arg_params=arg_params,
                                                                   aux_params=aux_params,
                                                                   ctx=mx.current_context(),
                                                                   excluded_sym_names=excluded_sym_names,
                                                                   quantized_dtype=out_type,
                                                                   calib_mode='naive',
                                                                   calib_data=calib_data,
                                                                   calib_layer=calib_layer,
                                                                   num_calib_examples=5)
  qsym = qsym.get_backend_symbol(post_sg_pass_name)
  if check_calibration:
    check_qsym_calibrated(qsym, out_type, name=name)
  if gluon_forward:
    check_qsym_gluon_forward(qsym, qarg_params, qaux_params, data_shape)
  else:
    check_qsym_dummy_forward(qsym, batch, data_shape, label_shape)
    quantized_out = check_qsym_forward(qsym, qarg_params, qaux_params, batch, data_shape, label_shape)
    for i in range(len(ref_out)):
      assert_almost_equal(ref_out[i].asnumpy(), quantized_out[i].asnumpy(), atol=1)
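
A hypothetical driver for this helper; the small conv symbol and the reuse of the 'conv' config key below are illustrative, not from the original test:

# Hypothetical usage: quantize a small conv graph to both 8-bit dtypes.
data = mx.sym.Variable('data')
conv = mx.sym.Convolution(data, kernel=(3, 3), pad=(1, 1), num_filter=16, name='conv')
for out_type in ('int8', 'uint8'):
    check_quantize(conv, (4, 4, 10, 10), out_type, name='conv')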
Example #5
def check_quantize(sym, data_shape, out_type, name='conv',
                   check_calibration=True, gluon_forward=False, check_scale_align=False):
  if name in config:
    name = config[name][OP_NAME]
  sym_sg = sym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
  mod = Module(symbol=sym, label_names=None)
  mod.bind(for_training=False,
           data_shapes=[('data', data_shape)])
  mod.init_params(mx.init.Normal(0.5))
  arg_params, aux_params = mod.get_params()

  if out_type == 'uint8':
    data = [mx.random.uniform(0.0, 1.0, shape=shape, ctx=mx.current_context()) for _, shape in mod.data_shapes]
  else:
    data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=mx.current_context()) for _, shape in mod.data_shapes]
  batch = mx.io.DataBatch(data, [])

  mod.forward(batch, is_train=False)
  for output in mod.get_outputs():
      output.wait_to_read()
  ref_out = mod.get_outputs()

  excluded_sym_names = []
  excluded_op_names = []
  if mx.current_context() == mx.cpu() and gluon_forward:
    excluded_op_names += ['_sg_mkldnn_fully_connected']

  calib_data = CalibIter(batch, data_shape, 1)

  qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
                                                                   arg_params=arg_params,
                                                                   aux_params=aux_params,
                                                                   ctx=mx.current_context(),
                                                                   excluded_sym_names=excluded_sym_names,
                                                                   excluded_op_names=excluded_op_names,
                                                                   quantized_dtype=out_type,
                                                                   calib_mode='naive',
                                                                   calib_data=calib_data,
                                                                   calib_layer=None,
                                                                   label_names=None,
                                                                   num_calib_examples=1)
  qsym = qsym.get_backend_symbol(QUANTIZE_SG_PASS_NAME)
  if check_calibration:
    check_qsym_calibrated(qsym, out_type, name=name)
  if check_scale_align:
    check_qsym_scale_align(qsym)
  if gluon_forward:
    check_qsym_gluon_forward(qsym, qarg_params, qaux_params, data_shape)
  else:
    quantized_out = check_qsym_forward(qsym, qarg_params, qaux_params, batch, data_shape)
    for i in range(len(ref_out)):
      min_range = mx.nd.min(ref_out[i]).asscalar()
      max_range = mx.nd.max(ref_out[i]).asscalar()
      atol = 0.1 * max(abs(min_range), abs(max_range))
      assert_almost_equal_with_err(quantized_out[i].asnumpy(), ref_out[i].asnumpy(), rtol=0.1, atol=atol, etol=0.2)
    check_qsym_dummy_forward(qsym, batch, data_shape)
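
CalibIter, used above to feed one pre-built batch to the calibrator, is another helper external to this snippet. A plausible minimal implementation, assuming it only needs to advertise the data shape and yield its single batch once per pass:

class CalibIter(mx.io.DataIter):
    """Single-batch calibration iterator (sketch of the assumed helper)."""
    def __init__(self, batch, data_shape, batch_size):
        super(CalibIter, self).__init__(batch_size)
        self.provide_data = [('data', data_shape)]
        self.provide_label = []
        self.batch = batch
        self._consumed = False

    def reset(self):
        self._consumed = False

    def next(self):
        if self._consumed:
            raise StopIteration
        self._consumed = True
        return self.batch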
def check_quantize(sym, data_shape, check_conv=True):
  fc = mx.sym.FullyConnected(data=sym, num_hidden=10, flatten=True, name='fc')
  sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
  sym_sg = sym.get_backend_symbol("MKLDNN")
  label_shape = (data_shape[0], 10)
  mod = Module(symbol=sym)
  mod.bind(for_training=False,
           data_shapes=[('data', data_shape)],
           label_shapes=[('softmax_label', label_shape)])
  mod.init_params(mx.init.Normal(0.5))
  arg_params, aux_params = mod.get_params()

  data = [mx.random.uniform(-1, 1, shape=shape, ctx=mx.current_context()) for _, shape in mod.data_shapes]
  batch = mx.io.DataBatch(data, [])

  mod.forward(batch, is_train=False)
  for output in mod.get_outputs():
      output.wait_to_read()
  ref_out = mod.get_outputs()

  excluded_sym_names = []
  if mx.current_context() == mx.cpu():
    excluded_sym_names += ['fc']

  calib_data = mx.nd.random.uniform(shape=data_shape)
  calib_data = NDArrayIter(data=calib_data)
  calib_data = DummyIter(calib_data)
  calib_layer = lambda name: name.endswith('_output')
  qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym_sg,
                                                                   arg_params=arg_params,
                                                                   aux_params=aux_params,
                                                                   ctx=mx.current_context(),
                                                                   excluded_sym_names=excluded_sym_names,
                                                                   quantized_dtype='uint8',
                                                                   calib_mode='naive',
                                                                   calib_data=calib_data,
                                                                   calib_layer=calib_layer,
                                                                   calib_quantize_op=True,
                                                                   num_calib_examples=5)
  qsym = qsym.get_backend_symbol("MKLDNN_POST_QUANTIZE")
  if check_conv:
    check_qsym_calibrated(qsym)
  quantized_out = check_qsym_forward(qsym, qarg_params, qaux_params, batch, data_shape, label_shape)
  for i in range(len(ref_out)):
    assert_almost_equal(ref_out[i].asnumpy(), quantized_out[i].asnumpy(), atol=1)
  check_qsym_dummy_forward(qsym, batch, data_shape, label_shape)
Example #8
class Solver(object):
    def __init__(self,
                 symbol,
                 data_names,
                 label_names,
                 data_shapes,
                 label_shapes,
                 logger=logging,
                 context=mx.cpu(),
                 work_load_list=None,
                 fixed_param_names=None):
        self.symbol = symbol
        self.data_names = data_names
        self.label_names = label_names
        self.data_shapes = data_shapes
        self.label_shapes = label_shapes
        self.context = context
        self.work_load_list = work_load_list
        self.fixed_param_names = fixed_param_names

        if logger is None:
            logger = logging.getLogger()
            logger.setLevel(logging.INFO)
        self.logger = logger
        self.module = Module(symbol=self.symbol,
                             data_names=self.data_names,
                             label_names=self.label_names,
                             logger=self.logger,
                             context=self.context,
                             work_load_list=self.work_load_list,
                             fixed_param_names=self.fixed_param_names)

    def fit(self,
            train_data,
            eval_data=None,
            eval_metric='acc',
            validate_metric=None,
            work_load_list=None,
            epoch_end_callback=None,
            batch_end_callback=None,
            fixed_param_prefix=None,
            initializer=None,
            arg_params=None,
            aux_params=None,
            allow_missing=False,
            optimizer=None,
            optimizer_params=None,
            begin_epoch=0,
            num_epoch=None,
            kvstore='device',
            teacher_modules=None):
        if not isinstance(teacher_modules, list):
            teacher_modules = [teacher_modules]
        self.module.bind(data_shapes=self.data_shapes,
                         label_shapes=self.label_shapes,
                         for_training=True)
        self.module.init_params(initializer=initializer,
                                arg_params=arg_params,
                                aux_params=aux_params,
                                allow_missing=allow_missing)
        self.module.init_optimizer(kvstore=kvstore,
                                   optimizer=optimizer,
                                   optimizer_params=optimizer_params)

        if validate_metric is None:
            validate_metric = eval_metric
        if not isinstance(eval_metric, metric.EvalMetric):
            eval_metric = metric.create(eval_metric)

        # training loop
        for epoch in range(begin_epoch, num_epoch):
            tic = time.time()
            eval_metric.reset()
            nbatch = 0
            data_iter = iter(train_data)
            end_of_batch = False
            next_data_batch = next(data_iter)
            while not end_of_batch:
                data_batch = next_data_batch

                if teacher_modules[0] is not None:
                    for teacher_module in teacher_modules:
                        teacher_module.forward(data_batch=data_batch,
                                               is_train=True)
                        transfer_label = teacher_module.get_outputs()
                        data_batch.label = data_batch.label + transfer_label
                self.module.forward(data_batch, is_train=True)
                self.module.backward()
                self.module.update()

                try:
                    next_data_batch = next(data_iter)
                except StopIteration:
                    end_of_batch = True

                self.module.update_metric(eval_metric, data_batch.label)

                if batch_end_callback is not None:
                    batch_end_params = BatchEndParam(epoch=epoch,
                                                     nbatch=nbatch,
                                                     eval_metric=eval_metric,
                                                     locals=locals())
                    for callback in _as_list(batch_end_callback):
                        callback(batch_end_params)
                nbatch += 1

            for name, val in eval_metric.get_name_value():
                self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
            toc = time.time()
            self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))

            arg_params, aux_params = self.module.get_params()
            self.module.set_params(arg_params, aux_params)

            if epoch_end_callback is not None:
                for callback in _as_list(epoch_end_callback):
                    callback(epoch, self.symbol, arg_params, aux_params)
            if eval_data:
                res = self.module.score(eval_data,
                                        validate_metric,
                                        score_end_callback=None,
                                        batch_end_callback=None,
                                        reset=True,
                                        epoch=epoch)
                for name, val in res:
                    self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name,
                                     val)

            train_data.reset()
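
A hypothetical invocation of this Solver; net, train_iter, and val_iter are placeholders for a user's symbol and data iterators, not names from the original code:

# Hypothetical usage of Solver with placeholder symbol and iterators.
solver = Solver(symbol=net,
                data_names=('data',),
                label_names=('softmax_label',),
                data_shapes=[('data', (32, 3, 224, 224))],
                label_shapes=[('softmax_label', (32,))],
                context=mx.gpu(0))
solver.fit(train_data=train_iter,
           eval_data=val_iter,
           optimizer='sgd',
           optimizer_params={'learning_rate': 0.01},
           num_epoch=10)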
Example #9
    def check_quantize_model(qdtype):
        if is_test_for_native_cpu():
            print(
                'skipped testing test_quantize_model_with_forward for native cpu since it is not supported yet'
            )
            return
        elif qdtype == 'int8' and is_test_for_mkldnn():
            print(
                'skipped testing test_quantize_model_with_forward for mkldnn cpu int8 since it is not supported yet'
            )
            return
        elif qdtype == 'uint8' and is_test_for_gpu():
            print(
                'skipped testing test_quantize_model_with_forward for gpu uint8 since it is not supported yet'
            )
            return

        def check_params(params, qparams, qsym=None):
            if qsym is None:
                assert len(params) == len(qparams)
                for k, v in params.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())
            else:
                qparams_ground_truth = mx.contrib.quant._quantize_params(
                    qsym, params, th_dict={})
                assert len(qparams) == len(qparams_ground_truth)
                for k, v in qparams_ground_truth.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())

        def check_qsym_calibrated(qsym):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('requantize_') != -1:
                    assert 'min_calib_range' in v
                    assert 'max_calib_range' in v

        def check_qsym_qdtype(qsym, qdtype):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('_quantize') != -1:
                    assert 'out_type' in v
                    assert v['out_type'] == qdtype

        def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape,
                               label_shape):
            mod = mx.mod.Module(symbol=qsym, context=mx.current_context())
            mod.bind(for_training=False,
                     data_shapes=[('data', data_shape)],
                     label_shapes=[('softmax_label', label_shape)])
            mod.set_params(qarg_params, qaux_params)
            data = [
                mx.random.uniform(-1.0, 1.0, shape=shape)
                for _, shape in mod.data_shapes
            ]
            batch = mx.io.DataBatch(data, [])
            mod.forward(batch, is_train=False)
            for output in mod.get_outputs():
                output.wait_to_read()

        sym = get_fp32_residual()
        batch_size = 4
        data_shape = (batch_size, 4, 10, 10)
        label_shape = (batch_size, 10)

        length = batch_size  # number of outputs produced by the split op
        msym = get_fp32_sym_with_multiple_outputs(length)
        msym_label_shape = (length, 10)
        msym_data_shape = (length, 4, 4, 10, 10)

        for s, dshape, lshape in zip((sym, msym),
                                     (data_shape, msym_data_shape),
                                     (label_shape, msym_label_shape)):
            mod = Module(symbol=s)
            mod.bind(data_shapes=[('data', dshape)],
                     label_shapes=[('softmax_label', lshape)])

            mod.init_params()
            arg_params, aux_params = mod.get_params()
            excluded_names = []
            if mx.current_context() == mx.cpu():
                excluded_names += ['fc']
            excluded_names += ['concat']

            optional_names = ['pool0']
            for skip_optional_names in [False, True]:
                if skip_optional_names:
                    excluded_sym_names = excluded_names
                else:
                    excluded_sym_names = excluded_names + optional_names

                qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
                    sym=s,
                    arg_params=arg_params,
                    aux_params=aux_params,
                    excluded_sym_names=excluded_sym_names,
                    ctx=mx.current_context(),
                    quantized_dtype=qdtype,
                    calib_mode='none')
                check_params(arg_params, qarg_params, qsym)
                check_params(aux_params, qaux_params)
                check_qsym_forward(qsym, qarg_params, qaux_params, dshape,
                                   lshape)

                calib_data = mx.nd.random.uniform(shape=dshape)
                calib_data = NDArrayIter(data=calib_data,
                                         batch_size=batch_size)
                calib_data = DummyIter(calib_data)
                qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
                    sym=s,
                    arg_params=arg_params,
                    aux_params=aux_params,
                    excluded_sym_names=excluded_sym_names,
                    ctx=mx.current_context(),
                    quantized_dtype=qdtype,
                    calib_mode='naive',
                    calib_data=calib_data,
                    num_calib_examples=20)
                check_params(arg_params, qarg_params, qsym)
                check_params(aux_params, qaux_params)
                check_qsym_calibrated(qsym)
                check_qsym_qdtype(qsym, qdtype)
                check_qsym_forward(qsym, qarg_params, qaux_params, dshape,
                                   lshape)
Example #10
    def check_quantize_model(qdtype):
        if is_test_for_native_cpu():
            print(
                'skipped testing test_quantize_model_with_forward for native cpu since it is not supported yet'
            )
            return
        elif qdtype == 'int8' and is_test_for_mkldnn():
            print(
                'skipped testing test_quantize_model_with_forward for mkldnn cpu int8 since it is not supported yet'
            )
            return
        elif qdtype == 'uint8' and is_test_for_gpu():
            print(
                'skipped testing test_quantize_model_with_forward for gpu uint8 since it is not supported yet'
            )
            return

        def check_params(params, qparams, qsym=None):
            if qsym is None:
                assert len(params) == len(qparams)
                for k, v in params.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())
            else:
                qparams_ground_truth = mx.contrib.quant._quantize_params(
                    qsym, params, th_dict={})
                assert len(qparams) == len(qparams_ground_truth)
                for k, v in qparams_ground_truth.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())

        def check_qsym_calibrated(qsym):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('requantize_') != -1:
                    assert 'min_calib_range' in v
                    assert 'max_calib_range' in v

        def check_qsym_qdtype(qsym, qdtype):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('_quantize') != -1:
                    assert 'out_type' in v
                    assert v['out_type'] == qdtype

        def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape):
            mod = mx.mod.Module(symbol=qsym,
                                label_names=None,
                                context=mx.current_context())
            mod.bind(for_training=False, data_shapes=[('data', data_shape)])
            mod.set_params(qarg_params, qaux_params)
            data = [
                mx.random.uniform(-1.0, 1.0, shape=shape)
                for _, shape in mod.data_shapes
            ]
            batch = mx.io.DataBatch(data, [])
            mod.forward(batch, is_train=False)
            for output in mod.get_outputs():
                output.wait_to_read()

        batch_size = 4
        dshape = (batch_size, 4, 10, 10)
        data = mx.sym.Variable('data')
        sym = mx.sym.Convolution(data,
                                 kernel=(1, 1),
                                 num_filter=16,
                                 name='conv0')

        mod = Module(symbol=sym, label_names=None)
        mod.bind(data_shapes=[('data', dshape)])

        mod.init_params()
        arg_params, aux_params = mod.get_params()
        excluded_sym_names = []

        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
            sym=sym,
            arg_params=arg_params,
            aux_params=aux_params,
            excluded_sym_names=excluded_sym_names,
            ctx=mx.current_context(),
            quantized_dtype=qdtype,
            calib_mode='none')
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_forward(qsym, qarg_params, qaux_params, dshape)

        calib_data = mx.nd.random.uniform(shape=dshape)
        calib_data = NDArrayIter(data=calib_data, batch_size=batch_size)
        calib_data = DummyIter(calib_data)
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
            sym=sym,
            arg_params=arg_params,
            aux_params=aux_params,
            excluded_sym_names=excluded_sym_names,
            ctx=mx.current_context(),
            quantized_dtype=qdtype,
            calib_mode='naive',
            calib_data=calib_data,
            num_calib_examples=20)
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_calibrated(qsym)
        check_qsym_qdtype(qsym, qdtype)
        check_qsym_forward(qsym, qarg_params, qaux_params, dshape)
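
The skip messages above name the enclosing test, test_quantize_model_with_forward, which typically drives this helper once per quantized dtype. A sketch of that outer structure:

def test_quantize_model_with_forward():
    def check_quantize_model(qdtype):
        ...  # body as defined above
    # Exercise both dtypes; the guards inside check_quantize_model skip
    # unsupported backend/dtype combinations.
    for qdtype in ['int8', 'uint8']:
        check_quantize_model(qdtype)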
Example #11
    def check_quantize_model(qdtype):
        if is_test_for_native_cpu():
            print(
                'skipped testing quantize_model for native cpu since it is not supported yet'
            )
            return
        elif qdtype == 'int8' and is_test_for_mkldnn():
            print(
                'skipped testing quantize_model for mkldnn cpu int8 since it is not supported yet'
            )
            return
        elif qdtype == 'uint8' and is_test_for_gpu():
            print(
                'skipped testing quantize_model for gpu uint8 since it is not supported yet'
            )
            return

        def check_params(params, qparams, qsym=None):
            if qsym is None:
                assert len(params) == len(qparams)
                for k, v in params.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())
            else:
                qparams_ground_truth = mx.contrib.quant._quantize_params(
                    qsym, params, th_dict={})
                assert len(qparams) == len(qparams_ground_truth)
                for k, v in qparams_ground_truth.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())

        def check_qsym_calibrated(qsym):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('requantize_') != -1:
                    assert 'min_calib_range' in v
                    assert 'max_calib_range' in v

        def check_qsym_qdtype(qsym, qdtype):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('_quantize') != -1:
                    assert 'out_type' in v
                    assert v['out_type'] == qdtype

        sym = get_fp32_sym()
        batch_size = 4
        label_shape = (batch_size, 10)
        data_shape = (batch_size, 4, 10, 10)

        length = batch_size  # number of outputs produced by the split op
        msym = get_fp32_sym_with_multiple_outputs(length)
        msym_label_shape = (length, 10)
        msym_data_shape = (length, 4, 4, 10, 10)

        for s, dshape, lshape in zip((sym, msym),
                                     (data_shape, msym_data_shape),
                                     (label_shape, msym_label_shape)):
            mod = Module(symbol=s)
            mod.bind(data_shapes=[('data', dshape)],
                     label_shapes=[('softmax_label', lshape)])
            mod.init_params()
            arg_params, aux_params = mod.get_params()
            qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
                sym=s,
                arg_params=arg_params,
                aux_params=aux_params,
                ctx=mx.current_context(),
                quantized_dtype=qdtype,
                calib_mode='none')
            check_params(arg_params, qarg_params, qsym)
            check_params(aux_params, qaux_params)

            calib_data = mx.nd.random.uniform(shape=dshape)
            calib_data = NDArrayIter(data=calib_data, batch_size=batch_size)
            calib_data = DummyIter(calib_data)
            qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(
                sym=s,
                arg_params=arg_params,
                aux_params=aux_params,
                ctx=mx.current_context(),
                quantized_dtype=qdtype,
                calib_mode='naive',
                calib_data=calib_data,
                num_calib_examples=20)
            check_params(arg_params, qarg_params, qsym)
            check_params(aux_params, qaux_params)
            check_qsym_calibrated(qsym)
            check_qsym_qdtype(qsym, qdtype)
Example #12
    def check_quantize_model(qdtype):
        if is_test_for_native_cpu():
            print('skipped testing quantized_residual_unit for native cpu since it is not supported yet')
            return
        elif qdtype == 'int8' and is_test_for_mkldnn():
            print('skipped testing quantized_residual_unit for mkldnn cpu int8 since it is not supported yet')
            return
        elif qdtype == 'uint8' and is_test_for_gpu():
            print('skipped testing quantized_residual_unit for gpu uint8 since it is not supported yet')
            return

        def check_params(params, qparams, qsym=None):
            if qsym is None:
                assert len(params) == len(qparams)
                for k, v in params.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())
            else:
                qparams_ground_truth = mx.contrib.quant._quantize_params(qsym, params)
                assert len(qparams) == len(qparams_ground_truth)
                for k, v in qparams_ground_truth.items():
                    assert k in qparams
                    assert same(v.asnumpy(), qparams[k].asnumpy())

        def check_qsym_calibrated(qsym):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('requantize_') != -1:
                    assert 'min_calib_range' in v
                    assert 'max_calib_range' in v

        def check_qsym_qdtype(qsym, qdtype):
            attrs = qsym.attr_dict()
            for k, v in attrs.items():
                if k.find('_quantize') != -1:
                    assert 'out_type' in v
                    assert v['out_type'] == qdtype

        def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape, label_shape):
            mod = mx.mod.Module(symbol=qsym, context=mx.current_context())
            mod.bind(for_training=False,
                     data_shapes=[('data', data_shape)],
                     label_shapes=[('softmax_label', label_shape)])
            mod.set_params(qarg_params, qaux_params)
            data = [mx.random.uniform(-1.0, 1.0, shape=shape) for _, shape in mod.data_shapes]
            batch = mx.io.DataBatch(data, [])
            mod.forward(batch, is_train=False)
            for output in mod.get_outputs():
                output.wait_to_read()

        sym = get_fp32_residual()
        mod = Module(symbol=sym)
        batch_size = 4
        data_shape = (batch_size, 4, 10, 10)
        label_shape = (batch_size, 10)
        mod.bind(data_shapes=[('data', data_shape)], label_shapes=[('softmax_label', label_shape)])
        mod.init_params()
        arg_params, aux_params = mod.get_params()
        excluded_sym_names = []
        if mx.current_context() == mx.cpu():
            excluded_sym_names += ['fc']
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym,
                                                                         arg_params=arg_params,
                                                                         aux_params=aux_params,
                                                                         excluded_sym_names=excluded_sym_names,
                                                                         ctx=mx.current_context(),
                                                                         quantized_dtype=qdtype,
                                                                         calib_mode='none')
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_forward(qsym, qarg_params, qaux_params, data_shape, label_shape)

        calib_data = mx.nd.random.uniform(shape=data_shape)
        calib_data = NDArrayIter(data=calib_data)
        calib_data = DummyIter(calib_data)
        qsym, qarg_params, qaux_params = mx.contrib.quant.quantize_model(sym=sym,
                                                                         arg_params=arg_params,
                                                                         aux_params=aux_params,
                                                                         excluded_sym_names=excluded_sym_names,
                                                                         ctx=mx.current_context(),
                                                                         quantized_dtype=qdtype,
                                                                         calib_mode='naive',
                                                                         calib_data=calib_data,
                                                                         num_calib_examples=20)
        check_params(arg_params, qarg_params, qsym)
        check_params(aux_params, qaux_params)
        check_qsym_calibrated(qsym)
        check_qsym_qdtype(qsym, qdtype)
        check_qsym_forward(qsym, qarg_params, qaux_params, data_shape, label_shape)
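
get_fp32_residual is not defined in this listing. A sketch of a compatible FP32 graph, assuming only what the test visibly requires: a residual add, a pooling layer named 'pool0', a fully-connected layer named 'fc', and a softmax head over 10 classes:

def get_fp32_residual():
    # Sketch only: any graph with a residual add, a 'pool0' pooling layer,
    # an 'fc' fully-connected layer, and a softmax head would fit the test.
    data = mx.sym.Variable('data')
    conv0 = mx.sym.Convolution(data, kernel=(1, 1), pad=(0, 0), num_filter=4,
                               no_bias=True, name='conv0')
    bn = mx.sym.BatchNorm(conv0, fix_gamma=False, name='bn')
    act = mx.sym.Activation(bn + data, act_type='relu', name='relu0')
    pool0 = mx.sym.Pooling(act, kernel=(4, 4), pool_type='avg', name='pool0')
    fc = mx.sym.FullyConnected(pool0, num_hidden=10, flatten=True, name='fc')
    return mx.sym.SoftmaxOutput(fc, name='softmax')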
Example #13
class Solver(object):
    def __init__(
            self,
            symbol,
            data_names,
            label_names,
            data_shapes,
            label_shapes,
            logger=logging,
            context=mx.cpu(),
            work_load_list=None,
            fixed_param_names=None,
            allow_missing=False,
            # used when evaluating with folded BN, to create the eval symbol
            config=None):
        self.symbol = symbol
        self.data_names = data_names
        self.label_names = label_names
        self.data_shapes = data_shapes
        self.label_shapes = label_shapes
        self.context = context
        self.work_load_list = work_load_list
        self.fixed_param_names = fixed_param_names

        if logger is None:
            logger = logging.getLogger()
            logger.setLevel(logging.INFO)
        self.logger = logger
        self.module = Module(symbol=self.symbol,
                             data_names=self.data_names,
                             label_names=self.label_names,
                             logger=self.logger,
                             context=self.context,
                             work_load_list=self.work_load_list,
                             fixed_param_names=self.fixed_param_names)
        # for BN folding
        self.config = config

    def fit(self,
            train_data,
            eval_data=None,
            eval_metric='acc',
            validate_metric=None,
            work_load_list=None,
            epoch_end_callback=None,
            batch_end_callback=None,
            fixed_param_prefix=None,
            initializer=None,
            arg_params=None,
            aux_params=None,
            allow_missing=False,
            optimizer=None,
            optimizer_params=None,
            begin_epoch=0,
            num_epoch=None,
            kvstore='device'):

        self.module.bind(data_shapes=self.data_shapes,
                         label_shapes=self.label_shapes,
                         for_training=True)
        self.module.init_params(initializer=initializer,
                                arg_params=arg_params,
                                aux_params=aux_params,
                                allow_missing=allow_missing)
        self.module.init_optimizer(kvstore=kvstore,
                                   optimizer=optimizer,
                                   optimizer_params=optimizer_params)

        if validate_metric is None:
            validate_metric = eval_metric
        if not isinstance(eval_metric, metric.EvalMetric):
            eval_metric = metric.create(eval_metric)

        temp_count = 0

        # # test model size by saving params of model
        # arg_params, aux_params = self.module.get_params()
        # for callback in _as_list(epoch_end_callback):
        #     callback(0, self.symbol, arg_params, aux_params)
        # raise NotImplementedError

        # training loop
        for epoch in range(begin_epoch, num_epoch):

            train_time = AverageMeter()
            kvstore_sync_time = AverageMeter()
            get_data_time = AverageMeter()
            iter_total_time = AverageMeter()

            tic = time.time()
            eval_metric.reset()
            nbatch = 0
            data_iter = iter(train_data)
            end_of_batch = False
            next_data_batch = next(data_iter)
            while not end_of_batch:
                start_time = time.time()
                data_batch = next_data_batch

                self.module.forward(data_batch, is_train=True)
                self.module.backward()

                # ndarray.waitall()
                train_time.update(time.time() - start_time)

                self.module.update()

                # ndarray.waitall()
                kvstore_sync_time.update(time.time() - start_time)

                try:
                    next_data_batch = next(data_iter)
                except StopIteration:
                    end_of_batch = True

                # ndarray.waitall()
                get_data_time.update(time.time() - start_time)

                if isinstance(data_batch, list):
                    self.module.update_metric(eval_metric,
                                              [db.label for db in data_batch],
                                              pre_sliced=True)
                else:
                    self.module.update_metric(eval_metric, data_batch.label)

                # ndarray.waitall()
                iter_total_time.update(time.time() - start_time)

                if batch_end_callback is not None:
                    # batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
                    #                                  eval_metric=eval_metric,
                    #                                  locals=locals())

                    batch_end_params = BatchEndParam(
                        epoch=epoch,
                        nbatch=nbatch,
                        eval_metric=eval_metric,
                        locals=locals(),
                        rank=kvstore.rank,
                        total_iter=temp_count,
                        cur_data_time=get_data_time.val,
                        avg_data_time=get_data_time.avg,
                        cur_batch_time=train_time.val,
                        avg_batch_time=train_time.avg,
                        cur_kvstore_sync_time=kvstore_sync_time.val,
                        avg_kvstore_sync_time=kvstore_sync_time.avg,
                        cur_iter_total_time=iter_total_time.val,
                        avg_iter_total_time=iter_total_time.avg)
                    for callback in _as_list(batch_end_callback):
                        callback(batch_end_params)
                nbatch += 1
                temp_count += 1

            for name, val in eval_metric.get_name_value():
                self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
            toc = time.time()
            self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))

            arg_params, aux_params = self.module.get_params()
            self.module.set_params(arg_params, aux_params)

            if epoch_end_callback is not None and kvstore.rank == 0:
                for callback in _as_list(epoch_end_callback):
                    callback(epoch, self.symbol, arg_params, aux_params)
            if eval_data:
                if self.config.network == 'mobilenet_int8_foldbn':
                    # fold BN to create the inference symbol
                    total_params_path = "./model/%s-%04d.params" % (
                        self.config.model_prefix, epoch + 1)
                    # total_params_path = "./model/mobilenet_flodbn_0904/mobilenet_int8_flodbn_imagenet_retrain_80_pertensor-fold-0100.params"
                    # _, arg_params, aux_params = mx.model.load_checkpoint('./model/mobilenet_flodbn_0904/mobilenet_int8_flodbn_imagenet_retrain_80_pertensor-fold', 100)
                    import os
                    assert os.path.exists(
                        total_params_path
                    ), "please provide the correct total_params_path for foldbn eval"
                    eval_sym = eval(self.config.network)(
                        num_classes=self.config.num_classes,
                        quant_mod=self.config.quant_mod,
                        delay_quant=self.config.delay_quant,
                        is_weight_perchannel=self.config.is_weight_perchannel,
                        total_params_path=total_params_path,
                        quantize_flag=self.config.quantize_flag)
                    eval_module = Module(
                        symbol=eval_sym,
                        data_names=self.data_names,
                        label_names=self.label_names,
                        logger=self.logger,
                        context=self.context,
                        work_load_list=self.work_load_list,
                        fixed_param_names=self.fixed_param_names)
                    eval_module.bind(data_shapes=self.data_shapes,
                                     label_shapes=self.label_shapes,
                                     for_training=False)
                    eval_module.init_params(initializer=initializer,
                                            arg_params=arg_params,
                                            aux_params=aux_params)
                    res = eval_module.score(eval_data,
                                            validate_metric,
                                            score_end_callback=None,
                                            batch_end_callback=None,
                                            reset=True,
                                            epoch=epoch)
                else:
                    res = self.module.score(eval_data,
                                            validate_metric,
                                            score_end_callback=None,
                                            batch_end_callback=None,
                                            reset=True,
                                            epoch=epoch)
                for name, val in res:
                    self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name,
                                     val)

            train_data.reset()
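
This Solver times each training phase with an AverageMeter helper that is not part of the listing. A conventional implementation matching the .update()/.val/.avg usage above:

class AverageMeter(object):
    """Track the latest value and a running average (assumed helper)."""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count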