Code example #1
def test_sym_nnvm(batch_size, iter_num):
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    sym_file, param_file, ext_file = load_fname("mrt.all.quantize", True)
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    inputs_ext, _ = sim.load_ext(ext_file)
    val_data = dataset.load_voc(1, 512)
    val_data_iter = iter(val_data)
    data, _ = next(val_data_iter)

    if False:
        data = sim.load_real_data(data, 'data', inputs_ext)
        inputs_ext['data']['data'] = data
        spass.sym_dump_ops(sym,
                           params,
                           inputs_ext,
                           datadir="/data/wlt",
                           ctx=mx.gpu(1),
                           cleanDir=True,
                           ops=[
                               "broadcast_div0",
                           ])
    else:
        _mrt.std_dump(sym, params, inputs_ext, data, "ssd_ryt", max_num=100)
Code example #2
def test_sym_nnvm(batch_size=10):
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    data_iter = utils.load_dataset(batch_size)
    data = data_iter.next().data[0]

    _mrt.std_dump(sym, params, inputs_ext, data, "resnet" + version)
Code example #3
def load(model_name, datadir="./data"):
    # pylint: disable=unbalanced-tuple-unpacking
    sym_file, params_file, ext_file = \
        utils.extend_fname(path.join(datadir, model_name), True)
    mrt = MRT(Model.load(sym_file, params_file))
    mrt.old_names, mrt.th_dict, mrt.precs, mrt.scales = \
        sim.load_ext(ext_file)
    return mrt
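A minimal usage sketch for the load() helper above, assuming the .json/.params/.ext files produced by a previous quantization run already sit under ./data; the model name below is illustrative, not taken from the examples:

# Hypothetical call to the load() helper above; "resnet50_v1" is only an
# illustrative model name whose files are assumed to exist under ./data.
mrt = load("resnet50_v1", datadir="./data")
print(mrt.th_dict)   # calibration thresholds restored from the .ext file
print(mrt.precs)     # per-layer precisions restored alongside them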
Code example #4
def load_fname(prefix, suffix=None):
    suffix = "." + suffix if suffix is not None else ""
    load_prefix = prefix + suffix
    names = list(utils.extend_fname(load_prefix, True))
    names, ext_file = names[:-1], names[-1]
    (inputs_ext, ) = sim.load_ext(ext_file)
    dump_prefix = prefix + ".nnvm.compile"
    names.extend(utils.extend_fname(dump_prefix, False))
    return names, inputs_ext
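A hedged usage sketch for load_fname above: assuming utils.extend_fname(prefix, True) yields the .json, .params and .ext paths as in the other examples, the returned list carries the quantized model files followed by the NNVM compile targets. The prefix used here is illustrative.

# Hypothetical call; the prefix is illustrative and the .ext file is assumed to exist.
names, inputs_ext = load_fname("./data/resnet18_v1", "sym.quantize")
sym_file, param_file, nnvm_sym_file, nnvm_param_file = names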
Code example #5
def test_sym_nnvm(batch_size, iter_num):
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    sym_file, param_file, ext_file = load_fname("_darknet53_voc", "mrt.all.quantize", True)
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    inputs_ext, _ = sim.load_ext(ext_file)
    nnvm_sym, nnvm_params = spass.mxnet_to_nnvm(sym, params, inputs_ext)
    spass.cvm_build(nnvm_sym, nnvm_params, inputs_ext, *load_fname("_darknet53_voc", "nnvm"))
Code example #6
def test_sym_nnvm(batch_size, iter_num):
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    sym_file, param_file, ext_file = load_fname("_darknet53_voc",
                                                "all.quantize", True)
    dump_sym, dump_params = load_fname("_darknet53_voc", "all.nnvm.compile")
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    inputs_ext, _ = sim.load_ext(ext_file)
    spass.mxnet_to_nnvm(sym, params, inputs_ext, dump_sym, dump_params)
Code example #7
def test_nnvm_pass(iter_num=10):
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    (inputs_ext,) = sim.load_ext(dump_ext)
    data_iter = iter(val_loader)
    data, _ = next(data_iter)
    _mrt.std_dump(sym, params, inputs_ext, data, "cvm_mnist")
Code example #8
def test_sym_nnvm():
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    (inputs_ext,) = sim.load_ext(dump_ext)
    data_iter = ds.load_imagenet_rec(1)
    data = data_iter.next().data[0]

    _mrt.std_dump(sym, params, inputs_ext, data, "mobilenet"+version)
Code example #9
def test_sym_nnvm(batch_size=10, iter_num=10):
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    data_iter = ds.load_imagenet_rec(batch_size, 224)
    data = data_iter.next().data[0]

    _mrt.std_dump(sym, params, inputs_ext, data, "shufflenet", max_num=100)
Code example #10
def test_sym_pass(iter_num=10):
    inputs_ext = { 'data': {
            'shape': (batch_size, 1, 28, 28),
    } }
    inputs = [mx.sym.var(n) for n in inputs_ext]

    data_iter = iter(val_loader)
    def data_iter_func():
        return next(data_iter)
    data, _ = data_iter_func()

    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    def graph_func(data):
        return net1.forward(data.as_in_context(ctx))

    sym_file, param_file = load_fname(version)
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    if True:
        mrt = _mrt.MRT(sym, params, inputs_ext)
        mrt.set_data('data', data)
        mrt.calibrate(ctx=ctx)
        mrt.set_output_prec(8)
        qsym, qparams, inputs_ext = mrt.quantize()
    else:
        inputs_ext['data']['data'] = data
        th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=ctx)
        qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, th_dict)
        qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs, "cvm")
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sim.save_ext(dump_ext, inputs_ext)
    nd.save(dump_params, qparams)
    open(dump_sym, "w").write(qsym.tojson())

    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext,) = sim.load_ext(dump_ext)
    inputs = [mx.sym.var(n) for n in inputs_ext]
    net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
    def cvm_quantize(data):
        data = sim.load_real_data(data, 'data', inputs_ext)
        return net2.forward(data.as_in_context(ctx))

    utils.multi_eval_accuracy(graph_func, data_iter_func,
            cvm_quantize,
            iter_num=iter_num)
Code example #11
def test_sym_nnvm(batch_size=10, iter_num=10):
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    version = "v3"
    dump_sym, dump_params, dump_ext = load_fname(version, "mrt", True)
    # load_fname resolves to the following files:
    #   ./data/tf_inceptionv3.mrt.json
    #   ./data/tf_inceptionv3.mrt.params
    #   ./data/tf_inceptionv3.mrt.ext
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    data_iter = ds.load_imagenet_rec(batch_size, 299)
    data = data_iter.next().data[0]

    _mrt.std_dump(sym, params, inputs_ext, data, "inception_v3")
Code example #12
def test_sym_nnvm(batch_size=10, iter_num=10):
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    target = "llvm"
    tvm_ctx = tvm.context(target, 1)
    mx_ctx = mx.gpu(2)
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, 224, 224),
        }
    }
    inputs = [mx.sym.var(name) for name in inputs_ext]
    inputs_shape = {k: v['shape'] for k, v in inputs_ext.items()}

    data_iter = load_dataset(batch_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

    data_iter_func()

    version = ""
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    # sim.load_ins_ext(params, inputs_ext)

    # nnvm_sym, _ = nnvm.frontend.from_mxnet(sym)
    # with open('debug_nnvm_sym_after_load_from_mxnet.json', 'w') as fout:
    #    fout.write(nnvm_sym.debug_str())
    dump_sym, dump_params = load_fname(version, "nnvm.compile", False)
    spass.mxnet_to_nnvm(sym,
                        params,
                        inputs_ext,
                        dump_sym,
                        dump_params,
                        target='llvm')
Code example #13
def run_mx(modelname,
           batch_size=160,
           quantized=False,
           mx_check_point=None,
           float_dtype="float32",
           evaluate=False):
    input_size = model_input_size[modelname]
    suffix = ".mrt.quantize" if quantized else ""
    symbol_file = path.join(mx_dir, modelname + suffix + ".json")
    params_file = path.join(mx_dir, modelname + suffix + ".params")
    symbol = mx.sym.load(symbol_file)
    params = mx.nd.load(params_file)
    params = tpass.convert_params_dtype(params, dest_dtype=float_dtype)
    if mx_check_point is None:
        mx_check_point = sutils.topo_sort(symbol)[-1].attr('name')
    mx_data, mx_label = load_data_3(modelname,
                                    batch_size=batch_size,
                                    input_size=input_size,
                                    layout='NCHW',
                                    quantized=quantized)
    if quantized:
        ext_file = path.join(mx_dir, modelname + suffix + ".ext")
        _, _, _, scales = sim.load_ext(ext_file)
    else:
        op_names = tpass.collect_op_names(symbol, params)
        print(op_names)
        scales = None
    mx_outs = get_mxnet_outs(symbol, params, mx_data, mx_check_point)
    if scales and mx_check_point in scales:
        scale_factor = scales[mx_check_point]
        print('multiply scale factor: ', scale_factor)
        mx_outs /= scale_factor
    print('mxnet check with shape: ', mx_outs.shape)
    if evaluate:
        get_metric(mx_outs, mx_label)
    return mx_outs
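A possible invocation of run_mx, assuming the quantized .json/.params/.ext files live under mx_dir and the model name is a key of model_input_size (the name itself is illustrative):

# Hypothetical call; "resnet50_v1" stands in for any key of model_input_size.
mx_outs = run_mx("resnet50_v1", batch_size=16, quantized=True, evaluate=True)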
Code example #14
def test_sym_pass(batch_size=10, iter_num=10, quantize=True):
    logger = logging.getLogger("log.test.sym.pass")
    calib_ctx = mx.gpu(1)
    ctx = [mx.gpu(int(i)) for i in "1,2,3,4".split(',') if i.strip()]
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, 224, 224),
        }
    }
    inputs = [mx.sym.var(name) for name in inputs_ext]

    logger.info("load dataset, symbol and parameters")
    # load dataset and iter function
    data_iter = ds.load_imagenet_rec(batch_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

    data, _ = data_iter_func()

    # load original model for accuracy
    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()

    def shufflenet(data, label):
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    if quantize:
        # load original model
        sym_fname, param_fname = load_fname(version)
        sym, params = mx.sym.load(sym_fname), nd.load(param_fname)
        sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)

        # quantize process
        mrt = _mrt.MRT(sym, params, inputs_ext)  # initialize
        mrt.set_data('data', data)  # set input data
        mrt.calibrate(ctx=calib_ctx)  # calibration
        mrt.set_output_prec(8)  # set output prec, do nothing by default
        qsym, qparams, inputs_ext = mrt.quantize()  # quantization

        # dump quantized model
        dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize",
                                                     True)
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        open(dump_sym, "w").write(qsym.tojson())

    if False:
        # convert to cvm executor model
        inputs_ext['data']['shape'] = (1, 3, 224, 224)
        nnvm_sym, nnvm_params = spass.mxnet_to_nnvm(qsym, qparams, inputs_ext)
        spass.cvm_build(nnvm_sym, nnvm_params, inputs_ext,
                        *load_fname(version, "nnvm"))

    # load quantized model for accuracy
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    inputs = [mx.sym.var(n) for n in inputs_ext]
    net3 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)

    # net3 = mx.gluon.nn.SymbolBlock(qsym, inputs)
    # utils.load_parameters(net3, qparams, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()

    def cvm_quantize(data, label):
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net3.forward(d) for d in data]
        res = nd.concatenate(res)

        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    # compare accuracy between models
    utils.multi_validate(shufflenet,
                         data_iter_func,
                         cvm_quantize,
                         iter_num=iter_num,
                         logger=logger)
Code example #15
    # zoo.save_model('yolo3_darknet53_voc')
    # name = "yolo3_resnet18_v1_voc"
    # net = zoo.load_resnet18_v1_yolo()
    # sym = net(mx.sym.var('data'))
    # if isinstance(sym, tuple):
    #     sym = mx.sym.Group([*sym])
    # open("./data/%s.json"%name, "w").write(sym.tojson())
    # exit()

    if False:
        val_data = dataset.load_voc(1, 416)
        sym_file, param_file, ext_file = load_fname("_darknet53_voc",
                                                    "all.quantize", True)
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        inputs_ext, _ = sim.load_ext(ext_file)
        if False:
            for data, _ in val_data:
                data = sim.load_real_data(data, 'data', inputs_ext)
                inputs_ext['data']['data'] = data
                spass.sym_dump_ops(sym,
                                   params,
                                   inputs_ext,
                                   datadir="/data/wlt",
                                   ctx=mx.gpu(2))
        else:
            val_data_iter = iter(val_data)
            data, _ = next(val_data_iter)
            data = sim.load_real_data(data, 'data', inputs_ext)
            inputs_ext['data']['data'] = data
            spass.sym_dump_layer_outputs(sym,
Code example #16
def test_mrt_quant(batch_size=1, iter_num=10, from_scratch=0):
    logger = logging.getLogger("log.test.mrt.quantize")
    flag = [False]*from_scratch + [True]*(4-from_scratch)

    ctx = mx.gpu(4)
    qctx = mx.gpu(3)
    input_size = 416
    input_shape = (batch_size, 3, input_size, input_size)

    # define data iter function, get:
    # get_iter_func
    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)
    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    # split model, get:
    # base, base_params, top, top_params, top_inputs_ext 
    base, base_params, top, top_params, top_inputs_ext = \
            None, None, None, None, None
    if flag[0]:
        sym_file, param_file = load_fname("_darknet53_voc")
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        mrt = MRT(sym, params, input_shape)
        keys = [
          'yolov30_yolooutputv30_expand_dims0',
          'yolov30_yolooutputv31_expand_dims0',
          'yolov30_yolooutputv32_expand_dims0',
          'yolov30_yolooutputv30_tile0',
          'yolov30_yolooutputv31_tile0',
          'yolov30_yolooutputv32_tile0',
          'yolov30_yolooutputv30_broadcast_add1',
          'yolov30_yolooutputv31_broadcast_add1',
          'yolov30_yolooutputv32_broadcast_add1',
        ]
        base, base_params, top, top_params, top_inputs_ext \
                = split_model(mrt.csym, mrt.cprm, {'data': input_shape}, keys)
        dump_sym, dump_params = load_fname("_darknet53_voc", "mrt.base")
        open(dump_sym, "w").write(base.tojson())
        nd.save(dump_params, base_params)
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.top", True)
        open(dump_sym, "w").write(top.tojson())
        nd.save(dump_params, top_params)
        sim.save_ext(dump_ext, top_inputs_ext)
    else:
        dump_sym, dump_params = load_fname("_darknet53_voc", "mrt.base")
        base, base_params = mx.sym.load(dump_sym), nd.load(dump_params)
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.top", True)
        top, top_params = mx.sym.load(dump_sym), nd.load(dump_params)
        (top_inputs_ext,) = sim.load_ext(dump_ext)

    base_graph = mx.gluon.nn.SymbolBlock(base, [mx.sym.var('data')])
    nbase_params = convert_params_dtype(base_params, src_dtypes="float64",
            dest_dtype="float32")
    utils.load_parameters(base_graph, nbase_params, ctx=ctx)

    top_graph = mx.gluon.nn.SymbolBlock(top,
            [mx.sym.var(n) for n in top_inputs_ext])
    ntop_params = convert_params_dtype(top_params, src_dtypes="float64",
            dest_dtype="float32")
    utils.load_parameters(top_graph, ntop_params, ctx=ctx)

    # calibrate split model, get:
    # th_dict
    th_dict = None
    if flag[1]:
        mrt = MRT(base, base_params, input_shape)
        for i in range(1):
            data, _ = data_iter_func()
            mrt.set_data(data)
            mrt.calibrate(ctx=ctx)
        _, _, dump_ext = load_fname("_darknet53_voc", "mrt.dict", True)
        th_dict = mrt.th_dict
        sim.save_ext(dump_ext, th_dict)
    else:
        _, _, dump_ext = load_fname("_darknet53_voc", "mrt.dict", True)
        (th_dict,) = sim.load_ext(dump_ext)

    # quantize split model, get:
    # qbase, qbase_params, qbase_inputs_ext, oscales, maps
    qbase, qbase_params, qbase_inputs_ext, oscales, maps = \
            None, None, None, None, None
    if flag[2]:
        mrt = MRT(base, base_params, input_shape)
        mrt.set_th_dict(th_dict)
        mrt.set_threshold('data', 2.64)
        mrt.set_threshold('yolov30_yolooutputv30_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv31_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv32_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv30_tile0', 416)
        mrt.set_threshold('yolov30_yolooutputv31_tile0', 416)
        mrt.set_threshold('yolov30_yolooutputv32_tile0', 416)
        mrt.set_output_prec(30)

        qbase, qbase_params, qbase_inputs_ext = mrt.quantize()

        oscales = mrt.get_output_scales()
        maps = mrt.get_maps()
        dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc", "mrt.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        nd.save(dump_params, qbase_params)
        sim.save_ext(dump_ext, qbase_inputs_ext, oscales, maps)
    else:
        qb_sym, qb_params, qb_ext = load_fname("_darknet53_voc", "mrt.quantize", True)
        qbase, qbase_params = mx.sym.load(qb_sym), nd.load(qb_params)
        qbase_inputs_ext, oscales, maps = sim.load_ext(qb_ext)

    # merge quantized split model, get:
    # qsym, qparams, oscales2
    qsym, qparams = None, None
    if flag[3]:
        def box_nms(node, params, graph):
            name, op_name = node.attr('name'), node.attr('op_name')
            childs, attr = sutils.sym_iter(node.get_children()), node.list_attr()
            if op_name == '_contrib_box_nms':
                valid_thresh = sutils.get_attr(attr, 'valid_thresh', 0)
                attr['valid_thresh'] = int(valid_thresh * oscales[3])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            return node
        qsym, qparams = merge_model(qbase, qbase_params,
                top, top_params, maps, box_nms)
        oscales2 = [oscales[1], oscales[0], oscales[2]]
        sym_file, param_file, ext_file = \
                load_fname("_darknet53_voc", "mrt.all.quantize", True)
        open(sym_file, "w").write(qsym.tojson())
        nd.save(param_file, qparams)
        sim.save_ext(ext_file, qbase_inputs_ext, oscales2)
    else:
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.all.quantize", True)
        qsym, qparams = mx.sym.load(dump_sym), nd.load(dump_params)
        _, oscales2 = sim.load_ext(dump_ext)

    if False:
        compile_to_cvm(qsym, qparams, "yolo_tfm",
                datadir="/data/ryt", input_shape=(1, 3, 416, 416))
        exit()

    metric = dataset.load_voc_metric()
    metric.reset()
    def yolov3(data, label):
        def net(data):
            tmp = base_graph(data.as_in_context(ctx))
            outs = top_graph(*tmp)
            return outs
        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    net2 = mx.gluon.nn.SymbolBlock(qsym,
            [mx.sym.var(n) for n in qbase_inputs_ext])
    utils.load_parameters(net2, qparams, ctx=qctx)
    net2_metric = dataset.load_voc_metric()
    net2_metric.reset()
    def mrt_quantize(data, label):
        def net(data):
            data = sim.load_real_data(data, 'data', qbase_inputs_ext)
            outs = net2(data.astype("float64").as_in_context(qctx))
            outs = [o.as_in_context(ctx) / oscales2[i] \
                    for i, o in enumerate(outs)]
            return outs
        acc = validate_data(net, data, label, net2_metric)
        return "{:6.2%}".format(acc)

    utils.multi_validate(yolov3, data_iter_func,
            mrt_quantize,
            iter_num=iter_num, logger=logger)
Code example #17
def test_mrt_quant(batch_size=1, iter_num=10):
    logger = logging.getLogger("log.test.mrt.quantize")

    base_ctx = mx.gpu(1)
    ctx = mx.gpu(2)
    qctx = mx.gpu(3)
    input_size = 416
    h, w = input_size, input_size
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, h, w),
        }
    }

    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)

    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    if False:
        sym_file, param_file = load_fname("_darknet53_voc")
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
        keys = [
            'yolov30_yolooutputv30_expand_dims0',
            'yolov30_yolooutputv31_expand_dims0',
            'yolov30_yolooutputv32_expand_dims0',
            'yolov30_yolooutputv30_tile0',
            'yolov30_yolooutputv31_tile0',
            'yolov30_yolooutputv32_tile0',
            'yolov30_yolooutputv30_broadcast_add1',
            'yolov30_yolooutputv31_broadcast_add1',
            'yolov30_yolooutputv32_broadcast_add1',
        ]
        base, base_params, base_inputs_ext, top, top_params, top_inputs_ext \
                = split_model(sym, params, inputs_ext, keys, logger)
        dump_sym, dump_params = load_fname("_darknet53_voc", "mrt.base")
        open(dump_sym, "w").write(base.tojson())
        nd.save(dump_params, base_params)
        dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc",
                                                     "mrt.top", True)
        open(dump_sym, "w").write(top.tojson())
        nd.save(dump_params, top_params)
        sim.save_ext(dump_ext, top_inputs_ext)

    dump_sym, dump_params = load_fname("_darknet53_voc", "mrt.base")
    base, base_params = mx.sym.load(dump_sym), nd.load(dump_params)
    dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc", "mrt.top",
                                                 True)
    top, top_params = mx.sym.load(dump_sym), nd.load(dump_params)
    (top_inputs_ext, ) = sim.load_ext(dump_ext)

    base_inputs = [mx.sym.var(n) for n in inputs_ext]
    base_graph = mx.gluon.nn.SymbolBlock(base, base_inputs)
    utils.load_parameters(base_graph, base_params, ctx=ctx)

    top_inputs = [mx.sym.var(n) for n in top_inputs_ext]
    top_graph = mx.gluon.nn.SymbolBlock(top, top_inputs)
    utils.load_parameters(top_graph, top_params, ctx=ctx)

    metric = dataset.load_voc_metric()
    metric.reset()

    def yolov3(data, label):
        def net(data):
            tmp = base_graph(data.as_in_context(ctx))
            outs = top_graph(*tmp)
            # print ([o[0][0][:] for o in outs])
            return outs

        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    if False:
        mrt = _mrt.MRT(base, base_params, inputs_ext)
        for i in range(16):
            data, _ = data_iter_func()
            mrt.set_data('data', data)
            th_dict = mrt.calibrate(ctx=ctx)
        _, _, dump_ext = load_fname("_darknet53_voc", "mrt.dict", True)
        sim.save_ext(dump_ext, th_dict)

    _, _, dump_ext = load_fname("_darknet53_voc", "mrt.dict", True)
    (th_dict, ) = sim.load_ext(dump_ext)
    if True:
        mrt = _mrt.MRT(base, base_params, base_inputs_ext)
        mrt.set_th_dict(th_dict)
        mrt.set_threshold('data', 2.64)
        mrt.set_threshold('yolov30_yolooutputv30_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv31_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv32_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv30_tile0', 416)
        mrt.set_threshold('yolov30_yolooutputv31_tile0', 416)
        mrt.set_threshold('yolov30_yolooutputv32_tile0', 416)
        # mrt.set_fixed('yolov30_yolooutputv30_broadcast_add1')
        # mrt.set_fixed('yolov30_yolooutputv31_broadcast_add1')
        # mrt.set_fixed('yolov30_yolooutputv32_broadcast_add1')
        mrt.set_output_prec(30)
        qbase, qbase_params, qbase_inputs_ext = mrt.quantize()
        oscales = mrt.get_output_scales()
        dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc",
                                                     "mrt.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        nd.save(dump_params, qbase_params)
        sim.save_ext(dump_ext, qbase_inputs_ext, oscales)

    if True:
        dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc",
                                                     "mrt.quantize", True)
        net2_inputs_ext, oscales = sim.load_ext(dump_ext)
        inputs = [mx.sym.var(n) for n in net2_inputs_ext]
        net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=qctx)
        net2_metric = dataset.load_voc_metric()
        net2_metric.reset()

        def mrt_quantize(data, label):
            def net(data):
                data = sim.load_real_data(data, 'data', net2_inputs_ext)
                outs = net2(data.as_in_context(qctx))

                outs = [
                    o.as_in_context(ctx) / oscales[i]
                    for i, o in enumerate(outs)
                ]
                # outs = b2_graph(*data)
                outs = top_graph(*outs)
                return outs

            acc = validate_data(net, data, label, net2_metric)
            return "{:6.2%}".format(acc)

    utils.multi_validate(yolov3,
                         data_iter_func,
                         mrt_quantize,
                         iter_num=iter_num,
                         logger=logger)
Code example #18
def test_sym_pass(batch_size=10, iter_num=10):
    logger = logging.getLogger("log.test.sym.pass")

    base_ctx = mx.gpu(1)
    ctx = mx.gpu(2)
    input_size = 416
    h, w = input_size, input_size
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, h, w),
        }
    }

    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)

    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    sym_file, param_file = load_fname("_darknet53_voc")
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    if False:
        th_dict = {}
        for i in range(16):
            data, _ = data_iter_func()
            for k, v in inputs_ext.items():
                v['data'] = data
            th_dict = calib.sym_calibrate(sym,
                                          params,
                                          inputs_ext,
                                          old_ths=th_dict,
                                          ctx=ctx)
        _, _, dump_ext = load_fname("_darknet53_voc", "dict", True)
        sim.save_ext(dump_ext, th_dict)

    _, _, dump_ext = load_fname("_darknet53_voc", "dict", True)
    (th_dict, ) = sim.load_ext(dump_ext)
    inputs = [mx.sym.var(name) for name in inputs_ext]
    net1 = mx.gluon.nn.SymbolBlock(sym, inputs)
    utils.load_parameters(net1, params, ctx=ctx)
    metric = dataset.load_voc_metric()
    metric.reset()

    def yolov3(data, label):
        def net(data):
            out = net1(data.as_in_context(ctx))
            print([o[0][0][:] for o in out])
            return out

        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    keys = [
        'yolov30_yolooutputv30_conv0_fwd',
        'yolov30_yolooutputv31_conv0_fwd',
        'yolov30_yolooutputv32_conv0_fwd',
    ]
    base, base_params, base_inputs_ext, top, top_params, top_inputs_ext \
            = split_model(sym, params, inputs_ext, keys, logger)
    dump_sym, dump_params = load_fname("_darknet53_voc", "base")
    open(dump_sym, "w").write(base.tojson())
    dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc", "top", True)
    open(dump_sym, "w").write(top.tojson())
    nd.save(dump_params, top_params)
    sim.save_ext(dump_ext, top_inputs_ext)

    base_inputs = [mx.sym.var(n) for n in base_inputs_ext]
    base_graph = mx.gluon.nn.SymbolBlock(base, base_inputs)
    utils.load_parameters(base_graph, base_params, ctx=base_ctx)

    top_inputs = [mx.sym.var(n) for n in top_inputs_ext]
    top_graph = mx.gluon.nn.SymbolBlock(top, top_inputs)
    utils.load_parameters(top_graph, top_params, ctx=ctx)

    # quantize base graph
    if False:
        qbase, qbase_params, qbase_prec, base_oscales = calib.sym_simulate(
            base, base_params, base_inputs_ext, th_dict)
        qbase, qbase_params = calib.sym_realize(qbase, qbase_params,
                                                base_inputs_ext, qbase_prec)
        dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc",
                                                     "base.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        sim.save_ext(dump_ext, base_inputs_ext, base_oscales)
        nd.save(dump_params, qbase_params)

    if False:
        qb_sym, qb_params, qb_ext = load_fname("_darknet53_voc",
                                               "base.quantize", True)
        net2_inputs_ext, base_oscales = sim.load_ext(qb_ext)
        net2_inputs = [mx.sym.var(n) for n in net2_inputs_ext]
        net2 = utils.load_model(qb_sym, qb_params, net2_inputs, ctx=ctx)
        base_metric = dataset.load_voc_metric()
        base_metric.reset()

        def base_quantize(data, label):
            def net(data):
                data = sim.load_real_data(data, 'data', net2_inputs_ext)
                tmp = list(net2(data.as_in_context(ctx)))
                tmp = [t / base_oscales[i] for i, t in enumerate(tmp)]
                return top_graph(*tmp)

            acc = validate_data(net, data, label, base_metric)
            return "{:6.2%}".format(acc)

    # quantize top graph
    if False:
        in_bit, out_bit = 8, 30
        outputs_ext = {
            'yolov30_yolooutputv30_expand_dims0': {
                'threshold': 1,
                'type': 'score'
            },
            'yolov30_yolooutputv31_expand_dims0': {
                'threshold': 1,
                'type': 'score'
            },
            'yolov30_yolooutputv32_expand_dims0': {
                'threshold': 1,
                'type': 'score'
            },
            'yolov30_yolooutputv30_tile0': {
                'threshold': 416,
                'type': 'bbox'
            },
            'yolov30_yolooutputv31_tile0': {
                'threshold': 416,
                'type': 'bbox'
            },
            'yolov30_yolooutputv32_tile0': {
                'threshold': 416,
                'type': 'bbox'
            },
            'yolov30_yolooutputv30_broadcast_add1': {
                'fixed': True,
                'type': 'ids'
            },
            'yolov30_yolooutputv31_broadcast_add1': {
                'fixed': True,
                'type': 'ids'
            },
            'yolov30_yolooutputv32_broadcast_add1': {
                'fixed': True,
                'type': 'ids'
            },
        }
        qsym, qparams, type_ext = anno.mixed_precision(top,
                                                       top_params,
                                                       top_inputs_ext,
                                                       th_dict,
                                                       in_bit=in_bit,
                                                       out_bit=out_bit,
                                                       out_ext=outputs_ext,
                                                       runtime="cvm")
        out_scales = [type_ext['ids'], type_ext['score'], type_ext['bbox']]

        dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc",
                                                     "top.quantize", True)
        open(dump_sym, "w").write(qsym.tojson())
        sim.save_ext(dump_ext, top_inputs_ext, out_scales)
        nd.save(dump_params, qparams)

    if True:
        sym_file, param_file, ext_file = load_fname("_darknet53_voc",
                                                    "top.quantize", True)
        net3_inputs_ext, net3_scales = sim.load_ext(ext_file)
        top_sym = base_graph(mx.sym.Group(base_inputs))
        top_names = [c.attr('name') for c in top_sym]
        net3_inputs = [mx.sym.var(n) for n in net3_inputs_ext]
        net3 = utils.load_model(sym_file, param_file, net3_inputs, ctx=ctx)
        top_qmetric = dataset.load_voc_metric()
        top_qmetric.reset()

        def top_quantize(data, label):
            def net(data):
                tmp = base_graph(data.as_in_context(base_ctx))
                tmp = [t.as_in_context(ctx) for t in tmp]
                tmp = [
                    sim.load_real_data(tmp[i], n, net3_inputs_ext)
                    for i, n in enumerate(top_names)
                ]
                out = net3(*tmp)
                out = [(t / net3_scales[i]) for i, t in enumerate(out)]
                print([o[0][0][:] for o in out])
                return out

            acc = validate_data(net, data, label, top_qmetric)
            return "{:6.2%}".format(acc)

    # merge quantize model
    if False:
        qb_sym, qb_params, qb_ext = load_fname("_darknet53_voc",
                                               "base.quantize", True)
        qbase, qbase_params = mx.sym.load(qb_sym), nd.load(qb_params)
        qbase_inputs_ext, _ = sim.load_ext(qb_ext)
        qt_sym, qt_params, qt_ext = load_fname("_darknet53_voc",
                                               "top.quantize", True)
        qtop, qtop_params = mx.sym.load(qt_sym), nd.load(qt_params)
        _, out_scales = sim.load_ext(qt_ext)
        maps = dict(
            zip([c.attr('name') for c in qbase],
                [c.attr('name') for c in base]))
        qsym, qparams = merge_model(qbase, qbase_params, qbase_inputs_ext,
                                    qtop, qtop_params, maps)
        sym_file, param_file, ext_file = load_fname("_darknet53_voc",
                                                    "all.quantize", True)
        open(sym_file, "w").write(qsym.tojson())
        nd.save(param_file, qparams)
        sim.save_ext(ext_file, qbase_inputs_ext, out_scales)

    if False:
        sym_file, param_file, ext_file = load_fname("_darknet53_voc",
                                                    "all.quantize", True)
        net4_inputs_ext, net4_scales = sim.load_ext(ext_file)
        net4_inputs = [mx.sym.var(n) for n in net4_inputs_ext]
        net4 = utils.load_model(sym_file, param_file, net4_inputs, ctx=ctx)
        all_qmetric = dataset.load_voc_metric()
        all_qmetric.reset()

        def all_quantize(data, label):
            def net(data):
                data = sim.load_real_data(data, 'data', net4_inputs_ext)
                out = net4(data.as_in_context(ctx))
                out = [(t / net4_scales[i]) for i, t in enumerate(out)]
                return out

            acc = validate_data(net, data, label, all_qmetric)
            return "{:6.2%}".format(acc)

    if False:
        sym_file, param_file, ext_file = load_fname("_darknet53_voc",
                                                    "all.quantize", True)
        net4_inputs_ext, net4_scales = sim.load_ext(ext_file)
        datadir = "/data/voc/data/"
        for i in range(50):
            countdir = datadir + "/" + str(i)
            os.makedirs(countdir, exist_ok=True)
            data, label = data_iter_func()
            data = sim.load_real_data(data, 'data', net4_inputs_ext)
            np.save(countdir + "/data.npy", data.asnumpy().astype('int8'))
            np.save(countdir + "/label.npy", label.asnumpy())

        # data = sim.load_real_data(data, 'data', net4_inputs_ext)
        # np.save("/tmp/yolo/data", data.asnumpy().astype('int8'))
        # out = net4(data.as_in_context(ctx))
        # for i, o in enumerate(out):
        #    np.save("/tmp/yolo/result"+str(i), o.asnumpy().astype('int32'))
        exit()

    utils.multi_validate(
        yolov3,
        data_iter_func,
        top_quantize,
        # base_quantize, # top_quantize, all_quantize,
        iter_num=iter_num,
        logger=logger)
Code example #19
def test_sym_pass(batch_size=10, iter_num=10):
    logger = logging.getLogger("log.test.sym.pass")

    calib_ctx = mx.gpu(0)
    ctx = [mx.gpu(int(i)) for i in "1,2,3,4".split(',') if i.strip()]
    inputs_ext = { 'data': {
            'shape': (batch_size, 3, 224, 224),
    } }
    inputs = [mx.sym.var(name) for name in inputs_ext]

    logger.info("load dataset, symbol and parameters")
    data_iter = ds.load_imagenet_rec(batch_size)
    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]
    for i in range(10):
        if i == 3:
            break
        data, _ = data_iter_func()
    data_iter.reset()

    version = "19"
    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()
    def vgg(data, label):
        data = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0, even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    sym_fname, param_fname = load_fname(version)
    # debug leftovers disabled so the quantization path below actually runs
    # print(sym_fname, param_fname)
    # exit()
    sym, params = mx.sym.load(sym_fname), nd.load(param_fname)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    if True:
        mrt = _mrt.MRT(sym, params, inputs_ext)
        mrt.set_data('data', data)
        mrt.calibrate(ctx=calib_ctx)
        mrt.set_output_prec(8)
        qsym, qparams, inputs_ext = mrt.quantize()

        dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        open(dump_sym, "w").write(qsym.tojson())

    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext,) = sim.load_ext(dump_ext)
    net3 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()
    def cvm_quantize(data, label):
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0, even_split=False)
        res = [net3.forward(d) for d in data]
        res = nd.concatenate(res)
        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    utils.multi_validate(vgg, data_iter_func,
            cvm_quantize,
            iter_num=iter_num, logger=logger)
Code example #20
def test_mx_quantize(batch_size=10, iter_num=10):
    logger = logging.getLogger("log.test.mx.quantize")

    ctx = [mx.gpu(int(i)) for i in "1,3".split(',') if i.strip()]
    inputs_ext = { 'data': {
        'shape': (batch_size, 3, 224, 224),
    }}
    inputs = [mx.sym.var(n) for n in inputs_ext]

    data_iter = ds.load_imagenet_rec(batch_size)
    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]
    data, _ = data_iter_func()

    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()
    def mobilenet(data, label):
        data = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0, even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    calib_ctx = mx.gpu(1)
    sym_fname, param_fname = load_fname(version)
    sym, params = mx.sym.load(sym_fname), nd.load(param_fname)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    if True:
        if True:
            mrt = _mrt.MRT(sym, params, inputs_ext)
            mrt.set_data('data', data)
            mrt.calibrate()
            # [ 0.0008745864 0.03330660510427334 ] 0.6670066884888368 0.7753906
            # mrt.set_threshold("mobilenet0_dense0_weight", 0.67)
            # # [ -0.0036011334 0.054821780899052534 ] 1.100036751338784 1.4626989
            # mrt.set_threshold("mobilenet0_conv24_batchnorm24_fwd_weight", 1.1)
            # # [ 0.013243316 1.7543557133786065 ] 70.18747185088569 94.66275
            # mrt.set_threshold("mobilenet0_conv23_batchnorm23_fwd_weight", 35.10)
            # # [ -0.0016149869 0.05713169649243355 ] 1.1442489167675376 1.7122083
            # mrt.set_threshold("mobilenet0_conv20_batchnorm20_fwd_weight", 1.144)
            # # [ -0.0015804865 0.04523811489343643 ] 0.9063427844084799 1.0745146
            # mrt.set_threshold("mobilenet0_conv16_batchnorm16_fwd_weight", 0.90)
            # # [ 0.4315614 2.447332109723772 ] 49.37820360490254 63.959927
            # mrt.set_threshold("mobilenet0_conv2_batchnorm2_fwd", 49.37)
            # # [ 0.9770754 1.3392452512468611 ] 27.761980422905516 40.729546
            # mrt.set_threshold("mobilenet0_relu2_fwd", 27.76)
            # [ 1.0975745 1.0489919010632773 ] 22.077412493692915 23.784576
            # mrt.set_threshold("mobilenet0_relu4_fwd", 22.08)
            # # [ 0.9885562 2.360489403014386 ] 48.19834426651407 69.22121
            # mrt.set_threshold("mobilenet0_conv5_batchnorm5_fwd", 48.2)
            # # [ 0.7895588 1.0544661745870065 ] 21.878882319617176 30.95745
            # mrt.set_threshold("mobilenet0_relu17_fwd", 21.88)
            # # [ 0.8717863 1.0887600296120434 ] 22.646986888608513 28.265652
            # mrt.set_threshold("mobilenet0_relu19_fwd", 22.65)
            # # [ 0.35124516 0.6501711574631898 ] 13.354668314135012 20.770807
            # mrt.set_threshold("mobilenet0_relu20_fwd", 13.35)
            # # [ 0.9378179 1.110470714216975 ] 23.147232155910086 27.886068
            # mrt.set_threshold("mobilenet0_relu21_fwd", 23.15)
            # # [ 0.36263302 0.6352599878026505 ] 13.067832775738754 17.18809
            # mrt.set_threshold("mobilenet0_relu22_fwd", 13.07)
            # # [ 0.19875833 0.49999100821358816 ] 10.198578498193196 16.625143
            # mrt.set_threshold("mobilenet0_relu24_fwd", 10.2)
            # # [ 0.32357717 1.6308352606637138 ] 65.55698759215218 75.84912
            # mrt.set_threshold("mobilenet0_conv25_batchnorm25_fwd", 32.94)
            # # [ 0.36793178 1.512995992388044 ] 30.62785163096019 49.464615
            # mrt.set_threshold("mobilenet0_relu26_fwd", 30.63)
            # # [ 18.028658 38.61970520019531 ] 790.4227619171143 805.51886
            # mrt.set_threshold("sum0", 790.423)
            mrt.set_output_prec(8)
            qsym, qparams, inputs_ext = mrt.quantize()
        else:
            inputs_ext['data']['data'] = data
            th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=calib_ctx)
            qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, th_dict)
            qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs)
        dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        open(dump_sym, "w").write(qsym.tojson())

        dump_sym, dump_params = load_fname(version, "nnvm.compile")
        nnvm_sym, nnvm_params = spass.mxnet_to_nnvm(qsym, qparams, inputs_ext)
        spass.cvm_build(nnvm_sym, nnvm_params, inputs_ext, dump_sym, dump_params)

    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext,) = sim.load_ext(dump_ext)
    net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()
    def cvm_quantize(data, label):
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0, even_split=False)
        res = [net2.forward(d) for d in data]
        res = nd.concatenate(res)
        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    utils.multi_validate(mobilenet, data_iter_func,
            cvm_quantize,
            iter_num=iter_num, logger=logger)
Code example #21
def validate_model(sym_path, prm_path, ctx, num_channel=3,
                   input_size=224, batch_size=16, iter_num=10,
                   ds_name='imagenet', from_scratch=0, lambd=None,
                   dump_model=False, input_shape=None):
    from gluon_zoo import save_model

    flag = [False]*from_scratch + [True]*(2-from_scratch)
    model_name, _ = path.splitext(path.basename(sym_path))
    model_dir = path.dirname(sym_path)
    input_shape = input_shape if input_shape else \
                  (batch_size, num_channel, input_size, input_size)
    logger = logging.getLogger("log.validate.%s"%model_name)

    if not path.exists(sym_path) or not path.exists(prm_path):
        save_model(model_name)
    model = Model.load(sym_path, prm_path)
    model.prepare(input_shape)
    # model = init(model, input_shape)

    print(tpass.collect_op_names(model.symbol, model.params))

    data_iter_func = ds.data_iter(ds_name, batch_size, input_size=input_size)
    data, _ = data_iter_func()

    # prepare
    mrt = model.get_mrt()
    # mrt = MRT(model)

    # calibrate
    mrt.set_data(data)
    prefix = path.join(model_dir, model_name+'.mrt.dict')
    _, _, dump_ext = utils.extend_fname(prefix, True)
    if flag[0]:
        th_dict = mrt.calibrate(lambd=lambd)
        sim.save_ext(dump_ext, th_dict)
    else:
        (th_dict,) = sim.load_ext(dump_ext)
        mrt.set_th_dict(th_dict)

    mrt.set_input_prec(8)
    mrt.set_output_prec(8)

    if flag[1]:
        mrt.quantize()
        mrt.save(model_name+".mrt.quantize", datadir=model_dir)
    else:
        mrt = MRT.load(model_name+".mrt.quantize", datadir=model_dir)

    # dump model
    if dump_model:
        datadir = "/data/ryt"
        model_name = model_name + "_tfm"
        dump_shape = (1, num_channel, input_size, input_size)
        mrt.current_model.to_cvm(
            model_name, datadir=datadir, input_shape=input_shape)
        data = data[0].reshape(dump_shape)
        data = sim.load_real_data(
            data.astype("float64"), 'data', mrt.get_inputs_ext())
        np.save(datadir+"/"+model_name+"/data.npy", data.astype('int8').asnumpy())
        sys.exit(0)

    # validate
    org_model = load_model(Model.load(sym_path, prm_path), ctx)
    cvm_quantize = load_model(
            mrt.current_model, ctx,
            inputs_qext=mrt.get_inputs_ext())

    utils.multi_validate(org_model, data_iter_func, cvm_quantize,
                         iter_num=iter_num,
                         logger=logging.getLogger('mrt.validate'))
    logger.info("test %s finished.", model_name)
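A sketch of how validate_model might be driven, assuming an ImageNet-style classifier whose MXNet symbol and parameter files have already been exported; the paths and GPU context below are illustrative:

import mxnet as mx

# Hypothetical paths; if the files are missing, validate_model itself calls
# save_model() from gluon_zoo to export them first.
validate_model("./data/resnet50_v1.json", "./data/resnet50_v1.params",
               ctx=mx.gpu(0), batch_size=16, iter_num=10)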
Code example #22
def test_mrt_quant(batch_size=1, iter_num=10, from_scratch=0):
    logger = logging.getLogger("log.test.mrt.quantize")
    flag = [False] * from_scratch + [True] * (4 - from_scratch)

    ctx = mx.gpu(1)
    qctx = mx.gpu(3)
    input_size = 512
    input_shape = (batch_size, 3, input_size, input_size)

    # define data iter function, get:
    # get_iter_func
    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)

    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    # split model, get:
    # base, base_params, top, top_params, top_inputs_ext
    base, base_params, top, top_params, top_inputs_ext = \
            None, None, None, None, None
    if flag[0]:
        sym_file, param_file = load_fname()
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        # mrt = MRT(sym, params, input_shape)
        sym, params = tfm.init(sym, params, input_shape)
        keys = [
            "ssd0_multiperclassdecoder0_zeros_like0",
            # "ssd0_multiperclassdecoder0_concat0",
            # "ssd0_multiperclassdecoder0__mulscalar0",
            "ssd0_multiperclassdecoder0_slice_axis0",
            # "ssd0_multiperclassdecoder0_zeros_like1",
            "ssd0_normalizedboxcenterdecoder0_concat0",
        ]
        base, base_params, top, top_params, top_inputs_ext \
                = split_model(sym, params, {'data': input_shape}, keys)
        dump_sym, dump_params = load_fname("mrt.base")
        open(dump_sym, "w").write(base.tojson())
        nd.save(dump_params, base_params)
        dump_sym, dump_params, dump_ext = load_fname("mrt.top", True)
        open(dump_sym, "w").write(top.tojson())
        nd.save(dump_params, top_params)
        sim.save_ext(dump_ext, top_inputs_ext)
    else:
        dump_sym, dump_params = load_fname("mrt.base")
        base, base_params = mx.sym.load(dump_sym), nd.load(dump_params)
        dump_sym, dump_params, dump_ext = load_fname("mrt.top", True)
        top, top_params = mx.sym.load(dump_sym), nd.load(dump_params)
        (top_inputs_ext, ) = sim.load_ext(dump_ext)

    base_graph = mx.gluon.nn.SymbolBlock(base, [mx.sym.var('data')])
    nbase_params = convert_params_dtype(base_params,
                                        src_dtypes="float64",
                                        dest_dtype="float32")
    utils.load_parameters(base_graph, nbase_params, ctx=ctx)

    top_graph = mx.gluon.nn.SymbolBlock(
        top, [mx.sym.var(n) for n in top_inputs_ext])
    ntop_params = convert_params_dtype(top_params,
                                       src_dtypes="float64",
                                       dest_dtype="float32")
    utils.load_parameters(top_graph, ntop_params, ctx=ctx)

    # calibrate split model, get:
    # th_dict
    th_dict = None
    if flag[1]:
        mrt = MRT(base, base_params, input_shape)
        for i in range(1):
            data, _ = data_iter_func()
            mrt.set_data(data)
            th_dict = mrt.calibrate(ctx=ctx)
        mrt.save("mrt.dict")
    else:
        mrt = MRT.load("mrt.dict")

    # quantize split model, get:
    # qbase, qbase_params, qbase_inputs_ext, oscales, maps
    qbase, qbase_params, qbase_inputs_ext, oscales, maps = \
            None, None, None, None, None
    if flag[2]:
        # mrt = MRT(base, base_params, input_shape)
        # mrt.set_th_dict(th_dict)
        # mrt.set_threshold('data', 2.64)
        # mrt.set_fixed("ssd0_multiperclassdecoder0_concat0")
        # mrt.set_fixed("ssd0_multiperclassdecoder0__mulscalar0")
        # mrt.set_fixed("ssd0_multiperclassdecoder0_zeros_like1")
        mrt.set_threshold("ssd0_multiperclassdecoder0_slice_axis0", 1)
        # mrt.set_threshold("ssd0_normalizedboxcenterdecoder0_concat0", 512)
        mrt.set_output_prec(30)
        qbase, qbase_params, qbase_inputs_ext = mrt.quantize()
        oscales = mrt.get_output_scales()
        maps = mrt.get_maps()
        dump_sym, dump_params, dump_ext = load_fname("mrt.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        nd.save(dump_params, qbase_params)
        sim.save_ext(dump_ext, qbase_inputs_ext, oscales, maps)
    else:
        qb_sym, qb_params, qb_ext = load_fname("mrt.quantize", True)
        qbase, qbase_params = mx.sym.load(qb_sym), nd.load(qb_params)
        qbase_inputs_ext, oscales, maps = sim.load_ext(qb_ext)

    # merge quantized split model, get:
    # qsym, qparams, oscales2
    qsym, qparams = None, None
    if flag[3]:
        name_maps = {
            "ssd0_slice_axis41": "ssd0_multiperclassdecoder0_zeros_like0",
            "ssd0_slice_axis42": "ssd0_multiperclassdecoder0_slice_axis0",
            "ssd0_slice_axis43": "ssd0_normalizedboxcenterdecoder0_concat0",
        }
        oscales_dict = dict(zip([c.attr('name') for c in base], oscales))
        oscales2 = [oscales_dict[name_maps[c.attr('name')]] for c in top]

        def box_nms(node, params, graph):
            name, op_name = node.attr('name'), node.attr('op_name')
            childs, attr = sutils.sym_iter(
                node.get_children()), node.list_attr()
            if op_name == '_greater_scalar':
                valid_thresh = sutils.get_attr(attr, 'scalar', 0)
                attr['scalar'] = int(valid_thresh * oscales[1])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            elif op_name == '_contrib_box_nms':
                valid_thresh = sutils.get_attr(attr, 'valid_thresh', 0)
                attr['valid_thresh'] = int(valid_thresh * oscales[1])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            return node

        qsym, qparams = merge_model(qbase, qbase_params, top, top_params, maps,
                                    box_nms)
        sym_file, param_file, ext_file = load_fname("mrt.all.quantize", True)
        open(sym_file, "w").write(qsym.tojson())
        nd.save(param_file, qparams)
        sim.save_ext(ext_file, qbase_inputs_ext, oscales2)
    else:
        dump_sym, dump_params, dump_ext = load_fname("mrt.all.quantize", True)
        qsym, qparams = mx.sym.load(dump_sym), nd.load(dump_params)
        _, oscales2 = sim.load_ext(dump_ext)

    if False:
        dump_shape = (1, 3, input_size, input_size)
        compile_to_cvm(qsym,
                       qparams,
                       "ssd_tfm",
                       datadir="/data/ryt",
                       input_shape=dump_shape)
        exit()

    metric = dataset.load_voc_metric()
    metric.reset()

    def yolov3(data, label):
        def net(data):
            tmp = base_graph(data.as_in_context(ctx))
            outs = top_graph(*tmp)
            return outs

        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    net2 = mx.gluon.nn.SymbolBlock(qsym,
                                   [mx.sym.var(n) for n in qbase_inputs_ext])
    nqparams = convert_params_dtype(qparams,
                                    src_dtypes="float64",
                                    dest_dtype="float32")
    utils.load_parameters(net2, nqparams, ctx=qctx)
    net2_metric = dataset.load_voc_metric()
    net2_metric.reset()

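    # quantized inference path: feed integer-domain data, then divide each
    # output by its scale to recover real-valued predictions for the metric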
    def mrt_quantize(data, label):
        def net(data):
            data = sim.load_real_data(data, 'data', qbase_inputs_ext)
            outs = net2(data.as_in_context(qctx))
            outs = [
                o.as_in_context(ctx) / oscales2[i]
                for i, o in enumerate(outs)
            ]
            return outs

        acc = validate_data(net, data, label, net2_metric)
        return "{:6.2%}".format(acc)

    utils.multi_validate(yolov3,
                         data_iter_func,
                         mrt_quantize,
                         iter_num=iter_num,
                         logger=logger)
Code example #23
def test_sym_pass(batch_size=10, iter_num=10):
    logger = logging.getLogger("log.test.sym.pass")

    version = ""
    sym_fname, param_fname = load_fname(version)
    sym, params = mx.sym.load(sym_fname), nd.load(param_fname)
    params = {k.split(':')[1]: v for k, v in params.items()}

    calib_ctx = mx.gpu(2)
    ctx = [mx.gpu(int(i)) for i in "1,2,3,4,5,6,7".split(',') if i.strip()]
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, 224, 224),
        }
    }
    inputs = [mx.sym.var(name) for name in inputs_ext]

    logger.info("load dataset, symbol and parameters")

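    # walk the graph in topological order and truncate it at the op
    # named 'classifier'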
    order = sutils.topo_sort(sym)
    for op_head in order:
        if op_head.attr('name') == 'classifier':
            break
    sym = op_head
    net = mx.gluon.nn.SymbolBlock(sym, inputs)
    load_parameters(net, params, ctx=ctx)

    data_iter = ds.load_imagenet_rec(batch_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

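    # draw a few batches, keep the third one as calibration data, then rewind
    # the record iterator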
    for i in range(10):
        if i == 3:
            break
        data, _ = data_iter_func()
    data_iter.reset()

    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()

    def resnet(data, label):
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

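    # simulate quantization on the calibration batch, realize it for the CVM
    # target, and dump symbol, params and input-ext files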
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, data,
                                                 calib_ctx)
    qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs, "cvm")
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sim.save_ext(dump_ext, inputs_ext)
    nd.save(dump_params, qparams)
    open(dump_sym, "w").write(qsym.tojson())

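    # reload the dumped quantized model and measure its top-1/top-5 accuracy
    # alongside the float baseline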
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    net3 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()

    def cvm_quantize(data, label):
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net3.forward(d) for d in data]
        res = nd.concatenate(res)
        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    utils.multi_validate(resnet,
                         data_iter_func,
                         cvm_quantize,
                         iter_num=iter_num,
                         logger=logger)
Code example #24
def test_sym_pass(batch_size=10, iter_num=10, quantize=True):

    logger = logging.getLogger("log.test.sym.pass")

    calib_ctx = mx.gpu(2)
    ctx = [mx.gpu(int(i)) for i in "1,2,3,4".split(',') if i.strip()]
    input_size = 299
    version = "v3"
    h, w = input_size, input_size
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, h, w),
        }
    }
    inputs = [mx.sym.var(name) for name in inputs_ext]

    logger.info("load dataset, symbol and parameters")
    data_iter = ds.load_imagenet_rec(batch_size, input_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()

    def inception_v3(data, label):
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

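    # quantize via the MRT API (active branch) or the older calib.sym_* flow,
    # then dump symbol, params and input-ext files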
    if quantize:
        sym_file, param_file = load_fname(version)
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
        data, _ = data_iter_func()
        if True:
            dump_sym, dump_params, dump_ext = load_fname(version, "mrt", True)
            mrt = _mrt.MRT(sym, params, inputs_ext)
            mrt.set_data('data', data)
            mrt.calibrate(ctx=calib_ctx)
            mrt.set_output_prec(8)
            qsym, qparams, inputs_ext = mrt.quantize()
        else:
            dump_sym, dump_params, dump_ext = load_fname(
                version, "sym.quantize", True)
            inputs_ext['data']['data'] = data
            th_dict = calib.sym_calibrate(sym,
                                          params,
                                          inputs_ext,
                                          ctx=calib_ctx)
            qsym, qparams, precs, _ = calib.sym_simulate(
                sym, params, inputs_ext, th_dict)
            qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs)
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        open(dump_sym, "w").write(qsym.tojson())

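    # reload the MRT-quantized model and evaluate it with the same metrics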
    dump_sym, dump_params, dump_ext = load_fname(version, "mrt", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()

    def cvm_quantize(data, label):
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net2.forward(d) for d in data]
        res = nd.concatenate(res)
        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    utils.multi_validate(inception_v3,
                         data_iter_func,
                         cvm_quantize,
                         iter_num=iter_num,
                         logger=logger)
Code example #25
    _, top5 = acc_top5.get()
    return "top1={:6.2%} top5={:6.2%}".format(top1, top5)


# sym, params = mx.sym.load(sym_file), nd.load(param_file)
# sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
# qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, data, ctx)
# qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs, "cvm")
# dump_sym, dump_params, dump_ext = load_fname("", "sym.quantize", True)
# sim.save_ext(dump_ext, inputs_ext)
# nd.save(dump_params, qparams)
# open(dump_sym, "w").write(qsym.tojson())

dump_sym, dump_params, dump_ext = load_fname("", "sym.quantize", True)
sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
(inputs_ext, ) = sim.load_ext(dump_ext)
if True:
    _mrt.std_dump(sym, params, inputs_ext, data, "alexnet", is_mxnet=True)
    exit()
inputs = [mx.sym.var(n) for n in inputs_ext]
net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
qacc_top1 = mx.metric.Accuracy()
qacc_top5 = mx.metric.TopKAccuracy(5)
qacc_top1.reset()
qacc_top5.reset()


def cvm_quantize(data, label):
    data = sim.load_real_data(data, 'data', inputs_ext)
    data = gluon.utils.split_and_load(data,
                                      ctx_list=ctx,
                                      batch_axis=0,
                                      even_split=False)
    res = [net2.forward(d) for d in data]
    res = nd.concatenate(res)
    qacc_top1.update(label, res)
    _, top1 = qacc_top1.get()
    qacc_top5.update(label, res)
    _, top5 = qacc_top5.get()
    return "top1={:6.2%} top5={:6.2%}".format(top1, top5)
Code example #26
def test_mrt_quant(batch_size=1, iter_num=10):
    logger = logging.getLogger("log.test.mrt.quantize")

    ctx = mx.gpu(1)
    qctx = mx.gpu(3)
    input_size = 512
    h, w = input_size, input_size
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, h, w),
        }
    }

    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)

    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    sym_file, param_file = load_fname()
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    keys = [
        "ssd0_multiperclassdecoder0_concat0",
        "ssd0_multiperclassdecoder0__mulscalar0",
        "ssd0_multiperclassdecoder0_slice_axis0",
        "ssd0_multiperclassdecoder0_zeros_like1",
        "ssd0_normalizedboxcenterdecoder0_concat0",
    ]
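    # split the SSD graph at the listed decoder nodes: the base below them is
    # quantized, the top (decoder + NMS) stays in floating point and is
    # merged back afterwards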
    base, base_params, base_inputs_ext, top, top_params, top_inputs_ext \
            = _mrt.split_model(sym, params, inputs_ext, keys)
    dump_sym, dump_params = load_fname("mrt.base")
    open(dump_sym, "w").write(base.tojson())
    nd.save(dump_params, base_params)
    dump_sym, dump_params, dump_ext = load_fname("mrt.top", True)
    open(dump_sym, "w").write(top.tojson())
    nd.save(dump_params, top_params)
    sim.save_ext(dump_ext, top_inputs_ext)

    dump_sym, dump_params = load_fname("mrt.base")
    base, base_params = mx.sym.load(dump_sym), nd.load(dump_params)
    dump_sym, dump_params, dump_ext = load_fname("mrt.top", True)
    top, top_params = mx.sym.load(dump_sym), nd.load(dump_params)
    (top_inputs_ext, ) = sim.load_ext(dump_ext)

    base_inputs = [mx.sym.var(n) for n in inputs_ext]
    base_graph = mx.gluon.nn.SymbolBlock(base, base_inputs)
    utils.load_parameters(base_graph, base_params, ctx=ctx)

    top_inputs = [mx.sym.var(n) for n in top_inputs_ext]
    top_graph = mx.gluon.nn.SymbolBlock(top, top_inputs)
    utils.load_parameters(top_graph, top_params, ctx=ctx)

    metric = dataset.load_voc_metric()
    metric.reset()

    def yolov3(data, label):
        def net(data):
            tmp = base_graph(data.as_in_context(ctx))
            outs = top_graph(*tmp)
            return outs

        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    # utils.multi_validate(yolov3, data_iter_func,
    # iter_num=iter_num, logger=logger)
    # exit()

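    # calibrate the base graph on 16 batches to collect per-layer thresholds,
    # then save the threshold dict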
    if True:
        mrt = _mrt.MRT(base, base_params, inputs_ext)
        for i in range(16):
            data, _ = data_iter_func()
            mrt.set_data('data', data)
            th_dict = mrt.calibrate(ctx=ctx)
        _, _, dump_ext = load_fname("mrt.dict", True)
        sim.save_ext(dump_ext, th_dict)

    _, _, dump_ext = load_fname("mrt.dict", True)
    (th_dict, ) = sim.load_ext(dump_ext)
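    # quantize the calibrated base graph: mark the decoder helper outputs as
    # fixed, set explicit thresholds for the data input and the class-score
    # slice, and allow up to 30-bit outputs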
    if True:
        mrt = _mrt.MRT(base, base_params, base_inputs_ext)
        mrt.set_th_dict(th_dict)
        mrt.set_threshold('data', 2.64)
        mrt.set_fixed("ssd0_multiperclassdecoder0_concat0")
        mrt.set_fixed("ssd0_multiperclassdecoder0__mulscalar0")
        mrt.set_fixed("ssd0_multiperclassdecoder0_zeros_like1")
        mrt.set_threshold("ssd0_multiperclassdecoder0_slice_axis0", 1)
        #  mrt.set_threshold("ssd0_normalizedboxcenterdecoder0_concat0", 512)
        mrt.set_output_prec(30)
        qbase, qbase_params, qbase_inputs_ext = mrt.quantize()
        oscales = mrt.get_output_scales()
        maps = mrt.get_maps()
        dump_sym, dump_params, dump_ext = load_fname("mrt.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        nd.save(dump_params, qbase_params)
        sim.save_ext(dump_ext, qbase_inputs_ext, oscales, maps)

    # merge the quantized base model with the floating-point top graph
    if True:
        qb_sym, qb_params, qb_ext = load_fname("mrt.quantize", True)
        qbase, qbase_params = mx.sym.load(qb_sym), nd.load(qb_params)
        qbase_inputs_ext, oscales, maps = sim.load_ext(qb_ext)

        name_maps = {
            "ssd0_slice_axis41": "ssd0_multiperclassdecoder0_concat0",
            "ssd0_slice_axis42": "ssd0_multiperclassdecoder0_slice_axis0",
            "ssd0_slice_axis43": "ssd0_normalizedboxcenterdecoder0_concat0",
        }
        oscales_dict = dict(zip([c.attr('name') for c in base], oscales))
        oscales = [oscales_dict[name_maps[c.attr('name')]] for c in top]

        def box_nms(node, params, graph):
            name, op_name = node.attr('name'), node.attr('op_name')
            childs, attr = sutils.sym_iter(
                node.get_children()), node.list_attr()
            if op_name == '_greater_scalar':
                valid_thresh = sutils.get_attr(attr, 'scalar', 0)
                attr['scalar'] = int(valid_thresh * oscales[1])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            elif op_name == '_contrib_box_nms':
                valid_thresh = sutils.get_attr(attr, 'valid_thresh', 0)
                attr['valid_thresh'] = int(valid_thresh * oscales[1])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            return node

        qsym, qparams = _mrt.merge_model(qbase, qbase_params, top, top_params,
                                         maps, box_nms)
        sym_file, param_file, ext_file = load_fname("mrt.all.quantize", True)
        open(sym_file, "w").write(qsym.tojson())
        nd.save(param_file, qparams)
        sim.save_ext(ext_file, qbase_inputs_ext, oscales)

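    # reload the merged quantized model and validate it against the float
    # base+top pipeline on the VOC metric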
    if True:
        dump_sym, dump_params, dump_ext = load_fname("mrt.all.quantize", True)
        net2_inputs_ext, oscales = sim.load_ext(dump_ext)
        inputs = [mx.sym.var(n) for n in net2_inputs_ext]
        net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=qctx)
        net2_metric = dataset.load_voc_metric()
        net2_metric.reset()

        def mrt_quantize(data, label):
            def net(data):
                data = sim.load_real_data(data, 'data', net2_inputs_ext)
                outs = net2(data.as_in_context(qctx))
                outs = [
                    o.as_in_context(ctx) / oscales[i]
                    for i, o in enumerate(outs)
                ]
                return outs

            acc = validate_data(net, data, label, net2_metric)
            return "{:6.2%}".format(acc)

    utils.multi_validate(yolov3,
                         data_iter_func,
                         mrt_quantize,
                         iter_num=iter_num,
                         logger=logger)