Ejemplo n.º 1
0
    def load_parameters(uuid_map=None,
                        parameters_file=None,
                        variable_constants_file=None,
                        mxnet_constants_file=None,
                        context=None,
                        dtype=None,
                        current_params=None):
        """
        Loads back a set of InferenceParameters from files.

        :param uuid_map: optional mapping applied to every saved UUID key;
            when given, saved keys are translated through it so they attach to
            the new model's variables.
        :param parameters_file: These are the parameters of the previous inference algorithm.  These are in a {uuid: mx.nd.array} mapping.
        :type parameters_file: file saved down with mx.nd.save(), so a {uuid: mx.nd.array} mapping saved in a binary format.
        :param mxnet_constants_file: These are the constants in mxnet format from the previous inference algorithm. These are in a {uuid: mx.nd.array} mapping.
        :type mxnet_constants_file: file saved down with mx.nd.save(), so a {uuid: mx.nd.array} mapping saved in a binary format.
        :param variable_constants_file: These are the constants in primitive format from the previous inference algorithm.
        :type variable_constants_file: json dict of {uuid: constant_primitive}
        :param context: context passed through to the new InferenceParameters.
        :param dtype: dtype passed through to the new InferenceParameters.
        :param current_params: optional existing parameters merged into the
            new ParameterDict before the loaded ones are initialised.
        """
        # Translate a saved key through uuid_map when one was supplied;
        # otherwise keep the key as-is.
        def with_uuid_map(item, uuid_map):
            if uuid_map is not None:
                return uuid_map[item]
            else:
                return item

        ip = InferenceParameters(context=context, dtype=dtype)

        if parameters_file is not None:
            old_params = ndarray.load(parameters_file)
            # Re-key the loaded arrays onto the new model's UUIDs.
            mapped_params = {
                with_uuid_map(k, uuid_map): v
                for k, v in old_params.items()
            }

            new_paramdict = ParameterDict()
            if current_params is not None:
                new_paramdict.update(current_params)

            # Do this because we need to map the uuids to the new Model
            # before loading them into the ParamDict
            for name, mapped_param in mapped_params.items():
                new_paramdict[name]._load_init(mapped_param, ip.mxnet_context)
            ip._params = new_paramdict

        new_mxnet_constants = {}
        new_variable_constants = {}
        if variable_constants_file is not None:
            import json
            with open(variable_constants_file) as f:
                old_constants = json.load(f)
                new_variable_constants = {
                    with_uuid_map(k, uuid_map): v
                    for k, v in old_constants.items()
                }
        if mxnet_constants_file is not None:
            new_mxnet_constants = {
                with_uuid_map(k, uuid_map): v
                for k, v in ndarray.load(mxnet_constants_file).items()
            }
        # Primitive and mxnet-format constants share one dict on the result.
        ip._constants = {}
        ip._constants.update(new_variable_constants)
        ip._constants.update(new_mxnet_constants)
        return ip
Ejemplo n.º 2
0
def get_conv_names(modelname):
    """Return (weight_name, conv_name) pairs for the model's convolutions
    whose weight is listed in neither '<model>_mid.ini' (thresholds) nor
    '<model>_normal.ini'.

    Fixes: removed the unused ``conv_name_dct_old`` local and stopped
    shadowing the loaded symbol with the topo-sort loop variable.
    """
    # name -> threshold, parsed from "name: value," lines.
    weight_thresh = {}
    mid_file = path.expanduser('~/tvm-cvm/cvm/models/' + modelname + '_mid.ini')
    for key, val in [line.split(':') for line in load_file(mid_file)]:
        weight_thresh[key.strip()] = float(val.replace(',', '').strip())

    normal_file = path.expanduser('~/tvm-cvm/cvm/models/' + modelname +
                                  '_normal.ini')
    weight_normal = [entry.strip() for entry in load_file(normal_file)]

    sym_file = path.expanduser('~/tvm-cvm/data/' + modelname + '.prepare.json')
    params_file = path.expanduser('~/tvm-cvm/data/' + modelname +
                                  '.prepare.params')
    graph = mx.sym.load(sym_file)
    params = nd.load(params_file)
    weight_2_conv = []
    for node in sutils.topo_sort(graph, params):
        if node.attr('op_name') == 'Convolution':
            # Second child of a Convolution node is its weight symbol.
            wname = sutils.sym_iter(node.get_children())[1].attr('name')
            if wname in weight_thresh or wname in weight_normal:
                continue
            weight_2_conv.append((wname, node.attr('name')))
    return weight_2_conv
Ejemplo n.º 3
0
def resnet18_v1b_89(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
    """Constructs a ResNetV1b-18_2.6x model. Uses resnet18_v1b construction from resnetv1b.py

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    # Shared tag for both the shape-description json and the weight file.
    tag_name = 'resnet%d_v%db_%.1fx' % (18, 1, 2.6)
    model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], name_prefix='resnetv1b_', **kwargs)
    json_filename = os.path.join(os.path.dirname(__file__), tag_name + ".json")
    with open(json_filename, "r") as jsonFile:
        params_shapes = json.load(jsonFile)
    if pretrained:
        from ..model_store import get_model_file
        params_file = get_model_file(tag_name, tag=pretrained, root=root)
        prune_gluon_block(model, model.name, params_shapes,
                          params=ndarray.load(params_file),
                          pretrained=True, ctx=ctx)
        # Attach ImageNet-1k class metadata to the pretrained model.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    else:
        prune_gluon_block(model, model.name, params_shapes,
                          params=None, pretrained=False, ctx=ctx)
    return model
Ejemplo n.º 4
0
def load_data_3(modelname,
                input_size=224,
                batch_size=1,
                layout='NHWC',
                quantized=False):
    """Load the saved data/label ndarrays for ``modelname`` from ~/data.

    Returns (data, label) as numpy arrays; data is transposed NCHW -> NHWC
    when ``layout == 'NHWC'``.
    """
    data_suffix = '_qdata' if quantized else '_data'
    data = nd.load(path.expanduser('~/data/' + modelname +
                                   data_suffix))[0].asnumpy()
    label = nd.load(path.expanduser('~/data/' + modelname +
                                    '_label'))[0].asnumpy()
    if layout == 'NHWC':
        data = np.transpose(data, axes=[0, 2, 3, 1])
    return data, label
Ejemplo n.º 5
0
def test_sym_nnvm(batch_size, iter_num):
    """Dump the quantized SSD model ("mrt.all.quantize") for CVM using one
    VOC batch; a disabled debug branch dumps per-op outputs instead."""
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    sym_file, param_file, ext_file = load_fname("mrt.all.quantize", True)
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    inputs_ext, _ = sim.load_ext(ext_file)
    # Single VOC batch (batch size 1, 512x512) as the dump sample.
    val_data = dataset.load_voc(1, 512)
    val_data_iter = iter(val_data)
    data, _ = next(val_data_iter)

    # Debug toggle: flip to True to dump outputs of selected ops
    # (here "broadcast_div0") instead of the standard model dump.
    if False:
        data = sim.load_real_data(data, 'data', inputs_ext)
        inputs_ext['data']['data'] = data
        spass.sym_dump_ops(sym,
                           params,
                           inputs_ext,
                           datadir="/data/wlt",
                           ctx=mx.gpu(1),
                           cleanDir=True,
                           ops=[
                               "broadcast_div0",
                           ])
    else:
        _mrt.std_dump(sym, params, inputs_ext, data, "ssd_ryt", max_num=100)
Ejemplo n.º 6
0
def test_sym_nnvm(batch_size=10):
    """Dump the quantized resnet model with one dataset batch."""
    sym_path, param_path, ext_path = load_fname(version, "sym.quantize", True)
    graph = mx.sym.load(sym_path)
    weights = nd.load(param_path)
    (inputs_ext, ) = sim.load_ext(ext_path)
    batch = utils.load_dataset(batch_size).next()

    _mrt.std_dump(graph, weights, inputs_ext, batch.data[0], "resnet" + version)
Ejemplo n.º 7
0
 def load_vgg_encoder_params(self):
     """load from vggbn16 params,initialize the vgg conv1 for alpha zero"""
     pretrained = ndarray.load(get_model_file('vgg%d%s' % (16, '_bn')))
     own_params = self._collect_params_with_prefix()
     # Copy every pretrained entry whose name exists in this network.
     for key, value in pretrained.items():
         if key in own_params:
             own_params[key]._load_init(value)
Ejemplo n.º 8
0
def test_sym_nnvm(batch_size, iter_num):
    """Convert the quantized darknet53-voc model to NNVM and build for CVM."""
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    sym_path, param_path, ext_path = load_fname("_darknet53_voc", "mrt.all.quantize", True)
    graph = mx.sym.load(sym_path)
    weights = nd.load(param_path)
    inputs_ext, _ = sim.load_ext(ext_path)
    nnvm_sym, nnvm_params = spass.mxnet_to_nnvm(graph, weights, inputs_ext)
    spass.cvm_build(nnvm_sym, nnvm_params, inputs_ext, *load_fname("_darknet53_voc", "nnvm"))
Ejemplo n.º 9
0
def test_sym_nnvm(batch_size, iter_num):
    """Compile the quantized darknet53-voc model to an NNVM artifact on disk."""
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    sym_path, param_path, ext_path = load_fname("_darknet53_voc",
                                                "all.quantize", True)
    out_sym, out_params = load_fname("_darknet53_voc", "all.nnvm.compile")
    graph = mx.sym.load(sym_path)
    weights = nd.load(param_path)
    inputs_ext, _ = sim.load_ext(ext_path)
    spass.mxnet_to_nnvm(graph, weights, inputs_ext, out_sym, out_params)
Ejemplo n.º 10
0
def test_sym_nnvm(batch_size=10, iter_num=10):
    """Dump the quantized shufflenet model (up to 100 samples)."""
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    sym_path, param_path, ext_path = load_fname(version, "sym.quantize", True)
    graph = mx.sym.load(sym_path)
    weights = nd.load(param_path)
    (inputs_ext, ) = sim.load_ext(ext_path)
    sample = ds.load_imagenet_rec(batch_size, 224).next().data[0]

    _mrt.std_dump(graph, weights, inputs_ext, sample, "shufflenet", max_num=100)
Ejemplo n.º 11
0
def pull(url, is_lambda=True):
    """Download the object named by ``url``'s basename (sans extension) from
    the 'ps-lambda-mxnet' S3 bucket and return it loaded via nd.load.

    ``is_lambda`` selects /tmp (Lambda's writable dir) over the cwd.
    """
    base = url.split('/')[-1].split('.')[0]
    dest = ('/tmp/%s' if is_lambda else './%s') % base
    boto3.resource('s3').meta.client.download_file('ps-lambda-mxnet', base, dest)
    return nd.load(dest)
Ejemplo n.º 12
0
def load_params(prefix, epoch):
    """Load '<prefix>-<epoch:04d>.params' and split the entries into
    (arg_params, aux_params) dicts keyed by the bare parameter name."""
    loaded = nd.load('%s-%04d.params' % (prefix, epoch))
    arg_params, aux_params = {}, {}
    for key, value in loaded.items():
        # Keys look like 'arg:name' or 'aux:name'; anything else is ignored.
        kind, pname = key.split(':', 1)
        if kind == 'arg':
            arg_params[pname] = value
        if kind == 'aux':
            aux_params[pname] = value
    return arg_params, aux_params
Ejemplo n.º 13
0
def test_sym_nnvm():
    """Dump the quantized mobilenet model with a single ImageNet batch."""
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    sym_path, param_path, ext_path = load_fname(version, "sym.quantize", True)
    graph = mx.sym.load(sym_path)
    weights = nd.load(param_path)
    (inputs_ext,) = sim.load_ext(ext_path)
    batch = ds.load_imagenet_rec(1).next()

    _mrt.std_dump(graph, weights, inputs_ext, batch.data[0], "mobilenet"+version)
Ejemplo n.º 14
0
def load_params(prefix, epoch):
    """Split the checkpoint '<prefix>-<epoch:04d>.params' into arg and aux
    parameter dicts keyed by bare name (the 'arg:'/'aux:' prefix stripped)."""
    buckets = {'arg': {}, 'aux': {}}
    for key, value in nd.load('%s-%04d.params' % (prefix, epoch)).items():
        kind, pname = key.split(':', 1)
        # Entries with any other prefix are silently dropped.
        if kind in buckets:
            buckets[kind][pname] = value
    return buckets['arg'], buckets['aux']
Ejemplo n.º 15
0
def data_process(quantize_flag, input_shape, gpu_flag, num_test):
    """Profile per-op forward time of the SSD-512 mobilenet model.

    Runs ``get_mxnet_outs`` ``num_test`` times, averages each op's total time
    (dropping the first, warm-up, sample when more than one exists) and
    writes a report sorted by mean time to /home/test/<prefix>test.txt.

    :param quantize_flag: profile the quantized model instead of the original.
    :param input_shape: input tensor shape forwarded to get_mxnet_outs.
    :param gpu_flag: run on mx.gpu(3) instead of CPU.
    :param num_test: number of profiling iterations.
    """
    if quantize_flag:
        symbol = mx.sym.load('/home/test/tvm-cvm/data/ssd_512_mobilenet1.0_coco.all.quantize.json')
        params = nd.load('/home/test/tvm-cvm/data/ssd_512_mobilenet1.0_coco.all.quantize.params')
        pfx = 'quant_'
    else:
        symbol = mx.sym.load('/home/test/tvm-cvm/data/ssd_512_mobilenet1.0_coco.json')
        params = nd.load('/home/test/tvm-cvm/data/ssd_512_mobilenet1.0_coco.params')
        pfx = 'org_'
    if gpu_flag:
        ctx = mx.gpu(3)
        pfx += 'gpu_'
    else:
        ctx = mx.cpu()
        pfx += 'cpu_'
    params = convert_params_dtype(params, dest_dtype="float32")
    utils.log_init()
    logger = logging.getLogger('main')
    # op name -> {'sample_total': [per-iteration total seconds]}
    stimes = {}
    for iter_num in range(num_test):
        for opn, dct in get_mxnet_outs(symbol, params, input_shape, ctx, gpu_flag, logger, iter_num).items():
            if opn not in stimes:
                stimes[opn] = {'sample_total': []}
            stimes[opn]['sample_total'].append(dct['total'])
    for opn, dct in stimes.items():
        # Drop the first (warm-up) sample when possible.  BUG FIX: the
        # original always sliced [1:], dividing by zero when num_test == 1.
        samples = dct['sample_total'][1:] or dct['sample_total']
        stimes[opn]['mean_total'] = sum(samples) / len(samples)
    # Report ops slowest-first.
    arr = sorted([(stimes[opn]['mean_total'], opn) for opn in stimes], reverse=True)
    total = sum([dct['mean_total'] for opn, dct in stimes.items()])
    s = 'total forward time: %s second\n'%total
    s += '\n'
    for _, opn in arr:
        dct = stimes[opn]
        s += 'op: %s\ntotal: %s second\n'%(opn, dct['mean_total'])
        s += '---------------------------\n'
        s += '---------------------------\n'
        s += '\n'
    filename = '/home/test/'+pfx+'test.txt'
    with open(filename, 'w') as f:
        f.write(s)
Ejemplo n.º 16
0
def resnet101_v1d_73(pretrained=False,
                     root='~/.mxnet/models',
                     ctx=cpu(0),
                     **kwargs):
    """Constructs a ResNetV1d-101_2.2x model. Uses resnet101_v1d construction from resnetv1b.py

    Parameters
    ----------
    pretrained : bool or str
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    """
    # Shared tag for the shape-description json and the pretrained file name.
    model_tag = 'resnet%d_v%dd_%.1fx' % (101, 1, 2.2)
    model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3],
                      deep_stem=True,
                      avg_down=True,
                      name_prefix='resnetv1d_',
                      **kwargs)
    json_filename = os.path.join(os.path.dirname(__file__),
                                 model_tag + ".json")
    with open(json_filename, "r") as jsonFile:
        params_shapes = json.load(jsonFile)
    if pretrained:
        from ..model_store import get_model_file
        params_file = get_model_file(model_tag, tag=pretrained, root=root)
        prune_gluon_block(model,
                          model.name,
                          params_shapes,
                          params=ndarray.load(params_file),
                          pretrained=True,
                          ctx=ctx)
        # Attach ImageNet-1k class metadata to the pretrained model.
        from ...data import ImageNet1kAttr
        attrib = ImageNet1kAttr()
        model.synset = attrib.synset
        model.classes = attrib.classes
        model.classes_long = attrib.classes_long
    else:
        prune_gluon_block(model,
                          model.name,
                          params_shapes,
                          params=None,
                          pretrained=False,
                          ctx=ctx)
    return model
Ejemplo n.º 17
0
 def load_params(self, fname):
     """Read the checkpoint at ``fname``, split entries by their
     'arg:'/'aux:' prefix and hand both dicts to set_params.

     Raises ValueError for any entry with an unknown prefix.
     """
     arg_params = {}
     aux_params = {}
     for key, val in ndarray.load(fname).items():
         kind, pname = key.split(':', 1)
         if kind == 'arg':
             arg_params[pname] = val
         elif kind == 'aux':
             aux_params[pname] = val
         else:
             raise ValueError("Invalid param file " + fname)
     self.set_params(arg_params, aux_params)
Ejemplo n.º 18
0
def load_params(prefix, epoch):
    """Load a checkpoint and split it into (arg_params, aux_params) dicts.

    HACK: ``prefix`` and ``epoch`` are currently ignored — the original load
    line is commented out and a fixed checkpoint filename is loaded instead.
    Restore the commented line to honour the arguments.
    """
    #save_dict = nd.load('%s-%04d.params' % (prefix, epoch))

    save_dict = nd.load(
        "b32_q50_qa200_m20_std0.1_lr0.05_gn50.0_f50_s224-0160.params")
    arg_params = {}
    aux_params = {}
    for k, v in save_dict.items():
        # Keys look like 'arg:name' / 'aux:name'; others are ignored.
        tp, name = k.split(':', 1)
        if tp == 'arg':
            arg_params[name] = v
        if tp == 'aux':
            aux_params[name] = v
    return arg_params, aux_params
Ejemplo n.º 19
0
def load_params(dir_path="", epoch=None, name=""):
    """Block until the checkpoint resolved by get_saving_path exists, then
    load it and return (arg_params, aux_params, param_loading_path)."""
    prefix = os.path.join(dir_path, name)
    _, param_loading_path, _ = get_saving_path(prefix, epoch)
    # Poll once a minute until another process writes the file.
    while not os.path.isfile(param_loading_path):
        logging.info("in load_param, %s Not Found!" % param_loading_path)
        time.sleep(60)
    arg_params, aux_params = {}, {}
    for key, value in nd.load(param_loading_path).items():
        kind, pname = key.split(':', 1)
        if kind == 'arg':
            arg_params[pname] = value
        if kind == 'aux':
            aux_params[pname] = value
    return arg_params, aux_params, param_loading_path
Ejemplo n.º 20
0
def load_params(dir_path="", epoch=None, name=""):
    """Wait for the checkpoint file to appear, then load and split it into
    arg/aux parameter dicts; also returns the resolved path."""
    prefix = os.path.join(dir_path, name)
    _, param_loading_path, _ = get_saving_path(prefix, epoch)
    # Re-check every 60s until the file shows up on disk.
    while not os.path.isfile(param_loading_path):
        logging.info("in load_param, %s Not Found!" % param_loading_path)
        time.sleep(60)
    buckets = {'arg': {}, 'aux': {}}
    for key, value in nd.load(param_loading_path).items():
        kind, pname = key.split(':', 1)
        if kind in buckets:
            buckets[kind][pname] = value
    return buckets['arg'], buckets['aux'], param_loading_path
Ejemplo n.º 21
0
def _try_load_parameters(self,
                         filename=None,
                         model=None,
                         ctx=None,
                         allow_missing=False,
                         ignore_extra=False):
    """Best-effort parameter loading.

    Loads parameters either from ``filename`` (an ndarray file) or from
    another ``model``'s collected parameters, then copies every entry whose
    name exists here and whose shape matches.  When the saved names carry no
    '.'-separated hierarchy, falls back to the legacy ParameterDict loader.

    Fixes: removed a dead no-op triple-quoted string block (commented-out
    re-initialisation code) and the ``getblock`` helper that only it used.
    """
    if filename is not None:
        loaded = ndarray.load(filename)
    else:
        loaded = model._collect_params_with_prefix()
    params = self._collect_params_with_prefix()
    if not loaded and not params:
        return

    if not any('.' in i for i in loaded.keys()):
        # legacy loading: flat parameter names, delegate to ParameterDict.
        del loaded
        self.collect_params().load(filename, ctx, allow_missing, ignore_extra,
                                   self.prefix)
        return

    for name in loaded:
        if name in params:
            # Skip shape mismatches instead of raising — best-effort load.
            if params[name].shape != loaded[name].shape:
                continue
            params[name]._load_init(loaded[name], ctx)
Ejemplo n.º 22
0
def test_sym_nnvm(batch_size=10, iter_num=10):
    """Dump the quantized inception-v3 model with one 299x299 ImageNet batch.

    Fix: the file-name listing was a no-op string literal; turned into real
    comments.
    """
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    version = "v3"
    # load_fname(version, "mrt", True) resolves to:
    #   ./data/tf_inceptionv3.mrt.json
    #   ./data/tf_inceptionv3.mrt.params
    #   ./data/tf_inceptionv3.mrt.ext
    dump_sym, dump_params, dump_ext = load_fname(version, "mrt", True)
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    data_iter = ds.load_imagenet_rec(batch_size, 299)
    data = data_iter.next().data[0]

    _mrt.std_dump(sym, params, inputs_ext, data, "inception_v3")
Ejemplo n.º 23
0
def test_sym_nnvm(batch_size=10, iter_num=10):
    """Compile the quantized model to an NNVM artifact (llvm target)."""
    logger = logging.getLogger("log.test.nnvm")
    logger.info("=== Log Test NNVM ===")

    target = "llvm"
    tvm_ctx = tvm.context(target, 1)  # NOTE(review): unused below
    mx_ctx = mx.gpu(2)  # NOTE(review): unused below
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, 224, 224),
        }
    }
    # NOTE(review): `inputs` and `inputs_shape` are never used; inputs_ext is
    # re-assigned from the .ext file further down.
    inputs = [mx.sym.var(name) for name in inputs_ext]
    inputs_shape = {k: v['shape'] for k, v in inputs_ext.items()}

    data_iter = load_dataset(batch_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

    # Advances the iterator once; result is discarded (apparent warm-up /
    # skip-first-batch side effect — preserved as-is).
    data_iter_func()

    version = ""
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
    # sim.load_ins_ext(params, inputs_ext)

    # nnvm_sym, _ = nnvm.frontend.from_mxnet(sym)
    # with open('debug_nnvm_sym_after_load_from_mxnet.json', 'w') as fout:
    #    fout.write(nnvm_sym.debug_str())
    dump_sym, dump_params = load_fname(version, "nnvm.compile", False)
    spass.mxnet_to_nnvm(sym,
                        params,
                        inputs_ext,
                        dump_sym,
                        dump_params,
                        target='llvm')
Ejemplo n.º 24
0
def load_checkpoint(prefix, epoch):
    """
    Parameters
    ----------
    prefix: str
        directory prefix of checkpoint
    epoch: int
    Returns
    -------
    arg_params: mxnet.ndarray
    aux_params: mxnet.ndarray
    """
    checkpoint = nd.load('%s-%04d.params' % (prefix, epoch))
    split = {'arg': {}, 'aux': {}}
    for key, value in checkpoint.items():
        # Keys are '<kind>:<name>'; kinds other than arg/aux are dropped.
        kind, pname = key.split(':', 1)
        if kind in split:
            split[kind][pname] = value
    return split['arg'], split['aux']
def read_mxnet_weights(path, show=False):
    """Load an MXNet .params file and return a {name: numpy array} dict with
    weight arrays transposed from MXNet layout to TF layout.

    Names are processed in sorted order; ``show`` prints each entry's
    name/shape/dtype.
    """
    loaded = nd.load(path)
    name_array_dict = {}
    for name in sorted(loaded.keys()):
        array = loaded[name].asnumpy()

        if show:
            print ("name: {} || shape: {} || dtype: {}".format(name, array.shape, array.dtype))

        if name.endswith("weight"):
            if name.endswith("fc.weight"):
                # fully-connected: (out, in) -> (in, out)
                array = np.transpose(array, [1, 0])
            else:
                # (out_channel, in_channel, k, k)(mxnet) --> (k, k, in_channel, out_channel)(tf)
                # (32, 3, 3, 3)-->(3, 3, 3, 32)
                array = np.transpose(array, [2, 3, 1, 0])
        name_array_dict[name] = array

    return name_array_dict
Ejemplo n.º 26
0
def load_params(prefix, epoch):
    """Load '<prefix>-<epoch:04d>.params' and split into arg/aux dicts.

    Parameters
    ----------
    prefix : str
    epoch : int

    Returns
    -------
    arg_params : dict
    aux_params : dict
    """
    import mxnet.ndarray as nd
    split = {'arg': {}, 'aux': {}}
    for key, value in nd.load('%s-%04d.params' % (prefix, epoch)).items():
        kind, pname = key.split(':', 1)
        # Any other prefix is silently ignored.
        if kind in split:
            split[kind][pname] = value
    return split['arg'], split['aux']
Ejemplo n.º 27
0
from mxnet import ndarray as nd

# nd.load returns a list for unnamed saves; index 0 is the array itself.
features = nd.load('./pig/features_train_vgg11.nd')[0]
labels = nd.load('./pig/labels.nd')[0]

# BUG FIX: the original printed labels[550:60], which is always an empty
# slice (start > stop); 550:560 matches the apparent intent of previewing
# ten labels.
print(labels[550:560])
Ejemplo n.º 28
0
    acc_top5.update(label, res)
    _, top5 = acc_top5.get()
    return "top1={:6.2%} top5={:6.2%}".format(top1, top5)


# sym, params = mx.sym.load(sym_file), nd.load(param_file)
# sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
# qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, data, ctx)
# qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs, "cvm")
# dump_sym, dump_params, dump_ext = load_fname("", "sym.quantize", True)
# sim.save_ext(dump_ext, inputs_ext)
# nd.save(dump_params, qparams)
# open(dump_sym, "w").write(qsym.tojson())

# Reload the quantized alexnet model from disk and prepare accuracy metrics.
dump_sym, dump_params, dump_ext = load_fname("", "sym.quantize", True)
sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
(inputs_ext, ) = sim.load_ext(dump_ext)
# Hard-wired toggle: dump the model for CVM and stop instead of evaluating.
if True:
    _mrt.std_dump(sym, params, inputs_ext, data, "alexnet", is_mxnet=True)
    exit()
inputs = [mx.sym.var(n) for n in inputs_ext]
net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
qacc_top1 = mx.metric.Accuracy()
qacc_top5 = mx.metric.TopKAccuracy(5)
qacc_top1.reset()
qacc_top5.reset()


def cvm_quantize(data, label):
    data = sim.load_real_data(data, 'data', inputs_ext)
    data = gluon.utils.split_and_load(data,
Ejemplo n.º 29
0
mlp_model = mx.mod.Module(symbol=mlp, context=mx.cpu())
# pass train/test data to allocate model (bind state)
MLP_train_iter = mx.io.NDArrayIter(Z_train,
                                   yZ_train,
                                   batch_size,
                                   shuffle=False)
mlp_model.bind(MLP_train_iter.provide_data, MLP_train_iter.provide_label)
mlp_model.init_params()
mlp_model.init_optimizer()
mlp_model_params = mlp_model.get_params()[0]

# update parameters based on optimal found during cv Training
from mxnet import ndarray

params_dict = ndarray.load(
    os.path.join(save_to,
                 'mlp_model_params_z{}_mu{}.arg'.format(znum, num_centers)))
arg_params = {}
aux_params = {}
for k, value in params_dict.items():
    arg_type, name = k.split(':', 1)
    if arg_type == 'arg':
        arg_params[name] = value
    elif arg_type == 'aux':
        aux_params[name] = value
    else:
        raise ValueError("Invalid param file ")

# order of params: [(128L, 266L),(128L,),(32L, 128L),(32L,),(2L, 32L),(2L,)]
# organize weights and biases
# BUG FIX: dict.iteritems() is Python-2-only; items() works on both 2 and 3.
l1 = [v.asnumpy().shape for k, v in mlp_model_params.items()]
Ejemplo n.º 30
0
def data_iter_func():
    # Pull the next batch from the module-level data_iter iterator.
    return next(data_iter)


# Grab one batch for calibration/comparison and load the original model.
data, label = data_iter_func()

sym_file, param_file = load_fname()
net1 = utils.load_model(sym_file, param_file, inputs, ctx=ctx)


def trec(data):
    """Forward one batch through the original model on ctx."""
    return net1(data.as_in_context(ctx))


sym, params = mx.sym.load(sym_file), nd.load(param_file)
sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
# Quantize via the MRT flow (active branch) or the legacy calib flow.
if True:
    mrt = _mrt.MRT(sym, params, inputs_ext)
    mrt.set_data('data', data)
    mrt.calibrate(ctx=ctx)
    # 16-bit fixed input precision, 8-bit output precision.
    mrt.set_input_prec('data', 16)
    mrt.set_fixed('data')
    mrt.set_output_prec(8)
    qsym, qparams, inputs_ext = mrt.quantize()
else:
    inputs_ext['data']['data'] = data
    th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=ctx)
    qsym, qparams, _ = calib.pure_int8_quantize(sym, params, inputs_ext,
                                                th_dict)
net2 = gluon.nn.SymbolBlock(qsym, inputs)
Ejemplo n.º 31
0
def test_sym_pass(batch_size=10, iter_num=10, quantize=True):
    """Quantize inception-v3 with MRT, then compare top-1/top-5 accuracy of
    the original and quantized models over ``iter_num`` validation batches."""

    logger = logging.getLogger("log.test.sym.pass")

    calib_ctx = mx.gpu(2)
    ctx = [mx.gpu(int(i)) for i in "1,2,3,4".split(',') if i.strip()]
    input_size = 299
    version = "v3"
    h, w = input_size, input_size
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, h, w),
        }
    }
    inputs = [mx.sym.var(name) for name in inputs_ext]

    logger.info("load dataset, symbol and parameters")
    data_iter = ds.load_imagenet_rec(batch_size, input_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

    # Original fp32 model and its accuracy metrics.
    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()

    def inception_v3(data, label):
        # Evaluate one batch on the fp32 model, split across the ctx GPUs.
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    if quantize:
        sym_file, param_file = load_fname(version)
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
        data, _ = data_iter_func()
        # Hard-wired toggle: MRT flow (active) vs legacy calib flow (dead).
        if True:
            dump_sym, dump_params, dump_ext = load_fname(version, "mrt", True)
            mrt = _mrt.MRT(sym, params, inputs_ext)
            mrt.set_data('data', data)
            mrt.calibrate(ctx=calib_ctx)
            mrt.set_output_prec(8)
            qsym, qparams, inputs_ext = mrt.quantize()
        else:
            dump_sym, dump_params, dump_ext = load_fname(
                version, "sym.quantize", True)
            inputs_ext['data']['data'] = data
            th_dict = calib.sym_calibrate(sym,
                                          params,
                                          inputs_ext,
                                          ctx=calib_ctx)
            qsym, qparams, precs, _ = calib.sym_simulate(
                sym, params, inputs_ext, th_dict)
            qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs)
        # Persist the quantized ext/params/symbol triple to disk.
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        open(dump_sym, "w").write(qsym.tojson())

    # Reload the quantized model (always the "mrt" files) and its metrics.
    dump_sym, dump_params, dump_ext = load_fname(version, "mrt", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()

    def cvm_quantize(data, label):
        # Evaluate one batch on the quantized model.
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net2.forward(d) for d in data]
        res = nd.concatenate(res)
        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    # Run both models batch-by-batch and log their accuracies side by side.
    utils.multi_validate(inception_v3,
                         data_iter_func,
                         cvm_quantize,
                         iter_num=iter_num,
                         logger=logger)
Ejemplo n.º 32
0
                                kernel=(4, 4),
                                pad=(1, 1),
                                stride=(2, 2),
                                num_filter=32,
                                name='deconv1')
    data = mx.sym.BatchNorm(data=data, momentum=0.9, name='dcbn1')
    data = mx.sym.Activation(data=data, act_type='relu')
    data = mx.sym.Convolution(data=data,
                              num_filter=3,
                              kernel=(9, 9),
                              pad=(4, 4),
                              name='lastconv')
    return data


# Load the pretrained style-transfer weights and build the generator graph.
arg = nd.load("data/style_model/the_scream_args.nd")
aux = nd.load("data/style_model/the_scream_auxs.nd")
sym_mx = generator_symbol()

# Content image: HWC uint8 -> CHW float32.
x = imread('data/style_model/tubingen.jpg')
x = np.transpose(x, axes=(2, 0, 1)).astype(np.float32)
# Per-channel mean subtraction (values match the usual ImageNet means).
x[0, :] -= 123.68
x[1, :] -= 116.779
x[2, :] -= 103.939

# Add the batch dimension and feed the image in as the 'data' argument.
x = np.expand_dims(x, axis=0)
print("input shape", x.shape)
arg["data"] = nd.array(x, ctx=mx.cpu())

import nnvm
sym, params = nnvm.frontend.from_mxnet(sym_mx, arg, aux)
Ejemplo n.º 33
0
def test_sym_pass(batch_size=10, iter_num=10, quantize=True):
    """Quantize shufflenet with MRT and compare top-1/top-5 accuracy of the
    original and quantized models over ``iter_num`` validation batches."""
    logger = logging.getLogger("log.test.sym.pass")
    calib_ctx = mx.gpu(1)
    ctx = [mx.gpu(int(i)) for i in "1,2,3,4".split(',') if i.strip()]
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, 224, 224),
        }
    }
    inputs = [mx.sym.var(name) for name in inputs_ext]

    logger.info("load dataset, symbol and parameters")
    # load dataset and iter function
    data_iter = ds.load_imagenet_rec(batch_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

    # One batch for calibration.
    data, _ = data_iter_func()

    # load original model for accuracy
    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()

    def shufflenet(data, label):
        # Evaluate one batch on the fp32 model, split across the ctx GPUs.
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    if quantize:
        # load original model
        sym_fname, param_fname = load_fname(version)
        sym, params = mx.sym.load(sym_fname), nd.load(param_fname)
        sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)

        # quantize process
        mrt = _mrt.MRT(sym, params, inputs_ext)  # initialize
        mrt.set_data('data', data)  # set input data
        mrt.calibrate(ctx=calib_ctx)  # calibration
        mrt.set_output_prec(8)  # set output prec, do nothing by default
        qsym, qparams, inputs_ext = mrt.quantize()  # quantization

        # dump quantized model
        dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize",
                                                     True)
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        open(dump_sym, "w").write(qsym.tojson())

    # Disabled branch: would build a CVM executor model from the quantized
    # symbol/params.
    if False:
        # convert to cvm executor model
        inputs_ext['data']['shape'] = (1, 3, 224, 224)
        nnvm_sym, nnvm_params = spass.mxnet_to_nnvm(qsym, qparams, inputs_ext)
        spass.cvm_build(nnvm_sym, nnvm_params, inputs_ext,
                        *load_fname(version, "nnvm"))

    # load quantized model for accuracy
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    inputs = [mx.sym.var(n) for n in inputs_ext]
    net3 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)

    # net3 = mx.gluon.nn.SymbolBlock(qsym, inputs)
    # utils.load_parameters(net3, qparams, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()

    def cvm_quantize(data, label):
        # Evaluate one batch on the quantized model.
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net3.forward(d) for d in data]
        res = nd.concatenate(res)

        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    # compare accuracy between models
    utils.multi_validate(shufflenet,
                         data_iter_func,
                         cvm_quantize,
                         iter_num=iter_num,
                         logger=logger)