Example #1
    def save(self, prefix):
        """
        Saves the parameters and constants down to json files as maps from {uuid : value},
        where value is an mx.ndarray for parameters and either primitive number types or mx.ndarray for constants.
        Saves up to 3 files: prefix+["_params.json", "_variable_constants.json", "_mxnet_constants.json"]

        :param prefix: The directory and any appending tag for the files to save this Inference as.
        :type prefix: str , ex. "../saved_inferences/experiment_1"
        """
        param_file = prefix + "_params.json"
        variable_constants_file = prefix + "_variable_constants.json"
        mxnet_constants_file = prefix + "_mxnet_constants.json"
        to_save = {key: value._reduce() for key, value in self._params.items()}
        ndarray.save(param_file, to_save)

        mxnet_constants = {
            uuid: value
            for uuid, value in self._constants.items()
            if isinstance(value, mx.ndarray.ndarray.NDArray)
        }
        ndarray.save(mxnet_constants_file, mxnet_constants)

        variable_constants = {
            uuid: value
            for uuid, value in self._constants.items()
            if uuid not in mxnet_constants
        }
        import json
        with open(variable_constants_file, 'w') as f:
            json.dump(variable_constants, f, ensure_ascii=False)
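A matching load path can be sketched from the file layout described in the docstring (the variable names below are illustrative, not part of the original class; ndarray.load and json.load are the real APIs):

    params = ndarray.load(prefix + "_params.json")
    mxnet_constants = ndarray.load(prefix + "_mxnet_constants.json")
    with open(prefix + "_variable_constants.json") as f:
        variable_constants = json.load(f)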
Example #2
def export_dense_mx(ckpt_dir, arg_list, aux_list, output_dir):
    ckpt = get_latest_ckpt_v2(ckpt_dir)
    print('export dense mx from %s' % ckpt)
    manager = multiprocessing.Manager()
    save_dict = manager.dict()
    print('# export arg:', arg_list)
    p_list = []
    for n in range(len(arg_list)):
        p = multiprocessing.Process(target=_update_save_dict_run,
                                    args=(n, ckpt, arg_list, save_dict, 'mx',
                                          'var'))
        p_list.append(p)
        p.start()
    print('# export aux:', aux_list)
    for n in range(len(aux_list)):
        p = multiprocessing.Process(target=_update_save_dict_run,
                                    args=(n, ckpt, aux_list, save_dict, 'mx',
                                          'aux'))
        p_list.append(p)
        p.start()
    for p in p_list:
        p.join()

    save_dict = dict(save_dict)
    from mxnet import ndarray
    ndarray.save('dense.txt', save_dict)
    output(output_dir, 'dense.txt')
    return save_dict
Example #3
def save_params(dir_path=os.curdir, epoch=None, name="", params=None, aux_states=None,
                ctx=mx.cpu()):
    prefix = os.path.join(dir_path, name)
    _, param_saving_path, _ = get_saving_path(prefix, epoch)
    if not os.path.isdir(dir_path) and not (dir_path == ""):
        os.makedirs(dir_path)
    save_dict = {('arg:%s' % k): v.copyto(ctx) for k, v in params.items()}
    save_dict.update({('aux:%s' % k): v.copyto(ctx) for k, v in aux_states.items()})
    nd.save(param_saving_path, save_dict)
    return param_saving_path
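The 'arg:'/'aux:' key prefixes follow MXNet's checkpoint naming convention, so a loader can split the saved dict back into argument and auxiliary parameters; a minimal sketch:

    loaded = nd.load(param_saving_path)
    arg_params = {k[4:]: v for k, v in loaded.items() if k.startswith('arg:')}
    aux_params = {k[4:]: v for k, v in loaded.items() if k.startswith('aux:')}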
Example #4
def out_concat_data_label(
    data_iter,
    senet,
    dpn,
    out_path,
    dpn_r_mean=124,
    dpn_g_mean=117,
    dpn_b_mean=104,
    dpn_scale=0.0167,
    se_r_mean=0,
    se_g_mean=0,
    se_b_mean=0,
    se_scale=1,
):
    data_list = []
    label_list = []
    i = 0
    print('start forward!!')
    while True:
        if i % 100 == 0:
            print('reach ', i, ' image')
        i = i + 1
        try:
            se_batch = data_iter.next()
            # copy the batch so the two normalizations don't alias the same
            # NDArray (with `dpn_batch = se_batch`, both names point to one array)
            dpn_batch = mx.io.DataBatch(data=[se_batch.data[0].copy()],
                                        label=se_batch.label)
            # normalize
            dpn_batch.data[0][:, 0, :, :] -= dpn_r_mean
            dpn_batch.data[0][:, 1, :, :] -= dpn_g_mean
            dpn_batch.data[0][:, 2, :, :] -= dpn_b_mean
            dpn_batch.data[0][:, :, :, :] *= dpn_scale
            se_batch.data[0][:, 0, :, :] -= se_r_mean
            se_batch.data[0][:, 1, :, :] -= se_g_mean
            se_batch.data[0][:, 2, :, :] -= se_b_mean
            se_batch.data[0][:, :, :, :] *= se_scale
            # forward
            senet.forward(se_batch)
            senet_out = senet.get_outputs()[0]
            dpn.forward(dpn_batch)
            dpn_out = dpn.get_outputs()[0]
            feature = nd.concat(*[senet_out, dpn_out])
            data_list.append(feature.asnumpy())
            label_temp = se_batch.label[0].asscalar()
            if label_temp == dpn_batch.label[0].asscalar():
                label_list.append(label_temp)
            else:
                print('label conflict!')
                break
        except StopIteration:
            print('stop')
            break
    # record
    out_dict = {'data': nd.array(data_list), 'label': nd.array(label_list)}
    nd.save(out_path, out_dict)
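nd.save writes the whole dict to a single file, and nd.load reads it back unchanged, e.g.:

    loaded = nd.load(out_path)
    data, label = loaded['data'], loaded['label']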
Example #5
def push(data, url, is_lambda=True):
    fname = url.split('/')[-1]
    fname = fname.split('.')[0]

    if is_lambda:
        fpath = '/tmp/%s' % fname
    else:
        fpath = './%s' % fname

    nd.save(fpath, data)
    s3 = boto3.resource('s3')
    s3.meta.client.upload_file(fpath, 'ps-lambda-mxnet', fname)
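A matching pull would download the object and deserialize it with nd.load; a sketch assuming the same bucket name and key scheme as above:

    def pull(url, is_lambda=True):
        fname = url.split('/')[-1].split('.')[0]
        fpath = '/tmp/%s' % fname if is_lambda else './%s' % fname
        s3 = boto3.resource('s3')
        s3.meta.client.download_file('ps-lambda-mxnet', fname, fpath)
        return nd.load(fpath)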
Example #6
def test_load_ndarray():
    nd_file = 'test_predictor_load_ndarray.params'
    a = nd.random.uniform(shape=(7, 3))
    b = nd.random.uniform(shape=(7,))
    nd_data = {'a':a, 'b':b}
    nd.save(nd_file, nd_data)

    # test load_ndarray_file
    with open(nd_file, "rb") as f:
        nd_load = load_ndarray_file(f.read())
    assert(set(nd_data.keys()) == set(nd_load.keys()))
    for k in nd_data.keys():
        assert_almost_equal(nd_data[k].asnumpy(), nd_load[k], rtol=1e-5, atol=1e-6)
Example #7
def save_model(net, path, sym=None, arg_dict=None):
    if not net._cached_graph:
        raise RuntimeError(
            "Please first call block.hybridize() and then run forward with "
            "this block at least once before calling export.")
    if sym is None:
        sym = net._cached_graph[1]
    sym.save('%s.json' % path)

    if arg_dict is None:
        arg_dict = reduce_params(net, sym)
    ndarray.save('%s.params' % path, arg_dict)
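The exported pair can later be reloaded without the original Python class via SymbolBlock.imports (a real Gluon API; the 'data' input name and matching parameter names are assumptions about the exported network):

    net = mx.gluon.nn.SymbolBlock.imports('%s.json' % path, ['data'], '%s.params' % path)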
Example #8
    def save_parameters(self, filename):
        """Save model

        Parameters
        ----------
        filename : str
            path to model file
        """
        params = self._collect_params_with_prefix()
        if self.pret_word_embs:  # don't save word embeddings inside model
            params.pop('pret_word_embs.weight', None)
        arg_dict = {key: val._reduce() for key, val in params.items()}
        ndarray.save(filename, arg_dict)
Example #9
    def gen_noise(self):
        # if ctx == mx.gpu():
        #     noise, _ = TimeseriesFromPSD_nd(self.param_noise)
        # elif ctx == mx.cpu():
        #     noise, _ = TimeseriesFromPSD(self.param_noise)
        #     noise = nd.array(noise)
        if ctx == mx.gpu():
            noise = GenNoise_matlab_nd(shape=(self.noiseAll_size, self.train.shape[-1]), params=self.pp)
            # print('Random noise!!')
            nd.save('./noise', noise[:10])
        else:
            # a bare `raise` here would itself fail with
            # "RuntimeError: No active exception to re-raise"
            raise NotImplementedError("gen_noise is only implemented for a GPU context")

        return noise
Example #10
def calculate_norm(x, y):
    assert x.shape == y.shape
    ndims = np.prod(x.shape)
    x = nd.reshape(x, shape=(ndims, ))
    y = nd.reshape(y, shape=(ndims, ))
    res = x - y
    nx = nd.norm(x)
    ny = nd.norm(y)
    nr = nd.norm(res)
    print("saving...")
    f = "/home/ryt/data/cmp_"
    names = ["nx", "ny", "nr"]
    objs = [nx, ny, nr]
    for obj in objs:
        print(type(obj), obj.shape)
    for i in range(3):
        nd.save(f + names[i], objs[i])
    print('success')
Example #11
def test_yxnet_mnist():
    mnist_sym = make_mnist_graph()

    inputs_ext = {
        'data': {
            'shape': (1, 1, 28, 28),
            'precision': 8,
        }
    }
    in_shape = (1, 1, 28, 28)
    arg_shapes, _, aux_shapes = mnist_sym.infer_shape(data=in_shape)
    args, auxs = mnist_sym.list_arguments(), mnist_sym.list_auxiliary_states()
    infer_shapes = {args[i]: arg_shapes[i] for i in range(len(args))}
    infer_shapes.update({auxs[i]: aux_shapes[i] for i in range(len(auxs))})

    root = "/home/serving/warehouse"
    _, bd = load_parameters(
        mnist_sym, infer_shapes,
        root + "/ca3d0286d5758697cdef653c1375960a868ac08a/data/params")
    mnist_sym, bd = spass.mx_set_precs(mnist_sym, bd, inputs_ext)

    dump_sym, dump_par = '/tmp/mnist_yxnet.symbol', '/tmp/mnist_yxnet.params'
    with open(dump_sym, 'w') as fout:
        fout.write(mnist_sym.tojson())
    nd.save(dump_par, bd)

    inputs = [mx.sym.var('data')]
    data = np.load(root + '/ba9fedfc87ccb6064fcd437fd2287f5edef1bd84/data')
    data = nd.array([data.astype(np.int8)])

    if False:
        graph = nn.SymbolBlock(mnist_sym, inputs)
        utils.load_parameters(graph, bd)
        res = graph.forward(data).astype('int32')
    else:
        prefix = "/tmp/yxnet/mnist"
        dump_sym, dump_params = prefix + ".json", prefix + ".params"
        print(sutils.sym_collect_attr(mnist_sym))
        spass.mxnet_to_nnvm(mnist_sym, bd, {'data': {
            'shape': (1, 1, 28, 28)
        }}, dump_sym, dump_params)
        exit()
    print(res.asnumpy().flatten()[:100])
Example #12
def save_params(filename, net, select=None):
    """
    New in version 1.3.16

    Notes
    -----
    Q: Why not use `save_parameters` from `mxnet.gluon`?
    A: Because we want the `select` argument, so we can preserve only the parameters
    we want (i.e., exclude unnecessary parameters such as a pretrained embedding).
    """
    params = net._collect_params_with_prefix()
    if select is not None:
        pattern = re.compile(select)
        params = {
            name: value
            for name, value in params.items() if pattern.match(name)
        }
    arg_dict = {key: val._reduce() for key, val in params.items()}
    nd.save(filename, arg_dict)
    return filename
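For example, to keep everything except a pretrained embedding, a caller might pass a negative-lookahead pattern (a usage sketch; the file name, net, and parameter-name prefix are assumptions):

    save_params('model.params', net, select='^(?!embedding)')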
Example #13
    def _save_checkpoint(self):
        if self.checkpoint_name is None:
            return
        
        checkpoint = {
#           'update_rule': self.update_rule,
          'lr_decay': nd.array([self.lr_decay]),
          'lr_rate': nd.array([self.lr_rate]),
#           'optim_config': self.optim_config,
          'batch_size': nd.array([self.batch_size]),
#           'num_train_samples': self.num_train_samples,
#           'num_val_samples': self.num_val_samples,
          'train_shift_list': nd.array(self.train_shift_list),
          'test_shift_list': nd.array(self.test_shift_list),
          'num_epoch': nd.array([self.num_epoch]),
          'epoch': nd.array([self.epoch]),
          'loss_history': nd.array(self.loss_history),
          'loss_v_history': nd.array(self.loss_v_history),
          'moving_loss_history': nd.array(self.moving_loss_history),
          'train_acc_history': nd.array(self.train_acc_history),
          'test_acc_history': nd.array(self.test_acc_history),
        }
        
        file_address = self.save_checkpoints_address
        # save the model structure
        if self.epoch == 1:
            os.system('mkdir -p %s' % file_address)
            np.save(file_address + '%s_structure_epoch.pkl' % self.checkpoint_name, self.model.structure)
        # save the best params
        if self.findabest:
            os.system('rm -rf ' + file_address + '%s_best_params_epoch@*' % self.checkpoint_name)
            nd.save(file_address + '%s_best_params_epoch@%s_%s.pkl' % (self.checkpoint_name, self.best_params_epoch, self.best_test_acc), self.best_params)
            self.findabest = 0
        # save all the params during training
        # nd.save(file_address + '%s_params_epoch@%s.pkl' % (self.checkpoint_name, self.epoch), self.model.params)
        # save the training progress info
        nd.save(file_address + '%s_info.pkl' % self.checkpoint_name, checkpoint)
Example #14
def save(params, filename, strip_prefix=''):
    """Save parameters to file.

    Parameters
    ----------
    filename : str
        Path to parameter file.
    strip_prefix : str, default ''
        Strip prefix from parameter names before saving.
    """
    arg_dict = {}
    for param in params.values():
        weight = param._reduce()
        if not param.name.startswith(strip_prefix):
            raise ValueError(
                "Prefix '%s' is to be stripped before saving, but Parameter's "
                "name '%s' does not start with '%s'. "
                "This may be because your Block shares parameters with other "
                "Blocks, or you forgot to use 'with name_scope()' when creating "
                "child blocks. For more info on naming, please see "
                "http://mxnet.incubator.apache.org/tutorials/basic/naming.html"
                % (strip_prefix, param.name, strip_prefix))
        arg_dict[param.name[len(strip_prefix):]] = weight
    nd.save(filename, arg_dict)
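A loader is then expected to re-add the stripped prefix; a minimal sketch under the same naming assumptions (restored is a hypothetical dict keyed like the original params):

    loaded = nd.load(filename)
    restored = {strip_prefix + name: value for name, value in loaded.items()}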
Example #15
def test_mrt_quant(batch_size=1, iter_num=10, from_scratch=0):
    logger = logging.getLogger("log.test.mrt.quantize")
    flag = [False]*from_scratch + [True]*(4-from_scratch)

    ctx = mx.gpu(4)
    qctx = mx.gpu(3)
    input_size = 416
    input_shape = (batch_size, 3, input_size, input_size)

    # define data iter function, get:
    # get_iter_func
    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)
    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    # split model, get:
    # base, base_params, top, top_params, top_inputs_ext 
    base, base_params, top, top_params, top_inputs_ext = \
            None, None, None, None, None
    if flag[0]:
        sym_file, param_file = load_fname("_darknet53_voc")
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        mrt = MRT(sym, params, input_shape)
        keys = [
          'yolov30_yolooutputv30_expand_dims0',
          'yolov30_yolooutputv31_expand_dims0',
          'yolov30_yolooutputv32_expand_dims0',
          'yolov30_yolooutputv30_tile0',
          'yolov30_yolooutputv31_tile0',
          'yolov30_yolooutputv32_tile0',
          'yolov30_yolooutputv30_broadcast_add1',
          'yolov30_yolooutputv31_broadcast_add1',
          'yolov30_yolooutputv32_broadcast_add1',
        ]
        base, base_params, top, top_params, top_inputs_ext \
                = split_model(mrt.csym, mrt.cprm, {'data': input_shape}, keys)
        dump_sym, dump_params = load_fname("_darknet53_voc", "mrt.base")
        open(dump_sym, "w").write(base.tojson())
        nd.save(dump_params, base_params)
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.top", True)
        open(dump_sym, "w").write(top.tojson())
        nd.save(dump_params, top_params)
        sim.save_ext(dump_ext, top_inputs_ext)
    else:
        dump_sym, dump_params = load_fname("_darknet53_voc", "mrt.base")
        base, base_params = mx.sym.load(dump_sym), nd.load(dump_params)
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.top", True)
        top, top_params = mx.sym.load(dump_sym), nd.load(dump_params)
        (top_inputs_ext,) = sim.load_ext(dump_ext)

    base_graph = mx.gluon.nn.SymbolBlock(base, [mx.sym.var('data')])
    nbase_params = convert_params_dtype(base_params, src_dtypes="float64",
            dest_dtype="float32")
    utils.load_parameters(base_graph, nbase_params, ctx=ctx)

    top_graph = mx.gluon.nn.SymbolBlock(top,
            [mx.sym.var(n) for n in top_inputs_ext])
    ntop_params = convert_params_dtype(top_params, src_dtypes="float64",
            dest_dtype="float32")
    utils.load_parameters(top_graph, ntop_params, ctx=ctx)

    # calibrate split model, get:
    # th_dict
    th_dict = None
    if flag[1]:
        mrt = MRT(base, base_params, input_shape)
        for i in range(1):
            data, _ = data_iter_func()
            mrt.set_data(data)
            mrt.calibrate(ctx=ctx)
        _, _, dump_ext = load_fname("_darknet53_voc", "mrt.dict", True)
        th_dict = mrt.th_dict
        sim.save_ext(dump_ext, th_dict)
    else:
        _, _, dump_ext = load_fname("_darknet53_voc", "mrt.dict", True)
        (th_dict,) = sim.load_ext(dump_ext)

    # quantize split model, get:
    # qbase, qbase_params, qbase_inputs_ext, oscales, maps
    qbase, qbase_params, qbase_inputs_ext, oscales, maps = \
            None, None, None, None, None
    if flag[2]:
        mrt = MRT(base, base_params, input_shape)
        mrt.set_th_dict(th_dict)
        mrt.set_threshold('data', 2.64)
        mrt.set_threshold('yolov30_yolooutputv30_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv31_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv32_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv30_tile0', 416)
        mrt.set_threshold('yolov30_yolooutputv31_tile0', 416)
        mrt.set_threshold('yolov30_yolooutputv32_tile0', 416)
        mrt.set_output_prec(30)

        qbase, qbase_params, qbase_inputs_ext = mrt.quantize()

        oscales = mrt.get_output_scales()
        maps = mrt.get_maps()
        dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc", "mrt.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        nd.save(dump_params, qbase_params)
        sim.save_ext(dump_ext, qbase_inputs_ext, oscales, maps)
    else:
        qb_sym, qb_params, qb_ext = load_fname("_darknet53_voc", "mrt.quantize", True)
        qbase, qbase_params = mx.sym.load(qb_sym), nd.load(qb_params)
        qbase_inputs_ext, oscales, maps = sim.load_ext(qb_ext)

    # merge quantized split model, get:
    # qsym, qparams, oscales2
    qsym, qparams = None, None
    if flag[3]:
        def box_nms(node, params, graph):
            name, op_name = node.attr('name'), node.attr('op_name')
            childs, attr = sutils.sym_iter(node.get_children()), node.list_attr()
            if op_name == '_contrib_box_nms':
                valid_thresh = sutils.get_attr(attr, 'valid_thresh', 0)
                attr['valid_thresh'] = int(valid_thresh * oscales[3])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            return node
        qsym, qparams = merge_model(qbase, qbase_params,
                top, top_params, maps, box_nms)
        oscales2 = [oscales[1], oscales[0], oscales[2]]
        sym_file, param_file, ext_file = \
                load_fname("_darknet53_voc", "mrt.all.quantize", True)
        open(sym_file, "w").write(qsym.tojson())
        nd.save(param_file, qparams)
        sim.save_ext(ext_file, qbase_inputs_ext, oscales2)
    else:
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.all.quantize", True)
        qsym, qparams = mx.sym.load(dump_sym), nd.load(dump_params)
        _, oscales2 = sim.load_ext(dump_ext)

    if False:
        compile_to_cvm(qsym, qparams, "yolo_tfm",
                datadir="/data/ryt", input_shape=(1, 3, 416, 416))
        exit()

    metric = dataset.load_voc_metric()
    metric.reset()
    def yolov3(data, label):
        def net(data):
            tmp = base_graph(data.as_in_context(ctx))
            outs = top_graph(*tmp)
            return outs
        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    net2 = mx.gluon.nn.SymbolBlock(qsym,
            [mx.sym.var(n) for n in qbase_inputs_ext])
    utils.load_parameters(net2, qparams, ctx=qctx)
    net2_metric = dataset.load_voc_metric()
    net2_metric.reset()
    def mrt_quantize(data, label):
        def net(data):
            data = sim.load_real_data(data, 'data', qbase_inputs_ext)
            outs = net2(data.astype("float64").as_in_context(qctx))
            outs = [o.as_in_context(ctx) / oscales2[i] \
                    for i, o in enumerate(outs)]
            return outs
        acc = validate_data(net, data, label, net2_metric)
        return "{:6.2%}".format(acc)

    utils.multi_validate(yolov3, data_iter_func,
            mrt_quantize,
            iter_num=iter_num, logger=logger)
Example #16
        # Save
        for key, value in {
                'params': params
                #                        , 'loss_history': nd.array(loss_history)
                #                                  , 'loss_v_history': nd.array(loss_v_history)
                #                                  , 'moving_loss_history': nd.array(moving_loss_history)
                #                                  , 'test_accuracy_history': nd.array(test_accuracy_history)
                #                                  , 'train_accuracy_history': nd.array(train_accuracy_history)
        }.items():

            # if train_accuracy_history[-1] == 1 and test_accuracy_history[-1] >= max(test_accuracy_history_final):
            #     test_accuracy_history_final.append(test_accuracy_history[-1])
            #     nd.save('/output/info_%s/%s' %(str(SNR), key), value)
            #     nd.save('./output/info_%s/%s' %(str(SNR), key), value)
            # else:
            #     pass
            nd.save("./%s/%s_%s" % (address, key, index + 1), value)
    print('best_params_epoch:', best_params_epoch)

    params_ = nd.load('./SNR%s_PLB/params_%s' %
                      (int(snr * 10), best_params_epoch))
    os.system('rm `ls ./%s/params_*|egrep -v ./%s/params_%s`' %
              (address, address, best_params_epoch))

# floyd run --gpu \
# --data wctttty/datasets/gw_colored8192/2:GW_data \
# --data wctttty/datasets/ligonose9_9000_8192/7:ligo_localnoise_9_9000_8192_1 \
# --data wctttty/datasets/ligonose9_9000_8192/6:ligo_localnoise_9_9000_8192_2 \
# -m "PLB_oldversion" \
# "bash setup_floydhub.sh && python run_old_PLB.py"
Example #17
def GRU(epoch=100, batch_size=100, save_period=100, load_period=100, learning_rate=0.1, ctx=mx.gpu(0)):

    train_data , test_data = FashionMNIST(batch_size)

    #network parameter
    time_step = 28
    num_inputs = 28
    num_hidden = 200
    num_outputs = 10

    path = "weights/FashionMNIST_GRUweights-{}".format(load_period)

    if os.path.exists(path):

        print("loading weights")
        [wxz, wxr, wxh, whz, whr, whh, bz, br, bh, why, by] = nd.load(path)  # weights load
        wxz = wxz.as_in_context(ctx)
        wxr = wxr.as_in_context(ctx)
        wxh = wxh.as_in_context(ctx)

        whz = whz.as_in_context(ctx)
        whr = whr.as_in_context(ctx)
        whh = whh.as_in_context(ctx)

        bz = bz.as_in_context(ctx)
        br = br.as_in_context(ctx)
        bh = bh.as_in_context(ctx)

        why = why.as_in_context(ctx)
        by = by.as_in_context(ctx)
        params = [wxz, wxr, wxh, whz, whr, whh, bz, br, bh, why, by]

    else:
        print("initializing weights")

        with ctx:
            wxz = nd.random.normal(loc=0, scale=0.01, shape=(num_hidden, num_inputs))
            wxr = nd.random.normal(loc=0, scale=0.01, shape=(num_hidden, num_inputs))
            wxh = nd.random.normal(loc=0, scale=0.01, shape=(num_hidden, num_inputs))

            whz = nd.random.normal(loc=0, scale=0.01, shape=(num_hidden, num_hidden))
            whr = nd.random.normal(loc=0, scale=0.01, shape=(num_hidden, num_hidden))
            whh = nd.random.normal(loc=0, scale=0.01, shape=(num_hidden, num_hidden))

            bz = nd.random.normal(loc=0,scale=0.01,shape=(num_hidden,))
            br = nd.random.normal(loc=0,scale=0.01,shape=(num_hidden,))
            bh = nd.random.normal(loc=0,scale=0.01,shape=(num_hidden,))

            why = nd.random.normal(loc=0,scale=0.1,shape=(num_outputs , num_hidden))
            by = nd.random.normal(loc=0,scale=0.1,shape=(num_outputs,))

        params = [wxz , wxr , wxh , whz, whr, whh, bz, br, bh, why , by]

    # attach gradient!!!
    for param in params:
        param.attach_grad()

    # GRU cell unrolled over the time steps, followed by a fully connected output layer
    def GRU_Cell(input, state):
        for x in input:
            z_t = nd.Activation(nd.FullyConnected(data=x,weight=wxz,no_bias=True,num_hidden=num_hidden)+
                                nd.FullyConnected(data=state,weight=whz,no_bias=True,num_hidden=num_hidden)+bz,act_type="sigmoid")
            r_t = nd.Activation(nd.FullyConnected(data=x,weight=wxr,no_bias=True,num_hidden=num_hidden)+
                                nd.FullyConnected(data=state,weight=whr,no_bias=True,num_hidden=num_hidden)+br,act_type="sigmoid")
            g_t = nd.Activation(nd.FullyConnected(data=x,weight=wxh,no_bias=True,num_hidden=num_hidden)+
                                nd.FullyConnected(data=r_t*state,weight=whh,no_bias=True,num_hidden=num_hidden)+bh,act_type="tanh")

            state = nd.multiply(z_t,state) + nd.multiply(1-z_t,g_t)

        output = nd.FullyConnected(data=state, weight=why, bias=by, num_hidden=num_outputs)
        output = nd.softmax(data=output)
        return output, state

    def cross_entropy(output, label):
        return - nd.sum(label * nd.log(output), axis=0 , exclude=True)

    #Adam optimizer
    state=[]
    optimizer=mx.optimizer.Adam(rescale_grad=1,learning_rate=learning_rate)

    for param in params:
        state.append(optimizer.create_state(0,param))

    for i in tqdm(range(1,epoch+1,1)):

        for data,label in train_data:

            states = nd.zeros(shape=(data.shape[0], num_hidden), ctx=ctx)
            data = data.as_in_context(ctx)
            data = data.reshape(shape=(-1,time_step,num_inputs))
            data=nd.transpose(data=data,axes=(1,0,2))
            label = label.as_in_context(ctx)
            label = nd.one_hot(label , num_outputs)

            with autograd.record():
                outputs, states = GRU_Cell(data, states)
                loss = cross_entropy(outputs,label) # (batch_size,)
            loss.backward()

            cost = nd.mean(loss).asscalar()
            for j,param in enumerate(params):
                optimizer.update(0,param,param.grad,state[j])

        test_accuracy = evaluate_accuracy(test_data, time_step, num_inputs, num_hidden, GRU_Cell, ctx)
        print(" epoch : {} , last batch cost : {}".format(i,cost))
        print("Test_acc : {0:0.3f}%".format(test_accuracy * 100))

        #weight_save
        if i % save_period==0:
            if not os.path.exists("weights"):
                os.makedirs("weights")
            print("saving weights")
            nd.save("weights/FashionMNIST_GRUweights-{}".format(i),params)

    test_accuracy = evaluate_accuracy(test_data, time_step, num_inputs, num_hidden, GRU_Cell, ctx)
    print("Test_acc : {0:0.3f}%".format(test_accuracy * 100))
    return "optimization completed"
Example #18
def test_mx_quantize(batch_size=10, iter_num=10):
    logger = logging.getLogger("log.test.mx.quantize")

    ctx = [mx.gpu(int(i)) for i in "1,3".split(',') if i.strip()]
    inputs_ext = { 'data': {
        'shape': (batch_size, 3, 224, 224),
    }}
    inputs = [mx.sym.var(n) for n in inputs_ext]

    data_iter = ds.load_imagenet_rec(batch_size)
    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]
    data, _ = data_iter_func()

    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()
    def mobilenet(data, label):
        data = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0, even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    calib_ctx = mx.gpu(1)
    sym_fname, param_fname = load_fname(version)
    sym, params = mx.sym.load(sym_fname), nd.load(param_fname)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    if True:
        if True:
            mrt = _mrt.MRT(sym, params, inputs_ext)
            mrt.set_data('data', data)
            mrt.calibrate()
            # [ 0.0008745864 0.03330660510427334 ] 0.6670066884888368 0.7753906
            # mrt.set_threshold("mobilenet0_dense0_weight", 0.67)
            # # [ -0.0036011334 0.054821780899052534 ] 1.100036751338784 1.4626989
            # mrt.set_threshold("mobilenet0_conv24_batchnorm24_fwd_weight", 1.1)
            # # [ 0.013243316 1.7543557133786065 ] 70.18747185088569 94.66275
            # mrt.set_threshold("mobilenet0_conv23_batchnorm23_fwd_weight", 35.10)
            # # [ -0.0016149869 0.05713169649243355 ] 1.1442489167675376 1.7122083
            # mrt.set_threshold("mobilenet0_conv20_batchnorm20_fwd_weight", 1.144)
            # # [ -0.0015804865 0.04523811489343643 ] 0.9063427844084799 1.0745146
            # mrt.set_threshold("mobilenet0_conv16_batchnorm16_fwd_weight", 0.90)
            # # [ 0.4315614 2.447332109723772 ] 49.37820360490254 63.959927
            # mrt.set_threshold("mobilenet0_conv2_batchnorm2_fwd", 49.37)
            # # [ 0.9770754 1.3392452512468611 ] 27.761980422905516 40.729546
            # mrt.set_threshold("mobilenet0_relu2_fwd", 27.76)
            # [ 1.0975745 1.0489919010632773 ] 22.077412493692915 23.784576
            # mrt.set_threshold("mobilenet0_relu4_fwd", 22.08)
            # # [ 0.9885562 2.360489403014386 ] 48.19834426651407 69.22121
            # mrt.set_threshold("mobilenet0_conv5_batchnorm5_fwd", 48.2)
            # # [ 0.7895588 1.0544661745870065 ] 21.878882319617176 30.95745
            # mrt.set_threshold("mobilenet0_relu17_fwd", 21.88)
            # # [ 0.8717863 1.0887600296120434 ] 22.646986888608513 28.265652
            # mrt.set_threshold("mobilenet0_relu19_fwd", 22.65)
            # # [ 0.35124516 0.6501711574631898 ] 13.354668314135012 20.770807
            # mrt.set_threshold("mobilenet0_relu20_fwd", 13.35)
            # # [ 0.9378179 1.110470714216975 ] 23.147232155910086 27.886068
            # mrt.set_threshold("mobilenet0_relu21_fwd", 23.15)
            # # [ 0.36263302 0.6352599878026505 ] 13.067832775738754 17.18809
            # mrt.set_threshold("mobilenet0_relu22_fwd", 13.07)
            # # [ 0.19875833 0.49999100821358816 ] 10.198578498193196 16.625143
            # mrt.set_threshold("mobilenet0_relu24_fwd", 10.2)
            # # [ 0.32357717 1.6308352606637138 ] 65.55698759215218 75.84912
            # mrt.set_threshold("mobilenet0_conv25_batchnorm25_fwd", 32.94)
            # # [ 0.36793178 1.512995992388044 ] 30.62785163096019 49.464615
            # mrt.set_threshold("mobilenet0_relu26_fwd", 30.63)
            # # [ 18.028658 38.61970520019531 ] 790.4227619171143 805.51886
            # mrt.set_threshold("sum0", 790.423)
            mrt.set_output_prec(8)
            qsym, qparams, inputs_ext = mrt.quantize()
        else:
            inputs_ext['data']['data'] = data
            th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=calib_ctx)
            qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, th_dict)
            qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs)
        dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        open(dump_sym, "w").write(qsym.tojson())

        dump_sym, dump_params = load_fname(version, "nnvm.compile")
        nnvm_sym, nnvm_params = spass.mxnet_to_nnvm(qsym, qparams, inputs_ext)
        spass.cvm_build(nnvm_sym, nnvm_params, inputs_ext, dump_sym, dump_params)

    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext,) = sim.load_ext(dump_ext)
    net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()
    def cvm_quantize(data, label):
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data, ctx_list=ctx, batch_axis=0, even_split=False)
        res = [net2.forward(d) for d in data]
        res = nd.concatenate(res)
        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    utils.multi_validate(mobilenet, data_iter_func,
            cvm_quantize,
            iter_num=iter_num, logger=logger)
Example #19
    print('[%d/%d] Loss_G: %.4f Loss_D: %.4f' % (i, num_train_iters,  mx.nd.mean(loss_generator).asscalar(), mx.nd.mean(loss_discrim).asscalar()))
        # # transform both dslr and enhanced images to grayscale
        # enhanced_gray = tf.reshape(tf.image.rgb_to_grayscale(enhanced), [-1, PATCH_WIDTH * PATCH_HEIGHT])
        # dslr_gray = tf.reshape(tf.image.rgb_to_grayscale(dslr_image), [-1, PATCH_WIDTH * PATCH_HEIGHT])

    print(np.array(enhanced_images_gray.asnumpy()).shape)
    print(np.array(dslr_images_gray.asnumpy()).shape)
    print(np.array(adversarial_.asnumpy()).shape)
    print(np.array(discrim_predictions.asnumpy()).shape)
    print(np.array(discrim_target.asnumpy()).shape)

    print(np.array(enhanced_vgg.asnumpy()).shape)
    print(np.array(dslr_vgg.asnumpy()).shape)
    arg_names = set(y_enhanced.list_arguments())
    aux_names = set(y_enhanced.list_auxiliary_states())
    arg_dict = {}
    for name, param in enhanced.collect_params().items():
        if name in arg_names:
            arg_dict['arg:%s' % name] = param._reduce()
        else:
            assert name in aux_names
            arg_dict['aux:%s' % name] = param._reduce()

    ndarray.save('./models/dlsr.params', arg_dict)
    # model.collect_params().save('./models/CAE_mxnet.params')
    y_enhanced.save('./models/dlsr.json')


Example #20
    def save(self, symbol_file, params_file):
        """ Model dump to disk. """
        with open(symbol_file, 'w') as fout:
            fout.write(self.symbol.tojson())
        nd.save(params_file, self.params)
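The matching load is symmetric; a minimal sketch using the real mx.sym.load and nd.load APIs:

    def load(self, symbol_file, params_file):
        """ Model restore from disk (hypothetical counterpart to save). """
        self.symbol = mx.sym.load(symbol_file)
        self.params = nd.load(params_file)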
Example #21
import mxnet as mx
from mxnet import gluon
from mxnet import ndarray as nd
from mxnet import init

x = nd.ones(3)
y = nd.zeros(4)

# save ndarray
filename = './data/test1.params'
nd.save(filename, [x, y])  # a list

# load ndarray
a, b = nd.load(filename)
print(a == x)
print(b == y)

# other data type
mydict = {'x': x, 'y': y}
filename = './data/test2.params'
nd.save(filename, mydict)

c = nd.load(filename)
print(c)


# save & load gluon model parameters
def get_net():
    net = gluon.nn.Sequential()
    with net.name_scope():
        net.add(gluon.nn.Dense(10, activation='relu'))
Example #22
def multiclass_logistic_regression(epoch=100,
                                   batch_size=10,
                                   save_period=10,
                                   load_period=100,
                                   weight_decay=0.001,
                                   learning_rate=0.1,
                                   dataset="MNIST",
                                   ctx=mx.gpu(0)):

    #data selection
    if dataset == "MNIST":
        train_data, test_data = MNIST(batch_size)
    elif dataset == "CIFAR10":
        train_data, test_data = CIFAR10(batch_size)
    elif dataset == "FashionMNIST":
        train_data, test_data = FashionMNIST(batch_size)
    else:
        return "The dataset does not exist."

    # data structure
    if dataset == "MNIST" or dataset == "FashionMNIST":
        num_inputs = 28 * 28
    elif dataset == "CIFAR10":
        num_inputs = 32 * 32
    num_outputs = 10

    if dataset == "MNIST":
        path = "weights/MNIST_weights-{}".format(load_period)
    elif dataset == "FashionMNIST":
        path = "weights/FashionMNIST_weights-{}".format(load_period)
    elif dataset == "CIFAR10":
        path = "weights/CIFAR10_weights-{}".format(load_period)

    if os.path.exists(path):
        print("loading weights")
        [W, B] = nd.load(path)  # weights load
        W = W.as_in_context(ctx)
        B = B.as_in_context(ctx)
        params = [W, B]
    else:
        print("initializing weights")
        with ctx:
            W = nd.random.normal(loc=0,
                                 scale=0.01,
                                 shape=(num_inputs, num_outputs))
            B = nd.random.normal(loc=0, scale=0.01, shape=num_outputs)
        params = [W, B]

    # attach gradient!!!
    for i, param in enumerate(params):
        param.attach_grad()

    def network(X):
        Y = nd.dot(X, W) + B
        softmax_Y = nd.softmax(Y)
        return softmax_Y

    def cross_entropy(output, label):
        return -nd.sum(label * nd.log(output), axis=1)

    #Adam optimizer
    state = []
    optimizer = mx.optimizer.Adam(rescale_grad=1, learning_rate=learning_rate)
    for i, param in enumerate(params):
        state.append(optimizer.create_state(0, param))

    def SGD(params, lr, wd, bs):
        for param in params:
            param -= ((lr * param.grad) / bs + wd * param)

    for i in tqdm(range(1, epoch + 1, 1)):
        for data, label in train_data:
            if dataset == "CIFAR10":
                data = nd.slice_axis(data=data, axis=3, begin=0, end=1)
            data = data.as_in_context(ctx).reshape((-1, num_inputs))
            label = label.as_in_context(ctx)
            label = nd.one_hot(label, num_outputs)

            with autograd.record():
                output = network(data)

                #loss definition
                loss = cross_entropy(output, label)  # (batch_size,)
                cost = nd.mean(loss).asscalar()

            loss.backward()

            for j, param in enumerate(params):
                optimizer.update(0, param, param.grad, state[j])

            #SGD(params, learning_rate , weight_decay , batch_size)

        print(" epoch : {} , last batch cost : {}".format(i, cost))

        #weight_save
        if i % save_period == 0:

            if not os.path.exists("weights"):
                os.makedirs("weights")

            print("saving weights")
            if dataset == "MNIST":
                nd.save("weights/MNIST_weights-{}".format(i), params)

            elif dataset == "CIFAR10":
                nd.save("weights/CIFAR10_weights-{}".format(i), params)

            elif dataset == "FashionMNIST":
                nd.save("weights/FashionMNIST_weights-{}".format(i), params)

    test_accuracy = evaluate_accuracy(test_data, num_inputs, network, ctx,
                                      dataset)
    print("Test_acc : {}".format(test_accuracy))

    return "optimization completed"
Example #23
def test_sym_pass(batch_size=10, iter_num=10, quantize=True):
    logger = logging.getLogger("log.test.sym.pass")
    calib_ctx = mx.gpu(1)
    ctx = [mx.gpu(int(i)) for i in "1,2,3,4".split(',') if i.strip()]
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, 224, 224),
        }
    }
    inputs = [mx.sym.var(name) for name in inputs_ext]

    logger.info("load dataset, symbol and parameters")
    # load dataset and iter function
    data_iter = ds.load_imagenet_rec(batch_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

    data, _ = data_iter_func()

    # load original model for accuracy
    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()

    def shufflenet(data, label):
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    if quantize:
        # load original model
        sym_fname, param_fname = load_fname(version)
        sym, params = mx.sym.load(sym_fname), nd.load(param_fname)
        sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)

        # quantize process
        mrt = _mrt.MRT(sym, params, inputs_ext)  # initialize
        mrt.set_data('data', data)  # set input data
        mrt.calibrate(ctx=calib_ctx)  # calibration
        mrt.set_output_prec(8)  # set output prec, do nothing by default
        qsym, qparams, inputs_ext = mrt.quantize()  # quantization

        # dump quantized model
        dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize",
                                                     True)
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        open(dump_sym, "w").write(qsym.tojson())

    if False:
        # convert to cvm executor model
        inputs_ext['data']['shape'] = (1, 3, 224, 224)
        nnvm_sym, nnvm_params = spass.mxnet_to_nnvm(qsym, qparams, inputs_ext)
        spass.cvm_build(nnvm_sym, nnvm_params, inputs_ext,
                        *load_fname(version, "nnvm"))

    # load quantized model for accuracy
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    inputs = [mx.sym.var(n) for n in inputs_ext]
    net3 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)

    # net3 = mx.gluon.nn.SymbolBlock(qsym, inputs)
    # utils.load_parameters(net3, qparams, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()

    def cvm_quantize(data, label):
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net3.forward(d) for d in data]
        res = nd.concatenate(res)

        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    # compare accuracy between models
    utils.multi_validate(shufflenet,
                         data_iter_func,
                         cvm_quantize,
                         iter_num=iter_num,
                         logger=logger)
Example #24
def test_mrt_quant(batch_size=1, iter_num=10):
    logger = logging.getLogger("log.test.mrt.quantize")

    ctx = mx.gpu(1)
    qctx = mx.gpu(3)
    input_size = 512
    h, w = input_size, input_size
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, h, w),
        }
    }

    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)

    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    sym_file, param_file = load_fname()
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    keys = [
        "ssd0_multiperclassdecoder0_concat0",
        "ssd0_multiperclassdecoder0__mulscalar0",
        "ssd0_multiperclassdecoder0_slice_axis0",
        "ssd0_multiperclassdecoder0_zeros_like1",
        "ssd0_normalizedboxcenterdecoder0_concat0",
    ]
    base, base_params, base_inputs_ext, top, top_params, top_inputs_ext \
            = _mrt.split_model(sym, params, inputs_ext, keys)
    dump_sym, dump_params = load_fname("mrt.base")
    open(dump_sym, "w").write(base.tojson())
    nd.save(dump_params, base_params)
    dump_sym, dump_params, dump_ext = load_fname("mrt.top", True)
    open(dump_sym, "w").write(top.tojson())
    nd.save(dump_params, top_params)
    sim.save_ext(dump_ext, top_inputs_ext)

    dump_sym, dump_params = load_fname("mrt.base")
    base, base_params = mx.sym.load(dump_sym), nd.load(dump_params)
    dump_sym, dump_params, dump_ext = load_fname("mrt.top", True)
    top, top_params = mx.sym.load(dump_sym), nd.load(dump_params)
    (top_inputs_ext, ) = sim.load_ext(dump_ext)

    base_inputs = [mx.sym.var(n) for n in inputs_ext]
    base_graph = mx.gluon.nn.SymbolBlock(base, base_inputs)
    utils.load_parameters(base_graph, base_params, ctx=ctx)

    top_inputs = [mx.sym.var(n) for n in top_inputs_ext]
    top_graph = mx.gluon.nn.SymbolBlock(top, top_inputs)
    utils.load_parameters(top_graph, top_params, ctx=ctx)

    metric = dataset.load_voc_metric()
    metric.reset()

    def yolov3(data, label):
        def net(data):
            tmp = base_graph(data.as_in_context(ctx))
            outs = top_graph(*tmp)
            return outs

        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    # utils.multi_validate(yolov3, data_iter_func,
    # iter_num=iter_num, logger=logger)
    # exit()

    if True:
        mrt = _mrt.MRT(base, base_params, inputs_ext)
        for i in range(16):
            data, _ = data_iter_func()
            mrt.set_data('data', data)
            th_dict = mrt.calibrate(ctx=ctx)
        _, _, dump_ext = load_fname("mrt.dict", True)
        sim.save_ext(dump_ext, th_dict)

    _, _, dump_ext = load_fname("mrt.dict", True)
    (th_dict, ) = sim.load_ext(dump_ext)
    if True:
        mrt = _mrt.MRT(base, base_params, base_inputs_ext)
        mrt.set_th_dict(th_dict)
        mrt.set_threshold('data', 2.64)
        mrt.set_fixed("ssd0_multiperclassdecoder0_concat0")
        mrt.set_fixed("ssd0_multiperclassdecoder0__mulscalar0")
        mrt.set_fixed("ssd0_multiperclassdecoder0_zeros_like1")
        mrt.set_threshold("ssd0_multiperclassdecoder0_slice_axis0", 1)
        #  mrt.set_threshold("ssd0_normalizedboxcenterdecoder0_concat0", 512)
        mrt.set_output_prec(30)
        qbase, qbase_params, qbase_inputs_ext = mrt.quantize()
        oscales = mrt.get_output_scales()
        maps = mrt.get_maps()
        dump_sym, dump_params, dump_ext = load_fname("mrt.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        nd.save(dump_params, qbase_params)
        sim.save_ext(dump_ext, qbase_inputs_ext, oscales, maps)

    # merge quantize model
    if True:
        qb_sym, qb_params, qb_ext = load_fname("mrt.quantize", True)
        qbase, qbase_params = mx.sym.load(qb_sym), nd.load(qb_params)
        qbase_inputs_ext, oscales, maps = sim.load_ext(qb_ext)

        name_maps = {
            "ssd0_slice_axis41": "ssd0_multiperclassdecoder0_concat0",
            "ssd0_slice_axis42": "ssd0_multiperclassdecoder0_slice_axis0",
            "ssd0_slice_axis43": "ssd0_normalizedboxcenterdecoder0_concat0",
        }
        oscales_dict = dict(zip([c.attr('name') for c in base], oscales))
        oscales = [oscales_dict[name_maps[c.attr('name')]] for c in top]

        def box_nms(node, params, graph):
            name, op_name = node.attr('name'), node.attr('op_name')
            childs, attr = sutils.sym_iter(
                node.get_children()), node.list_attr()
            if op_name == '_greater_scalar':
                valid_thresh = sutils.get_attr(attr, 'scalar', 0)
                attr['scalar'] = int(valid_thresh * oscales[1])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            elif op_name == '_contrib_box_nms':
                valid_thresh = sutils.get_attr(attr, 'valid_thresh', 0)
                attr['valid_thresh'] = int(valid_thresh * oscales[1])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            return node

        qsym, qparams = _mrt.merge_model(qbase, qbase_params, top, top_params,
                                         maps, box_nms)
        sym_file, param_file, ext_file = load_fname("mrt.all.quantize", True)
        open(sym_file, "w").write(qsym.tojson())
        nd.save(param_file, qparams)
        sim.save_ext(ext_file, qbase_inputs_ext, oscales)

    if True:
        dump_sym, dump_params, dump_ext = load_fname("mrt.all.quantize", True)
        net2_inputs_ext, oscales = sim.load_ext(dump_ext)
        inputs = [mx.sym.var(n) for n in net2_inputs_ext]
        net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=qctx)
        net2_metric = dataset.load_voc_metric()
        net2_metric.reset()

        def mrt_quantize(data, label):
            def net(data):
                data = sim.load_real_data(data, 'data', net2_inputs_ext)
                outs = net2(data.as_in_context(qctx))
                outs = [
                    o.as_in_context(ctx) / oscales[i]
                    for i, o in enumerate(outs)
                ]
                return outs

            acc = validate_data(net, data, label, net2_metric)
            return "{:6.2%}".format(acc)

    utils.multi_validate(yolov3,
                         data_iter_func,
                         mrt_quantize,
                         iter_num=iter_num,
                         logger=logger)
Example #25
def CNN(epoch=100, batch_size=256, save_period=10, load_period=100, weight_decay=0.001, learning_rate=0.1, dataset="MNIST", ctx=mx.cpu(0)):

    # batch normalization for fully connected and 2D convolution layers only
    def BN(X, gamma, beta, momentum=0.9, eps=1e-5, scope_name="", is_training=True):

        if len(X.shape)==2 :
            mean = nd.mean(X,axis=0)
            variance = nd.mean(nd.square(X-mean),axis=0)

            if is_training:
                Normalized_X = (X - mean) / nd.sqrt(variance + eps)
            elif not is_training and not os.path.exists(path1) and epoch == 0:  # no saved moving statistics yet
                Normalized_X = (X - mean) / nd.sqrt(variance + eps)
            else:
                Normalized_X = (X - MOVING_MEANS[scope_name]) / nd.sqrt(MOVING_VARS[scope_name] + eps)

            out=gamma*Normalized_X+beta

        # For a (2D) CNN we normalize over batch_size * height * width for each channel,
        # so gamma and beta have length equal to the channel count.
        # Reference: http://gluon.mxnet.io/chapter04_convolutional-neural-networks/cnn-batch-norm-scratch.html
        elif len(X.shape)==4:
            N , C , H , W = X.shape

            mean = nd.mean(X , axis=(0,2,3)) #normalize batch_size * height * width over each channel
            variance = nd.mean(nd.square(X-mean.reshape((1,C,1,1))),axis=(0,2,3))

            if is_training:
                Normalized_X = (X-mean.reshape((1,C,1,1)))/nd.sqrt(variance.reshape((1,C,1,1))+eps)
            elif not is_training and not os.path.exists(path1) and epoch == 0:  # no saved moving statistics yet
                Normalized_X = (X-mean.reshape((1,C,1,1)))/nd.sqrt(variance.reshape((1,C,1,1))+eps)
            else:
                Normalized_X = (X - MOVING_MEANS[scope_name].reshape((1, C, 1, 1))) / nd.sqrt(MOVING_VARS[scope_name].reshape((1, C, 1, 1)) + eps)

            out=gamma.reshape((1,C,1,1))*Normalized_X+beta.reshape((1,C,1,1))

        if scope_name not in MOVING_MEANS and scope_name not in MOVING_VARS:
            MOVING_MEANS[scope_name] = mean
            MOVING_VARS[scope_name] = variance
        else:
            MOVING_MEANS[scope_name] = MOVING_MEANS[scope_name] * momentum + mean * (1.0 - momentum)
            MOVING_VARS[scope_name] = MOVING_VARS[scope_name] * momentum + variance * (1.0 - momentum)

        return out

    #data selection
    if dataset =="MNIST":
        train_data , test_data = MNIST(batch_size)
    elif dataset == "CIFAR10":
        train_data, test_data = CIFAR10(batch_size)
    elif dataset == "FashionMNIST":
        train_data, test_data = FashionMNIST(batch_size)
    else:
        return "The dataset does not exist."

    # data structure
    if dataset == "MNIST" or dataset =="FashionMNIST":
        color = 1
    elif dataset == "CIFAR10":
        color = 3
    num_outputs = 10

    if dataset == "MNIST":
        path1 = "weights/MNIST_weights-{}".format(load_period)
        path2 = "weights/MNIST_weights_MEANS-{}".format(load_period)
        path3 = "weights/MNIST_weights_VARS-{}".format(load_period)
    elif dataset == "FashionMNIST":
        path1 = "weights/FashionMNIST_weights-{}".format(load_period)
        path2 = "weights/FashionMNIST_weights_MEANS-{}".format(load_period)
        path3 = "weights/FashionMNIST_weights_VARS-{}".format(load_period)
    elif dataset == "CIFAR10":
        path1 = "weights/CIFAR10_weights-{}".format(load_period)
        path2 = "weights/CIFAR10_weights_MEANS-{}".format(load_period)
        path3 = "weights/CIFAR10_weights_VARS-{}".format(load_period)

    if os.path.exists(path1):

        print("loading weights")
        [W1, B1, gamma1, beta1, W2, B2, gamma2, beta2, W3, B3, gamma3, beta3, W4, B4, gamma4, beta4, W5, B5]= nd.load(path1)  # weights load
        MOVING_MEANS = nd.load(path2)
        MOVING_VARS = nd.load(path3)

        # as_in_context returns a new NDArray, so rebuild the dicts rather than
        # discarding the results
        MOVING_MEANS = {k: v.as_in_context(ctx) for k, v in MOVING_MEANS.items()}
        MOVING_VARS = {k: v.as_in_context(ctx) for k, v in MOVING_VARS.items()}

        W1=W1.as_in_context(ctx)
        B1=B1.as_in_context(ctx)
        gamma1=gamma1.as_in_context(ctx)
        beta1=beta1.as_in_context(ctx)
        W2=W2.as_in_context(ctx)
        B2=B2.as_in_context(ctx)
        gamma2=gamma2.as_in_context(ctx)
        beta2=beta2.as_in_context(ctx)
        W3=W3.as_in_context(ctx)
        B3=B3.as_in_context(ctx)
        gamma3=gamma3.as_in_context(ctx)
        beta3=beta3.as_in_context(ctx)
        W4=W4.as_in_context(ctx)
        B4=B4.as_in_context(ctx)
        gamma4=gamma4.as_in_context(ctx)
        beta4=beta4.as_in_context(ctx)
        W5=W5.as_in_context(ctx)
        B5=B5.as_in_context(ctx)

        params = [W1 , B1 , gamma1 , beta1 , W2 , B2 , gamma2 , beta2 , W3 , B3 , gamma3 , beta3 , W4 , B4, gamma4 , beta4 , W5 , B5]

    else:

        print("initializing weights")
        weight_scale=0.1
        BN_weight_scale = 0.01

        MOVING_MEANS, MOVING_VARS = {}, {}

        with ctx:
            W1 = nd.random.normal(loc=0 , scale=weight_scale , shape=(60,color,3,3))
            B1 = nd.random.normal(loc=0 , scale=weight_scale , shape=60)

            gamma1 = nd.random.normal(shape=60, loc=1, scale=BN_weight_scale)
            beta1 = nd.random.normal(shape=60, scale=BN_weight_scale)

            W2 = nd.random.normal(loc=0 , scale=weight_scale , shape=(30,60,6,6))
            B2 = nd.random.normal(loc=0 , scale=weight_scale , shape=30)

            gamma2 = nd.random.normal(shape=30, loc=1, scale=BN_weight_scale)
            beta2 = nd.random.normal(shape=30, scale=BN_weight_scale)

            if dataset == "CIFAR10":
                reshape=750
            elif dataset == "MNIST" or dataset == "FashionMNIST":
                reshape=480

            W3 = nd.random.normal(loc=0 , scale=weight_scale , shape=(120, reshape))
            B3 = nd.random.normal(loc=0 , scale=weight_scale , shape=120)

            gamma3 = nd.random.normal(shape=120, loc=1, scale=BN_weight_scale)
            beta3 = nd.random.normal(shape=120, scale=BN_weight_scale)

            W4 = nd.random.normal(loc=0 , scale=weight_scale , shape=(64, 120))
            B4 = nd.random.normal(loc=0 , scale=weight_scale , shape=64)

            gamma4 = nd.random.normal(shape=64, loc=1, scale=BN_weight_scale)
            beta4 = nd.random.normal(shape=64, scale=BN_weight_scale)

            W5 = nd.random.normal(loc=0 , scale=weight_scale , shape=(num_outputs , 64))
            B5 = nd.random.normal(loc=0 , scale=weight_scale , shape=num_outputs)

        params = [W1 , B1 , gamma1 , beta1 , W2 , B2 , gamma2 , beta2 , W3 , B3 , gamma3 , beta3 , W4 , B4, gamma4 , beta4 , W5 , B5]

    # attach gradient!!!
    for i, param in enumerate(params):
        param.attach_grad()

    # network - similar to lenet5

    '''Convolution parameter
    data: (batch_size, channel, height, width)
    weight: (num_filter, channel, kernel[0], kernel[1])
    bias: (num_filter,)
    out: (batch_size, num_filter, out_height, out_width).
    '''
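
    # Worked derivation of the reshape values above, using the size formula
    # from network() below (output_size = ((input - kernel + 2*padding) / stride) + 1):
    #   MNIST/FashionMNIST (1x28x28): conv 3x3/1 -> 26, pool 2x2/2 -> 13,
    #                                 conv 6x6/1 -> 8,  pool 2x2/2 -> 4,  flatten 30*4*4 = 480
    #   CIFAR10 (3x32x32):            conv 3x3/1 -> 30, pool 2x2/2 -> 15,
    #                                 conv 6x6/1 -> 10, pool 2x2/2 -> 5,  flatten 30*5*5 = 750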

    def network(X, is_training=True, drop_rate=0.0): # formula : output_size = ((input - kernel + 2*padding) / stride) + 1
        #data size
        # MNIST,FashionMNIST = (batch size , 1 , 28 ,  28)
        # CIFAR = (batch size , 3 , 32 ,  32)

        C_H1=nd.Activation(data=BN(nd.Convolution(data=X , weight = W1 , bias = B1 , kernel=(3,3) , stride=(1,1) , num_filter=60), gamma1 , beta1 ,scope_name="BN1",is_training=is_training) , act_type="relu") # MNIST : result = ( batch size , 60 , 26 , 26) , CIFAR10 : result = ( batch size , 60 , 30 , 30)
        P_H1=nd.Pooling(data = C_H1 , pool_type = "max" , kernel=(2,2), stride = (2,2)) # MNIST : result = (batch size , 60 , 13 , 13) , CIFAR10 : result = (batch size , 60 , 15 , 15)
        C_H2=nd.Activation(data=BN(nd.Convolution(data=P_H1 , weight = W2 , bias = B2 , kernel=(6,6) , stride=(1,1) , num_filter=30), gamma2 , beta2 ,scope_name="BN2",is_training=is_training), act_type="relu") # MNIST :  result = ( batch size , 30 , 8 , 8), CIFAR10 :  result = ( batch size , 30 , 10 , 10)
        P_H2=nd.Pooling(data = C_H2 , pool_type = "max" , kernel=(2,2), stride = (2,2)) # MNIST : result = (batch size , 30 , 4 , 4) , CIFAR10 : result = (batch size , 30 , 5 , 5)
        P_H2 = nd.flatten(data=P_H2)

        '''FullyConnected parameter
        • data: (batch_size, input_dim)
        • weight: (num_hidden, input_dim)
        • bias: (num_hidden,)
        • out: (batch_size, num_hidden)
        '''
        F_H1 =nd.Activation(BN(nd.FullyConnected(data=P_H2 , weight=W3 , bias=B3 , num_hidden=120), gamma3, beta3 ,scope_name="BN3",is_training=is_training),act_type="relu")
        F_H1 =nd.Dropout(data=F_H1, p=drop_rate)
        F_H2 =nd.Activation(BN(nd.FullyConnected(data=F_H1 , weight=W4 , bias=B4 , num_hidden=64), gamma4, beta4, scope_name="BN4",is_training=is_training),act_type="relu")
        F_H2 =nd.Dropout(data=F_H2, p=drop_rate)
        softmax_Y = nd.softmax(nd.FullyConnected(data=F_H2 ,weight=W5 , bias=B5 , num_hidden=num_outputs))
        return softmax_Y

    def cross_entropy(output, label):
        return - nd.sum(label * nd.log(output), axis=1)

    #Adam optimizer
    state=[]
    optimizer=mx.optimizer.Adam(rescale_grad=1,learning_rate=learning_rate) # rescale_grad=1 leaves gradients summed over the batch; 1.0/batch_size would average them
    for i,param in enumerate(params):
        state.append(optimizer.create_state(0,param))

    def SGD(params, lr , wd , bs):
        # plain SGD with L2 weight decay, kept for reference (the Adam update below is used instead);
        # the weight-decay term is scaled by the learning rate, as in standard SGD
        for param in params:
            param -= lr * (param.grad / bs + wd * param)

    for i in tqdm(range(1,epoch+1,1)):
        for data,label in train_data:
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)
            label = nd.one_hot(label , num_outputs)

            with autograd.record():
                output = network(data,is_training=True,drop_rate=0.0)

                #loss definition
                loss = cross_entropy(output,label) # (batch_size,)
                cost = nd.mean(loss).asscalar()
            loss.backward()

            for j,param in enumerate(params):
                optimizer.update(0,param,param.grad,state[j])

            #SGD(params, learning_rate , weight_decay , batch_size)

        print(" epoch : {} , last batch cost : {}".format(i,cost))

        #weight_save
        if i % save_period==0:

            if not os.path.exists("weights"):
                os.makedirs("weights")

            print("saving weights")
            if dataset=="MNIST":
                nd.save("weights/MNIST_weights-{}".format(i), params)
                nd.save("weights/MNIST_weights_MEANS-{}".format(i), MOVING_MEANS)
                nd.save("weights/MNIST_weights_VARS-{}".format(i), MOVING_VARS)

            elif dataset=="CIFAR10":
                nd.save("weights/CIFAR10_weights-{}".format(i), params)
                nd.save("weights/CIFAR10_weights_MEANS-{}".format(i), MOVING_MEANS)
                nd.save("weights/CIFAR10_weights_VARS-{}".format(i), MOVING_VARS)

            elif dataset=="FashionMNIST":
                nd.save("weights/FashionMNIST_weights-{}".format(i),params)
                nd.save("weights/FashionMNIST_weights_MEANS-{}".format(i), MOVING_MEANS)
                nd.save("weights/FashionMNIST_weights_VARS-{}".format(i), MOVING_VARS)

    test_accuracy = evaluate_accuracy(test_data , network , ctx)
    print("Test_acc : {}".format(test_accuracy))

    return "optimization completed"
Example #28
def CNN(epoch = 100 , batch_size=10, save_period=10 , load_period=100 , weight_decay=0.001 ,learning_rate= 0.1 , dataset = "MNIST", ctx=mx.cpu(0)):

    #data selection
    if dataset =="MNIST":
        train_data , test_data = MNIST(batch_size)
    elif dataset == "CIFAR10":
        train_data, test_data = CIFAR10(batch_size)
    elif dataset == "FashionMNIST":
        train_data, test_data = FashionMNIST(batch_size)
    else:
        return "The dataset does not exist."


    # data structure
    if dataset == "MNIST" or dataset =="FashionMNIST":
        color = 1
    elif dataset == "CIFAR10":
        color = 3
    num_outputs = 10

    if dataset == "MNIST":
        path = "weights/MNIST_weights-{}".format(load_period)
    elif dataset == "FashionMNIST":
        path = "weights/FashionMNIST_weights-{}".format(load_period)
    elif dataset == "CIFAR10":
        path = "weights/CIFAR10_weights-{}".format(load_period)

    if os.path.exists(path):
        print("loading weights")
        # move every loaded array onto the target device in one pass
        params = [p.as_in_context(ctx) for p in nd.load(path)]  # weights load
        [W1, B1, W2, B2, W3, B3, W4, B4, W5, B5] = params
    else:
        print("initializing weights")
        with ctx:
            W1 = nd.random.normal(loc=0 , scale=0.1 , shape=(60,color,3,3))
            B1 = nd.random.normal(loc=0 , scale=0.1 , shape=60)

            W2 = nd.random.normal(loc=0 , scale=0.1 , shape=(30,60,6,6))
            B2 = nd.random.normal(loc=0 , scale=0.1 , shape=30)

            if dataset == "CIFAR10":
                reshape=750
            elif dataset == "MNIST" or dataset == "FashionMNIST":
                reshape=480

            W3 = nd.random.normal(loc=0 , scale=0.1 , shape=(120, reshape))
            B3 = nd.random.normal(loc=0 , scale=0.1 , shape=120)

            W4 = nd.random.normal(loc=0 , scale=0.1 , shape=(64, 120))
            B4 = nd.random.normal(loc=0 , scale=0.1 , shape=64)

            W5 = nd.random.normal(loc=0 , scale=0.1 , shape=(num_outputs , 64))
            B5 = nd.random.normal(loc=0 , scale=0.1 , shape=num_outputs)

        params = [W1 , B1 , W2 , B2 , W3 , B3 , W4 , B4, W5 , B5]
        
    # attach gradient!!!
    for i, param in enumerate(params):
        param.attach_grad()

    # network - similar to lenet5 

    '''Convolution parameter
    data: (batch_size, channel, height, width)
    weight: (num_filter, channel, kernel[0], kernel[1])
    bias: (num_filter,)
    out: (batch_size, num_filter, out_height, out_width).
    '''

    def network(X,drop_rate=0.0): # formula : output_size = ((input - kernel + 2*padding) / stride) + 1
        #data size 
        # MNIST,FashionMNIST = (batch size , 1 , 28 ,  28)
        # CIFAR = (batch size , 3 , 32 ,  32)

        C_H1=nd.Activation(data= nd.Convolution(data=X , weight = W1 , bias = B1 , kernel=(3,3) , stride=(1,1) , num_filter=60) , act_type="relu") # MNIST : result = ( batch size , 60 , 26 , 26) , CIFAR10 : result = ( batch size , 60 , 30 , 30)
        P_H1=nd.Pooling(data = C_H1 , pool_type = "max" , kernel=(2,2), stride = (2,2)) # MNIST : result = (batch size , 60 , 13 , 13) , CIFAR10 : result = (batch size , 60 , 15 , 15)
        C_H2=nd.Activation(data= nd.Convolution(data=P_H1 , weight = W2 , bias = B2 , kernel=(6,6) , stride=(1,1) , num_filter=30), act_type="relu") # MNIST :  result = ( batch size , 30 , 8 , 8), CIFAR10 :  result = ( batch size , 30 , 10 , 10)
        P_H2=nd.Pooling(data = C_H2 , pool_type = "max" , kernel=(2,2), stride = (2,2)) # MNIST : result = (batch size , 30 , 4 , 4) , CIFAR10 : result = (batch size , 30 , 5 , 5)
        P_H2 = nd.flatten(data=P_H2)

        '''FullyConnected parameter
        • data: (batch_size, input_dim)
        • weight: (num_hidden, input_dim)
        • bias: (num_hidden,)
        • out: (batch_size, num_hidden)
        '''
        F_H1 =nd.Activation(nd.FullyConnected(data=P_H2 , weight=W3 , bias=B3 , num_hidden=120),act_type="sigmoid")
        F_H1 =nd.Dropout(data=F_H1, p=drop_rate)
        F_H2 =nd.Activation(nd.FullyConnected(data=F_H1 , weight=W4 , bias=B4 , num_hidden=64),act_type="sigmoid")
        F_H2 =nd.Dropout(data=F_H2, p=drop_rate)
        softmax_Y = nd.softmax(nd.FullyConnected(data=F_H2 ,weight=W5 , bias=B5 , num_hidden=num_outputs))
        return softmax_Y

    def cross_entropy(output, label):
        return - nd.sum(label * nd.log(output), axis=1)

    #Adam optimizer
    state=[]
    optimizer=mx.optimizer.Adam(rescale_grad=1,learning_rate=learning_rate)
    for i,param in enumerate(params):
        state.append(optimizer.create_state(0,param))

    def SGD(params, lr , wd , bs):
        # plain SGD with L2 weight decay, kept for reference (the Adam update below is used instead);
        # the weight-decay term is scaled by the learning rate, as in standard SGD
        for param in params:
            param -= lr * (param.grad / bs + wd * param)

    for i in tqdm(range(1,epoch+1,1)):
        for data,label in train_data:
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)
            label = nd.one_hot(label , num_outputs)

            with autograd.record():
                output = network(data,drop_rate=0.2)

                #loss definition
                loss = cross_entropy(output,label) # (batch_size,)
                cost = nd.mean(loss).asscalar()

            loss.backward()
            for j,param in enumerate(params):
                optimizer.update(0,param,param.grad,state[j])

            #SGD(params, learning_rate , weight_decay , batch_size)

        print(" epoch : {} , last batch cost : {}".format(i,cost))

        #weight_save
        if i % save_period==0:

            if not os.path.exists("weights"):
                os.makedirs("weights")

            print("saving weights")
            if dataset=="MNIST":
                nd.save("weights/MNIST_weights-{}".format(i),params)

            elif dataset=="CIFAR10":
                nd.save("weights/CIFAR10_weights-{}".format(i),params)

            elif dataset=="FashionMNIST":
                nd.save("weights/FashionMNIST_weights-{}".format(i),params)

    test_accuracy = evaluate_accuracy(test_data , network , ctx)
    print("Test_acc : {}".format(test_accuracy))

    return "optimization completed"
Example #29
def test_sym_pass(batch_size=10, iter_num=10, quantize=True):

    logger = logging.getLogger("log.test.sym.pass")

    calib_ctx = mx.gpu(2)
    ctx = [mx.gpu(int(i)) for i in "1,2,3,4".split(',') if i.strip()]
    input_size = 299
    version = "v3"
    h, w = input_size, input_size
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, h, w),
        }
    }
    inputs = [mx.sym.var(name) for name in inputs_ext]

    logger.info("load dataset, symbol and parameters")
    data_iter = ds.load_imagenet_rec(batch_size, input_size)

    def data_iter_func():
        data = data_iter.next()
        return data.data[0], data.label[0]

    net1 = utils.load_model(*load_fname(version), inputs, ctx=ctx)
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()

    def inception_v3(data, label):
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net1.forward(d) for d in data]
        res = nd.concatenate(res)
        acc_top1.update(label, res)
        _, top1 = acc_top1.get()
        acc_top5.update(label, res)
        _, top5 = acc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    if quantize:
        sym_file, param_file = load_fname(version)
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
        data, _ = data_iter_func()
        if True:
            dump_sym, dump_params, dump_ext = load_fname(version, "mrt", True)
            mrt = _mrt.MRT(sym, params, inputs_ext)
            mrt.set_data('data', data)
            mrt.calibrate(ctx=calib_ctx)
            mrt.set_output_prec(8)
            qsym, qparams, inputs_ext = mrt.quantize()
        else:
            dump_sym, dump_params, dump_ext = load_fname(
                version, "sym.quantize", True)
            inputs_ext['data']['data'] = data
            th_dict = calib.sym_calibrate(sym,
                                          params,
                                          inputs_ext,
                                          ctx=calib_ctx)
            qsym, qparams, precs, _ = calib.sym_simulate(
                sym, params, inputs_ext, th_dict)
            qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs)
        sim.save_ext(dump_ext, inputs_ext)
        nd.save(dump_params, qparams)
        with open(dump_sym, "w") as f:
            f.write(qsym.tojson())

    dump_sym, dump_params, dump_ext = load_fname(version, "mrt", True)
    (inputs_ext, ) = sim.load_ext(dump_ext)
    net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
    qacc_top1 = mx.metric.Accuracy()
    qacc_top5 = mx.metric.TopKAccuracy(5)
    qacc_top1.reset()
    qacc_top5.reset()

    def cvm_quantize(data, label):
        data = sim.load_real_data(data, 'data', inputs_ext)
        data = gluon.utils.split_and_load(data,
                                          ctx_list=ctx,
                                          batch_axis=0,
                                          even_split=False)
        res = [net2.forward(d) for d in data]
        res = nd.concatenate(res)
        qacc_top1.update(label, res)
        _, top1 = qacc_top1.get()
        qacc_top5.update(label, res)
        _, top5 = qacc_top5.get()
        return "top1={:6.2%} top5={:6.2%}".format(top1, top5)

    utils.multi_validate(inception_v3,
                         data_iter_func,
                         cvm_quantize,
                         iter_num=iter_num,
                         logger=logger)
Example #30
if True:
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)

    import os
    with open(os.path.expanduser('~/tvm-cvm/data/test_ryt2.json'), 'w') as f:
        f.write(sym.tojson())
    exit()

    qsym, qparams, precs, _ = calib.sym_simulate(sym, params, inputs_ext, data,
                                                 calib_ctx)
    qsym, qparams = calib.sym_realize(qsym, qparams, inputs_ext, precs, "tvm")
    dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
    sim.save_ext(dump_ext, inputs_ext)
    nd.save(dump_params, qparams)
    with open(dump_sym, "w") as f:
        f.write(qsym.tojson())

dump_sym, dump_params, dump_ext = load_fname(version, "sym.quantize", True)
sym, params = mx.sym.load(dump_sym), nd.load(dump_params)
(inputs_ext, ) = sim.load_ext(dump_ext)
inputs = [mx.sym.var(n) for n in inputs_ext]
net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=ctx)
qacc_top1 = mx.metric.Accuracy()
qacc_top5 = mx.metric.TopKAccuracy(5)
qacc_top1.reset()
qacc_top5.reset()


def cvm_quantize(data, label):
    data = sim.load_real_data(data, 'data', inputs_ext)
Example #31
def save(self, path, name):
    emb_fname = os.path.join(path, name + '.emb')
    nd.save(emb_fname, self.emb)
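
One plausible load counterpart, not part of the original snippet, assuming self.emb was saved as a single NDArray (nd.load then returns a one-element list to unwrap):

def load(self, path, name):
    # hypothetical inverse of save() above
    emb_fname = os.path.join(path, name + '.emb')
    self.emb = nd.load(emb_fname)[0]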
Example #32
import mxnet as mx
from mxnet import ndarray as nd

model_name = "resnet-v1-101"

data = nd.load('./%s.params' % model_name)

new_data = dict()
first_conv_weight = 'arg:conv1_weight'
first_conv_bias = 'arg:conv1_bias'
print(data[first_conv_weight].shape)
for k, v in data.items():
    if k == first_conv_weight:
        # reverse the channel axis of the first conv weights (e.g. BGR <-> RGB)
        v = mx.nd.array(v.asnumpy()[:, ::-1, :, :])
        print("CHANGE")
    elif k == first_conv_bias:
        print(v.shape)
    new_data[k] = v

nd.save('%s-rgb-0000.params' % model_name, new_data)
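
To sanity-check the conversion, the written file can be reloaded and the first conv weights compared against the originals with the channel axis reversed once more (a quick check reusing the names defined above):

import numpy as np

check = nd.load('%s-rgb-0000.params' % model_name)
flipped_back = check[first_conv_weight].asnumpy()[:, ::-1, :, :]
# reversing the channel axis twice must recover the original weights
assert np.allclose(flipped_back, data[first_conv_weight].asnumpy())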