Example No. 1
def save_features(model_name, data_train_iter, data_test_iter, ignore=False):
    # skip recomputation if the feature files already exist
    if os.path.exists('features_train_%s.nd' % model_name) and ignore:
        if os.path.exists('features_test_%s.nd' % model_name):
            return

    net = models.get_model(model_name, pretrained=True, ctx=ctx)

    for prefix, data_iter in zip(['train', 'test'],
                                 [data_train_iter, data_test_iter]):
        features = []
        for data in tqdm(data_iter):
            # predict the data slices in parallel across devices
            for data_slice in gluon.utils.split_and_load(data,
                                                         ctx,
                                                         even_split=False):
                feature = net.features(data_slice)
                if 'squeezenet' in model_name:
                    feature = gluon.nn.GlobalAvgPool2D()(feature)
                feature = gluon.nn.Flatten()(feature)
                features.append(feature.as_in_context(mx.cpu()))
            nd.waitall()

        features = nd.concat(*features, dim=0)
        nd.save('features_%s_%s.nd' % (prefix, model_name), features)
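The saved feature files can be read back later with nd.load, e.g. to train a classifier on top of the pre-extracted features. A minimal sketch, assuming the file-name pattern used above and that model_name is still in scope:

features_train = nd.load('features_train_%s.nd' % model_name)[0]  # nd.load returns a list
features_test = nd.load('features_test_%s.nd' % model_name)[0]
print(features_train.shape, features_test.shape)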
Example No. 2
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params):
    """Checkpoint the model data into file.

    Parameters
    ----------
    prefix : str
        Prefix of model name.
    epoch : int
        The epoch number of the model.
    symbol : Symbol
        The input symbol
    arg_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's weights.
    aux_params : dict of str to NDArray
        Model parameter, dict of name to NDArray of net's auxiliary states.
    Notes
    -----
    - ``prefix-symbol.json`` will be saved for symbol.
    - ``prefix-epoch.params`` will be saved for parameters.
    """
    symbol.save('%s-symbol.json' % prefix)
    save_dict = {('arg:%s' % k) : v for k, v in arg_params.items()}
    save_dict.update({('aux:%s' % k) : v for k, v in aux_params.items()})
    param_name = '%s-%04d.params' % (prefix, epoch)
    nd.save(param_name, save_dict)
    logging.info('Saved checkpoint to \"%s\"', param_name)
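Because the parameters are stored with 'arg:'/'aux:' prefixes and the symbol in a separate JSON file, a checkpoint written this way can be restored with mx.model.load_checkpoint, which splits the prefixes back out. A brief sketch, assuming the same prefix and epoch that were passed to save_checkpoint:

import mxnet as mx

sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
mod = mx.mod.Module(symbol=sym, context=mx.cpu())
mod.bind(data_shapes=[('data', (1, 3, 224, 224))], for_training=False)  # input shape is an assumption
mod.set_params(arg_params, aux_params)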
Example No. 3
def test_load_save():
    x = create_2d_tensor(SMALL_Y, LARGE_X)
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'large_tensor')
    nd.save(tmpfile, [x])
    y = nd.load(tmpfile)
    y = y[0]
    assert x[0][0] == y[0][0]
    assert x[-1][-1] == y[-1][-1]
Example No. 4
def save2():
    x = nd.ones(3)
    y = nd.zeros(4)
    dt = {'x': x, 'y': y}
    nd.save('C_4_5_xy', [x, y])
    nd.save('C_4_5_dt', dt)

    x2, y2 = nd.load('C_4_5_xy')
    print(x2, y2)

    dt2 = nd.load('C_4_5_dt')
    print(dt2)
Example No. 5
    def _callback(iter_no, sym, arg, aux):
        """The checkpoint function."""
        if (iter_no + 1) % period == 0:
            if sym is not None:
                sym.save(osp.join(symbol_root_path, '%s-symbol.json' % prefix))

            save_dict = {('arg:%s' % k): v.as_in_context(cpu())
                         for k, v in arg.items()}
            save_dict.update({('aux:%s' % k): v.as_in_context(cpu())
                              for k, v in aux.items()})
            param_name = osp.join(params_root_path,
                                  '%s-%04d.params' % (prefix, iter_no + 1))
            nd.save(param_name, save_dict)
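The (iter_no, sym, arg, aux) signature matches what Module.fit passes to its epoch_end_callback (the stock MXNet helper for this pattern is mx.callback.do_checkpoint), so a callback like this is typically wired in as follows. A hedged sketch, where mod, train_iter and num_epochs are assumed to exist in the surrounding code:

mod.fit(train_iter,                        # mod and train_iter are assumed to exist
        num_epoch=num_epochs,              # num_epochs is an assumed variable
        epoch_end_callback=_callback)      # the checkpoint callback defined above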
Example No. 6
def save_features(model_name, data_test_iter, ignore=False):
    net = models.get_model(model_name, pretrained=True, ctx=ctx)

    for prefix, data_iter in zip(['test'], [data_test_iter]):
        features = []
        for data in tqdm(data_iter):
           
            # predict the data slices in parallel across devices
            for data_slice in gluon.utils.split_and_load(data, ctx, even_split=False):
                feature = net.features(data_slice)
                feature = gluon.nn.Flatten()(feature)
                features.append(feature.as_in_context(mx.cpu()))
            nd.waitall()
        
        features = nd.concat(*features, dim=0)
        nd.save('features_%s_%s.nd' % (prefix, model_name), features)
Example No. 7
def get(prefix):
    array: nd.NDArray = load("%s.param" % prefix)  # 'load' is a project helper that returns a single NDArray
    arrays_on_gpu = [array.as_in_context(mx.gpu(i)) for i in range(CONTEXT_NUM)]
    num_classes = array.shape[0]
    idx = 0
    result = []
    while idx < num_classes:
        for i in range(CONTEXT_NUM):
            try:
                sim, indices = score(arrays_on_gpu[i][idx:idx + BATCH_SIZE], arrays_on_gpu[i])
                result.append(indices.as_in_context(mx.cpu(i)))
                idx += BATCH_SIZE
            except IndexError:
                sim, indices = score(arrays_on_gpu[i][idx:], arrays_on_gpu[i])
                result.append(indices.as_in_context(mx.cpu(i)))
                idx += BATCH_SIZE
                break

    index_matrix = nd.concat(*result, dim=0)
    nd.save("%s.index" % prefix, index_matrix)
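score is not shown in this snippet; a plausible stand-in (an assumption, not the original implementation) would rank every row of the loaded matrix against a batch of queries by dot-product similarity and keep the top-k indices:

def score(query, matrix, k=10):
    # (batch, dim) x (dim, num_rows) -> (batch, num_rows) similarities
    sim = nd.dot(query, matrix.T)
    indices = nd.topk(sim, k=k, ret_typ='indices')
    return sim, indices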
Example No. 8
def embedding(data_iterator, net, ctx=mx.cpu()):
    convnet_codes = None
    resize_images = None
    labels = None
    for i, batch in enumerate(data_iterator):
        data, label = _get_batch(batch, ctx)
        idx = nd.arange(data.shape[0])
        _, output = net(data)
        output = output[idx.as_in_context(ctx), :, label]
        output.wait_to_read()
        if convnet_codes is None:
            convnet_codes = output
        else:
            convnet_codes = nd.concat(*[convnet_codes, output], dim=0)
        if labels is None:
            labels = label
        else:
            labels = nd.concat(*[labels, label], dim=0)
        images = data.copyto(mx.cpu())
        if images.shape[1] != 1:
            # undo the per-channel mean normalization (the standard CIFAR-10 channel means)
            images[:, 0, :, :] += 0.4914
            images[:, 1, :, :] += 0.4822
            images[:, 2, :, :] += 0.4465
        images = nd.clip(images * 255, 0, 255).astype('uint8')
        if resize_images is None:
            resize_images = images
        else:
            resize_images = nd.concat(*[resize_images, images], dim=0)
    nd.save('convet.ndarray', convnet_codes.as_in_context(mx.cpu()))
    nd.save('resize_image.ndarray', resize_images)
    nd.save('label.ndarray', labels.astype('int32').as_in_context(mx.cpu()))
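A matching load sketch, under the assumption that each file holds a single array (nd.load then returns a one-element list):

codes = nd.load('convet.ndarray')[0]
images = nd.load('resize_image.ndarray')[0]
labels = nd.load('label.ndarray')[0]
print(codes.shape, images.shape, labels.shape)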
Example No. 9
def save(self):
    # if running as the hard-example server
    if hard_config.HARD_SERVER:
        # we only need the weight to find those hard classes
        nd.save(fname=os.path.join(
            hard_config.PREFIX,
            "%d_%s_largeFC.param" % (self.rank, self.name)),
                data=self.weight)
    else:
        nd.save(fname=os.path.join(
            self.prefix, "%d_%s_largeFC.param" % (self.rank, self.name)),
                data=self.weight)
    nd.save(fname=os.path.join(
        self.prefix, "%d_%s_largeFC_mom.param" % (self.rank, self.name)),
            data=self.weight_mom)