Example #1
    def __init__(self, ctx, nameparams=None):
        # inception = vision.inception_v3(pretrained=True, ctx=ctx).features  # extract the features of the pretrained inception_v3 model
        resnet = vision.resnet152_v1(
            pretrained=True, ctx=ctx).features  # extract the features of the pretrained resnet152_v1 model
        self.features = resnet
        self.output = self.__get_output(ctx, nameparams)  # a network consisting of only the output layer
        self.net = OneNet(self.features,
                          self.output)  # combine the feature network and the output network into a new network
Example #2
    def __init__(self, ctx, num_class, nameparams=None):
        inception = vision.inception_v3(pretrained=True,
                                        ctx=ctx).features  # load the pretrained model
        resnet = vision.resnet152_v1(pretrained=True,
                                     ctx=ctx).features  # load the pretrained model
        self.features = ConcatNet(resnet, inception)  # fuse the two networks into one feature extractor
        self.output = self.__get_output(ctx, num_class, nameparams)  # build the output layer of the network
        self.net = OneNet(self.features, self.output)  # merge the feature block and the output layer into one network
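The constructors above rely on ConcatNet and OneNet, which are defined elsewhere in their source file and not shown here. A minimal sketch of what such wrapper blocks could look like in Gluon, written from how they are used in Example #2; the two-input design, with one appropriately sized image per backbone, is an assumption, not the original definition:

from mxnet.gluon import nn


class ConcatNet(nn.HybridBlock):
    """Run two pretrained feature extractors and concatenate their pooled outputs."""

    def __init__(self, net1, net2, **kwargs):
        super(ConcatNet, self).__init__(**kwargs)
        self.net1 = nn.HybridSequential()
        self.net1.add(net1, nn.GlobalAvgPool2D())
        self.net2 = nn.HybridSequential()
        self.net2.add(net2, nn.GlobalAvgPool2D())

    def hybrid_forward(self, F, x1, x2):
        # x1 is sized for resnet152_v1 (224x224), x2 for inception_v3 (299x299)
        return F.concat(self.net1(x1), self.net2(x2), dim=1)


class OneNet(nn.HybridBlock):
    """Chain a feature block and an output (classifier) block into one network."""

    def __init__(self, features, output, **kwargs):
        super(OneNet, self).__init__(**kwargs)
        self.features = features
        self.output = output

    def hybrid_forward(self, F, x1, x2):
        return self.output(self.features(x1, x2))

For the single-backbone case in Example #1, hybrid_forward would take a single input instead of the pair.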
Example #3
def test_models():
    ctx = mx.cpu()

    data_dir = 'data'
    models_dir = 'models'
    target_size = (128, 128, 1)

    dataset = pd.read_csv(os.path.join(data_dir, 'labels.csv'))
    breeds = np.unique(dataset.breed)
    dic = dict(zip(breeds, range(len(breeds))))

    net = build_model_mxnet(ctx)
    net.load_parameters(os.path.join(models_dir, 'model.params'))

    model = load_model(os.path.join(models_dir, 'dog-recognition.h5'))

    test_set = dataset.sample(20).reset_index()

    result = []

    res151 = models.resnet152_v1(pretrained=True, ctx=ctx).features
    with res151.name_scope():
        res151.add(gluon.nn.GlobalAvgPool2D())
    res151.collect_params().reset_ctx(ctx)
    res151.hybridize()

    inception = models.inception_v3(pretrained=True, ctx=ctx)
    inception_net = inception.features
    inception_net.collect_params().reset_ctx(ctx)
    inception_net.hybridize()

    for i in tqdm(range(20)):
        # -- Tensorflow
        img = tf_image.load_img(os.path.join(data_dir, 'train', test_set['id'][i]) + '.jpg', target_size=target_size,
                                grayscale=False)

        img = img_to_array(img)
        img = img / 255

        predict_tensorflow = model.predict_classes(np.array([img]))

        # -- MXNet

        img = mx.nd.array(cv2.imread(os.path.join(data_dir, 'train', test_set['id'][i]) + '.jpg'))
        img = transform_test(img)

        img_res151, img_inception = get_features_test(res151, inception_net, img, ctx)
        img_res151 = img_res151.reshape(img_res151.shape[:2])
        img_inception = img_inception.reshape(img_inception.shape[:2])

        img = nd.concat(mx.nd.array(img_inception), mx.nd.array(img_res151))

        predict_mx = nd.softmax(net(nd.array(img).as_in_context(ctx)))

        result.append({
            'id': test_set['id'][i],
            'expected': test_set['breed'][i],
            'tensor': list(dic.keys())[list(dic.values()).index(int(predict_tensorflow[0]))],
            'mx': list(dic.keys())[list(dic.values()).index(int(predict_mx.topk(k=1).asnumpy()[0][0]))],
            'mx_percentage': predict_mx[0, 0].asscalar()
        })
    print(tabulate(result))
    input("Press Enter to continue...")
Example #4
def get_features1(ctx):
    resnet = vision.resnet152_v1(pretrained=True, ctx=ctx)
    return resnet.features
Example #5
    def __init__(self, ctx, nameparams=None):
        inception = vision.inception_v3(pretrained=True, ctx=ctx).features
        resnet = vision.resnet152_v1(pretrained=True, ctx=ctx).features
        self.features = ConcatNet(resnet, inception)
        self.output = get_output(ctx, nameparams)
        self.net = OneNet(self.features, self.output)
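The constructors in Examples #1, #2 and #5 also call a __get_output/get_output helper that is not shown. A plausible sketch, assuming the output block is a small Dense classifier whose parameters can optionally be restored from the nameparams file; the layer sizes and the 120-class default are assumptions:

import mxnet as mx
from mxnet.gluon import nn


def get_output(ctx, num_class=120, nameparams=None):
    # Hypothetical output head; the exact architecture is an assumption.
    net = nn.HybridSequential()
    with net.name_scope():
        net.add(nn.Dense(256, activation='relu'))
        net.add(nn.Dropout(0.5))
        net.add(nn.Dense(num_class))
    if nameparams is None:
        net.initialize(init=mx.init.Xavier(), ctx=ctx)
    else:
        net.load_parameters(nameparams, ctx=ctx)
    return net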
Example #6
def get_features(net, data):
    # Run the pretrained network's feature block over every batch and collect
    # the resulting feature arrays together with the labels.
    features = []
    labels = []
    for X, y in data:
        feature = net.features(X.as_in_context(ctx))
        features.append(feature.asnumpy())
        labels.append(y.asnumpy())

    features = np.concatenate(features, axis=0)
    labels = np.concatenate(labels, axis=0)
    return features, labels
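
# The code below uses a `preprocessing` list and a `transform` callable that
# are defined earlier in the original file and not shown here. A minimal
# sketch of what they might look like (an assumption, not the original
# definitions), following the usual Gluon pattern of resizing and then
# normalizing with ImageNet statistics; it assumes `from mxnet import image, nd`:
preprocessing = [
    image.ForceResizeAug((224, 224)),
    image.ColorNormalizeAug(mean=nd.array([0.485, 0.456, 0.406]),
                            std=nd.array([0.229, 0.224, 0.225])),
]


def transform(data, label):
    data = data.astype('float32') / 255
    for aug in preprocessing:
        data = aug(data)
    data = nd.transpose(data, (2, 0, 1))  # HWC -> CHW for the conv nets
    return data, label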


preprocessing[0] = image.ForceResizeAug((224, 224))
imgs = vision.ImageFolderDataset('for_train_stanford', transform=transform)
data = gluon.data.DataLoader(imgs, 64)
features_vgg, labels = get_features(models.vgg16_bn(pretrained=True, ctx=ctx),
                                    data)
features_resnet, _ = get_features(
    models.resnet152_v1(pretrained=True, ctx=ctx), data)
features_densenet, _ = get_features(
    models.densenet161(pretrained=True, ctx=ctx), data)

preprocessing[0] = image.ForceResizeAug((299, 299))
imgs_299 = vision.ImageFolderDataset('for_train_stanford', transform=transform)
data_299 = gluon.data.DataLoader(imgs_299, 64)
features_inception, _ = get_features(
    models.inception_v3(pretrained=True, ctx=ctx), data_299)

with h5py.File('features_train_stanford.h5', 'w') as f:
    f['vgg'] = features_vgg
    f['resnet'] = features_resnet
    f['densenet'] = features_densenet
    f['inception'] = features_inception
    f['labels'] = labels
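The example stops after writing the features to disk. As a rough follow-up illustration (not part of the original code), the saved HDF5 file could be read back and the four feature sets flattened and stacked into a single training matrix:

with h5py.File('features_train_stanford.h5', 'r') as f:
    parts = [f[key][:].reshape(f[key].shape[0], -1)
             for key in ('vgg', 'resnet', 'densenet', 'inception')]
    X_train = np.concatenate(parts, axis=1)  # one row of concatenated features per image
    y_train = f['labels'][:]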
Example #7
import json
import os

import mxnet as mx
import numpy as np
from mxnet import gluon, nd, image
from mxnet.gluon import nn
from mxnet.gluon.model_zoo import vision as models

ctx = mx.cpu()

res151 = models.resnet152_v1(pretrained=True, ctx=ctx).features
with res151.name_scope():
    res151.add(gluon.nn.GlobalAvgPool2D())
res151.collect_params().reset_ctx(ctx)
res151.hybridize()

inception = models.inception_v3(pretrained=True, ctx=ctx)
inception_net = inception.features
inception_net.collect_params().reset_ctx(ctx)
inception_net.hybridize()

with open(os.path.join('functions', 'labels.json'), 'r') as fp:
    dic = json.load(fp)


def build_model_mxnet(ctx):
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.BatchNorm())
        net.add(nn.Dense(1024))