def get_model_inception_v3(classes_num, ctx):
    """Build an Inception-v3 fine-tuning network.

    Reuses the pretrained feature extractor and attaches a freshly
    Xavier-initialized output layer sized for `classes_num` classes.

    :param classes_num: number of output classes for the new head
    :param ctx: MXNet context (CPU/GPU) to place the parameters on
    :return: hybridized gluon network ready for fine-tuning
    """
    base = models.inception_v3(pretrained=True)

    net = models.inception_v3(classes=classes_num)  # head sized for classes_num outputs
    net.features = base.features                    # transplant pretrained features
    net.output.initialize(init.Xavier(), ctx=ctx)   # init only the new output layer
    net.collect_params().reset_ctx(ctx)             # move everything to the target device
    net.hybridize()                                 # symbolic execution for speed
    return net
Пример #2
0
def get_net(ctx=mx.cpu()):
    """Inception-v3 with a fresh 2-class head over pretrained features.

    The output layer is Xavier-initialized and given a 10x learning-rate
    multiplier so fine-tuning trains the head faster than the backbone.
    """
    pretrained = vision.inception_v3(pretrained=True)

    net = vision.inception_v3(classes=2)
    net.features = pretrained.features
    net.output.initialize(mx.init.Xavier())
    net.collect_params().reset_ctx(ctx)
    # train the new head 10x faster than the rest of the network
    net.output.collect_params().setattr('lr_mult', 10)
    return net
Пример #3
0
 def __init__(self, ctx, num_class, nameparams=None):
     """Two-backbone classifier: ResNet-152 + Inception-v3 features, custom head.

     :param ctx: MXNet context to load the pretrained backbones on
     :param num_class: number of output classes for the head
     :param nameparams: optional parameter file/name for the output layer
     """
     # pretrained feature extractors
     inc_feats = vision.inception_v3(pretrained=True, ctx=ctx).features
     res_feats = vision.resnet152_v1(pretrained=True, ctx=ctx).features
     # fuse both backbones into a single feature network (ResNet first)
     self.features = ConcatNet(res_feats, inc_feats)
     # classifier head, optionally restored from nameparams
     self.output = self.__get_output(ctx, num_class, nameparams)
     # complete network = fused features + output head
     self.net = OneNet(self.features, self.output)
Пример #4
0
 def __init__(self, ctx, nameparams=None):
     """Fused ResNet-152 + Inception-v3 feature network with a separate head.

     :param ctx: MXNet context to load the pretrained backbones on
     :param nameparams: optional parameter file/name for the output layer
     """
     # features of each pretrained backbone
     inc_feats = vision.inception_v3(pretrained=True, ctx=ctx).features
     res_feats = vision.resnet152_v1(pretrained=True, ctx=ctx).features
     # network holding the concatenated features (ResNet first)
     self.features = ConcatNet(res_feats, inc_feats)
     # output-layer-only network
     self.output = self.__get_output(ctx, nameparams)
     # combine feature network and output network into one model
     self.net = OneNet(self.features, self.output)
Пример #5
0
def inception_score(imgs, use_gpu=True, batch_size=32, resize=False, splits=1):
    """
    Computes the inception score of the generated images imgs.

    IS = exp(E_x[ KL(p(y|x) || p(y)) ]), averaged over `splits` partitions.

    :param imgs: nd dataset of (3xHxW) numpy images normalized in the range [-1, 1]
    :param use_gpu: whether or not to run on GPU
    :param batch_size: batch size for feeding into Inception v3
    :param resize: if True, bilinearly resize each batch to 299x299
        (the Inception-v3 input size) before scoring
    :param splits: number of splits over which the score mean/std is computed
    :return: (mean, std) of the per-split inception scores
    :raises ValueError: if batch_size is non-positive or there are fewer
        images than batch_size
    """
    N = len(imgs)
    # Explicit exceptions instead of asserts (asserts vanish under `python -O`).
    if batch_size <= 0:
        raise ValueError("batch_size must be positive")
    # A dataset of exactly one batch is valid; the original `N > batch_size`
    # check rejected it for no reason, so allow N == batch_size.
    if N < batch_size:
        raise ValueError("need at least batch_size images")

    # Set up device
    ctx = mx.gpu(0) if use_gpu else mx.cpu()

    # Set up dataloader
    dataloader = DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(pretrained=True, ctx=ctx)

    def up(data):
        # bilinear upsampling to Inception's expected 299x299 resolution
        return nd.contrib.BilinearResize2D(data, height=299, width=299)

    def get_pred(x):
        # class-probability predictions, shape (batch, 1000)
        if resize:
            x = up(x)
        x = inception_model(x)
        return nd.softmax(x).asnumpy()

    # Get predictions for every image
    preds = np.zeros((N, 1000))

    for i, batch in enumerate(dataloader):
        batch = batch.as_in_context(ctx)
        batch_size_i = batch.shape[0]
        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batch)

    # Now compute the mean kl-div per split
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)  # marginal p(y) over this split
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))  # KL(p(y|x) || p(y))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Пример #6
0
def get_image_feature(img_path):
    """Return the flattened Inception-v3 feature vector for the image at img_path.

    PERF FIX: the pretrained network was rebuilt (and potentially re-downloaded)
    on every call; it is now loaded lazily once and cached on the function object.

    :param img_path: path to the image file
    :return: 1-D numpy array of Inception-v3 features
    """
    net = getattr(get_image_feature, '_net', None)
    if net is None:
        net = vision.inception_v3(pretrained=True, ctx=ctx)
        get_image_feature._net = net

    img = load_image(img_path, 448)

    feature = net.features(img.as_in_context(ctx)).asnumpy()
    return feature.reshape(-1)
Пример #7
0
def get_net_inception3(epoch=-1):
    """Frozen Inception-v3 backbone + Flatten + trainable Dense(1024) head.

    :param epoch: >= 0 is reserved for resuming from a saved checkpoint
        (not implemented); otherwise the network is Xavier-initialized.
    :return: hybridized gluon network
    """
    net = nn.HybridSequential()

    # pretrained backbone feature extractor
    backbone = vision.inception_v3(pretrained=True)
    net.add(backbone.features)
    net.add(nn.Flatten())

    # freeze everything added so far; the Dense below is added afterwards,
    # so it keeps its default lr_mult and remains trainable
    net.collect_params().setattr('lr_mult', 0)

    # map the 2048-d backbone features down to 1024
    net.add(nn.Dense(1024))

    if epoch >= 0:
        pass  # TODO: load saved model parameters
    else:
        net.initialize(mx.init.Xavier())

    net.hybridize()
    return net
 def __init__(self, ctx, nameparams=None):
     """Combine pretrained ResNet-152 and Inception-v3 features with an output head."""
     backbone_res = vision.resnet152_v1(pretrained=True, ctx=ctx).features
     backbone_inc = vision.inception_v3(pretrained=True, ctx=ctx).features
     # fused feature network (ResNet features first, then Inception)
     self.features = ConcatNet(backbone_res, backbone_inc)
     # output head, optionally restored from nameparams
     self.output = get_output(ctx, nameparams)
     self.net = OneNet(self.features, self.output)
Пример #9
0
  return train_transform, val_transform


def get_imagenet_iterator(root, batch_size, num_workers, data_shape=224, dtype='float32'):
  """Build (train, val) DataLoaderIter pairs over an ImageNet-style folder tree.

  Expects `root/train` and `root/val`, with validation images stored one
  subdirectory per category.
  """
  train_transform, val_transform = get_imagenet_transforms(data_shape, dtype)

  train_dir = os.path.join(root, 'train')
  logging.info("Loading image folder %s, this may take a bit long...", train_dir)
  train_data = DataLoader(
      ImageFolderDataset(train_dir, transform=train_transform),
      batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)

  val_dir = os.path.join(root, 'val')
  # sanity-check the expected one-subdir-per-category layout
  if not os.path.isdir(os.path.expanduser(os.path.join(root, 'val', 'n01440764'))):
    raise ValueError('Make sure validation images are stored in one subdir per category, '
                     'a helper script is available at https://git.io/vNQv1')

  logging.info("Loading image folder %s, this may take a bit long...", val_dir)
  val_data = DataLoader(
      ImageFolderDataset(val_dir, transform=val_transform),
      batch_size, last_batch='keep', num_workers=num_workers)

  return DataLoaderIter(train_data, dtype), DataLoaderIter(val_data, dtype)


# Pretrained model-zoo networks (weights downloaded on first use).
resnet18 = vision.resnet18_v1(pretrained=True)
alexnet = vision.alexnet(pretrained=True)
inception = vision.inception_v3(pretrained=True)
#squeezenet = vision.squeezenet1_0()
#densenet = vision.densenet_161()

# BUG FIX: the original call ended with a stray ':' which is a SyntaxError.
# NOTE(review): `batch_size` and `num_workers` are assumed to be defined
# earlier in the file — confirm.
train_iter, val_iter = get_imagenet_iterator("c:\\data\\images", batch_size, num_workers, data_shape=224, dtype='float32')
Пример #10
0
import mxnet as mx
import time
import gluoncv

from mxnet import nd, autograd
from mxnet import gluon
from mxnet.gluon import nn

# input shape for network visualization: (batch, channels, height, width)
inputShape = (1, 3, 224, 224)

from mxnet.gluon.model_zoo import vision

# instantiate (untrained) model-zoo architectures
alexnet = vision.alexnet()
inception = vision.inception_v3()

resnet18v1 = vision.resnet18_v1()
resnet18v2 = vision.resnet18_v2()
squeezenet = vision.squeezenet1_0()
densenet = vision.densenet121()
mobilenet = vision.mobilenet0_5()

############### graph ###############
import gluoncv  # NOTE(review): duplicate of the import above
gluoncv.utils.viz.plot_network(resnet18v1, shape=inputShape)
#####################################
Пример #11
0
 def __init__(self, ctx):
     """Build a 120-class Inception-v3 fine-tuning net on the given context.

     Reuses the pretrained feature extractor (loaded on `ctx`) and
     Xavier-initializes a fresh 120-way output layer.

     :param ctx: MXNet context (CPU/GPU) for the network parameters
     """
     pretrained = vision.inception_v3(pretrained=True, ctx=ctx)
     self.finetune_net = vision.inception_v3(classes=120)
     self.finetune_net.features = pretrained.features
     # BUG FIX: the new head was initialized on the default context (CPU)
     # while the features live on `ctx`, which breaks forward passes on GPU.
     # Initialize the output layer on the same context.
     self.finetune_net.output.initialize(init.Xavier(), ctx=ctx)
Пример #12
0
def _init_inception():
    """Load the pretrained Inception-v3 into the module-global `inception_model`."""
    global inception_model
    inception_model = models.inception_v3(pretrained=True)
    print("success import inception model, and the model is inception_v3!")
Пример #13
0
# BUG FIX: `os` and `json` are used below but were never imported here.
import os
import json

import mxnet as mx
import numpy as np
from mxnet import gluon, nd, image
from mxnet.gluon import nn
from mxnet.gluon.model_zoo import vision as models

ctx = mx.cpu()

# ResNet-152 feature extractor with a global average pooling layer appended
res151 = models.resnet152_v1(pretrained=True, ctx=ctx).features
with res151.name_scope():
    res151.add(gluon.nn.GlobalAvgPool2D())
res151.collect_params().reset_ctx(ctx)
res151.hybridize()

# Inception-v3 feature extractor
inception = models.inception_v3(pretrained=True, ctx=ctx)
inception_net = inception.features
inception_net.collect_params().reset_ctx(ctx)
inception_net.hybridize()

# label-index -> class-name mapping
with open(os.path.join('functions', 'labels.json'), 'r') as fp:
    dic = json.load(fp)

def build_model_mxnet(ctx):
    """Build a small classifier head: BatchNorm -> Dense(1024) -> BatchNorm -> ReLU.

    NOTE(review): this function appears truncated — it builds `net` but never
    returns it (so it implicitly returns None); presumably further layers and
    a `return net` were cut off. Confirm against the original source.
    """
    net = nn.Sequential()
    with net.name_scope():
        net.add(nn.BatchNorm())
        net.add(nn.Dense(1024))
        net.add(nn.BatchNorm())
        net.add(nn.Activation('relu'))
def _init_inception():
    """Populate the module-global `inception_model` with pretrained Inception-v3."""
    global inception_model
    inception_model = models.inception_v3(pretrained=True)
    print("success import inception model, and the model is inception_v3!")
Пример #15
0
##################
# Hyperparameter #
#----------------#
ctx = mx.cpu()
lr=0.05
epochs=10
momentum=0.9
batch_size=64
#----------------#
# Hyperparameter #
##################

################## model
from mxnet.gluon.model_zoo import vision
# Inception-v3 trained from scratch (pretrained=False) with 10 output classes
net = vision.inception_v3(classes=10, pretrained=False, ctx=ctx)


################## graph
import gluoncv
# Inception-v3 expects 299x299 inputs: (batch, channels, height, width)
inputShape = (1,3,299,299)
gluoncv.utils.viz.plot_network(net, shape=inputShape)


##### preprocessing ##############################################
def transformer(data, label):
    """Resize to 299x299, reorder HWC -> CHW, and cast to float32."""
    resized = mx.image.imresize(data, 299, 299)
    chw = mx.nd.transpose(resized, (2, 0, 1))
    return chw.astype(np.float32), label
Пример #16
0
def inceptionv3mxnetload():
    """Return a hybridized pretrained Inception-v3 from the MXNet model zoo."""
    model = vision.inception_v3(pretrained=True)
    model.hybridize()
    return model
Пример #17
0
def load_inception_v3(ctx):
    """Load a pretrained Inception-v3 onto `ctx` with an empty parameter prefix."""
    net = vision.inception_v3(pretrained=True, ctx=ctx, prefix="")
    return net
def get_features2(ctx):
    """Return the feature extractor of a pretrained Inception-v3.

    (The original code bound it to a local named `resnet`, but it is an
    Inception-v3 network.)
    """
    inception = vision.inception_v3(pretrained=True, ctx=ctx)
    return inception.features
Пример #19
0
    # ImageNet per-channel normalization (RGB mean / std)
    x = image.color_normalize(x,
                              mean=nd.array([0.485, 0.456, 0.406]),
                              std=nd.array([0.229, 0.224, 0.225]))
    # add a batch dimension: (1, 3, 448, 448)
    x = x.reshape((1, 3, 448, 448))

    return x


# dataset locations (note: 'val' points at the training images too —
# presumably a deliberate reuse; verify against the training pipeline)
data_path = {
    'train': './../data/train_img/',
    'val': './../data/train_img/',
    'test': './../data/test_img/'
}

ctx = gb.try_gpu()
# module-level networks shared by the helper functions below:
# Inception-v3 for image features, SSD for object detection
img_net = vision.inception_v3(pretrained=True, ctx=ctx)
detect_net = model_zoo.get_model('ssd_512_resnet50_v1_voc',
                                 pretrained=True,
                                 ctx=ctx)


def get_image_feature(img_path):
    """Compute Inception-v3 features and an SSD detection indicator for an image.

    NOTE(review): the body appears truncated — `img_feature` and
    `detect_feature` are computed but nothing is returned; presumably the two
    vectors were combined and returned in the original source. Confirm.
    """
    img = load_image(img_path, 448)

    # flattened Inception-v3 feature vector
    img_feature = img_net.features(
        img.as_in_context(ctx)).asnumpy().reshape(-1)
    # class ids produced by the SSD detector
    detect_class = detect_net(img.as_in_context(ctx))[0].asnumpy().reshape(-1)
    # one-hot-style indicator over up to 256 class ids
    detect_feature = np.zeros(256)
    for class_id in detect_class:
        detect_feature[int(class_id)] = 1
Пример #20
0
    # instantiate source and target models
    # Each branch builds (a) an ImageNet-pretrained backbone and (b) a fresh
    # 2-class copy of the same architecture; the shared 'model_' prefix lets
    # parameters be matched between the two for transfer learning.
    if source_model == 'mobilenet':
        from mxnet.gluon.model_zoo.vision import mobilenet1_0
        pretrained_net = mobilenet1_0(pretrained=True, prefix='model_')
        net = mobilenet1_0(classes=2, prefix='model_')
    elif source_model == 'vgg19':
        from mxnet.gluon.model_zoo.vision import vgg19_bn
        pretrained_net = vgg19_bn(pretrained=True, prefix='model_')
        net = vgg19_bn(classes=2, prefix='model_')
    elif source_model == 'resnet101':
        from mxnet.gluon.model_zoo.vision import resnet101_v2
        pretrained_net = resnet101_v2(pretrained=True, prefix='model_')
        net = resnet101_v2(classes=2, prefix='model_')
    elif source_model == 'inceptionv3':
        from mxnet.gluon.model_zoo.vision import inception_v3
        pretrained_net = inception_v3(pretrained=True, prefix='model_')
        net = inception_v3(classes=2, prefix='model_')
    elif source_model == 'squeezenet':
        from mxnet.gluon.model_zoo.vision import squeezenet1_0
        pretrained_net = squeezenet1_0(pretrained=True, prefix='model_')
        net = squeezenet1_0(classes=2, prefix='model_')
    else:
        # unknown model name: only prints the available options —
        # NOTE(review): execution continues with `net` undefined; consider raising
        print(
            "Available source models (ordered small -> large):\n['squeezenet', 'mobilenet', 'inceptionv3', 'resnet101', 'vgg19']"
        )

    # pathing
    train_rec = os.path.join(data_dir, 'img.rec')
    # validation_rec = os.path.join(data_dir, 'validation/img.rec')
    validation_rec = '/home/ubuntu/projects/computer_vision/pools/test/img.rec'
    model_out_path = os.path.join(
Пример #21
0
def test_models():
    """Compare the Keras (TensorFlow) and MXNet dog-breed classifiers.

    Samples 20 training images, runs both models on each, and prints a table
    of expected vs. predicted breeds. Blocks on user input at the end.
    """
    ctx = mx.cpu()

    data_dir = 'data'
    models_dir = 'models'
    # target size for the TensorFlow image loader (H, W, C)
    target_size = (128, 128, 1)

    dataset = pd.read_csv(os.path.join(data_dir, 'labels.csv'))
    # breed name -> integer class index
    dic = dict(zip(np.unique(dataset.breed), range(0, np.unique(dataset.breed).__len__() + 1)))

    # MXNet classifier head over pretrained features
    net = build_model_mxnet(ctx)
    net.load_parameters(os.path.join(models_dir, 'model.params'))

    # Keras model
    model = load_model(os.path.join(models_dir, 'dog-recognition.h5'))

    test_set = dataset.sample(20).reset_index()

    result = []

    # ResNet-152 feature extractor with global average pooling appended
    res151 = models.resnet152_v1(pretrained=True, ctx=ctx).features
    with res151.name_scope():
        res151.add(gluon.nn.GlobalAvgPool2D())
    res151.collect_params().reset_ctx(ctx)
    res151.hybridize()

    # Inception-v3 feature extractor
    inception = models.inception_v3(pretrained=True, ctx=ctx)
    inception_net = inception.features
    inception_net.collect_params().reset_ctx(ctx)
    inception_net.hybridize()

    for i in tqdm(range(20)):
        # -- Tensorflow
        img = tf_image.load_img(os.path.join(data_dir, 'train', test_set['id'][i]) + '.jpg', target_size=target_size,
                                grayscale=False)

        img = img_to_array(img)
        img = img / 255

        predict_tensorflow = model.predict_classes(np.array([img]))

        # -- MXNet

        img = mx.nd.array(cv2.imread(os.path.join(data_dir, 'train', test_set['id'][i]) + '.jpg'))
        img = transform_test(img)

        # per-backbone features, flattened to (batch, features)
        img_res151, img_inception = get_features_test(res151, inception_net, img, ctx)
        img_res151 = img_res151.reshape(img_res151.shape[:2])
        img_inception = img_inception.reshape(img_inception.shape[:2])

        # concatenated feature vector fed to the classifier head
        img = nd.concat(mx.nd.array(img_inception), mx.nd.array(img_res151))

        predict_mx = nd.softmax(net(nd.array(img).as_in_context(ctx)))

        result.append({
            'id': test_set['id'][i],
            'expected': test_set['breed'][i],
            'tensor': list(dic.keys())[list(dic.values()).index(predict_tensorflow)],
            'mx': list(dic.keys())[list(dic.values()).index(predict_mx.topk(k=1).asnumpy()[0][0])],
            'mx_percentage': predict_mx[0, 0].asscalar()
        })
    print(tabulate(result))
    input("Press Enter to continue...")
Пример #22
0
# 224x224 inputs for the VGG / ResNet / DenseNet feature extractors.
# NOTE(review): mutating preprocessing[0] in place changes the behavior of the
# shared `transform` for every dataset created from it.
preprocessing[0] = image.ForceResizeAug((224, 224))
imgs = vision.ImageFolderDataset('for_train_stanford', transform=transform)
data = gluon.data.DataLoader(imgs, 64)
features_vgg, labels = get_features(models.vgg16_bn(pretrained=True, ctx=ctx),
                                    data)
features_resnet, _ = get_features(
    models.resnet152_v1(pretrained=True, ctx=ctx), data)
features_densenet, _ = get_features(
    models.densenet161(pretrained=True, ctx=ctx), data)

# Inception-v3 needs 299x299 inputs, so switch the shared transform and
# build a loader at that size.
preprocessing[0] = image.ForceResizeAug((299, 299))
imgs_299 = vision.ImageFolderDataset('for_train_stanford', transform=transform)
data_299 = gluon.data.DataLoader(imgs_299, 64)
# BUG FIX: `data_299` was built for this step but the old `data` loader was
# passed instead (it only worked because the in-place preprocessing[0]
# mutation changed `data`'s transform too, leaving `data_299` unused).
# Feed Inception the loader that was explicitly created for it.
features_inception, _ = get_features(
    models.inception_v3(pretrained=True, ctx=ctx), data_299)

# persist all extracted training features plus labels
with h5py.File('features_train_stanford.h5', 'w') as f:
    f['vgg'] = features_vgg
    f['resnet'] = features_resnet
    f['densenet'] = features_densenet
    f['inception'] = features_inception
    f['labels'] = labels

preprocessing[0] = image.ForceResizeAug((224, 224))
imgs = vision.ImageFolderDataset('for_test', transform=transform)
data = gluon.data.DataLoader(imgs, 64)
features_vgg, _ = get_features(models.vgg16_bn(pretrained=True, ctx=ctx), data)
features_resnet, _ = get_features(
    models.resnet152_v1(pretrained=True, ctx=ctx), data)
features_densenet, _ = get_features(