Example #1
    def run(self, topic=False, show=True, radar=False, ctx=gpu(0)):
        self.radar = radar
        self.show = show

        self.topk = 1
        self._init_ros()
        # ForceResizeAug takes the target size as (width, height)
        self.resz = image.ForceResizeAug((self.size[1], self.size[0]))

        if radar:
            # Interactive polar plot used for the radar-style visualization
            plt.ion()
            fig = plt.figure()
            self.ax = fig.add_subplot(111, polar=True)
            self.ax.grid(True)

        if not topic:
            # No image topic given: grab frames from a USB camera in a background thread
            threading.Thread(target=self._get_frame).start()
            print('\033[1;33;40m Use USB Camera\033[0m')
        else:
            rospy.Subscriber(topic, Image, self._image_callback)
            print('\033[1;33;40m Image Topic: %s\033[0m' % topic)
        
        rate = rospy.Rate(30)
        while not rospy.is_shutdown():
            if hasattr(self, 'img'):
                # Resize the latest frame, move it to the target context and
                # convert HWC uint8 -> NCHW float in [0, 1] before prediction
                nd_img = nd.array(self.img)
                nd_img = self.resz(nd_img).as_in_context(ctx)
                nd_img = nd_img.transpose((2, 0, 1)).expand_dims(axis=0) / 255.
                out = self.predict(nd_img)
                self.visualize(out)
            rate.sleep()  # throttle the loop to 30 Hz
Example #2
def predict_mxnet(net, ctx, fname, label):
    '''
    Predict an image's class with MXNet.
    :param net: trained model
    :param ctx: context to run the data on
    :param fname: path to the image file
    :param label: label dictionary (class index as string -> class name)
    :return: predicted class and its probability
    '''
    with open(fname, 'rb') as f:
        img = image.imdecode(f.read())
        img = image.ForceResizeAug((image_size, image_size))(img)
    data, _ = transform_test(img, -1)
    data = data.expand_dims(axis=0)
    out = net(data.as_in_context(ctx))
    out = nd.SoftmaxActivation(out)                # logits -> class probabilities
    pred = int(nd.argmax(out, axis=1).asscalar())  # index of the most likely class
    prob = out[0][pred].asscalar()
    return 'confidence=%f, class %s' % (prob, label[str(pred)])
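
# Hedged usage sketch (not from the original source): how predict_mxnet might be
# called. It assumes `mxnet` is imported as `mx`, that the `transform_test` helper
# and `image_size` global used above are defined, and uses a hypothetical trained
# model, test image path and label dictionary:
#
#     labels = {'0': 'cat', '1': 'dog'}   # hypothetical class-index -> name mapping
#     net = ...                           # trained classifier loaded elsewhere
#     print(predict_mxnet(net, mx.cpu(), 'test/sample.jpg', labels))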
Example #3
import mxnet as mx
from mxnet import image, nd
import h5py
import os
import gc
from tqdm import tqdm
from sklearn.model_selection import train_test_split

# In[ ]:

# try to run on the GPU
ctx = mx.gpu()

# In[ ]:

transformers = [
    # Force-resize to the pretrained model's input size
    image.ForceResizeAug((224, 224)),
    # Normalize each RGB channel with the ImageNet mean and std
    image.ColorNormalizeAug(mean=nd.array([0.485, 0.456, 0.406]),
                            std=nd.array([0.229, 0.224, 0.225]))
]
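
# A hedged sketch (not from the original source) of applying the transformer
# chain to one decoded image; 'sample.jpg' is a hypothetical file path.
with open('sample.jpg', 'rb') as f:
    img = image.imdecode(f.read())        # uint8 HWC image
img = img.astype('float32') / 255         # scale to [0, 1] to match the stats above
for aug in transformers:
    img = aug(img)
img = nd.transpose(img, (2, 0, 1))         # HWC -> CHW before feeding a network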

# Transform images and labels into our target data format
#
# To reduce overfitting, we use `image.CreateAugmenter` here to augment the dataset. For example, setting `rand_mirror=True` randomly mirrors each image horizontally, and we normalize the three RGB channels with `mean` and `std`. All of the function's parameters are listed below, and every one of them can be tuned (a hedged usage sketch follows the parameter dictionary).

# In[ ]:

train_augs_params = {
    "resize": 1,
    "rand_crop": False,
    "rand_resize": False,
Example #4
import mxnet as mx
from mxnet import image, nd
from mxnet.gluon.model_zoo import vision as models
import numpy as np
from tqdm import tqdm
import h5py
import matplotlib.pyplot as plt

#%matplotlib inline
#%config InlineBackend.figure_format = 'retina'

import warnings
warnings.filterwarnings("ignore")

ctx = mx.gpu()

preprocessing = [
    image.ForceResizeAug((224, 224)),
    image.ColorNormalizeAug(mean=nd.array([0.485, 0.456, 0.406]),
                            std=nd.array([0.229, 0.224, 0.225]))
]


def transform(data, label):
    # Scale to [0, 1], apply resize + normalization, then convert HWC -> CHW
    data = data.astype('float32') / 255
    for pre in preprocessing:
        data = pre(data)

    data = nd.transpose(data, (2, 0, 1))
    return data, nd.array([label]).asscalar().astype('float32')
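
# Hedged sketch (not from the original source): `transform` can be passed to a
# Gluon dataset so every image is preprocessed on the fly; the 'train_data/'
# folder is a hypothetical path.
from mxnet.gluon.data import DataLoader
from mxnet.gluon.data.vision import ImageFolderDataset
train_ds = ImageFolderDataset('train_data/', transform=transform)
train_loader = DataLoader(train_ds, batch_size=32, shuffle=True)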


def get_features(net, data):
Example #5
def get_image_mat(data_path, pic_size):
    # Decode an image file, optionally force-resize it to pic_size (width, height),
    # and return it as a float32 numpy array
    with open(data_path, 'rb') as f:
        img = image.imdecode(f.read())
    if pic_size[0] * pic_size[1] != 0:
        img = image.ForceResizeAug(pic_size)(img)
    return img.astype('float32').asnumpy()
import mxnet as mx
from mxnet import image
from mxnet import init
from mxnet import nd
from mxnet.gluon.data import vision
from mxnet.gluon.model_zoo import vision as models
import numpy as np
from tqdm import tqdm
import h5py
import matplotlib.pyplot as plt

import warnings
warnings.filterwarnings("ignore")

ctx = mx.gpu()

preprocessing = [
    image.ForceResizeAug((224, 224)),
    image.ColorNormalizeAug(mean=nd.array([0.485, 0.456, 0.406]),
                            std=nd.array([0.229, 0.224, 0.225]))
]


def transform(data, label):
    # Scale to [0, 1], apply resize + normalization, then convert HWC -> CHW
    data = data.astype('float32') / 255
    for pre in preprocessing:
        data = pre(data)

    data = nd.transpose(data, (2, 0, 1))
    return data, nd.array([label]).asscalar().astype('float32')


def get_features(net, data):