Example #1
from mxnet import nd
from mxnet.test_utils import assert_almost_equal
from mxnet_predict import load_ndarray_file


def test_load_ndarray():
    nd_file = 'test_predictor_load_ndarray.params'
    a = nd.random.uniform(shape=(7, 3))
    b = nd.random.uniform(shape=(7,))
    nd_data = {'a':a, 'b':b}
    nd.save(nd_file, nd_data)

    # test load_ndarray_file
    nd_load = load_ndarray_file(open(nd_file, "rb").read())
    assert(set(nd_data.keys()) == set(nd_load.keys()))
    for k in nd_data.keys():
        assert_almost_equal(nd_data[k].asnumpy(), nd_load[k], rtol=1e-5, atol=1e-6)
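Example #2 below decodes the mean image from a base64 field inside a JSON model file. As a companion, here is a minimal sketch of how such a field could be produced from a saved NDArray dict; the zero mean image and file names are placeholders, and only the 'meanimgbase64' key is taken from the snippet below.

import base64
import json

from mxnet import nd

# Placeholder mean image; a real pipeline would compute it over the training set.
mean_img = nd.zeros((3, 224, 224))
nd.save("mean_224.nd", {"mean_img": mean_img})

# Embed the serialized NDArray dict as base64 text; this is the format that
# load_ndarray_file(base64.b64decode(...)) consumes in Example #2.
blob = base64.b64encode(open("mean_224.nd", "rb").read()).decode("ascii")
with open("inception-bn-model.json", "w") as f:
    json.dump({"meanimgbase64": blob}, f)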
Example #2
import sys, os
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append("../mxnet/amalgamation/python/")
from mxnet_predict import Predictor, load_ndarray_file
import json
import numpy as np
import base64
from skimage import io, transform

jsonmodel = json.loads(open('inception-bn-model.json').read())
mean_img = load_ndarray_file(base64.b64decode(
    jsonmodel['meanimgbase64']))["mean_img"]


def PreprocessImage(path):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 255
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # subtract the dataset mean and add a batch dimension
    normed_img = sample - mean_img
    normed_img.resize(1, 3, 224, 224)
    return normed_img
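The snippet stops before the decoded model is actually used. A sketch of how the rest of the embedded model might be wired up, assuming hypothetical JSON keys 'symboljson' and 'parambase64' (only 'meanimgbase64' appears in the snippet) and the forward/get_output methods of mxnet_predict.Predictor:

# Sketch only: 'symboljson' and 'parambase64' are assumed key names that do
# not appear in the snippet above; 'cat.jpg' is a placeholder path.
predictor = Predictor(
    jsonmodel['symboljson'],                      # network symbol as a JSON string
    base64.b64decode(jsonmodel['parambase64']),   # raw parameter bytes
    {'data': (1, 3, 224, 224)})

batch = PreprocessImage('cat.jpg')
predictor.forward(data=batch.astype(np.float32))
prob = predictor.get_output(0)[0]
print("Top prediction index:", int(np.argmax(prob)))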
Example #3
from mxnet_predict import Predictor, load_ndarray_file
import mxnet as mx
import logging
import numpy as np
from skimage import io, transform

# Load the pre-trained model
prefix = "Inception/Inception_BN"
num_round = 39
symbol_file = "%s-symbol.json" % prefix
param_file = "%s-0039.params" % prefix
predictor = Predictor(
    open(symbol_file).read(),
    open(param_file, "rb").read(), {'data': (1, 3, 224, 224)})
mean_img = load_ndarray_file(open("Inception/mean_224.nd", "rb").read())["mean_img"]

synset = [l.strip() for l in open('Inception/synset.txt').readlines()]


def PreprocessImage(path, show_img=False):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
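The function is cut off at this point in the listing. The usual continuation in this kind of script subtracts the mean, reshapes to a batch of one, runs the predictor, and looks up the top classes in synset. A hedged sketch of that classification step, reusing the predictor, mean_img, and synset defined above and assuming PreprocessImage is completed to return a (1, 3, 224, 224) array as in Example #2:

def PredictTopK(path, k=5):
    # Preprocess, then run the pre-trained Inception model on a single image.
    batch = PreprocessImage(path)
    predictor.forward(data=batch.astype(np.float32))
    prob = predictor.get_output(0)[0]
    # Indices of the k highest probabilities, best first.
    top_k = np.argsort(prob)[::-1][:k]
    for i in top_k:
        print("%.4f  %s" % (prob[i], synset[i]))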
Example #4
import sys

from mxnet_predict import Predictor, load_ndarray_file
import mxnet as mx
import logging
import numpy as np
from skimage import io, transform
# Load the pre-trained model
prefix = "./face-0"
num_round = 125
batch_size = 20
if len(sys.argv) > 2:
    num_round = int(sys.argv[2])
symbol_file = "%s-symbol.json" % prefix
param_file = "%s-%s.params" % (prefix, str(num_round).zfill(4))
predictor = Predictor(
    open(symbol_file).read(),
    open(param_file, "rb").read(), {'data': (batch_size, 3, 224, 224)}, 'gpu', 0)
mean_img = load_ndarray_file(open("./mean.bin", "rb").read())["mean_img"]

synset = [l.strip() for l in open('./labels.txt').readlines()]


def PreprocessImage(batchs, index, path, show_img=False):
    # load image
    img = io.imread(path)
    #print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
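Here PreprocessImage takes a preallocated batch array and an index, which suggests the surrounding script fills a (batch_size, 3, 224, 224) buffer and predicts all images at once. A sketch under that assumption, using the forward/get_output methods as in the other examples:

def PredictBatch(paths):
    # Preallocate one buffer for the whole batch; PreprocessImage is assumed
    # to write each preprocessed image into batchs[index].
    batchs = np.zeros((batch_size, 3, 224, 224), dtype=np.float32)
    for i, path in enumerate(paths[:batch_size]):
        PreprocessImage(batchs, i, path)
    predictor.forward(data=batchs)
    prob = predictor.get_output(0)
    for i, path in enumerate(paths[:batch_size]):
        print(path, synset[int(np.argmax(prob[i]))])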
Example #5
import sys, os
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append("../mxnet/amalgamation/python/")
from mxnet_predict import Predictor, load_ndarray_file
import json
import numpy as np
import base64
from skimage import io, transform

jsonmodel = json.loads(open('inception-bn-model.json').read())
mean_img = load_ndarray_file(base64.b64decode(jsonmodel['meanimgbase64']))["mean_img"]

def PreprocessImage(path):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 255
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # sub mean
    normed_img = sample - mean_img
    normed_img.resize(1, 3, 224, 224)
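A note on the last line: numpy's in-place resize only works because (sample - mean_img) owns its memory; reshape is the more common idiom and returns the batch directly. A tiny sketch with a hypothetical helper name, ToBatch, not taken from the snippet:

def ToBatch(sample, mean_img):
    # Subtract the mean and add a batch axis via reshape instead of the
    # in-place resize used above.
    return (sample - mean_img).reshape((1, 3, 224, 224))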
Example #6

from mxnet_predict import Predictor, load_ndarray_file
from skimage import io, transform

prefix = "Inception/Inception_BN"  # as in Example #3; not shown in this fragment
symbol_file = "%s-symbol.json" % prefix
param_file = "%s-0039.params" % prefix

# gpu predictor
predictor_gpu = Predictor(open(symbol_file).read(),
                          open(param_file, "rb").read(),
                          {'data': (1, 3, 224, 224)},
                          dev_type='gpu')

# cpu predictor
predictor_cpu = Predictor(open(symbol_file).read(),
                          open(param_file, "rb").read(),
                          {'data': (1, 3, 224, 224)},
                          dev_type='cpu')

mean_img = load_ndarray_file(open("Inception/mean_224.nd", "rb").read())["mean_img"]

synset = [l.strip() for l in open('Inception/synset.txt').readlines()]

def PreprocessImage(path, show_img=False):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    #short_egde = min(img.shape[:2])
    #yy = int((img.shape[0] - short_egde) / 2)
    #xx = int((img.shape[1] - short_egde) / 2)
    #crop_img = img[yy : yy + short_egde, xx : xx + short_egde]
    # resize to 224, 224
    resized_img = transform.resize(img, (224, 224))
    if show_img:
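This fragment builds both a GPU and a CPU predictor for the same model, presumably to compare their outputs, but the script is cut off before that happens. A sketch of such a check, assuming PreprocessImage is completed to return a (1, 3, 224, 224) batch and that forward/get_output behave as in the other examples:

import numpy as np

def CompareDevices(path, rtol=1e-3, atol=1e-5):
    batch = PreprocessImage(path).astype(np.float32)
    predictor_gpu.forward(data=batch)
    predictor_cpu.forward(data=batch)
    out_gpu = predictor_gpu.get_output(0)
    out_cpu = predictor_cpu.get_output(0)
    # GPU and CPU kernels differ slightly, so compare with a tolerance.
    np.testing.assert_allclose(out_gpu, out_cpu, rtol=rtol, atol=atol)
    print("GPU and CPU outputs match within tolerance")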
Example #7

import sys

sys.path.append("../../predict/python/")
sys.path.append("../../python/")

from mxnet_predict import Predictor, load_ndarray_file
import mxnet as mx
import logging
import numpy as np
from skimage import io, transform

# Load the pre-trained model
prefix = "Inception/Inception_BN"
num_round = 39
symbol_file = "%s-symbol.json" % prefix
param_file = "%s-0039.params" % prefix
predictor = Predictor(open(symbol_file).read(),
                      open(param_file, "rb").read(),
                      {'data': (1, 3, 224, 224)})
mean_img = load_ndarray_file(open("Inception/mean_224.nd", "rb").read())["mean_img"]

synset = [l.strip() for l in open('Inception/synset.txt').readlines()]

def PreprocessImage(path, show_img=False):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
    if show_img:
Example #8

import sys

from mxnet_predict import Predictor, load_ndarray_file
import mxnet as mx
import logging
import numpy as np
from skimage import io, transform
# Load the pre-trained model
prefix = "./face-0"
num_round = 125
batch_size = 20
if len(sys.argv) > 2:
    num_round = int(sys.argv[2])
symbol_file = "%s-symbol.json" % prefix
param_file = "%s-%s.params" % (prefix, str(num_round).zfill(4))
predictor = Predictor(
    open(symbol_file).read(),
    open(param_file, "rb").read(), {'data': (batch_size, 3, 224, 224)}, 'gpu', 0)
mean_img = load_ndarray_file(open("./mean.bin", "rb").read())["mean_img"]

synset = [l.strip() for l in open('./labels.txt').readlines()]

def PreprocessImage(batchs, index, path, show_img=False):
    # load image
    img = io.imread(path)
    #print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
    if show_img:
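Like Example #4, this last script reads command-line arguments and classifies images in batches of 20 on the GPU, but it also stops inside PreprocessImage. A sketch of a plausible driver under stated assumptions: sys.argv[1] is taken to be a text file listing one image path per line (the snippet only shows sys.argv[2] being used for num_round), and PreprocessImage is assumed to fill batchs[index].

if __name__ == '__main__':
    # Assumption: sys.argv[1] names a file with one image path per line.
    paths = [line.strip() for line in open(sys.argv[1]) if line.strip()]
    batchs = np.zeros((batch_size, 3, 224, 224), dtype=np.float32)
    for start in range(0, len(paths), batch_size):
        chunk = paths[start:start + batch_size]
        for i, p in enumerate(chunk):
            PreprocessImage(batchs, i, p, show_img=False)
        predictor.forward(data=batchs)
        prob = predictor.get_output(0)
        for i, p in enumerate(chunk):
            print(p, synset[int(np.argmax(prob[i]))])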