Example 1
    def new_fun_use(self):
        # prototxt_19 / model_file_19, CaffeLoader, cv2, np and SGD are
        # assumed to be imported/defined at module level.
        print('new_fun_use')

        # Convert the Caffe network definition and weights into a Keras model.
        cl = CaffeLoader(prototxt_path=prototxt_19, caffemodel_path=model_file_19)
        model = cl.load()

        # Read the test image, resize it to the 224x224 network input,
        # reorder to channels-first and add a batch dimension.
        im = cv2.resize(cv2.imread('Cats.jpg'), (224, 224))
        im = im.transpose((2, 0, 1))
        im = np.expand_dims(im, axis=0)

        # Test pretrained model
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='categorical_crossentropy')

        out = model.predict(im)

        # Indices and probabilities of the five highest-scoring classes.
        top5 = np.argsort(out)[0][::-1][:5]
        probs = np.sort(out)[0][::-1][:5]
        print('yes')

        # Map class indices to human-readable ImageNet synset labels.
        words = open('synset_words.txt').readlines()
        words = [(w[0], ' '.join(w[1:])) for w in [w.split() for w in words]]
        words = np.asarray(words)

        for w, p in zip(words[top5], probs):
            print('{}\tprobability:{}'.format(w, p))
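
Note that the example above feeds the raw BGR pixels from cv2.imread straight into the network. Caffe-trained VGG models are usually evaluated on mean-subtracted BGR input, so a small preprocessing helper along the lines of the sketch below is typically needed before model.predict. The helper name and the per-channel means (the commonly used ImageNet BGR statistics) are assumptions, not something taken from the example; check them against the statistics the model was actually trained with.

import cv2
import numpy as np

def preprocess_bgr(path, size=(224, 224)):
    # Load with OpenCV (BGR channel order), resize to the network input,
    # subtract per-channel means, move to channels-first, add a batch axis.
    im = cv2.resize(cv2.imread(path), size).astype(np.float32)
    im[:, :, 0] -= 103.939  # B mean (commonly used ImageNet value)
    im[:, :, 1] -= 116.779  # G mean
    im[:, :, 2] -= 123.68   # R mean
    im = im.transpose((2, 0, 1))
    return np.expand_dims(im, axis=0)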
Example 2
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image
import cStringIO as StringIO
import urllib
import exifutil

import flask

from keras.optimizers import SGD
from caffe2keras.caffeloader import CaffeLoader

# Paths to the Caffe VGG-16 deploy definition and pretrained weights.
prototxt = '/mnt/share/projects/keras_test/chainer-imagenet-vgg-master/VGG_ILSVRC_16_layers_deploy.prototxt'
model_file = '/mnt/share/projects/keras_test/chainer-imagenet-vgg-master/VGG_ILSVRC_16_layers.caffemodel'

# Convert the Caffe definition and weights into a Keras model and compile it
# so it can be used for inference.
cl = CaffeLoader(prototxt_path=prototxt, caffemodel_path=model_file)
model = cl.load()
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')

# ImageNet synset labels as (synset id, human-readable description) pairs.
words = open('synset_words.txt').readlines()
words = [(w[0], ' '.join(w[1:])) for w in [w.split() for w in words]]
words = np.asarray(words)

# Upload settings for the Flask demo.
UPLOAD_FOLDER = '/tmp/keras_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])

# Obtain the flask app object
app = flask.Flask(__name__)
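
The listing ends right after the Flask app object is created. A minimal classification endpoint built on top of it could look like the sketch below; the route name, the allowed_file helper and the 'imagefile' form field are illustrative assumptions, while model, words, UPLOAD_FOLDER, ALLOWED_IMAGE_EXTENSIONS, app, flask and np come from the code above.

import os
import cv2

def allowed_file(filename):
    # Hypothetical helper: accept only the extensions configured above.
    return ('.' in filename and
            filename.rsplit('.', 1)[1].lower() in ALLOWED_IMAGE_EXTENSIONS)

@app.route('/classify_upload', methods=['POST'])
def classify_upload():
    # Save the uploaded file, run it through the converted VGG model and
    # return the top-5 ImageNet labels as JSON.
    f = flask.request.files.get('imagefile')
    if f is None or not allowed_file(f.filename):
        return flask.jsonify(error='unsupported or missing file'), 400

    if not os.path.exists(UPLOAD_FOLDER):
        os.makedirs(UPLOAD_FOLDER)
    path = os.path.join(UPLOAD_FOLDER, f.filename)
    f.save(path)

    # Same preprocessing as the examples above: resize, channels-first, batch axis.
    im = cv2.resize(cv2.imread(path), (224, 224))
    im = np.expand_dims(im.transpose((2, 0, 1)), axis=0)
    out = model.predict(im)

    top5 = np.argsort(out)[0][::-1][:5]
    probs = np.sort(out)[0][::-1][:5]
    results = [{'label': ' '.join(words[i]), 'probability': float(p)}
               for i, p in zip(top5, probs)]
    return flask.jsonify(predictions=results)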