Example #1
def predict(model, img, target_size):

    if img.size != target_size:
        img = img.resize(target_size)

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    # time.clock() was removed in Python 3.8; use perf_counter() instead
    clock_initial = time.perf_counter()
    preds = model.predict(x)
    clock_final = time.perf_counter()

    # Class 0: Normal
    # Class 1: With Diabetic Retinopathy

    #print('Class 0:', preds[0,0] * 100., '%')
    #print('Class 1:', preds[0,1] * 100, '%')

    control = np.round(preds[0, 0] * 100, 2)
    case = np.round(preds[0, 1] * 100, 2)

    pred = [control, case]

    return pred
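The snippet above depends on names that are not shown (image, np, preprocess_input, time) and on an already-trained two-class model. A minimal usage sketch, assuming a Keras .h5 model and InceptionV3-style preprocessing (file names and target size are placeholders):

import time
import numpy as np
from PIL import Image
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input

# placeholder paths; substitute your own model and fundus image
model = load_model('retinopathy_model.h5')
img = Image.open('fundus.jpg')

control_pct, case_pct = predict(model, img, target_size=(299, 299))
print('Normal: {}%, Diabetic Retinopathy: {}%'.format(control_pct, case_pct))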
Example #2
File: model2.py Project: vivekmids/crow
def infer(model, image):
    """Here is the code that to perform inference using the model.
    Expect this to be the model you return in `load_model()`

    Args:
        model: one built using load_model
        image: numpy array as created by opencv

    Returns: bool, list(string)
        - a boolean to signal we found something
        - name of identified animals
    """

    classes = ['rodent','squirrel','rabbit','bird','deer','raccoon','skunk','opossum']
    classes_dict_lookup = dict(zip(range(10), classes + ['other'] + ['empty']))

    # run inference (tensor indices are hard-coded here; model1.py below looks
    # them up with get_input_details()/get_output_details() instead)
    image = preprocess_input(image)
    model.set_tensor(1, image)
    model.invoke()

    predicted_id = model.get_tensor(0)
    predicted_name = classes_dict_lookup[predicted_id.argmax()]

    if predicted_name in classes:
        return True, predicted_name
    else:
        return False, None
Example #3
def predict(model, img):
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    prediction_index = np.argmax(preds[0])
    return CATEGORIES[prediction_index]
Example #4
def vgg_predictor(img_path):
    model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "VGG_all_100p_94.h5")
    img_target_size = (100, 100)

    model = load_model(model_path)

    # get image as array and resize it if necessary
    pil_img = Image.open(img_path)
    if pil_img.size != img_target_size:
        pil_img = pil_img.resize(img_target_size)

    img = image.img_to_array(pil_img)

    # if alpha channel found, discard it
    if img.shape[2] == 4:
        img = img[:, :, :3]

    # preprocess image
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)

    preds = model.predict(img).flatten()

    # get predictions index sorted based on the best predictions
    value_ = preds.argsort()
    sorted_preds_index = value_[::-1]

    # all species and supported species names
    SPECIES = [
        "Apple", "Soybean", 'Blueberry', 'Cherry', 'Corn', "Grape",
        'grapefruit', 'Orange', 'Peach', "Pepper", 'Potato', 'Raspberry',
        'Sorghum', 'Soybean', 'Squash', 'Strawberry', 'sugarcane', "Tomato"
    ]
    return SPECIES[sorted_preds_index[0]]
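vgg_predictor() likewise relies on imports that the snippet does not show (os, load_model, Image, image, np, preprocess_input). One plausible set, assuming a Keras VGG16-style model saved as .h5, plus a call with a hypothetical image path:

import os
import numpy as np
from PIL import Image
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input

# hypothetical input image path
print(vgg_predictor('leaf.jpg'))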
Example #5
File: model1.py Project: vivekmids/crow
def infer(model, image):
    """Here is the code that to perform inference using the model.
    Expect this to be the model you return in `load_model()`

    Args:
        model: one built using load_model
        image: numpy array as created by opencv

    Returns: bool, list(string)
        - a boolean to signal we found something
        - name of identified animals
    """

    # classes = ['skunk','fox','rodent','dog','squirrel','cat','rabbit','bird','cow','bobcat','deer','raccoon','coyote','opossum']
    # classes_dict_lookup = dict(zip(range(15), classes+['other']))

    input_index = model.get_input_details()[0]['index']
    output_index = model.get_output_details()[0]['index']

    # run inference
    image = preprocess_input(image)
    model.set_tensor(input_index, image)
    model.invoke()
    predicted_id = model.get_tensor(output_index)
    predicted_name = prediction_map[predicted_id.argmax()]

    logging.warning('Probability of top class is ' + str(predicted_id.max()))

    if predicted_name != 'empty':
        return True, predicted_name
    else:
        return False, None
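load_model() itself is not part of this snippet, and prediction_map is assumed to be a module-level dict mapping class indices to names. Given the interpreter calls used above (set_tensor, invoke, get_tensor), a hedged sketch of load_model() for a TensorFlow Lite model might look like this (the .tflite path is a placeholder):

import tensorflow as tf

def load_model(model_path='model.tflite'):
    # build a TFLite interpreter and allocate its tensors once, up front
    interpreter = tf.lite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    return interpreter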
Example #6
def prepare_image_data(paths):
    images_root = paths['images_root']
    captions_path = paths['train_captions_path']
    output_path = paths['image_features_path']

    if os.path.isfile(output_path):
        print('Image prep: Output file already exists, doing nothing.')
        return

    # 'avg_pool' is the final layer in InceptionV3 before 'predictions'. That is the
    # data used by VETE.
    # NOTE(laser): This will download InceptionV3 and depends on pillow and h5py
    base_model = inception_v3.InceptionV3(weights='imagenet', include_top=True)
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('avg_pool').output)

    with open(captions_path) as f:
        image_metadata = json.load(f)
    path = os.path.join(images_root, image_metadata['images'][0]['file_name'])

    chunk_size = 512
    image_ids = []
    result_list = []

    # OPTIMIZE(laser): In theory, image loading here is super slow. We're doing at
    # least 2-3 times the number of copies we need. In practice, this only needs to
    # run once over night.
    chunk_idx = 0
    for start in range(0, len(image_metadata['images']), chunk_size):
        image_count = 0
        image_list = []
        for image_entry in image_metadata['images'][start:start + chunk_size]:
            path = os.path.join(images_root, image_entry['file_name'])

            # NOTE(laser): Paper mentions rescaling to 300x300 but default arguments
            # in InceptionV3 docs say 299x299. Using that instead.
            img = image.load_img(path, target_size=(299, 299))
            x = image.img_to_array(img)
            image_ids.append(image_entry['id'])
            image_list.append(x)
            image_count += 1
            if image_count == chunk_size:
                chunk_idx += 1
                print('Loaded %s images (chunk %d)' %
                      (chunk_size * chunk_idx, chunk_idx - 1))
                break

        data = concat_np_list(image_list)
        data = inception_v3.preprocess_input(data)
        result = model.predict(data)
        result_list.append(result)
        print('Processed %s images (chunk %d)' %
              (chunk_size * chunk_idx, chunk_idx - 1))

    final_result = np.concatenate(result_list)
    final_result = np.insert(final_result, 0, np.array(image_ids), axis=1)
    # sort rows by image id (the first column); np.sort(..., axis=0) would sort
    # each column independently and break the id/feature alignment
    final_result = final_result[final_result[:, 0].argsort()]
    np.save(output_path, final_result)
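concat_np_list() is not defined in this snippet; given how it is used (turning a list of 299x299x3 arrays into a single batch for model.predict), a minimal stand-in could be:

def concat_np_list(image_list):
    # stack N arrays of shape (299, 299, 3) into one (N, 299, 299, 3) batch
    return np.stack(image_list, axis=0)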
Example #7
def image_preprocess(image_path):
    # Convert all the images to size 299x299 as expected by the inception v3 model
    img = image.load_img(image_path, target_size=(299, 299))
    # Convert PIL image to numpy array of 3-dimensions
    x = image.img_to_array(img)
    # Add one more dimension
    x = np.expand_dims(x, axis=0)
    # preprocess the images using preprocess_input() from inception module
    x = preprocess_input(x)
    return x
Example #8
def preprocess_image(im_path, im_size, model_name):
    im = image.load_img(im_path, target_size=(im_size[0], im_size[1]))
    im = image.img_to_array(im)
    im = np.expand_dims(im, axis=0)
    if model_name == 'inception_v3':
        im = inception_v3.preprocess_input(im)
    elif model_name == 'resnet50':
        im = resnet50.preprocess_input(im)
    elif model_name == 'vgg16':
        im = vgg16.preprocess_input(im)
    return im
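preprocess_image() assumes that image, np, and the per-model application modules are already imported; one way to satisfy that, using the tf.keras application packages, is shown below (the image path in the call is hypothetical):

import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications import inception_v3, resnet50, vgg16

# hypothetical usage: returns a (1, 224, 224, 3) batch preprocessed for ResNet50
batch = preprocess_image('photo.jpg', (224, 224), 'resnet50')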
Example #9
def get_predictions(model_path, img_path, img_target_size):
    """
    Loads model and image and make predictions using them

    Args:
        model_path: filesystem path of model
        img_path: filesystem path of image
        img_target_size: target image size to reshape the image if necessary

    Returns:
        a tuple of:
            1. array of prediction values by the model for all classes
            2. array of indices that can sort the classes from best prediction to worst
    """

    if not os.path.exists(model_path):
        raise ValueError(
            'No such file `{}` found\n'
            'Please check out the README of the project '
            'on GitHub and download the required models'.format(model_path))
    model = load_model(model_path)

    # get image as array and resize it if necessary
    pil_img = Image.open(img_path)
    if pil_img.size != img_target_size:
        pil_img = pil_img.resize(img_target_size)

    img = image.img_to_array(pil_img)

    # if alpha channel found, discard it
    if img.shape[2] == 4:
        img = img[:, :, :3]

    # preprocess image
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)

    preds = model.predict(img).flatten()

    # get predictions index sorted based on the best predictions
    value_ = preds.argsort()
    sorted_preds_index = value_[::-1]

    return preds, sorted_preds_index
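A short usage sketch, assuming a class-name list in the same order as the model's output (the paths, labels, and image size below are placeholders):

CLASS_NAMES = ['class_a', 'class_b', 'class_c']  # placeholder labels

preds, sorted_idx = get_predictions('model.h5', 'sample.jpg', (100, 100))
best = sorted_idx[0]
print('Top prediction: {} ({:.2f}%)'.format(CLASS_NAMES[best], preds[best] * 100))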
Example #10
File: predict.py Project: GRSEB9S/NASNet
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications.inception_v3 import preprocess_input, decode_predictions
import numpy as np
import nasnet

img = image.load_img('image.jpg', target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

model = nasnet.mobile()
model.load_weights('mobile.h5')

y = model.predict(x)
for index, res in enumerate(decode_predictions(y)[0]):
    print('{}. {}: {:.3f}%'.format(index + 1, res[1], 100 * res[2]))
Example #11
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.python.keras import backend as K

from keras.applications.imagenet_utils import preprocess_input, decode_predictions

from IPython.display import Image
from keras.preprocessing import image
import numpy as np

# additional imports needed by the code below (missing from the original snippet)
import os
import tensorflow as tf
from tensorflow.python.keras.applications import inception_v3

print(tf.keras.__version__)
print(tf.__version__)

model = inception_v3.InceptionV3(weights='imagenet', include_top=True)
image_path = "./imagenet/"

img_path = os.path.join(image_path, 'cow.jpg')
img = image.load_img(img_path, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = inception_v3.preprocess_input(x)
print('Input image shape:', x.shape)

preds = model.predict(x)

print(' ')
print('Predicted:', decode_predictions(preds))

Image(img_path)
img.show()
Example #12
def preprocess(PIL_image):
    resized_image = PIL_image.resize((299, 299))
    x = image.img_to_array(resized_image)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x