Example #1
def preprocessing_image(img_path):
    im = image.load_img(img_path, target_size=(224, 224))  # target_size is (height, width)
    im = image.img_to_array(im)
    im = np.expand_dims(im, axis=0)
    im = preprocess_input(im)
    return im
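A minimal usage sketch for the helper above (a sketch, assuming the usual imports: ResNet50, preprocess_input, and decode_predictions from tensorflow.keras.applications.resnet50, image from tensorflow.keras.preprocessing, and numpy as np; elephant.jpg is a hypothetical test file):

model = ResNet50(weights='imagenet')
batch = preprocessing_image('elephant.jpg')  # shape (1, 224, 224, 3)
preds = model.predict(batch)
print(decode_predictions(preds, top=3)[0])   # top-3 (class_id, name, probability) tuples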
Example #2
def generate_dataset_main(n=10000,
                          save_path=None,
                          seed=None,
                          model_res=1024,
                          image_size=256,
                          minibatch_size=16,
                          truncation=0.7):
    """
    Generates a dataset of 'n' images of shape ('size', 'size', 3) with random seed 'seed'
    along with their dlatent vectors W of shape ('n', 512)

    These datasets can serve to train an inverse mapping from X to W as well as explore the latent space

    More variation added to latents; also, negative truncation added to balance these examples.
    """

    n = n // 2  # this gets doubled because of negative truncation below
    model_scale = int(2 * (math.log(model_res, 2) - 1))  # e.g. 1024 -> 18

    Gs = load_Gs()

    if (model_scale % 3 == 0):
        mod_l = 3
    else:
        mod_l = 2

    if seed is not None:
        b = bool(np.random.RandomState(seed).randint(2))
    else:
        b = bool(np.random.randint(2))

    if b:
        mod_l = model_scale // 2
    mod_r = model_scale // mod_l

    if seed is not None:
        Z = np.random.RandomState(seed).randn(n * mod_l, Gs.input_shape[1])
    else:
        Z = np.random.randn(n * mod_l, Gs.input_shape[1])

    W = Gs.components.mapping.run(
        Z, None, minibatch_size=minibatch_size
    )  # Use mapping network to get unique dlatents for more variation.
    dlatent_avg = Gs.get_var('dlatent_avg')  # [component]
    W = (W[np.newaxis] - dlatent_avg) * np.reshape(
        [truncation, -truncation], [-1, 1, 1, 1]
    ) + dlatent_avg  # truncation trick; also creates the negative-truncation image pair
    W = np.append(W[0], W[1], axis=0)
    W = W[:, :mod_r]
    W = W.reshape((n * 2, model_scale, 512))
    X = Gs.components.synthesis.run(W,
                                    randomize_noise=False,
                                    minibatch_size=minibatch_size,
                                    print_progress=True,
                                    output_transform=dict(
                                        func=tflib.convert_images_to_uint8,
                                        nchw_to_nhwc=True))
    X = np.array([
        cv2.resize(x, (image_size, image_size), interpolation=cv2.INTER_AREA)
        for x in X
    ])
    #X = preprocess_input(X, backend = keras.backend, layers = keras.layers, models = keras.models, utils = keras.utils)
    X = preprocess_input(X)
    return W, X
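A hedged usage sketch for generate_dataset_main (argument values are illustrative; it assumes the StyleGAN environment that provides load_Gs, Gs, and tflib):

# Hypothetical call: 1,000 image/dlatent pairs at 256x256 from a 1024-res model.
W, X = generate_dataset_main(n=1000, seed=42, model_res=1024,
                             image_size=256, minibatch_size=16, truncation=0.7)
print(W.shape)  # (1000, 18, 512): n is halved, then doubled by the negative-truncation pair
print(X.shape)  # (1000, 256, 256, 3), already run through preprocess_input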
Example #3
def prepare_image(raw_image):
    img = cv2.resize(raw_image, dsize=(224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    return img
def rgb_to_gray(img):
    x = preprocess_input(img)
    gray_pic = np.dot(x[..., :3], [0.299, 0.587, 0.114])
    final_pic = np.repeat(gray_pic[:, :, np.newaxis], 3, axis=2)
    return final_pic
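A minimal usage sketch for the two helpers above (a sketch, assuming cv2, numpy as np, and the tensorflow.keras image/preprocess_input imports; photo.jpg is hypothetical):

raw = cv2.imread('photo.jpg')                 # BGR array straight from disk
batch = prepare_image(raw)                    # (1, 224, 224, 3), ready for model.predict
gray3 = rgb_to_gray(cv2.resize(raw, (224, 224)).astype('float32'))  # (224, 224, 3), three identical channels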
        metadata[r[0]] = {"coord": [r[2], r[3]], "score": trueskill.Rating()}

# Load and preprocess images to be scored

# In[8]:

print("loading images")
images = {}
for imName in metadata.keys():
    im = cv2.imread(data_path + img_folder + imName + ".jpg")

    if im is not None:
        im = cv2.resize(im, (224, 224), interpolation=cv2.INTER_CUBIC)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        images[imName] = preprocess_input(im)

# Keep only the "mu" value of the trueskill scores

# In[9]:
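# Hedged sketch of the step described above (the original cell is not shown):
# replace each trueskill.Rating object with its mean, keeping only "mu".
for imName in metadata:
    metadata[imName]["score"] = metadata[imName]["score"].mu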

# Experimental: compare the analyzed images
print("Loading left and right")
i = 0
endi = len(images) // length

for l in range(0, len(images), length):
    leftIds = [
        tId for tId in random.sample(list(images.keys()), nComp)
        for u in range(0, length)
    ]
Example #6
t0 = time()

trained_model_path = "new_trained_from_resnet50_jun1"

model = tf.keras.models.load_model(trained_model_path)

t = gmtime(time() - t0)
print("Model Load Done in", strftime("%H:%M:%S", t))

while True:
    try:
        frame = get_frame_from_UDP()

        image = frame.reshape(-1, 224, 224, 3)
        x = resnet50.preprocess_input(image)
        y = model.predict(x)
        print(y, y.argmax())

        if len(y) == 1:
            prob_blocked = y[0][0]
            if prob_blocked >= 0.5:
                cv.putText(frame, "Blocked", (10, 20), cv.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
            else:
                cv.putText(frame, "Free", (10, 20), cv.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)
            cv.imshow("jetbot", frame)

        if cv.waitKey(1) == ord('q'):
            break
    except Exception as e:
        print(e)
Example #7
        idx for idx in glob.glob(
            '/home/VinBigData_ChestXray/data_classify/4_10_11_12_single_class_ver2_stage1_test_cropped/*.png'
        )
    ]

    classes_lst = []
    class2id = {0: '4', 1: '10', 2: '11', 3: '12'}
    RESULTS_PATH = '/home/VinBigData_ChestXray/data_classify/results_4_10_11_12_single_class_ver2_stage1_test_cropped/'

    create_folder(RESULTS_PATH)
    create_folder(os.path.join(RESULTS_PATH, '4'))
    create_folder(os.path.join(RESULTS_PATH, '10'))
    create_folder(os.path.join(RESULTS_PATH, '11'))
    create_folder(os.path.join(RESULTS_PATH, '12'))

    for image_idx in test_images:
        img = image.load_img(image_idx, target_size=(1024, 1024))
        img_array = image.img_to_array(img)
        img_batch = np.expand_dims(img_array, axis=0)
        img_preprocessed = preprocess_input(img_batch)
        prediction = model.predict(img_preprocessed)
        classes = class2id[np.argmax(prediction[0])]
        classes_lst.append(classes)  # record the class so the summary count below is correct
        print('Saving {} as {}'.format(
            image_idx.split('/')[-1],
            classes + '_' + image_idx.split('/')[-1]))
        shutil.copyfile(
            image_idx,
            os.path.join(RESULTS_PATH, classes,
                         classes + '_' + image_idx.split('/')[-1]))

    print('Length of test images', len(classes_lst))
Example #8
# One-hot encoding
Y_train = to_categorical(Y_train, 10)
Y_test = to_categorical(Y_test, 10)
# Load the ResNet50 model
resnet_model = ResNet50(weights="imagenet",
                        include_top=False,
                        input_shape=(200, 200, 3))
# Resize the X_train images
print("Resizing the X_train images...")
X_train_new = np.array([
    np.asarray(Image.fromarray(X_train[i]).resize((200, 200)))
    for i in range(0, len(X_train))
])
X_train_new = X_train_new.astype("float32")
# Preprocess the training data
train_input = preprocess_input(X_train_new)
# Use the ResNet50 model to extract features from the training data
print("Extracting training features with ResNet50...")
train_features = resnet_model.predict(train_input)
# Resize the X_test images
print("Resizing the X_test images...")
X_test_new = np.array([
    np.asarray(Image.fromarray(X_test[i]).resize((200, 200)))
    for i in range(0, len(X_test))
])
X_test_new = X_test_new.astype("float32")
# Preprocess the test data
test_input = preprocess_input(X_test_new)
# Use the ResNet50 model to extract features from the test data
print("Extracting test features with ResNet50...")
test_features = resnet_model.predict(test_input)
Example #9
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions

import base64

tf.compat.v1.disable_v2_behavior()  # run in static-graph (TF1) mode
# The input is a string tensor
input_imgs = tf.compat.v1.placeholder(shape=None, dtype=tf.string)
# Decode the base64 string into a JPEG image
decoded = tf.image.decode_jpeg(tf.compat.v1.decode_base64(input_imgs), channels=3)
# Resize to [224, 224] with nearest-neighbor, since ResNet50 expects 224x224 inputs
decoded = tf.compat.v1.image.resize_images(decoded, [224, 224], tf.image.ResizeMethod.NEAREST_NEIGHBOR)

# Insert a batch dimension of size 1 at position 0, making a single-image batch
tensorimg = tf.expand_dims(tf.cast(decoded, dtype=tf.float32), 0)

tensorimg = preprocess_input(tensorimg)  # image preprocessing

with tf.compat.v1.Session() as sess:  # create a session
    sess.run(tf.compat.v1.global_variables_initializer())
    # Load the ResNet50 model
    Reslayer = ResNet50(weights='resnet50_weights_tf_dim_ordering_tf_kernels.h5')

    logits = Reslayer(tensorimg)  # the model's output node
    # Take the index of the most probable class for the image
    prediction = tf.squeeze(tf.cast(tf.argmax(logits, 1), dtype=tf.int32), [0])

    img_path = './dog.jpg'  # path to the test image

    with open(img_path, "rb") as image_file:
        # Encode the image as a URL-safe base64 string (decode_base64 expects the URL-safe alphabet)
        encoded_string = str(base64.urlsafe_b64encode(image_file.read()), "utf-8")
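    # Hedged completion (an assumption, not part of the original snippet):
    # feed the base64 string into the placeholder and fetch the class index.
    class_id = sess.run(prediction, feed_dict={input_imgs: encoded_string})
    print('Predicted class id:', class_id)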
Example #10

# In[9]:
'''
img_generator = image_dataset_from_directory('C:\\Users\\calvom\\ThesisMaster\\notebooks\\downloaded_data\\training_top_23_0.1_resize_672_672', 
                                             batch_size=32, 
                                             image_size=(224, 224), 
                                             shuffle=True, 
                                             label_mode='categorical')
'''

base_model = ResNet50(weights='imagenet', include_top=False)
#base_model = MobileNetV2(weights='imagenet', include_top=False)
input_img = Input((224, 224, 3))

x = preprocess_input(input_img)

#x = tf.keras.applications.mobilenet_v2.preprocess_input(input_img)

encoded_features = base_model(x, training=False)

x3 = GlobalAveragePooling2D()(encoded_features)
x4 = Dense(1024, activation='relu')(x3)
x5 = Dropout(0.4)(x4)

predictions = Dense(
    n_classes,
    activation='softmax',
    name='softmax',
    kernel_regularizer=l2(0.01),
    bias_regularizer=l2(0.01),
)(x5)
Example #11
# Pick one test image (the last uncommented assignment wins):
# image_path = IMAGES_DIR / 'lena.jpg'
# image_path = IMAGES_DIR / 'military-raptor.jpg' # warplane
# image_path = IMAGES_DIR / 'baboon.png' # baboon
# image_path = IMAGES_DIR / 'fighter_jet.jpg' # warplane
# image_path = IMAGES_DIR / 'watch.png' # stopwatch
# image_path = IMAGES_DIR / 'bear.tif' # brown_bear
image_path = IMAGES_DIR / 'wild_flowers.tif' # greenhouse


print(image_path)
# read image in RGB format
image = imageio.imread(str(image_path))
print(image.shape)
image = vision.resize(image, 224, 224)
batch = expand_to_batch(image)
batch = preprocess_input(batch)

b_model = resnet.model_resnet50(weights="imagenet")
b_y = b_model.predict(batch)
b_label = decode_predictions(b_y)
b_label = b_label[0][0]
# print the classification
print('OUR: %s (%.2f%%)' % (b_label[1], b_label[2]*100))


a_model = ResNet50(weights="imagenet")
a_y = a_model.predict(batch)
a_label = decode_predictions(a_y)
a_label = a_label[0][0]
# print the classification
print('KERAS: %s (%.2f%%)' % (a_label[1], a_label[2]*100))
Example #12
def ImageEncode(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return x
Example #13
def process_image(img_path):
    img = image.load_img(img_path, target_size=(512, 512))
    x = image.img_to_array(img)
    return preprocess_input(x)  # returns a single (512, 512, 3) array; add a batch axis before calling model.predict
# model.fit(x=x_train, y=y_train, epochs=3, callbacks=[checkpoint], verbose=0)
# print(get_test_accuracy(model, x_test, y_test))
#
# new_model = load_model(checkpoint_path)
# print(get_test_accuracy(new_model, x_test, y_test))
#
# model.save('my_model.h5')
# new_model1 = load_model('my_model.h5')
# print(get_test_accuracy(new_model1, x_test, y_test))

model = ResNet50(weights='imagenet', include_top=True)
from tensorflow.keras.preprocessing import image
img_input = image.load_img('my_picture.jpg', target_size=(224, 224))
img_input = image.img_to_array(img_input)
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
img_input = preprocess_input(img_input[np.newaxis, ...])  # keep the preprocessed batch
preds = model.predict(img_input)
top_preds = decode_predictions(preds, top=3)[0]  # renamed so the decode_predictions function is not shadowed

from tensorflow.keras.applications import ResNet50
model = ResNet50(weights='imagenet')

from tensorflow.keras.preprocessing.image import load_img

lemon_img = load_img('lemon.jpg', target_size=(224, 224))
viaduct_img = load_img('viaduct.jpg', target_size=(224, 224))
water_tower_img = load_img('water_tower.jpg', target_size=(224, 224))

# Useful function: presents top 5 predictions and probabilities

from tensorflow.keras.preprocessing.image import img_to_array
    def _read_image(self, image):
        full_img_path = path.join(self._basepath, "all_images", image)
        img = pil_image.open(full_img_path).convert("RGB")
        img = img.resize((224, 224), pil_image.BILINEAR)

        return preprocess_input(np.array(img, dtype=np.float32))
end = time.time()
time_taken = end - start
print('ResNet50 loading time: ', time_taken, " seconds")
all_images = np.zeros((len(names), 224, 224, 3))

start = time.time()
for i, img_path in enumerate(names):
    img = image.load_img(img_path, target_size=(224, 224))
    img2 = np.expand_dims(image.img_to_array(img), axis=0)
    all_images[i, :, :, :] = img2
end = time.time()
time_taken = end - start
print('Image loading time: ', time_taken, " seconds")

start = time.time()
all_images = preprocess_input(all_images)
end = time.time()
time_taken = end - start
print('Preprocess time: ', time_taken, " seconds")

start = time.time()
predictions = model.predict(all_images)
del all_images
end = time.time()
time_taken = end - start
print('Feature extraction time: ', time_taken, " seconds")
predictions = predictions.reshape((predictions.shape[0], -1))
pca_comp = 62
start = time.time()
pca = PCA(n_components=pca_comp)
pca_predictions = pca.fit_transform(predictions)
end = time.time()
    def _read_image(self, image_path):
        img = pil_image.open(image_path).convert("RGB")
        img = img.resize((224, 224), pil_image.BILINEAR)

        return preprocess_input(np.array(img, dtype=np.float32))
Example #18
    def build_model(self, data_sources: Dict[str, BaseDataSource], mode: str):
        """Build model."""
        data_source = next(iter(data_sources.values()))
        input_tensors = data_source.output_tensors
        # stack left-eye and right-eye to get a 6 channels tensors, placeholder
        # le = input_tensors['left-eye']
        # re = input_tensors['right-eye']

        le = input_tensors['left-eye'] # shape (60 224 3)
        re = input_tensors['right-eye'] # shape (60 224 3)
        face = input_tensors['face'] #shape (224, 224, 3)

        le = preprocess_input(le)  # ResNet50 preprocessing (mean subtraction / channel reordering)
        re = preprocess_input(re)
        face = preprocess_input(face)
        RN = resNet50.ResNet50
        le_conv = RN(weights='imagenet', include_top=False, input_tensor=le)
        re_conv = RN(weights='imagenet', include_top=False, input_tensor=re)
        face_conv = RN(weights='imagenet', include_top=False, input_tensor=face)

        le_flatten = Flatten()(le_conv.output)
        re_flatten = Flatten()(re_conv.output)
        face_flatten = Flatten()(face_conv.output)

        # for face
        # face_flatten = Dense(2048, activation='relu', name='face_1')(face_flatten)
        face_flatten = Dense(1000, activation='relu', name='face_3')(face_flatten)

        # for eye
        # le_flatten = Dense(2048, activation='relu', name='le_3')(le_flatten)
        le_flatten = Dense(1000, activation='relu', name='le_4')(le_flatten)

        # re_flatten = Dense(2048, activation='relu', name='re_3')(re_flatten)
        re_flatten = Dense(1000, activation='relu', name='re_4')(re_flatten)  # unique layer name ('le_4' was duplicated)

        # for face landmark
        # add face landmarks(coordinates)

        # add head pose into the vector
        # x = tf.concat((le_flatten, re_flatten, face_flatten, lm,  input_tensors['head']), axis = 1)
        x = tf.concat((le_flatten, re_flatten, face_flatten, input_tensors['head']), axis = 1)
        x = BatchNormalization()(x)
        # x = tf.concat((le_flatten, re_flatten, face_flatten), axis = 1)
        x = Dense(1024, activation='relu', name='fc_3')(x)
        # x = Dense(1024, activation='relu', name='fc_4')(x)
        x = Dense(512, activation='relu', name='fc_6')(x)
        x = Dense(128, activation='relu', name='fc_7')(x)
        preds = Dense(2,name='preds')(x)

        # eye_model = Model(lre, preds)
        
        # Define outputs
        loss_terms = {}
        metrics = {}
        if 'gaze' in input_tensors:
            y = input_tensors['gaze']
            with tf.variable_scope('mse'):  # To optimize
                # NOTE: You are allowed to change the optimized loss
                loss_terms['gaze_mse'] = tf.reduce_mean(tf.squared_difference(preds, y))
                # loss_terms['gaze_mse'] = util.gaze.tensorflow_angular_error_from_pitchyaw(preds, y)
            with tf.variable_scope('ang'):  # To evaluate in addition to loss terms
                metrics['gaze_angular'] = util.gaze.tensorflow_angular_error_from_pitchyaw(preds, y)
        return {'gaze': preds}, loss_terms, metrics   # are graphs to be executed
Example #19
from sklearn.preprocessing import LabelBinarizer
from random import shuffle

# select which GPU to use for training
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

### Params ###
# Set here the basic params to choose which data to use for the training
path = "images-atmos/"  # The folder where the images are located
cspath = "crowdsourcing/atmosphere-final.json"  # The file where the crowdsourcing results are located
output_path = "models/full_model_atmos.h5"  # The file where the trained model will be saved

# Load all images
# Note: cv2.imread returns BGR, while preprocess_input expects RGB input,
# so converting with cv2.cvtColor(img, cv2.COLOR_BGR2RGB) first may be intended.
tempImages = {}
for f in os.listdir(path):
    tempImages[f[:-4]] = preprocess_input(cv2.imread(path + f))


# Shuffle dataset, to have it more balanced
def shuffle_data(left, right, lab):
    shuffled = list(zip(left, right, lab))
    shuffle(shuffled)
    return zip(*shuffled)


lb = LabelBinarizer()

imgsLeft = []
imgsRight = []
labels = []
Example #20
    batchPaths = imagePaths[i:i + bs]
    batchLabels = labels[i:i + bs]
    batchImages = []

    # loop over the images and labels in the current batch
    for imagePath in batchPaths:
        # load the input image using the Keras helper utility while
        # ensuring the image is resized to 224x224 pixels
        image = load_img(imagePath, target_size=(224, 224))
        image = img_to_array(image)

        # preprocess the image by (1) expanding the dimensions and
        # (2) subtracting the mean RGB pixel intensity from the
        # ImageNet dataset
        image = np.expand_dims(image, axis=0)
        image = preprocess_input(image)

        # add the image to the batch
        batchImages.append(image)

    # pass the images through the network and use the outputs as our
    # actual features, then reshape the features into a flattened
    # volume
    batchImages = np.vstack(batchImages)
    features = model.predict(batchImages, batch_size=bs)
    features = features.reshape((features.shape[0], 7 * 7 * 2048))

    # loop over the class labels and extracted features
    for (label, vec) in zip(batchLabels, features):
        # construct a row that exists of the class label and extracted
        # features
Example #21
def preprocessing(image):
    # Pre-processing
    input_tensor = preprocess_input(np.expand_dims(image, axis=0))
    return input_tensor
Example #22
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 15:11:45 2020

@author: ASELSAN
title: ANIMAL CLASSIFICATION WITH THE RESNET50 ARCHITECTURE
This ended up being an example unrelated to the internship, but it can stay :)

"""

from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np

model = ResNet50(weights='imagenet')

img_path = 'tiger.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
#print(preds)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])
# Sample output from the Keras docs (for an elephant photo): [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
NUM_LOOPS_PER_THREAD = 100
COMPILED_MODEL_DIR = "./rn50_fp16_compiled_b" + str(args.batch_size) + "_nc" + str(args.num_neuroncores) + "/1"

# Ensure there's enough buffer capacity to hold in-flight requests in runtime
NUM_INFERS_IN_FLIGHT = args.num_neuroncores + 1
os.environ['NEURON_MAX_NUM_INFERS'] = str(NUM_INFERS_IN_FLIGHT)

num_groups = avail_neuroncores // args.num_neuroncores
group_sizes = [str(args.num_neuroncores)] * num_groups
os.environ['NEURONCORE_GROUP_SIZES'] = ','.join(group_sizes)

# Create input from image
img_sgl = image.load_img('kitten_small.jpg', target_size=(224, 224))
img_arr = image.img_to_array(img_sgl)
img_arr2 = np.expand_dims(img_arr, axis=0)
img_arr3 = resnet50.preprocess_input(np.repeat(img_arr2, USER_BATCH_SIZE, axis=0))

# Load model
NUM_THREADS_PER_PREDICTOR = args.num_neuroncores + 1
pred_list = [tf.contrib.predictor.from_saved_model(COMPILED_MODEL_DIR) for _ in range(num_groups)]
pred_list = pred_list * NUM_THREADS_PER_PREDICTOR
num_threads = len(pred_list)

num_infer_per_thread = []
tot_latency_per_thread = []
thread_active = []
for i in range(num_threads):
    num_infer_per_thread.append(0)
    tot_latency_per_thread.append(0)
    thread_active.append(0)
Example #24
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--dataset", required=True, help="Path to dataset")
    parser.add_argument("--pca", default=None, help="Path to PCA model")
    parser.add_argument("--scaler", default=None, help="Path to Scaler model")
    parser.add_argument("-o", "--output", required=True, help="Output HDF5 file")
    parser.add_argument("-b", "--buffer", type=int, default=1000, help="Buffer size for feature extraction")
    args = parser.parse_args()

    # Get labels
    img_paths = sorted(glob.glob(f"{args.dataset}/*.jpg"))
    labels = [int(os.path.basename(img_path)[0]) for img_path in img_paths]
    labels = np.array(labels)

    batch_size = 32

    # Feature extraction model
    model = ResNet50(weights="imagenet", include_top=False)
    features = []
    for i in tqdm.tqdm(np.arange(0, len(img_paths), batch_size)):
        # Get batch
        batch_img_paths = img_paths[i:i+batch_size]

        # Process each path
        batch_images = []
        for img_path in batch_img_paths:
            image = load_img(img_path, target_size=(224, 224))
            image = img_to_array(image)
            image = np.expand_dims(image, axis=0)
            image = preprocess_input(image)
            batch_images.append(image)
        batch_images = np.vstack(batch_images)

        # Extract features
        batch_features = model.predict(batch_images, batch_size=batch_size)
        batch_features = batch_features.reshape((batch_features.shape[0], -1))
        features.append(batch_features)

    features = np.vstack(features)
    print(f"[INFO] Extracted features: {features.shape}")

    # Load models
    scaler = joblib.load(args.scaler) if args.scaler else StandardScaler().fit(features)
    features = scaler.transform(features)
    pca = joblib.load(args.pca) if args.pca else PCA(n_components=1024, whiten=True).fit(features)
    features = pca.transform(features)
    print(f"[INFO] PCA features: {features.shape}")
    print(f"[INFO] PCA Explained variance %: {sum(pca.explained_variance_ratio_)}")

    # Save models
    os.makedirs("models", exist_ok=True)
    if not args.scaler:
        print("[INFO] Saving Scaler model ...")
        joblib.dump(scaler, "models/scaler.pkl")
    if not args.pca:
        print("[INFO] Saving PCA model ...")
        joblib.dump(pca, "models/pca.pkl")

    # Write to hdf5
    dims = (len(img_paths), 1024)
    dataset = HDF5Writer(args.output, dims, buffer_size=args.buffer)
    dataset.store_class_names(["Not Food", "Food"])
    for i in tqdm.tqdm(np.arange(0, len(img_paths), batch_size)):
        # Add to dataset
        batch_features = features[i:i+batch_size]
        batch_labels = labels[i:i+batch_size]
        dataset.add(batch_features, batch_labels)
    dataset.close()
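A hedged invocation sketch for the script above (the filename extract_features.py is hypothetical; the flags come from the script's own argparse setup):

# First run fits and saves the Scaler/PCA models, then writes the HDF5 dataset:
#   python extract_features.py -d ./food_dataset -o features.hdf5 -b 1000
# Later runs can reuse the saved models:
#   python extract_features.py -d ./more_data -o more.hdf5 --scaler models/scaler.pkl --pca models/pca.pkl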
            name: sess.graph.get_tensor_by_name(ts_name)
            for name, ts_name in input_names.items()
        }
        outputs = {
            name: sess.graph.get_tensor_by_name(ts_name)
            for name, ts_name in output_names.items()
        }
        tf.saved_model.simple_save(sess, model_dir, inputs, outputs)


SAVED_MODEL_DIR = './rn50_fp16'
shutil.rmtree(SAVED_MODEL_DIR, ignore_errors=True)
input_tname = "{}:0".format(args.input)
output_tname = "{}:0".format(args.output)
pb_to_saved_model(args.graph, {input_tname: input_tname},
                  {output_tname: output_tname}, SAVED_MODEL_DIR)

# Create input from image
img_sgl = image.load_img('kitten_small.jpg', target_size=(224, 224))
img_arr = image.img_to_array(img_sgl)
img_arr2 = np.expand_dims(img_arr, axis=0)
img_arr3 = resnet50.preprocess_input(np.repeat(img_arr2, 1, axis=0))

# Load model
predictor_host = tf.contrib.predictor.from_saved_model(SAVED_MODEL_DIR)

# Run inference
model_feed_dict = {'input_1:0': img_arr3}
infa_rslts = predictor_host(model_feed_dict)
print(resnet50.decode_predictions(infa_rslts[output_tname], top=5)[0])
# looping over region proposal bounding boxes coordinates generated by running selective search
for (x, y, w, h) in rects:
    # filtering the unusable boxes
    # filtering the boxes with size less than 10% of the size of the image
    if w / float(W) < 0.1 or h / float(H) < 0.1:
        continue
    # extracting regions of interest from the input image and converting it from BGR to RGB
    # resizing it 224x224 shape, required by the classifier
    roi = image[y:y + h, x:x + w]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
    roi = cv2.resize(roi, (224, 224))

    # further preprocessing required
    roi = img_to_array(roi)
    roi = preprocess_input(roi)

    #updating list of bounding boxes and proposals
    proposals.append(roi)
    boxes.append((x, y, w, h))

# converting the list of proposals to a numpy array
proposals = np.array(proposals)
print("[INFO] proposals shape {}".format(proposals.shape))

# classifying each proposal ROIs using ResNet and decoding the predictions
print("[INFO] classifying proposals...")
preds = model.predict(proposals)
preds = imagenet_utils.decode_predictions(preds, top=1)
# initializing a dictionary to map class labels to bounding box associated with it
labels = {}
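# Hedged continuation sketch (the original loop is not shown): keep confident
# proposals and group their bounding boxes by predicted label.
for (i, p) in enumerate(preds):
    (imagenetID, label, prob) = p[0]
    if prob >= 0.9:  # hypothetical confidence threshold
        labels.setdefault(label, []).append((boxes[i], prob))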
for item in (PATH_DATA / 'validation').glob('**/*'):
    if not item.is_dir():
        images_validation_path.append(item)

n_images_validation = 3
images_validation_path = random.sample(images_validation_path,
                                       n_images_validation)
images_validation = [
    Image.open(image_validation_path)
    for image_validation_path in images_validation_path
]

# Remember to preprocess the input!
image_batch = np.stack([
    preprocess_input(np.array(image_validation.resize((224, 224))))
    for image_validation in images_validation
])
prediction_probabilities = model_tl.predict(image_batch)

fig, axes = plt.subplots(1, n_images_validation, figsize=(15, 5))
for ii, image in enumerate(images_validation):
    axes[ii].imshow(image)
    axes[ii].set_title("Thanos {:.1f}%, Grimace {:.1f}%".format(
        100 * prediction_probabilities[ii, 0],
        100 * (1 - prediction_probabilities[ii, 0])))
    axes[ii].axis('off')

# %% [markdown]
# ### Bonus: Grimos
# Lastly, I want to try the model with images that I consider specially
Example #28
def read_and_prep_image(img_path, img_height=image_size, img_width=image_size):
    img = load_img(img_path, target_size=(img_height, img_width))
    img_array = np.array([img_to_array(img)])
    output = preprocess_input(img_array)
    return output
Example #29
def preprocess_image(img):
    img = image.load_img(img, target_size=(224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    return img
Example #30
def dogornot(img):
    img = preprocess_input(img)
    img = np.array([img])  # add a batch axis
    return DOgOrNot[np.argmax(model.predict(img)[0])]  # label of the highest-probability class
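A minimal usage sketch (pet.jpg is hypothetical; model and DOgOrNot are assumed to be defined elsewhere in the snippet's project):

from tensorflow.keras.preprocessing import image
img = image.img_to_array(image.load_img('pet.jpg', target_size=(224, 224)))
print(dogornot(img))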