def predict(original_image):
	"""Detect faces in *original_image*, classify the emotion of each face,
	and annotate the highlighted image with the predicted label.

	Returns whatever image_extraction() builds from the annotated image,
	the face boxes, the last raw prediction and the last predicted emotion
	(both None when no face was found).
	"""
	model.load_weights("models/_mini_XCEPTION.87-0.65.hdf5")
	highlighted_face_image, faces = find_face(original_image)
	nfaces = len(faces)
	prediction = None
	predicted_emotion = None
	# Iterating directly is safe on an empty list, so the previous
	# `if len(faces) > 0` guard was redundant. The unused
	# `maximum = max(prediction)` (max over 2-D prediction rows) was removed.
	for (x, y, w, h) in faces:
		face = original_image[y:y+h, x:x+w]
		face = preprocess_live_image(face)
		# Wrap as a nested list to form the 4-D batch the model expects,
		# exactly as the original code did.
		batch = [[face]]
		prediction = model.predict(batch)
		predicted_emotion = emotion_list[prediction.argmax()]
		cv2.putText(highlighted_face_image, predicted_emotion, (x,y-20), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,255), 10)
	return image_extraction(highlighted_face_image, faces, nfaces, prediction, predicted_emotion,original_image)
Example #2
0
def test(image_path, snapshot_path, transform=False):
    """Load one image, run it through the model and print the best class index.

    NOTE(review): `transform` is accepted but never used — confirm intent
    before removing it (kept for interface compatibility).
    """
    img = Image.open(
        image_path)  # WARNING : this image is well centered and square
    # NOTE(review): model.inputs[0].shape usually includes the batch
    # dimension — verify this resize target is what PIL expects.
    img = img.resize(model.inputs[0].shape)

    imarr = np.array(img).astype(np.float32)

    # HWC -> CHW, then add a leading batch dimension.
    imarr = imarr.transpose((2, 0, 1))
    imarr = np.expand_dims(imarr, axis=0)

    model.load_weights(snapshot_path)

    out = model.predict(imarr)

    best_index = np.argmax(out, axis=1)[0]
    # Fixed: `print best_index` is Python-2-only syntax (SyntaxError on
    # Python 3); the parenthesized call form is valid on both.
    print(best_index)
Example #3
0
def predict(word):
    """Insert hyphenation indicators into *word* using the trained model.

    Each sliding window from the dataset preprocessor is scored; a score
    above 0.5 means a hyphenation point before the next character.
    """
    model.load_weights('data/model.h5')

    windows = dataset.process_word(word.lower(), training=False)

    # Start with the first two characters, which can never be preceded
    # by a hyphenation point, then grow the result window by window.
    parts = [word[:2]]
    for offset, window in enumerate(windows):
        score = model.predict(np.array([window]))
        if score[0][0] > 0.5:
            parts.append(dataset.HYPHENATION_INDICATOR)
        parts.append(word[offset + 2])

    parts.append(word[-1:])
    return ''.join(parts)
# -*- coding: utf-8 -*-
# @File    : eval.py
# @Author  : AaronJny
# @Time    : 2019/12/30
# @Desc    :
from dataset import tokenizer
from model import model
import settings
import utils

# Load the trained model weights
model.load_weights(settings.BEST_MODEL_PATH)
# Generate a random poem
print(utils.generate_random_poetry(tokenizer, model))
# Given a partial opening line, randomly generate the rest of the poem
print(utils.generate_random_poetry(tokenizer, model, s='床前明月光,'))
# Generate acrostic poems (line-initial characters spell the head phrase)
print(utils.generate_acrostic(tokenizer, model, head='海阔天空'))
print(utils.generate_acrostic(tokenizer, model, head='天道酬勤'))
Example #5
0
import cv2
import numpy as np
import os
import uuid
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import tensorflow as tf
from model import model, preprocess
import time
import pickle

# Saved weights produced by the training run
model_name = 'model_train_v2.h5'

model.load_weights(model_name)

# Capture frames from the default webcam (device 0)
cap = cv2.VideoCapture(0)
# Timestamp of the last warning — presumably used to rate-limit alerts
# in the (truncated) capture loop below; confirm against that loop.
last_warning = 0

def save_image(image, pos=False):
    """Write *image* to disk under a random UUID filename.

    Positive samples go to ./pos_frames, everything else to ./raw_data.
    """
    folder = "./pos_frames" if pos else "./raw_data"
    fname = os.path.join(folder, str(uuid.uuid4()) + ".png")
    cv2.imwrite(fname, image)


# Running counters/buffers consumed by the capture loop below
bad_count = 0
avg_prediction = []
frame_array = []

while (True):
import numpy as np
from model import model
import matplotlib.pyplot as plt

# Read subset of data
all_data = np.load('simple_data.npz')
imgs_color = all_data['imgs']
# NOTE(review): concatenating spds then accel and reshaping to (-1, 2)
# pairs adjacent elements of the concatenated vector, not
# (speed, accel) per sample — np.column_stack may have been intended;
# confirm against how the model was trained.
speedx = np.concatenate((all_data['spds'], all_data['accel']))
speedx = speedx.reshape((-1, 2))
steer = all_data['steer']

#make predictions
start = 45000
stop = 65000
model.load_weights('steer_comma_0_0.00057615.h5')
preds = model.predict([speedx[start:stop], imgs_color[start:stop]])
steer_preds = preds.reshape([-1])

# Video of prediction
import matplotlib.animation as animation
from PIL import Image, ImageDraw
figure = plt.figure()
imageplot = plt.imshow(np.zeros((64, 64, 3), dtype=np.uint8))
val_idx = start


def get_point(s, start=0, end=63, height=16):
    X = int(s * (end - start))
    if X < start:
        X = start
    if X > end:
Example #7
0
from tensorflow.keras.datasets import mnist
from model import model
from skimage import io
import matplotlib.pyplot as plt
import numpy as np

# Path to the best saved weights checkpoint
SAVED_MODEL = 'ToReport/Checkpoints/digits-cnn_best_weights.h5'

# Only the test split is needed for this demo
_, (test_images, test_labels) = mnist.load_data()

# Pick one random test image
N = len(test_images)
k = np.random.randint(N)

# Add a leading batch dim and a trailing channel dim, scale to [0, 1]
test_img = test_images[k].reshape(1, *test_images[k].shape, 1)
test_img = test_img / 255.0

model.load_weights(SAVED_MODEL)

# Predict and show the image with true label vs. model prediction
pred = model.predict(test_img)
io.imshow(test_images[k])
plt.title(
    f'Истинное значение: {test_labels[k]}, Предсказание модели: {np.argmax(pred)}'
)
plt.show()
Example #8
0
    # Training callbacks: TensorBoard logging, LR decay on val_loss
    # plateau, and best-weights-only checkpointing.
    tb = TensorBoard(log_dir='./logs')
    update_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.8,
                                  patience=5,
                                  min_lr=1e-6,
                                  verbose=1)
    checkpoint = ModelCheckpoint(weights_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True)

    # MODEL AND TRAIN

    model = model(img_height, img_width, num_classes, lr)
    model.fit(X_train,
              Y_train,
              epochs=epochs,
              batch_size=batch_size,
              verbose=1,
              validation_data=(X_val, Y_val),
              shuffle=True,
              callbacks=[checkpoint, update_lr, tb])

    # MODEL TEST

    # Restore the best checkpoint before evaluating on the test set.
    model.load_weights(weights_path)
    preds = model.evaluate(X_test, Y_test)
    print("Loss = " + str(preds[0]))
    print("Test Accuracy = " + str(preds[1]))
import cv2
import numpy as np
import tensorflow as tf
from model import model
import matplotlib.pyplot as plt

# CIFAR-10 class index -> human-readable label
classes_name = {0: 'Airplane', 1: 'Automobile', 2: 'Bird', 3: 'Cat', 4: 'Deer', 5: 'Dog', 6: 'Frog', 7: 'Horse',
                8: 'Ship', 9: 'Truck'}

# Build the network and restore trained weights
model = model()
model.load_weights('../MODEL DATA/tf-model.h5')


def classify(img):
    """Return the CIFAR-10 class name predicted for *img*.

    The image is resized to the network's 32x32 input, scaled to [0, 1],
    and the highest-scoring class label is returned.
    """
    scaled = cv2.resize(img, (32, 32)) / 255.0
    batch = tf.expand_dims(tf.cast(scaled, tf.float32), axis=0)
    scores = model.predict(batch)[0]
    best = np.argmax(scores)
    return classes_name[int(best)]


# Classify one sample image and display it titled with the prediction
image = plt.imread('../IMAGES/car1.jpg')
cls_name = classify(image)
plt.imshow(image)
plt.title(cls_name, fontsize=10)
plt.show()
Example #10
0
from properties import *
from model import model
from data_generator import midi_input_generator, generate_song_array, save_array_to_midi


# Restore the trained network and render one generated song to MIDI.
# Fixed: `print "..."` is Python-2-only syntax (SyntaxError on Python 3);
# the parenthesized form prints identically on both versions.
print("Testing saved model")
model.load_weights('net_dump.nw')

save_array_to_midi([generate_song_array(model)], 'Generated.mid')
        log_dir + "\\weights_epoch_{epoch:02d}-loss_{val_loss:.2f}.hdf5",
        monitor='val_loss',
        verbose=0,
        save_best_only=False,
        save_weights_only=False,
        mode='auto',
        period=1)
    # lr_sched = keras.callbacks.LearningRateScheduler(lambda epoch: 0.002* 0.75 ** (epoch-1) )
    cfm_callback = confusion_matrix_callback(train_generator,
                                             validation_generator)
    # Note: cfm_callback (and lr_sched) are built but intentionally left
    # out of the active callback list below.
    callbacks = [tensorboard_callback, reduce_lr, modelCP,
                 csv_logger]  # , lr_sched] #, cfm_callback ]
    # ------------------------------

    # saved_weights = r"C:\Users\User\PycharmProjects\PlantPathology\logs\fit\20200331-120037\weights_epoch_17-loss_1.00.hdf5"
    # saved_weights = r"C:\Users\User\PycharmProjects\PlantPathology\logs\fit\saved_weights\weights_epoch_34-loss_1.06.hdf5"
    # Optionally warm-start the VGG variant from pretrained weights
    if C.MODEL == 'VGG' and C.PRETRAINED_VGG:
        model.load_weights(C.PRETRAINED_VGG)

    model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        epochs=500,
        callbacks=callbacks,
        verbose=True,
        # class_weight=get_class_weights()
        # Inverse-frequency class weights to counter class imbalance
        class_weight=1 / pd.Series(train_generator.classes).value_counts(),
        # initial_epoch = 20,
        # class_weight= 'auto',
    )
Example #12
0
# Written by Markus Siemens (MIT; https://github.com/msiemens/HypheNN-de)

from time import time

import util
from dataset import data_validation
from model import model


start_time = time()

# Restore saved weights and evaluate on the validation split
model.load_weights('models/model_saved.h5')
data = data_validation()

print('Validating model...')
# data[0]/data[1] are presumably inputs/labels — confirm data_validation()
result = model.evaluate(data[0], data[1])
print()
print('Done')
print('Result:', result)
print('Time:', util.time_delta(time() - start_time))
Example #13
0
def warn(*args, **kwargs):
    """Accept any arguments and do nothing.

    Used below as a no-op replacement for warnings.warn to silence
    all warning output.
    """
    return None


import warnings
# Monkey-patch warnings.warn with the no-op defined above so nothing warns
warnings.warn = warn

from dataset import tokenizer
from model import model
import config
import utils
import argparse

# Load the trained model weights
model.load_weights(config.BEST_MODEL_PATH)

if __name__ == '__main__':
    # CLI for the poetry generator: random poem, continuation from a
    # given opening, or acrostic mode. (Help strings are user-facing
    # Chinese text and are left untouched.)
    parser = argparse.ArgumentParser(
        description='诗歌生成器  功能:1.随机生成一首诗  2.根据开头生成后面的诗句  3.生成藏头诗')
    parser.add_argument('--start', '-s', help='生成的开头')
    parser.add_argument('--acrostic', '-a', help='藏头诗')
    parser.add_argument('--number', '-n', help='五言、七言')
    parser.add_argument('--count', '-c', help='律诗、绝句')
    args = parser.parse_args()

    # 给出部分信息的情况下,随机生成剩余部分
    if args.start:
        print(
            utils.generate_random_poetry(tokenizer,
                                         model,
                        help="Input text file")
    parser.add_argument("-w",
                        "--weights",
                        action="store",
                        required=False,
                        dest="weights",
                        help="Model weights path")
    parser.add_argument("-i",
                        "--input",
                        action="store",
                        required=False,
                        dest="input",
                        help="Input string for complete")
    parser.add_argument("-o",
                        "--out_len",
                        action="store",
                        required=False,
                        dest="out_len",
                        help="Out length")
    args = parser.parse_args()

    # Build the vocabulary from the text file; the boolean/1 arguments'
    # meaning is not visible here — confirm against load_data's signature.
    _, _, vectorizer = load_data.load_data(args.text, False, False, False, 1)
    # Batch size 1 for interactive generation
    model = model.make_text_generator_model(1, vectorizer.vocab_size)
    model.load_weights(args.weights)

    # Continue the seed string for out_len steps and print the result
    res = generate(model,
                   vectorizer,
                   seed=args.input,
                   length=int(args.out_len))
    print(res)
Example #15
0
from model import model
from qlearning4k import Agent
from flappy_bird import FlappyBird

# Run a trained Q-learning agent (qlearning4k) on Flappy Bird
game = FlappyBird(frame_rate=30, sounds=True)
model.load_weights('weights.dat')
agent = Agent(model)
# Small epsilon: near-greedy play with a little residual exploration
agent.play(game, nb_epoch=100, epsilon=0.01, visualize=False)
Example #16
0
from model import model
from nmt_utils import *

m = 10000            # number of examples to load
Tx = 30              # input sequence length (human-readable date)
Ty = 10              # output sequence length (ISO date: YYYY-MM-DD)
n_a = 32             # presumably encoder hidden size — confirm model()
n_s = 64             # presumably decoder state size — confirm model()
learning_rate = 0.005
batch_size = 100

dataset, human_vocab, machine_vocab, inv_vocab = load_dataset(m)
X, Y, Xoh, Yoh = preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty)
model = model(Tx, Ty, n_a, n_s, len(human_vocab), len(machine_vocab))

model.load_weights('models/model_50.h5')

# Zero-initialized decoder hidden/cell states, one row per example
s0 = np.zeros((m, n_s))
c0 = np.zeros((m, n_s))

# Human-formatted dates to translate into ISO form
EXAMPLES = [
    '3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007',
    'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001'
]

total = len(EXAMPLES)
count = 1

TARGETS = [
    '1979-05-03', '2009-04-05', '2016-08-21', '2007-07-10', '2018-05-09',
    '2001-03-03', '2001-03-03', '2001-03-01'
from model import model, preprocess_input, smodel
from keras.optimizers import SGD
from keras.callbacks import ReduceLROnPlateau
from generator import Generator
import json

if __name__ == '__main__':
    # Build and compile the classifier, then restore trained weights
    model = smodel()
    opt = SGD(lr=0.01, momentum=0.9)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    print(model.summary())
    model.load_weights('weights/classifier.h5')

    # Fixed split: first 7211 entries train, the rest test — presumably
    # chosen to match a known dataset size; confirm.
    listsss = json.load(open('list_withbndbx.json', 'r'))
    train_gen = Generator(
        listsss[:7211],
        '/home/palm/PycharmProjects/DATA/Tanisorn/imgCarResize/',
        preprocess_function=preprocess_input)
    test_gen = Generator(
        listsss[7211:],
        '/home/palm/PycharmProjects/DATA/Tanisorn/imgCarResize/',
        preprocess_function=preprocess_input)
    # Decay LR when the first-head validation accuracy plateaus
    reduce_lr_01 = ReduceLROnPlateau(monitor='val_1st_acc',
                                     factor=0.2,
                                     patience=5,
                                     min_lr=0,
                                     mode='max')
    reduce_lr_02 = ReduceLROnPlateau(monitor='val_2nd_acc',
                                     factor=0.2,
Example #18
0
# CIFAR-10 class index -> human-readable label
classes_name = {
    0: 'Airplane',
    1: 'Automobile',
    2: 'Bird',
    3: 'Cat',
    4: 'Deer',
    5: 'Dog',
    6: 'Frog',
    7: 'Horse',
    8: 'Ship',
    9: 'Truck'
}

# Build the network and restore trained weights
model = model()
model.summary()
model.load_weights('MODEL DATA/model.h5')


def classify(image):
    """Return the class name predicted for *image*.

    The image is resized to the network's 32x32 input, batched, scaled
    to [0, 1], and the label of the highest-scoring class is looked up.
    """
    resized = image.resize((32, 32))
    batch = np.expand_dims(np.asarray(resized), axis=0)
    batch = batch / 255.0
    scores = model.predict(batch)[0]
    return classes_name.get(np.argmax(scores))


# Images to classify from the local IMAGES directory
image_names = os.listdir('IMAGES/')
Example #19
0
# Hyperparameters (presumably shared with the training script; INIT_LR
# and EPOCHS are unused in this evaluation snippet)
INIT_LR = 5e-3
BATCH_SIZE = 32
EPOCHS = 14

data_loader = DataLoader()
x_train, y_train, x_test, y_test, classes = data_loader.load_data()

# Center pixel values into [-0.5, 0.5]
x_train2 = (x_train / 255) - 0.5
x_test2 = (x_test / 255) - 0.5

y_train2 = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test2 = keras.utils.to_categorical(y_test, NUM_CLASSES)

model = model()

model.load_weights("weights/weights.h5")

# make test predictions

# NOTE(review): predicts on the *unnormalized* x_test although x_test2
# was prepared above — confirm which form the model was trained on.
# Also, predict_proba was removed from modern Keras models; verify the
# installed version supports it.
y_pred_test = model.predict_proba(x_test)

y_pred_test_classes = np.argmax(y_pred_test, axis=1)

y_pred_test_max_probas = np.max(y_pred_test, axis=1)

# Render the confusion matrix with class-name tick labels
plt.figure(figsize=(7, 6))
plt.title('Confusion matrix', fontsize=16)
plt.imshow(confusion_matrix(y_test, y_pred_test_classes))
plt.xticks(np.arange(10), classes, rotation=45, fontsize=12)
plt.yticks(np.arange(10), classes, fontsize=12)
plt.colorbar()
Example #20
0
# CIFAR-10 class index -> human-readable label
classes_name = {
    0: 'Airplane',
    1: 'Automobile',
    2: 'Bird',
    3: 'Cat',
    4: 'Deer',
    5: 'Dog',
    6: 'Frog',
    7: 'Horse',
    8: 'Ship',
    9: 'Truck'
}

# Build the network and restore trained weights
model = model()
model.summary()
model.load_weights('MODEL DATA/cifar-10.h5')


def classify(image):
    """Classify *image* and return its human-readable class name.

    Resizes to the model's 32x32 input, adds a batch axis, rescales
    pixels to [0, 1], then returns the label of the argmax class.
    """
    small = image.resize((32, 32))
    arr = np.asarray(small)
    arr = np.expand_dims(arr, axis=0) / 255.0
    probs = model.predict(arr)[0]
    winner = np.argmax(probs)
    return classes_name.get(winner)


# Images to classify from the local IMAGES directory
image_names = os.listdir('IMAGES/')
Example #21
0
import cv2
import numpy as np
from model import model
from darkflow.net.build import TFNet

# darkflow tiny-YOLO configuration for a single-class (hand) detector
options = {
    'model': 'cfg/tiny-yolo-voc-1c.cfg',
    'load': 3250,       # checkpoint step to load
    'threshold': 0.1,   # minimum detection confidence
    'gpu': 1.0          # presumably GPU memory fraction — confirm darkflow docs
}
tfnet = TFNet(options)

# Fingertip regression model with pretrained weights
model = model()
model.summary()
model.load_weights('weights/fingertip_weights/Fingertip.h5')


def detect_hand(image):
    """Detect a hand in *image* via the YOLO network.

    Returns (top_left, bottom_right) corner tuples of the last
    detection found, or (None, None) when nothing was detected.
    """
    detections = tfnet.return_predict(image)
    print(detections)
    top_left = None
    bottom_right = None
    for det in detections:
        top_left = (det['topleft']['x'], det['topleft']['y'])
        bottom_right = (det['bottomright']['x'], det['bottomright']['y'])
    return top_left, bottom_right

Example #22
0
from time import time

import util
from dataset import data_validation
from model import model

start_time = time()

# Restore saved weights and evaluate on the validation split
model.load_weights('data/model.h5')
data = data_validation()

print('Validating model...')
# data[0]/data[1] are presumably inputs/labels — confirm data_validation()
result = model.evaluate(data[0], data[1])
print()
print('Done')
print('Result:', result)
print('Time:', util.time_delta(time() - start_time))