Example #1
def train(dataset_path: str, save_path: str, weights: Optional[str]) -> None:
    """
    Train the hat/beard classifier.

    :param dataset_path: path to full dataset.
    :param save_path: path to save weights and training logs.
    :param weights: path to saved model weights.
    """
    log_dir = os.path.join(save_path, LOGS_DIR)
    os.makedirs(log_dir, exist_ok=True)

    train_data_gen = DataGenerator(dataset_path, is_train=True)
    test_data_gen = DataGenerator(dataset_path, is_train=False)
    model = get_model(weights)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(LEARNING_RATE),
                  metrics=[
                      Recall(NUM_CLASSES),
                      Precision(NUM_CLASSES),
                      F1Score(NUM_CLASSES), 'accuracy'
                  ])
    model.summary()

    with LogCallback(save_path, log_dir) as log_callback:
        callbacks = [
            keras.callbacks.TensorBoard(log_dir=log_dir), log_callback
        ]
        model.fit_generator(generator=train_data_gen,
                            validation_data=test_data_gen,
                            validation_freq=1,
                            validation_steps=len(test_data_gen),
                            epochs=EPOCHS,
                            callbacks=callbacks,
                            workers=min(24, cpu_count()))
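A minimal sketch of how train() might be wired to the command line; the flag names and the entry-point layout are assumptions, not part of the original snippet:

    # Hypothetical CLI wrapper for train(); flag names are assumptions.
    if __name__ == '__main__':
        import argparse

        parser = argparse.ArgumentParser(description='Train the hat/beard classifier.')
        parser.add_argument('--dataset-path', required=True, help='path to the full dataset')
        parser.add_argument('--save-path', required=True, help='where to write weights and logs')
        parser.add_argument('--weights', default=None, help='optional saved weights to resume from')
        args = parser.parse_args()

        train(args.dataset_path, args.save_path, args.weights)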
Example #2
def result():
    body_list = ["筋肉型","標準型","痩せ型","肥満型"]
    # submitした画像が存在したら、画像データをモデル用に整形
    try :
        if request.files['image']:
            img_file = request.files['image']
            temp_img = Image.open(request.files['image'])
            temp_img = temp_img.convert("RGB") #色空間をRGBに
            #今回は、モデルの精度を上げるために(64,64)で画像を学習させています。
            temp_img = temp_img.resize((64,64))
            temp_img = np.asarray(temp_img)
            im_rows = 64
            im_cols = 64
            im_color = 3
            in_shape = (im_rows,im_cols,im_color)
            nb_classes = 4
            img_array = temp_img.reshape(-1,im_rows,im_cols,im_color)
            img_array = img_array / 255
            model = cnn_model.get_model(in_shape,nb_classes)
            # Load the trained model weights.
            model.load_weights("photos-newmodel-light.hdf5")
            predict = model.predict([img_array])[0]
            index = predict.argmax()
            body_shape = body_list[index]
            body_score = 90*predict[0]+70*predict[1]+40*predict[2]+40*predict[3]
            body_score = int(body_score)

            return render_template('./result.html', title='結果',
                                   body_score=body_score, body_shape=body_shape,
                                   img_file=img_file)
    
    except Exception:
        return render_template('./flask_api_index.html')
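For context, result() is a Flask view that expects a multipart POST carrying an 'image' file field. A minimal sketch of how such a view could be registered; the route path and app object are assumptions:

    # Hypothetical wiring; the actual app setup is not part of the snippet.
    from flask import Flask

    app = Flask(__name__)
    app.add_url_rule('/result', view_func=result, methods=['POST'])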
Example #3
def deploy_model():
    """
    Test model in real time
    we will be getting screenshots and sending joystick input straight to the game
    and see how it goes
    :return:
    """

    # Load the saved model.
    model = cnn_model.get_model()
    model.summary()


    j = pyvjoy.VJoyDevice(1)  # set this to match your vJoy device number

    # Run the inference loop forever.
    while True:
        screenshot = np.array(ImageGrab.grab(bbox=(0, 40, 1024, 768)))
        screenshot = data_preprocessing.transform_image(screenshot)

        prediction = model.predict(np.array([screenshot]))
        j.data.wAxisX = int(prediction[0][0] * constants.MAX_VJOY)
        j.data.wAxisY = int(prediction[0][1] * constants.MAX_VJOY)
        j.update()
        print(prediction)
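vJoy axes accept integer values in the range 0x1-0x8000, so constants.MAX_VJOY is presumably 0x8000 (32768) and the network outputs are expected to lie in [0, 1]. A defensive sketch that clamps a prediction before scaling; the helper is hypothetical:

    # Hypothetical helper: clamp a prediction into [0, 1], then scale to the vJoy range.
    MAX_VJOY = 0x8000  # vJoy axis maximum

    def to_axis(value: float) -> int:
        return int(max(0.0, min(1.0, value)) * MAX_VJOY)

    # j.data.wAxisX = to_axis(prediction[0][0])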
Example #4
    def train_model(self):

        model = cnn_model.get_model()
        model.compile(optimizer=Adam(lr=1e-4), loss='mse')
        model.summary()

        images = np.array(data_handler.x_data)
        angles = data_handler.y_data

        print("Images size", images.shape)

        start_time = time.time()

        history = model.fit(x=images,
                            y=angles,
                            batch_size=100,
                            epochs=self.epochs,
                            validation_split=self.val_split,
                            shuffle=True)
        model.save(self.model_save_path + "/" + self.model_name)

        training_time = time.time() - start_time
        print("Total training time : " + str(training_time))

        loss = history.history['loss']
        val_loss = history.history['val_loss']
        epochs = range(1, len(loss) + 1)
        plt.plot(epochs, loss, color='red', label='Training loss')
        plt.plot(epochs, val_loss, color='green', label='Validation loss')
        plt.title('Training and validation loss')
        plt.xlabel('Epochs')
        plt.ylabel('Loss')
        plt.legend()
        plt.show()
Example #5
def convert(weights: Optional[str]) -> None:
    """
    Model conversion. The selected model will be saved in keras format (*.h5), tf 1.x frozen graph (*.pb)
    and onnx (*.onnx). Model will be saved in folder "WEIGHTS_PATH/_(current date)/".

    :param weights: path to saved keras model weights.
    """
    keras.backend.set_learning_phase(0)
    save_path = os.path.join(WEIGHTS_PATH, get_date()[2:])
    os.makedirs(save_path, exist_ok=True)
    if weights is None:
        logger.warning(
            '\nNo weights provided. Converting random initialized model.\n')
        weights_name = 'random'
    else:
        weights_name = os.path.basename(weights)
        shutil.copyfile(weights, os.path.join(save_path, weights_name))
    model = get_model(weights)
    if weights is None:
        model.save(os.path.join(save_path, weights_name))
    frozen_model_path = freeze_keras_model(model, save_path,
                                           os.path.splitext(weights_name)[0],
                                           list(OUTPUT_NAMES))
    convert_frozen_graph_to_onnx(frozen_model_path, [INPUT_NAME],
                                 OUTPUT_NAMES,
                                 save_path,
                                 opset=None)
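freeze_keras_model and convert_frozen_graph_to_onnx are project helpers, but the ONNX step can be reproduced with the standard tf2onnx converter; the file and tensor names below are placeholders for the actual frozen graph, INPUT_NAME, and OUTPUT_NAMES:

    python -m tf2onnx.convert --input frozen_graph.pb --inputs input:0 --outputs output:0 --output model.onnx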
Example #6
def train():
    # Training, validation, and testing sets
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    checkpoint = cnn_utils.get_model_saves_path()

    x, y_, keep_prob, y_conv, variables = cnn_model.get_model()

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver(var_list=variables)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    # saver.restore(sess, checkpoint)

    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            saver.save(sess, checkpoint)
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 1.0
            })
            print('step %d, training accuracy %g' % (i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    print('test accuracy %g' % accuracy.eval(feed_dict={
        x: mnist.test.images,
        y_: mnist.test.labels,
        keep_prob: 1.0
    }))

    sess.close()
Example #7
def evaluate(dataset_path: str, weights: Optional[str]) -> None:
    """
    Evaluate the Keras model on its loss and training metrics. The function prints evaluation results for both the training and test data.

    :param dataset_path: path to full dataset.
    :param weights: path to saved keras model weights.
    """
    train_data_gen = DataGenerator(dataset_path, is_train=True)
    test_data_gen = DataGenerator(dataset_path, is_train=False)
    model = get_model(weights)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(LEARNING_RATE),
                  metrics=[
                      Recall(NUM_CLASSES),
                      Precision(NUM_CLASSES),
                      F1Score(NUM_CLASSES)
                  ])
    message = 'Testing model "{}".\n\n'.format(weights)

    # Evaluate on training data.
    message += 'Train data:\n'
    results = model.evaluate_generator(train_data_gen,
                                       workers=min(24, cpu_count()),
                                       verbose=1)
    for name, res in zip(model.metrics_names, results):
        message += '{} = {:.04f}; '.format(name, res)
    message = message[:-2] + '\n\nTest data:\n'

    # Evaluate on testing data.
    results = model.evaluate_generator(test_data_gen,
                                       workers=min(24, cpu_count()),
                                       verbose=1)
    for name, res in zip(model.metrics_names, results):
        message += '{} = {:.04f}; '.format(name, res)
    message = message[:-2] + '\n'
    logger.info('\n' + message)
Example #8
import matplotlib.pyplot as plt
from calorie import calories
from cnn_model import get_model
import os
import cv2
import numpy as np

IMG_SIZE = 400
LR = 1e-3
no_of_fruits = 7

MODEL_NAME = 'Fruits_dectector-{}-{}.model'.format(LR, '5conv-basic')

model_save_at = os.path.join("model", MODEL_NAME)

model = get_model(IMG_SIZE, no_of_fruits, LR)

model.load(model_save_at)
labels = list(np.load('labels.npy'))

test_data = 'test5.jpg'
img = cv2.imread(test_data)
img1 = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
model_out = model.predict([img1])
result = np.argmax(model_out)
name = labels[result]
cal = round(calories(result+1, img), 2)

plt.imshow(img)
plt.title('{}({} cal)'.format(name, cal))
plt.axis('off')
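calories() comes from the local calorie module and is not shown; from the call site it takes the 1-based class index plus the original image and returns an estimated calorie count. A purely hypothetical stub illustrating that contract (values are illustrative only):

    # Hypothetical stand-in for calorie.calories(); the real logic is not shown.
    CALORIES_PER_FRUIT = {1: 52.0, 2: 89.0, 3: 47.0}  # illustrative values

    def calories(fruit_id, img):
        # The real module presumably also uses the image, e.g. apparent fruit size.
        return CALORIES_PER_FRUIT.get(fruit_id, 0.0)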
Example #9
import numpy as np
import cnn_model
from PIL import Image
import matplotlib.pyplot as plt

target_image = "test-sushi.jpg"

im_rows = 32  # image height in pixels
im_cols = 32  # image width in pixels
im_color = 3  # number of color channels
in_shape = (im_rows, im_cols, im_color)
nb_classes = 3

LABELS = ["寿司", "サラダ", "麻婆豆腐"]
CALORIES = [588, 118, 648]

# Load the saved CNN model.
model = cnn_model.get_model(in_shape, nb_classes)
model.load_weights('./image/photos-model.hdf5')


def check_photo(path):
    # Load the image.
    img = Image.open(path)
    img = img.convert("RGB")  # convert the color space to RGB
    img = img.resize((im_cols, im_rows))  # resize to the model input size
    plt.imshow(img)
    plt.show()
    # Convert to array data.
    x = np.asarray(img)
    x = x.reshape(-1, im_rows, im_cols, im_color)
    x = x / 255
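The snippet cuts off before the prediction; presumably check_photo() continues by running the model and mapping the winning class back through LABELS and CALORIES, roughly like this (an assumed reconstruction, not the original tail):

    # Presumed continuation inside check_photo(): predict and look up the result.
    pre = model.predict(x)[0]
    idx = pre.argmax()
    return LABELS[idx], CALORIES[idx]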
Example #10
# Imports inferred from the usage below.
import os

import joblib
import numpy as np
from cnn_model import get_model

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

SAVE_AUDIO = False
SAVE_IMAGE = True
audio_saved_count = 0
image_saved_count = 0

MODEL_TYPE = 'SVM'

cnn_weight_save_path = 'models/model2_checkpoint'
svm_model_path = 'models/svm_model'
test_data = 'test_data/'

if MODEL_TYPE == 'CNN':
    my_model = get_model(n_class=10)
    my_model.load_weights(cnn_weight_save_path)

if MODEL_TYPE == 'SVM':
    my_model = joblib.load(svm_model_path)

block_buffer = []

recording = True
detected = False


def callback(in_data, frame_count, time_info, flag):
    global block_buffer, detected, recording
    signal_block = np.frombuffer(in_data, dtype=np.int16)

    # The snippet is cut off here; the check presumably compares the block's
    # peak-to-peak amplitude against a threshold.
    audio_valid = np.max(signal_block) - np.min(signal_block)
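callback() has the signature of a PyAudio stream callback, which is also expected to return (data, pyaudio.paContinue) to keep the stream running. A minimal sketch of attaching it to an input stream; the sample rate and block size are assumptions:

    # Hypothetical stream setup; rate and buffer size are assumptions.
    import pyaudio

    pa = pyaudio.PyAudio()
    stream = pa.open(format=pyaudio.paInt16, channels=1, rate=16000,
                     input=True, frames_per_buffer=1024,
                     stream_callback=callback)
    stream.start_stream()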
Example #11
    def model_set(self, model_path):
        print("model set")
        self.model = cnn_model.get_model(self.in_shape, len(LABELS))
        self.model.load_weights(model_path)
Example #12
def output():
    if request.method == "POST":
        inputimg = request.files['user_img']

        import cnn_model
        import cv2
        import numpy as np
        from PIL import Image
        from keras import backend as K
        from tensorflow.keras.models import load_model

        im_rows = 64  # image height
        im_cols = 64  # image width
        im_color = 3  # number of color channels
        in_shape = (im_rows, im_cols, im_color)
        nb_classes = 7

        LABELS = ["꼬부기", "치코리타", "잠만보", "뽀뽀라", "케이시", "푸린", "리아코"]
        # Build the CNN architecture.
        model = cnn_model.get_model(in_shape, nb_classes)

        # Load the saved CNN model (this replaces the model built above).
        model = load_model('photos-cnn-model.h5')

        # Load the trained weights.
        model.load_weights('photos-cnn-weight.hdf5')

        # Reshape to match the image data the model was trained on.
        # im = im.reshape(in_shape).astype('float32') / 255

        # Load the image.
        img = Image.open(
            inputimg)  # <class 'PIL.JpegImagePlugin.JpegImageFile'>

        # img = img.convert("RGB")  # convert the color space
        # img = img.resize((im_cols, im_rows))  # resize

        # Convert the data.
        n_img = np.asarray(img)
        n_img = cv2.cvtColor(n_img, cv2.COLOR_BGR2RGB)
        cv2.imwrite("test_before.jpg", n_img)

        cascade_file = "haarcascade_frontalface_alt.xml"  # frontal face detector
        cascade = cv2.CascadeClassifier(cascade_file)
        img_gray = cv2.cvtColor(n_img, cv2.COLOR_BGR2GRAY)
        face_list = cascade.detectMultiScale(
            img_gray, minSize=(30, 30))  # minSize: minimum size of a detected face
        if len(face_list) == 1:
            for (x, y, w, h) in face_list:
                #print("얼굴의 좌표 =", x, y, w, h)
                dst = n_img.copy()  ####이미지복사해서
                dst = n_img[y:y + h, x:x + w]  ###자르기
                cv2.imwrite("test_after.jpg", dst)
                dst = Image.fromarray(dst, 'RGB')

            dst = dst.resize((im_cols, im_rows))
            dst = np.asarray(dst)
            cv2.imwrite("test_resize.jpg", dst)
            dst = dst.reshape(-1, im_rows, im_cols, im_color)
            dst = dst / 255

            # Run the prediction.
            pre = model.predict([dst])[0]
            K.clear_session()
            idx = pre.argmax()

            result_pm = LABELS[idx]

        else:
            result_pm = "메타몽"

        return render_template("output.html", result_pm=result_pm)
Example #13
File: cnn_fool.py Project: dkim616/ml-5hpx
def fool():
    # Training, validation, and testing sets
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    x, y_, keep_prob, y_conv, variables = cnn_model.get_model()

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    grad = tf.gradients(cross_entropy, x)

    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Create session
    sess = tf.InteractiveSession()
    saver = tf.train.Saver(variables)

    # sess.run(tf.global_variables_initializer())
    saver.restore(sess, cnn_utils.get_model_saves_path())

    two_image = None
    two_onehot = None
    index = 0
    num_test_examples = mnist.test.num_examples
    while index < num_test_examples and two_image is None:
        if mnist.test.labels[index][2] == 1:
            two_image = mnist.test.images[index].reshape(1, 784)
            two_onehot = mnist.test.labels[index].reshape(1, 10)
        index += 1

    six_onehot = None
    index = 0
    while index < num_test_examples and six_onehot is None:
        if mnist.test.labels[index][6] == 1:
            six_onehot = mnist.test.labels[index].reshape(1, 10)
        index += 1

    np_grad = sess.run(grad,
                       feed_dict={
                           x: two_image,
                           y_: six_onehot,
                           keep_prob: 1.0
                       })
    signed_grad = numpy.sign(np_grad[0])
    delta_image_unsigned = 0.1 * np_grad[0]
    delta_image = 0.01 * signed_grad
    adv_image = delta_image + two_image

    print(sess.run(y_conv, feed_dict={x: two_image, keep_prob: 1.0}))

    print(two_onehot)
    print(six_onehot)

    print(sess.run(y_conv, feed_dict={x: adv_image, keep_prob: 1.0}))

    cnn_utils.save_image(np_grad[0], 'unsigned_grad')
    cnn_utils.save_image(signed_grad, 'signed_grad')
    cnn_utils.save_image(delta_image_unsigned, 'delta_image_unsigned')
    cnn_utils.save_image(delta_image, 'delta_image')
    cnn_utils.save_image(adv_image, 'adv_image')
    cnn_utils.save_image(two_image, 'two_image')

    print(two_image)
    print(delta_image)
    print(adv_image)

    sess.close()
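For reference, fool() implements the fast gradient sign method (FGSM): the adversarial image is built as x_adv = x + eps * sign(grad_x J(x, y)), where J is the cross-entropy loss, the gradient is taken with respect to the input pixels rather than the weights, and eps = 0.01 in this snippet (with y set to the '6' one-hot target).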