Example #1
"""
@author: ladvien
"""

import cv2

import os
import sys

import matplotlib.pyplot as plt

image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)

from image_utils import ImageUtils
iu = ImageUtils()

#############
# Parameters
#############

input_path = "/home/ladvien/deep_arcane/images/0_raw/2_black_and_white/"
output_path = "/home/ladvien/deep_arcane/images/0_raw/3_all_white/"

threshold = 80

samples = 10

#############
# Extract
#############
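
# The extraction body was truncated in this listing. Below is a minimal
# sketch of one plausible step from 2_black_and_white to 3_all_white:
# binarize each image so every mark becomes pure white, previewing the
# first few results (an assumption, not the recovered code).

if not os.path.exists(output_path):
    os.makedirs(output_path)

for i, file_name in enumerate(os.listdir(input_path)):
    image = cv2.imread(os.path.join(input_path, file_name), cv2.IMREAD_GRAYSCALE)
    if image is None:
        continue
    # Pixels brighter than `threshold` become white; the rest black.
    _, white = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)
    cv2.imwrite(os.path.join(output_path, file_name), white)
    # Preview the first `samples` conversions as a sanity check.
    if i < samples:
        plt.imshow(white, cmap="gray")
        plt.show()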
Example #2
 # If logging to TensorBoard
 if training_options['output_board']:
     merged = tf.summary.merge_all()  # tensorflow >= 0.12
     writer = tf.summary.FileWriter(training_options['log_path'],
                                    sess.graph)  # tensorflow >=0.12
 # Get a Saver object so the model can be saved and restored
 saver = tf.train.Saver()
 # Restore an existing checkpoint if one exists; otherwise initialize the network parameters
 if os.path.exists(training_options['model_path'] + '/checkpoint'):
     saver.restore(
         sess,
         training_options['model_path'] + training_options['model_name'])
 else:
     sess.run(tf.global_variables_initializer())
 # Instantiate ImageUtils to obtain all training and test data
 imageUtils = ImageUtils()
 train_data = imageUtils.train_data
 train_label = imageUtils.train_label
 test_data = imageUtils.test_data
 test_label = imageUtils.test_label
 # Start training
 for episode in range(training_options['total_episode']):
     # Randomly draw the configured number of training samples
     sample_datas, sample_labels = imageUtils.sample(
         len(train_data), training_options['batch_size'], train_data,
         train_label)
     # Train the network
     global_step, _, loss = sess.run(
         [train['global_step'], train['train'], train['loss']],
         feed_dict={
             image: sample_datas,
             labels: sample_labels  # assumed completion of the truncated feed_dict
         })
Example #3

import re
import logging

import requests


def post_img(bi_img):
    response = requests.post("http://10.40.16.226:11111/photo-service/photo", data=ImageUtils.resize_to_16v9(bi_img))
    url = response.json()['url']
    avatar_url = "http://webapi-rings.shabikplus.mozat.com/" + re.sub("__", "0", url)
    logging.info("post a faked user avatar, url=%s", avatar_url)
    return avatar_url
Example #4
import sys

import matplotlib.pyplot as plt
from random import randint
import numpy as np
from PIL import Image
import wandb

# Import Keras
import tensorflow.keras

image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)

from image_utils import ImageUtils
iu = ImageUtils()

#################################
# TODO: Make experiment folder
#################################
"""
DONE:
1. Add WandB
2. Test results.
4. Train with dropout.

TODO:
3. Split train / test images.
"""

#################################
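
# The script is truncated here. A minimal sketch of the TODO above (make an
# experiment folder) plus WandB initialization; the folder path and project
# name are hypothetical illustrations, not the recovered code.

import os

experiment_root = "/home/ladvien/deep_arcane/experiments"  # hypothetical path
experiment_name = "arcane_experiment"                      # hypothetical name

experiment_dir = os.path.join(experiment_root, experiment_name)
os.makedirs(experiment_dir, exist_ok=True)

# Track the run with Weights & Biases.
wandb.init(project=experiment_name, dir=experiment_dir)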
Example #5
def main():
    if (prepareData):
        imgUtils = ImageUtils(imgSize,
                              useAditional=useAditional,
                              keepAspectRatio=keepAspectRatio,
                              useKaggleData=useKaggleData)
        imgUtils.dataPreparation()

    K.set_image_data_format('channels_first')
    K.set_floatx('float32')

    np.random.seed(seed)

    print("\nLoading train data...\n" + SEPARATOR)

    if (keepAspectRatio):
        if (useAditional):
            train_data = np.load('saved_data/trainExtra' + str(imgSize) +
                                 '_OrigAspectRatio.npy')
            train_target = np.load('saved_data/trainExtra_target.npy')
        else:
            train_data = np.load('saved_data/train' + str(imgSize) +
                                 '_OrigAspectRatio.npy')
            train_target = np.load('saved_data/train_target.npy')
    else:

        if (useAditional):
            train_data = np.load('saved_data/trainExtra' + str(imgSize) +
                                 '.npy')
            train_target = np.load('saved_data/trainExtra_target.npy')
        else:
            train_data = np.load('saved_data/train' + str(imgSize) + '.npy')
            train_target = np.load('saved_data/train_target.npy')

    x_train, x_val_train, y_train, y_val_train = train_test_split(
        train_data,
        train_target,
        test_size=percentTrainForValidation,
        random_state=17)

    print(
        "\nTraining Set shape (num Instances, RGB channels, width, height): " +
        str(x_train.shape) + "\nTraining labels: " + str(y_train.shape) +
        "\nValidating set shape: " + str(x_val_train.shape) +
        "\nValidating set labels: " + str(y_val_train.shape) + "\n" +
        SEPARATOR)

    print("\nMaking data augmentation...\n" + SEPARATOR)
    datagen = da.prepareDataAugmentation(train_data=train_data)

    currentDate = datetime.today()
    timeStamp = currentDate.strftime("%d-%m-%Y_%H-%M")
    print("\nCreating model...\n" + SEPARATOR)
    if (loadPreviousModel):
        model = load_model(pathToPreviousModel)
        print("Loaded model from: " + pathToPreviousModel)
        model.summary()
    else:
        if (hiperParamOpt):
            print("\nHyperparameter optimization...\n" + SEPARATOR)
            model = KerasClassifier(build_fn=create_model,
                                    epochs=NumEpoch,
                                    batch_size=batchSize,
                                    validation_split=percentTrainForValidation)
            grid_result = hiperParametersOptimization(model, x_train, y_train)
            # summarize results
            print("Best score: %f using parameters %s" %
                  (grid_result.best_score_, grid_result.best_params_))
            means = grid_result.cv_results_['mean_test_score']
            stds = grid_result.cv_results_['std_test_score']
            params = grid_result.cv_results_['params']
            for mean, stdev, param in zip(means, stds, params):
                print("%f (%f) with: %r" % (mean, stdev, param))
            grid_result.best_estimator_.model.save(
                "saved_data/GridCV_Best_estimator" + timeStamp + ".h5")
            model = grid_result

        else:
            model = create_model()

    if (saveNetArchImage):
        if (hiperParamOpt):
            plot_model(grid_result.best_estimator_.model,
                       to_file='saved_data/model_' + timeStamp + '.png')
        else:
            plot_model(model, to_file='saved_data/model_' + timeStamp + '.png')

    if (onlyEvaluate):

        print("\nEvaluating Model...\n" + SEPARATOR)
        evaluateModel(model, x_val_train, y_val_train)

    else:
        if hiperParamOpt is False:
            fitKerasModel(datagen, model, timeStamp, x_train, x_val_train,
                          y_train, y_val_train)

    makePrediction(model, timeStamp)
Example #6
"""
@author: ladvien

    This script finds subimages in an image, extracts them, and saves
    them to a file.

"""

import os
import sys

image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)

from image_utils import ImageUtils
iu = ImageUtils()

#############
# Parameters
#############

input_path = "/home/ladvien/deep_arcane/images/0_raw/0_scraped"
output_path = "/home/ladvien/deep_arcane/images/0_raw/1_extracted"

minimum_size = 30

dry_run = False

#############
# Extract
#############
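
# The extraction body was truncated in this listing. A minimal sketch of a
# contour-based subimage extractor consistent with the parameters above,
# assuming OpenCV 4 (an illustration of the approach, not the recovered code):

import cv2

if not os.path.exists(output_path):
    os.makedirs(output_path)

for file_name in os.listdir(input_path):
    image = cv2.imread(os.path.join(input_path, file_name))
    if image is None:
        continue
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Otsu's threshold separates the marks from the background.
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for i, contour in enumerate(contours):
        x, y, w, h = cv2.boundingRect(contour)
        if w < minimum_size or h < minimum_size:
            continue  # skip fragments below the configured minimum size
        if not dry_run:
            out_name = f"{os.path.splitext(file_name)[0]}_{i}.png"
            cv2.imwrite(os.path.join(output_path, out_name), image[y:y + h, x:x + w])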
Example #7
"""
@author: ladvien
"""

import cv2

import sys
import os

import matplotlib.pyplot as plt

image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)

from image_utils import ImageUtils
iu = ImageUtils()

#############
# Parameters
#############

input_path = "/home/ladvien/deep_arcane/images/0_raw/3_all_white/"
output_path = "/home/ladvien/deep_arcane/images/0_raw/4_no_empty/"

threshold = 240

#############
# Extract
#############

if not os.path.exists(output_path):
    os.makedirs(output_path)  # assumed completion: create the output folder
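
# The rest of this example was truncated. A minimal sketch of one plausible
# reading of the 3_all_white -> 4_no_empty step: drop images that are almost
# entirely white, i.e. whose mean intensity exceeds the threshold above
# (an assumption, not the recovered code).

for file_name in os.listdir(input_path):
    image = cv2.imread(os.path.join(input_path, file_name), cv2.IMREAD_GRAYSCALE)
    if image is None:
        continue
    # Keep the image only if it still contains enough dark marks.
    if image.mean() < threshold:
        cv2.imwrite(os.path.join(output_path, file_name), image)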
Example #8
"""
Created on Wed Nov 11 19:00:54 2020

@author: ladvien
"""

import cv2

import sys
import os

image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)

from image_utils import ImageUtils
iu = ImageUtils()

#############
# Parameters
#############

input_path = "/home/ladvien/denoising_vae/data/raw/"
output_path = "/home/ladvien/denoising_vae/data/extracted/"

threshold = 240
minimum_size = 30
target_size = (128, 128)

dry_run = False

#############
# Extract
#############
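
# The extraction body was truncated. A minimal sketch consistent with the
# parameters above: skip tiny images, binarize at `threshold`, and resize to
# `target_size` (an assumption about the original step, not the recovered code).

if not os.path.exists(output_path):
    os.makedirs(output_path)

for file_name in os.listdir(input_path):
    image = cv2.imread(os.path.join(input_path, file_name), cv2.IMREAD_GRAYSCALE)
    if image is None:
        continue
    height, width = image.shape
    if height < minimum_size or width < minimum_size:
        continue
    _, binary = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)
    resized = cv2.resize(binary, target_size)
    if not dry_run:
        cv2.imwrite(os.path.join(output_path, file_name), resized)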
Example #9
            yield mpimpg.imread(image_path)

    return load_image(images)


def processVideo(video_input, video_output):
    print("* Processing video %s to %s" % (video_input, video_output))
    input_video_clip = VideoFileClip(video_input)
    clip = input_video_clip.fl_image(process_image)
    clip.write_videofile(video_output, audio=False)
    print("* Done")


if __name__ == "__main__":

    utils = ImageUtils()

    parser = argparse.ArgumentParser()
    parser.add_argument("-v",
                        action="store_true",
                        dest="process_video",
                        default=False)
    parser.add_argument("--input",
                        action="store",
                        dest="src_video",
                        default="project_video.mp4")
    args = parser.parse_args()

    utils.compute_perspective_transform_for_points(src_points, dst_points)
    load_image = load_jpg_images_from("camera_cal")
Example #10
def create_feature_extractor():
    if (prepareData):
        imgUtils = ImageUtils(imgSize, useAditional=useAditional, keepAspectRatio=keepAspectRatio,
                              useKaggleData=useKaggleData)
        imgUtils.dataPreparation()

    K.set_image_data_format('channels_first')
    K.set_floatx('float32')

    np.random.seed(RDM)

    print("\nLoading train data...\n" + SEPARATOR)
    if (keepAspectRatio):
        if (useAditional):
            train_data = np.load('saved_data/trainExtra' + str(imgSize) + '_OrigAspectRatio.npy')
            train_target = np.load('saved_data/trainExtra_target.npy')
        else:
            train_data = np.load('saved_data/train' + str(imgSize) + '_OrigAspectRatio.npy')
            train_target = np.load('saved_data/train_target.npy')
    else:

        if (useAditional):
            train_data = np.load('saved_data/trainExtra' + str(imgSize) + '.npy')
            train_target = np.load('saved_data/trainExtra_target.npy')
        else:
            train_data = np.load('saved_data/train' + str(imgSize) + '.npy')
            train_target = np.load('saved_data/train_target.npy')

    x_train, x_val_train, y_train, y_val_train = train_test_split(
        train_data, train_target, test_size=percentTrainForValidation,
        random_state=RDM)

    print("\nLoading test data...\n" + SEPARATOR)
    if (keepAspectRatio):
        test_data = np.load('saved_data/test' + str(imgSize) + '_OrigAspectRatio.npy')
    else:
        test_data = np.load('saved_data/test' + str(imgSize) + '.npy')

    if (dataAugmentation):
        print("\nMaking data augmentation...\n" + SEPARATOR)
        datagen = da.prepareDataAugmentation(train_data=train_data)

    print("\nCreating model...\n" + SEPARATOR)
    if (loadPreviousModel):
        baseModel = load_model(pathToPreviousModel)
        print("Loaded model from: " + pathToPreviousModel)

        if (ftModel == "VGG16"):
            model = Model(inputs=baseModel.input, outputs=baseModel.get_layer("block5_pool").output)
        elif (ftModel == "IV3"):
            model = Model(inputs=baseModel.input, outputs=baseModel.get_layer("mixed10").output)
    else:
        if (ftModel == "VGG16"):
            # loading VGG16 model weights
            model = VGG16(weights='imagenet', include_top=False, input_shape=(3, imgSize, imgSize))
        elif (ftModel == "IV3"):
            model = InceptionV3(weights='imagenet', include_top=False, input_shape=(3, imgSize, imgSize))

    # Extracting features from the train dataset using the VGG16 pre-trained model
    print("\nGenerating features...\n" + SEPARATOR)
    print("\nTraining features...\n")
    if (dataAugmentation):
        # predict_generator(self, generator, steps, max_q_size=10, workers=1, pickle_safe=False, verbose=1)
        # TODO: feed in more images

        batches = 0
        features_train = []
        train_labels = []
        for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=batchSize, shuffle=True):
            features_batch = model.predict_on_batch(x_batch)
            features_train.append(features_batch)
            train_labels.append(y_batch)
            batches += 1
            print("Batches: " + str(batches) + '/' + str(len(x_train) // batchSize))
            if batches >= len(x_train) // batchSize:
                # we need to break the loop by hand because
                # the generator loops indefinitely
                break

        print("\nValidation features...\n")

        batches = 0
        features_valid = []
        valid_labels = []
        for x_batch, y_batch in datagen.flow(x_val_train, y_val_train, batch_size=batchSize, shuffle=True):
            features_batch = model.predict_on_batch(x_batch)
            features_valid.append(features_batch)
            valid_labels.append(y_batch)
            batches += 1
            print("Batches: " + str(batches) + '/' + str(len(x_val_train) // batchSize))
            if batches >= len(x_val_train) // batchSize:
                # we need to break the loop by hand because
                # the generator loops indefinitely
                break

        print("\nTest features...\n")

        features_test = model.predict(test_data, batch_size=batchSize, verbose=1)

    else:
        features_train = model.predict(x_train, batch_size=batchSize, verbose=1)
        print("\nValidation features...\n")
        features_valid = model.predict(x_val_train, batch_size=batchSize, verbose=1)
        print("\nTest features...\n")
        features_test = model.predict(test_data, batch_size=batchSize, verbose=1)

    if (dataAugmentation):
        if (useAditional):
            if (keepAspectRatio):
                np.save('saved_data/feaExt_DATrain' + str(imgSize) + '.npy', features_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_DATrain' + str(imgSize) + '_target.npy', train_labels,
                        allow_pickle=True, fix_imports=True)

                np.save('saved_data/feaExt_DAValid' + str(imgSize) + '.npy', features_valid,
                        allow_pickle=True, fix_imports=True)

                np.save('saved_data/feaExt_DAValid' + str(imgSize) + '_target.npy', valid_labels,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_test' + str(imgSize) + '.npy', features_test,
                        allow_pickle=True, fix_imports=True)
            else:
                np.save('saved_data/feaExt_DA_NAR_Train' + str(imgSize) + '.npy', features_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_DA_NAR_Train' + str(imgSize) + '_target.npy', train_labels,
                        allow_pickle=True, fix_imports=True)

                np.save('saved_data/feaExt_DA_NAR_Valid' + str(imgSize) + '.npy', features_valid,
                        allow_pickle=True, fix_imports=True)

                np.save('saved_data/feaExt_DA_NAR_Valid' + str(imgSize) + '_target.npy', valid_labels,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt__NAR_test' + str(imgSize) + '.npy', features_test,
                        allow_pickle=True, fix_imports=True)
        else:
            if (keepAspectRatio):
                np.save('saved_data/fea_DATrain' + str(imgSize) + '.npy', features_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_DATrain' + str(imgSize) + '_target.npy', train_labels,
                        allow_pickle=True, fix_imports=True)

                np.save('saved_data/fea_DAValid' + str(imgSize) + '.npy', features_valid,
                        allow_pickle=True, fix_imports=True)

                np.save('saved_data/fea_DAValid' + str(imgSize) + '_target.npy', valid_labels,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_test' + str(imgSize) + '.npy', features_test,
                        allow_pickle=True, fix_imports=True)
            else:
                np.save('saved_data/fea_DA_NAR_Train' + str(imgSize) + '.npy', features_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_DA_NAR_Train' + str(imgSize) + '_target.npy', train_labels,
                        allow_pickle=True, fix_imports=True)

                np.save('saved_data/fea_DA_NAR_Valid' + str(imgSize) + '.npy', features_valid,
                        allow_pickle=True, fix_imports=True)

                np.save('saved_data/fea_DA_NAR_Valid' + str(imgSize) + '_target.npy', valid_labels,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea__NAR_test' + str(imgSize) + '.npy', features_test,
                        allow_pickle=True, fix_imports=True)
    else:
        if (useAditional):
            if (keepAspectRatio):
                np.save('saved_data/feaExt_Train' + str(imgSize) + '.npy', features_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_Train' + str(imgSize) + '_target.npy', y_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_Valid' + str(imgSize) + '.npy', features_valid,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_Valid' + str(imgSize) + '_target.npy', y_val_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_test' + str(imgSize) + '.npy', features_test,
                        allow_pickle=True, fix_imports=True)
            else:
                np.save('saved_data/feaExt_NAR_Train' + str(imgSize) + '.npy', features_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_NAR_Train' + str(imgSize) + '_target.npy', y_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_NAR_Valid' + str(imgSize) + '.npy', features_valid,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_NAR_Valid' + str(imgSize) + '_target.npy', y_val_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/feaExt_NAR_test' + str(imgSize) + '.npy', features_test,
                        allow_pickle=True, fix_imports=True)
        else:
            if (keepAspectRatio):
                np.save('saved_data/fea_Train' + str(imgSize) + '.npy', features_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_Train' + str(imgSize) + '_target.npy', y_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_Valid' + str(imgSize) + '.npy', features_valid,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_Valid' + str(imgSize) + '_target.npy', y_val_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_test' + str(imgSize) + '.npy', features_test,
                        allow_pickle=True, fix_imports=True)
            else:
                np.save('saved_data/fea_NAR_Train' + str(imgSize) + '.npy', features_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_NAR_Train' + str(imgSize) + '_target.npy', y_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_NAR_Valid' + str(imgSize) + '.npy', features_valid,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_NAR_Valid' + str(imgSize) + '_target.npy', y_val_train,
                        allow_pickle=True, fix_imports=True)
                np.save('saved_data/fea_NAR_test' + str(imgSize) + '.npy', features_test,
                        allow_pickle=True, fix_imports=True)
Example #11
def main():
    if (prepareData):
        imgUtils = ImageUtils(imgSize, useAditional=useAditional, keepAspectRatio=keepAspectRatio,
                              useKaggleData=useKaggleData)
        imgUtils.dataPreparationOVA()

    K.set_image_data_format('channels_first')
    K.set_floatx('float32')

    np.random.seed(17)

    print("\nLoading train data...\n" + SEPARATOR)

    train_target = []
    if (keepAspectRatio):
        if (useAditional):
            train_data = np.load('saved_data/trainExtra' + str(imgSize) + '_OrigAspectRatio.npy')

            train_target.append(np.load('saved_data/trainExtraOVA1_target.npy'))
            train_target.append(np.load('saved_data/trainExtraOVA2_target.npy'))
            train_target.append(np.load('saved_data/trainExtraOVA3_target.npy'))

        else:
            train_data = np.load('saved_data/train' + str(imgSize) + '_OrigAspectRatio.npy')

            train_target.append(np.load('saved_data/train_targetOVA1.npy'))
            train_target.append(np.load('saved_data/train_targetOVA2.npy'))
            train_target.append(np.load('saved_data/train_targetOVA3.npy'))
    else:

        if (useAditional):
            train_data = np.load('saved_data/trainExtra' + str(imgSize) + '.npy')

            train_target.append(np.load('saved_data/trainExtraOVA1_target.npy'))
            train_target.append(np.load('saved_data/trainExtraOVA2_target.npy'))
            train_target.append(np.load('saved_data/trainExtraOVA3_target.npy'))
        else:
            train_data = np.load('saved_data/train' + str(imgSize) + '.npy')

            train_target.append(np.load('saved_data/train_targetOVA1.npy'))
            train_target.append(np.load('saved_data/train_targetOVA2.npy'))
            train_target.append(np.load('saved_data/train_targetOVA3.npy'))

    print("\nMaking data augmentation...\n" + SEPARATOR)
    datagen = da.prepareDataAugmentation(train_data=train_data)

    model = []
    currentDate = datetime.today()
    timeStamp = currentDate.strftime("%d-%m-%Y_%H-%M")

    for i in range(len(train_target)):

        x_train, x_val_train, y_train, y_val_train = train_test_split(
            train_data, train_target[i], test_size=percentTrainForValidation,
            random_state=17)

        print("\nCreating model " + str(i + 1) + "...\n" + SEPARATOR)
        if (loadPreviousModel):
            model.append(load_model(pathToPreviousModel[i]))
            print("Loaded model from: " + pathToPreviousModel[i])
            model[i].summary()
        else:
            model.append(create_model())

        print("\nTraining Set shape (num Instances, RGB channels, width, height): " + str(
            x_train.shape) + "\nTraining labels: " + str(y_train.shape) + "\nValidating set shape: " + str(
            x_val_train.shape) + "\nValidating set labels: " + str(
            y_val_train.shape) + "\n" + SEPARATOR)

        if (saveNetArchImage):
            plot_model(model[i], to_file='saved_data/model_' + timeStamp + '.png')

        if (onlyEvaluate):

            print("\nEvaluating Model " + str(i + 1) + "...\n" + SEPARATOR)
            evaluateModel(model[i], x_val_train, y_val_train)

        else:
            print("\nFitting model " + str(i + 1) + "...\n" + SEPARATOR)
            checkPoint = ModelCheckpoint(
                "saved_data/OVA_model" + str(i + 1) + "_ep{epoch:02d}_" + timeStamp + ".hdf5",
                save_best_only=True)

            model[i].fit_generator(datagen.flow(x_train, y_train, batch_size=batchSize,
                                                shuffle=True),
                                   steps_per_epoch=10, epochs=NumEpoch,
                                   validation_data=(x_val_train, y_val_train),
                                   callbacks=[checkPoint])  # , verbose=2)

    print("\nLoading test data...\n" + SEPARATOR)

    if (keepAspectRatio):
        test_data = np.load('saved_data/test' + str(imgSize) + '_OrigAspectRatio.npy')
        test_id = np.load('saved_data/test_id.npy')
    else:
        test_data = np.load('saved_data/test' + str(imgSize) + '.npy')
        test_id = np.load('saved_data/test_id.npy')

    pred = []
    for i in range(len(model)):
        print("\nPredicting with model " + str(i + 1) + "...\n" + SEPARATOR)
        pred.append(model[i].predict_proba(test_data))

    predictions = np.transpose(np.vstack((pred[0][:, 1], pred[1][:, 1], pred[2][:, 1])))

    df = pd.DataFrame(predictions,
                      columns=['Type_1', 'Type_2', 'Type_3'])
    df['image_name'] = test_id

    df.to_csv("../submission/OVA_" + timeStamp + ".csv", index=False)
Example #12
# -*- coding: utf-8 -*-
"""
Created on Mon Nov  2 01:56:16 2020

@author: ladvien
"""
import os
import sys
import cv2
import matplotlib.pyplot as plt

image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)

from image_utils import ImageUtils
iu = ImageUtils()

#############
# Parameters
#############

input_path = "/home/ladvien/deep_arcane/images/0_raw/1_extracted"
output_path = "/home/ladvien/deep_arcane/images/0_raw/2_black_and_white/"

threshold = 120

samples = 10

#############
# Extract
#############
Example #13
"""
@author: ladvien
"""

import sys
import os

import cv2
import numpy as np
from random import randint

import matplotlib.pyplot as plt

image_tools_path = "/home/ladvien/deep_arcane/"
sys.path.append(image_tools_path)

from image_utils import ImageUtils
iu = ImageUtils()


def noisy(noise_typ, image):
    if noise_typ == "gauss":
        row, col, ch = image.shape
        mean = 0
        var = 0.1
        sigma = var**0.5
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        gauss = gauss.reshape(row, col, ch)
        noisy = image + gauss
        return noisy
    elif noise_typ == "s&p":
        row, col, ch = image.shape
        s_vs_p = 0.5
        # The rest of this branch was truncated; the standard salt-and-pepper
        # recipe follows, assuming 8-bit images (an assumed completion):
        amount = 0.004
        out = np.copy(image)
        # Salt: flip a random fraction of pixels to white.
        num_salt = int(np.ceil(amount * image.size * s_vs_p))
        coords = tuple(np.random.randint(0, i, num_salt) for i in image.shape)
        out[coords] = 255
        # Pepper: flip a random fraction of pixels to black.
        num_pepper = int(np.ceil(amount * image.size * (1.0 - s_vs_p)))
        coords = tuple(np.random.randint(0, i, num_pepper) for i in image.shape)
        out[coords] = 0
        return out
Example #14
# Test num 001. Marek
# <https://www.kaggle.com/marek3000/test-num-001/code/>

from image_utils import ImageUtils
imgSize = 10
useAditional = True
keepAspectRatio = True
useKaggleData = True

if __name__ == '__main__':
    imgUtils = ImageUtils(imgSize, useAditional, keepAspectRatio, useKaggleData)

    imgUtils.dataPreparation()
Example #15
args = parser.parse_args()

data_directory = args.data_directory
num_classes = args.num_classes
gpu = args.gpu
save_dir = args.save_dir
architecture = args.architecture
optimizer = args.optimizer
loss_function = args.loss_function
learning_rate = args.learning_rate
drop_out = args.drop_out
epochs = args.epochs
print_every = args.print_every
min_accuracy = args.min_accuracy

image_utils = ImageUtils(data_directory)
model_utils = ModelUtils(gpu)

print('\nBuilding model...\n')

model = model_utils.build_model(architecture,
                                num_classes,
                                dropout_prob=drop_out)
data_loaders = image_utils.create_data_loaders()

optimizer = model_utils.create_optimizer(model,
                                         optimizer,
                                         learning_rate=learning_rate)
criterion = model_utils.loss_function(loss_function)

print('\nTraining network...\n')
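
# The listing cuts off here. A minimal sketch of how the training step might
# look; ModelUtils is a local helper, so `train` and `save_checkpoint` below
# are hypothetical names used for illustration only.

model_utils.train(model,
                  data_loaders,
                  optimizer,
                  criterion,
                  epochs=epochs,
                  print_every=print_every,
                  min_accuracy=min_accuracy)

# Presumably the trained model is then checkpointed:
# model_utils.save_checkpoint(model, save_dir)  # hypothetical helper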
Example #16
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    # Make sure the model directory exists
    if not os.path.exists(training_options['model_path']):
        os.mkdir(training_options['model_path'])
    # Define the network
    net, train = model.build_network(training_options=training_options, image=image,
                                     drop_rate=training_options['drop_rate'], labels=labels)
    # Get a Saver object so the model can be saved and restored
    saver = tf.train.Saver()
    saver.restore(sess, training_options['model_path'] + training_options['model_name'])
    # Instantiate ImageUtils to obtain all training and test data

    imageUtils = ImageUtils()
    test_data = imageUtils.test_data
    test_label = imageUtils.test_label
    total_result = np.zeros((200, 2))
    for i in range(200):
        result = sess.run([net['digit1'], net['digit2'], net['digit3'], net['digit4']],
                          feed_dict={image: imageUtils.trainstion_data(test_data, start=i * 1000, end=i * 1000 + 1000)})
        result = code_utils.batch_out_transition(result)
        predicted = [result[0][index] + result[1][index] + result[2][index] + result[3][index]
                     for index in range(len(result[0]))]
        label = code_utils.batch_out_transition(test_label[i * 1000:i * 1000 + 1000])
        four_right_count = np.count_nonzero([predicted[index] == label[index] for index in range(len(predicted))])
        one_right_count = np.count_nonzero(
            [predicted[index][s_index] == label[index][s_index] for index in range(len(predicted)) for s_index in
             range(len(predicted[index]))])
        total_result[i, 0] = four_right_count / 1000 * 100
        total_result[i, 1] = one_right_count / 4000 * 100  # assumed completion: per-digit accuracy (4 digits x 1000 samples)
Example #17
with tf.Session(config=config) as sess:
    # Make sure the model directory exists
    if not os.path.exists(training_options['model_path']):
        os.mkdir(training_options['model_path'])
    # Define the network
    net, train = model.build_network(training_options=training_options,
                                     image=image,
                                     drop_rate=training_options['drop_rate'],
                                     labels=labels)
    # Get a Saver object so the model can be saved and restored
    saver = tf.train.Saver()
    saver.restore(
        sess, training_options['model_path'] + training_options['model_name'])
    # Instantiate ImageUtils to obtain all training and test data

    imageUtils = ImageUtils()
    test_data = imageUtils.test_data
    test_label = imageUtils.test_label
    total_result = np.zeros((200, 2))
    for i in range(200):
        result = sess.run(
            [net['digit1'], net['digit2'], net['digit3'], net['digit4']],
            feed_dict={
                image:
                imageUtils.trainstion_data(test_data,
                                           start=i * 1000,
                                           end=i * 1000 + 1000)
            })
        result = code_utils.batch_out_transition(result)
        predicted = [
            result[0][index] + result[1][index] + result[2][index] +