Example #1
def Learn(augmentation, input_epochs, train_dir, val_dir, window):
    #path
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    train_dir = os.path.join(BASE_DIR, train_dir)
    val_dir = os.path.join(BASE_DIR, val_dir)
    # Define hyperparameter
    # INPUT_SIZE = 299
    INPUT_SIZE = 200
    CHANNELS = 3
    INPUT_SHAPE = (INPUT_SIZE, INPUT_SIZE, CHANNELS)
    NUM_CLASSES = 10
    NUM_TRAIN_IMGS = 3000
    NUM_VAL_IMGS = 1000
    BATCH_SIZE = 32

    HORIZONTAL_FLIP = augmentation[0]
    VERTICAL_FLIP = augmentation[1]
    BRIGHTNESS_RANGE = None
    ROTATION_RANGE = augmentation[2]

    EPOCHS = input_epochs
    train_steps_per_epoch = NUM_TRAIN_IMGS // BATCH_SIZE
    val_steps_per_epoch = NUM_VAL_IMGS // BATCH_SIZE

    # Data Preprocessing
    training_datagen = ImageDataGenerator(
        rescale=1. / 255,
        horizontal_flip=HORIZONTAL_FLIP,
        vertical_flip=VERTICAL_FLIP,
        brightness_range=BRIGHTNESS_RANGE,
        rotation_range=ROTATION_RANGE,
    )
    validation_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = training_datagen.flow_from_directory(
        train_dir,
        target_size=(INPUT_SIZE, INPUT_SIZE),
        class_mode='categorical',
        batch_size=BATCH_SIZE)

    validation_generator = validation_datagen.flow_from_directory(
        val_dir,
        target_size=(INPUT_SIZE, INPUT_SIZE),
        class_mode='categorical',
        batch_size=BATCH_SIZE)

    # Load pre-trained model
    base_model = tf.keras.applications.InceptionV3(
        include_top=False,
        weights='imagenet',
        input_shape=INPUT_SHAPE,
    )

    # Freeze the pre-trained layers
    base_model.trainable = False

    # Add a fully connected layer
    model = tf.keras.Sequential()
    model.add(base_model)
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(512, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Dense(256, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.5))
    model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))

    model.summary()

    # Compile
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Callbacks
    checkpoint_filepath = os.path.join(
        BASE_DIR, 'learning_test/checkpoint/InceptionV3_cifar10.h5')

    plotLosses = PlotLosses(input_epochs, window)

    callbacks = [
        tf.keras.callbacks.EarlyStopping(
            patience=10,
            monitor='val_accuracy',
            #  restore_best_weights=True
        ),
        tf.keras.callbacks.ModelCheckpoint(
            filepath=checkpoint_filepath,
            monitor='val_accuracy',
            mode='max',
            save_best_only=True,
            # save_weights_only=True,
        ),
        plotLosses,
    ]

    # training model
    history = model.fit(train_generator,
                        epochs=EPOCHS,
                        steps_per_epoch=train_steps_per_epoch,
                        validation_data=validation_generator,
                        validation_steps=val_steps_per_epoch,
                        verbose=1,
                        callbacks=callbacks)
    window.textBox_terminal.append("Training Done!")

    val_loss = history.history['val_loss']
    val_accuracy = history.history['val_accuracy']

    max_val_accuracy = round(np.max(val_accuracy), 4)
    min_val_loss = round(np.min(val_loss), 4)
    message = "Epoch: " + str(np.argmin(val_loss) +
                              1) + " , Min val_loss: " + str(min_val_loss)
    window.textBox_terminal.append(message)
    plt.close()
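The checkpoint saved above can later be reloaded for inference. A minimal sketch, assuming the same 200x200 input size and rescaling as in training; the directory name is a placeholder:

import numpy as np
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator

# Hypothetical follow-up: reload the best checkpoint and predict on a folder of images.
best_model = tf.keras.models.load_model('learning_test/checkpoint/InceptionV3_cifar10.h5')
datagen = ImageDataGenerator(rescale=1. / 255)  # same rescaling as during training
test_generator = datagen.flow_from_directory(
    'some_test_dir',                 # placeholder directory
    target_size=(200, 200),
    class_mode=None,
    shuffle=False)
probs = best_model.predict(test_generator)
pred_classes = np.argmax(probs, axis=1)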
Example #2
	df = pd.read_csv('train.truth.csv')

	training_size = math.ceil(len(df)* 0.8)
	training_set = df[:training_size]
	validation_set = df[training_size:]

	build_directory_structure('training_set', training_set)
	build_directory_structure('valid_set', validation_set)

	train_datagen = ImageDataGenerator(rescale=1./255)
	valid_datagen = ImageDataGenerator(rescale=1./255)
	test_datagen = ImageDataGenerator(rescale=1./255)

	training_set = train_datagen.flow_from_directory(
	    'datasets/training_set',
	    target_size=(64, 64),
	    batch_size=32,
        seed=42,
	    class_mode='categorical')

	valid_set = valid_datagen.flow_from_directory(
	    'datasets/valid_set/',
	    target_size=(64, 64),
	    batch_size=32,
        seed=42,
	    class_mode='categorical')

	test_set = test_datagen.flow_from_directory(
        'original_data/',
        classes=['test'],
        target_size=(64, 64),
        seed=42,
Example #3
    rescale=1. / 255,  # scale pixel values to the 0~1 range
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    brightness_range=(0.5, 1.3),
    horizontal_flip=True,
    fill_mode='nearest',
    preprocessing_function=add_random_noise)

validation_datagen = ImageDataGenerator(rescale=1. / 255)  # no augmentation on the validation/test set

# Reading images from directory and pass them to the model
train_generator = training_datagen.flow_from_directory(TRAINING_DIR)
# batch_size=batch_size, target_size=(224, 224), class_mode='categorical', shuffle=True

validation_generator = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    batch_size=batch_size,  # number of images processed per batch
    target_size=(224, 224),  # resize images to 224x224
    class_mode='categorical'  # multi-class classification
)

# Plotting the augmented images
img, label = next(train_generator)  # fetch one augmented batch from the training generator
plt.figure(figsize=(20, 20))

for i in range(8):
    plt.subplot(2, 4, i + 1)
    plt.imshow(img[i])
plt.show()
Example #4
classifier.add(Flatten())
classifier.add(Dropout(.5))
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))

classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory('Training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('Test_set',
                                            target_size=(64, 64),
                                            batch_size=30,
                                            class_mode='binary')
history = classifier.fit_generator(training_set,
                                   steps_per_epoch=50,
                                   epochs=20,
                                   validation_data=test_set,
                                   validation_steps=25)

# Part 3 - Making new predictions
import numpy as np
from keras.preprocessing import image
test_image = image.load_img(
Example #5
File: file.py  Project: gautam43/mlops
model.summary()
model.add(Dense(units=128, activation='relu'))
model.summary()
model.add(Dense(units=1, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
from keras_preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
        '/dataset/cnn_dataset/training_set/',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')
test_set = test_datagen.flow_from_directory(
        '/dataset/cnn_dataset/test_set/',
        target_size=(64, 64),
        batch_size=32,
        class_mode='binary')
history=model.fit(
        training_set,
        steps_per_epoch=8000,
        epochs=1,
        validation_data=test_set,
        validation_steps=2000)
model.save('mymodel.h5')
from keras.preprocessing import image
image_path = os.path.expanduser('~/Documents/Research/VISAGE_a/DeepUAge_dataset')
test_path = os.path.join(image_path, 'test')
datagen_batch_size = 32
batch_size = 32
image_size = 224
learning_rate = 0.0025389437553681262
momentum = 0.2753813367505939

datagen = ImageDataGenerator()

# load and iterate test dataset
test_generator = datagen.flow_from_directory(
    test_path,
    class_mode='categorical',
    batch_size=batch_size,
    target_size=(image_size, image_size),
    shuffle=False

)

optimizer = optimizers.SGD(lr=learning_rate, momentum=momentum, nesterov=True)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['mae'])

true_labels = test_generator.classes

scores = model.evaluate_generator(test_generator, test_generator.samples)
print("Error = ", scores)

predictions = model.predict_generator(test_generator)

y_true = true_labels
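Note that evaluate_generator and predict_generator, used above, are deprecated aliases in TF 2.x; model.evaluate and model.predict accept generators directly. A minimal equivalent sketch (assuming numpy is imported as np; the steps value is an assumption):

# Equivalent calls with the unified TF 2.x API.
scores = model.evaluate(test_generator, steps=test_generator.samples // batch_size)
predictions = model.predict(test_generator)
y_pred = np.argmax(predictions, axis=1)  # predicted class indices, to compare with y_true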
Example #7
     fill_mode='reflect',
     horizontal_flip=True,
     vertical_flip=False,
     rescale=1 / 255,
     preprocessing_function=get_random_eraser(p=0.8,
                                              s_l=0.02,
                                              s_h=0.4,
                                              r_1=0.3,
                                              r_2=1 / 0.3,
                                              v_l=0,
                                              v_h=255,
                                              pixel_level=True))
 valid_datagen = ImageDataGenerator(rescale=1 / 255)
 train = train_datagen.flow_from_directory(TRAIN_DIR,
                                           target_size=IMAGE_SIZE,
                                           color_mode='rgb',
                                           batch_size=BATCH_SIZE,
                                           interpolation='bicubic')
 valid = valid_datagen.flow_from_directory(VAL_DIR,
                                           target_size=IMAGE_SIZE,
                                           color_mode='rgb',
                                           batch_size=BATCH_SIZE,
                                           interpolation='bicubic')
 class_weights = compute_class_weight('balanced', np.arange(0, N_CLASSES),
                                      train.classes)
 # model
 input_tensor = keras.layers.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
 base_model = SEResNextImageNet(input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1],
                                             3),
                                depth=[3, 4, 6, 3],
                                cardinality=32,
Example #8
def Retrain(augmentation, input_epochs, train_path, val_path, window, trained_model_path):
    #path
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    train_dir = os.path.join(BASE_DIR, train_path)
    val_dir = os.path.join(BASE_DIR, val_path)
    MODEL_PATH = os.path.join(BASE_DIR, trained_model_path)
    # Define hyperparameter
    INPUT_SIZE = 200
    CHANNELS = 3
    INPUT_SHAPE = (INPUT_SIZE, INPUT_SIZE, CHANNELS)
    NUM_CLASSES = window.learn_num_data[0]
    NUM_TRAIN_IMGS = window.learn_num_data[1]
    NUM_VAL_IMGS = window.learn_num_data[2]
    BATCH_SIZE = 32

    HORIZONTAL_FLIP = augmentation[0]
    VERTICAL_FLIP = augmentation[1]
    BRIGHTNESS_RANGE = augmentation[2]
    ROTATION_RANGE = augmentation[3]
    if augmentation[4] == True:
        CUT_OUT = cutout(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, pixel_level=False)
    else :
        CUT_OUT = None

    EPOCHS = input_epochs
    train_steps_per_epoch = NUM_TRAIN_IMGS // BATCH_SIZE
    val_steps_per_epoch = NUM_VAL_IMGS // BATCH_SIZE

    # Data Preprocessing
    if window.settingsData[0] == "EfficientnetB0":
        # EfficientNet case: skip the 1./255 rescaling
        training_datagen = ImageDataGenerator(
                                horizontal_flip = HORIZONTAL_FLIP,
                                vertical_flip = VERTICAL_FLIP,
                                brightness_range = BRIGHTNESS_RANGE,
                                rotation_range = ROTATION_RANGE,
                                preprocessing_function = CUT_OUT,
                                )
        validation_datagen = ImageDataGenerator()
    else:
        training_datagen = ImageDataGenerator(
                                rescale = 1./255,
                                horizontal_flip = HORIZONTAL_FLIP,
                                vertical_flip = VERTICAL_FLIP,
                                brightness_range = BRIGHTNESS_RANGE,
                                rotation_range = ROTATION_RANGE,
                                preprocessing_function = CUT_OUT,
                                )
        validation_datagen = ImageDataGenerator(
                                rescale = 1./255
                                )


    train_generator = training_datagen.flow_from_directory(
        train_dir,
        target_size=(INPUT_SIZE, INPUT_SIZE),
        class_mode='categorical',
        batch_size= BATCH_SIZE
    )

    validation_generator = validation_datagen.flow_from_directory(
        val_dir,
        target_size=(INPUT_SIZE, INPUT_SIZE),
        class_mode='categorical',
        batch_size= BATCH_SIZE
    )


    # Load pre-trained model
    model = tf.keras.models.load_model(MODEL_PATH)
    model.pop()
    # Add last layer
    for layr in model.layers:
        if ('vgg16' in layr.name):
            layr.trainable = False
        elif ('resnet152' in layr.name):
            layr.trainable = False
        elif ('efficientnetb0' in layr.name):
            layr.trainable = False
        elif ('inception_v3' in layr.name):
            layr.trainable = False
            
    model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax', name='predictions'+str(NUM_CLASSES)))
    model.summary()

    # Compile
    model.compile(optimizer = 'adam',
                loss = 'categorical_crossentropy',
                metrics = ['accuracy'])

    # Callbacks
    checkpoint_filepath = os.path.join(BASE_DIR, 'checkpoint', window.settingsData[3] + '.h5')

    plotLosses = PlotLosses(input_epochs, window)

    callbacks = [
        tf.keras.callbacks.EarlyStopping(patience=10, monitor='val_loss',
                                        #  restore_best_weights=True
                                        ),
        tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath,
                                            monitor='val_loss',
                                            mode='min',
                                            save_best_only=True,
                                            # save_weights_only=True,
                                        ),
        plotLosses,
    ]


    # training model
    history = model.fit(train_generator,
                        epochs=EPOCHS,
                        steps_per_epoch=train_steps_per_epoch,
                        validation_data=validation_generator,
                        validation_steps=val_steps_per_epoch,
                        verbose=1,
                        callbacks=callbacks)
    window.textBox_terminal.append("Training Done!")
    val_loss = history.history['val_loss']
    val_accuracy = history.history['val_accuracy']

    max_val_accuracy = round(np.max(val_accuracy), 4)
    min_val_loss = round(np.min(val_loss), 4)
    message = "Epoch: "+ str(np.argmin(val_loss)+1)+ " , Min val_loss: "+ str(min_val_loss)
    window.textBox_terminal.append(message)
    window.settingsData.append(min_val_loss)
    window.settingsData.append(max_val_accuracy)
    plt.close()
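The cutout(...) factory used in Retrain above (like get_random_eraser in an earlier example) is not shown in this excerpt. A hypothetical sketch of a random-erasing preprocessing function with the same parameter names, for illustration only:

import numpy as np

def cutout(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False):
    # Returns a per-image preprocessing function that erases a random rectangle.
    def eraser(img):
        if np.random.rand() > p:                      # apply with probability p
            return img
        h, w, c = img.shape
        area = np.random.uniform(s_l, s_h) * h * w    # erased area as a fraction of the image
        ratio = np.random.uniform(r_1, r_2)           # aspect ratio of the erased patch
        eh = int(np.sqrt(area / ratio))
        ew = int(np.sqrt(area * ratio))
        top = np.random.randint(0, max(h - eh, 1))
        left = np.random.randint(0, max(w - ew, 1))
        if pixel_level:
            patch = np.random.uniform(v_l, v_h, (min(eh, h - top), min(ew, w - left), c))
        else:
            patch = np.random.uniform(v_l, v_h)       # single fill value
        img[top:top + eh, left:left + ew, :] = patch
        return img
    return eraser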
# --- Get the current working directory ---
cwd = os.getcwd()


# --- Change directories ---
os.chdir("C:\\Users\\15713\\Desktop\\DS Projects\\Speech Recognition\\speech-recognition\\src")


# --- More import ---
import evaluate_models


# --- Get test data ---
test_dir = "C:\\Users\\15713\\Desktop\\DS Projects\\Speech Recognition\\speech-recognition\\data\\processed\\spectrograms\\test"
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(test_dir, target_size=(128, 128), seed=340, shuffle=False)


# --- Change directories ---
os.chdir("C:\\Users\\15713\\Desktop\\DS Projects\\Speech Recognition\\speech-recognition\\models\\spectrograms")


# --- Load the best model and its weights ---
model = load_model("1_cnn_base_2.hdf5")


# --- Evaluate the best model ---
score = model.evaluate_generator(test_generator)
print("\nTest loss:      ", score[0])
print("Test accuracy:  ", score[1])
print()
                  optimizer=SGD(lr=0.01,
                                decay=1e-6,
                                momentum=0.9,
                                nesterov=True),
                  metrics=['accuracy'])

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        TRAIN_DIR,
        target_size=(IMG_WIDTH, IMG_HEIGHT),
        batch_size=BATCH_SIZE,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        VALIDATE_DIR,
        target_size=(IMG_WIDTH, IMG_HEIGHT),
        batch_size=BATCH_SIZE,
        class_mode='binary')

    callbacks = [TensorBoard(log_dir="logs/{}".format(NAME))]

    model.fit_generator(train_generator,
                        callbacks=callbacks,
                        steps_per_epoch=TRAIN_STEP,
                        epochs=EPOCHS,
Example #11
import numpy as np
import keras
from keras.preprocessing import image
from keras_preprocessing.image import ImageDataGenerator
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense

img_width, img_height = 150, 150

test_image = image.load_img('random2.jpg', target_size=(img_width,img_height))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)

classifier = keras.models.load_model('catsdogs.h5')
result = classifier.predict(test_image)

train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

training_set = train_datagen.flow_from_directory(
    r'C:\images\Training',
    target_size=(img_width, img_height),
    batch_size=24,
    class_mode='binary'
)

if result [0][0] >= 0.5:
    prediction = 'dog'
else:
    prediction = 'cat'

print(prediction)
Example #12
    def transfer_learning(self, train_data_dir, validation_data_dir, epochs):
        # compile the model
        self.transfer_model.compile(loss="categorical_crossentropy",
                                    optimizer=optimizers.SGD(lr=1e-3,
                                                             momentum=0.9),
                                    metrics=["accuracy"])

        # Initiate the train and test generators with data Augumentation
        # Save the model according to the conditions
        checkpoint = ModelCheckpoint("facenet_transfer_weight.h5",
                                     monitor='val_accuracy',
                                     verbose=2,
                                     save_best_only=True,
                                     save_weights_only=False,
                                     mode='auto',
                                     period=1)
        early = EarlyStopping(monitor='val_accuracy',
                              min_delta=0,
                              patience=100,
                              verbose=1,
                              mode='auto')

        temp_path = os.path.join(os.getcwd(), "temp")
        train_data_path = os.path.join(temp_path, "train")
        val_data_path = os.path.join(temp_path, "val")

        # doing preprocessing when temp dir not exist
        if not os.path.exists(temp_path):
            os.mkdir(temp_path)
            if not os.path.exists(train_data_path):
                os.mkdir(train_data_path)

            if not os.path.exists(val_data_path):
                os.mkdir(val_data_path)

            self.preprocessing(train_data_dir, train_data_path)
            self.preprocessing(validation_data_dir, val_data_path)

        train_datagen = ImageDataGenerator(featurewise_center=True,
                                           featurewise_std_normalization=True,
                                           rotation_range=20,
                                           width_shift_range=0.2,
                                           height_shift_range=0.2,
                                           horizontal_flip=True)

        test_datagen = ImageDataGenerator(rescale=1. / 255)

        train_generator = train_datagen.flow_from_directory(
            train_data_path,
            target_size=(160, 160),
            batch_size=32,
            class_mode="categorical")

        validation_generator = test_datagen.flow_from_directory(
            val_data_path, target_size=(160, 160), class_mode="categorical")

        # Train the model
        history = self.transfer_model.fit_generator(
            train_generator,
            steps_per_epoch=2,
            epochs=epochs,
            validation_data=validation_generator,
            validation_steps=2,
            callbacks=[checkpoint, early])

        return history
Example #13
model.add(Activation('sigmoid'))

opt = keras.optimizers.rmsprop(lr=0.0000001, decay=1e-5)

model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.1,
                                   zoom_range=0.1,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size,
                                                    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(train_generator,
                    steps_per_epoch=num_train_size,
                    epochs=epochs,
                    validation_data=validation_generator,
                    validation_steps=num_validation_size)
Example #14
              metrics=['acc'])

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')  # with data augmentation

val_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(directory=train_dir,
                                                    target_size=(150, 150),
                                                    batch_size=32,
                                                    class_mode='binary')
validation_generator = val_datagen.flow_from_directory(
    directory=validation_dir,
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary')
history = model.fit_generator(generator=train_generator,
                              steps_per_epoch=150,
                              epochs=60,
                              validation_data=validation_generator,
                              validation_steps=100)

model.save("cat_and_dogs_small_2.h5")

acc = history.history['acc']
#from tensorflow import keras
import tensorflow as tf
#import numpy as np
#import keras_preprocessing
#from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator

train_data_path = 'FinalColor/Training'
test_data_path = 'FinalColor/Testing'

TrainDataGen = ImageDataGenerator(rescale=1. / 255)
TestDataGen = ImageDataGenerator(rescale=1. / 255)

TrainSet = TrainDataGen.flow_from_directory(train_data_path,
                                            target_size=(100, 100),
                                            class_mode='categorical')
TestSet = TestDataGen.flow_from_directory(test_data_path,
                                          target_size=(100, 100),
                                          class_mode='categorical')

cnn_model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3),
                           activation='relu',
                           input_shape=(100, 100, 3)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(14, activation='softmax')
])
"""cnn_model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
Example #16
print(f'This is the number of trainable weight after freezing the conv base: {len(model_2.trainable_weights)}')
# plot_history(history)

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(150, 150),
                                                    batch_size=BATCH_SIZE,
                                                    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        target_size=(150, 150),
                                                        batch_size=BATCH_SIZE,
                                                        class_mode='binary')

model_2.compile(loss='binary_crossentropy',
                optimizer=optimizers.RMSprop(lr=2e-5),
                metrics=['acc'])

history = model_2.fit_generator(train_generator,
                                steps_per_epoch=N_TRAIN_IMAGES // BATCH_SIZE,
                                epochs=10,
                                validation_data=validation_generator,
Example #17
import keras
import time
import numpy as np

from keras_preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator1 = train_datagen.flow_from_directory(
    'F:\\facenet_train_data\\train',
    target_size=(96, 96),
    batch_size=400,
    class_mode='binary')

train_generator2 = train_datagen.flow_from_directory(
    'F:\\facenet_train_data\\train',
    target_size=(96, 96),
    batch_size=400,
    class_mode='binary')

train_generator = zip(train_generator1, train_generator2)

# samples = train_generator.samples
# print('samples: ', samples)
# print(train_generator.class_indices)

for x, y in train_generator:
    lab = []
    for i in range(len(x[1])):
        if x[1][i] == y[1][i]:
            lab.append(1)
Example #18
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input, Dropout

from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
from tensorflow.keras.optimizers import Adam
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.inception_v3 import InceptionV3

train_path = 'fire/train'
valid_path = 'fire/valid'
test_path = 'fire/test'

training_datagen = ImageDataGenerator(rescale=1./255, zoom_range=0.15, horizontal_flip=True, fill_mode='nearest')
train_generator = training_datagen.flow_from_directory(train_path, target_size=(224,224), classes=['fire', 'no-fire'], class_mode='categorical', shuffle = True, batch_size = 128)

validation_datagen = ImageDataGenerator(rescale = 1./255)
validation_generator = validation_datagen.flow_from_directory(valid_path, target_size=(224,224), classes=['fire', 'no-fire'], class_mode='categorical', shuffle = True, batch_size= 14)

test_generator = ImageDataGenerator(rescale=1./255).flow_from_directory(directory=test_path, target_size=(224,224), classes=['fire', 'no-fire'], class_mode='categorical', shuffle=False, batch_size=10)
testSteps = test_generator.n/8

input_tensor = Input(shape=(224, 224, 3))
base_model = InceptionV3(input_tensor=input_tensor, weights='imagenet', include_top=False)

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(2048, activation='relu')(x)
x = Dropout(0.25)(x)
x = Dense(1024, activation='relu')(x)
Example #19
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    'C:/Train2',  # this is the target directory
    target_size=(150, 150),  # all images will be resized to 150x150
    batch_size=5,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory('C:/Test',
                                                        target_size=(150, 150),
                                                        batch_size=5,
                                                        class_mode='binary')

model.fit_generator(train_generator,
                    steps_per_epoch=50,
                    epochs=20,
                    validation_data=validation_generator,
                    validation_steps=60)
model.save('D:/retrain.h5')
Example #20
def main(session_name, epochs, batch_size, optimizer, loss, metrics):
    # kafka_dataset = tfio.kafka.KafkaDataset(
    #     topics='deeplearnint_training_1', servers='localhost', group='', eof=False, timeout=1000,
    #     config_global=None, config_topic=None, message_key=False
    # )

    _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'

    path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip',
                                          origin=_URL,
                                          extract=True)

    PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
    train_dir = os.path.join(PATH, 'train')
    validation_dir = os.path.join(PATH, 'validation')
    train_cats_dir = os.path.join(
        train_dir, 'cats')  # directory with our training cat pictures
    train_dogs_dir = os.path.join(
        train_dir, 'dogs')  # directory with our training dog pictures
    validation_cats_dir = os.path.join(
        validation_dir, 'cats')  # directory with our validation cat pictures
    validation_dogs_dir = os.path.join(
        validation_dir, 'dogs')  # directory with our validation dog pictures
    num_cats_tr = len(os.listdir(train_cats_dir))
    num_dogs_tr = len(os.listdir(train_dogs_dir))

    num_cats_val = len(os.listdir(validation_cats_dir))
    num_dogs_val = len(os.listdir(validation_dogs_dir))

    total_train = num_cats_tr + num_dogs_tr
    total_val = num_cats_val + num_dogs_val

    print('total training cat images:', num_cats_tr)
    print('total training dog images:', num_dogs_tr)

    print('total validation cat images:', num_cats_val)
    print('total validation dog images:', num_dogs_val)
    print("--")
    print("Total training images:", total_train)
    print("Total validation images:", total_val)

    # batch_size = 128
    # epochs = 15
    IMG_HEIGHT = 150
    IMG_WIDTH = 150
    train_image_generator = ImageDataGenerator(
        rescale=1. / 255)  # Generator for our training data
    validation_image_generator = ImageDataGenerator(
        rescale=1. / 255)  # Generator for our validation data
    train_data_gen = train_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=train_dir,
        shuffle=True,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')
    val_data_gen = validation_image_generator.flow_from_directory(
        batch_size=batch_size,
        directory=validation_dir,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        class_mode='binary')
    sample_training_images, _ = next(train_data_gen)

    model = Sequential([
        Conv2D(16,
               3,
               padding='same',
               activation='relu',
               input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
        MaxPooling2D(),
        Conv2D(32, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(64, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Flatten(),
        Dense(512, activation='relu'),
        Dense(1, activation='sigmoid')
    ])
    # model.compile(optimizer=optimizer,
    #               loss=loss,
    #               metrics=metrics)

    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    model.summary()

    # model.fit(train_images, train_labels, epochs=epochs, batch_size=batch_size, callbacks=[KafkaCallback(session_name)])
    # test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2, callbacks=[KafkaCallback(session_name)])

    history = model.fit(train_data_gen,
                        steps_per_epoch=total_train // batch_size,
                        epochs=epochs,
                        validation_data=val_data_gen,
                        validation_steps=total_val // batch_size,
                        callbacks=[KafkaCallback(session_name)])
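A minimal invocation sketch for main; the argument values below are placeholders (batch_size=128 and epochs=15 mirror the commented-out defaults above), and KafkaCallback is assumed to be defined elsewhere in the module:

if __name__ == '__main__':
    main(session_name='demo_run',
         epochs=15,
         batch_size=128,
         optimizer='adam',
         loss='binary_crossentropy',
         metrics=['accuracy'])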
Example #21
                        rotation_range=30,
                        width_shift_range=0.2,
                        height_shift_range=0.2,
                        shear_range=0.2,
                        zoom_range=0.2,
                        horizontal_flip=True,
                        featurewise_center=True
                        )



# train data
train_generator = train_valid_datagen.flow_from_directory(
                                          train_path,
                                          target_size=(299, 299),
                                          batch_size=32,
                                          class_mode='categorical', 
                                          subset='training'
                                          )


# validation data
valid_generator = train_valid_datagen.flow_from_directory(
                                          train_path,
                                          target_size=(299, 299),
                                          batch_size=32,
                                          class_mode='categorical', 
                                          subset='validation'
                                          )

Example #22
def input_model(x_train, y_train, x_val, y_val, params):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    K.set_session(session)
    # input generators
    if params['model'] == 'NASNetLarge':
        batch_size = 64
    else:
        batch_size = 128
    IMAGE_SIZE = (params['image_size'], params['image_size'])
    train_datagen = ImageDataGenerator(rotation_range=5,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       brightness_range=(0.85, 1.15),
                                       shear_range=0.0,
                                       zoom_range=0.2,
                                       channel_shift_range=0.2,
                                       fill_mode='nearest',
                                       horizontal_flip=True,
                                       vertical_flip=False,
                                       preprocessing_function=preprocess_input)
    valid_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
    train = train_datagen.flow_from_directory(TRAIN_DIR,
                                              target_size=IMAGE_SIZE,
                                              color_mode='rgb',
                                              batch_size=batch_size,
                                              interpolation='bicubic')
    valid = valid_datagen.flow_from_directory(VAL_DIR,
                                              target_size=IMAGE_SIZE,
                                              color_mode='rgb',
                                              batch_size=batch_size,
                                              interpolation='bicubic')

    # model
    input_tensor = keras.layers.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
    if params['model'] == 'xception':
        base_model = Xception(include_top=False,
                              weights=None,
                              input_tensor=input_tensor,
                              input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3),
                              pooling='avg',
                              classes=N_CLASSES)
    elif params['model'] == 'inception_resnet_v2':
        base_model = InceptionResNetV2(include_top=False,
                                       weights=None,
                                       input_tensor=input_tensor,
                                       input_shape=(IMAGE_SIZE[0],
                                                    IMAGE_SIZE[1], 3),
                                       pooling='avg',
                                       classes=N_CLASSES)
    elif params['model'] == 'nasnet':
        base_model = NASNetLarge(include_top=False,
                                 weights=None,
                                 input_tensor=input_tensor,
                                 input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3),
                                 pooling='avg',
                                 classes=N_CLASSES)
    elif params['model'] == 'resnext101':
        base_model = ResNeXt101(include_top=False,
                                weights=None,
                                input_tensor=input_tensor,
                                input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3),
                                pooling='avg',
                                classes=N_CLASSES)
    else:
        base_model = ResNeXt50(include_top=False,
                               weights=None,
                               input_tensor=input_tensor,
                               input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3),
                               pooling='avg',
                               classes=N_CLASSES)

    x = base_model.output
    predictions = Dense(N_CLASSES, activation='softmax')(x)
    model = keras.models.Model(inputs=base_model.input, outputs=predictions)

    LR_BASE = params['learning_rate']
    decay = LR_BASE / (EPOCHS)
    sgd = keras.optimizers.SGD(lr=LR_BASE,
                               decay=decay,
                               momentum=0.9,
                               nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # callbacks
    checkpoint_path = os.path.join(CHECKPOINT_PATH,
                                   'model_{}_checkpoints'.format(MODEL_NO))
    if not os.path.isdir(checkpoint_path):
        os.makedirs(checkpoint_path)
    ckpt = keras.callbacks.ModelCheckpoint(os.path.join(
        checkpoint_path, 'model.{epoch:02d}-{val_acc:.2f}.h5'),
                                           monitor='val_acc',
                                           verbose=1,
                                           save_best_only=True)
    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.2,
                                            patience=7,
                                            verbose=1,
                                            mode='auto',
                                            min_delta=0.001,
                                            cooldown=0,
                                            min_lr=0)
    early_stopping = callbacks.EarlyStopping(monitor='val_acc',
                                             min_delta=0.001,
                                             patience=15)
    log_dir = "logs/model_{}_{}_{}".format(
        MODEL_NO, params['model'],
        datetime.utcnow().strftime("%d%m%Y_%H%M%S"))
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    tensorboard = callbacks.TensorBoard(log_dir)

    out = model.fit_generator(
        train,
        steps_per_epoch=train.n / train.batch_size,
        epochs=EPOCHS,
        validation_data=valid,
        validation_steps=valid.n / valid.batch_size,
        callbacks=[ckpt, reduce_lr, early_stopping, tensorboard])

    return out, model
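A hedged invocation sketch for input_model: the params keys mirror the ones read in the code above, and the x/y array arguments appear unused in this excerpt, so None is passed for them (all values are assumptions):

# Hypothetical call; TRAIN_DIR, VAL_DIR, EPOCHS, N_CLASSES etc. come from the surrounding module.
params = {'model': 'xception', 'image_size': 299, 'learning_rate': 0.01}
out, model = input_model(x_train=None, y_train=None, x_val=None, y_val=None, params=params)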
# base path for the images
base_path = "../input/images/images/"

datagen_train = ImageDataGenerator(
                                   rotation_range=30,
                                   zoom_range=0.5,
                                   horizontal_flip=True)
datagen_validation = ImageDataGenerator(
                                   rotation_range=30,
                                   zoom_range=0.5,
                                   horizontal_flip=True)

train_generator = datagen_train.flow_from_directory(
    base_path+"train",target_size=(pic_size,pic_size),
    color_mode="grayscale",
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)

validation_generator = datagen_validation.flow_from_directory(
    base_path+"validation",target_size=(pic_size,pic_size),
    color_mode="grayscale",
    batch_size=batch_size,
    class_mode='categorical',
    shuffle = True)


nb_classes = 7

# initialize the CNN
model = Sequential()
Example #24
training_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)
validation_datagen = ImageDataGenerator(rescale=1. / 255)

train_gen = training_datagen.flow_from_directory(
    TRAINING_DIR,
    target_size=(150, 150),
    class_mode='categorical',
    batch_size=126
)
validation_gen = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    target_size=(150, 150),
    class_mode='categorical',
    batch_size=126
)
a = next(train_gen)[0][0]
model = tf.keras.Sequential([
    Conv2D(64, 3, activation='relu', input_shape=(150, 150, 3)),
    MaxPooling2D(2, 2),
    Conv2D(64, 3, activation='relu'),
    MaxPooling2D(2, 2),
    Conv2D(128, 3, activation='relu'),
Example #25
model.summary()

# In[5]:

from keras_preprocessing.image import ImageDataGenerator

# In[9]:

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
training_set = train_datagen.flow_from_directory(
    '/model_files/chest-xray-pneumonia/chest_xray/chest_xray/train/',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')

# In[10]:

test_datagen = ImageDataGenerator(rescale=1. / 255)
test_set = test_datagen.flow_from_directory(
    '/model_files/chest-xray-pneumonia/chest_xray/chest_xray/test/',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')

# In[36]:
model.fit(training_set,
          steps_per_epoch=100,
Example #26
img_height, img_width = 56, 56

"""

> Image Processing

"""

train_ImageGen = ImageDataGenerator(
    rescale = 1./255
)

train_data = train_ImageGen.flow_from_directory(directory = train_path,
                                                target_size = (img_height, img_width ),   
                                                batch_size = 128,
                                                shuffle=True,
                                                color_mode = 'rgb',
                                                class_mode = 'categorical'
)

valid_datagen = ImageDataGenerator(rescale = 1./255)

test_data = valid_datagen.flow_from_directory(directory = test_path,
                                              target_size = (img_height, img_width),
                                              batch_size = 128,
                                              shuffle=False,
                                              color_mode = 'rgb',
                                              class_mode = 'categorical'
)

"""
])

model.compile(optimizer=RMSprop(lr=0.001),
              loss='categorical_crossentropy',
              metrics=['acc'])
model.summary()

TRAINING_DIR = '/home/siddharth.soni/TensorFlow_fast_slow/Training/'
VALIDATION_DIR = '/home/siddharth.soni/TensorFlow_fast_slow/Validation/'

train_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
validation_datagen = ImageDataGenerator(rescale=1.0 / 255.0)

train_datagenerator = train_datagen.flow_from_directory(
    TRAINING_DIR,
    batch_size=20,
    target_size=(150, 150),
    class_mode='categorical')

validation_datagenerator = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    batch_size=20,
    target_size=(150, 150),
    class_mode='categorical')

history = model.fit(train_datagenerator,
                    steps_per_epoch=765 // 20,
                    epochs=10,
                    verbose=True,
                    validation_data=validation_datagenerator,
                    validation_steps=145 // 20,
from keras.models import Sequential
from keras import layers

training_datagen = ImageDataGenerator(rescale=1. / 255,
                                      rotation_range=45,
                                      width_shift_range=0.2,
                                      height_shift_range=0.2,
                                      shear_range=0.2,
                                      zoom_range=0.2,
                                      horizontal_flip=True,
                                      fill_mode='nearest')

validation_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = training_datagen.flow_from_directory(
    "../dataset/rock_paper_scissors/training_set",
    target_size=(150, 150),
    class_mode='categorical')

validation_generator = validation_datagen.flow_from_directory(
    "../dataset/rock_paper_scissors/test_set",
    target_size=(150, 150),
    class_mode='categorical')

rpsmodel = Sequential([
    layers.Conv2D(64, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D(2, 2),
    layers.Conv2D(128, (3, 3), activation='relu'),
Example #29
def create_model():
    model = Sequential()

    # Step 1 - Convolution
    input_shape = (64, 64, 3
                   )  # 3 channels (coloured image) and image dimensions
    model.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      input_shape=input_shape,
                      activation='relu'))

    # Step 2 - Pooling (Max pooling technique)
    # # We are reducing the complexity keeping information
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Adding second Convolution layer
    model.add(Convolution2D(filters=32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    # # Put the pooled feature map in one big vector
    model.add(Flatten())

    # Step 4 - Full connection
    model.add(Dense(128, activation='relu'))
    # Binary outcome -> sigmoid
    # More -> softmax
    model.add(Dense(1, activation='sigmoid'))

    # Compiling the CNN
    # # Binary outcome -> binary_crossentropy
    # # More -> categorical_crossentropy
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # Fitting the CNN to the image
    # # We are using image augmentation
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    training_set = train_datagen.flow_from_directory(
        'dataset/training_set',
        target_size=(64, 64),  # size of images
        batch_size=32,
        class_mode='binary')
    test_set = test_datagen.flow_from_directory('dataset/test_set',
                                                target_size=(64, 64),
                                                batch_size=32,
                                                class_mode='binary')
    model.fit(
        training_set,
        steps_per_epoch=8000 // 32,  # number of training images / batch size
        epochs=25,
        validation_data=test_set,
        validation_steps=2000 // 32)

    model.save("model.h5")
    return model
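A possible usage sketch for create_model, predicting a single image after training; the file name and label mapping are assumptions:

import numpy as np
from keras.preprocessing import image

model = create_model()                                      # trains and saves model.h5
img = image.load_img('sample.jpg', target_size=(64, 64))    # hypothetical test image
arr = np.expand_dims(image.img_to_array(img) / 255., axis=0)  # same rescaling as training
print('class 1' if model.predict(arr)[0][0] >= 0.5 else 'class 0')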
####################################
# Starting training and validation generator

# Randomize data set
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   vertical_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

# Generator flow from directory
train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    target_size=(img_width,
                                                                 img_heigth),
                                                    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_heigth),
    class_mode='categorical')

# Compile and full connection
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.fit_generator(train_generator,
                    steps_per_epoch=nb_train_samples / batch_size,
                    validation_data=validation_generator,