def train(self, x_train, y_train, x_test, y_test, batch_size, epochs,
              save_interval):
        """Adversarially train the autoencoder/discriminator pair.

        Args:
            x_train, y_train: training images and labels.
            x_test, y_test: held-out data, forwarded to saveLogs() only.
            batch_size: number of images per training batch.
            epochs: number of passes over the training set.
            save_interval: save weights/logs every `save_interval` epochs.
        """
        # Image augmentation object (all transforms currently disabled)
        datagen = ImageDataGenerator(
            rotation_range=0,  # Rotation in angles
            width_shift_range=0.,
            height_shift_range=0.,
            shear_range=0.,  # Image shearing, counter-clockwise
            horizontal_flip=False,  # TODO: These may mess up the training
            vertical_flip=False,
            fill_mode='nearest')
        # Fit internal statistics to the data (deterministic via seed)
        datagen.fit(x_train, seed=self.weight_seed)

        # Main loop
        for epoch in range(epochs):
            # Batch counter
            batch_idx = 0
            for imgs, y_batch in datagen.flow(x_train,
                                              y_train,
                                              shuffle=False,
                                              batch_size=batch_size,
                                              seed=(self.batch_seed + epoch)):
                batch_idx = batch_idx + 1
                # Encode a batch of real images into the latent space
                latent_fake = self.encoder.predict(imgs)

                # Generate random latent samples
                (latent_real, labels) = self.generateRandomVectors(
                    y_batch, seed=(self.batch_seed + epoch + batch_idx))

                # FIX: size the target arrays by the actual number of images
                # in this batch instead of `batch_size` — `flow` yields a
                # smaller final batch when len(x_train) is not an exact
                # multiple of batch_size, which made the label shapes
                # mismatch in train_on_batch below.
                n_samples = imgs.shape[0]
                valid = np.ones((n_samples, 1))
                fake = np.zeros((n_samples, 1))

                # Train the discriminator
                d_loss_real = self.discriminator.train_on_batch(
                    [latent_real, labels], valid)
                d_loss_fake = self.discriminator.train_on_batch(
                    [latent_fake, labels], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                # Generator wants the discriminator to label the generated
                # representations as valid
                valid_y = np.ones((n_samples, 1))

                # Train autoencoder for reconstruction
                g_loss_reconstruction = self.autoencoder.train_on_batch(
                    imgs, imgs)

                # Train generator
                g_logg_similarity = self.encoder_discriminator.train_on_batch(
                    [imgs, labels], valid_y)

                # Plot progress per batch
                print(
                    "Epoch %d, batch %d : [D loss: %f, acc: %.2f%%] [G acc: %f, mse: %f]"
                    % (epoch, batch_idx, d_loss[0], 100 * d_loss[1],
                       g_logg_similarity[1], g_loss_reconstruction))

                # Break loop by hand (Keras generators loop forever)
                if batch_idx >= len(x_train) / batch_size:
                    break

            # Write to file
            if (epoch % save_interval == 0):
                self.saveWeights(epoch)
                self.saveLogs(epoch, x_test, y_test)
# Number of fine-tuning epochs.
# NOTE(review): the config lookup below is commented out and overridden by a
# hard-coded 100 — confirm which is intended.
# epochs = app_config.getint('fine_tune_epochs')
epochs = 100

# batch size used by flow_from_directory and predict_generator
batch_size = app_config.getint('fine_tune_batch_size')

# the checkpoint to load and continue from
checkpoint_to_load = "data/models/checkpoints/model-74-0.81.h5"
# get the epoch number to continue from (parsed from the checkpoint filename)
init_epoch = get_init_epoch(checkpoint_to_load)

# prepare data augmentation configuration; validation_split holds out 25% of
# the training directory for validation (selected via subset= below)
datagen = ImageDataGenerator(rescale=1. / 255,
                             rotation_range=40,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest',
                             validation_split=0.25)

# Training subset (the remaining 75% of the images under train_data_dir).
train_generator = datagen.flow_from_directory(train_data_dir,
                                              target_size=(img_width,
                                                           img_height),
                                              batch_size=batch_size,
                                              class_mode='categorical',
                                              interpolation='lanczos',
                                              subset='training')

validation_generator = datagen.flow_from_directory(train_data_dir,
                                                   target_size=(img_width,
                                                                img_height),
    layer.trainable = False
   
for layer in model.layers[1:7]:
    layer.trainable = False

for layer in model.layers[15:]:
    layer.trainable = False

for layer in model.layers:
    print(layer, layer.trainable)
'''

# Training-time augmentation: geometric jitter applied on top of the
# backbone's preprocess_input normalization.
train_datagen=ImageDataGenerator(
    preprocessing_function=preprocess_input,
    shear_range=0.2,
    zoom_range=0.2,
    rotation_range=20,
    horizontal_flip=True,
    fill_mode='nearest')

# Validation data gets the normalization only, no augmentation.
valid_datagen=ImageDataGenerator(
        preprocessing_function=preprocess_input)

# Dataset layout: one sub-folder per class under each split directory.
train_path = 'E:/Master Project/data_split/train_dir' 
valid_path = 'E:/Master Project/data_split/val_dir' 
test_path = 'E:/Master Project/data_split/test_dir' 

train_generator = train_datagen.flow_from_directory(train_path,
                                                 target_size=(224,224),
                                                 color_mode='rgb',
                                                 batch_size=10,
示例#4
0
def go(args):
    """Train a VGG16-based 13-class classifier and report validation loss.

    Args:
        args: sequence whose first element is the dropout rate for the
            classifier head.

    Returns:
        The minimum validation loss observed during training.
    """
    drop_rate = args[0]

    # Timestamped output directory name, e.g. 2020_01_31_235959.
    now = datetime.datetime.now()
    current_time = '{:04d}_{:02d}_{:02d}_{:02d}{:02d}{:02d}'.format(
        now.year, now.month, now.day, now.hour, now.minute, now.second)

    DATASET_PATH = './dataset'
    BATCH_SIZE = 20
    NUM_CLASS = 13
    NUM_EPOCHS = 20
    SAVE_PATH = current_time
    os.mkdir(SAVE_PATH)

    # Augmentation: flips + reflect fill, VGG preprocess_input normalization;
    # 10% of the train directory is held out for validation.
    train_datagen = ImageDataGenerator(
        # rotation_range=rotation_range,
        # width_shift_range=width_shift_range,
        # height_shift_range=height_shift_range,
        # shear_range=shear_range,
        # zoom_range=zoom_range,
        preprocessing_function=preprocess_input,
        horizontal_flip=True,
        fill_mode='reflect',
        cval=0,
        validation_split=0.1)

    train_generator = train_datagen.flow_from_directory(
        os.path.join(DATASET_PATH, 'train'),
        target_size=(256, 256),
        interpolation='bicubic',
        class_mode='categorical',
        shuffle=True,
        batch_size=BATCH_SIZE,
        subset='training')

    validation_generator = train_datagen.flow_from_directory(
        os.path.join(DATASET_PATH, 'train'),
        target_size=(256, 256),
        interpolation='bicubic',
        class_mode='categorical',
        batch_size=BATCH_SIZE,
        subset='validation')  # set as validation data

    # VGG16 convolutional base with ImageNet weights, no classifier head.
    net = VGG16(include_top=False,
                weights='imagenet',
                input_tensor=None,
                input_shape=(256, 256, 3))
    x = net.output
    x = Flatten()(x)
    x = Dropout(drop_rate)(x)

    x = Dense(1024, activation='relu', name='fc1')(x)
    x = Dropout(drop_rate)(x)
    x = Dense(512, activation='relu', name='fc2')(x)
    output_layer = Dense(NUM_CLASS, activation='softmax', name='softmax')(x)

    net_final = Model(inputs=net.input, outputs=output_layer)

    # Freeze the conv base; train only the last 4 layers (the new head).
    for layer in net_final.layers[:-4]:
        layer.trainable = False
    for layer in net_final.layers[-4:]:
        layer.trainable = True

    net_final.compile(optimizer=Adam(lr=1e-4),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

    # Checkpoint the best (lowest val_loss) model every epoch.
    mcp_save = ModelCheckpoint(os.path.join(SAVE_PATH,
                                            'model-vgg16-final_best.h5'),
                               monitor='val_loss',
                               verbose=1,
                               save_best_only=True,
                               save_weights_only=False,
                               mode='min',
                               period=1)
    # FIX: summary() prints the table itself and returns None, so wrapping it
    # in print() previously emitted a spurious trailing "None".
    net_final.summary()
    history = net_final.fit_generator(
        train_generator,
        steps_per_epoch=train_generator.samples // BATCH_SIZE,
        validation_data=validation_generator,
        validation_steps=validation_generator.samples // BATCH_SIZE,
        epochs=NUM_EPOCHS,
        callbacks=[mcp_save],
        verbose=1)

    net_final.save(os.path.join(SAVE_PATH, 'model-vgg16-final.h5'))
    # Hoisted: the best validation loss was previously recomputed three times.
    best_val_loss = min(history.history['val_loss'])
    with open(os.path.join(SAVE_PATH, 'min_val_loss.txt'), 'a') as out_file:
        out_file.write(str(best_val_loss))
    print('Loss:', best_val_loss)

    return best_val_loss
               padding="same",
               activation="relu"))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(num_classes, activation='softmax'))
    return model


# Build the CNN and print its layer table.
model = BaseModel()
model.summary()

### create data generator
# Mild augmentation: small rotations, shifts and zoom.
datagen = ImageDataGenerator(rotation_range=4,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             zoom_range=.1)

batch_size = 32
# NOTE(review): the same augmenting generator feeds validation and test
# batches — confirm that evaluating on augmented data is intended.
train_batches = datagen.flow(x_train, y_train, batch_size=batch_size)
valid_batches = datagen.flow(x_valid, y_valid, batch_size=batch_size)
test_batches = datagen.flow(x_test, y_test, batch_size=batch_size)

##### parameters for the model compilation + training
opt = keras.optimizers.Adam(learning_rate=1e-5)

# Stop after 7 stagnant epochs; halve the LR after 2, with a 2-epoch cooldown.
early_stop = EarlyStopping('val_loss', patience=7)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.5, patience=2, cooldown=2)
num_epochs = 20

#---
def make_generators(args):
    """Build the train/validation/test directory iterators.

    Args:
        args: parsed CLI options providing `dataset` (root directory with
            train/validation/test sub-dirs), `eraser` (bool), `image_size`
            and `batch_size`.

    Returns:
        Tuple (train, validation, test) of flow_from_directory iterators.
    """
    train_dir = os.path.join(args.dataset, "train")
    val_dir = os.path.join(args.dataset, "validation")
    test_dir = os.path.join(args.dataset, "test")

    # Optional random-erasing augmentation, applied at pixel level.
    erase_fn = get_random_eraser(pixel_level=True) if args.eraser else None

    # Training pipeline: rescaling plus light geometric jitter and flips.
    train_aug = ImageDataGenerator(
        rescale=1.0 / 255,
        rotation_range=30,
        #zoom_range=[0, 0.2],
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.15,
        horizontal_flip=True,
        preprocessing_function=erase_fn,
        fill_mode="nearest")

    # Validation and test pipelines: rescaling only, no augmentation.
    val_aug = ImageDataGenerator(rescale=1.0 / 255)
    test_aug = ImageDataGenerator(rescale=1.0 / 255)

    # ImageNet per-channel means.
    # NOTE(review): `mean` only takes effect when featurewise centering is
    # enabled on the generator — confirm this assignment is intentional.
    imagenet_mean = np.array([123.68, 116.779, 103.939], dtype="float32")
    for aug in (train_aug, val_aug, test_aug):
        aug.mean = imagenet_mean

    size = (args.image_size, args.image_size)

    # Training iterator shuffles; validation/test keep file order so that
    # predictions line up with labels.
    train_gen = train_aug.flow_from_directory(train_dir,
                                              class_mode="binary",
                                              target_size=size,
                                              color_mode="rgb",
                                              shuffle=True,
                                              batch_size=args.batch_size)
    val_gen = val_aug.flow_from_directory(val_dir,
                                          class_mode="binary",
                                          target_size=size,
                                          color_mode="rgb",
                                          shuffle=False,
                                          batch_size=args.batch_size)
    test_gen = test_aug.flow_from_directory(test_dir,
                                            class_mode="binary",
                                            target_size=size,
                                            color_mode="rgb",
                                            shuffle=False,
                                            batch_size=args.batch_size)

    print(test_gen.class_indices.keys())
    return train_gen, val_gen, test_gen
from keras.layers import Dense

# LeNet-5-style network for 64x64 RGB inputs.
classifier = Sequential()
classifier.add(Conv2D(6, kernel_size=(5,5), activation="tanh", input_shape=(64, 64, 3)))
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(Conv2D(16, kernel_size=(5,5), activation="tanh"))
classifier.add(MaxPooling2D(pool_size=(2,2)))
classifier.add(Flatten())
classifier.add(Dense(120, activation="tanh"))
classifier.add(Dense(84, activation="tanh"))
# NOTE(review): a 5-unit tanh output trained with MSE is unusual for
# multi-class classification — softmax + categorical_crossentropy would be
# conventional; confirm this is deliberate.
classifier.add(Dense(5, activation="tanh"))
classifier.compile(loss = keras.metrics.mse, optimizer = keras.optimizers.Adam(), metrics=["accuracy"])

from keras.preprocessing.image import ImageDataGenerator

# Pixel values scaled to [0, 1]; no other augmentation.
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)

# NOTE(review): the training and test sets read the same directory — verify
# that a separate test split was not intended.
training_set = train_datagen.flow_from_directory('/home/murugesh/PycharmProjects/multi_class_classifier/assets',
                                                 target_size = (64, 64),
                                                 batch_size = 32)

test_set = test_datagen.flow_from_directory('/home/murugesh/PycharmProjects/multi_class_classifier/assets',
                                            target_size = (64, 64),
                                            batch_size = 32)

model = classifier.fit_generator(training_set,
                         steps_per_epoch = 80,
                         epochs = 10,
                         verbose=1,
                         validation_data = test_set,    
示例#8
0
# Labels as uint8; images as float16 scaled to [0, 1] to halve memory use.
y_train = np.array(y_train, np.uint8)
x_train = np.array(x_train, np.float16) / 255.0
y_valid = np.array(y_valid, np.uint8)
x_valid = np.array(x_valid, np.float16) / 255.0

# Keep only the best model (lowest val_loss); stop once val_loss has not
# improved by at least 0.0002 for 5 epochs.
filepath= "best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True)
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.0002, patience=5, verbose=0, mode='auto')

callbacks_list = [checkpoint, earlystop]

# Augmentation with dataset-wide mean/std normalization (the statistics are
# computed by datagen.fit below).
datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=30,
    width_shift_range=0.3,
    height_shift_range=0.3,
    horizontal_flip=True,
    vertical_flip = False,
    fill_mode =  "reflect")
datagen.fit(x_train)


model = Sequential()
model.add(BatchNormalization(input_shape=(48, 48, 3)))
model.add(Conv2D(4, kernel_size=(5, 5),padding='same'))
model.add(LeakyReLU())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(4, kernel_size=(3, 3),padding='same'))
model.add(LeakyReLU())
model.add(MaxPooling2D(pool_size=(2, 2)))
示例#9
0
# Declare a few useful values
num_train_samples = 9013
num_val_samples = 1002
train_batch_size = 10
val_batch_size = 10
image_size = 224

# Steps per epoch: ceil so the final partial batch is still consumed.
train_steps = np.ceil(num_train_samples / train_batch_size)
val_steps = np.ceil(num_val_samples / val_batch_size)

# Set up generators: MobileNet's preprocess_input only, no extra augmentation.
train_batches = ImageDataGenerator(
    preprocessing_function= \
        keras.applications.mobilenet.preprocess_input).flow_from_directory(
    train_path,
    target_size=(image_size, image_size),
    batch_size=train_batch_size)

valid_batches = ImageDataGenerator(
    preprocessing_function= \
        keras.applications.mobilenet.preprocess_input).flow_from_directory(
    valid_path,
    target_size=(image_size, image_size),
    batch_size=val_batch_size)

test_batches = ImageDataGenerator(
    preprocessing_function= \
        keras.applications.mobilenet.preprocess_input).flow_from_directory(
    valid_path,
    target_size=(image_size, image_size),
示例#10
0
文件: imagenet10.py 项目: wrccrwx/DAC
import json
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import RMSprop
from keras.layers.advanced_activations import LeakyReLU
from keras.models import load_model
from sys import path

path.append('../DAC')
from myMetrics import *

# NOTE(review): `global` at module level is a no-op; upper/lower are
# presumably defined and used elsewhere — confirm.
global upper, lower
# Light augmentation: small shifts/rotations, channel jitter, a fixed 0.975
# rescale of pixel values, and mild zoom.
datagen = ImageDataGenerator(rotation_range=10,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             channel_shift_range=0.05,
                             horizontal_flip=True,
                             rescale=0.975,
                             zoom_range=[0.95, 1.05])


class Adaptive(Layer):
    def __init__(self, **kwargs):
        super(Adaptive, self).__init__(**kwargs)

    def build(self, input_shape):
        self.nb_sample = input_shape[0]
        self.nb_dim = input_shape[1]

    def call(self, x, mask=None):
        y = self.transfer(x)
def fine_tune():
    """Fine-tune the last VGG16 conv block under a pre-trained classifier head."""
    epochs = 10

    # Convolutional base with ImageNet weights, no classifier head.
    vgg_base = applications.VGG16(weights='imagenet',
                                  include_top=False,
                                  input_shape=(256, 256, 3))

    # Classifier head; it starts from the fully trained bottleneck weights so
    # that fine-tuning does not wreck the pre-trained conv filters.
    head = Sequential()
    head.add(Flatten(input_shape=vgg_base.output_shape[1:]))
    head.add(Dense(256, activation='relu'))
    head.add(Dropout(0.5))
    head.add(Dense(1, activation='sigmoid'))
    head.load_weights(str(exp_url) + 'models/bottleneck_30_epochs.h5')

    # Stack the head on top of the convolutional base.
    model = Model(inputs=vgg_base.input, outputs=head(vgg_base.output))

    # Freeze the first 15 layers (up to the last conv block); only the
    # remaining layers and the head are trained.
    for frozen in model.layers[:15]:
        frozen.trainable = False

    # Very slow SGD with momentum so fine-tuning only nudges the weights.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metrics=['accuracy'])

    # Data pipelines: augmented training images, plain rescaled validation.
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')
    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    # Fine-tune the model.
    model.fit_generator(train_generator,
                        steps_per_epoch=train_samples // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=validation_samples // batch_size)

    model.save_weights(str(exp_url) + 'models/finetuning_30epochs_vgg.h5')
    model.save(str(exp_url) + 'models/theultimate.h5')

    # Report loss and accuracy on the validation set.
    print(model.evaluate_generator(validation_generator, validation_samples))
示例#12
0
# coding:utf-8
'''
Created on 2017/12/26.

@author: chk01
'''
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from PIL import Image

# Augmentation demo: shifts, shear, zoom and horizontal flips.
datagen = ImageDataGenerator(rotation_range=0.2,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')

img = load_img(
    '1.jpg')  # this is a PIL image, please replace to your own file path
# NumPy array of shape (height, width, channels) under the default
# channels_last image format.
x = img_to_array(img)
x = x.reshape(
    (1, ) + x.shape)  # add a leading batch axis: (1, height, width, channels)

# the .flow() command below generates batches of randomly transformed images
# and saves the results to the `preview/` directory

i = 0
for batch in datagen.flow(x,
                          batch_size=1,
                          save_to_dir='/',
                          save_prefix='lena',
示例#13
0
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    return model


# Training hyper-parameters.
nb_classes = 10
nb_epoch = 30
nb_step = 6
batch_size = 64

# Load the full dataset into memory.
x, y = loadImages()

from keras.preprocessing.image import ImageDataGenerator
# No augmentation configured; the generator only batches the data.
dataGenerator = ImageDataGenerator()
dataGenerator.fit(x)
data_generator = dataGenerator.flow(x, y, batch_size,
                                    True)  # generator used to produce batches of data

model = Net_model(nb_classes=nb_classes, lr=0.0001)  # load the network model

history = model.fit_generator(data_generator,
                              epochs=nb_epoch,
                              steps_per_epoch=nb_step,
                              shuffle=True)  # train the network

model.save_weights('./star_trained_model_weights.h5')  # save the trained weights in HDF5 format
print("DONE, model saved in path")

end = time.time()
示例#14
0
    # Multi-class setup with matching loss.
    class_mode = 'categorical'
    loss_function = 'categorical_crossentropy'

    model_name = 'testing_model'

    model = create_cnn(input_shape, loss=loss_function)


    # Save the best model by val_loss; stop after 5 stagnant epochs.
    call_backs = [ModelCheckpoint(filepath='/Users/christopherlawton/galvanize/module_2/capstone_2/save_model/{}'.format(model_name),
                                monitor='val_loss',
                                save_best_only=True),
                                EarlyStopping(monitor='val_loss', patience=5, verbose=0)]

    train_datagen = ImageDataGenerator(
                    rescale=1./scale,
                    rotation_range=0.4,
                    width_shift_range=0.2,
                    height_shift_range=0.2,
                    horizontal_flip=True)

    # NOTE(review): this name is misspelled ('validatiobn'); the generator is
    # referenced below as `validation_datagen`, which will raise NameError at
    # runtime — fix the spelling here or at the use site.
    validatiobn_datagen = ImageDataGenerator(
                    rescale=1./scale)

    train_generator = train_datagen.flow_from_directory(
                        train_path,
                        color_mode='grayscale',
                        target_size=target_size,
                        batch_size=batch_size,
                        class_mode=class_mode,
                        shuffle=True)

    validation_generator = validation_datagen.flow_from_directory(
示例#15
0
# Compile the CNN
# adam - a flavour of stochastic gradient descent
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

# Part 2 - Training the network on the images
# Image augmentation - used to avoid overfitting
# It creates new batches of slightly modified images, which virtually
# enlarges the training set
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    rescale=1. / 255,  # Scale pixel values to [0, 1]
    shear_range=0.2,  # Geometric (shear) transformations
    zoom_range=0.2,  # Random zoom
    horizontal_flip=True)  # Flipping

test_datagen = ImageDataGenerator(rescale=1. /
                                  255)  # Only rescaling is needed

training_set = train_datagen.flow_from_directory(
    'dataset/training_set',
    target_size=(64, 64),  # Input size expected by the CNN
    batch_size=32,  # Number of images per batch
    class_mode='binary')  # 2 classes

test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
示例#16
0
    # SGD with Nesterov momentum.
    sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    # set callbacks: TensorBoard logging, LR schedule, periodic checkpoints
    tb_cb = TensorBoard(log_dir='./densenet/', histogram_freq=0)
    change_lr = LearningRateScheduler(scheduler)
    ckpt = ModelCheckpoint('./ckpt.h5',
                           save_best_only=False,
                           mode='auto',
                           period=10)  # checkpoint every 10 epochs
    cbks = [change_lr, tb_cb, ckpt]

    # set data augmentation: flips + small shifts, constant-zero fill
    print('Using real-time data augmentation.')
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.125,
                                 height_shift_range=0.125,
                                 fill_mode='constant',
                                 cval=0.)

    datagen.fit(x_train)

    # start training
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        steps_per_epoch=iterations,
                        epochs=epochs,
                        callbacks=cbks,
                        validation_data=(x_test, y_test))
    model.save('densenet.h5')
示例#17
0
# Append the file extension to each GalaxyID to form the image filename.
csv_reader["id"] = csv_reader['GalaxyID'].astype(str).apply(append_ext)

# specify the image dimensions before passing through the input layer of the CNN
IMG_WIDTH = 256
IMG_HEIGHT = 256
input_shape = (IMG_WIDTH, IMG_HEIGHT, 1)

# a timer so results can be recorded
start = timeit.default_timer()
# the ImageDataGenerator class is crucial for the data augmentation:
# rotating and varying the shear and zoom ranges presents new views of
# each image to the network

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=45,
    shear_range=0.2,
    zoom_range=0.2,
    # the validation split controls how images are divided between the
    # training and validation stages: 20% are held out for validation
    validation_split=0.20)

test_datagen = ImageDataGenerator(rescale=1. / 255)
#both training sets use the image training folder but due the validation split 20% can be soley used for validation, like testing the training data
training_set = train_datagen.flow_from_dataframe(
    dataframe=csv_reader,
    directory='C:/Users/itsmr/Desktop/CS 3rd Year/images_training_rev1',
    x_col="id",
    #the classes array is set to the y column to represent each class
    y_col=classes,
    target_size=(IMG_WIDTH, IMG_HEIGHT),
    #here the class array is used to define the classes used by class mode input is raw for the data as there is no definitive class
    classes=classes,
示例#18
0
def create_model():
    """Build a small binary CNN classifier for 256x256 RGB images.

    Returns:
        An uncompiled Sequential model with a single sigmoid output unit.
    """
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
                     input_shape=(256, 256, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    # BUG FIX: softmax over a single unit always outputs 1.0, so the model
    # could never discriminate the two classes. With one output unit and
    # class_mode='binary' (see the generators below) the activation must be
    # sigmoid, paired with binary_crossentropy at compile time.
    model.add(Dense(1, activation='sigmoid'))

    return model

# Rescaled, channels-last training pipeline (no augmentation).
train_generator = ImageDataGenerator(
  data_format="channels_last",
  rescale = 1. / 255
)

train_batches = train_generator.flow_from_directory(
    batch_size=32,
    directory='./dataset/train',
    target_size=(256, 256),
    class_mode='binary'
)

# Identical preprocessing for the validation data.
validation_generator = ImageDataGenerator(
  data_format="channels_last",
  rescale = 1. / 255
)

validation_batches = validation_generator.flow_from_directory(
    # return the new learning rate
    return alpha


# determine the total number of image paths in training, validation,
# and testing directories
totalTrain = len(list(paths.list_images(config.TRAIN_PATH)))
totalVal = len(list(paths.list_images(config.VAL_PATH)))
totalTest = len(list(paths.list_images(config.TEST_PATH)))

# initialize the training data augmentation object
trainAug = ImageDataGenerator(rescale=1 / 255.0,
                              rotation_range=20,
                              zoom_range=0.05,
                              width_shift_range=0.05,
                              height_shift_range=0.05,
                              shear_range=0.05,
                              horizontal_flip=True,
                              fill_mode="nearest")

# initialize the validation (and testing) data augmentation object
valAug = ImageDataGenerator(rescale=1 / 255.0)

# initialize the training generator
trainGen = trainAug.flow_from_directory(config.TRAIN_PATH,
                                        class_mode="categorical",
                                        target_size=(64, 64),
                                        color_mode="rgb",
                                        shuffle=True,
                                        batch_size=BS)
示例#20
0
# Image and training hyper-parameters.
WIDTH, HEIGHT = (664, 485)

DROPOUT = 0.2
CLASSES = 2
BATCH_SIZE = 16
NUM_EPOCHS = 20
INIT_LR = 0.0001

# Chest X-ray dataset layout: train/val/test sub-directories.
BASE_PATH = 'data/chest_xray/'
TRAIN_PATH = BASE_PATH + 'train'
VAL_PATH = BASE_PATH + 'val'
TEST_PATH = BASE_PATH + 'test'

# Training augmentation; validation/test data only gets rescaled.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   rotation_range=10,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

# Note: Keras target_size is (height, width).
train_generator = train_datagen.flow_from_directory(TRAIN_PATH,
                                                    target_size=(HEIGHT,
                                                                 WIDTH),
                                                    color_mode='rgb',
                                                    class_mode='categorical',
                                                    batch_size=BATCH_SIZE)

validation_generator = test_datagen.flow_from_directory(
    VAL_PATH,
    target_size=(HEIGHT, WIDTH),
示例#21
0
WIDTH, HEIGHT = 256, 256
# === ===== ===== ===== ===

# Ensure the model directory for this VERSION exists.
target_dir = './' + VERSION + '/Model/'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)

# Load a previously trained model and its weights from disk.
model = './' + VERSION + '/Model/model.h5'
weights = './' + VERSION + '/Model/weights.h5'
print("Loading model from:", model)

cnn = load_model(model)
cnn.load_weights(weights)

# Training augmentation ("entrenamiento" = training).
entrenamiento_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)

# Validation data only gets rescaled.
test_datagen = ImageDataGenerator(rescale=1. / 255)

entrenamiento_generador = entrenamiento_datagen.flow_from_directory(
    INPUT_PATH_TRAIN,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical')

validacion_generador = test_datagen.flow_from_directory(
    INPUT_PATH_VAL,
    target_size=(HEIGHT, WIDTH),
    batch_size=BATCH_SIZE,
    class_mode='categorical')
This script trains a CNN classifier for predicting the viewpoint of a car.
'''
import numpy as np
import pandas
from keras.applications import InceptionV3
from keras.engine import Input
from keras.engine import Model
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.preprocessing import image as image_utils
from keras.utils import np_utils
from keras.callbacks import TensorBoard
from keras.preprocessing.image import ImageDataGenerator

# Some image augmentation and normalization for the training images.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=False)

# Test images are only rescaled.
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    '/home/ubuntu/csc420_data/segmented_cars',  # this is the target directory
    target_size=(128, 128),  # all images will be resized to 128x128
    batch_size=32,
    class_mode='categorical')

# Fixed-size input tensor for the network.
input_tensor = Input(shape=(128, 128, 3))

# We use the InceptionV3 /GoogLeNet model but retrain it to classify out datset.
base_model = InceptionV3(input_tensor=input_tensor,
                         weights='imagenet',
num_of_train_samples = 8983 
num_of_valid_samples = 2987


############################################
# Set up data generators for training, testing and validation.
# Note: for testing, set shuffle = False (for a proper confusion matrix).

# This will do preprocessing and real-time data augmentation:
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40, # randomly rotate images in the range 
                                                      # (degrees, 0 to 40)
                                   width_shift_range=0.2, # randomly shift images horizontally 
                                                          # (fraction of total width)
                                   height_shift_range=0.2,# randomly shift images vertically 
                                                          # (fraction of total height)
                                   shear_range=30, # randomly apply shearing transformations
                                   zoom_range=0.3, # randomly zoom inside pictures
                                   horizontal_flip=True, # randomly flip images
                                   fill_mode='nearest')
# ***Note 1: the augmentation specification and ranges can be modified, or the
# augmentation skipped entirely (then only rescale, as for the validation and
# test data below). More information: https://keras.io/api/preprocessing/image/

# ***Note 2: ImageDataGenerator is not an "additive" operation: it takes the
# original data, randomly transforms it, and returns only the transformed
# data, so the model sees a slightly modified training set (e.g. zoomed,
# shifted, rotated) in every iteration. More info:
# https://www.pyimagesearch.com/2019/07/08/keras-imagedatagenerator-and-data-augmentation/

#**************
# this is the augmentation configuration for validation and testing:
# only rescaling, which maps pixel values into [0, 1]
test_datagen = ImageDataGenerator(rescale=1. / 255)
valid_datagen = ImageDataGenerator(rescale=1. / 255)
示例#24
0
def save_bottlebeck_features():
    """Cache VGG16 bottleneck features for the train and validation sets.

    Builds the VGG16 convolutional base (channels-first), loads pretrained
    weights layer-by-layer from ``weights_path`` (an HDF5 file in the old
    Keras ``layer_k`` / ``param_p`` format), streams the images from
    ``train_data_dir`` / ``validation_data_dir`` through the network, and
    saves the predicted feature maps as ``.npy`` files on disk.

    Relies on module-level globals: ``weights_path``, ``img_width``,
    ``img_height``, ``train_data_dir``, ``validation_data_dir``,
    ``nb_train_samples``, ``nb_validation_samples``.
    """
    # No scaling, only mean subtraction (BGR ImageNet means) — matches the
    # caffe-style preprocessing the pretrained weights were trained with.
    datagen = ImageDataGenerator(rescale=1.)
    datagen.mean = np.array([103.939, 116.779, 123.68],
                            dtype=np.float32).reshape(3, 1, 1)

    # VGG16 convolutional blocks (fully-connected head omitted).
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    assert os.path.exists(
        weights_path
    ), 'Model weights not found (see "weights_path" variable in script).'
    # Open the weights file read-only; the context manager guarantees it is
    # closed even if a layer fails to load (the original relied on a manual
    # close and the deprecated default open mode).
    with h5py.File(weights_path, 'r') as f:
        for k in range(f.attrs['nb_layers']):
            if k >= len(model.layers):
                # Skip the fully-connected weights stored after the conv base.
                break
            g = f['layer_{}'.format(k)]
            weights = [
                g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])
            ]
            model.layers[k].set_weights(weights)
    print('Model loaded.')

    # class_mode=None: yield images only (no labels); shuffle=False keeps the
    # feature order aligned with the directory listing.
    generator = datagen.flow_from_directory(train_data_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=32,
                                            class_mode=None,
                                            shuffle=False)
    bottleneck_features_train = model.predict_generator(
        generator, nb_train_samples)
    # BUG FIX: np.save() writes binary data, so passing a file object opened
    # in text mode ('w') fails under Python 3. Pass the path and let np.save
    # open/close the file itself.
    np.save('bottleneck_features_train.npy', bottleneck_features_train)

    generator = datagen.flow_from_directory(validation_data_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=32,
                                            class_mode=None,
                                            shuffle=False)
    bottleneck_features_validation = model.predict_generator(
        generator, nb_validation_samples)
    np.save('bottleneck_features_validation.npy',
            bottleneck_features_validation)
示例#25
0
# Scale test-set pixel intensities into [0, 1].
testX = testX.astype('float') / 255.

# One-hot encode the integer class labels; fit the binarizer on the
# training labels and reuse the same mapping for the test labels.
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# CIFAR-10 class names, in label-index order.
label_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

# Training-time augmentation: mild rotations and shifts plus mirroring.
aug = ImageDataGenerator(horizontal_flip=True,
                         rotation_range=10,
                         width_shift_range=0.1,
                         height_shift_range=0.1)

# loop over number of models to train
for i in range(0, args['num_models']):
    # initialize optimizer and compile model
    print(f"[INFO] training model {i+1}/{args['num_models']}...")
    opt = SGD(lr=0.01, decay=0.01 / 40, momentum=0.9, nesterov=True)
    model = MiniVGGNet.build(32, 32, 3, len(label_names))
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # train network
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=64),
                            validation_data=(testX, testY),
示例#26
0
import keras
from keras.layers import (Activation, BatchNormalization, Conv2D, Dense,
                          Dropout, Flatten, MaxPooling2D)
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator

# Input geometry and training configuration.
img_rows, img_col = 74, 74
Batch_size = 16
no_of_classes = 3

# Dataset locations (raw strings because of the Windows backslashes).
train_dir = r'E:\face rec\train'
validation_dir = r'E:\face rec\validation'

# Training images are rescaled and randomly zoomed, sheared and flipped;
# validation images are only rescaled.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.4,
                                   zoom_range=0.3,
                                   horizontal_flip=True,
                                   vertical_flip=True)
validation_datagen = ImageDataGenerator(rescale=1. / 255)

# Stream shuffled, labelled RGB batches straight from the directory tree
# (one sub-directory per class).
training_data = train_datagen.flow_from_directory(train_dir,
                                                  target_size=(img_rows,
                                                               img_col),
                                                  color_mode='rgb',
                                                  class_mode='categorical',
                                                  batch_size=Batch_size,
                                                  shuffle=True)
validation_data = validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(img_rows, img_col),
    color_mode='rgb',
示例#27
0
# Number of distinct classes produced by the (already fitted) binarizer.
n_classes = len(label_binarizer.classes_)

print(label_binarizer.classes_)

# Normalise pixel intensities to [0, 1].
# BUG FIX: the original divided by 225.0 — a typo for 255.0, the maximum
# 8-bit pixel value (every other rescale in this codebase uses 1/255).
np_image_list = np.array(image_list, dtype=np.float16) / 255.0

print("[INFO] Spliting data to train, test")
# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(np_image_list,
                                                    image_labels,
                                                    test_size=0.2,
                                                    random_state=42)

# Real-time data augmentation for training.
aug = ImageDataGenerator(rotation_range=25,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# First convolutional block. chanDim is the channel axis used by
# BatchNormalization: -1 for channels-last (the default), 1 if the backend
# is configured channels-first.
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
if K.image_data_format() == "channels_first":
    inputShape = (depth, height, width)
    chanDim = 1
model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
示例#28
0
import os

import numpy as np
from keras import optimizers
from keras.layers import (Conv2D, Dense, Dropout, Flatten, MaxPooling2D,
                          ZeroPadding2D)
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator

# Target size every image is resized to before entering the network.
img_width, img_height = 150, 150

train_data_dir = 'data/train'
validation_data_dir = 'data/validation'

# A single generator for both splits: no augmentation, just rescale the
# pixel values from [0, 255] into the [0, 1] interval.
datagen = ImageDataGenerator(rescale=1. / 255)

# Infer binary class labels automatically from the directory structure.
train_generator = datagen.flow_from_directory(train_data_dir,
                                              target_size=(img_width,
                                                           img_height),
                                              class_mode='binary',
                                              batch_size=16)

validation_generator = datagen.flow_from_directory(validation_data_dir,
                                                   target_size=(img_width,
                                                                img_height),
                                                   class_mode='binary',
                                                   batch_size=32)
"""
This is the simple keras CNN model, CNN models often don't need more than 3 layers when working with small datasets. The focus here is to set alot of 
filters on the layers, so the model have the possibility too find alot of patterns for the diffrent kinds of dogs and cats.
示例#29
0
# Training hyper-parameters.
batch_Size = 32
steps_Per_Epoch = 32
numEpochs = 2

# Build the network: ResNet18 on 360x360 single-channel inputs, 4 classes.
# (ResNet.ResNet18 / 34 / 50 / 101 share the same call signature:
# input shape and class count.)
model = ResNet.ResNet18((360, 360, 1), 4)

# Optimisers: an Adadelta instance is created for reference, but SGD with
# Nesterov momentum is the one actually compiled into the model.
adaDelta = keras.optimizers.Adadelta(lr=1.0, rho=0.95)
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.95, nesterov=True)
model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])

# Training-time feature-wise std normalisation only (no mean centering).
datagen = ImageDataGenerator(featurewise_center=False,
                             samplewise_center=False,
                             featurewise_std_normalization=True,
                             samplewise_std_normalization=False)

# Load the images and labels, then hold out 20% for validation
# (fixed seed so the split is reproducible).
X_train, Y_train = loadTrainImageAndLabels()
x_train, x_valid, y_train, y_valid = train_test_split(
    X_train, Y_train, test_size=0.20, random_state=42)

# Append a singleton channel axis and scale pixels to [0, 1].
x_train = x_train.reshape(len(x_train), len(x_train[0]),
                          len(x_train[0][0]), 1)
x_train = x_train.astype('float32')
x_train /= 255

x_valid = x_valid.reshape(len(x_valid), len(x_valid[0]),
                          len(x_valid[0][0]), 1)
x_valid = x_valid.astype('float32')
x_valid /= 255
    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(x_train,
                  y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  validation_data=(x_test, y_test),
                  shuffle=True,
                  callbacks=[lr_reducer, early_stopper, csv_logger])
    else:
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(featurewise_center=False,
                                     samplewise_center=False,
                                     featurewise_std_normalization=False,
                                     samplewise_std_normalization=False,
                                     zca_whitening=False,
                                     rotation_range=0,
                                     width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     vertical_flip=False)

        datagen.fit(x_train)

        model.fit_generator(datagen.flow(x_train,
                                         y_train,
                                         batch_size=batch_size),
                            steps_per_epoch=x_train.shape[0] // batch_size,
                            validation_data=(x_test, y_test),
                            epochs=nb_epoch,
                            verbose=1,
                            max_q_size=100,