Example no. 1
def adversarial_training(synthesis_eyes_dir,
                         mpii_gaze_dir,
                         refiner_model_path=None,
                         discriminator_model_path=None):
    """Adversarial training of refiner network Rθ and discriminator network Dφ."""
    #
    # define model input and output tensors
    #

    synthetic_image_tensor = layers.Input(shape=(img_height, img_width,
                                                 img_channels))
    refined_image_tensor = refiner_network(synthetic_image_tensor)

    refined_or_real_image_tensor = layers.Input(shape=(img_height, img_width,
                                                       img_channels))
    discriminator_output = discriminator_network(refined_or_real_image_tensor)

    #
    # define models
    #

    refiner_model = models.Model(inputs=synthetic_image_tensor,
                                 outputs=refined_image_tensor,
                                 name='refiner')
    discriminator_model = models.Model(inputs=refined_or_real_image_tensor,
                                       outputs=discriminator_output,
                                       name='discriminator')

    # the combined model must output the refined image along with the
    # discriminator's classification of it, for the refiner's self-regularization loss
    refiner_model_output = refiner_model(synthetic_image_tensor)
    combined_output = discriminator_model(refiner_model_output)
    combined_model = models.Model(
        inputs=synthetic_image_tensor,
        outputs=[refiner_model_output, combined_output],
        name='combined')

    discriminator_model_output_shape = discriminator_model.output_shape

    refiner_model.summary()
    discriminator_model.summary()
    combined_model.summary()

    #
    # define custom l1 loss function for the refiner
    #

    def self_regularization_loss(y_true, y_pred):
        delta = 0.0001  # FIXME: need to figure out an appropriate value for this
        return tf.multiply(delta, tf.reduce_sum(tf.abs(y_pred - y_true)))
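
    # Illustrative check (numbers made up): with delta = 1e-4, a refined batch
    # whose summed absolute difference from the synthetic input is 4.0 yields
    # a loss of 4e-4. This is SimGAN's L1 self-regularization term, scaled by delta.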

    #
    # define custom local adversarial loss (softmax for each image section) for the discriminator
    # the adversarial loss function is the sum of the cross-entropy losses over the local patches
    #

    def local_adversarial_loss(y_true, y_pred):
        # y_true and y_pred have shape (batch_size, # of local patches, 2), but really we just want to average over
        # the local patches and batch size so we can reshape to (batch_size * # of local patches, 2)
        y_true = tf.reshape(y_true, (-1, 2))
        y_pred = tf.reshape(y_pred, (-1, 2))
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_true,
                                                       logits=y_pred)

        return tf.reduce_mean(loss)
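
    # Shape walk-through (illustrative): with batch_size = 2 and 25 local
    # patches per image, y_true and y_pred arrive as (2, 25, 2) and are
    # flattened to (50, 2), so the cross-entropy is averaged uniformly over
    # every patch of every image in the batch.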

    #
    # compile models
    #

    sgd = optimizers.SGD(lr=0.001)

    refiner_model.compile(optimizer=sgd, loss=self_regularization_loss)
    discriminator_model.compile(optimizer=sgd, loss=local_adversarial_loss)
    discriminator_model.trainable = False
    combined_model.compile(
        optimizer=sgd, loss=[self_regularization_loss, local_adversarial_loss])
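
    # Keras captures each layer's `trainable` flag at compile time: the
    # standalone discriminator_model (compiled above) keeps training normally,
    # while the discriminator weights are frozen inside combined_model, which
    # is compiled after trainable is set to False.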

    #
    # data generators
    #

    datagen = image.ImageDataGenerator(
        preprocessing_function=applications.xception.preprocess_input)

    flow_from_directory_params = {
        'target_size': (img_height, img_width),
        'color_mode': 'grayscale' if img_channels == 1 else 'rgb',
        'class_mode': None,
        'batch_size': batch_size
    }

    synthetic_generator = datagen.flow_from_directory(
        directory=synthesis_eyes_dir, **flow_from_directory_params)

    real_generator = datagen.flow_from_directory(directory=mpii_gaze_dir,
                                                 **flow_from_directory_params)

    def get_image_batch(generator):
        """keras generators may generate an incomplete batch for the last batch"""
        img_batch = generator.next()
        if len(img_batch) != batch_size:
            img_batch = generator.next()

        assert len(img_batch) == batch_size

        return img_batch

    # the target labels for the cross-entropy loss layer are 0 for every yj (real) and 1 for every xi (refined)
    y_real = np.array([[[1.0, 0.0]] * discriminator_model_output_shape[1]] *
                      batch_size)
    y_refined = np.array([[[0.0, 1.0]] * discriminator_model_output_shape[1]] *
                         batch_size)
    assert y_real.shape == (batch_size, discriminator_model_output_shape[1], 2)

    if not refiner_model_path:
        # we first train the Rθ network with just self-regularization loss for 1,000 steps
        print('pre-training the refiner network...')
        gen_loss = np.zeros(shape=len(refiner_model.metrics_names))

        for i in range(1000):
            synthetic_image_batch = get_image_batch(synthetic_generator)
            gen_loss = np.add(
                refiner_model.train_on_batch(synthetic_image_batch,
                                             synthetic_image_batch), gen_loss)

            # log every `log_interval` steps
            if not i % log_interval:
                figure_name = 'refined_image_batch_pre_train_step_{}.png'.format(
                    i)
                print(
                    'Saving batch of refined images during pre-training at step: {}.'
                    .format(i))

                synthetic_image_batch = get_image_batch(synthetic_generator)
                plot_image_batch_w_labels.plot_batch(
                    np.concatenate(
                        (synthetic_image_batch,
                         refiner_model.predict_on_batch(synthetic_image_batch)
                         )),
                    os.path.join(cache_dir, figure_name),
                    label_batch=['Synthetic'] * batch_size +
                    ['Refined'] * batch_size)

                print('Refiner model self regularization loss: {}.'.format(
                    gen_loss / log_interval))
                gen_loss = np.zeros(shape=len(refiner_model.metrics_names))

        refiner_model.save(
            os.path.join(cache_dir, 'refiner_model_pre_trained.h5'))
    else:
        refiner_model.load_weights(refiner_model_path)

    if not discriminator_model_path:
        # and Dφ for 200 steps (one mini-batch for refined images, another for real)
        print('pre-training the discriminator network...')
        disc_loss = np.zeros(shape=len(discriminator_model.metrics_names))

        for _ in range(100):
            real_image_batch = get_image_batch(real_generator)
            disc_loss = np.add(
                discriminator_model.train_on_batch(real_image_batch, y_real),
                disc_loss)

            synthetic_image_batch = get_image_batch(synthetic_generator)
            refined_image_batch = refiner_model.predict_on_batch(
                synthetic_image_batch)
            disc_loss = np.add(
                discriminator_model.train_on_batch(refined_image_batch,
                                                   y_refined), disc_loss)

        discriminator_model.save(
            os.path.join(cache_dir, 'discriminator_model_pre_trained.h5'))

        # hard-coded for now
        print('Discriminator model loss: {}.'.format(disc_loss / (100 * 2)))
    else:
        discriminator_model.load_weights(discriminator_model_path)

    # TODO: what is an appropriate size for the image history buffer?
    image_history_buffer = ImageHistoryBuffer(
        (0, img_height, img_width, img_channels), batch_size * 100, batch_size)
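
    # The buffer keeps a pool of previously refined images; each discriminator
    # update below swaps half of the current refined batch for samples drawn
    # from this history, a trick the SimGAN paper uses to stabilize training.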

    combined_loss = np.zeros(shape=len(combined_model.metrics_names))
    disc_loss_real = np.zeros(shape=len(discriminator_model.metrics_names))
    disc_loss_refined = np.zeros(shape=len(discriminator_model.metrics_names))

    # see Algorithm 1 in https://arxiv.org/pdf/1612.07828v1.pdf
    for i in range(nb_steps):
        print('Step: {} of {}.'.format(i, nb_steps))

        # train the refiner
        for _ in range(k_g * 2):
            # sample a mini-batch of synthetic images
            synthetic_image_batch = get_image_batch(synthetic_generator)

            # update θ by taking an SGD step on mini-batch loss LR(θ)
            combined_loss = np.add(
                combined_model.train_on_batch(synthetic_image_batch,
                                              [synthetic_image_batch, y_real]),
                combined_loss)

        for _ in range(k_d):
            # sample a mini-batch of synthetic and real images
            synthetic_image_batch = get_image_batch(synthetic_generator)
            real_image_batch = get_image_batch(real_generator)

            # refine the synthetic images w/ the current refiner
            refined_image_batch = refiner_model.predict_on_batch(
                synthetic_image_batch)

            # use a history of refined images
            half_batch_from_image_history = (
                image_history_buffer.get_from_image_history_buffer())
            image_history_buffer.add_to_image_history_buffer(
                refined_image_batch)

            if len(half_batch_from_image_history):
                refined_image_batch[:batch_size // 2] = \
                    half_batch_from_image_history

            # update φ by taking an SGD step on mini-batch loss LD(φ)
            disc_loss_real = np.add(
                discriminator_model.train_on_batch(real_image_batch, y_real),
                disc_loss_real)
            disc_loss_refined = np.add(
                discriminator_model.train_on_batch(refined_image_batch,
                                                   y_refined),
                disc_loss_refined)

        if not i % log_interval:
            # plot batch of refined images w/ current refiner
            figure_name = 'refined_image_batch_step_{}.png'.format(i)
            print('Saving batch of refined images at adversarial step: {}.'.
                  format(i))

            synthetic_image_batch = get_image_batch(synthetic_generator)
            plot_image_batch_w_labels.plot_batch(
                np.concatenate(
                    (synthetic_image_batch,
                     refiner_model.predict_on_batch(synthetic_image_batch))),
                os.path.join(cache_dir, figure_name),
                label_batch=['Synthetic'] * batch_size +
                ['Refined'] * batch_size)

            # log loss summary
            print('Refiner model loss: {}.'.format(combined_loss /
                                                   (log_interval * k_g * 2)))
            # each accumulator receives k_d updates per adversarial step
            print('Discriminator model loss real: {}.'.format(
                disc_loss_real / (log_interval * k_d)))
            print('Discriminator model loss refined: {}.'.format(
                disc_loss_refined / (log_interval * k_d)))

            combined_loss = np.zeros(shape=len(combined_model.metrics_names))
            disc_loss_real = np.zeros(
                shape=len(discriminator_model.metrics_names))
            disc_loss_refined = np.zeros(
                shape=len(discriminator_model.metrics_names))

            # save model checkpoints
            model_checkpoint_base_name = os.path.join(cache_dir,
                                                      '{}_model_step_{}.h5')
            refiner_model.save(model_checkpoint_base_name.format('refiner', i))
            discriminator_model.save(
                model_checkpoint_base_name.format('discriminator', i))
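

if __name__ == '__main__':
    # Minimal driver sketch: the directory names are hypothetical, and the
    # module-level hyper-parameters the function reads as globals (img_height,
    # img_width, img_channels, batch_size, nb_steps, k_d, k_g, log_interval,
    # cache_dir) must already be defined.
    adversarial_training('data/synthesis_eyes', 'data/mpii_gaze')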
Example no. 2
class PlotLosses(Callback):
    """Live-plot of train/val loss; assumes Callback, clear_output and plt are imported."""

    def on_train_begin(self, logs=None):
        self.i, self.x = 0, []
        self.losses, self.val_losses = [], []

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.x.append(self.i)
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))
        self.i += 1

        clear_output(wait=True)
        plt.plot(self.x, self.losses, label="loss")
        plt.plot(self.x, self.val_losses, label="val_loss")
        plt.legend()
        plt.show()


plot_losses = PlotLosses()

datagen = km.ImageDataGenerator(rotation_range=50,
                                width_shift_range=.2,
                                height_shift_range=.2,
                                featurewise_std_normalization=True)
datagen.fit(X_train)

model.compile(loss='categorical_crossentropy',
              metrics=['accuracy'],
              optimizer='adam')

h = model.fit_generator(
    datagen.flow(X_train, y_train, batch_size=32),
    epochs=25,
    verbose=1,
    validation_data=(X_val, y_val),
    shuffle=False,
    # callbacks=[
    #     ModelCheckpoint(filepath=path_model),
    # ],
)

opt = Nadam(lr=0.001,
            beta_1=0.9,
            beta_2=0.999,
            epsilon=1e-08,
            schedule_decay=0.006)
model.compile(optimizer=opt,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

mnist = MNISTImages()

train_generator = image.ImageDataGenerator(rotation_range=20,
                                           width_shift_range=0.1,
                                           height_shift_range=0.1,
                                           shear_range=0.05,
                                           zoom_range=0.1,
                                           dim_ordering='tf').flow(
                                               mnist.train_input,
                                               mnist.train_labels,
                                               batch_size=512)

test_generator = image.ImageDataGenerator().flow(mnist.test_input,
                                                 mnist.test_labels)

for i in range(3, 7):
    # set via the backend (assumes `from keras import backend as K`), since a
    # plain attribute assignment may never reach the optimizer's lr variable
    K.set_value(model.optimizer.lr, 0.1**i)

    result = model.fit_generator(train_generator,
                                 samples_per_epoch=len(mnist.train_labels),
                                 nb_epoch=8,
                                 validation_data=test_generator,
                                 nb_val_samples=len(mnist.test_labels))
Example no. 4
y_test = onehot(y_test)

y_train[:5]

mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)

def norm_input(x): 
    return (x - mean_px) / std_px

def GetSteps(batch):
    # ceil so the final, smaller batch still counts as a step
    return int(np.ceil(batch.n / batch.batch_size))

#Data augmentation

gen = image.ImageDataGenerator(rotation_range=8.,
                               width_shift_range=0.08,
                               shear_range=0.3,
                               height_shift_range=0.08,
                               zoom_range=0.08,
                               data_format='channels_first')
batches = gen.flow(X_train, y_train, batch_size=64)
test_batches = gen.flow(X_test, y_test, batch_size=64)
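# Note: the augmenting generator is reused for the test batches above; an
# un-augmented ImageDataGenerator would be the more conventional choice for
# evaluation.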


#Batchnorm + data augmentation
def get_model_bn():
    model = Sequential(
        [
            Lambda(norm_input, input_shape=(1, 28, 28), output_shape=(1, 28, 28)),
            Convolution2D(32, (3, 3), activation='relu'),
            BatchNormalization(axis=1),
            Convolution2D(32, (3, 3), activation='relu'),
            MaxPooling2D(),
            BatchNormalization(axis=1),
            Convolution2D(64, (3, 3), activation='relu'),
Example no. 5
batches_per_epoch = 150
batch_size = 16
gamma = .5  #between 0 and 1

#image parameters
img_size = 32  #Size of square image
channels = 3  #1 for grayscale

#Model parameters
z = 100  #Generator input
h = 128  #Autoencoder hidden representation
adam = Adam(lr=0.00005)  #lr: between 0.0001 and 0.00005
#In the paper, Adam's learning rate decays if M stalls. This is not implemented.

#Build models
generator = models.decoder(z, img_size, channels)
discriminator = models.autoencoder(h, img_size, channels)
gan = models.gan(generator, discriminator)

generator.compile(loss=models.l1Loss, optimizer=adam)
discriminator.compile(loss=models.l1Loss, optimizer=adam)
gan.compile(loss=models.l1Loss, optimizer=adam)
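
# models.l1Loss is presumably a pixel-wise L1 (mean absolute error), matching
# BEGAN's autoencoder reconstruction loss; a minimal sketch under that
# assumption:
#
#   from keras import backend as K
#
#   def l1Loss(y_true, y_pred):
#       return K.mean(K.abs(y_pred - y_true))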

#Load data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
dataGenerator = image.ImageDataGenerator(
    preprocessing_function=utils.dataRescale)
batchIterator = dataGenerator.flow(X_train, batch_size=batch_size)

trainer = train.GANTrainer(generator, discriminator, gan, batchIterator)
trainer.train(epochs, batches_per_epoch, batch_size, gamma)
Example no. 6
def resnet_classifier(in_shape=(P, P, 3)):
    model = resnet50.ResNet50(weights='imagenet',
                              include_top=False,
                              input_shape=in_shape)
    flatten = Flatten()
    new_layer2 = Dense(2, activation='softmax', name='my_dense_2')
    inp2 = model.input
    out2 = new_layer2(flatten(model.output))
    model = Model(inp2, out2)
    # model.summary(line_length=150)
    return model


aug = image.ImageDataGenerator(rotation_range=50,
                               zoom_range=0.7,
                               horizontal_flip=True)

BS = 2
EPOCHS = 50
model = resnet_classifier()  # use the classifier defined above
lr = 1e-6
b1 = 0.8

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(learning_rate=lr, beta_1=b1),
              metrics=[
                  'accuracy',
                  tf.keras.metrics.AUC(),
                  tf.keras.metrics.Precision(),
                  tf.keras.metrics.Recall()
              ])
Example no. 7
import matplotlib.pyplot as plt
from keras.preprocessing import image
import os
import glob
from DeleteAllFileFromDirs import *
import numpy as np
from ImageGenerator_config import *
import uuid
path = config_fromPath

from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

datagen_rotation = ImageDataGenerator(rotation_range=360, fill_mode='constant')
datagen_shift = image.ImageDataGenerator(width_shift_range=10,
                                         height_shift_range=10,
                                         fill_mode='constant')
datagen_zoom_out = image.ImageDataGenerator(zoom_range=[1, 1.5],
                                            fill_mode='constant')
datagen_zoom_in = image.ImageDataGenerator(zoom_range=[0.5, 1],
                                           fill_mode='constant')


def generator(img, new_path, basename):
    new_path = config_toPath  # note: overrides the new_path argument with the configured output path
    x = img_to_array(img)  # NumPy array of shape (height, width, channels)
    x = x.reshape(
        (1, ) + x.shape)  # NumPy array of shape (1, height, width, channels)

    gen_data_rotation = datagen_rotation.flow(x,
                                              batch_size=1,
                                              save_to_dir=new_path + "\\" +
                                              basename)
Example no. 8
def compute_preds(model,
                  num_classes,
                  train_dir="data/model_train",
                  test_dir="data/model_valid",
                  test_csv="data/model_valid.csv"):

    batch_size = 16  # used for training as well as validation
    max_preds = 5  # number of ranked predictions (default 5)

    if model.name in ('InceptionV3', 'Xception', 'InceptionResNetV2'):
        target_size = (299, 299)
    elif model.name in ('ResNet50', 'MobileNet'):
        target_size = (224, 224)
    else:
        print("invalid model:", model.name)
        return None  # target_size is undefined for unknown models
    print("computing predictions with model", model.name)
    '''    
    num_train_imgs, num_valid_imgs = ut.create_small_case(
        sel_whales = np.arange(1,num_classes+1),  # whales to be considered
        all_train_dir = all_train_dir,
        all_train_csv = all_train_csv,
        train_dir = test_dir,
        train_csv = test_csv,
        valid_dir = None,     # no validation, copy all data into test_dir "data/model_test"
        valid_csv = None,
        train_valid = 1.,
        sub_dirs = True) 
    '''
    test_gen = image.ImageDataGenerator(rescale=1. / 255, fill_mode="nearest")

    test_flow = test_gen.flow_from_directory(
        test_dir,
        shuffle=False,
        batch_size=batch_size,
        target_size=target_size,
        class_mode=None)  # use "categorical" ??

    preds = model.predict_generator(test_flow, verbose=1)

    # whale_class_map = (test_flow.class_indices)           # get dict mapping whalenames --> class_no
    class_whale_map = ut.make_label_dict(
        directory=train_dir)  # get dict mapping class_no --> whalenames
    '''
    print("whale_class_map:")
    print(whale_class_map)
    print("class_whale_map:")
    print(class_whale_map)
    print("preds.shape:")
    print(preds.shape)
    print("preds[:10]")
    print(preds[:10])
    '''
    # get list of model predictions: one ordered list of maxpred whalenames per image
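    # preds.argsort() sorts ascending, so the last max_preds columns hold the
    # top classes; [:, ::-1] flips them into descending-probability order.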
    top_k = preds.argsort()[:, -max_preds:][:, ::-1]
    model_preds = [([class_whale_map[i] for i in line]) for line in top_k]

    # get list of true labels: one whalename per image
    true_labels = []
    file_names = []
    if test_csv != '':
        test_list = ut.read_csv(
            file_name=test_csv)  # list with (filename, whalename)
    i = 0
    for fn in test_flow.filenames:
        if i < 3:
            print("fn", fn)
        i = i + 1
        offset, directory, filename = fn.split('/')
        file_names.append(filename)
        if test_csv != '':
            whale = [line[1] for line in test_list if line[0] == filename][0]
            true_labels.append(whale)

    return file_names, model_preds, true_labels
Example no. 9
from keras.utils import to_categorical
import sys
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from keras.preprocessing import image

datagen = image.ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=0,
    width_shift_range=0,
    height_shift_range=0,
    shear_range=0.,
    zoom_range=0,
    channel_shift_range=0.,
    fill_mode='nearest',
    cval=0.0,
    horizontal_flip=False,
    vertical_flip=False,
    rescale=1. / 255,
    preprocessing_function=None,
    data_format=K.image_data_format(),
)
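
# Every argument spelled out above is its library default except
# rescale=1. / 255, so this generator only rescales pixel values.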

train_generator = datagen.flow_from_directory(
    '/home/lian19931201/datasets/num_ocr',  # this is the target directory
    target_size=(48, 48),  # all images will be resized
    batch_size=128,
    class_mode='categorical')
Example no. 10
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing import image as image_utils
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import preprocess_input
import numpy as np

datagen = image_utils.ImageDataGenerator(rescale=1. / 255,
                                         validation_split=0.2)

generator = datagen.flow_from_directory('fisheye_bmp',
                                        save_to_dir='transformed_pngs',
                                        target_size=(224, 224),
                                        batch_size=10)

#def cnnModel():
model = Sequential()
model.add(
    Conv2D(62, (5, 5),
           strides=(2, 2),
           input_shape=(224, 224, 3),
           activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (5, 5), strides=(2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
#model.add(Dropout(0.25))

# single sigmoid output to match the binary_crossentropy loss below
model.add(Dense(64, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

# Train from scratch

train_datagen = image_utils.ImageDataGenerator(rescale=1. / 255,
                                               shear_range=0.2,
                                               zoom_range=0.2,
                                               horizontal_flip=True)

test_datagen = image_utils.ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory('CovidDataset/Train',
                                                    target_size=(224, 224),
                                                    batch_size=32,
                                                    class_mode='binary')

train_generator.class_indices

validation_generator = test_datagen.flow_from_directory(
def dataAugmente(dogOrCat, image, label, image_W, image_H, batch_size,
                 capacity):
    '''
    Args:
        image: list type
        label: list type
        image_W: image width
        image_H: image height
        batch_size: batch size
        capacity: the maximum elements in queue
    Returns:
        image_batch: 4D tensor [batch_size, width, height, 3], dtype=tf.float32
        label_batch: 1D tensor [batch_size], dtype=tf.int32
    '''

    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)

    # make an input queue
    input_queue = tf.train.slice_input_producer([image, label], num_epochs=1)
    label = input_queue[1]
    image_contents = tf.read_file(
        input_queue[0])  # tf.read_file expects a scalar string tensor
    image = tf.image.decode_jpeg(image_contents, channels=3)

    #     image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    image = tf.image.resize_images(image, [image_W, image_H])

    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=64,
                                              capacity=capacity)

    datagen = im.ImageDataGenerator(featurewise_center=True,
                                    featurewise_std_normalization=True,
                                    rotation_range=20,
                                    width_shift_range=0.15,
                                    height_shift_range=0.15,
                                    horizontal_flip=True,
                                    fill_mode='nearest')
    with tf.Session() as sess:
        # Required to get the filename matching to run.

        # Coordinate the loading of image files.
        #     sess.run(tf.initialize_all_variables())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        fileNum = 0
        try:
            while 1:
                if coord.should_stop():
                    break

                imageBatch, labelBatch = sess.run([image_batch, label_batch])
                datagen.fit(imageBatch)
                data = datagen.flow(imageBatch,
                                    batch_size=batch_size,
                                    save_to_dir=save_dir,
                                    save_prefix=dogOrCat + '.gen',
                                    save_format='jpeg')
                # draw one augmented batch; range(num) would generate num
                # augmented batches per input batch
                for _ in range(1):
                    data.next()

                fileNum = fileNum + 1
        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            print(fileNum)
            coord.request_stop()

        coord.join(threads)
        sess.close()

    return
DATA_FOLDER = "CalTech101"
TRAIN_PATH = os.path.join(DATA_FOLDER, "training")  # Path for training data
VALID_PATH = os.path.join(DATA_FOLDER,
                          "validation")  # Path for validation data
NUMBER_OF_CLASSES = len(
    os.listdir(TRAIN_PATH))  # Number of classes of the dataset
EPOCHS = 50
RESULTS_PATH = os.path.join(
    "AutoConv_DenseNet121_new1",
    "Eff_AutoConv_DenseNet121_log_" + DATA_FOLDER.split('/')[-1] +
    "_autoconv_bayes_opt_v1.csv")  # The path to the results file

# Creating generators from training and validation data
batch_size = 8  # the mini-batch size to use for the dataset
datagen = image.ImageDataGenerator(
    preprocessing_function=keras.applications.densenet.preprocess_input
)  # creating an instance of the data generator
train_generator = datagen.flow_from_directory(
    TRAIN_PATH, target_size=(224, 224),
    batch_size=batch_size)  # creating the generator for training data
valid_generator = datagen.flow_from_directory(
    VALID_PATH, target_size=(224, 224),
    batch_size=batch_size)  # creating the generator for validation data

# creating callbacks for the model
reduce_LR = callbacks.ReduceLROnPlateau(monitor='val_acc',
                                        factor=np.sqrt(0.01),
                                        cooldown=0,
                                        patience=5,
                                        min_lr=0.5e-10)
Example no. 14
# Compile the model
sgd = optimizers.SGD(lr=1e-3)

refiner_model.compile(optimizer=sgd, loss=self_regularization_loss)
discriminator_model.compile(optimizer=sgd, loss=local_adversarial_loss)
discriminator_model.trainable = False
combined_model.compile(optimizer=sgd,
                       loss=[self_regularization_loss, local_adversarial_loss])

refiner_model_path = None
discriminator_model_path = None

# Set up the pipeline to feed new images to both models
datagen = image.ImageDataGenerator(
    preprocessing_function=applications.xception.preprocess_input,
    dim_ordering='tf')

flow_from_directory_params = {
    'target_size': (img_height, img_width),
    'color_mode': 'grayscale' if img_channels == 1 else 'rgb',
    'class_mode': None,
    'batch_size': batch_size
}
flow_params = {'batch_size': batch_size}

synthetic_generator = datagen.flow(X=syn_image_stack, seed=1, **flow_params)

real_generator = datagen.flow(X=real_image_stack, seed=1, **flow_params)

Example no. 15
import tensorflow as tf
import numpy as np
from keras import layers
from keras import models
from keras.preprocessing import image
from pathlib import Path
import csv
from PIL import Image
from keras.utils import to_categorical
path = 'C:/Users/binho/Desktop/Pythono/Interface/datasets/geometricas/treino'
path = Path(path)
labels = 'inferred'
itenes = list(path.rglob('*.*'))
features = len(itenes)
input_shape = (128, 128, 3)
generator = image.ImageDataGenerator(samplewise_std_normalization=True,
                                     fill_mode='constant')
batch_size = 20
input_size = (128, 128)
timesteps = int(features / batch_size)
for x in range(0, len(itenes)):
    itenes[x] = Image.open(itenes[x])
    itenes[x] = itenes[x].resize(size=input_size)
    itenes[x] = np.asarray(itenes[x])
itenes = np.asarray(itenes)
y = itenes.shape
if (len(y) == 3):
    itenes = np.reshape(itenes, (y[0], y[1], y[2], 1))
generator.fit(itenes)
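# Note: fit() only computes featurewise statistics (featurewise_center,
# featurewise_std_normalization, zca_whitening); with just
# samplewise_std_normalization enabled, the call has no effect on the output.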
traingen = generator.flow_from_directory(path,
                                         target_size=input_size,
                                         batch_size=batch_size)
Example no. 16
model.add(Dense(512, kernel_regularizer=regularizers.l2(1e-4)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# the final classification is done via fully connected layers
model.add(Dense(3))
model.add(Activation('softmax'))
model.summary()
# categorical crossentropy is used since there are 3 classes
model.compile(loss='categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])

# image preprocessing and augmentation
train_datagen = image.ImageDataGenerator(
    rescale=1. / 256,
    shear_range=0.1,  #0.2
    zoom_range=0.1,  #0.2
    horizontal_flip=True,
)
test_dataset = image.ImageDataGenerator(rescale=1. / 256)

#Take the path to a directory
train_generator = train_datagen.flow_from_directory(
    '/content/drive/MyDrive/dataset-xgboost/training',
    target_size=(256, 256),
    color_mode='rgb',
    batch_size=32,
    class_mode='categorical')

train_generator.class_indices

# take validation path to a directory
def generator(dir,
              gen=image.ImageDataGenerator(rescale=1. / 255),
              shuffle=True,
              batch_size=1,
              target_size=(24, 24),
              class_mode='categorical'):

    return gen.flow_from_directory(dir,
                                   batch_size=batch_size,
                                   shuffle=shuffle,
                                   color_mode='grayscale',
                                   class_mode=class_mode,
                                   target_size=target_size)
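

# Hypothetical usage (assumes one sub-folder per class under each directory):
# train_batch = generator('data/train', shuffle=True, batch_size=32)
# valid_batch = generator('data/valid', shuffle=False)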
Example no. 18
def predict(model_name, weights_filename):
    """
    Predicts the test images and shows how many correct and incorrect predictions were made for Top 1 and Top 5 classes.

    @param model_name: String containing the name of the model to use for prediction.
    @param weights_filename: String containing the filename of the weights file to load for the corresponding model.

    @return: PredictionResults object if successful, else None.
    """
    img_rows = 224
    img_cols = 224
    channel = 3
    batch_size = 64
    data_path = "../data/compcars/data/image/"
    model_path = "../models/"
    imagenet_model_path = "../imagenet_models/"

    # Get images
    batches = utils.get_batches(
        data_path + 'train',
        gen=image.ImageDataGenerator(
            preprocessing_function=utils.vgg_preprocess),
        batch_size=batch_size,
        shuffle=False,
        class_mode=None)
    test_batches = utils.get_batches(
        data_path + 'test',
        gen=image.ImageDataGenerator(
            preprocessing_function=utils.vgg_preprocess),
        shuffle=False,
        batch_size=batch_size,
        class_mode=None)

    # Create model and load weights
    print "Using %s model" % model_name
    if model_name == "vgg19":
        model = vgg19_model(img_rows, img_cols, channel, batches.nb_class,
                            imagenet_model_path)
    elif model_name == "inception_v1":
        model = googlenet_model(img_rows, img_cols, channel, batches.nb_class,
                                imagenet_model_path)
    else:
        return None
    model.load_weights(model_path + weights_filename)

    # Predict
    probs = model.predict_generator(test_batches, test_batches.nb_sample)
    labels = test_batches.classes
    filenames = test_batches.filenames

    # Get a list of all the class labels
    classes_ids = {v: k for k, v in batches.class_indices.items()}

    # Process the results for Top 1
    print "First filename is "
    print filenames[0]
    print "Probs for it are"
    print probs[0]
    labels_predicted = [np.argmax(prob) for prob in probs]
    classes = [classes_ids[idx] for idx in labels_predicted]
    correct, incorrect = utils.count_correct_compcars(filenames, classes)

    # Process the results for Top 5
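    # np.argpartition places the five largest probabilities in the last five
    # slots without fully sorting them, which is all top-5 counting needs.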
    top_5_labels_pred = [np.argpartition(prob, -5)[-5:] for prob in probs]
    classes_top_5 = []
    for i in range(len(top_5_labels_pred)):
        classes_temp = [classes_ids[idx] for idx in top_5_labels_pred[i]]
        classes_top_5.append(classes_temp)
    correct_top_5, incorrect_top_5 = utils.count_correct_compcars_top_k(
        filenames, classes_top_5)

    print "Top 1: Correct %d, Incorrect %d" % (correct, incorrect)
    print "Top 5: Correct %d, Incorrect %d" % (correct_top_5, incorrect_top_5)

    results = PredictionResults()
    results.probabilities = probs
    results.filenames = filenames
    results.classes_ids = classes_ids

    return results
    """
model1 = keras.models.load_model(MODEL_PATH)

#base_model = InceptionResNetV2(weights='imagenet', include_top=False)
#base_model_full = InceptionResNetV2(weights='imagenet', include_top=True)
#base_model.summary()
#base_model.get_config()
#base_model.save("/home/calvin/Documents/NNDL/Models/model0.h5")

##preprocessing
#create image data generator, specify preprocessing
train_datagen = image.ImageDataGenerator(
        width_shift_range=0.3,
        height_shift_range=0.3,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        vertical_flip=True,
        rotation_range=90,
        preprocessing_function=preprocess_input)

#generate batches of data for training
train_data_flow = train_datagen.flow_from_directory(
    directory=TRAIN_DIR,
    target_size=(IMG_H, IMG_W),
    class_mode="categorical",
    batch_size=BATCH_SIZE)

MINI_BATCHES = int(np.ceil(train_data_flow.n / BATCH_SIZE))
#MINI_BATCHES = 5
Example no. 20
from __future__ import absolute_import
from __future__ import print_function

import os
from os.path import isfile

from keras.preprocessing import image as im

training = 2000
testing = 500

datagen = im.ImageDataGenerator(rotation_range=70,
                                width_shift_range=0.2,
                                height_shift_range=0.2,
                                rescale=1. / 255,
                                shear_range=0.2,
                                zoom_range=0.2,
                                horizontal_flip=False,
                                fill_mode='nearest')

for f in os.listdir("training-template"):
    if not isfile("training-template/" + f):
        if not os.path.exists("training-images1/" + f):
            os.makedirs("training-images1/" + f)
        for g in os.listdir("training-template/" + f):
            if g.endswith("png"):
                i = 0
                trip = False
                img = im.load_img("training-template/" + f + "/" +
                                  g)  # this is a PIL image
                x = im.img_to_array(
                    img)  # NumPy array of shape (height, width, channels)
Example no. 21
    def get_batches(self,
                    path,
                    gen=image.ImageDataGenerator(),
                    shuffle=True,
                    batch_size=8,
                    class_mode='categorical'):
        return gen.flow_from_directory(path,
                                       target_size=(224, 224),
                                       class_mode=class_mode,
                                       shuffle=shuffle,
                                       batch_size=batch_size)
Example no. 22
    # Image sizes - Requirements of the CNN model
    img_reshape_size = (224, 224)

    # Working directories
    dataset_dir_train = os.path.join(
        '../Human-Action-Recognition-with-Keras/imgs/', 'train')

    # Load train dataset

    # Image Data Generator for preprocessing and data augmentation
    if data_augmentation:
        train_datagen = image.ImageDataGenerator(
            preprocessing_function=preprocess_input_image,
            width_shift_range=0.1,
            height_shift_range=0.1,
            shear_range=0.1,
            rotation_range=8,
            fill_mode='nearest')
    else:
        train_datagen = image.ImageDataGenerator()

    train_generator = train_datagen.flow_from_directory(
        dataset_dir_train,
        target_size=img_reshape_size,
        batch_size=batch_size,
        class_mode='categorical')

    # Create and compile model
    model = create_VGG16_model(n_layers_train=n_layers_train,
                               learning_rate=learning_rate)
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Training data iterator.
train_gen = image.ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True,
                                     horizontal_flip=True, width_shift_range=0.2, height_shift_range=0.2)
train_gen.fit(x_train)
train_iter = train_gen.flow(x_train, y_train, batch_size=args.batch_size)

# Validation data iterator.
test_gen = image.ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
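# Reuse the training-set statistics so validation images are normalized
# exactly like the training images.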
test_gen.mean = train_gen.mean
test_gen.std = train_gen.std
test_iter = test_gen.flow(x_test, y_test, batch_size=args.val_batch_size)

# Restore from a previous checkpoint, if initial_epoch is specified.
if resume_from_epoch > 0:
    model = keras.models.load_model(args.checkpoint_format.format(epoch=resume_from_epoch))
else:
    # Set up standard WideResNet-16-10 model.
    model = WideResidualNetwork(depth=16, width=10, weights=None, input_shape=input_shape,
Example no. 24
def generator_x_y(img_paths, labeltypes="density", isTrain=True):
    datagen = image.ImageDataGenerator(fill_mode='wrap',
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       channel_shift_range=10)
    while True:

        for i in img_paths:
            x, y = [], []
            if isTrain:
                h5_path = str(i).replace('.jpg', '.h5').replace(
                    'images', 'train_data\\maps_fixed_kernel')
            else:
                h5_path = str(i).replace('.jpg', '.h5').replace(
                    'images', 'test_data\\maps_fixed_kernel')
            consist = glob.glob(h5_path)
            if len(consist) <= 0:
                continue
            x_ = load_img(i)
            x.append(np.asarray(x_))  # (H, W, 3) array; np.array(x) adds the batch axis
            y_ = img_from_h5(h5_path, labeltypes=labeltypes)
            y.append(np.expand_dims(y_, axis=-1))
            yield np.array(x), np.array(y)
        # vertical flip
        for i in img_paths:
            x, y = [], []
            if isTrain:
                h5_path = str(i).replace('.jpg', '.h5').replace(
                    'images', 'train_data\\maps_fixed_kernel')
            else:
                h5_path = str(i).replace('.jpg', '.h5').replace(
                    'images', 'test_data\\maps_fixed_kernel')
            consist = glob.glob(h5_path)
            if len(consist) <= 0:
                continue
            x_ = np.asarray(load_img(i))  # apply_transform expects a 3D array
            x_ = datagen.apply_transform(x_, {"flip_vertical": True})
            x.append(x_)
            y_ = img_from_h5(h5_path, labeltypes=labeltypes)
            y_ = datagen.apply_transform(y_, {"flip_vertical": True})
            y.append(np.expand_dims(y_, axis=-1))

            yield np.array(x), np.array(y)

        # horizontal flip
        for i in img_paths:
            x, y = [], []
            if isTrain:
                h5_path = str(i).replace('.jpg', '.h5').replace(
                    'images', 'train_data\\maps_fixed_kernel')
            else:
                h5_path = str(i).replace('.jpg', '.h5').replace(
                    'images', 'test_data\\maps_fixed_kernel')
            consist = glob.glob(h5_path)
            if len(consist) <= 0:
                continue
            x_ = np.asarray(load_img(i))  # apply_transform expects a 3D array
            x_ = datagen.apply_transform(x_, {"flip_horizontal": True})
            x.append(x_)
            y_ = img_from_h5(h5_path, labeltypes=labeltypes)
            y_ = datagen.apply_transform(y_, {"flip_horizontal": True})
            y.append(np.expand_dims(y_, axis=-1))

            yield np.array(x), np.array(y)
        X_test_parts = X_test[prev_i:]
        done = True
    else:
        X_test_parts = X_test[prev_i:i]


    # convert RGB images to a 4D tensor with shape (1, 244, 244, 3)
    test_tensors = paths_to_tensor(img_folder, 'test_photos',
                                   X_test_parts).astype('float32') / 255
    print("\nTest tensors size: {}".format(test_tensors.shape))

    # perform image augmentations
    print("perform image augmentations for test data")
    datagen_test = image.ImageDataGenerator(
                rotation_range=20,
                width_shift_range=0.2,
                height_shift_range=0.2,
                shear_range=0.05,
                zoom_range=0.2,
                horizontal_flip=True,
                fill_mode='nearest')

    datagen_test.fit(test_tensors)
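
    # Note: datagen_test is fitted above but never applied; the bottleneck
    # features below are extracted from the un-augmented tensors.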


    # extract bottleneck features
    test_features = extract_Resnet50(test_tensors * 255)

    print("Test features shape: {}".format(test_features.shape))

    np.save(bottleneck_path + 'Resnet50_test_{}'.format(num_files), test_features)

    print("Successfully saved test bottleneck features for Resnet50_test_{}".format(num_files)) 
Example no. 26
from __future__ import absolute_import
from __future__ import print_function

import os
from os.path import isfile

from keras.preprocessing import image as im

training = 5000  # rough upper bound; the generator nondeterministically produces somewhat fewer images

datagen = im.ImageDataGenerator(rotation_range=70,
                                rescale=1. / 255,
                                shear_range=0.2,
                                zoom_range=0.2,
                                horizontal_flip=False,
                                featurewise_center=True,
                                featurewise_std_normalization=True,
                                fill_mode='nearest')


def createShapes(g, name):

    os.makedirs("training-images1/" + name)
    i = 0
    trip = False
    img = im.load_img("training-template/" + g)  # this is a PIL image
    x = im.img_to_array(img)  # this is a Numpy array with shape (3, 150, 150)
    x = x.reshape(
        (1, ) + x.shape)  # this is a Numpy array with shape (1, 3, 150, 150)
    datagen.fit(x)
    for batch in datagen.flow(x,
Example no. 27
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
from keras.applications.imagenet_utils import decode_predictions
from keras.layers import Dense, Activation, Flatten
from keras.layers import merge, Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
import tensorflow as tf

with tf.device('/gpu:0'):

    train_dir = "dataset/train_bin"
    test_dir = "dataset/test_bin"

    datagen = image.ImageDataGenerator(rescale=1. / 255)
    batch_size = 32

    num_classes = 2

    train_generator = datagen.flow_from_directory(train_dir,
                                                  target_size=(224, 224),
                                                  batch_size=batch_size,
                                                  class_mode='binary',
                                                  shuffle=True)

    test_generator = datagen.flow_from_directory(test_dir,
                                                 target_size=(224, 224),
                                                 batch_size=batch_size,
                                                 class_mode='binary',
                                                 shuffle=False)
Example no. 28

val_classes = val_batches.classes
trn_classes = batches.classes
val_labels = onehot(val_classes)
trn_labels = onehot(trn_classes)

# Fine-tuning

model.pop()
for layer in model.layers:
    layer.trainable = False

model.add(Dense(121, activation='softmax'))

gen = image.ImageDataGenerator()
batches = gen.flow(trn_data, trn_labels, batch_size=64, shuffle=True)
val_batches = gen.flow(val_data, val_labels, batch_size=64, shuffle=False)


def fit_model(model, batches, val_batches, nb_epoch=1):
    model.fit_generator(batches,
                        samples_per_epoch=batches.N,
                        nb_epoch=nb_epoch,
                        validation_data=val_batches,
                        nb_val_samples=val_batches.N)


opt = RMSprop(lr=0.1)
model.compile(optimizer=opt,
              loss='categorical_crossentropy')
Example no. 29
def get_batches(dirname,
                gen=image.ImageDataGenerator(),
                shuffle=True,
                batch_size=4,
                class_mode='categorical',
                target_size=(224, 224)):
    return gen.flow_from_directory(dirname,
                                   target_size=target_size,
                                   class_mode=class_mode,
                                   shuffle=shuffle,
                                   batch_size=batch_size)
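

# Hypothetical usage:
# trn_batches = get_batches('data/train', batch_size=64)
# val_batches = get_batches('data/valid', shuffle=False)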
Example no. 30
        resume_from_epoch = try_epoch
        break

# Horovod: broadcast resume_from_epoch from rank 0 (which will have
# checkpoints) to other ranks.
resume_from_epoch = hvd.broadcast(resume_from_epoch,
                                  0,
                                  name='resume_from_epoch')

# Horovod: print logs on the first worker.
verbose = 1 if hvd.rank() == 0 else 0

# Training data iterator.
train_gen = image.ImageDataGenerator(
    width_shift_range=0.33,
    height_shift_range=0.33,
    zoom_range=0.5,
    horizontal_flip=True,
    preprocessing_function=keras.applications.resnet50.preprocess_input)
train_iter = train_gen.flow_from_directory(args.train_dir,
                                           batch_size=args.batch_size,
                                           target_size=(224, 224))

# Validation data iterator.
test_gen = image.ImageDataGenerator(
    zoom_range=(0.875, 0.875),
    preprocessing_function=keras.applications.resnet50.preprocess_input)
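# zoom_range=(0.875, 0.875) applies a deterministic zoom; 0.875 = 224/256,
# matching the usual resize-then-center-crop ratio for ResNet-50 validation.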
test_iter = test_gen.flow_from_directory(args.val_dir,
                                         batch_size=args.val_batch_size,
                                         target_size=(224, 224))

# Set up standard ResNet-50 model.