Exemple #1
0
def fit(model, data):
    """Train *model* with real-time augmentation and persist the weights.

    ``data`` is expected to be (x_train, x_val, y_train, y_val).  Relies on
    module-level ``batch_size``, ``epochs``, ``model_dir`` and ``name``.
    Returns the Keras History object.
    """
    # Geometric-only augmentation; all normalisation options stay off.
    augmenter = ImageDataGenerator(featurewise_center=False,
                                   samplewise_center=False,
                                   featurewise_std_normalization=False,
                                   samplewise_std_normalization=False,
                                   zca_whitening=False,
                                   rotation_range=10,
                                   zoom_range=0.1,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   horizontal_flip=False,
                                   vertical_flip=False)

    # Halve the learning rate whenever val accuracy stalls for 3 epochs.
    lr_callback = ReduceLROnPlateau(monitor='val_accuracy',
                                    patience=3,
                                    verbose=1,
                                    factor=0.5,
                                    min_lr=0.00001)

    augmenter.fit(data[0])
    train_stream = augmenter.flow(data[0], data[2], batch_size=batch_size)
    history = model.fit_generator(
        train_stream,
        epochs=epochs,
        validation_data=(data[1], data[3]),
        verbose=2,
        steps_per_epoch=data[0].shape[0] // batch_size,
        callbacks=[lr_callback])

    # File name encodes the run configuration, e.g. "<name>_50_128.h5".
    model.save(model_dir + name + "_" + str(epochs) + "_" + str(batch_size) +
               ".h5")

    return history
def imageAugmentation(directory,
                      export_directory=None,
                      prefix="aug",
                      extension="jpg",
                      logger=None):
    """Write up to 10 augmented variants of every image in *directory*.

    Parameters
    ----------
    directory : str
        Folder containing the source images.
    export_directory : str, optional
        Destination folder; defaults to an ``augmented`` subfolder of
        *directory* (created if missing).
    prefix : str
        Filename prefix for the saved augmented images.
    extension : str
        File extension (case-insensitive) used both to filter inputs and
        as the save format.
    logger : logging.Logger, optional
        When given, progress is also reported at INFO level.
    """
    aug = ImageDataGenerator(rotation_range=10,
                             zoom_range=0.15,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.15,
                             horizontal_flip=True,
                             fill_mode="nearest")

    dir_path = os.path.abspath(directory)
    if export_directory is None:
        # Fix: build paths with os.path.join instead of the original
        # hard-coded "\\" separators so this also works on POSIX systems.
        export_directory = os.path.join(dir_path, "augmented")
        try:
            os.mkdir(export_directory)
            print("Directory", export_directory, "Created ")
        except FileExistsError:
            print("Directory", export_directory, "already exists")
    extension = extension.lower()

    try:
        file_names = os.listdir(dir_path)
        counter = 1
        for file_name in file_names:
            if not file_name.lower().endswith(extension):
                continue
            fp = FilePath(os.path.join(dir_path, file_name))
            try:
                # Shape the image as a single-element batch for Keras.
                image = np.expand_dims(load_img(fp.getAbsPath()), axis=0)
            except Exception:
                # Best effort: skip unreadable files and keep going.
                continue
            print("Processing Image " + str(counter) + ": " + file_name)
            if logger is not None:
                logger.info("Processing Image " + str(counter) + ": " +
                            file_name)
            aug.fit(image)
            # zip(..., range(10)) caps the stream at 10 augmentations per
            # image; flow() itself performs the saving as a side effect.
            for x, val in zip(
                    aug.flow(image,
                             save_to_dir=export_directory,
                             save_format=extension,
                             save_prefix=prefix), range(10)):
                pass
            counter += 1

        print("Done")
        if logger is not None:
            logger.info("Done")

    except Exception as e:
        # Report rather than crash; augmentation is a best-effort utility.
        if logger is not None:
            logger.info(e)
        print(e)
    def setUpClass(cls):
        """Build a small Keras MNIST classifier and the defence fixtures.

        Stores on the class: the truncated MNIST data, a fitted
        KerasClassifier, and two ActivationDefence instances (one fed
        arrays directly, one fed through a KerasDataGenerator).
        """
        # Truncate the training split to NB_TRAIN samples to keep the
        # fixture fast.
        (x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
        x_train, y_train = x_train[:NB_TRAIN], y_train[:NB_TRAIN]
        cls.mnist = (x_train, y_train), (x_test, y_test), (min_, max_)

        # Create simple keras model
        import tensorflow as tf

        # Choose the Keras import path based on the installed TF version;
        # TF >= 2.3 additionally needs eager execution disabled here.
        tf_version = [int(v) for v in tf.__version__.split(".")]
        if tf_version[0] == 2 and tf_version[1] >= 3:
            tf.compat.v1.disable_eager_execution()
            from tensorflow.keras import backend as k
            from tensorflow.keras.models import Sequential
            from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
        else:
            import keras.backend as k
            from keras.models import Sequential
            from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D

        # Force training-phase behaviour (dropout/batch-norm) globally.
        k.set_learning_phase(1)
        model = Sequential()
        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation="relu",
                   input_shape=x_train.shape[1:]))
        model.add(MaxPooling2D(pool_size=(3, 3)))
        model.add(Flatten())
        model.add(Dense(10, activation="softmax"))

        model.compile(loss="categorical_crossentropy",
                      optimizer="adam",
                      metrics=["accuracy"])

        from art.estimators.classification.keras import KerasClassifier

        # Wrap the model in ART's estimator; clip_values come from the data.
        cls.classifier = KerasClassifier(model=model, clip_values=(min_, max_))

        # One quick epoch is enough for the defence tests.
        cls.classifier.fit(x_train, y_train, nb_epochs=1, batch_size=128)

        cls.defence = ActivationDefence(cls.classifier, x_train, y_train)

        # Second defence variant driven by a data generator instead of
        # raw arrays (hence x_train/y_train passed as None below).
        datagen = ImageDataGenerator()
        datagen.fit(x_train)

        data_gen = KerasDataGenerator(datagen.flow(x_train,
                                                   y_train,
                                                   batch_size=NB_TRAIN),
                                      size=NB_TRAIN,
                                      batch_size=NB_TRAIN)

        cls.defence_gen = ActivationDefence(cls.classifier,
                                            None,
                                            None,
                                            generator=data_gen)
    def train(self):
        """Train the instance's model on its CIFAR-10 splits.

        Uses real-time augmentation, a stepped learning-rate schedule and
        Nesterov SGD; saves the final weights to weights/vgg16_cifar10.h5
        and returns the Keras History object.
        """
        # Training hyper-parameters.
        batch_size = 128
        maxepoches = 50
        learning_rate = 0.1
        lr_decay = 1e-6
        lr_drop = 20

        # Normalise the train/test splits held on the instance.
        trainX_norm, testX_norm = self.normalize(self.trainX, self.testX)

        def lr_scheduler(epoch):
            # Halve the rate every `lr_drop` epochs.
            return learning_rate * (0.5**(epoch // lr_drop))

        reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)

        # Geometric augmentation only: small rotations/shifts plus
        # horizontal mirroring; all normalisation options stay off.
        datagen = ImageDataGenerator(featurewise_center=False,
                                     samplewise_center=False,
                                     featurewise_std_normalization=False,
                                     samplewise_std_normalization=False,
                                     zca_whitening=False,
                                     rotation_range=15,
                                     width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     vertical_flip=False)
        datagen.fit(trainX_norm)

        # SGD with Nesterov momentum and a small weight decay.
        sgd = optimizers.SGD(lr=learning_rate,
                             decay=lr_decay,
                             momentum=0.9,
                             nesterov=True)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])

        history = self.model.fit_generator(
            datagen.flow(trainX_norm, self.trainY, batch_size=batch_size),
            steps_per_epoch=trainX_norm.shape[0] // batch_size,
            epochs=maxepoches,
            validation_data=(testX_norm, self.testY),
            callbacks=[reduce_lr],
            verbose=2)

        self.model.save_weights('weights/vgg16_cifar10.h5')
        return history
Exemple #5
0
def data_augmentation(x_train_in, x_train_out, augment_size):
    """Append *augment_size* randomly augmented samples to an image dataset.

    Both arrays are reshaped to (N, 120, 120, 1) and scaled to [0, 1];
    *augment_size* random samples are drawn, geometrically augmented, and
    concatenated onto the originals.

    Returns (x_train, y_train) with the augmented samples appended.

    NOTE(review): only the inputs are transformed — the matching targets
    are copied untransformed. Confirm that is intended for this task.
    """
    # The four helpers below randomly (p=0.5) apply an intensity
    # transform.  Fix: the originals assigned to the local `x` and
    # returned None, so any caller would have lost the result; they now
    # return the (possibly transformed) image.  They are currently unused
    # but kept as candidate `preprocessing_function` values.
    def histogram_equalization(x):
        if np.random.random() < 0.5:
            x = exposure.equalize_hist(x)
        return x

    def adaptive_equalization(x):
        if np.random.random() < 0.5:
            x = exposure.equalize_adapthist(x, clip_limit=0.01)
        return x

    def contrast_stretching(x):
        if np.random.random() < 0.5:
            p2, p98 = np.percentile(x, (2, 98))
            x = exposure.rescale_intensity(x, in_range=(p2, p98))
        return x

    def to_lab(x):
        if np.random.random() < 0.5:
            x = exposure.rgb2lab(x, illuminant='D65', observer='2')
        return x

    # Reshape to single-channel 120x120 images and scale to [0, 1].
    x_train_in = np.reshape(
        x_train_in, (x_train_in.shape[0], 120, 120, 1)).astype('float32') / 255
    x_train_out = np.reshape(
        x_train_out,
        (x_train_out.shape[0], 120, 120, 1)).astype('float32') / 255

    # Mild geometric augmentation only.
    image_generator = ImageDataGenerator(
        rotation_range=10,
        featurewise_center=False,
        samplewise_std_normalization=False,
        zoom_range=0.05,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True,
        vertical_flip=True,
    )

    # Fit generator statistics on (augmented) training data.  Note: ZCA
    # whitening is NOT enabled above, despite what an older comment said.
    image_generator.fit(x_train_in, augment=True)

    # Draw a random subset and replace it with augmented versions.
    randidx = np.random.randint(x_train_in.shape[0], size=augment_size)
    x_augmented = x_train_in[randidx].copy()
    y_augmented = x_train_out[randidx].copy()
    x_augmented = image_generator.flow(x_augmented,
                                       np.zeros(augment_size),
                                       batch_size=augment_size,
                                       shuffle=False).next()[0]

    # Append the augmented samples to the training set.
    x_train = np.concatenate((x_train_in, x_augmented))
    y_train = np.concatenate((x_train_out, y_augmented))

    return x_train, y_train
Exemple #6
0
def _train_seed_amateur(model,
                        loaders,
                        log=False,
                        checkpoint=False,
                        logfile='',
                        checkpointFile=''):
    """Train *model* for 200 epochs and return its test accuracy.

    Parameters
    ----------
    model : tf.keras.Model
        Un-built model; its input shape is derived from the training data.
    loaders : tuple
        ((x_train, y_train), (x_test, y_test)) arrays.
    log, checkpoint, logfile, checkpointFile :
        Currently unused; kept so existing callers keep working.

    Returns
    -------
    float
        Accuracy of the trained model on the test split.
    """
    (x_train, y_train), (x_test, y_test) = loaders

    # Custom schedule starting at lr = 0.1.
    lr_decay = CustomLearningRateScheduler(initial_lr=0.1)
    callbacks = [lr_decay]

    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1,
                                        momentum=0.9,
                                        nesterov=True)

    model.build(input_shape=(None, ) + x_train[0].shape)
    model.summary()

    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.sparse_categorical_crossentropy,
                  metrics=[tf.keras.metrics.sparse_categorical_accuracy])

    # Standardise both splits with statistics fitted on the training set.
    image_gen = ImageDataGenerator(featurewise_center=True,
                                   featurewise_std_normalization=True,
                                   data_format='channels_last')
    image_gen.fit(x_train)
    x_train = (x_train - image_gen.mean) / image_gen.std
    x_test = (x_test - image_gen.mean) / image_gen.std

    history = model.fit(x_train,
                        y_train,
                        epochs=200,
                        batch_size=128,
                        validation_split=0.1,
                        callbacks=callbacks,
                        verbose=1)

    loss, acc = model.evaluate(x_test, y_test)

    # Fix: removed a dead `filename` computation that read a module-level
    # `args` object (NameError risk) and whose result was never used.
    results = history.history
    results['test_loss'] = loss
    results['test_acc'] = acc

    print('test loss is {} and acc is {}'.format(loss, acc))
    return acc
Exemple #7
0
def augment_data(set_to_augment,
                 prefix="",
                 total_size=35000,
                 batchsize=64,
                 use_cached_training_data=None,
                 verbose=True):
    """Grow a dataset to *total_size* samples via random augmentation.

    Parameters
    ----------
    set_to_augment : tuple
        (x, y) arrays to augment.
    prefix : str
        Prefix prepended to the cache file name.
    total_size : int
        Desired number of samples in the returned set.
    batchsize : int
        Batch size used while drawing augmented samples.
    use_cached_training_data : str, optional
        Cache key; when given, a cached result is loaded from / saved to
        ``prefix + use_cached_training_data + str(total_size)``.
    verbose : bool
        Print rough progress percentages while generating.

    Returns
    -------
    tuple
        ((x, y), from_cache) where from_cache reports whether a cached
        dataset was returned.
    """
    # Fix: only build the cache path when a cache key was supplied; the
    # original computed it unconditionally and raised a TypeError
    # (str + None) whenever use_cached_training_data was left at its
    # default of None.
    filepath = None
    if use_cached_training_data:
        filepath = prefix + use_cached_training_data + str(total_size)
        x, y = load_2d_numpy_array_if_exists(filepath)
        if x is not None and y is not None:
            print("     Found cached training data for {}".format(
                use_cached_training_data))
            return (x, y), True

    # Enhance the training set with augmentation until it reaches
    # total_size samples.
    generated_data = set_to_augment[0].copy(), set_to_augment[1].copy()
    if not len(generated_data[0]) >= total_size:
        datagen = ImageDataGenerator(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
        )
        datagen.fit(set_to_augment[0])
        generator = datagen.flow(set_to_augment[0],
                                 set_to_augment[1],
                                 batch_size=batchsize)
        print_percentage = 0.1
        while len(generated_data[0]) < total_size:
            next_sample = generator.next()
            generated_data = np.concatenate((generated_data[0], next_sample[0]), axis=0), \
                             np.concatenate((generated_data[1], next_sample[1]), axis=0)

            # Emit a progress marker roughly every 10%.
            if verbose and len(
                    generated_data[0]) / total_size > print_percentage:
                print("{}%..".format(int(print_percentage * 100)),
                      end="",
                      flush=True)
                print_percentage += 0.1
        if verbose:
            print("100%! Done!")

    # Persist the (trimmed) result for the next run when caching is on.
    if use_cached_training_data:
        save_2d_numpy_array_if_exists(filepath, generated_data[0][:total_size],
                                      generated_data[1][:total_size])
    generated_data = shuffle(*generated_data)
    return (generated_data[0][:total_size],
            generated_data[1][:total_size]), False
Exemple #8
0
def data_generator(batch_size):
    """Build standardised CIFAR-10 train/test tf.data pipelines.

    Returns a (train_dataset, test_dataset) pair, both batched with
    *batch_size* and normalised with statistics fitted on the train split.
    """
    (X_train, Y_train), (X_test, Y_test) = keras.datasets.cifar10.load_data()

    # Scale pixels to [0, 1] before fitting the normaliser.
    X_train = X_train.astype(np.float32) / 255
    X_test = X_test.astype(np.float32) / 255

    datagen = ImageDataGenerator(featurewise_center=True, zca_whitening=True)
    datagen.fit(X_train)

    # Apply the fitted centering / ZCA whitening to both splits.
    X_train = datagen.standardize(X_train)
    X_test = datagen.standardize(X_test)

    train_ds = tf.data.Dataset.from_tensor_slices((X_train, Y_train)).batch(batch_size)
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, Y_test)).batch(batch_size)
    return train_ds, test_ds
def cifar10_data_generator(x_train):
    """Build and fit an augmenting data generator for CIFAR-10.

    :param x_train: training images the generator statistics are fitted on
    :return: the fitted ImageDataGenerator
    """
    augmentation_options = dict(rotation_range=15,
                                horizontal_flip=True,
                                width_shift_range=0.1,
                                height_shift_range=0.1)
    generator = ImageDataGenerator(**augmentation_options)
    generator.fit(x_train)
    return generator
Exemple #10
0
def train_classifier(classes, path, epoch, lr, weights_path=None):
    """Train a ResNet-18 classifier with augmentation and checkpointing.

    :param classes: number of output classes for the model head
    :param path: unused in this body; kept for interface compatibility
    :param epoch: number of training epochs
    :param lr: learning rate as a string (e.g. "0.001")
    :param weights_path: optional weights file to warm-start from

    Side effects: writes best-so-far weight files into the current
    directory and dumps the training history to history.json.
    """
    model = res18(classes)
    if weights_path:
        model.load_weights(weights_path)
    model.compile(optimizer=Adam(lr=float(lr)),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    trains, labels = img_reader()

    # A single generator feeds both subsets via validation_split.
    generator = ImageDataGenerator(featurewise_center=True,
                                   featurewise_std_normalization=True,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   rotation_range=10,
                                   shear_range=0.2,
                                   zoom_range=(0.8, 1.2),
                                   rescale=1. / 255,
                                   horizontal_flip=True,
                                   validation_split=0.2)
    generator.fit(trains)

    train_flow = generator.flow(trains, labels, shuffle=True,
                                subset='training')
    validate_flow = generator.flow(trains, labels, shuffle=True,
                                   subset='validation')

    steps_train = train_flow.n // train_flow.batch_size
    steps_valid = validate_flow.n // validate_flow.batch_size

    out_dir = os.path.abspath('.')
    # NOTE(review): assumes lr looks like "0.xxx" — lr[2:] strips the "0.".
    weights_name = 'lr_0' + lr[2:] + '_weights_{epoch:02d}_{val_acc:.2f}.hdf5'
    model_checkpoint = ModelCheckpoint(os.path.join(out_dir, weights_name),
                                       monitor='val_acc',
                                       save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=10)

    history = model.fit_generator(train_flow,
                                  epochs=epoch,
                                  steps_per_epoch=steps_train,
                                  verbose=1,
                                  callbacks=[model_checkpoint, reduce_lr],
                                  validation_data=validate_flow,
                                  validation_steps=steps_valid)
    with open('history.json', 'w') as f:
        json.dump(history.history, f)
    def fit(self, X, bag_of_cells, epochs=30, batch_size=6):
        """Train self.model on *X* using LDA-derived topic labels.

        Parameters
        ----------
        X : array-like
            Input samples (images) fed to the data generator.
        bag_of_cells :
            Bag-of-cells representation handed to LDATopicGen.
        epochs : int
            Number of passes over the training split.
        batch_size : int
            Mini-batch size for the augmentation flow.

        Side effects: stores the data splits and the fitted LDA model on
        the instance, trains self.model in place, then evaluates it on
        the held-out split.
        """
        self.boc = bag_of_cells

        # Compute initial labels using LDA (self.K topics).
        self.lda = LDATopicGen(self.boc, topics=self.K)
        y = self.lda.fit_predict()

        self.X = X
        self.y = y

        # 70/30 train/test split on the LDA-labelled data.
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.3)

        self.X_train = X_train
        self.y_train = y_train
        self.X_test = X_test
        self.y_test = y_test

        # All augmentation options are currently disabled; the generator
        # only batches the data.
        datagen = ImageDataGenerator(
            # featurewise_center=True,
            # featurewise_std_normalization=True,
            # rotation_range=20,
            # width_shift_range=0.2,
            # height_shift_range=0.2,
            # horizontal_flip=True
        )

        datagen.fit(self.X_train)

        for e in range(epochs):
            print("=" * 5, "Epoch %s" % str(e), "=" * 5)
            batches = 0
            # self._update_labels()

            # datagen.flow loops forever, so break manually after roughly
            # one pass over the training data.
            for x_batch, y_batch in datagen.flow(self.X_train,
                                                 self.y_train,
                                                 batch_size=batch_size):
                loss = self.model.train_on_batch(x_batch, y_batch)
                print("loss: %s - accuracy: %s" % (loss[0], loss[1]))

                batches += 1

                if batches >= len(X_train) / batch_size:
                    break

        self.model.evaluate(x=X_test, y=y_test)
Exemple #12
0
def preprocess_train_data(x, y):
    """Prepare training data and a fitted augmentation generator.

    Labels are one-hot encoded with config['model']['num_classes'] classes
    and the images are scaled to [0, 1].

    Returns (datagen, x_train, y_train).
    """
    y_train = keras.utils.to_categorical(y, config['model']['num_classes'])

    x_train = x.astype('float32')
    x_train /= 255

    # Mild geometric augmentation: small rotations and shifts plus
    # horizontal mirroring; no rescaling, shearing, zooming or channel
    # shifts, and no validation split.
    datagen = ImageDataGenerator(rotation_range=7,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 shear_range=0.,
                                 zoom_range=0.,
                                 channel_shift_range=0.,
                                 fill_mode='nearest',
                                 cval=0.,
                                 horizontal_flip=True,
                                 vertical_flip=False,
                                 rescale=None,
                                 preprocessing_function=None,
                                 data_format='channels_last',
                                 validation_split=0.0)

    # Fit feature-wise statistics (augment=True fits on augmented batches).
    datagen.fit(x_train, augment=True)

    return datagen, x_train, y_train
Exemple #13
0
def training_set_generator(X_train, Y_train):
    """Yield matched (image, mask) pairs under identical augmentation.

    The image and mask generators share the same seed and keyword
    arguments so every random transform is applied to both members of a
    pair.
    """
    shared_args = dict(featurewise_center=True,
                       featurewise_std_normalization=True,
                       rotation_range=90,
                       width_shift_range=0.1,
                       height_shift_range=0.1,
                       zoom_range=0.2,
                       rescale=1. / 255)
    seed = 1

    image_datagen = ImageDataGenerator(**shared_args)
    mask_datagen = ImageDataGenerator(**shared_args)

    image_datagen.fit(X_train, augment=True, seed=seed)
    mask_datagen.fit(Y_train, augment=True, seed=seed)

    image_stream = image_datagen.flow(X_train, seed=seed, batch_size=1)
    mask_stream = mask_datagen.flow(Y_train, seed=seed, batch_size=1)

    for pair in zip(image_stream, mask_stream):
        yield pair
def imgGen(img,
           zca=False,
           rotation=0.,
           w_shift=0.,
           h_shift=0.,
           shear=0.,
           zoom=0.,
           h_flip=False,
           v_flip=False,
           preprocess_fcn=None,
           batch_size=20):
    """Build and fit an ImageDataGenerator from individual augmentation flags.

    Parameters
    ----------
    img : array
        Sample images the generator statistics are fitted on.
    zca, rotation, w_shift, h_shift, shear, zoom, h_flip, v_flip :
        Per-transform settings forwarded to ImageDataGenerator.
    preprocess_fcn : callable, optional
        Custom preprocessing applied to each input.
    batch_size : int
        Unused in this body; kept for interface compatibility.

    Returns
    -------
    ImageDataGenerator
        The fitted generator.
    """
    generator = ImageDataGenerator(zca_whitening=zca,
                                   rotation_range=rotation,
                                   width_shift_range=w_shift,
                                   height_shift_range=h_shift,
                                   shear_range=shear,
                                   zoom_range=zoom,
                                   fill_mode='nearest',
                                   horizontal_flip=h_flip,
                                   vertical_flip=v_flip,
                                   preprocessing_function=preprocess_fcn,
                                   data_format=K.image_data_format())
    generator.fit(img)
    return generator
# Sanity-check the dataset split shapes before training.
print("y_train shape = ",yTrain.shape)
print("\nx_test shape = ",xTest.shape)
print("y_test shape = ",yTest.shape)
# One-hot encode the integer class labels.
y_train_cat = to_categorical(yTrain)
y_test_cat = to_categorical(yTest)


#5) create generator for data augmentation
# Feature-wise centering/normalisation plus rotations, shifts and
# horizontal mirroring; fit() computes the normalisation statistics.
datagenTrain = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
datagenTrain.fit(xTrain)


#6) create the model
# Choose the architecture based on the module-level MODEL flag;
# transfer learning is the fallback for unrecognised values.
prefix = ""
if MODEL == "SIMPLE_CNN":
    prefix = "simpleCNN"
    model = build_simple_CNN()
elif MODEL == "TRANSFER_LEARNING":
    model = transfer_learning()
else:
    print("Model not available, using default TRANSFER_LEARNING")
    model = transfer_learning()


#7) model compiling and training
Exemple #16
0
# Script-level hyper-parameters and data location.
INIT_LR = 1e-3
BS = 32
PATH = '/Users/Anna/work/data/'

dg = DataGenerator()
(x_train, y_train), (x_test, y_test) = dg.load_data(PATH)

# Swap axes 1 and 3 — converts between channels-first and channels-last
# layouts for the loaded batches.
x_train = np.swapaxes(x_train, 1, 3)
x_test = np.swapaxes(x_test, 1, 3)


print('{} {}'.format(x_train.shape, y_train.shape))

###############
# Generator with no augmentation options; fit() is still called on the
# training data.
datagen = ImageDataGenerator()
datagen.fit(x_train)

# Create model from pre-trained GoogLeNet weights.
input, model = create_googlenet("../googLeNet/googlenet_weights.h5")
print("[INFO] compiling model...")

# for index, layer in enumerate(model.layers):
#     print("{} {}".format(str(index), str(layer.name)))
# model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')

# Plugging into old net: attach new 2-class heads onto three fixed
# intermediate layers (indices 100-102) of the pre-trained network.
loss1_classifier = Dense(2, name='loss1/classifier', kernel_regularizer=l2(0.0002))(model.layers[100].output)
loss2_classifier = Dense(2, name='loss2/classifier', kernel_regularizer=l2(0.0002))(model.layers[101].output)
loss3_classifier = Dense(2, name='loss3/classifier', kernel_regularizer=l2(0.0002))(model.layers[102].output)
Exemple #17
0
# Ensure the splits are numpy arrays before further processing.
x_train = np.asarray(x_train)
y_train = np.asarray(y_train)
x_test = np.asarray(x_test)
y_test = np.asarray(y_test)

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

from keras_preprocessing.image import ImageDataGenerator

# Geometric augmentation: rotations, shifts, shear, zoom and mirroring.
datagen = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True)

# fit augmented data
datagen.fit(x_train, augment=True)

# Preview a single augmented batch, then stop (note the break below).
for X_batch, y_batch in datagen.flow(x_train, y_train, batch_size=12):
    # Show 10 images
    for i in range(0, 10):
        pyplot.subplot(3, 4, i + 1)
        pyplot.imshow(X_batch[i].reshape(img_rows, img_cols, 3).astype(np.uint8))

    # show the plot
    pyplot.show()
    break
Exemple #18
0
def train(dataset, ckpt=None, output=None):
    """
        Train the model
        **input: **
            *dataset: (String) Dataset folder to used
            *ckpt: (String) [Optional] Path to the ckpt file to restore
            *output: (String) [Optional] Path to the output folder to used. ./outputs/ by default

        Runs an endless training loop (no return): mixes augmented and
        plain batches, periodically evaluates on the validation set, and
        saves the model whenever the full-validation loss improves.
    """
    def preprocessing_function(img):
        """
            Custom preprocessing_function

            Randomly perturbs brightness and contrast via PIL; expects
            and returns images scaled to [0, 1].
        """
        img = img * 255
        img = PIL.Image.fromarray(img.astype('uint8'), 'RGB')
        img = ImageEnhance.Brightness(img).enhance(random.uniform(0.6, 1.5))
        img = ImageEnhance.Contrast(img).enhance(random.uniform(0.6, 1.5))
        return np.array(img) / 255

    X_train, y_train, X_valid, y_valid, X_test, y_test = get_images(dataset)

    # Scale all splits to [0, 1].
    X_train = X_train / 255
    X_valid = X_valid / 255
    X_test = X_test / 255

    # Two training generators: a plain one and an augmented one; which
    # is drawn from is decided per-batch below.
    train_datagen = ImageDataGenerator()
    train_datagen_augmented = ImageDataGenerator(
        rotation_range=20,
        shear_range=0.2,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True,
        preprocessing_function=preprocessing_function)
    inference_datagen = ImageDataGenerator()
    train_datagen.fit(X_train)
    #train_datagen_augmented.fit(X_train)
    inference_datagen.fit(X_valid)
    inference_datagen.fit(X_test)

    # Utils method to print the current progression
    def plot_progression(b, cost, acc, label):
        print("[%s] Batch ID = %s, loss = %s, acc = %s" %
              (label, b, cost, acc))

    # Init model: fresh weights, or restored from a checkpoint when given.
    model = ModelTreemap("Treemap", output_folder=output)
    if ckpt is None:
        model.init()
    else:
        model.load(ckpt)

    # Training pipeline
    b = 0
    valid_batch = inference_datagen.flow(X_valid,
                                         y_valid,
                                         batch_size=BATCH_SIZE)
    best_validation_loss = None
    # Probability of drawing an augmented batch; decays every 1000 steps.
    augmented_factor = 0.99
    decrease_factor = 0.80
    train_batches = train_datagen.flow(X_train, y_train, batch_size=BATCH_SIZE)
    augmented_train_batches = train_datagen_augmented.flow(
        X_train, y_train, batch_size=BATCH_SIZE)

    # NOTE(review): this loop never terminates on its own — the caller is
    # expected to interrupt it; the best model so far is saved as it runs.
    while True:
        next_batch = next(augmented_train_batches if random.
                          uniform(0, 1) < augmented_factor else train_batches)
        x_batch, y_batch = next_batch

        ### Training
        cost, acc = model.optimize(x_batch, y_batch)
        ### Validation
        x_batch, y_batch = next(valid_batch, None)
        # Retrieve the cost and acc on this validation batch and save it in tensorboard
        cost_val, acc_val = model.evaluate(x_batch, y_batch, tb_test_save=True)

        if b % 10 == 0:  # Plot the last results
            plot_progression(b, cost, acc, "Train")
            plot_progression(b, cost_val, acc_val, "Validation")
        if b % 1000 == 0:  # Test the model on all the validation
            print("Evaluate full validation dataset ...")
            loss, acc, _ = model.evaluate_dataset(X_valid, y_valid)
            print("Current loss: %s Best loss: %s" %
                  (loss, best_validation_loss))
            plot_progression(b, loss, acc, "TOTAL Validation")
            # Checkpoint only on improvement of the full-validation loss.
            if best_validation_loss is None or loss < best_validation_loss:
                best_validation_loss = loss
                model.save()
            augmented_factor = augmented_factor * decrease_factor
            print("Augmented Factor = %s" % augmented_factor)

        b += 1
Exemple #19
0
def test_mnist():     
    """Train and evaluate a small CNN on MNIST (or CIFAR-10), comparing a
    standard Conv2D layer against the custom adaptive Conv2DAdaptive
    ('acnn') layer.

    Behaviour is controlled by the local flags below (dset, test_acnn,
    residual, data_augmentation, plt).  Training uses the custom SGDwithLR
    optimizer with per-variable learning rates / momenta / decays and an
    optional kernel-visualisation pass; the final test loss and accuracy
    are printed at the end.
    """
    import os
    os.environ['CUDA_VISIBLE_DEVICES']="2"
    os.environ['TF_FORCE_GPU_ALLOW_GROWTH']="true"
    #os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 
    import keras
    from keras.datasets import mnist,cifar10
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten, Input, Add, ReLU
    from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D
    from keras.layers import BatchNormalization,Activation, Concatenate
    from keras.regularizers import l2,l1
    from keras.callbacks import LearningRateScheduler, ModelCheckpoint, TensorBoard
    from keras_preprocessing.image import ImageDataGenerator
    from keras.optimizers import SGD, Adadelta
    #from keras.initializers import glorot_uniform as w_ini
    from keras.initializers import he_uniform as w_ini
    from keras.initializers import VarianceScaling as VS_ini
    from keras import backend as K
    from keras_utils import RecordVariable, PrintLayerVariableStats, SGDwithLR
    
    #config = tf.ConfigProto()
    #config.gpu_options.allow_growth = True
    #config.gpu_options.per_process_gpu_memory_fraction = 0.1
   # Create a session with the above options specified.
    #K.tensorflow_backend.set_session(tf.Session(config=config))
    
    # Fixed seed for (partial) reproducibility across numpy and TF.
    sid = 9
    #sess = K.get_session()
    K.clear_session()
    #sess = tf.Session(graph=g)
    #K.set_session(sess)
    np.random.seed(sid)
    tf.random.set_random_seed(sid)
    tf.compat.v1.random.set_random_seed(sid)
    
    
    #dset='cifar10'
    
    # Experiment configuration flags.
    dset = 'mnist'
    batch_size = 512
    num_classes = 10
    epochs =200
    test_acnn = True        # True: use the adaptive Conv2DAdaptive layer; False: plain Conv2D
    regulazer = None
    prfw = 5                # kernel width of the two preliminary conv layers
    fw = 5                  # kernel width of the acnn / comparison conv layer
    residual = False        # add a residual skip connection around the acnn layer
    data_augmentation = False
    
    if dset=='mnist':
        # input image dimensions
        img_rows, img_cols = 28, 28  
        # the data, split between train and test sets
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        n_channels=1
    
    elif dset=='cifar10':    
        img_rows, img_cols = 32,32
        n_channels=3
        
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    
    
    # Reshape to the channel layout the backend expects.
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], n_channels, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], n_channels, img_rows, img_cols)
        input_shape = (n_channels, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, n_channels)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, n_channels)
        input_shape = (img_rows, img_cols, n_channels)
            
    
        
    # Scale to [0, 1] and subtract the per-pixel training mean from both splits.
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    trn_mn = np.mean(x_train, axis=0)
    x_train -= trn_mn
    x_test -= trn_mn
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    
    # Functional-API graph built as a list; network[-1] is always the last node.
    network=[]
    network.append(Input(shape=input_shape))

#    if test_acnn:
#        
#        prev_layer = network[-1]
#        
#        
#        conv_node = Conv2DAdaptive(rank=2,nfilters=32,kernel_size=(fw,fw), 
#                                data_format='channels_last',strides=1,
#                                padding='same',name='acnn-1', activation='linear',
#                                trainSigmas=True, trainWeights=True, 
#                                init_sigma=[0.15,1.0],
#                                gain = np.sqrt(1.0),
#                                kernel_regularizer=None,
#                                init_bias=initializers.Constant(0))(prev_layer)
#        if residual:
#            network.append(Add()([conv_node,prev_layer]))
#        else:
#            network.append(conv_node)
#        #, input_shape=input_shape))
#    else:
#        
#        network.append(Conv2D(24, (fw, fw), activation='linear', 
#                         kernel_initializer=w_ini(), 
#                         kernel_regularizer=None,
#                         padding='same')(network[-1]))
        
    # Two preliminary Conv-BN-ReLU-Dropout stages shared by both variants.
    network.append(Conv2D(32, kernel_size=(prfw, prfw),
                       activation='linear',padding='same', kernel_initializer=w_ini(),
                       kernel_regularizer=regulazer)(network[-1]))
    network.append(BatchNormalization()(network[-1]))
    network.append(Activation('relu')(network[-1]))
    network.append(Dropout(0.2)(network[-1]))
    network.append(Conv2D(32, (prfw, prfw), activation='linear', 
                     kernel_initializer=w_ini(), padding='same',
                     kernel_regularizer=regulazer)(network[-1]))
    #odel.add(MaxPooling2D(pool_size=(2, 2)))
    network.append(BatchNormalization()(network[-1]))
    network.append(Activation('relu')(network[-1]))
    
    network.append(Dropout(0.2)(network[-1]))
    
#    model.add(Conv2D(32, (3, 3), activation='linear', 
#                     kernel_initializer=w_ini(), padding='same',
#                     kernel_regularizer=regulazer))
#    #odel.add(MaxPooling2D(pool_size=(2, 2)))
#    model.add(BatchNormalization())
#    model.add(Activation('relu'))
#    
#    model.add(Dropout(0.2))
#    
    #odel.add(Dense(128, activation='relu'))
    
    #odel.add(Dropout(0.25))
    
      #=============================================================================
    # The layer under test: adaptive conv ('acnn-1') vs a plain Conv2D.
    nfilter= 32
    if test_acnn:
        
        prev_layer = network[-1]
        
        
        conv_node = Conv2DAdaptive(rank=2,nfilters=nfilter,kernel_size=(fw,fw), 
                                data_format='channels_last',strides=1,
                                padding='same',name='acnn-1', activation='linear',
                                trainSigmas=True, trainWeights=True, 
                                init_sigma=[0.25,0.75],
                                gain = 1.0,
                                kernel_regularizer=None,
                                init_bias=initializers.Constant(0),
                                norm=2)(prev_layer)
        if residual:
            network.append(Add()([conv_node,prev_layer]))
        else:
            network.append(conv_node)
        #, input_shape=input_shape))
    else:
        #fw = 7
        #v_ini = VS_ini(scale=0.25,mode='fan_in',distribution='uniform')
        network.append(Conv2D(nfilter, (fw, fw), activation='linear', 
                         kernel_initializer=w_ini(), 
                         kernel_regularizer=None,
                         padding='same')(network[-1]))
        #, input_shape=input_shape))
        
    # Classification head: BN-ReLU-pool-dropout, then a 128-unit dense layer.
    network.append(BatchNormalization()(network[-1]))
    network.append(ReLU()(network[-1]))
    #network.append(ReLU(negative_slope=0.01)(network[-1]))
    #network.append(Activation('selu'))
    network.append(MaxPooling2D(pool_size=(2,2))(network[-1]))
    print("MAY BE MAXPOOL LAYER IS AFFECTING SIGNAL ")
    network.append(Dropout(0.2)(network[-1]))
    #model.add(keras.layers.AlphaDropout(0.2))
    #network.append(GlobalAveragePooling2D()(network[-1]))
    network.append(Flatten()(network[-1]))
    network.append(Dense(units=128, activation='linear',
                   kernel_regularizer=regulazer)(network[-1]))
    network.append(BatchNormalization()(network[-1]))
    network.append(ReLU()(network[-1]))
    network.append(Dropout(0.2)(network[-1]))
    
    
    network.append(Dense(num_classes, activation='softmax',
                    kernel_regularizer=regulazer)(network[-1]))
    model = keras.models.Model(inputs=[network[0]], outputs=network[-1])
    model.summary()
    print("MAY BE MAXPOOL LAYER IS AFFECTING SIGNAL ")
    
    
    from lr_multiplier import LearningRateMultiplier
    #lr=0.001
    #multipliers = {'acnn-1/Sigma:0': 1.0,'acnn-1/Weights:0': 1000.0,
    #               'acnn-2/Sigma:0': 1.0,'acnn-2/Weights:0': 1000.0}
    #opt = LearningRateMultiplier(SGD, lr_multipliers=multipliers,
    #                             lr=lr, momentum=0.9,decay=0)
    
    #opt= SGD(lr=lr,momentum=0.9,decay=0,nesterov=False)
    '''lr_dict = {'all':0.01,'acnn-1/Sigma:0': 0.01,'acnn-1/Weights:0': 1.0,
                   'acnn-2/Sigma:0': 0.01,'acnn-2/Weights:0': 0.1}
    
    
    mom_dict = {'all':0.9,'acnn-1/Sigma:0': 0.5,'acnn-1/Weights:0': 0.9,
                   'acnn-2/Sigma:0': 0.9,'acnn-2/Weights:0': 0.9}
    
    
    decay_dict = {'all':0.95, 'acnn-1/Sigma:0': 0.05, 'acnn-1/Weights:0':0.95,
                  'acnn-1/Sigma:0': 0.05,'acnn-2/Weights:0': 0.95}

    clip_dict = {'acnn-1/Sigma:0':(0.05,1.0),'acnn-2/Sigma:0':(0.05,1.0)}
    '''
    # Per-variable hyperparameters for SGDwithLR, keyed by variable name.
    lr_dict = {'all':0.1,'acnn/Sigma:0': 0.1,'acnn/Weights:0': 0.1,
               'acnn-2/Sigma:0': 0.0001,'acnn-2/Weights:0': 0.01}
    
    mom_dict = {'all':0.9,'acnn/Sigma:0': 0.9,'acnn/Weights:0': 0.9,
                'acnn-2/Sigma:0': 0.9,'acnn-2/Weights:0': 0.9}
    clip_dict = {'acnn/Sigma:0': [0.1, 2.0]}
    
    # NOTE(review): these decay keys use 'focus-1/...' names while the layer
    # built above is named 'acnn-1' — confirm the keys match real variables,
    # otherwise only the 'all' entry takes effect.
    decay_dict = {}
    decay_dict.update({'focus-1'+'/Sigma:0':0.9})  #best results 0.5
    decay_dict.update({'focus-1'+'/Mu:0':0.9})
    decay_dict.update({'all':0.9})
    
    e_i = x_train.shape[0] // batch_size  # iterations (steps) per epoch
    decay_epochs =[e_i*10,e_i*30,e_i*60,e_i*90,e_i*100]
    
    opt = SGDwithLR(lr=lr_dict, momentum = mom_dict, decay=decay_dict, clips=clip_dict,
                    decay_epochs=decay_epochs,clipvalue=0.01)
        
    # NOTE(review): duplicate of the e_i computation above; harmless.
    e_i = x_train.shape[0] // batch_size
    
    
    #decay_epochs =np.array([e_i*10], dtype='int64') #for 20 epochs
    #decay_epochs =np.array([e_i*10,e_i*80,e_i*120,e_i*160], dtype='int64')
    
    #opt = SGDwithLR(lr_dict, mom_dict,decay_dict,clip_dict, decay_epochs)#, decay=None)
    #opt= Adadelta()
    #lr_scheduler = LearningRateScheduler(lr_schedule,lr)
    
    # Prepare model model saving directory.
    save_dir = os.path.join(os.getcwd(), 'saved_models')
    if test_acnn:
        model_name = '%s_acnn%dx_model.{epoch:03d}.h5'% (dset, fw)
    else:
        model_name = '%s_cnn%dx_model.{epoch:03d}.h5'% (dset, fw)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
        
    filepath = os.path.join(save_dir, model_name)
    print("Saving in ", filepath)

#    # Prepare callbacks for model saving and for learning rate adjustment.
#    checkpoint = ModelCheckpoint(filepath=filepath,
#                             monitor='val_acc',
#                             verbose=1,
#                             save_best_only=True)
    chkpt= keras.callbacks.ModelCheckpoint('best-model.h5', 
                                    monitor='val_acc', 
                                    verbose=1, 
                                    save_best_only=True, 
                                    save_weights_only=True, 
                                    mode='max', period=1)
    
    
    tb = TensorBoard(log_dir='./tb_logs/mnist/acnn-res-lr5',
                     histogram_freq = 1, 
                     write_grads=True,
                     write_graph=False)
    
    stat_func_name = ['max: ', 'mean: ', 'min: ', 'var: ', 'std: ']
    stat_func_list = [np.max, np.mean, np.min, np.var, np.std]
    callbacks = [tb]
    # NOTE(review): this reassignment discards the TensorBoard callback
    # registered on the previous line — confirm that is intentional.
    callbacks = []
    
    # Record/print weight statistics for the layer under test.
    if test_acnn:
        pr_1 = PrintLayerVariableStats("acnn-1","Weights:0",stat_func_list,stat_func_name)
        pr_2 = PrintLayerVariableStats("acnn-1","Sigma:0",stat_func_list,stat_func_name)
        rv_weights_1 = RecordVariable("acnn-1","Weights:0")
        rv_sigma_1 = RecordVariable("acnn-1","Sigma:0")
        callbacks+=[pr_1,pr_2,rv_weights_1,rv_sigma_1]
    else:
        pr_1 = PrintLayerVariableStats("conv2d_3","kernel:0",stat_func_list,stat_func_name)
        rv_weights_1 = RecordVariable("conv2d_3","kernel:0")
        callbacks+=[pr_1, rv_weights_1]
    pr_3 = PrintLayerVariableStats("conv2d_1","kernel:0",stat_func_list,stat_func_name)
    rv_kernel = RecordVariable("conv2d_1","kernel:0")
    callbacks+=[pr_3,rv_kernel]
    
    print("CALLBACKS:",callbacks)
    
    print("TRAINABLE WEIGHTS:",model.trainable_weights)
    
    print("WARNING by BTEK: if you see An operation has `None` for gradient. \
          Please make sure that all of your ops have a gradient defined \
          (i.e. are differentiable). Common ops without gradient: \
              K.argmax, K.round, K.eval. REMOVE TENSORBOARD CALLBACK OR EDIT IT!")
    
    
    #print(opt)
    #opt = SGD(lr=0.01,momentum=0.9)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])
    
    # NOTE(review): this flag shadows the matplotlib alias imported as `plt`
    # inside the blocks below; the inner import rebinds it when enabled.
    plt = False
    if plt and test_acnn:
        print("Plotting kernels before...")
        import matplotlib.pyplot as plt
        acnn_layer = model.get_layer('acnn-1')
        ws = acnn_layer.get_weights()
        print("Sigmas before",ws[0])
        u_func = K.function(inputs=[model.input], outputs=[acnn_layer.U()])
        output_func = K.function(inputs=[model.input], outputs=[acnn_layer.output])
    
        U_val=u_func([np.expand_dims(x_test[0], axis=0)])
        
        print("U shape", U_val[0].shape)
        print("U max:", np.max(U_val[0][:,:,:,:]))
        num_filt=min(U_val[0].shape[3],12)
        fig=plt.figure(figsize=(20,8))
        for i in range(num_filt):
            ax1=plt.subplot(1, num_filt, i+1)
            im = ax1.imshow(np.squeeze(U_val[0][:,:,0,i]))
        fig.colorbar(im, ax=ax1)
        
        plt.show(block=False)
        
        fig=plt.figure(figsize=(20,8))
        num_show = min(U_val[0].shape[3],12)
        indices = np.int32(np.linspace(0,U_val[0].shape[3]-1,num_show))
        for i in range(num_show):
            ax1=plt.subplot(1, num_filt, i+1)
            #print("U -shape: ", acnn_layer.U().shape,type(K.eval(acnn_layer.U()[:,:,0,i])))
            #print("Prod-shape", (ws[1][:,:,0,i]*acnn_layer.U()[:,:,0,i]).shape)
            plt.imshow(np.float32(ws[1][:,:,0,indices[i]]*
                                  K.eval(acnn_layer.U()[:,:,0,indices[i]])))
 
        plt.show(block=False)
        
    # Run training, with or without data augmentation.
    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(x_train, y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(x_test, y_test),
                  shuffle=True,
                  callbacks=callbacks,verbose=2)
    else:
        print('Using real-time data augmentation.')
        # This will do preprocessing and realtime data augmentation:
        datagen = ImageDataGenerator(
            # set input mean to 0 over the dataset
            featurewise_center=False,
            # set each sample mean to 0
            samplewise_center=False,
            # divide inputs by std of dataset
            featurewise_std_normalization=False,
            # divide each input by its std
            samplewise_std_normalization=False,
            # apply ZCA whitening
            zca_whitening=False,
            # epsilon for ZCA whitening
            zca_epsilon=1e-06,
            # randomly rotate images in the range (deg 0 to 180)
            rotation_range=0,
            # randomly shift images horizontally
            width_shift_range=0.1,
            # randomly shift images vertically
            height_shift_range=0.1,
            # set range for random shear
            shear_range=0.,
            # set range for random zoom
            zoom_range=0.,
            # set range for random channel shifts
            channel_shift_range=0.,
            # set mode for filling points outside the input boundaries
            fill_mode='nearest',
            # value used for fill_mode = "constant"
            cval=0.,
            # randomly flip images
            horizontal_flip=True,
            # randomly flip images
            vertical_flip=False,
            # set rescaling factor (applied before any other transformation)
            rescale=None,
            # set function that will be applied on each input
            preprocessing_function=None,
            # image data format, either "channels_first" or "channels_last"
            data_format='channels_last',
            # fraction of images reserved for validation (strictly between 0 and 1)
            validation_split=0.0)
    
        # Compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)
    
        # Fit the model on the batches generated by datagen.flow().
        model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                            validation_data=(x_test, y_test),
                            epochs=epochs, verbose=2, workers=4,
                            callbacks=callbacks, 
                            steps_per_epoch=x_train.shape[0]//batch_size)
    
    # Final held-out evaluation.
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    
    
    # Optional post-training visualisation of the adaptive kernels and
    # recorded variable histories (requires plt=True and test_acnn=True).
    if plt and test_acnn:
        print("Plotting kernels after ...")
        
        print("U max:", np.max(U_val[0][:,:,:,:]))
        import matplotlib.pyplot as plt
        ws = acnn_layer.get_weights()
        print("Sigmas after",ws[0])
        U_val=u_func([np.expand_dims(x_test[2], axis=0)])
        
        print("U shape", U_val[0].shape)
        num_filt=min(U_val[0].shape[3],12)
        
        indices = np.int32(np.linspace(0,U_val[0].shape[3]-1,num_filt))

        fig=plt.figure(figsize=(16,5))
        for i in range(num_filt):
            ax=plt.subplot(1, num_filt, i+1)
            kernel_u = U_val[0][:,:,0,indices[i]]
            im = ax.imshow(np.squeeze(kernel_u))
            print("kernel mean,var,max,min",np.mean(kernel_u),
                                           np.var(kernel_u),
                                           np.max(kernel_u), np.min(kernel_u))
        #fig.colorbar(im, ax=ax1)
        plt.show(block=False)
        
        
        print("outputs  ...")
        
        n=5
        
        out_val=output_func([np.expand_dims(x_test[5], axis=0)])
        print("Outputs shape", out_val[0].shape)
        num_filt=min(out_val[0].shape[3],12)
        
        indices = np.int32(np.linspace(0,out_val[0].shape[3]-1,num_filt))
        fig=plt.figure(figsize=(20,8))
        ax=plt.subplot(1, num_filt+1, 1)
        im = ax.imshow(np.squeeze(x_test[5]))
        print(y_test[5])
        print("input mean,var,max",np.mean(x_test[n]),np.var(x_test[n]),np.max(x_test[n]))
        for i in range(num_filt):
            ax=plt.subplot(1, num_filt+1, i+2)
            out_im = out_val[0][0,:,:,indices[i]]
            im = ax.imshow(np.squeeze(out_im))
            
            print("ouput mean,var,max",np.mean(out_im),
                                           np.var(out_im),
                                           np.max(out_im),np.min(out_im))
            #plt.colorbar(im,ax=ax)
        plt.show(block=False)
        
        print("Weights")
        fig=plt.figure(figsize=(20,8))
        num_show = min(U_val[0].shape[3],12)
        indices = np.int32(np.linspace(0,U_val[0].shape[3]-1,num_show))
        for i in range(num_show):
            ax1=plt.subplot(1, num_filt, i+1)
            #print("U -shape: ", acnn_layer.U().shape,type(K.eval(acnn_layer.U()[:,:,0,i])))
            #print("Prod-shape", (ws[1][:,:,0,i]*acnn_layer.U()[:,:,0,i]).shape)
            plt.imshow(np.float32(ws[1][:,:,0,indices[i]]),cmap='gray')
 
        plt.show(block=False)
        
        print("ACNN Filters after")
        fig=plt.figure(figsize=(20,8))
        num_show = min(U_val[0].shape[3],12)
        indices = np.int32(np.linspace(0,U_val[0].shape[3]-1,num_show))
        for i in range(num_show):
            ax1=plt.subplot(1, num_filt, i+1)
            #print("U -shape: ", acnn_layer.U().shape,type(K.eval(acnn_layer.U()[:,:,0,i])))
            #print("Prod-shape", (ws[1][:,:,0,i]*acnn_layer.U()[:,:,0,i]).shape)
            plt.imshow(np.float32(ws[1][:,:,0,indices[i]]*
                                  K.eval(acnn_layer.U()[:,:,0,indices[i]])),cmap='gray')
 
        plt.show(block=False)
        
        
        cnn_layer = model.get_layer('conv2d_1')
        wcnn = cnn_layer.get_weights()
        print("CNN Filters of", cnn_layer)
        fig=plt.figure(figsize=(20,8))
        num_show = min(wcnn[0].shape[3],12)
        indices = np.int32(np.linspace(0,wcnn[0].shape[3]-1,num_show))
        for i in range(num_show):
            ax1=plt.subplot(1, num_filt, i+1)
            #print("U -shape: ", acnn_layer.U().shape,type(K.eval(acnn_layer.U()[:,:,0,i])))
            #print("Prod-shape", (ws[1][:,:,0,i]*acnn_layer.U()[:,:,0,i]).shape)
            plt.imshow(np.float32(wcnn[0][:,:,0,indices[i]]),cmap='gray')
 
        plt.show(block=False)
        
        
        rv_sigma_arr = np.array(rv_sigma_1.record)
        fig=plt.figure(figsize=(4,8))
        plt.plot(rv_sigma_arr)
        plt.title('Sigma')
        plt.show(block=False)
        
        rv_weights_arr = np.array(rv_weights_1.record)
        rv_weights_arr2d = np.reshape(rv_weights_arr,
                            (rv_weights_arr.shape[0],
                             np.prod(rv_weights_arr.shape[1:])))
        print(rv_weights_arr.shape)
        fig=plt.figure(figsize=(4,8))
        klist=[1,1,5,9,12,15,18,25,32,132,1132]
        for i in klist:
            plt.plot(rv_weights_arr2d[:,i])
        plt.title('weights-acnn')
        plt.show(block=False)
        
         
        
        rv_kernel_arr = np.array(rv_kernel.record)
        rv_kernel_arr2d = np.reshape(rv_kernel_arr,
                            (rv_kernel_arr.shape[0],
                             np.prod(rv_kernel_arr.shape[1:])))
        print(rv_kernel_arr.shape)
        fig=plt.figure(figsize=(4,8))
        klist=[1,1,5,9,12,15,18,25,32]
        for i in klist:
            plt.plot(rv_kernel_arr2d[:,i])
        plt.title('weights-conv2d-1')
        plt.show(block=False)
Exemple #20
0
# xtas, ytas = [], []
# for i in range(X_train.shape[0]):
#     num_aug = 0
#     x = X_train[i] # (32, 32, )
#     x = x.reshape((1, ) + x.shape) # (1, 32, 32, 3)
#     for x_aug in datagen.flow(x, batch_size=1,
#                               save_to_dir='preview',
#                               save_prefix='cifar',
#                               save_format='jpeg'):
#         if num_aug >= NUM_TO_AUGMENT:
#             break
#         xtas.append(x_aug[0])
#         num_aug += 1

# fit the dataset
datagen.fit(X_train)

# train

model = keras.models.Sequential([
    keras.layers.Conv2D(32, (3, 3), padding='SAME',
                        activation='relu',
                        input_shape=(IMG_ROWS, IMG_COLS, IMG_CHANNELS)),
    keras.layers.Conv2D(32, (3, 3), padding='SAME',
                        activation='relu'),
    keras.layers.MaxPool2D(pool_size=(2, 2)),
    keras.layers.Dropout(0.25),
    keras.layers.Conv2D(64, (3, 3), padding='SAME',
                        activation='relu'),
    keras.layers.Conv2D(64, 3, 3, activation='relu'),
    keras.layers.MaxPool2D(pool_size=(2, 2)),
# Offline augmentation script: read every image under "picture/" and write
# augmented variants into "augmentation_pic/".
from keras_preprocessing.image import ImageDataGenerator, img_to_array, load_img
import os

# Moderate geometric augmentation; no flips.
datagen = ImageDataGenerator(rotation_range=20,
                             width_shift_range=0.15,
                             height_shift_range=0.15,
                             zoom_range=0.15,
                             shear_range=0.2,
                             horizontal_flip=False,
                             fill_mode='nearest')
dirs = os.listdir("picture")
print("文件总数%d" % len(dirs))  # prints the total number of files found
for filename in dirs:
    img = load_img("picture//{}".format(filename))
    x = img_to_array(img)
    # Add a leading batch axis: (H, W, C) -> (1, H, W, C), as flow() expects.
    x = x.reshape((1, ) + x.shape)
    # NOTE(review): fit() only computes featurewise statistics, none of which
    # are enabled above — this per-image call is presumably a no-op; confirm.
    datagen.fit(x)
    prefix = filename.split('.')[0]
    print(prefix)
    counter = 0
    # Each yielded batch writes augmented copies to disk; stop after ~400.
    for batch in datagen.flow(x,
                              batch_size=4,
                              save_to_dir="augmentation_pic",
                              save_prefix=prefix,
                              save_format='jpg'):
        print("生成图片增强%s第%d张" % (filename, counter))  # progress per image
        counter += 1
        if counter > 400:
            break
def augm_gen(data):
    """Return an ImageDataGenerator fitted on *data*.

    The generator applies light augmentation: rotations of up to 5 degrees,
    random horizontal and vertical flips, and up to 10% zoom.
    """
    generator = ImageDataGenerator(
        rotation_range=5,
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=0.1,
    )
    generator.fit(data)
    return generator
# Load the test split, then convert all four arrays to TensorFlow tensors.
x_test,y_test=generateds(kinds, test_path)
x_train=tf.convert_to_tensor(x_train)
x_test=tf.convert_to_tensor(x_test)
y_train = tf.convert_to_tensor(y_train)
y_test = tf.convert_to_tensor(y_test)

# Data augmentation
image_gen_train = ImageDataGenerator(
                                     rescale=1./255,# rescale pixel values to the 0~1 range
                                     rotation_range=45,# random rotation up to 45 degrees
                                     width_shift_range=.15,# random horizontal shift
                                     height_shift_range=.15,# random vertical shift
                                     horizontal_flip=True,# random horizontal flip
                                     zoom_range=0.5# randomly zoom up to 50%
                                     )
# NOTE(review): fit() normally takes a NumPy array, but x_train was converted
# to a tf tensor above — confirm this works with the installed Keras version.
image_gen_train.fit(x_train)


model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(input_shape=(32, 32, 3),filters=32,kernel_size=(5, 5),padding='same'),  # 卷积层
    tf.keras.layers.BatchNormalization(),  # BN层
    tf.keras.layers.Activation('relu'),  # 激活层
    tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2, padding='same'),  # 池化层
    tf.keras.layers.Dropout(0.2),  # dropout层

    tf.keras.layers.Conv2D(64, kernel_size=(5,5), padding='same'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=2, padding='same'),
    tf.keras.layers.Dropout(0.2),
# Add the trailing channel axis expected by Conv2D: (N, H, W) -> (N, H, W, 1).
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
x_validation = x_validation.reshape(x_validation.shape[0],
                                    x_validation.shape[1],
                                    x_validation.shape[2], 1)

print(x_train.shape)

# Augmenting the images (zooming, rotating, etc.) to make the dataset more generic
dataGen = ImageDataGenerator(width_shift_range=0.1,
                             height_shift_range=0.1,
                             zoom_range=0.2,
                             shear_range=0.1,
                             rotation_range=10)

# Let the generator compute any required statistics before training.
dataGen.fit(x_train)

# One-hot encode the label vectors for all three splits.
y_train = to_categorical(y_train, noOfClasses)
y_test = to_categorical(y_test, noOfClasses)
y_validation = to_categorical(y_validation, noOfClasses)


#Creating our model
#### CREATING THE MODEL
def myModel():
    noOfFilters = 60
    sizeOfFilter1 = (5, 5)
    sizeOfFilter2 = (3, 3)
    sizeOfPool = (2, 2)
    noOfNodes = 500
Exemple #25
0
def runCNNconfusion(a, b, c, d, e, f, g, h):
    """Train a stacked-conv CNN with augmented data and report its results.

    Architecture: In -> [[Conv2D->relu]*2 -> MaxPool2D -> BN -> Dropout]*3
                  -> Flatten -> Dense -> Dropout -> Out

    Args:
        a, b: training images and one-hot training labels.
        c, d: validation images and one-hot validation labels.
        e: class-weight mapping passed to training.
        f: number of output classes.
        g: number of training epochs.
        h: stride of the first convolution layer.

    Side effects: shows learning-curve and confusion-matrix plots and prints
    the accuracy plus a per-class classification report.
    """
    batch_size = 128  # NOTE(review): unused — datagen.flow below hard-codes 32
    num_classes = f
    epochs = g
    input_shape = (128, 128, 3)

    model = Sequential()
    # Three conv blocks with widths 32, 128 and 86; only the very first conv
    # carries the input shape and the caller-supplied stride.
    is_first_conv = True
    for width in (32, 128, 86):
        for _ in range(2):
            if is_first_conv:
                model.add(Conv2D(filters=width,
                                 kernel_size=(3, 3),
                                 padding='Same',
                                 activation='relu',
                                 input_shape=input_shape,
                                 strides=h))
                is_first_conv = False
            else:
                model.add(Conv2D(filters=width,
                                 kernel_size=(3, 3),
                                 padding='Same',
                                 activation='relu'))
        model.add(MaxPool2D(pool_size=(2, 2)))
        model.add(BatchNormalization())
        model.add(Dropout(0.25))

    # Classification head.
    model.add(Flatten())
    model.add(Dense(512, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation="softmax"))

    model.compile(optimizer=Adagrad(),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    # Real-time augmentation; only per-sample centering is enabled among the
    # normalization options.
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=True,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=40,
        width_shift_range=0.4,
        height_shift_range=0.4,
        horizontal_flip=True,
        vertical_flip=False)
    datagen.fit(a)

    history = model.fit_generator(datagen.flow(a, b, batch_size=32),
                                  steps_per_epoch=len(a) / 32,
                                  epochs=epochs,
                                  class_weight=e,
                                  validation_data=[c, d],
                                  callbacks=[MetricsCheckpoint('logs')])

    # Evaluate on the held-out split and render diagnostics.
    score = model.evaluate(c, d, verbose=0)
    plot_learning_curve(history)
    plt.show()
    plotKerasLearningCurve()
    plt.show()
    print('\nKeras CNN #2B - accuracy:', score[1], '\n')

    predictions = model.predict(c)
    print('\n',
          sklearn.metrics.classification_report(
              np.where(d > 0)[1],
              np.argmax(predictions, axis=1),
              target_names=list(dict_characters.values())),
          sep='')

    predicted_classes = np.argmax(predictions, axis=1)
    true_classes = np.argmax(d, axis=1)
    plot_confusion_matrix(confusion_matrix(true_classes, predicted_classes),
                          classes=list(dict_characters.values()))
    plt.show()
Exemple #26
0
    from keras.optimizers import Adam

    adam = Adam(lr=0.0001)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

    train_datagen = ImageDataGenerator(
        rotation_range=40,
        shear_range=0.2,
        zoom_range=0.2,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')

    train_datagen.fit(train_image)

    train_generator = train_datagen.flow(
        train_image,
        train_label,
        batch_size=16)

    test_datagen = ImageDataGenerator()

    test_datagen.fit(test_image)

    validation_generator = test_datagen.flow(
        test_image, test_label,
        batch_size=16)

    model.fit_generator(
# Add the channel axis expected by Conv2D: (N, 28, 28) -> (N, 28, 28, 1).
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

input_shape = (28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')

# Light augmentation: small rotations plus horizontal/vertical shifts.
image_gen = ImageDataGenerator(
    rotation_range=15,
    width_shift_range=.25,
    height_shift_range=.2,
)

# training the image preprocessing
# NOTE(review): fitting the generator on the *test* set is unusual, and fit()
# only computes featurewise statistics (none enabled here) — confirm these two
# calls are intentional.
image_gen.fit(x_train, augment=True)
image_gen.fit(x_test, augment=True)

# Scale pixel values to [0, 1] (note: done after the generator was fitted).
x_train, x_test = x_train / 255.0, x_test / 255.0


def makeModel():
    """Assemble a small CNN for (28, 28, 1) inputs.

    Two identical conv/pool stages (64 3x3 'same' filters, 2x2 max-pool)
    followed by a flatten and a 256-unit ReLU dense layer.

    NOTE(review): the snippet appears truncated here — no output layer,
    compile step, or return statement is visible, so callers receive None.
    """
    net = Sequential()
    # Two conv/pool stages; only the first layer declares the input shape.
    for stage in range(2):
        conv_kwargs = {'kernel_size': (3, 3), 'padding': 'same'}
        if stage == 0:
            conv_kwargs['input_shape'] = (28, 28, 1)
        net.add(Conv2D(64, **conv_kwargs))
        net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Flatten())
    net.add(Dense(256, activation=tf.nn.relu))
# Number of epochs
epoch = 50  # 500 for augmentation
batch_size = 32  #32 used by authors

# Load pre-split train/validation arrays — presumably `L` is a
# project-local loader module; verify what getArrays() returns.
x_train, y_train, x_val, y_val = L.getArrays()
x_train = numpy.array(x_train)
y_train = numpy.array(y_train)
x_val = numpy.array(x_val)
y_val = numpy.array(y_val)

# Geometric augmentation for the training data.
# NOTE(review): fit() only matters for featurewise/zca options, none of
# which are enabled here, so this call computes nothing that flow() uses.
datagen = ImageDataGenerator(rotation_range=20,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             horizontal_flip=True)
datagen.fit(x_train)

# NOTE(review): despite the name, this is an ImageDataGenerator (not a
# flow), and it applies the SAME random augmentation to validation data —
# validation metrics will be computed on augmented images; confirm intent.
validation_generator = ImageDataGenerator(rotation_range=20,
                                          width_shift_range=0.2,
                                          height_shift_range=0.2,
                                          horizontal_flip=True)
validation_generator.fit(x_val)

# To stop if sufficiently trained
es = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1)
# Checkpoint on val_loss; without save_best_only=True this saves (and
# overwrites) the model after every epoch — TODO confirm that is intended.
mc = keras.callbacks.ModelCheckpoint('best_model' + suffix + '.h5',
                                     monitor='val_loss',
                                     mode='min')

# NOTE(review): only the checkpoint is registered — the EarlyStopping
# callback `es` defined above is never added to this list.
cb = [mc]
def main():
    """Train an AlexNet-style CNN on the FIDS30 fruit-image data set.

    Loads every ``*/*.jpg`` under ``--dir`` (folder name = class label),
    resizes images to the global ``image_size``, one-hot encodes labels,
    augments/normalizes via ImageDataGenerator, and trains while logging
    to TensorBoard under ``--log_dir``.

    Relies on module-level globals: ``image_size``, ``batch_size``, ``NAME``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir',
                        default='../data/FIDS30/',
                        help='Root folder for the (unprocessed) data set.')
    parser.add_argument(
        '--log_dir',
        default=
        'C:\\Users\\Patrick\\Documents\\TU\\2019S\\ML\\ML_Exercise3\\ML_Exercise3',
        help='Root folder for TensorBoard logging.')
    # Fix: parse the command line once (the original called parse_args()
    # twice) and avoid shadowing the builtin `dir`.
    args = parser.parse_args()
    log_dir = args.log_dir

    # NOTE: chdir is a process-wide side effect; the glob below (and any
    # relative log_dir) is resolved against --dir.
    os.chdir(args.dir)
    fileNames = glob.glob("*/*.jpg")
    targetLabels = []
    imageList = []
    for fileName in fileNames:
        # The text before the first path separator is the class folder name.
        pathSepIndex = fileName.index(os.path.sep)
        targetLabels.append(fileName[:pathSepIndex])
        image = cv2.resize(np.array(Image.open(fileName)), image_size)
        imageList.append(np.array(image))

    # Drop the one image whose shape contains a 4 (an RGBA image that would
    # break stacking into a uniform (N, h, w, 3) array below).
    # NOTE(review): this assumes exactly one such image exists and that all
    # shape tuples have equal length (otherwise the == comparison is on a
    # ragged object array) — confirm against the data set.
    toDelete = np.where(np.array([x.shape for x in imageList]) == 4)[0][0]
    del imageList[toDelete]
    imageArr = np.array(imageList)

    # Encode folder names as integer classes, then one-hot vectors.
    le = preprocessing.LabelEncoder()
    le.fit(targetLabels)
    target = le.transform(targetLabels)
    target = np.delete(target, toDelete, 0)  # keep labels aligned with images
    target_C = to_categorical(target)

    X_train, X_test, y_train, y_test = train_test_split(imageArr,
                                                        target_C,
                                                        random_state=42)

    # Training pipeline: pixel scaling + featurewise normalization
    # (statistics fitted on X_train) + light augmentation.
    datagen_train = ImageDataGenerator(
        rescale=1. / 255,
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=10,
        #width_shift_range=0.1,
        #height_shift_range=0.1,
        #shear_range=0.1,
        #zoom_range=0.1,
        horizontal_flip=True,
        #vertical_flip=True
    )
    datagen_train.fit(X_train)
    generator_train = datagen_train.flow(X_train,
                                         y_train,
                                         batch_size=batch_size)

    # Test pipeline: no augmentation, but the SAME normalization statistics
    # (fitted on X_train) so train and test inputs are comparable.
    datagen_test = ImageDataGenerator(rescale=1. / 255,
                                      featurewise_center=True,
                                      featurewise_std_normalization=True)
    datagen_test.fit(X_train)
    generator_test = datagen_test.flow(X_test, y_test, batch_size=batch_size)

    model = _build_alexnet()

    model.summary()

    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    now = time.strftime("%b%d_%H-%M")
    # fit_generator is deprecated in newer Keras (Model.fit accepts
    # generators directly) but kept for compatibility with this code base.
    model.fit_generator(generator_train,
                        steps_per_epoch=512 // batch_size,
                        epochs=50,
                        validation_data=generator_test,
                        validation_steps=500 // batch_size,
                        callbacks=[
                            TensorBoard(histogram_freq=0,
                                        log_dir=os.path.join(
                                            log_dir, 'logs', now + '-' + NAME),
                                        write_graph=True)
                        ])


def _build_alexnet():
    """Build the AlexNet-style Sequential model: five conv layers with
    interleaved max-pooling, three dense layers, 30-way softmax output."""
    model = Sequential()

    # 1st convolutional block (large 11x11 receptive field, stride 4)
    model.add(
        Conv2D(filters=96,
               input_shape=(224, 224, 3),
               kernel_size=(11, 11),
               strides=(4, 4),
               padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # 2nd convolutional block
    model.add(
        Conv2D(filters=256,
               kernel_size=(11, 11),
               strides=(1, 1),
               padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # 3rd-5th convolutional layers (no pooling between them)
    model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid'))
    model.add(Activation('relu'))
    model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid'))
    model.add(Activation('relu'))
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # Fully connected head
    model.add(Flatten())
    model.add(Dense(4096, input_shape=(224 * 224 * 3, )))
    model.add(Activation('relu'))
    model.add(Dense(4096))
    model.add(Activation('relu'))
    model.add(Dense(1000))
    model.add(Activation('relu'))

    # Output layer: one unit per class (30 fruit classes)
    model.add(Dense(30))
    model.add(Activation('softmax'))

    return model
print("Augmentation process begins ...")

# Two generators built from identical arguments — the standard Keras recipe
# for keeping image and mask augmentation in lockstep.
# NOTE(review): rescale=0.5 is unusual (images are typically scaled by
# 1./255), and applying the same rescale/normalization to masks is usually
# unwanted — confirm intent.
data_gen_args = dict(featurewise_center=True,
                     rescale=0.5,
                     samplewise_center=True,
                     featurewise_std_normalization=True,
                     horizontal_flip=True)

image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)

# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(all_images, augment=True, seed=seed)
# mask_datagen.fit(masks, augment=True, seed=seed)
# NOTE(review): the mask generator is never fitted or flowed below, so
# masks are not actually augmented anywhere in this snippet.

print("*************")

# Flow augmented batches from the in-memory images, saving every generated
# batch to a hard-coded absolute Windows path.
image_gen = image_datagen.flow(all_images, save_to_dir="C:/Users/nana/PycharmProjects/Lane_detection/data/augmented/")

# Materialize a single augmented batch.  The generator yields centered /
# std-normalized floats, so casting straight to uint8 can wrap or truncate
# values — TODO confirm this is intended.
aug = np.array([next(image_gen).astype(np.uint8) for i in range(1)])

# Also stream augmented images straight from a directory, unlabelled
# (class_mode=None), saving results under data/augmented/.
image_generator = image_datagen.flow_from_directory(
    "data/input/",
    target_size=(960, 540),
    class_mode=None,
    seed=seed,
    save_to_dir="data/augmented/")