Example #1
def image_process(batch_size, train_dir, test_dir, image_length, image_width):
    datagen_train = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=180,
                                       width_shift_range=0.1,
                                       height_shift_range=0.1,
                                       shear_range=0.1,
                                       zoom_range=[0.9, 1.5],
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       fill_mode='nearest')
    datagen_test = ImageDataGenerator(rescale=1. / 255,
                                      rotation_range=180,
                                      width_shift_range=0.1,
                                      height_shift_range=0.1,
                                      shear_range=0.1,
                                      zoom_range=[0.9, 1.5],
                                      horizontal_flip=True,
                                      vertical_flip=True,
                                      fill_mode='nearest')
    generator_train = datagen_train.flow_from_directory(
        directory=train_dir,
        target_size=(image_length, image_width),
        batch_size=batch_size,
        shuffle=True,
        class_mode='categorical')

    generator_test = datagen_test.flow_from_directory(
        directory=test_dir,
        target_size=(image_length, image_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True)
    cls_train = generator_train.classes        # per-sample class index for each training image
    cls_test = generator_test.class_indices    # mapping from class name to class index
    print(cls_test, cls_train)
    return generator_train, generator_test
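Below is a minimal call sketch for image_process, assuming the usual one-sub-folder-per-class directory layout; the paths, batch size, and image dimensions are placeholders rather than values from the original code.

train_gen, test_gen = image_process(batch_size=32,
                                    train_dir='data/train',     # placeholder path
                                    test_dir='data/test',       # placeholder path
                                    image_length=224,
                                    image_width=224)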
def data_generator_with_augmentation():
    """
    Create an ImageDataGenerator configured for image augmentation.
    """

    return ImageDataGenerator(
        rescale=1.0 / 255,
        rotation_range=40,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.2,
        shear_range=0.2,
        horizontal_flip=True,
        fill_mode="nearest",
    )
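A hypothetical way to attach the returned generator to a class-per-folder dataset; the directory, target size, and batch size are illustrative assumptions, not part of the original snippet.

train_datagen = data_generator_with_augmentation()
train_flow = train_datagen.flow_from_directory('data/train',   # placeholder path
                                               target_size=(150, 150),
                                               batch_size=32,
                                               class_mode='categorical')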
Example #3
def main():
    tf.random.set_seed(42)

    total_depth = 36
    n_blocks = 3
    basic_block_count = total_depth // n_blocks

    # region Model
    input_layer = Input(shape=[32, 32, 3])
    layer = input_layer

    for k in range(n_blocks):
        strides = 2 if k < (n_blocks - 1) else 1
        layer = ResBlock2D(filters=16 * (2 ** k), basic_block_count=basic_block_count, strides=strides)(layer)

        if k == (n_blocks - 1):
            layer = AveragePooling2D(pool_size=8)(layer)

    layer = Flatten()(layer)
    layer = Dense(units=10, activation="softmax")(layer)
    model = Model(inputs=input_layer, outputs=layer)
    model.summary()

    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["acc"])
    # endregion

    # region Data
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.astype(np.float32) / 255.0
    x_test = x_test.astype(np.float32) / 255.0

    y_train = to_categorical(y_train, num_classes=10)
    y_test = to_categorical(y_test, num_classes=10)

    generator = ImageDataGenerator(rotation_range=15,
                                   width_shift_range=5. / 32,
                                   height_shift_range=5. / 32,
                                   horizontal_flip=True)
    generator.fit(x_train)
    # endregion

    log_dir = "../logs/tests/res_block_cifar10/{}".format(int(time()))
    log_dir = os.path.normpath(log_dir)
    tensorboard = TensorBoard(log_dir=log_dir, profile_batch=0)

    model.fit_generator(generator.flow(x_train, y_train, batch_size=64),
                        steps_per_epoch=100, epochs=300, validation_data=(x_test, y_test),
                        validation_steps=100, verbose=1, callbacks=[tensorboard])
Example #4
def run_model():
    train = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True, shear_range=0.2, zoom_range=0.2,
                               width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest', validation_split=0.2)

    img_size = 128
    batch_size = 20
    t_steps = 3462 / batch_size
    v_steps = 861 / batch_size
    classes = 5
    flower_path = "C:\\Users\\Yaniv\\Desktop\\flowerClassification-master\\flowers"
    train_gen = train.flow_from_directory(flower_path, target_size=(img_size, img_size), batch_size=batch_size,
                                          class_mode='categorical', subset='training')
    valid_gen = train.flow_from_directory(flower_path, target_size=(img_size, img_size), batch_size=batch_size,
                                          class_mode='categorical', subset='validation')

    model = Sequential()

    # layer number 1
    model.add(Conv2D(25, kernel_size=(5, 5), activation='relu', input_shape=(128, 128, 3)))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # layer number 2
    model.add(Conv2D(50, kernel_size=(5, 5), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # layer number 3
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, kernel_size=(5, 5), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # layer number 4
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())

    # layer number 6
    model.add(Dense(256, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model_hist = model.fit_generator(train_gen, steps_per_epoch=t_steps, epochs=30, validation_data=valid_gen,
                                     validation_steps=v_steps)
    plt_modle(model_hist)
    model.save('flowers_model.h5')
Example #5
    def __init__(self, X, y, batch_size, shuffle=True, crop_size=128, val=False):

        self.X = X
        self.y = y
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.crop_size = crop_size
        self.val = val

        if not self.val:
            self.statistics = self.extract_statistics(self.X)

        self.augmenter = ImageDataGenerator(horizontal_flip=True)
        self.indexes = np.arange(len(self.X), dtype=int)

        self.on_epoch_end()
Example #6
def classify(model, img_dir):
    X, all_files = load_data_to_test(img_dir)
    total = len(all_files)
    ans = {}
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    test_generator = test_datagen.flow(X,
                                       batch_size=IMG_BATCHS_NUMBER,
                                       shuffle=False,  # keep prediction order aligned with all_files
                                       save_to_dir=None)
    pred = model.predict_generator(test_generator,
                                   steps=total // IMG_BATCHS_NUMBER)
    for i in range(pred.shape[0]):
        print(pred[i])
        ans[all_files[i]] = np.argmax(pred[i])
    print(ans)
    return ans
 def __init__(self):
     # Configuration: Rescale RGB values of testing data
     self.test_gen = ImageDataGenerator(rescale=1.0 / 255,
                                        data_format='channels_last')
     # dictionary mapping class indices to label names
     self.class_dictionary = {
         0: "Broccoli",
         1: "Cabbage",
         2: "Onion",
         3: "Spinach",
         4: "Strawberry",
         5: "Tomato"
     }
     # load saved model
     self.restoredModel = models.load_model(saved_direc)
     print("Loaded model from disk")
Example #8
def tta(image, model, model_output='regression'):
    datagen = ImageDataGenerator()
    all_images = np.expand_dims(image, 0)
    hori_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"flip_horizontal": True}),
                                axis=0)
    vert_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"flip_vertical": True}), axis=0)
    rotated_image = np.expand_dims(datagen.apply_transform(x=image, transform_parameters={"theta": 15}), axis=0)
    all_images = np.append(all_images, hori_image, axis=0)
    all_images = np.append(all_images, vert_image, axis=0)
    all_images = np.append(all_images, rotated_image, axis=0)
    prediction = model.predict(all_images)
    if model_output == 'regression':
        return np.mean(prediction)
    else:
        prediction = np.sum(prediction, axis=0)
        return np.argmax(prediction)
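A hypothetical call to the test-time-augmentation helper above; model stands for any trained Keras model and img for a single H x W x C image array, neither of which is defined in the original snippet.

img = np.random.rand(224, 224, 3).astype(np.float32)  # stand-in for a real image
# 'model' is assumed to be a trained Keras model loaded elsewhere
score = tta(img, model, model_output='regression')    # mean prediction over the four views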
Example #9
def preprocess(train_path, test_path):
    df = pd.read_csv(train_path + 'labels.csv', sep='\t')
    df = df.drop(columns=[df.columns[0]]).drop(columns=[df.columns[1]])
    df['face_shape'] = df['face_shape'].apply(str)

    df2 = pd.read_csv(test_path + 'labels.csv', sep='\t')
    df2 = df2.drop(columns=[df2.columns[0]]).drop(columns=[df2.columns[1]])
    df2['face_shape'] = df2['face_shape'].apply(str)

    # shuffle the training dataframe and split it 90/10
    training, testing = np.split(df.sample(frac=1), [int(0.9 * len(df))])

    img = (train_path + 'img')
    img2 = (test_path + 'img')

    # set up data generator
    data_generator = ImageDataGenerator(rescale=1. / 255.,
                                        validation_split=0.2,
                                        horizontal_flip=True,
                                        vertical_flip=True)

    # Get batches of training dataset from the dataframe
    print("Training Dataset Preparation: ")
    train_generator = data_generator.flow_from_dataframe(
        dataframe=training,
        directory=img,
        x_col="file_name",
        y_col="face_shape",
        class_mode='categorical',
        target_size=(64, 64),
        batch_size=128,
        subset='training')

    # Get batches of validation dataset from the dataframe
    print("\nValidation Dataset Preparation: ")
    validation_generator = data_generator.flow_from_dataframe(
        dataframe=training,
        directory=img,
        x_col="file_name",
        y_col="face_shape",
        class_mode='categorical',
        target_size=(64, 64),
        batch_size=128,
        subset='validation')

    return train_generator, validation_generator, data_generator, df2, img2
Example #10
def train_model(trainFile, testFile):
    train = pd.read_csv(trainFile)

    f, ax = plt.subplots(5, 5)
    for i in range(1, 26):
        data = train.iloc[i, 1:785].values
        nrows, ncols = 28, 28
        grid = data.reshape((nrows, ncols))
        n = math.ceil(i / 5) - 1
        m = [0, 1, 2, 3, 4] * 5
        ax[m[i - 1], n].imshow(grid)

    dataTest = pd.read_csv(testFile)
    trainNumbers = train['label']
    train = train.drop(labels=['label'], axis=1)
    trainNumbers = to_categorical(trainNumbers, num_classes=10)

    train = train / 255
    test = dataTest / 255
    train = train.values.reshape(-1, 28, 28, 1)
    test = test.values.reshape(-1, 28, 28, 1)

    model = Sequential()
    model.add(Conv2D(24, (3, 3), padding='same', input_shape=(28, 28, 1)))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=5,
        zoom_range=0.1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=False,
        vertical_flip=False)

    datagen.fit(train)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(train, trainNumbers, epochs=5, batch_size=120)
    return model
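A hypothetical follow-up showing how the returned model could score the test CSV; the file names are placeholders and the reshaping mirrors what train_model does internally.

model = train_model('train.csv', 'test.csv')                     # placeholder file names
test = pd.read_csv('test.csv').values.reshape(-1, 28, 28, 1) / 255
predictions = np.argmax(model.predict(test), axis=1)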
def fit_and_evaluate(X_train, y_train):
    model = None
    gc.collect()
    model = createModel(X_train)
    batch_size = 32
    epochs = 30
    gc.collect()
    datagen = ImageDataGenerator(zoom_range=0.2, horizontal_flip=False)
    datagen.fit(X_train)
    gc.collect()
    train_x, val_x, train_y, val_y = train_test_split(X_train, y_train, test_size=0.1,
                                                      random_state=np.random.randint(1, 1000, 1)[0])
    plotCategories(train_y, val_y)
    results = model.fit_generator(datagen.flow(train_x, train_y, batch_size=batch_size),
                                  epochs=epochs, steps_per_epoch=X_train.shape[0] // batch_size,
                                  callbacks=[early_stopping, model_checkpoint],
                                  verbose=1, validation_data=(val_x, val_y))
    gc.collect()
    print("Val Score: ", model.evaluate(val_x, val_y))
    return
Example #12
def train_model(model, x, y, task):
    date = datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
    checkpoints_path = f'/opt/ml/model/checkpoints/{task}/{date}'
    checkpoints_file_format = 'checkpoints.{epoch:02d}-{val_loss:.2f}.hdf5'
    checkpoint_callback = ModelCheckpoint(
        posixpath.join(checkpoints_path, checkpoints_file_format))

    L = len(x)
    split_ratio = 0.8
    train_indexes = random.sample(range(L), int(L * split_ratio))
    validation_indexes = list(set(range(L)) - set(train_indexes))

    x_train = x[train_indexes]
    y_train = y[train_indexes]
    x_val = x[validation_indexes]
    y_val = y[validation_indexes]

    generator = ImageDataGenerator(rotation_range=30,
                                   zoom_range=0.20,
                                   fill_mode="nearest",
                                   shear_range=0.20,
                                   horizontal_flip=False,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2)

    lr_schedule = ExponentialDecay(initial_learning_rate=0.01,
                                   decay_steps=100,
                                   decay_rate=0.9,
                                   staircase=False)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate=lr_schedule),
                  metrics=['accuracy'])

    model.fit(generator.flow(x_train, y_train, batch_size=32),
              steps_per_epoch=len(x_train) // 32,
              verbose=2,
              epochs=500,
              shuffle=True,
              validation_data=(x_val, y_val),
              callbacks=[
                  WandbCallback(), checkpoint_callback,
                  LearningRateCallback(model)
              ])
    model.save('/opt/ml/model')
Example #13
    def _datagen(self):
        """
        Image data augmentation.
        """
        datagen = ImageDataGenerator(
            rotation_range=10,
            width_shift_range=0.05,
            height_shift_range=0.05,
            shear_range=0.05,
            zoom_range=0.05,
            horizontal_flip=True,
        )
        datagen.fit(self.DATASET.train_x)
        return datagen.flow(
            self.DATASET.train_x,
            self.DATASET.train_y,
            batch_size=self.BATCH_SIZE,
            shuffle=True)
Example #14
def train_model(model):

    data_generator_with_aug = ImageDataGenerator(preprocessing_function=preprocess_input,
                                width_shift_range=0.1,
                                height_shift_range=0.1,
                                # shear_range=0.01,
                                zoom_range=[0.9, 1.25],
                                horizontal_flip=True,
                                vertical_flip=False,
                                data_format='channels_last',
                                brightness_range=[0.5, 1.5]
                               )
                                       
    train_generator = data_generator_with_aug.flow_from_directory(
            '../input/fire-detection-from-cctv/data/data/img_data/train',
            target_size=(IMG_SIZE, IMG_SIZE),
            batch_size=TRAIN_BATCH_SIZE,
            class_mode='categorical')
Example #15
def tta(image, model):
    datagen = ImageDataGenerator()
    all_images = np.expand_dims(image, 0)
    hori_image = np.expand_dims(
        datagen.apply_transform(x=image, transform_parameters={"flip_horizontal": True}), axis=0)
    vert_image = np.expand_dims(
        datagen.apply_transform(x=image, transform_parameters={"flip_vertical": True}), axis=0)
    rotated_image = np.expand_dims(
        datagen.apply_transform(x=image, transform_parameters={"theta": 15}), axis=0)
    all_images = np.append(all_images, hori_image, axis=0)
    all_images = np.append(all_images, vert_image, axis=0)
    all_images = np.append(all_images, rotated_image, axis=0)
    prediction = model.predict(all_images)
    # print(prediction)
    return np.mean(prediction)
Example #16
def brightness(samples, imagePath, augPath, label, iImg, iAug):

    datagen = ImageDataGenerator(brightness_range=[0.2, 1.0])
    # prepare iterator
    it = datagen.flow(samples, batch_size=1)
    # generate samples
    for i in range(5):

        # generate batch of images
        batch = it.next()
        # convert to unsigned integers for viewing
        newImage = batch[0].astype('uint8')

        saveImage(imagePath, newImage, augPath, label, iImg, iAug)

        iAug += 1

    return iAug, iImg
Example #17
def createAugmentedData(training_data, training_labels):
    complete_training_data_set = []
    complete_training_labels_set = []

    for data in training_data:
        complete_training_data_set.append(data)
    print("Complete Training Data: " + str(len(complete_training_data_set)))

    for label in training_labels:
        complete_training_labels_set.append(label)
    print("Complete Training Label: " + str(len(complete_training_labels_set)))

    # create augmented data
    data_augmented = ImageDataGenerator(featurewise_center=True,
                                        featurewise_std_normalization=True,
                                        rotation_range=90,
                                        width_shift_range=0.2,
                                        height_shift_range=0.2,
                                        horizontal_flip=True,
                                        vertical_flip=True)
    data_augmented.fit(training_data)

    training_data_size = training_data.shape[0]
    aug_counter = 0
    while aug_counter < (augmented_multiple - 1):
        iterator = data_augmented.flow(training_data, training_labels, batch_size=training_data_size)
        # iterator = data_augmented.flow(training_data, training_labels, batch_size=batch_size)
        augmented_data = iterator.next()
        for data in augmented_data[0]:
            complete_training_data_set.append(data)
        for label in augmented_data[1]:
            complete_training_labels_set.append(label)
        aug_counter += 1

    print("Size of All Training Data: " + str(len(complete_training_data_set)))
    print("Size of All Training Labels: " + str(len(complete_training_labels_set)))

    array_training_data = np.array(complete_training_data_set)
    array_training_labels = np.array(complete_training_labels_set)

    print("Shape of complete training data: " + str(array_training_data.shape))
    print("Shape of complete training labels: " + str(array_training_labels.shape))

    return np.array(complete_training_data_set), np.array(complete_training_labels_set)
Example #18
    def __init__(self):
        os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

        self.image_data_gen = ImageDataGenerator(
            rotation_range=15,
            width_shift_range=0.1,
            height_shift_range=0.1,
            shear_range=0.2,
            zoom_range=[0.8, 1.1],
            brightness_range=[0.5, 1.5],
            fill_mode='reflect')

        self.isolator = Isolator()
        self.current_working_dir = os.getcwd()

        self.training_data = []

        self.logger = None
        self.__create_logger()
Example #19
    def __init__(self, X, y, batch_size, shuffle=True):

        self.X = X
        self.y = y
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.index = np.arange(len(X), dtype=int)
        self.checked = []

        self.augmenter = ImageDataGenerator(
                    featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False,
                    samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=0,
                    width_shift_range=0.1, height_shift_range=0.1, brightness_range=None, shear_range=0.0, zoom_range=0,
                    channel_shift_range=0., fill_mode='nearest', cval=0., horizontal_flip=True, vertical_flip=False,
                    rescale=None, preprocessing_function=None, data_format='channels_last', validation_split=0,
                    dtype='float32')
        self.augmenter.fit(X)

        self.on_epoch_end()
Example #20
def zoom(samples, imagePath, augPath, label, iImg, iAug):
    print("[INFO] Saving random zoom images...")

    datagen = ImageDataGenerator(zoom_range=[0.5, 1.0])
    # prepare iterator
    it = datagen.flow(samples, batch_size=1)
    # generate samples
    for i in range(5):

        # generate batch of images
        batch = it.next()
        # convert to unsigned integers for viewing
        newImage = batch[0].astype('uint8')

        saveImage(imagePath, newImage, augPath, label, iImg, iAug)

        iAug += 1

    return iAug, iImg
def get_datagen():
    datagen = ImageDataGenerator(
        # set input mean to 0 over the dataset
        featurewise_center=False,
        # set each sample mean to 0
        samplewise_center=False,
        # divide inputs by std of dataset
        featurewise_std_normalization=False,
        # divide each input by its std
        samplewise_std_normalization=False,
        # apply ZCA whitening
        zca_whitening=False,
        # epsilon for ZCA whitening
        zca_epsilon=1e-06,
        # randomly rotate images (here within ±45 degrees)
        rotation_range=45,
        # randomly shift images horizontally
        width_shift_range=0.2,
        # randomly shift images vertically
        height_shift_range=0.2,
        # set range for random shear
        shear_range=0.1,
        # set range for random zoom
        zoom_range=0.2,
        # set range for random channel shifts
        channel_shift_range=0.,
        # set mode for filling points outside the input boundaries
        fill_mode='nearest',
        # value used for fill_mode = "constant"
        cval=0.,
        # randomly flip images horizontally
        horizontal_flip=True,
        # randomly flip images vertically
        vertical_flip=True,
        # set rescaling factor (applied before any other transformation)
        rescale=1. / 255,
        # set function that will be applied on each input
        preprocessing_function=None,
        # image data format, either "channels_first" or "channels_last"
        data_format=None,
        # fraction of images reserved for validation (strictly between 0 and 1)
        validation_split=0.0)
    return datagen
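A hypothetical use of get_datagen; x_train, y_train, and model are assumed to exist elsewhere. Calling fit is only strictly required for the featurewise_* options, which are disabled here.

datagen = get_datagen()
datagen.fit(x_train)   # computes nothing with the featurewise_* options off, but is harmless
model.fit(datagen.flow(x_train, y_train, batch_size=64),
          steps_per_epoch=len(x_train) // 64,
          epochs=10)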
def train_model(model):
    # data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)

    data_generator_with_aug = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        width_shift_range=0.1,
        height_shift_range=0.1,
        # shear_range=0.01,
        zoom_range=[0.9, 1.25],
        horizontal_flip=True,
        vertical_flip=False,
        data_format='channels_last',
        brightness_range=[0.5, 1.5])

    train_generator = data_generator_with_aug.flow_from_directory(
        'input/fire-detection-from-cctv/data/data/img_data/train',
        target_size=(IMG_SIZE, IMG_SIZE),
        batch_size=TRAIN_BATCH_SIZE,
        class_mode='categorical')

    validation_generator = data_generator_with_aug.flow_from_directory(
        'input/fire-detection-from-cctv/data/data/img_data/test',
        target_size=(IMG_SIZE, IMG_SIZE),
        batch_size=TEST_BATCH_SIZE,
        shuffle=False,
        class_mode='categorical')

    #y_train = get_labels(train_generator)
    #weights = class_weight.compute_class_weight('balanced',np.unique(y_train), y_train)
    #dict_weights = { i: weights[i] for i in range(len(weights)) }

    H = model.fit_generator(
        train_generator,
        steps_per_epoch=train_generator.n / TRAIN_BATCH_SIZE,
        epochs=NUM_EPOCHS,
        validation_data=validation_generator,
        validation_steps=1  #,
        #class_weight=dict_weights
    )

    #plot_history( H, NUM_EPOCHS )

    return model, train_generator, validation_generator
def create_more_images(image_file, path, use_name):
    img = load_img(image_file)  # loads image we want to transform
    # convert to numpy array
    data = img_to_array(img)
    # expand dimension to one sample
    samples = expand_dims(data, 0)
    # create image data augmentation generator (increases dataset)
    train_datagen = ImageDataGenerator(
        width_shift_range=[-10, 30],
        brightness_range=[0.7, 1.0],
        # "constant" fills exposed pixels with cval (default 0, i.e. black) when shifting
        fill_mode="constant")
    # prepare iterator
    it = train_datagen.flow(samples, batch_size=1)
    # displays transformed images on pyplot
    for i in range(20):
        plt.interactive(False)
        batch = it.next()  # transforms image
        image = batch[0].astype('uint8')  # gets the image and converts it to an unsigned integer
        fig = plt.figure(figsize=[0.72, 0.72])  # creates a figure
        plt.imshow(image)  # shows the transformed image
        # does not display axes
        ax = fig.add_subplot(111)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        ax.set_frame_on(False)
        filename = "image_" + use_name + "_" + str(
            i) + ".jpg"  # creates file name
        # creates image.jpg file in the common_western_scales directory
        plt.savefig(path + filename,
                    dpi=250,
                    bbox_inches='tight',
                    pad_inches=0)
        # print("saved " + str(i))

        plt.close()
        fig.clf()
        plt.close(fig)
        plt.close('all')
        del filename, fig, ax
    def augment_data(self):
        datagen = ImageDataGenerator(
            brightness_range=[0.5, 1.5],
            horizontal_flip=True,
            shear_range=0.2,
            zoom_range=0.2,
            validation_split=0.2,
        )

        self.train_flow = datagen.flow_from_directory(
            directory='cars_augmented/',
            target_size=(224, 224),
            batch_size=8,
            subset='training')

        self.val_flow = datagen.flow_from_directory(
            directory='cars_augmented/',
            target_size=(224, 224),
            batch_size=8,
            subset='validation')
 def fit_and_evaluate(train_x, val_x, train_y, val_y, model, callbacks):
     gc.collect()
     batch_size = 32
     epochs = 30
     gc.collect()
     datagen = ImageDataGenerator(zoom_range=0.2, horizontal_flip=False)
     print("DataGen Started..")
     datagen.fit(train_x)
     print("DataGen Finished..")
     gc.collect()
     results = model.fit_generator(datagen.flow(train_x,
                                                train_y,
                                                batch_size=batch_size),
                                   epochs=epochs,
                                   callbacks=callbacks,
                                   verbose=1,
                                   validation_data=(val_x, val_y))
     gc.collect()
     print("Val Score: ", model.evaluate(val_x, val_y))
     return model, results
Example #26
def load_images(path):
    datagen = ImageDataGenerator(
        # augmentation and preprocessing settings
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest',
        validation_split=0.2)

    test = datagen.flow_from_directory(
        path,
        target_size=(300, 300),  # resize all images to a common size
        classes=['cherry', 'strawberry', 'tomato'],
        batch_size=BATCH_SIZE,
        class_mode='categorical')

    return test
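A hypothetical call to load_images; BATCH_SIZE is a module-level constant in the original code and the directory below is a placeholder.

generator = load_images('data/fruit_images')   # placeholder directory
images, labels = next(generator)               # one augmented, rescaled batch
print(images.shape, labels.shape)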
Example #27
    def evaluate(
            self,
            model: tf.keras.Model,
            evaluation_set: Union[tf.data.Dataset, ClassificationDataset],
            evaluation_steps: Union[int, None] = None,
            batch_size: Union[int, None] = None,
            augmentation: bool = False) -> Union[float, List[float], None]:
        """
        Evaluate the model on provided set.
        :return: the loss value if model has no other metrics, otw returns array with loss and metrics
        values.
        """

        self.__logs['training'].info('Evaluating the model...')

        if augmentation:
            x_eval, y_eval = evaluation_set.get_xy_evaluation()

            data_generator = ImageDataGenerator()

            evaluation_set = data_generator.flow_from_dataframe(
                dataframe=pd.DataFrame({
                    'image': x_eval,
                    'class': y_eval
                }),
                directory='',
                x_col='image',
                y_col='class',
                class_mode='other',
                target_size=(self.__input_width, self.__input_height),
                batch_size=batch_size)

        else:
            if evaluation_steps is not None and evaluation_steps == 0:
                self.__logs['training'].warn(
                    'Skipping evaluation since provided set is empty')
                return None

        return model.evaluate(evaluation_set,
                              verbose=1,
                              steps=evaluation_steps)
Example #28
    def evaluate(self, test_filepath, name="evaluation"):
        # Set the right parameters for evaluation
        if self.identifier == 1:
            class_mode = 'binary'
            results_name = "_gender.csv"
        else:
            class_mode = 'categorical'
            results_name = "_age.csv"
        # Load the test dataset via ImageDataGenerator
        datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
        test_generator = datagen.flow_from_directory(test_filepath,
                                                     target_size=(224, 224),
                                                     batch_size=1,
                                                     class_mode=class_mode,
                                                     shuffle=False)

        # Start the prediction walk through
        pred = self.model.predict_generator(test_generator, verbose=1)

        # Depending on gender or age estimation we need our class_indices in a different structure
        # for the predictions
        if self.identifier == 1:
            predicted_class_indices = np.around(pred)
        else:
            predicted_class_indices = np.argmax(pred, axis=1)

        labels = (test_generator.class_indices)
        labels = dict((v, k) for k, v in labels.items())

        if self.identifier == 1:
            predictions = [labels[k] for k in predicted_class_indices[:, 0]]
        else:
            predictions = [labels[k] for k in predicted_class_indices]

        filenames = test_generator.filenames
        # Create the .csv file for the evaluation
        results = pd.DataFrame({
            "Filename": filenames,
            "Predictions": predictions
        })
        results.to_csv(name + results_name, index=False)
Example #29
def train_model(model, train_data, categories):

    print('Training model...\n')

    batch_size = 100
    num_examples = len(train_data)
    validation_split = 0.2  # 20% validation split.

    # Randomly split the train/validation data sets.
    t_data, v_data, t_cats, v_cats = train_test_split(
        train_data,
        categories,
        train_size=(1 - validation_split),
        random_state=1138)  # To get the same split for consistency.

    # Create generator for training data.
    gen_a = ImageDataGenerator(
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.1,  # Adding upper and lower causes low accuracy.
        rotation_range=10)
    train_gen = gen_a.flow(t_data, t_cats, batch_size=batch_size)

    # Add a learning rate reducer.
    lr_reducer = ReduceLROnPlateau(monitor='val_acc',
                                   patience=3,
                                   verbose=1,
                                   factor=0.5,
                                   min_lr=0.000001)

    # Fit the model.
    start = time.time()
    model.fit_generator(train_gen,
                        steps_per_epoch=(num_examples // batch_size),
                        epochs=50,
                        validation_data=(v_data, v_cats),
                        callbacks=[lr_reducer])
    print('Training time elapsed: ' + str(int(time.time() - start)) +
          ' seconds.')

    return model
Example #30
    def train_model_with_generator(self):
        self.logger.info('Training model with image data generator')
        image_data_gen = ImageDataGenerator(rotation_range=15,
                                            width_shift_range=0.1,
                                            height_shift_range=0.1,
                                            shear_range=0.2,
                                            zoom_range=[0.8, 1.1],
                                            brightness_range=[0.5, 1.5],
                                            fill_mode='reflect')

        image_data_gen.fit(self.trainX)

        self.model.fit_generator(
            image_data_gen.flow(self.trainX,
                                self.trainY,
                                batch_size=constants.BATCH_SIZE),
            validation_data=(self.testX, self.testY),
            steps_per_epoch=len(self.trainX) // constants.BATCH_SIZE,
            epochs=constants.EPOCHS)

        self.__evaluate_model()