Example #1
def image_triple_generator(lst_files,
                           input_shape,
                           batch_size,
                           crop_shape=None):
    pos_ratio, neg_ratio = 1, 1
    pos_limit, neg_limit = 1, 4
    pos_factor, neg_factor = 1, 1.01
    img_cache = {}
    datagen_args = dict(horizontal_flip=True)
    datagen_left = IDG(**datagen_args)
    datagen_right = IDG(**datagen_args)
    f = np.load(lst_files)
    lst, y = f['lst'], f['label']
    num_batches = len(y) // batch_size + 1
    clss = np.unique(y)
    num_clss = clss.shape[0]
    kmap = {v: k for k, v in enumerate(clss)}
    label_set = [np.where(y == c)[0] for c in clss]
    step = 0
    while True:
        step += 1
        #loop per epoch
        for bid in range(num_batches):
            id_left, id_right, y_diff = utils.gen_pairs(
                y, kmap, label_set, batch_size, pos_ratio, neg_ratio)
            Xleft = utils.process_images([lst[i] for i in id_left],
                                         datagen_left, img_cache, input_shape,
                                         crop_shape)
            Xright = utils.process_images([lst[i] for i in id_right],
                                          datagen_right, img_cache,
                                          input_shape, crop_shape)
            Y_diff = np.array(y_diff)
            yield [Xleft, Xright], Y_diff
            if step % 10 == 0:
                pos_ratio = min(pos_ratio * pos_factor, pos_limit)
                neg_ratio = min(neg_ratio * neg_factor, neg_limit)
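A minimal usage sketch, not part of the original example: it assumes a compiled two-input Siamese model named siamese_model, a hypothetical pairs.npz file holding the 'lst' and 'label' arrays, and illustrative step counts.

triple_gen = image_triple_generator('pairs.npz',            # hypothetical .npz with 'lst' and 'label'
                                    input_shape=(224, 224, 3),
                                    batch_size=32)
siamese_model.fit_generator(triple_gen,
                            steps_per_epoch=100,             # should match num_batches computed inside the generator
                            epochs=10)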
Example #2
on_windows = False
if on_windows:
    data_directory = 'D:\\Data\\Sketches\\png'
    path_delim = '\\'
else:
    data_directory = '../data/places_200'
    path_delim = '/'

# From https://stackoverflow.com/questions/46717742/split-data-directory-into-training-and-test-directory-with-sub-directory-structu
#image dimensions
img_height = 299
img_width = 299

train_datagen = IDG(samplewise_std_normalization=True,
                    shear_range=0.2,
                    zoom_range=0.2,
                    horizontal_flip=True,
                    validation_split=0.2)

batch_size = 100
class_mode = 'categorical'
color_mode = 'rgb'
shuffle = True

train_generator = train_datagen.flow_from_directory(data_directory,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size,
                                                    class_mode=class_mode,
                                                    color_mode=color_mode,
                                                    shuffle=shuffle)
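Because validation_split=0.2 is set on train_datagen, a matching validation iterator can be drawn from the same directory. The following is an assumed follow-up, not part of the original snippet; note the split only takes effect when the training call also passes subset='training'.

validation_generator = train_datagen.flow_from_directory(data_directory,
                                                         target_size=(img_width, img_height),
                                                         batch_size=batch_size,
                                                         class_mode=class_mode,
                                                         color_mode=color_mode,
                                                         shuffle=False,        # keep validation order stable
                                                         subset='validation')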
Example #3
def data_generator(X, y, num_classes, batch_size=32):
    return IDG(rescale=1. / 255,  # scale pixel values from [0, 255] to [0, 1]
               shear_range=0.2,
               zoom_range=0.2,
               horizontal_flip=True).flow(X, to_categorical(y, num_classes), batch_size=batch_size)
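A short usage sketch, assuming X_train and y_train are in-memory NumPy arrays and model is an already-compiled Keras classifier; all values are illustrative.

train_flow = data_generator(X_train, y_train, num_classes=10, batch_size=32)
model.fit_generator(train_flow,
                    steps_per_epoch=len(X_train) // 32,
                    epochs=5)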
Example #4
if on_windows:
    data_directory = 'D:\\Data\\Sketches\\png'
    path_delim = '\\'
else:
    data_directory = '../data/Sketches/png'
    path_delim = '/'

# From https://stackoverflow.com/questions/46717742/split-data-directory-into-training-and-test-directory-with-sub-directory-structu
#image dimensions
img_height = 224
img_width = 224

train_datagen = IDG(featurewise_std_normalization=True,
                    shear_range=0.25,
                    zoom_range=0.25,
                    rotation_range=45,
                    horizontal_flip=True,
                    vertical_flip=True,
                    validation_split=0.2)

batch_size = 100
class_mode = 'categorical'
color_mode = 'grayscale'
shuffle = True

train_generator = train_datagen.flow_from_directory(data_directory,
                                                    target_size=(img_width,
                                                                 img_height),
                                                    batch_size=batch_size,
                                                    class_mode=class_mode,
                                                    color_mode=color_mode,
                                                    shuffle=shuffle)
Example #5
mdl_clssify.add(Conv2D(64, (3, 3), input_shape=(64, 64, 3), activation='relu'))
mdl_clssify.add(MaxPooling2D(pool_size=(2, 2)))
mdl_clssify.add(Conv2D(64, (3, 3), activation='relu'))
mdl_clssify.add(MaxPooling2D(pool_size=(2, 2)))
#mdl_clssify.add(Dropout(0.5))
mdl_clssify.add(Flatten())

mdl_clssify.add(Dense(128, activation='relu'))
mdl_clssify.add(Dense(1, activation='sigmoid'))

mdl_clssify.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])


trainData_generat = IDG(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
testData_generat = IDG(rescale=1./255)


set_train = trainData_generat.flow_from_directory('D:/final year project/AMD/data/train',
                                                  target_size=(64, 64),
                                                  batch_size=9,
                                                  class_mode='binary')

set_test = testData_generat.flow_from_directory('D:/final year project/AMD/data/test',
                                                target_size=(64, 64), batch_size=9,
                                                class_mode='binary')

#pdb.set_trace() 
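The snippet stops before training; a hedged sketch of the fit call that would typically follow, with illustrative epoch counts only.

mdl_clssify.fit_generator(set_train,
                          steps_per_epoch=len(set_train),    # batches per pass over the training folder
                          epochs=10,
                          validation_data=set_test,
                          validation_steps=len(set_test))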
Example #6
import cv2  #for reading the images into grayscale vectors
from PIL import Image  #used to manipulate the images into the appropriate sizes
import matplotlib.pyplot as plt
# %matplotlib inline

from keras.preprocessing.image import ImageDataGenerator as IDG  #keras's built in image augmentor, which is perfect due to our limited data set.

size = (160, 160)  #the new size of each image after resizing. Dimensions are still up in the air

#using IDG, make an augmentor with the parameters indicating what augmentations can take place.

aug_generator = IDG(rescale=1./255,  #this is to normalize each pixel's numerical values to between 0 and 1, since RGB values go from 0 to 255.
                    rotation_range=45,
                    width_shift_range=0.2,
                    height_shift_range=0.2,
                    shear_range=0.2,
                    zoom_range=0.2,
                    horizontal_flip=True,
                    fill_mode='nearest',
                    validation_split=0.2)  #some of our data will be used for validation, so split it off


normal_generator = IDG(rescale=1./255)  #no need to have augmentation parameters since the model isn't being trained to fit data generated by this. 
                                        # Just normalize the data appropriately.

#we'll use our augmented data generator instead of just extracting our training data. IDG has something perfect for this,
#considering we have the directories already.

#needed data generators
training_generator = aug_generator.flow_from_directory(train_dir,
                                                       target_size=size)  # force-resizes all input images, super nice.
Example #7
        labels[i * batch_size:(i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break
    return features, labels


if __name__ == "__main__":
    ##using VGG16 to pre-extract feature used to train the FC layer
    conv_base = VGG16(weights='imagenet',
                      include_top=False,
                      input_shape=(150, 150, 3))
    #print conv_base.summary()
    train_dir = '/Users/han/keras_use/examples/CNN/dogsvscats/train'
    validation_dir = '/Users/han/keras_use/examples/CNN/dogsvscats/validation'
    datagen = IDG(rescale=1. / 255)
    batch_size = 20
    train_features, train_labels = extract_features(train_dir, 2000)
    validation_features, validation_labels = extract_features(
        validation_dir, 1000)
    train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
    validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))

    ##FC layer
    model = models.Sequential()
    model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
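A hedged continuation, not shown in the excerpt: the dense head would then be trained directly on the pre-extracted features (epoch count illustrative).

    history = model.fit(train_features, train_labels,
                        epochs=30,
                        batch_size=batch_size,
                        validation_data=(validation_features, validation_labels))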
Example #8
def showIm(n):
    plt.figure()
    plt.imshow(np.array(xTrain[n]).reshape((28, 28)), cmap='binary_r')
    plt.xlabel(f"Value: {yTrain[n]}", fontsize=18)
    plt.show()


showIm(randint(0, 10000))

xTrain = np.reshape(xTrain, (-1, 28, 28, 1))
print(xTrain.shape)

dataGeneration = IDG(rescale=1. / 255,
                     rotation_range=20,
                     zoom_range=0.2,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     validation_split=0.2)

trainDataGen = dataGeneration.flow(xTrain,
                                   yTrain,
                                   batch_size=64,
                                   subset='training')
valDataGen = dataGeneration.flow(xTrain,
                                 yTrain,
                                 batch_size=64,
                                 subset='validation')

model = Sequential()

model.add(
Example #9
C, W, H = 3, 50, 50

TRAIN_DIR = '/home/main/programming/kaggle/fisheries/data/train/'
TEST_DIR = '/home/main/programming/kaggle/fisheries/data/test/'

ORDER = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']
NDIM = len(ORDER)

idg = IDG(
    # featurewise_center=True,
    # featurewise_std_normalization=True,
    rotation_range=5.,
    width_shift_range=0.05,
    height_shift_range=0.05,
    zoom_range=(0.95, 1 / 0.95),
    channel_shift_range=0.5,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='constant',
    cval=0,
)

os.makedirs('/tmp/fisheries', exist_ok=True)
mem = Memory(cachedir='/tmp/fisheries', verbose=0)


@mem.cache
def read_imagefile(fn, c, w, h):
    img = load_img(fn,
                   keep_aspect_ratio=True,
Example #10
    train_cats_dir = '/Users/han/keras_use/examples/CNN/dogsvscats/train/cats'
    fnames = [
        os.path.join(train_cats_dir, fname)
        for fname in os.listdir(train_cats_dir)
    ]
    #	print fnames
    img_path = fnames[3]
    print(img_path)
    img = image.load_img(img_path, target_size=(150, 150))
    #	print type(img)
    x = image.img_to_array(img)
    x = x.reshape((1, ) + x.shape)
    #	print type(x),x.shape

    ##Image data augmentation
    i = 0
    datagen = IDG(rotation_range=60,
                  width_shift_range=0.2,
                  height_shift_range=0.2,
                  shear_range=0.2,
                  zoom_range=0.2,
                  horizontal_flip=True,
                  fill_mode='nearest')
    for batch in datagen.flow(x, batch_size=1):
        plt.figure(i)
        imgplot = plt.imshow(image.array_to_img(batch[0]))
        plt.savefig(str(i) + "cat.png")
        i += 1
        if i % 4 == 0: break
    plt.show()
Example #11
 def train(self,
           loss = 'categorical_crossentropy',
           optimizer = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6),
           metrics = ['accuracy'],
           batch_size = 32,
           epochs = 10,
           verbose = 1,
           generator = False,
           use_saved_model = False):
     
     if self.trained:
         if len(os.listdir('./photos')) <= self.cls_num:
             print('\nAlready using the newest model!\nOffer more data to re-train.')
             return
         
     if os.path.exists('./models') and use_saved_model:
         self.model = keras.models.load_model(
             os.path.join('./models', sorted(os.listdir('./models'))[-1]))
         self.trained = True
         self.result = 'unused'
         return
         
     
     if  self._read_data():
         
         self.data, self.label = self._preprocessing(self.data)
         
         if verbose:
             print('\nSuccessfully load data: ', end = '')
             print(self.data.shape)
             print('\nNow start build model... ')
             self.do_nothing(2)
         
         self._build_model(optimizer, loss, metrics, self.cls_num)
         
         if verbose:
             print('\nSuccessfully built model.')
             print('\nModel summary:')
             print(self.model.summary())
         
         rst = input('Start training? [Y/n] ')
         if rst in ['n','N']:
             print('Abort')
             return
         
         print('\ntraining...')
         self.do_nothing(2)
         if generator:
             data_generator = IDG(featurewise_center=False,
                                  samplewise_center=False,
                                  featurewise_std_normalization=False,
                                  samplewise_std_normalization=False,
                                  zca_whitening=False,
                                  rotation_range=15,
                                  width_shift_range=0.1,
                                  height_shift_range=0.1,
                                  horizontal_flip=True,
                                  vertical_flip=True)
             
             # Required for featurewise_center,
             #  featurewise_std_normalization
             #   and zca_whitening.
             #data_generator.fit(self.data)
             
             self.model.fit_generator(data_generator.flow(self.data,
                                                          self.label,
                                                          batch_size),
                                       steps_per_epoch = self.data.shape[0] // batch_size,
                                      epochs = epochs)
         else:
             self.model.fit(x = self.data,
                            y = self.label,
                            batch_size = batch_size,
                            epochs = epochs,
                            verbose = verbose)
         
         self.trained = True
         self.result = 'unused'
         self.model.save_weights(os.path.join(os.getcwd(),
                                              'models',
                                              time.strftime('%Y%m%d%H%M')+'.h5'))
             
         print('\33[1mSuccessfully trained the model!\33[0m')
         return
     
     print('\nError occurs when trying to train the model. Abort')
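A usage sketch under stated assumptions: the enclosing class is not shown in this excerpt, so the name PhotoClassifier below is purely hypothetical, as are the argument values.

clf = PhotoClassifier()          # hypothetical name for the class that owns train()
clf.train(batch_size=32,
          epochs=10,
          generator=True,        # route training through the ImageDataGenerator branch above
          use_saved_model=False)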
Example #12
x = Flatten()(xception.output)

prediction = Dense(len(folders), activation='softmax')(x)  # softmax to match the categorical_crossentropy loss below

model = Model(inputs=xception.input,outputs=prediction)

model.summary()

model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'])

train_datagen = IDG(rescale=1./255,
                    shear_range=0.2,
                    zoom_range=0.2,
                    horizontal_flip=True)

test_datagen = IDG(rescale = 1./255)

val_datagen = IDG(rescale = 1./255)

training_set = train_datagen.flow_from_directory(train_path,
                                                 target_size = (299, 299),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')

test_set = test_datagen.flow_from_directory(test_path,
                                            target_size = (299, 299),
                                            batch_size = 32,
                                            class_mode = 'categorical')
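The excerpt ends before the model is fit; a hedged sketch of the training call, with illustrative epoch counts.

r = model.fit_generator(training_set,
                        validation_data=test_set,
                        epochs=5,
                        steps_per_epoch=len(training_set),
                        validation_steps=len(test_set))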
Example #13
def train(model: keras.models.Model,
          optimizer: dict,
          save_path: str,
          train_dir: str,
          valid_dir: str,
          batch_size: int = 32,
          epochs: int = 10,
          samples_per_epoch=1000,
          pretrained=None,
          augment: bool = True,
          weight_mode=None,
          verbose=0,
          **kwargs):
    """ Trains the model with the given configurations. """
    shape = model.input_shape[1:3]
    optimizer_cpy = optimizer.copy()
    shared_gen_args = {
        'rescale': 1. / 255,  # normalize pixel values to [0, 1]
    }
    train_gen_args = {}
    if augment:
        train_gen_args = {
            "fill_mode": 'reflect',
            'horizontal_flip': True,
            'vertical_flip': True,
            'width_shift_range': .15,
            'height_shift_range': .15,
            'shear_range': .5,
            'rotation_range': 45,
            'zoom_range': .2,
        }
    gen = IDG(**{**shared_gen_args, **train_gen_args})
    gen = gen.flow_from_directory(train_dir,
                                  target_size=shape,
                                  batch_size=batch_size,
                                  seed=SEED)

    val_count = len(
        glob(os.path.join(valid_dir, '**', '*.jpg'), recursive=True))
    valid_gen = IDG(**shared_gen_args)

    optim = getattr(keras.optimizers, optimizer['name'])
    if optimizer.pop('name') != 'sgd':
        optimizer.pop('nesterov')
    schedule = optimizer.pop('schedule')
    if schedule == 'decay' and 'lr' in optimizer.keys():
        initial_lr = optimizer.pop('lr')
    else:
        initial_lr = 0.01
    optim = optim(**optimizer)

    callbacks = [
        utils.checkpoint(save_path),
        utils.csv_logger(save_path),
    ]

    if pretrained is not None:
        if not os.path.exists(pretrained):
            raise FileNotFoundError(pretrained)

        model.load_weights(pretrained, by_name=False)
        if verbose == 1:
            print("Loaded weights from {}".format(pretrained))

    if optimizer_cpy['name'] == 'sgd':
        if schedule == 'decay':
            callbacks.append(utils.step_decay(epochs, initial_lr=initial_lr))
        elif schedule == 'big_drop':
            callbacks.append(utils.constant_schedule())

    model.compile(optim,
                  loss='categorical_crossentropy',
                  metrics=['accuracy', top3_acc])

    create_xml_description(save=os.path.join(save_path, 'model_config.xml'),
                           title=model.name,
                           epochs=epochs,
                           batch_size=batch_size,
                           samples_per_epoch=samples_per_epoch,
                           augmentations=augment,
                           schedule=schedule,
                           optimizer=optimizer_cpy,
                           **kwargs)

    if weight_mode:
        class_weights = [[key, value] for key, value in weight_mode.items()]
        filen = os.path.join(save_path, 'class_weights.npy')
        np.save(filen, class_weights)

    h = None  # has to be initialized here, so we can reference it later
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            h = model.fit_generator(
                gen,
                steps_per_epoch=samples_per_epoch / batch_size,
                epochs=epochs,
                validation_data=valid_gen.flow_from_directory(
                    valid_dir,
                    target_size=shape,
                    batch_size=batch_size,
                    seed=SEED),
                validation_steps=val_count / batch_size,
                callbacks=callbacks,
                class_weight=weight_mode,
                verbose=2)
    except KeyboardInterrupt:
        save_results(verbose=1, save_path=save_path, model=model, hist=h)
        return

    save_results(verbose=1, save_path=save_path, model=model, hist=h)
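For reference, an optimizer dict shaped the way train() consumes it above; the values are illustrative. The 'name' key is resolved with getattr(keras.optimizers, ...) and also compared against the literal 'sgd', so the lowercase sgd alias exposed by older Keras releases is assumed here.

optimizer_cfg = {
    'name': 'sgd',         # looked up on keras.optimizers and compared to 'sgd'
    'lr': 0.01,            # popped and used as initial_lr when schedule == 'decay'
    'momentum': 0.9,
    'nesterov': True,      # popped (discarded) unless name == 'sgd'
    'schedule': 'decay',   # 'decay' or 'big_drop'; always popped before the optimizer is built
}

With this configuration, only the keys that survive the pops (here momentum and nesterov) actually reach the optimizer constructor.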
Example #14
"""
Created on Sun Mar 11 23:52:26 2018

@author: Ibo Turk
"""

from keras.preprocessing.image import ImageDataGenerator as IDG
from IboTurk_part0 import data as x_train
from IboTurk_part0 import data_values as y_train
from matplotlib import pyplot as plt
from keras.utils import np_utils
from preprocessing import ZiscFunction

ITdatagen = IDG(samplewise_center=False,
                rotation_range=360,
                shear_range=360,
                data_format="channels_last",
                fill_mode="constant",
                preprocessing_function=ZiscFunction)

#y_train = np_utils.to_categorical(y_train, 2)
#print(y_train)
#ITdatagen.fit(x_train)
#batches = 0
#for x_batch, y_batch in ITdatagen.flow(x_train, y_train, batch_size=32):
#        batches += 1
#        if batches >= 1:
# we need to break the loop by hand because
# the generator loops indefinitely
#           break

#plt.subplot(121)
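A hedged, runnable version of the commented-out loop above, assuming x_train is a 4-D NumPy array of images and y_train its integer labels.

y_train_cat = np_utils.to_categorical(y_train, 2)
batches = 0
for x_batch, y_batch in ITdatagen.flow(x_train, y_train_cat, batch_size=32):
    batches += 1
    if batches >= 1:
        break                          # flow() loops indefinitely, so stop by hand after one batch
plt.subplot(121)
plt.imshow(x_batch[0].squeeze())       # assumes single-channel or RGB images
plt.show()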
Example #15
from keras.preprocessing.image import ImageDataGenerator as IDG
import numpy as np
import os
from imageio import imread  # scipy.misc.imread was removed in SciPy 1.2+

datagen = IDG(horizontal_flip=True, vertical_flip=True)
# def load_images_from_folder(folder):
#     images = []
#     for filename in os.listdir('1'):
#         if any([filename.endswith(x) for x in ['.jpeg', '.jpg','.JPG']]):
#             img = Image.open(os.path.join(folder, filename))
#             img=np.array(img)
#             datagen.fit(img.reshape(1,1696,2544,3))
#             i=0
# 		for batch in datagen.flow(img.reshape(1,1696,2544,3), batch_size=1,save_to_dir='preview',
#                            save_prefix='lol', save_format='jpeg'):
#   			i += 1
#     		if i == 4:
#         break
#             if img is not None:
#                 images.append(img)
#     return images

for root, dirs, files in os.walk('3'):
    for file in files:
        if file.endswith(".jpg") or file.endswith(".JPG"):
            img = imread(os.path.join(root, file))
            k = img.shape
            i = 0
            for batch in datagen.flow(img.reshape(1, k[0], k[1], k[2]),
                                      batch_size=1,
                                      save_to_dir='preview',  # directory used in the commented-out sketch above
                                      save_prefix='lol',
                                      save_format='jpeg'):
                i += 1
                if i == 4:
                    break  # flow() loops indefinitely, so stop after a few augmented copies
Example #16
model.add(layers.MaxPooling2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D((2, 2)))

model.add(layers.Conv2D(128, (3, 3), activation="relu"))
model.add(layers.MaxPooling2D((2, 2)))

model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4))

train_datagen = IDG(rescale=1. / 255)
test_datagen = IDG(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(150, 150),
                                                    batch_size=20,
                                                    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(validation_dir,
                                                        target_size=(150, 150),
                                                        batch_size=20,
                                                        class_mode='binary')

history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              epochs=30,
                              validation_data=validation_generator)
Example #17
print("Network Parameters:\n",opt.get_config())
model = MohlerNet4.build(width=32,height=32,depth=3,classes=3)
model.compile(loss="categorical_crossentropy", optimizer=opt,
	metrics=["accuracy"])

# train the network
print("[INFO] training network...")
numEpochs = 50

batch_size = 16

if dataAugmentation:
    train_datagen = IDG(
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

    train_datagen.fit(trainX)
    H = model.fit_generator(train_datagen.flow(trainX, trainY, batch_size=batch_size),
                            steps_per_epoch=(2250 // batch_size),
                            epochs=numEpochs,
                            validation_data=(testX, testY))
else: 
    H = model.fit(trainX, trainY,
                  validation_data=(testX, testY),
                  batch_size=batch_size,
                  epochs=numEpochs,
                  verbose=1)

# evaluate the network
print("[INFO] evaluating network...")