def get_generators():
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        horizontal_flip=True,
        rotation_range=10.,
        width_shift_range=0.2,
        height_shift_range=0.2)

    test_datagen = ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        os.path.join('data', 'train'),
        target_size=(299, 299),
        batch_size=32,
        classes=data.classes,
        class_mode='categorical')

    validation_generator = test_datagen.flow_from_directory(
        os.path.join('data', 'test'),
        target_size=(299, 299),
        batch_size=32,
        classes=data.classes,
        class_mode='categorical')

    return train_generator, validation_generator
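
A minimal usage sketch (assumptions: a compiled Keras model named model that accepts 299x299 RGB input already exists, and the data.classes list referenced above is available); the step counts are derived from each iterator's samples attribute:

train_generator, validation_generator = get_generators()
model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // 32,
    epochs=10,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // 32)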
Example #2
def test_image_data_generator_training():
    np.random.seed(1337)
    img_gen = ImageDataGenerator(rescale=1.)  # Dummy ImageDataGenerator
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit_generator(img_gen.flow(x_train, y_train, batch_size=16),
                                  epochs=10,
                                  validation_data=img_gen.flow(x_test, y_test,
                                                               batch_size=16),
                                  verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    model.evaluate_generator(img_gen.flow(x_train, y_train, batch_size=16))
Example #3
    def train(self, save_model_to_file=True, rotation_range=20, width_shift_range=0.5, height_shift_range=0.2):
        """ Trains the model using the dataset in letters_folder """

        # Read the data
        data = []
        labels = []
        for imgName in listdir(self.letters_folder):
            img = cv2.imread(self.letters_folder+"/"+imgName, cv2.IMREAD_GRAYSCALE)
            data.append(img)
            # Get the label from the image path and then get the index from the letters list
            labels.append(self.letters.index(imgName.split('_')[0]))

        data = np.array(data)
        labels = np.array(labels)

        # Split train and test
        X_train, X_test, y_train, y_test = train_test_split(
                 data, labels, test_size=0.33, random_state=42)

        X_train = X_train.reshape(X_train.shape[0], 1, self.img_rows, self.img_cols)
        X_test = X_test.reshape(X_test.shape[0], 1, self.img_rows, self.img_cols)
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_test /= 255

        # convert class vectors to binary class matrices
        Y_train = np_utils.to_categorical(y_train, self.nb_classes)
        Y_test = np_utils.to_categorical(y_test, self.nb_classes)

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            rotation_range=rotation_range,        # randomly rotate images in the range (degrees)
            width_shift_range=width_shift_range,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=height_shift_range)  # randomly shift images vertically (fraction of total height)

        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        history = self.model.fit_generator(datagen.flow(X_train, Y_train, batch_size=self.batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=self.nb_epoch,
                            validation_data=(X_test, Y_test))


        # Plot History
        plt.figure(figsize=(10,10))
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

        if save_model_to_file:
            self.model.save_weights(self.weights_path,overwrite=True)
Example #4
def train():
    (X_test, y_test, y_conf) = load.load_test_data()
    Y_test = np_utils.to_categorical(y_test, classes)
    print(X_test.shape[0], 'test samples')
    X_test = X_test.astype("float32")
    X_test /= 255
    datagen = ImageDataGenerator(rotation_range=30,  width_shift_range=0.01,  height_shift_range=0.01,  horizontal_flip=True, vertical_flip=True)
    t0=time.time()
    for e in range(nb_epoch):
        print ("******** Epoch %d ********" % (e+1))
        print ("Epoch Number: " + str(e))
        for X_batch, y_batch, class_weight in BatchGenerator():
            datagen.fit(X_batch)
            model.fit_generator(datagen.flow(X_batch, y_batch, batch_size=18, shuffle=True),
            callbacks=[lh,checkpointer],
            samples_per_epoch=split_size,
            nb_epoch=nb_epoch_per,
            validation_data=(X_test,Y_test)
            ,class_weight=class_weight
            )
            y_pred = model.predict_classes(X_test, batch_size=20)
        (accuracy, correct)=PredictionMatrix()
        #model.save_weights((direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' )  % (e+1), overwrite=True)
        #print ("Weights saved to " + direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' % (e+1))
    t1=time.time()
    tyme = t1-t0   
    print("Training completed in %f seconds" % tyme)
    if save_name != '':
        model.save_weights(direct + '/weights/' + save_name, overwrite=True)
        print ("Weights saved to " + save_name)
    print ("Final training weights saved to " + save_name)
    return tyme
Example #5
def data():
    nb_classes = 10
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    return datagen, X_train, Y_train, X_test, Y_test
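
A minimal consumption sketch for the tuple this data() helper returns (assumption: model is a compiled CIFAR-10 classifier expecting 32x32x3 inputs):

datagen, X_train, Y_train, X_test, Y_test = data()
model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32),
                    steps_per_epoch=len(X_train) // 32,
                    epochs=5,
                    validation_data=(X_test, Y_test))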
Example #6
def predict_labels(model):
    """writes test image labels and predictions to csv"""
    
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        shuffle=False,
        class_mode=None)

    base_path = "../data/test/test/"

    with open("prediction.csv", "w") as f:
        p_writer = csv.writer(f, delimiter=',', lineterminator='\n')
        for _, _, imgs in os.walk(base_path):
            for im in imgs:
                pic_id = im.split(".")[0]
                img = load_img(base_path + im)
                img = imresize(img, size=(img_height, img_width))
                test_x = img_to_array(img).reshape(3, img_height, img_width)
                test_x = test_x.reshape((1,) + test_x.shape)
                test_generator = test_datagen.flow(test_x,
                                                   batch_size=1,
                                                   shuffle=False)
                prediction = model.predict_generator(test_generator, 1)[0][0]
                p_writer.writerow([pic_id, prediction])
Example #7
def train_model(model,X_train,y_train):
    
    print("Training Model")
    
    # Image data generator to augment the data
    datagen = ImageDataGenerator(rotation_range = 2,
                             featurewise_center = False,
                             featurewise_std_normalization=False,
                             zoom_range = [0.8, 1],
                             fill_mode = 'constant',
                             cval=0)
    
    # Setup a regression network with adam optimizer
    model.compile(loss='mean_squared_error', optimizer='adam')
    
    # Incrementally save the best model based on loss value
    chkpnt = ModelCheckpoint('model.h5',monitor='loss',verbose=1,save_best_only=True,mode='min')
    callbacks_list = [chkpnt]

    # Shuffle data
    X_train,y_train = shuffle(X_train,y_train)
    
    # Train the network with a batch size of 64 using the image data generator for a total of 10 epochs
    model.fit_generator(datagen.flow(X_train,y_train,batch_size=64),samples_per_epoch=len(X_train),nb_epoch=10,callbacks=callbacks_list,verbose=1)
    #,save_to_dir='./AugData',save_prefix='aug'
    model.save("model_final.h5")
    return model
def train():
    model_ = 'VGG_16'
    batch_size = 8
    nb_classes = 5
    nb_epoch = 200
    data_augmentation = True

    # input image dimensions
    if model_ in MODELS[0:2]:
        img_rows, img_cols = 224, 224
    if model_ in MODELS[3]:
        img_rows, img_cols = 299, 299
    # the Yelp images are RGB
    img_channels = 3

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = yelp_data(dtype=np.float32, grayscale=False, pixels=img_rows, batches=3,
                                                     model='VGG_16', data_dir='/home/rcamachobarranco/datasets')
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # generate model
    model = VGG_16(img_rows, img_cols, img_channels, nb_classes)

    # let's train the model using SGD + momentum
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(X_train, y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, show_accuracy=True,
                  validation_data=(X_test, y_test), shuffle=True)
    else:
        print('Using real-time data augmentation.')

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch, show_accuracy=True,
                            validation_data=(X_test, y_test),
                            nb_worker=1)
	def fit(self,x,y,doRTA):
		if doRTA == False:
			self.model.fit({"input":x,"output":y},nb_epoch=self.epochs,batch_size=self.batch_size)
		else:
			datagen = ImageDataGenerator(
			        featurewise_center=True,  # set input mean to 0 over the dataset
			        samplewise_center=False,  # set each sample mean to 0
			        featurewise_std_normalization=True,  # divide inputs by std of the dataset
			        samplewise_std_normalization=False,  # divide each input by its std
			        zca_whitening=False,
			        rotation_range=20,
			        width_shift_range=0.2, 
			        height_shift_range=0.2,
			        horizontal_flip=True, 
			        vertical_flip=False)
			datagen.fit(x)

			for e in range(self.epochs):
			    print('-'*40)
			    print('Epoch', e)
			    print('-'*40)
			    print('Training...')
			    # batch train with realtime data augmentation
			    progbar = generic_utils.Progbar(x.shape[0])
			    for X_batch, Y_batch in datagen.flow(x, y):
			        loss = self.model.train_on_batch({"input":X_batch,"output":Y_batch})
			        progbar.add(X_batch.shape[0], values=[('train loss', loss[0])])
Example #10
 def train_generator(x, y, batch_size, shift_fraction=0.):
     train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
                                        height_shift_range=shift_fraction)  # shift up to 2 pixel for MNIST
     generator = train_datagen.flow(x, y, batch_size=batch_size)
     while 1:
         x_batch, y_batch = generator.next()
         yield ([x_batch, y_batch], [y_batch, x_batch])
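
This generator yields ([x, y], [y, x]) pairs, which matches a two-input / two-output network such as a CapsNet with a reconstruction decoder. A hedged usage sketch (assumptions: a compiled model of that shape named model and MNIST-style arrays x_train/y_train, x_test/y_test exist):

model.fit_generator(train_generator(x_train, y_train, batch_size=64, shift_fraction=0.1),
                    steps_per_epoch=len(x_train) // 64,
                    epochs=50,
                    validation_data=[[x_test, y_test], [y_test, x_test]])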
    def train(self):
        # load data
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)
        
        x_train, x_test = self.color_preprocessing(x_train, x_test)

        # build network
        model = self.build_model()
        model.summary()

        # Save the best model during each training checkpoint
        checkpoint = ModelCheckpoint(self.model_filename,
                                    monitor='val_loss', 
                                    verbose=0,
                                    save_best_only= True,
                                    mode='auto')
        plot_callback = PlotLearning()
        tb_cb = TensorBoard(log_dir=self.log_filepath, histogram_freq=0)

        cbks = [checkpoint, plot_callback, tb_cb]

        # set data augmentation
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(horizontal_flip=True,width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)
        datagen.fit(x_train)

        # start training
        model.fit_generator(datagen.flow(x_train, y_train,batch_size=self.batch_size),steps_per_epoch=self.iterations,epochs=self.epochs,callbacks=cbks,validation_data=(x_test, y_test))
        
        model.save(self.model_filename)

        self._model = model
Example #12
def preprocess_data(X_train, y_train, X_val, y_val, X_test, y_test):
	print('start preprocess...')

	X_train=scale_data(X_train)
	X_val=scale_data(X_val)
	X_test=scale_data(X_test)

	#subtract mean, per sample and per color channel
	X_train, X_val, X_test = im.mean2(X_train, X_val, X_test)

	#apply ZCA whitening on each color channel
	#X_train=im.whiten(X_train,epsilon=0.1)
	#X_test=im.whiten(X_test,epsilon=0.1)

	g = ImageDataGenerator(width_shift_range=0.2,height_shift_range=0.2,horizontal_flip=True,\
	fill_mode='nearest',dim_ordering='th') 
	g.fit(X_train)
	
	y_train = to_categorical(y_train)
	y_val = to_categorical(y_val)
	y_test = to_categorical(y_test)

	print('...done')

	return g, X_train, y_train, X_val, y_val, X_test, y_test
Example #13
def load_data_generator(train_folderpath, mask_folderpath, img_size = (768, 768), mask_size=(768,768), batch_size=32):
    """
    Returns a data generator with masks and training data specified by the directory paths given.
    """
    data_gen_args = dict(
                        width_shift_range=0.2,
                        height_shift_range=0.2,
                        horizontal_flip=True,
                        rotation_range=10,
                        zoom_range=0.2,
                        fill_mode="constant", 
                        cval=0       
                        )

    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)
        
    seed = 42
    
    image_generator = image_datagen.flow_from_directory(train_folderpath, class_mode=None,
        target_size = img_size, seed=seed, color_mode = 'rgb', batch_size=batch_size)
    mask_generator = mask_datagen.flow_from_directory(mask_folderpath, class_mode=None, 
        target_size = mask_size,seed=seed, color_mode='grayscale', batch_size=batch_size)

    return zip(image_generator, mask_generator)
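
Passing the same seed to both flow_from_directory calls is what keeps each image batch aligned with its mask batch. A minimal usage sketch (assumptions: a compiled segmentation model named model, and hypothetical folders data/images and data/masks that each contain a single class subfolder, as flow_from_directory expects):

gen = load_data_generator('data/images', 'data/masks', batch_size=8)
model.fit_generator(gen, steps_per_epoch=100, epochs=5)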
Example #14
def _get_data_generators(img_width, img_height, labels):
    train_datagen = ImageDataGenerator(
        fill_mode="nearest",
        horizontal_flip=True,
        rescale=1. / 255,
        shear_range=0.2,
        zoom_range=0.2)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        batch_size=32,
        classes=labels,
        target_size=(img_width, img_height),
        class_mode="categorical")

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        batch_size=32,
        classes=labels,
        target_size=(img_width, img_height),
        class_mode="categorical")

    return train_generator, validation_generator
Example #15
class Machine_Generator(Machine_cnn_lenet):
    def __init__(self, X, y, nb_classes=2, steps_per_epoch=10, fig=True,
                 gen_param_dict=None):
        super().__init__(X, y, nb_classes=nb_classes, fig=fig)
        self.set_generator(steps_per_epoch=steps_per_epoch, gen_param_dict=gen_param_dict)

    def set_generator(self, steps_per_epoch=10, gen_param_dict=None):
        if gen_param_dict is not None:
            self.generator = ImageDataGenerator(**gen_param_dict)
        else:
            self.generator = ImageDataGenerator()

        print(self.data.X_train.shape)

        self.generator.fit(self.data.X_train, seed=0)
        self.steps_per_epoch = steps_per_epoch

    def fit(self, nb_epoch=10, batch_size=64, verbose=1):
        model = self.model
        data = self.data
        generator = self.generator
        steps_per_epoch = self.steps_per_epoch

        history = model.fit_generator(generator.flow(data.X_train, data.Y_train, batch_size=batch_size),
                                      epochs=nb_epoch, steps_per_epoch=steps_per_epoch,
                                      validation_data=(data.X_test, data.Y_test))

        return history
def augment_img(input_file, output_folder, img_format='jpg',
                number_imgs=10):
    """
    Generate number_imgs new images from a given image.
    This function is inspired from the following blog post:
    https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
    """
    datagen = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')

    img = load_img(input_file)
    x = img_to_array(img)
    x = x.reshape((1,) + x.shape)
    i = 0
    for batch in datagen.flow(x, batch_size=1,
                              save_to_dir=output_folder,
                              save_format=img_format):
        i += 1
        if i > number_imgs:
            break
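
A hedged usage example with hypothetical paths; this writes roughly number_imgs augmented variants of the input image into the output folder:

augment_img('cat.jpg', './augmented', img_format='png', number_imgs=10)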
def augment_data(train_data):
    augmented_data_generator = ImageDataGenerator(
        rotation_range=20,
        horizontal_flip=True
    )
    augmented_data_generator.fit(train_data)
    return augmented_data_generator
def main():
    model = Model()
    if (sys.argv[1] == "test"):
        global nb_epoch
        nb_epoch = 0
        global WEIGHTS_FILE
        WEIGHTS_FILE = sys.argv[2]

    elif(sys.argv[1] == "add"):
        global X_train, Y_train, X_val1, Y_val1
        X_train = np.concatenate((X_train, X_val1), axis=0)
        Y_train = np.concatenate((Y_train, Y_val1), axis=0)

    adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam)

    datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=False)

    datagen.fit(X_train)
    callbacks = [ModelCheckpoint(WEIGHTS_FILE, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
                 EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')]
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=len(X_train), nb_epoch=nb_epoch, validation_data=(X_val1, Y_val1),
                        show_accuracy=True, callbacks=callbacks)

    model.load_weights(WEIGHTS_FILE)
    predict_test(model)
Example #19
def autoGenerator():
  image_loader = ImageDataGenerator(rescale=1./255.)
  loader = image_loader.flow_from_directory("/data/tiles/", color_mode='rgb',batch_size=batch_size, target_size=(256,256), class_mode='binary')
  for batch in loader:
    if np.isnan(batch[0]).any():
      print('problem with batch')
    yield (batch[0],np.copy(batch[0])) 
def save_bottlebeck_features():
    datagen = ImageDataGenerator(rescale=1. / 255)

    # build the VGG16 network
    model = applications.VGG16(include_top=False, weights='imagenet')

    generator = datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_train = model.predict_generator(
        generator, nb_train_samples // batch_size)
    np.save(open('bottleneck_features_train.npy', 'wb'),
            bottleneck_features_train)

    generator = datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    bottleneck_features_validation = model.predict_generator(
        generator, nb_validation_samples // batch_size)
    np.save(open('bottleneck_features_validation.npy', 'wb'),
            bottleneck_features_validation)
Example #21
def gen_augment_arrays(array, label, augmentations, rounds = 1):
    if augmentations is None:
        yield array, label
    else:

        auggen = ImageDataGenerator(featurewise_center = augmentations['featurewise_center'],
                                    samplewise_center = augmentations['samplewise_center'],
                                    featurewise_std_normalization = augmentations['featurewise_std_normalization'],
                                    samplewise_std_normalization = augmentations['samplewise_std_normalization'],
                                    zca_whitening = augmentations['zca_whitening'],
                                    rotation_range = augmentations['rotation_range'],
                                    width_shift_range = augmentations['width_shift_range'],
                                    height_shift_range = augmentations['height_shift_range'],
                                    shear_range = augmentations['shear_range'],
                                    zoom_range = augmentations['zoom_range'],
                                    channel_shift_range = augmentations['channel_shift_range'],
                                    fill_mode = augmentations['fill_mode'],
                                    cval = augmentations['cval'],
                                    horizontal_flip = augmentations['horizontal_flip'],
                                    vertical_flip = augmentations['vertical_flip'],
                                    rescale = augmentations['rescale'])

        array_augs, label_augs = next(auggen.flow(np.tile(array[np.newaxis],
                                                (rounds * augmentations['rounds'], 1, 1, 1)),
                                        np.tile(label[np.newaxis],
                                                (rounds * augmentations['rounds'], 1)),
                                        batch_size=rounds * augmentations['rounds']))

        for array_aug, label_aug in zip(array_augs, label_augs):
            yield array_aug, label_aug
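
A hedged sketch of the augmentations dict this helper expects; every key below is read inside gen_augment_arrays, and the concrete values (plus the dummy array and label) are illustrative assumptions:

import numpy as np

augmentations = {
    'featurewise_center': False, 'samplewise_center': False,
    'featurewise_std_normalization': False, 'samplewise_std_normalization': False,
    'zca_whitening': False, 'rotation_range': 15,
    'width_shift_range': 0.1, 'height_shift_range': 0.1,
    'shear_range': 0.1, 'zoom_range': 0.1, 'channel_shift_range': 0.,
    'fill_mode': 'nearest', 'cval': 0., 'horizontal_flip': True,
    'vertical_flip': False, 'rescale': None,
    'rounds': 4}  # rounds * augmentations['rounds'] augmented copies per call

array = np.random.rand(32, 32, 3)   # one HxWxC image
label = np.array([0., 1.])          # its one-hot label
for array_aug, label_aug in gen_augment_arrays(array, label, augmentations):
    print(array_aug.shape, label_aug.shape)   # (32, 32, 3) (2,)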
Example #22
def augmentation(scans,masks,n):
    datagen = ImageDataGenerator(
        featurewise_center=False,   
        samplewise_center=False,  
        featurewise_std_normalization=False,  
        samplewise_std_normalization=False,  
        zca_whitening=False,  
        rotation_range=25,   
        width_shift_range=0.3,  
        height_shift_range=0.3,   
        horizontal_flip=True,   
        vertical_flip=True,  
        zoom_range=False)
    i=0
    for batch in datagen.flow(scans, batch_size=1, seed=1000): 
        scans=np.vstack([scans,batch])
        i += 1
        if i > n:
            break
    i=0
    for batch in datagen.flow(masks, batch_size=1, seed=1000): 
        masks=np.vstack([masks,batch])
        i += 1
        if i > n:
            break
    return((scans,masks))
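
A short usage sketch with dummy arrays (assumption: scans and masks are matching 4-D float arrays); the identical seed=1000 in both flow loops above is what keeps each appended augmented scan aligned with its augmented mask:

import numpy as np

scans = np.random.rand(10, 128, 128, 1).astype('float32')
masks = (np.random.rand(10, 128, 128, 1) > 0.5).astype('float32')
scans_aug, masks_aug = augmentation(scans, masks, 20)
print(scans_aug.shape, masks_aug.shape)   # n + 1 augmented samples appended to each array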
Example #23
def CNN(trainDir, validationDir, classNum):
    model = Sequential()
    model.add(Convolution2D(4, 3, 3, input_shape=(img_width, img_height, 1)))
    model.add(Activation('relu'))
    model.add(Convolution2D(4, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer
    model.add(Convolution2D(8, 3, 3))
    model.add(Activation('relu'))
    model.add(Convolution2D(8, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Convolution2D(16, 3, 3))
    # model.add(Activation('relu'))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dropout(0.6))
    model.add(Dense(classNum))
    model.add(Activation('softmax'))
    # test
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # this is the augmentation configuration we will use for training
    train_datagen = ImageDataGenerator(
            rescale=1./255,
            shear_range=0.2,
            zca_whitening=True,
            zoom_range=0.2,
            horizontal_flip=False)
    # this is the augmentation configuration we will use for testing:
    # only rescaling
    test_datagen = ImageDataGenerator(rescale=1./255, zca_whitening=True)
    train_generator = train_datagen.flow_from_directory(
            trainDir,
            target_size=(img_width, img_height),
            batch_size=32,
            color_mode='grayscale',
            class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
            validationDir,
            target_size=(img_width, img_height),
            batch_size=32,
            color_mode='grayscale',
            class_mode='categorical')
    model.fit_generator(
            train_generator,
            samples_per_epoch=nb_train_samples,
            nb_epoch=nb_epoch,
            validation_data=validation_generator,
            nb_val_samples=nb_validation_samples)
    return model
Example #24
def extract_test_features(base_model, target_size, preprocess):
    datagen = ImageDataGenerator(preprocessing_function=preprocess)
    test_generator = datagen.flow_from_directory(test_path, target_size=target_size, batch_size=batch_size, class_mode=None, shuffle=False)
    test_features = base_model.predict_generator(test_generator, test_generator.samples // batch_size, verbose=1)
    
    test_features_name = 'test_{0}_features.npz'.format(base_model.name)
    np.savez(test_features_name, test=test_features, test_filename=test_generator.filenames)
    return test_features_name
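
A hedged usage sketch (assumptions: the module-level test_path and batch_size globals used above are set, and a standard Keras application serves as the frozen feature extractor):

from keras.applications.resnet50 import ResNet50, preprocess_input

base_model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
features_file = extract_test_features(base_model, target_size=(224, 224), preprocess=preprocess_input)
print('features written to', features_file)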
def getDataGenerator(dir,  img_width, img_height, batch_size):
  datagen = ImageDataGenerator(rescale=1./255)
  generator = datagen.flow_from_directory(
        dir,  
        target_size=(img_width, img_height), 
        batch_size=batch_size,
        class_mode='categorical') 
  return generator
Example #26
    def train(self,model):

        #training parameters
        batch_size = 128
        maxepoches = 250
        learning_rate = 0.1
        lr_decay = 1e-6

        # The data, shuffled and split between train and test sets:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train, x_test = self.normalize(x_train, x_test)

        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)

        lrf = learning_rate


        #data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)



        #optimization details
        sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])


        # training process in a for loop with learning rate drop every 25 epochs.

        for epoch in range(1,maxepoches):

            if epoch%25==0 and epoch>0:
                lrf/=2
                sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
                model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

            historytemp = model.fit_generator(datagen.flow(x_train, y_train,
                                             batch_size=batch_size),
                                steps_per_epoch=x_train.shape[0] // batch_size,
                                epochs=epoch,
                                validation_data=(x_test, y_test),initial_epoch=epoch-1)
        model.save_weights('cifar100vgg.h5')
        return model
Example #27
def test_ensembles():
    from keras.models import model_from_json
    m_prec=0
    m_rec=0
    m_fmeas=0
    m_acc=0
    for name_file in os.listdir('weights/'):
        f = open('backend_inceptionv2.json', 'r')
        model=None
        model = model_from_json(f.read())
        f.close()
        model.load_weights('weights/'+name_file)
        from keras.optimizers import SGD
        model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
        datagen2 = ImageDataGenerator(
            rescale=1. / 255
        )
        generator = datagen2.flow_from_directory(
            'test/',
            target_size=(img_width, img_height),
            batch_size=50,
            class_mode=None,
            shuffle=False)
        np.set_printoptions(suppress=True)
        predictions = model.predict_generator(generator, 8)
        index = 0
        confusion_matrix = np.zeros((8, 8))
        for i in predictions:
            true_class = index // 50
            confusion_matrix[np.argmax(i)][true_class] += 1
            index += 1
        tps = confusion_matrix.diagonal()
        fps = np.sum(confusion_matrix, (0))
        fps -= tps
        fns = np.sum(confusion_matrix, (1))
        fns -= tps
        precision = tps / (np.sum(confusion_matrix, (1)))
        recall = tps / (np.sum(confusion_matrix, (0)))
        accuracy = np.sum(tps) / (np.sum(confusion_matrix))
        f_measure = (2 * precision * recall) / (precision + recall)
        m_prec+=np.mean(precision)
        m_rec+=np.mean(recall)
        m_fmeas+=np.mean(f_measure)
        m_acc+=accuracy
        print('p:',end='')
        print(np.mean(precision))
        print('r:', end='')
        print(np.mean(recall))
        print('fm:', end='')
        print(np.mean(f_measure))
        print('a:', end='')
        print(accuracy)
        print('-------------')
    print('final precision ' + str(m_prec / 5))
    print('final recall ' + str(m_rec / 5))
    print('final fmeas ' + str(m_fmeas / 5))
    print('final accura ' + str(m_acc / 5))
Example #28
    def create_test_images(self):
        shutil.rmtree('card_training', ignore_errors=True)
        shutil.rmtree('card_testing', ignore_errors=True)

        datagen = ImageDataGenerator(
            rotation_range=1,
            width_shift_range=0.1,
            height_shift_range=0.1,
            shear_range=0.05,
            zoom_range=0.1,
            horizontal_flip=False,
            fill_mode='nearest')

        dirs = [(base_dir + r'/pics/PP_old/', r'/card_training/'),
                (base_dir + r'/pics/SN/', r'/card_training/'),
                (base_dir + r'/pics/PS/', r'/card_training/'),
                (base_dir + r'/pics/PS/', r'/card_testing/'),
                (r'tests/', r'/card_testing/')]

        for d in dirs:
            source_folder = d[0]
            destination_folder = d[1]
            card_ranks_original = '23456789TJQKA'
            original_suits = 'CDHS'

            namelist = []
            namelist.append('empty')
            for c in card_ranks_original:
                for s in original_suits:
                    namelist.append(c + s)

            for name in namelist:
                try:
                    img = load_img(source_folder + name + '.png')  # this is a PIL image
                    x = np.asarray(img)
                    x = adjust_colors(x)

                    x = x.reshape((1,) + x.shape)  # this is a Numpy array with shape (1, 3, 150, 150)

                    # the .flow() command below generates batches of randomly transformed images
                    # and saves the results to the `preview/` directory
                    i = 0
                    directory = dir_path + destination_folder + name
                    if not os.path.exists(directory):
                        os.makedirs(directory)

                    for batch in datagen.flow(x, batch_size=1,
                                              save_to_dir=directory,
                                              save_prefix=name,
                                              save_format='png',
                                              ):
                        i += 1
                        if i > 50:
                            break  # otherwise the generator would loop indefinitely
                except:
                    print("skipping: " + name)
Example #29
    def set_generator(self, steps_per_epoch=10, gen_param_dict=None):
        if gen_param_dict is not None:
            self.generator = ImageDataGenerator(**gen_param_dict)
        else:
            self.generator = ImageDataGenerator()

        print(self.data.X_train.shape)

        self.generator.fit(self.data.X_train, seed=0)
        self.steps_per_epoch = steps_per_epoch
def train():
    """Use fine-tuning to train a network on a new dataset"""
    train_count = get_file_count(FLAGS.train_dir)
    class_count = len(glob.glob(FLAGS.train_dir + "/*"))
    val_count = get_file_count(FLAGS.val_dir)
    epochs = int(FLAGS.epochs)
    batch_size = int(FLAGS.batch_size)
    target_size = (int(FLAGS.resolution), int(FLAGS.resolution))

    train_datagen =  ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
    )
    test_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=30,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
    )

    train_generator = train_datagen.flow_from_directory(
        FLAGS.train_dir,
        target_size=target_size,
        batch_size=batch_size
    )

    validation_generator = test_datagen.flow_from_directory(
        FLAGS.val_dir,
        target_size=target_size,
        batch_size=batch_size
    )

    model = create_model(class_count) 
    model = freeze_layers(model)
    early_stopping = EarlyStopping(monitor='val_loss', patience=2)

    model.fit_generator(
        train_generator,
        steps_per_epoch=train_count/batch_size,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=val_count/batch_size,
        class_weight='auto',
        callbacks=[early_stopping]
    )

    model.save(FLAGS.output_model_file)
def fine_tune():
    # Start by instantiating the VGG base and loading its weights.
    epochs = 10

    model_vgg = applications.VGG16(weights='imagenet',
                                   include_top=False,
                                   input_shape=(256, 256, 3))

    # Build a classifier model to put on top of the convolutional model. For the fine-tuning, we start with a fully trained classifier. We will use the weights from the earlier model, and then we will add this model on top of the convolutional base.

    top_model = Sequential()
    top_model.add(Flatten(input_shape=model_vgg.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))

    top_model.load_weights(str(exp_url) + 'models/bottleneck_30_epochs.h5')

    # model_vgg.add(top_model)
    model = Model(inputs=model_vgg.input, outputs=top_model(model_vgg.output))

    # For fine-tuning, we only want to train a few layers.  This line will set the first 15 layers (up to the last conv block) to non-trainable.

    for layer in model.layers[:15]:
        layer.trainable = False

    # compile the model with a SGD/momentum optimizer
    # and a very slow learning rate.
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                  metrics=['accuracy'])

    # prepare data augmentation configuration  . . . do we need this?
    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='binary')

    # fine-tune the model
    model.fit_generator(train_generator,
                        steps_per_epoch=train_samples // batch_size,
                        epochs=epochs,
                        validation_data=validation_generator,
                        validation_steps=validation_samples // batch_size)

    model.save_weights(str(exp_url) + 'models/finetuning_30epochs_vgg.h5')
    model.save(str(exp_url) + 'models/theultimate.h5')

    # ### Evaluating on validation set

    # Computing loss and accuracy :

    print(model.evaluate_generator(validation_generator, validation_samples))
Example #32
y_train = np.array(y_train, np.uint8)
x_train = np.array(x_train, np.float16) / 255.0
y_valid = np.array(y_valid, np.uint8)
x_valid = np.array(x_valid, np.float16) / 255.0

filepath= "best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True)
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.0002, patience=5, verbose=0, mode='auto')

callbacks_list = [checkpoint, earlystop]

datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=30,
    width_shift_range=0.3,
    height_shift_range=0.3,
    horizontal_flip=True,
    vertical_flip = False,
    fill_mode =  "reflect")
datagen.fit(x_train)


model = Sequential()
model.add(BatchNormalization(input_shape=(48, 48, 3)))
model.add(Conv2D(4, kernel_size=(5, 5),padding='same'))
model.add(LeakyReLU())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(4, kernel_size=(3, 3),padding='same'))
model.add(LeakyReLU())
model.add(MaxPooling2D(pool_size=(2, 2)))
Example #33
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, Dropout, MaxPooling2D, ZeroPadding2D
from keras import optimizers

#For collecting the data on my pc
img_width, img_height = 150, 150

train_data_dir = 'data/train'
validation_data_dir = 'data/validation'

# used to rescale the pixel values from [0, 255] to [0, 1] interval
datagen = ImageDataGenerator(rescale=1. / 255)

# automagically retrieve images and their classes for train and validation sets
train_generator = datagen.flow_from_directory(train_data_dir,
                                              target_size=(img_width,
                                                           img_height),
                                              batch_size=16,
                                              class_mode='binary')

validation_generator = datagen.flow_from_directory(validation_data_dir,
                                                   target_size=(img_width,
                                                                img_height),
                                                   batch_size=32,
                                                   class_mode='binary')
"""
This is the simple keras CNN model, CNN models often don't need more than 3 layers when working with small datasets. The focus here is to set alot of 
filters on the layers, so the model have the possibility too find alot of patterns for the diffrent kinds of dogs and cats.
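
Below is a minimal sketch of the kind of small CNN that note describes, reusing the imports and image dimensions declared earlier in this example; the exact filter counts are illustrative assumptions:

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(img_width, img_height, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# train_generator and validation_generator defined above can now be passed to model.fit_generator(...)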
Example #34
    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    return model


nb_classes = 10
nb_epoch = 30
nb_step = 6
batch_size = 64

x, y = loadImages()

from keras.preprocessing.image import ImageDataGenerator
dataGenerator = ImageDataGenerator()
dataGenerator.fit(x)
data_generator = dataGenerator.flow(x, y, batch_size,
                                    True)  # generator used to produce shuffled training batches

model = Net_model(nb_classes=nb_classes, lr=0.0001)  # load the network model

history = model.fit_generator(data_generator,
                              epochs=nb_epoch,
                              steps_per_epoch=nb_step,
                              shuffle=True)  # train the network

model.save_weights('./star_trained_model_weights.h5')  # save the trained weights as an .h5 file
print("DONE, model saved in path")

end = time.time()
# set the first 25 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:25]:
    layer.trainable = False

# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])

# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
Example #36
import json
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import RMSprop
from keras.layers.advanced_activations import LeakyReLU
from keras.models import load_model
from sys import path

path.append('../DAC')
from myMetrics import *

global upper, lower
datagen = ImageDataGenerator(rotation_range=10,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             channel_shift_range=0.05,
                             horizontal_flip=True,
                             rescale=0.975,
                             zoom_range=[0.95, 1.05])


class Adaptive(Layer):
    def __init__(self, **kwargs):
        super(Adaptive, self).__init__(**kwargs)

    def build(self, input_shape):
        self.nb_sample = input_shape[0]
        self.nb_dim = input_shape[1]

    def call(self, x, mask=None):
        y = self.transfer(x)
Example #37
    sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    # set callback
    tb_cb = TensorBoard(log_dir='./densenet/', histogram_freq=0)
    change_lr = LearningRateScheduler(scheduler)
    ckpt = ModelCheckpoint('./ckpt.h5',
                           save_best_only=False,
                           mode='auto',
                           period=10)
    cbks = [change_lr, tb_cb, ckpt]

    # set data augmentation
    print('Using real-time data augmentation.')
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.125,
                                 height_shift_range=0.125,
                                 fill_mode='constant',
                                 cval=0.)

    datagen.fit(x_train)

    # start training
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        steps_per_epoch=iterations,
                        epochs=epochs,
                        callbacks=cbks,
                        validation_data=(x_test, y_test))
    model.save('densenet.h5')
Example #38

# determine the total number of image paths in training, validation,
# and testing directories
totalTrain = len(list(paths.list_images(trainPath)))
totalVal = len(list(paths.list_images(valPath)))
totalTest = len(list(paths.list_images(testPath)))

# totalTrain = len(os.listdir(trainPath))
# totalVal = len(os.listdir(valPath))
# totalTest = len(os.listdir(testPath))
# initialize the training data augmentation object
trainAug = ImageDataGenerator(
	rotation_range=30,
	zoom_range=0.15,
	width_shift_range=0.2,
	height_shift_range=0.2,
	shear_range=0.15,
	horizontal_flip=True,
	fill_mode="nearest")

# initialize the validation/testing data augmentation object (which
# we'll be adding mean subtraction to)
valAug = ImageDataGenerator()

# define the ImageNet mean subtraction (in RGB order) and set the
# the mean subtraction value for each of the data augmentation
# objects
mean = np.array([123.68, 116.779, 103.939], dtype="float32")
trainAug.mean = mean
valAug.mean = mean
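
A hedged sketch of how these augmentation objects are typically wired up (assumptions: a compiled model named model, the trainPath/valPath directories used above, and an illustrative 224x224 target size and batch size):

BS = 32
trainGen = trainAug.flow_from_directory(
    trainPath, target_size=(224, 224), class_mode="categorical",
    shuffle=True, batch_size=BS)
valGen = valAug.flow_from_directory(
    valPath, target_size=(224, 224), class_mode="categorical",
    shuffle=False, batch_size=BS)
model.fit_generator(trainGen,
                    steps_per_epoch=totalTrain // BS,
                    validation_data=valGen,
                    validation_steps=totalVal // BS,
                    epochs=20)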
Example #39
    class_mode = 'categorical'
    loss_function = 'categorical_crossentropy'

    model_name = 'testing_model'

    model = create_cnn(input_shape, loss=loss_function)


    call_backs = [ModelCheckpoint(filepath='/Users/christopherlawton/galvanize/module_2/capstone_2/save_model/{}'.format(model_name),
                                monitor='val_loss',
                                save_best_only=True),
                                EarlyStopping(monitor='val_loss', patience=5, verbose=0)]

    train_datagen = ImageDataGenerator(
                    rescale=1./scale,
                    rotation_range=0.4,
                    width_shift_range=0.2,
                    height_shift_range=0.2,
                    horizontal_flip=True)

    validation_datagen = ImageDataGenerator(
                    rescale=1./scale)

    train_generator = train_datagen.flow_from_directory(
                        train_path,
                        color_mode='grayscale',
                        target_size=target_size,
                        batch_size=batch_size,
                        class_mode=class_mode,
                        shuffle=True)

    validation_generator = validation_datagen.flow_from_directory(
Example #40
def create_model():
  model = Sequential()
  model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=(256, 256, 3)))
  model.add(MaxPooling2D(pool_size=(2, 2)))
  model.add(Dropout(0.25))

  model.add(Flatten())
  model.add(Dense(128, activation='relu'))
  model.add(Dropout(0.5))
  model.add(Dense(1, activation='sigmoid'))  # sigmoid, not softmax, for a single-unit binary output

  return model

train_generator = ImageDataGenerator(
  data_format="channels_last",
  rescale = 1. / 255
)

train_batches = train_generator.flow_from_directory(
    batch_size=32,
    directory='./dataset/train',
    target_size=(256, 256),
    class_mode='binary'
)

validation_generator = ImageDataGenerator(
  data_format="channels_last",
  rescale = 1. / 255
)

validation_batches = validation_generator.flow_from_directory(
    images_paths = glob.glob(of_path + '/' + word + '/*.png')
    print(images_paths)

    length = len(images_paths)
    number = math.ceil(800 / length)
    for image_path in images_paths:
        img = load_img(image_path)
        # convert to numpy array
        data = img_to_array(img)
        # expand dimension to one sample
        samples = expand_dims(data, 0)
        image_name = image_path.split('/')[-1].split('.')[0]
        # create image data augmentation generator
        datagen = ImageDataGenerator(width_shift_range=[-24, 24],
                                     height_shift_range=[-12, 12],
                                     rotation_range=5,
                                     brightness_range=[0.5, 1],
                                     zoom_range=0.3,
                                     horizontal_flip=True)  # Zoom 0.1
        # prepare iterator
        it = datagen.flow(samples, batch_size=1)

        result_directory_path = result_path + '/' + word
        result_image_path = result_directory_path + '/' + image_name + '_original.png'
        print(result_image_path)
        cv2.imwrite(result_image_path, data.astype('uint8'))
        for i in range(number - 1):
            # generate batch of images
            batch = it.next()
            # convert to unsigned integers for viewing
            image = batch[0].astype('uint8')
            # cv2_imshow(image)
#Step 3-Flatteing
classifier.add(Flatten())

#Step 4- Full Connection
classifier.add(Dense(output_dim=128, activation='relu'))
classifier.add(Dense(output_dim=1, activation='sigmoid'))

#Compiling the CNN
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

#Part 2-Fitting the CNN to the Images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')
classifier.fit(training_set,
               steps_per_epoch=8000,
               epochs=25,
               validation_data=test_set,
               validation_steps=2000)
Example #43
def save_bottlebeck_features():
    datagen = ImageDataGenerator(rescale=1.)
    datagen.mean = np.array([103.939, 116.779, 123.68],
                            dtype=np.float32).reshape(3, 1, 1)

    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    assert os.path.exists(
        weights_path
    ), 'Model weights not found (see "weights_path" variable in script).'
    f = h5py.File(weights_path, 'r')
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            break
        g = f['layer_{}'.format(k)]
        weights = [
            g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])
        ]
        model.layers[k].set_weights(weights)
    f.close()
    print('Model loaded.')

    generator = datagen.flow_from_directory(train_data_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=32,
                                            class_mode=None,
                                            shuffle=False)
    bottleneck_features_train = model.predict_generator(
        generator, nb_train_samples)  # Keras 1 API: the 2nd argument is a sample count, not a batch count
    np.save(open('bottleneck_features_train.npy', 'wb'),  # binary mode required by np.save
            bottleneck_features_train)

    generator = datagen.flow_from_directory(validation_data_dir,
                                            target_size=(img_width,
                                                         img_height),
                                            batch_size=32,
                                            class_mode=None,
                                            shuffle=False)
    bottleneck_features_validation = model.predict_generator(
        generator, nb_validation_samples)
    np.save(open('bottleneck_features_validation.npy', 'wb'),  # binary mode required by np.save
            bottleneck_features_validation)
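
The bottleneck features saved above are normally fed to a small fully connected "top model" in a second step. A minimal sketch of that follow-up, assuming two evenly balanced classes stored in directory order and the file names used above (the top-model architecture, epoch count, and class split are illustrative assumptions, not part of the original script):

import numpy as np
from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout

def train_top_model():
    # load the features written by save_bottlebeck_features()
    train_data = np.load(open('bottleneck_features_train.npy', 'rb'))
    validation_data = np.load(open('bottleneck_features_validation.npy', 'rb'))

    # assumed: two classes, evenly balanced, ordered by flow_from_directory
    train_labels = np.array([0] * (nb_train_samples // 2) +
                            [1] * (nb_train_samples // 2))
    validation_labels = np.array([0] * (nb_validation_samples // 2) +
                                 [1] * (nb_validation_samples // 2))

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(train_data, train_labels,
              epochs=50, batch_size=32,
              validation_data=(validation_data, validation_labels))
    model.save_weights('bottleneck_fc_model.h5')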
Example #44
0
# Declare a few useful values
num_train_samples = 9013
num_val_samples = 1002
train_batch_size = 10
val_batch_size = 10
image_size = 224

# Declare how many steps are needed in an iteration
train_steps = int(np.ceil(num_train_samples / train_batch_size))  # fit_generator expects integer step counts
val_steps = int(np.ceil(num_val_samples / val_batch_size))

# Set up generators
train_batches = ImageDataGenerator(
    preprocessing_function= \
        keras.applications.mobilenet.preprocess_input).flow_from_directory(
    train_path,
    target_size=(image_size, image_size),
    batch_size=train_batch_size)

valid_batches = ImageDataGenerator(
    preprocessing_function= \
        keras.applications.mobilenet.preprocess_input).flow_from_directory(
    valid_path,
    target_size=(image_size, image_size),
    batch_size=val_batch_size)

test_batches = ImageDataGenerator(
    preprocessing_function= \
        keras.applications.mobilenet.preprocess_input).flow_from_directory(
    valid_path,
    target_size=(image_size, image_size),
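    # (snippet truncated here; a plausible completion follows -- the batch size
    #  and shuffle=False are assumptions, not from the original source)
    batch_size=val_batch_size,
    shuffle=False)  # keep file order stable so predictions can be matched to labels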
Example #45
0
# coding:utf-8
'''
Created on 2017/12/26.

@author: chk01
'''
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from PIL import Image

datagen = ImageDataGenerator(rotation_range=0.2,  # note: rotation_range is in degrees, so 0.2 allows almost no rotation
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')

img = load_img(
    '1.jpg')  # this is a PIL image; replace with your own file path
x = img_to_array(img)  # a NumPy array with shape (height, width, 3)
x = x.reshape(
    (1, ) + x.shape)  # a NumPy array with shape (1, height, width, 3)

# the .flow() command below generates batches of randomly transformed images
# and saves the results to the `preview/` directory

i = 0
for batch in datagen.flow(x,
                          batch_size=1,
                          save_to_dir='preview',  # was '/'; write previews to a local 'preview/' folder (must exist)
                          save_prefix='lena',
Example #46
0
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy', 'precision', 'recall'])  # 'precision'/'recall' as string metrics exist only in Keras 1.x

if(PRINT_MODEL):
    print('-'*30)
    print('Printing model...')
    print('-'*30)
    plot(model, to_file='method1_VGG16_model.png')

# this is the augmentation configuration used for training
train_datagen = ImageDataGenerator(
                    rescale=1./255,
                    shear_range=0,
                    rotation_range=40, # randomly rotate images in the range (degrees, 0 to 180)
                    width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
                    height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
                    zoom_range=0.2,
                    horizontal_flip=True, # randomly flip images horizontally
                    vertical_flip=False)  # no vertical flips

print('-'*30)
print('Data augmentation...')
print('-'*30)
# this is the augmentation configuration for testing:
test_datagen = ImageDataGenerator(rescale=1./255)

print('-'*30)
print('Creating train batches...')
print('-'*30)
train_generator = train_datagen.flow_from_directory(
Example #47
0
from keras.layers import Input
from keras.models import Model
from imutils import paths
import numpy as np
import argparse
import os

ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
ap.add_argument("-m", "--model", required=True, help="path to output model")
args = vars(ap.parse_args())

aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode='nearest')

print("[INFO] Loading images")

imagePaths = list(paths.list_images(args["dataset"]))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]

aap = aspectawarepreprocessor.AspectAwarePreprocessor(224, 224)
iap = imagetoarraypreprocessor.ImageToArrayPreprocessor()

sdl = dataloader.DatasetLoader(preprocessors=[aap, iap])
Example #48
0
'''
This script trains a CNN classifier for predicting the viewpoint of a car.
'''
import numpy as np
import pandas
from keras.applications import InceptionV3
from keras.engine import Input
from keras.engine import Model
from keras.layers import Dense, GlobalAveragePooling2D, Dropout
from keras.preprocessing import image as image_utils
from keras.utils import np_utils
from keras.callbacks import TensorBoard
from keras.preprocessing.image import ImageDataGenerator

# Some image augmentation and normalization for the training images.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=False)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    '/home/ubuntu/csc420_data/segmented_cars',  # this is the target directory
    target_size=(128, 128),  # all images will be resized to 128x128
    batch_size=32,
    class_mode='categorical')

input_tensor = Input(shape=(128, 128, 3))

# We use the InceptionV3 / GoogLeNet model but retrain it to classify our dataset.
base_model = InceptionV3(input_tensor=input_tensor,
                         weights='imagenet',
Example #49
0
print("[INFO] loading CIFRA-10 data...")
((trainX, trainY), (testX, testY)) = cifar10.load_data()
trainX = trainX.astype("float")
testX = testX.astype("float")

mean = np.mean(trainX, axis=0)
trainX -= mean
testX -= mean

lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)  # reuse the binarizer fitted on the training labels

aug = ImageDataGenerator(width_shift_range=0.1,
                         height_shift_range=0.1,
                         horizontal_flip=True,
                         fill_mode="nearest")

if args["model"] is None:
    print("[INFO] compiling model ...")
    opt = SGD(lr=1e-1)
    model = ResNet.build(32,
                         32,
                         3,
                         10, (9, 9, 9), (64, 64, 128, 256),
                         reg=0.0005)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

else:
Example #50
0
import keras
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, Flatten, Activation, Dropout, BatchNormalization, MaxPooling2D, Dense
from keras.models import Sequential

img_rows, img_col = 74, 74
Batch_size = 16
no_of_classes = 3

train_dir = r'E:\face rec\train'
validation_dir = r'E:\face rec\validation'

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   zoom_range=0.3,
                                   shear_range=0.4,
                                   horizontal_flip=True,
                                   vertical_flip=True)
validation_datagen = ImageDataGenerator(rescale=1. / 255)

training_data = train_datagen.flow_from_directory(train_dir,
                                                  target_size=(img_rows,
                                                               img_col),
                                                  color_mode='rgb',
                                                  class_mode='categorical',
                                                  shuffle=True,
                                                  batch_size=Batch_size)
validation_data = validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(img_rows, img_col),
    color_mode='rgb',
Example #51
0
f.write(pickle.dumps(label_binarizer))
f.close()

# split the data into training and testing (80% and 20% respectively)
print("[INFO] splitting data for train/test...")
(trainX, testX, trainY,
 testY) = train_test_split(data,
                           labels,
                           test_size=0.2,
                           random_state=config.RANDOM_SEED)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=25,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# initialize the optimizer
# opt = Adam(lr=config.LR, decay=config.LR / config.EPOCHS)
# opt = SGD(lr=0.0001, momentum=0.9, nesterov=True)
# opt = RMSprop(lr=0.0001)
opts = []
opts.append(Adam(lr=config.LR, decay=config.LR / config.EPOCHS))
opts.append(RMSprop(lr=0.0001))
opts.append(Adagrad(lr=0.01))

for idx, opt in enumerate(opts):
    idx = str(idx + 1)  # so that it will start from 1
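    # (loop body truncated here; a plausible sketch of the remainder follows --
    #  `model`, `BATCH_SIZE`, and the output filename are assumptions, not from
    #  the original source; the model would normally be rebuilt per optimizer)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    model.fit_generator(aug.flow(trainX, trainY, batch_size=BATCH_SIZE),
                        steps_per_epoch=len(trainX) // BATCH_SIZE,
                        validation_data=(testX, testY),
                        epochs=config.EPOCHS)
    model.save("model_opt{}.h5".format(idx))  # one model file per optimizer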
Example #52
0
WIDTH, HEIGHT = (664, 485)

DROPOUT = 0.2
CLASSES = 2
BATCH_SIZE = 16
NUM_EPOCHS = 20
INIT_LR = 0.0001

BASE_PATH = 'data/chest_xray/'
TRAIN_PATH = BASE_PATH + 'train'
VAL_PATH = BASE_PATH + 'val'
TEST_PATH = BASE_PATH + 'test'

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   rotation_range=10,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(TRAIN_PATH,
                                                    target_size=(HEIGHT,
                                                                 WIDTH),
                                                    color_mode='rgb',
                                                    class_mode='categorical',
                                                    batch_size=BATCH_SIZE)

validation_generator = test_datagen.flow_from_directory(
    VAL_PATH,
    target_size=(HEIGHT, WIDTH),
Example #53
0
    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(x_train,
                  y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  validation_data=(x_test, y_test),
                  shuffle=True,
                  callbacks=[lr_reducer, early_stopper, csv_logger])
    else:
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(featurewise_center=False,
                                     samplewise_center=False,
                                     featurewise_std_normalization=False,
                                     samplewise_std_normalization=False,
                                     zca_whitening=False,
                                     rotation_range=0,
                                     width_shift_range=0.1,
                                     height_shift_range=0.1,
                                     horizontal_flip=True,
                                     vertical_flip=False)

        datagen.fit(x_train)

        model.fit_generator(datagen.flow(x_train,
                                         y_train,
                                         batch_size=batch_size),
                            steps_per_epoch=x_train.shape[0] // batch_size,
                            validation_data=(x_test, y_test),
                            epochs=nb_epoch,
                            verbose=1,
                            max_q_size=100,
Example #54
0
    def train(self, x_train, y_train, x_test, y_test, batch_size, epochs,
              save_interval):
        # Image augmentation object
        datagen = ImageDataGenerator(
            rotation_range=0,  # Rotation range, in degrees
            width_shift_range=0.,
            height_shift_range=0.,
            shear_range=0.,  # Image shearing, counter-clockwise
            horizontal_flip=False,  # TODO: These may mess up the training
            vertical_flip=False,
            fill_mode='nearest')
        # Fit to data
        datagen.fit(x_train, seed=self.weight_seed)

        # Main loop
        for epoch in range(epochs):
            # Counter
            batch_idx = 0
            for imgs, y_batch in datagen.flow(x_train,
                                              y_train,
                                              shuffle=False,
                                              batch_size=batch_size,
                                              seed=(self.batch_seed + epoch)):
                # Counter
                batch_idx = batch_idx + 1
                # Generate a half batch of new images
                latent_fake = self.encoder.predict(imgs)

                # Generate random samples
                (latent_real, labels) = self.generateRandomVectors(
                    y_batch, seed=(self.batch_seed + epoch + batch_idx))
                valid = np.ones((batch_size, 1))
                fake = np.zeros((batch_size, 1))

                # Train the discriminator
                d_loss_real = self.discriminator.train_on_batch(
                    [latent_real, labels], valid)
                d_loss_fake = self.discriminator.train_on_batch(
                    [latent_fake, labels], fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                # Generator wants the discriminator to label the generated representations as valid
                valid_y = np.ones((batch_size, 1))

                # Train autoencoder for reconstruction
                g_loss_reconstruction = self.autoencoder.train_on_batch(
                    imgs, imgs)

                # Train generator
                g_logg_similarity = self.encoder_discriminator.train_on_batch(
                    [imgs, labels], valid_y)

                # Plot progress per batch
                print(
                    "Epoch %d, batch %d : [D loss: %f, acc: %.2f%%] [G acc: %f, mse: %f]"
                    % (epoch, batch_idx, d_loss[0], 100 * d_loss[1],
                       g_logg_similarity[1], g_loss_reconstruction))

                # Break loop by hand (Keras trick)
                if batch_idx >= len(x_train) / batch_size:
                    break

            # Write to file
            if (epoch % save_interval == 0):
                self.saveWeights(epoch)
                self.saveLogs(epoch, x_test, y_test)
Example #55
0
validation_path = '/content/drive/MyDrive/Final Images for Safa Samaj/validation'

#img_list = glob.glob(os.path.join(dir_path, '*/*.jpg'))
img_list_train = glob.glob(os.path.join(train_path, '*/*.jpg'))  # match images inside the class subfolders
img_list_valid = glob.glob(os.path.join(validation_path, '*/*.jpg'))

#len(img_list)
len(img_list_train)
#len(img_list_valid)

# image augmentation
train = ImageDataGenerator(horizontal_flip=True,
                           vertical_flip=True,
                           validation_split=0.1,
                           rotation_range=45,
                           rescale=1./255,
                           shear_range=0.1,
                           zoom_range=0.1,
                           width_shift_range=0.1,
                           height_shift_range=0.1)

validation = ImageDataGenerator(rescale=1./255,
                                validation_split=0.1)

train_generator=train.flow_from_directory(train_path,
                                          target_size=(300,300),
                                          batch_size=32,
                                          shuffle=True,
                                          class_mode='categorical',
                                          subset='training')
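
The snippet ends before the matching validation generator is created; given the validation_split=0.1 configured above, a plausible counterpart would draw the held-out subset from the same training directory (the target size, batch size, and shuffle setting here are assumptions):

validation_generator = validation.flow_from_directory(
    train_path,
    target_size=(300, 300),
    batch_size=32,
    shuffle=False,
    class_mode='categorical',
    subset='validation')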
Example #56
0
#Flattening
classifier.add(Flatten())

#Full Connection
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=10, activation='softmax'))

classifier.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

# In[36]:

from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

training_set = train_datagen.flow_from_directory('dataset/trainingSet',
                                                 target_size=(28, 28),
                                                 batch_size=64,
                                                 class_mode='categorical')

# In[ ]:

classifier.fit_generator(training_set, steps_per_epoch=8000 // 64, epochs=25)  # integer number of batches per epoch
Example #57
0
from keras.layers import Dense
from keras.layers import Flatten
from keras.callbacks import EarlyStopping
import sys
import time
import numpy as np
np.random.seed(777)
if len(sys.argv) < 3:
    print("Input arguments:")
    print("1. Train images path")
    print("2. Test images path")
    exit()
TrainImagesPath=sys.argv[1]
TestImagesPath=sys.argv[2]
#%%
datagen=ImageDataGenerator(samplewise_center=True,
    samplewise_std_normalization=True)
train_count=6026
test_count=2651
batch=32
train_generator = datagen.flow_from_directory(
        TrainImagesPath,
        target_size=(128, 128),
        batch_size=batch,
        class_mode='categorical', seed=777)
test_generator = datagen.flow_from_directory(
        TestImagesPath,
        target_size=(128, 128),
        batch_size=batch,
        class_mode='categorical',shuffle=False)
#%%
model = Sequential()
Example #58
0
from keras.models import Model, Sequential
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.optimizers import Adadelta, RMSprop,SGD,Adam
from keras import regularizers
from keras import backend as K
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator

batch_size = 64
epochs = 25
inChannel = 3
x, y = 128, 128
input_img = Input(shape = (x, y, inChannel))
num_classes = 2

train_datagen = ImageDataGenerator()
#                                    rotation_range=40,
#                                    width_shift_range=0.2,
#                                    height_shift_range=0.2,
#                                    shear_range=0.2,
#                                    zoom_range=0.2,
#                                    channel_shift_range=10,
#                                    horizontal_flip=True,
#                                    fill_mode='nearest')

train_batches = train_datagen.flow_from_directory("./../data/MURA-v1.1/data2/train/",
                                                  target_size=(x,y),
                                                  interpolation='bicubic',
                                                  class_mode='categorical',
                                                  shuffle=True,
                                                  batch_size=batch_size)
Example #59
0
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
#model.add(Dropout(0.2))
model.add(Dense(num_classes))
model.add(Activation('sigmoid'))  # note: 'softmax' is the usual pairing with categorical_crossentropy

# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True,
                             rotation_range=20,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             horizontal_flip=True)

# featurewise_center / featurewise_std_normalization need the dataset statistics
datagen.fit(x_train)

datagen_generator = datagen.flow(x_train[:40000],
                                 y_train[:40000],
                                 batch_size=batch_size)

# Train the model
history = model.fit_generator(datagen.flow(x_train,
                                           y_train,
                                           batch_size=batch_size),
                              epochs=epochs,
                              validation_data=(x_test, y_test),
                              shuffle=True)
Example #60
0
x = Flatten()(x)
x = Dense_layer(x,512,512,500)

model = Model(inputs=inputs,outputs=x)
model.summary()

model.compile(
    optimizer=optimizers.adam(lr=1e-5),
    loss='categorical_crossentropy',
    metrics=['accuracy'])


train_data = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_data = ImageDataGenerator(rescale=1./255)

train_generator = train_data.flow_from_directory(
    path,
    target_size=(100, 80),
    batch_size=138)

validation_generator = test_data.flow_from_directory(
    path1,
    target_size=(100, 80),
    batch_size=270