def train(self, save_model_to_file=True, rotation_range=20, width_shift_range=0.5, height_shift_range=0.2):
        """ Trains the model using the dataset in letters_folder """

        # Read the data
        data = []
        labels = []
        for imgName in listdir(self.letters_folder):
            img = cv2.imread(self.letters_folder+"/"+imgName, cv2.IMREAD_GRAYSCALE)
            data.append(img)
            # Get the label from the image path and then get the index from the letters list
            labels.append(self.letters.index(imgName.split('_')[0]))

        data = np.array(data)
        labels = np.array(labels)

        # Split train and test
        X_train, X_test, y_train, y_test = train_test_split(
                 data, labels, test_size=0.33, random_state=42)

        X_train = X_train.reshape(X_train.shape[0], 1, self.img_rows, self.img_cols)
        X_test = X_test.reshape(X_test.shape[0], 1, self.img_rows, self.img_cols)
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        X_train /= 255
        X_test /= 255

        # convert class vectors to binary class matrices
        Y_train = np_utils.to_categorical(y_train, self.nb_classes)
        Y_test = np_utils.to_categorical(y_test, self.nb_classes)

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            rotation_range=rotation_range,          # randomly rotate images (degrees)
            width_shift_range=width_shift_range,    # randomly shift images horizontally (fraction of total width)
            height_shift_range=height_shift_range)  # randomly shift images vertically (fraction of total height)

        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        history = self.model.fit_generator(datagen.flow(X_train, Y_train, batch_size=self.batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=self.nb_epoch,
                            validation_data=(X_test, Y_test))


        # Plot History
        plt.figure(figsize=(10,10))
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

        if save_model_to_file:
            self.model.save_weights(self.weights_path,overwrite=True)
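For context, a minimal sketch of the attributes this train() method assumes on its class; every name and value below is illustrative rather than taken from the original project.

class LetterClassifier:
    """Hypothetical host class for the train() method above."""
    def __init__(self):
        self.letters_folder = "letters"  # folder of images named <label>_<n>.png
        self.letters = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        self.nb_classes = len(self.letters)
        self.img_rows, self.img_cols = 28, 28
        self.batch_size = 32
        self.nb_epoch = 20
        self.weights_path = "letter_weights.h5"
        self.model = None  # a compiled Keras CNN is assumed to be built elsewhere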
	def fit(self, x, y, doRTA):
		if not doRTA:
			self.model.fit({"input": x, "output": y}, nb_epoch=self.epochs, batch_size=self.batch_size)
		else:
			datagen = ImageDataGenerator(
			        featurewise_center=True,  # set input mean to 0 over the dataset
			        samplewise_center=False,  # set each sample mean to 0
			        featurewise_std_normalization=True,  # divide inputs by std of the dataset
			        samplewise_std_normalization=False,  # divide each input by its std
			        zca_whitening=False,
			        rotation_range=20,
			        width_shift_range=0.2, 
			        height_shift_range=0.2,
			        horizontal_flip=True, 
			        vertical_flip=False)
			datagen.fit(x)

			for e in range(self.epochs):
			    print('-'*40)
			    print('Epoch', e)
			    print('-'*40)
			    print('Training...')
			    # batch train with realtime data augmentation
			    progbar = generic_utils.Progbar(x.shape[0])
			    for X_batch, Y_batch in datagen.flow(x, y):
			        loss = self.model.train_on_batch({"input":X_batch,"output":Y_batch})
			        progbar.add(X_batch.shape[0], values=[('train loss', loss[0])])
    def train(self):
        # load data
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)
        
        x_train, x_test = self.color_preprocessing(x_train, x_test)

        # build network
        model = self.build_model()
        model.summary()

        # Save the best model during each training checkpoint
        checkpoint = ModelCheckpoint(self.model_filename,
                                    monitor='val_loss', 
                                    verbose=0,
                                    save_best_only= True,
                                    mode='auto')
        plot_callback = PlotLearning()
        tb_cb = TensorBoard(log_dir=self.log_filepath, histogram_freq=0)

        cbks = [checkpoint, plot_callback, tb_cb]

        # set data augmentation
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(horizontal_flip=True,width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)
        datagen.fit(x_train)

        # start training
        model.fit_generator(datagen.flow(x_train, y_train,batch_size=self.batch_size),steps_per_epoch=self.iterations,epochs=self.epochs,callbacks=cbks,validation_data=(x_test, y_test))
        
        model.save(self.model_filename)

        self._model = model
def train():
    model_ = 'VGG_16'
    batch_size = 8
    nb_classes = 5
    nb_epoch = 200
    data_augmentation = True

    # input image dimensions
    if model_ in MODELS[0:2]:
        img_rows, img_cols = 224, 224
    elif model_ == MODELS[3]:
        img_rows, img_cols = 299, 299
    # the Yelp images are RGB
    img_channels = 3

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = yelp_data(dtype=np.float32, grayscale=False, pixels=img_rows, batches=3,
                                                     model='VGG_16', data_dir='/home/rcamachobarranco/datasets')
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # generate model
    model = VGG_16(img_rows, img_cols, img_channels, nb_classes)

    # let's train the model using SGD + momentum
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(X_train, y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, show_accuracy=True,
                  validation_data=(X_test, y_test), shuffle=True)
    else:
        print('Using real-time data augmentation.')

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch, show_accuracy=True,
                            validation_data=(X_test, y_test),
                            nb_worker=1)
def augment_data(train_data):
    augmented_data_generator = ImageDataGenerator(
        rotation_range=20,
        horizontal_flip=True
    )
    augmented_data_generator.fit(train_data)
    return augmented_data_generator
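A hedged usage sketch for the generator returned by augment_data(); model, train_data, and train_labels are placeholders assumed for illustration.

datagen = augment_data(train_data)
model.fit_generator(datagen.flow(train_data, train_labels, batch_size=32),
                    steps_per_epoch=len(train_data) // 32,
                    epochs=10)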
Example #6
def data():
    nb_classes = 10
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    return datagen, X_train, Y_train, X_test, Y_test
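How the tuple returned by data() would plausibly be consumed, following the Keras 1-era fit_generator signature used in the surrounding examples; model is an assumed compiled network.

datagen, X_train, Y_train, X_test, Y_test = data()
model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32),
                    samples_per_epoch=X_train.shape[0],
                    nb_epoch=10,
                    validation_data=(X_test, Y_test))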
def main():
    model = Model()
    if (sys.argv[1] == "test"):
        global nb_epoch
        nb_epoch = 0
        global WEIGHTS_FILE
        WEIGHTS_FILE = sys.argv[2]

    elif(sys.argv[1] == "add"):
        global X_train, Y_train, X_val1, Y_val1
        X_train = np.concatenate((X_train, X_val1), axis=0)
        Y_train = np.concatenate((Y_train, Y_val1), axis=0)

    adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam)

    datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=False)

    datagen.fit(X_train)
    callbacks = [ModelCheckpoint(WEIGHTS_FILE, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
                 EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')]
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=len(X_train), nb_epoch=nb_epoch, validation_data=(X_val1, Y_val1),
                        show_accuracy=True, callbacks=callbacks)

    model.load_weights(WEIGHTS_FILE)
    predict_test(model)
Example #8
def train():
    (X_test, y_test, y_conf) = load.load_test_data()
    Y_test = np_utils.to_categorical(y_test, classes)
    print(X_test.shape[0], 'test samples')
    X_test = X_test.astype("float32")
    X_test /= 255
    datagen = ImageDataGenerator(rotation_range=30,  width_shift_range=0.01,  height_shift_range=0.01,  horizontal_flip=True, vertical_flip=True)
    t0=time.time()
    for e in range(nb_epoch):
        print ("******** Epoch %d ********" % (e+1))
        print ("Epoch Number: " + str(e))
        for X_batch, y_batch, class_weight in BatchGenerator():
            datagen.fit(X_batch)
            model.fit_generator(datagen.flow(X_batch, y_batch, batch_size=18, shuffle=True),
                                callbacks=[lh, checkpointer],
                                samples_per_epoch=split_size,
                                nb_epoch=nb_epoch_per,
                                validation_data=(X_test, Y_test),
                                class_weight=class_weight)
            y_pred = model.predict_classes(X_test, batch_size=20)
        (accuracy, correct)=PredictionMatrix()
        #model.save_weights((direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' )  % (e+1), overwrite=True)
        #print ("Weights saved to " + direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' % (e+1))
    t1=time.time()
    tyme = t1-t0   
    print("Training completed in %f seconds" % tyme)
    if save_name != '':
        model.save_weights(direct + '/weights/' + save_name, overwrite=True)
        print("Final training weights saved to " + save_name)
    return tyme
Example #9
File: convnet.py Project: ybenigot/keras
def preprocess_data(X_train, y_train, X_val, y_val, X_test, y_test):
	print('start preprocess...')

	X_train=scale_data(X_train)
	X_val=scale_data(X_val)
	X_test=scale_data(X_test)

	#subtract mean, per sample and per color channel
	X_train, X_val, X_test = im.mean2(X_train, X_val, X_test)

	#apply ZCA whitening on each color channel
	#X_train=im.whiten(X_train,epsilon=0.1)
	#X_test=im.whiten(X_test,epsilon=0.1)

	g = ImageDataGenerator(width_shift_range=0.2,height_shift_range=0.2,horizontal_flip=True,\
	fill_mode='nearest',dim_ordering='th') 
	g.fit(X_train)
	
	y_train = to_categorical(y_train)
	y_val = to_categorical(y_val)
	y_test = to_categorical(y_test)

	print('...done')

	return g, X_train, y_train, X_val, y_val, X_test, y_test
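Because the generator above is built with dim_ordering='th', the arrays fed to it must be channels-first; a small sanity check under that assumption (not part of the original):

g, X_train, y_train, X_val, y_val, X_test, y_test = preprocess_data(
    X_train, y_train, X_val, y_val, X_test, y_test)
assert X_train.ndim == 4 and X_train.shape[1] in (1, 3), \
    "dim_ordering='th' expects (samples, channels, rows, cols)"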
Example #10
File: dl.py Project: jskDr/jamespy_py3
class Machine_Generator(Machine_cnn_lenet):
    def __init__(self, X, y, nb_classes=2, steps_per_epoch=10, fig=True,
                 gen_param_dict=None):
        super().__init__(X, y, nb_classes=nb_classes, fig=fig)
        self.set_generator(steps_per_epoch=steps_per_epoch, gen_param_dict=gen_param_dict)

    def set_generator(self, steps_per_epoch=10, gen_param_dict=None):
        if gen_param_dict is not None:
            self.generator = ImageDataGenerator(**gen_param_dict)
        else:
            self.generator = ImageDataGenerator()

        print(self.data.X_train.shape)

        self.generator.fit(self.data.X_train, seed=0)
        self.steps_per_epoch = steps_per_epoch

    def fit(self, nb_epoch=10, batch_size=64, verbose=1):
        model = self.model
        data = self.data
        generator = self.generator
        steps_per_epoch = self.steps_per_epoch

        history = model.fit_generator(generator.flow(data.X_train, data.Y_train, batch_size=batch_size),
                                      epochs=nb_epoch, steps_per_epoch=steps_per_epoch,
                                      validation_data=(data.X_test, data.Y_test))

        return history
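A hedged construction example showing how gen_param_dict reaches ImageDataGenerator(**gen_param_dict) in set_generator(); X and y are placeholder arrays and the augmentation values are illustrative.

gen_params = dict(rotation_range=10,
                  width_shift_range=0.1,
                  height_shift_range=0.1,
                  horizontal_flip=True)
machine = Machine_Generator(X, y, nb_classes=2,
                            steps_per_epoch=10,
                            gen_param_dict=gen_params)
history = machine.fit(nb_epoch=10, batch_size=64)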
Example #11
    def train(self,model):

        #training parameters
        batch_size = 128
        maxepoches = 250
        learning_rate = 0.1
        lr_decay = 1e-6

        # The data, shuffled and split between train and test sets:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train, x_test = self.normalize(x_train, x_test)

        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)

        lrf = learning_rate


        #data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)



        #optimization details
        sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])


        # training process in a for loop with learning rate drop every 25 epochs.

        for epoch in range(1, maxepoches):

            if epoch % 25 == 0:
                lrf/=2
                sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
                model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

            historytemp = model.fit_generator(datagen.flow(x_train, y_train,
                                             batch_size=batch_size),
                                steps_per_epoch=x_train.shape[0] // batch_size,
                                epochs=epoch,
                                validation_data=(x_test, y_test),initial_epoch=epoch-1)
        model.save_weights('cifar100vgg.h5')
        return model
Example #12
def hard_train(data_prefix, prefix, seed, col):
    what = ['systole', 'diastole'][col % 2]
    print('We are going to train hard {} {}'.format(what, col))
    print('Loading training data...')

    X, y = load_train_data(data_prefix, seed)
    X_train, y_train, X_test, y_test = split_data(X, y, split_ratio=0.2)

    model = get_model()

    nb_iter = 200
    epochs_per_iter = 1
    batch_size = 32

    min_val = sys.float_info.max


    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=True)  # randomly flip images


    print('-'*50)
    print('Training...')
    print('-'*50)

    datagen.fit(X_train)


    checkpointer_best = ModelCheckpoint(filepath=prefix + "weights_{}_best.hdf5".format(what), verbose=1, save_best_only=True)
    checkpointer = ModelCheckpoint(filepath=prefix + "weights_{}.hdf5".format(what), verbose=1, save_best_only=False)

    hist = model.fit_generator(datagen.flow(X_train, y_train[:, col], batch_size=batch_size),
                                           samples_per_epoch=X_train.shape[0],
                                           nb_epoch=nb_iter, show_accuracy=False,
                                           validation_data=(X_test, y_test[:, col]),
                                           callbacks=[checkpointer, checkpointer_best],
                                           nb_worker=4)

    loss = hist.history['loss'][-1]
    val_loss = hist.history['val_loss'][-1]

    with open(prefix + 'val_loss.txt', mode='w+') as f:
        f.write(str(min(hist.history['val_loss'])))
        f.write('\n')
def train_model(model, dataset):
    """
    Train convolutional neural network model.

    Provides the option of using data augmentation to minimize over-fitting.
    Options used currently are:
        rotation_range - rotates the image.
        width_shift_range - shifts the position of the image horizontally.
        height_shift_range - shifts the position of the image vertically.
        horizontal_flip - flips the image horizontally.
    """
    print("\n- TRAINING MODEL -----------------------------------------------")
    if not DATA_AUGMENTATION:
        print('Not using data augmentation.')
        model.fit(dataset.train_data, dataset.train_labels,
                  batch_size=BATCH_SIZE, nb_epoch=NB_EPOCH, shuffle=True,
                  verbose=1, show_accuracy=True,
                  validation_data=(dataset.validate_data,
                                   dataset.validate_labels))
    else:
        print('Using real-time data augmentation.')
        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,
            samplewise_center=False,
            featurewise_std_normalization=False,
            samplewise_std_normalization=False,
            zca_whitening=False,
            # Rotate image by up to 0.1 degrees randomly
            rotation_range=0.1,
            # Shift image horizontally by up to 10% of width randomly
            width_shift_range=0.1,
            # Shift image vertically by up to 10% of height randomly
            height_shift_range=0.1,
            # Flip the image horizontally randomly
            horizontal_flip=True,
            vertical_flip=False)

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(dataset.train_data)

        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(dataset.train_data,
                                         dataset.train_labels,
                                         shuffle=True, batch_size=BATCH_SIZE),
                            samples_per_epoch=dataset.train_data.shape[0],
                            nb_epoch=NB_EPOCH, verbose=1, show_accuracy=True,
                            validation_data=(dataset.validate_data,
                                             dataset.validate_labels),
                            nb_worker=1)
    return model
Example #14
def train(data, Model, file_name, num_epochs=50, batch_size=128, init=None):
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted)

    model = Model(None).model
    print(model.summary())

    def get_lr(epoch):
        return base_lr*(.5**(epoch/num_epochs*10))
    sgd = SGD(lr=0.00, momentum=0.9, nesterov=False)  # lr is set each epoch by the scheduler below
    schedule = LearningRateScheduler(get_lr)

    model.compile(loss=fn,
                  optimizer=sgd,
                  metrics=['accuracy'])
    
    if Model == MNISTModel:
        datagen = ImageDataGenerator(
            rotation_range=0,
            width_shift_range=0.0,
            height_shift_range=0.0,
            horizontal_flip=False)
        base_lr = 0.1
    else:
        datagen = ImageDataGenerator(
            rotation_range=10,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True)
        base_lr = 0.1


    datagen.fit(data.train_data)

    model.fit_generator(datagen.flow(data.train_data, data.train_labels,
                                     batch_size=batch_size),
                        steps_per_epoch=data.train_data.shape[0] // batch_size,
                        epochs=num_epochs,
                        verbose=1,
                        validation_data=(data.validation_data, data.validation_labels),
                        callbacks=[schedule])

    print('Test accuracy:', np.mean(np.argmax(model.predict(data.test_data),axis=1)==np.argmax(data.test_labels,axis=1)))

    if file_name is not None:
        model.save_weights(file_name)

    return model
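To make the schedule above concrete: with base_lr = 0.1 and num_epochs = 50, get_lr halves the learning rate every num_epochs/10 = 5 epochs (a quick check, not part of the original):

base_lr, num_epochs = 0.1, 50
for epoch in (0, 5, 10, 15):
    print(epoch, base_lr * (.5 ** (epoch / num_epochs * 10)))
# 0.1, 0.05, 0.025, 0.0125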
Example #15
def get_datagen(X):
    datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0,
        width_shift_range=0,
        height_shift_range=0,
        horizontal_flip=False,
        vertical_flip=False)

    Xsample = X[np.random.choice(X.shape[0], 10000), :]
    datagen.fit(Xsample)

    return datagen
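Note that with every featurewise option above disabled, datagen.fit() estimates statistics that are never applied; sampling 10,000 rows only pays off when such options are on, as in this illustrative variant:

datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True)
datagen.fit(Xsample)  # the mean/std estimated here are applied to every generated batch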
def train_model(num_epochs, X_train, y_train, X_val, y_val, model = None, training_time = TRAINING_TIME):
    if model is None:
        model = initialize_model_graham()
    
    # convert class vectors to binary class matrices
    #Y_train = np_utils.to_categorical(y_train, STARS)
    #Y_val = np_utils.to_categorical(y_val, STARS)
    Y_train = y_train
    Y_val = y_val
    
    X_train = X_train.astype('float32')
    X_val = X_val.astype('float32')
    #X_train /= 255
    #X_val /= 255

    temporal = TemporalCallback("netsave/network.gb.temp", training_time)
    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(X_train, Y_train, batch_size=MINIBATCH_SIZE,
                  nb_epoch=num_epochs, show_accuracy=True,
                  validation_data=(X_val, Y_val), shuffle=True, callbacks=[temporal])
    else:
        print('Using real-time data augmentation.')

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=True,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=True,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=5,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=True)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(X_train, Y_train, batch_size=MINIBATCH_SIZE),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=num_epochs, show_accuracy=True,
                            validation_data=(X_val, Y_val),
                            nb_worker=1, callbacks=[temporal])	
    return model
def generate_datagen(train_data):
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    datagen.fit(train_data)

    return datagen
Example #18
File: cnn4.py Project: tracholar/wiki
    def train(self):
        mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)

        train_datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2)
        train_datagen.fit(mnist.train.images.reshape(-1, 28, 28, 1))

        x_test, y_test = mnist.test.images.reshape(-1, 28, 28, 1), mnist.test.labels
        self.model.fit_generator(train_datagen.flow(mnist.train.images.reshape(-1, 28, 28, 1), mnist.train.labels),
                       #batch_size=128,
                       epochs=20,
                       verbose=1,
                       validation_data=(x_test, y_test),
                       callbacks=[TrainValTensorBoard(log_dir='./logs/cnn4', histogram_freq=1, write_grads=True)])

        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Loss', score[0], 'acc', score[1])
Example #19
    def train(self, dataset, metric, nb_epoch=30, data_augmentation=True):
        
        # let's train the model using different optimization methods.
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) #acc: 99.58%
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) #acc: 99.63
        adamax = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(loss='categorical_crossentropy',  # or 'binary_crossentropy'
                           optimizer=adam,
                           metrics=[metric])
        earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')
                           
        if not data_augmentation:
            print('Not using data augmentation.')
            self.model.fit(dataset.X_train, dataset.Y_train,
                           batch_size=self.batch_size,
                           nb_epoch=nb_epoch, callbacks=[earlyStopping],
                           validation_data=(dataset.X_valid, dataset.Y_valid),
                           shuffle=True)
        else:
            print('Using real-time data augmentation.')

            # This will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=False,             # set input mean to 0 over the dataset
                samplewise_center=False,              # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,   # divide each input by its std
                zca_whitening=False,                  # apply ZCA whitening
                rotation_range=20,                    # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.,                 # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.,                # randomly shift images vertically (fraction of total height)
                channel_shift_range=0.2,
                fill_mode = 'nearest',        # Points outside the boundaries of the input are filled according to the given mode.
                horizontal_flip=False,                 # randomly flip images
                vertical_flip=True)                  # randomly flip images

            # compute quantities required for featurewise normalization
            # (std, mean, and principal components if ZCA whitening is applied)
            datagen.fit(dataset.X_train)

            # fit the model on the batches generated by datagen.flow()
            self.model.fit_generator(datagen.flow(dataset.X_train, dataset.Y_train,
                                                  batch_size= self.batch_size),
                                     samples_per_epoch=dataset.X_train.shape[0],
                                     nb_epoch=nb_epoch, callbacks=[earlyStopping], 
                                     validation_data=(dataset.X_valid, dataset.Y_valid))
Example #20
def tune(X_train, X_test, y_train, y_test):
    print(y_train)
    Y_train = np_utils.to_categorical(y_train, config.nb_class)
    Y_test = np_utils.to_categorical(y_test, config.nb_class)

    model = None
    model = util.load_alexnet_model_finetune567(weights_path=config.alexnet_weights_path, nb_class=config.nb_class)

    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    print "Fine-tuning CNN.."

    #Real-time Data Augmentation using In-Built Function of Keras
    datagen = ImageDataGenerator(rotation_range=40,
                                 width_shift_range=0.3,
                                 height_shift_range=0.3,
                                 horizontal_flip=True,
                                 zoom_range = 0.25,
                                 shear_range = 0.25,
                                 fill_mode='nearest')
    datagen.fit(X_train)
    hist = model.fit_generator(datagen.flow(X_train, y_train, batch_size=32), nb_epoch=400,
                        samples_per_epoch=X_train.shape[0], validation_data = (X_test,y_test))

    #hist = model.fit(X_train, Y_train,
    #          nb_epoch=400, batch_size=32,verbose=1,
    #          validation_data=(X_test, Y_test))

    util.save_history(hist,"alex_finetune567_aug_fold"+ str(fold_count),fold_count)

    model.save_weights("models/alex_finetune567_aug_weights"+ str(fold_count) +".h5")

    #scores = model.evaluate(X_test, y_test, verbose=0)
    #print("Softmax %s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

    # Clear memory
    model= None
    X_train = None
    Y_train = None
    X_test = None
    Y_test = None
    def train(self):
        # load data
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)
        
        # color preprocessing
        x_train, x_test = self.color_preprocessing(x_train, x_test)

        # build network
        img_input = Input(shape=(self.img_rows,self.img_cols,self.img_channels))
        output = self.wide_residual_network(img_input,self.num_classes,self.depth,self.wide)
        resnet = Model(img_input, output)
        resnet.summary()
        
        # set optimizer
        sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)
        resnet.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

        # set callback
        tb_cb = TensorBoard(log_dir=self.log_filepath, histogram_freq=0)
        change_lr = LearningRateScheduler(self.scheduler)
        checkpoint = ModelCheckpoint(self.model_filename, 
                monitor='val_loss', verbose=0, save_best_only= True, mode='auto')
        plot_callback = PlotLearning()
        cbks = [change_lr,tb_cb,checkpoint,plot_callback]

        # set data augmentation
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(horizontal_flip=True,
                width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)

        datagen.fit(x_train)

        # start training
        resnet.fit_generator(datagen.flow(x_train, y_train,batch_size=self.batch_size),
                            steps_per_epoch=self.iterations,
                            epochs=self.epochs,
                            callbacks=cbks,
                            validation_data=(x_test, y_test))
        resnet.save(self.model_filename)

        self._model = resnet
        self.param_count = self._model.count_params()
Example #22
    def fit(self, train_set, test_set, nb_epoch):
        super(ModelCNNBasic, self).fit(train_set, test_set, nb_epoch)
        # data augmentation
        datagen = ImageDataGenerator(
            zca_whitening=False,
            rotation_range=180,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            vertical_flip=True,
            shear_range=0.2,
            zoom_range=0.2
        )

        datagen.fit(self.x_train)

        verbose = 1 if self.verbose else 0

        if test_set is not None:
            early_stopping = EarlyStopping(monitor='val_loss', patience=20)
            checkpoint_path = 'output/checkpoint/{}'.format(self.name)
            helpers.prepare_dir(checkpoint_path, empty=True)
            checkpoint_path = os.path.join(checkpoint_path, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
            checkpoint = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', save_best_only=False)
            callbacks = [early_stopping, checkpoint]
            self.model.fit_generator(datagen.flow(self.x_train,
                                                  self.y_train,
                                                  shuffle=True),
                                     samples_per_epoch=self.x_train.shape[0],
                                     nb_epoch=nb_epoch,
                                     validation_data=(self.x_test, self.y_test),
                                     callbacks=callbacks,
                                     verbose=verbose,
                                     )
        else:
            self.model.fit_generator(datagen.flow(self.x_train,
                                                  self.y_train,
                                                  shuffle=True),
                                     samples_per_epoch=self.x_train.shape[0],
                                     nb_epoch=nb_epoch,
                                     verbose=verbose
                                     )
Example #23
def train():
    print("Training...")
    t0 = time.time()
    datagen = ImageDataGenerator(
        rotation_range=90, width_shift_range=0.01, height_shift_range=0.01, horizontal_flip=True, vertical_flip=True
    )
    for e in range(nb_epoch):
        print("Epoch Number: " + str(e))
        for X_batch, y_batch, split, class_weight in BatchGenerator():
            if split > train_size / split_size:
                break
            split_number = (e * splits) + split
            # if split_number > 2:
            # if split_losses[split_number-2]>=split_losses[split_number-3]:
            # break
            datagen.fit(X_batch)
            model.fit_generator(
                datagen.flow(
                    X_batch,
                    y_batch,
                    shuffle=True,
                    batch_size=batch_size
                    # ,save_to_dir='/home/harry/Pictures/dataaug'
                ),
                callbacks=[lh],
                samples_per_epoch=split_size,
                nb_epoch=1,
                show_accuracy=True,
                class_weight=class_weight,
            )
        # if e%5 == 0 & e!=0:
        # print('Testing... as 5 epochs have passed')
        # score = model.evaluate(X_test, Y_test, batch_size=20)
        # print('Test score:', score)
        model.save_weights(
            ("/media/harry/Storage/NewModel/weights512/" + save_name[:-5] + "E-%d.hdf5") % (e + 1), overwrite=True
        )
        # print ("Weights saved to " + save_name)
    t1 = time.time()
    tyme = t1 - t0
    print("Training completed in %f seconds" % tyme)
    model.save_weights("/media/harry/Storage/NewModel/weights512/" + save_name, overwrite=True)
    print("Final training weights saved to " + save_name)
    return tyme
Example #24
    def fit(self, X_train, Y_train, X_test, Y_test, nb_classes, csv_filename, img_augmentation=None):
        """
        Fit function meka resnet model run training.

        Args
        --------
        X_train: array, training datasets.
        Y_train: array, training dataset's labels.
        X_test: array, test datasets.
        Y_test: array, test dataset's labels.
        nb_classes: int, number of class.
        csv_filename: str, name of input file as csv.
        img_augmentation: dict, selection image's augmentatin:

        Return
        --------
        model: object, trained model.
        """
        csv_logger = CSVLogger(csv_filename)
        img_rows, img_cols, img_channels = X_train[0].shape
        self._compile(img_channels, img_rows, img_cols, nb_classes)

        if not self.data_augmentation:
            print('Not using data augmentation.')
            self.model.fit(
                X_train,
                Y_train,
                batch_size=self.batch_size,
                nb_epoch=self.nb_epoch,
                validation_data=(X_test, Y_test),
                shuffle=True,
                callbacks=[self.lr_reducer, self.early_stopper, csv_logger])
        else:
            print('Using real-time data augmentation.')
            datagen = ImageDataGenerator(**(img_augmentation or {}))
            datagen.fit(X_train)
            self.model.fit_generator(
                datagen.flow(X_train, Y_train, batch_size=self.batch_size),
                steps_per_epoch=X_train.shape[0] // self.batch_size,
                validation_data=(X_test, Y_test),
                epochs=self.nb_epoch, verbose=1, max_q_size=100,
                callbacks=[self.lr_reducer, self.early_stopper, csv_logger])

        return self.model
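A hedged example of the img_augmentation dict this fit() expects; the keys are standard ImageDataGenerator arguments, and the instance name resnet_trainer is hypothetical.

img_aug = {
    'rotation_range': 15,
    'width_shift_range': 0.1,
    'height_shift_range': 0.1,
    'horizontal_flip': True,
}
model = resnet_trainer.fit(X_train, Y_train, X_test, Y_test,
                           nb_classes=10, csv_filename='training_log.csv',
                           img_augmentation=img_aug)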
Example #25
def train():
    
    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    #checkpointer = ModelCheckpoint(filepath="/Users/quinnjarrell/Desktop/Experiments/keras/saved/", verbose=1, save_best_only=True)
    min_score = 91293921
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        # batch train with realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=True,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=True,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        datagen.fit(X_train)
        progbar = generic_utils.Progbar(X_train.shape[0])
        x = 0
        for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=128):# save_to_dir="/Users/quinnjarrell/datasets/catsvsdogs/train/resized/resized_generated"):
            loss = model.train_on_batch(X_batch, Y_batch)
            x += 1
            check_for_early_shutdown(x)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size=128):
            score = model.test_on_batch(X_batch, Y_batch)
            x += 1
            check_for_early_shutdown(x)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
        if score < min_score:
            print("New best model with score: %s" % score)
            save_data()
            min_score = score
    def train(self):
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)
        
        # color preprocessing
        x_train, x_test = self.color_preprocessing(x_train, x_test)

        model = self.pure_cnn_network(self.input_shape)
        model.summary()

        # Save the best model during each training checkpoint
        checkpoint = ModelCheckpoint(self.model_filename,
                                    monitor='val_loss', 
                                    verbose=0,
                                    save_best_only= True,
                                    mode='auto')
        plot_callback = PlotLearning()
        tb_cb = TensorBoard(log_dir=self.log_filepath, histogram_freq=0)

        cbks = [checkpoint, plot_callback, tb_cb]

        # set data augmentation
        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(horizontal_flip=True,
                width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)

        datagen.fit(x_train)

        model.compile(loss='categorical_crossentropy',   # cross-entropy loss for multi-class classification
                    optimizer=Adam(lr=self.learn_rate),  # Adam optimizer with the configured learning rate
                    metrics = ['accuracy'])              # metrics to be evaluated by the model

        model.fit_generator(datagen.flow(x_train, y_train, batch_size = self.batch_size),
                            epochs = self.epochs,
                            validation_data= (x_test, y_test),
                            callbacks=cbks,
                            verbose=1)

        model.save(self.model_filename)

        self._model = model
Example #27
def generator():
    datagen = ImageDataGenerator(
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True
    )
    datagen.fit(X_train)
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=32),
                        steps_per_epoch=len(X_train) // 32,
                        epochs=10)
    for i in range(10):
        print('Epoch', i)
        batches = 0
        for x_batch, y_batch in datagen.flow(X_train, Y_train, batch_size=32):
            loss = model.train_on_batch(x_batch, y_batch)
            batches += 1
            if batches >= len(X_train) / 32:
                break
Example #28
    def train_ops(self):
        # Actually train neural network
        # Set optimizers, we use adam
        opt = keras.optimizers.Adam(lr=self.lr)
        # Use cross entropy as our loss function
        self.model.compile(loss='categorical_crossentropy',
                    optimizer=opt,
                    metrics=['accuracy'])
        
        if not self.data_augmentation:
            print('Not using data augmentation.')
            self.model.fit(self.x_train, self.y_train,
                        batch_size=self.batch_size,
                        epochs=self.epochs,
                        validation_data=(self.x_test, self.y_test),
                        shuffle=True)
        else:
            print('Using real-time data augmentation.')
            # This will do preprocessing and realtime data augmentation:
            datagen = ImageDataGenerator(
                featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,  # randomly flip images
                vertical_flip=False)  # randomly flip images

            # Use data augmentation to generate more sample
            datagen.fit(self.x_train)

            # Fit the model on the batches generated by datagen.flow().
            self.model.fit_generator(datagen.flow(self.x_train, self.y_train, batch_size=self.batch_size),
                                steps_per_epoch=int(np.ceil(self.x_train.shape[0] / float(self.batch_size))),
                                epochs=self.epochs,
                                validation_data=(self.x_test, self.y_test),
                                workers=4)
Example #29
    def train(self, dataset, batch_size=32, nb_epoch=40, data_augmentation=True):
        # let's train the model using SGD + momentum (how original).
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=sgd,
                           metrics=['accuracy'])
        if not data_augmentation:
            print('Not using data augmentation.')
            self.model.fit(dataset.X_train, dataset.Y_train,
                           batch_size=batch_size,
                           nb_epoch=nb_epoch,
                           validation_data=(dataset.X_valid, dataset.Y_valid),
                           shuffle=True)
        else:
            print('Using real-time data augmentation.')

            # this will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=False,             # set input mean to 0 over the dataset
                samplewise_center=False,              # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,   # divide each input by its std
                zca_whitening=False,                  # apply ZCA whitening
                rotation_range=20,                    # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.2,                # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.2,               # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,                 # randomly flip images
                vertical_flip=False)                  # randomly flip images

            # compute quantities required for featurewise normalization
            # (std, mean, and principal components if ZCA whitening is applied)
            datagen.fit(dataset.X_train)

            # fit the model on the batches generated by datagen.flow()
            self.model.fit_generator(datagen.flow(dataset.X_train, dataset.Y_train,
                                                  batch_size=batch_size),
                                     samples_per_epoch=dataset.X_train.shape[0],
                                     nb_epoch=nb_epoch,
                                     validation_data=(dataset.X_valid, dataset.Y_valid))
def data_generator(x,y,batch_size):
    x_train,y_train = x,y
    from keras.preprocessing.image import ImageDataGenerator
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    generator = datagen.flow(x_train,y_train,batch_size=batch_size)
    while True:
        x, y = next(generator)
        yield ([x,y],[y,x])
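The ([x, y], [y, x]) pairs yielded above imply a model with two inputs and two outputs; a hedged consumption sketch in which dual_model is a placeholder for such a Model:

gen = data_generator(x_train, y_train, batch_size=32)
dual_model.fit_generator(gen,
                         steps_per_epoch=len(x_train) // 32,
                         epochs=10)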
Example #31
def generate_data_aug(X_train, y_train):
    datagen = ImageDataGenerator(rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1)  
    datagen.fit(X_train)
    data_from_generator = datagen.flow(X_train,y_train, batch_size = 86)
    return data_from_generator
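Unlike most examples here, generate_data_aug() returns the flow iterator itself, so it can be passed straight to fit_generator; model is an assumed compiled network.

train_flow = generate_data_aug(X_train, y_train)
model.fit_generator(train_flow,
                    steps_per_epoch=len(X_train) // 86,  # matches batch_size=86 above
                    epochs=30)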
Example #32
def train():
    model = densenet_reg.DenseNet(img_dim,
                                  classes=nb_classes,
                                  depth=depth,
                                  nb_dense_block=nb_dense_block,
                                  growth_rate=growth_rate,
                                  nb_filter=nb_filter,
                                  dropout_rate=dropout_rate,
                                  bottleneck=bottleneck,
                                  reduction=reduction,
                                  weights=None)

    # model = densenet_reg.DenseNetImageNet264(input_shape=img_dim, classes=nb_classes)
    # print("Model created")

    model.summary()
    optimizer = Adam(lr=1e-3)  # Using Adam instead of SGD to speed up training
    model.compile(loss=losses.mean_absolute_error,
                  optimizer=optimizer,
                  metrics=["accuracy"])
    print("Finished compiling")
    print("Building model...")

    trainX, trainY, testX, testY = load_data(train_file_path, com_path)

    print(trainX.shape)
    print(trainY.shape)
    print(testX.shape)
    print(testY.shape)

    trainX = trainX.astype('float32')
    testX = testX.astype('float32')

    # Y_train = np_utils.to_categorical(trainY, nb_classes)
    Y_train = trainY.astype('float32')
    # Y_test = np_utils.to_categorical(testY, nb_classes)
    Y_test = testY.astype('float32')

    generator = ImageDataGenerator(rotation_range=15,
                                   width_shift_range=10. / img_rows,
                                   height_shift_range=10. / img_cols)

    generator.fit(trainX, seed=0)

    weights_file = r'../dataB/Zero_DenseNet_Reg.h5'
    if os.path.exists(weights_file):
        model.load_weights(weights_file, by_name=True)
        print("Model loaded.")

    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=1e-5)
    model_checkpoint = ModelCheckpoint(weights_file,
                                       monitor="val_loss",
                                       save_best_only=True,
                                       save_weights_only=True,
                                       verbose=1)

    callbacks = [lr_reducer, model_checkpoint]

    history = model.fit_generator(generator.flow(trainX,
                                                 Y_train,
                                                 batch_size=batch_size),
                                  samples_per_epoch=len(trainX),
                                  nb_epoch=nb_epoch,
                                  callbacks=callbacks,
                                  validation_data=(testX, Y_test),
                                  nb_val_samples=testX.shape[0],
                                  verbose=1)

    model_id = np.int64(
        time.strftime('%Y%m%d%H%M', time.localtime(time.time())))
    model.save('../dataB/Zero_DenseNet_Reg' + str(model_id) + '.h5')

    yPred = model.predict(testX)
    yTrue = testY.astype('float32')

    loss = np.mean(np.abs(yPred - yTrue))

    print("test loss:\t" + str(loss))

    # accuracy = metrics.accuracy_score(yTrue, yPred) * 100
    # error = 100 - accuracy
    # print("Accuracy : ", accuracy)
    # print("Error : ", error)

    return history
# (start of this example truncated in the source; earlier label entries are missing
#  and `datagen` below is assumed to be an ImageDataGenerator defined above)
label2num = {
        'FARMLAND': 3,
        'DESERT': 4,
        'CITY': 5
    }
train_path = './train/'   
label_csv = pd.read_csv('train_label.csv', header = None, encoding='utf-8')
label_csv[2] = label_csv[1].map(lambda x: label2num[x])
label_csv[0] = label_csv[0].map(lambda x: train_path + x)
label_csv = label_csv.drop( [1],  axis=1)

for index, row in label_csv.iterrows():
    if row[2] == 3:
        x = image.load_img( row[0] )
        x = np.array(x)
        x = x[np.newaxis,:,:,:]
        datagen.fit(x)
        num = 30
        i = 1
        if not os.path.exists('./%s/' % row[2]):
            os.makedirs('./%s/' % row[2])
        for x_batch in datagen.flow( x, batch_size = 2,
                                     save_to_dir = './%s/' % row[2],
                                     save_prefix = num,
                                     save_format = 'jpg'):
            num = num+1
            i += 1
            if i > 1:
                break
    if row[2] == 2:
        x = image.load_img( row[0] )
        x = np.array(x)
Example #34
model.add(Conv2D(256, (2, 3), kernel_regularizer=regularizers.l2(0.03)))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2,3), padding='same'))
model.add(Dropout(0.5))

model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(Dropout(0.5))

model.add(Dense(3))
model.add(Activation("softmax"))

#sgd = optimizers.SGD(lr=0.04, decay=1e-10, momentum=0.9, nesterov=True)
ada = optimizers.Adam(lr=3e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0.0, amsgrad=False)
model.compile(loss="categorical_crossentropy",
              optimizer=ada,
              metrics=["accuracy"])

# Data augmentation
datagen = ImageDataGenerator(width_shift_range=0.4)
datagen.fit(x_train)

# Optimization
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
                    epochs=150, validation_data=(x_test, y_test))

model.summary()

Example #35
trainX = trainX.astype('float32')
trainX = np.reshape(trainX, [60000, 28, 28, 1])

testX = testX.astype('float32')
testX = np.reshape(testX, [10000, 28, 28, 1])

Y_train = np_utils.to_categorical(trainY, nb_classes)
Y_test = np_utils.to_categorical(testY, nb_classes)

generator = ImageDataGenerator(rotation_range=15,
                               width_shift_range=5. / 28,
                               height_shift_range=5. / 28,
                               horizontal_flip=True)

generator.fit(trainX, seed=0)

# Load model
weights_file = "weights/DenseNet-40-12-FashionMNIST.h5"

if os.path.exists(weights_file) and load_models:
    model.load_weights(weights_file, by_name=True)
    print("Model loaded.")

if not os.path.exists(weights_file):
    open(weights_file, 'w').close()  # create an empty placeholder file

lr_reducer = ReduceLROnPlateau(monitor='val_acc',
                               factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
Example #36
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=False,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    # fit the model on the batches generated by datagen.flow()
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=nb_epoch,
                        show_accuracy=True,
                        validation_data=(X_test, Y_test),
                        nb_worker=1)

# model.save('cifar10_model_%s.h5' %(str(quesNo)))  # creates a HDF5 file 'my_model.h5'
loss_mat, acc_mat = [], []
tmp = hist.history
print('loss', '\n', tmp['loss'])
loss_mat.append(tmp['loss'])
loss_mat.append(tmp['val_loss'])
                         include_top=False)

model = create_model(base_model)


def train_model(epochs, train_generator):
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=final_data_img.shape[0] //
                                  BATCH_SIZE,
                                  epochs=epochs,
                                  verbose=1)
    return history


BATCH_SIZE = 32
train_datagen = ImageDataGenerator()
train_datagen.fit(final_data_img)
train_generator = train_datagen.flow(final_data_img,
                                     final_labels_keras,
                                     batch_size=BATCH_SIZE)

start = time.time()
hist = train_model(10, train_generator)
end = time.time()

model_json = model.to_json()
with open("../model_InceptionV3_10epochs.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(
    "../model_no_augmentation_all_train_InceptionV3_10epochs.h5")
print("Saved model to disk")
Example #38
x_validation = np.array(list(map(preProcess, x_validation)))

x_train = x_train.reshape(
    -1, 32, 32,
    1)  # -1 means: infer this dimension from the size of x_train
print(x_train.shape)
x_test = x_test.reshape(-1, 32, 32, 1)
x_validation = x_validation.reshape(-1, 32, 32, 1)

# data generate
dataGen = ImageDataGenerator(width_shift_range=0.1,
                             height_shift_range=0.1,
                             zoom_range=0.1,
                             rotation_range=10)

dataGen.fit(x_train)

y_train = to_categorical(y_train, noOfClasses)
y_test = to_categorical(y_test, noOfClasses)
y_validation = to_categorical(y_validation, noOfClasses)

#Create Model
model = Sequential()
model.add(
    Conv2D(input_shape=(32, 32, 1),
           filters=8,
           kernel_size=(5, 5),
           activation="relu",
           padding="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
Example #39
# augment data to increase variety in dataset and prevent overfitting
print('[INFO] augmenting data...')
aug_data=ImageDataGenerator(featurewise_center=False, #set input mean to 0
                           samplewise_center=False,  #set each sample mean to 0
                           featurewise_std_normalization=False, #divide inputs by dataset std
                           samplewise_std_normalization=False,  #divide each input by its own std
                           zca_whitening=False,  #apply ZCA whitening
                           rotation_range=0.5,    #rotate up to 0.5 degrees
                           zoom_range=0.5,        #zoom in/out up to 50%
                           width_shift_range=0.5, #shift horizontally up to 50% of width
                           height_shift_range=0.5, #shift vertically up to 50% of height
                           horizontal_flip=False,  #randomly flip images
                           vertical_flip=False,
                           )
aug_data.fit(train_x)

# make patient early stopping
print('[INFO] initiating tools for early stopping and saving')
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)
mc = ModelCheckpoint(args['model'], monitor='val_acc', mode='max', verbose=1, save_best_only=True)

# train network
print('[INFO] training network')
history = model.fit_generator(aug_data.flow(train_x, train_y, batch_size=BS), 
                                            validation_data=(val_x, val_y),
                                            steps_per_epoch=len(train_x) // BS,
                                            epochs=EPOCHS,
                                            verbose=1,
                                            callbacks=[es, mc])