Beispiel #1
1
def test_image_data_generator_training():
    """Smoke test: a tiny conv net fed through ImageDataGenerator.flow
    should reach > 75% validation accuracy on synthetic 4-class data."""
    np.random.seed(1337)
    gen = ImageDataGenerator(rescale=1.)  # Dummy ImageDataGenerator
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Build the network incrementally rather than from a layer list.
    model = Sequential()
    model.add(layers.Conv2D(filters=8, kernel_size=3,
                            activation='relu',
                            input_shape=input_shape))
    model.add(layers.MaxPooling2D(pool_size=2))
    model.add(layers.Conv2D(filters=4, kernel_size=(3, 3),
                            activation='relu', padding='same'))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dense(y_test.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit_generator(gen.flow(x_train, y_train, batch_size=16),
                                  epochs=10,
                                  validation_data=gen.flow(x_test, y_test,
                                                           batch_size=16),
                                  verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    model.evaluate_generator(gen.flow(x_train, y_train, batch_size=16))
    def train(self, save_model_to_file=True, rotation_range=20,
              width_shift_range=0.5, height_shift_range=0.2):
        """ Trains the model using the dataset in letters_folder """
        # Load every letter image together with its label; the label is
        # encoded in the file name as "<letter>_...".
        samples = [(cv2.imread(self.letters_folder + "/" + fname,
                               cv2.IMREAD_GRAYSCALE),
                    self.letters.index(fname.split('_')[0]))
                   for fname in listdir(self.letters_folder)]
        data = np.array([img for img, _ in samples])
        labels = np.array([lbl for _, lbl in samples])

        # Hold out a third of the samples for evaluation.
        X_train, X_test, y_train, y_test = train_test_split(
            data, labels, test_size=0.33, random_state=42)

        # Channels-first (1, rows, cols) float images scaled to [0, 1].
        X_train = X_train.reshape(X_train.shape[0], 1,
                                  self.img_rows, self.img_cols).astype('float32') / 255
        X_test = X_test.reshape(X_test.shape[0], 1,
                                self.img_rows, self.img_cols).astype('float32') / 255

        # One-hot encode the integer labels.
        Y_train = np_utils.to_categorical(y_train, self.nb_classes)
        Y_test = np_utils.to_categorical(y_test, self.nb_classes)

        # Real-time augmentation: random rotations plus horizontal and
        # vertical shifts (ranges come from the keyword arguments).
        datagen = ImageDataGenerator(rotation_range=rotation_range,
                                     width_shift_range=width_shift_range,
                                     height_shift_range=height_shift_range)
        datagen.fit(X_train)

        # Train on augmented batches, validating on the held-out split.
        history = self.model.fit_generator(
            datagen.flow(X_train, Y_train, batch_size=self.batch_size),
            samples_per_epoch=X_train.shape[0],
            nb_epoch=self.nb_epoch,
            validation_data=(X_test, Y_test))

        # Visualise the accuracy curves.
        plt.figure(figsize=(10, 10))
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()

        if save_model_to_file:
            self.model.save_weights(self.weights_path, overwrite=True)
Beispiel #3
0
def augmentation(scans, masks, n):
    """Append `n` randomly augmented batches to both `scans` and `masks`.

    Both flows use the same fixed seed (1000), so each augmented scan
    receives the same random transform as the corresponding mask and the
    pair stays spatially aligned.

    Parameters
    ----------
    scans, masks : np.ndarray
        Image batches; the first axis is the sample axis.
    n : int
        Number of augmented batches (batch_size=1 each) to append.

    Returns
    -------
    tuple of np.ndarray
        The grown (scans, masks) pair.
    """
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=25,
        width_shift_range=0.3,
        height_shift_range=0.3,
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=False)  # NOTE(review): False acts as 0 -> no zoom

    if n <= 0:
        return (scans, masks)

    def _grow(array):
        # datagen.flow loops forever, so count batches and stop after n.
        # (The original checked `if i > n` AFTER appending, which produced
        # n + 1 batches instead of n.)
        out = array
        for i, batch in enumerate(datagen.flow(array, batch_size=1,
                                               seed=1000), start=1):
            out = np.vstack([out, batch])
            if i >= n:
                break
        return out

    return (_grow(scans), _grow(masks))
def main():
    """Command-line entry point.

    Modes (sys.argv[1]):
      * "test" - skip training (nb_epoch = 0) and load the weights file
        named by sys.argv[2] for evaluation only.
      * "add"  - fold the first validation split back into the training
        set before training.
    Any other value trains with the module-level defaults.

    Relies on module-level globals: Model, lr, batch_size, nb_epoch,
    WEIGHTS_FILE, X_train, Y_train, X_val1, Y_val1, predict_test.
    """
    model = Model()
    if (sys.argv[1] == "test"):
        global nb_epoch
        nb_epoch = 0  # no training passes; evaluation only
        global WEIGHTS_FILE
        WEIGHTS_FILE = sys.argv[2]

    elif(sys.argv[1] == "add"):
        global X_train, Y_train, X_val1, Y_val1
        # Train on the union of train and the first validation split.
        X_train = np.concatenate((X_train, X_val1), axis=0)
        Y_train = np.concatenate((Y_train, Y_val1), axis=0)

    adam = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam)

    # Mild augmentation: small rotations and shifts, no flips.
    datagen = ImageDataGenerator(
        featurewise_center=False,
        featurewise_std_normalization=False,
        rotation_range=15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=False)

    datagen.fit(X_train)
    # Keep the best weights by val_loss; stop after 10 stale epochs.
    callbacks = [ModelCheckpoint(WEIGHTS_FILE, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
                 EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')]
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=len(X_train), nb_epoch=nb_epoch, validation_data=(X_val1, Y_val1),
                        show_accuracy=True, callbacks=callbacks)

    # Reload the checkpointed best weights before predicting the test set.
    model.load_weights(WEIGHTS_FILE)
    predict_test(model)
Beispiel #5
0
def train():
    """Train the module-level `model` on augmented batches from
    BatchGenerator(), evaluating against the held-out test set each epoch.

    Relies on module-level globals: model, classes, nb_epoch,
    nb_epoch_per, split_size, lh, checkpointer, direct, save_name,
    BatchGenerator, PredictionMatrix, load.

    Returns
    -------
    float
        Wall-clock training time in seconds.
    """
    (X_test, y_test, y_conf) = load.load_test_data()
    Y_test = np_utils.to_categorical(y_test, classes)
    print(X_test.shape[0], 'test samples')
    X_test = X_test.astype("float32")
    X_test /= 255  # scale pixels to [0, 1]
    datagen = ImageDataGenerator(rotation_range=30, width_shift_range=0.01,
                                 height_shift_range=0.01,
                                 horizontal_flip=True, vertical_flip=True)
    t0 = time.time()
    for e in range(nb_epoch):
        print("******** Epoch %d ********" % (e + 1))
        print("Epoch Number: " + str(e))
        for X_batch, y_batch, class_weight in BatchGenerator():
            # Re-fit per chunk so generator statistics track the data.
            datagen.fit(X_batch)
            model.fit_generator(datagen.flow(X_batch, y_batch, batch_size=18, shuffle=True),
                                callbacks=[lh, checkpointer],
                                samples_per_epoch=split_size,
                                nb_epoch=nb_epoch_per,
                                validation_data=(X_test, Y_test),
                                class_weight=class_weight)
            y_pred = model.predict_classes(X_test, batch_size=20)
        (accuracy, correct) = PredictionMatrix()
        #model.save_weights((direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' )  % (e+1), overwrite=True)
        #print ("Weights saved to " + direct + '/weights/' + save_name[:-5] + 'E-%d.hdf5' % (e+1))
    t1 = time.time()
    tyme = t1 - t0
    print("Training completed in %f seconds" % tyme)
    if save_name != '':
        model.save_weights(direct + '/weights/' + save_name, overwrite=True)
        print("Weights saved to " + save_name)
        # Only claim a final save when one actually happened (the original
        # printed this even when save_name was empty and nothing was written).
        print("Final training weights saved to " + save_name)
    return tyme
Beispiel #6
0
def gen_augment_arrays(array, label, augmentations, rounds = 1):
    """Yield augmented copies of a single (array, label) pair.

    When `augmentations` is None the pair is yielded unchanged; otherwise
    `rounds * augmentations['rounds']` augmented copies are produced by an
    ImageDataGenerator configured from the `augmentations` mapping.
    """
    if augmentations is None:
        yield array, label
    else:
        # All ImageDataGenerator settings come straight from the mapping.
        keys = ('featurewise_center', 'samplewise_center',
                'featurewise_std_normalization',
                'samplewise_std_normalization', 'zca_whitening',
                'rotation_range', 'width_shift_range',
                'height_shift_range', 'shear_range', 'zoom_range',
                'channel_shift_range', 'fill_mode', 'cval',
                'horizontal_flip', 'vertical_flip', 'rescale')
        auggen = ImageDataGenerator(**{k: augmentations[k] for k in keys})

        n_copies = rounds * augmentations['rounds']
        # Tile the single sample/label n_copies times and draw one full
        # batch of transformed copies from the generator.
        tiled_x = np.tile(array[np.newaxis], (n_copies, 1, 1, 1))
        tiled_y = np.tile(label[np.newaxis], (n_copies, 1))
        array_augs, label_augs = next(auggen.flow(tiled_x, tiled_y,
                                                  batch_size=n_copies))

        for array_aug, label_aug in zip(array_augs, label_augs):
            yield array_aug, label_aug
    def train(self):
        """Train the network on CIFAR-10 with shift/flip augmentation and
        persist both the best (by val_loss) and the final model."""
        # Load and preprocess CIFAR-10.
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)
        x_train, x_test = self.color_preprocessing(x_train, x_test)

        # Build the network.
        model = self.build_model()
        model.summary()

        # Checkpoint the best model so far, plot learning curves live,
        # and log for TensorBoard.
        cbks = [
            ModelCheckpoint(self.model_filename,
                            monitor='val_loss',
                            verbose=0,
                            save_best_only=True,
                            mode='auto'),
            PlotLearning(),
            TensorBoard(log_dir=self.log_filepath, histogram_freq=0),
        ]

        print('Using real-time data augmentation.')
        datagen = ImageDataGenerator(horizontal_flip=True,
                                     width_shift_range=0.125,
                                     height_shift_range=0.125,
                                     fill_mode='constant',
                                     cval=0.)
        datagen.fit(x_train)

        # Train on augmented batches with validation on the test split.
        model.fit_generator(datagen.flow(x_train, y_train,
                                         batch_size=self.batch_size),
                            steps_per_epoch=self.iterations,
                            epochs=self.epochs,
                            callbacks=cbks,
                            validation_data=(x_test, y_test))

        model.save(self.model_filename)
        self._model = model
	def fit(self,x,y,doRTA):
		"""Fit the model, optionally with real-time data augmentation (RTA).

		Parameters:
			x, y: training inputs and targets (Graph-style dict API).
			doRTA: when False, a plain model.fit(); when True, a manual
				epoch loop over ImageDataGenerator batches.
		"""
		if doRTA == False:
			self.model.fit({"input":x,"output":y},nb_epoch=self.epochs,batch_size=self.batch_size)
		else:
			# Featurewise centering/std-normalisation plus rotations,
			# shifts and horizontal flips.
			datagen = ImageDataGenerator(
			        featurewise_center=True,  # set input mean to 0 over the dataset
			        samplewise_center=False,  # set each sample mean to 0
			        featurewise_std_normalization=True,  # divide inputs by std of the dataset
			        samplewise_std_normalization=False,  # divide each input by its std
			        zca_whitening=False,
			        rotation_range=20,
			        width_shift_range=0.2, 
			        height_shift_range=0.2,
			        horizontal_flip=True, 
			        vertical_flip=False)
			# Compute the featurewise mean/std used by the flags above.
			datagen.fit(x)

			for e in range(self.epochs):
			    print('-'*40)
			    print('Epoch', e)
			    print('-'*40)
			    print('Training...')
			    # batch train with realtime data augmentation
			    progbar = generic_utils.Progbar(x.shape[0])
			    for X_batch, Y_batch in datagen.flow(x, y):
			        loss = self.model.train_on_batch({"input":X_batch,"output":Y_batch})
			        progbar.add(X_batch.shape[0], values=[('train loss', loss[0])])
Beispiel #9
0
 def train_generator(x, y, batch_size, shift_fraction=0.):
     """Endlessly yield ([x_batch, y_batch], [y_batch, x_batch]) pairs for
     a CapsNet-style model (classification plus reconstruction target).

     shift_fraction: fraction of width/height for random shifts; 0.1
     shifts MNIST digits by up to ~2 px.
     """
     train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
                                        height_shift_range=shift_fraction)  # shift up to 2 pixel for MNIST
     generator = train_datagen.flow(x, y, batch_size=batch_size)
     while 1:
         # next(generator) works on Python 2 and 3; the original
         # generator.next() is Python-2-only and breaks on Python 3.
         x_batch, y_batch = next(generator)
         yield ([x_batch, y_batch], [y_batch, x_batch])
Beispiel #10
0
def predict_labels(model):
    """writes test image labels and predictions to csv"""
    
    # NOTE(review): this flow_from_directory generator is built but never
    # consumed - test_generator is rebuilt per image inside the loop below.
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_height, img_width),
        batch_size=32,
        shuffle=False,
        class_mode=None)

    base_path = "../data/test/test/"

    with open("prediction.csv", "w") as f:
        p_writer = csv.writer(f, delimiter=',', lineterminator='\n')
        for _, _, imgs in os.walk(base_path):
            for im in imgs:
                # Image id is the file name without its extension.
                pic_id = im.split(".")[0]
                img = load_img(base_path + im)
                # NOTE(review): scipy.misc.imresize is deprecated/removed
                # in modern SciPy - confirm the pinned version provides it.
                img = imresize(img, size=(img_height, img_width))
                # Channels-first (3, H, W), then add the batch axis.
                test_x = img_to_array(img).reshape(3, img_height, img_width)
                test_x = test_x.reshape((1,) + test_x.shape)
                test_generator = test_datagen.flow(test_x,
                                                   batch_size=1,
                                                   shuffle=False)
                prediction = model.predict_generator(test_generator, 1)[0][0]
                p_writer.writerow([pic_id, prediction])
def augment_img(input_file, output_folder, img_format='jpg',
                number_imgs=10):
    """
    Generate number_imgs new images from a given image.
    This function is inspired from the following blog post:
    https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
    """
    # Aggressive geometric augmentation; exposed areas are filled with
    # the nearest pixel value.
    datagen = ImageDataGenerator(rotation_range=40,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 shear_range=0.2,
                                 zoom_range=0.2,
                                 horizontal_flip=True,
                                 fill_mode='nearest')

    # Load the source image as a batch of one.
    batch = img_to_array(load_img(input_file))
    batch = batch.reshape((1,) + batch.shape)

    # flow() loops forever, so stop once enough files have been written.
    count = 0
    for _ in datagen.flow(batch, batch_size=1,
                          save_to_dir=output_folder,
                          save_format=img_format):
        count += 1
        if count > number_imgs:
            break
Beispiel #12
0
def train_model(model, X_train, y_train):
    """Compile and fit a regression network on augmented image data,
    checkpointing the lowest-loss weights along the way.

    Returns the trained model (also saved to model_final.h5).
    """
    print("Training Model")

    # Slight rotations and zoom-out only; exposed areas become black.
    datagen = ImageDataGenerator(rotation_range=2,
                                 featurewise_center=False,
                                 featurewise_std_normalization=False,
                                 zoom_range=[0.8, 1],
                                 fill_mode='constant',
                                 cval=0)

    # Regression objective with the adam optimizer.
    model.compile(loss='mean_squared_error', optimizer='adam')

    # Keep only the lowest training-loss weights seen so far.
    callbacks_list = [ModelCheckpoint('model.h5', monitor='loss', verbose=1,
                                      save_best_only=True, mode='min')]

    # Shuffle before training.
    X_train, y_train = shuffle(X_train, y_train)

    # One pass over the data per epoch, batches of 64, for 10 epochs.
    model.fit_generator(datagen.flow(X_train, y_train, batch_size=64),
                        samples_per_epoch=len(X_train), nb_epoch=10,
                        callbacks=callbacks_list, verbose=1)
    #,save_to_dir='./AugData',save_prefix='aug'
    model.save("model_final.h5")
    return model
def train():
    """Train a VGG_16-style classifier on the Yelp photo dataset, with
    optional real-time augmentation.

    All configuration (model choice, batch size, epochs) is hard-coded
    below; data comes from yelp_data() out of a fixed directory.
    """
    model_ = 'VGG_16'
    batch_size = 8
    nb_classes = 5
    nb_epoch = 200
    data_augmentation = True

    # input image dimensions
    # NOTE(review): `model_ in MODELS[3]` is a substring test against a
    # single entry, inconsistent with the slice test above, and img_rows
    # stays undefined if neither branch matches - `model_ == MODELS[3]`
    # (or an else-branch) was probably intended.
    if model_ in MODELS[0:2]:
        img_rows, img_cols = 224, 224
    if model_ in MODELS[3]:
        img_rows, img_cols = 299, 299
    # the Yelp images are RGB
    img_channels = 3

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = yelp_data(dtype=np.float32, grayscale=False, pixels=img_rows, batches=3,
                                                     model='VGG_16', data_dir='/home/rcamachobarranco/datasets')
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # generate model
    model = VGG_16(img_rows, img_cols, img_channels, nb_classes)

    # let's train the model using SGD + momentum
    sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(X_train, y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, show_accuracy=True,
                  validation_data=(X_test, y_test), shuffle=True)
    else:
        print('Using real-time data augmentation.')

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch, show_accuracy=True,
                            validation_data=(X_test, y_test),
                            nb_worker=1)
Beispiel #14
0
    def train(self,model):
        """Train `model` on CIFAR-100 with standard augmentation and a
        learning rate halved every 25 epochs, then save the weights to
        cifar100vgg.h5 and return the model."""

        #training parameters
        batch_size = 128
        maxepoches = 250
        learning_rate = 0.1
        lr_decay = 1e-6

        # The data, shuffled and split between train and test sets:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train, x_test = self.normalize(x_train, x_test)

        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)

        # Current learning rate; mutated by the schedule below.
        lrf = learning_rate


        #data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(x_train)



        #optimization details
        sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])


        # training process in a for loop with learning rate drop every 25 epoches.

        # NOTE(review): range(1, maxepoches) runs maxepoches - 1 epochs, and
        # `epoch > 0` is always true inside this loop - confirm whether a
        # full 250 epochs were intended.
        for epoch in range(1,maxepoches):

            if epoch%25==0 and epoch>0:
                # Halve the LR and recompile; recompiling also resets the
                # SGD momentum state.
                lrf/=2
                sgd = optimizers.SGD(lr=lrf, decay=lr_decay, momentum=0.9, nesterov=True)
                model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

            # One epoch per call (initial_epoch=epoch-1, epochs=epoch) so
            # the schedule above can intervene between epochs.
            historytemp = model.fit_generator(datagen.flow(x_train, y_train,
                                             batch_size=batch_size),
                                steps_per_epoch=x_train.shape[0] // batch_size,
                                epochs=epoch,
                                validation_data=(x_test, y_test),initial_epoch=epoch-1)
        model.save_weights('cifar100vgg.h5')
        return model
Beispiel #15
0
    def fit(self, train_set, test_set, nb_epoch):
        """Train with heavy augmentation for `nb_epoch` epochs.

        When `test_set` is provided, also validates each epoch,
        early-stops on stagnant val_loss (patience 20) and checkpoints
        the weights after every epoch; otherwise trains without
        validation.
        """
        super(ModelCNNBasic, self).fit(train_set, test_set, nb_epoch)
        # data augmentation: free rotation, both flips, mild shift/shear/zoom
        datagen = ImageDataGenerator(
            zca_whitening=False,
            rotation_range=180,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            vertical_flip=True,
            shear_range=0.2,
            zoom_range=0.2
        )

        datagen.fit(self.x_train)

        verbose = 1 if self.verbose else 0

        # Shared fit_generator arguments; validation and callbacks are
        # added only when a test set is available (the original duplicated
        # the whole call in both branches).
        fit_kwargs = dict(samples_per_epoch=self.x_train.shape[0],
                          nb_epoch=nb_epoch,
                          verbose=verbose)
        if test_set is not None:
            early_stopping = EarlyStopping(monitor='val_loss', patience=20)
            checkpoint_path = 'output/checkpoint/{}'.format(self.name)
            helpers.prepare_dir(checkpoint_path, empty=True)
            checkpoint_path = os.path.join(checkpoint_path, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
            checkpoint = ModelCheckpoint(filepath=checkpoint_path, monitor='val_loss', save_best_only=False)
            fit_kwargs['validation_data'] = (self.x_test, self.y_test)
            fit_kwargs['callbacks'] = [early_stopping, checkpoint]

        self.model.fit_generator(datagen.flow(self.x_train,
                                              self.y_train,
                                              shuffle=True),
                                 **fit_kwargs)
def train():
    """Epoch loop that trains the module-level `model` on augmented
    batches of X_train/Y_train, then scores augmented X_test/Y_test
    batches, calling save_data() whenever the test loss improves.

    NOTE(review): termination of the inner for-loops relies on old Keras
    flow() semantics (one pass per iteration); on modern Keras flow()
    loops forever - confirm the pinned Keras version.
    """
    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    #checkpointer = ModelCheckpoint(filepath="/Users/quinnjarrell/Desktop/Experiments/keras/saved/", verbose=1, save_best_only=True)
    min_score = 91293921  # sentinel "infinity" for the best score so far
    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        # batch train with realtime data augmentation
        # NOTE(review): the generator is re-created and re-fit every epoch
        # even though its configuration never changes (invariant work).
        datagen = ImageDataGenerator(
            featurewise_center=True,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=True,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        datagen.fit(X_train)
        progbar = generic_utils.Progbar(X_train.shape[0])
        x = 0  # running batch counter passed to check_for_early_shutdown
        for X_batch, Y_batch in datagen.flow(X_train, Y_train, batch_size=128):# save_to_dir="/Users/quinnjarrell/datasets/catsvsdogs/train/resized/resized_generated"):
            loss = model.train_on_batch(X_batch, Y_batch)
            x += 1
            check_for_early_shutdown(x)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test, batch_size=128):
            score = model.test_on_batch(X_batch, Y_batch)
            x += 1
            check_for_early_shutdown(x)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
        # `score` here is the loss of the LAST test batch, not an average.
        if score < min_score:
            print ("New best model with score: %s", score)
            save_data()
            min_score = score
Beispiel #17
0
    def create_test_images(self):
        """Regenerate augmented card images for training and testing.

        Wipes any previous output directories, then for every card image
        found in the source folders writes ~50 randomly perturbed copies
        into the matching destination folder. Missing source images are
        skipped with a message.
        """
        shutil.rmtree('card_training', ignore_errors=True)
        shutil.rmtree('card_testing', ignore_errors=True)

        # Very mild perturbations: the cards must stay readable.
        datagen = ImageDataGenerator(
            rotation_range=1,
            width_shift_range=0.1,
            height_shift_range=0.1,
            shear_range=0.05,
            zoom_range=0.1,
            horizontal_flip=False,
            fill_mode='nearest')

        # (source folder, destination folder) pairs; PS feeds both sets.
        dirs = [(base_dir + r'/pics/PP_old/', r'/card_training/'),
                (base_dir + r'/pics/SN/', r'/card_training/'),
                (base_dir + r'/pics/PS/', r'/card_training/'),
                (base_dir + r'/pics/PS/', r'/card_testing/'),
                             (r'tests/', r'/card_testing/')]

        # All rank/suit combinations plus the "empty" placeholder. The
        # list does not depend on the folder, so build it once (the
        # original rebuilt it on every dirs iteration).
        card_ranks_original = '23456789TJQKA'
        original_suits = 'CDHS'
        namelist = ['empty']
        for c in card_ranks_original:
            for s in original_suits:
                namelist.append(c + s)

        for source_folder, destination_folder in dirs:
            for name in namelist:
                try:
                    img = load_img(source_folder + name + '.png')  # this is a PIL image
                    x = np.asarray(img)
                    x = adjust_colors(x)

                    x = x.reshape((1,) + x.shape)  # batch of one image

                    directory = dir_path + destination_folder + name
                    if not os.path.exists(directory):
                        os.makedirs(directory)

                    # the .flow() command below generates batches of randomly
                    # transformed images and saves them to `directory`
                    i = 0
                    for batch in datagen.flow(x, batch_size=1,
                                              save_to_dir=directory,
                                              save_prefix=name,
                                              save_format='png',
                                              ):
                        i += 1
                        if i > 50:
                            break  # otherwise the generator would loop indefinitely
                except Exception:
                    # except Exception (not bare except) so Ctrl-C and
                    # SystemExit still propagate; missing source files are
                    # expected and simply skipped.
                    print("skipping: " + name)
    def get_train_generator(self, batch_size):
        """
        Returns a batch generator which transforms chunk of raw images into numpy matrices
        and then "yield" them for the classifier. Doing so allow to greatly optimize
        memory usage as the images are processed then deleted by chunks (defined by batch_size)
        instead of preprocessing them all at once and feeding them to the classifier.
        :param batch_size: int
            The batch size
        :return: generator
            The batch generator
        """
        # Image Augmentation
        # NOTE(review): these settings only take effect through the single
        # datagen.flow() call at the bottom; the per-image loop below does
        # its own resizing and normalisation.
        datagen = ImageDataGenerator(
            rescale=1./255,
            shear_range=0.2,
            horizontal_flip=True,
            vertical_flip=True)  # randomly flip images horizontally
        loop_range = len(self.X_train)
        while True:
            # Walk the file list in fixed-size chunks; the break below
            # ends each pass once the list is exhausted.
            for i in range(loop_range):
                start_offset = batch_size * i

                # The last remaining files could be smaller than the batch_size
                range_offset = min(batch_size, loop_range - start_offset)

                # If we reached the end of the list then we break the loop
                if range_offset <= 0:
                    break

                batch_features = np.zeros((range_offset, *self.img_resize, 3))
                batch_labels = np.zeros((range_offset, len(self.y_train[0])))

                for j in range(range_offset):
                    # Maybe shuffle the index?
                    img = Image.open(self.X_train[start_offset + j])
                    img.thumbnail(self.img_resize)

                    # Augment the image `img` here

                    # Convert to RGB and normalize
                    img_array = np.asarray(img.convert("RGB"), dtype=np.float32)

                    img_array = img_array[:, :, ::-1]  # RGB -> BGR channel order
                    # Zero-center by mean pixel
                    img_array[:, :, 0] -= 103.939
                    img_array[:, :, 1] -= 116.779
                    img_array[:, :, 2] -= 123.68

                    batch_features[j] = img_array
                    batch_labels[j] = self.y_train[start_offset + j]

                # Augment the images (using Keras allow us to add randomization/shuffle to augmented images)
                # Here the next batch of the data generator (and only one for this iteration)
                # is taken and returned in the yield statement
                yield next(datagen.flow(batch_features, batch_labels, range_offset))
Beispiel #19
0
def hard_train(data_prefix, prefix, seed, col):
    """Train a model for one regression target column of y.

    Parameters
    ----------
    data_prefix : str
        Location of the training data, forwarded to load_train_data().
    prefix : str
        Path prefix for the weight checkpoints and val_loss.txt.
    seed : int
        Shuffle seed forwarded to load_train_data().
    col : int
        Target column in y; even -> systole, odd -> diastole.

    Side effects: writes "<prefix>weights_<what>_best.hdf5",
    "<prefix>weights_<what>.hdf5" and "<prefix>val_loss.txt" (best
    validation loss seen during training).
    """
    what = ['systole', 'diastole'][col % 2]
    print('We are going to train hard {} {}'.format(what, col))
    print('Loading training data...')

    X, y = load_train_data(data_prefix, seed)
    # 80/20 train/validation split.
    X_train, y_train, X_test, y_test = split_data(X, y, split_ratio=0.2)

    model = get_model()

    nb_iter = 200
    batch_size = 32
    # (The original also defined epochs_per_iter, min_val, loss and
    # val_loss locals that were never used; they have been removed.)

    # Standard augmentation: small rotations/shifts plus both flips.
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=True)  # randomly flip images


    print('-'*50)
    print('Training...')
    print('-'*50)

    datagen.fit(X_train)

    # Checkpoint both the best-so-far and the most recent weights.
    checkpointer_best = ModelCheckpoint(filepath=prefix + "weights_{}_best.hdf5".format(what), verbose=1, save_best_only=True)
    checkpointer = ModelCheckpoint(filepath=prefix + "weights_{}.hdf5".format(what), verbose=1, save_best_only=False)

    hist = model.fit_generator(datagen.flow(X_train, y_train[:, col], batch_size=batch_size),
                               samples_per_epoch=X_train.shape[0],
                               nb_epoch=nb_iter, show_accuracy=False,
                               validation_data=(X_test, y_test[:, col]),
                               callbacks=[checkpointer, checkpointer_best],
                               nb_worker=4)

    # Persist the best validation loss achieved during training.
    with open(prefix + 'val_loss.txt', mode='w+') as f:
        f.write(str(min(hist.history['val_loss'])))
        f.write('\n')
def generator():
    """Train the module-level `model` on augmented X_train/Y_train.

    Runs fit_generator for 10 epochs, then performs 10 additional manual
    epochs of train_on_batch over the same augmented stream.
    """
    # Featurewise normalisation plus random rotations, shifts and flips.
    augmenter = ImageDataGenerator(
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True
    )
    # Featurewise statistics (mean/std) must be computed before flow().
    augmenter.fit(X_train)

    model.fit_generator(
        augmenter.flow(X_train, Y_train, batch_size=32),
        steps_per_epoch=len(X_train),
        epochs=10,
    )

    # Manual loop: roughly one full pass (len/32 batches) per epoch,
    # breaking out because flow() yields batches forever.
    for epoch in range(10):
        print('Epoch', epoch)
        seen = 0
        for batch_x, batch_y in augmenter.flow(X_train, Y_train, batch_size=32):
            loss = model.train_on_batch(batch_x, batch_y)
            seen += 1
            if seen >= len(X_train) / 32:
                break
def train_model(model, dataset):
    """
    Train convolutional neural network model.

    Provides the option of using data augmentation to minimize over-fitting.
    Options used currently are:
        rotation_range - rotates the image.
        width_shift_range - shifts the position of the image horizontally.
        height_shift_range - shifts the position of the image vertically.
        horizontal_flip - flips the image horizontally.

    Returns the trained model.  Relies on module-level DATA_AUGMENTATION,
    BATCH_SIZE and NB_EPOCH.  NOTE(review): `show_accuracy` exists only in
    very old Keras releases — confirm the pinned version.
    """
    print("\n- TRAINING MODEL -----------------------------------------------")
    if not DATA_AUGMENTATION:
        print('Not using data augmentation.')
        model.fit(dataset.train_data, dataset.train_labels,
                  batch_size=BATCH_SIZE, nb_epoch=NB_EPOCH, shuffle=True,
                  verbose=1, show_accuracy=True,
                  validation_data=(dataset.validate_data,
                                   dataset.validate_labels))
    else:
        print('Using real-time data augmentation.')
        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,
            samplewise_center=False,
            featurewise_std_normalization=False,
            samplewise_std_normalization=False,
            zca_whitening=False,
            # Rotate image by up to 0.1 degrees randomly.
            # NOTE(review): if "up to 10 degrees" was intended (as the old
            # comment claimed), this should be rotation_range=10.
            rotation_range=0.1,
            # Shift image horizontally by up to 10% of its width
            width_shift_range=0.1,
            # Shift image vertically by up to 10% of its height
            height_shift_range=0.1,
            # Flip the image horizontally randomly
            horizontal_flip=True,
            vertical_flip=False)

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(dataset.train_data)

        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(dataset.train_data,
                                         dataset.train_labels,
                                         shuffle=True, batch_size=BATCH_SIZE),
                            samples_per_epoch=dataset.train_data.shape[0],
                            nb_epoch=NB_EPOCH, verbose=1, show_accuracy=True,
                            validation_data=(dataset.validate_data,
                                             dataset.validate_labels),
                            nb_worker=1)
    return model
Beispiel #22
0
class RandomBatchGenerator(object):

    """Generate random batches of data."""

    def __init__(self, batch_size, typs, imdir, augment, randomize):
        # typs should be a list of "train", "val", or "test".
        self._randomize = randomize
        self._idx = 0
        # Only build an augmenting generator when explicitly requested;
        # otherwise raw batches are returned untouched.
        self._datagen = ImageDataGenerator(
            featurewise_center=False,
            samplewise_center=False,
            featurewise_std_normalization=False,
            samplewise_std_normalization=False,
            zca_whitening=False,
            rotation_range=0,
            width_shift_range=0,
            height_shift_range=0,
            shear_range=0,
            horizontal_flip=True,
            vertical_flip=True
        ) if augment is True else None
        self._ims = []
        # One image path per line in each requested split file.
        for typ in set(typs):
            split_path = os.path.join(SPLIT_DIR, "{}.txt".format(typ))
            with open(split_path) as handle:
                for line in handle:
                    self._ims.append(os.path.join(imdir, line.strip() + ".jpg"))
        # Never request more images per batch than are available.
        self._batch_size = min(len(self._ims), batch_size)

    def __iter__(self):
        return self

    def next(self):
        if self._randomize:
            chosen = random.sample(self._ims, self._batch_size)
        else:
            start = self._idx
            chosen = self._ims[start:start + self._batch_size]
            self._idx = start + self._batch_size
            # Wrap around once the whole list has been consumed.
            if self._idx >= len(self._ims):
                self._idx = 0
        batch_X, batch_y = generate_batch(chosen)
        if self._datagen is None:
            return batch_X, batch_y
        # shuffle=False keeps images aligned with their labels.
        return next(self._datagen.flow(
            X=batch_X,
            y=batch_y,
            batch_size=self._batch_size,
            shuffle=False
        ))
def k_fold_train(imgs_train, imgs_mask_train, n_fold=5, use_generator=True):
    """Train one fresh network per K-fold split of the training images.

    Each fold checkpoints its best-val-loss weights to unet_fold<i>.hdf5.
    Relies on module-level batch_size and nb_epoch.
    """
    from sklearn.cross_validation import KFold
    folds = KFold(imgs_train.shape[0], n_folds=n_fold)
    for fold_no, (train_index, test_index) in enumerate(folds):
        # Alternative architectures kept from experiments:
        # model = get_unet()
        # model = get_keras_mnist_cnn()
        model = get_keras_example_net()

        fold_imgs = np.take(imgs_train, train_index, axis=0)
        fold_masks = np.take(imgs_mask_train, train_index, axis=0)
        held_imgs = np.take(imgs_train, test_index, axis=0)
        held_masks = np.take(imgs_mask_train, test_index, axis=0)
        checkpoint = ModelCheckpoint('unet_fold%s.hdf5' % fold_no,
                                     monitor='val_loss', save_best_only=True)
        if use_generator:
            # Light augmentation on the training fold only.
            augmenter = ImageDataGenerator(
                rescale=1.,
                shear_range=0.2,
                zoom_range=0.2,
                horizontal_flip=True)
            plain = ImageDataGenerator(rescale=1.)
            train_flow = augmenter.flow(fold_imgs, fold_masks, batch_size=batch_size)
            val_flow = plain.flow(held_imgs, held_masks, batch_size=batch_size)
            model.fit_generator(
                train_flow,
                samples_per_epoch=len(fold_imgs),
                validation_data=val_flow,
                nb_val_samples=len(held_masks),
                nb_epoch=nb_epoch, verbose=1,
                callbacks=[checkpoint],
            )
        else:
            model.fit(fold_imgs, fold_masks,
                      validation_data=(held_imgs, held_masks),
                      batch_size=batch_size, nb_epoch=nb_epoch,
                      verbose=1, shuffle=True,
                      callbacks=[checkpoint])
def train(data, Model, file_name, num_epochs=50, batch_size=128, init=None):
    """Train `Model` on `data` with step-decayed SGD and light augmentation.

    Parameters
    ----------
    data : object exposing train_data/train_labels, validation_* and test_* arrays.
    Model : model class; ``Model(None).model`` is the underlying Keras model.
    file_name : path to save weights to, or None to skip saving.
    num_epochs, batch_size : training schedule.
    init : unused; kept for interface compatibility.

    Returns the trained Keras model.
    """
    def fn(correct, predicted):
        # Softmax cross-entropy on raw logits (the model outputs logits).
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted)

    model = Model(None).model
    # summary() prints itself and returns None; the original
    # print(model.summary()) emitted a spurious trailing "None".
    model.summary()

    # Halve the LR every num_epochs/10 epochs, starting from base_lr.
    # (base_lr was identical in both branches below, so it is set once here.)
    base_lr = 0.1

    def get_lr(epoch):
        return base_lr*(.5**(epoch/num_epochs*10))
    # Optimizer lr is irrelevant: the scheduler overrides it each epoch.
    sgd = SGD(lr=0.00, momentum=0.9, nesterov=False)
    schedule = LearningRateScheduler(get_lr)

    model.compile(loss=fn,
                  optimizer=sgd,
                  metrics=['accuracy'])

    if Model == MNISTModel:
        # MNIST: no augmentation (digits are orientation-sensitive).
        datagen = ImageDataGenerator(
            rotation_range=0,
            width_shift_range=0.0,
            height_shift_range=0.0,
            horizontal_flip=False)
    else:
        datagen = ImageDataGenerator(
            rotation_range=10,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True)

    datagen.fit(data.train_data)

    model.fit_generator(datagen.flow(data.train_data, data.train_labels,
                                     batch_size=batch_size),
                        steps_per_epoch=data.train_data.shape[0] // batch_size,
                        epochs=num_epochs,
                        verbose=1,
                        validation_data=(data.validation_data, data.validation_labels),
                        callbacks=[schedule])

    print('Test accuracy:',
          np.mean(np.argmax(model.predict(data.test_data), axis=1)
                  == np.argmax(data.test_labels, axis=1)))

    # Idiom fix: compare to None with `is not`, not `!=`.
    if file_name is not None:
        model.save_weights(file_name)

    return model
def train_model(num_epochs, X_train, y_train, X_val, y_val, model = None, training_time = TRAINING_TIME):
    """Fit `model` (or a fresh Graham-style network) on float32-cast inputs.

    Honours the module-level `data_augmentation` flag; a TemporalCallback
    bounds wall-clock training time.  Returns the trained model.
    """
    if model is None:
        model = initialize_model_graham()

    # Labels are used as-is (assumed already one-hot encoded).
    Y_train = y_train
    Y_val = y_val

    X_train = X_train.astype('float32')
    X_val = X_val.astype('float32')

    temporal = TemporalCallback("netsave/network.gb.temp", training_time)
    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(X_train, Y_train, batch_size=MINIBATCH_SIZE,
                  nb_epoch=num_epochs, show_accuracy=True,
                  validation_data=(X_val, Y_val), shuffle=True, callbacks=[temporal])
    else:
        print('Using real-time data augmentation.')

        # Featurewise normalisation plus mild geometric augmentation.
        datagen = ImageDataGenerator(
            featurewise_center=True,
            samplewise_center=False,
            featurewise_std_normalization=True,
            samplewise_std_normalization=False,
            zca_whitening=False,
            rotation_range=5,
            width_shift_range=0.1,
            height_shift_range=0.1,
            horizontal_flip=True,
            vertical_flip=True)

        # fit() computes the featurewise mean/std that flow() will apply.
        datagen.fit(X_train)

        model.fit_generator(datagen.flow(X_train, Y_train, batch_size=MINIBATCH_SIZE),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=num_epochs, show_accuracy=True,
                            validation_data=(X_val, Y_val),
                            nb_worker=1, callbacks=[temporal])
    return model
Beispiel #26
0
	def train(self, X_in, X_out, n_epoch=100, batch_size=32, filter_imgfile=None, recon_imgfile=None, verbose=True):
		"""Train the autoencoder to reconstruct X_out from corrupted X_in.

		Parameters
		----------
		X_in, X_out : input and target arrays (same leading dimension).
		n_epoch, batch_size : training schedule.
		filter_imgfile : if set, save a visualisation of first-layer weights.
		recon_imgfile : if set, save reconstructions of 100 random inputs.
		verbose : print per-epoch progress bars.
		"""
		# Identity generator: no normalisation or augmentation, pure batching.
		gdatagen = ImageDataGenerator(
		    featurewise_center=False,
		    samplewise_center=False,
		    featurewise_std_normalization=False,
		    samplewise_std_normalization=False,
		    zca_whitening=False
		)

		e = 0
		self.rec_losses = []
		while e < n_epoch:
			e += 1
			if verbose:
				print('-'*40)
				print('Epoch', e)
				print('-'*40)
				progbar = generic_utils.Progbar(X_in.shape[0])

			# NOTE(review): assumes old-Keras flow() that yields one pass over
			# the data; in newer Keras flow() loops forever and this would hang.
			for X_batch, Y_batch in gdatagen.flow(X_in, X_out, batch_size=batch_size):
				# Denoising objective: corrupt input, reconstruct clean target.
				X_batch = get_corrupted_output(X_batch, corruption_level=self.corruption_level)
				train_score = self.ae.train_on_batch(X_batch, Y_batch)
				if verbose:
					progbar.add(X_batch.shape[0], values=[("train generative loss", train_score)])

			# Evaluate reconstruction loss on the full (uncorrupted) input.
			self.loss = self.ae.evaluate(X_in, X_out, batch_size=1024, verbose=0)

			if filter_imgfile is not None:
				# visualize the first-layer weights
				W0 = self.ae.get_weights()[0]
				show_images(np.transpose(W0[:, 0:100], (1, 0)), grayscale=True, filename=filter_imgfile)

			if recon_imgfile is not None:
				# AE reconstruction of 100 random input samples.
				# BUG FIX: original referenced the undefined global `X` here;
				# the samples must come from X_in.
				idx = np.random.permutation(X_in.shape[0])
				idx = idx[:100]
				Xs = X_in[idx]

				# Reconstruct input
				Xr = self.ae.predict(Xs)
				show_images(Xr, grayscale=True, filename=recon_imgfile)
def data_augmentation(img):
    """Save 30 randomly augmented JPEG variants of `img` to ./augmentation."""
    augmenter = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True,
        fill_mode='nearest')
    # flow() expects a 4-D batch: prepend a singleton batch axis.
    sample = img_to_array(img)
    sample = sample.reshape((1,) + sample.shape)
    count = 0
    for _ in augmenter.flow(sample, batch_size=1, save_to_dir='augmentation',
                            save_prefix='1', save_format='jpeg'):
        count += 1
        # flow() yields forever; stop after 30 saved images.
        if count >= 30:
            break
Beispiel #28
0
    def train(self):
        """Fit self.model on augmented MNIST with TensorBoard logging."""
        mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)

        # Geometric augmentation only; fit() computes no stats for these
        # options but is kept for parity with the original pipeline.
        augmenter = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2)
        train_images = mnist.train.images.reshape(-1, 28, 28, 1)
        augmenter.fit(train_images)

        x_test = mnist.test.images.reshape(-1, 28, 28, 1)
        y_test = mnist.test.labels
        self.model.fit_generator(
            augmenter.flow(train_images, mnist.train.labels),
            # batch_size left at flow()'s default, as before.
            epochs=20,
            verbose=1,
            validation_data=(x_test, y_test),
            callbacks=[TrainValTensorBoard(log_dir='./logs/cnn4', histogram_freq=1, write_grads=True)])

        score = self.model.evaluate(x_test, y_test, verbose=0)
        print('Loss', score[0], 'acc', score[1])
Beispiel #29
0
    def train(self, dataset, metric, nb_epoch=30, data_augmentation=True):
        """Compile self.model with Adam and fit it on `dataset`.

        Parameters
        ----------
        dataset : object exposing X_train/Y_train and X_valid/Y_valid arrays.
        metric : single Keras metric name/function to track.
        nb_epoch : maximum epochs; early stopping on val_loss may end sooner.
        data_augmentation : use real-time augmentation when True.
        """
        # Alternative optimizers kept from experiments (sgd/adamax unused).
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)  # acc: 99.58%
        adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)  # acc: 99.63
        adamax = Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(loss='categorical_crossentropy',  # 'binary_crossentropy'
                           optimizer=adam,
                           metrics=[metric])
        earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')

        if not data_augmentation:
            print('Not using data augmentation.')
            # BUG FIX: `callbacks` must be a list of callbacks — the original
            # passed the bare EarlyStopping object here (the else-branch
            # already used a list correctly).
            self.model.fit(dataset.X_train, dataset.Y_train,
                           batch_size=self.batch_size,
                           nb_epoch=nb_epoch, callbacks=[earlyStopping],
                           validation_data=(dataset.X_valid, dataset.Y_valid),
                           shuffle=True)
        else:
            print('Using real-time data augmentation.')

            # This will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=False,             # set input mean to 0 over the dataset
                samplewise_center=False,              # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,   # divide each input by its std
                zca_whitening=False,                  # apply ZCA whitening
                rotation_range=20,                    # randomly rotate images up to 20 degrees
                width_shift_range=0.,                 # no horizontal shift
                height_shift_range=0.,                # no vertical shift
                channel_shift_range=0.2,
                fill_mode='nearest',                  # fill pixels exposed by transforms
                horizontal_flip=False,
                vertical_flip=True)

            # compute quantities required for featurewise normalization
            # (no-ops with the flags above, but required if they are enabled)
            datagen.fit(dataset.X_train)

            # fit the model on the batches generated by datagen.flow()
            self.model.fit_generator(datagen.flow(dataset.X_train, dataset.Y_train,
                                                  batch_size=self.batch_size),
                                     samples_per_epoch=dataset.X_train.shape[0],
                                     nb_epoch=nb_epoch, callbacks=[earlyStopping],
                                     validation_data=(dataset.X_valid, dataset.Y_valid))
class RandomBatchGenerator(object):

    """Generate random batches of data."""

    def __init__(self, batch_size, typ, imdir, augment):
        # typ should be "train", "val", or "test".
        self._batch_size = batch_size
        self._ims = []
        self._idx = 0
        if augment is True:
            # Aggressive geometric augmentation for training batches.
            self._datagen = ImageDataGenerator(
                featurewise_center=False,
                samplewise_center=False,
                featurewise_std_normalization=False,
                samplewise_std_normalization=False,
                zca_whitening=False,
                rotation_range=30,
                width_shift_range=0.25,
                height_shift_range=0.25,
                shear_range=0.1,
                horizontal_flip=True,
                vertical_flip=True
            )
        else:
            self._datagen = None
        # Collect every frame of every video listed in the split file.
        split_file = os.path.join(SPLIT_DIR, "{}.txt".format(typ))
        with open(split_file) as handle:
            for record in handle:
                frame_glob = os.path.join(imdir, record.strip(), "*")
                self._ims += glob.glob(frame_glob)

    def __iter__(self):
        return self

    def next(self):
        sampled = random.sample(self._ims, self._batch_size)
        batch_X, batch_y = generate_batch(sampled)
        if self._datagen is not None:
            # shuffle=False keeps images aligned with their labels.
            return next(self._datagen.flow(
                X=batch_X,
                y=batch_y,
                batch_size=self._batch_size,
                shuffle=False
            ))
        return batch_X, batch_y
Beispiel #31
0
def train():
    """Train a 2-D U-Net (GAM variant) for segmentation and log the run.

    Loads image/label pickles, normalises to [0, 1], splits train/val,
    fits with light shift augmentation, writes a run-detail log, then
    evaluates the best checkpointed weights.

    NOTE: always record the training image size — weights are size-specific.
    """
    # --- Hyper-parameters -------------------------------------------------
    size = (512, 512)
    target_size = (size[0], size[1], 1)
    batch_size = 4
    epoch = 50
    classes = 2
    gen_num = 5           # augmentation multiplier per source image
    seed = 10

    # Timestamp for the run log.
    dt_now = datetime.datetime.now()
    # NOTE(review): this sums year+month+day into one int (e.g. 2048);
    # a formatted date string was probably intended — kept as-is.
    days = dt_now.year + dt_now.month + dt_now.day

    # --- Model ------------------------------------------------------------
    # model = unet_2d(input_shape=target_size)
    model = unet_2d_GAM(input_shape=target_size)
    model.summary()

    # --- Dataset ----------------------------------------------------------
    image = load_image_s('./image/image.pkl')
    label = load_image_s('./image/label.pkl')

    # Normalise pixel values to [0, 1].
    image = image/255
    label = label/255
    # The same random_state on both calls keeps image/label pairs aligned.
    train_image, val_image = train_test_split(image, test_size=0.3, random_state=seed, shuffle=True)
    train_label, val_label = train_test_split(label, test_size=0.3, random_state=seed, shuffle=True)

    # --- Data augmentation ------------------------------------------------
    # NOTE(review): these counts are samples*gen_num but are passed below as
    # steps_per_epoch/validation_steps (batch counts) — confirm intent.
    nb_data = len(train_image) * gen_num
    val_nb_data = len(val_image) * gen_num
    train_gen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1)
    val_gen = ImageDataGenerator()

    # --- Callbacks --------------------------------------------------------
    ModelCheckpoint_ = ModelCheckpoint('./model/unet_2d_weights.hdf5', monitor='val_loss', save_weights_only=True)
    early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
    ReduceLROnPlateau_ = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1, mode='auto', epsilon=0.001, cooldown=0, min_lr=0.002)
    CSVLogger_ = keras.callbacks.CSVLogger("./model/callback/log_learning.csv", separator=',', append=False)
    TensorBoard_ = keras.callbacks.TensorBoard(log_dir="./model/log", histogram_freq=1)

    # callbacks = [ModelCheckpoint_, ReduceLROnPlateau_, early_stopping, CSVLogger_]
    callbacks = [ModelCheckpoint_, early_stopping, CSVLogger_]

    # --- Training ---------------------------------------------------------
    model.fit_generator(generator=train_gen.flow(train_image, train_label, batch_size=batch_size),
                    steps_per_epoch=nb_data,
                    epochs=epoch,
                    validation_data=val_gen.flow(val_image, val_label, batch_size=batch_size),
                    validation_steps=val_nb_data,
                    max_queue_size=10,
                    callbacks=callbacks)

    # --- Run log ----------------------------------------------------------
    # BUG FIX: `days` (int), `size` (tuple) and `seed` (int) cannot be
    # concatenated to str — the original raised TypeError here.  Wrapped in
    # str(); the literal label text is unchanged.
    with open("./model/model_detail.txt", mode='w') as f_dataset:
        f_dataset.write("タイトル     : 適当になんか")
        f_dataset.write("作成日時     : " + str(days))
        f_dataset.write("画像サイズ   : " + str(size))
        f_dataset.write("seed         : " + str(seed))
        f_dataset.write("使用モデル   : unet_2d_GAM")
        f_dataset.write("\n画像枚数   :" + str(len(image)))
        f_dataset.write("\n     - train_image : " + str(len(train_image)))
        f_dataset.write("\n     - val_image   : " + str(len(val_image)))

    # --- Validation with the best saved weights ---------------------------
    model.load_weights('./model/unet_2d_weights.hdf5')
    try:
        score = model.evaluate(val_image, val_label, batch_size=3, verbose=1)
        print('Test loss     ', score[0])
        print('Test accuracy ', score[1])
    except:
        # Broad catch kept deliberately: training artefacts are already
        # saved, so a failed evaluation should not crash the run.
        print("Error")
    finally:
        print("End program")
#-
# Build the classifier on top of the (externally defined) pre-trained base.
model = create_model(base_model)


def train_model(epochs, train_generator):
    """Fit the module-level `model` on `train_generator`.

    Returns the Keras History object.  BUG FIX: the original assigned the
    History locally and returned nothing, so the caller's
    `hist = train_model(...)` was always None.
    """
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=final_data_img.shape[0] //
                                  BATCH_SIZE,
                                  epochs=epochs,
                                  verbose=1)
    return history


BATCH_SIZE = 32
# Identity generator: no augmentation, used purely for batching.
train_datagen = ImageDataGenerator()
train_datagen.fit(final_data_img)
train_generator = train_datagen.flow(final_data_img,
                                     final_labels_keras,
                                     batch_size=BATCH_SIZE)

# Time the 10-epoch training run.
start = time.time()
hist = train_model(10, train_generator)
end = time.time()

# Persist the architecture as JSON and the weights as HDF5.
model_json = model.to_json()
with open("../model_InceptionV3_10epochs.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(
    "../model_no_augmentation_all_train_InceptionV3_10epochs.h5")
print("Saved model to disk")

# Timer restarted for a subsequent (out-of-view) timing section.
start = time.time()
Beispiel #33
0
        MaxPooling2D(),
        Flatten(),
        BatchNormalization(),
        Dense(512, activation='relu'),
        BatchNormalization(),
        Dense(10, activation='softmax')
        ])

model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])

# Identity generator (no augmentation), used only to batch the data.
gen = ImageDataGenerator()

# Keep references to the full arrays before the split below rebinds them.
X = X_train
y = y_train

X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.10)
batches = gen.flow(X_train, y_train, batch_size=64)
val_batches = gen.flow(X_val, y_val, batch_size=64)

print('fitting '+ str(batches.n))
# NOTE(review): steps_per_epoch/validation_steps are set to the SAMPLE
# counts (batches.n), not batch counts (n // 64) — each "epoch" therefore
# iterates the data ~64 times; confirm intent.
history=model.fit_generator(generator=batches, steps_per_epoch=batches.n, epochs=3,
                    validation_data=val_batches, validation_steps=val_batches.n)

history_dict = history.history

predictions = model.predict_classes(X_test, verbose=0)

# Kaggle-style submission file: ImageId is 1-based.
submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)),
                         "Label": predictions})

submissions.to_csv("DR.csv", index=False, header=True)
Beispiel #34
0
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=
        False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=
        0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=
        0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=
        0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(X_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        validation_data=(X_test, Y_test),
                        epochs=nb_epoch,
                        verbose=1,
                        max_q_size=100,
                        callbacks=[lr_reducer, early_stopper, csv_logger])
Beispiel #35
0
# Prepend a singleton batch axis so datagen.flow() accepts a single image.
x = x.reshape(
    (1, ) + x.shape)  # this is a Numpy array with shape (1, 3, 150, 150)

# the .flow() command below generates batches of randomly transformed images
# and saves the results to the `preview/` directory
i = 0
y = np.array(['1'], dtype='int')

#============================================================
'''
y = None batch is (1, 340, 260, 3) numpy
y != None batch[0] is (1, 340, 260, 3) numpy batch[1] (1,)
'''
# With labels supplied, each yielded batch is an (images, labels) tuple.
for batch in datagen.flow(x,
                          y,
                          batch_size=1,
                          save_to_dir='test',
                          save_prefix='cat',
                          save_format='jpeg'):
    print(type(batch))
    print(batch[0].shape)
    print(batch[1].shape)
    i += 1
    if i > 1:
        break  # otherwise the generator would loop indefinitely
#============================================================

#============================================================
# for batch,label in datagen.flow_from_directory('test',target_size=(150, 150),batch_size=1):
#     print(type(batch))
#     print(type(label))
#     i += 1
Beispiel #36
0
            model = load_model(models[i])
            gpus_num = len(args.gpus.split(','))
            if gpus_num != 1:
                multi_gpu_model(model, gpus=gpus_num)
            #model.compile(loss="categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
            loss_func = weighted_binary_crossentropy(alpha[j])
            loss_weights = None
            #metrics = [weighted_acc]
            model.compile(loss=loss_func,
                          optimizer='adam',
                          loss_weights=loss_weights,
                          metrics=metrics)
            model.summary()

            train_generator = datagen.flow(X_train,
                                           y_train[:, j],
                                           batch_size=batch_size)
            val_generator = datagen.flow(X_test,
                                         y_test[:, j],
                                         batch_size=batch_size)
            model.fit_generator(
                train_generator,
                steps_per_epoch=int(X_train.shape[0] /
                                    (batch_size * gpus_num)),
                epochs=nb_epoch,
                validation_data=val_generator,
                validation_steps=int(X_test.shape[0] /
                                     (batch_size * gpus_num)),
                callbacks=[
                    ModelCheckpoint(
                        '../models/imagenet_models/GoogLeNet_PETA/iter50model_weightedloss_'
Beispiel #37
0
    Xpath = img_path + "{}.png".format(namesimg[i])
    Ypath = label_path + "{}.png".format(namesimg[i])
    print(Xpath)

    x = load_data_org(Xpath, 'data')
    y = load_data_org(Ypath, 'label')

    x = x.reshape((1, ) + x.shape)
    y = y.reshape((1, ) + y.shape)

    # Adapt ImageDataGenerator flow method for data augmentation.
    _ = np.zeros(B_SIZE)
    seed = random.randrange(1, 1000)

    x_tmp_gen = x_data_gen.flow(np.array(x), _, batch_size=B_SIZE, seed=seed)
    y_tmp_gen = y_data_gen.flow(np.array(y), _, batch_size=B_SIZE, seed=seed)

    # Finally, yield x, y data.
    for j in range(GEN_NUM):
        x_result, _ = next(x_tmp_gen)
        y_result, _ = next(y_tmp_gen)
        x_res = x_result[0]
        y_res = y_result[0]
        y_res = np.squeeze(y_res, axis=2)
        img = image.array_to_img(x_res)
        lab_arr = y_res.astype('uint8')
        lab = palette.genlabelpal(lab_arr)
        X_res_path = gen_img_path + "{}".format(
            namesimg[i]) + '_' + str(j) + ".png"
        Y_res_path = gen_lab_path + "{}".format(
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# Build the VGG-style classifier sized to the dataset's image dimensions
# and the number of classes discovered by the label binarizer `lb`.
model = VGGNet.build(width=IMAGE_DIMS[1],
                     height=IMAGE_DIMS[0],
                     depth=IMAGE_DIMS[2],
                     classes=len(lb.classes_))

# Adam with linear decay spread over the full training run.
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)

model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# NOTE(review): steps_per_epoch=len(trainX) is the sample count, not
# len(trainX) // BS batch count — confirm intent.
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX),
                        epochs=EPOCHS,
                        verbose=1)

model.save('./pokemon/model')
# Persist the fitted label binarizer next to the model.
f = open('./pokemon/model/labels.pickle', "wb")
f.write(pickle.dumps(lb))
f.close()

# Plot training vs validation loss curves.
plt.style.use("ggplot")
plt.figure()
N = EPOCHS
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
Beispiel #39
0
#!/usr/bin/python
# coding:utf8

# Generate up to 61 augmented JPG variants of one image.
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')
# NOTE(review): the "\\\d" in this literal yields a doubled backslash
# before "drone" — a raw string would be safer; path kept as-is.
img = load_img('C:\\Users\\ailab\\PycharmProjects\\\drone\\pic\\image4.jpg')
x = img_to_array(img)
# flow() needs a 4-D batch: prepend a singleton batch axis.
x = x.reshape((1, ) + x.shape)

i = 0
for batch in datagen.flow(
        x,
        batch_size=1,
        save_to_dir='C:\\Users\\ailab\PycharmProjects\\drone\\pic\\4',
        save_prefix='4',
        save_format='jpg'):
    i += 1
    # flow() loops forever; stop after saving 61 images.
    if i > 60:
        break
# Fit both generators with the same seed so paired image/mask augmentations
# stay aligned (fit() matters only if featurewise options are enabled in the
# generator config, which is not visible in this fragment -- TODO confirm).
image_datagen.fit(img_train, augment=True, seed=seed)
mask_datagen.fit(mask_train, augment=True, seed=seed)

# Keep only the first channel of each stack.
img_train_n = img_train[:, :, :, 0]
mask_train_n = mask_train[:, :, :, 0]

print(img_train_n.shape)

# Re-order axes so new augmented samples can be appended along axis 2.
img_train_mod = np.transpose(img_train_n, (2, 1, 0))
mask_train_mod = np.transpose(mask_train_n, (2, 1, 0))

print(img_train_mod.shape)
print(mask_train_mod.shape)

# Accumulate augmented image batches; assumes 128x128 single-channel
# images -- TODO confirm against the data loader.
k = 0
for X_batch in image_datagen.flow(img_train, batch_size=size_aug, seed=seed):
    for i in range(0, size_aug):
        img_train_mod = np.concatenate(
            (img_train_mod, X_batch[i].reshape(128, 128, 1)), axis=2)

    print(img_train_mod.shape)
    k = k + 1
    print(k)

    # flow() loops forever, so stop after a fixed number of batches.
    if (k > 1700):
        break

# Same accumulation for the masks, driven by the same seed for alignment.
k = 0
for Y_mask in mask_datagen.flow(mask_train, batch_size=size_aug, seed=seed):
    for i in range(0, size_aug):
        mask_train_mod = np.concatenate(
# Data augmentation applied to the training set only.
aug = ImageDataGenerator(rotation_range=40, width_shift_range=0.1, height_shift_range=0.1, 
	shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode="nearest")

print("[INFO] compiling model...")
# opt = SGD(lr=0.05)
# SGD with Nesterov momentum and linear decay over the 200 training epochs.
opt = SGD(lr=0.05, decay=0.05 / 200, momentum=0.9, nesterov=True)
model = MiniVGGNet.build(width=64, height=64, depth=3, classes=len(classNames))
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

# Keep only the weights with the lowest validation loss on disk.
checkpoint = ModelCheckpoint(args["weights"], monitor="val_loss", mode="min", save_best_only=True, verbose=1)
callbacks = [checkpoint]

print("[INFO] training network...")
# H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=32), validation_data=(testX, testY), 
	steps_per_epoch=len(trainX) // 32, epochs=200, verbose=1)

print("[INFO] evaluating network...")
preds = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1), preds.argmax(axis=1), target_names=classNames))

# Save a visualisation of the training results (loss/accuracy over epochs).
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, 200), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, 200), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, 200), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, 200), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("# Epoch")
Beispiel #42
0
        width_shift_range=
        0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=
        0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Track top-5 accuracy alongside plain accuracy.
    model1.compile(optimizer='Adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy', 'top_k_categorical_accuracy'])

    model1.summary()

    # history = model1.fit(x_train, y_train, batch_size=batchSize, epochs=epochs, verbose=2, validation_split=0.15)
    # Train on augmented batches; validation data is left un-augmented.
    history = model1.fit_generator(datagen.flow(x_train,
                                                y_train,
                                                batch_size=batchSize),
                                   epochs=epochs,
                                   verbose=2,
                                   validation_data=(x_test, y_test),
                                   workers=4)
    score = model1.evaluate(x_test, y_test)

    # Plot training vs. validation loss.
    plt.figure(figsize=[8, 6])
    plt.plot(history.history['loss'], 'r', linewidth=3.0)
    plt.plot(history.history['val_loss'], 'b', linewidth=3.0)
    plt.legend(['Training loss', 'Validation Loss'], fontsize=18)
    plt.xlabel('Epochs ', fontsize=16)
    plt.ylabel('Loss', fontsize=16)
    plt.title('Loss Curves', fontsize=16)
    plt.show()
Beispiel #43
0
def do_it(args):
    """Train MyCNN on an image dataset and save the model plus a training plot.

    Expects in ``args``: ``epochs``, ``learn_rate``, ``batch_size``,
    ``train_image_size``, ``output``, ``dataset``, ``model``, ``plot`` and
    optionally pre-loaded ``data`` (a ``(data, labels)`` pair).
    Returns early if the output directory cannot be created.
    """
    # initialize the number of epochs to train for, initial learning rate,
    # and batch size
    EPOCHS = args["epochs"]
    INIT_LR = args["learn_rate"]
    BS = args["batch_size"]

    image_size = args["train_image_size"]

    # Validate/create the output directory before doing any expensive work.
    filepath = args["output"]
    path_valid = ensure_dir(filepath)
    if path_valid is not None:
        print("Invalid output dir: {} -- {}".format(filepath, path_valid))
        return

    # initialize the data and labels
    print("[INFO] loading images...")
    dataset_path = args["dataset"]

    # Use pre-loaded data when supplied, otherwise load from disk.
    data, labels = args["data"] if "data" in args else load_data(
        dataset_path, image_size)

    dataset_size = len(data)

    print("[INFO] EPOCHS: {}".format(EPOCHS))
    print("[INFO] INIT_LR: {}".format(INIT_LR))
    print("[INFO] BATCH_SIZE: {}".format(BS))
    print("[INFO] DATASET_SIZE: {}".format(dataset_size))

    # Encode the hyper-parameters into the model / plot file names so runs
    # with different settings do not overwrite each other.
    params_info = "_EPOCHS{}_LR{}_BS{}_TIS{}_DSS{}".format(
        EPOCHS, INIT_LR, BS, image_size, dataset_size)
    model_name = "{}{}".format(args["model"], params_info)
    model_name = os.path.sep.join((filepath, model_name))
    print("[INFO] model file name: {}".format(model_name))
    plot_name = args["model"] if args["plot"] is None else args["plot"]
    plot_name = "{}{}.png".format(plot_name, params_info)
    plot_name = os.path.sep.join((filepath, plot_name))
    print("[INFO] plot file name: {}".format(plot_name))

    # partition the data into trainint and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    # convert the labels from integers to vectors
    # NOTE(review): 'train_labels' is not defined anywhere in this function --
    # this looks like it should be derived from 'labels'; verify at module
    # level before relying on this class count.
    class_num = len(train_labels) + 1
    trainY = to_categorical(trainY, num_classes=class_num)
    testY = to_categorical(testY, num_classes=class_num)

    # construct the image generator for data augmentation
    aug = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')

    # initial the model
    print("[INFO] compiling model...")
    model = MyCNN.build(width=image_size,
                        height=image_size,
                        depth=3,
                        classes=class_num)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    # NOTE(review): binary_crossentropy with one-hot multi-class labels is
    # unusual -- 'categorical_crossentropy' is the standard pairing; confirm
    # the intent before changing.
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    # BUG FIX: steps_per_epoch is the number of *batches* per epoch, not the
    # number of samples; len(trainX) made every epoch BS times too long.
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS,
                            verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save(model_name)

    plot(EPOCHS, H, plot_name)
    # Freeze everything up to and including the GAP layer; fine-tune the rest.
    for layer in base_model.layers[:GAP_LAYER+1]:
        layer.trainable = False
    for layer in base_model.layers[GAP_LAYER+1:]:
        layer.trainable = True
    # Custom fmeasure/recall/precision metrics are defined elsewhere in the file.
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy',fmeasure,recall,precision])

from keras.preprocessing.image import ImageDataGenerator
# Light augmentation for training; the validation generator applies none.
train_datagen = ImageDataGenerator(width_shift_range = 0.1, 
                                 height_shift_range = 0.1, 
                                 zoom_range = 0.1,
                                 horizontal_flip= True)
val_datagen = ImageDataGenerator()   

batch_size = 128

# shuffle=False keeps sample order stable across epochs.
train_generator = train_datagen.flow(X_train2,y_train2,batch_size=batch_size,shuffle=False) 
val_generator = val_datagen.flow(X_val,y_val,batch_size=batch_size,shuffle=False)

# Checkpoint on the custom F-measure metric; halve the LR when it plateaus.
# NOTE(review): 'checkpointer' is created but not passed to the callbacks
# below, so no weights are saved during this phase -- verify intent.
checkpointer = ModelCheckpoint(filepath='weights_best_simple_model.hdf5', 
                            monitor='val_fmeasure',verbose=1, save_best_only=True, mode='max')
reduce = ReduceLROnPlateau(monitor='val_fmeasure',factor=0.5,patience=2,verbose=1,min_lr=1e-4)

# Replicate across 4 GPUs, freeze for transfer learning, then run phase 1.
model = multi_gpu_model(model, gpus=4)
setup_to_transfer_learning(model, base_model)
history_t1 = model.fit_generator(train_generator,
                                steps_per_epoch=274,
                                validation_data = val_generator,
                                epochs=10,
                                callbacks=[reduce],
                                verbose=1
                               )
                  len(arr[f]['y']),
                  ' of ',
                  len(dic[f]),
                  end='\r')
    print(len(arr['train']['x']))

    from keras.preprocessing.image import ImageDataGenerator
    from keras.utils import to_categorical
    #ims(arr['train']['x'][1], _class[arr['train']['y'][1]])
    # Rescale pixels to [0, 1]; augment the training split only.
    train_gen = ImageDataGenerator(rescale=1 / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
    val_gen = ImageDataGenerator(rescale=1 / 255)
    # One-hot encode labels on the fly; len(_class) gives the class count.
    train = train_gen.flow(np.asarray(arr['train']['x']),
                           y=to_categorical(arr['train']['y'],
                                            num_classes=len(_class)),
                           batch_size=32)
    val = val_gen.flow(np.asarray(arr['val']['x']),
                       y=to_categorical(arr['val']['y'],
                                        num_classes=len(_class)),
                       batch_size=32)
    # Per-run CSV log named after the model and class configuration.
    name = args.model + '_' + args.classes
    from keras.callbacks import CSVLogger
    csvlogger = CSVLogger('log/' + name + '.csv')
    history = model.fit_generator(train,
                                  epochs=25,
                                  steps_per_epoch=len(train),
                                  validation_data=val,
                                  validation_steps=len(val),
                                  use_multiprocessing=True,
                                  callbacks=[csvlogger])
Beispiel #46
0
    rescale=1. / 255,  #Scale the image between 0 and 1
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)

validacao_datagen = ImageDataGenerator(
    rescale=1. /
    255)  #We do not augment validation data. we only perform rescale

# Image generators for the training and validation splits.
treino_generator = treino_datagen.flow(X_treino,
                                       y_treino,
                                       batch_size=batch_size)
validacao_generator = validacao_datagen.flow(X_validacao,
                                             y_validacao,
                                             batch_size=batch_size)

# Start training; steps are full passes over each split per epoch.
history = model.fit(treino_generator,
                    steps_per_epoch=ntrain // batch_size,
                    epochs=Epocas,
                    validation_data=validacao_generator,
                    validation_steps=nval // batch_size)

# Save the trained model.
model.save('modelo.h5')
Beispiel #47
0
def test_train_deep_nn(train_file='gs://dataset-jesus-bucket/DataSet/',
                       job_dir='gs://dataset-jesus-bucket/',
                       **args):
    """Train the trail classifier on GCS-hosted data subsets, then evaluate.

    Reads a pickled dataframe of GCS paths, trains on each subset listed in
    ``datasets``, saves the model locally and copies it to GCS, and finally
    evaluates per-folder accuracy on the held-out test data.
    """
    # Read the data containing gs paths
    global acc_path_to_save
    file_stream = file_io.FileIO(
        "gs://data-daisy/full_gs_paths_subset3.pickle", mode='rb')
    data_frame = pickle.load(file_stream)

    # Will split the training due to memory consumption
    datasets = [[
        '/000/', '/001/', '/003/', '/004/', '/006/', '/005/', '/007/', '/008/',
        '/010/', '/009/', '/011/', 'full_exc_002_2'
    ]]

    split = split_sequence(data_frame, 60, 15)

    # get numpy arrays
    classifier = create_trail_model()

    for subset_data in datasets:
        # The last list element is the subset's name tag, not a data folder.
        subset_path = "deepnn_subset_" + subset_data[-1] + ".h5"

        train_x, train_y, normalized_check = read_data_file_io(
            split[0], subset_data[:-1], data_type="train")
        test_x, test_y, normalized_check = read_data_file_io(split[1],
                                                             subset_data[:-1],
                                                             data_type="train")

        # Rescale only when the loader did not already normalise the pixels.
        if normalized_check:
            scale_factor = 1.0
        else:
            scale_factor = 1.0 / 255.0

        # ImageDataGenerator is used to append additional data that is rescaled
        # sheared, zoomed, and rotated for test and training sets
        train_datagen = ImageDataGenerator(rescale=scale_factor,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           rotation_range=18)

        # BUG FIX: the scale factor was passed positionally, which bound it to
        # ImageDataGenerator's first parameter (featurewise_center) instead of
        # 'rescale', so validation images were never rescaled.
        test_datagen = ImageDataGenerator(rescale=scale_factor)
        acc_path_to_save = 'gs://data-daisy/model_accuracy_' + subset_data[
            0][:-1] + '.pickle'

        # Data is loaded into arrays up-front (Cloud-friendly), so .flow() is
        # used here rather than the older flow_from_dataframe pipeline.
        train_set = train_datagen.flow(train_x,
                                       train_y,
                                       batch_size=128,
                                       shuffle=True)
        test_set = test_datagen.flow(test_x,
                                     test_y,
                                     batch_size=128,
                                     shuffle=True)

        classifier.fit_generator(train_set,
                                 steps_per_epoch=train_x.shape[0] // 128,
                                 epochs=4,
                                 validation_data=test_set,
                                 validation_steps=test_x.shape[0] // 128)

        print("Done training dataset subset: " + subset_path)

        classifier.save(subset_path)

    # Copy the last saved checkpoint up to GCS.
    with file_io.FileIO(subset_path, mode='rb') as f:
        with file_io.FileIO(os.path.join('gs://data-daisy/', subset_path),
                            mode='wb+') as of:
            of.write(f.read())
            of.close()
            print('saved')
        f.close()

    print("Now testing model")

    # Per-folder evaluation on the held-out test split.
    datasets = [
        '/000/', '/001/', '/002/', '/003/', '/004/', '/005/', '/006/', '/007/',
        '/008/', '/009/', '/010/', '/011/'
    ]
    acc_dict = {}
    for dataset in datasets:
        x, y, _ = read_data_file_io(data_frame, dataset, data_type="test")
        loss, accuracy = classifier.evaluate(x, y)
        acc_dict[dataset] = accuracy
    # Release the large arrays before the next stage.
    x = 0
    y = 0
    print("done testing dataset now doing knn")
Beispiel #48
0
# Final conv block with L2 regularisation to curb overfitting.
model.add(Conv2D(256, (2, 3), kernel_regularizer=regularizers.l2(0.03)))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2,3), padding='same'))
model.add(Dropout(0.5))

# Dense head: 256-unit hidden layer with batch norm and heavy dropout.
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(Dropout(0.5))

# 3-way softmax output.
model.add(Dense(3))
model.add(Activation("softmax"))

#sgd = optimizers.SGD(lr=0.04, decay=1e-10, momentum=0.9, nesterov=True)
ada = optimizers.Adam(lr=3e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0.0, amsgrad=False)
model.compile(loss="categorical_crossentropy",
              optimizer=ada,
              metrics=["accuracy"])

# Data augmentation: horizontal shifts only.
datagen = ImageDataGenerator(width_shift_range=0.4)
datagen.fit(x_train)

# Optimization
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
                    epochs=150, validation_data=(x_test, y_test))

model.summary()

Beispiel #49
0
# Dense classification head on top of the (unseen) convolutional base.
classifier.add(Flatten())

classifier.add(Dense(activation='relu', units=256))
classifier.add(Dropout(0.4))
classifier.add(Dense(activation='relu', units=128))
classifier.add(Dropout(0.4))
classifier.add(Dense(activation='relu', units=128))
classifier.add(Dropout(0.4))

# BUG FIX: the output layer used 'relu', which does not produce a probability
# distribution; categorical cross-entropy requires a softmax output layer.
classifier.add(Dense(activation='softmax', units=10))

classifier.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

from keras.preprocessing.image import ImageDataGenerator

# Augmentation: small rotations and shifts only; flips are disabled
# (mirroring would change the meaning of digit-like classes -- TODO confirm
# dataset).
datagen = ImageDataGenerator(featurewise_center=False,
                             featurewise_std_normalization=False,
                             rotation_range=20,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             horizontal_flip=False,
                             vertical_flip=False)
datagen.fit(X_train)

# NOTE(review): 'nb_epoch' is the legacy Keras 1 keyword ('epochs' in
# Keras 2); kept as-is for compatibility with the pinned Keras version.
classifier.fit_generator(datagen.flow(X_train, Y_train),
                         nb_epoch=20,
                         validation_data=(X_test, Y_test))
def main(argv=None):
    """Train a steering-direction classifier and report/plot its performance.

    Parses CLI options, loads and preprocesses the driving dataset, trains
    MiniVGGNet with light augmentation, prints a classification report,
    plots loss/accuracy curves and saves the model.  Returns 0 on success.
    """
    if argv is None:
        argv = sys.argv[1:]
    options = parse_args(argv)

    print("[INFO] loading images...")

    # Resize + array-conversion preprocessing chain.
    loader = SimpleDatasetLoader(preprocessors=[
        SimplePreprocessor(width=img_cols, height=img_rows),
        ImageToArrayPreprocessor(),
    ])
    data, labels = loader.load(
        driving_log_path=options.driving_log,
        data_path=options.dataset,
        verbose=True,
    )
    data = data.astype('float32')
    # BUG FIX: removed a leftover 'import ipdb; ipdb.set_trace()' debugger
    # breakpoint that halted every run at this point.

    # split train and validation
    data, labels = shuffle(data, labels)
    x_train, x_test, y_train, y_test = train_test_split(
        data,
        labels,
        random_state=13,
        test_size=0.1,
    )

    # One-hot encode the three steering classes.
    lb = LabelBinarizer()
    y_train = lb.fit_transform(y_train)
    y_test = lb.transform(y_test)

    label_names = ['straight', 'left', 'right']

    # Mild augmentation only -- horizontal flips are disabled because they
    # would swap the left/right labels.
    aug = ImageDataGenerator(
        rotation_range=1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.2,
        horizontal_flip=False,
        fill_mode="nearest",
    )

    print('[INFO] compiling model...')
    model = MiniVGGNet.build(width=img_cols,
                             height=img_rows,
                             depth=1,
                             classes=len(label_names))

    # SGD with Nesterov momentum and linear decay over the training run.
    opt = SGD(lr=learning_rate,
              momentum=0.9,
              decay=learning_rate / nb_epoch,
              nesterov=True)
    model.compile(
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        optimizer=opt,
    )

    # NOTE(review): 'nb_epoch' is the legacy Keras 1 keyword ('epochs' in
    # Keras 2); kept for compatibility with the pinned Keras version.
    history = model.fit_generator(
        aug.flow(x_train, y_train, batch_size=batch_size),
        nb_epoch=nb_epoch,
        steps_per_epoch=(len(x_train) // batch_size),
        verbose=1,
        validation_data=(x_test, y_test),
    )

    predictions = model.predict(x_test, batch_size=batch_size)
    print(
        classification_report(
            y_test.argmax(axis=1),
            predictions.argmax(axis=1),
            target_names=label_names,
        ))

    # Twin-axis plot: loss on the right axis, accuracy (0-1) on the left.
    plt.style.use("ggplot")
    fig, ax_acc = plt.subplots(1, 1)

    ax_acc.set_xlabel("Epoch #")

    ax_loss = ax_acc.twinx()
    ax_loss.grid(None)
    ax_loss.set_ylabel("Loss")

    ax_acc.grid(None)
    ax_acc.set_ylabel("Accuracy")
    ax_acc.set_ylim([0, 1])

    ax_loss.plot(np.arange(0, nb_epoch),
                 history.history["loss"],
                 label="train_loss")
    ax_loss.plot(np.arange(0, nb_epoch),
                 history.history["val_loss"],
                 label="val_loss")
    ax_acc.plot(np.arange(0, nb_epoch),
                history.history["acc"],
                label="train_acc")
    ax_acc.plot(np.arange(0, nb_epoch),
                history.history["val_acc"],
                label="val_acc")
    fig.suptitle("Training Loss and Accuracy")
    fig.legend()
    plt.show()

    model.save(options.model)

    return 0
# Baseline training pass without augmentation.
classification.fit(predicts_train,
                   class_train,
                   batch_size=128,
                   epochs=5,
                   validation_data=(predicts_test, class_test))

result = classification.evaluate(predicts_test, class_test)

# Augmented training generator; the test generator applies no augmentation.
gerador_train = ImageDataGenerator(rotation_range=7,
                                   horizontal_flip=True,
                                   shear_range=0.2,
                                   height_shift_range=0.07,
                                   zoom_range=0.2)
gerador_test = ImageDataGenerator()

# BUG FIX: flow() takes (x, y).  The original paired images with images
# (predicts_train, predicts_test) and labels with labels
# (class_train, class_test), mismatching samples and targets.
base_train = gerador_train.flow(predicts_train, class_train, batch_size=128)
base_test = gerador_test.flow(predicts_test, class_test, batch_size=128)

classification.fit_generator(base_train,
                             steps_per_epoch=60000 / 128,
                             validation_data=base_test,
                             epochs=5,
                             validation_steps=10000 / 128)
classification.layers[0].get_weights()

print(result)

# NOTE(review): duplicated print below looks accidental; kept so the
# script's output is unchanged.
print(class_test)
print(class_test)

exit()
# NOTE(review): this fragment is mangled -- it mixes tabs and spaces and the
# enclosing class/function headers are not visible here; kept byte-for-byte.
    	def on_epoch_end(self, epoch, logs={}):
        	xt, yt, xtr, ytr = self.test_data

        	losst, acct = self.model.evaluate(xt, yt, verbose=0)
        	print('\nTesting loss: {}, acc: {}\n'.format(losst, acct))

		losstr, acctr = self.model.evaluate(xtr, ytr, verbose=0)
        	print('\nTraining loss: {}, acc: {}\n'.format(losstr, acctr))

		file = open('/work/vsankar/Project-Luna/Codes/training_out_file.txt','a') 
		# NOTE(review): the next line is missing the closing parenthesis for
		# file.write(...) -- a syntax error in the original; needs fixing.
 		file.write('\nepoch: {}'.format(epoch)	
		file.write('Testing loss: {}, acc: {}\n'.format(losst, acct)) 
		file.write('Training loss: {}, acc: {}\n'.format(losstr, acctr))

		file.close() 


    if not data_augmentation:
        print('Not using data augmentation.')
        
	

        # Plain training on the raw arrays; the TestCallback reports both
        # train and test metrics after every epoch.
        model.fit(X_train,Y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  validation_data=(X_test, Y_test),
                  shuffle=True,
                  verbose=1,
                  callbacks=[TestCallback((X_test, Y_test,X_train,Y_train))])
    else:
        print('Using real-time data augmentation.')

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=True,  # apply ZCA whitening
            rotation_range=90,  # randomly rotate images in the range (degrees, 0 to 180)
            #shear_range = 0.34,  # value in radians, equivalent to 20 deg
            #zoom_range = [1/1.6, 1.6],   #same as in NIPS 2015 paper.
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        # NOTE(review): zca_whitening=True *does* require datagen.fit(X_train),
        # contrary to the comment below -- verify before training.
        #datagen.fit(X_train) #Not required as it is Only required if featurewise_center or featurewise_std_normalization or zca_whitening.

        # fit the model on the batches generated by datagen.flow() and save the loss and acc data history in the hist variable
        
        # Checkpoint only the best model (by validation loss) each epoch.
        filepath = "/work/vsankar/Project-Luna/luna_weights_1.hdf5"
        save_model_per_epoch = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')

        hist = model.fit_generator(datagen.flow(X_train, Y_train,
                            batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch,
                            validation_data=(X_test, Y_test),
                            callbacks=[save_model_per_epoch])

        

        # serialize model to JSON
        
     
     
# Training hyper-parameters.
batch_size = 2
nb_classes = 2
nb_epoch = 300
data_augmentation = False

# Input image dimensions: the imgCLEF images are 96x96 greyscale.
img_rows, img_cols = 96, 96
img_channels = 1

# Kick off training with the configuration above.
run(batch_size, nb_classes, nb_epoch, data_augmentation,
    img_rows, img_cols, img_channels)
# model_architecture(img_rows, img_cols, img_channels, nb_classes)
                               patience=5,
                               min_lr=1e-5)
# Save only the weights of the best model, judged by validation accuracy.
model_checkpoint = ModelCheckpoint(weights_file,
                                   monitor="val_acc",
                                   save_best_only=True,
                                   save_weights_only=True,
                                   verbose=1)

# Log per-epoch metrics to CSV.
csv = CSVLogger("DenseNet-40-12-FashionMNIST.csv", separator=',')

callbacks = [lr_reducer, model_checkpoint, csv]
try:
    if augment == 'true':
        print("Training with data augmentation...")
        model.fit_generator(generator.flow(trainX,
                                           Y_train,
                                           batch_size=batch_size),
                            steps_per_epoch=len(trainX) // batch_size,
                            epochs=nb_epoch,
                            callbacks=callbacks,
                            validation_data=(testX, Y_test),
                            validation_steps=testX.shape[0] // batch_size,
                            verbose=1)
    else:
        print("Training without data augmentation...")
        # NOTE(review): this call is truncated in this fragment.
        model.fit(trainX,
                  Y_train,
                  batch_size=batch_size,
                  epochs=nb_epoch,
                  callbacks=callbacks,
                  validation_data=(testX, Y_test),
Beispiel #54
0
# Final 10-way softmax classifier head.
model.add(layers.Dense(10, activation='softmax'))

# Labels are integer class ids -- hence sparse_categorical_crossentropy.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=optimizers.adam(lr=0.0001),
              metrics=['acc'])

# Generators
train_datagen = ImageDataGenerator(rescale=1. / 255)

val_datagen = ImageDataGenerator(
    rescale=1. /
    255)  # We do not augment validation data, we only perform rescale

# Creating generators
train_generator = train_datagen.flow(X_train, y_train, batch_size=batch_size)
val_generator = val_datagen.flow(X_val, y_val, batch_size=batch_size)

# Training our model
history = model.fit_generator(train_generator,
                              steps_per_epoch=ntrain // batch_size,
                              epochs=14,
                              validation_data=val_generator,
                              validation_steps=nval // batch_size,
                              callbacks=[es, cp])

model.save(
    'Centered_BalancedTraining_BalancedValidation_sparseCategoricalCrossentropy_NoES_SimpleTrainGen_Model.HDF5'
)

# Plotting the training and validation curves
Beispiel #55
0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range=0.1,  # Randomly zoom image
    width_shift_range=
    0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=
    0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=False,  # randomly flip images
    vertical_flip=False)  # randomly flip images

# Train the model on augmented batches, with LR reduction and TensorBoard logs.
datagen.fit(X_train)
history = mnistModel.fit_generator(
    datagen.flow(X_train, Y_train, batch_size=batch_size),
    epochs=epochs,
    validation_data=(X_val, Y_val),
    verbose=2,
    steps_per_epoch=X_train.shape[0] // batch_size,
    callbacks=[learning_rate_reduction,
               TensorBoard(log_dir='./log')])

# Save the trained model.
mnistModel.save('./CNN_Mnist.h5')
print('模型保存成功!')

# Evaluate the trained model on the held-out test set.
score = mnistModel.evaluate(X_test, y_test, verbose=0)
print(score)
Beispiel #56
0
class Augmentation:
    """Generate augmented image/label pairs and record the labels as JSON.

    Each label (a flat list of x, y coordinates) is rendered as dots on a
    blank mask; the image and the mask are then pushed through two
    ImageDataGenerator instances that share the same random seed, so both
    receive identical spatial transforms.  The transformed dot positions
    are recovered from the augmented mask and written to a JSON file.
    """

    def __init__(self, sp_img_shape):
        """
        Args:
            sp_img_shape: (height, width[, channels]) that images are
                resized to before augmentation.
        """
        self.dics = []  # accumulates {"label": ..., "file": ...} records
        self.sp_img_shape = sp_img_shape
        self.resize = Preprocessor_resize(sp_img_shape[0], sp_img_shape[1])

        # Identical parameters for image and mask generators; combined with
        # a shared seed in generate() this keeps labels aligned with pixels.
        data_gen_args = dict(featurewise_center=True,
                             featurewise_std_normalization=True,
                             rotation_range=90,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             zoom_range=0.2)

        self.image_datagen = ImageDataGenerator(**data_gen_args)
        self.mask_datagen = ImageDataGenerator(**data_gen_args)

        # Before loading, make sure the augment json file is deleted.
        sdl = DatasetLoader(config, sp_img_shape)
        # Labels returned here are fit to the processed (resized) image.
        self.image_paths, self.labels = sdl.load()

    def generate(self, No_images=10):
        """Write up to *No_images* augmented images to
        config.Augmented_output and their label points to the JSON file."""
        for img_counter in range(len(self.labels)):
            path = self.image_paths[img_counter]
            label = self.labels[img_counter]
            if img_counter == No_images:
                break

            image = cv2.imread(path)
            image = self.resize.preprocess(image)
            image = img_to_array(image)
            image = np.expand_dims(image, axis=0)

            # Render the label points on a blank mask so they undergo the
            # same spatial transform as the image.
            mask = np.zeros(self.sp_img_shape, dtype='uint8')
            mask = self.draw_circle(mask, label)
            mask = img_to_array(mask)
            mask = np.expand_dims(mask, axis=0)

            # Identical seed => identical random transforms for both flows.
            seed = 1
            image_generator = self.image_datagen.flow(image,
                                                      batch_size=1,
                                                      seed=seed)
            mask_generator = self.mask_datagen.flow(mask,
                                                    batch_size=1,
                                                    seed=seed)

            self.write_augm_images(img_counter, image_generator)
            self.write_augm_masks(img_counter, mask_generator)

            if img_counter % 50 == 0:
                print("[INFO] 50 new augmented images added...")

        self.write_json(self.dics)  # save json file

    def draw_circle(self, mask, label):
        """Draw the label points on *mask* before augmentation.

        *label* is a flat [x0, y0, x1, y1, ...] list; 64 values = 32
        points.  (The original code had an unreachable
        ``if i == 64: break`` — i never reaches 64 with range(0, 64, 2) —
        which has been removed.)
        """
        for i in range(0, 64, 2):
            x = int(round(label[i]))
            y = int(round(label[i + 1]))
            cv2.circle(mask, (x, y), radius=1, color=255, thickness=-1)

        return mask

    def write_augm_images(self, img_counter, image_generator):
        """Write 5 augmented variants of one image to
        config.Augmented_output."""
        total = 0
        for img_gen in image_generator:

            img_filename = config.Augmented_output + "img_" + str(
                img_counter) + "_aug_%04i.jpg" % total
            cv2.imwrite(img_filename, img_gen[0, :, :, :])
            total += 1
            if total == 5:  # the flow is infinite; stop after 5 variants
                break

    def write_augm_masks(self, img_counter, mask_generator):
        """Recover label points from 5 augmented masks and queue them for
        the JSON dump (filenames mirror write_augm_images)."""
        total = 0
        for mask_gen in mask_generator:
            img_filename = config.Augmented_output + "img_" + str(
                img_counter) + "_aug_%04i.jpg" % total
            # cv2.imwrite(img_filename, mask_gen[0,:,:,:])
            label_flat = self.find_label_points(mask_gen)

            D = {"label": label_flat, "file": img_filename}
            self.dics.append(D)

            total += 1
            if total == 5:
                break

    def find_label_points(self, mask_gen):
        """Find the (x, y) pixel coordinates of the label points in the
        augmented mask and return them as a flat list."""
        mask_gen = mask_gen[0, :, :, 0].astype('uint8')
        (thresh, im_bw) = cv2.threshold(mask_gen, 128, 255,
                                        cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        # Collect white pixels, merging clusters closer than ~2.3 px.
        indices = []
        for i in range(self.sp_img_shape[0]):
            for j in range(self.sp_img_shape[1]):

                if im_bw[i, j] == 255:
                    if self.unique_label(j, i, indices):
                        indices.append([j, i])

        # Label points should be 32; drop extras / pad shortfalls randomly.
        # NOTE(review): randint(0, 32) is inclusive of 32; safe for the pop
        # branch because the list always holds > 32 items there, but if no
        # points were detected at all, indices[0] below would raise.
        extra_points = len(indices) - 32

        for i in range(np.abs(extra_points)):
            index = random.randint(0, 32)
            if extra_points > 0:
                indices.pop(index)
            else:
                indices.append(indices[0])

        label_flat = list(itertools.chain(*indices))
        return label_flat

    def unique_label(self, j, i, indices):
        """Return True when (j, i) is at least ~2.3 px away from every
        already-collected point (filters thresholding clusters)."""
        for px, py in indices:
            if np.sqrt((px - j) ** 2 + (py - i) ** 2) < 2.3:
                return False

        return True

    def write_json(self, dics):
        """Persist the label/filename records as a JSON file.

        Uses a context manager so the handle is closed even on error
        (the original open/write/close left the file open on exceptions).
        """
        with open(config.LABEL_AUGM_PATH, "w") as f:
            f.write(json.dumps(dics))

    def check_code(self):
        """Sanity-check the pipeline: reload one augmented image and draw
        its stored label points on it for visual inspection."""
        json_file = "D:\\NRC\\LaneDetection\\TuSimple\\training_dataset\\aug_lab.json"
        json_gts = [json.loads(line) for line in open(json_file)][0]

        image_paths = []
        labels = []
        for json_gt in json_gts:
            image_paths.append(json_gt['file'])
            labels.append(json_gt['label'])

        img_test = image_paths[20]
        img = cv2.imread(img_test)
        label_test = labels[20]

        # 64 flat values = 32 (x, y) points; the old trailing
        # "if i == 64: break" was unreachable and has been removed.
        for i in range(0, 64, 2):
            x = label_test[i]
            y = label_test[i + 1]
            cv2.circle(img, (x, y), radius=1, color=(0, 255, 0))

        cv2.imshow("", img)
label_csv[2] = label_csv[1].map(lambda x: label2num[x])
label_csv[0] = label_csv[0].map(lambda x: train_path + x)
label_csv = label_csv.drop( [1],  axis=1)

for index, row in label_csv.iterrows():
    if row[2] == 3:
        x = image.load_img( row[0] )
        x = np.array(x)
        x = x[np.newaxis,:,:,:]
        datagen.fit(x)
        num = 30
        i = 1
        if not os.path.exists('./%s/' % row[2]):
            os.makedirs('./%s/' % row[2])
        for x_batch in datagen.flow( x, batch_size = 2,
                                     save_to_dir = './%s/' % row[2],
                                     save_prefix = num,
                                     save_format = 'jpg'):
            num = num+1
            i += 1
            if i > 1:
                break
    if row[2] == 2:
        x = image.load_img( row[0] )
        x = np.array(x)
        x = x[np.newaxis,:,:,:]
        datagen.fit(x)
        num = 20
        i = 1
        if not os.path.exists('./%s/' % row[2]):
            os.makedirs('./%s/' % row[2])
        for x_batch in datagen.flow( x, batch_size = 2,
# !/usr/bin/python
# encoding: utf-8
# author: zhangtong

from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator

# Random-augmentation demo: useful when the dataset is small.
# Loads one sample image, applies random rotation/shift/shear/zoom/flip,
# and saves five augmented copies into the current directory.
train_datagen = ImageDataGenerator(
    # rescale=1./255,  # would scale pixel values into [0, 1]
    rotation_range=60,      # random rotation range, in degrees
    width_shift_range=0.3,  # horizontal shift, fraction of width
    height_shift_range=0.1, # vertical shift, fraction of height
    shear_range=0.2,        # random shear angle
    zoom_range=0.1,         # random zoom range
    horizontal_flip=True,   # randomly flip half the images horizontally
    fill_mode='nearest')

img_path = 'D:/1/project/tensorflow_NO1/wusun/train/Unsuspended/0610500200952810903.jpg'
source = image.load_img(img_path, target_size=(400, 400))
sample = image.img_to_array(source)
sample = sample.reshape((1, ) + sample.shape)  # add the batch dimension

saved = 0
for batch in train_datagen.flow(sample, batch_size=1):
    image.array_to_img(batch[0]).save('./{}.jpg'.format(saved))
    saved += 1
    if saved == 5:  # flow() is infinite — stop after five files
        break
Beispiel #59
0
                           zoom_range=0.5,        #zoom in-out 5%
                           width_shift_range=0.5, #shift 5%
                           height_shift_range=0.5,
                           horizontal_flip=False,  #randomly flip images
                           vertical_flip=False,
                           )
# Compute any statistics the augmenter needs from the training data
# (a no-op unless featurewise options were enabled when aug_data was built).
aug_data.fit(train_x)

# Make patient early stopping plus best-weights checkpointing.
print('[INFO] initiating tools for early stopping and saving')
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)
# Keep only the weights with the best validation accuracy.
mc = ModelCheckpoint(args['model'], monitor='val_acc', mode='max', verbose=1, save_best_only=True)

# Train the network on augmented batches.
print('[INFO] training network')
history = model.fit_generator(aug_data.flow(train_x, train_y, batch_size=BS),
                              validation_data=(val_x, val_y),
                              steps_per_epoch=len(train_x) // BS,
                              epochs=EPOCHS,
                              verbose=1,
                              callbacks=[es, mc])

# Save the final network (the checkpoint above may already hold the best one).
print('[INFO] saving network...')
model.save(args['model'])

# Save the label binarizer alongside the model.
print('[INFO] saving label binarizer')
# Context manager closes the file even if pickling fails (the original
# open/write/close left the handle open on error).
with open(args['labels'], 'wb') as f:
    pickle.dump(lb, f)
Beispiel #60
0
def fitModel(model,
             net_settings,
             X_train,
             y_train,
             X_val,
             y_val,
             save_history_path='',
             batch_size=1,
             epochs=2,
             data_augmentation=1,
             verbose=1):
    """Normalize the input patches, train *model*, and save training-curve
    plots and a CSV log under *save_history_path*.

    Args:
        model: compiled Keras model to train.
        net_settings: dict providing 'batch_size', 'epochs' and 'verbose'
            actually used for training (the standalone batch_size/epochs/
            verbose parameters are kept for backward compatibility but are
            not read here).
        X_train, y_train: training patches and labels.
        X_val, y_val: validation patches and labels.
        save_history_path: directory for 'train_stats.log' and the PNGs.
        data_augmentation: truthy -> train with real-time augmentation via
            ImageDataGenerator; falsy -> plain model.fit.

    Returns:
        The Keras History object produced by training.
    """

    def _plot_history(keys, legend, title, ylabel, filename):
        # Plot the given history series and save the figure; extracted to
        # remove the four near-identical plotting stanzas of the original.
        plt.figure()
        for key in keys:
            plt.plot(history.history[key])
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xlabel('epoch')
        plt.legend(legend, loc='upper left')
        plt.savefig(os.path.join(save_history_path, filename))
        plt.close()

    # Normalize every patch against the first training patch, then apply
    # the project's standard preprocessing.
    normalizer = get_normalizer(np.uint8(X_train[0]))
    X_train = [normalize_patch(np.uint8(x), normalizer) for x in X_train]
    X_val = [normalize_patch(np.uint8(x), normalizer) for x in X_val]

    X_train = standardPreprocess(X_train)
    X_val = standardPreprocess(X_val)

    if not data_augmentation:
        history = model.fit(X_train,
                            y_train,
                            batch_size=net_settings['batch_size'],
                            epochs=net_settings['epochs'],
                            verbose=net_settings['verbose'],
                            validation_data=(X_val, y_val),
                            callbacks=[
                                SGDLearningRateTracker(),
                                CSVLogger(
                                    os.path.join(save_history_path,
                                                 'train_stats.log'))
                            ])

        print('[models] Training history keys stored: ',
              history.history.keys())
        _plot_history(['acc', 'val_acc'], ['train', 'test'],
                      'Model Accuracy', 'accuracy', 'trainingAccuracy.png')
        _plot_history(['loss', 'val_loss'], ['train', 'test'],
                      'Model Loss', 'loss', 'trainingLoss.png')
        return history

    else:
        print('[models] [NEW!] Using real-time data augmentation.')
        # NOTE: contrast_stretching / histogram_equalization / random_* are
        # not standard Keras arguments — this relies on a customized
        # ImageDataGenerator imported elsewhere in the project.
        datagen = ImageDataGenerator(
            contrast_stretching=False,
            histogram_equalization=False,
            random_hue=True,
            random_saturation=False,
            random_brightness=True,
            random_contrast=False,
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide by dataset std
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0.0,  # random rotation range, degrees
            width_shift_range=0.0,  # horizontal shift, fraction of width
            height_shift_range=0.0,  # vertical shift, fraction of height
            horizontal_flip=True,  # randomly flip images
            vertical_flip=True)  # randomly flip images

        # Compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied).
        datagen.fit(X_train)
        # NOTE(review): raises if 'adata' already exists — confirm whether
        # re-runs are expected before switching to os.makedirs(exist_ok=True).
        os.mkdir(os.path.join(save_history_path, 'adata'))

        # Fit on batches generated on the fly; steps_per_epoch gives three
        # passes over the training set per epoch.
        history = model.fit_generator(
            datagen.flow(X_train,
                         y_train,
                         batch_size=net_settings['batch_size']),
            epochs=net_settings['epochs'],
            steps_per_epoch=len(X_train) // net_settings['batch_size'] * 3,
            validation_data=(X_val, y_val),
            callbacks=[
                SGDLearningRateTracker(),
                CSVLogger(os.path.join(save_history_path, 'train_stats.log'))
            ])

        print(history.history.keys())
        _plot_history(['acc', 'val_acc'], ['train', 'val'],
                      'Model Accuracy', 'accuracy', 'trainingAccuracy.png')
        _plot_history(['loss', 'val_loss'], ['train', 'val'],
                      'Model Loss', 'loss', 'trainingLoss.png')
        _plot_history(['loss'], ['train'],
                      'Model Loss', 'loss', 'ONLYtrainingLoss.png')

        model.save_weights('model.h5')
        # Fixed: the original used Python-2 print-statement syntax here,
        # which is a SyntaxError under Python 3 (used elsewhere in this file).
        print('Model saved to disk')

    return history