    # The head of this snippet was cut off; the constructor below is a
    # reconstruction (any earlier arguments are unknown).
    datagen = ImageDataGenerator(
        width_shift_range=0,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=False,  # randomly flip images horizontally
        vertical_flip=False)  # randomly flip images vertically

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
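    # Note: fit() only computes the statistics the generator is configured to
    # use; feature-wise normalization / ZCA would need flags like these
    # (illustrative settings, not the ones used in this example):
    #     ImageDataGenerator(featurewise_center=True,
    #                        featurewise_std_normalization=True,
    #                        zca_whitening=True)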

    total_samples_train = getNumSamples(variants[num_variant][0][0:4] + '.h5')
    model.fit_generator(generate_arrays(TRAIN_SET,
                                        batch_size=BATCH_SIZE,
                                        max_sample=total_samples_train,
                                        new_size=INPUT_FRAME_SIZE),
                        steps_per_epoch=BATCH_SIZE,
                        epochs=EPOCHS,
                        verbose=2,
                        callbacks=[best_model],
                        validation_data=(x_test, y_test))
    print("Finished fitting model")
    score = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    print('All metrics', score)

    x_train = HDF5Matrix(TRAIN_SET, 'data')
    y_train = HDF5Matrix(TRAIN_SET, 'labels')

    res = model.predict(x_test)
    res_label = np.argmax(res, 1)
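
    # The predicted labels can be checked against the ground truth directly;
    # a minimal sketch, assuming y_test is a one-hot array:
    true_label = np.argmax(y_test, 1)
    print('Manual test accuracy:', np.mean(res_label == true_label))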
Example #2
def main():
    # Parameters
    if len(sys.argv) == 4:
        superclass = sys.argv[1]
        imgmove = sys.argv[2]
        if imgmove == 'False':
            imgmove = False
        else:
            imgmove = True
        lr = float(sys.argv[3])
    else:
        print('Parameter error: expected <superclass> <imgmove> <lr>')
        exit()

    # The constants
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    trainpath = 'trainval_' + superclass + '/train'
    valpath = 'trainval_' + superclass + '/val'

    if not os.path.exists('model'):
        os.mkdir('model')

    # Train/validation data preparation
    if imgmove:
        os.mkdir('trainval_' + superclass)
        os.mkdir(trainpath)
        os.mkdir(valpath)
        sourcepath = ('../zsl_' + testName[superclass[0]] + '_' + superclass.lower()
                      + '_train_' + date + '_crop'
                      + '/zsl_' + testName[superclass[0]] + '_' + superclass.lower()
                      + '_train_images_' + date)
        categories = os.listdir(sourcepath)
        for eachclass in categories:
            if eachclass[0] == superclass[0]:
                print(eachclass)
                os.mkdir(trainpath + '/' + eachclass)
                os.mkdir(valpath + '/' + eachclass)
                imgs = os.listdir(sourcepath + '/' + eachclass)
                idx = 0
                for im in imgs:
                    # every 8th image goes to the validation split (~12.5%)
                    if idx % 8 == 0:
                        shutil.copyfile(
                            sourcepath + '/' + eachclass + '/' + im,
                            valpath + '/' + eachclass + '/' + im)
                    else:
                        shutil.copyfile(
                            sourcepath + '/' + eachclass + '/' + im,
                            trainpath + '/' + eachclass + '/' + im)
                    idx += 1

    # Train and validation ImageDataGenerator
    batchsize = 32

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       rotation_range=15,
                                       width_shift_range=5,
                                       height_shift_range=5,
                                       horizontal_flip=True)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(trainpath,
                                                        target_size=(72, 72),
                                                        batch_size=batchsize)

    valid_generator = test_datagen.flow_from_directory(valpath,
                                                       target_size=(72, 72),
                                                       batch_size=batchsize)

    # Train Xception
    model = Xception(include_top=True,
                     weights=None,
                     input_tensor=None,
                     input_shape=(72, 72, 3),
                     pooling=None,
                     classes=classNum[superclass[0]])
    model.summary()
    model.compile(optimizer=SGD(lr=lr, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    steps_per_epoch = int(train_generator.n / batchsize)
    validation_steps = int(valid_generator.n / batchsize)

    weightname = 'model/mobile_' + superclass + '_wgt.h5'

    if os.path.exists(weightname):
        model.load_weights(weightname)

    checkpointer = ModelCheckpoint(weightname,
                                   monitor='val_loss',
                                   verbose=0,
                                   save_best_only=True,
                                   save_weights_only=True,
                                   mode='auto',
                                   period=1)
    model.fit_generator(train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=100,
                        validation_data=valid_generator,
                        validation_steps=validation_steps,
                        callbacks=[checkpointer])
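
    # Note: int(n / batchsize) above drops any trailing partial batch each
    # epoch; a ceil-based variant (a sketch, assuming Python 3 division):
    #     steps_per_epoch = math.ceil(train_generator.n / batchsize)
    #     validation_steps = math.ceil(valid_generator.n / batchsize)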
Example #3
# Snippet head truncated; this opening is reconstructed (the filepath and
# monitor arguments are assumptions)
checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True,
    save_weights_only=False,
    mode='auto'
)

early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=7)
#history = model.fit(
#    X_train,Y_train,
#    epochs=5,
#    batch_size=BATCH_SIZE,
#    validation_data=(X_val, Y_val),
#    callbacks=[checkpoint,kappa_metrics,early_stopping]
#)
history = model.fit_generator(
    data_generator,
    steps_per_epoch=X_train.shape[0] // BATCH_SIZE,
    epochs=50,
    validation_data=(X_val, Y_val),
    callbacks=[checkpoint, early_stopping]
)


with open(historyfilename, 'w') as f:
    json.dump(history.history, f)

history_df = pd.DataFrame(history.history)
history_df.to_csv('history.csv')
history_df[['loss', 'val_loss']].plot()
history_df[['acc', 'val_acc']].plot()
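
# To keep both plots as files as well; a minimal sketch (the filenames are
# illustrative, and matplotlib is assumed to be importable as plt):
import matplotlib.pyplot as plt
ax = history_df[['loss', 'val_loss']].plot()
ax.get_figure().savefig('loss.png')
ax = history_df[['acc', 'val_acc']].plot()
ax.get_figure().savefig('accuracy.png')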
Example #4
model = Model(base_model.input, x)

model.summary()

for layer in base_model.layers:
    layer.trainable = False

model.summary()

model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])
model.fit_generator(train_gen,
                    validation_data=test_gen,
                    steps_per_epoch=947 // 64,
                    validation_steps=406 // 64,
                    epochs=2)

###################################################################

from matplotlib.image import imread

train_arr = []
label_arr = []

# Collect the full paths of all training images under `root`
for path, subdirs, files in os.walk(root):
    for name in files:
        full_path = os.path.join(path, name)
        train_arr.append(full_path)

# A separate step-decay learning-rate schedule follows; its head was lost in
# extraction. The signature, constants, and branch condition below are
# reconstructed assumptions (only the two lrate formulas and the return
# survive from the original).
def step_decay(epoch):
    initial_lrate = 0.005  # assumed starting rate, matching the Adam lr below
    drop = 0.5             # assumed decay factor
    epochs_drop = 10.0     # assumed epochs between drops
    if epoch == 0:
        lrate = initial_lrate * (drop ** np.floor(epoch/epochs_drop))
    else:
        lrate = initial_lrate * (drop ** np.floor((epoch-1)/epochs_drop))

    print(lrate)
    return lrate

lrate = LearningRateScheduler(step_decay)

adam = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0, amsgrad=False)

model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

csv_logger = CSVLogger('./training_Xception.log', append=True)
checkpointer = ModelCheckpoint(filepath='./weights_Xception.hdf5',
                               verbose=1,
                               save_best_only=True,
                               monitor='val_acc')
callbacks_list = [checkpointer, csv_logger, lrate]

model.fit_generator(traning_set,
                    epochs=100,
                    steps_per_epoch=len(x_train) // 50,
                    validation_steps=len(x_valid) // 50,
                    verbose=1,
                    validation_data=validation_set,
                    callbacks=callbacks_list)

score = model.evaluate(x_valid, y_valid, verbose=0)

print(score)

Example #6

def top_5(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=5)
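
# Note: top_k_categorical_accuracy already defaults to k=5, so this wrapper
# mainly gives the metric a readable name in the training logs; any prediction
# that ranks the true class among its five largest scores counts as correct.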


if __name__ == '__main__':
    batch_size = 16

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)

    train_generator = train_datagen.flow_from_directory(
        'data',
        target_size=(224, 224),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True)

    model = Xception(include_top=True, weights=None, classes=10)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', top_5])

    model.fit_generator(train_generator,
                        steps_per_epoch=125 // batch_size,
                        epochs=10)
    model.save('xception10.h5')
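
    # Because the model was compiled with the custom top_5 metric, reloading
    # it later requires passing custom_objects; a minimal sketch:
    from keras.models import load_model
    reloaded = load_model('xception10.h5', custom_objects={'top_5': top_5})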
Example #7

# Snippet head truncated; the list opening below is reconstructed (the list
# name and the monitor argument are assumptions)
callbacks = [ReduceLROnPlateau(monitor='val_loss',
                               factor=0.1,
                               patience=5,
                               verbose=1,
                               epsilon=0.0001,
                               mode='min'),
             ModelCheckpoint(monitor='val_loss',
                             filepath=root_dir + 'weights/' + weight_name,
                             save_best_only=True,
                             save_weights_only=True,
                             mode='min') ,
             TQDMCallback()]

history = model.fit_generator(generator=train_generator(batch_size),
                              steps_per_epoch=1,#int(np.ceil(train_df.shape[0]/batch_size)/300),#344,
                              epochs=60,
                              verbose=2,
                              callbacks=callbacks,
                              validation_data=valid_generator(batch_size),
                              validation_steps=1)#int(np.ceil(valid_df.shape[0]/batch_size))*20)


model.load_weights(root_dir + 'weights/'+ weight_name)

test_paths = glob(os.path.join(root_dir , 'test/audio/*wav'))


def test_generator(test_batch_size):
    while True:
        for start in range(0, len(test_paths), test_batch_size):
            x_batch = []
            end = min(start + test_batch_size, len(test_paths))
Example #8
model_name = os.path.join(args.model_dir,
                          "doc_detect.{epoch:02d}-{val_loss:.2f}.hdf5")
checkpointer = ModelCheckpoint(model_name,
                               monitor='val_acc',
                               verbose=1,
                               save_best_only=True,
                               save_weights_only=False,
                               mode='auto',
                               period=1)

train_datagen = ImageDataGenerator(rescale=1. / 255)

test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(args.train_dir,
                                                    target_size=(img_w, img_w),
                                                    batch_size=args.batch_size)

validation_generator = test_datagen.flow_from_directory(
    args.test_dir, target_size=(img_w, img_w), batch_size=args.batch_size)

print(train_generator.class_indices)

model.fit_generator(train_generator,
                    steps_per_epoch=train_size // args.batch_size,
                    epochs=args.epochs,
                    validation_data=validation_generator,
                    validation_steps=test_size // args.batch_size,
                    workers=7,
                    callbacks=[checkpointer])
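
# With save_best_only=True a new checkpoint file is written only when the
# monitored metric improves, so the highest-epoch file is also the best one;
# a sketch for reloading it afterwards (the glob pattern mirrors model_name):
from glob import glob
from keras.models import load_model
best_path = sorted(glob(os.path.join(args.model_dir, 'doc_detect.*.hdf5')))[-1]
model = load_model(best_path)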
Example #9

def train_with_sift_features(batch_size, input_shape, x_train, y_train,
                             x_valid, y_valid, sift_features_train,
                             sift_features_valid, model_name, num_workers,
                             resume):
    print('Found {} images belonging to {} classes'.format(len(x_train), 128))
    print('Found {} images belonging to {} classes'.format(len(x_valid), 128))
    train_generator = AugmentedDatasetWithSiftFeatures(x_train,
                                                       y_train,
                                                       sift_features_train,
                                                       batch_size=batch_size,
                                                       input_shape=input_shape)
    valid_generator = DatasetWithSiftFeatures(x_valid,
                                              y_valid,
                                              sift_features_valid,
                                              batch_size=batch_size,
                                              input_shape=input_shape)
    classes = np.unique(y_train)
    class_weight = compute_class_weight('balanced', classes, y_train)
    # Map each class label to its weight; zip keeps the pairing correct even
    # if the labels are not a contiguous 0..n-1 range
    class_weight_dict = dict(zip(classes, class_weight))

    filepath = 'checkpoint/{}/sift_iter1.hdf5'.format(model_name)
    save_best = ModelCheckpoint(filepath=filepath,
                                verbose=1,
                                monitor='val_acc',
                                save_best_only=True,
                                mode='max')
    save_on_train_end = ModelCheckpoint(filepath=filepath,
                                        verbose=1,
                                        monitor='val_acc',
                                        period=args.epochs)
    reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                  factor=0.2,
                                  patience=2,
                                  verbose=1)
    callbacks = [save_best, save_on_train_end, reduce_lr]

    if resume == 'True':
        print('\nResume training from the last checkpoint')
        model = load_model(filepath)
        trainable_count = int(
            np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
        print('Trainable params: {:,}'.format(trainable_count))
        model.fit_generator(generator=train_generator,
                            epochs=args.epochs,
                            callbacks=callbacks,
                            validation_data=valid_generator,
                            class_weight=class_weight_dict,
                            workers=num_workers)
    else:
        model = Xception(include_top=False, pooling='max')
        sift_features = Input(shape=(512, ))
        x = Concatenate()([model.layers[-1].output, sift_features])
        x = Dense(units=128,
                  activation='linear',
                  name='predictions',
                  kernel_regularizer=regularizers.l2(0.0001))(x)
        model = Model([model.layers[0].input, sift_features], x)

        for layer in model.layers[:-1]:
            layer.trainable = False

        model.compile(optimizer=Adam(lr=0.001),
                      loss='categorical_hinge',
                      metrics=['acc'])
        model.fit_generator(generator=train_generator,
                            epochs=5,
                            callbacks=callbacks,
                            validation_data=valid_generator,
                            class_weight=class_weight_dict,
                            workers=num_workers)
        K.clear_session()

        print("\nFine-tune the network")
        model = load_model(filepath)
        for layer in model.layers:
            layer.trainable = True
        trainable_count = int(
            np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
        print('Trainable params: {:,}'.format(trainable_count))
        model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                      loss='categorical_hinge',
                      metrics=['acc'])
        model.fit_generator(generator=train_generator,
                            epochs=30,
                            callbacks=callbacks,
                            validation_data=valid_generator,
                            class_weight=class_weight_dict,
                            workers=num_workers)
        K.clear_session()
Example #10
        # Snippet head truncated; `datagen` is presumably an ImageDataGenerator
        # with only its final argument surviving
        datagen = ImageDataGenerator(dtype='float32')
        datagen.fit(x_train)
        """ Training loop """

        t0 = time.time()

        best_acc = 1e-10
        for epoch in range(nb_epoch):
            t1 = time.time()
            print("### Model Fitting.. ###")
            print('epoch = {} / {}'.format(epoch + 1, nb_epoch))
            print('check point = {}'.format(epoch))

            hist = model.fit_generator(datagen.flow(x_train,
                                                    y_train,
                                                    batch_size=batch_size),
                                       steps_per_epoch=x_train.shape[0] //
                                       batch_size,
                                       validation_data=(x_val, y_val))

            t2 = time.time()
            print(hist.history)
            print('Training time for one epoch : %.1f' % ((t2 - t1)))
            train_acc = hist.history['categorical_accuracy'][0]
            train_loss = hist.history['loss'][0]
            val_acc = hist.history['val_categorical_accuracy'][0]
            val_loss = hist.history['val_loss'][0]

            # Save a checkpoint whenever validation accuracy improves
            if val_acc > best_acc:
                best_acc = val_acc
                nsml.save('best')
Example #11
def fine_tune(name,
              name_ext,
              lr=1e-4,
              reduce_lr_factor=0.1,
              reduce_lr_patience=3,
              epochs=10,
              batch_size=32,
              l2_reg=0,
              dropout_p=0.5,
              num_freeze_layers=0,
              save_best_only=True,
              loss_stop_val=0.00001):

    data_info = load_organized_data_info(imgs_dim=HEIGHT, name=name)
    tr_datagen = ImageDataGenerator(
        preprocessing_function=preprocess_input,
        rotation_range=180,
        vertical_flip=True,
        horizontal_flip=True,
        # width_shift_range=0.1,
        # height_shift_range=0.1,
        # zoom_range=0.1,
        # shear_range=0.3,
        # fill_mode='reflect'
    )
    val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

    def dir_datagen(dir_, gen):
        return gen.flow_from_directory(directory=dir_,
                                       target_size=(HEIGHT, WIDTH),
                                       class_mode='categorical',
                                       batch_size=batch_size,
                                       shuffle=True)

    dir_tr, num_tr = data_info['dir_tr'], data_info['num_tr']
    dir_val, num_val = data_info['dir_val'], data_info['num_val']

    top_classifier_file = join(MODELS_DIR, TOP_CLASSIFIER_FILE.format(name))
    model_file = join(MODELS_DIR, MODEL_FILE.format(name, name_ext))

    model = Xception(weights='imagenet', include_top=False, pooling='avg')
    top_classifier = _top_classifier(l2_reg=l2_reg,
                                     dropout_p=dropout_p,
                                     input_shape=(2048, ))
    top_classifier.load_weights(top_classifier_file)
    model = Model(inputs=model.input, outputs=top_classifier(model.output))
    model.compile(Adam(lr=lr), loss='categorical_crossentropy')

    # model has 134 layers
    for layer in model.layers[:num_freeze_layers]:
        layer.trainable = False

    log_dir = join(EXPERIMENTS_DIR, 'xception_fine_tuned_{:s}'.format(name))
    callbacks = [
        EarlyStoppingByLoss(monitor='loss', value=loss_stop_val),
        ReduceLROnPlateau(factor=reduce_lr_factor,
                          patience=reduce_lr_patience),
        ModelCheckpoint(model_file, save_best_only=save_best_only),
        TensorBoard(log_dir=log_dir, write_graph=False)
    ]

    model.fit_generator(generator=dir_datagen(dir_tr, tr_datagen),
                        steps_per_epoch=ceil(num_tr / batch_size),
                        epochs=epochs,
                        validation_data=dir_datagen(dir_val, val_datagen),
                        validation_steps=ceil(num_val / batch_size),
                        callbacks=callbacks)
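
# A hypothetical invocation of fine_tune (argument values are illustrative,
# not taken from the original source):
#     fine_tune(name='crops', name_ext='_v1', lr=1e-4, epochs=10,
#               num_freeze_layers=100)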
Example #12
class FaceEmotion:
    """Class for recognizing emotion using default Deep Learning Xception model"""
    def __init__(self, input_shape=(200, 200, 3)):  # TODO: Check input_shape
        """Initialize main parameters of FaceEmotion class
        :param input_shape: Input images shape
        """
        self.input_shape = input_shape

        self.model = Xception(include_top=False, input_shape=input_shape)
        self.model = self.add_classificator(self.model)

    @staticmethod
    def add_classificator(base_model):
        """Add a classificator to a model
        :param base_model: Keras model object
        """
        layer = base_model.output
        layer = GlobalAveragePooling2D(name="classificator_block_pool")(layer)
        layer = Dense(512,
                      activation='relu',
                      name='classificator_block_dense_1')(layer)
        layer = Dense(64,
                      activation='relu',
                      name='classificator_block_dense_2')(layer)
        layer = Dense(6, activation='relu',
                      name='classificator_block_dense_3')(layer)

        model = Model(inputs=base_model.input, outputs=layer)

        # freeze early layers
        for l in base_model.layers:
            l.trainable = False

        model.compile(optimizer='sgd',
                      loss='mean_squared_error',
                      metrics=['accuracy'])

        return model

    def model_architecture(self, filename=None):
        """Show model architecture and save it to file
        :param filename: Path to the model architecture image file
        """
        list_summary = []
        self.model.summary(print_fn=lambda x: list_summary.append(x))
        summary = "\n".join(list_summary)

        if filename:
            with open(filename + '.txt', 'w') as f:
                f.write(summary)

            from keras.utils import plot_model
            plot_model(self.model, filename + '.jpg')

        return summary

    # noinspection PyShadowingNames
    def train(self, generator, epochs, steps_per_epoch):
        """Train model
        :param generator: Data generator compatible with Keras model
        :param epochs: Number of epochs to train model
        :param steps_per_epoch: Number of generator batches drawn per epoch
        """
        stopper = EarlyStopping(patience=100)  # , restore_best_weights=True)
        save_dir = "training/e{epoch:02d}-a{acc:.2f}.ckpt"
        saver = ModelCheckpoint(save_dir)  # , save_best_only=True)

        self.model.fit_generator(generator,
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 callbacks=[stopper, saver])

    def get_emotion(self, image):
        """Predict an emotion vector for each face detected in `image`"""
        emotions = []
        for top, right, bottom, left in face_locations(image):
            face = image[top:bottom, left:right]
            # Resize the crop to the model's input size and add a batch axis
            # (assumes cv2 and numpy as np are importable)
            face = cv2.resize(face, self.input_shape[:2])
            emotion = self.model.predict(np.expand_dims(face, axis=0))
            emotions.append(emotion)
        return emotions
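
# A hypothetical usage sketch for FaceEmotion (the generator and image below
# are illustrative assumptions, not part of the original source):
#     fe = FaceEmotion()
#     fe.train(generator=face_batch_gen, epochs=10, steps_per_epoch=100)
#     emotions = fe.get_emotion(rgb_image)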