Example #1
0
    def train_top_model(self, train_data_dir, val_data_dir, epochs=defaults['epochs'],
                        batch_size=defaults['batch_size']):
        """Train the classifier head on pre-computed bottleneck features.

        The directory generators are used only for class metadata (number of
        classes and the per-sample label array); the actual model inputs are
        the cached bottleneck feature tensors loaded from disk.

        NOTE(review): this assumes the cached features were produced in the
        same sample order the generators report via ``.classes`` (i.e. the
        generators were built with shuffling disabled) — confirm in
        ``util.get_generator``.
        """
        train_gen = util.get_generator(train_data_dir, self.img_width, self.img_height, batch_size)
        val_gen = util.get_generator(val_data_dir, self.img_width, self.img_height, batch_size)
        n_classes = train_gen.num_classes

        # One-hot encode the integer class indices reported by each generator.
        y_train = to_categorical(train_gen.classes, num_classes=n_classes)
        y_val = to_categorical(val_gen.classes, num_classes=n_classes)

        # Inputs: bottleneck features computed earlier by the frozen base model.
        x_train = util.load_bottleneck_features(self.train_bottleneck_features_path)
        x_val = util.load_bottleneck_features(self.val_bottleneck_features_path)

        # Feature shape (minus the batch axis) defines the head's input shape.
        model = self.get_top_model(x_train.shape[1:], n_classes)
        model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
        util.save_model_plot(self.top_model_plot_path, model)

        history = model.fit(x_train, y_train,
                            epochs=epochs,
                            batch_size=batch_size,
                            validation_data=(x_val, y_val))

        # Persist the trained head and its training curves, then report
        # loss/accuracy on the validation set.
        model.save(self.model_path)
        util.save_history(self.history_path, history)
        util.eval_model_loss_acc(model, x_val, y_val, batch_size)
Example #2
0
    def fine_tune(self, train_data_dir, val_data_dir, epochs, batch_size):
        """Fine-tune the top of the base network together with the trained head.

        Unfreezes the base model from layer ``'block5_conv1'`` onward (the
        name suggests a VGG-style base — confirm against
        ``self.get_base_model``), stacks the pre-trained (frozen) top model
        on it, and trains the combination with a low-learning-rate SGD so the
        unfrozen convolutional weights are only gently adjusted.

        Args:
            train_data_dir: directory of training images, one subdir per class.
            val_data_dir: directory of validation images, one subdir per class.
            epochs: number of fine-tuning epochs.
            batch_size: generator batch size.
        """
        train_generator = util.get_categorical_generator(
            train_data_dir, self.img_width, self.img_height, batch_size)
        val_generator = util.get_categorical_generator(val_data_dir,
                                                       self.img_width,
                                                       self.img_height,
                                                       batch_size)

        # Freeze everything up to (but not including) block5_conv1; train the
        # rest. base_model.trainable must be True for per-layer flags to apply.
        base_model = self.get_base_model()
        base_model.trainable = True
        set_trainable = False
        for layer in base_model.layers:
            if layer.name == 'block5_conv1':
                set_trainable = True
            layer.trainable = set_trainable

        # The head was trained separately on bottleneck features; load its
        # weights and keep it frozen while the unfrozen conv block adapts.
        top_model = self.get_top_model(base_model.output_shape[1:],
                                       train_generator.num_classes)
        top_model.load_weights(self.top_model_weights_path)
        top_model.trainable = False

        model = Sequential()
        model.add(base_model)
        model.add(top_model)
        # Low learning rate: large updates would destroy the pre-trained
        # convolutional features.
        model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        util.save_model_plot(self.model_plot_path, model)

        # BUGFIX: steps_per_epoch/validation_steps must be integers, but true
        # division (`samples / batch_size`) yields a float. Ceiling division
        # ensures the final partial batch is still covered each epoch.
        train_steps = (train_generator.samples + batch_size - 1) // batch_size
        val_steps = (val_generator.samples + batch_size - 1) // batch_size

        history = model.fit_generator(
            train_generator,
            verbose=1,
            steps_per_epoch=train_steps,
            epochs=epochs,
            validation_data=val_generator,
            validation_steps=val_steps)
        model.save_weights(self.model_weights_path)
        model.save(self.model_path)
        util.save_history(self.history_path, history)