Code example #1
def _cleanup():
    # remove unnecessary weights files
    files_to_remove = [
        config.bf_train_path, config.bf_valid_path,
        config.get_top_model_weights_path()
    ]
    for f in files_to_remove:
        os.remove(f)

    # move the final model weights into place for later use
    os.rename(config.get_fine_tuned_weights_path(checkpoint=True),
              config.get_fine_tuned_weights_path())
Code example #2
    def _fine_tuning(self):
        self.freeze_top_layers()

        self.model.compile(
            loss='categorical_crossentropy',
            optimizer=Adam(lr=1e-5),
            metrics=['accuracy'])
        self.model.summary()

        train_data = self.get_train_datagen(rotation_range=30., shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
        callbacks = self.get_callbacks(config.get_fine_tuned_weights_path(), patience=self.fine_tuning_patience)

        if util.is_keras2():
            self.model.fit_generator(
                train_data,
                steps_per_epoch=config.nb_train_samples / float(self.batch_size),
                epochs=self.nb_epoch,
                validation_data=self.get_validation_datagen(),
                validation_steps=config.nb_validation_samples / float(self.batch_size),
                callbacks=callbacks,
                class_weight=self.class_weight)
        else:
            self.model.fit_generator(
                train_data,
                samples_per_epoch=config.nb_train_samples,
                nb_epoch=self.nb_epoch,
                validation_data=self.get_validation_datagen(),
                nb_val_samples=config.nb_validation_samples,
                callbacks=callbacks,
                class_weight=self.class_weight)

        self.model.save(config.get_model_path())
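The util.is_keras2() branch exists because Keras 2 renamed fit_generator's arguments: samples_per_epoch became steps_per_epoch, nb_epoch became epochs, and nb_val_samples became validation_steps. The helper itself is not shown; a plausible sketch, assuming it simply inspects the installed version:

import keras

def is_keras2():
    # The Keras 2.x API renamed the fit_generator keyword arguments,
    # so the examples on this page branch on the major version.
    return keras.__version__.startswith('2.')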
Code example #3
    def load(self):
        print("Creating model")
        self.load_classes()
        self._create()
        self.model.load_weights(config.get_fine_tuned_weights_path())
        self.loaded_model = True
        return self.model
Code example #4
    def _fine_tuning(self):
        self.freeze_top_layers()

        self.model.compile(
            loss='categorical_crossentropy',
            optimizer=Adam(lr=1e-5),
            metrics=['accuracy'])
        self.model.summary()
        plot_model(self.model, to_file='model1.png', show_shapes=True)
        self.model.fit_generator(
            self.get_train_datagen(rotation_range=15.,
                                   shear_range=0.1,
                                   zoom_range=0.2,
                                   width_shift_range=0.05,
                                   height_shift_range=0.05,
                                   horizontal_flip=True,
                                   preprocessing_function=self.preprocess_input),
            samples_per_epoch=config.nb_train_samples,
            nb_epoch=self.nb_epoch,
            validation_data=self.get_validation_datagen(preprocessing_function=self.preprocess_input),
            nb_val_samples=config.nb_validation_samples,
            callbacks=self.get_callbacks(config.get_fine_tuned_weights_path(),os.path.join(os.path.abspath("."),"checkpoint"),patience=self.fine_tuning_patience),
            class_weight=self.class_weight)

        self.model.save(config.get_model_path())
Code example #5
    def _fine_tuning(self):
        self.freeze_top_layers()

        self.model.compile(loss='categorical_crossentropy',
                           optimizer=SGD(lr=1e-4,
                                         decay=1e-6,
                                         momentum=0.9,
                                         nesterov=True),
                           metrics=['accuracy'])

        self.model.fit_generator(
            self.get_train_datagen(
                rotation_range=30.,
                shear_range=0.2,
                zoom_range=0.2,
                horizontal_flip=True,
                preprocessing_function=self.preprocess_input),
            samples_per_epoch=config.nb_train_samples,
            nb_epoch=self.nb_epoch,
            validation_data=self.get_validation_datagen(
                preprocessing_function=self.preprocess_input),
            nb_val_samples=config.nb_validation_samples,
            callbacks=self.get_callbacks(config.get_fine_tuned_weights_path(),
                                         patience=self.fine_tuning_patience),
            class_weight=self.class_weight)

        self.model.save(config.get_model_path())
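Most of these _fine_tuning methods start with self.freeze_top_layers(), which is also not shown. Code example #9 below performs the equivalent step explicitly for InceptionV3 by freezing the first 172 layers; a generic sketch along those lines, where the freeze_layers_number attribute is an assumption:

    def freeze_top_layers(self):
        # Hypothetical sketch: freeze the lower layers and leave the rest
        # trainable, mirroring the explicit 172-layer split in example #9.
        if self.freeze_layers_number:
            for layer in self.model.layers[:self.freeze_layers_number]:
                layer.trainable = False
            for layer in self.model.layers[self.freeze_layers_number:]:
                layer.trainable = True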
Code example #6
    def _fine_tuning(self):
        self.freeze_top_layers()

        self.model.compile(
            loss='categorical_crossentropy',
            optimizer=Adam(lr=1e-5),
            metrics=['accuracy'])
        self.model.summary()
        # save the model to png in order to visualize
        plot_model(self.model, to_file='model1.png', show_shapes=True)
        train_data = self.get_train_datagen(rotation_range=30., shear_range=0.2, zoom_range=0.2, horizontal_flip=True,
                                            preprocessing_function=self.preprocess_input)
        # early stopping
        callbacks = self.get_callbacks(config.get_fine_tuned_weights_path(), patience=self.fine_tuning_patience)

        if util.is_keras2():
            self.model.fit_generator(
                train_data,
                steps_per_epoch=config.nb_train_samples / float(self.batch_size),
                epochs=self.nb_epoch,
                validation_data=self.get_validation_datagen(),
                validation_steps=config.nb_validation_samples / float(self.batch_size),
                callbacks=callbacks,
                class_weight=self.class_weight)
        else:
            self.model.fit_generator(
                train_data,
                samples_per_epoch=config.nb_train_samples,
                nb_epoch=self.nb_epoch,
                validation_data=self.get_validation_datagen(),
                nb_val_samples=config.nb_validation_samples,
                callbacks=callbacks,
                class_weight=self.class_weight)

        self.model.save(config.get_model_path())
Code example #7
    def load(self):
        print("Creating model")
        self.load_classes()
        self._create()  # define the model in resnet50.py
        # load the fine-tuned weights from e.g.
        # "G:\keras-transfer-learning-for-oxford102\trained\fine-tuned-resnet50-weights.h5"
        self.model.load_weights(config.get_fine_tuned_weights_path())
        return self.model
Code example #8
    def train(self, auto_load_fine_tune=False, visual=False):
        if auto_load_fine_tune and \
                os.path.exists(config.get_fine_tuned_weights_path()):
            self.load()
        else:
            print("Creating model...")
            self._create()
        print("Model is created")
        print("Fine tuning...")
        self._fine_tuning(visual=visual)
        self.save_classes()
        print("Classes are saved")
Code example #9
def fine_tune_top_2_inception_blocks(model, X_train, Y_train, X_test, Y_test,
                                     datagen):
    # we chose to train the top 2 inception blocks, i.e. we will freeze
    # the first 172 layers and unfreeze the rest:
    for layer in model.layers[:172]:
        layer.trainable = False
    for layer in model.layers[172:]:
        layer.trainable = True

    # we need to recompile the model for these modifications to take effect
    # we use SGD with a low learning rate
    # print("Compiling model...")
    model.compile(optimizer=SGD(lr=0.0001,
                                decay=1e-6,
                                momentum=0.9,
                                nesterov=True),
                  loss='categorical_crossentropy',
                  metrics=["accuracy"])

    # train_datagen = ImageDataGenerator(
    #     featurewise_center=False,
    #     samplewise_center=False,
    #     featurewise_std_normalization=False,
    #     samplewise_std_normalization=False,
    #     zca_whitening=False,
    #     # rotation_range=0,
    #     width_shift_range=0.125,
    #     height_shift_range=0.125,
    #     horizontal_flip=True,
    #     vertical_flip=False,
    #     fill_mode='nearest',
    #     rotation_range=30., shear_range=0.2, zoom_range=0.2,
    #     # horizontal_flip=True
    # )
    # train_gen, val_gen = _get_data_generators(train_datagen)
    callbacks = _get_callbacks(config.get_fine_tuned_weights_path(),
                               patience=30)
    test_datagen = ImageDataGenerator()

    # we train our model again (this time fine-tuning the top 2 inception blocks
    # alongside the top Dense layers
    model.fit_generator(datagen.flow(X_train, Y_train, shuffle=True),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=fine_tune_nb_epoch,
                        validation_data=test_datagen.flow(X_test, Y_test),
                        nb_val_samples=X_test.shape[0],
                        callbacks=callbacks)

    model.save(config.get_model_path())
Code example #10
    def test(self):
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=1e-5),
                           metrics=['accuracy'])
        self.model.summary()

        train_data = self.get_train_datagen()
        #print(train_data.next())
        #train_data = self.get_train_datagen(rotation_range=30., shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
        callbacks = self.get_callbacks(config.get_fine_tuned_weights_path(),
                                       patience=self.fine_tuning_patience)

        score = self.model.evaluate_generator(train_data,
                                              steps=config.nb_train_samples /
                                              float(self.batch_size))
        print("Score: ", score)
Code example #11
def tune(lr=0.0001, class_weight=None):
    model = load_model(nb_class=len(config.classes), weights_path=config.get_top_model_weights_path())

    model.compile(
        loss='categorical_crossentropy',
        optimizer=SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    # prepare data augmentation configuration
    train_datagen = ImageDataGenerator(
        rotation_range=30.,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    util.apply_mean(train_datagen)

    train_generator = train_datagen.flow_from_directory(
        config.train_dir,
        target_size=config.img_size,
        classes=config.classes)

    test_datagen = ImageDataGenerator()
    util.apply_mean(test_datagen)

    validation_generator = test_datagen.flow_from_directory(
        config.validation_dir,
        target_size=config.img_size,
        classes=config.classes)

    early_stopping = EarlyStopping(verbose=1, patience=30, monitor='val_loss')
    model_checkpoint = ModelCheckpoint(config.get_fine_tuned_weights_path(checkpoint=True),
                                       save_best_only=True,
                                       save_weights_only=True,
                                       monitor='val_loss')
    history = model.fit_generator(
        train_generator,
        samples_per_epoch=config.nb_train_samples,
        nb_epoch=fine_tuning_nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=config.nb_validation_samples,
        callbacks=[early_stopping, model_checkpoint],
        class_weight=class_weight)

    util.save_history(history=history, prefix='fine-tuning')
    util.save_classes(config.classes)

    _cleanup()
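Note how this tune() function ties back to code example #1: ModelCheckpoint writes the best weights to get_fine_tuned_weights_path(checkpoint=True), and the closing _cleanup() call then deletes the bottleneck-feature files and renames that checkpoint to the permanent weights path.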
Code example #12
    def _fine_tuning(self):
        self.freeze_top_layers()

        self.model.compile(
            loss='binary_crossentropy',
            optimizer=Adam(lr=1e-5),
            #optimizer=SGD(lr=5e-6, momentum=0.9),
            metrics=['binary_accuracy'])

        train_data = self.get_train_datagen(
            rescale=1. / 255,
            rotation_range=60.,
            #shear_range=0.2,
            #zoom_range=0.2,
            width_shift_range=0.2,
            height_shift_range=0.2,
            horizontal_flip=True,
            vertical_flip=True)
        callbacks = self.get_callbacks(config.get_fine_tuned_weights_path(),
                                       patience=self.fine_tuning_patience)

        if util.is_keras2():
            hist = self.model.fit_generator(
                train_data,
                steps_per_epoch=config.nb_train_samples /
                float(self.batch_size),
                epochs=self.nb_epoch,
                validation_data=self.get_validation_datagen(rescale=1. / 255),
                #validation_data=self.get_validation_datagen(),
                validation_steps=config.nb_validation_samples /
                float(self.batch_size),
                callbacks=callbacks,
                class_weight=self.class_weight)
        else:
            hist = self.model.fit_generator(
                train_data,
                samples_per_epoch=config.nb_train_samples,
                nb_epoch=self.nb_epoch,
                validation_data=self.get_validation_datagen(),
                nb_val_samples=config.nb_validation_samples,
                callbacks=callbacks,
                class_weight=self.class_weight)
        print(hist.history)
        util.save_history(history=hist, prefix=time.time())
        self.model.save(config.get_model_path())
Code example #13
    def _fine_tuning(self):
        self.freeze_top_layers()

        self.model.compile(loss='categorical_crossentropy',
                           optimizer=SGD(lr=1e-4,
                                         decay=1e-6,
                                         momentum=0.9,
                                         nesterov=True),
                           metrics=['accuracy'])

        history = self.model.fit_generator(
            self.get_train_datagen(rotation_range=30.,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True),
            samples_per_epoch=config.nb_train_samples,
            nb_epoch=self.nb_epoch,
            validation_data=self.get_validation_datagen(),
            nb_val_samples=config.nb_validation_samples,
            callbacks=self.get_callbacks(config.get_fine_tuned_weights_path(),
                                         patience=self.fine_tuning_patience),
            class_weight=self.class_weight)
        self.model.save(config.get_model_path())

        # list all data in history
        print(history.history.keys())
        # summarize history for accuracy
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
        # summarize history for loss
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
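One caveat about the plotting code above: 'acc' and 'val_acc' are the history keys used by Keras up to 2.2; from Keras 2.3 onward the metric is recorded as 'accuracy'/'val_accuracy'. A version-tolerant lookup, if you need to run this example on a newer Keras:

acc = history.history.get('acc', history.history.get('accuracy'))
val_acc = history.history.get('val_acc', history.history.get('val_accuracy'))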
Code example #14
    def _fine_tuning(self):
        self.freeze_top_layers()

        self.model.compile(
            loss='categorical_crossentropy',
            optimizer=SGD(lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True),
            metrics=['accuracy'])

        self.model.fit_generator(
            self.get_train_datagen(rotation_range=30.,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   preprocessing_function=self.preprocess_input),
            samples_per_epoch=config.nb_train_samples,
            nb_epoch=self.nb_epoch,
            validation_data=self.get_validation_datagen(preprocessing_function=self.preprocess_input),
            nb_val_samples=config.nb_validation_samples,
            callbacks=self.get_callbacks(config.get_fine_tuned_weights_path(), patience=self.fine_tuning_patience),
            class_weight=self.class_weight)

        self.model.save(config.get_model_path())
Code example #15
def load_trained():
    util.load_classes()
    model = load_model(nb_class=len(config.classes))
    model.load_weights(config.get_fine_tuned_weights_path())
    return model
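util.load_classes() here, and the load_classes()/save_classes() methods in other examples, persist the class-label list alongside the weights. They are not shown either; a minimal sketch, assuming the list is serialized with joblib as the import in code example #16 suggests, and where get_classes_path() is a hypothetical companion to the weights-path helper:

from sklearn.externals import joblib  # deprecated location; newer code imports joblib directly

def save_classes(classes):
    joblib.dump(classes, config.get_classes_path())  # hypothetical path helper

def load_classes():
    config.classes = joblib.load(config.get_classes_path())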
Code example #16
import numpy as np
from sklearn.externals import joblib
import os
from keras.callbacks import *
# import visualize
import config
import util
import tensorflow as tf
import cv2
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from keras.models import load_model
    train_data = get_train_datagen(rotation_range=30., shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True)
    checkpoint_dir = os.path.join(os.path.abspath('.'), 'checkpoint')
    callbacks = get_callbacks(config.get_fine_tuned_weights_path(),
                              checkpoint_dir, patience=fine_tuning_patience)

    # input_target = Input(shape=(None,))
    # centers = Embedding(len(config.classes), 4096)(input_target)  # the Embedding layer stores the class centers
    # print('center:', centers)
    # center_loss = Lambda(lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True), name='center_loss')(
    #     [feature, centers])
    '''
    centers = get_center_loss(0.2,len(config.classes),x,x.shape[1])
    center_loss = Lambda(lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True), name='l2_loss')(
        [x, centers])
    '''
    # center_model = Model(inputs=[base_model.input, input_target], outputs=[predictions, center_loss])

    center_model = load_model('/home/yuzhg/keras-transfer-learning-for-oxford102/trained/last-model-vgg16.h5')
Code example #17
    def load(self):
        print("loading finetuned model")
        self.load_classes()
        self._create()
        self.model.load_weights(config.get_fine_tuned_weights_path())
        return self.model
Code example #18
File: base_model.py Project: yzgrfsy/inceptionv3
    def _fine_tuning(self):
        self.freeze_top_layers1()
        train_data = self.get_train_datagen(
            rotation_range=30.,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            preprocessing_function=self.preprocess_input)
        checkpoint_dir = os.path.join(os.path.abspath('.'), 'checkpoint')
        callbacks = self.get_callbacks(config.get_fine_tuned_weights_path(),
                                       checkpoint_dir,
                                       patience=self.fine_tuning_patience)

        if util.is_keras2():
            if config.isCenterLoss:
                self.center_model.load_weights(
                    '/home/yuzhg/Inception-v3/trained/fine-tuned-best-inception-weights.h5',
                    by_name=True)
                self.center_model.compile(loss=[
                    'categorical_crossentropy', lambda y_true, y_pred: y_pred
                ],
                                          loss_weights=[1, 0.2],
                                          metrics=['accuracy'],
                                          optimizer=Adam(lr=1e-5))
                self.center_model.summary()
                self.history = self.center_model.fit_generator(
                    util.clone_y_generator(train_data),
                    steps_per_epoch=config.nb_train_samples /
                    float(self.batch_size),
                    epochs=self.nb_epoch,
                    validation_data=util.clone_y_generator(
                        self.get_validation_datagen()),
                    validation_steps=config.nb_validation_samples /
                    float(self.batch_size),
                    callbacks=callbacks,
                    class_weight=self.class_weight)
            elif config.isTripletLoss:
                self.triplet_model.load_weights(
                    '/home/yuzhg/Inception-v3/trained/fine-tuned-best-inception-weights.h5',
                    by_name=True)
                #self.triplet_model.compile(loss=self.hard_triplet_loss, optimizer=Adam(lr=1e-5), metrics=['accuracy'])
                self.triplet_model.compile(
                    optimizer=Adam(lr=1e-5),
                    loss=['categorical_crossentropy', self.hard_triplet_loss],
                    loss_weights=[1.0, 1.0],
                    metrics=['accuracy'])
                self.triplet_model.summary()
                valid_data = self.get_validation_datagen(
                    rotation_range=30.,
                    shear_range=0.2,
                    zoom_range=0.2,
                    horizontal_flip=True,
                    preprocessing_function=self.preprocess_input)

                # util.clone_y_generator1(train_data),
                self.history = self.triplet_model.fit_generator(
                    #util.triplet_transformed_generator(train_data, 4096),
                    util.clone_y_generator1(train_data),
                    steps_per_epoch=config.nb_train_samples /
                    float(self.batch_size),
                    epochs=self.nb_epoch,
                    #validation_data=util.triplet_transformed_generator(valid_data, 4096),
                    validation_data=util.clone_y_generator1(valid_data),
                    validation_steps=config.nb_validation_samples /
                    float(self.batch_size),
                    callbacks=callbacks,
                    class_weight=self.class_weight)
            else:
                self.model.load_weights(
                    '/home/yuzhg/Inception-v3/trained/fine-tuned-best-inception-weights.h5',
                    by_name=True)
                self.model.compile(loss='categorical_crossentropy',
                                   optimizer=Adam(lr=1e-5),
                                   metrics=['accuracy'])

                self.model.summary()
                self.history = self.model.fit_generator(
                    train_data,
                    steps_per_epoch=config.nb_train_samples /
                    float(self.batch_size),
                    epochs=self.nb_epoch,
                    validation_data=self.get_validation_datagen(
                        rotation_range=30.,
                        shear_range=0.2,
                        zoom_range=0.2,
                        horizontal_flip=True,
                        preprocessing_function=self.preprocess_input),
                    validation_steps=config.nb_validation_samples /
                    float(self.batch_size),
                    callbacks=callbacks,
                    class_weight=self.class_weight)

        # else:
        #     if config.isCenterLoss:
        #         self.center_model.compile(loss=['categorical_crossentropy', lambda y_true, y_pred:y_pred],
        #                            loss_weights=[1, 0.2], metrics=['accuracy'],
        #                            optimizer=Adam(lr=1e-5))
        #         self.center_model.summary()
        #         self.history = self.center_model.fit_generator(
        #             util.clone_y_generator(train_data),
        #             samples_per_epoch=config.nb_train_samples,
        #             nb_epoch=self.nb_epoch,
        #             validation_data=util.clone_y_generator(self.get_validation_datagen()),
        #             nb_val_samples=config.nb_validation_samples,
        #             callbacks=callbacks,
        #             class_weight=self.class_weight)
        #     elif config.isTripletLoss:
        #         self.triplet_model.compile(loss=triplet_loss, optimizer=Adam(lr=1e-5))
        #         self.triplet_model.summary()
        #         self.history = self.triplet_model.fit_generator(
        #             util.clone_y_generator(train_data),
        #             steps_per_epoch=config.nb_train_samples / float(self.batch_size),
        #             epochs=self.nb_epoch,
        #             validation_data=util.clone_y_generator(self.get_validation_datagen()),
        #             validation_steps=config.nb_validation_samples / float(self.batch_size),
        #             callbacks=callbacks,
        #             class_weight=self.class_weight
        #         )
        #     else:
        #         self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-5), metrics=['accuracy'])
        #         self.model.summary()
        #         self.history = self.model.fit_generator(
        #             train_data,
        #             steps_per_epoch=config.nb_train_samples / float(self.batch_size),
        #             epochs=self.nb_epoch,
        #             validation_data=self.get_validation_datagen(),
        #             validation_steps=config.nb_validation_samples / float(self.batch_size),
        #             callbacks=callbacks,
        #             class_weight=self.class_weight
        #         )

        if config.isCenterLoss:
            #self.center_model.save_weights('vgg16-model-weights.h5')
            self.center_model.save(config.get_model_path())
            util.save_history(self.history, self.center_model)
        elif config.isTripletLoss:
            self.triplet_model.save(config.get_model_path())
            util.save_history(self.history, self.triplet_model)
        else:
            self.model.save(config.get_model_path())
            util.save_history(self.history, self.model)
Code example #19
    def _fine_tuning(self):
        # self.model.load_weights(config.get_fine_tuned_weights_path())
        self.freeze_top_layers()
        # choose the loss mode and set the matching compile call and data generators;
        # the branches below look repetitive, but keeping them separate makes them easier to read
        if self.loss_choice == 0:
            self.model.compile(
                loss=['categorical_crossentropy', self.triplet_loss_fn],
                optimizer=Adam(lr=1e-5),
                loss_weights=[1, 1],
                # SGD(lr=1e-5, decay=1e-6, momentum=0.8, nesterov=True),#Adadelta(),#Adam(lr=1e-5),
                metrics=['accuracy'])
            train_data_generator = util.triplet_transformed_generator(
                self.get_train_datagen(
                    rotation_range=10.,
                    shear_range=0.05,
                    zoom_range=0.1,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    horizontal_flip=True,
                    preprocessing_function=self.preprocess_input),
                self.noveltyDetectionLayerSize)
            validation_data_generator = util.triplet_transformed_generator(
                self.get_validation_datagen(
                    preprocessing_function=self.preprocess_input),
                self.noveltyDetectionLayerSize)
        elif self.loss_choice == 1:
            self.model.compile(
                loss=["categorical_crossentropy", self.center_loss_fn],
                optimizer=Adam(lr=1e-5),
                loss_weights=[1, 0.15],
                metrics=['accuracy'])
            train_data_generator = util.centerloss_transformed_generator(
                self.get_train_datagen(
                    rotation_range=10.,
                    shear_range=0.05,
                    zoom_range=0.1,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    horizontal_flip=True,
                    preprocessing_function=self.preprocess_input))
            validation_data_generator = util.centerloss_transformed_generator(
                self.get_validation_datagen(
                    preprocessing_function=self.preprocess_input))
        elif self.loss_choice == 2:
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=Adam(lr=1e-5),
                               metrics=['accuracy'])
            train_data_generator = self.get_train_datagen(
                rotation_range=10.,
                shear_range=0.05,
                zoom_range=0.1,
                width_shift_range=0.05,
                height_shift_range=0.05,
                horizontal_flip=True,
                preprocessing_function=self.preprocess_input)
            validation_data_generator = self.get_validation_datagen(
                preprocessing_function=self.preprocess_input)

        # print the model architecture and key info
        self.model.summary()
        # fit the generator in batch sequence mode
        self.model.fit_generator(
            train_data_generator,
            steps_per_epoch=config.nb_train_samples / self.batch_size,
            epochs=self.nb_epoch,
            validation_data=validation_data_generator,
            validation_steps=config.nb_validation_samples / self.batch_size,
            callbacks=self.get_callbacks(
                config.get_fine_tuned_weights_path(),
                patience=self.fine_tuning_patience,
                embedding_mode=(self.loss_choice != 2),
                target_object=self,
                visual_embedding=[self.noveltyDetectionLayerName],
                center_loss_input=(self.loss_choice == 1)),
            class_weight=self.class_weight)
        #self.model.save(config.get_model_path())
        keras.models.save_model(self.model, config.get_model_path())
Code example #20
    def load(self):
        print("Creating model")
        self.load_classes()
        self._create()
        self.model.load_weights(config.get_fine_tuned_weights_path())
        return self.model