Example #1
    def train_stage1(self):
        """
        In this stage, we will freeze all the convolution blocks and train
        only the newly added dense layers. We will add a global spatial average
        pooling layer, we will add fully connected dense layers on the output
        of the base models. We will freeze the convolution base and train only
        the top layers. We will set all the convolution layers to false, the model
        should be compiled when all the convolution layers are set to false.
        
        Arguments:
            
            -input_params  :  This parameter will contain all the information that the user will
                              input through the terminal
        """

        print(
            "\nTraining the model by freezing the convolution block and tuning the top layers..."
        )
        st = dt.now()

        utils_obj = Utility(self.input_params, self.path_dict)

        #If a pretrained architecture was chosen, build a new classifier head
        #on top of it; otherwise use the user's custom model as-is

        if self.input_params['model_name'] != 'custom':
            base_model = utils_obj.load_imagenet_model()

            #Adding a global spatial average pooling layer
            x = base_model.output
            x = GlobalAveragePooling2D()(x)

            #Adding the custom fully-connected layers supplied by the user
            customlayers = self.input_params['customlayers']
            x = customlayers(x)

            #Adding the final dense output layer
            n = utils_obj.no_of_classes()
            output_layer = Dense(
                n,
                activation=self.input_params['outputlayer_activation'],
                kernel_initializer='glorot_uniform')(x)
            #Define the model
            model_stg1 = Model(inputs=base_model.input, outputs=output_layer)

            #Freeze the convolution base and train only the top layers:
            #set every convolution layer's trainable flag to False; the model
            #must be compiled after the flags are set for the freeze to apply
            for layer in base_model.layers:
                layer.trainable = False

        else:
            model_stg1 = self.input_params['custom_model']

        #Compiling the model
        model_stg1.compile(
            optimizer=optimizers.Adam(lr=self.input_params['stage1_lr']),
            loss='categorical_crossentropy',
            metrics=[self.input_params['metric']])

        #Set up the data generators with the model-specific preprocessing function
        train_datagen = ImageDataGenerator(
            preprocessing_function=utils_obj.init_preprocess_func())
        val_datagen = ImageDataGenerator(
            preprocessing_function=utils_obj.init_preprocess_func())

        df_train = utils_obj.load_data("train")
        df_val = utils_obj.load_data("val")

        train_generator = train_datagen.flow_from_dataframe(
            dataframe=df_train,
            directory=self.path_dict['source'],
            target_size=utils_obj.init_sizes(),
            x_col="filenames",
            y_col="class_label",
            batch_size=self.input_params['batch_size'],
            class_mode='categorical',
            color_mode='rgb',
            shuffle=True)

        val_generator = val_datagen.flow_from_dataframe(
            dataframe=df_val,
            directory=self.path_dict['source'],
            target_size=utils_obj.init_sizes(),
            x_col="filenames",
            y_col="class_label",
            batch_size=self.input_params['batch_size'],
            class_mode='categorical',
            color_mode='rgb',
            shuffle=True)

        nb_train_samples = len(train_generator.classes)
        nb_val_samples = len(val_generator.classes)

        history = model_stg1.fit_generator(
            generator=train_generator,
            steps_per_epoch=nb_train_samples // self.input_params['batch_size'],
            epochs=self.input_params['epochs1'],
            validation_data=val_generator,
            validation_steps=nb_val_samples // self.input_params['batch_size'],
            callbacks=TrainingUtils.callbacks_list(self, 1),  #1 for stage 1
            workers=self.input_params['nworkers'],
            use_multiprocessing=False,
            max_queue_size=20)

        hist_df = pd.DataFrame(history.history)
        hist_csv_file = self.path_dict['model_path'] + "stage{}/".format(
            1) + "{}_history_stage_{}.csv".format(
                self.input_params['model_name'], 1)
        with open(hist_csv_file, mode='w') as file:
            hist_df.to_csv(file, index=False)

        #Optionally reload the best checkpointed weights before saving:
        #model_stg1.load_weights(self.path_dict['model_path'] + "stage{}/".format(1) + "{}_weights_stage_{}.hdf5".format(self.input_params['model_name'], 1))
        model_stg1.save(
            self.path_dict['model_path'] + "stage{}/".format(1) +
            "{}_model_stage_{}.h5".format(self.input_params['model_name'], 1))

        TrainingUtils.save_summary(self, model_stg1, 1)
        TrainingUtils.plot_layer_arch(self, model_stg1, 1)

        stage1_params = dict()
        stage1_params['train_generator'] = train_generator
        stage1_params['val_generator'] = val_generator
        stage1_params['nb_train_samples'] = nb_train_samples
        stage1_params['nb_val_samples'] = nb_val_samples

        print("\nTime taken to train the model in stage 1: ", dt.now() - st)

        #Start model evaluation for Stage 1
        eval_utils = EvalUtils(self.input_params, self.path_dict, 1)
        eval_utils.predict_on_test()

        return model_stg1, stage1_params
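
The heart of stage 1 is the freeze-then-train pattern: build a new classifier head on a pretrained convolution base, mark the base as non-trainable, and compile afterwards so the freeze takes effect. A minimal standalone sketch of that pattern (assuming a VGG16 base and a hypothetical 10-class dataset; the pooling layer, output initializer, and loss mirror the code above):

    from tensorflow.keras.applications import VGG16
    from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
    from tensorflow.keras.models import Model
    from tensorflow.keras import optimizers

    n_classes = 10  #hypothetical; the real code derives this from the dataset

    #Load an ImageNet-pretrained convolution base without its classifier head
    base_model = VGG16(weights='imagenet', include_top=False,
                       input_shape=(224, 224, 3))

    #Stack a global average pooling layer and a softmax output on top
    x = GlobalAveragePooling2D()(base_model.output)
    output_layer = Dense(n_classes, activation='softmax',
                         kernel_initializer='glorot_uniform')(x)
    model = Model(inputs=base_model.input, outputs=output_layer)

    #Freeze every convolution layer; only the new head receives updates
    for layer in base_model.layers:
        layer.trainable = False

    #Compile after setting the trainable flags so the freeze takes effect
    model.compile(optimizer=optimizers.Adam(learning_rate=1e-3),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

Compiling after the loop matters: in Keras, changes to layer.trainable only become effective at the next compile.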
Example #2
    def predict_on_test(self):
        """
        This function will load the test dataset, pre-process the test
        images and check the performance of the trained models on unseen
        data. This will also save the confusion matrix and classification
        report as csv file in seperate dataframes for each model and for
        each stage, in the evaluation directory.
        
        Arguments:                    
            
            -size_dict    : Contains information about the image input image sizes for each of the models
                
            -model_name   : Name of the model, for example - vgg16, inception_v3, resnet50 etc
                          
            -stage_no     : The training stage of the model. You will have a choice to select the number
                            of training stages. In stage 1, we only fine tune the top 2 dense layers by
                            freezing the convolution base. In stage 2, we will re adjust the weights trained
                            in stage 1 by training the top convolution layers, by freezing the dense layers.
        """

        print("\nStarting model evaluation for stage {}..".format(
            self.stage_no))

        #Create a utility class object to access the class methods
        utils_obj = Utility(self.input_params, self.path_dict)

        df_test = utils_obj.load_data("test")

        test_datagen = ImageDataGenerator(
            preprocessing_function=utils_obj.init_preprocess_func())

        test_generator = test_datagen.flow_from_dataframe(
            dataframe=df_test,
            directory=self.path_dict['source'],
            target_size=utils_obj.init_sizes(),
            x_col="filenames",
            y_col="class_label",
            batch_size=1,
            class_mode='categorical',
            color_mode='rgb',
            shuffle=False)

        nb_test_samples = len(test_generator.classes)

        model = utils_obj.get_models(self.stage_no)
        class_indices = test_generator.class_indices

        #Map each class label to its integer index from the generator
        df_test['true'] = df_test['class_label'].apply(
            lambda x: class_indices[str(x)])
        y_true = df_test['true'].values

        #Predictions (probability scores and class labels)
        #With batch_size=1, the number of steps equals the number of test samples
        y_pred_proba = model.predict_generator(test_generator,
                                               steps=nb_test_samples)
        y_pred = np.argmax(y_pred_proba, axis=1)

        df_test['predicted'] = y_pred
        df_test.to_csv(self.path_dict["eval_path"] +
                       "stage{}/".format(self.stage_no) +
                       '{}_predictions_stage_{}.csv'.format(
                           self.input_params['model_name'], self.stage_no))
        #Map the integer class indices back to their original string labels
        dictionary = dict(zip(df_test.true.values, df_test.class_label.values))

        #Confusion matrix
        cm = metrics.confusion_matrix(y_true, y_pred)
        df_cm = pd.DataFrame(cm).transpose()
        df_cm = df_cm.rename(index=dictionary,
                             columns=dictionary,
                             copy=True,
                             inplace=False)
        df_cm.to_csv(self.path_dict["eval_path"] +
                     "stage{}/".format(self.stage_no) +
                     '{}_cm_stage_{}.csv'.format(
                         self.input_params['model_name'], self.stage_no))
        print('Confusion matrix prepared and saved..')

        #Classification Report
        report = metrics.classification_report(y_true,
                                               y_pred,
                                               target_names=list(
                                                   class_indices.keys()),
                                               output_dict=True)

        df_rep = pd.DataFrame(report).transpose()
        df_rep.to_csv(self.path_dict["eval_path"] +
                      "stage{}/".format(self.stage_no) +
                      '{}_class_report_stage_{}.csv'.format(
                          self.input_params['model_name'], self.stage_no))
        print('Classification report prepared and saved..')

        EvalUtils.plot_confusion_matrix(
            self, y_true, y_pred, list(test_generator.class_indices.keys()))

        #General Metrics
        df_metrics = EvalUtils.get_metrics(self, y_true, y_pred)
        df_metrics.to_csv(self.path_dict["eval_path"] +
                          "stage{}/".format(self.stage_no) +
                          '{}_metrics_stage_{}.csv'.format(
                              self.input_params['model_name'], self.stage_no))

        history_df = pd.read_csv(
            self.path_dict["model_path"] + "stage{}/".format(self.stage_no) +
            "{}_history_stage_{}.csv".format(self.input_params['model_name'],
                                             self.stage_no))

        #Get the train vs validation loss for all epochs
        EvalUtils.plt_epoch_error(self, history_df)

        #Generate a complete report and save it as an HTML file in the evaluation folder location
        EvalUtils.get_complete_report(self, y_true, y_pred, class_indices)
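
The evaluation artifacts above boil down to a handful of sklearn/pandas calls. A minimal sketch with made-up labels (the class names and arrays are illustrative, not taken from the pipeline):

    import numpy as np
    import pandas as pd
    from sklearn import metrics

    #Illustrative ground truth and predictions for a 3-class problem
    class_names = ['cat', 'dog', 'horse']
    y_true = np.array([0, 1, 2, 2, 1, 0])
    y_pred = np.array([0, 1, 2, 1, 1, 0])

    #Confusion matrix, labelled with class names on both axes
    cm = metrics.confusion_matrix(y_true, y_pred)
    df_cm = pd.DataFrame(cm, index=class_names, columns=class_names)
    df_cm.to_csv('cm.csv')

    #Per-class precision/recall/F1 as a dataframe, ready to save
    report = metrics.classification_report(y_true, y_pred,
                                           target_names=class_names,
                                           output_dict=True)
    pd.DataFrame(report).transpose().to_csv('class_report.csv')

Passing output_dict=True makes classification_report return a nested dict instead of a string, which is what lets the code above convert it straight into a dataframe for saving.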