def build_model(
    model_class,
    image_size,
    nb_channel,
    input_size,
    output_dim,
    weights=None,
    patience=5,
):
    """Build a transfer-learning classifier head on top of a Keras base model.

    Parameters
    ----------
    model_class : callable
        Keras application constructor (e.g. ``ResNet50``) accepting
        ``input_tensor``, ``input_shape``, ``weights`` and ``include_top``.
    image_size : int
        Original image size, forwarded to ``build_input``.
    nb_channel : int
        Number of image channels.
    input_size : int
        Side length of the (square) input fed to the base model.
    output_dim : int
        Number of output units; <= 2 is treated as a binary-style head.
    weights : str or None, optional
        Pretrained weights spec passed to the base model, by default None.
    patience : int, optional
        EarlyStopping patience in epochs, by default 5.

    Returns
    -------
    (model, callbacks) : tuple
        The compiled Keras ``Model`` and a list containing the configured
        ``EarlyStopping`` callback.
    """
    input_layer, inputs = build_input(image_size, nb_channel, input_size)
    base_model = model_class(
        input_tensor=input_layer,
        input_shape=(input_size, input_size, nb_channel),
        weights=weights,
        include_top=False,
    )
    # Classification head: GAP followed by a shrinking MLP with dropout.
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation="relu", kernel_initializer="he_normal")(x)
    x = Dropout(0.3)(x)
    x = Dense(512, activation="relu", kernel_initializer="he_normal")(x)
    x = Dropout(0.3)(x)
    x = Dense(128, activation="relu", kernel_initializer="he_normal")(x)
    x = Dropout(0.3)(x)
    # sigmoid for binary-style heads (<= 2 units), softmax for multi-class
    predictions = Dense(
        output_dim, activation="softmax" if output_dim > 2 else "sigmoid"
    )(x)
    model = Model(inputs=inputs, outputs=predictions)
    es = EarlyStopping(
        monitor="val_loss",
        verbose=1,
        mode="min",
        patience=patience,
        restore_best_weights=True,
    )
    # compile the model (should be done *after* setting layers to non-trainable)
    # We need to recompile the model for these modifications to take effect
    es.set_model(model)
    # BUG FIX: loss selection now mirrors the activation selection above.
    # Previously `output_dim == 1` got a sigmoid head paired with
    # categorical_crossentropy, which cannot train a single-unit output.
    loss = "binary_crossentropy" if output_dim <= 2 else "categorical_crossentropy"
    # optimizer = Lookahead(RectifiedAdam(), sync_period=6, slow_step_size=0.5)
    model.compile(optimizer="rmsprop", loss=loss)
    return model, [es]
def fit(self, *, X, y, X_valid=None, y_valid=None, callbacks=None, **kwargs):
    """
    Fit method, to train the model

    Parameters
    ----------
    X : numpy matrice
        Training data
    y : numpy array
        target for training
    X_valid : numpy matrice, optional
        validation data, by default None
    y_valid : numpy array, optional
        validation data, by default None
    callbacks : list, optional
        Keras callbacks, by default None (treated as an empty list)

    Returns
    -------
    history
        Keras fit history
    """
    # BUG FIX: `callbacks=[]` was a mutable default argument; use a None
    # sentinel instead (backward compatible for all callers).
    if callbacks is None:
        callbacks = []
    # Rebuild the network on every fit call so repeated fits start fresh.
    if self.use_snn:
        self.network = build_keras_snn(
            self.n_layer,
            self.in_dim,
            self.out_dim,
            self.mul_input,
            self.loss,
            self.activation,
            self.metrics,
            self.cat_emb_dims,
            self.dropout,
            self.max_emb,
            self.noise,
        )
    else:
        self.network = build_keras_mlp(
            self.n_layer,
            self.in_dim,
            self.out_dim,
            self.mul_input,
            self.loss,
            self.activation,
            self.metrics,
            self.cat_emb_dims,
            self.dropout,
            self.normalize,
            self.max_emb,
            self.noise,
        )
    prepared_inputs = self.prepare_input_(X)
    # NOTE(review): an empty tuple (not None) is passed to Keras when no
    # validation set is supplied; Keras treats it as falsy, so behavior is
    # preserved here — confirm against the Keras version in use.
    val_data = ()
    if X_valid is not None and y_valid is not None:
        prepared_val_inputs = self.prepare_input_(X_valid)
        val_data = (prepared_val_inputs, y_valid)
    # Copy the caller's callbacks so we never mutate their list.
    all_cbs = [*callbacks]
    if self.patience is not None:
        es = EarlyStopping(
            monitor="val_loss",
            verbose=1,
            mode="min",
            patience=self.patience,
            restore_best_weights=True,
        )
        es.set_model(self.network)
        all_cbs.append(es)
    history = self.network.fit(
        prepared_inputs, y, validation_data=val_data, callbacks=all_cbs, **kwargs
    )
    # Free memory held by intermediate prepared inputs / previous networks.
    gc.collect()
    return history
# NOTE(review): this chunk begins mid-function — the lines down to
# `return logs` are the tail of a per-epoch metrics-collection function
# whose `def` is outside this view. Indentation reconstructed from the
# collapsed source; confirm against the original file.
    # Collect the per-epoch metric dictionaries into one record and store it
    # under an "epoch_<n>" key in the logs mapping.
    epoch_dict["precision"] = precision_dict
    epoch_dict["recall"] = recall_dict
    epoch_dict["f1score"] = f1score_dict
    epoch_dict["confusion_matrix"] = conf_mat
    logs["epoch_{}".format(epoch)] = epoch_dict
    return logs


# --- module-level training setup (script scope) ---
# Reset the manual stop flag before (re)starting training.
model.stop_training = False
# Integer labels expected (sparse loss); accuracy tracked alongside the loss.
model.compile(loss="sparse_categorical_crossentropy",
              optimizer=tf.keras.optimizers.Adam(),
              metrics=["accuracy"])
# Callbacks are wired manually (set_model / on_train_begin) rather than passed
# to model.fit — presumably a custom training loop drives them; verify.
earlystop = EarlyStopping(monitor="val_loss", patience=20, verbose=1)
earlystop.set_model(model)
earlystop.on_train_begin()
# Best-only checkpointing on validation loss into the weights/ directory.
modelcheckpoint = ModelCheckpoint(filepath="weights/",
                                  monitor="val_loss",
                                  verbose=1,
                                  save_best_only=True)
modelcheckpoint.set_model(model)
modelcheckpoint.on_train_begin()
# Halve-style LR reduction when val_loss plateaus for 10 epochs
# (factor left at the Keras default).
reduce_lr = ReduceLROnPlateau(monitor="val_loss", patience=10, verbose=1)
reduce_lr.set_model(model)
reduce_lr.on_train_begin()
# TensorBoard logging; note on_train_begin() is not called here, unlike the
# callbacks above — confirm whether that is intentional.
tensorboard = TensorBoard(log_dir="logs/")
tensorboard.set_model(model)