import os

import tensorflow as tf


def train_model(subdataset, species):
    dataset_dir = os.path.join(dataset_base, subdataset, species)

    dataset = CustomDataset(
        dataset_dir, 'training',
        img_generator=img_data_gen, mask_generator=mask_data_gen,
        preprocessing_function=preprocess_input
    )
    dataset_valid = CustomDataset(
        dataset_dir, 'validation',
        preprocessing_function=preprocess_input
    )

    train_dataset = tf.data.Dataset.from_generator(
        lambda: dataset,
        output_types=(tf.float32, tf.float32),
        output_shapes=([img_h, img_w, 3], [img_h, img_w, 1])
    ).batch(bs).repeat()

    valid_dataset = tf.data.Dataset.from_generator(
        lambda: dataset_valid,
        output_types=(tf.float32, tf.float32),
        output_shapes=([img_h, img_w, 3], [img_h, img_w, 1])
    ).batch(bs).repeat()

    num_classes = 3
    model = create_model(img_h, img_w, num_classes=num_classes)
    model.summary()

    # Loss: sparse categorical crossentropy works directly on the integer
    # masks, so labels do not need to be one-hot encoded
    loss = tf.keras.losses.SparseCategoricalCrossentropy()

    # Optimizer with the configured learning rate
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

    # Validation metrics
    metrics = ['accuracy', gen_meanIoU(num_classes)]

    # Compile model
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    # ---- Callbacks ----
    exps_dir = "experiments"
    if not os.path.exists(exps_dir):
        os.makedirs(exps_dir)

    model_dir = os.path.join(exps_dir, MODEL_NAME)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    exp_dir = os.path.join(model_dir, subdataset, species, str(now))
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)

    callbacks_list = []

    # Model checkpoint
    if CHECKPOINTS:
        callbacks_list.append(callbacks.checkpoints(exp_dir))

    # Early stopping
    if EARLY_STOP:
        callbacks_list.append(callbacks.early_stopping(patience=10))

    # Save best model
    # ----------------
    best_checkpoint_path = None
    if SAVE_BEST:
        best_checkpoint_path, save_best_callback = callbacks.save_best(exp_dir)
        callbacks_list.append(save_best_callback)

    model.fit(
        x=train_dataset,
        epochs=epochs,
        steps_per_epoch=len(dataset) // bs,  # the generator yields single samples, so one step per batch
        validation_data=valid_dataset,
        validation_steps=len(dataset_valid) // bs,
        callbacks=callbacks_list
    )

    if PLOT:
        # Reload the best weights (rather than the last epoch's) before plotting
        if best_checkpoint_path:
            model.load_weights(best_checkpoint_path)
        # ---- Prediction ----
        plot_predictions(model, valid_dataset, num_classes)
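# gen_meanIoU above is a project helper, not a stock Keras metric. A minimal
# sketch of what it might look like, assuming the model outputs per-pixel
# softmax probabilities and the masks hold integer class ids (everything here
# except the gen_meanIoU name is an assumption):
import tensorflow as tf


def gen_meanIoU(num_classes):
    class SparseMeanIoU(tf.keras.metrics.MeanIoU):
        # tf.keras.metrics.MeanIoU compares label maps on both sides, so
        # collapse the softmax output to a class id per pixel before
        # updating the confusion-matrix counts
        def update_state(self, y_true, y_pred, sample_weight=None):
            y_pred = tf.argmax(y_pred, axis=-1)
            return super().update_state(y_true, y_pred, sample_weight)

    return SparseMeanIoU(num_classes=num_classes, name='meanIoU')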
if not os.path.exists(exp_dir):
    os.makedirs(exp_dir)

callbacks_list = []

# Model checkpoint
if CHECKPOINTS:
    callbacks_list.append(callbacks.checkpoints(exp_dir))

# Early stopping
if EARLY_STOP:
    callbacks_list.append(callbacks.early_stopping(patience=7))

# TensorBoard
if TENSORBOARD:
    callbacks_list.append(callbacks.tensorboard(exp_dir))

# Save best model
# ----------------
if SAVE_BEST:
    callbacks_list.append(callbacks.save_best(exp_dir))

model.fit(
    x=train_dataset,
    epochs=1000,  # early stopping is expected to end training well before this
    steps_per_epoch=len(train_flow),
    validation_data=validation_dataset,
    validation_steps=len(validation_flow),
    callbacks=callbacks_list,
)
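# checkpoints, early_stopping, and tensorboard are factories from a small
# project-local callbacks module. A minimal sketch of plausible
# implementations, assuming standard tf.keras callbacks underneath (the
# directory layout and defaults are assumptions):
import os

import tensorflow as tf


def checkpoints(exp_dir):
    # Save the weights after every epoch into the experiment folder
    ckpt_dir = os.path.join(exp_dir, 'ckpts')
    os.makedirs(ckpt_dir, exist_ok=True)
    return tf.keras.callbacks.ModelCheckpoint(
        filepath=os.path.join(ckpt_dir, 'cp_{epoch:02d}.ckpt'),
        save_weights_only=True
    )


def early_stopping(patience):
    # Stop training once val_loss has not improved for `patience` epochs
    return tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)


def tensorboard(exp_dir):
    # Write logs viewable with `tensorboard --logdir experiments`
    tb_dir = os.path.join(exp_dir, 'tb_logs')
    os.makedirs(tb_dir, exist_ok=True)
    return tf.keras.callbacks.TensorBoard(log_dir=tb_dir)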
os.makedirs(exp_dir)

current_k_idx_dir = os.path.join(exp_dir, f"k_{i}")
if not os.path.exists(current_k_idx_dir):
    os.makedirs(current_k_idx_dir)

callbacks_list = []

# Early stopping
if EARLY_STOP:
    callbacks_list.append(callbacks.early_stopping(patience=10))

# Save best model
# ----------------
best_checkpoint_path = None
if SAVE_BEST:
    best_checkpoint_path, save_best_callback = callbacks.save_best(current_k_idx_dir)
    callbacks_list.append(save_best_callback)

model.fit(
    x=train_dataset,
    epochs=epochs,
    steps_per_epoch=len(dataset) // bs,
    validation_data=valid_dataset,
    validation_steps=len(dataset_valid) // bs,
    callbacks=callbacks_list
)

# Clear the TensorFlow session to release memory; otherwise usage keeps rising after each fold
K.clear_session()
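# save_best is unpacked into (path, callback) in the segmentation and k-fold
# scripts above, which suggests it both builds a best-only checkpoint callback
# and reports where the winning weights will be written. A minimal sketch
# under that assumption (the path layout is hypothetical):
import os

import tensorflow as tf


def save_best(exp_dir):
    best_checkpoint_path = os.path.join(exp_dir, 'best', 'model.ckpt')
    callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=best_checkpoint_path,
        monitor='val_loss',       # keep only the weights of the best validation epoch
        save_best_only=True,
        save_weights_only=True
    )
    return best_checkpoint_path, callback

# After training, model.load_weights(best_checkpoint_path) restores that epoch,
# as done before plotting predictions in train_model above.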