def train(config, dataset_train, dataset_val, epochs, tune_epochs, init_with='coco'):
    """Build a Mask R-CNN model, initialize its weights, and run a
    two-stage training schedule.

    Args:
        config: Model configuration object (provides LEARNING_RATE).
        dataset_train: Prepared training dataset.
        dataset_val: Prepared validation dataset.
        epochs: Epochs for stage 1 (heads / RPN / FPN / conv1).
        tune_epochs: Epochs for stage 2 (fine-tune all layers).
        init_with: Weight source — 'imagenet', 'coco', or 'last'.

    Returns:
        The trained MaskRCNN model.
    """
    model = mrcnn.model.MaskRCNN(mode="training", config=config,
                                 model_dir=model_dir)

    # Initialize weights. The COCO checkpoint skips the layers whose
    # shapes depend on the number of classes (plus conv1, which differs
    # here) — see the README for where to download the COCO weights.
    if init_with == "imagenet":
        model.load_weights(model.get_imagenet_weights(), by_name=True)
    elif init_with == "coco":
        skipped_layers = ['conv1', 'mrcnn_class_logits', 'mrcnn_bbox_fc',
                          'mrcnn_bbox', 'mrcnn_mask']
        model.load_weights(coco_model_path, by_name=True,
                           exclude=skipped_layers)
    elif init_with == "last":
        # Resume from the most recent checkpoint under model_dir.
        model.load_weights(model.find_last(), by_name=True)

    # Stage 1: train only the freshly initialized layers, selected by a
    # layer-name regex; everything else stays frozen.
    stage1_layers = r'(conv1)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)'
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=epochs,
                layers=stage1_layers)

    # Stage 2: fine-tune every layer at a 10x smaller learning rate.
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE / 10,
                epochs=tune_epochs,
                layers='all')

    return model
def main(args):
    """Train a Mask R-CNN model on MRI data.

    Loads initial weights (the last checkpoint, or an explicit file),
    prepares the training/validation datasets, then optionally trains the
    network heads before training all layers.
    """
    cfg = MRIConfig()
    model = mrcnn.model.MaskRCNN(mode='training', config=cfg,
                                 model_dir=args.model_dir)

    if args.weights_path == 'last':
        # Resume from the most recent checkpoint.
        model.load_weights(model.find_last(), by_name=True)
    else:
        # Skip the class-count-dependent layers and the RPN so they are
        # re-initialized for this dataset.
        model.load_weights(args.weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
            "mrcnn_mask", "rpn_model"
        ])

    train_data = MRIDataset()
    train_data.load_mri(args.train_data_list, args.classes)
    train_data.prepare()

    val_data = MRIDataset()
    val_data.load_mri(args.val_data_list, args.classes)
    val_data.prepare()

    # Apply any number of these mild augmentations per image.
    aug = iaa.SomeOf((0, None), [
        iaa.Fliplr(0.5),
        iaa.Multiply((0.8, 1.2)),
        iaa.ContrastNormalization((0.8, 1.2)),
    ])

    if args.train_head:
        model.train(train_data, val_data,
                    learning_rate=cfg.LEARNING_RATE,
                    epochs=20,
                    augmentation=aug,
                    layers='heads')

    model.train(train_data, val_data,
                learning_rate=cfg.LEARNING_RATE,
                epochs=50,
                augmentation=aug,
                layers='all')
def train(model, config, dataset_train, dataset_val, skip_heads=False):
    """Run a two-stage training schedule: heads first (unless skipped),
    then all layers.

    NOTE(review): the augmentation pipeline below is constructed but is
    currently NOT passed to model.train() (the kwarg was disabled) —
    presumably on purpose; confirm before re-enabling.
    """
    # Candidate augmentation pipeline: up to two of flips, right-angle
    # rotations, brightness scaling, or Gaussian blur per image.
    # http://imgaug.readthedocs.io/en/latest/source/augmenters.html
    augmentation = iaa.SomeOf((0, 2), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.OneOf([iaa.Affine(rotate=90),
                   iaa.Affine(rotate=180),
                   iaa.Affine(rotate=270)]),
        iaa.Multiply((0.8, 1.5)),
        iaa.GaussianBlur(sigma=(0.0, 5.0))
    ])

    # *** Example schedule — adapt epochs/stages to your needs. ***
    if not skip_heads:
        # Heads start from random weights, so train them alone first.
        print('Train network heads')
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=40,
                    layers='heads')

    print('Train all layers')
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=80,
                layers='all')
trainImagesPath = trainImagesPaths[index] trainAnnotationsPath = trainAnnotationsPaths[index] assert os.path.exists(trainImagesPath) assert os.path.exists(trainAnnotationsPath) dataset_train.load_coco(trainImagesPath, trainAnnotationsPath) dataset_train.prepare() dataset_val = dataset.Dataset() for index in range(len(valImagesPaths)): valImagesPath = valImagesPaths[index] valAnnotationsPath = valAnnotationsPaths[index] assert os.path.exists(valImagesPath) assert os.path.exists(valAnnotationsPath) dataset_val.load_coco(valImagesPath, valAnnotationsPath) dataset_val.prepare() for run in range(runs): print("Performing training run {} of {}.".format(run, runs)) currentLayers = layers[run] currentLearningRate = learningRates[run] currentEpochs = epochs[run] print("Training layers {} with learning rate {} for {} epochs".format( currentLayers, currentLearningRate, currentEpochs)) model.train(dataset_train, dataset_val, learning_rate=currentLearningRate, epochs=currentEpochs, layers=currentLayers)
# --- Kangaroo dataset: top-level training statements ---
# NOTE(review): `train_set` is created before this chunk; assumed to be a
# dataset instance analogous to KangarooDataset — confirm against the
# preceding lines.
train_set.load_dataset(dataset_dir='E:\\Mask_RCNN-tf2\\kangaroo', is_train=True)
train_set.prepare()

# prepare test/val set
valid_dataset = KangarooDataset()
valid_dataset.load_dataset(dataset_dir='E:\\Mask_RCNN-tf2\\kangaroo', is_train=False)
valid_dataset.prepare()

# prepare config
kangaroo_config = KangarooConfig()

# define the model
model = mrcnn.model.MaskRCNN(mode='training', model_dir='./', config=kangaroo_config)

# Start from COCO weights, skipping the layers whose shapes depend on the
# number of classes.
model.load_weights(filepath='mask_rcnn_coco.h5',
                   by_name=True,
                   exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])

# Train only the head layers for 20 epochs.
model.train(train_dataset=train_set,
            val_dataset=valid_dataset,
            learning_rate=kangaroo_config.LEARNING_RATE,
            epochs=20,
            layers='heads')

# Persist the trained weights.
# NOTE(review): filename says "Kangaro" — likely a typo for "Kangaroo";
# left unchanged because the string is runtime behavior.
model_path = 'Kangaro_mask_rcnn_trained.h5'
model.keras_model.save_weights(model_path)
# Validation validation_dataset = BalloonDataset() validation_dataset.load_balloon( dataset_dir= 'C:/Users/Choi Jun Ho/maskrcnn/Mask-RCNN-TF2/lane_detection/lane', subset='val') validation_dataset.prepare() # Model Configuration lane_config = LaneConfig() # Build the Mask R-CNN Model Architecture model = mrcnn.model.MaskRCNN(mode='training', model_dir='./', config=lane_config) model.load_weights( filepath='C:/Users/Choi Jun Ho/maskrcnn/Mask-RCNN-TF2/mask_rcnn_coco.h5', by_name=True, exclude=[ "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask" ]) model.train(train_dataset=train_dataset, val_dataset=validation_dataset, learning_rate=lane_config.LEARNING_RATE, epochs=1, layers='heads') model_path = 'C:/Users/Choi Jun Ho/maskrcnn/Mask-RCNN-TF2/lane_detection/lane_mask_rcnn_trained1.h5' model.keras_model.save_weights(model_path)
# # Training - Stage 2 # # Finetune layers from ResNet stage 4 and up # print("Fine tune Resnet stage 4 and up") # model.train(dataset_train, dataset_val, # learning_rate=config.LEARNING_RATE, # epochs=120, # layers='4+', # augmentation=augmentation) # # Training - Stage 3 # Fine tune all layers print("Fine tune all layers") model.train( dataset_train, dataset_val, learning_rate=config.LEARNING_RATE / 10, # learning_rate=config.LEARNING_RATE / 20, epochs=160, layers='all', augmentation=augmentation) elif args.command == "evaluate": # Validation dataset dataset_val = CocoDataset() val_type = "val" if args.year in '2017' else "minival" coco = dataset_val.load_coco(args.dataset, val_type, year=args.year, return_coco=True, auto_download=args.download) dataset_val.prepare() print("Running COCO evaluation on {} images.".format(args.limit))