def prep_coco_dataset(types,
                      config,
                      load_class_ids=None,
                      class_ids=None,
                      generator=False,
                      shuffle=True,
                      augment=False,
                      return_coco=False):
    # Load one or more COCO subsets, e.g. prep_coco_dataset(["train", "val35k"], mrcnn_config).
    # The Mask R-CNN paper trains on the training set plus 35K images from the validation set.
    dataset = CocoDataset()

    for subset in types:
        dataset.load_coco(config.COCO_DATASET_PATH,
                          subset,
                          class_ids=class_ids,
                          load_class_ids=load_class_ids,
                          return_coco=return_coco)

    # All subsets loaded -- now prepare the final dataset.
    dataset.prepare()

    results = dataset

    if generator:
        generator = data_generator(dataset,
                                   config,
                                   batch_size=config.BATCH_SIZE,
                                   shuffle=shuffle,
                                   augment=augment)
        results = [dataset, generator]
    return results
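
A minimal usage sketch, assuming a config object that defines COCO_DATASET_PATH and BATCH_SIZE (e.g. the CocoInferenceConfig used in later examples):

# Hypothetical caller.
mrcnn_config = CocoInferenceConfig()
dataset, train_gen = prep_coco_dataset(["train", "val35k"], mrcnn_config, generator=True)
inputs, targets = next(train_gen)   # one batch of images and targets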
Example #2
def prep_oldshapes_train(init_with=None, FCN_layers=False, batch_sz=5, epoch_steps=4, training_folder="mrcnn_oldshape_training_logs"):
    import mrcnn.shapes    as shapes
    MODEL_DIR = os.path.join(TRAINING_DIR, training_folder)

    # Build configuration object -----------------------------------------------
    config = shapes.ShapesConfig()
    config.BATCH_SIZE      = batch_sz                  # Batch size (# GPUs * images/GPU)
    config.IMAGES_PER_GPU  = batch_sz                  # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]

    # Build shape dataset        -----------------------------------------------
    dataset_train = shapes.ShapesDataset(config)
    dataset_train.load_shapes(3000) 
    dataset_train.prepare()

    # Validation dataset
    dataset_val  = shapes.ShapesDataset(config)
    dataset_val.load_shapes(500)
    dataset_val.prepare()
    
    # Discard any model left over from a previous (notebook) run.
    try:
        del model
        print('Deleted existing model')
        gc.collect()
    except NameError:
        pass
    KB.clear_session()
    model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR, FCN_layers = FCN_layers)

    print(' COCO Model Path       : ', COCO_TRAINING_DIR)
    print(' Checkpoint folder Path: ', MODEL_DIR)
    print(' Model Parent Path     : ', TRAINING_DIR)
    print(' ResNet Model Path     : ', RESNET_TRAINING_DIR)

    model.load_model_weights(init_with = init_with)

    train_generator = data_generator(dataset_train, model.config, shuffle=True,
                                     batch_size=model.config.BATCH_SIZE,
                                     augment = False)
    val_generator = data_generator(dataset_val, model.config, shuffle=True,
                                   batch_size=model.config.BATCH_SIZE,
                                   augment=False)
    model.config.display()
    return [model, dataset_train, dataset_val, train_generator, val_generator, config]
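
A minimal usage sketch, assuming TRAINING_DIR and the shapes dataset code are set up as in this repo:

# Hypothetical call -- small batch for a quick smoke test.
model, ds_train, ds_val, train_gen, val_gen, cfg = prep_oldshapes_train(
    init_with="coco", FCN_layers=True, batch_sz=2, epoch_steps=4)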
Example #3
def prep_newshapes_train(init_with = "last", FCN_layers= False, batch_sz =5, epoch_steps = 4, folder_name= "mrcnn_newshape_training_logs"):
    import mrcnn.new_shapes as new_shapes
    MODEL_DIR = os.path.join(MODEL_PATH, folder_name)

    # Build configuration object -----------------------------------------------
    config = new_shapes.NewShapesConfig()
    config.BATCH_SIZE      = batch_sz                  # Batch size (# GPUs * images/GPU)
    config.IMAGES_PER_GPU  = batch_sz                  # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]

    # Build shape dataset        -----------------------------------------------
    # Training dataset
    dataset_train = new_shapes.NewShapesDataset()
    dataset_train.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_train.prepare()

    # Validation dataset
    dataset_val = new_shapes.NewShapesDataset()
    dataset_val.load_shapes(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_val.prepare()

    # Discard any model left over from a previous (notebook) run.
    try:
        del model
        print('Deleted existing model')
        gc.collect()
    except NameError:
        pass
    KB.clear_session()
    model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,FCN_layers = FCN_layers)

    print('MODEL_PATH        : ', MODEL_PATH)
    print('COCO_MODEL_PATH   : ', COCO_MODEL_PATH)
    print('RESNET_MODEL_PATH : ', RESNET_MODEL_PATH)
    print('MODEL_DIR         : ', MODEL_DIR)
    print('Last Saved Model  : ', model.find_last())

    load_model(model, init_with=init_with)

    train_generator = data_generator(dataset_train, model.config, shuffle=True,
                                     batch_size=model.config.BATCH_SIZE,
                                     augment=False)
    config.display()     
    return [model, dataset_train, train_generator, config]
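
A minimal usage sketch, assuming MODEL_PATH and a previously saved checkpoint exist:

# Hypothetical call -- resumes from the last saved checkpoint.
model, ds_train, train_gen, cfg = prep_newshapes_train(init_with="last", batch_sz=2)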
Example #4
def prep_newshapes_test(init_with='last', FCN_layers=False, batch_sz=5, epoch_steps=4, folder_name="mrcnn_newshape_test_logs"):
    import mrcnn.new_shapes as new_shapes
    MODEL_DIR = os.path.join(MODEL_PATH, folder_name)

    # Build configuration object -----------------------------------------------
    config = new_shapes.NewShapesConfig()
    config.BATCH_SIZE      = batch_sz                  # Batch size (# GPUs * images/GPU)
    config.IMAGES_PER_GPU  = batch_sz                  # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
 
    # Build shape dataset        -----------------------------------------------
    # Test dataset
    dataset_test = new_shapes.NewShapesDataset()
    dataset_test.load_shapes(3000, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
    dataset_test.prepare()


    # Recreate the model in inference mode
    try:
        del model
        print('Deleted existing model')
        gc.collect()
    except NameError:
        pass
    KB.clear_session()
    model = modellib.MaskRCNN(mode="inference", 
                              config=config,
                              model_dir=MODEL_DIR, 
                              FCN_layers = FCN_layers )
        
    print(' COCO Model Path       : ', COCO_MODEL_PATH)
    print(' Checkpoint folder Path: ', MODEL_DIR)
    print(' Model Parent Path     : ', MODEL_PATH)
    print(' ResNet Model Path     : ', RESNET_MODEL_PATH)

    load_model(model, init_with = init_with)

    test_generator = data_generator(dataset_test, model.config, shuffle=True,
                                     batch_size=model.config.BATCH_SIZE,
                                     augment = False)
    model.config.display()     
    return [model, dataset_test, test_generator, config]                                 
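
A minimal usage sketch for the inference-mode setup above:

# Hypothetical call -- loads the last checkpoint saved by the training runs.
model, ds_test, test_gen, cfg = prep_newshapes_test(init_with="last", batch_sz=1)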
Example #5
def prep_newshapes2_dataset(config,
                            image_count,
                            shuffle=True,
                            augment=False,
                            generator=False):
    '''
    Build a NewImagesDataset of `image_count` images and optionally
    return a matching data generator alongside it.
    '''
    dataset = NewImagesDataset(config)
    dataset.load_images(image_count)
    dataset.prepare()

    results = dataset

    if generator:
        generator = data_generator(dataset,
                                   config,
                                   batch_size=config.BATCH_SIZE,
                                   shuffle=shuffle,
                                   augment=augment)
        return [dataset, generator]
    else:
        return results
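
A minimal usage sketch, assuming NewImagesDataset and a config exposing BATCH_SIZE are importable from this repo:

# Hypothetical call -- 500 synthetic images plus a matching generator.
dataset, gen = prep_newshapes2_dataset(config, 500, generator=True)
images, targets = next(gen)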
Example #6
def mrcnn_coco_train(init_weights='last', mode='train', FCN_layers=False,
                     batch_sz=5, epoch_steps=4, training_folder="mrcnn_coco_dev"):

    TRAINING_PATH = os.path.join(TRAINING_DIR, training_folder)

    ##------------------------------------------------------------------------------------
    ## Build configuration object 
    ##------------------------------------------------------------------------------------

    mrcnn_config = CocoInferenceConfig()
    mrcnn_config.NAME               = 'mrcnn'              
    mrcnn_config.TRAINING_PATH      = TRAINING_PATH
    mrcnn_config.COCO_MODEL_PATH    = COCO_MODEL_PATH   
    mrcnn_config.RESNET_MODEL_PATH  = RESNET_MODEL_PATH 
    mrcnn_config.VGG16_MODEL_PATH   = VGG16_MODEL_PATH  
    mrcnn_config.COCO_CLASSES       = None 
    # mrcnn_config.COCO_CLASSES       = [1,2,3,4,5,6,7,8,9,10]
    # mrcnn_config.NUM_CLASSES        = len(mrcnn_config.COCO_CLASSES) + 1
    
    # config.BATCH_SIZE             = batch_sz                  # Batch size is 2 (# GPUs * images/GPU).
    # config.IMAGES_PER_GPU         = batch_sz                  # Must match BATCH_SIZE
    # config.STEPS_PER_EPOCH        = epoch_steps
    # config.FCN_INPUT_SHAPE        = config.IMAGE_SHAPE[0:2]
    # config.DETECTION_MIN_CONFIDENCE = 0.1
    
    ##------------------------------------------------------------------------------------
    ## Build shape dataset for Training and Validation       
    ##------------------------------------------------------------------------------------
    # if args.command == "train":
    # Training dataset. Use the training set and 35K from the
    # validation set, as as in the Mask RCNN paper.
    dataset_train = CocoDataset()
    dataset_train.load_coco(COCO_DATASET_PATH,  "train", class_ids=mrcnn_config.COCO_CLASSES)
    dataset_train.load_coco(COCO_DATASET_PATH, "val35k", class_ids=mrcnn_config.COCO_CLASSES) 
    dataset_train.prepare()

    # Validation dataset
    dataset_val = CocoDataset()
    dataset_val.load_coco(COCO_DATASET_PATH, "minival", class_ids=mrcnn_config.COCO_CLASSES)
    dataset_val.prepare()

    # Rebuild the model in the requested mode, discarding any leftover model.
    try:
        del model
        print('Deleted existing model')
        gc.collect()
    except NameError:
        pass
    KB.clear_session()
    mrcnn_model = mrcnn_modellib.MaskRCNN(mode=mode, config=mrcnn_config, model_dir=TRAINING_PATH)

    ##------------------------------------------------------------------------------------
    ## Load Mask RCNN Model Weight file
    ##------------------------------------------------------------------------------------
    mrcnn_model.load_model_weights( init_with = init_weights)   

    print('==========================================')
    print(" MRCNN MODEL Load weight file COMPLETE    ")
    print('==========================================')
    # print('\n\n\n')
    # print(' Checkpoint directory  : ', TRAINING_DIR)
    # print(' Checkpoint folder     : ', TRAINING_PATH)
    # print(' COCO   Model Path     : ', COCO_MODEL_PATH)
    # print(' ResNet Model Path     : ', RESNET_MODEL_PATH)
    # print(' VGG16  Model Path     : ', VGG16_MODEL_PATH)
    
    # mrcnn_config.display()  
    mrcnn_model.layer_info()
    # print('\n Outputs: ')
    # pp.pprint(mrcnn_model.keras_model.outputs)
    
    trainable = mrcnn_model.get_trainable_layers()
    for i in trainable:
        print(' Layer:', i.name)

    train_generator = data_generator(dataset_train, mrcnn_model.config, shuffle=True,
                                     batch_size = mrcnn_model.config.BATCH_SIZE,
                                     augment = False)   


    val_generator   = data_generator(dataset_val, mrcnn_model.config, shuffle=True, 
                                     batch_size = mrcnn_model.config.BATCH_SIZE,
                                     augment=False)                                           
    mrcnn_config.display()     
    
    return [mrcnn_model, dataset_train, dataset_val, train_generator, val_generator, mrcnn_config]
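
A minimal usage sketch, assuming COCO_DATASET_PATH and the pretrained weight paths above are configured (note that batch_sz and epoch_steps are currently unused by this function):

# Hypothetical call.
mrcnn_model, ds_train, ds_val, train_gen, val_gen, cfg = mrcnn_coco_train(
    init_weights='coco', mode='train')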
Example #7
def prep_newshapes_train(init_with = "last", FCN_layers= False, batch_sz =5, epoch_steps = 4, training_folder= None):

    MODEL_DIR = os.path.join(TRAINING_DIR, training_folder)

    # Build configuration object -----------------------------------------------
    config = new_shapes.NewShapesConfig()
    config.BATCH_SIZE      = batch_sz                  # Batch size (# GPUs * images/GPU)
    config.IMAGES_PER_GPU  = batch_sz                  # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]

    # Build shape dataset        -----------------------------------------------
    # Training dataset
    dataset_train = new_shapes.NewShapesDataset(config)
    dataset_train.load_shapes(10000) 
    dataset_train.prepare()

    # Validation dataset
    dataset_val = new_shapes.NewShapesDataset(config)
    dataset_val.load_shapes(2500)
    dataset_val.prepare()

    # Discard any model left over from a previous (notebook) run.
    try:
        del model
        print('Deleted existing model')
        gc.collect()
    except NameError:
        pass
    KB.clear_session()
    model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR,FCN_layers = FCN_layers)

    print('TRAINING_DIR        : ', TRAINING_DIR)
    print('COCO_TRAINING_DIR   : ', COCO_TRAINING_DIR)
    print('RESNET_TRAINING_DIR : ', RESNET_TRAINING_DIR)
    print('MODEL_DIR           : ', MODEL_DIR)
    print('Last Saved Model    : ', model.find_last())
    # exclude_layers = \
           # ['fcn_block1_conv1' 
           # ,'fcn_block1_conv2' 
           # ,'fcn_block1_pool' 
           # ,'fcn_block2_conv1'
           # ,'fcn_block2_conv2' 
           # ,'fcn_block2_pool'  
           # ,'fcn_block3_conv1' 
           # ,'fcn_block3_conv2' 
           # ,'fcn_block3_conv3' 
           # ,'fcn_block3_pool'  
           # ,'fcn_block4_conv1' 
           # ,'fcn_block4_conv2' 
           # ,'fcn_block4_conv3' 
           # ,'fcn_block4_pool'  
           # ,'fcn_block5_conv1' 
           # ,'fcn_block5_conv2' 
           # ,'fcn_block5_conv3' 
           # ,'fcn_block5_pool'  
           # ,'fcn_fc1'          
           # ,'dropout_1'        
           # ,'fcn_fc2'          
           # ,'dropout_2'        
           # ,'fcn_classify'     
           # ,'fcn_bilinear'     
           # ,'fcn_heatmap_norm' 
           # ,'fcn_scoring'      
           # ,'fcn_heatmap'      
           # ,'fcn_norm_loss']
    # load_model(model, init_with = 'last', exclude = exclude_layers)
    model.load_model_weights(init_with = init_with)
    
    # print('=====================================')
    # print(" Load second weight file ?? ")
    # model.keras_model.load_weights('E:/Models/vgg16_weights_tf_dim_ordering_tf_kernels.h5', by_name= True)

    train_generator = data_generator(dataset_train, model.config, shuffle=True,
                                     batch_size=model.config.BATCH_SIZE,
                                     augment=False)

    val_generator = data_generator(dataset_val, model.config, shuffle=True,
                                   batch_size=model.config.BATCH_SIZE,
                                   augment=False)
    config.display()     
    return [model, dataset_train, dataset_val, train_generator, val_generator, config]
Example #8
def prep_newshapes_train2(init_with = "last",  config=None):

    import mrcnn.new_shapes as new_shapes
    config.CHECKPOINT_FOLDER = os.path.join(TRAINING_DIR, config.CHECKPOINT_FOLDER)

    # Build shape dataset        -----------------------------------------------
    # Training dataset
    dataset_train = new_shapes.NewShapesDataset(config)
    dataset_train.load_shapes(config.TRAINING_IMAGES) 
    dataset_train.prepare()

    # Validation dataset
    dataset_val = new_shapes.NewShapesDataset(config)
    dataset_val.load_shapes(config.VALIDATION_IMAGES)
    dataset_val.prepare()

    # Discard any model left over from a previous (notebook) run.
    try:
        del model
        print('Deleted existing model')
        gc.collect()
    except NameError:
        pass
    KB.clear_session()
    model = modellib.MaskRCNN(mode="training", config=config, model_dir=config.CHECKPOINT_FOLDER, FCN_layers = config.FCN_LAYERS)

    print('TRAINING_DIR        : ', TRAINING_DIR)
    print('COCO_TRAINING_DIR   : ', COCO_TRAINING_DIR)
    print('RESNET_TRAINING_DIR : ', RESNET_TRAINING_DIR)
    print('CHECKPOINT_DIR      : ', config.CHECKPOINT_FOLDER)
    print('Last Saved Model    : ', model.find_last())
    # (Same optional exclude_layers list as in Example #7 above.)
    # load_model(model, init_with = 'last', exclude = exclude_layers)
    model.load_model_weights(init_with = init_with)
    
    # print('=====================================')
    # print(" Load second weight file ?? ")
    # model.keras_model.load_weights('E:/Models/vgg16_weights_tf_dim_ordering_tf_kernels.h5', by_name= True)

    train_generator = data_generator(dataset_train, model.config, shuffle=True,
                                     batch_size=model.config.BATCH_SIZE,
                                     augment=False)

    val_generator = data_generator(dataset_val, model.config, shuffle=True,
                                   batch_size=model.config.BATCH_SIZE,
                                   augment=False)
    config.display()     
    return [model, dataset_train, dataset_val, train_generator, val_generator, config]
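
A minimal usage sketch for the config-driven variant, assuming a NewShapesConfig whose attributes mirror the ones read above (CHECKPOINT_FOLDER, FCN_LAYERS, TRAINING_IMAGES, VALIDATION_IMAGES):

# Hypothetical attribute values.
cfg = new_shapes.NewShapesConfig()
cfg.CHECKPOINT_FOLDER, cfg.FCN_LAYERS = "newshapes_ckpts", True
cfg.TRAINING_IMAGES, cfg.VALIDATION_IMAGES = 10000, 2500
model, ds_train, ds_val, train_gen, val_gen, cfg = prep_newshapes_train2("last", cfg)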
Example #9
def prep_newshapes_test(init_with='last', FCN_layers=False, batch_sz=5, epoch_steps=4, training_folder="mrcnn_newshape_test_logs"):

    MODEL_DIR = os.path.join(TRAINING_DIR, training_folder)

    # Build configuration object -----------------------------------------------
    config = new_shapes.NewShapesConfig()
    config.BATCH_SIZE      = batch_sz                  # Batch size (# GPUs * images/GPU)
    config.IMAGES_PER_GPU  = batch_sz                  # Must match BATCH_SIZE
    config.STEPS_PER_EPOCH = epoch_steps
    config.FCN_INPUT_SHAPE = config.IMAGE_SHAPE[0:2]
    config.DETECTION_MIN_CONFIDENCE = 0.1
    # Build shape dataset        -----------------------------------------------
    # Test dataset
    dataset_test = new_shapes.NewShapesDataset(config)
    dataset_test.load_shapes(3000)
    dataset_test.prepare()


    # Recreate the model in inference mode
    try:
        del model
        print('Deleted existing model')
        gc.collect()
    except NameError:
        pass
    KB.clear_session()
    model = modellib.MaskRCNN(mode="inference", 
                              config=config,
                              model_dir=MODEL_DIR, 
                              FCN_layers = FCN_layers )
        
    print(' COCO Model Path       : ', COCO_TRAINING_DIR)
    print(' Checkpoint folder Path: ', MODEL_DIR)
    print(' Model Parent Path     : ', TRAINING_DIR)
    print(' ResNet Model Path     : ', RESNET_TRAINING_DIR)
    # (Same optional exclude_layers list as in Example #7 above.)
    # load_model(model, init_with = init_with, exclude = exclude_layers)
    model.load_model_weights(init_with=init_with)

    # print('=====================================')
    # print(" Load second weight file ?? ")
    # model.keras_model.load_weights('E:/Models/vgg16_weights_tf_dim_ordering_tf_kernels.h5', by_name= True )
    
    test_generator = data_generator(dataset_test, model.config, shuffle=True,
                                     batch_size=model.config.BATCH_SIZE,
                                     augment = False)
    model.config.display()     
    return [model, dataset_test, test_generator, config]                                 
Example #10
def mrcnn_coco_test(init_weights='last', mode='inference', batch_sz=5, epoch_steps=4, training_folder="mrcnn_coco_dev"):

    TRAINING_PATH = os.path.join(TRAINING_DIR, training_folder)

    mrcnn_config = CocoInferenceConfig()
    mrcnn_config.NAME               = 'mrcnn'              
    mrcnn_config.TRAINING_PATH      = TRAINING_PATH
    mrcnn_config.COCO_MODEL_PATH    = COCO_MODEL_PATH   
    mrcnn_config.RESNET_MODEL_PATH  = RESNET_MODEL_PATH 
    mrcnn_config.VGG16_MODEL_PATH   = VGG16_MODEL_PATH  
    mrcnn_config.COCO_CLASSES       = None 
    # mrcnn_config.NUM_CLASSES      = len(mrcnn_config.COCO_CLASSES) + 1

    ##------------------------------------------------------------------------------------
    ## Build COCO dataset for Testing
    ##------------------------------------------------------------------------------------
    # if args.command == "train":
    # Training dataset. Use the training set and 35K from the validation set, as as in the Mask RCNN paper.
    dataset_test = CocoDataset()
    # dataset_test.load_coco(COCO_DATASET_PATH,  "train", class_ids=mrcnn_config.COCO_CLASSES)
    dataset_test.load_coco(COCO_DATASET_PATH, "val", class_ids=mrcnn_config.COCO_CLASSES) 
    dataset_test.prepare()

    # Validation dataset
    # dataset_val = CocoDataset()
    # dataset_val.load_coco(COCO_DATASET_PATH, "minival", class_ids=mrcnn_config.COCO_CLASSES)
    # dataset_val.prepare()

    # Recreate the model in inference mode, discarding any leftover model.
    try:
        del model
        print('Deleted existing model')
        gc.collect()
    except NameError:
        pass
    KB.clear_session()
    mrcnn_model = mrcnn_modellib.MaskRCNN(mode=mode, config=mrcnn_config, model_dir=TRAINING_PATH)

    ##------------------------------------------------------------------------------------
    ## Load Mask RCNN Model Weight file
    ##------------------------------------------------------------------------------------
    mrcnn_model.load_model_weights(init_with = init_weights)

    print('==========================================')
    print(" MRCNN MODEL Load weight file COMPLETE    ")
    print('==========================================')

    
    # mrcnn_config.display()  
    mrcnn_model.layer_info()
    # print('\n Outputs: ')
    # pp.pprint(mrcnn_model.keras_model.outputs)
    
    for i in  mrcnn_model.get_trainable_layers():
        print(' Layer:', i.name)

    test_generator = data_generator(dataset_test, mrcnn_model.config, shuffle=True,
                                     batch_size = mrcnn_model.config.BATCH_SIZE,
                                     augment = False)   

    mrcnn_config.display()     
    
    return [mrcnn_model, dataset_test, test_generator, mrcnn_config]
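
A minimal usage sketch, assuming COCO_DATASET_PATH points at a COCO download with a "val" subset:

# Hypothetical call.
mrcnn_model, ds_test, test_gen, cfg = mrcnn_coco_test(init_weights='last')
inputs, targets = next(test_gen)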
Example #11
def build_heatmap_files(mrcnn_model,
                        dataset,
                        iterations=5,
                        start_from=0,
                        dest_path=None):
    '''
    Run the model over `dataset` and write each image's predicted and
    ground-truth heatmaps to a compressed .npz file under `dest_path`.
    '''
    assert mrcnn_model.mode == "trainfcn", "Create model in trainfcn mode."
    batch_size = mrcnn_model.config.BATCH_SIZE
    log("Starting for {} iterations - batch size of each iteration: {}".
        format(iterations, batch_size))
    log(" Output destination: {}".format(dest_path))
    tr_generator = data_generator(dataset,
                                  mrcnn_model.config,
                                  shuffle=False,
                                  augment=False,
                                  batch_size=mrcnn_model.config.BATCH_SIZE,
                                  image_index=start_from)

    ## Start main loop
    epoch_idx = 0
    for epoch_idx in range(iterations):
        tm_start = time.time()

        train_batch_x, train_batch_y = next(tr_generator)
        print(
            ' ==> mrcnn_model: step {} of {} iterations, image_id: {} '.format(
                epoch_idx, iterations, train_batch_x[1][:, 0]))

        # print('   length of train_batch_x:', len(train_batch_x), ' number of things in batch x :', train_batch_x[1].shape)
        # for i in train_batch_x:
        # print('       ', i.shape)
        # print('length of train_batch_y:', len(train_batch_y))

        # results = get_layer_output_1(mrcnn_model.keras_model, train_batch_x, [0,1,2,3], 1)

        results = mrcnn_model.keras_model.predict(train_batch_x)

        # pr_hm_norm, gt_hm_norm, pr_hm_scores, gt_hm_scores = results[:4]

        for i in range(batch_size):
            # print('  pr_hm_norm shape   :', results[0][i].shape)
            # print('  pr_hm_scores shape :', results[1][i].shape)
            # print('  gt_hm_norm shape   :', results[2][i].shape)
            # print('  gt_hm_scores shape :', results[3][i].shape)
            image_id = int(train_batch_x[1][i, 0])

            coco_image_id = dataset.image_info[image_id]['id']
            coco_filename = os.path.basename(
                dataset.image_info[image_id]['path'])

            ## If we want to save the files with a sequence # 0,1,2,.... which is the index of dataset.image_info[index] use this:
            # filename = 'hm_{:012d}.npz'.format(image_id)

            ## If we want to use the coco_id as the file name, use the following:
            filename = 'hm_{:012d}.npz'.format(coco_image_id)

            print(
                '  output: {}  image_id: {}  coco_image_id: {} coco_filename: {} output file: {}'
                .format(i, image_id, coco_image_id, coco_filename, filename))
            np.savez_compressed(os.path.join(dest_path, filename),
                                input_image_meta=train_batch_x[1][i],
                                pr_hm_norm=results[0][i],
                                pr_hm_scores=results[1][i],
                                gt_hm_norm=results[2][i],
                                gt_hm_scores=results[3][i],
                                coco_info=np.array(
                                    [coco_image_id, coco_filename]))
        tm_stop = time.time()
        print(' ==> Elapsed time {:.4f}s   number of inputs in batch: {}'.
              format(tm_stop - tm_start, len(train_batch_x)))

    print('Final : mrcnn_model epoch_idx {}   iterations {}'.format(
        epoch_idx, iterations))
    return
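
A minimal sketch of reading one of the saved heatmap files back; the file name is hypothetical and follows the 'hm_{:012d}.npz' pattern used above, with dest_path as passed to build_heatmap_files:

import numpy as np
data = np.load(os.path.join(dest_path, 'hm_000000000139.npz'))
print(data['pr_hm_norm'].shape, data['gt_hm_scores'].shape, data['coco_info'])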
Example #12
    def train_in_batches(self,
                         train_dataset,
                         val_dataset,
                         learning_rate,
                         layers,
                         losses=None,
                         epochs_to_run=1,
                         batch_size=0,
                         steps_per_epoch=0):
        '''
        Train the model in explicit batches.
        train_dataset,
        val_dataset:    Training and validation Dataset objects.

        learning_rate:  The learning rate to train with

        epochs_to_run:  Number of additional epochs to run in this call.
                        Training resumes from self.epoch, so previously
                        completed epochs count toward the total.

        layers:         Allows selecting which layers to train. It can be:
                        - A regular expression to match layer names to train
                        - One of these predefined values:
                        heads: The RPN, classifier and mask heads of the network
                        all: All the layers
                        3+: Train Resnet stage 3 and up
                        4+: Train Resnet stage 4 and up
                        5+: Train Resnet stage 5 and up
        '''
        assert self.mode == "training", "Create model in training mode."

        # Map the pre-defined layer-group keys to their regular expressions
        # and join them into a single regex.
        train_regex_list = [self.layer_regex[x] for x in layers]
        layers = '|'.join(train_regex_list)
        print('layers regex :', layers)

        if batch_size == 0:
            batch_size = self.config.BATCH_SIZE

        if steps_per_epoch == 0:
            steps_per_epoch = self.config.STEPS_PER_EPOCH

        # Data generators
        train_generator = data_generator(train_dataset,
                                         self.config,
                                         shuffle=True,
                                         batch_size=batch_size)
        val_generator = data_generator(val_dataset,
                                       self.config,
                                       shuffle=True,
                                       batch_size=batch_size,
                                       augment=False)

        log("    Last epoch completed : {} ".format(self.epoch))
        log("    Starting from epoch  : {} for {} epochs".format(
            self.epoch, epochs_to_run))
        log("    Learning Rate        : {} ".format(learning_rate))
        log("    Steps per epoch      : {} ".format(steps_per_epoch))
        log("    Batchsize            : {} ".format(batch_size))
        log("    Checkpoint Folder    : {} ".format(self.checkpoint_path))
        epochs = self.epoch + epochs_to_run

        from tensorflow.python.platform import gfile
        if not gfile.IsDirectory(self.log_dir):
            log('Creating checkpoint folder')
            gfile.MakeDirs(self.log_dir)
        else:
            log('Checkpoint folder already exists')

        self.set_trainable(layers)
        self.compile(learning_rate, self.config.LEARNING_MOMENTUM, losses)

        ## Get deduped metric names from the Keras model
        ## (adapted from _get_deduped_metrics_names in keras/engine/training.py).
        out_labels = self.get_deduped_metrics_names()
        print(' ====> out_labels : ', out_labels)

        ## setup Progress Bar callback
        callback_metrics = out_labels + ['val_' + n for n in out_labels]
        print(' Callback metrics monitored by progbar')
        pp.pprint(callback_metrics)

        progbar = keras.callbacks.ProgbarLogger(count_mode='steps')
        progbar.set_model(self.keras_model)
        progbar.set_params({
            'epochs': epochs,
            'steps': steps_per_epoch,
            'verbose': 1,
            'do_validation': False,
            'metrics': callback_metrics,
        })

        ## setup Checkpoint callback
        chkpoint = keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                                   monitor='val_loss',
                                                   verbose=1,
                                                   save_best_only=True,
                                                   save_weights_only=True)
        chkpoint.set_model(self.keras_model)

        progbar.on_train_begin()
        epoch_idx = self.epoch

        if epoch_idx >= epochs:
            print(
                'Final epoch {} has already completed - Training will not proceed'
                .format(epochs))
        else:
            while epoch_idx < epochs:
                progbar.on_epoch_begin(epoch_idx)

                for steps_index in range(steps_per_epoch):
                    batch_logs = {}
                    # print(' self.epoch {}   epochs {}  step {} '.format(self.epoch, epochs, steps_index))
                    batch_logs['batch'] = steps_index
                    batch_logs['size'] = batch_size
                    progbar.on_batch_begin(steps_index, batch_logs)

                    train_batch_x, train_batch_y = next(train_generator)

                    outs = self.keras_model.train_on_batch(
                        train_batch_x, train_batch_y)

                    if not isinstance(outs, list):
                        outs = [outs]
                    for l, o in zip(out_labels, outs):
                        batch_logs[l] = o

                    progbar.on_batch_end(steps_index, batch_logs)

                    # print(outs)
                progbar.on_epoch_end(epoch_idx, {})
                # if (epoch_idx % 10) == 0:
                chkpoint.on_epoch_end(epoch_idx, batch_logs)
                epoch_idx += 1

            # if epoch_idx != self.epoch:
            # chkpoint.on_epoch_end(epoch_idx -1, batch_logs)
            self.epoch = max(epoch_idx - 1, epochs)

            print('Final : self.epoch {}   epochs {}'.format(
                self.epoch, epochs))
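
A minimal usage sketch, assuming a model built with mode="training" and that 'heads' is a key of self.layer_regex:

# Hypothetical call -- trains the head layers for two more epochs.
model.train_in_batches(dataset_train, dataset_val,
                       learning_rate=model.config.LEARNING_RATE,
                       layers=['heads'], epochs_to_run=2)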
Example #13
    def train(self,
              train_dataset,
              val_dataset,
              learning_rate,
              layers=None,
              losses=None,
              epochs=0,
              epochs_to_run=0,
              batch_size=0,
              steps_per_epoch=0):
        '''
        Train the model.
        train_dataset, 
        val_dataset:    Training and validation Dataset objects.
        
        learning_rate:  The learning rate to train with
        
        layers:         Allows selecting which layers to train. It can be:
                        - A regular expression to match layer names to train
                        - One of these predefined values:
                        heads: The RPN, classifier and mask heads of the network
                        all: All the layers
                        3+: Train Resnet stage 3 and up
                        4+: Train Resnet stage 4 and up
                        5+: Train Resnet stage 5 and up

        losses:         List of losses to monitor.

        epochs:         Number of training epochs. Note that previous training
                        epochs are considered to be done already, so this sets
                        the total number of epochs rather than the number run
                        in this particular call.

        epochs_to_run:  Number of additional epochs to run; when given, it
                        overrides the 'epochs' parameter.

        '''
        assert self.mode == "training", "Create model in training mode."

        if batch_size == 0:
            batch_size = self.config.BATCH_SIZE
        if epochs_to_run > 0:
            epochs = self.epoch + epochs_to_run
        if steps_per_epoch == 0:
            steps_per_epoch = self.config.STEPS_PER_EPOCH

        # Map the pre-defined layer-group keys to their regular expressions
        # and join them into a single regex.
        train_regex_list = [self.layer_regex[x] for x in layers]
        layers = '|'.join(train_regex_list)
        print('layers regex :', layers)

        # Data generators
        train_generator = data_generator(train_dataset,
                                         self.config,
                                         shuffle=True,
                                         batch_size=batch_size)
        val_generator = data_generator(val_dataset,
                                       self.config,
                                       shuffle=True,
                                       batch_size=batch_size,
                                       augment=False)

        # my_callback = MyCallback()

        # Callbacks
        ## The model checkpoint callback originally monitored 'loss'; changed to 'val_loss' (the default) 2-5-18
        callbacks = [
            keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                        histogram_freq=0,
                                        batch_size=32,
                                        write_graph=True,
                                        write_grads=False,
                                        write_images=True,
                                        embeddings_freq=0,
                                        embeddings_layer_names=None,
                                        embeddings_metadata=None),
            keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                            mode='auto',
                                            period=1,
                                            monitor='val_loss',
                                            verbose=1,
                                            save_best_only=True,
                                            save_weights_only=True),
            keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                              mode='auto',
                                              factor=0.3,
                                              cooldown=30,
                                              patience=50,
                                              min_lr=0.00001,
                                              verbose=1),
            keras.callbacks.EarlyStopping(monitor='val_loss',
                                          mode='auto',
                                          min_delta=1e-5,
                                          patience=200,
                                          verbose=1)
        ]

        # Train

        self.set_trainable(layers)
        self.compile(learning_rate, self.config.LEARNING_MOMENTUM, losses)

        log("Starting at epoch {} of {} epochs. LR={}\n".format(
            self.epoch, epochs, learning_rate))
        log("Steps per epochs {} ".format(steps_per_epoch))
        log("Batch size       {} ".format(batch_size))
        log("Checkpoint Path: {} ".format(self.checkpoint_path))

        self.keras_model.fit_generator(
            train_generator,
            initial_epoch=self.epoch,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            callbacks=callbacks,
            validation_data=val_generator,
            validation_steps=self.config.VALIDATION_STEPS,
            max_queue_size=100,
            workers=1,  # max(self.config.BATCH_SIZE // 2, 2),
            use_multiprocessing=False)
        self.epoch = max(self.epoch, epochs)

        print('Final : self.epoch {}   epochs {}'.format(self.epoch, epochs))
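
A minimal usage sketch, assuming the same layer-group keys as train_in_batches above:

# Hypothetical call -- fine-tunes all layers for 10 additional epochs.
model.train(dataset_train, dataset_val,
            learning_rate=model.config.LEARNING_RATE / 10,
            layers=['all'], epochs_to_run=10)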