def train_model(dataset_dir: str, anno_path: str, model: modellib.MaskRCNN,
                training_set: List[str], validation_set: List[str]) -> None:
    """Train Mask-RCNN model

    Args:
        dataset_dir: data set directory
        anno_path: path of annotation file
        model: Mask RCNN model to train
        training_set: image file names in training set
        validation_set: image file names in validation set
    """
    # Training dataset.
    dataset_train = CarDamageDataset()
    dataset_train.load_car_damage(dataset_dir, anno_path, fnames=training_set)
    dataset_train.prepare()

    # Validation dataset
    dataset_val = CarDamageDataset()
    dataset_val.load_car_damage(dataset_dir, anno_path, fnames=validation_set)
    dataset_val.prepare()

    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    print("Training network heads")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=10,
                layers='heads')
    return None
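
# A minimal, hedged sketch (not part of the original example): several later
# examples in this listing follow the heads-only stage with a second pass that
# unfreezes all layers at a reduced learning rate. It assumes the same
# module-level `config` and datasets prepared as in train_model() above.
def finetune_all_layers(model: modellib.MaskRCNN, dataset_train, dataset_val,
                        total_epochs: int = 20) -> None:
    """Optional second training stage: fine-tune every layer at LEARNING_RATE / 10.

    Mask R-CNN's `epochs` argument is cumulative (the target epoch number), so
    `total_epochs` should be the previous epoch count plus the extra epochs.
    """
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE / 10,
                epochs=total_epochs,
                layers='all')
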
def train(config=SeptinConfig()):
    """Train the model."""
    # Training dataset.
    dataset_train = SeptinDataset()
    dataset_train.load_Septin("train")
    dataset_train.prepare()
    print('Train: %d' % len(dataset_train.image_ids))

    # Validation dataset
    dataset_val = SeptinDataset()
    dataset_val.load_Septin("test")
    dataset_val.prepare()
    print('Test: %d' % len(dataset_val.image_ids))

    # `config` comes from the function argument (SeptinConfig() by default)
    config.display()

    # define the model
    model = MaskRCNN(mode='training', model_dir='./', config=config)
    # load weights (mscoco) and exclude the output layers
    model.load_weights('mask_rcnn_coco.h5',
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])
    # train weights (output layers or 'heads')
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=8,
                layers='heads')
Example #3
def main():
    # load the train and test splits (the data is divided into train/test/validation sets)
    train_set = prep_dataset(os.path.join(DATA_PATH, 'train'))
    test_set = prep_dataset(os.path.join(DATA_PATH, 'test'))

    # generate model
    config = LicensePlateConfig()
    model = MaskRCNN(mode='training',
                     model_dir=os.path.join(WEIGHT_PATH, 'log/'),
                     config=config)

    # load pre-trained MS COCO weights
    model.load_weights(os.path.join(WEIGHT_PATH, 'mask_rcnn_coco.h5'),
                       by_name=True,
                       exclude=[
                           'mrcnn_class_logits', 'mrcnn_bbox_fc', 'mrcnn_bbox',
                           'mrcnn_mask'
                       ])

    # train top layer
    model.train(train_set,
                test_set,
                learning_rate=config.LEARNING_RATE,
                epochs=10,
                layers='heads')

    # adjust learning rate for finetuning to avoid overfitting
    config.LEARNING_RATE = 1e-5

    # finetune all layers
    model.train(train_set,
                test_set,
                learning_rate=config.LEARNING_RATE,
                epochs=5,
                layers='all')
Example #4
def train(model: MaskRCNN,
          learning_rate=config.LEARNING_RATE,
          epochs=1,
          layers='all'):
    logging.warning(f"Start {layers} training...")
    with tf.device("/device:GPU:0"):
        model.train(dataset_train,
                    dataset_val,
                    learning_rate=learning_rate,
                    epochs=epochs,
                    layers=layers)
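
# Hedged usage sketch (an assumption, not shown in the original example): the
# wrapper above relies on module-level `dataset_train`, `dataset_val` and
# `config`, so the staged-training pattern used elsewhere in this listing
# becomes two calls. `epochs` is cumulative, so the second call continues from
# epoch 10 up to epoch 20. `model` is assumed to be defined elsewhere.
train(model, epochs=10, layers='heads')
train(model, learning_rate=config.LEARNING_RATE / 10, epochs=20, layers='all')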
Example #5
def train():
    df = pd.read_csv(config.train_path)
    df = df.sample(frac=1).reset_index(drop=True)
    x_train = df['img_path'].values.tolist()
    y_train = df['box'].values.tolist()
    dev_sample_index = -1 * int(0.1 * float(len(y_train)))
    x_train, x_val = x_train[:dev_sample_index], x_train[dev_sample_index:]
    y_train, y_val = y_train[:dev_sample_index], y_train[dev_sample_index:]
    print('train:{}, val:{}, all:{}'.format(len(y_train), len(y_val),
                                            df.shape[0]))

    # load the train dataset
    train_set = MyDataset()
    train_set.load_dataset(x_train, y_train, mode='train')
    train_set.prepare()

    # load the val dataset
    val_set = MyDataset()
    val_set.load_dataset(x_val, y_val, mode='val')
    val_set.prepare()

    model_config = config.TrainConfig()
    # define the model
    model = MaskRCNN(mode='training', model_dir='./', config=model_config)
    model.keras_model.metrics_tensors = []

    # load weights (mscoco) and exclude the output layers
    model.load_weights(config.mrcnn_model_path,
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])

    # save the best checkpoint weights after each epoch
    checkpoint_cb = keras.callbacks.ModelCheckpoint(config.keras_model_dir,
                                                    verbose=0,
                                                    save_best_only=True,
                                                    save_weights_only=True)
    callbacks = [checkpoint_cb]
    model.train(train_set,
                val_set,
                learning_rate=model_config.LEARNING_RATE,
                epochs=config.epochs_num,
                layers='heads',
                custom_callbacks=callbacks)
def main(ann_file_path, images_path, learning_rate, epochs, val_percentage):
    # train set
    train_set = OurDataset()
    train_set.load_dataset(images_path,
                           ann_file_path,
                           is_train=True,
                           val_percentage=val_percentage)
    train_set.prepare()
    print('Train images: %d' % len(train_set.image_ids))

    # val set
    val_set = OurDataset()
    val_set.load_dataset(images_path,
                         ann_file_path,
                         is_train=False,
                         val_percentage=val_percentage)
    val_set.prepare()
    print('Validation images: %d' % len(val_set.image_ids))

    # prepare config
    config = TrainConfig()

    # define the model
    model = MaskRCNN(mode='training',
                     model_dir='/home/test/data/trained_models/',
                     config=config)

    # load weights (mscoco)
    model.load_weights('/home/test/data/mask_rcnn_coco.h5',
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])

    print('START TRAINING')

    # train all network layers
    model.train(train_set,
                val_set,
                learning_rate=config.LEARNING_RATE,
                epochs=epochs,
                layers='all')

    print('Training DONE')
Example #7
def train(model: MaskRCNN, path_to_dataset: str = paths.IMAGES_PATH) -> None:
    # Training dataset
    dataset_train = PlateDataset()
    dataset_train.load_plates(path_to_dataset, "all_pics_aug/",
                              path_to_dataset + "all_pics_aug/ann.json")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = PlateDataset()
    dataset_val.load_plates(path_to_dataset, "all_pics_aug/",
                            path_to_dataset + "all_pics_aug/hundred.json")
    dataset_val.prepare()

    print("Training network heads")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=EPOCHS_NUMBER,
                layers='heads')
def createModel(layer):
    # prepare config
    config = HumanConfig()
    config.display()
    # define the model
    model = MaskRCNN(mode='training', model_dir='./model', config=config)
    # load weights (mscoco) and exclude the output layers
    model.load_weights('./mask_rcnn_coco.h5',
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask", "conv1"
                       ])
    for lay in layer:
        # train the layer group requested for this pass
        model.train(train_set,
                    test_set,
                    learning_rate=config.LEARNING_RATE,
                    epochs=35,
                    layers=lay)
Example #9
def main():
    config = ObjectConfig()
    model = MaskRCNN(mode='training', model_dir='./', config=config)
    model.load_weights('../mrcnn/mask_rcnn_coco.h5',
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])

    train_set = ObjectDataset()
    train_set.load_dataset('train')
    train_set.prepare()
    print('Train: %d' % len(train_set.image_ids))

    # no validation split is loaded here, so the training set doubles as the validation set
    model.train(train_set,
                train_set,
                learning_rate=config.LEARNING_RATE,
                epochs=1,
                layers='heads')
Example #10
    def fit(self, train_dataset: DigitDataset, val_dataset: DigitDataset,
            epochs: int = 20, layers: str = 'heads', logs_path: str = paths.WEIGHT_LOGS_PATH,
            weights_path: str = paths.WEIGHTS_PATH + 'pretrained/mask_rcnn_coco.h5') -> None:
        train_dataset.load()
        train_dataset.prepare()
        val_dataset.load()
        val_dataset.prepare()

        self.config.EPOCHS = epochs
        self.config.STEPS_PER_EPOCH = train_dataset.set_size

        model = MaskRCNN(mode='training', config=self.config, model_dir=logs_path)
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])

        print("Training network")
        model.train(train_dataset, val_dataset,
                    learning_rate=self.config.LEARNING_RATE,
                    epochs=self.config.EPOCHS,
                    layers=layers)
Example #11
def run(path_to_csv="train.csv"):
    wandb.init()
    data = utils.load(path_to_csv)
    classes = utils.determine_classes(data)

    train, test = train_test_split(data, test_size=0.2)
    train_set = prepare_dataset(train, "./train/", classes)
    valid_set = prepare_dataset(test, "./train/", classes)

    print('Train: %d' % len(train_set.image_ids))
    print('Test: %d' % len(valid_set.image_ids))

    config = TrainingConfig()
    callbacks = []
    if wandb_found:
        callbacks.append(WandbCallback())
        config.STEPS_PER_EPOCH = wandb.config.STEPS_PER_EPOCH
        config.LEARNING_RATE = wandb.config.LEARNING_RATE
        config.LEARNING_MOMENTUM = wandb.config.LEARNING_MOMENTUM
        config.WEIGHT_DECAY = wandb.config.WEIGHT_DECAY
    else:
        # configure params through directly editing the TrainingConfig class
        pass
    model = MaskRCNN(mode="training", model_dir="train", config=config)
    model.load_weights("mask_rcnn_coco.h5",
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])
    # tb = keras.callbacks.TensorBoard(log_dir="./logs")
    model.train(train_set,
                valid_set,
                learning_rate=config.LEARNING_RATE,
                epochs=wandb.config.EPOCHS,
                layers="heads",
                custom_callbacks=callbacks)
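
# Hedged sketch (an assumption, not in the original script, and assuming `keras`
# is imported): the TensorBoard callback commented out above can be passed
# through `custom_callbacks` exactly like the WandbCallback. It has to be
# created and appended before the model.train(...) call above is executed.
tensorboard_cb = keras.callbacks.TensorBoard(log_dir="./logs", write_graph=False)
callbacks.append(tensorboard_cb)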
Example #12
class MaskR:

    def __init__(self, model_dir, init_with='coco'):

        self.model_dir  = model_dir
        self.init_with  = init_with
        self.test_model = None



    def train(self, dataset_train, dataset_val, epochs):
        '''Train Mask R-CNN on the given datasets for `epochs` epochs,
        starting from the weights selected by `init_with`.
        '''
        t0 = time.time()

        print ('GPU available:', tf.test.is_gpu_available())


        self.mode = 'training'
        self.config = C.TrainingConfig()

        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.model = MaskRCNN(self.mode, self.config, self.model_dir)

        # Which weights to start with?
        # imagenet, coco, or last

        # Local path to trained weights file
        COCO_MODEL_PATH = os.path.join(self.model_dir, "mask_rcnn_coco.h5")
        # Download COCO trained weights from Releases if needed
        if not os.path.exists(COCO_MODEL_PATH):
            utils.download_trained_weights(COCO_MODEL_PATH)

        if self.init_with == "imagenet":
            self.model.load_weights(self.model.get_imagenet_weights(), by_name=True)
        elif self.init_with == "coco":
            # Load weights trained on MS COCO, but skip layers that
            # are different due to the different number of classes
            # See README for instructions to download the COCO weights
            self.model.load_weights(COCO_MODEL_PATH, by_name=True,
                               exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", 
                                        "mrcnn_bbox", "mrcnn_mask"])
        elif self.init_with == "last":
            # Load the last model you trained and continue training
            self.model.load_weights(self.model.find_last(), by_name=True)

        print ('MaskRCNN Setup complete after', time.time()-t0, 'seconds')



        t0 = time.time()

        history = History()

        self.model.train(dataset_train, dataset_val, custom_callbacks=[history],
                         learning_rate=self.config.LEARNING_RATE,
                         epochs=epochs,
                         layers='heads')

        p.dump(history.history, open(os.path.join(self.model_dir, "history.p"), "wb"))

        print ('MaskRCNN Training complete after', time.time()-t0, 'seconds')

        return history



    def predict(self, images, verbose=False):
        '''Run inference on a list of images, lazily loading the most recent
        trained weights the first time it is called.
        '''

        if not self.test_model:

            model = MaskRCNN(mode="inference", 
                              config=C.TestingConfig(),
                              model_dir=self.model_dir)

            weights = model.find_last()

            model.load_weights(weights, by_name=True)

            self.test_model = model

        results = []
        for image in images:
            results.append(self.test_model.detect([image])[0])

        if verbose:
            r = results[0]
            visualize.display_instances(images[0], r['rois'], r['masks'], r['class_ids'], 
                                        ["",""], r['scores'],figsize=(10,10))


        return results
Example #13
# define the model, load weights and run training
model = MaskRCNN(mode='training', model_dir='./', config=config)

try:
    weights, old_epochs = find_last(model)
except FileNotFoundError:
    pr('Using initial weights from', C.initial_weights)
    if os.path.isfile('train.log'):
        pr('Backing up training log')
        os.rename('train.log', 'train.log.old')
    model.load_weights(C.initial_weights,
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])
    eps = C.epochs
    pr('Training', eps, 'epochs.')
else:
    pr('Using weights from: ', weights)
    model.load_weights(weights, by_name=True)
    eps = int(old_epochs) + C.epochs
    pr('Training from epoch', old_epochs, 'to epoch', eps)

model.train(train_set,
            test_set,
            custom_callbacks=[logger],
            learning_rate=config.LEARNING_RATE,
            epochs=eps,
            layers=C.train_layers)
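
# Hedged sketch of the find_last(model) helper used above (an assumption -- the
# project's real helper is not shown here). It wraps MaskRCNN.find_last(), which
# raises FileNotFoundError when no checkpoint exists, and recovers the epoch
# number from the checkpoint filename (e.g. mask_rcnn_cfg_0012.h5 -> 12).
import os
import re

def find_last(model):
    weights = model.find_last()  # path to the most recent saved .h5 checkpoint
    match = re.search(r'_(\d{4})\.h5$', os.path.basename(weights))
    old_epochs = int(match.group(1)) if match else 0  # fall back to 0 on an unexpected name
    return weights, old_epochs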
Example #14
        inf=self.image_info[image_id]
        path=inf["path"]
        return path
tk="/content/drive/My Drive/My_dataset/pictures_1.json"
ih="/content/drive/My Drive/My_dataset/"
train_set = HandwrittingDataset(tk)
train_set.load_dataset(ih, Train=True)
train_set.prepare()
print('Train: %d' % len(train_set.image_ids))

test_set = HandwrittingDataset(tk)
test_set.load_dataset(ih, Train=False)
test_set.prepare()
print('Test: %d' % len(test_set.image_ids))
class HandwrittingConfig(Config):
    NAME = "HandwrittingConfig"
    NUM_CLASSES = 1 + 1  # background + handwriting
    STEPS_PER_EPOCH = 274
    IMAGES_PER_GPU = 2
# path to the pretrained COCO weights in Google Drive
w = "/content/drive/My Drive/mask_rcnn_coco.h5"
config = HandwrittingConfig()
#config.display()
model = MaskRCNN(mode="training", model_dir="/content/drive/My Drive/handwritting_models/", config=config)
# load pretrained weights
model.load_weights(w, by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
# define augmentation techniques
augmentation = iaa.Sequential([iaa.Sometimes(0.6, iaa.GaussianBlur(sigma=(0, 0.4))),
                               iaa.Flipud(0.2),
                               iaa.Fliplr(0.5)], random_order=True)
# train the model
model.train(train_set, test_set, learning_rate=config.LEARNING_RATE, epochs=20, layers="heads", augmentation=augmentation)
Example #15
# initialize the image augmentation process
aug = iaa.SomeOf(
    (0, 2), [iaa.Fliplr(0.5),
             iaa.Flipud(0.5),
             iaa.Affine(rotate=(-10, 10))])

# define the model
model = MaskRCNN(mode='training', model_dir='./', config=config)
# load weights (mscoco) and exclude the output layers
model.load_weights('mask_rcnn_coco.h5',
                   by_name=True,
                   exclude=[
                       "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                       "mrcnn_mask"
                   ])
# train weights (output layers or 'heads')
# train *just* the layer heads
model.train(train_set,
            test_set,
            learning_rate=config.LEARNING_RATE,
            epochs=20,
            layers='heads',
            augmentation=aug)
# unfreeze the body of the network and train *all* layers
model.train(train_set,
            test_set,
            learning_rate=config.LEARNING_RATE / 10,
            epochs=20,
            layers='all',
            augmentation=aug)
Example #16
train_set.prepare()
print('Train: %d' % len(train_set.image_ids))
# prepare test/val set
test_set = ObjectDataset()
test_set.load_dataset('/home/student/charcoal_hearth_hill', is_train=False)
test_set.prepare()
print('Test: %d' % len(test_set.image_ids))
# prepare config
config = ObjectConfig()
config.display()
# define the model
model = MaskRCNN(mode='training', model_dir='/storage/model/', config=config)
# load weights (mscoco) and exclude the output layers
model.load_weights('/home/student/data_5000_project/programs/R-CNN/Mask_RCNN/mask_rcnn_coco.h5', by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",  "mrcnn_bbox", "mrcnn_mask"])
# train weights (output layers or 'heads')
model.train(train_set, test_set, learning_rate=config.LEARNING_RATE, epochs=12, layers='heads')
history1 = model.keras_model.history.history 

#https://github.com/crowdAI/crowdai-mapping-challenge-mask-rcnn/blob/master/Training.ipynb
#Sharada Mohanty
#print("Fine tune Resnet stage 4 and up")
model.train(train_set, test_set, learning_rate=config.LEARNING_RATE, epochs=24, layers='4+')
history2 = model.keras_model.history.history 

#Rosebrock, Adrian. Deep Learning for Computer Vision with Python, Vol. 3. 2019. p. 360
#epochs is cumulative in Mask R-CNN: each model.train() call runs up to the given total epoch, so pass previous epochs + n
model.train(train_set, test_set, learning_rate=config.LEARNING_RATE /10, epochs=36, layers="all")

#Thanks to Renu Khandelwal https://towardsdatascience.com/object-detection-using-mask-r-cnn-on-a-custom-dataset-4f79ab692f6d 
history3 = model.keras_model.history.history 
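
# Hedged sketch (an assumption, not from the original notebook): the three
# history dicts captured above can be stitched together and plotted as a single
# validation-loss curve, using the same pandas/matplotlib tooling that other
# examples in this listing use.
import pandas as pd
from matplotlib import pyplot as plt

val_loss = history1['val_loss'] + history2['val_loss'] + history3['val_loss']
pd.Series(val_loss).plot(title='Validation loss across the three training stages')
plt.xlabel('epoch')
plt.ylabel('val_loss')
plt.savefig('val_loss_stages.png')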
Example #17
    dataset_train.prepare()

    # Validation dataset
    dataset_val = CloudsDataset()
    dataset_val.load_batch(validation_images)
    dataset_val.prepare()

    #Prepare configuration
    config = CloudsConfig()

    #Create model in training mode
    model = MaskRCNN(mode='training', model_dir='./', config=config)
    #Load pre-trained weights (MSCOCO) and exclude the output layers
    model.load_weights('mask_rcnn_coco.h5', by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",  "mrcnn_bbox", "mrcnn_mask"])
    #Train weights (output layers or 'heads')
    model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=10, layers='heads', augmentation=image_augmentation)
    
    #Create instance of inference configuration class
    inference_config = InferenceConfig()

    #Recreate the model in inference mode
    model = MaskRCNN(mode='inference', config=inference_config, model_dir='./')

    #Get path to saved weights
    model_path = 'mask_rcnn_clouds_config_0001.h5'

    #Load trained weights
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)
    
    #Test on random validation image
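    # Hedged sketch of the truncated step above (an assumption -- the original
    # code is cut off here): pick a random validation image, run detection with
    # the re-created inference model, and display the result, assuming
    # mrcnn.visualize is available.
    import random
    from mrcnn import visualize

    image_id = random.choice(dataset_val.image_ids)
    image = dataset_val.load_image(image_id)
    r = model.detect([image], verbose=0)[0]
    visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                                dataset_val.class_names, r['scores'])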
Example #18
class RCNNDetector(IObjectDetection):
    def __init__(self, dataset_path, dataset_name):
        super(RCNNDetector, self).__init__(dataset_path, dataset_name)
        self.train_set = ClassDataset()
        self.test_set = ClassDataset()
        # self.train_set = KangarooDataset()
        # self.test_set = KangarooDataset()
        self.model = "rcnn"
        self.modelWeights = None
        self.config = Config()

    def transform(self):
        # fn.organizeDataset(self.DATASET_NAME, self.OUTPUT_PATH, self.DATASET)
        self.train_set.load_dataset(
            os.path.join(self.OUTPUT_PATH, self.DATASET_NAME), True)
        # self.train_set.load_dataset(dataset_path, True)
        self.train_set.prepare()
        #self.test_set.load_dataset(os.path.join(self.OUTPUT_PATH,self.DATASET_NAME), False)
        # self.test_set.load_dataset(dataset_path, False)
        #self.test_set.prepare()

    # def organize(self, train_percentage):
    #     super(RCNNDetector, self).organize( train_percentage)

    def createModel(self):
        # This must also point at the output path, because the split has already been done and saved there
        classes_file = os.path.join(self.OUTPUT_PATH, self.DATASET_NAME,
                                    "classes.names")
        with open(classes_file) as classes_fh:
            classes = classes_fh.readlines()
        n_classes = fn.count_classes(classes)
        n_images = len(
            glob.glob(
                os.path.join(self.OUTPUT_PATH, self.DATASET_NAME,
                             "train/JPEGImages/*.jpg")))
        ClassConfig.NUM_CLASSES += n_classes
        ClassConfig.NAME = self.DATASET_NAME

        ClassConfig.N_IMAGES = n_images
        ClassConfig.STEPS_PER_EPOCH = n_images // (ClassConfig.GPU_COUNT *
                                                   ClassConfig.IMAGES_PER_GPU)

        self.config = ClassConfig()
        # Same reason as before: the dataset has already been processed and saved there, so that is where it has to be used in this case
        # self.modelWeights = MaskRCNN(mode='training', model_dir=os.path.join(self.OUTPUT_PATH,"model"), config=self.config)
        if not os.path.exists(
                os.path.join(self.OUTPUT_PATH, self.DATASET_NAME, "models")):
            os.mkdir(
                os.path.join(self.OUTPUT_PATH, self.DATASET_NAME, "models"))
        self.modelWeights = MaskRCNN(mode='training',
                                     model_dir=os.path.join(
                                         self.OUTPUT_PATH, self.DATASET_NAME,
                                         "models"),
                                     config=self.config)
        if not os.path.exists(
                'objectDetectors/RCNNObjectDetector/mask_rcnn_coco.h5'):
            wget.download(
                "https://www.dropbox.com/s/12ou730jt730qvu/mask_rcnn_coco.h5?dl=1",
                'objectDetectors/RCNNObjectDetector/mask_rcnn_coco.h5')
        self.modelWeights.load_weights(
            'objectDetectors/RCNNObjectDetector/mask_rcnn_coco.h5',
            by_name=True,
            exclude=[
                "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                "mrcnn_mask"
            ])

    def train(self, framework_path=None, n_gpus=1):
        ClassConfig.GPU_COUNT = n_gpus
        # self.model.train(self.TRAIN_SET, self.TEST_SET, learning_rate=self.CONFIG.LEARNING_RATE, epochs=5, layers='heads')
        # the test-set load is commented out in transform(), so the train set is reused for validation
        self.modelWeights.train(self.train_set,
                                self.train_set,
                                learning_rate=self.config.LEARNING_RATE,
                                epochs=5,
                                layers='heads')
        results = []
        # Path(os.path.join(self.OUTPUT_PATH, self.DATASET_NAME, "models")).rglob(".h5")
        for r in glob.glob(
                os.path.join(self.OUTPUT_PATH, self.DATASET_NAME, "models",
                             "**", "*5.h5")):
            results.append(r)
        # results = [p for p in os.listdir(os.path.join(self.OUTPUT_PATH, self.DATASET_NAME,"models")) if p.endswith(".h5") and "mask_rcnn_" + self.DATASET_NAME + "_0005" in p]
        shutil.copy2(
            results[0],
            os.path.join(self.OUTPUT_PATH, self.DATASET_NAME, "models",
                         "mask_rcnn_" + self.DATASET_NAME + "_0005.h5"))

    def evaluate(self):
        rcnnPredict = RCNNPredict(
            os.path.join(self.OUTPUT_PATH, self.DATASET_NAME, "models",
                         "mask_rcnn_" + self.DATASET_NAME.lower() +
                         "_0005.h5"),
            os.path.join(self.OUTPUT_PATH, self.DATASET_NAME, "classes.names"))
        map_metric = Map(rcnnPredict, self.DATASET_NAME,
                         os.path.join(self.OUTPUT_PATH, self.DATASET_NAME),
                         self.model)
        map_metric.evaluate()
Example #19
def main(argv):
    mode = ''
    weights = 'mask_rcnn_coco.h5'
    image = ''
    dataset = 'data/'
    try:
        opts, args = getopt.getopt(argv, 'hm:w:i:d:',
                                   ['mode=', 'weights=', 'image=', 'dataset='])
    except getopt.GetoptError:
        print(
            'mask_rcnn.py -m <mode=train, eval or splash> -w <weights> -i <image> -d <dataset>'
        )
        sys.exit()

    for opt, arg in opts:
        if opt == '-h':
            print(
                'mask_rcnn.py -m <mode=train, eval or splash> -w <weights> -i <image> -d <dataset>'
            )
            sys.exit()
        elif opt in ('-m', '--mode'):
            mode = arg
        elif opt in ('-w', '--weights'):
            weights = arg
        elif opt in ('-i', '--image'):
            image = arg
        elif opt in ('-d', '--dataset'):
            dataset = arg

    if mode == 'train':
        print('TRAINING MODE:')
        print('WITH WEIGHTS: ', weights)
        # prepare train set
        train_set = HumanInVesselDangerDataset()
        train_set.load_dataset('data/', is_train=True)
        train_set.prepare()
        print('Train: %d' % len(train_set.image_ids))
        # prepare test set
        test_set = HumanInVesselDangerDataset()
        test_set.load_dataset('data/', is_train=False)
        test_set.prepare()
        print('Test: %d' % len(test_set.image_ids))

        # prepare config
        config = VesselConfig()
        config.display()
        # define the model
        model = MaskRCNN(mode='training', model_dir='./models/', config=config)
        # load weights (mscoco) and exclude the output layers
        model.load_weights(weights,
                           by_name=True,
                           exclude=[
                               "mrcnn_class_logits", "mrcnn_bbox_fc",
                               "mrcnn_bbox", "mrcnn_mask"
                           ])
        # train weights (output layers or 'heads')
        model.train(train_set,
                    test_set,
                    learning_rate=config.LEARNING_RATE,
                    epochs=10,
                    layers='heads')
    elif mode == 'eval':
        print('EVALUATION MODE:')
        print('WITH WEIGHTS: ', weights)
        # prepare validation set
        validation_set = HumanInVesselDangerDataset()
        validation_set.load_dataset('validation/', is_validation=True)
        validation_set.prepare()
        print('Validation: %d' % len(validation_set.image_ids))

        # prepare config
        config = VesselEvalConfig()
        config.display()

        # define the model
        model = MaskRCNN(mode='inference', model_dir='./', config=config)
        # load model weights
        model.load_weights(weights, by_name=True)

        # evaluate model on validation dataset
        val_mAP = evaluate_model(validation_set, model, config)
        print("Validation mAP: %.3f" % val_mAP)
    elif mode == 'splash':
        print("SPLASH MODE: ")
        if image == '':
            print('use `-i <loc/of/img>` to splash an image')
            sys.exit()
        print('WITH WEIGHTS: ', weights)
        print('USING DATASET: ', dataset)
        print('SPLASHING IMAGE: ', image)

        # prepare validation set
        validation_set = HumanInVesselDangerDataset()
        validation_set.load_dataset(dataset, is_validation=True)
        validation_set.prepare()
        print('Validation: %d' % len(validation_set.image_ids))
        # prepare config
        config = VesselEvalConfig()
        config.display()

        # define the model
        model = MaskRCNN(mode='inference', model_dir='./', config=config)
        # load model weights
        model.load_weights(weights, by_name=True)

        detect_and_color_splash(model, image)
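

# Hedged sketch of the evaluate_model() helper called in the 'eval' branch above
# (an assumption -- the real helper is not shown in this listing): it averages
# mrcnn.utils.compute_ap over every image in the dataset.
import numpy as np
from mrcnn.model import load_image_gt
from mrcnn.utils import compute_ap

def evaluate_model(dataset, model, cfg):
    APs = []
    for image_id in dataset.image_ids:
        # ground-truth boxes, class ids and full-size masks for this image
        image, _, gt_class_id, gt_bbox, gt_mask = load_image_gt(
            dataset, cfg, image_id, use_mini_mask=False)
        # detection on the raw image, compared against the ground truth
        r = model.detect([image], verbose=0)[0]
        AP, _, _, _ = compute_ap(gt_bbox, gt_class_id, gt_mask,
                                 r['rois'], r['class_ids'], r['scores'], r['masks'])
        APs.append(AP)
    return np.mean(APs)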
            router_train = DatasetXML()
            router_train.load_dataset(dataset_dir, label, train_imgs)
            router_train.prepare()
            print('Train: %d' % len(router_train.image_ids))

            router_test = DatasetXML()
            router_test.load_dataset(dataset_dir, label, test_imgs)
            router_test.prepare()
            print('Test: %d' % len(router_test.image_ids))
        else:  # pkl format
            file_name = 'data_val2017-laptop'
            pkldata = pkl.load(open(os.path.join(dataset_dir, file_name+'.pkl'), 'rb'))





        # =============== train models ===============
        # prepare config
        config = Config()
        config.NAME = label + "_cfg"  # Give the configuration a recognizable name
        config.STEPS_PER_EPOCH = n_steps  # Number of training steps per epoch
        config.NUM_CLASSES = 1 + 1  # Number of classes (background + router)

        model = MaskRCNN(mode='training', model_dir=model_root_dir, config=config)
        model.load_weights(model_dir, by_name=True,
                           exclude=["mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"])
        model.train(router_train, router_test, learning_rate=config.LEARNING_RATE, epochs=n_epochs, layers='heads')

Example #21
# session stuff
session = tf.Session(config=config)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())

# initializing the model
model = MaskRCNN(mode='training',
                 config=severstal_config,
                 model_dir='modeldir')

# we will retrain starting with the coco weights
model.load_weights('mask_rcnn_coco.h5',
                   by_name=True,
                   exclude=[
                       'mrcnn_bbox_fc', 'mrcnn_class_logits', 'mrcnn_mask',
                       'mrcnn_bbox'
                   ])
### %time

# ignore UserWarnings
import warnings
warnings.filterwarnings('ignore', category=UserWarning)

# training at last
model.train(dataset_train,
            dataset_validate,
            epochs=1,
            layers='heads',
            learning_rate=severstal_config.LEARNING_RATE)

history = model.keras_model.history.history
Example #22
                     config=config,
                     model_dir=config.MODEL_DIR)

    weights_path = config.COCO_WEIGHTS_PATH
    # Download weights file
    if not os.path.exists(weights_path):
        utils.download_trained_weights(weights_path)

    # don't load last layers because we are going to train it
    model.load_weights(weights_path,
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])

    dataset_train = Data()
    dataset_train.load(config.DATA_DIR, "train")
    dataset_train.prepare()

    dataset_val = Data()
    dataset_val.load(config.DATA_DIR, "val")
    dataset_val.prepare()

    # training just the head layers is enough
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads')
Example #23
def train(args):
    from mrcnn.model import MaskRCNN
    # Retrieve arguments
    output = args.output
    samples_number = args.dataset_keep
    preview_only = args.preview_only
    dataset_config_path = args.dataset_config_file
    epochs = args.epochs
    steps_per_epoch = args.steps_per_epoch
    layers = args.layers
    pretrain = args.pretrain

    # Create config class
    train_config = LeafSegmentorConfig()

    # Create model
    if steps_per_epoch > 0:
        train_config.STEPS_PER_EPOCH = steps_per_epoch

    if not os.path.exists(output):
        os.makedirs(output, exist_ok=True)
    model = MaskRCNN(mode="training", config=train_config, model_dir=output)

    # Assemble output directories
    samples_output_dir = os.path.join(model.log_dir, "samples")

    # Create dataset
    with open(dataset_config_path) as dataset_config_file:
        dataset_config = json.load(dataset_config_file)
    dataset_class = dataset_config["dataset_module"] + "." + dataset_config[
        "dataset_class"]
    dataset_class = locate(dataset_class)

    dataset_config = dataset_config["config"]
    dataset_train = dataset_class.from_config(dataset_config["train"],
                                              train_config.IMAGE_SHAPE[0],
                                              train_config.IMAGE_SHAPE[1])
    dataset_valid = dataset_class.from_config(dataset_config["valid"],
                                              train_config.IMAGE_SHAPE[0],
                                              train_config.IMAGE_SHAPE[1])

    # Save train samples
    if samples_number != 0:
        save_samples(dataset_train, samples_number, path=samples_output_dir)

    if preview_only:
        return  # finish here

    # Start training from COCO or from previously trained model
    if pretrain == "COCO":
        load_coco_weights(model)
    else:
        model.load_weights(pretrain, by_name=True)

    # Start train
    model.train(dataset_train,
                dataset_valid,
                learning_rate=train_config.LEARNING_RATE,
                epochs=epochs,
                layers=layers)

    # Add metadata to model files (.h5)
    add_config_to_model(model.log_dir)
                       translate_percent={
                           "x": (-0.1, 0.1),
                           "y": (-0.1, 0.1)
                       },
                       rotate=(-30, 30))),
        iaa.Grayscale(alpha=(0.0, 0.7))
    ],
    random_order=True)

LRScheduler = LearningRateScheduler(lrSchedule)
callbacks_list = [LRScheduler]

model.train(dataset_train,
            dataset_validate,
            epochs=epoch_size,
            layers='heads',
            augmentation=imgaug_set,
            custom_callbacks=callbacks_list,
            learning_rate=steel_defect_config.LEARNING_RATE,
            optimizer='Adam')

train_hist = model.keras_model.history

print("Training is completed ............")

# Save loss to csv file
hist_df = pd.DataFrame(train_hist.history)
with open('history.csv', mode='w') as f:
    hist_df.to_csv(f)

# plot the training and validation loss curves
plt.style.use("ggplot")
Example #25
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('-t', action='store_true', help="train the model")
    parser.add_argument('-v', action='store_true', help="evaluate the model")
    parser.add_argument('-d',
                        action='store_true',
                        help="debug functions of the model")
    args = parser.parse_args()

    if args.t or args.v or args.d:
        # train set
        train_set = PhilipsDataset()
        train_set.load_dataset('shaver', is_train=True)
        train_set.load_dataset('smart-baby-bottle', is_train=True)
        train_set.load_dataset('toothbrush', is_train=True)
        train_set.load_dataset('wake-up-light', is_train=True)
        train_set.prepare()
        print('Train: %d' % len(train_set.image_ids))

        # test/val set
        test_set = PhilipsDataset()
        test_set.load_dataset('shaver', is_train=False)
        test_set.load_dataset('smart-baby-bottle', is_train=False)
        test_set.load_dataset('toothbrush', is_train=False)
        test_set.load_dataset('wake-up-light', is_train=False)
        test_set.prepare()
        print('Test: %d' % len(test_set.image_ids))

    if args.v:
        # create config
        cfg = PredictionConfig()
        # define the model
        model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
        # load model weights
        model.load_weights('mask_rcnn_philips_cfg_0004.h5', by_name=True)

        predict(test_set, model, cfg)

    elif args.d:
        train_set.extract_boxes("philips_data/shaver/annots/image4.xml")

    elif args.t:
        #train
        #prepare config
        config = PhilipsConfig()
        config.display()
        # define the model
        model = MaskRCNN(mode='training', model_dir='./', config=config)
        # load weights (mscoco) and exclude the output layers
        model.load_weights('mask_rcnn_philips_cfg_0004.h5',
                           by_name=True,
                           exclude=[
                               "mrcnn_class_logits", "mrcnn_bbox_fc",
                               "mrcnn_bbox", "mrcnn_mask"
                           ])
        # train weights (output layers or 'heads')
        model.train(train_set,
                    test_set,
                    learning_rate=config.LEARNING_RATE,
                    epochs=5,
                    layers='heads')
    else:
        # create config
        cfg = PredictionConfig()
        # define the model
        model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
        # load model weights
        model.load_weights('mask_rcnn_philips_cfg_0004.h5', by_name=True)
        predict_validation(model, cfg)
def main():  
    # train set
    train_set = AnimalDataset()
    train_set.load_dataset('kangaroo', is_train=True)
    train_set.prepare()
    print('Train: %d' % len(train_set.image_ids))
     
    # test/val set
    test_set = AnimalDataset()
    test_set.load_dataset('kangaroo', is_train=False)
    test_set.prepare()
    print('Test: %d' % len(test_set.image_ids))
    
     
    # load an image
    image_id = 0
    image = train_set.load_image(image_id)
    print(image.shape)
    # load image mask
    mask, class_ids = train_set.load_mask(image_id)
    print(mask.shape)
    # plot image
    pyplot.imshow(image)
    # plot mask
    pyplot.imshow(mask[:, :, 0], cmap='gray', alpha=0.5)
    pyplot.show()
    
     
    # define image id
    image_id = 1
    # load the image
    image = train_set.load_image(image_id)
    # load the masks and the class ids
    mask, class_ids = train_set.load_mask(image_id)
    # extract bounding boxes from the masks
    bbox = extract_bboxes(mask)
    # display image with masks and bounding boxes
    display_instances(image, bbox, mask, class_ids, train_set.class_names)
     
    # prepare config
    config = AnimalConfig()
    config.display()
    # define the model
    model = MaskRCNN(mode='training', model_dir='./', config=config)
    # load weights (mscoco) and exclude the output layers
    model.load_weights('mask_rcnn_coco.h5', by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",  "mrcnn_bbox", "mrcnn_mask"])
    # train weights (output layers or 'heads')
    model.train(train_set, test_set, learning_rate=config.LEARNING_RATE, epochs=1, layers='heads')
    
     
    # create config
    cfg = AnimalConfig()
    # define the model
    model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
    # load model weights
    model.load_weights('mask_rcnn_kangaroo_cfg_0002.h5', by_name=True)
    # evaluate model on training dataset
    train_mAP = evaluate_model(train_set, model, cfg)
    print("Train mAP: %.3f" % train_mAP)
    # evaluate model on test dataset
    test_mAP = evaluate_model(test_set, model, cfg)
    print("Test mAP: %.3f" % test_mAP)
    #
    ## load model weights
    #model_path = 'mask_rcnn_kangaroo_cfg_0005.h5'
    #model.load_weights(model_path, by_name=True)
    # plot predictions for train dataset
    plot_actual_vs_predicted('train_actual_vs_pred.png',train_set, model, cfg)
    # plot predictions for test dataset
    plot_actual_vs_predicted('test_actual_vs_pred.png',test_set, model, cfg)
    config = Shogo_Makishima_Config()

    print("[MODEL][STATE][TRAINING]")
    model = MaskRCNN(
        mode="training",
        model_dir="I:\GitHub\Shogo-Makishima\Datasets\Models\Shogo-Makishima\\",
        config=config)
    model.load_weights(
        "I:\GitHub\Shogo-Makishima\Datasets\Models\\mask_rcnn_coco.h5",
        by_name=True,
        exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox", "mrcnn_mask"
        ])
    model.train(train_dataset,
                test_dataset,
                learning_rate=config.LEARNING_RATE,
                epochs=5,
                layers="heads")
else:
    config = Shogo_Makishima_Predictor_Config()

    print("[MODEL][STATE][TEST]")
    model = MaskRCNN(
        mode="inference",
        model_dir="I:\GitHub\Shogo-Makishima\Datasets\Models\Shogo-Makishima\\",
        config=config)
    model.load_weights(
        "I:\GitHub\Shogo-Makishima\Datasets\Models\Shogo-Makishima\shogo_makishima_cfg20200714T1540\\mask_rcnn_shogo_makishima_cfg_0005.h5",
        by_name=True)

    # evaluate model on train and test dataset
Example #28
# prepare train set
train_set = TableBankDataset()
# <- MODIFY -> Replace mydataset with the name of the folder containing your images and annotations
train_set.load_dataset('mydataset', is_train=True)
train_set.prepare()
print('Train: %d' % len(train_set.image_ids))
# prepare config
config = TableBankConfig()
config.display()
# define the model
model = MaskRCNN(mode='training', model_dir='./', config=config)
# load weights (mscoco) and exclude the output layers
model.load_weights('mask_rcnn_tablebank_cfg_0002.h5',
                   by_name=True,
                   exclude=[
                       "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                       "mrcnn_mask"
                   ])
# train weights (output layers or 'heads')
model.train(train_set,
            None,
            learning_rate=config.LEARNING_RATE,
            epochs=2,
            layers='heads')
# unfreeze the body of the network and train *all* layers
model.train(train_set,
            None,
            epochs=5,
            layers="all",
            learning_rate=config.LEARNING_RATE / 10)
Example #29
		return info['path']

# define a configuration for the model
class KangarooConfig(Config):
	# define the name of the configuration
	NAME = "kangaroo_cfg"
	# number of classes (background + kangaroo)
	NUM_CLASSES = 1 + 1
	# number of training steps per epoch
	STEPS_PER_EPOCH = 131

# prepare train set
train_set = KangarooDataset()
train_set.load_dataset('kangaroo', is_train=True)
train_set.prepare()
print('Train: %d' % len(train_set.image_ids))
# prepare test/val set
test_set = KangarooDataset()
test_set.load_dataset('kangaroo', is_train=False)
test_set.prepare()
print('Test: %d' % len(test_set.image_ids))
# prepare config
config = KangarooConfig()
config.display()
# define the model
model = MaskRCNN(mode='training', model_dir='./', config=config)
# load weights (mscoco) and exclude the output layers
model.load_weights('mask_rcnn_coco.h5', by_name=True, exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",  "mrcnn_bbox", "mrcnn_mask"])
# train weights (output layers or 'heads')
model.train(train_set, test_set, learning_rate=config.LEARNING_RATE, epochs=5, layers='heads')
    data_val = SteelDataset(df_defects)
    data_val.load_dataset(val_ids,
                          TRAIN_IMAGES_DIR)  # val images are in the train dir
    data_val.prepare()

    steel_config = SteelConfig()

    model = MaskRCNN(mode="training",
                     config=steel_config,
                     model_dir=str(MODEL_DIR))

    if args.model.lower() == "last":
        model_path = model.find_last()  # Find last trained weights
    elif args.model.lower() == "default":
        model_path = str(COCO_MODEL_PATH)

    # Exclude the last layers because they require a matching number of classes
    # ^ Original comment at:
    # https://github.com/matterport/Mask_RCNN/blob/master/samples/balloon/balloon.py
    model.load_weights(filepath=model_path,
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc", "mrcnn_bbox",
                           "mrcnn_mask"
                       ])

    model.train(train_dataset=data_train,
                val_dataset=data_val,
                learning_rate=steel_config.LEARNING_RATE,
                epochs=steel_config.NUM_EPOCHS,
                layers="heads")