Example #1
def _main_(args):
    config_path = args.conf
    weights_path = args.weights

    keras.backend.tensorflow_backend.set_session(get_session())

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if weights_path == '':
        weights_path = config['train']['pretrained_weights']

    ###############################
    #   Parse the annotations
    ###############################
    without_valid_imgs = False
    if config['parser_annotation_type'] == 'xml':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_xml(
            config['train']['train_annot_folder'],
            config['train']['train_image_folder'], config['model']['labels'])

        # parse annotations of the validation set, if any.
        if os.path.exists(config['valid']['valid_annot_folder']):
            valid_imgs, valid_labels = parse_annotation_xml(
                config['valid']['valid_annot_folder'],
                config['valid']['valid_image_folder'],
                config['model']['labels'])
        else:
            without_valid_imgs = True

    elif config['parser_annotation_type'] == 'csv':
        # parse annotations of the training set
        train_imgs, train_labels = parse_annotation_csv(
            config['train']['train_csv_file'], config['model']['labels'],
            config['train']['train_csv_base_path'])

        # parse annotations of the validation set, if any.
        if os.path.exists(config['valid']['valid_csv_file']):
            valid_imgs, valid_labels = parse_annotation_csv(
                config['valid']['valid_csv_file'], config['model']['labels'],
                config['valid']['valid_csv_base_path'])
        else:
            without_valid_imgs = True
    else:
        raise ValueError(
            "'parser_annotation_type' must be 'xml' or 'csv', not {}.".format(
                config['parser_annotation_type']))

    # remove samples without objects in the image
    for i in range(len(train_imgs) - 1, -1, -1):
        if len(train_imgs[i]['object']) == 0:
            del train_imgs[i]

    if len(config['model']['labels']) > 0:
        overlap_labels = set(config['model']['labels']).intersection(
            set(train_labels.keys()))

        print('Seen labels:\t', train_labels)
        print('Given labels:\t', config['model']['labels'])
        print('Overlap labels:\t', overlap_labels)

        if len(overlap_labels) < len(config['model']['labels']):
            print(
                'Some labels have no annotations! Please revise the list of labels in the config.json file!'
            )
            return
    else:
        print('No labels are provided. Evaluate on all seen labels.')
        config['model']['labels'] = list(train_labels.keys())
        with open("labels.json", 'w') as outfile:
            json.dump({"labels": list(train_labels.keys())}, outfile)

    ###############################
    #   Construct the model
    ###############################

    yolo = YOLO(backend=config['model']['backend'],
                input_size=(config['model']['input_size_h'],
                            config['model']['input_size_w']),
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'],
                gray_mode=config['model']['gray_mode'])

    ###############################
    #   Load the pretrained weights (if any)
    ###############################

    if weights_path != '':
        print("Loading pre-trained weights in", weights_path)
        yolo.load_weights(weights_path)
    elif os.path.exists(config['train']['pretrained_weights']):
        print("Loading pre-trained weights in",
              config['train']['pretrained_weights'])
        yolo.load_weights(config['train']['pretrained_weights'])
    else:
        raise Exception("No pretrained weights found.")

    ###############################
    #   Evaluate the network
    ###############################

    print("calculing mAP for iou threshold = {}".format(args.iou))
    generator_config = {
        'IMAGE_H': yolo.input_size[0],
        'IMAGE_W': yolo.input_size[1],
        'IMAGE_C': yolo.input_size[2],
        'GRID_H': yolo.grid_h,
        'GRID_W': yolo.grid_w,
        'BOX': yolo.nb_box,
        'LABELS': yolo.labels,
        'CLASS': len(yolo.labels),
        'ANCHORS': yolo.anchors,
        'BATCH_SIZE': 4,
        'TRUE_BOX_BUFFER': yolo.max_box_per_image,
    }
    if not without_valid_imgs:
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=yolo.feature_extractor.normalize,
                                         jitter=False)
        valid_eval = YOLO.MAP_evaluation(yolo,
                                         valid_generator,
                                         iou_threshold=args.iou)

        mAP, average_precisions = valid_eval.evaluate_mAP()
        for label, average_precision in average_precisions.items():
            print(yolo.labels[label], '{:.4f}'.format(average_precision))
        print('validation dataset mAP: {:.4f}\n'.format(mAP))

    train_generator = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=yolo.feature_extractor.normalize,
                                     jitter=False)
    train_eval = YOLO.MAP_evaluation(yolo,
                                     train_generator,
                                     iou_threshold=args.iou)

    mAP, average_precisions = train_eval.evaluate_mAP()
    for label, average_precision in average_precisions.items():
        print(yolo.labels[label], '{:.4f}'.format(average_precision))
    print('training dataset mAP: {:.4f}'.format(mAP))
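The evaluation script above reads args.conf, args.weights and args.iou, so it is presumably driven by an argparse parser. A minimal sketch of such a parser follows; the flag names and defaults are assumptions inferred from the attributes used in _main_, not taken from the original repository.

import argparse

if __name__ == '__main__':
    # hypothetical argument parser; flag names are assumed from the attributes used in _main_
    argparser = argparse.ArgumentParser(description='Evaluate a YOLO model on an annotated dataset')
    argparser.add_argument('-c', '--conf', help='path to the configuration file')
    argparser.add_argument('-w', '--weights', default='', help='path to the trained weights')
    argparser.add_argument('-i', '--iou', type=float, default=0.5, help='IoU threshold used for the mAP computation')

    _main_(argparser.parse_args())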
Example #2
    def train(self, train_imgs,     # the list of images to train the model
                    valid_imgs,     # the list of images used to validate the model
                    train_times,    # the number of times to repeat the training set, often used for small datasets
                    valid_times,    # the number of times to repeat the validation set, often used for small datasets
                    nb_epoch,       # number of epochs
                    learning_rate,  # the learning rate
                    batch_size,     # the size of the batch
                    warmup_bs,      # number of initial batches to let the model familiarize with the new dataset
                    object_scale,
                    no_object_scale,
                    coord_scale,
                    class_scale,
                    saved_weights_name='best_weights.h5',
                    debug=False):

        self.batch_size = batch_size
        self.warmup_bs  = warmup_bs

        self.object_scale    = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale     = coord_scale
        self.class_scale     = class_scale

        self.debug = debug

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H'         : self.input_size,
            'IMAGE_W'         : self.input_size,
            'GRID_H'          : self.grid_h,
            'GRID_W'          : self.grid_w,
            'BOX'             : self.nb_box,
            'LABELS'          : self.labels,
            'CLASS'           : len(self.labels),
            'ANCHORS'         : self.anchors,
            'BATCH_SIZE'      : self.batch_size,
            'TRUE_BOX_BUFFER' : self.max_box_per_image,
        }

        # batch generator
        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        # early stopping
        early_stop = EarlyStopping(
                monitor='val_loss',
                min_delta=0.001,
                patience=2,
                mode='min',
                verbose=1)
        # checkpoint
        saved_weights_name = "./models/" + saved_weights_name.replace(" ", "")
        checkpoint = ModelCheckpoint(
                saved_weights_name,
                monitor='val_loss',
                verbose=1,
                save_best_only=True,
                mode='min',
                period=1)

        # TensorBoard counter
        dir_name = "yolo_" + self.architecture
        tb_counter  = len([log for log in os.listdir("./logs") if dir_name in log]) + 1
        log_dir = "./logs/" + dir_name + "_" + str(tb_counter)
        # TensorBoard dir
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        # TensorBoard
        tensorboard = TensorBoard(log_dir=log_dir,
                                  histogram_freq=0,
                                  write_graph=True,
                                  write_images=False)


        # model fit
        self.model.fit_generator(generator        = train_batch,
                                 steps_per_epoch  = len(train_batch) * train_times,
                                 epochs           = nb_epoch,
                                 verbose          = 1,
                                 validation_data  = valid_batch,
                                 validation_steps = len(valid_batch) * valid_times,
                                 callbacks        = [early_stop, checkpoint, tensorboard],
                                 workers          = 2,
                                 max_queue_size   = 8)
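For reference, here is a hedged sketch of how this train method might be invoked once the annotations are parsed; the instance name yolo and every numeric value below are placeholders, not values taken from this repository.

# hypothetical call; all hyper-parameter values are illustrative placeholders
yolo.train(train_imgs=train_imgs,
           valid_imgs=valid_imgs,
           train_times=1,
           valid_times=1,
           nb_epoch=50,
           learning_rate=1e-4,
           batch_size=16,
           warmup_bs=100,
           object_scale=5.0,
           no_object_scale=1.0,
           coord_scale=1.0,
           class_scale=1.0,
           saved_weights_name='best_weights.h5',
           debug=False)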
Example #3
    'BATCH_SIZE'      : BATCH_SIZE,
    'TRUE_BOX_BUFFER' : 50,
}

def normalize(image):
    return image / 255.

train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS)
### write parsed annotations to pickle for fast retrieval next time
#with open('train_imgs', 'wb') as fp:
#    pickle.dump(train_imgs, fp)

### read saved pickle of parsed annotations
#with open ('train_imgs', 'rb') as fp:
#    train_imgs = pickle.load(fp)
train_batch = BatchGenerator(train_imgs, generator_config, norm=normalize)

valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS)
### write parsed annotations to pickle for fast retrieval next time
#with open('valid_imgs', 'wb') as fp:
#    pickle.dump(valid_imgs, fp)

### read saved pickle of parsed annotations
#with open ('valid_imgs', 'rb') as fp:
#    valid_imgs = pickle.load(fp)
valid_batch = BatchGenerator(valid_imgs, generator_config, norm=normalize, jitter=False)

"""**Setup a few callbacks and start the training**"""

early_stop = EarlyStopping(monitor='val_loss', 
                           min_delta=0.001, 
Example #4
    'CLASS': len(LABELS),
    'ANCHORS': ANCHORS,
    'BATCH_SIZE': 2,
    'TRUE_BOX_BUFFER': 50,
}

#Path for training data and annotations
image_path = './dataset-master/train/'
annot_path = './dataset-master/microanno/'
#print(os.listdir(annot_path))
#train_imgs, seen_labels = parse_annotation_new(annot_path, image_path, LABELS)
train_imgs, seen_labels = parse_annotation_txt(annot_path, image_path, LABELS)

train_img = train_imgs[0:126]
valid_img = train_imgs[126:]
train_batch = BatchGenerator(train_img, generator_config, jitter=False)
valid_batch = BatchGenerator(valid_img, generator_config, jitter=False)

# **Setup a few callbacks and start the training**

early_stop = EarlyStopping(monitor='loss',
                           min_delta=0.001,
                           patience=10,
                           mode='min',
                           verbose=1)

checkpoint = ModelCheckpoint('weights_yolo_adam_validdata_temp.h5',
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
Example #5
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epochs,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of initial epochs used to warm the model up on the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False,
            workers=3,
            max_queue_size=8,
            early_stop=True,
            custom_callback=[],
            tb_logdir="./"):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size[0],
            'IMAGE_W': self.input_size[1],
            'IMAGE_C': self.input_size[2],
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop_cb = EarlyStopping(monitor='val_loss',
                                      min_delta=0.001,
                                      patience=3,
                                      mode='min',
                                      verbose=1)
        checkpoint_cb = ModelCheckpoint(saved_weights_name,
                                        monitor='val_loss',
                                        verbose=1,
                                        save_best_only=True,
                                        mode='min',
                                        period=1)
        tensorboard_cb = TensorBoard(
            log_dir=tb_logdir,
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        root, ext = os.path.splitext(saved_weights_name)
        map_evaluator_cb = self.MAP_evaluation(self,
                                               valid_generator,
                                               save_best=True,
                                               save_name=root + "_bestMap" +
                                               ext,
                                               tensorboard=tensorboard_cb)

        if not isinstance(custom_callback, list):
            custom_callback = [custom_callback]
        callbacks = [checkpoint_cb, tensorboard_cb, map_evaluator_cb
                     ] + custom_callback
        if early_stop: callbacks.append(early_stop_cb)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_generator,
            steps_per_epoch=len(train_generator) * train_times,
            epochs=warmup_epochs + nb_epochs,
            verbose=2 if debug else 1,
            validation_data=valid_generator,
            validation_steps=len(valid_generator) * valid_times,
            callbacks=callbacks,
            workers=workers,
            max_queue_size=max_queue_size)
Example #6
# define paths for validation
valid_image_folder = 'data/Validation_Images/'
valid_annot_folder = 'data/Validation_Annotations/'


# define normalize for images
def normalize(image):
    return image / 255.


# define the image and label datasets for validation
valid_imgs, valid_labels = parse_annotation(valid_annot_folder,
                                            valid_image_folder,
                                            labels=LABELS)
valid_generator = BatchGenerator(valid_imgs,
                                 generator_config,
                                 norm=normalize,
                                 jitter=False)

# load new model from training
model = load_model('new_model_6.h5',
                   custom_objects={
                       'tf': tf,
                       'custom_loss': custom_loss
                   })

average_precisions = evaluate(model, valid_generator)

# print evaluation
print('mAP: {:.4f}'.format(
    sum(average_precisions.values()) / len(average_precisions)))
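Example #6 relies on LABELS, ANCHORS, generator_config and custom_loss being defined earlier in the script. A sketch of the first three, following the generator_config pattern used by the other examples, is given below; every concrete value is an illustrative placeholder.

# assumed context for Example #6; all values are illustrative only
LABELS = ['person', 'car']           # placeholder label set
ANCHORS = [0.57, 0.68, 1.87, 2.06,   # placeholder anchors as (width, height) pairs
           3.34, 5.47, 7.88, 3.53, 9.77, 9.17]

generator_config = {
    'IMAGE_H': 416,
    'IMAGE_W': 416,
    'GRID_H': 13,
    'GRID_W': 13,
    'BOX': len(ANCHORS) // 2,
    'LABELS': LABELS,
    'CLASS': len(LABELS),
    'ANCHORS': ANCHORS,
    'BATCH_SIZE': 4,
    'TRUE_BOX_BUFFER': 50,
}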
Example #7
    def train(self, train_imgs, valid_imgs, train_times, valid_times,
              object_scale, no_object_scale, coord_scale, class_scale,
              nb_epochs, learning_rate, batch_size, warmup_epochs, multi_gpu,
              saved_weights_dir, save_every_n_epoch, debug):
        self.batch_size = batch_size
        self.multi_gpu = multi_gpu
        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale
        self.debug = debug

        if self.multi_gpu:
            self.model = multi_gpu_model(self.model, gpus=2)
        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss,
                           optimizer=optimizer,
                           metrics=['accuracy', self.recall_metric])

        ############################################
        # Make a few callbacks
        ############################################

        monitor_metric, metric_mode = 'val_recall', 'max'

        early_stop = EarlyStopping(monitor=monitor_metric,
                                   min_delta=0.0001,
                                   patience=20,
                                   mode=metric_mode,
                                   verbose=1)

        reduce_lrt = ReduceLROnPlateau(monitor=monitor_metric,
                                       verbose=1,
                                       patience=5,
                                       mode=metric_mode,
                                       min_lr=1e-07,
                                       factor=0.8)

        CheckpointModel = ModelCheckpointDetached if self.multi_gpu else ModelCheckpoint

        if not os.path.exists(saved_weights_dir):
            os.mkdir(saved_weights_dir)
        checkpoint = CheckpointModel(os.path.join(
            saved_weights_dir, 'weights.{epoch:02d}-{val_loss:.2f}.h5'),
                                     monitor=monitor_metric,
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True,
                                     mode=metric_mode,
                                     period=save_every_n_epoch)

        backend_name = self.backend.replace(' ', '_').lower()
        tb_counter = len([
            log for log in os.listdir(os.path.expanduser('~/logs/'))
            if backend_name in log
        ]) + 1
        tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/') +
                                  backend_name + '_' + str(tb_counter),
                                  histogram_freq=0,
                                  batch_size=self.batch_size,
                                  write_graph=True,
                                  write_images=True)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_generator,
            steps_per_epoch=len(train_generator) * train_times,
            epochs=warmup_epochs + nb_epochs,
            verbose=1 if debug else 2,
            validation_data=valid_generator,
            validation_steps=len(valid_generator) * valid_times,
            callbacks=[
                early_stop, reduce_lrt,
                TerminateOnNaN(), checkpoint, tensorboard
            ],
            workers=8,
            initial_epoch=0,
            use_multiprocessing=False,
            max_queue_size=8)

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))
Example #8
def _main_(args):
    config_path = args.conf
    weights_path = args.weights

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    # parse annotations of the validation set, if any, otherwise raise an error
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'], config['model']['labels'])
    else:
        raise ValueError('Folder ' + config['valid']['valid_annot_folder'] +
                         ' does not exist')

    ###############################
    #   Make the model
    ###############################

    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################

    yolo.load_weights(weights_path)

    ############################################
    # Make validation generators
    ############################################
    generator_config = {
        'IMAGE_H': yolo.input_size,
        'IMAGE_W': yolo.input_size,
        'GRID_H': yolo.grid_h,
        'GRID_W': yolo.grid_w,
        'BOX': yolo.nb_box,
        'LABELS': yolo.labels,
        'CLASS': len(yolo.labels),
        'ANCHORS': yolo.anchors,
        'BATCH_SIZE': config['train']['batch_size'],
        'TRUE_BOX_BUFFER': yolo.max_box_per_image,
    }

    valid_generator = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=yolo.feature_extractor.normalize,
                                     jitter=False)

    ############################################
    # Compute mAP on the validation set
    ############################################
    average_precisions = yolo.evaluate(valid_generator)

    # print evaluation
    for label, average_precision in average_precisions.items():
        print(yolo.labels[label], '{:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
Example #9
    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    yolo.load_weights(config['train']['pretrained_weights'])

    generator_config = {
        'IMAGE_H': config['model']['input_size'],
        'IMAGE_W': config['model']['input_size'],
        'GRID_H': config['model']['input_size'] // 32,
        'GRID_W': config['model']['input_size'] // 32,
        'BOX': len(config['model']['anchors']),
        'LABELS': config['model']['labels'],
        'CLASS': len(config['model']['labels']),
        'ANCHORS': config['model']['anchors'],
        'BATCH_SIZE': 1,
        'TRUE_BOX_BUFFER': config['model']['max_box_per_image'],
    }
    val_generator = BatchGenerator(
        valid_imgs,
        generator_config,
        norm=normalize,
        flipflop=False,
        shoechanger=False,
        zeropad=False,
    )
    yolo.evaluate(val_generator)
Example #10
    def train(
            self,
            train_imgs,  # list of images to train the model
            valid_imgs,  # list of images used to validate the model
            train_times,  # number of times to repeat the training set, often used for small datasets
            valid_times,  # number of times to repeat the validation set, often used for small datasets
            nb_epoch,  # number of epochs
            learning_rate,  # learning rate
            batch_size,  # size of the batch
            warmup_bs,  # number of initial batches to let the model familiarize with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size
        self.warmup_bs = warmup_bs

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        ############################################
        # Make a few callbacks
        ############################################
        date = datetime.today().strftime('%m-%d_%H%M')
        os.mkdir(os.path.join('logs', date))
        """
        early_stop = EarlyStopping(monitor='val_loss',
                                   patience=3,
                                   mode='min',
                                   verbose=1)
        """
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     save_best_only=True,
                                     mode='min',
                                     period=1)
        tensorboard = TensorBoard(log_dir=os.path.join('logs', date),
                                  histogram_freq=1,
                                  write_graph=False,
                                  write_images=True,
                                  write_grads=True)
        reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.2,
                                      patience=3,
                                      min_lr=5e-7)
        pauser = TrainPauseCallback()
        self.pauser = pauser

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_batch,
            steps_per_epoch=len(train_batch) * train_times,
            epochs=nb_epoch,
            verbose=1,
            validation_data=valid_batch,
            validation_steps=len(valid_batch) * valid_times,
            callbacks=[reduce_lr, checkpoint, tensorboard, pauser],
            workers=3,
            max_queue_size=8)
Example #11
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epoch,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of initial epochs used to warm the model up on the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            logdir,
            saving_freq,
            saved_weights_name='weights.h5',
            debug=False):

        self.batch_size = batch_size
        self.warmup_bs = warmup_epochs * (train_times *
                                          (len(train_imgs) / batch_size + 1) +
                                          valid_times *
                                          (len(valid_imgs) / batch_size + 1))

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        if warmup_epochs > 0:
            nb_epoch = warmup_epochs  # if it's warmup stage, don't train more than warmup_epochs

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss,
                           metrics=['accuracy', self.custom_loss],
                           optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size[1],
            'IMAGE_W': self.input_size[0],
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        # Add information for the plots

        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(
            monitor='val_loss',
            min_delta=0.00005,  #0.0008
            patience=5,
            mode='min',
            verbose=1)

        checkpoint_best = ModelCheckpoint(logdir + saved_weights_name +
                                          '.hdf5',
                                          monitor='val_loss',
                                          verbose=1,
                                          save_best_only=True,
                                          save_weights_only=True,
                                          mode='min',
                                          period=1)

        tensorboard = TensorBoard(log_dir=os.path.expanduser(logdir),
                                  histogram_freq=0,
                                  write_graph=False,
                                  write_grads=False,
                                  write_images=False)

        # my addition, see https://keras.io/callbacks/#modelcheckpoint
        reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.5,
                                      patience=3,
                                      min_lr=1e-5,
                                      verbose=1)

        time_callback = TimeCallback(logdir=logdir)

        csv_logger = CSVLogger('log.csv', append=True, separator=';')

        my_callbacks = [
            checkpoint_best, tensorboard, reduce_lr, time_callback, csv_logger
        ]
        if saving_freq > 0:
            checkpoint_period = ModelCheckpoint(logdir +
                                                'weights{epoch:03d}.hdf5',
                                                monitor='val_loss',
                                                verbose=1,
                                                save_best_only=False,
                                                save_weights_only=True,
                                                mode='min',
                                                period=saving_freq)
            my_callbacks.append(checkpoint_period)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_batch,
            steps_per_epoch=len(train_batch) * train_times,
            epochs=nb_epoch,
            verbose=1,
            validation_data=valid_batch,
            validation_steps=len(valid_batch) * valid_times,
            callbacks=my_callbacks,
            workers=3,
            max_queue_size=8)
Example #12
    def train(self,
              train_imgs,
              valid_imgs,
              train_times,
              valid_times,
              nb_epochs,
              learning_rate,
              batch_size,
              warmup_epochs,
              object_scale,
              no_object_scale,
              coord_scale,
              class_scale,
              saved_weights_name="best_weights.h5",
              train=True):
        self.batch_size = batch_size
        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        generator_config = {
            "IMAGE_H": self.input_size,
            "IMAGE_W": self.input_size,
            "GRID_H": self.grid_h,
            "GRID_W": self.grid_w,
            "BOX": self.nb_box,
            "LABELS": self.labels,
            "CLASS": len(self.labels),
            "ANCHORS": self.anchors,
            "BATCH_SIZE": self.batch_size,
            "TRUE_BOX_BUFFER": self.max_box_per_img
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)

        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)

        self.model.compile(loss=self.custom_loss, optimizer="adam")

        early_stopping = EarlyStopping(monitor="loss",
                                       patience=20,
                                       mode="min",
                                       verbose=1)
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor="loss",
                                     verbose=1,
                                     save_best_only=False,
                                     mode="min")

        if train:
            self.model.fit_generator(
                generator=train_generator,
                steps_per_epoch=len(train_generator) * train_times,
                epochs=nb_epochs,
                validation_data=valid_generator,
                validation_steps=len(valid_generator) * valid_times,
                callbacks=[early_stopping, checkpoint])
Example #13
    def train(self,
              image_fps,
              image_annotations,
              number_of_images_to_use=20,
              use_all_imgs=False):

        if use_all_imgs:
            image_fps_list = list(image_fps)
        else:
            image_fps_list = list(image_fps[:number_of_images_to_use])

        # split dataset into training vs. validation dataset
        # split ratio is set to 0.9 vs. 0.1 (train vs. validation, respectively)
        image_fps_list.sort()
        random.seed(42)
        random.shuffle(image_fps_list)

        validation_split = 0.1
        split_index = int((1 - validation_split) * len(image_fps_list))

        image_fps_train = image_fps_list[:split_index]
        image_fps_val = image_fps_list[split_index:]
        image_annotations_train = {
            k: v
            for k, v in image_annotations.items() if k in image_fps_train
        }
        image_annotations_val = {
            k: v
            for k, v in image_annotations.items() if k in image_fps_val
        }

        train_generator = BatchGenerator(self.config, image_fps_train,
                                         image_annotations_train)
        test_generator = BatchGenerator(self.config, image_fps_val,
                                        image_annotations_val)

        optimizer = Adam(lr=0.5e-8,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        # optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9)
        # optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0)

        file_path = "weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"

        self.model.compile(loss=self.custom_loss,
                           optimizer=optimizer,
                           metrics=['accuracy'])

        checkpoint = ModelCheckpoint(file_path,
                                     monitor='val_acc',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='max')

        self.model.fit_generator(train_generator,
                                 steps_per_epoch=len(train_generator),
                                 epochs=10000,
                                 verbose=1,
                                 validation_data=test_generator,
                                 validation_steps=len(test_generator),
                                 callbacks=[checkpoint])
Example #14
    def eval(
            self,
            valid_imgs,  # the list of images used to validate the model
            test_imgs,  # the list of images used to test the model
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            debug=False):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make validation and test generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        test_generator = BatchGenerator(test_imgs,
                                        generator_config,
                                        norm=self.feature_extractor.normalize,
                                        jitter=False)

        self.warmup_batches = 0

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        # early_stop = EarlyStopping(monitor='val_loss',
        #                    min_delta=0.0001,
        #                    patience=5,
        #                    mode='min',
        #                    verbose=1)
        # checkpoint = ModelCheckpoint(saved_weights_name,
        #                              monitor='val_loss',
        #                              verbose=1,
        #                              save_best_only=True,
        #                              mode='min',
        #                              period=1)
        # tensorboard = TensorBoard(log_dir=os.path.expanduser('./logs/'),
        #                           histogram_freq=0,
        #                           #write_batch_performance=True,
        #                           write_graph=True,
        #                           write_images=False)

        ############################################
        # Start the training process
        ############################################

        # self.model.fit_generator(generator        = train_generator,
        #                          steps_per_epoch  = len(train_generator) * train_times,
        #                          epochs           = warmup_epochs + nb_epochs,
        #                          verbose          = 2 if debug else 1,
        #                          validation_data  = valid_generator,
        #                          validation_steps = len(valid_generator) * valid_times,
        #                          callbacks        = [early_stop, checkpoint, tensorboard],
        #                          workers          = 3,
        #                          max_queue_size   = 8)

        ############################################
        # Compute mAP on the validation set
        ############################################
        print()
        print('Valid Evaluate')
        average_precisions, timeHistory = self.evaluate(valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('Valid mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))

        print()
        print('Total Images: ', len(timeHistory))
        print('Elapsed Time: {:.4f}'.format(sum(timeHistory)))
        print('Avg Image Time: {:.4f}'.format(
            sum(timeHistory) / len(timeHistory)))
        print('FPS: ', int(1 / (sum(timeHistory) / len(timeHistory))))

        print()
        print('Test Evaluate')
        average_precisions, timeHistory = self.evaluate(test_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('Test mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))

        timeHistory = timeHistory[1:]

        print()
        print('Total Images: ', len(timeHistory))
        print('Elapsed Time: {:.4f}'.format(sum(timeHistory)))
        print('Avg Image Time: {:.4f}'.format(statistics.mean(timeHistory)))
        print('Std Deviation: {:.4f}'.format(statistics.stdev(timeHistory)))
        print('FPS: ', int(1 / (sum(timeHistory) / len(timeHistory))))
Example #15
    def train(self, train_imgs, valid_imgs,
                    train_times,    # the number of times to repeat the training set, often used for small datasets
                    valid_times,    # the number of times to repeat the validation set, often used for small datasets
                    nb_epochs,      # number of epochs
                    learning_rate,  # the learning rate
                    batch_size,     # the size of the batch
                    warmup_epochs,  # number of initial epochs used to warm the model up on the new dataset
                    object_scale,
                    no_object_scale,
                    coord_scale,
                    class_scale,
                    full_log_dir,
                    early_stop_patience,
                    early_stop_min_delta,
                    learning_rate_decay_factor,
                    learning_rate_decay_patience,
                    learning_rate_decay_min_lr,
                    saved_weights_name='best_weights.h5',
                    debug=False,
                    sequence_length=10):

        self.batch_size = batch_size
        self.sequence_length = sequence_length

        self.object_scale    = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale     = coord_scale
        self.class_scale     = class_scale

        self.debug = debug

        self.full_log_dir = full_log_dir
        self.early_stop_patience = early_stop_patience
        self.early_stop_min_delta = early_stop_min_delta
        self.learning_rate_decay_factor = learning_rate_decay_factor
        self.learning_rate_decay_patience = learning_rate_decay_patience
        self.learning_rate_decay_min_lr = learning_rate_decay_min_lr


        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H'         : self.input_size, 
            'IMAGE_W'         : self.input_size,
            'GRID_H'          : self.grid_h,  
            'GRID_W'          : self.grid_w,
            'BOX'             : self.nb_box,
            'LABELS'          : self.labels,
            'CLASS'           : len(self.labels),
            'ANCHORS'         : self.anchors,
            'BATCH_SIZE'      : self.batch_size,
            'TRUE_BOX_BUFFER' : self.max_box_per_image,
            'SEQUENCE_LENGTH' : self.sequence_length
        }    

        train_generator = BatchGenerator(train_imgs, 
                                     generator_config, 
                                     norm=self.feature_extractor.normalize,
                                     debug=self.debug)
        valid_generator = BatchGenerator(valid_imgs, 
                                     generator_config, 
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)   

        self.warmup_batches  = warmup_epochs * (train_times*len(train_generator) + valid_times*len(valid_generator)) / 4 
        print("Using %d warmup batches" % self.warmup_batches)


        ############################################
        # Define your callbacks
        ############################################

        # HS HSS: with a patience of 100 you finish in 200 epochs, so I changed it to 400
        early_stop = EarlyStopping(monitor='val_loss', 
                           min_delta=self.early_stop_min_delta, 
                           patience=self.early_stop_patience , 
                           verbose=1)

        # This didn't work with multi-GPU
        checkpoint = ModelCheckpoint('{name}_{{epoch:02d}}.h5'.format(name=saved_weights_name), 
                                     monitor='val_loss', 
                                     verbose=0, 
                                     save_best_only=True, 
                                     mode='min', 
                                     period=1)

        # defined by Anuar because the above didn't work with multi-GPU
        checkpoint_multi = MultiGPUCheckpoint(
                        '{name}_{{epoch:02d}}_multi.h5'.format(name=saved_weights_name),
                        verbose=1,
                        save_best_only=True,
                        mode='min',
                        period=1)


        # defined by HS for the best val loss
        checkpoint_multi_hs = MultiGPUCheckpoint(
                        '{name}_{{epoch:02d}}_hsBb_valLoss-{{val_loss:.2f}}.h5'.format(name=saved_weights_name),
                        verbose=1,
                        save_best_only=True,)


        # defined by HS
        # HS HSS: originally I used monitor='val_loss', factor=0.5, patience=20, min_lr=1e-6
        reduce_lr_hs = ReduceLROnPlateau(monitor='val_loss',
                                         factor=self.learning_rate_decay_factor,
                                         patience=self.learning_rate_decay_patience,
                                         min_lr=self.learning_rate_decay_min_lr)

        # written by Anuar
        evaluate_callback_train = EvaluateCallback(train_generator, self.evaluate)
        evaluate_callback_val = EvaluateCallback(valid_generator, self.evaluate)
        # written by Anuar
        decay_lr = DecayLR(27, 31, 0.2)


        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                         decay=0.0)



        ############################################
        # Compile the model
        ############################################
        with tf.device("/cpu:0"):
            self.model.compile(loss=self.custom_loss, optimizer=optimizer)


        ############################################
        # Start the training process
        ############################################  

        steps_per_epoch = len(train_generator) * train_times

        parallel_model = multi_gpu_model(self.model, gpus=2)
        parallel_model.compile(loss=self.custom_loss, optimizer=optimizer)
        parallel_model.fit_generator(
                                 generator=train_generator, 
                                 steps_per_epoch  = steps_per_epoch, 
                                 epochs           = warmup_epochs + nb_epochs, 
                                 verbose          = 2 if debug else 1,
                                 validation_data  = valid_generator,
                                 validation_steps = len(valid_generator) * valid_times,
                                 callbacks        = [
                                    early_stop,
                                    checkpoint_multi_hs, 
                                    TrainValTensorBoard_HS(self.full_log_dir,
                                            write_graph=False, write_images=True),
                                    ValOnlyProgbarLogger(verbose=1, count_mode='steps'),
                                    reduce_lr_hs,
                                    evaluate_callback_val], 
                         
                                 workers          = 4,
                                 max_queue_size   = 10,
                                 use_multiprocessing=True)      
        
        self.model.save(saved_weights_name + "_final.h5")

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator, iou_threshold=0.5,
                                           score_threshold=0.5)
        for label, average_precision in list(average_precisions.items()):
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))         

        average_precisions = self.evaluate(valid_generator, iou_threshold=0.3,
                                           score_threshold=0.3)
        for label, average_precision in list(average_precisions.items()):
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))         
Example #16
    def train(self, train_imgs,     # the list of images to train the model
                    valid_imgs,     # the list of images used to validate the model
                    nb_epochs,      # number of epochs
                    learning_rate,  # the learning rate
                    batch_size,     # the size of the batch
                    object_scale,
                    no_object_scale,
                    coord_scale,
                    class_scale,
                    saved_weights_name='best_weights.h5',
                    debug=False):

        self.batch_size = batch_size

        self.object_scale    = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale     = coord_scale
        self.class_scale     = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H'         : self.input_size,
            'IMAGE_W'         : self.input_size,
            'GRID_H'          : self.grid_h,
            'GRID_W'          : self.grid_w,
            'BOX'             : self.nb_box,
            'LABELS'          : self.labels,
            'CLASS'           : len(self.labels),
            'ACTIONS'         : self.actions,
            'MOVES'           : len(self.actions),
            'ANCHORS'         : self.anchors,
            'BATCH_SIZE'      : self.batch_size,
            'TRUE_BOX_BUFFER' : self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(loss=self.losses, loss_weights=self.lossWeights, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0.001,
                           patience=3,
                           mode='min',
                           verbose=1)

        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)

        tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/'),
                                  histogram_freq=0,
                                  #write_batch_performance=True,
                                  write_graph=True,
                                  write_images=False)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(generator        = train_generator,
                                 steps_per_epoch  = len(train_generator),
                                 epochs           = nb_epochs,
                                 verbose          = 2 if debug else 1,
                                 validation_data  = valid_generator,
                                 validation_steps = len(valid_generator),
                                 callbacks        = [early_stop, checkpoint, tensorboard])
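
Exemple #16 compiles with loss=self.losses and loss_weights=self.lossWeights, i.e. a model with several named outputs, each with its own loss and weight. Neither attribute is defined in the excerpt; the minimal sketch below (a hypothetical two-output model, not the excerpt's network) shows the compile pattern this implies.

from keras.layers import Dense, Input
from keras.models import Model
from keras.optimizers import Adam

inputs = Input(shape=(128,))
boxes = Dense(4, name='boxes')(inputs)                             # regression head
actions = Dense(8, activation='softmax', name='actions')(inputs)   # classification head
model = Model(inputs, [boxes, actions])

losses = {'boxes': 'mse', 'actions': 'categorical_crossentropy'}
loss_weights = {'boxes': 1.0, 'actions': 0.5}
model.compile(loss=losses, loss_weights=loss_weights,
              optimizer=Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))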
Exemple #17
0
s_train_val_split = int(0.8 * len(sup))

train_sup = sup[:s_train_val_split]  #model
val_sup = sup[s_train_val_split:]  #model

# splits
# sup_train_imgs = train_imgs[:SUP_NUM_IMAGES]
# # split the training set (supervised data) into train and validation, 80% / 20% respectively:
# train = sup_train_imgs[:int(SUP_NUM_IMAGES*0.8)]
# val = sup_train_imgs[-int(SUP_NUM_IMAGES*0.2):] #takes the last 20% images from the training
# ae_unsup = train_imgs[-UNSUP_NUM_IMAGES:]
# ae_train = ae_unsup[:int(UNSUP_NUM_IMAGES*0.8)]
# ae_val = ae_unsup[-int(UNSUP_NUM_IMAGES*0.2):]

train_batch = BatchGenerator(train_sup, generator_config, norm=normalize)

valid_batch = BatchGenerator(val_sup,
                             generator_config,
                             norm=normalize,
                             jitter=False)

#for the AE:
"""we use the unsupervised data to train the AE (which we get from the end of the training set"""
#todo: play with the jitter -- input true output false
tohar_train_batch = ToharGenerator(
    train, generator_config, norm=normalize,
    jitter=False)  # outputs (input,input) rather than (input, ground truth)
tohar_valid_batch = ToharGenerator(
    valid, generator_config, norm=normalize,
    jitter=False)  # outputs (input,input) rather than (input, ground truth)
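
ToharGenerator is not defined in this excerpt; per the comments it yields (input, input) pairs for autoencoder training. A minimal sketch of such a wrapper, assuming the base generator is a keras.utils.Sequence yielding (x, y) batches (an assumption, not the original class):

from keras.utils import Sequence

class AutoencoderBatches(Sequence):
    """Wrap a detection generator so each batch is (input, input)."""

    def __init__(self, base_generator):
        self.base = base_generator

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        x_batch, _ = self.base[idx]   # drop the detection ground truth
        return x_batch, x_batch       # the target equals the input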
Exemple #18
0
    all_imgs = all_imgs[:10]
train_valid_split = int(0.8 * len(all_imgs))
# splits: train (supervised + unsupervised), val, eval
train = all_imgs[:train_valid_split]
not_train = all_imgs[train_valid_split:]

sup = train[:int(SUP * len(train))]
unsup = train[int(SUP * len(train)):]
val = not_train[:int(0.5 * len(not_train))]
eval = not_train[int(0.5 * len(not_train)):]

if len(eval) == 0:
    raise ValueError('the evaluation split is empty')

#todo: normalize?
train_batch = BatchGenerator(sup, generator_config, norm=normalize)
valid_batch = BatchGenerator(val,
                             generator_config,
                             norm=normalize,
                             jitter=False)
ae_train_batch = ToharGenerator(
    train, generator_config, norm=normalize, jitter=False
)  #AE trained on all of the train set (supervised and unsupervised)
ae_valid_batch = ToharGenerator(val,
                                generator_config,
                                norm=normalize,
                                jitter=False)

eval_batch = BatchGenerator(eval,
                            generator_config,
                            norm=normalize)
Exemple #19
0
annotated_imgs += [(row.to_dict())
                   for index, row in df[(df['fish_number'].notnull()) & (
                       ~df.video_id.isin(list(cpt_zero.keys())))].iterrows()]

blank_imgs = random.sample(blank_imgs,
                           int(len(blank_imgs) * (1 - REMOVE_NEGATIVE_ITEMS)))
all_imgs = annotated_imgs + blank_imgs

train_imgs, valid_imgs = train_test_split(all_imgs,
                                          test_size=VALID_SHARE,
                                          random_state=42)

del all_imgs, blank_imgs, annotated_imgs, df

train_batch = BatchGenerator(train_imgs,
                             generator_config,
                             jitter=False,
                             aug_freq=AUG_FREQ)
valid_batch = BatchGenerator(valid_imgs,
                             generator_config,
                             jitter=False,
                             aug_freq=AUG_FREQ)

early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0.001,
                           patience=3,
                           mode='min',
                           verbose=1)

checkpoint = ModelCheckpoint('weights/yolo_fish_3.h5',
                             monitor='val_loss',
                             verbose=1)
Exemple #20
0
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epochs,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of warm-up epochs during which the model familiarizes itself with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))
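        # Worked example (hypothetical sizes): with warmup_epochs=3, train_times=2, valid_times=1,
        # len(train_generator)=100 and len(valid_generator)=25, this gives
        # warmup_batches = 3 * (2 * 100 + 1 * 25) = 675.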

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=3,
                                   mode='min',
                                   verbose=1)
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)
        tensorboard = TensorBoard(
            log_dir=os.path.expanduser('~/logs_2018_04_30/'),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################

        run_meta = tf.RunMetadata()

        with tf.contrib.tfprof.ProfileContext(
                '/home/ubuntu/keras_tf/tain_log') as pctx:

            # High level API, such as slim, Estimator, etc.

            self.model.fit_generator(
                generator=train_generator,
                steps_per_epoch=len(train_generator) * train_times,
                epochs=warmup_epochs + nb_epochs,
                verbose=2 if debug else 1,
                validation_data=valid_generator,
                validation_steps=len(valid_generator) * valid_times,
                callbacks=[early_stop, checkpoint, tensorboard],
                workers=3,
                max_queue_size=8)

            #Profiling ...
            opts = tf.profiler.ProfileOptionBuilder.float_operation()
            flops = tf.profiler.profile(run_meta=run_meta,
                                        cmd='scope',
                                        options=opts)

            opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter(
            )
            params = tf.profiler.profile(run_meta=run_meta,
                                         cmd='scope',
                                         options=opts)

        print("{:,} --- {:,}".format(flops.total_float_ops,
                                     params.total_parameters))

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator)

        # print evaluation
        evlog = "eval_" + ".log"

        with open(evlog, 'w') as ef:

            for label, average_precision in average_precisions.items():
                print(self.labels[label],
                      '{:.4f}'.format(average_precision),
                      file=ef)
            print('mAP: {:.4f}'.format(
                sum(average_precisions.values()) / len(average_precisions)),
                  file=ef)
Exemple #21
0
def makemodel():
    # Layer 1
    x = Conv2D(32, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image)
    x = BatchNormalization(name='norm_1')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 2
    x = Conv2D(64, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=False)(x)
    x = BatchNormalization(name='norm_2')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 3
    x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_3', use_bias=False)(x)
    x = BatchNormalization(name='norm_3')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 4
    x = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_4', use_bias=False)(x)
    x = BatchNormalization(name='norm_4')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 5
    x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_5', use_bias=False)(x)
    x = BatchNormalization(name='norm_5')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 6
    x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x)
    x = BatchNormalization(name='norm_6')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 7
    x = Conv2D(128, (1,1), strides=(1,1), padding='same', name='conv_7', use_bias=False)(x)
    x = BatchNormalization(name='norm_7')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 8
    x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_8', use_bias=False)(x)
    x = BatchNormalization(name='norm_8')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 9
    x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_9', use_bias=False)(x)
    x = BatchNormalization(name='norm_9')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 10
    x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_10', use_bias=False)(x)
    x = BatchNormalization(name='norm_10')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 11
    x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_11', use_bias=False)(x)
    x = BatchNormalization(name='norm_11')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 12
    x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_12', use_bias=False)(x)
    x = BatchNormalization(name='norm_12')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 13
    x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_13', use_bias=False)(x)
    x = BatchNormalization(name='norm_13')(x)
    x = LeakyReLU(alpha=0.1)(x)

    skip_connection = x

    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 14
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_14', use_bias=False)(x)
    x = BatchNormalization(name='norm_14')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 15
    x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_15', use_bias=False)(x)
    x = BatchNormalization(name='norm_15')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 16
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_16', use_bias=False)(x)
    x = BatchNormalization(name='norm_16')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 17
    x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_17', use_bias=False)(x)
    x = BatchNormalization(name='norm_17')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 18
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_18', use_bias=False)(x)
    x = BatchNormalization(name='norm_18')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 19
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_19', use_bias=False)(x)
    x = BatchNormalization(name='norm_19')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 20
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_20', use_bias=False)(x)
    x = BatchNormalization(name='norm_20')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 21
    skip_connection = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_21', use_bias=False)(skip_connection)
    skip_connection = BatchNormalization(name='norm_21')(skip_connection)
    skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
    skip_connection = Lambda(space_to_depth_x2)(skip_connection)

    x = concatenate([skip_connection, x])

    # Layer 22
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_22', use_bias=False)(x)
    x = BatchNormalization(name='norm_22')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 23
    x = Conv2D(BOX * (4 + 1 + CLASS), (1,1), strides=(1,1), padding='same', name='conv_23')(x)
    output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)

    # small hack to allow true_boxes to be registered when Keras builds the model
    # for more information: https://github.com/fchollet/keras/issues/2790
    output = Lambda(lambda args: args[0])([output, true_boxes])

    model = Model([input_image, true_boxes], output)


    train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS)
    ### write parsed annotations to pickle for fast retrieval next time
    #with open('train_imgs', 'wb') as fp:
    #    pickle.dump(train_imgs, fp)

    ### read saved pickle of parsed annotations
    #with open ('train_imgs', 'rb') as fp:
    #    train_imgs = pickle.load(fp)
    train_batch = BatchGenerator(train_imgs, generator_config, norm=normalize)
    
    valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS)
    ### write parsed annotations to pickle for fast retrieval next time
    #with open('valid_imgs', 'wb') as fp:
    #    pickle.dump(valid_imgs, fp)

    ### read saved pickle of parsed annotations
    #with open ('valid_imgs', 'rb') as fp:
    #    valid_imgs = pickle.load(fp)

    optimizer = Adam(lr=0.5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    #optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9)
    #optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0)

    model.compile(loss=custom_loss, optimizer=optimizer)

    return model
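
The detection head above ends in a 1x1 convolution with BOX * (4 + 1 + CLASS) filters, reshaped to a (GRID_H, GRID_W, BOX, 4 + 1 + CLASS) tensor. A quick shape check with hypothetical config values (these constants are not shown in the excerpt):

GRID_H, GRID_W = 13, 13      # assumed 416x416 input through a stride-32 backbone
BOX, CLASS = 5, 20           # assumed 5 anchors and 20 classes

filters = BOX * (4 + 1 + CLASS)                      # 5 * 25 = 125 filters for conv_23
output_shape = (GRID_H, GRID_W, BOX, 4 + 1 + CLASS)
print(filters, output_shape)                         # 125 (13, 13, 5, 25)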
Exemple #22
0
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epoch,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_bs,  # number of initial batches to let the model familiarize with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size
        self.warmup_bs = warmup_bs

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss,
                           optimizer=optimizer,
                           metrics=['accuracy'])

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=3,
                                   mode='min',
                                   verbose=1)
        """
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss', 
                                     verbose=1,
                                     save_best_only=True, 
                                     mode='min', 
                                     period=1)
        """
        # prepare the loss and accuracy calculation callback
        histories = Histories()

        logs_path = os.getcwd() + "/logs/"
        if not os.path.exists(logs_path):
            logs_path = os.path.expanduser('~/logs/')
        tb_counter = len(
            [log for log in os.listdir(logs_path) if 'yolo' in log]) + 1
        tensorboard = TensorBoard(
            log_dir=logs_path + 'yolo' + '_' + str(tb_counter),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################
        #from IPython.core.debugger import Pdb; Pdb().set_trace()

        history = self.model.fit_generator(
            generator=train_batch,
            steps_per_epoch=len(train_batch) * train_times // self.gpus,
            epochs=nb_epoch * self.gpus,
            verbose=1,
            validation_data=valid_batch,
            validation_steps=len(valid_batch) * valid_times // self.gpus,
            callbacks=[early_stop,
                       histories],  #[early_stop, checkpoint, tensorboard], 
            workers=3,
            max_queue_size=8)

        print('Loss: ', history)

        self.orgmodel.save_weights(saved_weights_name)
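
Histories is used as a callback above but is not defined in this excerpt. A minimal sketch of such a loss/accuracy recording callback (an assumption, not the original implementation) could be:

from keras.callbacks import Callback

class Histories(Callback):
    """Record loss and accuracy after every epoch."""

    def on_train_begin(self, logs=None):
        self.losses = []
        self.accuracies = []

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.accuracies.append(logs.get('acc'))   # 'acc' because the model is compiled with metrics=['accuracy']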
Exemple #23
0
    'BATCH_SIZE'      : BATCH_SIZE,
    'TRUE_BOX_BUFFER' : 50,
}

def normalize(image):
    return image / 255.

train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS)
### write parsed annotations to pickle for fast retrieval next time
#with open('train_imgs', 'wb') as fp:
#    pickle.dump(train_imgs, fp)

### read saved pickle of parsed annotations
#with open ('train_imgs', 'rb') as fp:
#    train_imgs = pickle.load(fp)
train_batch = BatchGenerator(train_imgs, generator_config, norm=normalize)

# valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS)
# ### write parsed annotations to pickle for fast retrieval next time
# #with open('valid_imgs', 'wb') as fp:
# #    pickle.dump(valid_imgs, fp)

# ### read saved pickle of parsed annotations
# #with open ('valid_imgs', 'rb') as fp:
# #    valid_imgs = pickle.load(fp)
# valid_batch = BatchGenerator(valid_imgs, generator_config, norm=normalize, jitter=False)

# len(train_imgs)
generator = train_batch
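
A quick sanity check on the generator, assuming BatchGenerator implements keras.utils.Sequence and yields ([images, true_boxes], y_batch) tuples as in the other YOLOv2 examples on this page (an assumption; the class itself is not shown here):

x_batch, y_batch = train_batch[0]
if isinstance(x_batch, list):
    print([a.shape for a in x_batch], y_batch.shape)
else:
    print(x_batch.shape, y_batch.shape)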
Exemple #24
0
# image = batches[0][0][0][0]
# plt.imshow(image.astype('uint8'))

# ** Split the dataset into the training set and the validation set **


def normalize(image):
    return image / 255.


# In[21]:

train_valid_split = int(0.8 * len(all_imgs))

train_batch = BatchGenerator(all_imgs[:train_valid_split], generator_config)
valid_batch = BatchGenerator(all_imgs[train_valid_split:],
                             generator_config,
                             norm=normalize)

# train_batch = train_batch[:1]
# # Construct the network

# In[22]:


# the function to implement the orgnization layer (thanks to github.com/allanzelener/YAD2K)
def space_to_depth_x2(x):
    return tf.space_to_depth(x, block_size=2)
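
tf.space_to_depth (a TensorFlow 1.x API) rearranges each 2x2 spatial block into channels, so a (H, W, C) feature map becomes (H/2, W/2, 4*C); this is how the YOLOv2 passthrough connection is brought down to the 13x13 grid. A small shape check with hypothetical tensor sizes:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 26, 26, 64))
y = tf.space_to_depth(x, block_size=2)
print(y.shape)   # (?, 13, 13, 256)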

Exemple #25
0
def _main_(args):
    config_path = args.conf
    weights_path = args.weights

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    ###############################
    #   Make the model
    ###############################

    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################

    yolo.load_weights(weights_path)

    ###############################
    #   Parse the annotations
    ###############################
    # parse annotations of the validation set; raise an error if the folder is missing
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'], config['model']['labels'])
    else:
        raise ValueError(
            'Validation folder does not exist or is not specified')

    ############################################
    # Make validation generators
    ############################################
    generator_config = {
        'IMAGE_H': yolo.input_size,
        'IMAGE_W': yolo.input_size,
        'GRID_H': yolo.grid_h,
        'GRID_W': yolo.grid_w,
        'BOX': yolo.nb_box,
        'LABELS': yolo.labels,
        'CLASS': len(yolo.labels),
        'ANCHORS': yolo.anchors,
        'BATCH_SIZE': config['train']['batch_size'],
        'TRUE_BOX_BUFFER': yolo.max_box_per_image,
    }

    generator = BatchGenerator(valid_imgs,
                               generator_config,
                               norm=yolo.feature_extractor.normalize,
                               jitter=False)

    y_true = []
    y_predicted = []
    for i in range(generator.size()):
        raw_image = generator.load_image(i)
        raw_height, raw_width, raw_channels = raw_image.shape

        # make the boxes and the labels
        pred_boxes = yolo.predict(raw_image)

        score = np.array([box.score for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])

        if len(pred_boxes) > 0:
            pred_boxes = np.array([[
                box.xmin * raw_width, box.ymin * raw_height,
                box.xmax * raw_width, box.ymax * raw_height, box.score
            ] for box in pred_boxes])
        else:
            pred_boxes = np.array([[]])

        # sort the boxes and the labels according to scores
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes = pred_boxes[score_sort]

        # store predicted label for the image i.
        # since multiple boxes may be predicted, choose the one with the highest score
        # TODO: find out why there are no predictions at all for certain images
        if pred_labels.any():
            y_predicted.append(pred_labels[0])
        else:
            y_predicted.append(4)

        # load true image annotations
        annotations = generator.load_annotation(i)

        if annotations.shape[0] > 1:
            raise ValueError('Multiple objects per image are not supported')

        ### store the true label for the image i
        y_true.append(annotations[0, 4])

    print('Processed ' + str(len(y_true)) + ' images')

    print('Confusion Matrix')
    print(confusion_matrix(y_true, y_predicted))
    print('Classification Report')

    # add a NoPrediction label to the class list, since the YOLO model returned no prediction for some images
    target_names = config['model']['labels'] + ['NoPrediction']
    print(classification_report(y_true, y_predicted,
                                target_names=target_names))
Exemple #26
0
    def train(self, train_imgs, valid_imgs,
              train_times, valid_times,
              nb_epoch,
              learning_rate,
              batch_size,
              warmup_epochs,
              object_scale, no_object_scale,
              coord_scale,
              class_scale,
              saved_weights_name='best_weights.h5',
              debug=False):
        ##########################
        # Save the training parameters
        ##########################
        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################
        generator_config = {
            'IMAGE_H'           :self.input_size,
            'IMAGE_W'           :self.input_size,
            'GRID_H'            :self.grid_h,
            'GRID_W'            :self.grid_w,
            'BOX'               :self.nb_box,
            'LABELS'            :self.labels,
            'CLASS'             :len(self.labels),
            'ANCHORS'           :self.anchors,
            'BATCH_SIZE'        :self.batch_size,
            'TRUE_BOX_BUFFER'   :self.max_box_per_image
        }
        train_generator = BatchGenerator(images=train_imgs,
                                         config=generator_config,
                                         norm=self.feature_extractor.normalize,
                                         shuffle=True, jitter=True)
        valid_generator = BatchGenerator(images=valid_imgs,
                                         config=generator_config,
                                         norm=self.feature_extractor.normalize,
                                         shuffle=True, jitter=False)
        self.warmup_batches = warmup_epochs * (train_times * len(train_generator) + valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################
        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, amsgrad=False)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Start the training process
        ############################################
        callbacks = self.create_callbacks(saved_weights_name=saved_weights_name, log_dir='./log_dir/')
        train_history = self.model.fit_generator(generator=train_generator,
                                                 steps_per_epoch=len(train_generator) * train_times,
                                                 epochs=warmup_epochs + nb_epoch,
                                                 validation_data=valid_generator,
                                                 validation_steps=len(valid_generator) * valid_times,
                                                 callbacks=callbacks,
                                                 verbose=2 if debug else 1, workers=3, max_queue_size=8)

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(generator=valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))

        return train_history
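
self.create_callbacks is called above but not shown in this excerpt. A minimal sketch consistent with that call, mirroring the callbacks used elsewhere on this page (an assumption, not the original method):

from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard

def create_callbacks(self, saved_weights_name, log_dir):
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=3,
                               mode='min', verbose=1)
    checkpoint = ModelCheckpoint(saved_weights_name, monitor='val_loss', verbose=1,
                                 save_best_only=True, mode='min', period=1)
    tensorboard = TensorBoard(log_dir=log_dir, write_graph=True, write_images=False)
    return [early_stop, checkpoint, tensorboard]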
Exemple #27
0
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epoch,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of warm-up epochs during which the model familiarizes itself with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size
        self.warmup_bs = warmup_epochs * (train_times *
                                          (len(train_imgs) / batch_size + 1) +
                                          valid_times *
                                          (len(valid_imgs) / batch_size + 1))

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        if warmup_epochs > 0:
            nb_epoch = warmup_epochs  # if it's warmup stage, don't train more than warmup_epochs

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=3,
                                   mode='min',
                                   verbose=1)
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)
        tb_counter = len([
            log for log in os.listdir(os.path.expanduser('./logs/'))
            if 'yolo' in log
        ]) + 1
        tensorboard = TensorBoard(
            log_dir=os.path.expanduser('./logs/') + 'yolo' + '_' +
            str(tb_counter),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_batch,
            steps_per_epoch=len(train_batch) * train_times,
            epochs=nb_epoch,
            verbose=1,
            validation_data=valid_batch,
            validation_steps=len(valid_batch) * valid_times,
            callbacks=[early_stop, checkpoint, tensorboard],
            workers=3,
            max_queue_size=8)
Exemple #28
0
    def train(self, train_imgs,     # the list of images to train the model
                    valid_imgs,     # the list of images used to validate the model
                    train_times,    # the number of times to repeat the training set, often used for small datasets
                    valid_times,    # the number of times to repeat the validation set, often used for small datasets
                    nb_epoch,       # number of epochs
                    learning_rate,  # the learning rate
                    batch_size,     # the size of the batch
                    warmup_bs,      # number of initial batches to let the model familiarize with the new dataset
                    object_scale,
                    no_object_scale,
                    coord_scale,
                    class_scale,
                    debug):     

        self.batch_size = batch_size
        self.warmup_bs  = warmup_bs 

        self.object_scale    = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale     = coord_scale
        self.class_scale     = class_scale

        self.debug = debug

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H'         : self.input_size, 
            'IMAGE_W'         : self.input_size,
            'GRID_H'          : self.grid_h,  
            'GRID_W'          : self.grid_w,
            'BOX'             : self.nb_box,
            'LABELS'          : self.labels,
            'CLASS'           : len(self.labels),
            'ANCHORS'         : self.anchors,
            'BATCH_SIZE'      : self.batch_size,
            'TRUE_BOX_BUFFER' : self.max_box_per_image,
        }    

        train_batch = BatchGenerator(train_imgs, generator_config)
        valid_batch = BatchGenerator(valid_imgs, generator_config, jitter=False)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss', 
                           min_delta=0.001, 
                           patience=3, 
                           mode='min', 
                           verbose=1)
        checkpoint = ModelCheckpoint('best_weights.h5', 
                                     monitor='val_loss', 
                                     verbose=1, 
                                     save_best_only=True, 
                                     mode='min', 
                                     period=1)
        tensorboard = TensorBoard(log_dir='~/logs/yolo/', 
                                  histogram_freq=0, 
                                  write_graph=True, 
                                  write_images=False)

        ############################################
        # Start the training process
        ############################################        

        self.model.fit_generator(generator        = train_batch.get_generator(), 
                                 steps_per_epoch  = train_batch.get_dateset_size() * train_times, 
                                 epochs           = nb_epoch, 
                                 verbose          = 1,
                                 validation_data  = valid_batch.get_generator(),
                                 validation_steps = valid_batch.get_dateset_size() * valid_times,
                                 callbacks        = [early_stop, checkpoint, tensorboard], 
                                 max_queue_size   = 3)
Exemple #29
0
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epochs,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of warm-up epochs during which the model familiarizes itself with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=1,
                                   mode='min',
                                   verbose=1)
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     save_weights_only=False,
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)
        tensorboard = TensorBoard(
            log_dir=os.path.expanduser('~/logs/'),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_generator,
            steps_per_epoch=len(train_generator) * train_times,
            epochs=warmup_epochs + nb_epochs,
            verbose=2 if debug else 1,
            validation_data=valid_generator,
            validation_steps=len(valid_generator) * valid_times,
            callbacks=[early_stop, checkpoint, tensorboard],
            workers=3,
            max_queue_size=8)

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))
Exemple #30
0
    def train(self):

        ############################################
        # Make train and validation generators
        ############################################

        objectReader = GroundTruth(self.config)
        objectReader.load_json()

        data = objectReader.objects_all()

        np.random.shuffle(data)

        size = int(len(data) * 0.8)

        train_instances, validation_instances = data[:size], data[size:]

        np.random.shuffle(train_instances)
        np.random.shuffle(validation_instances)

        checkpoint = ModelCheckpoint('weights_coco.h5',
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='auto',
                                     period=1)

        train_generator = BatchGenerator(train_instances,
                                         self.config['generator_config'],
                                         norm=self.normalize)
        valid_generator = BatchGenerator(validation_instances,
                                         self.config['generator_config'],
                                         norm=self.normalize,
                                         jitter=False)

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=self.config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)


        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(generator=train_generator,
                                 steps_per_epoch=len(train_generator),
                                 epochs= self.config['train']['nb_epochs'],
                                 verbose=2 if self.config['train']['debug'] else 1,
                                 validation_data=valid_generator,
                                 validation_steps=len(valid_generator),
                                 workers=3,
                                 callbacks=[checkpoint],
                                 max_queue_size=16)

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print('car', '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))