def get_data():
    image_configs = {
        'IMAGE_H': CONFIG_DICT['H'],
        'IMAGE_W': CONFIG_DICT['W'],
        'GRID_H': CONFIG_DICT['GRID_H'],
        'GRID_W': CONFIG_DICT['GRID_W'],
        'BOX': CONFIG_DICT['BOXES'],
        'LABELS': LABELS,
        'CLASS': CONFIG_DICT['NO_CLASSES'],
        'ANCHORS': CONFIG_DICT['ANCHORS'],
        'BATCH_SIZE': CONFIG_DICT['BATCH_SIZE'],
        'TRUE_BOX_BUFFER': CONFIG_DICT['GT_BOX_BUFFER']
    }
    train_data = CONFIG_DICT['TRAIN_DATA']
    train_annotations = CONFIG_DICT['TRAIN_ANNOTATIONS']
    val_data = CONFIG_DICT['VAL_DATA']
    val_annotations = CONFIG_DICT['VAL_ANNOTATIONS']

    # train_imgs, seen_train_labels = parse_annotation(train_annotations, train_data, labels=LABELS)
    # Pickle Dump train images
    # with open('train_imgs', 'wb') as fp:
        # pickle.dump(train_imgs, fp)
    with open('train_imgs', 'rb') as fp:
        train_imgs = pickle.load(fp)
    train_batch = BatchGenerator(train_imgs, image_configs, norm=normalize)
    # val_imgs, seen_valid_labels = parse_annotation(val_annotations, val_data, labels=LABELS)
    # Pickle dump validation images
    # with open('val_imgs', 'wb') as fp:
        # pickle.dump(val_imgs, fp)
    with open('val_imgs', 'rb') as fp:
        val_imgs = pickle.load(fp)
    val_batch = BatchGenerator(val_imgs, image_configs, norm=normalize, jitter=False)
    return [train_batch, val_batch]
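# Hedged usage sketch (an assumption, not part of the original source): inspect one
# batch produced by get_data(). BatchGenerator yields ([images, true_boxes], y_true),
# so the shapes of a single batch can be checked like this, provided the pickled
# 'train_imgs'/'val_imgs' files and CONFIG_DICT/LABELS are available.
if __name__ == '__main__':
    train_batch, val_batch = get_data()
    [x_batch, b_batch], y_batch = train_batch[0]
    print('images:', x_batch.shape, 'true boxes:', b_batch.shape, 'targets:', y_batch.shape)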
Example #2
    def compute_loss(self, train_imgs):
        generator_config = {
            "IMAGE_H": self.input_size,
            "IMAGE_W": self.input_size,
            "GRID_H": self.grid_h,
            "GRID_W": self.grid_w,
            "BOX": self.nb_box,
            "LABELS": self.labels,
            "CLASS": len(self.labels),
            "ANCHORS": self.anchors,
            "BATCH_SIZE": self.batch_size,
            "TRUE_BOX_BUFFER": self.max_box_per_img
        }
        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        [x, b], y = train_generator.__getitem__(0)
        print("The first batch loss is " +
              str(self.model.evaluate([x, b], y, batch_size=self.batch_size)))
        print(self.batch_size)
        sum_l = 0.
        for i in range(len(x)):
            test_x = x[i]
            test_b = b[i]
            test_y = y[i]

            l = self.model.evaluate([np.expand_dims(test_x, 0), np.expand_dims(test_b, 0)],
                                    np.expand_dims(test_y, 0),
                                    batch_size=1)
            print("The loss for image " + str(i) + " is " + str(l))
            sum_l += l
        print("Sum of per-image losses: " + str(sum_l))
        # from matplotlib import pyplot as plt
        # plt.imshow(test_x)
        # plt.show()
        return [test_x, test_b], test_y
def _main_(args):
    config_path = args.conf
    weights_path = args.weights

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    yolo.load_weights(weights_path)

    valid_imgs, valid_labels = parse_annotation(
        config['valid']['valid_annot_folder'],
        config['valid']['valid_image_folder'], config['model']['labels'])
    yolo.batch_size = config['train']['batch_size']
    yolo.sequence_length = 1
    generator_config = {
        'IMAGE_H': yolo.input_size,
        'IMAGE_W': yolo.input_size,
        'GRID_H': yolo.grid_h,
        'GRID_W': yolo.grid_w,
        'BOX': yolo.nb_box,
        'LABELS': yolo.labels,
        'CLASS': len(yolo.labels),
        'ANCHORS': yolo.anchors,
        'BATCH_SIZE': yolo.batch_size,
        'TRUE_BOX_BUFFER': yolo.max_box_per_image,
        'SEQUENCE_LENGTH': yolo.sequence_length
    }
    valid_generator = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=yolo.feature_extractor.normalize,
                                     jitter=False)
    ave_precisions = yolo.evaluate(valid_generator,
                                   iou_threshold=0.3,
                                   score_threshold=0.2)
    print("ave precisions: ", ave_precisions)
    print('mAP: {:.4f}'.format(
        sum(ave_precisions.values()) / len(ave_precisions)))
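    # Illustrative arithmetic (hypothetical values, not from the source): if
    # ave_precisions were {0: 0.80, 1: 0.60, 2: 0.70}, the line above would print
    # mAP: (0.80 + 0.60 + 0.70) / 3 = 0.7000.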
Example #4
    def get_generator_from_data(self, validation_data):
        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        valid_generator = BatchGenerator(validation_data,
                                         generator_config,
                                         norm=gen_norm,  # gen_norm: normalization function assumed to be defined elsewhere
                                         jitter=False)

        return valid_generator
        """
Example #5
    def train(self, train_imgs, valid_imgs, train_times, valid_times,
              object_scale, no_object_scale, coord_scale, class_scale,
              nb_epochs, learning_rate, batch_size, warmup_epochs, multi_gpu,
              saved_weights_dir, save_every_n_epoch, debug):
        self.batch_size = batch_size
        self.multi_gpu = multi_gpu
        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale
        self.debug = debug

        if self.multi_gpu:
            self.model = multi_gpu_model(self.model, gpus=2)
        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss,
                           optimizer=optimizer,
                           metrics=['accuracy', self.recall_metric])

        ############################################
        # Make a few callbacks
        ############################################

        monitor_metric, metric_mode = 'val_recall', 'max'

        early_stop = EarlyStopping(monitor=monitor_metric,
                                   min_delta=0.0001,
                                   patience=20,
                                   mode=metric_mode,
                                   verbose=1)

        reduce_lrt = ReduceLROnPlateau(monitor=monitor_metric,
                                       verbose=1,
                                       patience=5,
                                       mode=metric_mode,
                                       min_lr=1e-07,
                                       factor=0.8)

        CheckpointModel = ModelCheckpointDetached if self.multi_gpu else ModelCheckpoint

        if not os.path.exists(saved_weights_dir):
            os.mkdir(saved_weights_dir)
        checkpoint = CheckpointModel(os.path.join(
            saved_weights_dir, 'weights.{epoch:02d}-{val_loss:.2f}.h5'),
                                     monitor=monitor_metric,
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True,
                                     mode=metric_mode,
                                     period=save_every_n_epoch)

        backend_name = self.backend.replace(' ', '_').lower()
        tb_counter = len([
            log for log in os.listdir(os.path.expanduser('~/logs/'))
            if backend_name in log
        ]) + 1
        tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/') +
                                  backend_name + '_' + str(tb_counter),
                                  histogram_freq=0,
                                  batch_size=self.batch_size,
                                  write_graph=True,
                                  write_images=True)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_generator,
            steps_per_epoch=len(train_generator) * train_times,
            epochs=warmup_epochs + nb_epochs,
            verbose=1 if debug else 2,
            validation_data=valid_generator,
            validation_steps=len(valid_generator) * valid_times,
            callbacks=[
                early_stop, reduce_lrt,
                TerminateOnNaN(), checkpoint, tensorboard
            ],
            workers=8,
            initial_epoch=0,
            use_multiprocessing=False,
            max_queue_size=8)

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))
Example #6
# define paths for validation data
valid_image_folder = 'data/Validation_Images/'
valid_annot_folder = 'data/Validation_Annotations/'


# define normalize for images
def normalize(image):
    return image / 255.


# define the image and label datasets for validation
valid_imgs, valid_labels = parse_annotation(valid_annot_folder,
                                            valid_image_folder,
                                            labels=LABELS)
valid_generator = BatchGenerator(valid_imgs,
                                 generator_config,
                                 norm=normalize,
                                 jitter=False)

# load new model from training
model = load_model('new_model_6.h5',
                   custom_objects={
                       'tf': tf,
                       'custom_loss': custom_loss
                   })

average_precisions = evaluate(model, valid_generator)

# print evaluation
print('mAP: {:.4f}'.format(
    sum(average_precisions.values()) / len(average_precisions)))
Example #7
    def train(
            self,
            train_imgs,  # the list of images used to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epoch,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of initial warm-up epochs to let the model familiarize itself with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            logdir,
            saving_freq,
            saved_weights_name='weights.h5',
            debug=False):

        self.batch_size = batch_size
        self.warmup_bs = warmup_epochs * (train_times *
                                          (len(train_imgs) / batch_size + 1) +
                                          valid_times *
                                          (len(valid_imgs) / batch_size + 1))
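        # Illustrative arithmetic (hypothetical numbers, not from the source):
        # with warmup_epochs=3, train_times=1, valid_times=1, batch_size=16,
        # 1000 training images and 200 validation images this evaluates to
        # 3 * ((1000/16 + 1) + (200/16 + 1)) = 3 * (63.5 + 13.5) = 231.0
        # warm-up batches (a float, since Python 3 '/' is true division).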

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        if warmup_epochs > 0:
            nb_epoch = warmup_epochs  # if it's warmup stage, don't train more than warmup_epochs

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss,
                           metrics=['accuracy', self.custom_loss],
                           optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size[1],
            'IMAGE_W': self.input_size[0],
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        # Add information for the plots

        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(
            monitor='val_loss',
            min_delta=0.00005,  #0.0008
            patience=5,
            mode='min',
            verbose=1)

        checkpoint_best = ModelCheckpoint(logdir + saved_weights_name +
                                          '.hdf5',
                                          monitor='val_loss',
                                          verbose=1,
                                          save_best_only=True,
                                          save_weights_only=True,
                                          mode='min',
                                          period=1)

        tensorboard = TensorBoard(log_dir=os.path.expanduser(logdir),
                                  histogram_freq=0,
                                  write_graph=False,
                                  write_grads=False,
                                  write_images=False)

        # my addition, see https://keras.io/callbacks/#modelcheckpoint
        reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.5,
                                      patience=3,
                                      min_lr=1e-5,
                                      verbose=1)

        time_callback = TimeCallback(logdir=logdir)

        csv_logger = CSVLogger('log.csv', append=True, separator=';')

        my_callbacks = [
            checkpoint_best, tensorboard, reduce_lr, time_callback, csv_logger
        ]
        if saving_freq > 0:
            checkpoint_period = ModelCheckpoint(logdir +
                                                'weights{epoch:03d}.hdf5',
                                                monitor='val_loss',
                                                verbose=1,
                                                save_best_only=False,
                                                save_weights_only=True,
                                                mode='min',
                                                period=saving_freq)
            my_callbacks.append(checkpoint_period)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_batch,
            steps_per_epoch=len(train_batch) * train_times,
            epochs=nb_epoch,
            verbose=1,
            validation_data=valid_batch,
            validation_steps=len(valid_batch) * valid_times,
            callbacks=my_callbacks,
            workers=3,
            max_queue_size=8)
Example #8
    all_imgs = []
    for i in range(0, len(LABELS)):
        image_path = '/Volumes/JS/UECFOOD100_JS/' + str(i+1) + '/'
        annot_path = '/Volumes/JS/UECFOOD100_JS/' + str(i+1) + '/annotations_new/'

        folder_imgs, seen_labels = parse_annotation(annot_path, image_path)
        all_imgs.extend(folder_imgs)
    print(np.array(all_imgs).shape)

    # add extensions to image name if needed (currently a no-op)
    for img in all_imgs:
        img['filename'] = img['filename']

    print('=> Generate BatchGenerator.')
    batches = BatchGenerator(all_imgs, generator_config)

    img = batches[0][0][0][5]
    plt.imshow(img.astype('uint8'))
    # plt_example_batch(batches, BATCH_SIZE)

    ''' Start training '''
    train_valid_split = int(0.8 * len(all_imgs))

    train_batch = BatchGenerator(all_imgs[:train_valid_split], generator_config, norm=normalize, jitter=False)
    valid_batch = BatchGenerator(all_imgs[train_valid_split:], generator_config, norm=normalize, jitter=False)

    input_image = Input(shape=(IMAGE_H, IMAGE_W, 3))
    true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER, 4))

    model = get_pretrained_mn1()
def _main_(args):
    config_path = args.conf
    weights_path = args.weights

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    # parse annotations of the validation set, if any, otherwise report the missing folder
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'], config['model']['labels'])
    else:
        print('Folder ' + config['valid']['valid_annot_folder'] +
              ' does not exist')

    ###############################
    #   Make the model
    ###############################

    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################

    yolo.load_weights(weights_path)

    ############################################
    # Make validation generators
    ############################################
    generator_config = {
        'IMAGE_H': yolo.input_size,
        'IMAGE_W': yolo.input_size,
        'GRID_H': yolo.grid_h,
        'GRID_W': yolo.grid_w,
        'BOX': yolo.nb_box,
        'LABELS': yolo.labels,
        'CLASS': len(yolo.labels),
        'ANCHORS': yolo.anchors,
        'BATCH_SIZE': config['train']['batch_size'],
        'TRUE_BOX_BUFFER': yolo.max_box_per_image,
    }

    valid_generator = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=yolo.feature_extractor.normalize,
                                     jitter=False)

    ############################################
    # Compute mAP on the validation set
    ############################################
    average_precisions = yolo.evaluate(valid_generator)

    # print evaluation
    for label, average_precision in average_precisions.items():
        print(yolo.labels[label], '{:.4f}'.format(average_precision))
    print('mAP: {:.4f}'.format(
        sum(average_precisions.values()) / len(average_precisions)))
Example #10
    'BATCH_SIZE'      : BATCH_SIZE,
    'TRUE_BOX_BUFFER' : 50,
}

def normalize(image):
    return image / 255.

train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS)
### write parsed annotations to pickle for fast retrieval next time
#with open('train_imgs', 'wb') as fp:
#    pickle.dump(train_imgs, fp)

### read saved pickle of parsed annotations
#with open ('train_imgs', 'rb') as fp:
#    train_imgs = pickle.load(fp)
train_batch = BatchGenerator(train_imgs, generator_config, norm=normalize)

# valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS)
# ### write parsed annotations to pickle for fast retrieval next time
# #with open('valid_imgs', 'wb') as fp:
# #    pickle.dump(valid_imgs, fp)

# ### read saved pickle of parsed annotations
# #with open ('valid_imgs', 'rb') as fp:
# #    valid_imgs = pickle.load(fp)
# valid_batch = BatchGenerator(valid_imgs, generator_config, norm=normalize, jitter=False)

# len(train_imgs)
generator = train_batch
Example #11
    def train(
            self,
            train_imgs,  # the list of images used to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epochs,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of initial warm-up epochs to let the model familiarize itself with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            save_model=False,
            debug=False):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=10,
                                   mode='min',
                                   verbose=1)
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)
        tensorboard = TensorBoard(
            log_dir=os.path.expanduser('~/logs/'),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_generator,
            steps_per_epoch=len(train_generator) * train_times,
            epochs=warmup_epochs + nb_epochs,
            verbose=2 if debug else 1,
            validation_data=valid_generator,
            validation_steps=len(valid_generator) * valid_times,
            callbacks=[early_stop, checkpoint, tensorboard],
            workers=3,
            max_queue_size=8)

        ############################################
        # Compute mAP on the validation set
        ############################################

        #self.load_weights(saved_weights_name)

        average_precisions, average_speed = self.evaluate(valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))
        print('speed: {:.4f}'.format(average_speed))

        self.model.save_weights(saved_weights_name)

        if save_model:
            config = self.model.to_json()
            cfg_file = 'yolo_best_model.cfg'
            with open(cfg_file, 'wb') as f:
                pickle.dump(config, f)
            #docking compatible weight file
            weights = self.model.get_weights()
            weight_file = 'yolo_best_weights.hd5'
            with open(weight_file, 'wb') as f:
                pickle.dump(weights, f)
Example #12
# image = batches[0][0][0][0]
# plt.imshow(image.astype('uint8'))

# ** Split the dataset into the training set and the validation set **


def normalize(image):
    return image / 255.


# In[21]:

train_valid_split = int(0.8 * len(all_imgs))

train_batch = BatchGenerator(all_imgs[:train_valid_split], generator_config)
valid_batch = BatchGenerator(all_imgs[train_valid_split:],
                             generator_config,
                             norm=normalize)

# train_batch = train_batch[:1]
# # Construct the network

# In[22]:


# the function to implement the reorganization layer (thanks to github.com/allanzelener/YAD2K)
def space_to_depth_x2(x):
    return tf.space_to_depth(x, block_size=2)
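
# Quick shape check (assumption: TensorFlow 1.x, where tf.space_to_depth exists;
# in TF 2.x the equivalent is tf.nn.space_to_depth). With block_size=2 the spatial
# dimensions are halved and the channel count grows 4x, which is what lets the
# skip connection be concatenated with the deeper feature map later on.
#
# x = tf.placeholder(tf.float32, shape=(None, 38, 38, 64))
# print(space_to_depth_x2(x).shape)   # -> (?, 19, 19, 256)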

Example #13
    def train(
            self,
            train_imgs,  # the list of images used to train the model
            valid_imgs,  # the list of images used to validate the model
            evts_per_file,  # the number of events in each file
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epochs,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of initial warm-up epochs to let the model familiarize itself with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='',
            debug=False):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_C': self.input_shape[0],
            'IMAGE_H': self.input_shape[1],
            'IMAGE_W': self.input_shape[2],
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         evts_per_file,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         evts_per_file,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        # self.model.summary()

        # self.model.compile(loss='mean_squared_error', optimizer=optimizer)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=3,
                                   mode='min',
                                   verbose=1)
        checkpoint = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)
        tensorboard = TensorBoard(
            log_dir='./logs',
            histogram_freq=0,
            # write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_generator,
            steps_per_epoch=len(train_generator) * train_times,
            epochs=warmup_epochs + nb_epochs,
            verbose=2 if debug else 1,
            validation_data=valid_generator,
            validation_steps=len(valid_generator) * valid_times,
            callbacks=[early_stop, checkpoint, tensorboard],
            workers=1,
            max_queue_size=5)

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            logger.info('%s %.4f', self.labels[label], average_precision)
        logger.info('mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))
Example #14
    def train(
            self,
            train_imgs,  # the list of images used to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epoch,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_bs,  # number of initial warm-up batches to let the model familiarize itself with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size
        self.warmup_bs = warmup_bs

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss,
                           optimizer=optimizer,
                           metrics=['accuracy'])

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=3,
                                   mode='min',
                                   verbose=1)
        """
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss', 
                                     verbose=1,
                                     save_best_only=True, 
                                     mode='min', 
                                     period=1)
        """
        # prepare loss and accuracy calculation callback
        histories = Histories()

        logs_path = os.getcwd() + "/logs/"
        if not os.path.exists(logs_path):
            logs_path = os.path.expanduser('~/logs/')
        tb_counter = len(
            [log for log in os.listdir(logs_path) if 'yolo' in log]) + 1
        tensorboard = TensorBoard(
            log_dir=logs_path + 'yolo' + '_' + str(tb_counter),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################
        #from IPython.core.debugger import Pdb; Pdb().set_trace()

        history = self.model.fit_generator(
            generator=train_batch,
            steps_per_epoch=len(train_batch) * train_times // self.gpus,
            epochs=nb_epoch * self.gpus,
            verbose=1,
            validation_data=valid_batch,
            validation_steps=len(valid_batch) * valid_times // self.gpus,
            callbacks=[early_stop,
                       histories],  #[early_stop, checkpoint, tensorboard], 
            workers=3,
            max_queue_size=8)

        print('Training history: ', history.history)

        self.orgmodel.save_weights(saved_weights_name)
Example #15
    def train(
            self,
            train_imgs,  # the list of images used to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epochs,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of initial warm-up epochs to let the model familiarize itself with the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=3,
                                   mode='min',
                                   verbose=1)
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)
        tensorboard = TensorBoard(
            log_dir=os.path.expanduser('~/logs_2018_04_30/'),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################

        run_meta = tf.RunMetadata()

        with tf.contrib.tfprof.ProfileContext(
                '/home/ubuntu/keras_tf/tain_log') as pctx:

            # High level API, such as slim, Estimator, etc.

            self.model.fit_generator(
                generator=train_generator,
                steps_per_epoch=len(train_generator) * train_times,
                epochs=warmup_epochs + nb_epochs,
                verbose=2 if debug else 1,
                validation_data=valid_generator,
                validation_steps=len(valid_generator) * valid_times,
                callbacks=[early_stop, checkpoint, tensorboard],
                workers=3,
                max_queue_size=8)

            #Profiling ...
            opts = tf.profiler.ProfileOptionBuilder.float_operation()
            flops = tf.profiler.profile(run_meta=run_meta,
                                        cmd='scope',
                                        options=opts)

            opts = tf.profiler.ProfileOptionBuilder.trainable_variables_parameter(
            )
            params = tf.profiler.profile(run_meta=run_meta,
                                         cmd='scope',
                                         options=opts)

        print("{:,} --- {:,}".format(flops.total_float_ops,
                                     params.total_parameters))

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator)

        # print evaluation
        evlog = "eval_" + ".log"

        with open(evlog, 'w') as ef:

            for label, average_precision in average_precisions.items():
                print(self.labels[label],
                      '{:.4f}'.format(average_precision),
                      file=ef)
            print('mAP: {:.4f}'.format(
                sum(average_precisions.values()) / len(average_precisions)),
                  file=ef)
Example #16
all_imgs = all_imgs[:10]
train_valid_split = int(0.8 * len(all_imgs))
# split into supervised/unsupervised train, val, and eval sets
train = all_imgs[:train_valid_split]
not_train = all_imgs[train_valid_split:]

sup = train[:int(SUP * len(train))]
unsup = train[int(SUP * len(train)):]
val = not_train[:int(0.5 * len(not_train))]
eval = not_train[int(0.5 * len(not_train)):]

if len(eval) == 0:
    raise ValueError('The eval split is empty')

#todo: normalize?
train_batch = BatchGenerator(sup, generator_config, norm=normalize)
valid_batch = BatchGenerator(val,
                             generator_config,
                             norm=normalize,
                             jitter=False)
ae_train_batch = ToharGenerator(
    train, generator_config, norm=normalize, jitter=False
)  #AE trained on all of the train set (supervised and unsupervised)
ae_valid_batch = ToharGenerator(val,
                                generator_config,
                                norm=normalize,
                                jitter=False)

eval_batch = BatchGenerator(eval,
                            generator_config,
                            norm=normalize,
Example #17
    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    yolo.load_weights(config['train']['pretrained_weights'])

    generator_config = {
        'IMAGE_H': config['model']['input_size'],
        'IMAGE_W': config['model']['input_size'],
        'GRID_H': config['model']['input_size'] // 32,
        'GRID_W': config['model']['input_size'] // 32,
        'BOX': len(config['model']['anchors']),
        'LABELS': config['model']['labels'],
        'CLASS': len(config['model']['labels']),
        'ANCHORS': config['model']['anchors'],
        'BATCH_SIZE': 1,
        'TRUE_BOX_BUFFER': config['model']['max_box_per_image'],
    }
    val_generator = BatchGenerator(
        valid_imgs,
        generator_config,
        norm=normalize,
        flipflop=False,
        shoechanger=False,
        zeropad=False,
    )
    yolo.evaluate(val_generator)
Example #18
def makemodel():
    # Layer 1
    x = Conv2D(32, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image)
    x = BatchNormalization(name='norm_1')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 2
    x = Conv2D(64, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=False)(x)
    x = BatchNormalization(name='norm_2')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 3
    x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_3', use_bias=False)(x)
    x = BatchNormalization(name='norm_3')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 4
    x = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_4', use_bias=False)(x)
    x = BatchNormalization(name='norm_4')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 5
    x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_5', use_bias=False)(x)
    x = BatchNormalization(name='norm_5')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 6
    x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x)
    x = BatchNormalization(name='norm_6')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 7
    x = Conv2D(128, (1,1), strides=(1,1), padding='same', name='conv_7', use_bias=False)(x)
    x = BatchNormalization(name='norm_7')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 8
    x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_8', use_bias=False)(x)
    x = BatchNormalization(name='norm_8')(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 9
    x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_9', use_bias=False)(x)
    x = BatchNormalization(name='norm_9')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 10
    x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_10', use_bias=False)(x)
    x = BatchNormalization(name='norm_10')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 11
    x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_11', use_bias=False)(x)
    x = BatchNormalization(name='norm_11')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 12
    x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_12', use_bias=False)(x)
    x = BatchNormalization(name='norm_12')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 13
    x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_13', use_bias=False)(x)
    x = BatchNormalization(name='norm_13')(x)
    x = LeakyReLU(alpha=0.1)(x)

    skip_connection = x

    x = MaxPooling2D(pool_size=(2, 2))(x)

    # Layer 14
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_14', use_bias=False)(x)
    x = BatchNormalization(name='norm_14')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 15
    x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_15', use_bias=False)(x)
    x = BatchNormalization(name='norm_15')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 16
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_16', use_bias=False)(x)
    x = BatchNormalization(name='norm_16')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 17
    x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_17', use_bias=False)(x)
    x = BatchNormalization(name='norm_17')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 18
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_18', use_bias=False)(x)
    x = BatchNormalization(name='norm_18')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 19
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_19', use_bias=False)(x)
    x = BatchNormalization(name='norm_19')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 20
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_20', use_bias=False)(x)
    x = BatchNormalization(name='norm_20')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 21
    skip_connection = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_21', use_bias=False)(skip_connection)
    skip_connection = BatchNormalization(name='norm_21')(skip_connection)
    skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
    skip_connection = Lambda(space_to_depth_x2)(skip_connection)

    x = concatenate([skip_connection, x])

    # Layer 22
    x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_22', use_bias=False)(x)
    x = BatchNormalization(name='norm_22')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Layer 23
    x = Conv2D(BOX * (4 + 1 + CLASS), (1,1), strides=(1,1), padding='same', name='conv_23')(x)
    output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)

    # small hack to allow true_boxes to be registered when Keras builds the model
    # for more information: https://github.com/fchollet/keras/issues/2790
    output = Lambda(lambda args: args[0])([output, true_boxes])
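    # The Lambda above simply returns `output`; routing `true_boxes` through it lets
    # Keras register `true_boxes` as a model input even though only the custom loss uses it.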

    model = Model([input_image, true_boxes], output)


    train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS)
    ### write parsed annotations to pickle for fast retrieval next time
    #with open('train_imgs', 'wb') as fp:
    #    pickle.dump(train_imgs, fp)

    ### read saved pickle of parsed annotations
    #with open ('train_imgs', 'rb') as fp:
    #    train_imgs = pickle.load(fp)
    train_batch = BatchGenerator(train_imgs, generator_config, norm=normalize)
    
    valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS)
    ### write parsed annotations to pickle for fast retrieval next time
    #with open('valid_imgs', 'wb') as fp:
    #    pickle.dump(valid_imgs, fp)

    ### read saved pickle of parsed annotations
    #with open ('valid_imgs', 'rb') as fp:
    #    valid_imgs = pickle.load(fp)

    optimizer = Adam(lr=0.5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    #optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9)
    #optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0)

    model.compile(loss=custom_loss, optimizer=optimizer)

    return model
    'CLASS': len(LABELS),
    'ANCHORS': ANCHORS,
    'BATCH_SIZE': 2,
    'TRUE_BOX_BUFFER': 50,
}

#Path for training data and annotations
image_path = './dataset-master/train/'
annot_path = './dataset-master/microanno/'
#print(os.listdir(annot_path))
#train_imgs, seen_labels = parse_annotation_new(annot_path, image_path, LABELS)
train_imgs, seen_labels = parse_annotation_txt(annot_path, image_path, LABELS)

train_img = train_imgs[0:126]
valid_img = train_imgs[126:]
train_batch = BatchGenerator(train_img, generator_config, jitter=False)
valid_batch = BatchGenerator(valid_img, generator_config, jitter=False)

# **Setup a few callbacks and start the training**

early_stop = EarlyStopping(monitor='loss',
                           min_delta=0.001,
                           patience=10,
                           mode='min',
                           verbose=1)

checkpoint = ModelCheckpoint('weights_yolo_adam_validdata_temp.h5',
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
    def train(self, train_imgs, valid_imgs,
              train_times, valid_times,
              nb_epoch,
              learning_rate,
              batch_size,
              warmup_epochs,
              object_scale, no_object_scale,
              coord_scale,
              class_scale,
              saved_weights_name='best_weights.h5',
              debug=False):
        ##########################
        # Save the training parameters
        ##########################
        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################
        generator_config = {
            'IMAGE_H'           :self.input_size,
            'IMAGE_W'           :self.input_size,
            'GRID_H'            :self.grid_h,
            'GRID_W'            :self.grid_w,
            'BOX'               :self.nb_box,
            'LABELS'            :self.labels,
            'CLASS'             :len(self.labels),
            'ANCHORS'           :self.anchors,
            'BATCH_SIZE'        :self.batch_size,
            'TRUE_BOX_BUFFER'   :self.max_box_per_image
        }
        train_generator = BatchGenerator(images=train_imgs,
                                         config=generator_config,
                                         norm=self.feature_extractor.normalize,
                                         shuffle=True, jitter=True)
        valid_generator = BatchGenerator(images=valid_imgs,
                                         config=generator_config,
                                         norm=self.feature_extractor.normalize,
                                         shuffle=True, jitter=False)
        self.warmup_batches = warmup_epochs * (train_times * len(train_generator) + valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################
        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, amsgrad=False)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Start the training process
        ############################################
        callbacks = self.create_callbacks(saved_weights_name=saved_weights_name, log_dir='./log_dir/')
        train_history = self.model.fit_generator(generator=train_generator,
                                                 steps_per_epoch=len(train_generator) * train_times,
                                                 epochs=warmup_epochs + nb_epoch,
                                                 validation_data=valid_generator,
                                                 validation_steps=len(valid_generator) * valid_times,
                                                 callbacks=callbacks,
                                                 verbose=2 if debug else 1, workers=3, max_queue_size=8)

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(generator=valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))

        return train_history
Example #21
def _main_(args):
    config_path = args.conf
    weights_path = args.weights

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    ###############################
    #   Make the model
    ###############################

    yolo = YOLO(backend=config['model']['backend'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    ###############################
    #   Load trained weights
    ###############################

    yolo.load_weights(weights_path)

    ###############################
    #   Parse the annotations
    ###############################
    # parse annotations of the validation set; a dedicated validation folder is required here
    if os.path.exists(config['valid']['valid_annot_folder']):
        valid_imgs, valid_labels = parse_annotation(
            config['valid']['valid_annot_folder'],
            config['valid']['valid_image_folder'], config['model']['labels'])
    else:
        raise ValueError(
            'Validation folder does not exist or is not specified')

    ############################################
    # Make validation generators
    ############################################
    generator_config = {
        'IMAGE_H': yolo.input_size,
        'IMAGE_W': yolo.input_size,
        'GRID_H': yolo.grid_h,
        'GRID_W': yolo.grid_w,
        'BOX': yolo.nb_box,
        'LABELS': yolo.labels,
        'CLASS': len(yolo.labels),
        'ANCHORS': yolo.anchors,
        'BATCH_SIZE': config['train']['batch_size'],
        'TRUE_BOX_BUFFER': yolo.max_box_per_image,
    }

    generator = BatchGenerator(valid_imgs,
                               generator_config,
                               norm=yolo.feature_extractor.normalize,
                               jitter=False)

    y_true = []
    y_predicted = []
    for i in range(generator.size()):
        raw_image = generator.load_image(i)
        raw_height, raw_width, raw_channels = raw_image.shape

        # predict the boxes and labels for image i
        pred_boxes = yolo.predict(raw_image)

        score = np.array([box.score for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])

        if len(pred_boxes) > 0:
            pred_boxes = np.array([[
                box.xmin * raw_width, box.ymin * raw_height,
                box.xmax * raw_width, box.ymax * raw_height, box.score
            ] for box in pred_boxes])
        else:
            pred_boxes = np.array([[]])

        # sort the boxes and the labels according to scores
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes = pred_boxes[score_sort]
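        # e.g. (hypothetical scores) score = [0.2, 0.9, 0.5] -> np.argsort(-score) = [1, 2, 0],
        # so the highest-confidence box and its label come first after this reordering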

        # store predicted label for the image i.
        # since multiple boxes may be predicted, choose the one with the highest score
        # TODO: find out why there are no predictions at all for certain images
        if len(pred_labels) > 0:
            y_predicted.append(pred_labels[0])
        else:
            # no prediction: fall back to the extra 'NoPrediction' class index,
            # which equals the number of real labels (see target_names below)
            y_predicted.append(len(config['model']['labels']))

        # load true image annotations
        annotations = generator.load_annotation(i)

        if annotations.shape[0] > 1:
            raise ValueError('Multiple objects per image are not supported')

        ### store the true label for the image i
        y_true.append(annotations[0, 4])

    print('Processed ' + str(len(y_true)) + ' images')

    print('Confusion Matrix')
    print(confusion_matrix(y_true, y_predicted))
    print('Classification Report')

    # add a NoPrediction label to the class list, since the YOLO model returns no prediction for some images
    target_names = config['model']['labels'] + ['NoPrediction']
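    # Minimal illustration (hypothetical 4-label config) of how the fallback index lines up:
    #   config['model']['labels'] = ['car', 'bus', 'truck', 'bike']   -> class indices 0..3
    #   target_names = labels + ['NoPrediction']                      -> index 4 is the fallback
    # so classification_report can attribute images with no prediction to 'NoPrediction'.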
    print(classification_report(y_true, y_predicted,
                                target_names=target_names))
Example #22
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epoch,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of initial warm-up epochs to let the model adjust to the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size
        self.warmup_bs = warmup_epochs * (train_times *
                                          (len(train_imgs) / batch_size + 1) +
                                          valid_times *
                                          (len(valid_imgs) / batch_size + 1))
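        # len(imgs) / batch_size + 1 approximates the number of batches per pass over a set;
        # e.g. (hypothetical) 1000 train images, 200 valid images, batch_size=16, train_times=2,
        # valid_times=1, warmup_epochs=3 -> warmup_bs = 3 * (2 * 63.5 + 1 * 13.5) = 421.5 batches.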

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        if warmup_epochs > 0:
            nb_epoch = warmup_epochs  # during the warm-up stage, don't train for more than warmup_epochs

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=3,
                                   mode='min',
                                   verbose=1)
        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)
        tb_counter = len([
            log for log in os.listdir(os.path.expanduser('./logs/'))
            if 'yolo' in log
        ]) + 1
        tensorboard = TensorBoard(
            log_dir=os.path.expanduser('./logs/') + 'yolo' + '_' +
            str(tb_counter),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_batch,
            steps_per_epoch=len(train_batch) * train_times,
            epochs=nb_epoch,
            verbose=1,
            validation_data=valid_batch,
            validation_steps=len(valid_batch) * valid_times,
            callbacks=[early_stop, checkpoint, tensorboard],
            workers=3,
            max_queue_size=8)
Example #23
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            train_times,  # the number of times to repeat the training set, often used for small datasets
            valid_times,  # the number of times to repeat the validation set, often used for small datasets
            nb_epochs,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            warmup_epochs,  # number of initial warm-up epochs to let the model adjust to the new dataset
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False,
            workers=3,
            max_queue_size=8,
            early_stop=True,
            custom_callback=[],
            tb_logdir="./"):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size[0],
            'IMAGE_W': self.input_size[1],
            'IMAGE_C': self.input_size[2],
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        self.warmup_batches = warmup_epochs * (
            train_times * len(train_generator) +
            valid_times * len(valid_generator))

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop_cb = EarlyStopping(monitor='val_loss',
                                      min_delta=0.001,
                                      patience=3,
                                      mode='min',
                                      verbose=1)
        checkpoint_cb = ModelCheckpoint(saved_weights_name,
                                        monitor='val_loss',
                                        verbose=1,
                                        save_best_only=True,
                                        mode='min',
                                        period=1)
        tensorboard_cb = TensorBoard(
            log_dir=tb_logdir,
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        root, ext = os.path.splitext(saved_weights_name)
        map_evaluator_cb = self.MAP_evaluation(self,
                                               valid_generator,
                                               save_best=True,
                                               save_name=root + "_bestMap" +
                                               ext,
                                               tensorboard=tensorboard_cb)

        if not isinstance(custom_callback, list):
            custom_callback = [custom_callback]
        callbacks = [checkpoint_cb, tensorboard_cb, map_evaluator_cb
                     ] + custom_callback
        if early_stop: callbacks.append(early_stop_cb)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_generator,
            steps_per_epoch=len(train_generator) * train_times,
            epochs=warmup_epochs + nb_epochs,
            verbose=2 if debug else 1,
            validation_data=valid_generator,
            validation_steps=len(valid_generator) * valid_times,
            callbacks=callbacks,
            workers=workers,
            max_queue_size=max_queue_size)
Example #24
    'ANCHORS': ANCHORS,
    'BATCH_SIZE': BATCH_SIZE,
    'TRUE_BOX_BUFFER': 50,
}

train_imgs, seen_train_labels = parse_annotation(train_annot_folder,
                                                 train_image_folder,
                                                 labels=LABELS)
### write parsed annotations to pickle for fast retrieval next time
#with open('train_imgs', 'wb') as fp:
#    pickle.dump(train_imgs, fp)

### read saved pickle of parsed annotations
#with open ('train_imgs', 'rb') as fp:
#    train_imgs = pickle.load(fp)
train_batch = BatchGenerator(train_imgs, generator_config)

valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder,
                                                 valid_image_folder,
                                                 labels=LABELS)
### write parsed annotations to pickle for fast retrieval next time
#with open('valid_imgs', 'wb') as fp:
#    pickle.dump(valid_imgs, fp)

### read saved pickle of parsed annotations
#with open ('valid_imgs', 'rb') as fp:
#    valid_imgs = pickle.load(fp)
valid_batch = BatchGenerator(valid_imgs, generator_config, jitter=False)

early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0.001,
Example #25
    def train(self, train_imgs,     # the list of images to train the model
                    valid_imgs,     # the list of images used to validate the model
                    train_times,    # the number of times to repeat the training set, often used for small datasets
                    valid_times,    # the number of times to repeat the validation set, often used for small datasets
                    nb_epoch,       # number of epochs
                    learning_rate,  # the learning rate
                    batch_size,     # the size of the batch
                    warmup_bs,      # number of initial warm-up batches to let the model adjust to the new dataset
                    object_scale,
                    no_object_scale,
                    coord_scale,
                    class_scale,
                    saved_weights_name='best_weights.h5',
                    debug=False):

        self.batch_size = batch_size
        self.warmup_bs  = warmup_bs

        self.object_scale    = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale     = coord_scale
        self.class_scale     = class_scale

        self.debug = debug

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H'         : self.input_size,
            'IMAGE_W'         : self.input_size,
            'GRID_H'          : self.grid_h,
            'GRID_W'          : self.grid_w,
            'BOX'             : self.nb_box,
            'LABELS'          : self.labels,
            'CLASS'           : len(self.labels),
            'ANCHORS'         : self.anchors,
            'BATCH_SIZE'      : self.batch_size,
            'TRUE_BOX_BUFFER' : self.max_box_per_image,
        }

        # batch generators
        train_batch = BatchGenerator(train_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize)
        valid_batch = BatchGenerator(valid_imgs,
                                     generator_config,
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)

        # early stopping
        early_stop = EarlyStopping(
                monitor='val_loss',
                min_delta=0.001,
                patience=2,
                mode='min',
                verbose=1)
        # checkpoint
        saved_weights_name = "./models/" + saved_weights_name.replace(" ", "")
        checkpoint = ModelCheckpoint(
                saved_weights_name,
                monitor='val_loss',
                verbose=1,
                save_best_only=True,
                mode='min',
                period=1)

        # TensorBoard counter
        dir_name = "yolo_" + self.architecture
        tb_counter  = len([log for log in os.listdir("./logs") if dir_name in log]) + 1
        log_dir = "./logs/" + dir_name + "_" + str(tb_counter)
        # TensorBoard dir
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        # TensorBoard
        tensorboard = TensorBoard(log_dir=log_dir,
                                  histogram_freq=0,
                                  write_graph=True,
                                  write_images=False)


        # model fit
        self.model.fit_generator(generator        = train_batch,
                                 steps_per_epoch  = len(train_batch) * train_times,
                                 epochs           = nb_epoch,
                                 verbose          = 1,
                                 validation_data  = valid_batch,
                                 validation_steps = len(valid_batch) * valid_times,
                                 callbacks        = [early_stop, checkpoint, tensorboard],
                                 workers          = 2,
                                 max_queue_size   = 8)
Example #26
    def train(self, train_imgs,     # the list of images to train the model
                    valid_imgs,     # the list of images used to validate the model
                    train_times,    # the number of times to repeat the training set, often used for small datasets
                    valid_times,    # the number of times to repeat the validation set, often used for small datasets
                    nb_epoch,       # number of epochs
                    learning_rate,  # the learning rate
                    batch_size,     # the size of the batch
                    warmup_bs,      # number of initial warm-up batches to let the model adjust to the new dataset
                    object_scale,
                    no_object_scale,
                    coord_scale,
                    class_scale,
                    debug):     

        self.batch_size = batch_size
        self.warmup_bs  = warmup_bs 

        self.object_scale    = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale     = coord_scale
        self.class_scale     = class_scale

        self.debug = debug

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H'         : self.input_size, 
            'IMAGE_W'         : self.input_size,
            'GRID_H'          : self.grid_h,  
            'GRID_W'          : self.grid_w,
            'BOX'             : self.nb_box,
            'LABELS'          : self.labels,
            'CLASS'           : len(self.labels),
            'ANCHORS'         : self.anchors,
            'BATCH_SIZE'      : self.batch_size,
            'TRUE_BOX_BUFFER' : self.max_box_per_image,
        }    

        train_batch = BatchGenerator(train_imgs, generator_config)
        valid_batch = BatchGenerator(valid_imgs, generator_config, jitter=False)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss', 
                           min_delta=0.001, 
                           patience=3, 
                           mode='min', 
                           verbose=1)
        checkpoint = ModelCheckpoint('best_weights.h5', 
                                     monitor='val_loss', 
                                     verbose=1, 
                                     save_best_only=True, 
                                     mode='min', 
                                     period=1)
        tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/yolo/'),  # expand '~' explicitly; the callback does not
                                  histogram_freq=0, 
                                  write_graph=True, 
                                  write_images=False)

        ############################################
        # Start the training process
        ############################################        

        self.model.fit_generator(generator        = train_batch.get_generator(), 
                                 steps_per_epoch  = train_batch.get_dateset_size() * train_times, 
                                 epochs           = nb_epoch, 
                                 verbose          = 1,
                                 validation_data  = valid_batch.get_generator(),
                                 validation_steps = valid_batch.get_dateset_size() * valid_times,
                                 callbacks        = [early_stop, checkpoint, tensorboard], 
                                 max_queue_size   = 3)
Example #27
def normalize(image):
    return image / 255.


train_imgs, seen_train_labels = parse_annotation(train_annot_folder,
                                                 train_image_folder,
                                                 labels=LABELS)
### write parsed annotations to pickle for fast retrieval next time
with open('train_imgs', 'wb') as fp:
    pickle.dump(train_imgs, fp)

### read saved pickle of parsed annotations
with open('train_imgs', 'rb') as fp:
    train_imgs = pickle.load(fp)
train_batch = BatchGenerator(train_imgs, generator_config, norm=normalize)

valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder,
                                                 valid_image_folder,
                                                 labels=LABELS)
### write parsed annotations to pickle for fast retrieval next time
with open('valid_imgs', 'wb') as fp:
    pickle.dump(valid_imgs, fp)

### read saved pickle of parsed annotations
with open('valid_imgs', 'rb') as fp:
    valid_imgs = pickle.load(fp)
valid_batch = BatchGenerator(valid_imgs,
                             generator_config,
                             norm=normalize,
                             jitter=False)
Example #28
    def train(self, train_imgs, valid_imgs,
                    train_times,    # the number of times to repeat the training set, often used for small datasets
                    valid_times,    # the number of times to repeat the validation set, often used for small datasets
                    nb_epochs,      # number of epochs
                    learning_rate,  # the learning rate
                    batch_size,     # the size of the batch
                    warmup_epochs,  # number of initial warm-up epochs to let the model adjust to the new dataset
                    object_scale,
                    no_object_scale,
                    coord_scale,
                    class_scale,
                    full_log_dir,
                    early_stop_patience,
                    early_stop_min_delta,
                    learning_rate_decay_factor,
                    learning_rate_decay_patience,
                    learning_rate_decay_min_lr,
                    saved_weights_name='best_weights.h5',
                    debug=False,
                    sequence_length=10):

        self.batch_size = batch_size
        self.sequence_length = sequence_length

        self.object_scale    = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale     = coord_scale
        self.class_scale     = class_scale

        self.debug = debug

        self.full_log_dir = full_log_dir
        self.early_stop_patience = early_stop_patience
        self.early_stop_min_delta = early_stop_min_delta
        self.learning_rate_decay_factor = learning_rate_decay_factor
        self.learning_rate_decay_patience = learning_rate_decay_patience
        self.learning_rate_decay_min_lr = learning_rate_decay_min_lr


        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H'         : self.input_size, 
            'IMAGE_W'         : self.input_size,
            'GRID_H'          : self.grid_h,  
            'GRID_W'          : self.grid_w,
            'BOX'             : self.nb_box,
            'LABELS'          : self.labels,
            'CLASS'           : len(self.labels),
            'ANCHORS'         : self.anchors,
            'BATCH_SIZE'      : self.batch_size,
            'TRUE_BOX_BUFFER' : self.max_box_per_image,
            'SEQUENCE_LENGTH' : self.sequence_length
        }    

        train_generator = BatchGenerator(train_imgs, 
                                     generator_config, 
                                     norm=self.feature_extractor.normalize,
                                     debug=self.debug)
        valid_generator = BatchGenerator(valid_imgs, 
                                     generator_config, 
                                     norm=self.feature_extractor.normalize,
                                     jitter=False)   

        self.warmup_batches  = warmup_epochs * (train_times*len(train_generator) + valid_times*len(valid_generator)) / 4 
        print("Using %d warmup batches" % self.warmup_batches)


        ############################################
        # Define your callbacks
        ############################################

        # HS: with a patience of 100, training finished within 200 epochs, so the patience was raised to 400
        early_stop = EarlyStopping(monitor='val_loss', 
                           min_delta=self.early_stop_min_delta, 
                           patience=self.early_stop_patience , 
                           verbose=1)

        # This didn't work with multi-GPU training
        checkpoint = ModelCheckpoint('{name}_{{epoch:02d}}.h5'.format(name=saved_weights_name), 
                                     monitor='val_loss', 
                                     verbose=0, 
                                     save_best_only=True, 
                                     mode='min', 
                                     period=1)

        # defined by Anuar because the checkpoint above didn't work with multi-GPU training
        checkpoint_multi = MultiGPUCheckpoint(
                        '{name}_{{epoch:02d}}_multi.h5'.format(name=saved_weights_name),
                        verbose=1,
                        save_best_only=True,
                        mode='min',
                        period=1)


        # defined by HS to save the weights with the best validation loss
        checkpoint_multi_hs = MultiGPUCheckpoint(
                        '{name}_{{epoch:02d}}_hsBb_valLoss-{{val_loss:.2f}}.h5'.format(name=saved_weights_name),
                        verbose=1,
                        save_best_only=True,)


        # defined by HS
        # HS: originally used monitor='val_loss', factor=0.5, patience=20, min_lr=1e-6
        reduce_lr_hs = ReduceLROnPlateau(monitor='val_loss',
                                         factor=self.learning_rate_decay_factor,
                                         patience=self.learning_rate_decay_patience,
                                         min_lr=self.learning_rate_decay_min_lr)

        # written by Anuar
        evaluate_callback_train = EvaluateCallback(train_generator, self.evaluate)
        evaluate_callback_val = EvaluateCallback(valid_generator, self.evaluate)
        # written by Anuar
        decay_lr = DecayLR(27, 31, 0.2)


        optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08,
                         decay=0.0)



        ############################################
        # Compile the model
        ############################################
        with tf.device("/cpu:0"):
            self.model.compile(loss=self.custom_loss, optimizer=optimizer)


        ############################################
        # Start the training process
        ############################################  

        steps_per_epoch = len(train_generator) * train_times

        parallel_model = multi_gpu_model(self.model, gpus=2)
        parallel_model.compile(loss=self.custom_loss, optimizer=optimizer)
        parallel_model.fit_generator(
                                 generator=train_generator, 
                                 steps_per_epoch  = steps_per_epoch, 
                                 epochs           = warmup_epochs + nb_epochs, 
                                 verbose          = 2 if debug else 1,
                                 validation_data  = valid_generator,
                                 validation_steps = len(valid_generator) * valid_times,
                                 callbacks        = [
                                    early_stop,
                                    checkpoint_multi_hs, 
                                    TrainValTensorBoard_HS(self.full_log_dir,
                                            write_graph=False, write_images=True),
                                    ValOnlyProgbarLogger(verbose=1, count_mode='steps'),
                                    reduce_lr_hs,
                                    evaluate_callback_val], 
                         
                                 workers          = 4,
                                 max_queue_size   = 10,
                                 use_multiprocessing=True)      
        
        self.model.save(saved_weights_name + "_final.h5")

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator, iou_threshold=0.5,
                                           score_threshold=0.5)
        for label, average_precision in list(average_precisions.items()):
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))         

        average_precisions = self.evaluate(valid_generator, iou_threshold=0.3,
                                           score_threshold=0.3)
        for label, average_precision in list(average_precisions.items()):
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))         
Example #29
    def train(
            self,
            train_imgs,  # the list of images to train the model
            valid_imgs,  # the list of images used to validate the model
            nb_epochs,  # number of epochs
            learning_rate,  # the learning rate
            batch_size,  # the size of the batch
            object_scale,
            no_object_scale,
            coord_scale,
            class_scale,
            saved_weights_name='best_weights.h5',
            debug=False):

        self.batch_size = batch_size

        self.object_scale = object_scale
        self.no_object_scale = no_object_scale
        self.coord_scale = coord_scale
        self.class_scale = class_scale

        self.debug = debug

        ############################################
        # Make train and validation generators
        ############################################

        generator_config = {
            'IMAGE_H': self.input_size,
            'IMAGE_W': self.input_size,
            'GRID_H': self.grid_h,
            'GRID_W': self.grid_w,
            'BOX': self.nb_box,
            'LABELS': self.labels,
            'CLASS': len(self.labels),
            'ANCHORS': self.anchors,
            'BATCH_SIZE': self.batch_size,
            'TRUE_BOX_BUFFER': self.max_box_per_image,
        }

        train_generator = BatchGenerator(train_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize)
        valid_generator = BatchGenerator(valid_imgs,
                                         generator_config,
                                         norm=self.feature_extractor.normalize,
                                         jitter=False)

        ############################################
        # Compile the model
        ############################################

        optimizer = Adam(lr=learning_rate,
                         beta_1=0.9,
                         beta_2=0.999,
                         epsilon=1e-08,
                         decay=0.0)
        self.model.compile(loss=self.custom_loss, optimizer=optimizer)

        ############################################
        # Make a few callbacks
        ############################################

        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=0.001,
                                   patience=3,
                                   mode='min',
                                   verbose=1)

        checkpoint = ModelCheckpoint(saved_weights_name,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     period=1)

        tensorboard = TensorBoard(
            log_dir=os.path.expanduser('~/logs/'),
            histogram_freq=0,
            #write_batch_performance=True,
            write_graph=True,
            write_images=False)

        ############################################
        # Start the training process
        ############################################

        self.model.fit_generator(
            generator=train_generator,
            steps_per_epoch=len(train_generator),
            epochs=nb_epochs,
            verbose=2 if debug else 1,
            validation_data=valid_generator,
            validation_steps=len(valid_generator),
            callbacks=[early_stop, checkpoint, tensorboard])

        ############################################
        # Compute mAP on the validation set
        ############################################
        average_precisions = self.evaluate(valid_generator)

        # print evaluation
        for label, average_precision in average_precisions.items():
            print(self.labels[label], '{:.4f}'.format(average_precision))
        print('mAP: {:.4f}'.format(
            sum(average_precisions.values()) / len(average_precisions)))
Example #30
s_train_val_split = int(0.8 * len(sup))

train_sup = sup[:s_train_val_split]  # supervised training split (for the detection model)
val_sup = sup[s_train_val_split:]  # supervised validation split (for the detection model)

# splits
# sup_train_imgs = train_imgs[:SUP_NUM_IMAGES]
# # split the training set (supervised date) into train and validation 80%, 20% respectively:
# train = sup_train_imgs[:int(SUP_NUM_IMAGES*0.8)]
# val = sup_train_imgs[-int(SUP_NUM_IMAGES*0.2):] #takes the last 20% images from the training
# ae_unsup = train_imgs[-UNSUP_NUM_IMAGES:]
# ae_train = ae_unsup[:int(UNSUP_NUM_IMAGES*0.8)]
# ae_val = ae_unsup[-int(UNSUP_NUM_IMAGES*0.2):]

train_batch = BatchGenerator(train_sup, generator_config, norm=normalize)

valid_batch = BatchGenerator(val_sup,
                             generator_config,
                             norm=normalize,
                             jitter=False)

# For the AE: we use the unsupervised data (taken from the end of the training set) to train it.
# TODO: experiment with jitter (e.g. jitter=True for the input, jitter=False for the target)
tohar_train_batch = ToharGenerator(
    train, generator_config, norm=normalize,
    jitter=False)  # outputs (input,input) rather than (input, ground truth)
tohar_valid_batch = ToharGenerator(
    valid, generator_config, norm=normalize,
    jitter=False)  # outputs (input,input) rather than (input, ground truth)
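
# The ToharGenerator implementation is not shown here; as a minimal sketch (an assumption,
# not the actual class), an image->image generator can be derived from BatchGenerator so the
# normalized image batch is returned as both input and target:
class SimpleAEGenerator(BatchGenerator):
    def __getitem__(self, idx):
        # BatchGenerator yields ([x_batch, b_batch], y_batch); for the AE we discard
        # the true-box buffer and the YOLO target and train image -> image instead
        [x_batch, _], _ = super().__getitem__(idx)
        return x_batch, x_batch

# hypothetical usage with the unsupervised split (cf. the commented-out ae_train above):
# ae_train_batch = SimpleAEGenerator(ae_train, generator_config, norm=normalize, jitter=False)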