Example #1
import numpy as np
from keras.optimizers import SGD
from keras.callbacks import CSVLogger, TensorBoard

# get_classes/get_anchors and get_pascal_detection_data come from the project's
# anchors.get_anchors and utils.pascal_voc_parser modules (see Example #2);
# create_model, create_tiny_model, yolo_loss and SnapshotCallbackBuilder are
# further project helpers, and classes_path, anchors_path, dataset_path,
# input_shape, batch_size, log_dir and pretrained_weights are module-level
# settings.


def train():
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    # split for train/val
    val_split = 0.2
    lines, _, _ = get_pascal_detection_data(input_path=dataset_path)
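    # fix the seed so the shuffle (and hence the train/val split) is
    # reproducible, then restore nondeterministic seeding afterwards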
    np.random.seed(2019)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # create model, load the pre-trained weights
    is_tiny_version = len(anchors) == 6  # default setting
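    # tiny YOLOv3 uses 6 anchors (3 per scale x 2 output scales); the full
    # model uses 9 (3 per scale x 3 output scales)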
    if is_tiny_version:
        model = create_tiny_model(input_shape=input_shape, yolo_loss=yolo_loss, anchors=anchors, freeze_body=0,
                                  num_classes=num_classes, load_pretrained=True, weights_path=pretrained_weights)
    else:
        model = create_model(input_shape=input_shape, yolo_loss=yolo_loss, anchors=anchors, freeze_body=0,
                             num_classes=num_classes, load_pretrained=True, weights_path=pretrained_weights)  # make sure you know what you freeze
    model.summary()
    # set callback functions
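    # SnapshotCallbackBuilder is a project helper; presumably snapshot
    # ensembling: a cyclic learning-rate schedule that saves one weights
    # snapshot per cycle (20 snapshots over 1000 epochs here)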
    callback_builder = SnapshotCallbackBuilder(nb_epochs=1000, nb_snapshots=20, init_lr=1e-4)
    callbacks = callback_builder.get_callbacks(log_dir=log_dir)
    callbacks.append(CSVLogger(log_dir + 'record.csv'))
    callbacks.append(TensorBoard(log_dir=log_dir))

    # train
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    model.compile(optimizer=SGD(lr=1e-4), loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                        steps_per_epoch=max(1, num_train // batch_size),
                        validation_data=data_generator_wrapper(lines[num_train:], 1, input_shape, anchors,
                                                               num_classes),
                        validation_steps=max(1, num_val),  # val generator uses batch size 1
                        epochs=1000,
                        initial_epoch=0,
                        callbacks=callbacks)
    model.save_weights(log_dir + 'trained_weights_final.h5')
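
The compile call above maps the model's single output to an identity loss: the real YOLO loss is computed inside the network by a Lambda layer named 'yolo_loss' (Example #4 shows how that layer is built), so Keras only has to pass its value through. A minimal, self-contained sketch of the same pattern, with hypothetical names and a plain squared error standing in for yolo_loss:

import numpy as np
import keras.backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

x = Input(shape=(4,))  # features
y = Input(shape=(1,))  # targets enter the graph as a second input
pred = Dense(1)(x)
# the loss is computed inside the graph, so the model's output IS the loss
loss = Lambda(lambda t: K.mean(K.square(t[0] - t[1]), axis=-1, keepdims=True),
              name='demo_loss')([pred, y])
model = Model([x, y], loss)
# identity "loss": just pass the layer's output through to the optimizer
model.compile(optimizer='sgd', loss={'demo_loss': lambda y_true, y_pred: y_pred})
model.fit([np.random.rand(8, 4), np.random.rand(8, 1)], np.zeros((8, 1)), epochs=1)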
Example #2
def data_generator_wrapper(h5_file, batch_size, data_indexes, anchors,
                           num_classes, input_shape):
    # the def line is missing in the excerpt; the signature here is
    # reconstructed to match the body and the calls in Example #3
    n = len(data_indexes)
    if n == 0 or batch_size <= 0:
        return None
    return data_generator(h5_file, batch_size, data_indexes, anchors,
                          num_classes, input_shape)


if __name__ == '__main__':
    from anchors.get_anchors import get_anchors, get_classes
    from utils.pascal_voc_parser import get_pascal_detection_data

    dataset_path = '/home/qkh/data/gang_jin/VOC_gangjin/'
    anchors_path = 'anchors/anchors.txt'
    classes_path = 'anchors/classes.txt'

    lines, _, _ = get_pascal_detection_data(input_path=dataset_path)
    # new_lines = []
    # for line in lines:
    #     if line['filepath'].endswith('D8146090.jpg'):
    #         new_lines.append(line)
    batch_size = 2
    input_shape = (416, 416)
    anchors = get_anchors(anchors_path)
    num_classes = len(get_classes(classes_path))

    x = data_generator_wrapper(lines, batch_size, input_shape, anchors,
                               num_classes)
    # pull a few batches to smoke-test the generator
    for _ in range(5):
        data, _ = next(x)
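
Each batch drawn above follows the usual keras-yolo3 generator contract: a list [image_batch, *y_true_grids] as model input, plus an all-zeros dummy target for the identity loss. A quick shape check (names assumed from the loop above):

inputs, dummy = next(x)
print([a.shape for a in inputs])  # image batch followed by the y_true grids
print(dummy.shape)                # zeros; the real loss comes from the yolo_loss layer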
Example #3
import h5py
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import (ModelCheckpoint, ReduceLROnPlateau, EarlyStopping,
                             CSVLogger, TensorBoard)


def train():
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    # split for train/val
    val_split = 0.1
    h5_file = h5py.File(data_file_path, 'r')
    data_indexes = list(range(h5_file['image_data'].shape[0]))
    print('{} images found'.format(len(data_indexes)))
    np.random.seed(2019)
    np.random.shuffle(data_indexes)
    num_val = int(len(data_indexes) * val_split)
    num_train = len(data_indexes) - num_val

    # lines, _, _ = get_pascal_detection_data(input_path=dataset_path)
    # np.random.seed(2019)
    # np.random.shuffle(lines)
    # np.random.seed(None)
    # num_val = int(len(lines) * val_split)
    # num_train = len(lines) - num_val

    # create model, load the pre-trained weights
    is_tiny_version = len(anchors) == 6  # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape=input_shape,
                                  yolo_loss=yolo_loss,
                                  anchors=anchors,
                                  freeze_body=2,
                                  num_classes=num_classes,
                                  load_pretrained=True)
    else:
        model = create_model(
            input_shape=input_shape,
            yolo_loss=yolo_loss,
            anchors=anchors,
            freeze_body=2,
            num_classes=num_classes,
            load_pretrained=True)  # make sure you know what you freeze
    # model = multi_gpu_model(model, gpus=2)
    model.summary()
    # set callback functions
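    # checkpoint keeps only the best weights by val_loss, checked every 2
    # epochs (period=2); reduce_lr and early_stopping also watch val_loss;
    # CSVLogger and TensorBoard just record progress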
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        verbose=1,
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=2)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  patience=5,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)
    csv_logger = CSVLogger(log_dir + 'record.csv')
    tensorboard = TensorBoard(log_dir=log_dir)

    # train the last layers first, with the body frozen
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    model.compile(
        optimizer=Adam(lr=1e-3),
        loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred
        })
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(
        num_train, num_val, batch_size * 4))
    model.fit_generator(
        data_generator_wrapper(h5_file, batch_size * 4,
                               data_indexes[:num_train], anchors, num_classes,
                               input_shape),
        steps_per_epoch=max(1, num_train // (batch_size * 4)),
        validation_data=data_generator_wrapper(h5_file, batch_size,
                                               data_indexes[num_train:],
                                               anchors, num_classes,
                                               input_shape),
        validation_steps=max(1, num_val // batch_size),
        epochs=20,
        initial_epoch=0,
        callbacks=[checkpoint, reduce_lr, early_stopping, tensorboard])
    # model.save_weights(log_dir + 'trained_weights_final.h5')

    # train all the layers
    for i in range(len(model.layers)):
        model.layers[i].trainable = True
    model.compile(optimizer=Adam(lr=1e-3),
                  loss={
                      'yolo_loss': lambda y_true, y_pred: y_pred
                  })  # recompile to apply the change
    print('Unfreeze all of the layers.')
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(
        num_train, num_val, batch_size))
    model.fit_generator(data_generator_wrapper(h5_file, batch_size,
                                               data_indexes[:num_train],
                                               anchors, num_classes,
                                               input_shape),
                        steps_per_epoch=max(1, num_train // batch_size),
                        validation_data=data_generator_wrapper(
                            h5_file, batch_size, data_indexes[num_train:],
                            anchors, num_classes, input_shape),
                        validation_steps=max(1, num_val // batch_size),
                        epochs=1000,
                        initial_epoch=20,
                        callbacks=[
                            checkpoint, reduce_lr, early_stopping, csv_logger,
                            tensorboard
                        ])
    model.save_weights(log_dir + 'trained_weights_final.h5')
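
With a freeze/unfreeze schedule like this it is easy to lose track of what is actually trainable. A small hypothetical helper that can be called before each compile to print the split:

def report_frozen(model):
    # count frozen vs trainable layers, e.g. before each model.compile call
    frozen = sum(1 for layer in model.layers if not layer.trainable)
    print('{} of {} layers are frozen.'.format(frozen, len(model.layers)))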
Example #4
File: yolo.py  Project: isee15/yolov3-keras
import os
from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model
# yolo_body, yolo_loss and get_anchors are the project's own helpers


def create_model(input_shape, yolo_loss, anchors, num_classes,
                 load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    # head reconstructed along the lines of the standard keras-yolo3
    # create_model; the excerpt originally started inside the branch below
    K.clear_session()  # get a new session
    h, w = input_shape
    num_anchors = len(anchors)
    image_input = Input(shape=(None, None, 3))
    # one y_true input per output scale (strides 32, 16 and 8)
    y_true = [Input(shape=(h // {0: 32, 1: 16, 2: 8}[l],
                           w // {0: 32, 1: 16, 2: 8}[l],
                           num_anchors // 3, num_classes + 5)) for l in range(3)]
    model_body = yolo_body(image_input, num_anchors // 3, num_classes)

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze the darknet body or freeze all but 2 output layers.
            num = (20, len(model_body.layers) - 2)[freeze_body - 1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    model_loss = Lambda(yolo_loss,
                        output_shape=(1, ),
                        name='yolo_loss',
                        arguments={
                            'anchors': anchors,
                            'num_classes': num_classes,
                            'ignore_thresh': 0.7
                        })([*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model


if __name__ == '__main__':
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
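    # YOLOv3 downsamples by strides 32/16/8, so the input size must be a
    # multiple of 32 (1280 = 32 * 40 -> 40x40, 80x80 and 160x160 grids)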
    model = create_model(input_shape=(1280, 1280),
                         yolo_loss=yolo_loss,
                         anchors=get_anchors('../anchors/anchors.txt'),
                         num_classes=1,
                         load_pretrained=False)
    model.summary()
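
The model returned by create_model is a training graph whose only output is the loss value. For detection you rebuild the bare network and reload the trained weights by name; a minimal sketch, assuming the same yolo_body and get_anchors helpers and the weights file written in Example #3:

anchors = get_anchors('../anchors/anchors.txt')
image_input = Input(shape=(None, None, 3))
inference_model = yolo_body(image_input, len(anchors) // 3, 1)  # 1 class, as above
inference_model.load_weights('trained_weights_final.h5', by_name=True)  # hypothetical path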