Example #1
    def __init__(self, image_ann: str, class_num: int, anchors: str, in_hw: tuple, out_hw: tuple, validation_split=0.1):
        self.in_hw = np.array(in_hw)
        assert self.in_hw.ndim == 2
        self.out_hw = np.array(out_hw)
        assert self.out_hw.ndim == 2
        self.validation_split = validation_split  # type:float
        if image_ann is None:
            self.train_list = None
            self.test_list = None
        else:
            img_ann_list = np.load(image_ann, allow_pickle=True)
            num = int(len(img_ann_list) * self.validation_split)
            self.train_list = img_ann_list[num:]  # type:np.ndarray
            self.test_list = img_ann_list[:num]  # type:np.ndarray
            self.train_total_data = len(self.train_list)  # type:int
            self.test_total_data = len(self.test_list)  # type:int
        self.grid_wh = (1 / self.out_hw)[:, [1, 0]]  # convert hw to wh by swapping the two columns
        if class_num:
            self.class_num = class_num  # type:int
        if anchors:
            self.anchors = np.load(anchors)  # type:np.ndarray
            self.anchor_number = len(self.anchors[0])
            self.output_number = len(self.anchors)
            self.xy_offset = Helper._coordinate_offset(self.anchors, self.out_hw)  # type:np.ndarray
            self.wh_scale = Helper._anchor_scale(self.anchors, self.grid_wh)  # type:np.ndarray

            # output_shapes depends on self.anchors and self.class_num, so build it
            # inside the `if anchors:` branch to avoid an AttributeError when no
            # anchors are given.
            self.output_shapes = [tf.TensorShape([None] + list(self.out_hw[i]) +
                                                 [len(self.anchors[i]), self.class_num + 5])
                                  for i in range(len(self.anchors))]

        self.iaaseq = iaa.OneOf([  # apply exactly one of the following augmenters per image
            iaa.Fliplr(0.7),  # horizontal flip with 70% probability
            iaa.Crop(percent=(0, 0.1)),  # random crops
            iaa.Affine(scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
                       translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
                       rotate=(-7, 7))
        ])
        self.colormap = [
            (255, 82, 0), (0, 255, 245), (0, 61, 255), (0, 255, 112), (0, 255, 133),
            (255, 0, 0), (255, 163, 0), (255, 102, 0), (194, 255, 0), (0, 143, 255),
            (51, 255, 0), (0, 82, 255), (0, 255, 41), (0, 255, 173), (10, 0, 255),
            (173, 255, 0), (0, 255, 153), (255, 92, 0), (255, 0, 255), (255, 0, 245),
            (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
            (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
            (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
            (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128),
            (61, 230, 250), (255, 6, 51), (11, 102, 255), (255, 7, 71), (255, 9, 224),
            (9, 7, 230), (220, 220, 220), (255, 9, 92), (112, 9, 255), (8, 255, 214),
            (7, 255, 224), (255, 184, 6), (10, 255, 71), (255, 41, 10), (7, 255, 255),
            (224, 255, 8), (102, 8, 255), (255, 61, 6), (255, 194, 7), (255, 122, 8),
            (0, 255, 20), (255, 8, 41), (255, 5, 153), (6, 51, 255), (235, 12, 255),
            (160, 150, 20), (0, 163, 255), (140, 140, 140), (250, 10, 15), (20, 255, 0),
            (31, 255, 0), (255, 31, 0), (255, 224, 0), (153, 255, 0), (0, 0, 255),
            (255, 71, 0), (0, 235, 255), (0, 173, 255), (31, 0, 255), (11, 200, 200)]
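
A minimal usage sketch of this constructor, assuming the enclosing class is named Helper (inferred from the Helper._coordinate_offset and Helper._anchor_scale calls above); every path and size below is a placeholder, not taken from the project:

# Hypothetical usage; all paths and sizes are placeholders.
helper = Helper(
    image_ann='data/voc_img_ann.npy',   # hypothetical pickled array of image/annotation records
    class_num=20,
    anchors='data/voc_anchors.npy',     # hypothetical array of shape (output_num, anchor_num, 2)
    in_hw=[(224, 320)],                 # 2-D, as the ndim assertion requires
    out_hw=[(7, 10), (14, 20)],         # one (h, w) grid per output layer
    validation_split=0.1)
print(helper.train_total_data, helper.test_total_data)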
Example #2
    np.random.seed(10101)
    tf.set_random_seed(10101)
    num_train = len(lines) - int(len(lines) * val_split)
    num_val = int(len(lines) * val_split)
    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonable model.

    model.compile(optimizer=Adam(lr=1e-3), loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    train_set = create_dataset(lines[:num_train], batch_size, input_shape, anchors, num_classes)
    vail_set = create_dataset(lines[num_train:], batch_size, input_shape, anchors, num_classes)

    shapes = (tuple([ins.shape for ins in model.input]), tuple(tf.TensorShape([batch_size, ])))

    train_set = train_set.apply(assert_element_shape(shapes))
    vail_set = vail_set.apply(assert_element_shape(shapes))

    try:
        model.fit(train_set,
                  epochs=10,
                  validation_data=vail_set, validation_steps=40,
                  steps_per_epoch=max(1, num_train // batch_size),
                  callbacks=[logging, checkpoint],
                  verbose=0)
    except KeyboardInterrupt:
        pass

    # train_set = YOLOSequence(lines[:num_train], batch_size, input_shape, anchors, num_classes)
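
The loss dict above works because the model computes its loss inside the graph: a Lambda layer named 'yolo_loss' emits the loss tensor, so compile() only needs a pass-through function. A runnable sketch of that pattern with stand-in shapes and a stand-in loss (none of these layer names or shapes come from the project):

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Input, Lambda
from tensorflow.keras.models import Model

img_in = Input((224, 320, 3))
y_true_in = Input((224, 320, 8))             # stand-in label tensor
pred = Conv2D(8, 3, padding='same')(img_in)  # stand-in detection head

def fake_loss(args):                         # stand-in for the real yolo loss
    y_pred, y_true = args
    return tf.reduce_mean(tf.square(y_pred - y_true), axis=[1, 2, 3])

loss_out = Lambda(fake_loss, name='yolo_loss')([pred, y_true_in])
model = Model([img_in, y_true_in], loss_out)
# The dict key matches the Lambda layer's name; the lambda simply forwards the
# loss already computed in the graph. When fitting, pass dummy zeros as targets.
model.compile(optimizer='adam', loss={'yolo_loss': lambda y_true, y_pred: y_pred})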
Example #3
File: train.py Project: svija/K210-yolo3
def main(annotation_path, classes_path, anchors_path, alpha, weights_path, learning_rate, epochs, augment):
    # annotation_path = 'train.txt'
    # classes_path = 'model_data/voc_classes.txt'
    # anchors_path = 'model_data/tiny_yolo_anchors.txt'
    log_dir = Path('logs')
    log_dir = log_dir / datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (224, 320)  # multiple of 32, hw
    batch_size = 16

    """ Set the Model """
    # model = create_tiny_model(input_shape, anchors, num_classes, weights_path='model_data/tiny_yolo_weights.h5')
    # model = create_model(input_shape, anchors, num_classes, weights_path='model_data/yolo_weights.h5')  # make sure you know what you freeze
    model, model_body = create_mobile_yolo(input_shape, anchors, num_classes, alpha, weights_path)  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    # join the filename onto log_dir (a Path); plain string concatenation would drop the separator
    checkpoint = ModelCheckpoint(str(log_dir / 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'),
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    tf.set_random_seed(10101)
    num_train = len(lines) - int(len(lines) * val_split)
    num_val = int(len(lines) * val_split)
    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonable model.

    model.compile(optimizer=Adam(lr=learning_rate), loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
    train_set = create_dataset(lines[:num_train], batch_size, input_shape, anchors, num_classes, augment == 'True')  # augment arrives as a string from the CLI
    vail_set = create_dataset(lines[num_train:], batch_size, input_shape, anchors, num_classes, random=False)

    shapes = (tuple([ins.shape for ins in model.input]), tuple(tf.TensorShape([batch_size, ])))

    train_set = train_set.apply(assert_element_shape(shapes))
    vail_set = vail_set.apply(assert_element_shape(shapes))

    try:
        model.fit(train_set,
                  epochs=epochs,
                  validation_data=vail_set, validation_steps=40,
                  steps_per_epoch=max(1, num_train // batch_size),
                  callbacks=[logging, checkpoint],
                  verbose=1)
    except KeyboardInterrupt:
        pass

    # train_set = YOLOSequence(lines[:num_train], batch_size, input_shape, anchors, num_classes)
    # model.fit_generator(train_set,
    #                     epochs=20,
    #                     steps_per_epoch=max(1, num_train // batch_size),
    #                     callbacks=[logging, checkpoint],
    #                     use_multiprocessing=True)
    save_model(model, str(log_dir / 'yolo_model.h5'))
    save_model(model_body, str(log_dir / 'yolo_model_body.h5'))
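
main() is presumably wired to a CLI parser elsewhere in train.py; a hypothetical direct call, with placeholder argument values, would look like:

# Hypothetical call; every argument value is a placeholder.
main(annotation_path='train.txt',
     classes_path='model_data/voc_classes.txt',
     anchors_path='model_data/tiny_yolo_anchors.txt',
     alpha=0.75,                                        # presumably the MobileNet width multiplier
     weights_path='model_data/mobile_yolo_weights.h5',  # placeholder file
     learning_rate=0.001,
     epochs=10,
     augment='True')  # main() compares this against the string 'True'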