Example #1
    def __init__(self, **kwargs):
        self.__dict__.update(self._defaults)
        for name, value in kwargs.items():
            setattr(self, name, value)

        #---------------------------------------------------#
        #   Get the classes and the number of anchor (prior) boxes
        #---------------------------------------------------#
        self.class_names, self.num_classes = get_classes(self.classes_path)
        self.anchors, self.num_anchors = get_anchors(self.anchors_path)

        #---------------------------------------------------#
        #   Assign a different color to each class for drawing boxes
        #---------------------------------------------------#
        hsv_tuples = [(x / self.num_classes, 1., 1.)
                      for x in range(self.num_classes)]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        self.input_image_shape = K.placeholder(shape=(2, ))

        self.sess = K.get_session()
        self.boxes, self.scores, self.classes = self.generate()

        show_config(**self._defaults)
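
Every example on this page leans on two small file-reading helpers, get_classes and get_anchors, without showing them. In keras-yolo3-style repositories they are usually defined roughly as below; some forks return only the list while others (as in Examples #1 and #4) also return the count, so treat this as a sketch of the convention rather than the one canonical implementation:

import numpy as np

def get_classes(classes_path):
    """One class name per line -> (names, count)."""
    with open(classes_path, encoding='utf-8') as f:
        class_names = [line.strip() for line in f if line.strip()]
    return class_names, len(class_names)

def get_anchors(anchors_path):
    """One line of comma-separated floats -> ((N, 2) array of w,h pairs, count)."""
    with open(anchors_path) as f:
        anchors = [float(x) for x in f.readline().split(',')]
    anchors = np.array(anchors).reshape(-1, 2)
    return anchors, len(anchors)
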
Example #2
 def __init__(self, **kwargs):
     super(YOLO, self).__init__()
     self.__dict__.update(self._defaults)  # wait, you can do this?
     self.__dict__.update(kwargs)  # update with user overrides
     self.class_names = get_classes(self.classes_path)
     self.anchors = get_anchors(self.anchors_path)
     self.colors = get_colors(self.class_names)
     K.set_learning_phase(0)
     self.inference_model = self._generate_model()
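
The line the commenter marvels at ("wait, you can do this?") is a common configuration idiom: self.__dict__.update(self._defaults) copies every class-level default onto the instance in one call, and the second update(kwargs) lets user overrides win. A minimal, self-contained illustration (attribute names invented for the demo):

class Configurable:
    # Class-level defaults shared by every instance (names are made up here).
    _defaults = {'score_threshold': 0.5, 'iou_threshold': 0.45}

    def __init__(self, **kwargs):
        self.__dict__.update(self._defaults)  # copy defaults onto the instance
        self.__dict__.update(kwargs)          # user-supplied overrides win

c = Configurable(iou_threshold=0.3)
print(c.score_threshold, c.iou_threshold)     # 0.5 0.3
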
Example #3
    def __init__(self):

        anchors = utils.get_anchors(flags.FLAGS.anchors_path)
        num_anchors = len(anchors)
        anchors = np.array(anchors, dtype=np.float32)

        super(CocoModel, self).__init__(
            image_size=flags.FLAGS.image_size,
            image_channels=flags.FLAGS.image_channels,
            num_classes=flags.FLAGS.num_classes,
            anchors=anchors,
            batch_size=distribution_utils.per_device_batch_size(
                flags.FLAGS.batch_size, flags_core.get_num_gpus(flags.FLAGS)),
            num_anchors=num_anchors,
            learning_rate=flags.FLAGS.learning_rate,
            backbone=flags.FLAGS.backbone,
            norm=flags.FLAGS.norm,
            threshold=flags.FLAGS.threshold,
            max_num_boxes_per_image=flags.FLAGS.max_num_boxes_per_image,
            confidence_score=flags.FLAGS.confidence_score,
            data_format=flags.FLAGS.data_format,
            dtype=flags_core.get_tf_dtype(flags.FLAGS))
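
Example #3 relies on distribution_utils.per_device_batch_size from the TensorFlow official-models utilities, which simply splits the global batch size evenly across the available GPUs. A sketch of what that helper does, with the error message paraphrased:

def per_device_batch_size(batch_size, num_gpus):
    """Sketch: split the global batch size evenly across GPUs."""
    if num_gpus <= 1:
        return batch_size
    if batch_size % num_gpus:
        raise ValueError(
            'Batch size (%d) must be divisible by the number of GPUs (%d).'
            % (batch_size, num_gpus))
    return batch_size // num_gpus
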
Example #4
    def __init__(self, **kwargs):
        self.__dict__.update(self._defaults)
        for name, value in kwargs.items():
            setattr(self, name, value)
            self._defaults[name] = value 
            
        #---------------------------------------------------#
        #   Get the classes and the number of anchor (prior) boxes
        #---------------------------------------------------#
        self.class_names, self.num_classes  = get_classes(self.classes_path)
        self.anchors, self.num_anchors      = get_anchors(self.anchors_path)
        self.bbox_util                      = DecodeBox(self.anchors, self.num_classes, (self.input_shape[0], self.input_shape[1]), self.anchors_mask)

        #---------------------------------------------------#
        #   Assign a different color to each class for drawing boxes
        #---------------------------------------------------#
        hsv_tuples = [(x / self.num_classes, 1., 1.) for x in range(self.num_classes)]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))
        self.generate()
        
        show_config(**self._defaults)
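
The color-setup lines in Examples #1 and #4 implement one idea: step around the hue wheel in num_classes even increments at full saturation and value, then scale the resulting RGB floats to 0-255, so each class gets a visually distinct box color. Pulled out into a standalone helper (a sketch; the get_colors call in Example #2 presumably packages the same trick):

import colorsys

def make_palette(num_classes):
    """Evenly spaced hues -> one distinct RGB color per class."""
    palette = []
    for i in range(num_classes):
        r, g, b = colorsys.hsv_to_rgb(i / num_classes, 1.0, 1.0)
        palette.append((int(r * 255), int(g * 255), int(b * 255)))
    return palette

print(make_palette(3))  # [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
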
Example #5
    #   Check the number of GPUs requested against the number actually on the machine
    #------------------------------------------------------#
    if ngpus_per_node > 1 and ngpus_per_node > len(gpus):
        raise ValueError("The number of GPUs specified for training exceeds the number of GPUs on the machine")
        
    if ngpus_per_node > 1:
        strategy = tf.distribute.MirroredStrategy()
    else:
        strategy = None
    print('Number of devices: {}'.format(ngpus_per_node))

    #----------------------------------------------------#
    #   Get the classes and anchors
    #----------------------------------------------------#
    class_names, num_classes = get_classes(classes_path)
    anchors, num_anchors     = get_anchors(anchors_path)

    #----------------------------------------------------#
    #   Decide whether to load the model and pretrained weights on multiple GPUs
    #----------------------------------------------------#
    if ngpus_per_node > 1:
        with strategy.scope():
            #------------------------------------------------------#
            #   Create the YOLO model
            #------------------------------------------------------#
            model_body  = yolo_body((None, None, 3), anchors_mask, num_classes, backbone, alpha, weight_decay=weight_decay)
            if model_path != '':
                #------------------------------------------------------#
                #   Load pretrained weights
                #------------------------------------------------------#
                print('Load weights {}.'.format(model_path))
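
Example #5 is cut off mid-branch, but the multi-GPU pattern it shows is standard tf.distribute usage: create the model (and load its weights) inside strategy.scope() so the variables are mirrored across devices. A condensed sketch of that skeleton, with make_model standing in for the yolo_body(...) call above:

import tensorflow as tf

ngpus = len(tf.config.list_physical_devices('GPU'))
strategy = tf.distribute.MirroredStrategy() if ngpus > 1 else None

if strategy is not None:
    with strategy.scope():   # variables created here are mirrored on every GPU
        model = make_model()                 # stand-in for yolo_body(...)
        # model.load_weights(model_path)     # pretrained weights, if any
else:
    model = make_model()
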
Example #6
def _main():

    # parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--train_from_checkpoint',
        type=str,
        help=
        "The path to where a previously trained model's weights are stored. To use the default\n\
        coco weights, use the path 'model_weights/coco_pretrained_weights.ckpt'. Otherwise, the model\n\
        weights will be initialized randomly. ")
    parser.add_argument(
        '--class_path',
        default='utils/coco_classes.txt',
        type=str,
        help=
        'The path that points towards where the class names for the dataset are stored.\n\
        The default path is "utils/coco_classes.txt".')
    parser.add_argument(
        '--anchors_path',
        default='utils/anchors.txt',
        type=str,
        help=
        'The path that points towards where the anchor values for the model are stored.\n\
        The default path is "utils/anchors.txt", which contains anchors trained on the coco dataset.'
    )
    parser.add_argument(
        '--data_path',
        default='training_data/image_paths_and_box_info.txt',
        type=str,
        help=
        'The path that points towards where the training data text file is stored.\n\
        The default path is "training_data/image_paths_and_box_info.txt".')
    parser.add_argument(
        '--input_height',
        default=416,
        type=int,
        help=
        'The input height of the yolov3 model. The height must be a multiple of 32.\n\
        The default height is 416.')
    parser.add_argument(
        '--input_width',
        default=416,
        type=int,
        help=
        'The input width of the yolov3 model. The width must be a multiple of 32.\n\
        The default width is 416.')
    parser.add_argument(
        '--batch_size',
        default=32,
        type=int,
        help=
        'The training batch size, whose default value is set to 32 images per batch.'
    )
    parser.add_argument(
        '--max_num_boxes_per_image',
        default=20,
        type=int,
        help=
        'The max number of boxes that can be detected within one image. Default is 20.'
    )
    parser.add_argument(
        '--num_training_epochs',
        default=150,
        type=int,
        help='The number of training epochs. The default is 150.')
    parser.add_argument(
        '--learning_rate',
        default=0.001,
        type=float,
        help='The learning rate of the model. The default is 0.001.')
    parser.add_argument(
        '--ignore_threshold',
        default=0.5,
        type=float,
        help=
        'Impacts how the loss is calculated. Must be between zero and one, and the default is set to 0.5.'
    )
    parser.add_argument(
        '--train_val_data_split',
        default=0.9,
        type=float,
        help=
        'The split between the data that will be used for training and data that will be used\n\
        for validation. Default value is 0.9.')
    parser.add_argument(
        '--train_save_path',
        default='model_weights/',
        help=
        "The training model's checkpoint save path. The default path is 'model_weights/'."
    )
    parser.add_argument(
        '--model_name',
        default='model.ckpt',
        help=
        'The name that should be given to the checkpoint file. The default name is "model.ckpt".'
    )
    parser.add_argument(
        '--tensorboard_save_path',
        default='tensorboard/tensorboard_train/',
        help=
        'The path where the event files to be used with tensorboard will be saved at. The default\n\
        path is "tensorboard/tensorboard_train/".')
    parser.add_argument(
        '--test_model_overfit',
        nargs='?',
        default=False,
        type=str2bool,
        const=True,
        help=
        'Whether or not to purposefully overfit the model by training it on only one image.\n\
        This option is useful in testing out if the loss function is working correctly.'
    )
    parser.add_argument(
        '--save_every_x_iterations',
        default=100,
        type=int,
        help=
        "How frequently the model's training weights are saved. The default value is every\n\
        100 iterations.")
    parser.add_argument(
        '--log_every_x_iterations',
        default=5,
        type=int,
        help=
        "How frequently the model's loss is logged for it to be inspected in Tensorboard.\n\
        The default value is every 5 iterations.")
    args = vars(parser.parse_args())
    # NOTE: hard-coded override of the --data_path command line argument
    args['data_path'] = '/home/yl/CNN/Yolo/keras-yolo3-fine-tune/dataset/train_label.txt'

    # read inputs
    h = args['input_height']
    w = args['input_width']
    ignore_thresh = args['ignore_threshold']
    max_num_boxes_per_image = args['max_num_boxes_per_image']
    anchors = get_anchors(args['anchors_path'])
    lr = args['learning_rate']
    num_anchors_per_detector = len(anchors) // 3
    num_detectors_per_image = num_anchors_per_detector * (
        (h // 32) * (w // 32) + (h // 16) * (w // 16) + (h // 8) * (w // 8))
    class_names = get_classes(args['class_path'])
    num_classes = len(class_names)
    tb_train_path = args['tensorboard_save_path'] + 'train/'
    tb_val_path = args['tensorboard_save_path'] + 'val/'
    training_data, validation_data, batch_size = prepare_data(
        args['data_path'], args['train_val_data_split'], args['batch_size'],
        args['test_model_overfit'])

    tf.reset_default_graph()

    # build graph
    with tf.variable_scope('y_true'):
        y_true_data = tf.placeholder(
            dtype=tf.float32,
            shape=[None, num_detectors_per_image, num_classes + 5])
    with tf.variable_scope('y_true_boxes'):
        y_true_box_data = tf.placeholder(dtype=tf.float32,
                                         shape=[
                                             None, max_num_boxes_per_image *
                                             num_anchors_per_detector, 4
                                         ])
    with tf.variable_scope('x_input'):
        X = tf.placeholder(dtype=tf.float32, shape=[None, h, w, 3])

    yolo_outputs = yolo_v3(inputs=X,
                           num_classes=len(class_names),
                           anchors=anchors,
                           h=h,
                           w=w,
                           training=True)  # output
    loss = yolo_v3_loss(yolo_outputs,
                        y_true_data,
                        y_true_box_data,
                        ignore_threshold=ignore_thresh,
                        anchors=anchors,
                        num_classes=num_classes,
                        h=h,
                        w=w,
                        batch_size=batch_size)

    tf.summary.scalar('loss', loss)
    global_step = tf.get_variable(name='global_step',
                                  trainable=False,
                                  initializer=0,
                                  dtype=tf.int32)

    # returns a varlist containing only the vars of the conv layers right before the yolo layers
    trainable_var_list = tf.trainable_variables()
    last_layer_var_list = [
        i for i in trainable_var_list
        if i.shape[-1] == (5 + num_classes) * num_anchors_per_detector
    ]
    train_op_with_frozen_variables = tf.train.AdamOptimizer(
        learning_rate=lr).minimize(loss,
                                   global_step=global_step,
                                   var_list=last_layer_var_list)
    train_op_with_all_variables = tf.train.AdamOptimizer(
        learning_rate=lr).minimize(loss,
                                   global_step=global_step,
                                   var_list=trainable_var_list)
    summ = tf.summary.merge_all()

    # info
    print('--info--')
    print('model weights will be saved with filename: ', args['model_name'])
    print('tensorboard event files located at path: ',
          args['tensorboard_save_path'])

    # build training loop
    with tf.Session() as sess:

        train_writer = tf.summary.FileWriter(tb_train_path, sess.graph)
        val_writer = tf.summary.FileWriter(tb_val_path)

        # initialize model weights either randomly or from a saved checkpoint
        saver = tf.train.Saver()
        if args['train_from_checkpoint'] is None:
            print('initializing variables...')
            sess.run(tf.global_variables_initializer())
        else:
            print('restoring weights from checkpoint: ',
                  args['train_from_checkpoint'])
            saver.restore(sess, args['train_from_checkpoint'])

        num_iterations = args['num_training_epochs'] * len(training_data)

        print('beginning to train the model...')
        for i in range(num_iterations):

            input_images, y_true, y_true_boxes = get_training_batch(
                training_data,
                anchors,
                num_classes,
                batch_size=batch_size,
                h=h,
                w=w,
                random=not args['test_model_overfit'])

            # For the first epochs, train with the frozen layers. Then, unfreeze the entire graph.
            if i < num_iterations // 3:
                sess.run(train_op_with_frozen_variables,
                         feed_dict={
                             X: input_images,
                             y_true_data: y_true,
                             y_true_box_data: y_true_boxes
                         })
            else:
                sess.run(train_op_with_all_variables,
                         feed_dict={
                             X: input_images,
                             y_true_data: y_true,
                             y_true_box_data: y_true_boxes
                         })

            if i % args['log_every_x_iterations'] == 0:
                # write the training loss to tensorboard
                lt, st = sess.run(
                    [loss, summ],
                    feed_dict={
                        X: input_images,
                        y_true_data: y_true,
                        y_true_box_data: y_true_boxes
                    })
                train_writer.add_summary(st, i)

                # write the validation loss to tensorboard if we are not in overfit mode
                if not args['test_model_overfit']:
                    input_images, y_true, y_true_boxes = get_training_batch(
                        validation_data,
                        anchors,
                        num_classes,
                        batch_size=batch_size,
                        h=h,
                        w=w,
                        random=not args['test_model_overfit'])
                    lv, sv = sess.run(
                        [loss, summ],
                        feed_dict={
                            X: input_images,
                            y_true_data: y_true,
                            y_true_box_data: y_true_boxes
                        })
                    val_writer.add_summary(sv, i)
                    print("iteration: " + str(i) + ", training loss: " +
                          str(round(lt, 2)) + ", validation loss: " +
                          str(round(lv, 2)))
                else:
                    print("iteration: " + str(i) + ", training loss: " +
                          str(round(lt, 2)))

            if i % args['save_every_x_iterations'] == 0:
                print('saving model weights at path: ',
                      args['train_save_path'])
                saver.save(
                    sess,
                    os.path.join(args['train_save_path'], args['model_name']),
                    global_step)

    train_writer.close()
    val_writer.close()
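
Both this script and Example #10 pass type=str2bool to argparse without defining it. It is a widely used helper for boolean flags, needed because type=bool would treat any non-empty string, including 'False', as True. One common variant:

import argparse

def str2bool(v):
    """Parse common textual spellings of a boolean for argparse."""
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
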
Example #7
def parse_fn(image_id,
             dataset,
             anchors_path,
             augmentation=None,
             dtype=np.float32,
             max_num_boxes_per_image=20,
             image_size=416):
    """Load and return ground truth data for an image (image, mask, bounding boxes)."""

    image = dataset.load_image(image_id)
    # original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image, min_dim=0, min_scale=0, max_dim=image_size, mode='square')

    mask, class_ids = dataset.load_mask(image_id)

    mask = utils.resize_mask(mask, scale, padding, crop)

    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = [
            "Sequential", "SomeOf", "OneOf", "Sometimes", "Fliplr", "Flipud",
            "CropAndPad", "Affine", "PiecewiseAffine"
        ]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support np.bool
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool
        mask = mask.astype(np.bool)

    # Some boxes may be all zeros if the corresponding mask was cropped out;
    # filter those out here.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    if mask.shape[-1] > max_num_boxes_per_image:
        ids = np.random.choice(np.arange(mask.shape[-1]),
                               max_num_boxes_per_image,
                               replace=False)
        class_ids = class_ids[ids]
        bbox = bbox[ids, :]

    # confs = np.ones((bbox.shape[0], 1), dtype=dtype)
    # bbox = np.concatenate([bbox, confs], axis=-1)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    # active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    # source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    # active_class_ids[source_class_ids] = 1

    # image_meta = utils.compose_image_meta(image_id, original_shape, image.shape,
    #                                       window, scale, active_class_ids)
    # image_meta.astype(dtype)

    # gt_mask = np.zeros((mask.shape[0], mask.shape[1], 20), mask.dtype)
    gt_class_ids = np.zeros(max_num_boxes_per_image, class_ids.dtype)
    gt_bbox = np.zeros((max_num_boxes_per_image, bbox.shape[1]), bbox.dtype)
    # gt_data = np.zeros((max_num_boxes_per_image, bbox.shape[1] + dataset.num_classes), dtype=dtype)

    if class_ids.shape[0] > 0:
        gt_class_ids[:class_ids.shape[0]] = class_ids
        # gt_mask[:, :, :mask.shape[-1]] = mask
        gt_bbox[:bbox.shape[0], :] = bbox

    gt_class_ids = np.expand_dims(gt_class_ids, axis=-1).astype(dtype)

    gt_bbox = np.concatenate([gt_bbox, gt_class_ids], axis=-1)

    anchors = utils.get_anchors(anchors_path)
    anchors = np.array(anchors, dtype=np.float32)

    boxes_yx = (gt_bbox[:, 0:2] + gt_bbox[:, 2:4]) // 2
    boxes_hw = gt_bbox[:, 2:4] - gt_bbox[:, 0:2]

    gt_bbox[:, 0] = boxes_yx[..., 1] / image_size
    gt_bbox[:, 1] = boxes_yx[..., 0] / image_size
    gt_bbox[:, 2] = boxes_hw[..., 1] / image_size
    gt_bbox[:, 3] = boxes_hw[..., 0] / image_size

    hw = np.expand_dims(boxes_hw, -2)
    anchors_broad = np.expand_dims(anchors, 0)

    anchor_maxes = anchors_broad / 2.
    anchor_mins = -anchor_maxes
    box_maxes = hw / 2.
    box_mins = -box_maxes
    intersect_mins = np.maximum(box_mins, anchor_mins)
    intersect_maxes = np.minimum(box_maxes, anchor_maxes)
    intersect_hw = np.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_hw[..., 0] * intersect_hw[..., 1]
    box_area = hw[..., 0] * hw[..., 1]
    anchor_area = anchors[..., 0] * anchors[..., 1]
    iou = intersect_area / (box_area + anchor_area - intersect_area)
    best_anchors = np.argmax(iou, axis=-1)

    # TODO: write a function to calculate the stride automatically.
    large_obj_image_size = image_size // 32
    medium_obj_image_size = image_size // 16
    small_obj_image_size = image_size // 8

    large_obj_detectors, large_obj_boxes = get_detector_heatmap_each_scale(
        gt_bbox,
        best_anchors_=best_anchors,
        anchors_mask=[6, 7, 8],
        grid_size=(large_obj_image_size, large_obj_image_size),
        num_classes=dataset.num_classes)

    medium_obj_detectors, medium_obj_boxes = get_detector_heatmap_each_scale(
        gt_bbox,
        best_anchors_=best_anchors,
        anchors_mask=[3, 4, 5],
        grid_size=(medium_obj_image_size, medium_obj_image_size),
        num_classes=dataset.num_classes)

    small_obj_detectors, small_obj_boxes = get_detector_heatmap_each_scale(
        gt_bbox,
        best_anchors_=best_anchors,
        anchors_mask=[0, 1, 2],
        grid_size=(small_obj_image_size, small_obj_image_size),
        num_classes=dataset.num_classes)

    yolo_true_data = np.concatenate(
        [large_obj_detectors, medium_obj_detectors, small_obj_detectors],
        axis=0).reshape([-1])
    yolo_true_boxes = np.concatenate(
        [large_obj_boxes, medium_obj_boxes, small_obj_boxes],
        axis=0).reshape([-1])

    yolo_gt = np.concatenate([yolo_true_data, yolo_true_boxes], axis=-1)

    return image.astype(dtype) / 255., yolo_gt.astype(dtype)
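
The anchor-matching block in the middle of parse_fn deserves a second look: it centers every ground-truth box and every anchor at the origin and computes IoU purely from heights and widths via numpy broadcasting, so each box is assigned the anchor that best matches its shape. The same trick in isolation, with made-up box and anchor sizes:

import numpy as np

def best_anchor_per_box(boxes_hw, anchors):
    """boxes_hw: (B, 2), anchors: (A, 2); both centered at the origin for shape-only IoU."""
    hw = boxes_hw[:, None, :]                      # (B, 1, 2)
    mins = np.maximum(-hw / 2., -anchors / 2.)     # (B, A, 2) intersection of centered boxes
    maxes = np.minimum(hw / 2., anchors / 2.)
    inter = np.prod(np.maximum(maxes - mins, 0.), axis=-1)
    union = np.prod(hw, axis=-1) + np.prod(anchors, axis=-1) - inter
    return np.argmax(inter / union, axis=-1)       # (B,) index of the best-fitting anchor

boxes = np.array([[30., 60.], [200., 180.]])
anchors = np.array([[10., 13.], [33., 62.], [156., 198.]])
print(best_anchor_per_box(boxes, anchors))  # [1 2]
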
Example #8
def _main():

    # parse command line arguments
    parser = argparse.ArgumentParser()
    requiredNamed = parser.add_argument_group('required named arguments')
    requiredNamed.add_argument(
        '--path_to_input_image',
        type=str,
        required=True,
        help=
        'The path to the input image on which object detection will be performed.\n\
        This argument is required.')
    parser.add_argument(
        '--path_to_trained_model',
        default='model_weights/coco_pretrained_weights.ckpt',
        type=str,
        help=
        "The path to the location of pretrained model weights, which will be loaded into\n\
        the model and then used for object detection. The default pretrained weights path is\n\
        'model_weights/coco_pretrained_weights.ckpt', which contains weights trained on\n\
        the coco dataset.")
    parser.add_argument(
        '--save_as',
        type=str,
        default=None,
        help=
        'The filename for the image on which object detection was performed. If no filename\n\
        is provided, the image will be saved as "[original_name] + _yolo_v3.jpg".'
    )
    parser.add_argument(
        '--tensorboard_save_path',
        default='tensorboard/tensorboard_detect/',
        help=
        'The path where the event files to be used with tensorboard will be saved. The default\n\
        path is "tensorboard/tensorboard_detect/".')
    parser.add_argument(
        '--class_path',
        default='utils/coco_classes.txt',
        type=str,
        help=
        'The path that points towards where the class names for the dataset are stored.\n\
        The default path is "utils/coco_classes.txt".')
    parser.add_argument(
        '--anchors_path',
        default='utils/anchors.txt',
        type=str,
        help=
        'The path that points towards where the anchor values for the model are stored.\n\
        The default path is "utils/anchors.txt", which contains anchors trained on the coco dataset.'
    )
    parser.add_argument(
        '--input_height',
        default=416,
        type=int,
        help=
        'The input height of the yolov3 model. The height must be a multiple of 32.\n\
        The default height is 416.')
    parser.add_argument(
        '--input_width',
        default=416,
        type=int,
        help=
        'The input width of the yolov3 model. The width must be a multiple of 32.\n\
        The default width is 416.')
    args = vars(parser.parse_args())

    h = args['input_height']
    w = args['input_width']
    anchors = get_anchors(args['anchors_path'])
    classes = get_classes(args['class_path'])
    save_as = args['save_as']
    if save_as is None:
        filename_w_ext = os.path.basename(args['path_to_input_image'])
        filename, file_extension = os.path.splitext(filename_w_ext)
        save_as = filename + '_yolo_v3' + file_extension

    image, original_im = process_image(args['path_to_input_image'], h, w)

    tf.reset_default_graph()

    # build graph
    with tf.variable_scope('x_input'):
        X = tf.placeholder(dtype=tf.float32, shape=[None, h, w, 3])

    yolo_outputs = yolo_v3(inputs=X,
                           num_classes=len(classes),
                           anchors=anchors,
                           h=h,
                           w=w,
                           training=False)  # output

    with tf.variable_scope('obj_detections'):
        raw_outputs = tf.concat(yolo_outputs, axis=1)

    # pass image through model
    with tf.Session() as sess:

        writer = tf.summary.FileWriter(args['tensorboard_save_path'],
                                       sess.graph)
        writer.close()

        saver = tf.train.Saver()
        print('restoring model weights...')
        saver.restore(sess, save_path=args['path_to_trained_model'])
        print('feeding image found at filepath: ', args['path_to_input_image'])
        start = time.time()
        ro = sess.run(raw_outputs,
                      feed_dict={X: [np.array(image, dtype=np.float32)]})
        end = time.time()
        total_time = end - start
        print("total inference time was: " + str(round(total_time, 2)) +
              " seconds (that's " + str(round(60.0 / total_time, 2)) +
              " fps!)")

    # convert box coordinates, apply nms, and draw boxes
    boxes = convert_box_coordinates(ro)
    filtered_boxes = non_max_suppression(boxes,
                                         confidence_threshold=0.5,
                                         iou_threshold=0.4)
    draw_boxes(save_as, args['class_path'], filtered_boxes, original_im, image)

    print('image with detections saved as: ', save_as)
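
The post-processing step (convert_box_coordinates followed by non_max_suppression) turns raw detector output into final boxes. The NMS half is the generic greedy algorithm: discard low-confidence boxes, then repeatedly keep the highest-scoring survivor and suppress anything that overlaps it beyond the IoU threshold. A class-agnostic sketch (the repo's version presumably also groups boxes by class):

import numpy as np

def iou_one_to_many(box, boxes):
    """box: (4,), boxes: (N, 4); corners ordered (x1, y1, x2, y2)."""
    x1 = np.maximum(box[0], boxes[:, 0])
    y1 = np.maximum(box[1], boxes[:, 1])
    x2 = np.minimum(box[2], boxes[:, 2])
    y2 = np.minimum(box[3], boxes[:, 3])
    inter = np.maximum(x2 - x1, 0.) * np.maximum(y2 - y1, 0.)
    area_a = (box[2] - box[0]) * (box[3] - box[1])
    area_b = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area_a + area_b - inter)

def nms(boxes, scores, confidence_threshold=0.5, iou_threshold=0.4):
    """Greedy class-agnostic non-max suppression."""
    keep = scores >= confidence_threshold            # 1) confidence filter
    boxes, scores = boxes[keep], scores[keep]
    order = np.argsort(scores)[::-1]                 # 2) best-first
    chosen = []
    while order.size:
        best, order = order[0], order[1:]
        chosen.append(best)                          # 3) keep the best box...
        overlap = iou_one_to_many(boxes[best], boxes[order])
        order = order[overlap < iou_threshold]       # ...and drop what it covers
    return boxes[chosen], scores[chosen]
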
Example #9
    def train(self, classes_path, anchors_path):
        '''
        Args:
            classes_path: path to the classes file
            anchors_path: path to the anchors file
        '''
        classes_names = get_classes(classes_path)
        num_classes = len(classes_names)
        anchors = get_anchors(anchors_path)

        is_tiny_version = len(anchors) == 6  # default setting
        if is_tiny_version:
            model = create_tiny_model(input_shape,
                                      anchors,
                                      num_classes,
                                      freeze_body=2)
        else:
            model = create_model(input_shape,
                                 anchors,
                                 num_classes,
                                 load_pretrained=False)

        logging = TensorBoard(log_dir=log_dir)
        # checkpoint = ModelCheckpoint(log_dir + 'car_mobilenet_yolov3.ckpt',
        #    monitor='val_loss', save_weights_only=False, period=1)
        checkpoint = ModelCheckpoint(
            log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
            monitor='val_loss',
            save_weights_only=False,
            save_best_only=True,
            period=3)

        # reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)

        reduce_lr = ReduceLROnPlateau(monitor="val_loss",
                                      factor=0.1,
                                      min_lr=1e-9,
                                      patience=5,
                                      verbose=1)
        # early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

        with open(train_path) as t_f:
            t_lines = t_f.readlines()
        np.random.seed(666)
        np.random.shuffle(t_lines)
        num_val = int(len(t_lines) * val_split)
        num_train = len(t_lines) - num_val
        v_lines = t_lines[num_train:]  # take the validation slice before truncating
        t_lines = t_lines[:num_train]

        # Train with frozen layers first, to get a stable loss.
        # Adjust the number of epochs to your dataset. This step is enough to obtain a decent model.
        if True:
            model.compile(
                optimizer=Adam(lr=1e-3),
                loss={
                    # use custom yolo_loss Lambda layer.
                    'yolo_loss': lambda y_true, y_pred: y_pred
                })

        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_num))
        model.fit_generator(data_generator_wrapper(t_lines, batch_num,
                                                   input_shape, anchors,
                                                   num_classes),
                            steps_per_epoch=max(1, num_train // batch_num),
                            validation_data=data_generator_wrapper(
                                v_lines, batch_num, input_shape, anchors,
                                num_classes),
                            validation_steps=max(1, num_val // batch_num),
                            epochs=epochs,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save(log_dir + 'trained_weights_stage_1.h5')

        # Unfreeze and continue training, to fine-tune.
        # Train longer if the result is not good.
        if True:
            print("Unfreeze and continue training, to fine-tune.")
            for i in range(len(model.layers)):
                model.layers[i].trainable = True
            model.compile(optimizer=Adam(lr=1e-4),
                          loss={
                              'yolo_loss': lambda y_true, y_pred: y_pred
                          })  # recompile to apply the change
            batch_size = 16  # note that more GPU memory is required after unfreezing the body
            print(
                'Train on {} samples, val on {} samples, with batch size {}.'.
                format(num_train, num_val, batch_size))
            model.fit_generator(
                data_generator_wrapper(t_lines, batch_size, input_shape,
                                       anchors, num_classes),
                steps_per_epoch=max(1, num_train // batch_size),
                validation_data=data_generator_wrapper(v_lines, batch_size,
                                                       input_shape, anchors,
                                                       num_classes),
                validation_steps=max(1, num_val // batch_size),
                epochs=20,
                initial_epoch=0,
                callbacks=[logging, checkpoint, reduce_lr])
            model.save(log_dir + 'trained_weights_final.h5')
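
The strange-looking loss dict above, 'yolo_loss': lambda y_true, y_pred: y_pred, is the standard keras-yolo3 trick: the real loss is computed inside the model by a Lambda layer named yolo_loss, so the Keras "loss function" just passes that output through and y_true is a dummy. A toy reproduction of the pattern:

import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

# A toy model whose single output *is* its loss, as in keras-yolo3.
x_in = keras.Input(shape=(4,), name='x')
y_in = keras.Input(shape=(1,), name='y')          # targets enter as a model input
pred = layers.Dense(1)(x_in)
loss_out = layers.Lambda(
    lambda t: keras.backend.mean(keras.backend.square(t[0] - t[1]),
                                 axis=-1, keepdims=True),
    name='toy_loss')([y_in, pred])

model = keras.Model([x_in, y_in], loss_out)
# The "loss function" just forwards the model's own loss output; y_true is a dummy.
model.compile(optimizer='adam', loss={'toy_loss': lambda y_true, y_pred: y_pred})
model.fit([np.random.rand(8, 4), np.random.rand(8, 1)], np.zeros((8, 1)), verbose=0)
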
Example #10
def _main():
    # parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--train-from-checkpoint', type=str,
        help="the path to where a previously trained model's weights are stored")
    parser.add_argument('--class-path', default='utils/coco_classes.txt', type=str,
                        help='the path that points to the class names for the dataset')
    parser.add_argument(
        '--anchors-path', default='utils/anchors.txt', type=str,
        help='the path that points to the anchor values for the models')
    parser.add_argument(
        '--data-path', default='data/image_paths_and_box_info.txt', type=str,
        help='the path that points to the training data text file')
    parser.add_argument(
        '--input-height', default=416, type=int,
        help='the input height of the yolov3 model. The height must be a multiple of 32')
    parser.add_argument(
        '--input-width', default=416, type=int,
        help='The input width of the yolov3 model. The width must be a multiple of 32')
    parser.add_argument(
        '--batch-size', default=32, type=int,
        help='the training batch size')
    parser.add_argument(
        '--max-num-boxes-per-image', default=20, type=int,
        help='the max number of boxes that can be detected within one image')
    parser.add_argument(
        '--num-training-epochs', default=150, type=int,
        help='the number of training epochs')
    parser.add_argument(
        '--learning-rate', default=0.001, type=float,
        help='the learning rate')
    parser.add_argument(
        '--ignore-threshold', default=0.5, type=float,
        help='impacts how the loss is calculated. Must be between zero and one')
    parser.add_argument(
        '--train-val-data-split', default=0.9, type=float,
        help='the split between the data that will be used for training and data that will be used\n\
        for validation')
    parser.add_argument(
        '--train-save-path', default='model-weights/',
        help="the training model's checkpoint save path")
    parser.add_argument(
        '--model-name', default='model.ckpt',
        help='the name that should be given to the checkpoint file')
    parser.add_argument(
        '--tensorboard-save-path', default='tb-logs/tb-train/',
        help='the path where the event files to be used with tensorboard will be saved at')
    parser.add_argument(
        '--test-model-overfit', nargs='?', default=False, type=str2bool, const=True,
        help='this option is useful in testing out if the loss function is working correctly')
    parser.add_argument(
        '--save-iterations', default=100, type=int,
        help="how frequently the model's training weights are saved")
    parser.add_argument(
        '--log-iterations', default=5, type=int,
        help="how frequently the model's loss is logged for it to be inspected in Tensorboard")
    args = parser.parse_args()

    # args info
    print('[i] checkpoint: ', args.train_from_checkpoint)
    print('[i] path of class file: ', args.class_path)
    print('[i] path of anchors file: ', args.anchors_path)

    # read inputs
    h = args.input_height
    w = args.input_width
    ignore_thresh = args.ignore_threshold
    max_num_boxes_per_image = args.max_num_boxes_per_image
    anchors = get_anchors(args.anchors_path)

    lr = args.learning_rate
    num_anchors_per_detector = len(anchors) // 3
    num_detectors_per_image = num_anchors_per_detector * (
            (h // 32) * (w // 32) + (h // 16) * (w // 16) + (h // 8) * (w // 8))

    class_names = get_classes(args.class_path)
    num_classes = len(class_names)

    # tensorboard path
    tb_train_path = args.tensorboard_save_path + 'train/'
    tb_val_path = args.tensorboard_save_path + 'val/'
    training_data, validation_data, batch_size = prepare_data(
        args.data_path,
        args.train_val_data_split,
        args.batch_size,
        args.test_model_overfit)

    tf.reset_default_graph()

    # build graph
    with tf.variable_scope('y_true'):
        y_true_data = tf.placeholder(dtype=tf.float32, shape=[None, num_detectors_per_image, num_classes + 5])
    with tf.variable_scope('y_true_boxes'):
        y_true_box_data = tf.placeholder(dtype=tf.float32,
                                         shape=[None, max_num_boxes_per_image * num_anchors_per_detector, 4])
    with tf.variable_scope('x_input'):
        X = tf.placeholder(dtype=tf.float32, shape=[None, h, w, 3])

    yolo_outputs = yolo_v3(inputs=X, num_classes=len(class_names), anchors=anchors, h=h, w=w, training=True)  # output
    loss = yolo_v3_loss(yolo_outputs, y_true_data, y_true_box_data, ignore_threshold=ignore_thresh, anchors=anchors,
                        num_classes=num_classes, h=h, w=w, batch_size=batch_size)

    tf.summary.scalar('loss', loss)
    global_step = tf.get_variable(name='global_step', trainable=False, initializer=0, dtype=tf.int32)

    # returns a varlist containing only the vars of the conv layers right before the yolo layers
    trainable_var_list = tf.trainable_variables()
    last_layer_var_list = [i for i in trainable_var_list if i.shape[-1] == (5 + num_classes) * num_anchors_per_detector]
    train_op_with_frozen_variables = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, global_step=global_step,
                                                                                       var_list=last_layer_var_list)
    train_op_with_all_variables = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, global_step=global_step,
                                                                                    var_list=trainable_var_list)
    summ = tf.summary.merge_all()

    # info
    print('-------info-------')
    print('[i] model weights will be saved with filename: ', args.model_name)
    print('[i] tensorboard event files located at path: ', args.tensorboard_save_path)
    # build training loop
    with tf.Session() as sess:

        train_writer = tf.summary.FileWriter(tb_train_path, sess.graph)
        val_writer = tf.summary.FileWriter(tb_val_path)

        # initialize model weights either randomly or from a saved checkpoint
        saver = tf.train.Saver()
        if args.train_from_checkpoint is None:
            print('[i] initializing variables...')
            sess.run(tf.global_variables_initializer())
        else:
            print('[i] restoring weights from checkpoint: ', args.train_from_checkpoint)
            saver.restore(sess, args.train_from_checkpoint)

        num_iterations = args.num_training_epochs * len(training_data)

        print('[i] beginning to train the model...')
        for i in range(num_iterations):

            input_images, y_true, y_true_boxes = get_training_batch(training_data, anchors, num_classes,
                                                                    batch_size=batch_size, h=h, w=w,
                                                                    random=not args.test_model_overfit)

            # For the first epochs, train with the frozen layers. Then, unfreeze the entire graph.
            if i < num_iterations // 3:
                sess.run(train_op_with_frozen_variables,
                         feed_dict={X: input_images, y_true_data: y_true, y_true_box_data: y_true_boxes})
            else:
                sess.run(train_op_with_all_variables,
                         feed_dict={X: input_images, y_true_data: y_true, y_true_box_data: y_true_boxes})

            if i % args.log_iterations == 0:
                # write the training loss to tensorboard
                lt, st = sess.run([loss, summ],
                                  feed_dict={X: input_images, y_true_data: y_true, y_true_box_data: y_true_boxes})
                train_writer.add_summary(st, i)

                # write the validation loss to tensorboard if we are not in overfit mode
                if not args.test_model_overfit:
                    input_images, y_true, y_true_boxes = get_training_batch(validation_data, anchors, num_classes,
                                                                            batch_size=batch_size, h=h, w=w,
                                                                            random=not args.test_model_overfit)
                    lv, sv = sess.run([loss, summ],
                                      feed_dict={X: input_images, y_true_data: y_true, y_true_box_data: y_true_boxes})
                    val_writer.add_summary(sv, i)
                    print(
                        "| iteration: " + str(i) + ", training loss: " + str(round(lt, 2)) + ", validation loss: " + str(
                            round(lv, 2)))
                else:
                    print("| iteration: " + str(i) + ", training loss: " + str(round(lt, 2)))

            if i % args.save_iterations == 0:
                print('[i] saving model weights at path: ', args.train_save_path)
                saver.save(sess, os.path.join(args.train_save_path, args.model_name), global_step)

            print('################################################################################')

    train_writer.close()
    val_writer.close()
Example #11
def yolo_main(flags_obj, model_function, input_function, dataset,
              augmentation):
    """Shared main loop for yolo Models.

    Args:
        flags_obj: An object containing parsed flags. See define_yolo_flags()
            for details.
        model_function: the function that instantiates the Model and builds the
            ops for train/eval. This will be passed directly into the estimator.
        input_function: the function that processes the dataset and returns a
            dataset that the estimator can train on. This will be wrapped with
            all the relevant flags for running and passed to estimator.
        dataset: A dataset for training and evaluation.
        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
            For example, passing imgaug.augmenters.Fliplr(0.5) flips images
            right/left 50% of the time.
      """

    model_helpers.apply_clean(flags_obj)

    # Ensures flag override logic is only executed if explicitly triggered.
    if flags_obj.tf_gpu_thread_mode:
        override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)

    # Creates session config. allow_soft_placement = True, is required for
    # multi-GPU and is not harmful for other modes.
    session_config = tf.ConfigProto(
        log_device_placement=True,
        inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
        intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
        allow_soft_placement=True)

    session_config.gpu_options.allow_growth = True

    distribution_strategy = distribution_utils.get_distribution_strategy(
        flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)

    run_config = tf.estimator.RunConfig(train_distribute=distribution_strategy,
                                        session_config=session_config,
                                        save_checkpoints_secs=60 * 60 * 24)

    # Initializes model with all but the dense layer from pretrained ResNet.
    if flags_obj.pretrained_model_checkpoint_path is not None:
        warm_start_settings = tf.estimator.WarmStartSettings(
            flags_obj.pretrained_model_checkpoint_path,
            vars_to_warm_start='^(?!.*dense)')
    else:
        warm_start_settings = None

    anchors = np.array(utils.get_anchors(flags_obj.anchors_path))

    detector = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=flags_obj.model_dir,
        config=run_config,
        warm_start_from=warm_start_settings,
        params={
            'num_classes':
            flags_obj.num_classes,
            'data_format':
            flags_obj.data_format,
            'batch_size':
            distribution_utils.per_device_batch_size(
                flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
            'image_size':
            int(flags_obj.image_size),
            'loss_scale':
            flags_core.get_loss_scale(flags_obj),
            'dtype':
            flags_core.get_tf_dtype(flags_obj),
            'fine_tune':
            flags_obj.fine_tune,
            'anchors':
            anchors,
            'num_anchors':
            len(anchors),
            'max_num_boxes_per_image':
            flags_obj.max_num_boxes_per_image,
            'threshold':
            flags_obj.threshold,
            'train':
            dataset.num_images,
            'learning_rate':
            flags_obj.learning_rate
        })

    # if flags_obj.use_synthetic_data:
    #     dataset_name = dataset_name + '-synthetic'

    def input_fn_train(num_epochs):
        return input_function(
            data_set=dataset,
            is_training=True,
            batch_size=distribution_utils.per_device_batch_size(
                flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
            anchors_path=flags_obj.anchors_path,
            num_epochs=num_epochs,
            augmentation=augmentation,
            dtype=tf.float32,
            max_num_boxes_per_image=flags_obj.max_num_boxes_per_image,
            image_size=flags_obj.image_size,
            datasets_num_private_threads=flags_obj.
            datasets_num_private_threads,
            num_parallel_batches=flags_obj.datasets_num_parallel_batches)

    '''
    def input_fn_eval():
        return input_function(
            is_training=False,
            data_dir=flags_obj.data_dir,
            batch_size=distribution_utils.per_device_batch_size(
                flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
            num_epochs=1,
            dtype=flags_core.get_tf_dtype(flags_obj))
            '''

    if flags_obj.eval_only or not flags_obj.train_epochs:
        # If --eval_only is set, perform a single loop with zero train epochs.
        schedule, n_loops = [0], 1
    else:
        # Compute the number of times to loop while training. All but the last
        # pass will train for `epochs_between_evals` epochs, while the last will
        # train for the number needed to reach `training_epochs`. For instance if
        #   train_epochs = 25 and epochs_between_evals = 10
        # schedule will be set to [10, 10, 5]. That is to say, the loop will:
        #   Train for 10 epochs and then evaluate.
        #   Train for another 10 epochs and then evaluate.
        #   Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
        n_loops = math.ceil(flags_obj.train_epochs /
                            flags_obj.epochs_between_evals)
        schedule = [
            flags_obj.epochs_between_evals for _ in range(int(n_loops))
        ]
        schedule[-1] = flags_obj.train_epochs - sum(
            schedule[:-1])  # trim the last cycle to avoid over-counting.

    for cycle_index, num_train_epochs in enumerate(schedule):
        tf.logging.info('Starting cycle: %d/%d', cycle_index, int(n_loops))

        if num_train_epochs:
            detector.train(input_fn=lambda: input_fn_train(num_train_epochs),
                           max_steps=flags_obj.max_train_steps)
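
The schedule computation above is worth tracing with concrete numbers, as its own comment suggests: train_epochs=25 with epochs_between_evals=10 yields [10, 10, 5], i.e. two full train-then-evaluate cycles plus a shortened final one. Reduced to its essentials:

import math

def make_schedule(train_epochs, epochs_between_evals):
    n_loops = math.ceil(train_epochs / epochs_between_evals)
    schedule = [epochs_between_evals] * int(n_loops)
    schedule[-1] = train_epochs - sum(schedule[:-1])  # trim the last cycle
    return schedule

print(make_schedule(25, 10))  # [10, 10, 5]
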