import pytest

from keras_retinanet.preprocessing import csv_generator


def test_read_classes_duplicate_name():
    with pytest.raises(ValueError):
        try:
            csv_generator._read_classes(csv_str('a,1\nb,2\na,3'))
        except ValueError as e:
            assert str(e).startswith('line 3: duplicate class name')
            raise
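# The tests above and below call a csv_str helper that is defined elsewhere in
# the test module and is not shown here. A minimal sketch of what such a helper
# might look like (an assumption, not the original implementation): wrap the
# raw string in a csv.reader so _read_classes receives parsed rows.
import csv
import io


def csv_str(string):
    # build an in-memory CSV reader over the given string
    return csv.reader(io.StringIO(string))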
def test_read_classes_wrong_format():
    with pytest.raises(ValueError):
        try:
            csv_generator._read_classes(csv_str('a,b,c'))
        except ValueError as e:
            assert str(e).startswith('line 1: format should be')
            raise
    with pytest.raises(ValueError):
        try:
            csv_generator._read_classes(csv_str('a,1\nb,c,d'))
        except ValueError as e:
            assert str(e).startswith('line 2: format should be')
            raise
def test_read_classes_malformed_class_id():
    with pytest.raises(ValueError):
        try:
            csv_generator._read_classes(csv_str('a,b'))
        except ValueError as e:
            assert str(e).startswith("line 1: malformed class ID:")
            raise

    with pytest.raises(ValueError):
        try:
            csv_generator._read_classes(csv_str('a,1\nb,c'))
        except ValueError as e:
            assert str(e).startswith('line 2: malformed class ID:')
            raise
def read_classes(self):
    """Read the class file in case of multi-class training. If no file has
    been created, DeepForest assumes there is 1 class, Tree."""
    # parse the provided class file; fall back to the single default class
    self.labels = {}
    try:
        with open(self.classes_file, 'r') as file:
            self.classes = _read_classes(csv.reader(file, delimiter=','))
        for key, value in self.classes.items():
            self.labels[value] = key
    except Exception:
        # no usable class file: assume a single class, "Tree"
        self.labels[0] = "Tree"
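# A minimal sketch (not from the original source) of the class file that
# read_classes() parses: one "name,id" pair per line, exactly the format the
# _read_classes tests above exercise. The class names here are illustrative.
import csv
import io

from keras_retinanet.preprocessing import csv_generator

example_class_csv = "Tree,0\nSnag,1"
example_classes = csv_generator._read_classes(
    csv.reader(io.StringIO(example_class_csv), delimiter=','))
# example_classes == {'Tree': 0, 'Snag': 1}

# invert the mapping the same way read_classes() does, id -> name
example_labels = {value: key for key, value in example_classes.items()}
# example_labels == {0: 'Tree', 1: 'Snag'}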
def read_class_csv(csv_class_file):
    """
    function that reads in a class csv file that is used as an input to the retinanet training routine, and outputs
    a list of class names which is the format required by Keras ImageGenerator
    :param csv_class_file: the filename of the csv containing class names and indices
    :return: list of classnames
    """
    try:
        with csv_generator._open_for_csv(csv_class_file) as file:
            classes = csv_generator._read_classes(
                csv.reader(file, delimiter=','))
            class_list = list(classes.keys())
            for c in classes:
                class_idx = classes[c]
                class_list[class_idx] = c
            return class_list
    except ValueError as e:
        raise_from(
            ValueError('invalid CSV class file: {}: {}'.format(
                csv_class_file, e)), None)
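# A hypothetical usage sketch for read_class_csv above (the file contents and
# class names are made up for illustration): write a small two-class file and
# read it back as a list ordered by class index.
import tempfile


def _demo_read_class_csv():
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as handle:
        handle.write('Tree,0\nSnag,1')
        path = handle.name
    # expected result: ['Tree', 'Snag']
    return read_class_csv(path)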
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generator
    #generator = create_generator(args)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model, backbone_name=args.backbone)

    # optionally convert the model
    if args.convert_model:
        model = models.convert_model(model, anchor_params=anchor_params)

    # print model summary (model.summary() prints directly and returns None)
    model.summary()

    print("annotations", args.annotations)
    print("classes", args.classes)
    print("min", args.image_min_side)
    print("max", args.image_max_side)
    print("configs", args.config)
    iou_threshold = args.iou_threshold
    score_threshold = args.score_threshold
    max_detections = args.max_detections

    image_names = []
    image_data = {}

    # Take base_dir from annotations file if not explicitly specified.

    # parse the provided class file
    try:
        with _open_for_csv(args.classes) as file:
            classes = _read_classes(csv.reader(file, delimiter=','))
    except ValueError as e:
        raise_from(
            ValueError('invalid CSV class file: {}: {}'.format(
                args.classes, e)), None)

    labels = {}
    for key, value in classes.items():
        labels[value] = key

    # csv with img_path, x1, y1, x2, y2, class_name
    try:
        with _open_for_csv(args.annotations) as file:
            file_annotations = _read_annotations(
                csv.reader(file, delimiter=','), classes)
    except ValueError as e:
        raise_from(
            ValueError('invalid CSV annotations file: {}: {}'.format(
                args.annotations, e)), None)
    image_names = list(file_annotations.keys())

    num_classes = len(labels)

    all_detections = [[None for i in range(num_classes) if i in labels]
                      for j in range(len(image_names))]
    for image_index in range(len(image_names)):
        """ Load annotations for an image_index.
		"""
        path = file_annotations[image_names[image_index]]
        annotations = {'labels': np.empty((0, )), 'bboxes': np.empty((0, 4))}

        for idx, annot in enumerate(
                file_annotations[image_names[image_index]]):
            for key, value in classes.items():
                if annot['class'] == key:
                    break

            annotations['labels'] = np.concatenate(
                (annotations['labels'], [value]))
            annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[
                float(annot['x1']),
                float(annot['y1']),
                float(annot['x2']),
                float(annot['y2']),
            ]]))

    f = []
    for label in range(num_classes):
        for cls in classes:
            if classes[cls] == label:
                print('class', cls)
                break
        f.append(
            open(
                "results/comp4_det_" +
                args.annotations.split("/")[-1].split(".")[0] + "_" + cls +
                ".txt", 'w'))

    for i in range(0, len(image_names)):
        print('image num', i)
        file_name = image_names[i]

        # load image
        image = read_image_bgr(file_name)
        image = preprocess_image(image)
        image, scale = resize_image(image)
        # run inference; keep the predicted labels under a separate name so
        # they do not shadow the class index -> name mapping built above
        boxes, scores, pred_labels = model.predict_on_batch(
            np.expand_dims(image, axis=0))
        boxes /= scale

        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]

        # select those scores
        scores = scores[0][indices]

        # find the order with which to sort the scores
        scores_sort = np.argsort(-scores)[:max_detections]

        # select detections
        image_boxes = boxes[0, indices[scores_sort], :]
        image_scores = scores[scores_sort]
        image_labels = pred_labels[0, indices[scores_sort]]
        image_detections = np.concatenate([
            image_boxes,
            np.expand_dims(image_scores, axis=1),
            np.expand_dims(image_labels, axis=1)
        ],
                                          axis=1)
        # copy detections to all_detections
        for label in range(num_classes):
            if label not in labels:
                continue
            dets = image_detections[image_detections[:, -1] == label, :-1]
            for cls in classes:
                if classes[cls] == label:
                    print('class', cls)
                    break
            for scr in dets:
                f[label].write(
                    file_name.split("/")[-1].split(".")[0] + " " +
                    str(scr[4]) + " " + str(scr[0]) + " " + str(scr[1]) + " " +
                    str(scr[2]) + " " + str(scr[3]) + "\n")

    for label in range(num_classes):
        f[label].close()
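# Each file written above ("results/comp4_det_<annotations stem>_<class>.txt")
# holds one detection per line as "<image id> <score> <x1> <y1> <x2> <y2>".
# A minimal sketch (not part of the original script) for reading one of these
# files back; the function name and path argument are hypothetical.
def load_detection_file(path):
    detections = []
    with open(path) as handle:
        for line in handle:
            image_id, score, x1, y1, x2, y2 = line.split()
            detections.append((image_id, float(score),
                               float(x1), float(y1), float(x2), float(y2)))
    return detections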
def test_read_classes():
    assert csv_generator._read_classes(csv_str('')) == {}
    assert csv_generator._read_classes(csv_str('a,1')) == {'a': 1}
    assert csv_generator._read_classes(csv_str('a,1\nb,2')) == {'a': 1, 'b': 2}
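# For completeness, a minimal sketch (an assumption, mirroring how main() above
# consumes the result) of feeding an annotations CSV to _read_annotations: each
# row is "img_path,x1,y1,x2,y2,class_name" and the result maps each image path
# to a list of {'x1', 'y1', 'x2', 'y2', 'class'} dicts. The paths and boxes
# below are illustrative only.
import csv
import io

from keras_retinanet.preprocessing import csv_generator

example_annotations_csv = ("images/a.jpg,10,20,110,120,Tree\n"
                           "images/a.jpg,30,40,130,140,Snag\n")
example_classes = {'Tree': 0, 'Snag': 1}
example_annotations = csv_generator._read_annotations(
    csv.reader(io.StringIO(example_annotations_csv), delimiter=','),
    example_classes)
# example_annotations['images/a.jpg'][0]['class'] == 'Tree'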