Example #1
0
    def random_transform_group_entry(self, image, annotations, transform=None):
        """ Randomly transforms image and annotation.

        Args
            image       : Image array to transform.
            annotations : Dict with 'masks' (list of mask arrays) and
                          'bboxes' (array of shape (N, 4)) entries.
            transform   : Optional transformation matrix. When None, one is
                          drawn from self.transform_generator (if set).

        Returns
            Tuple of the (possibly) transformed image and annotations.
        """
        # Randomly transform both image and annotations.
        # Bug fix: test `transform is not None` instead of truthiness — a
        # numpy transform matrix raises ValueError ("truth value of an array
        # is ambiguous") when evaluated as a boolean. Matches the sibling
        # implementation that already uses the explicit None check.
        if transform is not None or self.transform_generator:
            if transform is None:
                transform = adjust_transform_for_image(
                    next(self.transform_generator), image,
                    self.transform_parameters.relative_translation)

            # apply transformation to image
            image = apply_transform(transform, image,
                                    self.transform_parameters)

            # randomly transform the masks and expand so to have a fake channel dimension
            for i, mask in enumerate(annotations['masks']):
                annotations['masks'][i] = apply_transform(
                    transform, mask, self.transform_parameters)
                annotations['masks'][i] = np.expand_dims(
                    annotations['masks'][i], axis=2)

            # Transform the bounding boxes in the annotations.
            annotations['bboxes'] = annotations['bboxes'].copy()
            for index in range(annotations['bboxes'].shape[0]):
                annotations['bboxes'][index, :] = transform_aabb(
                    transform, annotations['bboxes'][index, :])

        return image, annotations
Example #2
0
    def random_transform_group_entry(self, image, annotations, masks):
        """ Apply one randomly generated transform to an image, its
        annotation boxes and its masks.

        When no transform generator is configured, everything is returned
        unchanged.
        """
        if self.transform_generator:
            # Draw the next transform and anchor it to this image's size.
            transform = adjust_transform_for_image(
                next(self.transform_generator), image,
                self.transform_parameters.relative_translation)
            image = apply_transform(transform, image, self.transform_parameters)

            # Warp each mask, then add a trailing singleton channel axis.
            for idx in range(len(masks)):
                warped = apply_transform(transform, masks[idx], self.transform_parameters)
                masks[idx] = np.expand_dims(warped, axis=2)

            # Work on a copy so the caller's annotation array is untouched,
            # then transform the box coordinates (first four columns) row by row.
            annotations = annotations.copy()
            for row in range(annotations.shape[0]):
                annotations[row, :4] = transform_aabb(transform, annotations[row, :4])

        return image, annotations, masks
    def random_transform_group_entry(self, image, annotations, transform=None):
        """ Randomly transforms image and annotation.

        Uses the supplied transform when given; otherwise draws one from
        self.transform_generator (if configured). With neither available,
        the inputs pass through unchanged.
        """
        have_explicit = transform is not None
        if have_explicit or self.transform_generator:
            if not have_explicit:
                # No transform supplied: generate one sized to this image.
                transform = adjust_transform_for_image(
                    next(self.transform_generator), image,
                    self.transform_parameters.relative_translation)

            # Warp the image itself.
            image = apply_transform(transform, image, self.transform_parameters)

            # Copy before mutating so the caller's bboxes array stays intact,
            # then transform every box in place on the copy.
            annotations['bboxes'] = annotations['bboxes'].copy()
            boxes = annotations['bboxes']
            for row in range(boxes.shape[0]):
                boxes[row, :] = transform_aabb(transform, boxes[row, :])

        return image, annotations
Example #4
0
from keras_retinanet.utils.transform import random_transform
from keras_retinanet.utils.image import apply_transform, TransformParameters, adjust_transform_for_image, read_image_bgr
from matplotlib.image import imsave

if __name__ == "__main__":
    # Smoke test: load an image, build a guaranteed horizontal flip,
    # apply it, and save the result next to the input.
    source = read_image_bgr('./TestFile/foo.jpg')
    flip = random_transform(flip_x_chance=1, flip_y_chance=0)
    print(flip)
    params = TransformParameters()
    # Anchor the transform to this image's dimensions before applying it.
    flip = adjust_transform_for_image(flip, source, params.relative_translation)
    flipped = apply_transform(flip, source, params)
    imsave('./TestFile/foo_transformed.jpg', flipped)
    exit(0)
def evaluate_image(model,
                   img_path,
                   save_path,
                   labels_to_names=None,
                   true_coords=None,
                   IoU_threshold=0.8,
                   score_threshold=0.6,
                   transformation_matrix=None,
                   crop_type=0):
    """Run the model on one image, draw predictions against ground truth,
    save the annotated figure, and report detection statistics.

    Args
        model                 : Detection model exposing predict_on_batch,
                                returning (boxes, scores, labels) batches.
        img_path              : Path of the image to evaluate.
        save_path             : Output path prefix; ".jpg" is appended.
        labels_to_names       : Optional class-id -> name mapping; defaults
                                to {0: "no fracture", 1: "fracture"}.
                                (Currently only read by the removed caption
                                code, kept for interface compatibility.)
        true_coords           : Optional array of ground-truth boxes.
        IoU_threshold         : Minimum IoU for a prediction to count as a hit.
        score_threshold       : Predictions below this score are ignored.
        transformation_matrix : Optional transform applied to the image (and
                                to true_coords, when given).
        crop_type             : When > 0, crop_image() is applied (TODO).

    Returns
        Tuple (pos_sensitivity, pos_pred_val, num_truth_w_threshold,
        num_true_coords, num_pred_w_threshold).
    """
    # Avoid a mutable default argument; the effective default is unchanged.
    if labels_to_names is None:
        labels_to_names = {0: "no fracture", 1: "fracture"}

    image = read_image_bgr(img_path)
    draw = image.copy()
    draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
    image = preprocess_image(image)
    image, scale = resize_image(image)

    if transformation_matrix is not None:
        image = apply_transform(transformation_matrix, image, None)
        if true_coords is not None:
            true_coords = transform_aabb(transformation_matrix, true_coords)

    # TODO: cropping support is unfinished.
    if crop_type > 0:
        image, true_coords = crop_image(image, true_coords, crop_type)

    start = time.time()
    boxes, scores, labels = model.predict_on_batch(
        np.expand_dims(image, axis=0))

    # Map predicted boxes back to the original image resolution.
    boxes /= scale

    num_truth_w_threshold = 0
    num_pred_w_threshold = 0
    pos_sensitivity = 0
    num_true_coords = 0
    # Working copy so truth boxes that already matched a prediction can be
    # removed. Bug fix: the original only defined coords_copy when
    # true_coords was not None, so the loop below raised NameError whenever
    # true_coords=None and a prediction passed the score threshold.
    coords_copy = true_coords.copy() if true_coords is not None else []
    for box, score, label in zip(boxes[0], scores[0], labels[0]):
        if score < score_threshold:
            break  # assumes scores are sorted descending — TODO confirm
        color = (255, 0, 0)  # red for bad prediction
        for i in range(len(coords_copy)):
            coords = coords_copy[i]
            if IoU(box, coords) > IoU_threshold:
                print("Prediction: " + str(box) + "; Truth: " + str(coords) +
                      "; Score: " + str(score) + "; IoU: " +
                      str(IoU(box, coords)))
                num_truth_w_threshold += 1
                coords_copy = np.delete(coords_copy, i, 0)
                print(coords_copy)
                color = (0, 255, 0)  # green for good prediction
                break
        b = box.astype(int)
        draw_box(draw, b, color=color)
        num_pred_w_threshold += 1

    # Bug fix: guard the truth drawing and the sensitivity division against
    # true_coords being None (TypeError) or empty (ZeroDivisionError).
    if true_coords is not None:
        for coords in true_coords:
            draw_box(draw, coords, color=(0, 0, 255))  # blue for true
        num_true_coords = len(true_coords)
    if num_true_coords > 0:
        pos_sensitivity = num_truth_w_threshold / num_true_coords

    plt.figure(figsize=(15, 15))
    plt.axis('off')
    plt.imshow(draw)
    plt.savefig(save_path + ".jpg")
    plt.close()

    try:
        pos_pred_val = num_truth_w_threshold / num_pred_w_threshold
    except ZeroDivisionError:
        pos_pred_val = 0
    return pos_sensitivity, pos_pred_val, num_truth_w_threshold, num_true_coords, num_pred_w_threshold