Code Example #1
  def store_image_with_bboxes(self, image, filename, detections):
    for ground_truth in self.ground_truths:
      label_display = LABEL_NAMES[ground_truth.class_number-1]
      # Adding extra lines to display_str_list so that label_display
      # will not get covered up by the detected box
      vis_util.draw_bounding_box_on_image(
        image,
        ground_truth.ymin,
        ground_truth.xmin,
        ground_truth.ymax,
        ground_truth.xmax,
        color=GROUND_TRUTH_COLOR,
        display_str_list=[label_display, label_display, label_display])

    for detection in detections:
      label_display = LABEL_NAMES[detection.class_number-1]
      if detection.score > MIN_SCORE_THRESH:
        vis_util.draw_bounding_box_on_image(
          image,
          detection.box[0],
          detection.box[1],
          detection.box[2],
          detection.box[3],
          color=PREDICTION_COLOR,
          display_str_list=[label_display])

    image.save(os.path.join(
      ERROR_IMAGE_OUTPUT_DIR, '{}.JPEG'.format(filename)))
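Every example on this page calls draw_bounding_box_on_image from the TensorFlow Object Detection API's object_detection.utils.visualization_utils. For reference, a minimal self-contained sketch of the call itself; the canvas size, coordinates, and label are made up, and the coordinate order is always ymin, xmin, ymax, xmax:

from PIL import Image
from object_detection.utils import visualization_utils as vis_util

# Blank canvas standing in for a real photo.
image = Image.new('RGB', (640, 480), color='black')
vis_util.draw_bounding_box_on_image(
    image,
    0.25, 0.40, 0.75, 0.60,  # ymin, xmin, ymax, xmax (normalized by default)
    color='red',
    thickness=2,
    display_str_list=['cat: 87%'])
image.save('example_box.jpg')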
Code Example #2
def draw_objects_on_image(image=None, objects=None, class_colors=None,
                          line_thickness=5, no_scores=False):
    # Sort ascending by score (index 5) so high-score boxes are drawn
    # last and overlay low-score ones.
    objects.sort(key=lambda x: x[5])
    for box in objects:
        if box[0] in class_colors:
            class_color = class_colors[box[0]]
        else:
            print("No class color for category: {}".format(box[0]))
            class_color = 'yellow'

        if no_scores:
            display_str_list = ['{}'.format(box[0])]
        else:
            display_str_list = ['{} {}%'.format(box[0], round(box[5] * 100))]
        
        # Note the coordinate order: box is (class, xmin, ymin, xmax, ymax, score).
        vis_util.draw_bounding_box_on_image(
            image, 
            box[2], 
            box[1], 
            box[4], 
            box[3],
            color=class_color, 
            thickness=line_thickness,
            display_str_list=display_str_list,
            use_normalized_coordinates=False 
        ) 
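Judging from the indexing above, each entry of objects is a (class_name, xmin, ymin, xmax, ymax, score) tuple in pixel coordinates. A usage sketch under that assumption; the file name, classes, and boxes are placeholders:

from PIL import Image

image = Image.open('frame.jpg')  # placeholder path
objects = [
    ('car', 40, 100, 220, 260, 0.91),
    ('person', 300, 80, 360, 240, 0.57),
]
class_colors = {'car': 'blue', 'person': 'red'}
draw_objects_on_image(image, objects, class_colors, line_thickness=3)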
Code Example #3
File: example_utils.py  Project: fmiusov/camera-api
def display_detection(image_tensor, class_names, threshold, prediction):
    """
    utility to display an image & its prediction
    input:  image - EagerTensor of encoded JPEG bytes
            class names - dict {class id: b'name'}
            threshold - 0.0 - 1.0  (keep if score > threshold)
            prediction - model response for this instance
    """
    # post processed prediction
    # - without the threshold applied
    detect_scores = np.asarray(prediction['detection_scores'],
                               dtype=np.float32)
    # Note: int8 caps class ids at 127; np.int32 is safer for large label maps.
    detect_classes = np.asarray(prediction['detection_classes'], dtype=np.int8)
    detect_boxes = np.asarray(prediction['detection_boxes'], dtype=np.float32)

    # keep == w/ threshold applied
    keep_idx = np.argwhere(detect_scores > threshold)
    class_ids = detect_classes[keep_idx]
    (obj_count, discard) = class_ids.shape
    # get the class names you need to keep
    obj_class_ids = class_ids.reshape(-1)
    obj_class_names = []
    for i in obj_class_ids:
        obj_class_names.append(class_names[i])
    obj_class_names = np.asarray(obj_class_names)
    # get the bounding boxes you need to keep
    bboxes = detect_boxes[keep_idx]

    # just keep data from here on
    image_decoded_tensor = tf.image.decode_jpeg(image_tensor)
    image = image_decoded_tensor.numpy()
    pil_image = Image.fromarray(image)
    bb = bboxes.reshape(-1, 4)
    ymins = bb[:, 0]
    xmins = bb[:, 1]
    ymaxs = bb[:, 2]
    xmaxs = bb[:, 3]

    print("detected:", obj_count, obj_class_names)

    for idx in range(obj_count):
        draw_bounding_box_on_image(pil_image,
                                   ymins[idx],
                                   xmins[idx],
                                   ymaxs[idx],
                                   xmaxs[idx],
                                   color=STANDARD_COLORS[obj_class_ids[idx]],
                                   thickness=4,
                                   display_str_list=[obj_class_names[idx]],
                                   use_normalized_coordinates=True)
    display.display(pil_image)
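A hedged sketch of how this utility might be invoked. The prediction dict is hand-built to mirror the keys the function reads; in practice it comes from the model's response, 'image.jpg' is a placeholder, and the function's module-level names (STANDARD_COLORS, display) are assumed to resolve as in example_utils.py:

import tensorflow as tf

image_tensor = tf.io.read_file('image.jpg')  # encoded JPEG bytes
class_names = {1: b'person', 2: b'car'}
prediction = {
    'detection_scores': [0.92, 0.41],
    'detection_classes': [1, 2],
    'detection_boxes': [[0.10, 0.20, 0.50, 0.60],   # each box is ymin, xmin,
                        [0.30, 0.30, 0.90, 0.80]],  # ymax, xmax (normalized)
}
display_detection(image_tensor, class_names, threshold=0.5, prediction=prediction)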
Code Example #4
  def test_draw_bounding_box_on_image(self):
    test_image = self.create_colorful_test_image()
    test_image = Image.fromarray(test_image)
    width_original, height_original = test_image.size
    ymin = 0.25
    ymax = 0.75
    xmin = 0.4
    xmax = 0.6

    visualization_utils.draw_bounding_box_on_image(test_image, ymin, xmin, ymax,
                                                   xmax)
    width_final, height_final = test_image.size

    self.assertEqual(width_original, width_final)
    self.assertEqual(height_original, height_final)
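create_colorful_test_image is not shown in this excerpt. A minimal stand-in (written here as a free function) that would satisfy the test, returning an HxWx3 uint8 array that Image.fromarray accepts; the gradient pattern is an assumption, not the API's actual helper:

import numpy as np

def create_colorful_test_image():
    h, w = 40, 60
    r = np.tile(np.linspace(0, 255, w, dtype=np.uint8), (h, 1))  # red ramps left to right
    g = np.tile(np.linspace(255, 0, w, dtype=np.uint8), (h, 1))  # green ramps right to left
    b = np.full((h, w), 128, dtype=np.uint8)                     # constant blue
    return np.stack([r, g, b], axis=-1)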
Code Example #5
    def on_annotated_image_available(self, image, image_annotation,
                                     object_annotations):
        for object_annotation in object_annotations:
            viz_utils.draw_bounding_box_on_image(
                image,
                object_annotation.ymin,
                object_annotation.xmin,
                object_annotation.ymax,
                object_annotation.xmax,
                color='white',
                thickness=1,
                display_str_list=(str(object_annotation.class_id), ),  # labels must be strings
                use_normalized_coordinates=False)

        # Invoke the next ImageProcessor(s).
        for listener in self.annotated_image_listeners:
            listener.on_annotated_image_available(image, image_annotation,
                                                  object_annotations)
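A minimal sketch of a downstream listener that could be appended to annotated_image_listeners; the class and its save path are hypothetical, and only the on_annotated_image_available signature comes from the code above:

class SavingListener:
    """Hypothetical downstream processor: saves each annotated frame."""

    def __init__(self, out_dir):
        self.out_dir = out_dir
        self.count = 0

    def on_annotated_image_available(self, image, image_annotation,
                                     object_annotations):
        image.save('{}/frame_{:05d}.png'.format(self.out_dir, self.count))
        self.count += 1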
Code Example #6
def process_detections(detections_record, categories):
    record_iterator = tf.python_io.tf_record_iterator(path=detections_record)
    data_parser = tf_example_parser.TfExampleDetectionAndGTParser()

    confusion_matrix = np.zeros(shape=(len(categories) + 1,
                                       len(categories) + 1))

    num_shown = 0
    image_index = 0

    os.makedirs(detectionDirName, exist_ok=True)

    for string_record in record_iterator:
        example = tf.train.Example()
        example.ParseFromString(string_record)
        decoded_dict = data_parser.parse(example)

        image_index += 1

        if decoded_dict:
            groundtruth_boxes = decoded_dict[
                standard_fields.InputDataFields.groundtruth_boxes]
            groundtruth_classes = decoded_dict[
                standard_fields.InputDataFields.groundtruth_classes]

            detection_scores = decoded_dict[
                standard_fields.DetectionResultFields.detection_scores]
            detection_classes = decoded_dict[
                standard_fields.DetectionResultFields.detection_classes][
                    detection_scores >= CONFIDENCE_THRESHOLD]
            detection_boxes = decoded_dict[
                standard_fields.DetectionResultFields.detection_boxes][
                    detection_scores >= CONFIDENCE_THRESHOLD]

            if num_shown < NUM_TO_SHOW:

                # Convert encoded image in example TF Record to image
                testing = TfDecoder()
                features = testing.decode(string_record)
                image = features['image']
                with tf.Session() as sess:
                    image = image.eval()

                im = PIL.Image.fromarray(image)
                for box in groundtruth_boxes:
                    vis_util.draw_bounding_box_on_image(
                        im,
                        box[0] * IMAGE_HEIGHT,
                        box[1] * IMAGE_WIDTH,
                        box[2] * IMAGE_HEIGHT,
                        box[3] * IMAGE_WIDTH,
                        color='red',
                        thickness=1,
                        use_normalized_coordinates=False)
                for box in detection_boxes:
                    vis_util.draw_bounding_box_on_image(
                        im,
                        box[0] * IMAGE_HEIGHT,
                        box[1] * IMAGE_WIDTH,
                        box[2] * IMAGE_HEIGHT,
                        box[3] * IMAGE_WIDTH,
                        color='blue',
                        thickness=1,
                        use_normalized_coordinates=False)

                # UNCOMMENT TO DISPLAY IMAGES W/ BoundingBox
                #plt.imshow(np.asarray(im))
                #plt.show()

                # Code to create directory & save images w/ bounding boxes

                filename = decoded_dict['key']

                im.save(detectionDirName + "/" + filename)

                num_shown += 1

            matches = []

            if image_index % 100 == 0:
                print("Processed %d images" % (image_index))
            for i in range(len(groundtruth_boxes)):
                for j in range(len(detection_boxes)):
                    iou = compute_iou(groundtruth_boxes[i], detection_boxes[j])

                    if iou > IOU_THRESHOLD:
                        matches.append([i, j, iou])

            matches = np.array(matches)
            if matches.shape[0] > 0:
                # Sort list of matches by descending IOU so we can remove duplicate detections
                # while keeping the highest IOU entry.
                matches = matches[matches[:, 2].argsort()[::-1][:len(matches)]]

                # Remove duplicate detections from the list.
                matches = matches[np.unique(matches[:, 1],
                                            return_index=True)[1]]

                # Sort the list again by descending IOU. Removing duplicates doesn't preserve
                # our previous sort.
                matches = matches[matches[:, 2].argsort()[::-1][:len(matches)]]

                # Remove duplicate ground truths from the list.
                matches = matches[np.unique(matches[:, 0],
                                            return_index=True)[1]]

            for i in range(len(groundtruth_boxes)):
                if matches.shape[0] > 0 and matches[matches[:, 0] ==
                                                    i].shape[0] == 1:
                    confusion_matrix[groundtruth_classes[i] - 1][
                        detection_classes[int(matches[matches[:, 0] == i,
                                                      1][0])] - 1] += 1
                else:
                    confusion_matrix[groundtruth_classes[i] -
                                     1][confusion_matrix.shape[1] - 1] += 1

            for i in range(len(detection_boxes)):
                if matches.shape[0] > 0 and matches[matches[:, 1] ==
                                                    i].shape[0] == 0:
                    confusion_matrix[confusion_matrix.shape[0] -
                                     1][detection_classes[i] - 1] += 1
        else:
            print("Skipped image %d" % (image_index))

    print("Processed %d images" % (image_index))

    return confusion_matrix
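compute_iou is called above but not included in this excerpt. A minimal sketch of standard intersection-over-union for [ymin, xmin, ymax, xmax] boxes, matching how it is called here; this is the textbook formula, not necessarily the project's exact helper:

def compute_iou(box_a, box_b):
    # Intersection rectangle, clipped to zero when the boxes do not overlap.
    inter_h = max(0.0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]))
    inter_w = max(0.0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))
    intersection = inter_h * inter_w
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - intersection
    return intersection / union if union > 0 else 0.0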
Code Example #7
File: box_viz.py  Project: ahangchen/Cattle_ssd
import os  # needed for the os.system calls below

os.system('mkdir detect4video')
os.system('rm detect4video/*')
import numpy as np
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import tensorflow as tf
from PIL import Image
from file_helper import read_lines
from object_detection.utils import visualization_utils as vis_util


def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)


lines = read_lines('oid_cattle.txt')
examples = list()
last_path = ''
for line in lines:
    infos = line.split()
    img_path = infos[0]
    box = list()
    for bi in infos[1:5]:
        box.append(float(bi))
    image = Image.open(img_path)
    draw = ImageDraw.Draw(image)
    # oid_cattle.txt stores boxes as (xmin, ymin, xmax, ymax); reorder to the
    # (ymin, xmin, ymax, xmax) argument order the utility expects.
    vis_util.draw_bounding_box_on_image(image, box[1], box[0], box[3], box[2])
    image.show()
    raw_input(img_path)  # Python 2 built-in; use input() on Python 3
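read_lines comes from the project's file_helper module and is not shown. From the parsing above, each line holds an image path followed by four box values; a hedged stand-in and a hypothetical sample line:

def read_lines(path):
    # Stand-in for file_helper.read_lines (assumed behavior).
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]

# Each line is parsed as: <img_path> <xmin> <ymin> <xmax> <ymax>
# e.g. "imgs/cow_001.jpg 12.0 34.0 200.0 180.0"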
Code Example #8
File: longmao.py  Project: houweidong/models
    else:
        raise Exception("Unsupported dataset: {}".format(args.dataset))

    print('Start to visualize dataset: {}'.format(args.dataset))
    print("Type 'x' to stop.")
    while True:
        ind = random.randint(0, len(ds) - 1)
        img, sample = ds[ind]

        print("Visualizing image {}".format(sample["img"].split("/")[-1]))
        if multi_bbox_per_image:
            people = sample["people"]
        else:
            people = [sample]

        for person in people:
            x, y, w, h = person["bbox"]
            display_str = [
                "{}/{}".format(k, v) for k, v in person["attrs"].items()
            ]
            draw_bounding_box_on_image(img,
                                       y,
                                       x, (y + h), (x + w),
                                       display_str_list=display_str,
                                       thickness=3,
                                       use_normalized_coordinates=False)

        # Convert pil image to opencv format for display control
        image_bgr = pil_to_cv_image(img)
        interactive_imshow(image_bgr)
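The loop above converts each person's (x, y, w, h) box into the (ymin, xmin, ymax, xmax) corner order the drawing utility expects. The same conversion in isolation, with made-up values:

person = {"bbox": (40, 60, 120, 200),  # x, y, w, h in pixels (illustrative)
          "attrs": {"glasses": "yes"}}
x, y, w, h = person["bbox"]
ymin, xmin, ymax, xmax = y, x, y + h, x + w  # corner order for draw_bounding_box_on_image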