Example #1
def run(generator, args):
    """ Main loop.

    Args
        generator: The generator to debug.
        args: argparse args object.
    """
    # display images, one at a time
    for i in range(generator.size()):
        # load the data
        image = generator.load_image(i)
        annotations, masks = generator.load_annotations(i)

        # apply random transformations
        if args.random_transform:
            image, annotations, masks = generator.random_transform_group_entry(
                image, annotations, masks)

        # resize the image and annotations
        if args.resize:
            image, image_scale = generator.resize_image(image)
            annotations[:, :4] *= image_scale
            for m in range(len(masks)):
                masks[m], _ = generator.resize_image(masks[m])

        # draw anchors on the image
        if args.anchors:
            anchors = generator.generate_anchors(image.shape)
            labels, _, _ = generator.compute_anchor_targets(
                anchors, [image], [annotations], generator.num_classes())
            draw_boxes(image,
                       anchors[np.max(labels[0], axis=1) == 1], (255, 255, 0),
                       thickness=1)

        # draw annotations on the image
        if args.annotations:
            # draw annotations in red
            draw_annotations(image,
                             annotations,
                             color=(0, 0, 255),
                             label_to_name=generator.label_to_name)

            # draw regressed anchors in green to override most red annotations
            # result is that annotations without anchors are red, with anchors are green
            anchors = generator.generate_anchors(image.shape)
            labels, _, boxes = generator.compute_anchor_targets(
                anchors, [image], [annotations], generator.num_classes())
            draw_boxes(image, boxes[0, np.max(labels[0], axis=1) == 1],
                       (0, 255, 0))

        # Draw masks over the image with random colours
        if args.masks:
            for m in range(len(masks)):
                # crop the mask with the related bbox size, and then draw them
                box = annotations[m, :4].astype(int)
                mask = masks[m][box[1]:box[3], box[0]:box[2]]
                draw_mask(image, box, mask, annotations[m, 4].astype(int))
                # add the label caption
                caption = '{}'.format(
                    generator.label_to_name(annotations[m, 4]))
                draw_caption(image, box, caption)

        cv2.imshow('Image', image)
        if cv2.waitKey() == ord('q'):
            return False
    return True
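
All of these run() variants take the same two inputs: a generator exposing load_image/load_annotations and friends, and a parsed command-line namespace whose flags gate each overlay. A minimal sketch of wiring that up for Example #1; the flag names mirror the attributes the function reads, while the CSVGenerator construction is a hypothetical, not part of the original code:

# Hypothetical driver for Example #1; argparse maps --random-transform
# to args.random_transform, and so on for the other flags.
import argparse

parser = argparse.ArgumentParser(description='Debug a detection generator.')
parser.add_argument('--random-transform', action='store_true')
parser.add_argument('--resize', action='store_true')
parser.add_argument('--anchors', action='store_true')
parser.add_argument('--annotations', action='store_true')
parser.add_argument('--masks', action='store_true')
args = parser.parse_args()

# generator = CSVGenerator('annotations.csv', 'classes.csv')  # hypothetical
# run(generator, args)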
Example #2
def run(generator, args):
    """ Main loop.

    Args
        generator: The generator to debug.
        args: argparse args object.
    """
    # display images, one at a time
    # for i in np.random.choice(generator.size(), 200):
    for i in range(generator.size()):
        # load the data
        image = generator.load_image(i)
        annotations = generator.load_annotations(i)

        # apply random transformations
        if args.random_transform:
            image, annotations = generator.random_transform_group_entry(
                image, annotations)

        # resize the image and annotations
        if args.resize:
            image, image_scale = generator.resize_image(image)
            annotations[:, :4] *= image_scale

        # take the middle slice of the volume as a 2D image
        image = image[..., image.shape[-2] // 2, 0]
        # min-max scale the slice to [0, 255]
        image = (image - image.min()) / (image.max() - image.min()) * 255.0
        # tile the grayscale slice to three channels (assumes 512x512 slices)
        image = np.repeat(image.reshape((512, 512, 1)), 3, axis=2)
        image = image.astype(np.uint8).copy()

        anchors = anchors_for_shape(image.shape)

        labels_batch, regression_batch, boxes_batch = generator.compute_anchor_targets(
            anchors, [image], [annotations], generator.num_classes())
        anchor_states = labels_batch[0, :, -1]

        # draw anchors on the image
        if args.anchors:
            draw_boxes(image,
                       boxes_batch[0, anchor_states == 1, :], (0, 255, 0),
                       thickness=1)

        # draw annotations on the image
        if args.annotations:
            # draw annotations in red
            draw_annotations(image,
                             annotations,
                             color=(0, 0, 255),
                             label_to_name=generator.label_to_name)

            # draw regressed anchors in green to override most red annotations
            # result is that annotations without anchors are red, with anchors are green
            draw_boxes(image, boxes_batch[0, anchor_states == 1, :],
                       (0, 255, 0))

        # write the image to disk rather than displaying it interactively
        cv2.imwrite(os.path.join('debug', '{}.png'.format(i)), image)
    return True
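
Example #2 adapts the loop to volumetric input: it extracts the middle slice, min-max scales it, and tiles it to three channels before drawing. A small sketch of that scaling step that also guards against a constant slice; unlike the hard-coded 512x512 reshape above, this version works for any 2D slice:

import numpy as np

def slice_to_rgb(volume_slice):
    # min-max scale a 2D slice to [0, 255], guarding against zero range
    lo, hi = float(volume_slice.min()), float(volume_slice.max())
    if hi == lo:
        scaled = np.zeros_like(volume_slice, dtype=np.float64)
    else:
        scaled = (volume_slice - lo) / (hi - lo) * 255.0
    # tile the grayscale slice into three identical channels
    return np.repeat(scaled[..., np.newaxis], 3, axis=2).astype(np.uint8)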
Example #3
def run(generator, args):
    """ Main loop.

    Args
        generator: The generator to debug.
        args: argparse args object.
    """
    # display images, one at a time
    for i in range(generator.size()):
        # load the data
        image = generator.load_image(i)
        annotations = generator.load_annotations(i)

        # apply random transformations
        if args.random_transform:
            image, annotations = generator.random_transform_group_entry(
                image, annotations)

        # resize the image and annotations
        if args.resize:
            image, image_scale = generator.resize_image(image)
            annotations[:, :4] *= image_scale

        anchors = anchors_for_shape(image.shape)

        labels_batch, regression_batch, boxes_batch = generator.compute_anchor_targets(
            anchors, [image], [annotations], generator.num_classes())
        anchor_states = labels_batch[0, :, -1]

        # draw anchors on the image
        if args.anchors:
            draw_boxes(image,
                       anchors[anchor_states == 1], (255, 255, 0),
                       thickness=1)

        # draw annotations on the image
        if args.annotations:
            # draw annotations in red
            draw_annotations(image,
                             annotations,
                             color=(0, 0, 255),
                             label_to_name=generator.label_to_name)

            # draw regressed anchors in green to override most red annotations
            # result is that annotations without anchors are red, with anchors are green
            draw_boxes(image, boxes_batch[0, anchor_states == 1, :],
                       (0, 255, 0))

        cv2.imshow('Image', image)
        if cv2.waitKey() == ord('q'):
            return False
    return True
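
The anchor_states vector used above is the last column of labels_batch; in keras-retinanet-style target computation it encodes -1 for ignored anchors, 0 for background, and 1 for positives, which is why the drawing code filters on == 1. A short illustrative sketch of splitting anchors by state, reusing the names from the loop:

# split anchors by target state (-1 = ignore, 0 = background, 1 = positive)
positive_anchors = anchors[anchor_states == 1]
negative_anchors = anchors[anchor_states == 0]
ignored_anchors = anchors[anchor_states == -1]
print('positive: {}, negative: {}, ignored: {}'.format(
    len(positive_anchors), len(negative_anchors), len(ignored_anchors)))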
Example #4
def run(generator, args, anchor_params):
    """ Main loop.

    Args
        generator: The generator to debug.
        args: argparse args object.
        anchor_params: Anchor parameters used to generate anchors.
    """
    # display images, one at a time
    for i in range(generator.size()):
        # load the data
        image       = generator.load_image(i)
        annotations = generator.load_annotations(i)

        # apply random transformations
        if args.random_transform:
            image, annotations = generator.random_transform_group_entry(image, annotations)

        # resize the image and annotations
        if args.resize:
            image, image_scale  = generator.resize_image(image)
            annotations['bboxes'] *= image_scale
            for m in range(len(annotations['masks'])):
                annotations['masks'][m], _ = generator.resize_image(annotations['masks'][m])

        anchors = anchors_for_shape(image.shape, anchor_params=anchor_params)
        positive_indices, _, max_indices = compute_gt_annotations(anchors, annotations['bboxes'])

        # draw anchors on the image
        if args.anchors:
            anchors = generator.generate_anchors(image.shape)
            positive_indices, _, max_indices = compute_gt_annotations(anchors, annotations['bboxes'])
            draw_boxes(image, anchors[positive_indices], (255, 255, 0), thickness=1)

        # draw annotations on the image
        if args.annotations:
            # draw annotations in red
            draw_annotations(image, annotations, color=(0, 0, 255), label_to_name=generator.label_to_name)

            # draw regressed anchors in green to override most red annotations
            # result is that annotations without anchors are red, with anchors are green
            draw_boxes(image, annotations['bboxes'][max_indices[positive_indices], :], (0, 255, 0))

        # Draw masks over the image with random colours
        if args.masks:
            for m in range(len(annotations['masks'])):
                # crop the mask with the related bbox size, and then draw them
                box = annotations['bboxes'][m].astype(int)
                mask = annotations['masks'][m][box[1]:box[3], box[0]:box[2]]
                draw_mask(image, box, mask, annotations['labels'][m].astype(int))
                # add the label caption
                caption = '{}'.format(generator.label_to_name(annotations['labels'][m]))
                draw_caption(image, box, caption)

        cv2.imshow('Image', image)
        if cv2.waitKey() == ord('q'):
            return False
    return True
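
Unlike Examples #1-#3, which index annotations as a plain array, Example #4 expects the dictionary-style annotations used by newer keras-retinanet generators, keyed by 'bboxes', 'labels', and 'masks'. A minimal sketch of that structure, with illustrative values:

import numpy as np

# dictionary-style annotations as consumed by Example #4 (values illustrative)
annotations = {
    'bboxes': np.array([[10.0, 20.0, 110.0, 220.0]]),  # (N, 4) x1, y1, x2, y2
    'labels': np.array([0.0]),                         # (N,) class indices
    'masks': [np.zeros((480, 640), dtype=np.uint8)],   # one mask per object
}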
Example #5
def _get_detections(generator,
                    model,
                    score_threshold=0.05,
                    max_detections=300,
                    save_path=None,
                    experiment=None,
                    postprocess=False):
    """ Get the detections from the model using the generator.

    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]

    # Arguments
        generator       : The generator used to run images through the model.
        model           : The model to run on the images.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to.
        experiment    : Comet ML experiment
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    all_detections = [[None for i in range(generator.num_classes())]
                      for j in range(generator.size())]

    for i in range(generator.size()):
        raw_image = generator.load_image(i)
        plot_image = copy.deepcopy(raw_image)

        # format the raw image name (lfname is not used below)
        image_name = generator.image_names[i]
        row = generator.image_data[image_name]
        lfname = os.path.splitext(row["tile"])[0] + "_" + str(
            row["window"]) + "raw_image"

        # skip if missing a component data source
        if raw_image is False:
            print("Empty image, skipping")
            continue

        # keep an RGB copy for plotting
        plot_rgb = plot_image[:, :, :3].copy()

        # preprocess and resize for prediction
        image = generator.preprocess_image(raw_image)
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(
            np.expand_dims(image, axis=0))[:3]

        # correct boxes for image scale
        boxes /= scale

        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]

        # select those scores
        scores = scores[0][indices]

        # find the order with which to sort the scores
        scores_sort = np.argsort(-scores)[:max_detections]

        # select detections
        image_boxes = boxes[0, indices[scores_sort], :]
        image_scores = scores[scores_sort]
        image_labels = labels[0, indices[scores_sort]]
        image_detections = np.concatenate([
            image_boxes,
            np.expand_dims(image_scores, axis=1),
            np.expand_dims(image_labels, axis=1)
        ], axis=1)

        # name the output image from the tile and window
        fname = os.path.splitext(row["tile"])[0] + "_" + str(row["window"])

        # drape boxes if they exist
        if len(image_boxes) > 0:
            # load the lidar cloud for a new tile, or when this image comes
            # from a different tile than the previous image
            if generator.with_lidar:
                if i == 0:
                    generator.load_lidar_tile()
                elif generator.image_data[generator.image_names[i]]["tile"] != \
                        generator.image_data[generator.image_names[i - 1]]["tile"]:
                    generator.load_lidar_tile()

            # The tile could be the full tile, so check just the 400 pixel crop of interest.
            # Not the best structure, but the on-the-fly generator always has 0 bounds.
            if hasattr(generator, 'hf'):
                bounds = generator.hf["utm_coords"][generator.row["window"]]
            else:
                bounds = []

        if save_path is not None:
            draw_annotations(plot_rgb,
                             generator.load_annotations(i),
                             label_to_name=generator.label_to_name)
            draw_detections(plot_rgb,
                            image_boxes,
                            image_scores,
                            image_labels,
                            label_to_name=generator.label_to_name,
                            score_threshold=score_threshold)

            # write the RGB visualization to disk
            cv2.imwrite(os.path.join(save_path, '{}.png'.format(fname)),
                        plot_rgb)

            if experiment:
                experiment.log_image(os.path.join(save_path,
                                                  '{}.png'.format(fname)),
                                     name=fname)

        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = image_detections[
                image_detections[:, -1] == label, :-1]

    return all_detections
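
The nested list returned by _get_detections is indexed first by image and then by class, so each entry is a (num_detections, 5) array ready for per-class evaluation. A sketch of tallying detections per class, reusing the generator from above:

# tally detections per class across all images
for label in range(generator.num_classes()):
    count = sum(len(all_detections[i][label]) for i in range(generator.size()))
    print('{}: {} detections'.format(generator.label_to_name(label), count))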
Example #6
def _get_detections(generator,
                    model,
                    score_threshold=0.05,
                    max_detections=300,
                    save_path=None,
                    experiment=None,
                    postprocess=True):
    """ Get the detections from the model using the generator.

    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]

    # Arguments
        generator       : The generator used to run images through the model.
        model           : The model to run on the images.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to.
        experiment    : Comet ML experiment
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    all_detections = [[None for i in range(generator.num_classes())]
                      for j in range(generator.size())]

    for i in range(generator.size()):
        raw_image = generator.load_image(i)
        # copy the display window and reverse the channel order for OpenCV drawing
        plot_image = generator.retrieve_window()
        plot_image = copy.deepcopy(plot_image[:, :, ::-1])

        # skip if missing a component data source
        if raw_image is False:
            print("Empty image, skipping")
            continue

        # preprocess and resize for prediction
        image = generator.preprocess_image(raw_image)
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(
            np.expand_dims(image, axis=0))[:3]

        # correct boxes for image scale
        boxes /= scale

        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]

        # select those scores
        scores = scores[0][indices]

        # find the order with which to sort the scores
        scores_sort = np.argsort(-scores)[:max_detections]

        # select detections
        image_boxes = boxes[0, indices[scores_sort], :]
        image_scores = scores[scores_sort]
        image_labels = labels[0, indices[scores_sort]]
        image_detections = np.concatenate([
            image_boxes,
            np.expand_dims(image_scores, axis=1),
            np.expand_dims(image_labels, axis=1)
        ], axis=1)

        # name the output image from the tile and window
        image_name = generator.image_names[i]
        row = generator.image_data[image_name]
        fname = os.path.splitext(row["tile"])[0] + "_" + str(row["window"])

        if save_path is not None:
            draw_annotations(plot_image,
                             generator.load_annotations(i),
                             label_to_name=generator.label_to_name)
            draw_detections(plot_image,
                            image_boxes,
                            image_scores,
                            image_labels,
                            label_to_name=generator.label_to_name,
                            score_threshold=score_threshold)

            # write the RGB visualization to disk
            cv2.imwrite(os.path.join(save_path, '{}.png'.format(fname)),
                        plot_image)

            if experiment:
                experiment.log_image(os.path.join(save_path,
                                                  '{}.png'.format(fname)),
                                     name=fname)

        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = image_detections[
                image_detections[:, -1] == label, :-1]

    return all_detections
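
As a usage sketch, _get_detections expects an inference model whose first three outputs are boxes, scores, and labels; the threshold and detection cap below are illustrative choices, not values from the original code:

def evaluate_model(generator, model, out_dir='debug'):
    # run detection over every image and save annotated visualizations
    return _get_detections(generator,
                           model,
                           score_threshold=0.3,
                           max_detections=100,
                           save_path=out_dir)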