Example #1
    def detect_image(self, image):
        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x * 1.0 / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        random.seed(10101)  # Fixed seed for consistent colors across runs.
        random.shuffle(
            self.colors)  # Shuffle colors to decorrelate adjacent classes.
        random.seed(None)  # Reset seed to default.
        if self.is_fixed_size:
            assert self.INPUT_SIZE[0] % 32 == 0, 'Multiples of 32 required'
            assert self.INPUT_SIZE[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image, image_shape = letterbox_image(
                image, tuple(reversed(self.INPUT_SIZE)))
            # boxed_image, image_shape = resize_image(image, tuple(reversed(self.INPUT_SIZE)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image, image_shape = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        image_data /= 255.
        inputs = np.expand_dims(image_data, 0)  # Add batch dimension.
        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.x: inputs,
                self.image_shape: image_shape,
            })
        print('Found {} boxes in the image'.format(len(out_boxes)))
        # Defaults returned when no boxes are detected.
        left = 0
        top = 0
        right = 0
        bottom = 0

        # Visualisation
        font = ImageFont.truetype(font=font_file,
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype(np.int32))
        thickness = (image.size[0] + image.size[1]) // 500  # bounding-box border thickness

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box  # y_min, x_min, y_max, x_max
            top = max(0, np.floor(top + 0.5).astype(np.int32))
            left = max(0, np.floor(left + 0.5).astype(np.int32))
            bottom = min(image.size[1],
                         np.floor(bottom + 0.5).astype(np.int32))
            right = min(image.size[0], np.floor(right + 0.5).astype(np.int32))
            print(label, (left, top),
                  (right, bottom))  # (x_min, y_min), (x_max, y_max)

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for j in range(thickness):
                draw.rectangle([left + j, top + j, right - j, bottom - j],
                               outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw
        return image, left, top, right, bottom
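
A minimal usage sketch for this method (hedged: `YOLO` here stands in for whatever wrapper class defines `detect_image` and builds `self.sess`, `self.boxes`, `self.scores` and `self.classes`; the file names are placeholders):

from PIL import Image

yolo = YOLO()  # hypothetical wrapper class
image = Image.open('test.jpg')
annotated, left, top, right, bottom = yolo.detect_image(image)
annotated.save('test_annotated.jpg')
print('Last box:', (left, top), (right, bottom))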
Example #2
def predict_all_to_json(out_file,
                        model,
                        img_height,
                        img_width,
                        classes_to_cats,
                        data_generator,
                        batch_size,
                        data_generator_mode='resize',
                        mode='ssd300',
                        confidence_thresh=0.01,
                        iou_threshold=0.45,
                        top_k=200,
                        pred_coords='centroids',
                        normalize_coords=True):
    '''
    Runs detection predictions over the whole dataset given a model and saves them in a JSON file
    in the MS COCO detection results format.
    Arguments:
        out_file (str): The file name (full path) under which to save the results JSON file.
        model (Keras model): A Keras SSD model object.
        img_height (int): The input image height for the model.
        img_width (int): The input image width for the model.
        classes_to_cats (dict): A dictionary that maps the consecutive class IDs predicted by the model
            to the non-consecutive original MS COCO category IDs.
        data_generator (DataGenerator): A `DataGenerator` object with the evaluation dataset.
        batch_size (int): The batch size for the evaluation.
        data_generator_mode (str, optional): Either of 'resize' or 'pad'. If 'resize', the input images will
            be resized (i.e. warped) to `(img_height, img_width)`. This mode does not preserve the aspect ratios of the images.
            If 'pad', the input images will be first padded so that they have the aspect ratio defined by `img_height`
            and `img_width` and then resized to `(img_height, img_width)`. This mode preserves the aspect ratios of the images.
        mode (str, optional): The model variant being evaluated, e.g. 'ssd300', 'yolo320', 'yolo416' or 'yolo608'.
            For the YOLO variants, the input images are additionally letterboxed to `(img_height, img_width)`
            before prediction.
        confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
            positive class in order to be considered for the non-maximum suppression stage for the respective class.
            A lower value will result in a larger part of the selection process being done by the non-maximum suppression
            stage, while a larger value will result in a larger part of the selection process happening in the confidence
            thresholding stage.
        iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold`
            with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
            to the box score.
        top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
            non-maximum suppression stage. Defaults to 200, following the paper.
        pred_coords (str, optional): The box coordinate format that the model outputs. Can be either 'centroids'
            for the format `(cx, cy, w, h)` (box center coordinates, width, and height), 'minmax' for the format
            `(xmin, xmax, ymin, ymax)`, or 'corners' for the format `(xmin, ymin, xmax, ymax)`.
        normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])
            and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs
            relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.
            Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect
            coordinates. Requires `img_height` and `img_width` if set to `True`.
    Returns:
        None.
    '''

    convert_to_3_channels = ConvertTo3Channels()
    resize = Resize(height=img_height, width=img_width)
    if data_generator_mode == 'resize':
        transformations = [convert_to_3_channels, resize]
    elif data_generator_mode == 'pad':
        random_pad = RandomPadFixedAR(
            patch_aspect_ratio=img_width / img_height, clip_boxes=False)
        transformations = [convert_to_3_channels, random_pad, resize]
    else:
        raise ValueError(
            "Unexpected argument value: `data_generator_mode` can be either of 'resize' or 'pad', but received '{}'."
            .format(data_generator_mode))

    # Set the generator parameters.
    generator = data_generator.generate(
        batch_size=batch_size,
        shuffle=False,
        transformations=transformations,
        label_encoder=None,
        returns={'processed_images', 'image_ids', 'inverse_transform'},
        keep_images_without_gt=True)
    # Put the results in this list.
    results = []
    # Compute the number of batches to iterate over the entire dataset.
    n_images = data_generator.get_dataset_size()
    print("Number of images in the evaluation dataset: {}".format(n_images))
    n_batches = int(ceil(n_images / batch_size))
    # Loop over all batches.
    tr = trange(n_batches, file=sys.stdout)
    tr.set_description('Producing results file')
    for i in tr:
        # Generate batch.
        batch_X, batch_image_ids, batch_inverse_transforms = next(generator)
        # Pre-process inputs for the YOLO variants (letterboxing).
        if mode in ['yolo320', 'yolo416', 'yolo608']:
            tmp = []
            for item in batch_X:
                item = Image.fromarray(item)
                tmp.append(
                    np.array(letterbox_image(item, (img_width, img_height))))
            batch_X = tmp
        # Predict.
        y_pred = model.predict(batch_X)

        # Filter out the all-zeros dummy elements of `y_pred`. Use a fresh loop
        # variable so the outer batch index `i` is not clobbered.
        y_pred_filtered = []
        for j in range(len(y_pred)):
            y_pred_filtered.append(y_pred[j][y_pred[j, :, 0] != 0])
        y_pred = y_pred_filtered
        # Convert the predicted box coordinates for the original images.
        y_pred = apply_inverse_transforms(y_pred, batch_inverse_transforms)

        # Convert each predicted box into the results format.
        for k, batch_item in enumerate(y_pred):
            for box in batch_item:
                class_id = box[0]
                # Transform the consecutive class IDs back to the original COCO category IDs.
                cat_id = classes_to_cats[class_id]
                # Round the box coordinates to reduce the JSON file size.
                xmin = float(round(box[2], 1))
                ymin = float(round(box[3], 1))
                xmax = float(round(box[4], 1))
                ymax = float(round(box[5], 1))
                width = xmax - xmin
                height = ymax - ymin
                bbox = [xmin, ymin, width, height]
                result = {}
                result['image_id'] = batch_image_ids[k]
                result['category_id'] = cat_id
                result['score'] = float(round(box[1], 3))
                result['bbox'] = bbox
                results.append(result)

    with open(out_file, 'w') as f:
        json.dump(results, f)

    print("Prediction results saved in '{}'".format(out_file))
Example #3
def get_training_data(annotation_path,
                      data_path,
                      input_shape,
                      anchors,
                      num_classes,
                      max_boxes=1):
    """
    processes the data into standard shape
    :param annotation_path: path_to_image box1,box2,...,boxN with boxX: x_min,y_min,x_max,y_max,class_index
    :param data_path: saver at "/home/minh/stage/train.npz"
    :param input_shape: (416, 416)
    :param max_boxes: 100: maximum number objects of an image
    :param load_previous: for 2nd, 3th, .. using
    :return: image_data [N, 416, 416, 3] not yet normalized, N: number of image
             box_data: box format: [N, 100, 5], 100: maximum number of an image
                                                5: top_left{x_min,y_min},bottom_right{x_max,y_max},class_index (no space)
                                                /home/minh/keras-yolo3/VOCdevkit/VOC2007/JPEGImages/000012.jpg 156,97,351,270,6
    """
    image_data = []
    box_data = []
    image_shape = []
    with open(annotation_path) as f:
        annotation_lines = f.readlines()
        np.random.shuffle(annotation_lines)
        for line in annotation_lines:
            line = line.split('$$')
            filename = line[0].rstrip('\n')
            image = Image.open(filename)
            # e.g. shape_image = [360, 640] (original height, width); boxed_image is 416x416
            boxed_image, shape_image = letterbox_image(
                image, tuple(reversed(input_shape)))
            image_data.append(
                np.array(boxed_image,
                         dtype=np.uint8))  # pixel values in [0, 255] (uint8)
            image_shape.append(np.array(shape_image))

            boxes = np.zeros((max_boxes, 5), dtype=np.int32)
            # Correct the BBs to the resized image.
            if len(line) == 1:  # no objects in this image; skip to avoid a double append below
                box_data.append(boxes)
                continue
            for i, box in enumerate(line[1:]):
                # take first n boxes into account. Here n = max_boxes.
                if i < max_boxes:
                    boxes[i] = np.array(list(map(int, box.split(','))))
                else:
                    break
                # image_size = [640 360]
                image_size = np.array(image.size)
                # Reverse the input_shape array. input_size = [416 416]
                input_size = np.array(input_shape[::-1])
                # new_size = [416 234]. It is the image after resizing.
                new_size = (image_size *
                            np.min(input_size * 1.0 / image_size)).astype(
                                np.int32)
                # Correct BB to new image. As the image resizes, the BBs should also resize accordingly.
                boxes[i:i + 1, 0:2] = (
                    boxes[i:i + 1, 0:2] * new_size * 1.0 / image_size +
                    (input_size - new_size) / 2.).astype(np.int32)
                boxes[i:i + 1, 2:4] = (
                    boxes[i:i + 1, 2:4] * new_size * 1.0 / image_size +
                    (input_size - new_size) / 2.).astype(np.int32)
            box_data.append(boxes)
    image_shape = np.array(image_shape)
    image_data = np.array(image_data)
    box_data = np.array(box_data)
    y_true = preprocess_true_boxes(box_data, input_shape[0], anchors,
                                   num_classes)
    # Optionally cache the processed data, e.g.:
    # np.savez(data_path, image_data=image_data, box_data=box_data, image_shape=image_shape,
    #          y_true0=y_true[0], y_true1=y_true[1], y_true2=y_true[2])

    # image_data: [N, 416, 416, 3]; box_data: [N, max_boxes, 5]; image_shape: [N, 2]
    return image_data, box_data, image_shape, y_true
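
A usage sketch under the assumptions above (the annotation file and cache path are placeholders; the anchors shown are the standard YOLOv3 defaults):

import numpy as np

# Standard YOLOv3 anchors as (width, height) pairs in pixels.
anchors = np.array([[10, 13], [16, 30], [33, 23],
                    [30, 61], [62, 45], [59, 119],
                    [116, 90], [156, 198], [373, 326]])
image_data, box_data, image_shape, y_true = get_training_data(
    'train_annotations.txt',  # hypothetical '$$'-separated annotation file
    'train.npz',              # hypothetical cache path (saving is commented out above)
    input_shape=(416, 416),
    anchors=anchors,
    num_classes=20,
    max_boxes=100)
print(image_data.shape, box_data.shape, image_shape.shape)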
Example #4
    def detect_image(self, image):
        start = time.time()

        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        random.seed(10101)  # Fixed seed for consistent colors across runs.
        random.shuffle(
            self.colors)  # Shuffle colors to decorrelate adjacent classes.
        random.seed(None)  # Reset seed to default.

        if self.is_fixed_size:
            assert self.INPUT_SIZE[0] % 32 == 0, 'Multiples of 32 required'
            assert self.INPUT_SIZE[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image, image_shape = letterbox_image(
                image, tuple(reversed(self.INPUT_SIZE)))
            # boxed_image, image_shape = resize_image(image, tuple(reversed(self.INPUT_SIZE)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image, image_shape = letterbox_image(image, new_image_size)
            # boxed_image, image_shape = resize_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        print("heights, widths:", image_shape)
        image_data /= 255.
        inputs = np.expand_dims(image_data, 0)  # Add batch dimension. #

        # Placeholder for the input image batch.
        x = tf.placeholder(tf.float32,
                           shape=[None, Input_shape, Input_shape, channels])

        # Generate output tensor targets for filtered bounding boxes.
        scale1, scale2, scale3 = YOLOv3(x, len(
            self.class_names)).feature_extractor()
        scale_total = [scale1, scale2, scale3]

        # detect
        boxes, scores, classes = predict(scale_total,
                                         self.anchors,
                                         len(self.class_names),
                                         image_shape,
                                         score_threshold=self.threshold,
                                         iou_threshold=self.ignore_thresh)

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()

        writer = tf.summary.FileWriter('./graphs', tf.get_default_graph())
        # tensorboard --logdir="./graphs" --port 6006
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # Restore variables from disk.
            epoch = input('Enter a checkpoint epoch (multiple of 10): ')
            checkpoint = "/home/minh/stage/saver_model/model" + str(
                epoch) + ".ckpt"
            if tf.train.checkpoint_exists(checkpoint):
                saver.restore(sess, checkpoint)
                print("Checkpoint restored.")
            else:
                print("No checkpoint found; the model has not been trained yet!")

            out_boxes, out_scores, out_classes = sess.run(
                [boxes, scores, classes], feed_dict={x: inputs})

        writer.close()
        print('Found {} boxes in the image'.format(len(out_boxes)))

        # Visualisation
        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] +
                                                0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 400  # bounding-box border thickness

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            top, left, bottom, right = box  # y_min, x_min, y_max, x_max
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top),
                  (right, bottom))  # (x_min, y_min), (x_max, y_max)

            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # My kingdom for a good redistributable image drawing library.
            for j in range(thickness):
                draw.rectangle([left + j, top + j, right - j, bottom - j],
                               outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin),
                 tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        end = time.time()
        print('Inference time: {:.2f}s'.format(end - start))
        return image
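
Examples #1, #3 and #4 call a `letterbox_image` helper that returns both the padded image and the original (height, width). A minimal sketch of that variant (gray-padded, aspect-preserving resize); the name `letterbox_image_sketch` is used here to avoid clashing with the real helper:

from PIL import Image
import numpy as np

def letterbox_image_sketch(image, size):
    # Scale `image` to fit inside `size` (width, height) without changing
    # its aspect ratio, pad the remainder with gray, and return the padded
    # image together with the original (height, width).
    iw, ih = image.size
    w, h = size
    scale = min(w / iw, h / ih)
    nw, nh = int(iw * scale), int(ih * scale)
    resized = image.resize((nw, nh), Image.BICUBIC)
    boxed = Image.new('RGB', size, (128, 128, 128))
    boxed.paste(resized, ((w - nw) // 2, (h - nh) // 2))
    return boxed, np.array([ih, iw])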
Example #5
    def detect_image(self, image):
        image_shape = np.array(np.shape(image)[0:2])

        crop_img = np.array(
            letterbox_image(
                image, (self.model_image_size[0], self.model_image_size[1])))
        photo = np.array(crop_img, dtype=np.float32)
        photo /= 255.0
        photo = np.transpose(photo, (2, 0, 1))  # HWC -> CHW
        images = np.asarray([photo])  # add the batch dimension

        with torch.no_grad():
            images = torch.from_numpy(images)
            if self.cuda:
                images = images.cuda()
            outputs = self.net(images)

        output_list = []
        for i in range(3):
            output_list.append(self.yolo_decodes[i](outputs[i]))
        output = torch.cat(output_list, 1)
        batch_detections = non_max_suppression(output,
                                               len(self.class_names),
                                               conf_thres=self.confidence,
                                               nms_thres=0.3)
        try:
            batch_detections = batch_detections[0].cpu().numpy()
        except AttributeError:  # non_max_suppression returned None: no detections
            return [], [], []

        top_index = batch_detections[:, 4] * batch_detections[:, 5] > self.confidence
        top_conf = batch_detections[top_index, 4] * batch_detections[top_index, 5]
        top_label = np.array(batch_detections[top_index, -1], np.int32)
        top_bboxes = np.array(batch_detections[top_index, :4])
        top_xmin = np.expand_dims(top_bboxes[:, 0], -1)
        top_ymin = np.expand_dims(top_bboxes[:, 1], -1)
        top_xmax = np.expand_dims(top_bboxes[:, 2], -1)
        top_ymax = np.expand_dims(top_bboxes[:, 3], -1)

        # Remove the gray letterbox padding (map boxes back to the original image).
        boxes = yolo_correct_boxes(
            top_ymin, top_xmin, top_ymax, top_xmax,
            np.array([self.model_image_size[0], self.model_image_size[1]]),
            image_shape)

        rectangle_class_names = []
        rectangle_scores = []
        rectangle_boxes = []

        for i, c in enumerate(top_label):
            rectangle_class_names.append(self.class_names[c])
            rectangle_scores.append(top_conf[i])

            top, left, bottom, right = boxes[i]
            # Expand each box by 5 pixels on every side.
            top = top - 5
            left = left - 5
            bottom = bottom + 5
            right = right + 5

            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(
                np.shape(image)[0],
                np.floor(bottom + 0.5).astype('int32'))
            right = min(
                np.shape(image)[1],
                np.floor(right + 0.5).astype('int32'))

            rectangle_boxes.append([top, left, bottom, right])

        return rectangle_class_names, rectangle_scores, rectangle_boxes
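
Unlike the earlier examples, this variant returns raw detections instead of a drawn image. A hedged sketch of consuming them (`YOLO` again stands in for the wrapper class that defines this method; file names are placeholders):

from PIL import Image, ImageDraw

yolo = YOLO()  # hypothetical wrapper class
image = Image.open('test.jpg')
names, scores, boxes = yolo.detect_image(image)

draw = ImageDraw.Draw(image)
for name, score, (top, left, bottom, right) in zip(names, scores, boxes):
    draw.rectangle([left, top, right, bottom], outline=(255, 0, 0))
    draw.text((left, max(0, top - 10)),
              '{} {:.2f}'.format(name, score), fill=(255, 0, 0))
image.save('test_annotated.jpg')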