Example #1
def local_eval(func, model, image_size, test_path, name_path, verbose):
    # temporary file, named with a timestamp, that collects the detection results
    tmp_path = 'tmp' + time.strftime("%Y%m%d%H%M", time.localtime())
    if os.path.exists(tmp_path):
        os.remove(tmp_path)

    with open(test_path) as f:
        lines = f.readlines()
    paths = [line.split()[0] for line in lines]

    infer_time = []
    with open(tmp_path, 'a+') as f:
        for i, path in enumerate(paths, 1):
            if i == 1:
                sys.stdout.write('\n')
            sys.stdout.write('\r' + keras_bar(i, len(paths)))
            image = read_image(path)
            h, w = image.shape[:2]
            image = preprocess_image(image, (image_size, image_size)).astype(
                np.float32)
            images = np.expand_dims(image, axis=0)

            tic = time.time()
            bboxes, scores, classes, valid_detections = model.predict(images)
            toc = time.time()
            infer_time.append(toc - tic)

            bboxes = bboxes[0][:valid_detections[0]]
            scores = scores[0][:valid_detections[0]]
            classes = classes[0][:valid_detections[0]]

            bboxes *= image_size
            _, bboxes = preprocess_image_inv(image, (w, h), bboxes)

            line = path
            for bbox, score, cls in zip(bboxes, scores, classes):
                x1, y1, x2, y2 = bbox
                line += " {:.2f},{:.2f},{:.2f},{:.2f},{},{:.4f}".format(
                    x1, y1, x2, y2, int(cls), score)

            f.write(line + '\n')

    ans = func(test_path, tmp_path, name_path, verbose)
    # remove tmp
    os.remove(tmp_path)

    if verbose:
        # skip the first 5 iterations as warm-up when there are enough samples
        if len(infer_time) > 5:
            s = np.mean(infer_time[5:])
        else:
            s = np.mean(infer_time)

        print('Inference time', s * 1000, 'ms')

    return ans
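
A usage sketch for local_eval: it runs the detector over every image listed in test_path, writes the predictions to a temporary file, and hands both files to the scoring callback func. The callback name and the extra cfg keys below are placeholders, not part of the snippet above.

# Hypothetical wiring; voc_map_eval and the cfg keys 'anno_path' / 'name_path'
# are placeholders for whatever scoring function and config the project provides.
def voc_map_eval(test_path, result_path, name_path, verbose):
    # compare ground-truth annotations with the predictions in result_path
    # and return a single metric such as mAP
    return 0.0

mAP = local_eval(voc_map_eval, model,
                 image_size=cfg['test']['image_size'][0],
                 test_path=cfg['test']['anno_path'],
                 name_path=cfg['yolo']['name_path'],
                 verbose=True)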
Example #2
    def _getitem(self, sub_idx):
        path, bboxes, labels = self.annotation[sub_idx]
        image = read_image(path)

        if len(bboxes) != 0:
            bboxes, labels = np.array(bboxes), np.array(labels)
        else:
            bboxes, labels = np.zeros((0, 4)), np.zeros((0,))

        image, bboxes = preprocess_image(image, (self._image_size, self._image_size), bboxes)
        labels = augment.onehot(labels, self.num_classes, self.label_smoothing)

        return image, bboxes, labels
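
_getitem returns one sample with a variable number of boxes, so a common follow-up step is to pad each sample's boxes and labels to a fixed maximum before stacking a batch. A minimal sketch of that idea (pad_batch and max_boxes are illustrative, not part of this codebase):

# Minimal batching sketch (not from the repository): pad variable-length
# bboxes/labels to a fixed maximum so a list of samples stacks into dense arrays.
import numpy as np

def pad_batch(samples, max_boxes=100):
    images, all_bboxes, all_labels = [], [], []
    for image, bboxes, labels in samples:          # samples: outputs of _getitem
        n = min(len(bboxes), max_boxes)
        padded_boxes = np.zeros((max_boxes, 4), dtype=np.float32)
        padded_boxes[:n] = bboxes[:n]
        padded_labels = np.zeros((max_boxes,) + labels.shape[1:], dtype=np.float32)
        padded_labels[:n] = labels[:n]
        images.append(image)
        all_bboxes.append(padded_boxes)
        all_labels.append(padded_labels)
    return np.stack(images), np.stack(all_bboxes), np.stack(all_labels)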
Example #3
    def _getitem(self, sub_idx):
        path, bboxes, labels = self.annotation[sub_idx]
        image = read_image(path)
        bboxes, labels = np.array(bboxes), np.array(labels)

        image = augment.random_distort(image)
        image = augment.random_grayscale(image)
        image, bboxes = augment.random_flip_lr(image, bboxes)
        image, bboxes = augment.random_rotate(image, bboxes)
        image, bboxes, labels = augment.random_crop_and_zoom(
            image, bboxes, labels, (self._image_size, self._image_size))

        #image, bboxes = preprocess_image(image, (self._image_size, self._image_size), bboxes)

        labels = augment.onehot(labels, self.num_classes, self.label_smoothing)

        return image, bboxes, labels
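
The final step converts integer class ids into smoothed one-hot targets. For reference, label smoothing is usually implemented like the generic sketch below; this is an assumption about what augment.onehot computes, not the repository's actual code.

# Illustrative only: a typical one-hot encoding with label smoothing.
import numpy as np

def onehot_smooth(labels, num_classes, smoothing=0.0):
    labels = np.asarray(labels, dtype=np.int64)
    onehot = np.eye(num_classes, dtype=np.float32)[labels]   # (N, num_classes)
    # spread the 'smoothing' probability mass uniformly over all classes
    return onehot * (1.0 - smoothing) + smoothing / num_classes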
Example #4
def main(_argv):
    # read config
    print('Config File From:', FLAGS.config)
    print('Media From:', FLAGS.media)
    print('Use GPU:', FLAGS.gpu)

    if not FLAGS.gpu:
        import os
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    cfg = decode_cfg(FLAGS.config)

    model_type = cfg['yolo']['type']
    if model_type == 'yolov3':
        from core.model.one_stage.yolov3 import YOLOv3 as Model
    elif model_type == 'yolov3_tiny':
        from core.model.one_stage.yolov3 import YOLOv3_Tiny as Model
    elif model_type == 'yolov4':
        from core.model.one_stage.yolov4 import YOLOv4 as Model
    elif model_type == 'yolov4_tiny':
        from core.model.one_stage.yolov4 import YOLOv4_Tiny as Model
    elif model_type == 'yolox':
        from core.model.one_stage.custom import YOLOX as Model
    else:
        raise NotImplementedError()

    _, model = Model(cfg)
    model.summary()

    init_weight_path = cfg['test']['init_weight_path']
    if init_weight_path:
        print('Load Weights File From:', init_weight_path)
        load_weights(model, init_weight_path)
    else:
        raise SystemExit('init_weight_path is empty!')

    # assign colors for different labels
    shader = Shader(cfg['yolo']['num_classes'])
    names = cfg['yolo']['names']
    image_size = cfg['test']['image_size'][0]

    #model.save('E:/dm/repo/yolox/ckpts/tmp/voc_yolov4_tiny_SM_DM_CIoU_FL/yolov4_tiny_best/yolov4_tiny.h5')

    # full_model = tf.function(lambda Input: model(Input))
    # full_model = full_model.get_concrete_function(tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
    #
    # # Get frozen ConcreteFunction
    # frozen_func = convert_variables_to_constants_v2(full_model)
    # frozen_func.graph.as_graph_def()
    #
    # layers = [op.name for op in frozen_func.graph.get_operations()]
    # print("-" * 50)
    # print("Frozen model layers: ")
    # for layer in layers:
    #     print(layer)
    #
    # print("-" * 50)
    # print("Frozen model inputs: ")
    # print(frozen_func.inputs)
    # print("Frozen model outputs: ")
    # print(frozen_func.outputs)
    #
    # # Save frozen graph from frozen ConcreteFunction to hard drive
    # tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
    #                   logdir="./frozen_models",
    #                   name="yolov4_tiny_tf.pb",
    #                   as_text=False)

    def inference(image):

        h, w = image.shape[:2]
        image = preprocess_image(image,
                                 (image_size, image_size)).astype(np.float32)
        images = np.expand_dims(image, axis=0)

        tic = time.time()
        bboxes, scores, classes, valid_detections = model.predict(images)
        toc = time.time()

        bboxes = bboxes[0][:valid_detections[0]]
        scores = scores[0][:valid_detections[0]]
        classes = classes[0][:valid_detections[0]]

        # bboxes *= image_size
        _, bboxes = postprocess_image(image, (w, h), bboxes)

        return (toc - tic) * 1000, bboxes, scores, classes

    if (FLAGS.media.startswith('rtsp') or FLAGS.media.isdigit()
            or FLAGS.media.endswith('.mp4') or FLAGS.media.endswith('.avi')):
        from collections import deque

        d = deque(maxlen=10)
        media = read_video(FLAGS.media)

        while True:

            ret, image = media.read()
            if not ret: break

            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            ms, bboxes, scores, classes = inference(image)
            image = draw_bboxes(image, bboxes, scores, classes, names, shader)
            d.append(ms)

            mms = np.mean(d)
            print('Inference Time:', mms, 'ms')
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            image = cv2.putText(image, "{:.2f} ms".format(mms), (0, 30),
                                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0),
                                2)
            cv2.imshow('Image', image)
            if cv2.waitKey(33) == ord('q'):
                break

        media.release()

    elif FLAGS.media.endswith('.jpg') or FLAGS.media.endswith('.png'):
        image = read_image(FLAGS.media)

        ms, bboxes, scores, classes = inference(image)
        image = draw_bboxes(image, bboxes, scores, classes, names, shader)

        print('Inference Time:', ms, 'ms')
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow('Image', image)
        cv2.waitKey()
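
main() relies on absl flags (FLAGS.config, FLAGS.media, FLAGS.gpu). A sketch of how such a script is typically wired up with absl-py; the defaults, help strings, and the config path here are assumptions, not taken from the project.

# Hypothetical flag declarations and entry point for the script above.
from absl import app, flags

flags.DEFINE_string('config', 'cfgs/voc_yolov4_tiny.yaml', 'path to the config file')
flags.DEFINE_string('media', '0', 'image/video path, rtsp url, or camera index')
flags.DEFINE_boolean('gpu', True, 'run inference on GPU')
FLAGS = flags.FLAGS

if __name__ == '__main__':
    app.run(main)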
Example #5
def main(_argv):
    # read config
    print('Config File From:', FLAGS.config)
    print('Media From:', FLAGS.media)
    print('Use GPU:', FLAGS.gpu)

    if not FLAGS.gpu:
        import os
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

    cfg = decode_cfg(FLAGS.config)

    model_type = cfg['yolo']['type']
    if model_type == 'yolov3':
        from core.model.one_stage.yolov3 import YOLOv3 as Model
    elif model_type == 'yolov3_tiny':
        from core.model.one_stage.yolov3 import YOLOv3_Tiny as Model
    elif model_type == 'yolov4':
        from core.model.one_stage.yolov4 import YOLOv4 as Model
    elif model_type == 'yolov4_tiny':
        from core.model.one_stage.yolov4 import YOLOv4_Tiny as Model
    else:
        raise NotImplementedError()

    _, model = Model(cfg)
    model.summary()

    init_weight_path = cfg['test']['init_weight_path']
    if init_weight_path:
        print('Load Weights File From:', init_weight_path)
        load_weights(model, init_weight_path)
    else:
        raise SystemExit('init_weight_path is empty!')

    # assign colors for different labels
    shader = Shader(cfg['yolo']['num_classes'])
    names = cfg['yolo']['names']
    image_size = cfg['test']['image_size'][0]

    def inference(image):

        h, w = image.shape[:2]
        image = preprocess_image(image,
                                 (image_size, image_size)).astype(np.float32)
        images = np.expand_dims(image, axis=0)

        tic = time.time()
        bboxes, scores, classes, valid_detections = model.predict(images)
        toc = time.time()

        bboxes = bboxes[0][:valid_detections[0]]
        scores = scores[0][:valid_detections[0]]
        classes = classes[0][:valid_detections[0]]

        # bboxes *= image_size
        _, bboxes = postprocess_image(image, (w, h), bboxes)

        return (toc - tic) * 1000, bboxes, scores, classes

    if (FLAGS.media.startswith('rtsp') or FLAGS.media.isdigit()
            or FLAGS.media.endswith('.mp4') or FLAGS.media.endswith('.avi')):
        from collections import deque

        d = deque(maxlen=10)
        media = read_video(FLAGS.media)

        while True:

            ret, image = media.read()
            if not ret: break

            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            ms, bboxes, scores, classes = inference(image)
            image = draw_bboxes(image, bboxes, scores, classes, names, shader)
            d.append(ms)

            mms = np.mean(d)
            print('Inference Time:', mms, 'ms')
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            image = cv2.putText(image, "{:.2f} ms".format(mms), (0, 30),
                                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0),
                                2)
            cv2.imshow('Image', image)
            if cv2.waitKey(33) == ord('q'):
                break

        media.release()

    elif FLAGS.media.endswith('.jpg') or FLAGS.media.endswith('.png'):
        image = read_image(FLAGS.media)

        ms, bboxes, scores, classes = inference(image)
        image = draw_bboxes(image, bboxes, scores, classes, names, shader)

        print('Inference Time:', ms, 'ms')
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow('Image', image)
        cv2.waitKey()
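
Both detection scripts lean on a preprocess/postprocess pair to letterbox the input and map boxes back to the original resolution. A generic version of that round trip looks roughly like this; it is a sketch of the common technique, and the project's own preprocess_image / postprocess_image helpers may differ in signature and details (padding value, normalization, rounding).

# Generic letterbox pair, shown only to illustrate the idea.
import cv2
import numpy as np

def letterbox(image, size):
    # resize keeping aspect ratio, then pad to size = (width, height)
    h, w = image.shape[:2]
    scale = min(size[0] / w, size[1] / h)
    nw, nh = int(w * scale), int(h * scale)
    resized = cv2.resize(image, (nw, nh))
    canvas = np.full((size[1], size[0], 3), 128, dtype=resized.dtype)
    dx, dy = (size[0] - nw) // 2, (size[1] - nh) // 2
    canvas[dy:dy + nh, dx:dx + nw] = resized
    return canvas / 255.0, scale, (dx, dy)

def unletterbox_bboxes(bboxes, scale, offset):
    # map boxes from padded-image pixels back to original-image pixels
    dx, dy = offset
    bboxes = np.asarray(bboxes, dtype=np.float32).copy()
    bboxes[:, [0, 2]] = (bboxes[:, [0, 2]] - dx) / scale
    bboxes[:, [1, 3]] = (bboxes[:, [1, 3]] - dy) / scale
    return bboxes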