Example #1
def main():
    import sys
    import cv2

    namesfile = None
    if len(sys.argv) == 6:
        n_classes = int(sys.argv[1])
        weightfile = sys.argv[2]
        imgfile = sys.argv[3]
        height = int(sys.argv[4])
        width = int(sys.argv[5])
    elif len(sys.argv) == 7:
        n_classes = int(sys.argv[1])
        weightfile = sys.argv[2]
        imgfile = sys.argv[3]
        height = int(sys.argv[4])
        width = int(sys.argv[5])
        namesfile = sys.argv[6]
    else:
        print('Usage: ')
        print('  python models.py num_classes weightfile imgfile height width [namesfile]')
        return

    model = Yolov4(yolov4conv137weight=None, n_classes=n_classes, inference=True)

    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)

    # The inference input size (e.g. 416*416) need not match the training size;
    # training could have used 608*608 or another size.
    # Optional inference sizes:
    #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    for i in range(2):  # Run detection twice as a speed check;
                        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)

    if namesfile is None:
        if n_classes == 20:
            namesfile = 'data/voc.names'
        elif n_classes == 80:
            namesfile = 'data/coco.names'
        elif n_classes == 1:
            namesfile = 'data/smoke.names'
        else:
            print("please give namefile")

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
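For reference, a hypothetical invocation matching the two argument branches above (weight and image file names are placeholders):

# six values in sys.argv: the names file is inferred from num_classes
python models.py 80 yolov4.pth dog.jpg 416 416
# seven values: the names file is given explicitly
python models.py 80 yolov4.pth dog.jpg 416 416 data/coco.names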
Example #2
    def main(self):
        frame = cv2.imread(self.id)
        try:
            img, orig_im, dim = prep_image(frame, 160)

            im_dim = torch.FloatTensor(dim).repeat(1, 2)
            if self.CUDA:  # run on the GPU if CUDA is properly set up
                im_dim = im_dim.cuda()
                img = img.cuda()
            frame = cv2.imread(self.id)

            output = self.model(img)

            from tool.utils import post_processing, plot_boxes_cv2
            bounding_boxes = post_processing(img, self.confidence,
                                             self.nms_thesh, output)
            frame = plot_boxes_cv2(frame,
                                   bounding_boxes[0],
                                   savename=None,
                                   class_names=self.classes,
                                   color=None,
                                   colors=self.colors)

        except Exception:
            # detection failed; fall through and save the frame unannotated
            pass

        cv2.imwrite('IMG_0748-1.JPG', frame)
        torch.cuda.empty_cache()
Example #3
    def predict(self, image, session_id):
        '''Predict objects on an image and return the path of the image with bounding boxes drawn on it.'''

        out_path = f'./out/{session_id}.jpg'
        t1 = time.time()
        image_resized_arr = self.preprocess(image)
        boxes = do_detect(self.model, image_resized_arr, 0.4, 0.6,
                          self.use_cuda)
        plot_boxes_cv2(image_resized_arr, boxes[0], out_path, self.class_names)
        t2 = time.time()

        print("----------")
        print(f"total latency: {str((t2-t1)*1000)} ms")
        print("----------")

        return out_path
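As a side note, time.perf_counter() is generally a better clock than time.time() for short latency measurements, since it is monotonic and has higher resolution. A minimal variant of the timing above, reusing the names from this example:

import time

t1 = time.perf_counter()
boxes = do_detect(model, image_resized_arr, 0.4, 0.6, use_cuda)  # model and inputs as in the example above
t2 = time.perf_counter()
print(f"total latency: {(t2 - t1) * 1000:.1f} ms")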
Example #4
    def predict(self, image, session_id):
        '''Predict objects on an image and return the path of the image with bounding boxes drawn on it.'''
        out_path = f'./out/{session_id}.jpg'

        t1 = time.time()
        img = Image.open(io.BytesIO(image))

        boxes_arr = self.detect_boxes(image)
        plot_boxes_cv2(np.array(img), boxes_arr, out_path, self.class_names)
        t2 = time.time()

        print("----------")
        print(f"total latency: {str((t2-t1)*1000)} ms")
        print("----------")

        return out_path
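One caveat: Image.open decodes to RGB, while plot_boxes_cv2 draws and saves through OpenCV, which treats arrays as BGR. If the colors in the saved image look swapped, converting first may help; a minimal sketch that reuses the names from the method above:

# PIL decodes to RGB; convert to BGR before handing the array to OpenCV-based plotting
img_bgr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
plot_boxes_cv2(img_bgr, boxes_arr, out_path, self.class_names)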
Example #5
    def main(self):
        q = queue.Queue()
        while True:

            def frame_render(queue_from_cam):
                frame = self.cap.read()  # works when self.cap returns the frame directly
                # if you capture the stream with plain OpenCV (cv2.VideoCapture()), use the following line instead:
                # ret, frame = self.cap.read()
                frame = cv2.resize(frame, (self.width, self.height))
                queue_from_cam.put(frame)

            cam = threading.Thread(target=frame_render, args=(q, ))
            cam.start()
            cam.join()
            frame = q.get()
            q.task_done()
            fps = FPS().start()

            try:
                img, orig_im, dim = prep_image(frame, 160)

                im_dim = torch.FloatTensor(dim).repeat(1, 2)
                if self.CUDA:  # run on the GPU if CUDA is properly set up
                    im_dim = im_dim.cuda()
                    img = img.cuda()
                # with torch.no_grad():  # disable gradient tracking during inference

                output = self.model(img)
                from tool.utils import post_processing, plot_boxes_cv2
                bounding_boxes = post_processing(img, self.confidence,
                                                 self.nms_thesh, output)
                frame = plot_boxes_cv2(frame,
                                       bounding_boxes[0],
                                       savename=None,
                                       class_names=self.classes,
                                       color=None,
                                       colors=self.colors)

            except Exception:
                # detection failed on this frame; display it unannotated
                pass

            fps.update()
            fps.stop()
            print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
            print("[INFO] approx. FPS: {:.1f}".format(fps.fps()))

            cv2.imshow("Object Detection Window", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            torch.cuda.empty_cache()
Example #6
def detect(img):
    namesfile = 'data/coco.names'
    n_classes = 80
    weightfile = './yolov4.pth'
    imgfile = img
    height = 512
    width = 512

    model = Yolov4(yolov4conv137weight=None, n_classes=n_classes, inference=True)

    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)

    # The inference input size (e.g. 416*416) need not match the training size;
    # training could have used 608*608 or another size.
    # Optional inference sizes:
    #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    for i in range(2):  # Run detection twice as a speed check;
                        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)  # returns box positions and class names

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
    return boxes[0]
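The boxes returned by do_detect hold coordinates normalized to the 0-1 range, which is why later examples on this page multiply them by the image width and height. A minimal helper for that conversion, assuming each box starts with [x1, y1, x2, y2, ...] as in those examples:

def boxes_to_pixels(boxes, img_w, img_h):
    # rescale normalized [x1, y1, x2, y2, ...] corners to pixel coordinates
    pixel_boxes = []
    for box in boxes:
        x1, y1 = int(box[0] * img_w), int(box[1] * img_h)
        x2, y2 = int(box[2] * img_w), int(box[3] * img_h)
        pixel_boxes.append((x1, y1, x2, y2))
    return pixel_boxes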
Example #7
    def detect(self):
        """Detect if image is Aadhaar and save image if detected"""

        logger.info(f"The model expects input shape: {model.get_inputs()[0].shape}")

        image_src = cv2.imread(self.image_path)
        IN_IMAGE_H = model.get_inputs()[0].shape[2]
        IN_IMAGE_W = model.get_inputs()[0].shape[3]

        # Input
        resized = cv2.resize(
            image_src, (IN_IMAGE_W, IN_IMAGE_H), interpolation=cv2.INTER_LINEAR
        )
        img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
        img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
        img_in = np.expand_dims(img_in, axis=0)
        img_in /= 255.0
        logger.info(f"Shape of the network input after preprocessing: {img_in.shape}")

        # Compute
        input_name = model.get_inputs()[0].name

        outputs = model.run(None, {input_name: img_in})

        boxes = post_processing(img_in, 0.4, 0.6, outputs)
        logger.info(f"Post Processing output : {boxes}")

        if np.array(boxes).size:

            namesfile = cfg.NAMESFILE
            class_names = load_class_names(namesfile)
            if plot_boxes_cv2(
                image_src, boxes[0], savename=self.filename, class_names=class_names
            ):  # Detect image and save image with bounding boxes if Aadhaar card detected
                return 1
            else:
                return 0

        else:
            logger.info("Uploaded Image is not Aadhaar")
            return 0
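This example assumes model is an already-constructed ONNX Runtime inference session; a minimal setup sketch (the model path is a placeholder):

import onnxruntime

model = onnxruntime.InferenceSession("yolov4.onnx")  # hypothetical path
print(model.get_inputs()[0].name, model.get_inputs()[0].shape)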
Example #8

            # Display FPS on frame
            cv2.putText(img, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            # Display result
            cv2.imshow("Tracking", img)
            cv2.waitKey(1)
        start = time.time()
        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)
        finish = time.time()
        print('Predicted in %f seconds.' % (finish - start))
        print('Fps: %d' % int(1 / (finish - start)))
        class_names = load_class_names(namesfile)
        result_img = plot_boxes_cv2(img, boxes[0], savename=None, class_names=class_names)
        if len(boxes[0]) != 0:
            print("b")
            box = boxes[0][0]
            x1 = int(box[0] * 640)
            y1 = int(box[1] * 480)
            x2 = int(box[2] * 640)
            y2 = int(box[3] * 480)
            w = x2 - x1
            h = y2 - y1
            bbox = (x1, y1, w, h)
            nbox = (x1, y1, int(w / 2), int(h / 2))
            ok = tracker.init(img, bbox)
            tr = 1

Example #9
def demo_tensorflow(tfpb_file="./weight/yolov4.pb",
                    image_path=None,
                    print_sensor_name=False):
    graph_name = 'yolov4'
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session() as persisted_sess:
        print("loading graph...")
        with gfile.FastGFile(tfpb_file, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())

        persisted_sess.graph.as_default()
        tf.import_graph_def(graph_def, name=graph_name)

        # print all tensor names
        if print_sensor_name:
            tensor_name_list = [
                tensor.name for tensor in
                tf.compat.v1.get_default_graph().as_graph_def().node
            ]
            for tensor_name in tensor_name_list:
                print(tensor_name)

        inp = persisted_sess.graph.get_tensor_by_name(graph_name + '/' +
                                                      'input:0')
        print(inp.shape)
        out1 = persisted_sess.graph.get_tensor_by_name(graph_name + '/' +
                                                       'output_1:0')
        out2 = persisted_sess.graph.get_tensor_by_name(graph_name + '/' +
                                                       'output_2:0')
        out3 = persisted_sess.graph.get_tensor_by_name(graph_name + '/' +
                                                       'output_3:0')
        print(out1.shape, out2.shape, out3.shape)

        # image_src = np.random.rand(1, 3, 608, 608).astype(np.float32)  # input image
        # Input
        image_src = cv2.imread(image_path)
        resized = cv2.resize(image_src, (inp.shape[2], inp.shape[3]),
                             interpolation=cv2.INTER_LINEAR)
        img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
        img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
        img_in = np.expand_dims(img_in, axis=0)
        img_in /= 255.0
        print("Shape of the network input: ", img_in.shape)

        feed_dict = {inp: img_in}

        outputs = persisted_sess.run([out1, out2, out3], feed_dict)
        print(outputs[0].shape)
        print(outputs[1].shape)
        print(outputs[2].shape)

        boxes = post_processing(img_in, 0.4, 0.6, outputs)  # (img, conf_thresh, nms_thresh, outputs), matching the other examples

        num_classes = 80
        if num_classes == 20:
            namesfile = 'data/voc.names'
        elif num_classes == 80:
            namesfile = 'data/coco.names'
        else:
            namesfile = 'data/names'

        class_names = load_class_names(namesfile)
        result = plot_boxes_cv2(image_src,
                                boxes,
                                savename=None,
                                class_names=class_names)
        cv2.imshow("tensorflow predicted", result)
        cv2.waitKey()
Example #10
    width, height = (darknet_model.width, darknet_model.height)
    darknet_model.load_weights(weightfile)
    if use_cuda:
        darknet_model.cuda()
    class_names = load_class_names(namesfile)

    t1 = time.time()
    total_time = round(t1 - t0, 2)
    print("1 - Initiated DepthModel. -- {} minutes {} seconds".format(
        total_time // 60, total_time % 60))

    print("====================================")
    print("====================================")
    print("====================================")
    # Inference
    img = cv2.imread(imgfile)
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
    start = time.time()
    boxes = do_detect(darknet_model, sized, 0.4, 0.6, use_cuda)
    finish = time.time()
    print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))
    print(boxes)
    plot_boxes_cv2(img,
                   boxes[0],
                   savename='demo_yolo.jpg',
                   class_names=class_names)
    print("====================================")
    print("====================================")
    print("====================================")
Example #11
                    y2 / float(height), None, "", id
                ])

            ############################  detect: start   ##################################

            img = cv2.imread(os.path.join(dataset_dir, imgfile))

            # The inference input size (e.g. 416*416) need not match the training size;
            # training could have used 608*608 or another size.
            # Optional inference sizes:
            #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
            #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
            sized = cv2.resize(img, (width, height))
            sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

            to_plot = plot_boxes_cv2(img, gt_boxes, class_names=class_names)

            if eval_model:
                with torch.no_grad():
                    # for i in range(2):  # This 'for' loop is for speed check
                    #                     # Because the first iteration is usually longer
                    #     boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)
                    boxes = do_detect(model, sized, 0.4, 0.6, use_device)
                    pred = plot_boxes_cv2(img,
                                          boxes[0],
                                          class_names=class_names)
                    to_plot = np.concatenate([to_plot, pred], axis=1)

            # to_plot = (255 * np.clip(to_plot, 0, 1)).astype(np.uint8)

            ############################  detect: end   ##################################
Example #12
    def visualize_boxes_cv2(self, boxes, cv_image, output_file_name):
        plot_boxes_cv2(cv_image, boxes[0], output_file_name, self.class_names)
Example #13
def eval_mskim():
    import os
    import cv2
    import pickle
    import argparse
    import numpy as np

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    parser = argparse.ArgumentParser()
    parser.add_argument('--n_classes', type=int, default=1)
    parser.add_argument('--weightfile', default='checkpoints/Yolov4_epoch10.pth')
    parser.add_argument('--imgfile', default='data/mskim/test90_10percent.txt')
    parser.add_argument('--size', type=int, nargs='+', default=[608, 608])
    parser.add_argument('--namesfile', default='data/smoke.names')
    parser.add_argument('--nms_th', type=float, default=0.6)
    parser.add_argument('--conf_th', type=float, default=0.4)
    parser.add_argument('--save_path', default='predictions/')
    parser.add_argument('--save_image', default=False, action='store_true')
    args = parser.parse_args()

    target_h, target_w = args.size

    model = Yolov4(yolov4conv137weight=None, n_classes=args.n_classes, inference=True)

    pretrained_dict = torch.load(args.weightfile, map_location=torch.device('cuda'))
    model.load_state_dict(pretrained_dict)

    use_cuda = True
    if use_cuda:
        model.cuda()

    with open(args.imgfile, 'r') as f:
        img_file_lst = [line.strip() for line in f.readlines()]
    common_path = os.path.commonpath(img_file_lst)

    prediction_results = {}
    class_names = load_class_names(args.namesfile)
    for img_file in img_file_lst:
        # origin image
        img = cv2.imread(img_file)
        
        origin_h, origin_w = img.shape[:2]

        sized = cv2.resize(img, (target_w, target_h))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        # arguments of do_detect: (model, img, conf_thresh, nms_thresh, use_cuda=1)
        boxes = do_detect(model, sized, args.conf_th, args.nms_th, use_cuda)
        boxes_np = np.asarray(boxes)  # shape: (1, 0) or (1, N, 7) [7: xmin, ymin, xmax, ymax, obj_conf, cls_conf, cls_id]

        if boxes_np.shape[1] == 0:
            pred = np.zeros((1, 5))
        else:
            # box coordinates are normalized to 0 ~ 1
            pred = boxes_np[0, :, :-1]
            # resize to origin image scale
            pred[:, 0] = pred[:, 0] * origin_w
            pred[:, 1] = pred[:, 1] * origin_h
            pred[:, 2] = pred[:, 2] * origin_w
            pred[:, 3] = pred[:, 3] * origin_h
        prediction_results[os.path.relpath(img_file, common_path)] = pred

        # save prediction results
        if args.save_image:
            filename = os.path.basename(img_file)
            dirname = os.path.dirname(img_file)
            rel_dir = os.path.relpath(dirname, common_path)
            _save_path = os.path.join(args.save_path, rel_dir)
            os.makedirs(_save_path, exist_ok=True)
            plot_boxes_cv2(img, boxes[0], os.path.join(_save_path, filename), class_names)

    with open(os.path.join(args.save_path, 'prediction_results.pickle'), 'wb') as f:
        pickle.dump(prediction_results, f)
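The pickled results can then be loaded back for offline evaluation; a minimal sketch using the same paths as above:

import os
import pickle

with open(os.path.join(args.save_path, 'prediction_results.pickle'), 'rb') as f:
    prediction_results = pickle.load(f)
for rel_path, pred in prediction_results.items():
    print(rel_path, pred.shape)  # boxes rescaled to the original image size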
Example #14
    for box in boxes:
        x = (box[2] + box[0]) / 2 * width
        y = (box[3] + box[1]) / 2 * height
        print()

    return image


if __name__ == '__main__':
    model = Yolov4(None, n_classes=80, inference=True)
    state = torch.load('16.pth')
    model.load_state_dict(state['model'])
    del state
    model.eval()

    rawimg = cv2.imread(
        '/media/palm/data/coco/images/val2017/000000289343.jpg')
    rawimg = cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
    rawimg, bboxes = resize_image(rawimg, None, 608, 608)
    img = rawimg.copy().transpose(2, 0, 1)

    inputs = torch.from_numpy(np.expand_dims(img,
                                             0).astype('float32')).div(255.0)
    output = model(inputs)
    boxes = post_processing(img, 0.4, 0.4, output)
    img = plot_boxes_cv2(rawimg.astype('uint8'), boxes[0])
    img = plot_lines(img, boxes[0])
    cv2.imshow('a', img)
    cv2.waitKey()
Example #15
    use_cuda = True
    if use_cuda:
        model.cuda()

    for path, dirs, files in os.walk(img_root):

        if len(dirs) > 0:
            continue

        for file in files:

            if os.path.splitext(file)[1] not in ['.jpg', '.jpeg', '.png']:
                continue

            imgfile = os.path.join(path, file)

            img = cv2.imread(imgfile)

            # The inference input size (e.g. 416*416) need not match the training size;
            # training could have used 608*608 or another size.
            # Optional inference sizes:
            #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
            #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
            sized = cv2.resize(img, (width, height))
            sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

            boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)
            plot_boxes_cv2(img, boxes[0], os.path.join(img_out, file),
                           class_names)
Example #16
            # Optional inference sizes:
            #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
            #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
            sized = cv2.resize(img, (args.width, args.height))
            sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

            from tool.utils import load_class_names, plot_boxes_cv2
            from tool.torch_utils import do_detect

            for i in range(2):  # Run detection twice as a speed check;
                # the first iteration is usually slower (warm-up)
                boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)

            if namesfile is None:
                if args.n_classes == 20:
                    namesfile = 'data/voc.names'
                elif args.n_classes == 80:
                    namesfile = 'data/coco.names'
                else:
                    print("please give namefile")

            class_names = load_class_names(namesfile)
            # print(class_names)
            plot_boxes_cv2(
                img, boxes[0],
                'predicitons_imgs/predictions_' + tmp_img[-10:-4] + '.jpg',
                class_names)
            i += 1
            if i == 100:
                break
Example #17
    for i, abs_name in tqdm(enumerate(abs_names)):
        img = cv2.imread(abs_name)

        # The inference input size (e.g. 416*416) need not match the training size;
        # training could have used 608*608 or another size.
        # Optional inference sizes:
        #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
        #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
        sized = cv2.resize(img, (width, height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        from tool.utils import load_class_names, plot_boxes_cv2
        from tool.torch_utils import do_detect

        # for i in range(2):  # This 'for' loop is for speed check
        #                     # Because the first iteration is usually longer
        boxes = do_detect(model, sized, 0.1, 0.4, use_cuda)
        boxes = post_process_boxes(boxes)

        if namesfile is None:
            if n_classes == 20:
                namesfile = 'data/voc.names'
            elif n_classes == 80:
                namesfile = '../data/coco.names'
            else:
                print("please give namefile")

        class_names = load_class_names(namesfile)
        save_path = f'../preds/golf_gray_coco_train/{i}.jpg'
        plot_boxes_cv2(img, boxes, save_path, class_names)
Example #18
import cv2
import numpy as np
import glob
from models import Yolov4
from tool import utils, torch_utils
import torch

f_names = glob.glob("test2/*.png")

model = Yolov4(yolov4conv137weight=None, n_classes=1, inference=True)
model.load_state_dict(torch.load("Yolov4.pth"))
model.cuda().eval()

for f in f_names:
    im = cv2.imread(f)
    im = cv2.resize(im, (512, 320))

    with torch.no_grad():
        boxes = torch_utils.do_detect(model,
                                      cv2.cvtColor(im, cv2.COLOR_BGR2RGB), 0.4,
                                      0.4, 1)
        utils.plot_boxes_cv2(im, boxes[0], "b" + f, color=(0, 0, 255))
Example #19
def main():
    n_classes = 18
    weightfile = r'D:\DeepBlueProjects\chem-lab\pytorch-YOLOv4-master\checkpoints\Yolov4_epoch6.pth'
    imgfile = r'D:\Data\CMS01_single-end\val\JPEGImages\frontfront_0518.jpg'
    base_dir = r'D:\Data\chem-yolov4\eval-dataset\top'
    gt_path = os.path.join(base_dir, 'gt.json')
    name_id_path = os.path.join(base_dir, 'name_id.json')
    with open(gt_path, 'r') as f:
        gt_dict = json.load(f)
    with open(name_id_path, 'r') as f:
        name_id_dict = json.load(f)

    input_size = (960, 960)

    model = Yolov4(yolov4conv137weight=None,
                   n_classes=n_classes,
                   inference=True)
    pretrained_dict = torch.load(weightfile, map_location=torch.device('cuda'))

    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in pretrained_dict.items():
        name = k[7:]  # strip the leading 'module.' (7 characters) left by DataParallel
        new_state_dict[name] = v  # map the stripped key to the same weight tensor

    model.load_state_dict(new_state_dict)  # load the dict with the 'module.' prefix stripped

    use_cuda = True
    if use_cuda:
        model.cuda()

    data_txt = os.path.join(base_dir, 'data.txt')
    save_dir = os.path.join(base_dir, 'JPEGImages_pred')
    result_dir = os.path.join(base_dir, 'result_txt')
    with open(data_txt, 'r') as f:
        imgfiles = f.readlines()

    box_list = []
    for imgfile in imgfiles:

        img = cv2.imread(imgfile.strip('\n'))
        img_h, img_w, _ = img.shape

        img_name = imgfile.split('\\')[-1].strip('\n')
        img_id = name_id_dict[img_name]
        result_txt = os.path.join(result_dir, img_name[:-4] + '.txt')
        result_f = open(result_txt, 'w')
        # The inference input size (e.g. 416*416) need not match the training size;
        # training could have used 608*608 or another size.
        # Optional inference sizes:
        #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
        #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
        sized = cv2.resize(img, (input_size[1], input_size[0]))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        # for i in range(2):  # This 'for' loop is for speed check
        # Because the first iteration is usually longer
        boxes = do_detect(model, sized, 0.01, 0.3, use_cuda)

        for box in boxes[0]:
            x1 = int((box[0] - box[2] / 2.0) * img_w)
            y1 = int((box[1] - box[3] / 2.0) * img_h)
            x2 = int((box[0] + box[2] / 2.0) * img_w)
            y2 = int((box[1] + box[3] / 2.0) * img_h)
            w = x2 - x1
            h = y2 - y1

            if len(box) >= 7:
                cls_conf = box[5]
                cls_id = box[6]
                box_list.append({
                    "image_id": img_id,
                    "category_id": int(cls_id),
                    "bbox": [x1, y1, w, h],
                    "score": float(cls_conf)
                })
                string = ','.join([
                    str(cls_id),
                    str(x1),
                    str(y1),
                    str(x2),
                    str(y2),
                    str(cls_conf)
                ]) + '\n'
                result_f.write(string)

                if cls_conf > 0.3:
                    cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)),
                                  (255, 0, 255), 1)
                    cv2.putText(img, str(cls_id), (int(x1 + 10), int(y1 + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255),
                                1)
                    cv2.putText(img, str(round(cls_conf, 3)),
                                (int(x1 + 30), int(y1 + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (20, 240, 0), 1)
            else:
                print('unexpected box format:', box)

        result_f.close()
        namesfile = 'data/chem.names'
        class_names = load_class_names(namesfile)
        save_name = os.path.join(save_dir, img_name)
        plot_boxes_cv2(img, boxes[0], save_name, class_names)
        # cv2.imshow('result', img)
        # cv2.waitKey(0)

    # cv2.destroyAllWindows()

    info, map_iou0_5 = get_coco_mAP(gt_dict, box_list)
    # print("---base_eval---epoch%d"%real_epoch)
    print(info)
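Since box_list already follows the COCO detection-results layout (image_id, category_id, bbox, score), it can also be dumped to JSON for use with pycocotools; a minimal sketch (the output file name is a placeholder):

import json

with open(os.path.join(base_dir, 'detections.json'), 'w') as f:
    json.dump(box_list, f)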
Example #20
            for file in files_list:
                img_path = os.path.join(parent, file)
                rel_img_path = os.path.join(parent_path, file)

                img = cv2.imread(img_path)

                sized = cv2.resize(img, (args.width, args.height))
                sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

                boxes = do_detect(model, sized, 0.4, 0.6, use_cuda, False)
                class_names = load_class_names(args.namesfile)

                output_path = os.path.join(args.output, rel_img_path)
                os.makedirs(os.path.dirname(output_path), exist_ok=True)
                plot_boxes_cv2(img, boxes[0], output_path, class_names)

    else:
        img = cv2.imread(args.input)

        # The inference input size (e.g. 608*608) need not match the training size;
        # training could have used another size.
        # Optional inference sizes:
        #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
        #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
        sized = cv2.resize(img, (args.width, args.height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)
        class_names = load_class_names(args.namesfile)
Example #21
            boxes, period = do_detect(model, sized, 0.4, 0.6, use_cuda)

        # print(len(boxes))
        # print(boxes)
        # print(len(box))
        # print(box)
        box = boxes[0]
        pred_fname = 'predictions/mAp512_30/' + imgfile[imgfile.rfind('\\') +
                                                        1:]
        output_fname = 'predictions/mAp512_30/' + imgfile[
            imgfile.rfind('\\') + 1:imgfile.rfind('.png')] + '.txt'

        with open(output_fname, 'w') as outfile:
            if len(box) >= 1:
                class_names = load_class_names(namesfile)
                plot_boxes_cv2(img, box, pred_fname, class_names)
                cls_conf = box[0][5]
                cls_id = box[0][6]
                print('%s: %f' % (class_names[cls_id], cls_conf))
                blist = map(str, box[0][0:4])
                outline = str(cls_id) + ' ' + ' '.join(blist) + ' ' + str(
                    cls_conf) + '\n'
                # print('')
                outfile.write(outline)
                # print('===\n %f \n===' % period)
                time.append(period)
            else:
                print('no result')
                outline = '-1\n'
                outfile.write(outline)
    print(np.mean(time))
Example #22
    def infer(self, data):
        sized = cv2.resize(data, (self.width, self.height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
        boxes = do_detect(self.model, sized, 0.4, 0.6, self.use_cuda, self.is_half)
        plot_boxes_cv2(data, boxes[0], savename='./data/predictions.jpg', class_names=self.class_names)
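The is_half argument above suggests this fork supports half-precision inference. In plain PyTorch, enabling fp16 inference is roughly the following sketch (assuming a CUDA-capable GPU; inputs fed to the model must then be fp16 as well):

model = model.cuda().half()  # convert weights to fp16
model.eval()
# any tensor passed to the model must be converted with .half() too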
Example #23
    if use_cuda:
        model.cuda()

    img = cv2.imread(imgfile)

    # The inference input size (e.g. 416*416) need not match the training size;
    # training could have used 608*608 or another size.
    # Optional inference sizes:
    #   Height in {320, 416, 512, 608, ..., 320 + 96 * n}
    #   Width in {320, 416, 512, 608, ..., 320 + 96 * m}
    sized = cv2.resize(img, (width, height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    from tool.utils import load_class_names, plot_boxes_cv2
    from tool.torch_utils import do_detect

    for i in range(2):  # Run detection twice as a speed check;
                        # the first iteration is usually slower (warm-up)
        boxes = do_detect(model, sized, 0.4, 0.6, use_cuda)

    if namesfile is None:
        if n_classes == 20:
            namesfile = 'data/voc.names'
        elif n_classes == 80:
            namesfile = 'data/coco.names'
        else:
            print("please give namefile")

    class_names = load_class_names(namesfile)
    plot_boxes_cv2(img, boxes[0], 'predictions.jpg', class_names)
Example #24
    def inference(self, img_path, plot=False, j=0):
        def convert_rgb(dmap):
            x = np.asarray([dmap] * 3)
            return np.transpose(x, (1, 2, 0))

        t0 = time.time()

        # Depth estimation
        data, original_size = load_data_bts(img_path)
        disp = predict_bts(self.depth_model, data, original_size)
        # print(disp)

        # Bounding box
        img, boxes = self.bbox_inference(img_path)
        img = plot_boxes_cv2(img,
                             boxes[0],
                             class_names=self.class_names,
                             disp=disp)

        # Construct message
        # df = construct_object_table(img, boxes, self.class_names, disp)
        # msg = get_instructions(df)

        # Plot
        if plot:
            cv2.imwrite('output/_{}'.format(os.path.basename(img_path)), disp)

            # plt.imshow(disp, cmap='plasma')
            # plt.savefig('frame/depth_map{}.png'.format(j))
            plot_boxes_cv2(img,
                           boxes[0],
                           savename='output/bbox{}.png'.format(j),
                           class_names=self.class_names,
                           disp=disp)

        def create_grid(gimg, n=6, cl=(91, 235, 52)):
            h, w = gimg.shape[0:2]
            ys = [int(h * j / n) for j in range(1, n)]
            xs = [int(w * j / n) for j in range(1, n)]
            for x in xs:
                cv2.line(gimg, (x, 0), (x, h), cl, 1, 1)
            for y in ys:
                cv2.line(gimg, (0, y), (w, y), cl, 1, 1)

            sensor_gimg = gimg.copy()
            start_xs = [int(h * j / n) for j in range(0, n)]
            start_ys = [int(w * j / n) for j in range(0, n)]
            end_xs = [int(h * j / n) for j in range(1, n + 1)]
            end_ys = [int(w * j / n) for j in range(1, n + 1)]
            for x1, x2 in zip(start_xs, end_xs):
                for y1, y2 in zip(start_ys, end_ys):
                    sensor_gimg[x1:x2,
                                y1:y2, :] = np.mean(sensor_gimg[x1:x2,
                                                                y1:y2, :])

            return gimg, sensor_gimg

        dmap = convert_rgb(disp)
        grid_dmap = dmap.copy()
        grid_dmap, sensor_gimg = create_grid(grid_dmap)

        t1 = time.time()
        total_time = round(t1 - t0, 2)
        self.log.info(
            "1 - Done inference_image {}. -- {} minutes {} seconds".format(
                img_path, total_time // 60, total_time % 60))

        # return msg, df
        return dmap, img, grid_dmap, sensor_gimg