Code Example #1
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    model.eval()
    t0 = time.time()
    # original tensor preprocessing, disabled for this model wrapper:
    # if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
    #     img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
    # elif type(img) == np.ndarray and len(img.shape) == 4:
    #     img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    # else:
    #     print("unknow image type")
    #     exit(-1)
    #
    # if use_cuda:
    #     img = img.cuda()
    # img = torch.autograd.Variable(img)

    t1 = time.time()

    #NC UPDATE
    model.conf = conf_thresh
    model.iou = nms_thresh
    output = model(img)  # inference behaves differently with a raw image than with a tensor

    t2 = time.time()

    print('-----------------------------------')
    print('           Preprocess : %f' % (t1 - t0))
    print('      Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')

    return utils.post_processing(img, conf_thresh, nms_thresh, output)
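This variant assumes `model` is an autoshape-style wrapper that exposes `.conf` and `.iou` attributes and accepts a raw image directly (e.g. a torch.hub YOLOv5 model). A minimal call sketch; the hub repo, weight name, and image path below are assumptions, not part of the original example:

import cv2
import torch

# hypothetical setup: any wrapper exposing .conf/.iou attributes should do
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
img = cv2.imread('data/dog.jpg')  # placeholder image path
boxes = do_detect(model, img, conf_thresh=0.4, nms_thresh=0.6)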
Code Example #2
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    model.eval()
    t0 = time.time()

    if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
        img = torch.from_numpy(img.transpose(
            2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif type(img) == np.ndarray and len(img.shape) == 4:
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    else:
        print("unknow image type")
        exit(-1)

    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)  # Variable is a no-op in PyTorch >= 0.4; kept for compatibility

    t1 = time.time()

    # with torch.autograd.profiler.profile(use_cuda=True) as prof:
    #     output = model(img)
    # print(prof.key_averages().table(sort_by="self_cpu_time_total"))

    output = model(img)
    # torch.cuda.synchronize()

    t2 = time.time()

    print('-----------------------------------')
    print('           Preprocess : %f' % (t1 - t0))
    print('      Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')

    return utils.post_processing(img, conf_thresh, nms_thresh, output)
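The commented-out `torch.cuda.synchronize()` above points at a real pitfall: CUDA kernels launch asynchronously, so wall-clock timing around `model(img)` can under-report GPU inference time. A minimal sketch of synchronized timing, assuming a CUDA-capable setup:

import time
import torch

def timed_inference(model, img):
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # drain any pending GPU work first
    t1 = time.time()
    with torch.no_grad():
        output = model(img)
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for the forward pass to actually finish
    t2 = time.time()
    print('Model Inference : %f' % (t2 - t1))
    return output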
Code Example #3
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    model.eval()
    with torch.no_grad():
        t0 = time.time()

        if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
            img = torch.from_numpy(img.transpose(
                2, 0, 1)).float().div(255.0).unsqueeze(0)
        elif type(img) == np.ndarray and len(img.shape) == 4:
            img = torch.from_numpy(img.transpose(0, 3, 1,
                                                 2)).float().div(255.0)
        else:
            print("unknow image type")
            exit(-1)

        if use_cuda:
            img = img.cuda()
        img = torch.autograd.Variable(img)

        t1 = time.time()

        output = model(img)

        t2 = time.time()

        print('-----------------------------------')
        print('           Preprocess : %f' % (t1 - t0))
        print('      Model Inference : %f' % (t2 - t1))
        print('-----------------------------------')

        return utils.post_processing(img, conf_thresh, nms_thresh, output)
Code Example #4
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1, verbose=True):
    model.eval()
    t0 = time.time()
    # print(type(img))
    if type(img) == Image.Image:
        img = np.array(img)
    if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
        img = torch.from_numpy(img.transpose(
            2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif type(img) == np.ndarray and len(img.shape) == 4:
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    elif type(img) == torch.Tensor:
        pass
    else:
        print("unknow image type")
        exit(-1)

    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)

    t1 = time.time()
    output = model(img)
    t2 = time.time()

    if verbose:
        print("input shape : ", img.shape)
        print("model output shape : ", output[0].shape)
        print('-----------------------------------')
        print('           Preprocess : %f' % (t1 - t0))
        print('      Model Inference : %f' % (t2 - t1))
        print('-----------------------------------')

    return utils.post_processing(img, conf_thresh, nms_thresh, output, verbose)
Code Example #5
    def main(self):
        frame = cv2.imread(self.id)  # the original passed the builtin `id` here
        try:
            img, orig_im, dim = prep_image(frame, 160)

            im_dim = torch.FloatTensor(dim).repeat(1, 2)
            if self.CUDA:  # run on the GPU if one is available
                im_dim = im_dim.cuda()
                img = img.cuda()

            output = self.model(img)

            from tool.utils import post_processing, plot_boxes_cv2
            bounding_boxes = post_processing(img, self.confidence,
                                             self.nms_thesh, output)
            frame = plot_boxes_cv2(frame,
                                   bounding_boxes[0],
                                   savename=None,
                                   class_names=self.classes,
                                   color=None,
                                   colors=self.colors)

        except Exception:
            pass  # skip the frame if preprocessing or inference fails

        cv2.imwrite('IMG_0748-1.JPG', frame)
        torch.cuda.empty_cache()
Code Example #6
def do_detect(model, img, conf_thresh, n_classes, nms_thresh, use_cuda=1):
    model.eval()
    t0 = time.time()

    if isinstance(img, Image.Image):
        width = img.width
        height = img.height
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
        img = img.view(height, width,
                       3).transpose(0, 1).transpose(0, 2).contiguous()
        img = img.view(1, 3, height, width)
        img = img.float().div(255.0)
    elif type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
        img = torch.from_numpy(img.transpose(
            2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif type(img) == np.ndarray and len(img.shape) == 4:
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    else:
        print("unknow image type")
        exit(-1)

    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)

    t1 = time.time()

    boxes_and_confs = model(img)

    # print(boxes_and_confs)
    output = []

    for i in range(len(boxes_and_confs)):
        output.append([])
        output[-1].append(boxes_and_confs[i][0].cpu().detach().numpy())
        output[-1].append(boxes_and_confs[i][1].cpu().detach().numpy())
        output[-1].append(boxes_and_confs[i][2].cpu().detach().numpy())

    t2 = time.time()

    print('-----------------------------------')
    print('          Preprocess : %f' % (t1 - t0))
    print('     Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')
    '''
    for i in range(len(boxes_and_confs)):
        output.append(boxes_and_confs[i].cpu().detach().numpy())
    '''

    return utils.post_processing(img, conf_thresh, n_classes, nms_thresh,
                                 output)
Code Example #7
    def main(self):
        q = queue.Queue()
        while True:

            def frame_render(queue_from_cam):
                frame = self.cap.read()  # if capturing with OpenCV (cv2.VideoCapture()), use the line below instead
                # ret, frame = self.cap.read()
                frame = cv2.resize(frame, (self.width, self.height))
                queue_from_cam.put(frame)

            cam = threading.Thread(target=frame_render, args=(q, ))
            cam.start()
            cam.join()
            frame = q.get()
            q.task_done()
            fps = FPS().start()

            try:
                img, orig_im, dim = prep_image(frame, 160)

                im_dim = torch.FloatTensor(dim).repeat(1, 2)
                if self.CUDA:  # run on the GPU if one is available
                    im_dim = im_dim.cuda()
                    img = img.cuda()
                # with torch.no_grad():               #### Set the model in the evaluation mode

                output = self.model(img)
                from tool.utils import post_processing, plot_boxes_cv2
                bounding_boxes = post_processing(img, self.confidence,
                                                 self.nms_thesh, output)
                frame = plot_boxes_cv2(frame,
                                       bounding_boxes[0],
                                       savename=None,
                                       class_names=self.classes,
                                       color=None,
                                       colors=self.colors)

            except:
                pass

            fps.update()
            fps.stop()
            print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
            print("[INFO] approx. FPS: {:.1f}".format(fps.fps()))

            cv2.imshow("Object Detection Window", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            torch.cuda.empty_cache()
Code Example #8
def do_detect(model, mid_result, img, conf_thresh, nms_thresh, use_cuda=1):
    model.eval()

    y1, x10, x18 = (mid_result[0]), (mid_result[1]), mid_result[2]
    y1 = (torch.from_numpy(y1[0]).cuda(), torch.from_numpy(y1[1]).cuda())
    x10 = torch.from_numpy(x10).cuda()
    y2 = model.head.yolo2(x10)  # the original called yolo3 here too, which looks like a copy-paste slip
    x18 = torch.from_numpy(x18).cuda()
    y3 = model.head.yolo3(x18)

    # output = model(img)
    output = get_region_boxes([y1, y2, y3])

    return utils.post_processing(img, conf_thresh, nms_thresh, output)
Code Example #9
    def detect(self):
        """Detect if image is Aadhaar and save image if detected"""

        logger.info(f"The model expects input shape: {model.get_inputs()[0].shape}")

        image_src = cv2.imread(self.image_path)
        IN_IMAGE_H = model.get_inputs()[0].shape[2]
        IN_IMAGE_W = model.get_inputs()[0].shape[3]

        # Input
        resized = cv2.resize(
            image_src, (IN_IMAGE_W, IN_IMAGE_H), interpolation=cv2.INTER_LINEAR
        )
        img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
        img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
        img_in = np.expand_dims(img_in, axis=0)
        img_in /= 255.0
        logger.info(f"Shape of the network input after preprocessing: {img_in.shape}")

        # Compute
        input_name = model.get_inputs()[0].name

        outputs = model.run(None, {input_name: img_in})

        boxes = post_processing(img_in, 0.4, 0.6, outputs)
        logger.info(f"Post Processing output : {boxes}")

        if np.array(boxes).size:

            namesfile = cfg.NAMESFILE
            class_names = load_class_names(namesfile)
            if plot_boxes_cv2(
                image_src, boxes[0], savename=self.filename, class_names=class_names
            ):  # Detect image and save image with bounding boxes if Aadhaar card detected
                return 1
            else:
                return 0

        else:
            logger.info("Uploaded Image is not Aadhaar")
            return 0
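The `model` used above behaves like an ONNX Runtime `InferenceSession` (`get_inputs()`, `run()`), but its construction is outside the snippet. A plausible setup sketch; the weight filename is a placeholder, not from the original:

import onnxruntime

# hypothetical session setup this class appears to rely on
model = onnxruntime.InferenceSession('yolov4.onnx')
print(model.get_inputs()[0].shape)  # e.g. [1, 3, H, W], as logged in detect()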
Code Example #10
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    model.eval()
    t0 = time.time()

    if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
        img = torch.from_numpy(img.transpose(
            2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif type(img) == np.ndarray and len(img.shape) == 4:
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    else:
        print("unknow image type")
        exit(-1)

    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)

    t1 = time.time()

    #with profiler.profile(profile_memory=True, record_shapes=True) as prof:
    output = model(img)
    #print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
    #print(torch.cuda.memory_allocated())
    #print(torch.cuda.memory_cached())
    t2 = time.time()

    print('-----------------------------------')
    print('           Preprocess : %f' % (t1 - t0))
    print('      Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')
    out0 = output[0].cpu().detach().numpy()
    out1 = output[1].cpu().detach().numpy()

    # move the input back to the CPU and release cached GPU memory
    img = img.to(torch.device('cpu'))
    torch.cuda.empty_cache()
    return utils.post_processing(conf_thresh, nms_thresh, out0, out1)
Code Example #11
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    model.eval()  # put the model in inference mode
    t0 = time.time()

    # ---- optional: handle PIL images (start) ----
    # if isinstance(img, Image.Image):
    #     width = img.width
    #     height = img.height
    #     img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
    #     img = img.view(height, width, 3).transpose(0, 1).transpose(0, 2).contiguous()   # C * H * W
    #     img = img.view(1, 3, height, width)     # reshape to B * C * H * W
    #     img = img.float().div(255.0)        # scale [0, 255] -> [0, 1]
    # ---- optional: handle PIL images (end) ----
    if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
        img = torch.from_numpy(img.transpose(
            2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif type(img) == np.ndarray and len(img.shape) == 4:
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    else:
        print("unknow image type")
        exit(-1)

    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)

    t1 = time.time()
    output = model(img)  # forward pass; returns the outputs of the three YOLO layers
    t2 = time.time()

    print('-----------------------------------')
    print('           Preprocess : %f' % (t1 - t0))
    print('      Model Inference : %f' % (t2 - t1))
    print('-----------------------------------')

    # hand off to tool/utils.py for post-processing; returns the detected boxes
    return utils.post_processing(img, conf_thresh, nms_thresh, output)
Code Example #12
def do_detect(model, img, obj_thresh, nms_thresh, use_cuda=1):
    model.eval()
    t0 = time.time()

    # transpose channels to fit the model
    if type(img) == np.ndarray and len(img.shape) == 3:  # cv2 image
        img = torch.from_numpy(img.transpose(
            2, 0, 1)).float().div(255.0).unsqueeze(0)
    elif type(img) == np.ndarray and len(img.shape) == 4:
        img = torch.from_numpy(img.transpose(0, 3, 1, 2)).float().div(255.0)
    else:
        print("unknow image type or dims")
        exit(-1)

    # push to GPU
    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)

    t1 = time.time()

    output = model(img)

    t2 = time.time()

    print("-----------------------------------")
    print("           Preprocess : %f" % (t1 - t0))
    print("      Model Inference : %f" % (t2 - t1))
    print("-----------------------------------")

    if model.model_type == "Yolov4":
        return utils.post_processing(img, obj_thresh, nms_thresh, output)
    elif model.model_type in ["BEV_grid", "BEV_flat"]:
        return utils.nms_BEV(output, obj_thresh, nms_thresh, iou_type="rgIoU")
    else:
        print("model type not recognized in do_detect()")
        quit(1)
Code Example #13
def demo_tensorflow(tfpb_file="./weight/yolov4.pb",
                    image_path=None,
                    print_sensor_name=False):
    graph_name = 'yolov4'
    tf.compat.v1.disable_eager_execution()
    with tf.compat.v1.Session() as persisted_sess:
        print("loading graph...")
        with gfile.FastGFile(tfpb_file, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())

        persisted_sess.graph.as_default()
        tf.import_graph_def(graph_def, name=graph_name)

        # optionally print every tensor name in the graph
        if print_sensor_name:
            tensor_name_list = [
                tensor.name for tensor in
                tf.compat.v1.get_default_graph().as_graph_def().node
            ]
            for tensor_name in tensor_name_list:
                print(tensor_name)

        inp = persisted_sess.graph.get_tensor_by_name(graph_name + '/' +
                                                      'input:0')
        print(inp.shape)
        out1 = persisted_sess.graph.get_tensor_by_name(graph_name + '/' +
                                                       'output_1:0')
        out2 = persisted_sess.graph.get_tensor_by_name(graph_name + '/' +
                                                       'output_2:0')
        out3 = persisted_sess.graph.get_tensor_by_name(graph_name + '/' +
                                                       'output_3:0')
        print(out1.shape, out2.shape, out3.shape)

        # image_src = np.random.rand(1, 3, 608, 608).astype(np.float32)  # input image
        # Input
        image_src = cv2.imread(image_path)
        resized = cv2.resize(image_src, (inp.shape[2], inp.shape[3]),
                             interpolation=cv2.INTER_LINEAR)
        img_in = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
        img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
        img_in = np.expand_dims(img_in, axis=0)
        img_in /= 255.0
        print("Shape of the network input: ", img_in.shape)

        feed_dict = {inp: img_in}

        outputs = persisted_sess.run([out1, out2, out3], feed_dict)
        print(outputs[0].shape)
        print(outputs[1].shape)
        print(outputs[2].shape)

        boxes = post_processing(img_in, 0.4, 0.6, outputs)  # the original omitted the NMS threshold argument

        num_classes = 80
        if num_classes == 20:
            namesfile = 'data/voc.names'
        elif num_classes == 80:
            namesfile = 'data/coco.names'
        else:
            namesfile = 'data/names'

        class_names = load_class_names(namesfile)
        result = plot_boxes_cv2(image_src,
                                boxes,
                                savename=None,
                                class_names=class_names)
        cv2.imshow("tensorflow predicted", result)
        cv2.waitKey()
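A minimal invocation sketch for the function above; the image path is a placeholder, not from the original snippet:

demo_tensorflow(tfpb_file='./weight/yolov4.pb',
                image_path='data/dog.jpg',  # placeholder image path
                print_sensor_name=False)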
Code Example #14
File: evaluate.py  Project: dang-qi/pytorch_yolov4
def evaluate_nms(model, data_loader, cfg, device, human_patch=False, json_gt=None, **kwargs):
    """ finished, tested
    """
    # cpu_device = torch.device("cpu")
    model.eval()
    # header = 'Test:'

    if json_gt is None:
        coco = convert_to_coco_api(data_loader.dataset, bbox_fmt='coco')
        coco_evaluator = CocoEvaluator(coco, iou_types=["bbox"], bbox_fmt='coco')
    else:
        coco_gt = COCO(json_gt)

    result_json = []
    for images, targets in tqdm.tqdm(data_loader, desc='testing'):
        model_input = [[cv2.resize(img, (cfg.w, cfg.h))] for img in images]
        model_input = np.concatenate(model_input, axis=0)
        model_input = model_input.transpose(0, 3, 1, 2)
        model_input = torch.from_numpy(model_input).div(255.0)
        model_input = model_input.to(device)
        #targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(model_input)
        outputs = utils.post_processing(conf_thresh=0.001, nms_thresh=0.5, output=outputs)

        # outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        # outputs = outputs.cpu().detach().numpy()
        res = {}
        # for img, target, output in zip(images, targets, outputs):
        for img, target, output in zip(images, targets, outputs):
            img_height, img_width = img.shape[:2]
            #human_box = target['human_box']
            # boxes = output[...,:4].copy()  # output boxes in yolo format
            if len(output) == 0:
                continue
            boxes = output[:, :4]
            scores = output[:, -2]
            labels = output[:, -1]

            # transform [x1, y1, x2, y2] to [x1, y1, w, h]
            boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2]
            if human_patch:
                human_box = target['human_box']  # .cpu().detach().numpy()
                boxes[..., 0] = boxes[..., 0] * img_width + human_box[0]
                boxes[..., 1] = boxes[..., 1] * img_height + human_box[1]
            else:
                boxes[..., 0] = boxes[..., 0] * img_width
                boxes[..., 1] = boxes[..., 1] * img_height
            boxes[..., 2] = boxes[..., 2] * img_width
            boxes[..., 3] = boxes[..., 3] * img_height

            if json_gt is None:
                boxes = torch.as_tensor(boxes, dtype=torch.float32)
                # confs = output[...,4:].copy()
                labels = torch.as_tensor(labels, dtype=torch.int64)
                scores = torch.as_tensor(scores, dtype=torch.float32)
                res[target["image_id"]] = {
                    "boxes": boxes.unsqueeze(1),
                    "scores": scores,
                    "labels": labels,
                }
            else:
                for box, label, score in zip(boxes, labels, scores):
                    single_result = {'image_id': int(target['image_id']),
                                     'bbox': box.tolist(),
                                     'category_id': int(label + 1),
                                     'score': float(score)}
                    result_json.append(single_result)
        evaluator_time = time.time()
        if json_gt is None:
            if len(res) != 0:
                coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time

    if json_gt is None:
        # gather the stats from all processes
        coco_evaluator.synchronize_between_processes()

        # accumulate predictions from all images
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
        return coco_evaluator
    else:
        with open('temp_result.json', 'w') as f:
            json.dump(result_json, f)
        coco_dt = coco_gt.loadRes('temp_result.json')
        cocoEval = COCOeval(coco_gt, coco_dt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        return cocoEval
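A sketch of how `evaluate_nms` might be driven, assuming a `DataLoader` that yields `(images, targets)` batches, a `cfg` object carrying the network input size (`cfg.w`, `cfg.h`), and a COCO-format annotation file; all paths and names below are assumptions:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# passing json_gt switches evaluation to the COCOeval path shown above
evaluator = evaluate_nms(model, data_loader, cfg, device,
                         human_patch=False,
                         json_gt='annotations/instances_val2017.json')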
Code Example #15
File: evaluate.py  Project: dang-qi/pytorch_yolov4
def evaluate_nms_patch(model, data_loader, cfg, device, **kwargs):
    """ finished, tested
    """
    # cpu_device = torch.device("cpu")
    model.eval()
    # header = 'Test:'

    coco = convert_to_coco_api(data_loader.dataset, bbox_fmt='coco')
    coco_evaluator = CocoEvaluator(coco, iou_types=["bbox"], bbox_fmt='coco')

    for images, targets in tqdm.tqdm(data_loader):
        model_input = [[cv2.resize(img, (cfg.w, cfg.h))] for img in images]
        model_input = np.concatenate(model_input, axis=0)
        model_input = model_input.transpose(0, 3, 1, 2)
        model_input = torch.from_numpy(model_input).div(255.0)
        model_input = model_input.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(model_input)
        outputs = utils.post_processing(conf_thresh=0.001, nms_thresh=0.5, output=outputs)

        # outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        # outputs = outputs.cpu().detach().numpy()
        res = {}
        # for img, target, output in zip(images, targets, outputs):
        for img, target, output in zip(images, targets, outputs):
            img_height, img_width = img.shape[:2]
            #human_box = target['human_box']
            # boxes = output[...,:4].copy()  # output boxes in yolo format
            boxes = output[:, :4]
            scores = output[:, -2]
            labels = output[:, -1]

            human_box = target['human_box'].cpu().detach().numpy()
            # transform [x1, y1, x2, y2] to [x1, y1, w, h]
            boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2]
            boxes[..., 0] = boxes[..., 0] * img_width + human_box[0]
            boxes[..., 1] = boxes[..., 1] * img_height + human_box[1]
            boxes[..., 2] = boxes[..., 2] * img_width
            boxes[..., 3] = boxes[..., 3] * img_height
            boxes = torch.as_tensor(boxes, dtype=torch.float32).unsqueeze(0)
            # confs = output[...,4:].copy()
            labels = torch.as_tensor(labels, dtype=torch.int64)
            scores = torch.as_tensor(scores, dtype=torch.float32)
            res[target["image_id"].item()] = {
                "boxes": boxes,
                "scores": scores,
                "labels": labels,
            }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time

    # gather the stats from all processes
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()

    return coco_evaluator
Code Example #16
def evaluate(model, data_loader, cfg, device, logger=None, **kwargs):
    """ finished, tested
    """
    # cpu_device = torch.device("cpu")
    model.eval()
    # header = 'Test:'

    coco = convert_to_coco_api(data_loader.dataset, bbox_fmt='coco')
    coco_evaluator = CocoEvaluator(coco, iou_types=["bbox"], bbox_fmt='coco')

    for images, targets in data_loader:
        model_input = [[cv2.resize(img, (cfg.w, cfg.h))] for img in images]
        model_input = np.concatenate(model_input, axis=0)
        model_input = model_input.transpose(0, 3, 1, 2)
        model_input = torch.from_numpy(model_input).div(255.0)
        model_input = model_input.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(model_input)

        # outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        # outputs = outputs.cpu().detach().numpy()
        res = {}
        # for img, target, output in zip(images, targets, outputs):
        for img, target, boxes, confs in zip(images, targets, outputs[0],
                                             outputs[1]):
            img_height, img_width = img.shape[:2]
            # boxes = output[...,:4].copy()  # output boxes in yolo format
            boxes = boxes.squeeze(2).cpu().detach().numpy()
            # transform [x1, y1, x2, y2] to [x1, y1, w, h]
            boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2]
            boxes[..., 0] = boxes[..., 0] * img_width
            boxes[..., 1] = boxes[..., 1] * img_height
            boxes[..., 2] = boxes[..., 2] * img_width
            boxes[..., 3] = boxes[..., 3] * img_height
            boxes = torch.as_tensor(boxes, dtype=torch.float32)
            # confs = output[...,4:].copy()
            confs = confs.cpu().detach().numpy()
            labels = np.argmax(confs, axis=1).flatten()
            labels = torch.as_tensor(labels, dtype=torch.int64)
            scores = np.max(confs, axis=1).flatten()
            scores = torch.as_tensor(scores, dtype=torch.float32)
            res[target["image_id"].item()] = {
                "boxes": boxes,
                "scores": scores,
                "labels": labels,
            }

        debug = kwargs.get("debug", [])
        if isinstance(debug, str):
            debug = [debug]
        debug = [item.lower() for item in debug]
        if 'iou' in debug:
            from tool.utils_iou_test import bboxes_iou_test
            output_boxes = np.array(
                post_processing(None, 0.5, 0.5, outputs)[0])[..., :4]
            img_height, img_width = images[0].shape[:2]
            output_boxes[..., 0] = output_boxes[..., 0] * img_width
            output_boxes[..., 1] = output_boxes[..., 1] * img_height
            output_boxes[..., 2] = output_boxes[..., 2] * img_width
            output_boxes[..., 3] = output_boxes[..., 3] * img_height
            # coco format to yolo format
            truth_boxes = targets[0]['boxes'].numpy().copy()
            truth_boxes[..., :2] = truth_boxes[..., :2] + truth_boxes[..., 2:] / 2
            iou = bboxes_iou_test(torch.Tensor(output_boxes),
                                  torch.Tensor(truth_boxes),
                                  fmt='yolo')
            print(f"iou of first image = {iou}")
        if len(debug) > 0:
            return

        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time

    # gather the stats from all processes
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()

    return coco_evaluator
Code Example #17
File: test_video.py  Project: Ongoza/Wingu
            if (skip_counter > 0): continue
            else: break
        start = time.time()
        counter += 1

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Tensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        # frame = cv2.imread(imgfile)
        frame_sm = cv2.resize(frame, (detector.width, detector.height))
        # frame_cuda = transforms.ToTensor()(frame_sm).to(device).unsqueeze(0)
        frame_cuda = torch.from_numpy(frame_sm.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
        # for batches
        # frame_sm = torch.from_numpy(frame_sm.transpose(0, 3, 1, 2)).float().div(255.0)
        with torch.no_grad():
            detections = get_region_boxes(detector(frame_cuda))
            detections_suppression = post_processing(frame_sm, conf_thresh, nms_thresh, detections, True)

            tracks = tracker.update(detections_suppression[0], frame_sm)
            # print(outputs)
        frame_out = frame_sm
        # for box in tracks:
            # frame_out = cv2.rectangle(frame_out, (box[0], box[1]), (box[2], box[3]), (255, 0, 0), 1)
            # frame_out = cv2.putText(frame_out,str(box[4]),(box[0],box[1]+10), cv2.FONT_HERSHEY_PLAIN, 2, [0,255,0], 2)

        for track in tracks:
            # print(track.mean[:4])
            if not track.is_confirmed() or track.time_since_update > 1:
                # if track.time_since_update > life_frame_limit: track.state = 3  # drop ids that have been missing too long
                continue
            x1y1 = (int(track.mean[0]), int(track.mean[1]))
            clr = (255, 255, 0) # default color
Code Example #18
File: predict.py  Project: DableUTeeF/eightpoints
    # compute each box centre in pixel coordinates
    for box in boxes:
        x = (box[2] + box[0]) / 2 * width
        y = (box[3] + box[1]) / 2 * height
        print(x, y)  # the original printed a blank line here

    return image


if __name__ == '__main__':
    model = Yolov4(None, n_classes=80, inference=True)
    state = torch.load('16.pth')
    model.load_state_dict(state['model'])
    del state
    model.eval()

    rawimg = cv2.imread(
        '/media/palm/data/coco/images/val2017/000000289343.jpg')
    rawimg = cv2.cvtColor(rawimg, cv2.COLOR_BGR2RGB)
    rawimg, bboxes = resize_image(rawimg, None, 608, 608)
    img = rawimg.copy().transpose(2, 0, 1)

    inputs = torch.from_numpy(np.expand_dims(img,
                                             0).astype('float32')).div(255.0)
    output = model(inputs)
    boxes = post_processing(img, 0.4, 0.4, output)
    img = plot_boxes_cv2(rawimg.astype('uint8'), boxes[0])
    img = plot_lines(img, boxes[0])
    cv2.imshow('a', img)
    cv2.waitKey()