Example No. 1
def detect_cv2(cfgfile, weightfile, imgfile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    img = cv2.imread(imgfile)
    sized = cv2.resize(img, (m.width, m.height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.4, 0.6, False)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    boxes_bird = []
    for box in boxes[0]:
        if box[6] == 14:
            boxes_bird.append(box)

    print(len(boxes_bird))
    plot_boxes_cv2(img, boxes_bird, savename='predictions', class_names=class_names)
Example No. 2
def detect(cfgfile, weightfile, imgfile):
    # Initialize the network from the config file
    m = Darknet(cfgfile)
    # Print the network architecture (per-layer structure, number of filters,
    # input/output feature-map sizes and channel counts)
    m.print_network()
    # Load the model weights
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    # Use the COCO class names by default
    namesfile = 'data/coco.names'

    # CPU by default
    use_cuda = 0
    if use_cuda:
        m.cuda()

    # Read the test image and convert it to RGB
    img = Image.open(imgfile).convert('RGB')
    # Resize the test image to the network input size
    sized = img.resize((m.width, m.height))
    # Time the second run only: it is more stable and representative than the first
    for i in range(2):
        start = time.time()
        # CPU by default; conf_thresh: 0.5, nms_thresh: 0.4
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))
    # Load the class names used to label the bounding boxes
    class_names = load_class_names(namesfile)
    # Draw the boxes and class labels on the test image and save it
    plot_boxes(img, boxes, 'img/predictions.jpg', class_names)
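
A minimal driver for the detect() function above might look like the sketch below; the cfg, weight, and image paths are placeholders, not part of the original example.

if __name__ == '__main__':
    # All three paths are assumptions for illustration only.
    detect('cfg/yolov4.cfg', 'weights/yolov4.weights', 'data/dog.jpg')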
Example No. 3
def detect_cv2(cfgfile, weightfile, imgfile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    img = cv2.imread(imgfile)
    sized = cv2.resize(img, (m.width, m.height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.0003, 0.005, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    plot_boxes_cv2(img, boxes[0], savename='predictions.jpg', class_names=class_names)
Example No. 4
def detect(cfgfile, weightfile, imgfile):
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    num_classes = 80
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/names'

    use_cuda = 0
    if use_cuda:
        m.cuda()

    # img = Image.open(imgfile).convert('RGB')

    # sized = img.resize((m.width, m.height))
    w_im = imgfile.shape[1]
    h_im = imgfile.shape[0]

    boxes = np.array(do_detect(m, imgfile, 0.5, 0.4, use_cuda))
    boxes[:, 0] *= w_im
    boxes[:, 1] *= h_im
    boxes[:, 2] *= w_im
    boxes[:, 3] *= h_im
    return boxes
Example No. 5
def detect_skimage(cfgfile, weightfile, imgfile):
    from skimage import io
    from skimage.transform import resize
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    img = io.imread(imgfile)
    sized = resize(img, (m.width, m.height)) * 255

    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.4, 0.4, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    plot_boxes_cv2(img, boxes, savename='predictions.jpg', class_names=class_names)
Example No. 6
def init_model(transform):

    parser = argparse.ArgumentParser()
    parser.add_argument("--confidence", dest="confidence", help="Object Confidence to filter predictions", default=0.25)
    parser.add_argument("--nms_thresh", dest="nms_thresh", help="NMS Threshhold", default=0.4)
    parser.add_argument("--reso", dest='reso',
                        help="Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
                        default="160", type=str)
    args, unknown = parser.parse_known_args()

    cfgfile = "./cfg/yolov4.cfg"
    weightsfile = "./weights/yolov4.pth"

    confidence = float(args.confidence)
    nms_thesh = float(args.nms_thresh)
    CUDA = torch.cuda.is_available()
    num_classes = 80
    # bbox_attrs = 5 + num_classes
    class_names = load_class_names("./data/coco.names")

    model = Darknet(cfgfile)
    model.load_weights(weightsfile)

    if CUDA:
        model.cuda()

    model.eval()
    return (model, class_names, CUDA), None
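
A hedged usage sketch for init_model() above: the transform argument is ignored inside the function, so None can be passed, and the returned tuple is unpacked as shown.

# Usage sketch (assumes it is called from the same module that defines init_model).
(model, class_names, CUDA), _ = init_model(None)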
Example No. 7
    def detect_cv2(self, cfgfile, weightfile, imgfile):
        import cv2
        m = Darknet(cfgfile)

        m.print_network()
        m.load_weights(weightfile)
        print('Loading weights from %s... Done!' % (weightfile))

        if use_cuda:
            m.cuda()

        num_classes = m.num_classes
        namesfile = 'cfg/ball.names'
        class_names = load_class_names(namesfile)

        (img_h, img_w) = imgfile[0].shape[:2]
        result = dict()
        for i, img in enumerate(imgfile):
            #img = cv2.imread(imgfile[i])
            sized = cv2.resize(img, (m.width, m.height))
            sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

            boxes = do_detect(m, sized, 0.4, 0.6, use_cuda)
            boxes[0].sort(key=lambda s: s[4], reverse=True)
            if boxes[0]:
                for box in boxes[0]:
                    x1 = int(box[0] * img_w)
                    y1 = int(box[1] * img_h)
                    w = int(box[2] * img_w) - x1
                    h = int(box[3] * img_h) - y1
                    bbox = (x1, y1, w, h)
                    result['frame'] = i
                    result['bbox'] = bbox
                    self.bbox_set.append(result.copy())
Example No. 8
def detect(cfgfile, weightfile, imgfile):
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    num_classes = 80
    if num_classes == 20:
        namesfile = os.path.dirname(__file__) + '/data/voc.names'
    elif num_classes == 80:
        namesfile = os.path.dirname(__file__) + '/data/coco.names'
    else:
        namesfile = os.path.dirname(__file__) + '/data/names'

    use_cuda = 0
    if use_cuda:
        m.cuda()

    img = Image.open(imgfile).convert('RGB')
    sized = img.resize((m.width, m.height))

    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    class_names = load_class_names(namesfile)
Example No. 9
def detect_imges(cfgfile,
                 weightfile,
                 imgfile_list=['data/dog.jpg', 'data/giraffe.jpg']):
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    imges = []
    imges_list = []
    for imgfile in imgfile_list:
        img = Image.open(imgfile).convert('RGB')
        imges_list.append(img)
        sized = img.resize((m.width, m.height))
        imges.append(np.expand_dims(np.array(sized), axis=0))

    images = np.concatenate(imges, 0)
    for i in range(2):
        start = time.time()
        boxes = do_detect(m, images, 0.5, num_classes, 0.4, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    class_names = load_class_names(namesfile)
    for i, (img, box) in enumerate(zip(imges_list, boxes)):
        plot_boxes(img, box, 'predictions{}.jpg'.format(i), class_names)
Example No. 10
def detect_cv2_camera(cfgfile, weightfile, videofile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture(videofile)
    # cap.set(3, 1280)
    # cap.set(4, 720)
    print("Starting the YOLO loop...")

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    conf_thresh = 0.2
    detect_fps = 5
    detect_interval_msec = 1000 / detect_fps
    next_detect_msec = 0

    # Save original image
    assert os.path.isdir('/track_data/img')
    while True:
        ret = cap.grab()
        if not ret:
            break

        video_msec = cap.get(cv2.CAP_PROP_POS_MSEC)
        if video_msec > next_detect_msec:
            next_detect_msec += detect_interval_msec

            ret, img = cap.retrieve()
            sized = cv2.resize(img, (m.width, m.height))
            sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

            start = time.time()
            boxes = do_detect(m, sized, conf_thresh, 0.6, use_cuda)
            finish = time.time()
            print('Predicted in %f seconds.' % (finish - start))

            cv2.imwrite(f'/track_data/img/{video_msec:010.2f}.jpg', img)

            # Save detection result image
            plot_boxes_cv2(img,
                           boxes[0],
                           savename=f'/track_data/detect/{video_msec:010.2f}',
                           class_names=class_names)

    cap.release()
Example No. 11
def transform_to_onnx(cfgfile, weightfile, onnx_file_name):
    model = Darknet(cfgfile)

    model.print_network()
    model.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))
    batch_size = 1
    input_names = ["input"]
    output_names = ['boxes', 'confs']
    if 'grey' in onnx_file_name:
        channels = 1
    else:
        channels = 3
    print('channels={}'.format(channels))
    x = torch.randn((batch_size, channels, model.height, model.width),
                    requires_grad=True)
    torch.onnx.export(model,
                      x,
                      onnx_file_name,
                      export_params=True,
                      opset_version=11,
                      do_constant_folding=True,
                      input_names=input_names,
                      output_names=output_names,
                      dynamic_axes=None)

    print('Onnx model exporting done')
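
Running the exported file is not shown in the example above; a sketch using onnxruntime (assumed to be installed) could look like this, reusing the 'input'/'boxes'/'confs' names passed to torch.onnx.export.

import numpy as np
import onnxruntime as ort

def run_exported_onnx(onnx_file_name, height, width, channels=3):
    # Feed a dummy tensor with the same static shape the model was exported with.
    session = ort.InferenceSession(onnx_file_name)
    x = np.random.randn(1, channels, height, width).astype(np.float32)
    boxes, confs = session.run(['boxes', 'confs'], {'input': x})
    return boxes, confs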
Example No. 12
def detect_img_folder(cfgfile, weightfile, imgfolder, specialnms, gpu):
    import cv2
    m = Darknet(cfgfile)
    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    use_cuda = gpu
    if use_cuda:
        m.cuda()

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/obj.names'
    class_names = load_class_names(namesfile)

    img_list = os.listdir(imgfolder)
    for imgfile in img_list:
        if imgfile[-3:] == 'jpg' or imgfile[-3:] == 'png':
            img = cv2.imread(imgfolder + imgfile)
            sized = cv2.resize(img, (m.width, m.height))
            sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

            start = time.time()
            boxes = do_detect(m, sized, 0.25, 0.45, use_cuda, specialnms)
            finish = time.time()

            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))
            # print("bboxes: ", boxes[0])
            """write predicted bboxes into txt file with absolute coordinate form: [cls, conf, x, y, w, h]"""
            cp_boxes = copy.deepcopy(
                boxes[0])  # [[x1, y1, x2, y2, conf, cls], [], ...]
            img = np.copy(img)
            width = img.shape[1]
            height = img.shape[0]

            # normalised [x1, y1, x2, y2] --> original [x, y, w, h]
            for i in range(len(cp_boxes)):
                cp_boxes[i][0] = (boxes[0][i][0] + boxes[0][i][2]) / 2 * width
                cp_boxes[i][1] = (boxes[0][i][1] + boxes[0][i][3]) / 2 * height
                cp_boxes[i][2] = (boxes[0][i][2] - boxes[0][i][0]) * width
                cp_boxes[i][3] = (boxes[0][i][3] - boxes[0][i][1]) * height

            with open((imgfolder + imgfile[:-4] + '.txt'), 'a+') as a:
                if len(cp_boxes) == 0:
                    a.write("0 0 0 0 0 0")
                else:
                    for j in range(len(cp_boxes)):
                        a.write(str(cp_boxes[j][5]) + ' ' + str(cp_boxes[j][4]) + ' ' \
                              + str(cp_boxes[j][0]) + ' ' + str(cp_boxes[j][1]) + ' ' \
                              + str(cp_boxes[j][2]) + ' ' + str(cp_boxes[j][3]) + '\n')

            plot_boxes_cv2(img,
                           boxes[0],
                           savename='predictions.jpg',
                           class_names=class_names)
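
The per-box arithmetic in the loop above converts normalized corner boxes to absolute center format; as a standalone illustration (not part of the original script), the same conversion can be written as a small helper.

def corners_to_center_abs(box, width, height):
    # box is assumed to be [x1, y1, x2, y2, ...] with normalized coordinates.
    x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
    cx = (x1 + x2) / 2 * width     # absolute center x
    cy = (y1 + y2) / 2 * height    # absolute center y
    w = (x2 - x1) * width          # absolute box width
    h = (y2 - y1) * height         # absolute box height
    return cx, cy, w, h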
Example No. 13
def load_model():
    m = Darknet(CFG)
    m.load_weights(WEIGHTS)

    if use_cuda:
        m.cuda()
    return m
Example No. 14
def detect_cv2_camera(cfgfile, weightfile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture("./test.mp4")
    print("Starting the YOLO loop...")

    while True:
        ret, img = cap.read()
        sized = cv2.resize(img, (m.width, m.height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        start = time.time()
        boxes = do_detect(m, sized, 0.5, num_classes, 0.4, use_cuda)
        finish = time.time()
        print('Predicted in %f seconds.' % (finish - start))

        class_names = load_class_names(namesfile)
        result_img = plot_boxes_cv2(img,
                                    boxes,
                                    savename=None,
                                    class_names=class_names)

        cv2.imshow('Yolo demo', result_img)
        cv2.waitKey(1)

    cap.release()
Example No. 15
def transform_to_onnx(cfgfile, weightfile, batch_size=1, dynamic=False):
    model = Darknet(cfgfile)

    model.print_network()
    model.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    # model.cuda()

    x = torch.randn((batch_size, 3, model.height, model.width),
                    requires_grad=True)  # .cuda()

    if dynamic:

        onnx_file_name = "yolov4_{}_3_{}_{}_dyna.onnx".format(
            batch_size, model.height, model.width)
        input_names = ["input"]
        output_names = ['boxes', 'confs']

        dynamic_axes = {
            "input": {
                0: "batch_size"
            },
            "boxes": {
                0: "batch_size"
            },
            "confs": {
                0: "batch_size"
            }
        }
        # Export the model
        print('Export the onnx model ...')
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=input_names,
                          output_names=output_names,
                          dynamic_axes=dynamic_axes)

        print('Onnx model exporting done')
        return onnx_file_name

    else:
        onnx_file_name = "yolov4_{}_3_{}_{}_static.onnx".format(
            batch_size, model.height, model.width)
        torch.onnx.export(model,
                          x,
                          onnx_file_name,
                          export_params=True,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=['input'],
                          output_names=['boxes', 'confs'],
                          dynamic_axes=None)

        print('Onnx model exporting done')
        return onnx_file_name
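
An optional sanity check on the exported file, not part of the example above: the onnx package (assumed to be installed) can validate the exported graph.

import onnx

def check_exported_model(onnx_file_name):
    onnx_model = onnx.load(onnx_file_name)
    onnx.checker.check_model(onnx_model)  # raises an exception if the graph is invalid
    print('%s passed the ONNX checker' % onnx_file_name)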
Example No. 16
def detect_cv2(cfgfile, weightfile, imgfile, outfile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'D:/work_source/CV_Project/datasets/xi_an_20201125/all/names_xi_an_20201125.txt'
    class_names = load_class_names(namesfile)

    img = cv2.imread(imgfile)
    # print('demo pic size:', img.shape)
    sized = cv2.resize(img, (m.width, m.height))
    # print('demo pic resize to:',sized.shape)
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    boxes = []
    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.4, 0.6, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    plot_boxes_cv2(img, boxes[0], savename=outfile, class_names=class_names)
Example No. 17
def transform_to_onnx(cfgfile, weightfile, batch_size=1):
    model = Darknet(cfgfile)

    model.print_network()
    model.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    # model.cuda()

    x = torch.randn((batch_size, 3, model.height, model.width),
                    requires_grad=True)  # .cuda()

    onnx_file_name = "yolov4_{}_3_{}_{}.onnx".format(batch_size, model.height,
                                                     model.width)

    # Export the model
    print('Export the onnx model ...')
    torch.onnx.export(model,
                      x,
                      onnx_file_name,
                      export_params=True,
                      opset_version=11,
                      do_constant_folding=True,
                      input_names=['input'],
                      output_names=['output'],
                      dynamic_axes=None)

    print('Onnx model exporting done')
    return onnx_file_name
Example No. 18
def detect_cv2(cfgfile, weightfile, imgfile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    cap = cv2.VideoCapture('1.mp4')
    w = int(cap.get(3))
    h = int(cap.get(4))
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('output_1_3.avi', fourcc, 15, (w, h))
    list_file = open('detection_rslt.txt', 'w')
    frame_index = 0
    min_time = 10.0
    max_time_1 = 0.0
    max_time_2 = 0.0
    avg_time = 0.0
    while True:
        frame_index += 1
        start = time.time()
        ret, img = cap.read()
        if not ret:
            break
        sized = cv2.resize(img, (m.width, m.height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        # for i in range(2):
        boxes = do_detect(m, sized, 0.4, 0.6, use_cuda)

        plot_boxes_cv2(img, boxes[0], class_names=class_names, out=out)
        finish = time.time()
        infer_time = finish - start
        min_time = min(min_time, infer_time)
        max_time_2 = max(max_time_2, infer_time)
        max_time_1 = max(max_time_1, infer_time) if max_time_1 != max_time_2 else infer_time
        avg_time += infer_time
        print('{}: Predicted in {} seconds.'.format(imgfile, infer_time))

    cap.release()
    print('min : {}\n'
          'max : {}\n'
          'avg : {}'.format(min_time, max_time_1, avg_time / frame_index))
Example No. 19
def detect_cv2(cfgfile, weightfile, imgfile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = '/home/dreamer/Private/ObjectDetection/yolo-series/darknet-A-version/yolov4-rongwen20201203.names'
    class_names = load_class_names(namesfile)

    img = cv2.imread(imgfile)
    sized = cv2.resize(img, (m.width, m.height))

    #===============================================
    # rh = 608.0
    # rw = 608.0
    # h, w = img.shape[:2]
    # ratio = min(rh / h, rw / w)
    #
    # re_img = cv2.resize(img, (int(w * ratio), int(h * ratio)))
    # pad_board = np.zeros([int(rh), int(rw), 3], np.uint8)
    # if w > h:
    #     pad_board[int(rh / 2 - h * ratio / 2): int(rh / 2 + h * ratio / 2), :] = re_img
    # else:
    #     pad_board[:, int(rw / 2 - w * ratio / 2):int(rw / 2 + w * ratio / 2)] = re_img
    # # pad_board = pad_board.astype(np.float32)
    # # pad_board /= 255.0
    # sized = cv2.cvtColor(pad_board, cv2.COLOR_BGR2RGB)
    # ===============================================

    # img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)
    # img_in = np.expand_dims(img_in, axis=0)
    # img_in /= 255.0
    #

    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.03, 0.45, use_cuda)
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    plot_boxes_cv2(img,
                   boxes[0],
                   savename='predictions.jpg',
                   class_names=class_names)
Example No. 20
def init_darknet(cfgfile, weightfile):
    global m , use_cuda
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()
Example No. 21
def detect_cv2_camera(cfgfile, weightfile):
    import cv2
    m = Darknet(cfgfile)
    # mot_tracker = Sort()

    m.print_network()
    if args.torch:
        m.load_state_dict(torch.load(weightfile))
    else:
        m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture('rtsp://192.168.1.75:8554/mjpeg/1')
    # cap = cv2.VideoCapture("./test.mp4")
    cap.set(3, 1280)
    cap.set(4, 720)
    print("Starting the YOLO loop...")

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    while True:
        ret, img = cap.read()
        sized = cv2.resize(img, (m.width, m.height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
        # piling = Image.fromarray(sized)

        start = time.time()
        boxes = do_detect(m, sized, 0.4, 0.6, use_cuda)
        if boxes is not None:
            # tracked_object = mot_tracker.update(tensorQ)
            finish = time.time()
            print('Predicted in %f seconds.' % (finish - start))
            result_img = plot_boxes_cv2(img,
                                        boxes[0],
                                        savename=None,
                                        class_names=class_names)

        cv2.imshow('Yolo demo', result_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example No. 22
def call_yolov4(cfgfile='../yolov4.cfg',
                weightfile='../yolov4.weights',
                use_cuda=True):
    m = Darknet(cfgfile)
    m.load_weights(weightfile)

    if use_cuda:
        m.cuda().eval()
    else:
        m.eval()

    return m
Example No. 23
class Detector_fields:
    def __init__(self):
        opt = get_config()
        self.model = Darknet(opt.cfg)
        self.model.load_weights(opt.weights)
        self.model.to(opt.device)
        self.class_names = load_class_names(opt.names)
        self.size = (self.model.width, self.model.height)
        self.num_classes = 6
        print(self.class_names)

    def detect(self, img, thresh=0.6):
        res = {}
        resimg = {}
        for x in self.class_names:
            resimg[x] = []
            res[x] = []
        im0 = img.copy()
        size = (img.shape[0], img.shape[1])
        img = cv2.resize(img, self.size)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        boxes = []

        type_obj = []
        score = []
        boxes = do_detect(self.model, img, thresh, self.num_classes, thresh, 1)
        res_box = []
        ims = []
        classes = []
        for box in boxes:
            #if(int(box[6])==2  or int(box[6])==3  or int(box[6])==5  or int(box[6])==7  ): # 2 3 5 7  is vehicle
            if self.class_names[int(box[6])] == "date":
                margin = 0
            elif self.class_names[int(box[6])] == "id":
                margin = 3
                # print("id")
            else:
                margin = 0
            x1 = max(int((box[0] - box[2] / 2.0) * size[1]) - margin, 0)
            y1 = max(int((box[1] - box[3] / 2.0) * size[0]) - margin - 1, 0)
            x2 = min(int((box[0] + box[2] / 2.0) * size[1] + margin),
                     im0.shape[1])
            y2 = min(int((box[1] + box[3] / 2.0) * size[0] + margin),
                     im0.shape[0])
            imm = im0[y1:y2, x1:x2]

            # if(imm.shape[0]>20 and imm.shape[1]>20):
            # res_box.append([x1,y1,x2,y2])
            # ims.append(imm)
            # classes.append(self.class_names[int(box[6])])
            res[self.class_names[int(box[6])]].append([x1, y1, x2, y2])
            resimg[self.class_names[int(box[6])]].append(imm)
        return res, resimg
Example No. 24
def detect_cv2_camera(cfgfile, weightfile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    if args.torch:
        m.load_state_dict(torch.load(weightfile))
    else:
        m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture("./test.mp4")
    cap.set(3, 1280)
    cap.set(4, 720)
    print("Starting the YOLO loop...")

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    while True:
        ret, img = cap.read()
        sized = cv2.resize(img, (m.width, m.height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

        start = time.time()
        boxes = do_detect(m, sized, 0.4, 0.6, use_cuda)
        finish = time.time()
        print('Predicted in %f seconds.' % (finish - start))

        result_img = plot_boxes_cv2(img,
                                    boxes[0],
                                    savename=None,
                                    class_names=class_names)

        cv2.imshow('Yolo demo', result_img)
        cv2.waitKey(1)

    cap.release()
Example No. 25
def detect_cv2(cfgfile, weightfile, imgfile):
    import cv2
    m = Darknet(cfgfile)

    m.print_network()
    if args.torch:
        m.load_state_dict(torch.load(weightfile))
    else:
        m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    while True:
        val = input("\n numero da imagem: ")
        pred_init_time = time.time()
        named_file = "../fotos_geladeira_4/opencv_frame_" + val + ".png"
        print(named_file)
        img = cv2.imread(named_file)
        # img = cv2.imread(imgfile)
        sized = cv2.resize(img, (m.width, m.height))
        sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
        for i in range(2):
            start = time.time()
            boxes = do_detect(m, sized, 0.4, 0.6, use_cuda)
            finish = time.time()
            if i == 1:
                print('%s: Predicted in %f seconds.' % (imgfile,
                                                        (finish - start)))

        plot_boxes_cv2(img,
                       boxes[0],
                       savename='predictions.jpg',
                       class_names=class_names)
        count_total_in_image(boxes[0], class_names)
        print("\n Total inference time {0} seconds".format(time.time() -
                                                           pred_init_time))
Example No. 26
def load_network(cfgfile, weightfile):
    m = Darknet(cfgfile)
    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))
    if use_cuda:
        m.cuda()
    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)
    return m, class_names
Example No. 27
def detect(json_dir, video_dir, save_dir):
    starttime = timeit.default_timer()

    Path(save_dir).mkdir(parents=True, exist_ok=True)

    cfgfile = config['detector']['cfgfile']
    weightfile = config['detector']['weightfile']

    model = Darknet(cfgfile)
    model.load_weights(weightfile)
    model.cuda()

    class_names = config['detector']['originclassnames']
    cam_datas = get_list_data(json_dir)

    for cam_data in cam_datas:
        cam_name = cam_data['camName']
        roi_poly = Polygon(cam_data['shapes'][0]['points'])

        video_path = os.path.join(video_dir, cam_name + '.mp4')
        video_cap = cv2.VideoCapture(video_path)
        num_frames = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))

        imgs = []
        for i in tqdm(range(num_frames),
                      desc='Extracting {}'.format(cam_name)):
            success, img = video_cap.read()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            imgs.append(img)

        boxes = detect_yolo(model, class_names, imgs, cam_name,
                            config['detector']['batchsize'])

        # remove bboxes out of MOI
        if config['remove_not_intersec_moi']:
            boxes = [
                check_intersect_box(box_list, roi_poly) for box_list in boxes
            ]

        if save_dir:
            filepath = os.path.join(save_dir, cam_name)
            boxes = np.array(boxes)
            np.save(filepath, boxes)

    endtime = timeit.default_timer()

    print('Detect time: {} seconds'.format(endtime - starttime))
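
check_intersect_box() and detect_yolo() come from the surrounding project and are not shown here. As a hypothetical sketch only, a shapely-based filter that keeps boxes whose center falls inside the ROI polygon could look like the following (box format assumed to be [x1, y1, x2, y2, ...]).

from shapely.geometry import Point

def check_intersect_box(box_list, roi_poly):
    # Hypothetical re-implementation for illustration; the project's own
    # version may use a different rule (e.g. polygon/box intersection).
    kept = []
    for box in box_list:
        cx = (box[0] + box[2]) / 2
        cy = (box[1] + box[3]) / 2
        if roi_poly.contains(Point(cx, cy)):
            kept.append(box)
    return kept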
Example No. 28
def detect_cv2_img(cfgfile, weightfile, img_file):
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    # print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    img = cv2.imread(img_file)
    sized = cv2.resize(img, (m.width, m.height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
    boxes = do_detect(m, sized, 0.4, 0.6, use_cuda)
    boxes_darknet_format = to_darknet_format(boxes, img.shape[0], img.shape[1])
    print(boxes[0])
    print(boxes_darknet_format)
    return boxes[0]
Example No. 29
def detect_cv2(cfgfile, weightfile, img):
    m = Darknet(cfgfile)

    m.print_network()
    m.load_weights(weightfile)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)

    sized = cv2.resize(img, (m.width, m.height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    start = time.time()

    boxes = do_detect(m, sized, 0.4, 0.6, use_cuda)

    objects = []
    for i, box in enumerate(boxes[0]):
        dic = {}
        dic['kind'] = class_names[box[6]]
        dic['confidence'] = box[4]
        dic.update(get_bbox_coordinates(img, box))

        cropped_img = crop_box(img, box)
        dic['feature'] = extract_feature(cropped_img)
        objects.append(dic)

    finish = time.time()
    print('Predicted in %f seconds.' % (finish - start))

    plot_boxes_cv2(img, boxes[0], savename='predictions.jpg', class_names=class_names)
    return({'objects': objects})
Example No. 30
def detect_cv2(cfgfile, weightfile, imgfile):
    import cv2
    m = Darknet(cfgfile)  # create the Darknet model object m
    m.print_network()  # print the network structure
    m.load_weights(weightfile)  # load the network weights (implemented in tools/darknet2pytorch.py)
    print('Loading weights from %s... Done!' % (weightfile))

    if use_cuda:
        m.cuda()  # if CUDA is enabled, move the model to GPU memory

    num_classes = m.num_classes
    if num_classes == 20:
        namesfile = 'data/voc.names'
    elif num_classes == 80:
        namesfile = 'data/coco.names'
    else:
        namesfile = 'data/x.names'
    class_names = load_class_names(namesfile)  # load the class names

    # If opening the image with PIL instead:
    # img = Image.open(imgfile).convert('RGB')
    # sized = img.resize((m.width, m.height))
    img = cv2.imread(imgfile)
    cv2.imwrite('./debug/img.jpg', img)
    # print(m.width, m.height)        # (608, 608)
    sized = cv2.resize(img, (m.width, m.height))
    sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)

    for i in range(2):
        start = time.time()
        boxes = do_detect(m, sized, 0.4, 0.6,
                          use_cuda)  # run detection; the boxes returned are the final post-NMS detections
        finish = time.time()
        if i == 1:
            print('%s: Predicted in %f seconds.' % (imgfile, (finish - start)))

    plot_boxes_cv2(img,
                   boxes[0],
                   savename='./debug/predictions.jpg',
                   class_names=class_names)  # raw