Example #1
0
def main():
    """Run single-image detection and show each visualization stage.

    Reads the image named by the ``image`` CLI argument from the
    ``inputs/`` folder, runs the detector, then draws the object grid,
    class grid, raw boxes, and the final post-processed results.

    Raises:
        FileNotFoundError: if the input image cannot be read.
    """
    # parse arguments
    args = cli()

    # setup processor and visualizer
    processor = Processor(model=args['model'])
    visualizer = Visualizer()

    # fetch input
    print('image arg', args['image'])
    img = cv2.imread('inputs/{}'.format(args['image']))
    # cv2.imread returns None (instead of raising) on a missing or
    # unreadable file; fail fast with a clear message rather than
    # crashing later inside the detector.
    if img is None:
        raise FileNotFoundError(
            "could not read image 'inputs/{}'".format(args['image']))

    # inference on the original image; resize only for visualization
    output = processor.detect(img)
    img = cv2.resize(img, (640, 640))

    # object visualization (objectness threshold 0.1)
    object_grids = processor.extract_object_grids(output)
    visualizer.draw_object_grid(img, object_grids, 0.1)

    # class visualization (class-score threshold 0.01)
    class_grids = processor.extract_class_grids(output)
    visualizer.draw_class_grid(img, class_grids, 0.01)

    # bounding box visualization
    boxes = processor.extract_boxes(output)
    visualizer.draw_boxes(img, boxes)

    # final results
    boxes, confs, classes = processor.post_process(output)
    visualizer.draw_results(img, boxes, confs, classes)
Example #2
0
def main():
    # parse arguments
    args = cli()
    # setup processor and visualizer
    processor = Processor(model=args['model'])
    visualizer = Visualizer()

    # fetch input
    print('image arg', args['image'])
    # img = cv2.imread('inputs/{}'.format(args['image']))
    input_image_paths = []
    folder_path = args['image']
    if os.path.isdir(folder_path):
        ls = os.listdir(folder_path)
        for file_name in sorted(ls, key=lambda x: str(x.split('.jpg')[0])):
            input_image_paths.append(os.path.join(folder_path, file_name))
    for input_image_path in input_image_paths:
        img = cv2.imread(input_image_path)

        # inference
        output = processor.detect(img)

        # final results
        boxes, confs, classes = processor.post_process(output, conf_thres=0.3, iou_thres=0.4, origin_w=img.shape[1], origin_h=img.shape[0])
        visualizer.draw_results(img, boxes, confs, classes)
Example #3
0
def main():
    """Continuously run detection on webcam frames and display results.

    Captures from the default camera (device 0), detects on each raw
    frame, and draws the post-processed results on a 640x640 view.
    Stops cleanly when the camera read fails; the capture handle is
    always released.
    """
    # parse arguments
    args = cli()

    # setup processor and visualizer
    processor = Processor(model=args['model'])
    visualizer = Visualizer()

    print('image arg', args['image'])

    # capture from the default camera
    cap = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # camera unplugged / stream ended: stop instead of
                # passing an invalid (None) frame to the detector
                break

            # inference on the raw frame; resize only for display
            output = processor.detect(frame)
            img = cv2.resize(frame, (640, 640))

            # final results
            boxes, confs, classes = processor.post_process(output)
            visualizer.draw_results(img, boxes, confs, classes)
    finally:
        # release the capture even if the detector raises
        cap.release()
Example #4
0
        help='whether to archive picture and discovery in archve folder')
    # NOTE(review): the start of this argument-parser setup (and the
    # enclosing function, if any) is outside this view; the lines below
    # continue it.
    parser.add_argument('--device',
                        default='',
                        help='device id (i.e. 0 or 0,1) or cpu')
    # NOTE(review): type=bool on argparse is misleading — bool('False')
    # is True, so any non-empty value passed on the CLI enables fake
    # mode; only the default (no flag) keeps it off.
    parser.add_argument('--fake',
                        default=False,
                        type=bool,
                        help='whether to simulate zone discovery')
    parser.add_argument('--language',
                        default='eng',
                        type=str,
                        help='detection language')
    opt = parser.parse_args()

    # build the detector from the parsed options
    processor = Processor(opt.weights,
                          opt.info_path,
                          opt.device,
                          fake_mode=opt.fake,
                          archive_mode=opt.archive)

    # poll loop: detect, wait five seconds, prune old outputs — runs forever
    while True:

        # disable gradient tracking: inference only, saves memory
        with torch.no_grad():
            processor.detect(opt.img_path)

        # wait
        time.sleep(5)

        # delete old processed files
        delete_old_files(opt.info_path)
Example #5
0
def main():
    """Classify video frames and display live predictions in a window.

    Opens ``yes_and_no.mp4``, runs the processor on every other frame,
    and shows a mirrored 640x480 view with the predicted class label and
    throughput overlaid below it.  Press q/ESC to quit, f/F to toggle
    full screen.  Exits cleanly when the video ends.
    """
    # parse arguments
    args = cli()

    # setup processor
    processor = Processor(model=args['model'])

    # class index -> label (processor.detect returns an index into this list)
    categories = [
        "no_violence",  # 0
        "yes_violence"  # 1
    ]

    print("Open camera...")
    cap = cv2.VideoCapture('yes_and_no.mp4')
    print(cap)

    # display window setup
    full_screen = False
    cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(WINDOW_NAME, 640, 480)
    cv2.moveWindow(WINDOW_NAME, 0, 0)
    cv2.setWindowTitle(WINDOW_NAME, WINDOW_NAME)

    i_frame = -1
    idx = 0               # last predicted class index
    current_time = 1e-9   # last inference duration; non-zero to avoid 1/0
    print("Ready!")
    while True:
        i_frame += 1
        ret, img = cap.read()
        if not ret:
            # end of stream / read failure: stop instead of crashing on
            # img.shape when the frame is None
            break
        print(img.shape)

        # run inference only on every other frame to keep the UI responsive
        if i_frame % 2 == 0:
            t1 = time.time()
            idx = processor.detect(img)
            print("cls:", categories[idx])
            t2 = time.time()
            # clamp to a tiny positive value: on coarse timers t2 can
            # equal t1, which would make 1/current_time divide by zero
            current_time = max(t2 - t1, 1e-9)
            print("current_time", current_time)

        img = cv2.resize(img, (640, 480))
        img = img[:, ::-1]  # mirror horizontally for a selfie-style view
        height, width, _ = img.shape
        # white strip below the video that carries the text overlay
        label = np.zeros([height // 10, width, 3]).astype('uint8') + 255

        cv2.putText(label, 'Prediction: ' + categories[idx],
                    (0, int(height / 16)), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 0, 0), 2)
        cv2.putText(label, '{:.1f} Vid/s'.format(1 / current_time),
                    (width - 170, int(height / 16)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.7, (0, 0, 0), 2)

        img = np.concatenate((img, label), axis=0)
        cv2.imshow(WINDOW_NAME, img)

        key = cv2.waitKey(1)
        if key & 0xFF == ord('q') or key == 27:  # exit
            break
        elif key == ord('F') or key == ord('f'):  # toggle full screen
            print('Changing full screen option!')
            full_screen = not full_screen
            if full_screen:
                print('Setting FS!!!')
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_FULLSCREEN)
            else:
                cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN,
                                      cv2.WINDOW_NORMAL)

    # release resources on any exit path out of the loop
    cap.release()
    cv2.destroyAllWindows()