Example #1
    # pose module
    print('Loading pose model')
    sys.stdout.flush()
    pose_model = inference.pose_detection()

    print('Starting webcam demo, press Ctrl + C to terminate...')
    sys.stdout.flush()
    im_names_desc = loop()  # tqdm(loop())
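    # loop() presumably yields an endless frame counter; wrap it in tqdm (as in Example #2) for a progress bar.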
    for i in im_names_desc:
        try:
            start_time = getTime()
            begin = time.time()

            # ##################################  Get Frames  ####################################
            print('\n******************* Frame:%d ********************' % i)
            img_0 = fvs_0.read()
            img_1 = fvs_1.read()
            # resize to 1/2 for the visualization display
            fvis_0 = resize_vis(img_0)
            fvis_1 = resize_vis(img_1)
            # resize to 1/4 for YOLO detection
            frame_0 = resize_yolo(img_0)
            frame_1 = resize_yolo(img_1)
            single_height = frame_0.shape[0]  # frame_0.shape == (432, 768, 3)

            # ##################################  Pre Process  ####################################
            img, orig_img, dim, im_dim_list = preprocess(frame_0, frame_1)
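            # img: resized network input for the detector; orig_img: the two frames stacked vertically;
            # dim / im_dim_list: original (w, h), used later to rescale detections (cf. prep_frame in Example #2)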

            # ##################################  Detection  ####################################
            with torch.no_grad():
Example #2
        pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
    else:
        pose_model = InferenNet(4 * 1 + 1, pose_dataset)
    pose_model.cuda()
    pose_model.eval()

    # Running time of each module ('ld' load, 'dt' detection, 'dn' detection NMS, 'pt' pose, 'pn' pose post-processing)
    runtime_profile = {'ld': [], 'dt': [], 'dn': [], 'pt': [], 'pn': []}

    print('Starting webcam demo, press Ctrl + C to terminate...')
    sys.stdout.flush()
    im_names_desc = tqdm(loop())
    for i in im_names_desc:
        try:
            start_time = getTime()

            (img, orig_img, inp, im_dim_list) = fvs.read()
            ckpt_time, load_time = getTime(start_time)
            runtime_profile['ld'].append(load_time)
            with torch.no_grad():
                # Human Detection
                img = Variable(img).cuda()
                im_dim_list = im_dim_list.cuda()

                prediction = det_model(img, CUDA=True)
                ckpt_time, det_time = getTime(ckpt_time)
                runtime_profile['dt'].append(det_time)
                # NMS process
                dets = dynamic_write_results(prediction,
                                             opt.confidence,
                                             opt.num_classes,
                                             nms=True,
    pose_model.eval()

    # reid module
    # reid_model = reid_interface.ReID(is_folder=False)

    # Running time of each module
    runtime_profile = {'ld': [], 'dt': [], 'dn': [], 'pt': [], 'pn': []}

    print('Starting webcam demo, press Ctrl + C to terminate...')
    sys.stdout.flush()
    im_names_desc = tqdm(loop())
    for i in im_names_desc:
        try:
            begin = time.time()
            start_time = getTime()
            frame_0 = fvs_0.read()
            frame_1 = fvs_1.read()
            single_height = frame_0.shape[0]
            print(frame_0.shape)  # (432, 768, 3)

            # pre-process
            frame = np.concatenate([frame_0, frame_1], 0)
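            # Stacking the two camera frames vertically lets a single detector pass cover both
            # views; single_height marks the boundary for splitting results back per camera.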
            inp_dim = int(args.inp_dim)  # default=608
            img, orig_img, dim = prep_frame(frame, inp_dim)
            # print('img:', img.shape)            # torch.Size([1, 3, 608, 608])
            # print('orig_img:', orig_img.shape)  # (864, 768, 3)
            # print('dim:', dim)                  # (768, 864)

            inp = im_to_torch(orig_img)
            im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
            # print(im_dim_list) # tensor([[768., 864., 768., 864.]])
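
A minimal sketch of the preprocess() helper called in Example #1, assembled from the pre-processing steps shown in Example #2. prep_frame is the repository's own utility used above (import path omitted here), and the 608 input size follows the args.inp_dim default noted in the comment; the exact signature is an assumption.

import numpy as np
import torch


def preprocess(frame_0, frame_1, inp_dim=608):
    # Stack the two camera frames vertically so one detector pass covers both views.
    frame = np.concatenate([frame_0, frame_1], 0)
    # Letterbox/resize to the square network input; keep the stacked original and its (w, h).
    img, orig_img, dim = prep_frame(frame, inp_dim)
    # [[w, h, w, h]] -- used later to rescale detections back to the original resolution.
    im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
    return img, orig_img, dim, im_dim_list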