Example #1
def video(args):
    config = load_config(args)
    model = create_model(args, config)

    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture("/home/fabian/Documents/dataset/videos/test4.mp4")
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    fps_time = 0
    degree = 0
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    while cap.isOpened():
        degree += 5
        degree = degree % 360
        ret_val, image = cap.read()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, model.insize)
        with chainer.using_config('autotune', True):
            humans = estimate(model,
                              image.transpose(2, 0, 1).astype(np.float32),
                              detection_thresh, min_num_keypoints)
        pilImg = Image.fromarray(image)
        pilImg = draw_humans(
            model.keypoint_names,
            model.edges,
            pilImg,
            humans,
            mask=mask.rotate(degree) if mask else None,
            visbbox=config.getboolean('predict', 'visbbox'),
        )
        img_with_humans = cv2.cvtColor(np.asarray(pilImg), cv2.COLOR_RGB2BGR)
        msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
        msg += ' ' + config.get('model_param', 'model_name')
        cv2.putText(img_with_humans,
                    'FPS: % f' % (1.0 / (time.time() - fps_time)), (10, 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        # img_with_humans = cv2.resize(img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
        img_with_humans = cv2.resize(
            img_with_humans, (1 * model.insize[0], 1 * model.insize[1]))
        cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
        fps_time = time.time()
        # press Esc to exit
        if cv2.waitKey(1) == 27:
            break
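
The webcam loop above can be reduced to a one-shot, single-image run for quick testing. The sketch below is only an illustration built from the same calls used in Example #1: load_config, create_model, estimate and draw_humans are assumed to be importable from the project as in the examples, and the input/output file names are hypothetical placeholders.

import cv2
import numpy as np
import chainer
from PIL import Image

def image_demo(args):
    # same setup as Example #1; load_config/create_model come from the project
    config = load_config(args)
    model = create_model(args, config)

    image = cv2.imread('frame.png')                     # hypothetical input path
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)      # OpenCV loads BGR
    image = cv2.resize(image, model.insize)
    with chainer.using_config('autotune', True):
        humans = estimate(model,
                          image.transpose(2, 0, 1).astype(np.float32),
                          config.getfloat('predict', 'detection_thresh'),
                          config.getint('predict', 'min_num_keypoints'))
    result = draw_humans(model.keypoint_names, model.edges,
                         Image.fromarray(image), humans, mask=None)
    result.save('result.png')                           # hypothetical output path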
Example #2
def compute_accuracy(t0, t1, x, y):
    predicted = []
    for i in range(len(x)):
        predicted.append(estimate(t0, t1, x[i]))
    delta = y - predicted
    mse = mean(delta**2)
    mae = mean(abs(delta))
    r2 = 1 - (sum(delta**2) / sum((y - mean(y))**2))
    print(f"""Here are the various metrics for accuracy:
    \tMAE (Mean Absolute Error) = {round(mae, 2)}
    \tMSE (Mean Square Error) = {round(mse, 2)}
    \tRMSE (Root Mean Squared Error) = {round(sqrt(mse), 2)}
    \tR-squared (Coefficient of determination) = {round(r2, 2)}""")
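
compute_accuracy only prints its metrics, so the quickest check is a tiny synthetic dataset. The sketch below is assumption-heavy: it takes x and y to be NumPy arrays (the vectorised delta arithmetic requires that), takes mean and sqrt in the defining module to be NumPy's, and takes estimate(t0, t1, x) to be the linear hypothesis t0 + t1 * x, which the original code does not show.

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([2.1, 3.9, 6.2, 8.1])   # roughly y = 2 * x

# With t0 = 0 and t1 = 2 the fit is close, so MAE/MSE/RMSE should be small
# and R-squared should come out near 1.
compute_accuracy(0.0, 2.0, x, y)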
Example #3
def main():
    config = configparser.ConfigParser()
    config.read('config.ini', 'UTF-8')

    # load model
    model = create_model(config)

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print('Error opening video stream or file')
        exit(1)

    logger.info('camera will capture {} FPS'.format(cap.get(cv2.CAP_PROP_FPS)))
    if os.path.exists('mask.png'):
        mask = Image.open('mask.png')
        mask = mask.resize((200, 200))
    else:
        mask = None

    fps_time = 0
    degree = 0
    while cap.isOpened():
        degree += 5
        degree = degree % 360
        ret_val, image = cap.read()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, model.insize)
        with chainer.using_config('autotune', True):
            humans = estimate(model,
                              image.transpose(2, 0, 1).astype(np.float32))
        pilImg = Image.fromarray(image)
        pilImg = draw_humans(
            model.keypoint_names,
            model.edges,
            pilImg,
            humans,
            mask=mask.rotate(degree) if mask else None
        )
        img_with_humans = cv2.cvtColor(np.asarray(pilImg), cv2.COLOR_RGB2BGR)
        msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
        msg += ' ' + config.get('model_param', 'model_name')
        cv2.putText(img_with_humans, 'FPS: % f' % (1.0 / (time.time() - fps_time)),
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        img_with_humans = cv2.resize(img_with_humans, (3 * model.insize[0], 3 * model.insize[1]))
        cv2.imshow('Pose Proposal Network' + msg, img_with_humans)
        fps_time = time.time()
        # press Esc to exit
        if cv2.waitKey(1) == 27:
            break
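
Examples #1 and #3 pull their settings from config.ini via configparser. The snippet below is only a sketch of the keys the examples actually read ([model_param] model_name and [predict] detection_thresh / min_num_keypoints / visbbox); the values are placeholders, and the real project file may well contain additional sections.

import configparser

# Write a minimal config.ini containing only the keys the examples access.
config = configparser.ConfigParser()
config['model_param'] = {'model_name': 'ppn'}          # placeholder name
config['predict'] = {
    'detection_thresh': '0.15',                        # placeholder value
    'min_num_keypoints': '1',                          # placeholder value
    'visbbox': 'false',
}
with open('config.ini', 'w') as f:
    config.write(f)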
Example #4
    def video_handle(video_file, video_output):
        global FPS_list
        # Video reader
        cam = cv2.VideoCapture(video_file)
        input_fps = cam.get(cv2.CAP_PROP_FPS)
        ret_val, input_image = cam.read()
        video_length = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))

        ending_frame = video_length

        # Video writer
        frame_rate_ratio = 1
        output_fps = input_fps / frame_rate_ratio
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        # the last two arguments give the size of the frames written out
        out = cv2.VideoWriter(video_output, fourcc, output_fps, (672, 672))

        i = 0  # default is 0
        while cam.isOpened() and ret_val and i < ending_frame:
            if i % frame_rate_ratio == 0:

                tic = time.time()
                mask = None

                input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
                input_image = cv2.resize(input_image, model.insize)
                humans = estimate(
                    model,
                    input_image.transpose(2, 0, 1).astype(np.float32))
                pilImg = Image.fromarray(input_image)
                pilImg = draw_humans(
                    model.keypoint_names,
                    model.edges,
                    pilImg,
                    humans,
                    mask=mask.rotate(degree) if mask else None)
                img_with_humans = cv2.cvtColor(np.asarray(pilImg),
                                               cv2.COLOR_RGB2BGR)
                msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
                msg += ' ' + config.get('model_param', 'model_name')
                FPS = round(1.0 / (time.time() - tic), 2)
                FPS_list.append(FPS)
                cv2.putText(img_with_humans,
                            'FPS: % f' % (1.0 / (time.time() - tic)), (10, 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                img_with_humans = cv2.resize(
                    img_with_humans,
                    (3 * model.insize[0], 3 * model.insize[1]))
                #  cv2.imshow('Pose Proposal Network' + msg, img_with_humans)

                print('Processing frame: {}/{}'.format(
                    i, video_length / frame_rate_ratio))
                toc = time.time()
                print('processing time is %.5f' % (toc - tic))

                out.write(img_with_humans)
            # input_image is updated here on every iteration
            ret_val, input_image = cam.read()
            i += 1
        # compute average FPS
        average_fps = sum(FPS_list) / len(FPS_list)
        print('total {} frame, {} frame per second\n\n\n'.format(
            len(FPS_list), round(average_fps, 1)))
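
video_handle relies on surrounding state it does not create itself: a module-level model and config, plus the global FPS_list it appends to. A minimal call sketch, with hypothetical file paths:

FPS_list = []                              # must exist before the call
video_handle('input.mp4', 'annotated.mp4') # hypothetical paths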
Example #5
def t1_sigma(t0, t1, x, y, size):
    t1_sum = 0
    for i in range(size):
        t1_sum += (estimate(t0, t1, x[i]) - y[i]) * x[i]
    return t1_sum
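
t1_sigma accumulates the slope gradient term of the squared-error cost, sum over i of (estimate(t0, t1, x[i]) - y[i]) * x[i]. A batch gradient-descent loop would pair it with a matching intercept term. The sketch below is an assumption: t0_sigma, the learning rate and epoch count are hypothetical, and estimate is assumed to be the linear hypothesis t0 + t1 * x as the gradient form suggests.

def t0_sigma(t0, t1, x, y, size):
    # hypothetical intercept gradient term: sum of residuals
    return sum(estimate(t0, t1, x[i]) - y[i] for i in range(size))

def gradient_descent(x, y, learning_rate=0.01, epochs=1000):
    t0, t1 = 0.0, 0.0
    m = len(x)
    for _ in range(epochs):
        # evaluate both gradient terms with the current parameters,
        # then update t0 and t1 together
        grad0 = t0_sigma(t0, t1, x, y, m) / m
        grad1 = t1_sigma(t0, t1, x, y, m) / m
        t0 -= learning_rate * grad0
        t1 -= learning_rate * grad1
    return t0, t1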
Example #6
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # the last two arguments give the size of the frames written out
    out = cv2.VideoWriter(video_output, fourcc, output_fps, (672, 672))

    i = 0  # default is 0
    while cam.isOpened() and ret_val and i < ending_frame:
        # process only one frame every frame_rate_ratio frames
        if i % frame_rate_ratio == 0:

            tic = time.time()
            mask = None

            input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
            input_image = cv2.resize(input_image, model.insize)
            humans = estimate(
                model,
                input_image.transpose(2, 0, 1).astype(np.float32))
            pilImg = Image.fromarray(input_image)
            pilImg = draw_humans(model.keypoint_names,
                                 model.edges,
                                 pilImg,
                                 humans,
                                 mask=mask.rotate(degree) if mask else None)
            img_with_humans = cv2.cvtColor(np.asarray(pilImg),
                                           cv2.COLOR_RGB2BGR)
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + config.get('model_param', 'model_name')
            cv2.putText(img_with_humans,
                        'FPS: % f' % (1.0 / (time.time() - tic)), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            img_with_humans = cv2.resize(
                img_with_humans,
                (3 * model.insize[0], 3 * model.insize[1]))