Example #1
def highlightPerson(frame, personMask, cx, cy, cropLength, frameBlurDarkVid):
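    """Darken and blur the background around the person mask, write the
    composited frame to the frameBlurDarkVid writer, and return a crop of
    side cropLength centred on (cx, cy). Assumes module-level imports of
    cv2, numpy as np, and the project's helper_funcs."""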
    # Dilate the mask
    kernel = np.ones((3, 3), np.uint8)
    personMask = cv2.dilate(personMask, kernel, iterations=3)
    # personMask = cv2.GaussianBlur(personMask, (15, 15), 0)
    # cv2.imshow("personMask", cv2.resize(personMask, (resizeWidth, resizeHeight)))

    # Darken the background and blur it
    percDarker = 0.6  # 60% darker
    darkBg = cv2.addWeighted(frame, 1 - percDarker, np.zeros_like(frame),
                             percDarker, 0)
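    # Blur twice for a stronger smoothing effect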
    darkBg = cv2.GaussianBlur(darkBg, (15, 15), 0)
    darkBg = cv2.GaussianBlur(darkBg, (15, 15), 0)
    # Cut a person-shaped hole in the background
    darkBgHole = cv2.bitwise_and(darkBg, darkBg, mask=255 - personMask)

    # darkBgHole = alphaMask(darkBg, 255-personMask)
    # cv2.imshow('darkBg', cv2.resize(darkBg, (resizeWidth, resizeHeight)))
    # cv2.imshow('darkBgHole', cv2.resize(darkBgHole, (resizeWidth, resizeHeight)))

    # Fill the person-shaped hole with the person pixels from the frame
    personRevealed = cv2.bitwise_and(frame, frame, mask=personMask)
    # personRevealed = alphaMask(frameNoEllipse, personMask)

    # Add the two together
    personRevealed = cv2.add(darkBgHole, personRevealed)

    frameBlurDarkVid.write(personRevealed)

    # Get boundary
    x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
        frame.shape[0], frame.shape[1], cx, cy, cropLength)
    return personRevealed[y1:y2, x1:x2]
Example #2
def bounce_to_gif(db, bounce):
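    """Render the frames of a bounce to an animated GIF (using every other
    frame) and save a JPEG still of the apex frame next to it."""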
    from helpers.consts import bouncesRootPath
    gifFilepath = bouncesRootPath + '{}.gif'.format(bounce.id)
    if os.path.exists(gifFilepath):
        print("Image exists: {}".format(gifFilepath))
        return

    import imageio
    routine = bounce.routine

    cap = helper_funcs.open_video(routine.path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, bounce.start_frame)
    peak = bounce.apex_frame
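    # Index into images[] of the apex frame, used for the JPEG still below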
    peaksIndex = 0

    images = []
    while True:
        _ret, frame = cap.read()
        if not _ret:
            break

        if int(cap.get(cv2.CAP_PROP_POS_FRAMES)) >= bounce.end_frame:
            break

        if peak == int(cap.get(cv2.CAP_PROP_POS_FRAMES)):
            peaksIndex = len(images)

        try:
            frame_data = db.query(Frame).filter_by(
                routine_id=routine.id,
                frame_num=cap.get(cv2.CAP_PROP_POS_FRAMES)).one()
        except NoResultFound:
            continue

        cx = frame_data.center_pt_x
        cy = frame_data.center_pt_y

        x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
            routine.video_height, routine.video_width, cx, cy,
            routine.crop_length)
        if not frame_data.pose:
            continue
        pose = np.array(json.loads(frame_data.pose))

        frameCropped = frame[y1:y2, x1:x2]
        frameCropped = cv2.resize(frameCropped, (256, 256))
        # cv2.putText(frameCropped, '{}'.format(prevBounceName), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
        cv2.putText(frameCropped, '{}'.format(frame_data.frame_num), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
        frameCropped = _draw_pose_on_frame(pose, frameCropped)

        images.append(cv2.cvtColor(frameCropped, cv2.COLOR_BGR2RGB))

    print("Writing to: {}".format(gifFilepath))
    imageio.mimwrite(gifFilepath, images[::2])

    if peaksIndex == 0:
        peaksIndex = int(len(images) / 2)
    jpgFilepath = bouncesRootPath + '{}.jpg'.format(bounce.id)
    imageio.imwrite(jpgFilepath, images[peaksIndex])
Example #3
def get_single_mask(db, routine):
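    """Save the cropped person mask of frame 280 of a routine as a PNG
    figure for the thesis write-up."""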
    import numpy as np
    import json
    frame_data = db.query(Frame).filter(Frame.routine_id == routine.id,
                                        Frame.frame_num == 280).one()
    personMasks = helper_funcs.load_zipped_pickle(routine.personMasksPath())
    # mask = cv2.cvtColor(np.array(json.loads(personMasks[280])), cv2.COLOR_GRAY2BGR)
    mask = np.array(json.loads(personMasks[280]), dtype=np.uint8)
    cx = frame_data.center_pt_x
    cy = frame_data.center_pt_y
    x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
        routine.video_height, routine.video_width, cx, cy, routine.crop_length)
    frameCropped = mask[y1:y2, x1:x2]
    frameCropped = cv2.resize(frameCropped, (256, 256))
    cv2.imwrite(consts.thesisImgPath + "save_frames_mask.png", frameCropped)
Example #4
def save_cropped_frames(db, routine, frames, suffix=None):
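    """Crop each tracked, non-touching frame of a routine around the
    performer and write the 256x256 crops to an AVI (PNG export of each
    frame is left commented out below)."""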
    routineDirPath = routine.getAsDirPath(suffix, create=True)

    # plt.figure()
    position = np.array([routine.video_height - frame_data.center_pt_y for frame_data in frames])
    scaleWithPerson = False
    # scaleWithPerson = frames[0].hull_max_length is not None
    cropLengths = []
    cropLength = 0
    if scaleWithPerson:  # hull length
        # For frames where the performer is below the median height, use a
        # fixed crop of 1.1x the average hull length
        cropLengths = np.array([frame_data.hull_max_length for frame_data in frames])

        cropLengths[np.nonzero(position < np.median(position))] = int(np.average(cropLengths) * 1.1)
        # plt.plot(cropLengths, label="Hull Length", color="blue")
        # # plt.axhline(np.average(cropLengths), label="Average Length", color="blue")
        # # plt.axhline(np.median(cropLengths), label="Med Length", color="purple")
    else:  # Ellipse lengths
        hullLengths = [frame_data.hull_max_length for frame_data in frames]
        # plt.plot(hullLengths, label="Hull Length", color="blue")

        cropLength = helper_funcs.get_crop_length(hullLengths)
        routine.crop_length = cropLength
        db.commit()

        # # plt.axhline(np.average(hullLengths), label="Average Length", color="blue")
        # plt.axhline(cropLength, label="Percentile", color="blue")
        # plt.axhline(routine.crop_length, label="routine.crop_length", color="orange")

    # plt.plot(position, label="Position", color="green")
    # plt.xlabel("Time (s)")
    # plt.legend(loc="best")
    # plt.show(block=False)

    trampolineTouches = np.array([frame.trampoline_touch for frame in routine.frames])
    trampolineTouches = helper_funcs.trim_touches_by_2(trampolineTouches)

    personMasks = helper_funcs.load_zipped_pickle(routine.personMasksPath())

    # Create videos
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'DIB ')
    frameCroppedVid = cv2.VideoWriter(consts.videosRootPath + 'savedFrames.avi', fourcc, 30.0, (256, 256))

    cap = helper_funcs.open_video(routine.path)
    frame = []
    for i, frame_data in enumerate(frames):
        # ignore frames where trampoline is touched
        if trampolineTouches[i] == 1:
            continue
        # read ahead in the video, skipping frames that weren't tracked
        while frame_data.frame_num != cap.get(cv2.CAP_PROP_POS_FRAMES):
            ret, frame = cap.read()
            if not ret:
                print('Video ended before reaching frame {}'.format(frame_data.frame_num))
                return

        cx = frame_data.center_pt_x
        cy = frame_data.center_pt_y
        # Toggle: darken and blur the background around the person (True) or
        # keep the original background (False)
        if True:
            frameCropped = track.highlightPerson(frame, np.array(json.loads(personMasks[frame_data.frame_num]), dtype=np.uint8), cx, cy, cropLength)
        else:
            x1, x2, y1, y2 = helper_funcs.crop_points_constrained(routine.video_height, routine.video_width, cx, cy, cropLength)
            frameCropped = frame[y1:y2, x1:x2]
        frameCropped = cv2.resize(frameCropped, (256, 256))
        frameCroppedVid.write(frameCropped)

        # cv2.imshow('Track ', frameCropped)
        # k = cv2.waitKey(50) & 0xff

        imgName = routineDirPath + "frame_{0:04}.png".format(frame_data.frame_num)
        # print("Writing frame to {}".format(imgName))
        # cv2.imwrite(imgName, frameCropped)

    # Done
    frameCroppedVid.release()
    cap.release()
    print("Done saving frames")
Example #5
def play_frames(db,
                routine,
                start_frame=1,
                end_frame=-1,
                show_pose=True,
                show_full=False):
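    """Play back a routine with pose overlays.

    Keys: k play/pause, j/l step back/forward, ,/. change speed,
    0-9 jump to that tenth of the video, Enter close, q/ESC quit."""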
    # temp
    saveReportImages = False

    waitTime = 40
    goOneFrame = False
    paused = False
    prevBounceName = ''

    cap = helper_funcs.open_video(routine.path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    if end_frame == -1:
        end_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)

    # f, axarr = plt.subplots(12, sharex=True)
    # f.canvas.set_window_title(routine.prettyName())

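    # Define the codec and create VideoWriter object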
    fourcc = cv2.VideoWriter_fourcc(*'DIB ')
    frameCroppedVid = cv2.VideoWriter(
        consts.videosRootPath + 'posedCropped.avi', fourcc, 30.0, (256, 256))
    while True:
        if goOneFrame or not paused:

            _ret, frame = cap.read()
            if not _ret:
                break

            # Stop at the end frame (swap the break for the commented seek
            # below to loop forever)
            if cap.get(cv2.CAP_PROP_POS_FRAMES) >= end_frame:
                break
                # cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

            try:
                frame_data = db.query(Frame).filter_by(
                    routine_id=routine.id,
                    frame_num=cap.get(cv2.CAP_PROP_POS_FRAMES)).one()
            except NoResultFound:
                continue

            thisBounce = frame_data.bounce
            if thisBounce and prevBounceName != thisBounce.skill_name:
                # plot_frame_angles(thisBounce.skill_name, thisBounce.getFrames(db), axarr)
                prevBounceName = thisBounce.skill_name

            cx = frame_data.center_pt_x
            cy = frame_data.center_pt_y

            x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
                routine.video_height, routine.video_width, cx, cy,
                routine.crop_length)

            if frame_data.pose is not None:
                pose = np.array(json.loads(frame_data.pose))
                # Show full frame
                if show_full:
                    for p_idx in range(14):
                        pose_x = int((cx - routine.padding) + pose[0, p_idx])
                        pose_y = int((cy - routine.padding) + pose[1, p_idx])
                        color = consts.poseColors[
                            calc_angles.pose_aliai['hourglass'][p_idx]][1]
                        cv2.circle(frame, (pose_x, pose_y),
                                   5,
                                   color,
                                   thickness=cv2.FILLED)
                    cv2.putText(frame, '{}'.format(frame_data.frame_num),
                                (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                (255, 255, 255))
                    cv2.imshow('HG Smooth', frame)

                # Show cropped
                else:
                    frameCropped = frame[y1:y2, x1:x2]
                    frameCropped = cv2.resize(frameCropped, (256, 256))
                    frameCropped = _draw_pose_on_frame(pose, frameCropped)
                    if saveReportImages:  # save this frame as an image for the report
                        cv2.imwrite(consts.thesisImgPath + "viz_pose.png",
                                    frameCropped)
                        print("wait")
                    # cv2.putText(frameCropped, '{}'.format(prevBounceName), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                    # cv2.putText(frameCropped, '{}'.format(frame_data.frame_num), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                    frameCroppedVid.write(frameCropped)
                    cv2.imshow(routine.prettyName(), frameCropped)

            else:
                # Ignore frames that haven't got pose info
                if show_pose:
                    continue

                cv2.circle(frame, (cx, cy), 3, (0, 0, 255), cv2.FILLED)
                if show_full:
                    cv2.imshow(routine.prettyName(), frame)
                else:
                    frameCropped = frame[y1:y2, x1:x2]
                    cv2.imshow(routine.prettyName(), frameCropped)

            if goOneFrame:
                goOneFrame = False

        k = cv2.waitKey(waitTime) & 0xff
        if k == ord('k'):  # play pause
            paused = not paused
        elif k == ord('j'):  # prev frame
            goOneFrame = True
            cap.set(cv2.CAP_PROP_POS_FRAMES,
                    cap.get(cv2.CAP_PROP_POS_FRAMES) - 2)
        elif k == ord('l'):  # next frame
            goOneFrame = True
        elif k == ord('.'):  # speed up
            waitTime -= 5
            print(waitTime)
        elif k == ord(','):  # slow down
            waitTime += 5
            print(waitTime)
        elif k >= ord('0') and k <= ord('9'):
            num = k - ord('0')
            frameToJumpTo = (cap.get(cv2.CAP_PROP_FRAME_COUNT) / 10) * num
            cap.set(cv2.CAP_PROP_POS_FRAMES, frameToJumpTo)
            goOneFrame = True
        elif k == ord('\n') or k == ord('\r'):  # return/enter key
            cv2.destroyAllWindows()
            break
        elif k == ord('q') or k == 27:  # q/ESC
            print("Exiting...")
            exit()

    # Release the writer and capture once playback ends
    frameCroppedVid.release()
    cap.release()
Example #6
def play_frames_of_2(db,
                     routine1,
                     routine2,
                     start_frame1=1,
                     end_frame1=-1,
                     start_frame2=1,
                     end_frame2=-1,
                     show_full=False):
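    """Play two routines side by side as 256x256 crops with pose overlays,
    looping so the shorter sequence waits for the longer one.

    Keys: k play/pause, j step back, l step forward, ,/. change speed,
    Enter close, q/ESC quit."""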
    waitTime = 80
    playOneFrame = False
    paused = False

    cap1 = helper_funcs.open_video(routine1.path)
    if end_frame1 == -1:
        end_frame1 = cap1.get(cv2.CAP_PROP_FRAME_COUNT)

    cap2 = helper_funcs.open_video(routine2.path)
    if end_frame2 == -1:
        end_frame2 = cap2.get(cv2.CAP_PROP_FRAME_COUNT)

    show_pose = True

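    # Canvas holding the two 256x256 crops side by side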
    bothFrames = np.zeros(shape=(256, 256 * 2, 3), dtype=np.uint8)

    # Create a list of frames from each to be played
    frame_datas1 = db.query(Frame).filter(Frame.routine_id == routine1.id,
                                          Frame.frame_num >= start_frame1,
                                          Frame.frame_num < end_frame1,
                                          Frame.pose != None).all()
    frame_datas2 = db.query(Frame).filter(Frame.routine_id == routine2.id,
                                          Frame.frame_num >= start_frame2,
                                          Frame.frame_num < end_frame2,
                                          Frame.pose != None).all()
    frame_nums1 = [frame_data.frame_num for frame_data in frame_datas1]
    frame_nums2 = [frame_data.frame_num for frame_data in frame_datas2]
    num_frames1 = len(frame_datas1)
    num_frames2 = len(frame_datas2)
    frame_data1_ptr = 0
    frame_data2_ptr = 0

    cap1.set(cv2.CAP_PROP_POS_FRAMES, frame_nums1[0])
    cap2.set(cv2.CAP_PROP_POS_FRAMES, frame_nums2[0])
    _ret, frame1 = cap1.read()
    _ret, frame2 = cap2.read()

    while True:
        if playOneFrame or not paused:

            frame_data1 = frame_datas1[frame_data1_ptr]
            frame_data2 = frame_datas2[frame_data2_ptr]

            cx1 = frame_data1.center_pt_x
            cy1 = frame_data1.center_pt_y

            cx2 = frame_data2.center_pt_x
            cy2 = frame_data2.center_pt_y

            if show_pose:
                # If either pose is missing, skip this frame pair, advancing
                # the pointers first so a null pose can't stall the loop
                try:
                    pose1 = np.array(json.loads(frame_data1.pose))
                    pose2 = np.array(json.loads(frame_data2.pose_unfiltered))
                except TypeError:
                    frame_data1_ptr = (frame_data1_ptr + 1) % num_frames1
                    frame_data2_ptr = (frame_data2_ptr + 1) % num_frames2
                    continue

                # Show full frame
                if show_full:
                    pass

                # Show cropped
                else:
                    x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
                        routine1.video_height, routine1.video_width, cx1, cy1,
                        routine1.crop_length)
                    frameCropped1 = frame1[y1:y2, x1:x2]
                    frameCropped1 = cv2.resize(frameCropped1, (256, 256))

                    x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
                        routine2.video_height, routine2.video_width, cx2, cy2,
                        routine2.crop_length)
                    frameCropped2 = frame2[y1:y2, x1:x2]
                    frameCropped2 = cv2.resize(frameCropped2, (256, 256))

                    frameCropped1 = _draw_pose_on_frame(pose1, frameCropped1)
                    bothFrames[0:256, 0:256] = frameCropped1
                    cv2.putText(bothFrames,
                                '{}'.format(frame_nums1[frame_data1_ptr]),
                                (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                (255, 255, 255))

                    frameCropped2 = _draw_pose_on_frame(pose2, frameCropped2)
                    bothFrames[0:256, 256:512] = frameCropped2
                    cv2.putText(bothFrames,
                                '{}'.format(frame_nums2[frame_data2_ptr]),
                                (266, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                (255, 255, 255))

                    cv2.putText(
                        bothFrames,
                        '{}'.format(max(frame_data1_ptr,
                                        frame_data2_ptr)), (10, 35),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                    cv2.imshow('Pose', bothFrames)

            frame_data1_ptr += 1
            frame_data2_ptr += 1
            if playOneFrame:
                playOneFrame = False

        k = cv2.waitKey(waitTime) & 0xff
        if k == ord('k'):  # play pause
            paused = not paused
        elif k == ord('j'):  # prev frame
            playOneFrame = True
            frame_data1_ptr = helper_funcs.clip_wrap(frame_data1_ptr - 2, 0,
                                                     num_frames1 - 1)
            frame_data2_ptr = helper_funcs.clip_wrap(frame_data2_ptr - 2, 0,
                                                     num_frames2 - 1)
            cap1.set(cv2.CAP_PROP_POS_FRAMES, frame_nums1[frame_data1_ptr])
            cap2.set(cv2.CAP_PROP_POS_FRAMES, frame_nums2[frame_data2_ptr])
        elif k == ord('l'):  # next frame
            playOneFrame = True
        elif k == ord('.'):  # speed up
            waitTime -= 5
            print(waitTime)
        elif k == ord(','):  # slow down
            waitTime += 5
            print(waitTime)
        elif k == ord('\n') or k == ord('\r'):  # return/enter key
            break
        elif k == ord('q') or k == 27:  # q/ESC
            print("Exiting...")
            exit()

        # Loop forever, letting the shorter sequence wait for the longer one
        if frame_data1_ptr >= num_frames1 and frame_data2_ptr >= num_frames2:
            frame_data1_ptr = 0
            frame_data2_ptr = 0
            cap1.set(cv2.CAP_PROP_POS_FRAMES, frame_nums1[frame_data1_ptr])
            cap2.set(cv2.CAP_PROP_POS_FRAMES, frame_nums2[frame_data2_ptr])
        elif frame_data1_ptr >= num_frames1:
            frame_data1_ptr -= 1
        elif frame_data2_ptr >= num_frames2:
            frame_data2_ptr -= 1

        # Let video capture catch up
        if frame_data1_ptr <= num_frames1:
            while True:
                desiredFNum = frame_nums1[frame_data1_ptr]
                vidFNum = cap1.get(cv2.CAP_PROP_POS_FRAMES)
                if vidFNum < desiredFNum:
                    _ret, frame1 = cap1.read()
                elif vidFNum == desiredFNum:
                    break
                elif vidFNum > desiredFNum:  # if the video is further ahead than we want, force it back. This is slow...
                    cap1.set(cv2.CAP_PROP_POS_FRAMES, desiredFNum)

        if frame_data2_ptr <= num_frames2:
            while True:
                desiredFNum = frame_nums2[frame_data2_ptr]
                vidFNum = cap2.get(cv2.CAP_PROP_POS_FRAMES)
                if vidFNum < desiredFNum:
                    _ret, frame2 = cap2.read()
                elif vidFNum == desiredFNum:
                    break
                elif vidFNum > desiredFNum:  # if the video is further ahead than we want, force it back. This is slow...
                    cap2.set(cv2.CAP_PROP_POS_FRAMES, desiredFNum)