Code example #1
def add_routine(originalFilePath, routineFullPath):
    if not os.path.exists(routineFullPath):
        print("File not found:", routineFullPath)
        return

    # Open database
    db = getDb()

    routineRelPath = routineFullPath.replace(consts.videosRootPath, '')
    originalRelPath = originalFilePath.replace(consts.videosRootPath, '')
    competition = routineFullPath.split(os.sep)[-3]

    # Use OpenCV to get video meta data
    cap = helper_funcs.open_video(routineFullPath)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)

    try:
        routine = Routine(routineRelPath, originalRelPath, competition, height,
                          width, fps, frame_count)
        db.add(routine)
        db.commit()
        print("Routine addded:", routineFullPath)

        output_images.generate_thumbnail(routine)

    except sqlite3.IntegrityError:
        print("Routine already in db:", routineFullPath)
Code example #2
def generate_thumbnail(routine):
    thumbFilePath = os.path.join(consts.thumbDir, '{}.jpg'.format(routine.id))
    # if os.path.exists(thumbFilePath):
    #     print('Found existing thumb')
    #     return

    cap = helper_funcs.open_video(routine.path)
    _ret, frame = cap.read()

    # Calculate crop bounds; trimming percentFromCenter off each side keeps
    # the central (1 - percentToKeep) of each dimension
    percentToKeep = 0.35
    percentFromCenter = percentToKeep / 2
    height, width = frame.shape[:2]
    cropTop = int(height * percentFromCenter)
    cropBottom = int(height * (1 - percentFromCenter))
    cropLeft = int(width * percentFromCenter)
    cropRight = int(width * (1 - percentFromCenter))

    # Crop
    frame = frame[cropTop:cropBottom, cropLeft:cropRight]
    # Shrink
    frame = cv2.resize(frame, (frame.shape[1] // 2, frame.shape[0] // 2))
    cv2.imwrite(thumbFilePath, frame)

    print('Thumbnail created: {}'.format(thumbFilePath))
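As a quick sanity check of the crop arithmetic above (the frame size below is made up): with percentToKeep = 0.35, percentFromCenter is 0.175, so the slice keeps rows and columns 175..825 of a 1000x1000 frame, i.e. the central 65% of each dimension.

import numpy as np

frame = np.zeros((1000, 1000, 3), dtype=np.uint8)  # dummy 1000x1000 frame
cropTop, cropBottom = int(1000 * 0.175), int(1000 * (1 - 0.175))
cropped = frame[cropTop:cropBottom, cropTop:cropBottom]
print(cropped.shape)  # (650, 650, 3)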
Code example #3
def assign_sex_designations(db):
    import cv2
    for routine in db.query(Routine).filter(Routine.sex == None):
        cap = helper_funcs.open_video(routine.path)
        while 1:  # loop the video until a key press labels this routine
            _ret, frame = cap.read()
            cv2.imshow('Visualise', frame)
            k = cv2.waitKey(10) & 0xff
            if k == ord('m'):
                routine.sex = 'male'
                db.commit()
                break
            elif k == ord('f'):
                routine.sex = 'female'
                db.commit()
                break
            elif k == ord('\n') or k == ord('\r'):  # return/enter key
                break
            elif k == ord('q') or k == 27:  # q/ESC
                print("Exiting...")
                exit()

            if cap.get(cv2.CAP_PROP_POS_FRAMES) == cap.get(
                    cv2.CAP_PROP_FRAME_COUNT):
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
Code example #4
def bounce_to_gif(db, bounce):
    from helpers.consts import bouncesRootPath
    gifFilepath = bouncesRootPath + '{}.gif'.format(bounce.id)
    if os.path.exists(gifFilepath):
        print("Image exists: {}".format(gifFilepath))
        return

    import imageio
    routine = bounce.routine

    cap = helper_funcs.open_video(routine.path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, bounce.start_frame)
    peak = bounce.apex_frame
    peaksIndex = 0

    images = []
    while 1:
        _ret, frame = cap.read()

        if int(cap.get(cv2.CAP_PROP_POS_FRAMES)) >= bounce.end_frame:
            break

        if peak == int(cap.get(cv2.CAP_PROP_POS_FRAMES)):
            peaksIndex = len(images)

        try:
            frame_data = db.query(Frame).filter_by(routine_id=routine.id, frame_num=cap.get(cv2.CAP_PROP_POS_FRAMES)).one()
        except NoResultFound:
            continue

        cx = frame_data.center_pt_x
        cy = frame_data.center_pt_y

        x1, x2, y1, y2 = helper_funcs.crop_points_constrained(routine.video_height, routine.video_width, cx, cy, routine.crop_length)
        if not frame_data.pose:
            continue
        pose = np.array(json.loads(frame_data.pose))

        frameCropped = frame[y1:y2, x1:x2]
        frameCropped = cv2.resize(frameCropped, (256, 256))
        # cv2.putText(frameCropped, '{}'.format(prevBounceName), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
        cv2.putText(frameCropped, '{}'.format(frame_data.frame_num), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
        frameCropped = _draw_pose_on_frame(pose, frameCropped)

        images.append(cv2.cvtColor(frameCropped, cv2.COLOR_BGR2RGB))

    print("Writing to: {}".format(gifFilepath))
    imageio.mimwrite(gifFilepath, images[::2])  # write every second frame

    if peaksIndex == 0:
        peaksIndex = int(len(images) / 2)
    jpgFilepath = bouncesRootPath + '{}.jpg'.format(bounce.id)
    imageio.imwrite(jpgFilepath, images[peaksIndex])
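_draw_pose_on_frame is used here and again in the playback examples below, but it is not listed. A hedged sketch follows, assuming the same 2x14 pose array and colour lookup that appear in Code example #9 (consts and calc_angles are the project modules referenced there); how the pose coordinates are scaled into the 256x256 crop is an assumption.

import cv2


def _draw_pose_on_frame(pose, frameCropped):
    # pose is assumed to be a 2x14 array of (x, y) joint coordinates already
    # expressed in the cropped frame's pixel space.
    for p_idx in range(14):
        pose_x = int(pose[0, p_idx])
        pose_y = int(pose[1, p_idx])
        color = consts.poseColors[calc_angles.pose_aliai['hourglass'][p_idx]][1]
        cv2.circle(frameCropped, (pose_x, pose_y), 5, color, thickness=cv2.FILLED)
    return frameCropped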
Code example #5
def skill_into_filmstrip(bounce):
    cap = helper_funcs.open_video(bounce.routine.path)

    capWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    capHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    frameClipping = 8
    numFrames = 6

    start = bounce.start_frame + 6
    end = bounce.end_frame - 0
    step = (end - start) / numFrames
    step = int(round(step, 0))
    framesToSave = np.arange(start + (step / 2),
                             end - (step / 2),
                             step,
                             dtype=int)

    whitespace = 4
    width = 255

    leftCrop = int((capWidth * 0.5) - width / 2)
    rightCrop = int(leftCrop + width)
    filmStrip = np.ones(shape=(int(capHeight * 0.8),
                               (width * len(framesToSave)) +
                               (whitespace * len(framesToSave) - 1), 3),
                        dtype=np.uint8)
    filmStrip[:] = 250

    for i, frameNum in enumerate(framesToSave):
        cap.set(cv2.CAP_PROP_POS_FRAMES, frameNum)
        _ret, frame = cap.read()

        # possible improvement
        trackPerson = frame[0:int(capHeight * 0.8), leftCrop:rightCrop]
        start = ((whitespace + width) * i)
        filmStrip[0:int(capHeight * 0.8), start:start + width] = trackPerson

    cv2.imshow('Filmstrip', filmStrip)
    cv2.waitKey(50)

    # imgName = "C:/Users/psdco/Videos/{}/{}.png".format(bounce.routine.getAsDirPath(), bounce.skill_name)
    imgName = consts.thesisImgPath + "{}_strip.png".format(bounce.skill_name)
    print("Writing frame to {}".format(imgName))
    ret = cv2.imwrite(imgName, filmStrip)
    if not ret:
        print("Couldn't write image {}\nAbort!".format(imgName))
Code example #6
# Add frame count to database table
routines = db.query(Routine).all()
for routine in routines:
    pathToVideo = os.path.join(consts.videosRootPath, routine.path)
    cap = cv2.VideoCapture(pathToVideo)
    routine.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
db.commit()
exit()  # stop here; the clean-up blocks below were run separately

routines = db.query(Routine).filter(Routine.use == 1).all()
# routines = db.query(Routine).filter(Routine.id == 35).all()
# if False:
# Delete frames coming from some other routine... :/
for routine in routines:
    cap = helper_funcs.open_video(routine.path)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    if routine.frames[-1].frame_num > frame_count:
        for frame in routine.frames:
            if frame.frame_num > frame_count:
                db.delete(frame)
db.commit()

# else:
# Delete duplicate entries for routine in frame_data (Won't catch back to back)
for routine in routines:
    frameDataIds = [frame.id for frame in routine.frames]
    diff = np.diff(frameDataIds)
    boundaryIndex = np.nonzero(diff > 1)[0]
    if boundaryIndex.size:
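The duplicate check above relies on a routine's own frame rows having consecutive ids, so rows inserted by another run show up as a jump in np.diff. A standalone illustration with made-up ids:

import numpy as np

frameDataIds = np.array([101, 102, 103, 250, 251, 252])  # made-up ids; the jump marks foreign rows
diff = np.diff(frameDataIds)                              # [  1,   1, 147,   1,   1]
boundaryIndex = np.nonzero(diff > 1)[0]                   # [2]: rows after index 2 came from another insert
print(boundaryIndex)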
Code example #7
File: trampoline.py  Project: psdcon/fyp
def find_trampoline(routine):
    cap = helper_funcs.open_video(routine.path)

    # print("Trampoline Top has not yet been set!")
    print("Use the w/a/s/d keys to adjust the crosshairs. Use the q/e keys to adjust the width")
    print("Press ENTER to finish, ESC to quit")

    _ret, frame = cap.read()

    # Take a best guess at where it might be
    trampoline = {
        'top': routine.trampoline_top
        if routine.trampoline_top else _trampoline_top_best_guess(frame),
        # TODO the centre default is reasonable but a poor solution
        'center': routine.trampoline_center
        if routine.trampoline_center else routine.video_width // 2,
        'width': routine.trampoline_width if routine.trampoline_width else 236,
    }
    trampoline['ends'] = calcTrampolineEnds(trampoline['width'])
    use = routine.use

    while 1:  # video will loop back to the start
        _ret, frame = cap.read()

        maskLeftBorder = int(trampoline['center'] - (trampoline['ends'] / 2))
        maskRightBorder = int(trampoline['center'] + (trampoline['ends'] / 2))
        maskAroundTrampoline = np.zeros(shape=(routine.video_height,
                                               routine.video_width),
                                        dtype=np.uint8)
        maskAroundTrampoline[
            0:trampoline['top'],
            maskLeftBorder:maskRightBorder] = 255  # [y1:y2, x1:x2]
        frameCropped = cv2.bitwise_and(frame, frame, mask=maskAroundTrampoline)

        # Horizontal line at the trampoline top: (x1, y1) to (x2, y2)
        cv2.line(frame, (0, trampoline['top']),
                 (routine.video_width, trampoline['top']), (0, 255, 0), 1)
        # Vertical lines (cast to int so cv2.line always gets integer coordinates)
        halfWidth = int(trampoline['width'] / 2)
        halfEnds = int(trampoline['ends'] / 2)
        cv2.line(frame, (trampoline['center'], 0),
                 (trampoline['center'], routine.video_height), (0, 255, 0), 1)
        cv2.line(frame, (trampoline['center'] + halfWidth, 0),
                 (trampoline['center'] + halfWidth, routine.video_height),
                 (0, 0, 255), 1)
        cv2.line(frame, (trampoline['center'] - halfWidth, 0),
                 (trampoline['center'] - halfWidth, routine.video_height),
                 (0, 0, 255), 1)
        cv2.line(frame, (trampoline['center'] + halfEnds, 0),
                 (trampoline['center'] + halfEnds, routine.video_height),
                 (0, 255, 0), 1)
        cv2.line(frame, (trampoline['center'] - halfEnds, 0),
                 (trampoline['center'] - halfEnds, routine.video_height),
                 (0, 255, 0), 1)

        cv2.imshow('Frame', frame)
        cv2.imshow('Frame Cropped', frameCropped)

        k = cv2.waitKey(10)
        # TODO this is broken... DONE: Fixed it with asdw
        # print 'You pressed %d (0x%x), LSB: %d (%s)' % (k, k, k % 256,repr(chr(k % 256)) if k % 256 < 128 else '?')
        if k == ord('u'):  # toggle the routine's 'use' flag
            use = 1 if use == 0 else 0
            print("Use ", use)
        elif k == ord('w'):  # move trampoline top up
            trampoline['top'] -= 1
        elif k == ord('s'):  # move trampoline top down
            trampoline['top'] += 1
        elif k == ord('a'):  # move centre left
            trampoline['center'] -= 1
        elif k == ord('d'):  # move centre right
            trampoline['center'] += 1
        elif k == ord('e'):  # widen the width
            trampoline['width'] += 2
            trampoline['ends'] = calcTrampolineEnds(trampoline['width'])
        elif k == ord('q'):  # narrow the width
            trampoline['width'] -= 2
            trampoline['ends'] = calcTrampolineEnds(trampoline['width'])
        elif k == ord('\n') or k == ord('\r'):  # return/enter key
            cv2.imwrite(consts.thesisImgPath + "trampoline_detect_2.png",
                        frame)
            break
        elif k == 27:  # ESC ('q' is already taken by the width adjustment above)
            print("Exiting...")
            exit()
        elif k == ord('p'):  # p (print). Saves image for the report
            cv2.imwrite(consts.thesisImgPath + "trampoline_detect_2.png",
                        frame)

        # Loop until return or exit pressed
        if cap.get(cv2.CAP_PROP_POS_FRAMES) == cap.get(
                cv2.CAP_PROP_FRAME_COUNT):
            cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

    cv2.destroyAllWindows()
    cap.release()
    return trampoline
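The masking idiom used in find_trampoline (and again in track_gymnast below) is easy to test in isolation: build a single-channel mask that is 255 inside the region of interest, then AND it with the frame. The frame size and coordinates below are made up.

import numpy as np
import cv2

h, w = 720, 1280
frame = np.full((h, w, 3), 127, dtype=np.uint8)     # synthetic grey frame
trampolineTop, left, right = 500, 400, 880          # made-up ROI above the trampoline
mask = np.zeros((h, w), dtype=np.uint8)
mask[0:trampolineTop, left:right] = 255             # [y1:y2, x1:x2]
roiOnly = cv2.bitwise_and(frame, frame, mask=mask)  # pixels outside the mask become 0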
Code example #8
def save_cropped_frames(db, routine, frames, suffix=None):
    routineDirPath = routine.getAsDirPath(suffix, create=True)

    # plt.figure()
    position = np.array([routine.video_height - frame_data.center_pt_y for frame_data in frames])
    scaleWithPerson = False
    # scaleWithPerson = frames[0].hull_max_length is not None
    cropLengths = []
    cropLength = 0
    if scaleWithPerson:  # hull length
        # Compensate... something
        cropLengths = np.array([frame_data.hull_max_length for frame_data in frames])

        cropLengths[np.nonzero(position < np.median(position))] = int(np.average(cropLengths) * 1.1)
        # plt.plot(cropLengths, label="Hull Length", color="blue")
        # # plt.axhline(np.average(cropLengths), label="Average Length", color="blue")
        # # plt.axhline(np.median(cropLengths), label="Med Length", color="purple")
    else:  # Ellipse lengths
        hullLengths = [frame_data.hull_max_length for frame_data in frames]
        # plt.plot(hullLengths, label="Hull Length", color="blue")

        cropLength = helper_funcs.get_crop_length(hullLengths)
        routine.crop_length = cropLength
        db.commit()

        # # plt.axhline(np.average(hullLengths), label="Average Length", color="blue")
        # plt.axhline(cropLength, label="Percentile", color="blue")
        # plt.axhline(routine.crop_length, label="routine.crop_length", color="orange")

    # plt.plot(position, label="Position", color="green")
    # plt.xlabel("Time (s)")
    # plt.legend(loc="best")
    # plt.show(block=False)

    trampolineTouches = np.array([frame.trampoline_touch for frame in routine.frames])
    trampolineTouches = helper_funcs.trim_touches_by_2(trampolineTouches)

    personMasks = helper_funcs.load_zipped_pickle(routine.personMasksPath())

    # Create videos
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'DIB ')
    frameCroppedVid = cv2.VideoWriter(consts.videosRootPath + 'savedFrames.avi', fourcc, 30.0, (256, 256))

    cap = helper_funcs.open_video(routine.path)
    frame = []
    for i, frame_data in enumerate(frames):
        # ignore frames where trampoline is touched
        if trampolineTouches[i] == 1:
            continue
        # ignore any frame that aren't tracked
        while frame_data.frame_num != cap.get(cv2.CAP_PROP_POS_FRAMES):
            ret, frame = cap.read()
            if not ret:
                print('Something went wrong')
                return

        cx = frame_data.center_pt_x
        cy = frame_data.center_pt_y
        # Use original background or darken
        if True:
            frameCropped = track.highlightPerson(frame, np.array(json.loads(personMasks[frame_data.frame_num]), dtype=np.uint8), cx, cy, cropLength)
        else:
            x1, x2, y1, y2 = helper_funcs.crop_points_constrained(routine.video_height, routine.video_width, cx, cy, cropLength)
            frameCropped = frame[y1:y2, x1:x2]
        frameCropped = cv2.resize(frameCropped, (256, 256))
        frameCroppedVid.write(frameCropped)

        # cv2.imshow('Track ', frameCropped)
        # k = cv2.waitKey(50) & 0xff

        imgName = routineDirPath + "frame_{0:04}.png".format(frame_data.frame_num)
        # print("Writing frame to {}".format(imgName))
        # cv2.imwrite(imgName, frameCropped)

    # Done
    frameCroppedVid.release()
    cap.release()
    print("Done saving frames")
Code example #9
def play_frames(db,
                routine,
                start_frame=1,
                end_frame=-1,
                show_pose=True,
                show_full=False):
    # temp
    saveReportImages = False

    waitTime = 40
    goOneFrame = False
    paused = False
    prevBounceName = ''

    cap = helper_funcs.open_video(routine.path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    if end_frame == -1:
        end_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)

    # f, axarr = plt.subplots(12, sharex=True)
    # f.canvas.set_window_title(routine.prettyName())

    fourcc = cv2.VideoWriter_fourcc(*'DIB ')
    frameCroppedVid = cv2.VideoWriter(
        consts.videosRootPath + 'posedCropped.avi', fourcc, 30.0, (256, 256))
    while True:
        if goOneFrame or not paused:

            _ret, frame = cap.read()

            # Stop when the end frame is reached
            if cap.get(cv2.CAP_PROP_POS_FRAMES) >= end_frame:
                break

            try:
                frame_data = db.query(Frame).filter_by(
                    routine_id=routine.id,
                    frame_num=cap.get(cv2.CAP_PROP_POS_FRAMES)).one()
            except NoResultFound:
                continue

            thisBounce = frame_data.bounce
            if thisBounce and prevBounceName != thisBounce.skill_name:
                # plot_frame_angles(thisBounce.skill_name, thisBounce.getFrames(db), axarr)
                prevBounceName = thisBounce.skill_name

            cx = frame_data.center_pt_x
            cy = frame_data.center_pt_y

            x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
                routine.video_height, routine.video_width, cx, cy,
                routine.crop_length)

            if frame_data.pose is not None:
                pose = np.array(json.loads(frame_data.pose))
                # Show full frame
                if show_full:
                    for p_idx in range(14):
                        pose_x = int((cx - routine.padding) + pose[0, p_idx])
                        pose_y = int((cy - routine.padding) + pose[1, p_idx])
                        color = consts.poseColors[
                            calc_angles.pose_aliai['hourglass'][p_idx]][1]
                        cv2.circle(frame, (pose_x, pose_y),
                                   5,
                                   color,
                                   thickness=cv2.FILLED)
                    cv2.putText(frame, '{}'.format(frame_data.frame_num),
                                (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                (255, 255, 255))
                    cv2.imshow('HG Smooth', frame)

                # Show cropped
                else:
                    frameCropped = frame[y1:y2, x1:x2]
                    frameCropped = cv2.resize(frameCropped, (256, 256))
                    frameCropped = _draw_pose_on_frame(pose, frameCropped)
                    if saveReportImages:  # p (print). Saves image for the report
                        cv2.imwrite(consts.thesisImgPath + "viz_pose.png",
                                    frameCropped)
                        print("wait")
                    # cv2.putText(frameCropped, '{}'.format(prevBounceName), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                    # cv2.putText(frameCropped, '{}'.format(frame_data.frame_num), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                    frameCroppedVid.write(frameCropped)
                    cv2.imshow(routine.prettyName(), frameCropped)

            else:
                # Ignore frames that haven't got pose info
                if show_pose:
                    continue

                cv2.circle(frame, (cx, cy), 3, (0, 0, 255), cv2.FILLED)
                if show_full:
                    cv2.imshow(routine.prettyName(), frame)
                else:
                    frameCropped = frame[y1:y2, x1:x2]
                    cv2.imshow(routine.prettyName(), frameCropped)

            if goOneFrame:
                goOneFrame = False

        k = cv2.waitKey(waitTime) & 0xff
        if k == ord('k'):  # play pause
            paused = not paused
        elif k == ord('j'):  # prev frame
            goOneFrame = True
            cap.set(cv2.CAP_PROP_POS_FRAMES,
                    cap.get(cv2.CAP_PROP_POS_FRAMES) - 2)
        elif k == ord('l'):  # next frame
            goOneFrame = True
        elif k == ord('.'):  # speed up
            waitTime -= 5
            print(waitTime)
        elif k == ord(','):  # slow down
            waitTime += 5
            print(waitTime)
        elif k >= ord('0') and k <= ord('9'):
            num = k - ord('0')
            frameToJumpTo = (cap.get(cv2.CAP_PROP_FRAME_COUNT) / 10) * num
            cap.set(cv2.CAP_PROP_POS_FRAMES, frameToJumpTo)
            goOneFrame = True
        elif k == ord('\n') or k == ord('\r'):  # return/enter key
            cv2.destroyAllWindows()
            break
        elif k == ord('q') or k == 27:  # q/ESC
            print("Exiting...")
            exit()
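helper_funcs.crop_points_constrained appears throughout these examples but is not listed. A hedged sketch, assuming it returns a cropLength x cropLength window centred on (cx, cy) and shifted so it never leaves the frame:

def crop_points_constrained(videoHeight, videoWidth, cx, cy, cropLength):
    # Assumed behaviour: clamp the window so 0 <= x1 and x2 <= videoWidth
    # (and likewise for y), keeping the crop size constant.
    half = cropLength // 2
    x1 = min(max(int(cx) - half, 0), videoWidth - cropLength)
    y1 = min(max(int(cy) - half, 0), videoHeight - cropLength)
    return x1, x1 + cropLength, y1, y1 + cropLength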
Code example #10
def play_frames_of_2(db,
                     routine1,
                     routine2,
                     start_frame1=1,
                     end_frame1=-1,
                     start_frame2=1,
                     end_frame2=-1,
                     show_full=False):
    waitTime = 80
    playOneFrame = False
    paused = False

    cap1 = helper_funcs.open_video(routine1.path)
    if end_frame1 == -1:
        end_frame1 = cap1.get(cv2.CAP_PROP_FRAME_COUNT)

    cap2 = helper_funcs.open_video(routine2.path)
    if end_frame2 == -1:
        end_frame2 = cap2.get(cv2.CAP_PROP_FRAME_COUNT)

    show_pose = True

    bothFrames = np.zeros(shape=(256, 256 * 2, 3), dtype=np.uint8)

    # Create a list of frames from each to be played
    frame_datas1 = db.query(Frame).filter(Frame.routine_id == routine1.id,
                                          Frame.frame_num >= start_frame1,
                                          Frame.frame_num < end_frame1,
                                          Frame.pose != None).all()
    frame_datas2 = db.query(Frame).filter(Frame.routine_id == routine2.id,
                                          Frame.frame_num >= start_frame2,
                                          Frame.frame_num < end_frame2,
                                          Frame.pose != None).all()
    frame_nums1 = [frame_data.frame_num for frame_data in frame_datas1]
    frame_nums2 = [frame_data.frame_num for frame_data in frame_datas2]
    num_frames1 = len(frame_datas1)
    num_frames2 = len(frame_datas2)
    frame_data1_ptr = 0
    frame_data2_ptr = 0

    cap1.set(cv2.CAP_PROP_POS_FRAMES, frame_nums1[0])
    cap2.set(cv2.CAP_PROP_POS_FRAMES, frame_nums2[0])
    _ret, frame1 = cap1.read()
    _ret, frame2 = cap2.read()

    while True:
        if playOneFrame or not paused:

            frame_data_ptr_video = [
                cap1.get(cv2.CAP_PROP_POS_FRAMES),
                cap2.get(cv2.CAP_PROP_POS_FRAMES)
            ]
            frame_data1 = frame_datas1[frame_data1_ptr]
            frame_data2 = frame_datas2[frame_data2_ptr]

            cx1 = frame_data1.center_pt_x
            cy1 = frame_data1.center_pt_y

            cx2 = frame_data2.center_pt_x
            cy2 = frame_data2.center_pt_y

            if show_pose:
                # if pose is None, skip showing this frame
                try:
                    pose1 = np.array(json.loads(frame_data1.pose))
                    pose2 = np.array(json.loads(frame_data2.pose_unfiltered))
                except TypeError:
                    continue

                # Show full frame
                if show_full:
                    pass

                # Show cropped
                else:
                    x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
                        routine1.video_height, routine1.video_width, cx1, cy1,
                        routine1.crop_length)
                    frameCropped1 = frame1[y1:y2, x1:x2]
                    frameCropped1 = cv2.resize(frameCropped1, (256, 256))

                    x1, x2, y1, y2 = helper_funcs.crop_points_constrained(
                        routine2.video_height, routine2.video_width, cx2, cy2,
                        routine2.crop_length)
                    frameCropped2 = frame2[y1:y2, x1:x2]
                    frameCropped2 = cv2.resize(frameCropped2, (256, 256))

                    frameCropped1 = _draw_pose_on_frame(pose1, frameCropped1)
                    bothFrames[0:256, 0:256] = frameCropped1
                    cv2.putText(bothFrames,
                                '{}'.format(frame_nums1[frame_data1_ptr]),
                                (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                (255, 255, 255))

                    frameCropped2 = _draw_pose_on_frame(pose2, frameCropped2)
                    bothFrames[0:256, 256:512] = frameCropped2
                    cv2.putText(bothFrames,
                                '{}'.format(frame_nums2[frame_data2_ptr]),
                                (266, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                                (255, 255, 255))

                    cv2.putText(
                        bothFrames,
                        '{}'.format(max(frame_data1_ptr,
                                        frame_data2_ptr)), (10, 35),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                    cv2.imshow('Pose', bothFrames)

            frame_data1_ptr += 1
            frame_data2_ptr += 1
            if playOneFrame:
                playOneFrame = False

        k = cv2.waitKey(waitTime) & 0xff
        if k == ord('k'):  # play pause
            paused = not paused
        elif k == ord('j'):  # prev frame
            playOneFrame = True
            frame_data1_ptr = helper_funcs.clip_wrap(frame_data1_ptr - 2, 0,
                                                     num_frames1 - 1)
            frame_data2_ptr = helper_funcs.clip_wrap(frame_data2_ptr - 2, 0,
                                                     num_frames2 - 1)
            cap1.set(cv2.CAP_PROP_POS_FRAMES, frame_nums1[frame_data1_ptr])
            cap2.set(cv2.CAP_PROP_POS_FRAMES, frame_nums2[frame_data2_ptr])
        elif k == ord('l'):  # next frame
            playOneFrame = True
        elif k == ord('.'):  # speed up
            waitTime -= 5
            print(waitTime)
        elif k == ord(','):  # slow down
            waitTime += 5
            print(waitTime)
        elif k == ord('\n') or k == ord('\r'):  # return/enter key
            break
        elif k == ord('q') or k == 27:  # q/ESC
            print("Exiting...")
            exit()

        # Loop forever, allowing shorter sequence to pause for longer sequence
        if frame_data1_ptr >= num_frames1 and frame_data2_ptr >= num_frames2:
            frame_data1_ptr = 0
            frame_data2_ptr = 0
            cap1.set(cv2.CAP_PROP_POS_FRAMES, frame_nums1[frame_data1_ptr])
            cap2.set(cv2.CAP_PROP_POS_FRAMES, frame_nums2[frame_data2_ptr])
        elif frame_data1_ptr >= num_frames1:
            frame_data1_ptr -= 1
        elif frame_data2_ptr >= num_frames2:
            frame_data2_ptr -= 1

        # Let video capture catch up
        if frame_data1_ptr <= num_frames1:
            while True:
                desiredFNum = frame_nums1[frame_data1_ptr]
                vidFNum = cap1.get(cv2.CAP_PROP_POS_FRAMES)
                if vidFNum < desiredFNum:
                    _ret, frame1 = cap1.read()
                elif vidFNum == desiredFNum:
                    break
                elif vidFNum > desiredFNum:  # if the video is further ahead than we want, force it back. This is slow...
                    cap1.set(cv2.CAP_PROP_POS_FRAMES, desiredFNum)

        if frame_data2_ptr <= num_frames2:
            while True:
                desiredFNum = frame_nums2[frame_data2_ptr]
                vidFNum = cap2.get(cv2.CAP_PROP_POS_FRAMES)
                if vidFNum < desiredFNum:
                    _ret, frame2 = cap2.read()
                elif vidFNum == desiredFNum:
                    break
                elif vidFNum > desiredFNum:  # if the video is further ahead than we want, force it back. This is slow...
                    cap2.set(cv2.CAP_PROP_POS_FRAMES, desiredFNum)
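helper_funcs.clip_wrap (used for the 'j' previous-frame key above) is also not listed. A minimal sketch, assuming that stepping past either end of [low, high] wraps the pointer to the opposite end:

def clip_wrap(value, low, high):
    # Assumed behaviour: wrap around rather than clamp at the ends.
    if value < low:
        return high
    if value > high:
        return low
    return value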
Code example #11
def track_gymnast(db, routine):
    # temp for report
    global saveReportImages
    global joinNearbyContours

    centerPoints = {}
    hullLengths = {}
    trampolineTouches = {}
    personMasks = {}

    print("Starting to track gymnast")
    cap = helper_funcs.open_video(routine.path)

    font = cv2.FONT_HERSHEY_SIMPLEX
    framesToAverage = 300

    # Keyboard stuff
    visualise = True  # show windows rendering video
    waitTime = 15  # delay for keyboard input
    paused = False
    goOneFrame = False

    # Window vars
    scalingFactor = 0.4
    capWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    capHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    resizeWidth = int(capWidth * scalingFactor)
    resizeHeight = int(capHeight * scalingFactor)

    # Create videos
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'DIB ')
    frameOriginalOut = cv2.VideoWriter(
        consts.videosRootPath + 'framesOriginal.avi', fourcc, 30.0,
        (capWidth, capHeight))
    frameBlurDarkVid = cv2.VideoWriter(
        consts.videosRootPath + 'framesBlurDark.avi', fourcc, 30.0,
        (capWidth, capHeight))
    fgMaskOut = cv2.VideoWriter(consts.videosRootPath + 'fgMask.avi', fourcc,
                                30.0, (capWidth, capHeight))
    frameFgMaskMorphedOut = cv2.VideoWriter(
        consts.videosRootPath + 'frameFgMaskMorphed.avi', fourcc, 30.0,
        (capWidth, capHeight))
    frameFgMaskMorphedCroppedOut = cv2.VideoWriter(
        consts.videosRootPath + 'frameFgMaskMorphedCropped.avi', fourcc, 30.0,
        (capWidth, capHeight))
    bgModelOut = cv2.VideoWriter(consts.videosRootPath + 'bgModel.avi', fourcc,
                                 30.0, (capWidth, capHeight))
    biggestContourOut = cv2.VideoWriter(
        consts.videosRootPath + 'biggestContour.avi', fourcc, 30.0,
        (capWidth, capHeight))
    biggestContourWithHullOut = cv2.VideoWriter(
        consts.videosRootPath + 'biggestContourWithHull.avi', fourcc, 30.0,
        (capWidth, capHeight))
    frameWithHullOut = cv2.VideoWriter(
        consts.videosRootPath + 'frameWithHull&CoM.avi', fourcc, 30.0,
        (capWidth, capHeight))

    # For masking to the right and left of the trampoline
    maskLeftBorder = int(routine.trampoline_center - (
        trampoline.calcTrampolineEnds(routine.trampoline_width) / 2))
    maskRightBorder = int(routine.trampoline_center + (
        trampoline.calcTrampolineEnds(routine.trampoline_width) / 2))

    # Create array for tiled window
    processVisImgs = np.zeros(shape=(resizeHeight * 2, resizeWidth * 2, 3),
                              dtype=np.uint8)  # (h * 3, w, CV_8UC3);
    prevPersonFgMask = np.zeros(shape=(capHeight, capWidth), dtype=np.uint8)

    # Create mask around trampoline
    maskAboveTrmpl = np.zeros(shape=(capHeight, capWidth),
                              dtype=np.uint8)  # cv2.CV_8U
    maskAboveTrmpl[0:routine.trampoline_top,
                   maskLeftBorder:maskRightBorder] = 255  # [y1:y2, x1:x2]
    maskBelowTrmpl = np.zeros(shape=(capHeight, capWidth), dtype=np.uint8)
    maskBelowTrmpl[routine.trampoline_top:capHeight,
                   maskLeftBorder:maskRightBorder] = 255  # [y1:y2, x1:x2]
    # trampolineAreaPx = sq_area((routine.trampoline_top, capHeight), (maskLeftBorder, maskRightBorder))

    # Pick a default crop len if none saved
    # cropLength = routine.crop_length if routine.crop_length else 200

    # Background extractor. Ignore shadow
    pKNN = cv2.createBackgroundSubtractorKNN()
    pKNN.setShadowValue(0)
    # Prepare background by pre-training the bg sub
    prepareBgSubt(pKNN, cap, framesToAverage)

    print("Press v to toggle showing visuals")
    print("Press ENTER to finish, and ESC/'q' to quit")

    lastContours = None  # used to remember last contour if area goes too small
    start_t = time.time()
    while 1:
        if goOneFrame or not paused:

            _ret, frame = cap.read()
            # frameOriginalOut.write(frame)

            # TODO if mask is really noisy (area is large/ high num contours), could increase the learning rate?
            # frame = cv2.bitwise_and(frame, frame, mask=maskAboveTrmpl)
            frameFgMask = pKNN.apply(frame)
            # fgMaskOut.write(frameFgMask)

            # Crop fg mask detail to be ROI (region of interest) above the trampoline
            frameFgMaskMorphed = erode_dilate(frameFgMask)
            # frameFgMaskMorphedOut.write(frameFgMaskMorphed)
            frameFgMaskMorphed = cv2.bitwise_and(frameFgMaskMorphed,
                                                 frameFgMaskMorphed,
                                                 mask=maskAboveTrmpl)
            # frameFgMaskMorphedCroppedOut.write(frameFgMaskMorphed)

            # Create mask of the common regions in this and in the prevPersonFgMask
            fgMaskPrevPersonOverlap = cv2.bitwise_and(frameFgMaskMorphed,
                                                      frameFgMaskMorphed,
                                                      mask=prevPersonFgMask)

            if visualise:
                # Show current background model
                bgModel = pKNN.getBackgroundImage()
                # bgModelOut.write(bgModel)
                processVisImgs[0:resizeHeight, 0:resizeWidth] = cv2.resize(
                    bgModel, (resizeWidth, resizeHeight))
                cv2.putText(processVisImgs, 'Background Model', (10, 20), font,
                            0.4, (255, 255, 255))
                # Show fg mask
                frameFgMask4Vis = cv2.cvtColor(
                    cv2.resize(frameFgMask, (resizeWidth, resizeHeight)),
                    cv2.COLOR_GRAY2RGB)
                cv2.line(
                    frameFgMask4Vis,
                    (0, int(routine.trampoline_top * scalingFactor)),
                    (resizeWidth, int(routine.trampoline_top * scalingFactor)),
                    (0, 255, 0), 1)
                cv2.putText(frameFgMask4Vis, 'Foreground Mask', (10, 20), font,
                            0.4, (255, 255, 255))
                processVisImgs[resizeHeight * 1:resizeHeight * 2, resizeWidth *
                               0:resizeWidth * 1] = frameFgMask4Vis

            # Find contours in masked image (OpenCV 3.x findContours returns 3 values)
            _img, contours, _hierarchy = cv2.findContours(
                np.copy(frameFgMaskMorphed), cv2.RETR_TREE,
                cv2.CHAIN_APPROX_SIMPLE)
            if len(contours) == 0:
                print("Using last contour because none found in this frame")
                contours = lastContours

            # Sort DESC, so biggest contour is first
            contours = sorted(contours, key=cv2.contourArea, reverse=True)
            # If contour is less than min area, replace it with previous contour
            if cv2.contourArea(
                    contours[0]
            ) < consts.minContourArea and lastContours is not None:
                print("Using last contour because found is too small")
                contours = lastContours
            else:
                lastContours = contours

            personContourConcat, personContours, prevPersonFgMask = getPersonContour(
                fgMaskPrevPersonOverlap, contours)
            blobHull = cv2.convexHull(personContourConcat)

            if visualise:
                # Convert the foreground mask to color so the biggest can be coloured in
                biggestContour = cv2.cvtColor(frameFgMaskMorphed,
                                              cv2.COLOR_GRAY2RGB)
                # Draw the biggest one in red
                for contour in personContours:
                    cv2.drawContours(biggestContour, [contour], 0, (0, 0, 255),
                                     cv2.FILLED)
                # biggestContourOut.write(biggestContour)

                # Draw the outline of the convex blobHull for the person
                cv2.drawContours(biggestContour, [blobHull], 0, (0, 255, 0), 2)
                # biggestContourWithHullOut.write(biggestContour)

                frameWithHull = np.copy(frame)
                cv2.drawContours(frameWithHull, [blobHull], 0, (0, 255, 0), 2)

                # Resize and show it
                biggestContour = cv2.resize(biggestContour,
                                            (resizeWidth, resizeHeight))
                cv2.putText(biggestContour, 'Blob Detection', (10, 20), font,
                            0.4, (255, 255, 255))
                processVisImgs[resizeHeight * 1:resizeHeight * 2, resizeWidth *
                               1:resizeWidth * 2] = biggestContour
                # cv2.imshow('personMask', personMask)

            cx, cy = helper_funcs.calc_contour_center(personContourConcat)
            if cx and cy:
                centerPoints[int(cap.get(cv2.CAP_PROP_POS_FRAMES))] = [cx, cy]

                # Save person mask so it can be used when outputting frames
                # prevPersonFgMask is, at the moment, the current person fg mask. It's already been updated.
                finerPersonMask = cv2.bitwise_and(frameFgMask,
                                                  frameFgMask,
                                                  mask=prevPersonFgMask)
                personMasks[int(cap.get(
                    cv2.CAP_PROP_POS_FRAMES))] = json.dumps(
                        finerPersonMask.tolist())

                # Get max dimension of person
                _img, finerContours, _h = cv2.findContours(
                    np.copy(finerPersonMask), cv2.RETR_TREE,
                    cv2.CHAIN_APPROX_SIMPLE)
                if len(finerContours) > 0:
                    finerContours = sorted(finerContours,
                                           key=cv2.contourArea,
                                           reverse=True)
                    finerHull = cv2.convexHull(finerContours[0])
                # Use finer hull because blob morph operations will change height
                hullMaxLen = getMaxHullLength(finerHull)
                hullLengths[int(cap.get(cv2.CAP_PROP_POS_FRAMES))] = hullMaxLen

                touchingTrmpl = isTouchingTrmpl(routine.trampoline_top,
                                                finerHull)
                trampolineTouches[int(cap.get(
                    cv2.CAP_PROP_POS_FRAMES))] = touchingTrmpl

                if visualise:
                    # Show trampoline touch detection
                    finerPersonMask4Vis = cv2.cvtColor(finerPersonMask,
                                                       cv2.COLOR_GRAY2RGB)
                    cv2.drawContours(finerPersonMask4Vis, [finerHull], 0,
                                     (0, 255, 0), 2)
                    if touchingTrmpl:
                        cv2.line(finerPersonMask4Vis,
                                 (0, routine.trampoline_top),
                                 (routine.video_width, routine.trampoline_top),
                                 (0, 255, 0), 5)
                    else:
                        cv2.line(finerPersonMask4Vis,
                                 (0, routine.trampoline_top),
                                 (routine.video_width, routine.trampoline_top),
                                 (0, 0, 255), 5)
                    finerPersonMask4Vis = cv2.resize(
                        finerPersonMask4Vis, (resizeWidth, resizeHeight))
                    cv2.imshow('finerPersonMask4Vis', finerPersonMask4Vis)

                    if touchingTrmpl:
                        cv2.line(frameWithHull, (0, routine.trampoline_top),
                                 (routine.video_width, routine.trampoline_top),
                                 (0, 255, 0), 3)
                    else:
                        cv2.line(frameWithHull, (0, routine.trampoline_top),
                                 (routine.video_width, routine.trampoline_top),
                                 (0, 0, 255), 3)
                    cv2.circle(frameWithHull, (cx, cy), 5, (0, 0, 255), -1)
                    # cv2.imshow("frameWithHull", frameWithHull)
                    # frameWithHullOut.write(frameWithHull)

                    # Show person drawing the center of mass
                    # cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
                    # cropLength = helper_funcs.getCropLength(hullLengths.values())
                    trackedPerson = highlightPerson(frame, finerPersonMask, cx,
                                                    cy, 250, frameBlurDarkVid)
                    trackedPerson = cv2.resize(trackedPerson, (256, 256))
                    cv2.imshow("Track", trackedPerson)
            else:
                print("Skipping center point. Couldn't find moment")

            # End stuff
            if visualise:
                # Add the trampoline_top line
                cv2.line(frame, (0, routine.trampoline_top),
                         (routine.video_width, routine.trampoline_top),
                         (0, 255, 0), 2)
                frameSm = cv2.resize(frame, (resizeWidth, resizeHeight))
                cv2.putText(
                    frameSm,
                    'Frame {}'.format(int(cap.get(cv2.CAP_PROP_POS_FRAMES))),
                    (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
                processVisImgs[resizeHeight * 0:resizeHeight * 1,
                               resizeWidth * 1:resizeWidth * 2] = frameSm
                cv2.imshow('Visualise Processing', processVisImgs)
                if saveReportImages:  # p (print). Saves image for the report
                    cv2.imwrite(consts.thesisImgPath + "bgsub_1.png", frame)

            # If we went one frame, stop from going another
            if goOneFrame:
                goOneFrame = False

        k = cv2.waitKey(waitTime) & 0xff
        if k == ord('v'):
            visualise = not visualise
        elif k == ord('k'):  # play pause
            paused = not paused
        elif k == ord('j'):  # prev frame
            cap.set(cv2.CAP_PROP_POS_FRAMES,
                    cap.get(cv2.CAP_PROP_POS_FRAMES) - 2)
            goOneFrame = True
        elif k == ord('l'):  # next frame
            goOneFrame = True
        elif k == ord('.'):  # speed up
            waitTime -= 5
            print(waitTime)
        elif k == ord(','):  # slow down
            waitTime += 5
            print(waitTime)
        elif ord('0') <= k <= ord('9'):
            num = k - ord('0')
            frameToJumpTo = (cap.get(cv2.CAP_PROP_FRAME_COUNT) / 10) * num
            cap.set(cv2.CAP_PROP_POS_FRAMES, frameToJumpTo)
            goOneFrame = True
        elif k == ord('u'):
            routine.use = 0 if routine.use else 1
            db.commit()
            print("use updated to", routine.use)
        elif k == ord('\n') or k == ord('\r'):  # return/enter key
            break
        elif k == ord('q') or k == 27:  # q/ESC
            print("Exiting...")
            exit()
        elif k == ord('n'):
            joinNearbyContours = not joinNearbyContours

        if saveReportImages:
            saveReportImages = False

        if k == ord('p'):
            saveReportImages = True
            if paused:
                goOneFrame = True

        # Finish playing the video when we get to the end
        if cap.get(cv2.CAP_PROP_POS_FRAMES) == cap.get(
                cv2.CAP_PROP_FRAME_COUNT):
            break

        # Calc fps
        end_t = time.time()
        time_taken = end_t - start_t
        start_t = end_t
        if time_taken != 0:
            fps = 1. / time_taken
            print('Loop fps: {}'.format(fps))

    cap.release()
    # fgMaskOut.release()
    cv2.destroyAllWindows()

    return centerPoints, hullLengths, trampolineTouches, personMasks
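prepareBgSubt and erode_dilate are called above but not shown. Hedged sketches of both follow, assuming prepareBgSubt pre-trains the KNN subtractor on the first framesToAverage frames and rewinds the capture, and erode_dilate is a simple morphological clean-up; the kernel size and iteration counts are assumptions.

import cv2
import numpy as np


def prepareBgSubt(pKNN, cap, framesToAverage):
    # Feed the first frames into the background model, then rewind so the
    # tracking loop starts again from frame 0.
    for _ in range(framesToAverage):
        ret, frame = cap.read()
        if not ret:
            break
        pKNN.apply(frame)
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)


def erode_dilate(fgMask):
    # Remove speckle noise, then thicken what remains of the foreground mask.
    kernel = np.ones((5, 5), dtype=np.uint8)
    mask = cv2.erode(fgMask, kernel, iterations=1)
    mask = cv2.dilate(mask, kernel, iterations=2)
    return mask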