import detect_homes
from util import transform
import capture_balls
from util.cvinput import cvwindows
from util.parse_args import args
from util.utils import Reader, Obj, HomePlate, Ball
from util.utils import show_contours, homeAVG, kmeans, draw_finalResult, plot_fit, draw_strikeZone, fit_velocity, plot_velocity, draw_SideTrajectory
from util.filtering import filter_img
from util import ransac
import get_results

params = Obj(useKmeans=False,
             kmeans_k=6,
             transform_resolution=(600, 1024),
             camera_fps=187.,
             home_large=43.18,
             ball_diameter=7.2644,
             camera_hight=225,
             strike_zone_up=107.8738,
             strike_zone_down=39.4462)


def main2():
    from pitch_training import PitchTrainig
    pitchTrainig = PitchTrainig()
    while cvwindows.event_loop():
        home = pitchTrainig.calibrateHome()

        PTM, user_homePlate_cnt = pitchTrainig.computeTransform(home)

        pitchTrainig.waitBalls(PTM)
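
# A minimal sketch (not part of the source) of how the physical constants above
# could be turned into image-space quantities: home_large is the regulation
# home-plate width in cm, so once the plate's pixel width in the warped view is
# known, a cm-per-pixel scale follows and the strike-zone heights (given in the
# same units) map to pixel offsets.  The helper name and its argument are assumptions.
def strike_zone_in_pixels(home_width_px):
    pixels_per_cm = home_width_px / params.home_large
    zone_top_px = params.strike_zone_up * pixels_per_cm
    zone_bottom_px = params.strike_zone_down * pixels_per_cm
    return zone_top_px, zone_bottom_px
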
import os
import numpy as np
import cv2
from util.utils import Obj, Ball
from util.utils import show_contours, draw_ball, draw_balls, kmeans

params = Obj(
    debugging=False,
    useKmeans=False,
    kmeans_k=6,
    max_radiusPercent=.01,
    min_radiusPercent=.0015,
    home_begin=913.76,
    fgbg=cv2.createBackgroundSubtractorMOG2(),
    kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)),
    # aproxContour selects the bounding shape drawn around detected contours:
    #   0 - straight (axis-aligned) bounding rectangle; ignores the contour's rotation
    #   1 - minimum-area rectangle; also accounts for rotation
    #   2 - minimum enclosing circle that completely covers the contour
    aproxContour=0)
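
# A short illustration (not from the source) of the three OpenCV bounding
# shapes the aproxContour options refer to; cnt stands for any contour
# returned by cv2.findContours and approx_contour is a hypothetical helper.
def approx_contour(cnt):
    if params.aproxContour == 0:
        return cv2.boundingRect(cnt)        # straight, axis-aligned rectangle
    elif params.aproxContour == 1:
        return cv2.minAreaRect(cnt)         # rotated rectangle of minimum area
    return cv2.minEnclosingCircle(cnt)      # smallest circle covering the contour
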


def get_balls(frame, frame_number):
    mask = get_mask(frame)

    contours, _ = cv2.findContours(mask, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)

    if params.debugging:
        show_contours(contours, frame, 'all contours')
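
# get_mask is not shown in this example.  A minimal sketch of what such a mask
# step typically looks like given the params above (MOG2 background subtraction
# followed by a morphological opening); the original implementation may differ.
def get_mask(frame):
    fgmask = params.fgbg.apply(frame)
    # remove speckle noise so only ball-sized blobs survive
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, params.kernel)
    return fgmask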
import cv2
from util.utils import Obj

params = Obj(blur_algorithm=cv2.medianBlur, win_size=5, iter_number=1)


def filter_img(frame):
    blur = frame.copy()
    for _ in range(params.iter_number):
        blur = params.blur_algorithm(blur, params.win_size)
    return blur


def setUp(nparams):
    params.setattr(nparams)
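
# A small usage sketch (not from the source): setUp merges a dict of overrides
# into params via Obj.setattr, just as PitchTrainig does for the other modules,
# and filter_img then applies the configured blur.  The file name and override
# values are arbitrary examples.
if __name__ == '__main__':
    setUp({"win_size": 7, "iter_number": 2})
    frame = cv2.imread('example_frame.png', cv2.IMREAD_GRAYSCALE)
    blurred = filter_img(frame)
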
import numpy as np
import cv2
from util.utils import Obj, HomePlate
from util.utils import show_contours

params = Obj(debugging=False,
             transform_resolution=(1024, 600),
             size_homePercenct=1. / 6,
             use_rectApprox=False)


def homePlate_transform(frame, home):
    # obtain a square from the home plate contour with its corners in a consistent order
    square = get_home_square(home)

    # compute the destination points that the home plate square should map to
    home_width = params.transform_resolution[1] * params.size_homePercenct
    x14 = params.transform_resolution[0] - params.transform_resolution[0] * .01
    x23 = x14 - home_width
    y12 = (params.transform_resolution[1] + home_width) / 2.
    y34 = y12 - home_width
    dst = np.array([[x14, y12], [x23, y12], [x23, y34], [x14, y34]],
                   dtype="float32")

    # compute the perspective transform matrix
    PTM = cv2.getPerspectiveTransform(square, dst)

    # find the new home plate contour
    homePlate_cnt = np.array([[x14, params.transform_resolution[1] / 2.],
                              [x23 + home_width / 2., y12], [x23, y12],
                              [x23, y34], [x23 + home_width / 2., y34]])
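
# This example is truncated before it returns; computeTransform() in
# PitchTrainig unpacks the result as (PTM, homePlate_cnt).  Below is a small
# sketch of how the computed matrix is applied downstream, mirroring
# waitBalls(); apply_home_transform is a hypothetical helper name.
def apply_home_transform(frame, PTM):
    # warp the frame into the normalized top-down view used for ball tracking
    return cv2.warpPerspective(frame, PTM, params.transform_resolution)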
import os
import cv2
import numpy as np
from util.utils import Obj, HomePlate
from util.utils import show_contours, draw_home_lines, refining_corners, angle, get_dist

params = Obj(debugging=False,
             thresh_blockSize=31,
             max_percentArea=10,
             min_percentArea=1,
             numberOfSizes=5,
             useHull=True,
             percentSideRatio=20,
             diff_rectAngles=5,
             diff_maxAngles=5)


def get_homes(frame):
    thresh = cv2.adaptiveThreshold(frame, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY, params.thresh_blockSize,
                                   7)

    if params.debugging:
        cv2.imshow('Thresh', thresh)

    contours_img = thresh.copy()
    contours, _ = cv2.findContours(contours_img, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)

    if params.debugging:
        show_contours(contours, frame, 'all contours')
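
# get_homes is truncated here.  A minimal sketch (an assumption, not the
# original code) of how the max/min_percentArea params above are typically
# used: keep only contours whose area lies between the allowed percentages of
# the frame area, optionally replacing each contour by its convex hull.
def filter_by_area(contours, frame):
    frame_area = frame.shape[0] * frame.shape[1]
    max_area = frame_area * params.max_percentArea / 100.
    min_area = frame_area * params.min_percentArea / 100.
    kept = []
    for cnt in contours:
        if min_area <= cv2.contourArea(cnt) <= max_area:
            kept.append(cv2.convexHull(cnt) if params.useHull else cnt)
    return kept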
import numpy as np
import cv2
import json
import glob
from util.utils import Obj

params = Obj(chessboard_size=(9, 6), winSize=(11, 11))


def calibrate():
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ..., (8,5,0)
    objp = np.zeros((params.chessboard_size[1] * params.chessboard_size[0], 3),
                    np.float32)
    objp[:, :2] = np.mgrid[0:params.chessboard_size[0],
                           0:params.chessboard_size[1]].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    images = glob.glob('chessboard_images/*.jpg')
    found = 1
    # sort the images numerically by the index embedded in their filename
    images.sort(key=lambda image: int(image[-5:-4])
                if image[-6] == '/' else int(image[-6:-4]))
    for fname in images:
        img = cv2.imread(fname)  # read the next chessboard image from disk
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
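        # --- sketch of the standard continuation (not the original code) ---
        # find and refine the chessboard corners, then collect the point pairs
        ret, corners = cv2.findChessboardCorners(gray, params.chessboard_size, None)
        if ret:
            corners = cv2.cornerSubPix(gray, corners, params.winSize, (-1, -1), criteria)
            objpoints.append(objp)
            imgpoints.append(corners)

    # estimate the intrinsics and distortion coefficients; writing them to JSON
    # is an assumption suggested by the json import ('calibration.json' is an
    # assumed path)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    with open('calibration.json', 'w') as f:
        json.dump({"camera_matrix": mtx.tolist(), "dist_coeff": dist.tolist()}, f)

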
class PitchTrainig():
    def __init__(self):
        self.camera = cvwindows.create('camera')

        self.reader = Reader()
        self.params = Obj()

        # setting up reader, detect_homes, transform and capture_balls params
        self.__setUp_Reader__()
        detect_homes.setUp({"debugging": args.debugging})
        transform.setUp({"debugging": args.debugging})
        capture_balls.setUp({"debugging": args.debugging})

        self.useKmeans = False
        self.kmeans_k = 3

    def start(self):
        while cvwindows.event_loop():
            home = self.calibrateHome()

            PTM, user_homePlate_cnt = self.computeTransform(home)

            self.waitBalls(PTM)

            # self.__draw_result__()

    def calibrateHome(self):
        home_tracking = []
        while len(home_tracking) < 200:
            # reading a frame
            frame = self.reader.read()
            if frame is None:
                break

            # removing noise from image
            gray = filter_img(frame)

            # using kmeans on the image
            if self.useKmeans:
                gray = kmeans(frame, self.kmeans_k)
                if args.debugging:
                    cv2.imshow('kmeans', gray)
                    cv2.waitKey(0)

            # finding a list of homes
            contours = detect_homes.get_homes(gray)
            if contours is None or len(contours) == 0:
                # no home plate candidate in this frame; log the frame number and skip it
                print(self.reader.get_frameNumber())
                continue

            # keeping the best home
            home = self.__homeAVG__(contours)
            home_tracking.append(home)

            self.__drawHomes__(frame, contours)

            if len(contours) > 1:
                # more than one home plate candidate was detected; log it for debugging
                print("len = ", len(contours))
                print(self.reader.get_frameNumber())

        return HomePlate(self.__homeAVG__(home_tracking))

    def computeTransform(self, home):
        gray = filter_img(self.reader.actualFrame)
        PTM, user_homePlate_cnt = transform.homePlate_transform(gray, home)
        return PTM, user_homePlate_cnt

    def waitBalls(self, PTM):
        ball_tracking = []
        while True:
            # reading a frame
            frame = self.reader.read()
            if frame is None:
                break

            # removing noise from image
            gray = filter_img(frame)

            # using kmeans on the image
            if self.useKmeans:
                gray = kmeans(frame, self.kmeans_k)
                if args.debugging:
                    cv2.imshow('kmeans', gray)
                    cv2.waitKey(0)

            # transform the frame
            warped = cv2.warpPerspective(gray, PTM,
                                         transform.params.transform_resolution)

            # finding the ball
            balls = capture_balls.get_balls(warped)
            if len(balls) > 0:
                ball_tracking.append(balls)

    def __homeAVG__(self, homes):
        home = np.mean(homes, 0)
        return home

    def __drawHomes__(self, frame, contours):
        contours_img = frame.copy()
        cv2.drawContours(contours_img, contours.astype('int32'), -1,
                         (0, 0, 255), 2)
        self.camera.show(contours_img)

    def __draw_result__(self, homePlate_cnt, ball_tracking, ball_func):
        user_img = cv2.cvtColor(
            np.zeros(self.params.transform_resolution, 'float32'),
            cv2.COLOR_GRAY2BGR)
        cv2.drawContours(user_img, [homePlate_cnt.astype('int32')], -1,
                         (255, 255, 255), -1)

        for balls in ball_tracking:
            for center, radius in balls:
                cv2.circle(user_img, (int(center[0]), int(center[1])),
                           int(radius), (0, 255, 0), -1)

        return user_img

    def setUp(self, nparams):
        self.params.setattr(nparams)

    def __setUp_Reader__(self):
        folder_path = os.listdir("videos")
        folder_path.sort()
        path = 'videos/' + folder_path[args.test_folder] + '/'
        reader_params = {}
        reader_params["folder_path"] = path
        self.reader.setUp(reader_params)
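
# A sketch (not part of the source) of how the commented-out __draw_result__
# call in start() could be wired up.  It assumes waitBalls() is modified to
# return its ball_tracking list, and it fills self.params (empty by default)
# with the transform resolution that __draw_result__ reads; run_and_draw and
# the ball_func value of None are illustrative assumptions.
def run_and_draw(trainer):
    trainer.setUp({"transform_resolution": transform.params.transform_resolution})
    home = trainer.calibrateHome()
    PTM, user_homePlate_cnt = trainer.computeTransform(home)
    ball_tracking = trainer.waitBalls(PTM)  # assumes waitBalls returns the list
    result = trainer.__draw_result__(user_homePlate_cnt, ball_tracking, None)
    cv2.imshow('final result', result)
    cv2.waitKey(0)
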
import numpy as np
import scipy
from util.utils import Obj, QuadraticLeastSquaresModel, Ball
import matplotlib.pyplot as plt

params = Obj(
    debugging=False,
    # a model that can be fitted to data points
    model=QuadraticLeastSquaresModel(),
    # the minimum number of data values required to fit the model
    n=3,
    # the maximum number of iterations allowed in the algorithm
    max_iters=100,
    # a threshold value for determining when a data point fits a model
    eps=1e3,
    # a percent of close data values required to assert that a model fits well to data
    closeData_percent=9. / 10)


def ransac(data):
    """fit model parameters to data using the RANSAC algorithm
    Params:
        data - a set of observed data points
    Return:
        bestdata - the best fit data found by the model (or None if no good model is found)"""

    # num_closeData - the number of close data values required to assert that a model fits well to data
    num_closeData = data.shape[0] * params.closeData_percent - params.n
    # num_closeData = data.shape[0] - params.n
    iterations = 0
    bestfit = None
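
    # The example stops here.  A minimal sketch of the remaining classic RANSAC
    # loop, assuming the model object exposes fit(data) and get_error(data, model);
    # those method names are assumptions, only the parameters above come from the source.
    while iterations < params.max_iters:
        # split the data into a random minimal sample and the remaining points
        idxs = np.random.permutation(data.shape[0])
        maybe_inliers = data[idxs[:params.n]]
        test_points = data[idxs[params.n:]]

        # fit a candidate model and measure how well the other points agree
        maybe_model = params.model.fit(maybe_inliers)
        test_err = params.model.get_error(test_points, maybe_model)
        also_inliers = test_points[test_err < params.eps]

        # accept the sample once enough of the data is close to the model
        if len(also_inliers) > num_closeData:
            bestfit = np.concatenate((maybe_inliers, also_inliers))
            break
        iterations += 1

    return bestfit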