Example #1
def generatePieces(im, chessBoardPoints, whitePos, w):
    '''
    im -- image containing the chess board and pieces.
    chessBoardPoints -- positions of the squares on the board.
    whitePos -- one of 'T', 'B', 'L', 'R' for top, bottom, left, and right;
        detected from the location of the pieces at the beginning of the game.
    w -- the learned weights used to find the chess pieces.
    return -- a ChessGame object with pieces identified by location and
        color, but not by type.
    '''
    pieces = find_pieces.find_pieces(im, chessBoardPoints, w)
    
    # print pieces
    
    game = chessBoard.ChessGame()
    
    for row in xrange(8):
        for col in xrange(8):
            if pieces[row][col] != 0:
                if pieces[row][col] == -1:
                    pcolor = chessBoard.PieceColor.Black
                else:
                    pcolor = chessBoard.PieceColor.White
                
                # Determine square of the piece
                if whitePos == 'B':
                    Rank = row
                    File = col
                elif whitePos == 'T':
                    Rank = 7-row
                    File = col
                elif whitePos == 'L':
                    Rank = col
                    File = 7-row
                elif whitePos == 'R':
                    Rank = 7-col
                    File = 7-row
                else:
                    assert False, 'Invalid White Position (generatePieces)'

                sq = chr(ord('a') + File) + chr(ord('1') + Rank)
                
                # Set the piece as being detected.
                game.setSquare(sq, color=pcolor)
    
    print game
    return game
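
A minimal usage sketch of the orientation-dependent (row, col) -> square mapping used above; the helper name _squareFor and the sample assertions are illustrative assumptions, not part of the original project:

def _squareFor(row, col, whitePos):
    # Mirrors the rank/file mapping inside generatePieces.
    if whitePos == 'B':
        rank, f = row, col
    elif whitePos == 'T':
        rank, f = 7 - row, col
    elif whitePos == 'L':
        rank, f = col, 7 - row
    else:  # 'R'
        rank, f = 7 - col, 7 - row
    return chr(ord('a') + f) + chr(ord('1') + rank)

assert _squareFor(0, 0, 'B') == 'a1'  # white at the bottom: cell (0, 0) is a1
assert _squareFor(0, 0, 'T') == 'a8'  # white at the top: the same cell is a8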
Example #2
def main(args):
    if args.method == "test_waldo":
        try:
            image = norm(np_from_img(args.finished), 1)
        except FileNotFoundError:
            print("Invalid finished image path: " + args.finished)
            return

        try:
            kernel = norm(np_from_img(args.scattered), 2) - 1
        except FileNotFoundError:
            print("Invalid scattered image path: " + args.scattered)
            return
        val,idx = find_waldo(image, kernel)
        print(val,idx)
        display_waldo(image,kernel,idx)
        return

    if args.method == "test_blob":
        try:
            scattered_image = norm(np_from_img(args.scattered), 255)
        except FileNotFoundError:
            print("Invalid scattered image path: " + args.scattered)
            return
        pieces = find_pieces(scattered_image)
        for p in range(0, len(pieces)):
            ## Grab the piece's image, invert, show ##
            piece = pieces[p].descriptor
            piece = 1 - np.uint8(piece)
            plt.imshow(piece)
            plt.show()

            ## Center point of the piece's image chunk ##
            shap = np.shape(piece)
            cent_x = shap[0]//2
            cent_y = shap[1]//2 

            ## Find the puzzle piece's contour ##
            contours, h = cv2.findContours(piece, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
            contour_cart = contours[0]
            len_contour = np.shape(contour_cart)
            len_contour = len_contour[0]
            contour_pol = np.zeros((len_contour, 2))

            ## Convert from cartesian to polar; also flatten OpenCV's (N, 1, 2) contour format into a plain (N, 2) array ##
            for i in range(len(contour_cart)):
                x = contour_cart[i][0][0] - cent_x
                y = contour_cart[i][0][1] - cent_y
                contour_pol[i][0] = math.sqrt((x**2 + y**2))
                contour_pol[i][1] = math.atan2(y, x)*360/(2*np.pi) + 180 # (-180, 180) -> (0, 360)
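
            # Worked example with made-up numbers: a contour point offset by
            # (3, 4) from the center gives r = sqrt(3**2 + 4**2) = 5.0 and
            # theta = atan2(4, 3)*180/pi + 180 ~= 53.1 + 180 = 233.1 degrees.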

            ## Grab the average radial distance for the contour ##
            avg_dist = 0
            for i in range(0, len_contour):
                avg_dist += contour_pol[i][0]
            avg_dist /= len_contour
            min_dist = contour_pol.min(axis=0)
            min_dist = min_dist[0]
            print("Avg dist from center: {0}, min dist from center: {1}".format(avg_dist, min_dist))

            ## Find all points below avg, and above avg ##
            hole_points = []
            bump_points = []
            avg_min_radius = avg_dist - min_dist
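            # Classification: a point whose radius dips below the average is a
            # candidate hole point; one that sticks out past the average by more
            # than the deepest dip (avg_dist - min_dist) is a candidate bump point.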
            for i in range(0, len_contour-1):
                if contour_pol[i][0] < avg_dist:
                    hole_points.append(contour_pol[i])
                elif contour_pol[i][0] > avg_dist + avg_min_radius:
                    bump_points.append(contour_pol[i])

            ## Find start and end angles/points for each hole ##
            hole_bounds = []
            max_angle_separation = 10
            last_angle = hole_points[0][1]
            for i in range(0, len(hole_points)-1):
                angle_between = min_angle_between(last_angle, hole_points[i][1])#min(abs(last_angle - hole_points[i][1]), abs(hole_points[i][1] - last_angle))
                print("last_angle: {0}, this_angle: {1}, angle_between: {2}".format(last_angle, hole_points[i][1], angle_between))
                if angle_between > max_angle_separation:
                    hole_bounds.append(i)
                last_angle = hole_points[i][1]
            print(hole_bounds)


            hole_count = len(hole_bounds)//2
            hole = 0
            hole_angles = []
            while hole < hole_count:
                # Average the contour angles between this hole's start and end indices.
                start, end = hole_bounds[hole*2], hole_bounds[(hole*2)+1]
                hole_avg = 0
                for i in range(start, end):
                    hole_avg += hole_points[i][1]
                if end > start:
                    hole_avg /= float(end - start)
                hole_angles.append(hole_avg)
                print("Found one hole at theta = {0}".format(hole_avg))
                hole += 1



            ## Mark the center point and display the piece ##
            piece[cent_x][cent_y] = 0
            plt.imshow(piece)
            plt.show()
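
The helper min_angle_between used above is not shown in this example; a minimal sketch of one plausible implementation (an assumption, not the original code), returning the smaller angular separation in degrees:

def min_angle_between(a, b):
    # Angles are expected in [0, 360); the result lies in [0, 180].
    d = abs(a - b) % 360.0
    return min(d, 360.0 - d)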
Example #3
def _mainLoop(videoStream, histDuration = 15, 
              saveVideo=False, saveName='processed_mhi.avi',
              trainingSize=20, transcribeGame=True, transcriptionName='transcribed.pgn'):
    '''This function is unfortunately bloated, and will probably remain so
    even after the project is complete.  However, it handles the main logic of
    the program.  Namely, it takes an input video stream of a chess game
    and performs the main work of grabbing frames, computing obstructions,
    finding piece locations, updating the board, and recording any output
    video / notation.
    '''
    
    # Debug code
    DEBUG = False
    if DEBUG:
        print 'saveVideo', saveVideo
        print 'saveName', saveName
    
    # Train the piece finder and grab chess board location statistics
    numLocations = 2*81
    cornerStats, w, frames = _trainPieceDetection( videoStream )
    print 'Training Phase complete -- entering actual detection'

    # Prepare the video stream and motion history images.    
    s, prev_im = video.read()
    if not s:
        exit(1)
    prev_gray = cvtColor(prev_im, COLOR_RGB2GRAY)
    
    s, next_im = video.read()
    if not s:
        exit(1)

    if transcribeGame:
        outFile = open(transcriptionName, 'w')
    
    mhi = np.zeros((len(next_im), len(next_im[0])), np.float32)
    
    priorMotionFlag = False

    # Grab a starting location
    activeSet = chessBoard.ChessGame()
    #chessBoardPoints = find_board.find_board(next_im)
    chessBoardPoints = cornerStats.mean()
    pieces = find_pieces.find_pieces(next_im, chessBoardPoints, w)
    whiteLocation = 'B'
    if pieces[3][0] == 1:
        whiteLocation = 'L'
    elif pieces[0][3] == 1:
        whiteLocation = 'T'
    elif pieces[3][7] == 1:
        whiteLocation = 'R'
    
    print pieces, whiteLocation
    currentPosition = chessBoard.ChessGame()
    currentPosition.newGame()
    
    # prepare to save videos
    if saveVideo:
        FPS = video.get(cv.CV_CAP_PROP_FPS)
        fheight = video.get(cv.CV_CAP_PROP_FRAME_HEIGHT)
        fwidth = video.get(cv.CV_CAP_PROP_FRAME_WIDTH)
        print FPS, fwidth, fheight
        videoOut = VideoWriter(filename=saveName, 
                               fourcc=cv.CV_FOURCC('P', 'I', 'M', '1'),
                               fps=FPS, frameSize=(int(fwidth), int(fheight)),
                               isColor=True )
    
    
    # Main Loop -- repeatedly determine whether
    loop = 0
    moveNumber = 1
    boardFound = False
    while s:
        next_gray = cvtColor(next_im, COLOR_RGB2GRAY)
        mask = absdiff( prev_gray, next_gray )
        ret, mask_t = threshold( mask, 50, 255, THRESH_BINARY )
        updateMotionHistory(mask_t, mhi, loop, histDuration)
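        # absdiff + binary threshold gives a per-pixel motion mask; the motion
        # history image then stores, per pixel, the timestamp ("loop" here) of
        # the most recent motion, clearing entries older than histDuration.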
        
        # flag which states if the board has been detected.
        detectionFlag = False 
        
        if loop % 50 == 0:
#            try:
            chessBoardPoints = find_board.find_board(next_im)
#            detectionFlag = True
#
#        if detectionFlag:
#            # check for outlier status, and remove outliers
#            stdCorners = np.maximum(cornerStats.std(), 
#                                    np.ones( cornerStats.dim ) )
#            lowerBound = cornerStats.mean() - 5*stdCorners 
#            upperBound = cornerStats.mean() + 5*stdCorners
#            if  ( np.count_nonzero( chessBoardPoints <= upperBound ) \
#                    >= numLocations - 1) and \
#                ( np.count_nonzero( chessBoardPoints >= lowerBound ) )\
#                    >= numLocations - 1:
#                includeInStatistics = True
#            else:
#                print 'OUTLIER DETECTED -- IGNORING'
#                includeInStatistics = False
#            
#            if includeInStatistics == True:
#                print 'Computing Statistics'
#                cornerStats.append( chessBoardPoints )
#            else:
#                print 'Successfully ignored'
#                
#        meanCorners = cornerStats.mean()
        # Currently, not using any statistics tricks
        meanCorners = chessBoardPoints
        tlc, trc, blc, brc = _findBoundaries( meanCorners )
        
        motionPresent, mhi_processed = \
            _determineMotionPresence(mhi, tlc, trc, blc, brc)
        
        if priorMotionFlag and (not motionPresent):
            # Motion has just stopped: re-sample the board occupancy and infer the move that was played.
            newPosition = generatePieces(next_im, meanCorners, whiteLocation, w)
            possibleMoves = currentPosition.discoverMoves(newPosition)
            if len(possibleMoves) > 0:
                # Currently, we are not handling ambiguous moves.
                if currentPosition.whiteToMove:
                    if transcribeGame:
                        outFile.write('%d. ' % moveNumber)
                    moveNumber += 1
                if transcribeGame:
                    outFile.write('%s ' % (possibleMoves[0], ))
                    
                print 'Move performed: ', possibleMoves[0]
                currentPosition.performMove(possibleMoves[0])
            else:
                print 'whoops, no legal move found'
                print 'currentPosition (as internalized):'
                print currentPosition
                if currentPosition.plausibleNextPosition( newPosition ):
                    try:
                        moves = raw_input('Please type the missed moves in one line (enter for none):  ')
                    except (EOFError):
                        print 'EOFerror -- completing all video processing.'
                        break
                    print
                    moves = moves.split()
                    for mv in moves:
                        if currentPosition.whiteToMove:
                            if transcribeGame:
                                outFile.write('%d. ' % moveNumber)
                            moveNumber += 1
                        if transcribeGame:
                            outFile.write('%s ' % (mv, ))
                            outFile.write('{move missed by vision system} ')

                        currentPosition.performMove( mv )
                        print 'move(s) missed by vision system: ', mv
        
        if saveVideo:
            mhi_color = np.minimum(mhi_processed, 255)
            mhi_color = cvtColor(mhi_color, COLOR_GRAY2RGB)
            next_im_rot, theta_c, tlc_aa_c, brc_aa_c \
                = _setBoundary(next_im, tlc, trc, blc, brc)
            output_frame = np.uint8(mhi_color)/2 + next_im_rot/2
            videoOut.write( np.uint8(output_frame) )

        if DEBUG:        
            # Debugging -- Display what should be in the output video.
            next_im_rot, theta_c, tlc_aa_c, brc_aa_c \
                = _setBoundary(next_im, tlc, trc, blc, brc)
            imshow('test1', mhi_processed)
            imshow('test2', next_im_rot)
            waitKey(1)
            
        # update variables for next loop iteration
        loop += 1
        priorMotionFlag = motionPresent
        prev_im = next_im
        prev_gray = next_gray
        s, next_im = video.read()
    if saveVideo:
        del videoOut

    if DEBUG:    
        # Debugging
        destroyAllWindows()
        
    if transcribeGame:
        outFile.close()
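
The helpers _trainPieceDetection, _findBoundaries, _setBoundary, and _determineMotionPresence belong to the same module and are not reproduced here. As a rough sketch of the kind of check _determineMotionPresence might perform (an assumption based only on how it is called above: crop the motion history image to the board boundary and flag motion when enough pixels were recently active):

def _determineMotionPresenceSketch(mhi, tlc, trc, blc, brc, minActive=200):
    # Corner points are assumed to be (x, y) pairs; numpy is assumed imported as np.
    top    = int(min(tlc[1], trc[1]))
    bottom = int(max(blc[1], brc[1]))
    left   = int(min(tlc[0], blc[0]))
    right  = int(max(trc[0], brc[0]))
    board_mhi = mhi[top:bottom, left:right]
    motionPresent = np.count_nonzero(board_mhi) > minActive
    return motionPresent, board_mhi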