Example #1
def _trainPieceDetection(video, numFrames=50):
    '''
    This function handles training the piece detector and collecting
    initial statistics on the board location.

    video -- the video file which will be used for training,
    numFrames -- the number of frames which will be grabbed from the
        video file for the purposes of training the piece detector

    returns:  cornerStats, w, frames
    cornerStats -- object of class stats which keeps track of the
        locations of the chess board square corners.
    w -- the weights learned from training the piece detector.
    frames -- the frames which were drawn from the video for the purposes of 
        training.
    '''
    pieceLocationsF = []
    for col in [0, 1, 6, 7]:
        for row in xrange(8):
            pieceLocationsF.append( (row, col) )
    
    pieceLocations = [pieceLocationsF for i in xrange(numFrames)]

    cornerStats = stats( (81, 2) )

    frames = []
    for i in xrange(numFrames):
        s, f = video.read()
        if not s:
            print 'Error:  The video was too short'
            break

        chessBoardPoints = find_board.find_board(f)
        cornerStats.append(chessBoardPoints)
        frames.append(f)

    #print chessBoardPoints
    #print
    #print cornerStats.mean()

    w = find_pieces.train_detector( frames, pieceLocations, cornerStats.mean() )

    return cornerStats, w, frames
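A minimal usage sketch for Example #1, assuming OpenCV's cv2.VideoCapture and that the project's stats, find_board, and find_pieces modules are importable; the video path is borrowed from Example #2 and otherwise hypothetical:

import cv2

video = cv2.VideoCapture('./data/full_game_Ruy_Lopez.avi')
cornerStats, w, frames = _trainPieceDetection(video, numFrames=50)
# cornerStats accumulates the 81 detected corner locations from each training
# frame, so its mean serves as the board geometry; w is the trained piece
# detector weight returned by find_pieces.train_detector.
print cornerStats.mean()    # averaged (81, 2) array of corner coordinates
print len(frames)           # number of training frames actually read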
Example #2
def test():

	print "Training a classifier..."

	# structures to hold frames and labels
	frames = []
	piece_locations = []
	
	# video to train on
	vid = cv2.VideoCapture('./data/full_game_Ruy_Lopez.avi')	

	# capture a frame and align it
	Im = vid.read()[1]

	board_corners = find_board.find_board(Im)	
	frames.append(Im)
	
	# these are not the correct locations, but roll with it...
	white_pieces = [(i,j) for i in range(8) for j in range(2)]
	black_pieces = [(i,j) for i in range(8) for j in range(6,8)]

	"""
	# now selectively correct the pieces
	white_pieces.remove((6,0))	
	white_pieces.remove((5,1))	
	white_pieces.append((5,2))
	white_pieces.append((5,3))

	black_pieces.remove((1,7))
	black_pieces.remove((3,6))
	black_pieces.append((2,5))
	black_pieces.append((3,3))
	"""

	piece_locations.append(white_pieces+black_pieces)

	w = train_detector(frames, piece_locations, board_corners)

	# now test it
	
	# video to test on
	vid2 = cv2.VideoCapture('./data/full_game_Ruy_Lopez.avi')	
	
	for frame in range(2400,100000,200):
		print frame
		vid2.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frame)

		# capture a frame and align it
		Im2 = vid2.read()[1]

		Im2_warp = find_board.align_image(Im2, board_corners)[0]
		Im2_circ = Im2_warp.copy()

		pieces = find_pieces(Im2, board_corners, w=w)

		white_pieces = zip(*nonzero(pieces == 1))
		black_pieces = zip(*nonzero(pieces == -1))

		print pieces

		for piece in white_pieces:
			cv2.circle(Im2_circ, ((piece[1]+1)*50 + 15, 15 + (1 + piece[0])*50), 6, (0,0,200))
		for piece in black_pieces:
			cv2.circle(Im2_circ, ((piece[1]+1)*50 + 15, 15 + (1 + piece[0])*50), 6, (0,200,0))

		cv2.namedWindow('test',0)
		cv2.imshow('test', Im2_circ)
		cv2.waitKey(0)
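The drawing loops above map a board square (row, col) to the pixel position ((col+1)*50 + 15, (row+1)*50 + 15), which assumes the aligned image uses 50-pixel squares with a one-square border. A small helper (hypothetical, not part of the project) makes that mapping explicit:

def square_to_pixel(row, col, square_px=50, border_px=50, inset_px=15):
	# Skip the border, advance whole squares, then move 15 px into the
	# square -- the same arithmetic used for the cv2.circle calls above.
	x = border_px + col * square_px + inset_px
	y = border_px + row * square_px + inset_px
	return (x, y)

# e.g. the white-piece loop could be written as:
# for piece in white_pieces:
#	cv2.circle(Im2_circ, square_to_pixel(piece[0], piece[1]), 6, (0, 0, 200))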
Example #3
def _mainLoop(videoStream, histDuration = 15, 
              saveVideo=False, saveName='processed_mhi.avi',
              trainingSize=20, transcribeGame=True, transcriptionName='transcribed.pgn'):
    '''This function is unfortunately bloated, and will probably remain so
    even after the project is complete.  However, it handles the main logic of
    the program.  Namely, it takes an input video stream of a chess game,
    and performs the main logic of grabbing frames, computing obstructions,
    finding piece locations, updating the board, and recording any output
    video / notation.
    '''
    
    # Debug code
    DEBUG = False
    if DEBUG:
        print 'saveVideo', saveVideo
        print 'saveName', saveName
    
    # Train the piece finder and grab chess board location statistics
    numLocations = 2*81
    cornerStats, w, frames = _trainPieceDetection( videoStream )
    print 'Training Phase complete -- entering actual detection'

    # Prepare the video stream and motion history images.    
    s, prev_im = videoStream.read()
    if not s:
        exit(1)
    prev_gray = cvtColor(prev_im, COLOR_RGB2GRAY)
    
    s, next_im = videoStream.read()
    if not s:
        exit(1)

    if transcribeGame:
        outFile = open(transcriptionName, 'w')
    
    mhi = np.zeros(next_im.shape[:2], np.float32)
    
    priorMotionFlag = False

    # Grab a starting location
    activeSet = chessBoard.ChessGame()
    #chessBoardPoints = find_board.find_board(next_im)
    chessBoardPoints = cornerStats.mean()
    pieces = find_pieces.find_pieces(next_im, chessBoardPoints, w)
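    # Infer which image edge white starts from by probing one square on the
    # left (pieces[3][0]), top (pieces[0][3]), and right (pieces[3][7]) edges
    # for a white piece (+1); the default 'B' assumes white is at the bottom.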
    whiteLocation = 'B'
    if pieces[3][0] == 1:
        whiteLocation = 'L'
    elif pieces[0][3] == 1:
        whiteLocation = 'T'
    elif pieces[3][7] == 1:
        whiteLocation = 'R'
    
    print pieces, whiteLocation
    currentPosition = chessBoard.ChessGame()
    currentPosition.newGame()
    
    # prepare to save videos
    if saveVideo:
        FPS = videoStream.get(cv.CV_CAP_PROP_FPS)
        fheight = videoStream.get(cv.CV_CAP_PROP_FRAME_HEIGHT)
        fwidth = videoStream.get(cv.CV_CAP_PROP_FRAME_WIDTH)
        print FPS, fwidth, fheight
        videoOut = VideoWriter(filename=saveName, 
                               fourcc=cv.CV_FOURCC('P', 'I', 'M', '1'),
                               fps=FPS, frameSize=(int(fwidth), int(fheight)),
                               isColor=True )
    
    
    # Main Loop -- repeatedly check whether motion over the board has stopped;
    # when it has, re-detect the pieces and update the recorded game state.
    loop = 0
    moveNumber = 1
    boardFound = False
    while s:
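        # Build the motion-history image: threshold the frame difference and
        # fold it into the running MHI using the loop counter as a timestamp.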
        next_gray = cvtColor(next_im, COLOR_RGB2GRAY)
        mask = absdiff( prev_gray, next_gray )
        ret, mask_t = threshold( mask, 50, 255, THRESH_BINARY )
        updateMotionHistory(mask_t, mhi, loop, histDuration)
        
        # flag which states if the board has been detected.
        detectionFlag = False 
        
        if loop % 50 == 0:
#            try:
            chessBoardPoints = find_board.find_board(next_im)
#            detectionFlag = True
#
#        if detectionFlag:
#            # check for outlier status, and remove outliers
#            stdCorners = np.maximum(cornerStats.std(), 
#                                    np.ones( cornerStats.dim ) )
#            lowerBound = cornerStats.mean() - 5*stdCorners 
#            upperBound = cornerStats.mean() + 5*stdCorners
#            if  ( np.count_nonzero( chessBoardPoints <= upperBound ) \
#                    >= numLocations - 1) and \
#                ( np.count_nonzero( chessBoardPoints >= lowerBound ) )\
#                    >= numLocations - 1:
#                includeInStatistics = True
#            else:
#                print 'OUTLIER DETECTED -- IGNORING'
#                includeInStatistics = False
#            
#            if includeInStatistics == True:
#                print 'Computing Statistics'
#                cornerStats.append( chessBoardPoints )
#            else:
#                print 'Successfully ignored'
#                
#        meanCorners = cornerStats.mean()
        # Currently, not using any statistics tricks
        meanCorners = chessBoardPoints
        tlc, trc, blc, brc = _findBoundaries( meanCorners )
        
        motionPresent, mhi_processed = \
            _determineMotionPresence(mhi, tlc, trc, blc, brc)
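        # A move is taken to be complete on the falling edge of the motion
        # signal: motion was present on the previous iteration but not now.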
        
        if priorMotionFlag and (not motionPresent):
            # Place logic here for updating the position of the board
            newPosition = generatePieces(next_im, meanCorners, whiteLocation, w)
            possibleMoves = currentPosition.discoverMoves(newPosition)
            if len(possibleMoves) > 0:
                # Currently, we are not handling ambiguous moves.
                if currentPosition.whiteToMove:
                    if transcribeGame:
                        outFile.write('%d. ' % moveNumber)
                    moveNumber += 1
                if transcribeGame:
                    outFile.write('%s ' % (possibleMoves[0], ))
                    
                print 'Move performed: ', possibleMoves[0]
                currentPosition.performMove(possibleMoves[0])
            else:
                print 'whoops, no legal move found'
                print 'currentPosition (as internalized):'
                print currentPosition
                if currentPosition.plausibleNextPosition( newPosition ):
                    try:
                        moves = raw_input('Please type the missed moves in one line (enter for none):  ')
                    except (EOFError):
                        print 'EOFerror -- completing all video processing.'
                        break
                    print
                    moves = moves.split()
                    for mv in moves:
                        if currentPosition.whiteToMove:
                            if transcribeGame:
                                outFile.write('%d. ' % moveNumber)
                            moveNumber += 1
                        if transcribeGame:
                            outFile.write('%s ' % (mv, ))
                            outFile.write('{move missed by vision system} ')

                        currentPosition.performMove( mv )
                        print 'move(s) missed by vision system: ', mv
        
        if saveVideo:
            mhi_color = np.minimum(mhi_processed, 255)
            mhi_color = cvtColor(mhi_color, COLOR_GRAY2RGB)
            next_im_rot, theta_c, tlc_aa_c, brc_aa_c \
                = _setBoundary(next_im, tlc, trc, blc, brc)
            output_frame = np.uint8(mhi_color)/2 + next_im_rot/2
            videoOut.write( np.uint8(output_frame) )

        if DEBUG:        
            # Debugging -- Display what should be in the output video.
            next_im_rot, theta_c, tlc_aa_c, brc_aa_c \
                = _setBoundary(next_im, tlc, trc, blc, brc)
            imshow('test1', mhi_processed)
            imshow('test2', next_im_rot)
            waitKey(1)
            
        # update variables for next loop iteration
        loop += 1
        priorMotionFlag = motionPresent
        prev_im = next_im
        prev_gray = next_gray
        s, next_im = videoStream.read()
    if saveVideo:
        del videoOut

    if DEBUG:    
        # Debugging
        destroyAllWindows()
        
    if transcribeGame:
        outFile.close()
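Reading the three examples together, a top-level driver might look like the sketch below; the entry point and file path are assumptions rather than part of the examples above, and _mainLoop trains the detector itself via _trainPieceDetection before processing the rest of the game:

import cv2

def main():
    # Open the recorded game and hand the stream to the main loop, which
    # performs training, motion tracking, and transcription internally.
    videoStream = cv2.VideoCapture('./data/full_game_Ruy_Lopez.avi')
    if not videoStream.isOpened():
        print 'Error: could not open the input video'
        return
    _mainLoop(videoStream, saveVideo=False,
              transcribeGame=True, transcriptionName='transcribed.pgn')

if __name__ == '__main__':
    main()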