Exemplo n.º 1
0
    def __init__(self):
        """Set up the Vision node.

        Loads the HOG/SVM predictor for football/goal detection and
        builds the color definition list from the robot config file
        pointed to by the '/robot_config' rosparam.

        Raises:
            TypeError: if the '/robot_config' rosparam is not set.
        """

        super(Vision, self).__init__()

        #	message type published for detected objects
        self.objectsMsgType = visionMsg

        #	path of the robot config file, provided via rosparam
        robotConfigPathStr = rospy.get_param('/robot_config', None)

        if robotConfigPathStr is None:
            raise TypeError('Required robot config.')

        #	get model object
        #	positive threshold is 0.80 (an older comment said 0.70 —
        #	the code below is authoritative)
        #	NOTE(review): rectangleThreshold=30 presumably filters small
        #	candidate boxes — confirm against HOG_SVM
        self.predictor = HOG_SVM(FootballModelPath,
                                 GoalModelPath,
                                 0.80,
                                 rectangleThreshold=30)

        #	get color definition from color config ( get only values )
        colorConfigList = configobj.ConfigObj(
            robotConfigPathStr)["ColorDefinitions"]
        colorConfigList = colorConfigList.values()

        #	create color definition for using segmentation
        self.colorDefList = createColorDefFromDict(colorConfigList)
Exemplo n.º 2
0
	def __init__( self ):
		"""Set up the ImageProcessing node.

		Reads the robot config path from the '/robot_config' rosparam,
		creates the HOG/SVM football predictor and builds the color
		definition list used by the segmentation step.

		Raises:
			TypeError: when the '/robot_config' rosparam is missing.
		"""
		super( ImageProcessing, self ).__init__()

		#	message type published for detected objects
		self.objectsMsgType = visionMsg

		#	path of the robot config file, provided via rosparam;
		#	fail early when it is not configured
		configPath = rospy.get_param( '/robot_config', None )
		if configPath is None:
			raise TypeError( 'Required robot config.' )

		#	HOG/SVM classifier with positive threshold 0.70
		self.predictor = HOG_SVM( MODEL_PATH, 0.70 )

		#	color definitions from the config file — only the section
		#	values are needed for segmentation
		colorSections = configobj.ConfigObj( configPath )[ "ColorDefinitions" ]
		self.colorDefList = createColorDefFromDict( colorSections.values() )

		#	contour visualisation buffers, filled later
		self.contourVis1 = None
		self.contourVis2 = None
Exemplo n.º 3
0
#	image processing zone
#	live demo: segment camera frames by color and find the field boundary
#
cv2.namedWindow('originalImage', cv2.WINDOW_NORMAL)
cv2.namedWindow('edge', cv2.WINDOW_NORMAL)

#	trackbars for min/max thresholds on the 'edge' window
cv2.createTrackbar('Min', 'edge', 0, 200, nothing)
cv2.createTrackbar('Max', 'edge', 0, 200, nothing)

#	external camera (device index 1)
cap = cv2.VideoCapture(1)

#	get config
#colorDict = readConfig( 'color_2.ini' )
colorList = getColorListFromConfig(pathConfig)

#	create colordef msg
colorDef = createColorDefFromDict(colorList)

while True:

    #img = imageList[ indexIndicator ]
    #	grab the next frame from the camera
    ret, img = cap.read()

    #	smooth before segmentation
    #blur = cv2.bilateralFilter( img, 9, 75, 75 )
    blurGaussian = cv2.GaussianBlur(img, (5, 5), 0)

    #	get marker
    marker = colorSegmentation(blurGaussian, colorDef)

    #	get field boundary
    #	NOTE(review): id 2 is presumably the field (green) color ID —
    #	confirm against the colordef template; mask scaled to 0/255
    fieldBoundary, fieldMask = findBoundary(marker, 2, flip=False)
    fieldMask *= 255
Exemplo n.º 4
0
def main():
    """Run the football-detection demo over an image sequence.

    Reads frames from ``framePathStr`` (argument 1), classifies white
    blobs inside the field boundary with a HOG/SVM model loaded from
    ``modelPathStr`` (argument 2) and visualizes the result.

    Keys: 'd' = next frame, 'a' = previous frame, 'q' = quit.
    """

    #	define usage of program
    programUsage = "python %prog arg [option] [framePathStr] [modelPathStr] " + str(
        VERSIONNUMBER) + ', Copyright (C) 2018 FIBO/KMUTT'

    #	initial parser instance
    parser = optparse.OptionParser(usage=programUsage,
                                   description=PROGRAM_DESCRIPTION)

    #	add option of main script
    parser.add_option("--colorConfig",
                      dest="colorConfig",
                      action='store',
                      type='string',
                      help="Specify color config.",
                      default='../../config/color_config/colordef_test.ini')

    #	parse command line
    (options, args) = parser.parse_args()

    #	check number of argument from NUM_REQUIRE_ARGUMENT
    if len(args) != NUM_REQUIRE_ARGUMENT:
        #	raise error from parser
        parser.error("require {} argument(s)".format(NUM_REQUIRE_ARGUMENT))

    #	get option and argument
    framePathStr = args[0]
    modelPathStr = args[1]
    colorConfigPathStr = options.colorConfig

    #	get abs path of every frame in the sequence
    frameAbsPathStr = getImageList(framePathStr)

    #	get color config and create the color definition used by
    #	colorSegmentation
    colorList = getColorListFromConfig(colorConfigPathStr)
    colorDef = createColorDefFromDict(colorList)

    #	load image sequence; list() keeps it indexable on Python 3,
    #	where map() returns a lazy iterator
    frameList = list(map(cv2.imread, frameAbsPathStr))

    #	initial frame index
    idxFrameInt = 0

    #	final 2D position of the football (None until detected)
    finalPosition = None

    #	minimum probability for a region to be accepted as the ball
    ballConfidence = 0.70

    #	initial hog/svm instance for classifier
    #	FYI : float is confidence
    predictor = HOG_SVM(modelPathStr, ballConfidence)

    while True:

        #	get and blur current frame
        img = frameList[idxFrameInt]
        blurImage = cv2.blur(img, (5, 5))

        #	get marker
        marker = colorSegmentation(blurImage, colorDef)

        #	get only white object from marker
        #	NOTE :
        #		Change id of white marker, follow chattarin colordef template
        #		ID White : 5, old : 8
        #		ID Green : 2, old : 1
        whiteImageObject = np.zeros(marker.shape)
        whiteImageObject[marker == 5] = 1

        #	get field boundary
        fieldBoundary, fieldMask = findBoundary(marker, 2, flip=False)

        #	mask of white objects inside the field, scaled to 0/255 and
        #	cast back to uint8 (opencv supports only uint8)
        whiteObjectMask = fieldMask * whiteImageObject
        whiteObjectMask *= 255
        whiteObjectMask = whiteObjectMask.astype(np.uint8)

        #	get contours from white object
        #	NOTE(review): index [1] matches the OpenCV 3 return value
        #	(image, contours, hierarchy); OpenCV 4 returns only two values
        whiteContours = cv2.findContours(whiteObjectMask, cv2.RETR_TREE,
                                         cv2.CHAIN_APPROX_SIMPLE)[1]

        #	copy image for visualize
        visualizeImage = img.copy()

        #	extract feature from all white candidate regions
        extractStatus = predictor.extractFeature(img,
                                                 whiteContours,
                                                 objectPointLocation='bottom')

        if extractStatus:

            #	predict all white feature, which the ball is ?
            predictor.predict()

            #
            #	visualization zone
            #	blue rectangle = accepted as ball, red rectangle = rejected
            #

            for boundingBoxObject in predictor.boundingBoxListObject.boundingBoxList:

                if boundingBoxObject.footballProbabilityScore > ballConfidence:
                    rectColor = (255, 0, 0)
                else:
                    rectColor = (0, 0, 255)

                cv2.rectangle(visualizeImage,
                              boundingBoxObject.topLeftPositionTuple,
                              boundingBoxObject.bottomRightPositionTuple,
                              rectColor, 2)

                cv2.circle(visualizeImage,
                           boundingBoxObject.object2DPosTuple, 5,
                           (0, 255, 0), -1)

                #	annotate every candidate with its probability score
                cv2.putText(
                    visualizeImage, "{0:.4f}".format(
                        boundingBoxObject.footballProbabilityScore),
                    boundingBoxObject.topLeftPositionTuple,
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)

            #	get best region
            finalPosition = predictor.getBestRegion()

        if finalPosition is not None and len(finalPosition) != 0:
            cv2.circle(visualizeImage, tuple(finalPosition), 10, (255, 0, 0),
                       -1)

        #	clear candidate list for the next frame
        predictor.boundingBoxListObject.clearBoundingBoxList()

        #	draw field boundary contour
        cv2.drawContours(visualizeImage, [fieldBoundary], 0, (128, 0, 255), 1)

        #	show image
        cv2.imshow("show", visualizeImage)
        cv2.imshow("ball mask", whiteObjectMask)

        #	reset final position
        finalPosition = None

        #	waitkey and step through the sequence
        k = cv2.waitKey(50)
        if k == ord('q'):
            break
        elif k == ord('d'):
            idxFrameInt += 1
        elif k == ord('a'):
            idxFrameInt -= 1

        #	clamp frame index to the valid range
        if idxFrameInt >= len(frameList):
            idxFrameInt = len(frameList) - 1
        elif idxFrameInt <= 0:
            idxFrameInt = 0

    cv2.destroyAllWindows()
from scanLine2 import findLinearEqOfFieldBoundary

#	GLOBAL, check carefully
#	NOTE(review): OutlierThreshold looks like a pixel-distance limit for
#	findLinearEqOfFieldBoundary — confirm before changing
OutlierThreshold = 100
ConfigFilePath = '/home/neverholiday/work/ros_ws/src/hanuman_user/config/all_config/robot_config_1.ini'

#	open camera, external camera only
cap = cv2.VideoCapture( 1 )

#	get color config
colorConfigList = configobj.ConfigObj( ConfigFilePath )[ 'ColorDefinitions' ]
colorConfigList = colorConfigList.values()

#	create color definition msg
colorDef = createColorDefFromDict( colorConfigList )

#	one-shot guard: run the timing loop only on the first pass
flag = True

while True:

	if flag:
		#	benchmark: time 10 consecutive camera reads
		for i in range( 10 ):
			startTime = time.time()
			#	read
			ret, img = cap.read()

			#	NOTE(review): Python 2 print statement — this script is py2-only
			print "Used time {}".format( time.time() - startTime )

		flag = False

Exemplo n.º 6
0
							'S_min'		:	0,
							'V_min'		:	50,
							'RenderColor_RGB': '( 255, 255, 255 )',
							'MinArea_pixels' : 4 },
]

def renderColor( colorMap, colorDefList ):
	"""Render a segmented color-ID map as a color image.

	Args:
		colorMap: 2D array of per-pixel color IDs.
		colorDefList: iterable of color definition objects, each with an
			``ID`` attribute (int) and a ``RenderColor_RGB`` attribute
			holding a tuple literal string such as '( 255, 255, 255 )'.

	Returns:
		uint8 image of shape (H, W, 3); pixels whose ID matches no
		definition stay black.
	"""
	#	ast.literal_eval safely parses the tuple-literal strings from the
	#	config; eval() would execute arbitrary code from the config file
	import ast

	img = np.zeros( (colorMap.shape[0], colorMap.shape[1], 3),
					dtype = np.uint8 )

	for colorDict in colorDefList:
		#	parse the render color once per definition, then paint every
		#	pixel whose ID matches
		renderColorTuple = ast.literal_eval( colorDict.RenderColor_RGB )
		img[ colorMap[:,:] == colorDict.ID ] = renderColorTuple

	return img

#	color definitions built from the in-file default config
colorDefList = createColorDefFromDict( DEFAULT_COLORCONFIG )

#	read frames from a recorded field video
cap = cv2.VideoCapture( 'field3.avi' )

#	pause flag: when truthy, keep re-using the last grabbed frame
stop = 0
ret, frame = cap.read( )

while True:

	if not stop:
		ret, frame = cap.read( )

	#	work on a copy so drawing never touches the source frame
	img = frame.copy()

	if not ret:
		#	NOTE(review): Python 2 print statement — this script is py2-only
		print "Camera not found ..."