def main():
	
	#Define flags for face detection and background detection
	face_flag  = 0
	bg_flag    = 1
	start_flag = 0
	stop_flag  = 0
	
	#Set arbitrary limits
	wait_limit = 45 # number of frames before ending action
	area_limit = 0 # minimum blob area (in pixels)
	
	#MOG2 background subtractor: history of 500 frames, variance threshold of 50
	fgbg = cv2.createBackgroundSubtractorMOG2(500,50)
	bgLim = 10000
	
	#Parse all of the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help = "path to the (optional) video file")
	args = vars(ap.parse_args())
	
	# if no video, use webcam
	if not args.get("video", False):
		nome = 0
	else:
		nome = args["video"]
	
	# Capture first frame
	
	cap = cv2.VideoCapture(nome)
	ret, frame = cap.read()
	if cap.isOpened():
		print "Success\n"
	else:
		print "Unable to open file/webcam"
		return

	# Create some random colors
	color = np.random.randint(0,255,(100,3))
	
	#Find face if flagged
	if face_flag == 1:
		minSkin,maxSkin = findMinMaxHand(frame)#findMinMaxSkin(frame)
		#print minSkin
		#print maxSkin


	#Initialize arrays, counters, masks, etc
	objectx_old = []
	objecty_old = []
	
	bg_cnt = 0
	ct1 = 0
	ct2 = 0
	start_cnt = 0
	stop_cnt  = 0
	
	sz = np.shape(frame)
	mask = np.zeros(shape=(sz[0],sz[1],3,20), dtype = np.uint8)
	mask_old = np.copy(mask)
	tim = np.zeros(20)
	
	#Loop through each frame
	while(cap.isOpened()):
		
		#If background subtraction is on, do that
		if bg_flag == 1:
			#if bg_cnt <= bgLim:
			fgmask = fgbg.apply(frame)
			#bg_cnt = bg_cnt+1;
				#print "Apply BG Sub"
			masked_frame = cv2.bitwise_and(frame,frame,mask = fgmask)
			
		else:
			masked_frame = frame

		#If face detection is on, use personalized skin hue for binary conversion
		if face_flag == 1:
			binary_frame = personalSkin2BW(masked_frame,minSkin,maxSkin)
		#else:
		#	binary_frame = skin2BW(masked_frame)
		
		#Draw the region-of-interest line and ignore everything above it
		frame = cv2.line(frame,(40,200),(600,200),(0,255,0),2)
		#crop_img = frame[200:600, 40:600]
		
		binary_frame = skin2BW(frame)
		binary_frame[0:200, :] = 0 #blank the rows above the y=200 line
		
		cv2.imshow('BW Frame',binary_frame)
		#Find blobs in binary image
		__ , contours, contoursOut, defects = blob2(binary_frame,frame,area_limit)
		
		if not defects == []:
			#print "Defects Exist\n"
			#Check flags to see what text should be displayed...
			if start_flag == 0 and stop_flag == 0:
				#print "Looking for start"
				start_flag = startGest(frame,contoursOut,defects)
				if start_flag == 1 and start_cnt < 10:
					start_cnt = start_cnt+1
					start_flag = 0
				else:
					start_cnt = 0
				stop_flag = 0
			elif start_flag == 1 and ct1<10:
				cv2.putText(frame,"Start", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
				print "Start"
				ct1 = ct1+1
				ct2 = 0	
			elif start_flag == 1 and ct1>=10:
				#print "Looking for stop"
				stop_flag = stopGest(frame,contoursOut,defects)
				if stop_flag == 1 and stop_cnt < 10:
					stop_cnt = stop_cnt+1
					stop_flag = 0
				else:
					stop_cnt = 0
					
				(frame,objectx_old,objecty_old,mask,mask_old) = trackObj2(frame,contours,objectx_old,objecty_old,mask,mask_old,area_limit)
				
			if stop_flag == 1 and ct2 < 10:
				cv2.putText(frame,"Stop", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)			
				print "Stop"
				start_flag = 0
				#Increment frame counter
				ct1 = 0
				ct2 = ct2+1	
			elif stop_flag == 1 and ct2 >= 10:
				#Clear out all flags after being stopped for X amount of time
				start_flag = 0
				stop_flag  = 0
				ct1 = 0
				ct2 = 0
				
				print "Output mask created. \n"
				mask_output = mask_old
				#print mask_output
				objectx_old = []
				objecty_old = []
						
		else:
			print "Defects do not exist\n"
		#Show frame with magic applied
		cv2.imshow('frame',frame)
		
		#Check for break code
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
		
		ret, frame = cap.read()
		
	cap.release()
	cv2.destroyAllWindows()
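The helpers skin2BW and personalSkin2BW are called above but are not part of this listing. Assuming they perform a simple HSV threshold for skin tones, a minimal stand-in could look like the sketch below; the function name, the HSV bounds, and the morphological clean-up are illustrative assumptions, not the author's implementation.

import cv2
import numpy as np

def skin2BW_sketch(frame_bgr,
		lower_hsv=(0, 48, 80),      # assumed lower HSV bound for skin tones
		upper_hsv=(20, 255, 255)):  # assumed upper HSV bound for skin tones
	"""Hypothetical stand-in for skin2BW: return a binary skin mask."""
	hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
	mask = cv2.inRange(hsv, np.array(lower_hsv, np.uint8), np.array(upper_hsv, np.uint8))
	#Open/close to drop speckle noise and fill small holes before blob detection
	kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
	mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
	mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
	return mask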
def main():
	
	#Define flags for face detection and background detection
	face_flag  = 0
	bg_flag    = 1
	gest_flag  = 1
	
	#Flags used for collection of data
	start_flag = 0
	stop_flag  = 0
	
	#Set arbitrary limits
	wait_limit = 45 # number of frames before ending action
	area_limit = 500 # minimum blob area (in pixels)
	
	#MOG2 background subtractor: history of 50000 frames, variance threshold of 100
	fgbg = cv2.createBackgroundSubtractorMOG2(50000,100)
	
	#Parse all of the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help = "path to the (optional) video file")
	args = vars(ap.parse_args())
	
	# if no video, use webcam
	if not args.get("video", False):
		nome = 0
	else:
		nome = args["video"]
	
	# Capture first frame
	
	cap = cv2.VideoCapture(nome)
	ret, frame = cap.read()
	if cap.isOpened():
		print "Success\n"
	else:
		print "Unable to open file/webcam"
		return

	# Create some random colors
	color = np.random.randint(0,255,(100,3))
	
	#Find face if flagged
	if face_flag == 1:
		minSkin,maxSkin = findMinMaxHand(frame)#findMinMaxSkin(frame)
		#print minSkin
		#print maxSkin


	#Initialize arrays, counters, masks, etc
	objectx_old = []
	objecty_old = []
	
	#Starting all of the counters
	ct = 0
	ct1 = 0
	ct2 = 0
	start_cnt = 0
	stop_cnt  = 0
	
	#Creating mask arrays
	sz = np.shape(frame)
	mask = np.zeros(shape=(sz[0],sz[1],3,20), dtype = np.uint8)
	mask_old = np.copy(mask)
	tim = np.zeros(20)
	
	#Loop through each frame
	while(cap.isOpened()):
		
		#If background subtraction is on, do that
		if bg_flag == 1:
			fgmask = fgbg.apply(frame)
			masked_frame = cv2.bitwise_and(frame,frame,mask = fgmask)
		else:
			masked_frame = frame

		#If face detection is on, use personalized skin hue for binary conversion
		if face_flag == 1:
			binary_frame = personalSkin2BW(masked_frame,minSkin,maxSkin)
		#elif gest_flag == 1:
		#	frame = cv2.line(frame,(40,200),(600,200),(0,255,0),2)
		#	crop_img = frame[200:600, 40:600]
		#	binary_frame = skin2BW(crop_img)
		else:
			binary_frame = skin2BW(masked_frame)
		
		#Find blobs in binary image
		if gest_flag == 1:
			__ , contours, contoursOut, defects = blob2(binary_frame,frame,area_limit)
		else:
			frame, contours = blob(binary_frame,frame,area_limit)
		
		#Show binary image in its own window (the 'frame' window is used for the result below)
		cv2.imshow('BW Frame',binary_frame)
		
		#Check if a frame was found
		if ret:
						
			if gest_flag == 1:
				if not defects == []:
					if start_flag == 0 and stop_flag == 0:
						print "Looking for start..."
						#cv2.waitKey(10)
						#os.system("pause")
						start_flag = startGest(frame,contoursOut,defects)
						(start_flag,start_cnt,ct2) = testGest(start_flag,start_cnt,ct2)
						stop_flag = 0
						img = frame
					elif start_flag == 1 and ct1<10:
						cv2.putText(frame,"Start",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
						print "Start"
						ct1 = ct1+1
						ct2 = 0
						stop_flag = 0
						#print "ct1"
						#print ct1
						#cv2.waitKey(27)
						(img,objectx_old,objecty_old,mask,mask_old) = trackObj2(frame,contours,objectx_old,objecty_old,mask,mask_old, area_limit)
						
						
						
					elif start_flag == 1 and ct1 >= 10:
						#Looking for stop...
						#print "Looking for stop ..."
						stop_flag = stopGest(frame,contoursOut,defects)
						(stop_flag,stop_cnt,ct1) = testGest(stop_flag,stop_cnt,ct1)
						#print stop_cnt
						if stop_flag == 1:
							start_flag = 0
							
						(img,objectx_old,objecty_old,mask,mask_old) = trackObj2(frame,contours,objectx_old,objecty_old,mask,mask_old,area_limit)
					
					
					elif stop_flag == 1 and ct2 < 10:
						#Stop Gesture collection
						cv2.putText(frame,"Stop",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
						print "Stop"
						#start_flag = 0
						ct1 = 0
						ct2 = ct2+1
										
						img = frame
					elif stop_flag == 1 and ct2 >= 10:
						#Clear out all flags after being stopped for X amount of time
						start_flag = 0
						stop_flag  = 0
						ct1 = 0
						ct2 = 0
						
						print "Output mask created. \n"
						mask_output = mask_old
						objectx_old = []
						objecty_old = []
						
						img = frame
				else: #If the defects aren't found, do nothing
					#print "Defects do not exist. \n"
					img = frame
			
			else: #If not using the gestures to start and stop the function...
				#Initialize variable which change with each frame
				(img,objectx_old,objecty_old,mask,mask_old,tim) = trackObj(frame,contours,objectx_old,objecty_old,mask,mask_old,area_limit,tim)

		#If no frame was returned, stop reading
		else:
			break
		
		#Increment frame counter
		ct = ct+1
		#Show frame with magic applied
		cv2.imshow('frame',img)
		
		#Check for break code
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
		
		ret, frame = cap.read()
		
	cap.release()
	cv2.destroyAllWindows()
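testGest is called in this second listing but not defined here. Judging from the inline counter logic in the first listing, it appears to debounce a detected gesture: the flag is only allowed to stay set after roughly ten consecutive positive frames. Below is a sketch under that assumption; the name testGest_sketch and the hold_frames value are hypothetical.

def testGest_sketch(flag, cnt, other_ct, hold_frames=10):
	"""Hypothetical debounce in the spirit of testGest."""
	if flag == 1 and cnt < hold_frames:
		cnt = cnt + 1   #not enough consecutive detections yet
		flag = 0        #suppress the flag until the gesture has been held long enough
	elif flag == 1:
		other_ct = 0    #gesture accepted; restart the other gesture's display counter
	else:
		cnt = 0         #detection chain broken; start counting again
	return flag, cnt, other_ct

It would be used the same way as the calls above, e.g. (start_flag, start_cnt, ct2) = testGest_sketch(start_flag, start_cnt, ct2).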
def main():
	
	#Define flags for face detection and background detection
	face_flag = 0
	bg_flag = 1
	start_flag = 0
	stop_flag  = 0
	
	#Set arbitrary limits
	wait_limit = 500 # number of frames before ending action
	area_limit = 500 # minimum blob area (in pixels)
	
	#MOG2 background subtractor: history of 50000 frames, variance threshold of 100
	fgbg = cv2.createBackgroundSubtractorMOG2(50000,100)
	
	#Parse all of the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help = "path to the (optional) video file")
	args = vars(ap.parse_args())
	
	# if no video, use webcam
	if not args.get("video", False):
		nome = 0
	else:
		nome = args["video"]
	
	# Capture first frame
	
	cap = cv2.VideoCapture(nome)
	ret, frame = cap.read()
	
	#Find face if flagged
	if face_flag == 1:
		minSkin,maxSkin = findMinMaxHand(frame)#findMinMaxSkin(frame)
		print minSkin
		print maxSkin
	
	#Initialize arrays, counters, masks, etc
	
	objectx_old = []
	objecty_old = []
	ct = 0
	sz = np.shape(frame)
	mask = np.zeros(shape=(sz[0],sz[1],3,20), dtype = np.uint8)
	mask_old = np.copy(mask)
	mask_out = np.copy(mask)
	tim = np.zeros(20)
	
	#Build filters
	n = 4
	#filters = build_filters(n)
	
	#Loop through gestures to store histograms
	ct = 0
	ct1 = 0
	ct2 = 0
	ct3 = 0
	start_cnt = 0
	stop_cnt  = 0
	gest_dir = r'C:\Users\jonat_000\Desktop\ECE5554\project\opencvimplemet\masks'
	path, dirs, files = next(os.walk(gest_dir))
	m = len(files)
	gest = np.zeros([n,m])
	fnome = os.listdir(gest_dir)
	
	while(cap.isOpened()):
		#If background subtraction is on, do that
		if bg_flag == 1:
			fgmask = fgbg.apply(frame)
			masked_frame = cv2.bitwise_and(frame,frame,mask = fgmask)
		else:
			masked_frame = frame

		#If face detection is on, use personalized skin hue for binary conversion
		if face_flag == 1:
			binary_frame = personalSkin2BW(masked_frame,minSkin,maxSkin)
		else:
			binary_frame = skin2BW(masked_frame)
		
		cv2.imshow('BW Frame',binary_frame)
		#Find blobs in binary image
		frame, contours, contoursOut, defects = blob2(binary_frame,frame,area_limit)
		img = np.copy(frame)
		#Show binary image
		#cv2.imshow('frame',binary_frame)
		
		#Check if a frame was found
		if not defects == []:
			#print "Defects Exist\n"
			#Check flags to see what text should be displayed...
			if start_flag == 0 and stop_flag == 0:
				print "Looking for start"
				start_flag = startGest(frame,contoursOut,defects)
				if start_flag == 1 and start_cnt < 10:
					start_cnt = start_cnt+1
					start_flag = 0
				else:
					start_cnt = 0
				stop_flag = 0
			elif start_flag == 1 and ct1<10:
				cv2.putText(frame,"Start", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
				print "Start"
				ct1 = ct1+1
				ct2 = 0	
			elif start_flag == 1 and ct1>=10:
				#print "Looking for stop"
				stop_flag = stopGest(frame,contoursOut,defects)
				if stop_flag == 1 and stop_cnt < 45:
					stop_cnt = stop_cnt+1
					stop_flag = 0
				else:
					stop_cnt = 0
			
		if start_flag == 1:
			#Find centroids
			objectx, objecty = fcent(contours, area_limit)
			#Calculate distance to each previous object
			objectloc, objectval = objdist(objectx, objecty, objectx_old, objecty_old)
			img = np.copy(frame)
			
			#Check if any objects were found to match previous frame objects
			if not objectloc == []:
				
				#Ensure only one object in current frame is mapped to previous frame object
				objectval2 = prevmap(objectloc, objectval)
				mask, mask_check, tim = obj2mask(objectloc, objectval2, objectx, objecty, objectx_old, objecty_old, mask_old, wait_limit, tim)
				#frame2, mask_all = addmask2frame(mask, frame)
				mask_old = np.copy(mask)
				img = np.copy(frame2)
			
			objectx_old = np.copy(objectx)
			objecty_old = np.copy(objecty)

		if stop_flag == 1:
			#Clear out all flags after being stopped for X amount of time
			start_flag = 0
			stop_flag  = 0
			ct1 = 0
			ct2 = 0
				
			print "Output mask created. \n"
				
			sim, pat = coHi3(m,fnome,gest_dir,mask_all,ct3,0)
			cv2.imshow('Drawn mask',mask_all)
			cv2.imshow('Nearest pattern',pat)
				
			ct3 = ct3+1
			mask_output = mask_old
			objectx_old = []
			objecty_old = []
			
			#Clear all 20 mask layers and reset the stored copy
			mask[:,:,:,:] = 0
			mask_old = np.copy(mask)

		ct = ct+1
		#Show frame with magic applied
		
		frame2, mask_all = addmask2frame(mask, frame)
		
		cv2.imshow('frame',frame2)
		cv2.imshow('mask',mask_all)


		#Check for break code and manual start/stop keys (polling waitKey once per frame)
		key = cv2.waitKey(1) & 0xFF
		if key == ord('q'):
			break
		if key == ord('s'):
			start_flag = 1
			stop_flag = 0
		if key == ord('z'):
			start_flag = 0
			stop_flag = 1

		ret, frame = cap.read()
		

		
		
	cap.release()
	cv2.destroyAllWindows()
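addmask2frame is used above to turn the (rows, cols, 3, 20) mask stack into a single drawing and paint it onto the frame, but it is not included in this listing. A minimal sketch of that behaviour, assuming the 20 slices are per-object drawing layers that should be merged and overlaid on the current frame; the function name and the merge rule are assumptions.

import cv2

def addmask2frame_sketch(mask, frame):
	"""Hypothetical stand-in for addmask2frame: flatten the per-object layers
	into one overlay (mask_all) and draw it on top of the frame."""
	mask_all = mask[:, :, :, 0].copy()
	for i in range(1, mask.shape[3]):
		mask_all = cv2.max(mask_all, mask[:, :, :, i])  #keep the brightest pixel across layers
	frame2 = cv2.add(frame, mask_all)  #saturating add paints the trails onto the frame
	return frame2, mask_all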