# These Python 2 examples use OpenCV (cv2), NumPy, argparse and os, together with
# the project's own helpers (findMinMaxHand, personalSkin2BW, skin2BW, blob, blob2,
# startGest, stopGest, testGest, trackObj, trackObj2, fcent, objdist, prevmap,
# obj2mask, addmask2frame, coHi3), which this listing assumes are defined elsewhere
# in the repository.
import argparse
import os

import cv2
import numpy as np


def main():
	
	#Define flags for face detection and background detection
	face_flag  = 0
	bg_flag    = 1
	start_flag = 0
	stop_flag  = 0
	
	#Set arbitrary limits
	wait_limit = 45 # number of frames before ending action
	area_limit = 0 # pixels squared
	
	#Background subtractor: history of 500 frames, variance threshold of 50
	fgbg = cv2.createBackgroundSubtractorMOG2(500,50)
	bgLim = 10000
	
	#Parse all of the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help = "path to the (optional) video file")
	args = vars(ap.parse_args())
	
	# if no video, use webcam
	if not args.get("video", False):
		nome = 0
	else:
		nome = args["video"]
	
	# Capture first frame
	
	cap = cv2.VideoCapture(nome)
	ret, frame = cap.read()
	if cap.isOpened():
		print "Success\n"
	else:
		print "Unable to open file/webcam"
		return

	# Create some random colors
	color = np.random.randint(0,255,(100,3))
	
	#Find face if flagged
	if face_flag == 1:
		minSkin,maxSkin = findMinMaxHand(frame)#findMinMaxSkin(frame)
		#print minSkin
		#print maxSkin


	#Initialize arrays, counters, masks, etc
	objectx_old = []
	objecty_old = []
	
	bg_cnt = 0
	ct1 = 0
	ct2 = 0
	start_cnt = 0
	stop_cnt  = 0
	
	sz = np.shape(frame)
	mask = np.zeros(shape=(sz[0],sz[1],3,20), dtype = np.uint8)
	mask_old = np.copy(mask)
	tim = np.zeros(20)
	
	#Loop through each frame
	while(cap.isOpened()):
		
		#If background subtraction is on, do that
		if bg_flag == 1:
			fgmask = fgbg.apply(frame)
			masked_frame = cv2.bitwise_and(frame,frame,mask = fgmask)
			
		else:
			masked_frame = frame

		#If face detection is on, use personalized skin hue for binary conversion
		if face_flag == 1:
			binary_frame = personalSkin2BW(masked_frame,minSkin,maxSkin)
		#else:
		#	binary_frame = skin2BW(masked_frame)
		
		#Draw the gesture-region boundary and blank everything above it
		frame = cv2.line(frame,(40,200),(600,200),(0,255,0),2)
		
		binary_frame = skin2BW(frame)
		binary_frame[:199, :] = 0
		
		cv2.imshow('BW Frame',binary_frame)
		#Find blobs in binary image
		__ , contours, contoursOut, defects = blob2(binary_frame,frame,area_limit)
		
		if not defects == []:
			#print "Defects Exist\n"
			#Check flags to see what text should be displayed...
			if start_flag == 0 and stop_flag == 0:
				#print "Looking for start"
				start_flag = startGest(frame,contoursOut,defects)
				if start_flag == 1 and start_cnt < 10:
					start_cnt = start_cnt+1
					start_flag = 0
				else:
					start_cnt = 0
				stop_flag = 0
			elif start_flag == 1 and ct1<10:
				cv2.putText(frame,"Start", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
				print "Start"
				ct1 = ct1+1
				ct2 = 0	
			elif start_flag == 1 and ct1>=10:
				#print "Looking for stop"
				stop_flag = stopGest(frame,contoursOut,defects)
				if stop_flag == 1 and stop_cnt < 10:
					stop_cnt = stop_cnt+1
					stop_flag = 0
				else:
					stop_cnt = 0
					
				(frame,objectx_old,objecty_old,mask,mask_old) = trackObj2(frame,contours,objectx_old,objecty_old,mask,mask_old,area_limit)
				
			if stop_flag == 1 and ct2 < 10:
				cv2.putText(frame,"Stop", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)			
				print "Stop"
				start_flag = 0
				#Increment frame counter
				ct1 = 0
				ct2 = ct2+1	
			elif stop_flag == 1 and ct2 >= 10:
				#Clear out all flags after being stopped for X amount of time
				start_flag = 0
				stop_flag  = 0
				ct1 = 0
				ct2 = 0
				
				print "Output mask created. \n"
				mask_output = mask_old
				#print mask_output
				objectx_old = []
				objecty_old = []
						
		else:
			print "Defects do not exist\n"
		#Show frame with magic applied
		cv2.imshow('frame',frame)
		
		#Check for break code
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
		
		ret, frame = cap.read()
		
	cap.release()
	cv2.destroyAllWindows()
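
# --- Illustrative sketch (not from the original repository) ---
# skin2BW() is called above but not defined in this listing. A minimal sketch of
# what such a helper might look like, assuming it thresholds skin-toned pixels in
# HSV space and returns an 8-bit binary mask; the HSV bounds below are placeholder
# values, not the author's.
def skin2BW_sketch(frame):
	hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
	lower = np.array([0, 48, 80], dtype=np.uint8)     # assumed lower HSV bound
	upper = np.array([20, 255, 255], dtype=np.uint8)  # assumed upper HSV bound
	binary = cv2.inRange(hsv, lower, upper)
	#Morphological opening to suppress speckle noise in the mask
	kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
	binary = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)
	return binary
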
def main():
	
	#Define flags for face detection and background detection
	face_flag = 0
	bg_flag = 1
	start_flag = 0
	stop_flag  = 0
	
	#Set arbitrary limits
	wait_limit = 500 # number of frames before ending action
	area_limit = 500 # pixels squared
	
	#Background subtractor: history of 50000 frames, variance threshold of 100
	fgbg = cv2.createBackgroundSubtractorMOG2(50000,100)
	
	#Parse all of the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help = "path to the (optional) video file")
	args = vars(ap.parse_args())
	
	# if no video, use webcam
	if not args.get("video", False):
		nome = 0
	else:
		nome = args["video"]
	
	# Capture first frame
	
	cap = cv2.VideoCapture(nome)
	ret, frame = cap.read()
	
	#Find face if flagged
	if face_flag == 1:
		minSkin,maxSkin = findMinMaxHand(frame)#findMinMaxSkin(frame)
		print minSkin
		print maxSkin
	
	#Initialize arrays, counters, masks, etc
	
	objectx_old = []
	objecty_old = []
	ct = 0
	sz = np.shape(frame)
	mask = np.zeros(shape=(sz[0],sz[1],3,20), dtype = np.uint8)
	mask_old = np.copy(mask)
	mask_out = np.copy(mask)
	tim = np.zeros(20)
	
	#Build filters
	n = 4
	#filters = build_filters(n)
	
	#Loop through gestures to store histograms
	ct = 0
	ct1 = 0
	ct2 = 0
	ct3 = 0
	start_cnt = 0
	stop_cnt  = 0
	gest_dir = r'C:\Users\jonat_000\Desktop\ECE5554\project\opencvimplemet\masks'
	path, dirs, files = next(os.walk(gest_dir))
	m = len(files)
	gest = np.zeros([n,m])
	fnome = os.listdir(gest_dir)
	
	while(cap.isOpened()):
		#If background subtraction is on, do that
		if bg_flag == 1:
			fgmask = fgbg.apply(frame)
			masked_frame = cv2.bitwise_and(frame,frame,mask = fgmask)
		else:
			masked_frame = frame

		#If face detection is on, use personalized skin hue for binary conversion
		if face_flag == 1:
			binary_frame = personalSkin2BW(masked_frame,minSkin,maxSkin)
		else:
			binary_frame = skin2BW(masked_frame)
		
		cv2.imshow('BW Frame',binary_frame)
		#Find blobs in binary image
		frame, contours, contoursOut, defects = blob2(binary_frame,frame,area_limit)
		img = np.copy(frame)
		#Show binary image
		#cv2.imshow('frame',binary_frame)
		
		#Check whether any convexity defects were found
		if not defects == []:
			#print "Defects Exist\n"
			#Check flags to see what text should be displayed...
			if start_flag == 0 and stop_flag == 0:
				print "Looking for start"
				start_flag = startGest(frame,contoursOut,defects)
				if start_flag == 1 and start_cnt < 10:
					start_cnt = start_cnt+1
					start_flag = 0
				else:
					start_cnt = 0
				stop_flag = 0
			elif start_flag == 1 and ct1<10:
				cv2.putText(frame,"Start", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
				print "Start"
				ct1 = ct1+1
				ct2 = 0	
			elif start_flag == 1 and ct1>=10:
				#print "Looking for stop"
				stop_flag = stopGest(frame,contoursOut,defects)
				if stop_flag == 1 and stop_cnt < 45:
					stop_cnt = stop_cnt+1
					stop_flag = 0
				else:
					stop_cnt = 0
			
		if start_flag == 1:
			#Find centroids
			objectx, objecty = fcent(contours, area_limit)
			#Calculate distance to each previous object
			objectloc, objectval = objdist(objectx, objecty, objectx_old, objecty_old)
			img = np.copy(frame)
			
			#Check if any objects were found to match previous frame objects
			if not objectloc == []:
				
				#Ensure only one object in current frame is mapped to previous frame object
				objectval2 = prevmap(objectloc, objectval)
				mask, mask_check, tim = obj2mask(objectloc, objectval2, objectx, objecty, objectx_old, objecty_old, mask_old, wait_limit, tim)
				frame2, mask_all = addmask2frame(mask, frame)
				mask_old = np.copy(mask)
				img = np.copy(frame2)
			
			objectx_old = np.copy(objectx)
			objecty_old = np.copy(objecty)

		if stop_flag == 1:
			#Clear out all flags after being stopped for X amount of time
			start_flag = 0
			stop_flag  = 0
			ct1 = 0
			ct2 = 0
				
			print "Output mask created. \n"
				
			sim, pat = coHi3(m,fnome,gest_dir,mask_all,ct3,0)
			cv2.imshow('Drawn mask',mask_all)
			cv2.imshow('Nearest pattern',pat)
				
			ct3 = ct3+1
			mask_output = mask_old
			objectx_old = []
			objecty_old = []
			
			for i in range(20):
				mask[:,:,:,i] = 0
			mask_old = np.copy(mask)

		ct = ct+1
		#Show frame with magic applied
		
		frame2, mask_all = addmask2frame(mask, frame)
		
		cv2.imshow('frame',frame2)
		cv2.imshow('mask',mask_all)


		#Check for break code; read the key once so presses are not consumed
		key = cv2.waitKey(1) & 0xFF
		if key == ord('q'):
			break
		if key == ord('s'):
			start_flag = 1
			stop_flag = 0
		if key == ord('z'):
			start_flag = 0
			stop_flag = 1

		ret, frame = cap.read()
		

		
		
	cap.release()
	cv2.destroyAllWindows()
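
# --- Illustrative sketch (not from the original repository) ---
# fcent() and objdist() are called above but not defined in this listing. Minimal
# sketches, assuming they mirror the inline centroid and nearest-neighbour loops
# written out in a later example in this listing: keep contours larger than
# area_limit, return their centroids, and match each centroid to the closest one
# from the previous frame.
def fcent_sketch(contours, area_limit):
	objectx, objecty = [], []
	for c in contours:
		if cv2.contourArea(c) > area_limit:
			cent = cv2.moments(c)
			if cent['m00'] != 0:
				objectx.append(int(cent['m10'] / cent['m00']))
				objecty.append(int(cent['m01'] / cent['m00']))
	return objectx, objecty

def objdist_sketch(objectx, objecty, objectx_old, objecty_old):
	objectloc, objectval = [], []
	if len(objectx_old) == 0:
		return objectloc, objectval
	xo = np.asarray(objectx_old, dtype=np.float64)
	yo = np.asarray(objecty_old, dtype=np.float64)
	for cx, cy in zip(objectx, objecty):
		dist = np.sqrt((xo - cx)**2 + (yo - cy)**2)
		objectloc.append(int(np.argmin(dist)))  # index of the nearest previous object
		objectval.append(float(np.min(dist)))   # distance to that object
	return objectloc, objectval
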
def main():
	
	#Define flags for face detection and background detection
	face_flag  = 0
	bg_flag    = 1
	gest_flag  = 1
	
	#Flags used for collection of data
	start_flag = 0
	stop_flag  = 0
	
	#Set arbitrary limits
	wait_limit = 45 # number of frames before ending action
	area_limit = 500 # pixels squared
	
	#Background subtractor: history of 50000 frames, variance threshold of 100
	fgbg = cv2.createBackgroundSubtractorMOG2(50000,100)
	
	#Parse all of the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help = "path to the (optional) video file")
	args = vars(ap.parse_args())
	
	# if no video, use webcam
	if not args.get("video", False):
		nome = 0
	else:
		nome = args["video"]
	
	# Capture first frame
	
	cap = cv2.VideoCapture(nome)
	ret, frame = cap.read()
	if cap.isOpened():
		print "Success\n"
	else:
		print "Unable to open file/webcam"
		return

	# Create some random colors
	color = np.random.randint(0,255,(100,3))
	
	#Find face if flagged
	if face_flag == 1:
		minSkin,maxSkin = findMinMaxHand(frame)#findMinMaxSkin(frame)
		#print minSkin
		#print maxSkin


	#Initialize arrays, counters, masks, etc
	objectx_old = []
	objecty_old = []
	
	#Starting all of the counters
	ct = 0
	ct1 = 0
	ct2 = 0
	start_cnt = 0
	stop_cnt  = 0
	
	#Creating mask arrays
	sz = np.shape(frame)
	mask = np.zeros(shape=(sz[0],sz[1],3,20), dtype = np.uint8)
	mask_old = np.copy(mask)
	tim = np.zeros(20)
	
	#Loop through each frame
	while(cap.isOpened()):
		
		#If background subtraction is on, do that
		if bg_flag == 1:
			fgmask = fgbg.apply(frame)
			masked_frame = cv2.bitwise_and(frame,frame,mask = fgmask)
		else:
			masked_frame = frame

		#If face detection is on, use personalized skin hue for binary conversion
		if face_flag == 1:
			binary_frame = personalSkin2BW(masked_frame,minSkin,maxSkin)
		#elif gest_flag == 1:
		#	frame = cv2.line(frame,(40,200),(600,200),(0,255,0),2)
		#	crop_img = frame[200:600, 40:600]
		#	binary_frame = skin2BW(crop_img)
		else:
			binary_frame = skin2BW(masked_frame)
		
		#Find blobs in binary image
		if gest_flag == 1:
			__ , contours, contoursOut, defects = blob2(binary_frame,frame,area_limit)
		else:
			frame, contours = blob(binary_frame,frame,area_limit)
		
		#Show binary image
		cv2.imshow('frame',binary_frame)
		
		#Check if a frame was found
		if ret:
						
			if gest_flag == 1:
				if not defects == []:
					if start_flag == 0 and stop_flag == 0:
						print "Looking for start..."
						#cv2.waitKey(10)
						#os.system("pause")
						start_flag = startGest(frame,contoursOut,defects)
						(start_flag,start_cnt,ct2) = testGest(start_flag,start_cnt,ct2)
						stop_flag = 0
						img = frame
					elif start_flag == 1 and ct1<10:
						cv2.putText(frame,"Start",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
						print "Start"
						ct1 = ct1+1
						ct2 = 0
						stop_flag = 0
						#print "ct1"
						#print ct1
						#cv2.waitKey(27)
						(img,objectx_old,objecty_old,mask,mask_old) = trackObj2(frame,contours,objectx_old,objecty_old,mask,mask_old, area_limit)
						
						
						
					elif start_flag == 1 and ct1 >= 10:
						#Looking for stop...
						#print "Looking for stop ..."
						stop_flag = stopGest(frame,contoursOut,defects)
						(stop_flag,stop_cnt,ct1) = testGest(stop_flag,stop_cnt,ct1)
						#print stop_cnt
						if stop_flag == 1:
							start_flag = 0
							
						(img,objectx_old,objecty_old,mask,mask_old) = trackObj2(frame,contours,objectx_old,objecty_old,mask,mask_old,area_limit)
					
					
					elif stop_flag == 1 and ct2 < 10:
						#Stop Gesture collection
						cv2.putText(frame,"Stop",(50,50),cv2.FONT_HERSHEY_SIMPLEX,2,2)
						print "Stop"
						#start_flag = 0
						ct1 = 0
						ct2 = ct2+1
										
						img = frame
					elif stop_flag == 1 and ct2 >= 10:
						#Clear out all flags after being stopped for X amount of time
						start_flag = 0
						stop_flag  = 0
						ct1 = 0
						ct2 = 0
						
						print "Output mask created. \n"
						mask_output = mask_old
						objectx_old = []
						objecty_old = []
						
						img = frame
				else: #If the defects aren't found, do nothing
					#print "Defects do not exist. \n"
					img = frame
			
			else: #If not using the gestures to start and stop the function...
				#Initialize variable which change with each frame
				(img,objectx_old,objecty_old,mask,mask_old,tim) = trackObj(frame,contours,objectx_old,objecty_old,mask,mask_old,area_limit,tim)

		#If no frame was returned, stop processing
		else:
			break
		
		#Increment frame counter
		ct = ct+1
		#Show frame with magic applied
		cv2.imshow('frame',img)
		
		#Check for break code
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
		
		ret, frame = cap.read()
		
	cap.release()
	cv2.destroyAllWindows()
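
# --- Illustrative sketch (not from the original repository) ---
# testGest() is called above but not defined in this listing. A minimal sketch,
# assuming it implements the debounce pattern written out inline in the other
# examples: require the gesture for several consecutive frames before latching it,
# and reset the opposing frame counter once it is confirmed. The 10-frame hold is
# the value used inline elsewhere, not a known constant of the real helper.
def testGest_sketch(flag, cnt, other_ct, hold_frames=10):
	if flag == 1 and cnt < hold_frames:
		#Gesture seen, but not held long enough yet: keep counting, stay unlatched
		cnt = cnt + 1
		flag = 0
	elif flag == 1:
		#Gesture held long enough: leave it latched and reset the other counter
		cnt = 0
		other_ct = 0
	else:
		#Gesture lost before the hold period elapsed
		cnt = 0
	return flag, cnt, other_ct
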
def main():
	
	#Define flags for face detection and background detection
	face_flag = 0
	bg_flag = 1
	
	#Set arbitrary limits
	wait_limit = 45 # number of frames before ending action
	area_limit = 500 # pixels squared
	
	#Background subtractor: history of 50000 frames, variance threshold of 100
	fgbg = cv2.createBackgroundSubtractorMOG2(50000,100)
	
	#Parse all of the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help = "path to the (optional) video file")
	args = vars(ap.parse_args())
	
	# if no video, use webcam
	if not args.get("video", False):
		nome = 0
	else:
		nome = args["video"]
	
	# Capture first frame
	
	cap = cv2.VideoCapture(nome)
	ret, frame = cap.read()
	if cap.isOpened():
		print "Success"
	else:
		print "Unable to open file/webcam"
		return

	# Create some random colors
	color = np.random.randint(0,255,(100,3))
	
	#Find face if flagged
	if face_flag == 1:
		minSkin,maxSkin = findMinMaxHand(frame)#findMinMaxSkin(frame)
		print minSkin
		print maxSkin
	
	#Initialize arrays, counters, masks, etc
	
	objectx_old = []
	objecty_old = []
	ct = 0
	sz = np.shape(frame)
	mask = np.zeros(shape=(sz[0],sz[1],3,20), dtype = np.uint8)
	mask_old = np.copy(mask)
	tim = np.zeros(20)
	
	#Loop through each frame
	
	while(cap.isOpened()):
		
		#If background subtraction is on, do that
		if bg_flag == 1:
			fgmask = fgbg.apply(frame)
			masked_frame = cv2.bitwise_and(frame,frame,mask = fgmask)
		else:
			masked_frame = frame

		#If face detection is on, use personalized skin hue for binary conversion
		if face_flag == 1:
			binary_frame = personalSkin2BW(masked_frame,minSkin,maxSkin)
		else:
			binary_frame = skin2BW(masked_frame)
		
		#Find blobs in binary image
		frame, contours = blob(binary_frame,frame,area_limit)
		
		#Show binary image
		cv2.imshow('frame',binary_frame)
		
		#Check if a frame was found
		if ret:
			
			#Initialize variable which change with each frame
			objectx = []
			objecty = []
			objectloc = []
			objectval = []
			x = []
			y = []
			img = np.copy(frame)
			
			#Find contours around blobs
			for i, c in enumerate(contours):
				area = cv2.contourArea(c)

				if area > area_limit:
			
					cent = cv2.moments(c)
					temp = cent['m00']
					if not temp == 0:
						cx = int(cent['m10']/cent['m00'])
						cy = int(cent['m01']/cent['m00'])
						objectx.append(cx)
						objecty.append(cy)
			
			#Check if any objects were found
			if len(objectx_old) > 0:
				
				#Loop through each object in current frame and compute distance
				#to each object in previous frame
				for i, j in zip(objectx, objecty):
	
					x_dist = np.array(objectx_old)
					x_dist = x_dist - i
					y_dist = np.array(objecty_old)
					y_dist = y_dist - j
					
					y_dist = np.square(y_dist)
					x_dist = np.square(x_dist)
					
					dist = np.sqrt(x_dist+y_dist)
					
					if len(dist) > 0:
						minloc = np.argmin(dist)
						minval = dist[minloc]
						objectloc.append(minloc)
						objectval.append(minval)
						x.append(i)
						y.append(j)
						
			#Check if any objects were found to match previous frame objects
			if not objectloc == []:
				
				#Initialize parameters for loop over individual objects
				mx = np.amax(objectloc)
				mn = np.amin(objectloc)
				
				objs = np.unique(objectloc)
				vals = np.zeros_like(objs)+999
				locs = np.zeros_like(objs)-1
				co = len(objectloc)
				co2 = len(objs)

				#Ensure at most only 1 current object mapped to previous object
				for i in range(0, co2):
					for j in range(0, co):
						if objs[i] == objectloc[j]:
							if objectval[j] < vals[i]:
								vals[i] = objectval[j]
								locs[i] = j

				objectval2 = np.zeros_like(objectval)-1
				
				for i in range(0, co2):
					if locs[i] > -1:
						if locs[i] < co2:
							objectval2[locs[i]] = objectval[locs[i]]
				
				#Initialize mask parameters
				img = cv2.add(frame,mask[:,:,:,0])
				mask_check = np.zeros(20)
				frame2 = np.copy(frame)
				
				#Loop through each matched object and add to corresponding mask
				for i in range(0, co):
					if objectval2[i] > -1:
						hihi = 1
											
						a = np.copy(objectx_old[objectloc[i]])
						b = np.copy(objecty_old[objectloc[i]])
						c = np.copy(objectx[i])
						d = np.copy(objecty[i])
						
						dist = np.sqrt((a - c)**2 + (b - d)**2)
						mask_check[i] = 1
						
						if dist < 5:
							tim[i] = tim[i]+1
							if tim[i] > wait_limit:
								mask[:,:,:,i] = 0
								tim[i] = 0
								mask_check[i] = 0
						else:
							tim[i] = 0
						temp1 = np.copy(mask_old[:,:,:,objectloc[i]])
						if np.sum(mask[:,:,:,i]) > 0:
						#Connect the dots
							mask[:,:,:,i] = cv2.line(temp1, (a,b),(c,d), color[i].tolist(), 2)
						else:
						#Start point
							mask[:,:,:,i] = cv2.circle(temp1,(a,b),5,color[i].tolist(),-1)
						#Current point/End point
						frame2 = cv2.circle(frame2,(a,b),5,color[i].tolist(),-1)
						objectx_old[objectloc[i]] = np.copy(objectx[i])
						objecty_old[objectloc[i]] = np.copy(objecty[i])
				
				#Check if a mask disappeared
				for i in range(20):
					if mask_check[i] == 0:
					#to add: if summing to nonzero, send as an output mask
						mask[:,:,:,i] = 0
					else:
						frame2 = cv2.add(frame2,mask[:,:,:,i])
				
				mask_old = np.copy(mask)
				img = np.copy(frame2)
			else:
				for i in range(20):
					mask[:,:,:,i] = 0
				mask_old = np.copy(mask)

			objectx_old = np.copy(objectx)
			objecty_old = np.copy(objecty)

		else:
			break
		
		#Increment frame counter
		ct = ct+1
		#Show frame with magic applied
		cv2.imshow('frame',img)
		
		#Check for break code
		if cv2.waitKey(1) & 0xFF == ord('q'):
			break
		
		ret, frame = cap.read()
		
	cap.release()
	cv2.destroyAllWindows()
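
# --- Illustrative sketch (not from the original repository) ---
# blob() is called above but not defined in this listing. A minimal sketch,
# assuming it finds contours in the binary image, draws the ones larger than
# area_limit onto the colour frame, and returns both.
def blob_sketch(binary_frame, frame, area_limit):
	# [-2] selects the contour list under both the OpenCV 3.x and 4.x return conventions
	contours = cv2.findContours(binary_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
	kept = [c for c in contours if cv2.contourArea(c) > area_limit]
	cv2.drawContours(frame, kept, -1, (0, 255, 0), 2)
	return frame, kept
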
def main():

    #Define flags for face detection and background detection
    face_flag = 0
    bg_flag = 1
    start_flag = 0
    stop_flag = 0

    #Set arbitrary limits
    wait_limit = 500  # number of frames before ending action
    area_limit = 500  # pixels squared

    #Background subtractor: history of 50000 frames, variance threshold of 100
    fgbg = cv2.createBackgroundSubtractorMOG2(50000, 100)

    #Parse all of the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="path to the (optional) video file")
    args = vars(ap.parse_args())

    # if no video, use webcam
    if not args.get("video", False):
        nome = 0
    else:
        nome = args["video"]

    # Capture first frame

    cap = cv2.VideoCapture(nome)
    ret, frame = cap.read()

    #Find face if flagged
    if face_flag == 1:
        minSkin, maxSkin = findMinMaxHand(frame)  #findMinMaxSkin(frame)
        print minSkin
        print maxSkin

    #Initialize arrays, counters, masks, etc

    objectx_old = []
    objecty_old = []
    ct = 0
    sz = np.shape(frame)
    mask = np.zeros(shape=(sz[0], sz[1], 3, 20), dtype=np.uint8)
    mask_old = np.copy(mask)
    mask_out = np.copy(mask)
    tim = np.zeros(20)

    #Build filters
    n = 4
    #filters = build_filters(n)

    #Loop through gestures to store histograms
    ct = 0
    ct1 = 0
    ct2 = 0
    ct3 = 0
    start_cnt = 0
    stop_cnt = 0
    gest_dir = r'C:\Users\jonat_000\Desktop\ECE5554\project\opencvimplemet\masks'
    path, dirs, files = next(os.walk(gest_dir))
    m = len(files)
    gest = np.zeros([n, m])
    fnome = os.listdir(gest_dir)

    while (cap.isOpened()):
        #If background subtraction is on, do that
        if bg_flag == 1:
            fgmask = fgbg.apply(frame)
            masked_frame = cv2.bitwise_and(frame, frame, mask=fgmask)
        else:
            masked_frame = frame

        #If face detection is on, use personalized skin hue for binary conversion
        if face_flag == 1:
            binary_frame = personalSkin2BW(masked_frame, minSkin, maxSkin)
        else:
            binary_frame = skin2BW(masked_frame)

        cv2.imshow('BW Frame', binary_frame)
        #Find blobs in binary image
        frame, contours, contoursOut, defects = blob2(binary_frame, frame,
                                                      area_limit)
        img = np.copy(frame)
        #Show binary image
        #cv2.imshow('frame',binary_frame)

        #Check whether any convexity defects were found
        if not defects == []:
            #print "Defects Exist\n"
            #Check flags to see what text should be displayed...
            if start_flag == 0 and stop_flag == 0:
                print "Looking for start"
                start_flag = startGest(frame, contoursOut, defects)
                if start_flag == 1 and start_cnt < 10:
                    start_cnt = start_cnt + 1
                    start_flag = 0
                else:
                    start_cnt = 0
                stop_flag = 0
            elif start_flag == 1 and ct1 < 10:
                cv2.putText(frame, "Start", (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
                            2, 2)
                print "Start"
                ct1 = ct1 + 1
                ct2 = 0
            elif start_flag == 1 and ct1 >= 10:
                #print "Looking for stop"
                stop_flag = stopGest(frame, contoursOut, defects)
                if stop_flag == 1 and stop_cnt < 45:
                    stop_cnt = stop_cnt + 1
                    stop_flag = 0
                else:
                    stop_cnt = 0

        if start_flag == 1:
            #Find centroids
            objectx, objecty = fcent(contours, area_limit)
            #Calculate distance to each previous object
            objectloc, objectval = objdist(objectx, objecty, objectx_old,
                                           objecty_old)
            img = np.copy(frame)

            #Check if any objects were found to match previous frame objects
            if not objectloc == []:

                #Ensure only one object in current frame is mapped to previous frame object
                objectval2 = prevmap(objectloc, objectval)
                mask, mask_check, tim = obj2mask(objectloc, objectval2,
                                                 objectx, objecty, objectx_old,
                                                 objecty_old, mask_old,
                                                 wait_limit, tim)
                frame2, mask_all = addmask2frame(mask, frame)
                mask_old = np.copy(mask)
                img = np.copy(frame2)

            objectx_old = np.copy(objectx)
            objecty_old = np.copy(objecty)

        if stop_flag == 1:
            #Clear out all flags after being stopped for X amount of time
            start_flag = 0
            stop_flag = 0
            ct1 = 0
            ct2 = 0

            print "Output mask created. \n"

            sim, pat = coHi3(m, fnome, gest_dir, mask_all, ct3, 0)
            cv2.imshow('Drawn mask', mask_all)
            cv2.imshow('Nearest pattern', pat)

            ct3 = ct3 + 1
            mask_output = mask_old
            objectx_old = []
            objecty_old = []

            for i in range(20):
                mask[:, :, :, i] = 0
            mask_old = np.copy(mask)

        ct = ct + 1
        #Show frame with magic applied

        frame2, mask_all = addmask2frame(mask, frame)

        cv2.imshow('frame', frame2)
        cv2.imshow('mask', mask_all)

        #Check for break code; read the key once so presses are not consumed
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
        if key == ord('s'):
            start_flag = 1
            stop_flag = 0
        if key == ord('z'):
            start_flag = 0
            stop_flag = 1

        ret, frame = cap.read()

    cap.release()
    cv2.destroyAllWindows()
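
# --- Illustrative sketch (not from the original repository) ---
# addmask2frame() is called above but not defined in this listing. A minimal
# sketch, assuming it merges the 20 per-object drawing layers into one overlay
# and composites that overlay onto the current frame.
def addmask2frame_sketch(mask, frame):
    mask_all = np.zeros_like(frame)
    for i in range(mask.shape[3]):
        # Slices of the 4-D mask are non-contiguous, so copy before handing to OpenCV
        layer = np.ascontiguousarray(mask[:, :, :, i])
        mask_all = cv2.add(mask_all, layer)  # accumulate each object's trail
    frame2 = cv2.add(frame, mask_all)        # overlay the trails on the frame
    return frame2, mask_all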