# assumes the usual set-up earlier in the script: cv2/time imports, the webcam
# capture `cap`, the trained gesture classifier `model`, the overlay `font`,
# the threshold `thresh`, and the helper module `ut`
while cap.isOpened():

    t = time.time()
    l = []
    press_count = 0
    # grab a frame from the webcam
    _, img = cap.read()

    # preprocessing: convert to grayscale, then zero out pixels darker than thresh
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, th1 = cv2.threshold(gray, thresh, 255, cv2.THRESH_TOZERO)
    cv2.imshow('threshold', th1)

    # contour detection, keeping only contours whose area falls between minArea and maxArea
    # (cv2.findContours returns three values in OpenCV 3.x; OpenCV 4.x drops the first one)
    _, contours, hierarchy = cv2.findContours(th1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = ut.getContourBiggerThan(contours, minArea=3000, maxArea=40000)

    # enable mouse control only when exactly one hand contour is in view
    mouse_enable = (len(cnts) == 1)

    # process the contours
    for cnt in cnts:
        x, y, w, h = cv2.boundingRect(cnt)

        # predict the hand pose
        _, resp = ut.getGestureImg(cnt, img, th1, model)

        # centroid of the hand contour
        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

    # calibration phase (the enclosing set-up loop is not shown in this excerpt):
    # at frame 80 the display colour switches to green, and at frame 100 the mean
    # intensity of a fixed patch of the frame (presumably where the user holds a
    # hand during set-up), offset by -15, becomes the threshold used by the
    # capture loops
    if frame_count == 80:
        color = (0, 255, 0)
    if frame_count == 100:
        thresh = cv2.mean(gray[165:315, 270:370])
        thresh = thresh[0] - 15
        break

while cap.isOpened():
    t = time.time()
    _, img = cap.read()

    # same preprocessing as above: grayscale, then threshold with the calibrated value
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, th1 = cv2.threshold(gray, thresh, 255, cv2.THRESH_TOZERO)
    cv2.imshow('threshold', th1)

    # hand contours within the allowed area range
    _, contours, hierarchy = cv2.findContours(th1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = ut.getContourBiggerThan(contours, minArea=3000, maxArea=40000)

    # one centroid per detected hand is collected here
    line = []
    for cnt in cnts:
        x, y, w, h = cv2.boundingRect(cnt)

        # predict the hand pose
        _, resp = ut.getGestureImg(cnt, img, th1, model)

        # centroid of the hand contour
        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        line.append((cx, cy))

        # mark the centroid and overlay the predicted gesture label
        cv2.circle(img, (cx, cy), 5, (0, 255, 0), -1)
        cv2.putText(img, resp, (x, y), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    # when exactly two hands are visible, report the angle of the line joining
    # their centroids
    if len(line) == 2:
        pt1, pt2 = line
        ang = int(ut.getSlope(pt1, pt2))
        cv2.putText(img, 'Angle-> ' + str(ang), (400, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
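
    # A minimal wrap-up sketch, assumed rather than taken from the listing above:
    # show the annotated frame, use `t` for a rough per-frame timing read-out,
    # and quit on ESC.
    cv2.imshow('frame', img)
    print('frame time: %.3f s' % (time.time() - t))
    if cv2.waitKey(1) & 0xFF == 27:  # ESC key
        break

cap.release()
cv2.destroyAllWindows()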