Example #1
    def run(self):
        cv2.namedWindow('ANALOG', flags=cv2.WINDOW_KEEPRATIO)
        cv2.setMouseCallback('ANALOG', self.on_mouse)
        while True:
            image = self.img.copy()
            text = 'Press "e" to exit and save, "r" to reset current data'
            cv2.putText(img=image, org=(100, 100), text=text,
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
                        color=(255, 255, 255), thickness=2, lineType=cv2.LINE_AA)
            h = 150
            for t in self.training_data:
                txt = "deg {0}, value: {1}".format(t['degree'], t['value'])

                cv2.putText(img=image, org=(100, h), text=txt,
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
                            color=(255, 255, 255), thickness=2, lineType=cv2.LINE_AA)
                h += 30
            cv2.imshow('ANALOG', image)
            key = cv2.waitKey(33) & 0xFF
            if key == ord('e'):
                break
            elif key == ord('r'):
                self.training_data = []

        cv2.destroyWindow('ANALOG')
        return self.training_data
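
The loop above relies on an on_mouse handler registered through cv2.setMouseCallback. As a reference, here is a minimal sketch of the callback signature OpenCV expects; the degree/value recording is a hypothetical illustration based on the training data displayed above:

    def on_mouse(self, event, x, y, flags, param):
        # OpenCV invokes this with the event type and cursor position
        if event == cv2.EVENT_LBUTTONDOWN:
            # hypothetical: record the clicked point as a training sample
            self.training_data.append({'degree': x, 'value': y})
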
    def dessinerElementCarto(self):
        for ile in self.stationBase.getCarte().getIles():
            cv2.putText(
                self.imageVirtuelle,
                ile.getForme(),
                (ile.getX() - 25, ile.getY()),
                self.police,
                0.5,
                self.getColor(ile.getCouleur()),
                1,
                cv2.LINE_AA,
            )
        for tresor in self.stationBase.getCarte().getTresors():
            cv2.putText(
                self.imageVirtuelle,
                "Tresor",
                (tresor.getX() - 25, tresor.getY()),
                self.police,
                0.5,
                self.getColor("Jaune"),
                1,
                cv2.LINE_AA,
            )

        self.dessinerRobot()
Example #3
def setLabel(im, text, contour):
    if showNames:
        labelSize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
        bx, by, bw, bh = cv2.boundingRect(contour)
        # integer division keeps the cv2 point coordinates as ints
        x, y = (bx + (bw - labelSize[0][0]) // 2, (by + (bh + labelSize[0][1]) // 2) - 10)
        cv2.rectangle(im, (x, y), (x + labelSize[0][0], y - labelSize[0][1]), (255, 255, 255), -1)
        cv2.putText(im, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1)
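
Note that cv2.getTextSize returns ((text_width, text_height), baseline), which is why the snippet indexes labelSize[0][0] for width and labelSize[0][1] for height. A minimal sketch of the same centring arithmetic on its own (values are illustrative):

    (tw, th), baseline = cv2.getTextSize("label", cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
    # centre horizontally in a box (bx, by, bw, bh) from cv2.boundingRect
    org = (bx + (bw - tw) // 2, by + (bh + th) // 2)
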
 def predict(self):
     while True:
         ret, frame = self.source.read()
         if frame is None:
             break
         frame, landmarks = self.landmark_detector.get_frame_with_landmarks(frame)
         # feature_vector = self.feature_processor.process(landmarks, ["mouth", "right-eye", "left-eye", "left-eyebrow", "right-eyebrow"])
         feature_vector = self.feature_processor.process(landmarks)
         print(len(feature_vector)/2)
         cv2.imshow("Frame", frame)
         if feature_vector:
             feature_vector = np.asarray(feature_vector).reshape(1, -1)
             feature_vector_normalized = self.normalizer.normalize(feature_vector)
             print(np.asarray(feature_vector_normalized).shape[1]/2)
             prediction = self.classifier.predict(feature_vector_normalized)
             try:
                 predicted_class = self.configuration.classes[int(prediction[0])]
                 label = predicted_class
                 font = cv2.FONT_HERSHEY_SIMPLEX
                 cv2.putText(frame, label, (100, 400), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
             except IndexError:
                 print("This class has no label yet (class index: {})".format(prediction[0]))
         cv2.imshow("Frame", frame)
         key = cv2.waitKey(1) & 0xFF
         if key == ord("q"):
             break
     cv2.destroyAllWindows()
    def plot_place_cell_id_on_map(self, map_data, place_cell_id):
        # Plot red box where vehicle is
        min_x = place_cell_id[1].min()
        min_y = place_cell_id[2].min()
        ptp_y = place_cell_id[2].ptp()

        map_out = np.copy(map_data)  # force a copy so it doesn't keep old moves
        map_out = self.flip_rotate_color_image(map_out, 0, False)
        # Loop through each place id
        for current_place in range(0, place_cell_id[0].size):
            # sq = self.squares_grid[place_cell_id[1][current_place]-min_x, place_cell_id[2][current_place]-min_y, :]
            # Flipping this in the y-plane
            sq = self.squares_grid[place_cell_id[1][current_place] - min_x,
                                   np.absolute(place_cell_id[2][current_place] - min_y - ptp_y), :]
            # Place the number at the bottom of the square, in the middle
            x_pos = sq[0]  # + np.round(np.diff([sq[2], sq[0]]) / 2)
            y_pos = self.pixel_width - sq[1] + np.round(np.diff([sq[3], sq[1]]) / 2)
            cv2.putText(map_out, str(int(place_cell_id[0][current_place])), (int(x_pos), int(y_pos)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 1)

        textsize = cv2.getTextSize('N', cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)
        # cv2.fillConvexPoly(map_out, np.abs(np.array([[self.pixel_width,0],[self.pixel_width,0],[self.pixel_width,0]]) - self.arrow[('heading')]), (0,0,255))
        cv2.putText(map_out, 'N', (self.pixel_width - int((textsize[0][1] / 2) + 10), int(30 + (textsize[1] / 2))),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)

        cv2.imshow(self.place_cell_map, map_out)
        return map_out
Example #6
    def show_results(self, image, results, imshow=True, detected_boxes_file=None,
                     detected_image_file=None):
        """Show the detection boxes"""
        img_cp = image.copy()
        if detected_boxes_file:
            f = open(detected_boxes_file, "w")
        # draw boxes
        for i in range(len(results)):
            x = int(results[i][1])
            y = int(results[i][2])
            w = int(results[i][3]) // 2
            h = int(results[i][4]) // 2
            if self.verbose:
                print("   class: %s, [x, y, w, h]=[%d, %d, %d, %d], confidence=%f" % (
                    results[i][0], x, y, w, h, results[i][-1]))
            # draw the box and label regardless of verbosity
            cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), (0, 255, 0), 2)
            cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
            cv2.putText(img_cp, results[i][0] + ' : %.2f' % results[i][5], (x - w + 5, y - h - 7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            if detected_boxes_file:
                f.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' +
                        str(w) + ',' + str(h) + ',' + str(results[i][5]) + '\n')
        if imshow:
            cv2.imshow('YOLO_small detection', img_cp)
            cv2.waitKey(1)
        if detected_image_file:
            cv2.imwrite(detected_image_file, img_cp)
        if detected_boxes_file:
            f.close()
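
Each line written to the boxes file above is plain CSV in the form class,x,y,half_w,half_h,score (the width and height were halved before writing). A minimal sketch of reading it back (the file name is illustrative):

    with open('detections.txt') as f:
        for line in f:
            cls, x, y, hw, hh, score = line.strip().split(',')
            print(cls, int(x), int(y), int(hw), int(hh), float(score))
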
Example #7
    def control(self, error):
        sa = serialArduino.serialArduino()
        shape = "checkpoint"
        if self.checkPoint() == 'm':
            if error > 30:
                shape = "left"
                sa.sendChar('z')
            elif error < -30:
                shape = "right"
                sa.sendChar('c')
            else:
                shape = "straight"
                sa.sendChar('w')
        elif self.checkPoint() == 'c':
            sa.sendChar('f')
            shape = "checkpoint"
        elif self.checkPoint() == 'r':
            sa.sendChar('d')
            shape = "hard right"
        sa.readChar()
        print(shape)
        cv2.putText(self.image, shape, (self.cX, self.cY), cv2.FONT_HERSHEY_SIMPLEX,
                    2, (255, 255, 255), 2)
def drawObjects(frame, objects):
    global colors

    output = np.copy(frame)

    drawnBounds = []

    for i in range(len(objects)):
        item = objects[i]

        if not item.tracked:
            item.kalman_filter.predict()
            cv2.circle(output, item.kalman_filter.currentPrediction, 8, (0,0,255), 5)
            continue

        bounds = item.bounds

        if bounds.center in drawnBounds:
            continue

        cv2.rectangle(output, (bounds.x1, bounds.y1), (bounds.x2, bounds.y2), colors[i % len(colors)], 1)
        cv2.circle(output, bounds.center, 4, colors[i % len(colors)], 2)

        # KALMAN
        # print item.kalman_filter.currentPrediction
        cv2.circle(output, item.kalman_filter.currentPrediction, 8, (0,0,255), 5)

        cv2.putText(output, 'Object ' + str(i), (bounds.x1, bounds.y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colors[i % len(colors)], 1)

        drawnBounds.append(bounds.center)

    return output
Example #9
def classifyBlob(net_info, net, pos, im_main = None, boolShow = False):
    outputt = ''
    if net_info != []:
        posY = int(pos[0].start)
        posX = int(pos[1].start)
        output = net.sim([net_info])
        output = (output == np.max(output))[0]  
        #outputt = output.tolist().index(1)
        if output[0] == 1:
            outputt = 2
        elif output[1] == 1:
            outputt = 4
        elif output[2] == 1:
            outputt = 3
        elif output[3] == 1:
            outputt = 1
        elif output[4] == 1:
            outputt = 5
#        outputt = (np.array(output)>0.5)*1
        
        if boolShow:
            cv2.putText(im_main, 'Tipo ' + str(outputt),
                        (posX + 15, posY),
                        cv2.FONT_HERSHEY_DUPLEX, 0.5, (100, 50, 255))

    return outputt, im_main
Example #10
def Target_Drawing_SRT(Target,pt3,img,Number):
	if len(Target) == 2:
		if Point_Distance(Target,pt3) <=50:
			Target=pt3
			# print "Target",Target
			font = cv2.FONT_HERSHEY_SIMPLEX
			# cv2.CV_AA was removed in OpenCV 3; use cv2.LINE_AA instead
			cv2.putText(img, str(Number), (Target[0] - 10, Target[1] + 10), font, 1, (0, 0, 0), 1, cv2.LINE_AA)
Example #11
    def run(self):
        rate = rospy.Rate(10)
        done = False
        cv2.namedWindow("kinect_view")
        cv2.setMouseCallback("kinect_view", self.mouse_call)

        while not rospy.is_shutdown() and not done:

            if self.image is None:
                continue

            image = np.copy(self.image)
            state = self.states[self.state].replace('_', ' ')
            # shape[0] is the image height, so the prompt sits near the bottom
            cv2.putText(image, 'Click the {}'.format(self.target_object), (10, self.image.shape[0] - 100), self.font, 1, (255, 100, 80), 2)
            self.draw_corners(image)

            if self.is_done:
                cv2.polylines(image, np.int32([self.corners]), True, (0, 255, 0), 6)
                done = True
                print('DONE')

            cv2.imshow("kinect_view", image)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            rate.sleep()

        cv2.destroyWindow("kinect_view")
Example #12
  def update(self, frame):
    # print "updating %d " % self.id
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    back_project = cv2.calcBackProject([hsv],[0], self.roi_hist,[0,180],1)
    
    if args.get("algorithm") == "c":
      ret, self.track_window = cv2.CamShift(back_project, self.track_window, self.term_crit)
      pts = cv2.boxPoints(ret)
      pts = np.int32(pts)  # np.int0 was removed in NumPy 2
      self.center = center(pts)
      cv2.polylines(frame,[pts],True, 255,1)
      
    if not args.get("algorithm") or args.get("algorithm") == "m":
      ret, self.track_window = cv2.meanShift(back_project, self.track_window, self.term_crit)
      x,y,w,h = self.track_window
      self.center = center([[x,y],[x+w, y],[x,y+h],[x+w, y+h]])  
      cv2.rectangle(frame, (x,y), (x+w, y+h), (255, 255, 0), 2)

    self.kalman.correct(self.center)
    prediction = self.kalman.predict()
    cv2.circle(frame, (int(prediction[0]), int(prediction[1])), 4, (255, 0, 0), -1)
    # fake shadow
    cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center), (11, (self.id + 1) * 25 + 1),
        font, 0.6,
        (0, 0, 0),
        1,
        cv2.LINE_AA)
    # actual info
    cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center), (10, (self.id + 1) * 25),
        font, 0.6,
        (0, 255, 0),
        1,
        cv2.LINE_AA)
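
The two putText calls above implement a simple drop shadow: the text is drawn once in black, offset by one pixel, then again in the foreground colour on top. A small helper capturing the pattern (names are illustrative, not from the original source):

    def put_text_with_shadow(img, text, org, font, scale=0.6, color=(0, 255, 0)):
        x, y = org
        # shadow first, offset by one pixel
        cv2.putText(img, text, (x + 1, y + 1), font, scale, (0, 0, 0), 1, cv2.LINE_AA)
        # then the actual text on top
        cv2.putText(img, text, (x, y), font, scale, color, 1, cv2.LINE_AA)
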
    def draw_image(self, image, control_vect):
        # TODO: move this in the main script ?

        # Draw the motionless area
        color = (0, 0, 255)
        thickness = 1
        point1 = (self.motionless_area_range_x[0],
                  self.motionless_area_range_y[0])
        point2 = (self.motionless_area_range_x[1],
                  self.motionless_area_range_y[1])
        cv.rectangle(image, point1, point2, color, thickness)

        # Draw the control
        if control_vect is not None:
            ctrl_text = ""

            if control_vect[1] > 0:
                ctrl_text += "up "
            elif control_vect[1] < 0:
                ctrl_text += "down "

            if control_vect[0] > 0:
                ctrl_text += "right"
            elif control_vect[0] < 0:
                ctrl_text += "left"

            start_point = (15, 100)
            font = cv.FONT_HERSHEY_SIMPLEX
            font_scale = 0.75
            color = (0, 0, 255)
            thickness = 2
            line_type = cv.LINE_AA  # Anti-Aliased
            cv.putText(image, ctrl_text, start_point, font, font_scale, color,
                       thickness, line_type)
Example #14
def calcHog(myhog, framergb, frame, mask, nameFrame):
    roimaskbuf = []
    positionList = []
    probaList = []
    totalRect = 0
    widthROI = int(48*scale)
    heightROI = int(128*scale)
    pix_x_cell = (16, 16)
    cell_x_block = (1, 1)

    for i in range(0,frame.shape[0]-heightROI,step):
        for j in range(0,frame.shape[1]-widthROI,step):
            if (j + widthROI < frame.shape[1]) and (i + heightROI < frame.shape[0]):
                roi = mask[i:i+heightROI,j:j+widthROI]
                roimaskbuf.append(roi)
                if util.isThereMovement(roi,mask_threshold):
                    window = frame[i:i+heightROI,j:j+widthROI]
                    isPerson, proba = myhog.run(window,'Person',orient,pix_x_cell,cell_x_block)
                    totalRect += 1
                else:
                    continue

                if isPerson is True and proba[0][1] > matching_threshold:
                    positionList.append((j,i,j+widthROI,i+heightROI))
                    probaList.append(proba[0][1])
                    # if VERBOSE is True:
                    #     print 'person found: ' + str(proba)

    subjectLength = len(positionList)
    fuoriList = []

    for i in range(subjectLength - 1, -1, -1):
        for j in range(subjectLength - 1, -1, -1):
            if i != j:
                check, k = myhog.checkIntersections(positionList[i],positionList[j],probaList[i],probaList[j],minOverlap_x,minOverlap_y)
                if check is True:
                    if k == 0:
                        fuoriList.append(i)
                    else:
                        fuoriList.append(j)

    definitiveList = []
    defproba = []
    for i in range(0,len(positionList)):
        if i not in fuoriList:
            definitiveList.append(positionList[i])
            defproba.append(probaList[i])

    res = myhog.draw_detections(framergb, definitiveList, defproba, matching_threshold, 2)
    cv2.putText(framergb,str(res), (30,30), cv2.FONT_HERSHEY_DUPLEX, 0.8, (0,0,0))

    with open(pathTest + os.path.basename(nameFrame) + '.gt', 'w') as gt_file:
        for f in definitiveList:
            gt_file.write(str(f[0]) + ' ' + str(f[1]) + ' ' + str(f[2]) + ' ' + str(f[3]) + '\n')

    if REALTIME is False:
        framebuf.append(framergb)

    return totalRect, len(definitiveList)
	def draw_morphometric_line(self, text, coefficient, point, tilt=(0,0), thickness=2):
		#numpyfied
		try:
			A = point[::-1]
			cx, cy = np.array((int(coefficient), 0)), np.array((0, int(coefficient)))
			tilt = np.array(tilt)
			if point is self.topmost:
				if text != 'ED':
					B = A + cy
					M = (A + B) / 2.0 - (30, 0)
				else:
					A = (A[0], int(A[1] + self.PoL))
					B = (A[0], int(A[1] + self.ED))
					mx, my = midpoint(A, B); mx -= 30; M = (mx, my)
			elif point is self.bottommost:
				B = A - cy
				M = (A + B) / 2.0 - (30, 0)
			elif point is self.leftmost:
				B = A + cx
				M = (A + B) / 2.0 - (0, 30)
			elif point is self.rightmost:
				B = A - cx
				M = (A + B) / 2.0 - (0, 30)
			else:
				return
			A += tilt; B += tilt; M += tilt
			M = M.astype(int)
			c = random.randint(0, 255)
			cv2.line(self.drawn_img, tuple(A), tuple(B), (c, 0, 0), thickness)
			cv2.putText(self.drawn_img, text, tuple(M), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (c, 0, 0), 1)
		except NameError:
			return
def drawStuff(img, keypoints, objects):
  # OpenCV 3+ requires an explicit outImage argument (None allocates a new one)
  out = cv2.drawKeypoints(
      img, keypoints, None, color=(0, 0, 255), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
  for i in range(len(keypoints)):
    x, y = keypoints[i].pt
    cv2.putText(out, objects[i], (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0,0,0))
  return out
Example #17
def detectCircles(image):
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (13, 13), 6)

    circles = cv2.HoughCircles(blur,cv2.HOUGH_GRADIENT,1,20,
                                        param1=50,param2=30,minRadius=9,maxRadius=0)


    margin = 5
    img = image.copy()
    points = None
    if circles is not None:
        points = []
        circles = np.uint16(np.around(circles))
        for circle in circles[0]:
            c = (circle[0], circle[1]) # x, y:
            r = circle[2]
            cv2.circle(img, c, r, (0, 200, 0), 4)

            pxs = img[c[1]-margin:c[1]+margin, c[0]-margin:c[0]+margin]
            v = np.amax(pxs)
            cv2.putText(img, str(v), c, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (200, 255, 0))
            points.append((c, v))

    cv2.imshow("circles", img)

    return points
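
A quick usage sketch for the detector above (the image path is a placeholder):

    img = cv2.imread('gauge.png')  # hypothetical input image
    points = detectCircles(img)
    if points:
        for (x, y), v in points:
            print('circle at', (x, y), 'peak value', v)
    cv2.waitKey(0)
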
Example #18
    def TemplateMatching(self, img, tmp):
        '''
            Run template matching between the input image and a template image.
        '''
        # when matching on edges:
        # gimg = cv2.Canny(img, threshold1=100, threshold2=200, apertureSize=3)
        # tmp = cv2.Canny(tmp, threshold1=100, threshold2=200, apertureSize=3)

        # when matching on plain grayscale:
        gimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        tmp = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)

        gimg2 = gimg

        rows = len(tmp[:, 0])
        cols = len(tmp[0])

        methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
                   'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
        # methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR', 'cv2.TM_CCORR_NORMED']
        # run once per method
        for i, meth in enumerate(methods):
            gimg = gimg2
            method = eval(meth)

            # Apply template matching
            res = cv2.matchTemplate(gimg, tmp, method)

            # minimum value, maximum value and their locations
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

            # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
            # if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            #     top_left = min_loc
            # else:
            #     top_left = max_loc

            # per-method draw colors (disabled):
            # if i == 3:
            #     color = [0, 0, 0]
            # else:
            #     color = [0, 0, 0]
            #     color[i] = 255
            color = 255
            # rectangle around the match: width is cols, height is rows
            top_left = max_loc
            bottom_right = (top_left[0] + cols, top_left[1] + rows)

            cv2.rectangle(img, (top_left[0], top_left[1]), bottom_right, color, 2)
            cv2.putText(img, meth, (top_left[0], top_left[1] - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color)
            cv2.imshow(meth, res / np.amax(res))
            cv2.imshow('Srcimg', img)
            cv2.imshow('template', tmp)
            cv2.imshow('GrayImg', gimg)

            print(max_val, min_val)
def recognize_bill(descriptor, bill_name):
    global frame
    count = 0
    bill_center = (0, 0)
    for h, des in enumerate(descriptor):
        # des = np.array(des,np.float32).reshape((1,128))
        des = np.array(des, np.float32).reshape(1, len(des))
        # retval, results, neigh_resp, dists = knn.find_nearest(des,1)
        retval, results, neigh_resp, dists = knn.findNearest(des, 1)
        res, distance = int(results[0][0]), dists[0][0]

        x, y = kp[res].pt
        center = (int(x), int(y))

        if distance < 0.1:
            # draw matched keypoints in red color
            bill_center = center
            color = (0, 0, 255)
            count += 1
        else:
            # draw unmatched in blue color
            # print distance
            color = (255, 0, 0)

        # Draw matched key points on original image
        cv2.circle(frame, center, 2, color, -1)

    print(count / len(descriptor))

    # if 50% of the points match, write in the bill name
    if count / len(descriptor) >= 0.5:
        # cv2.putText(img,">>BIG BOX<<", (int(x),int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))
        cv2.putText(frame, ">>" + bill_name + "<<", bill_center, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
Example #20
def add_text(img, text, pos, font=cv2.FONT_HERSHEY_SIMPLEX, scale=1,
             bgcolor=BLACK, fgcolor=WHITE, thickness=1):
  fgpos = tuple(pos)
  if bgcolor is not None:
    bgpos = (fgpos[0] + thickness, fgpos[1] + thickness)
    cv2.putText(img, text, bgpos, font, scale, bgcolor, thickness)
  cv2.putText(img, text, fgpos, font, scale, fgcolor, thickness)
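
A quick usage sketch, assuming numpy is imported as np and that BLACK and WHITE are the usual BGR constants suggested by the defaults:

    BLACK, WHITE = (0, 0, 0), (255, 255, 255)
    canvas = np.zeros((200, 400, 3), np.uint8)
    # the background copy offset by thickness pixels acts as a drop shadow
    add_text(canvas, 'hello', (20, 100), scale=1.5, thickness=2)
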
Example #21
def add_bb_into_image(image, bb, color=(255,0,0), thickness=2, label=None):
    r = int(color[0])
    g = int(color[1])
    b = int(color[2])

    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.5
    fontThickness = 1

    x1,y1,x2,y2 = bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
    x1 = int(x1)
    y1 = int(y1)
    x2 = int(x2)
    y2 = int(y2)
    cv2.rectangle(image,(x1,y1),(x2,y2),(b,g,r), thickness)
    # Add label
    if label is not None:
        # Get size of the text box
        (tw, th) = cv2.getTextSize(label, font, fontScale, fontThickness)[0]
        # Top-left coord of the textbox
        (xin_bb, yin_bb) = (x1+thickness, y1-th+int(12.5*fontScale))
        # Checking position of the text top-left (outside or inside the bb)
        if yin_bb - th <= 0: # if outside the image
            yin_bb = y1+th # put it inside the bb
        r_Xin = x1-int(thickness/2) 
        r_Yin = y1-th-int(thickness/2) 
        # Draw filled rectangle to put the text in it
        cv2.rectangle(image,(r_Xin,r_Yin-thickness), (r_Xin+tw+thickness*3,r_Yin+th+int(12.5*fontScale)), (b,g,r), -1)
        cv2.putText(image,label, (xin_bb, yin_bb), font, fontScale, (0,0,0), fontThickness, cv2.LINE_AA)
    return image   
def add_text_watermark(image, text):
	height, width = image.shape[:2]
	# add some text 5 pixels in from the left, 15 pixels up from the bottom
	# options available: cv2.FONT_HERSHEY_SCRIPT_COMPLEX, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
	# cv2.FONT_HERSHEY_COMPLEX_SMALL, cv2.FONT_HERSHEY_TRIPLEX, cv2.FONT_HERSHEY_COMPLEX,
	# cv2.FONT_HERSHEY_DUPLEX, cv2.FONT_HERSHEY_SIMPLEX, cv2.FONT_HERSHEY_PLAIN
	cv2.putText(image, text, (5, height - 15), cv2.FONT_HERSHEY_SCRIPT_COMPLEX, fontScale=1.0, color=(0, 0, 0), thickness=1)
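
Usage sketch (file names are placeholders):

    img = cv2.imread('photo.jpg')  # hypothetical input
    add_text_watermark(img, 'sample watermark')
    cv2.imwrite('photo_marked.jpg', img)
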
Example #23
    def draw_legend(self):
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontscale = self.font_scale
        width = 0
        row_height_max = self.swatch_min_height
        for (mode, colour) in self.tuples:
            if mode is None:
                mode = "Unknown"
            ((tw,th),tb) = cv2.getTextSize(mode, font, fontscale, 1)
            width = max(width, tw)
            row_height_max = max(row_height_max, th)
        row_count = len(self.tuples)
        height = self.top_margin + row_count*row_height_max + (row_count-1)*self.row_gap + self.bottom_margin
        swatch_height = max(self.swatch_min_height, row_height_max)
        swatch_width = max(self.swatch_min_width, swatch_height)
        width += self.left_margin + self.right_margin
        width += swatch_width + self.swatch_text_gap
        img = np.zeros((height,width,3),np.uint8)
        img[:] = (255,255,255)
        cv2.rectangle(img, (0, 0), (width-1, height-1),
                     self.border_colour, self.border_width)
        y = self.top_margin
        for (mode, colour) in self.tuples:
            if mode is None:
                mode = "Unknown"
            x = self.left_margin
            cv2.rectangle(img, (x, y), (x+swatch_width-1, y+swatch_height-1), colour, -1)
            x += swatch_width
            x += self.swatch_text_gap
            cv2.putText(img, mode, (x, y+row_height_max), font, fontscale, self.text_colour)
            y += row_height_max + self.row_gap

        return img
Example #24
 def process_image(self, inImg):
     frame = cv2.flip(inImg,1,0)
     grayImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
     cropped = cv2.resize(grayImg, (grayImg.shape[1] // self.size, grayImg.shape[0] // self.size))
     faces = self.haar_cascade.detectMultiScale(cropped)
     persons = []
     for i in range(len(faces)):
         face_i = faces[i]
         x = face_i[0] * self.size
         y = face_i[1] * self.size
         w = face_i[2] * self.size
         h = face_i[3] * self.size
         face = grayImg[y:y + h, x:x + w]
         face_resize = cv2.resize(face, (self.im_width, self.im_height))
         confidence = self.model.predict(face_resize)
         # cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
         if confidence[1]<3100:
             person = self.names[confidence[0]]
             cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
             cv2.putText(frame, '%s - %.0f' % (person, confidence[1]), (x-10, y-10), cv2.FONT_HERSHEY_PLAIN,2,(0, 255, 0),2)
         else:
             person = 'Unknown'
             cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)
             cv2.putText(frame, person, (x-10, y-10), cv2.FONT_HERSHEY_PLAIN,2,(0, 102, 255),2)
         persons.append(person)
     return (frame, persons)
Example #25
    def threshold_im(self):
        self.gray_image()
        # Otsu's method (discriminant analysis) picks the threshold automatically
        th, t_im = cv2.threshold(self.gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        cv2.putText(t_im, str(th), (10, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        print("threshold: " + str(th))
        cv2.imshow("threshold", t_im)
def show_video(name):
    cap = cv2.VideoCapture(name)
    rotate = False
    M = cv2.getRotationMatrix2D((480,270), 180, 1.0)
    if not cap.isOpened():
      print("Error when reading video")
    else:
        while(True):
            try:
                # Capture frame-by-frame
                ret, frame = cap.read()
                frame = cv2.resize(frame, (960,540))
                if rotate:
                    frame = cv2.warpAffine(frame, M, (960, 540))
                cv2.putText(frame,'press the escape key when done',(20,20), cv2.FONT_HERSHEY_SIMPLEX, 1,(130,130,130),2)
                cv2.imshow(name,frame)
            except Exception:
                # reopen the capture if reading or resizing failed
                cap = cv2.VideoCapture(name)
                ret, frame = cap.read()
                frame = cv2.resize(frame, (960, 540))
                cv2.imshow(name, frame)
            k = cv2.waitKey(20)
            if k == 27:      # escape key
                break
            elif k == 114:   # 'r' toggles rotation
                rotate = not rotate

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    return rotate
Example #27
def writeLabelsInImg( img, referencePoints1, outFileName, referencePoints2 = None, resize = None ):
    num1 = 0
    #offset = np.array([100,0]) #for tae2016 only
    color1 = (0,0,255)
    font = cv2.FONT_HERSHEY_SIMPLEX
    for point in referencePoints1:
        point = tuple(point)
        #point = tuple(point - offset) #for tae2016 only
        cv2.putText(img, str(num1), point, font, 3, color1, 2)
        num1 += 1

    if referencePoints2:
        offset = np.array([150, 0])
        num2 = 0
        color2 = (255, 0, 0)
        for point2 in referencePoints2:
            point2 = tuple(point2 + offset)
            cv2.putText(img, str(num2), point2, font, 2, color2, 2)
            num2 += 1

    if resize:
        img = cv2.resize(img, None, fx=resize, fy=resize, interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(outFileName, img)
    return img
    def transform(self):
        if self.left is None or self.points is None:
            return
        left  = np.asarray(self.left)
        #num = pcl.pointXYZRGB(self.points)
        """ point cloud stuff
        dtype_list = [(f.name, np.float32) for f in self.points.fields]
        cloud_arr = np.fromstring(self.points.data, dtype_list)
        cloud = np.reshape(cloud_arr, (self.points.height, self.points.width))
        """

        focal = self.points.f
        baseline = self.points.T
        #next step get the disparity value d then use z = fb/d
        disp = struct.unpack(str(self.points.image.width*self.points.image.height)+'f', self.points.image.data)
        self.left = self.points = None
        detect = colorDetect.colorDetect(left, [110,30, 140], [255, 85, 255])
        boxes = detect.detect()
        for data in boxes:
            x,y,w,h = data
            cv2.rectangle(left,(x,y),(x+w,y+h),(0,255,0),10)
            minDepth = self.minDepthInRegion(disp, focal, baseline, (x, y),(x+w, y+h), left.shape[0])
            if minDepth > 0:
                cv2.putText(left, str(minDepth) + " m", (x - w // 2, y + h // 2), cv2.FONT_HERSHEY_COMPLEX, 1, 255)
        try:
            # modern cv_bridge API (the old cv_to_imgmsg/cv2.cv.fromarray pair was removed)
            self.image_left_pub.publish(self.bridge.cv2_to_imgmsg(left, "bgr8"))
        except CvBridgeError as e:
            print(e)
Example #29
def Spell(spell):
    # clear all checks
    ig = [[0] for x in range(15)]
    # Invoke IoT (or any other) actions here
    cv2.putText(mask, spell, (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0))
    if spell == "Colovaria":
        print("trinket_pin trigger")
        pi.write(trinket_pin, 0)
        time.sleep(1)
        pi.write(trinket_pin, 1)
    elif spell == "Incendio":
        print("switch_pin OFF")
        pi.write(switch_pin, 0)
        print("nox_pin OFF")
        pi.write(nox_pin, 0)
        print("incendio_pin ON")
        pi.write(incendio_pin, 1)
    elif spell == "Lumos":
        print("switch_pin ON")
        pi.write(switch_pin, 1)
        print("nox_pin OFF")
        pi.write(nox_pin, 0)
        print("incendio_pin OFF")
        pi.write(incendio_pin, 0)
    elif spell == "Nox":
        print("switch_pin OFF")
        pi.write(switch_pin, 0)
        print("nox_pin ON")
        pi.write(nox_pin, 1)
        print("incendio_pin OFF")
        pi.write(incendio_pin, 0)
    print("CAST: %s" % spell)
Example #30
def draw_all_detection(im_array, detections, class_names, scale):
    """
    visualize all detections in one image
    :param im_array: [b=1 c h w] in rgb
    :param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
    :param class_names: list of names in imdb
    :param scale: visualize the scaled image
    :return:
    """
    import cv2
    import random
    color_white = (255, 255, 255)
    im = image.transform_inverse(im_array, config.PIXEL_MEANS)
    # change to bgr
    im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    for j, name in enumerate(class_names):
        if name == '__background__':
            continue
        color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))  # generate a random color
        dets = detections[j]
        for det in dets:
            bbox = det[:4] * scale
            score = det[-1]
            bbox = list(map(int, bbox))
            cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
            cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
                        color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
    return im
Example #31
last_percent = 0
while (frame_idx < length):

    frame_idx += 1
    percentage = int(frame_idx * 100 / length)
    if percentage != last_percent:
        print(str(percentage) + "% done")
        last_percent = percentage

    timestamp += 1. / FPS

    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break

    for i, a in enumerate(getAnnotations(annotations["purple"], timestamp)):
        cv2.putText(frame, a, (630, 50 * i + 50), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (155, 0, 78), 2)

    for i, a in enumerate(getAnnotations(annotations["yellow"], timestamp)):
        cv2.putText(frame, a, (20, 50 * i + 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                    (0, 100, 120), 2)

    # Display the resulting frame
    #cv2.imshow('frame',frame)
    #if cv2.waitKey(1000/FPS) & 0xFF == ord('q'):
    #    break
    videowriter.write(frame)

# When everything done, release the capture
cap.release()
videowriter.release()
cv2.destroyAllWindows()
def draw_text(img, text, x, y):
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)
for c in cnts:
    # compute the center of the contour
    M = cv2.moments(c)
    if M["m00"] == 0:
        # M["m00"] = 1
        continue
    cX = int((M["m10"] / M["m00"]) * ratio)
    cY = int((M["m01"] / M["m00"]) * ratio)

    # detect the shape of the contour and label the color
    # shape = sd.detect(c)
    color = cl.label(lab, c)

    # multiply the contour (x, y)-coordinates by the resize ratio,
    # then draw the contours and the name of the shape and labeled
    # color on the image
    c = c.astype("float")
    c *= ratio
    c = c.astype("int")
    text = "{}".format(color)
    if text != "NaN":
        cv2.drawContours(image, [c], -1, (255, 255, 255), 2)
        cv2.putText(image, text, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 255), 2)
    else:
        cv2.drawContours(image, [c], -1, (255, 255, 255), 2)

    # show the output image
    cv2.imshow("Image", image)
    cv2.waitKey(0)
def main():
	## calling argparser
	args = build_argparser().parse_args()
	# create a log file
	logging.basicConfig(filename='Project_log.log', level=logging.INFO)
	logger = logging.getLogger()

	## get args input variable 
	input_path = args.input
	## get args visualization flags
	visual_flags = args.flag_visualization

	## put all keys for visualization in dict
	Dict_visual_keys = {
		'args_face': 'fd',
		'args_land': 'fl',
		'args_head': 'hp',
		'args_gaze': 'ge',
		'args_crop': 'crop',
		'args_win': 'win'
	}


	## check if using CAMERA or video file or image
	if input_path == "CAM" or input_path == "cam":
		print("\n## You are using the CAMERA right now... " + input_path + " detected!")
		logger.info("\n## You are using the CAMERA right now... " + input_path + " detected!")
		feeder_in = InputFeeder(input_path.lower())
	else:
		## check if the input file exists at the given path
		if not os.path.isfile(input_path):
			print("\nInput file does not exist at path: " + input_path + ". Please check again!")
			logger.error("## Input file does not exist at path: " + input_path + ". Please check again!")
			exit(1)
		else:
			print('\nInput path exists: ' + input_path + '\n')
			logger.info('\nInput path exists: ' + input_path)
			feeder_in = InputFeeder("video", input_path)


	## handler for mouse moving by precision and speed
	mouse_handler = MouseController('medium', 'fast')

	## initialize 4 models
	model_fd, model_fld, model_hpe, model_ge = models_handler(logger, args)


	feeder_in.load_data()
	print("\n## Loaded Input Feeder ")
	logger.info("## Loaded Input Feeder ")

	## load face detection model
	model_fd_start_time = time.time()
	model_fd.load_model()
	model_fd_load_time = (time.time() - model_fd_start_time)*1000
	logger.info('FaceDetection load time: ' + str(round(model_fd_load_time, 3)) + ' ms')

	## load facial landmarks detection model
	model_fld_start_time = time.time()
	model_fld.load_model()
	model_fld_load_time = (time.time() - model_fld_start_time)*1000
	logger.info('FacialLandmarkDetection load time: ' + str(round(model_fld_load_time, 3)) + ' ms')

	## load head pose estimation model
	model_hpe_start_time = time.time()
	model_hpe.load_model()
	model_hpe_load_time = (time.time() - model_hpe_start_time)*1000
	logger.info('HeadPoseEstimation load time: ' + str(round(model_hpe_load_time, 3)) + ' ms')

	## load gaze estimation model
	model_ge_start_time = time.time()
	model_ge.load_model()
	model_ge_load_time, total_load_time = (time.time() - model_ge_start_time)*1000, (time.time() - model_fd_start_time)*1000
	logger.info('GazeEstimation load time: ' + str(round(model_ge_load_time, 3)) + ' ms')
	## Model load time in total 
	logger.info('Total Load time: ' + str(round(total_load_time, 3)) + ' ms')

	print('\n## All models successfully loaded!')
	logger.info('## All models successfully loaded!')

	frame_count = 0
	print("## Start inference on frame!")
	logger.info("## Start inference on frame!")
	

	## empty list for each model to accumulate infer time and later get average infer time
	fd_infer_time = []
	fld_infer_time = []
	hpe_infer_time = []
	ge_infer_time = []

	start_infer_time = time.time()
	## loop through each frame and start inference on each model
	for flag_return, frame in feeder_in.next_batch():
		# print(flag_return)
		if not flag_return:
			print('\nflag_return: ' + str(flag_return) + '. Video has reached the end...')
			logger.info('flag_return: ' + str(flag_return) + '. Video has reached the end...')
			break

		event_key = cv2.waitKey(60)
		## frame count add by 1
		frame_count += 1
		if args.show_info:
			print('\nNo. frame: {}'.format(frame_count))

		if event_key == 27:
			print("\nUser keyboard exit!....")
			break

		## Face detection ##
		t0 = time.time()
		cropped_face, face_coords = model_fd.predict(frame.copy(), args.prob_threshold, args.perf_counts)
		# print(cropped_face.shape)
		## face_coords 
		## top left, bottom right
		fd_infer_time.append((time.time() - t0)*1000)
		# print(fd_infer_time)
		if args.show_info:
			print("Average inference time of FaceDetection model: {} ms".format(np.average(np.asarray(fd_infer_time))))
		
		## if no face detected
		if len(face_coords) == 0:
			print("## No face detected...")
			logger.error("## No face detected. Please check once again!")
			continue
		
		## Landmarks detection ##
		t1 = time.time()
		l_eye_box, r_eye_box, eyes_coords = model_fld.predict(cropped_face.copy(), args.perf_counts)
		# print(l_eye_box.shape, r_eye_box.shape) # left eye and right eye image
		## [left eye box, right eye box] 
		## [[leye_xmin,leye_ymin,leye_xmax,leye_ymax], [reye_xmin,reye_ymin,reye_xmax,reye_ymax]]
		# print(eyes_coords)
		fld_infer_time.append((time.time()- t1)*1000)
		# print(fld_infer_time)
		if args.show_info:
			print("Average inference time of FacialLandmarkDetection model: {} ms".format(np.average(np.asarray(fld_infer_time))))
		
		
		## Head pose detection ##
		t2 = time.time()
		hpe_output = model_hpe.predict(cropped_face.copy(), args.perf_counts)
		# [6.927431583404541, -4.0265960693359375, -1.8397517204284668]
		# print(hpe_output) # yaw, pitch, roll
		hpe_infer_time.append((time.time() - t2)*1000)
		if args.show_info:
			print("Average inference time of HeadPoseEstimation model: {} ms".format(np.average(np.asarray(hpe_infer_time))))

		## Gaze estimation ##		
		t3 = time.time()
		mouse_position, gaze_vector = model_ge.predict(l_eye_box, r_eye_box, hpe_output, args.perf_counts)
		## mouse position (x, y), gaze_vector [-0.13984774, -0.38296703, -0.9055522 ]
		ge_infer_time.append((time.time() - t3)*1000)
		if args.show_info:
			print("Average inference time of GazeEstimation model: {} ms".format(np.average(np.asarray(ge_infer_time))))

		# print('@@@@@@@@@@@@@', len(visual_flags))
				
		## Visualize the result if visual_flags activated
		if len(visual_flags) > 0 and len(visual_flags) <= 6 and Dict_visual_keys['args_win'] in visual_flags:
			frame_copy = frame.copy()

			if Dict_visual_keys['args_face'] in visual_flags:
				# Face
				cv2.rectangle(frame_copy, (face_coords[0], face_coords[1]), (face_coords[2], face_coords[3]), (255,0,0), 2) 				
				
			if Dict_visual_keys['args_land'] in visual_flags:
				# Facial Landmark left right eyes
				cv2.rectangle(frame_copy, (face_coords[0] + eyes_coords[0][0], face_coords[1] + eyes_coords[0][1]), (face_coords[0]+eyes_coords[0][2], face_coords[1]+eyes_coords[0][3]),(255,255,255), 2)
				cv2.rectangle(frame_copy, (face_coords[0] + eyes_coords[1][0], face_coords[1] + eyes_coords[1][1]), (face_coords[0]+eyes_coords[1][2], face_coords[1]+eyes_coords[1][3]),(255,255,255), 2)				
			
			if Dict_visual_keys['args_crop'] in visual_flags:
				## cropped face with landmarks left and right eyes ##
				land_frame = cropped_face.copy()
				cv2.rectangle(land_frame, (eyes_coords[0][0], eyes_coords[0][1]), (eyes_coords[0][2],eyes_coords[0][3]),(0,255,0), 2)
				cv2.rectangle(land_frame, (eyes_coords[1][0], eyes_coords[1][1]), (eyes_coords[1][2],eyes_coords[1][3]),(0,255,0), 2)
				cv2.imshow('FacialLandmark', cv2.resize(land_frame, (300, 400)))

			if Dict_visual_keys['args_head'] in visual_flags:
				# Head Pose values
				cv2.putText(frame_copy, "Angles of Head Pose:", (10, 25), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 2)
				cv2.putText(frame_copy, "Yaw: {:.2f}".format(hpe_output[0]), (10, 55), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 2)
				cv2.putText(frame_copy, "Pitch: {:.2f}".format(hpe_output[1]), (10, 85), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 2)
				cv2.putText(frame_copy, "Roll: {:.2f}".format(hpe_output[2]), (10, 115), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 2)

			if Dict_visual_keys['args_gaze'] in visual_flags:
				# Gaze arrow left right eyes
				x, y = gaze_vector[0:2]
				len_add = 400
				## eye left center point (x, y)
				eye_left_center = (int(face_coords[0] + (eyes_coords[0][0]+eyes_coords[0][2])/2), int(face_coords[1] + (eyes_coords[0][1]+eyes_coords[1][3])/2))
				## eye right center point (x, y)
				eye_right_center = (int(face_coords[0] + (eyes_coords[1][0]+eyes_coords[1][2])/2), int(face_coords[1] + (eyes_coords[1][1]+eyes_coords[1][3])/2))			
				## draw arrow line for both gaze of eyes
				cv2.arrowedLine(frame_copy, eye_left_center, (int(eye_left_center[0]+x*len_add), int(eye_left_center[1]-y*len_add)), (0,0,255), 3)
				cv2.arrowedLine(frame_copy, eye_right_center, (int(eye_right_center[0]+x*len_add), int(eye_right_center[1]-y*len_add)), (0,0,255), 3)
			
			## if with '-show win' without model keys will only display normal video stream
			cv2.imshow('Visualization', cv2.resize(frame_copy, (800,700)))
		else:
			print("\n## No Visualization, Only information displaying... \n## If needs visualization please add '-show' with specific keys...")


		if frame_count % 4 == 0:
			## start move mouse each 4 frames
			mouse_handler.move(mouse_position[0], mouse_position[1])

	total_infer_time = time.time() - start_infer_time
	fps = frame_count / round(total_infer_time, 3)

	# print(args.show_info)
	if args.show_info:
		print('Total inference time: ' + str(round(total_infer_time*1000, 3)) + ' ms')
		print("Total frame: " + str(frame_count))
		print('FPS: ' + str(fps))

	## loggging into project log file
	# logger.info('Total inference time: ' + str(round(total_infer_time, 3)) + ' s')	
	logger.info("Average inference time of FaceDetection model: {} ms".format(np.average(np.asarray(fd_infer_time))))
	logger.info("Average inference time of FacialLandmarkDetection model: {} ms".format(np.average(np.asarray(fld_infer_time))))
	logger.info("Average inference time of HeadPoseEstimation model: {} ms".format(np.average(np.asarray(hpe_infer_time))))
	logger.info("Average inference time of GazeEstimation model: {} ms".format(np.average(np.asarray(ge_infer_time))))
	logger.info('Total inference time: ' + str(round(total_infer_time*1000, 3)) + ' ms')
	logger.info("Total frame: " + str(frame_count))
	logger.info('FPS: ' + str(fps))
	logger.error("### Camera Stream or Video Stream has reach to the end...###")

	cv2.destroyAllWindows()
	feeder_in.close()
Example #35
import numpy as np
import cv2

img = np.zeros((512, 512, 3), np.uint8)  # create a black canvas image

cv2.line(img, (0, 0), (512, 512), (255, 0, 0), 5)
cv2.rectangle(img, (100, 0), (510, 128), (255, 255, 0), 3)
cv2.circle(img, (447, 100), 20, (0, 255, 0), 5)
cv2.circle(img, (200, 100), 20, (0, 255, 0), -1)
cv2.ellipse(img, (200, 300), (80, 50), 0, 0, 180, 255, -1)

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'OpenCV', (10, 500), font, 3, (255, 255, 255), 2, cv2.LINE_AA)

cv2.imshow('draw_line', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
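
Note that the org argument of cv2.putText is the bottom-left corner of the text baseline, which is why 'OpenCV' at (10, 500) lands near the bottom of the 512-pixel canvas. A small sketch using cv2.getTextSize to bottom-align text exactly (illustrative):

    (tw, th), baseline = cv2.getTextSize('OpenCV', font, 3, 2)
    # place the text so its full height, including the baseline, fits in the canvas
    cv2.putText(img, 'OpenCV', (10, 512 - baseline - 1), font, 3, (255, 255, 255), 2, cv2.LINE_AA)
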
        frame_h, frame_w, _ = frame.shape
        reindex_x = lambda x: max(min(x, frame_w), 1)
        reindex_y = lambda x: max(min(x, frame_h), 1)

        # loop over detected faces
        for face in faces_hog:
            x = reindex_x(int(face.left() / scale))
            y = reindex_y(int(face.top() / scale))
            r = reindex_x(int(face.right() / scale))
            b = reindex_y(int(face.bottom() / scale))

            # crop the face region (numpy indexing is [y1:y2, x1:x2])
            crop_face = frame[y:b, x:r]
            is_boss = predict(crop_face)
            if is_boss:
                cv2.putText(frame, "BOSS", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2,
                            (0, 0, 255), 4)
                cv2.rectangle(frame, (x, y), (r, b), (0, 0, 255), 2)
                boss_count += 1
                # Open your IDE application for coding
                if boss_count > 3:
                    os.system('open -a "PyCharm CE"')
                    boss_count = 0
            else:
                cv2.putText(frame, "NORMAL", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 2,
                            (0, 255, 0), 4)
                cv2.rectangle(frame, (x, y), (r, b), (0, 255, 0), 2)


        cv2.imshow("frame", frame)
        key = cv2.waitKey(1)
        if key == 27:
            break
   # Creating contour to track red color 
   contours, hierarchy = cv2.findContours(red_mask, 
                                          cv2.RETR_TREE, 
                                          cv2.CHAIN_APPROX_SIMPLE) 
     
   for pic, contour in enumerate(contours):
       area = cv2.contourArea(contour)
       if area > 300:
           x, y, w, h = cv2.boundingRect(contour)
           imageFrame = cv2.rectangle(imageFrame, (x, y),
                                      (x + w, y + h),
                                      (0, 0, 255), 2)

           cv2.putText(imageFrame, "Red Colour", (x, y),
                       cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                       (0, 0, 255))
 
   # Creating contour to track green color 
   contours, hierarchy = cv2.findContours(green_mask, 
                                          cv2.RETR_TREE, 
                                          cv2.CHAIN_APPROX_SIMPLE) 
     
   for pic, contour in enumerate(contours):
       area = cv2.contourArea(contour)
       if area > 300:
           x, y, w, h = cv2.boundingRect(contour)
           imageFrame = cv2.rectangle(imageFrame, (x, y),
                                      (x + w, y + h),
                                      (0, 255, 0), 2)

           cv2.putText(imageFrame, "Green Colour", (x, y),
                       cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                       (0, 255, 0))
        ########################################
        #                 MLP                  #
        ########################################

        # neural-network action recognition stage

        # visualization stage
        ########################################
        #            original image            #
        ########################################
        curr_time = time.time()
        exec_time = curr_time - prev_time

        image = utils.draw_bbox(frame, only_one_bboxes)  # draw bounding boxes
        result = np.asarray(image)  # seemingly unused (overwritten below)
        info = "time: %.2f ms" %(1000*exec_time)
        cv2.putText(result, text=info, org=(50, 70), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1, color=(255, 0, 0), thickness=2)
        cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
        result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imshow("result", result)
        
        if cv2.waitKey(1) & 0xFF == ord('q'): 
            file_txt.close()
            break



Example #39
 def display(frame):
     canvas = cv2.cvtColor(frame.image, cv2.COLOR_RGB2BGR)
     cv2.putText(canvas, str(frame.header.timestamp), (10, 50),
                 cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 3)
     cv2.imshow("Image", canvas)
     cv2.waitKey(2)
Example #40
    def dump_video(self, vidtype, scale=4, mot_tracks=None, one_color=True):
        if vidtype not in ['calib', 'poses', 'detections', 'tracks', 'mask']:
            raise Exception('Unknown video format')

        if vidtype == 'tracks' and mot_tracks is None:
            raise Exception('No MOT tracks provided')

        glog.info('Dumping {0} video'.format(vidtype))

        fourcc = cv2.VideoWriter_fourcc(*'MP4V')
        out_file = join(self.path_to_dataset, '{0}.mp4'.format(vidtype))
        out = cv2.VideoWriter(out_file, fourcc, 20.0, (self.shape[1] // scale, self.shape[0] // scale))

        font = cv2.FONT_HERSHEY_SIMPLEX
        cmap = matplotlib.cm.get_cmap('hsv')
        if mot_tracks is not None:
            n_tracks = max(np.unique(mot_tracks[:, 1]))

        for i, basename in enumerate(tqdm(self.frame_basenames)):
            img = self.get_frame(i, dtype=np.uint8)

            if vidtype == 'poses':
                # Pose
                poses = self.poses[basename]
                draw_utils.draw_skeleton_on_image(img, poses, cmap, one_color=one_color)

            if vidtype == 'calib':
                # Calib
                cam = cam_utils.Camera('tmp', self.calib[basename]['A'], self.calib[basename]['R'], self.calib[basename]['T'], self.shape[0], self.shape[1])
                canvas, mask = draw_utils.draw_field(cam)
                canvas = cv2.dilate(canvas.astype(np.uint8), np.ones((15, 15), dtype=np.uint8)).astype(float)
                img = img * (1 - canvas)[:, :, None] + np.dstack((canvas*255, np.zeros_like(canvas), np.zeros_like(canvas)))

            elif vidtype == 'detections':
                # Detection
                bbox = self.bbox[basename].astype(np.int32)
                if self.ball[basename] is not None:
                    ball = self.ball[basename].astype(np.int32)
                else:
                    ball = np.zeros((0, 4), dtype=np.int32)

                for j in range(bbox.shape[0]):
                    cv2.rectangle(img, (bbox[j, 0], bbox[j, 1]), (bbox[j, 2], bbox[j, 3]), (255, 0, 0), 10)
                for j in range(ball.shape[0]):
                    cv2.rectangle(img, (ball[j, 0], ball[j, 1]), (ball[j, 2], ball[j, 3]), (0, 255, 0), 10)

            elif vidtype == 'tracks':
                # Tracks
                cur_id = mot_tracks[:, 0] - 1 == i
                current_boxes = mot_tracks[cur_id, :]

                for j in range(current_boxes.shape[0]):
                    track_id, x, y, w, h = current_boxes[j, 1:6]
                    clr = cmap(track_id / float(n_tracks))
                    cv2.rectangle(img, (int(x), int(y)), (int(x + w), int(y + h)),
                                  (clr[0] * 255, clr[1] * 255, clr[2] * 255), 10)
                    cv2.putText(img, str(int(track_id)), (int(x), int(y)), font, 2, (255, 255, 255), 2, cv2.LINE_AA)

            elif vidtype == 'mask':
                # Mask
                mask = self.get_mask_from_detectron(i)*255
                img = np.dstack((mask, mask, mask))

            img = cv2.resize(img, (self.shape[1] // scale, self.shape[0] // scale))
            out.write(np.uint8(img[:, :, (2, 1, 0)]))

        # Release everything if job is finished
        out.release()
        cv2.destroyAllWindows()
Example #41
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)

        process_this_frame = not process_this_frame
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            cv2.rectangle(img1, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(img1, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(img1, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)
        cv2.imshow('Video', img1)
    if frame1:
        cv2.imshow('img1', img2)
        ret1, img2 = frame1.read()
        stabilized_frame = stabilizer.stabilize_frame(input_frame=img2,
                                                      border_size=50)
        small_frame = cv2.resize(stabilized_frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)
            face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings,
Example #42
def draw_boxes(im, boxes, labels=None, color=None):
    """
    Args:
        im (np.ndarray): a BGR image. It will not be modified.
        boxes (np.ndarray or list[BoxBase]): If an ndarray,
            must be of shape Nx4 where the second dimension is [x1, y1, x2, y2].
        labels: (list[str] or None)
        color: a 3-tuple (in range [0, 255]). By default will choose automatically.

    Returns:
        np.ndarray: a new image.
    """
    FONT = cv2.FONT_HERSHEY_SIMPLEX
    FONT_SCALE = 0.4
    if isinstance(boxes, list):
        arr = np.zeros((len(boxes), 4), dtype='int32')
        for idx, b in enumerate(boxes):
            assert isinstance(b, BoxBase), b
            arr[idx, :] = [int(b.x1), int(b.y1), int(b.x2), int(b.y2)]
        boxes = arr
    else:
        boxes = boxes.astype('int32')
    if labels is not None:
        assert len(labels) == len(boxes), "{} != {}".format(
            len(labels), len(boxes))
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    sorted_inds = np.argsort(-areas)  # draw large ones first
    assert areas.min() > 0, areas.min()
    # allow equal, because we are not very strict about rounding error here
    assert boxes[:, 0].min() >= 0 and boxes[:, 1].min() >= 0 \
        and boxes[:, 2].max() <= im.shape[1] and boxes[:, 3].max() <= im.shape[0], \
        "Image shape: {}\n Boxes:\n{}".format(str(im.shape), str(boxes))

    im = im.copy()
    COLOR = (218, 218, 218) if color is None else color
    COLOR_DIFF_WEIGHT = np.asarray(
        (3, 4, 2),
        dtype='int32')  # https://www.wikiwand.com/en/Color_difference
    COLOR_CANDIDATES = PALETTE_RGB[[0, 1, 2, 3, 18, 113], :]
    if im.ndim == 2 or (im.ndim == 3 and im.shape[2] == 1):
        im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    for i in sorted_inds:
        box = boxes[i, :]

        best_color = COLOR
        if labels is not None:
            label = labels[i]

            # find the best placement for the text
            ((linew, lineh), _) = cv2.getTextSize(label, FONT, FONT_SCALE, 1)
            bottom_left = [box[0] + 1, box[1] - 0.3 * lineh]
            top_left = [box[0] + 1, box[1] - 1.3 * lineh]
            if top_left[1] < 0:  # out of image
                top_left[1] = box[3] - 1.3 * lineh
                bottom_left[1] = box[3] - 0.3 * lineh
            textbox = IntBox(int(top_left[0]), int(top_left[1]),
                             int(top_left[0] + linew),
                             int(top_left[1] + lineh))
            textbox.clip_by_shape(im.shape[:2])
            if color is None:
                # find the best color
                mean_color = textbox.roi(im).mean(axis=(0, 1))
                best_color_ind = (np.square(COLOR_CANDIDATES - mean_color) *
                                  COLOR_DIFF_WEIGHT).sum(axis=1).argmax()
                best_color = COLOR_CANDIDATES[best_color_ind].tolist()

            cv2.putText(im,
                        label, (textbox.x1, textbox.y2),
                        FONT,
                        FONT_SCALE,
                        color=best_color,
                        lineType=cv2.LINE_AA)
        cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]),
                      color=best_color,
                      thickness=1)
    return im
Example #43
    def process_images(self, obj_des, conf_des):
        start_time = None
        image = None
        while self.obj_det_running:  
            rx_data_temp = None

            while True:
                # check if data to be received is ready: drain the socket and
                # keep only the most recent packet
                rx_data = self.comm_socket.getPacket()
                if rx_data is not None:
                    rx_data_temp = rx_data
                elif rx_data_temp is not None:
                    break
                else:
                    print("Waiting for UDP packets..")

            if rx_data_temp is not None:
                # data received; treat it as a jpg, try to decode it, and display
                rx_data_array = np.asarray(rx_data_temp, dtype=np.uint8)
                # decode jpg
                frame = cv2.imdecode(rx_data_array, -1)
                # check that the decoded frame is valid (imdecode returns None on failure)
                if frame is not None and frame.size > 0:
                    image = frame

            if image is None:
                continue
            (h, w) = image.shape[:2]
            blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5)

            # pass the blob through the network and obtain the detections and predictions
            # print("[INFO] computing object detections...")
            self.net.setInput(blob)
            detections = self.net.forward()
            
            # loop over the detections
            for i in np.arange(0, detections.shape[2]):         
                # extraction of the confidence level (i.e., probability) associated with the prediction
                confidence = detections[0, 0, i, 2]

                # filter out weak detections by ensuring the "confidence" is
                # greater than the desired confidence level
                if confidence > conf_des:
                    # extract the index of the class retrieved from "detections" variable 
                    # compute the bounding box coordinates (x, y) of the object 
                    idx = int(detections[0, 0, i, 1])
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype('int')

                    if self.CLASSES[idx] == obj_des:
                        # record the instant of this detection
                        start_time = time.time()
                        # report the detection of the desired object
                        self.output_lock.acquire()
                        self.output = [box, 0]      # elapsed_time = 0
                        self.output_lock.release()

                    # print the information related to class and confidence of the detected object
                    label = "{}: {:.2f}%".format(self.CLASSES[idx], confidence * 100)
                    # print("[INFO] {}".format(label))
                    
                    # vertical alignment of label text with respect to bounding box
                    if startY - 15 > 15:
                        y = startY - 15
                    else:
                        y = startY + 15

                    # tracking the bounding box and writing the corresponding label
                    cv2.rectangle(image, (startX, startY), (endX, endY), self.COLORS[idx], 2)
                    cv2.putText(image, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.COLORS[idx], 2)

                    # image center coordinates
                    x_c = (endX + startX)//2
                    y_c = (endY + startY)//2

                    # length of the cross elements to mark the center of the box
                    delta_x = w//32
                    delta_y = h//32

                    # draw cross at the box center
                    cv2.line(image, (x_c - delta_x, y_c), (x_c + delta_x, y_c), self.COLORS[idx], 1)
                    cv2.line(image, (x_c, y_c - delta_y), (x_c, y_c + delta_y), self.COLORS[idx], 1)

            if start_time is not None:
                elapsed_time = time.time() - start_time
                self.output_lock.acquire()
                self.output[1] = elapsed_time
                self.output_lock.release()

            # draw viewfinder on the image center
            # colour
            white = [255, 255, 255]

            # center image coordinates
            x_c = w//2
            y_c = h//2
            
            # semi-sides length of the rectangle
            delta_x = w//32
            delta_y = h//32

            cv2.rectangle(image, (x_c - delta_x, y_c - delta_y), (x_c + delta_x, y_c + delta_y), white, 1)
            cv2.line(image, (x_c - 2 * delta_x, y_c), (x_c - delta_x, y_c), white, 1)
            cv2.line(image, (x_c + delta_x, y_c), (x_c + 2 * delta_x, y_c), white, 1)
            cv2.line(image, (x_c, y_c - 2 * delta_y), (x_c, y_c - delta_y), white, 1)
            cv2.line(image, (x_c, y_c + delta_y), (x_c, y_c + 2 * delta_y), white, 1)

            # show on screen the image processed
            cv2.imshow('Camera', image)
            cv2.waitKey(10)

            # if the cam window is closed stop the loop
            if cv2.getWindowProperty('Camera', cv2.WND_PROP_VISIBLE) == 0:      
                break

        time.sleep(TS)
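
The loop above assumes self.net, self.CLASSES, and self.COLORS were prepared elsewhere. A hedged sketch of the usual MobileNet-SSD Caffe setup that matches the blob parameters used (scale 1/127.5, mean 127.5); the file names are placeholders:

# assumption: a Caffe MobileNet-SSD model with the standard 21 VOC classes
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                               "MobileNetSSD_deploy.caffemodel")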
Example #44
def drawShapes(c, label, labelLocation):
    # draw the contour outline and a label offset up and to the left of labelLocation
    cv2.drawContours(image, [c], -1, (0, 255, 0), 3)
    cv2.putText(image, label, (labelLocation[0] - 50, labelLocation[1] - 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
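
drawShapes draws on the module-level image. A hypothetical driver that feeds it contours and centroid-based label locations (gray and thresh are placeholder names; the two-value findContours return assumes OpenCV 4):

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    M = cv2.moments(c)
    if M["m00"] > 0:  # skip degenerate contours with zero area
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        drawShapes(c, "shape", (cX, cY))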
Example #45
# sort the results bounding box coordinates from top to bottom
results = sorted(results, key=lambda r: r[0][1])


for ((startX, startY, endX, endY), text) in results:
	# display the text OCR'd by Tesseract
	print("OCR TEXT")
	print("========")
	print("{}\n".format(text))

	# strip out non-ASCII text so we can draw the text on the image
	# using OpenCV, then draw the text and a bounding box surrounding
	# the text region of the input image
	text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
	output = orig.copy()
	cv2.rectangle(output, (startX, startY), (endX, endY),
		(0, 0, 255), 2)
	cv2.putText(output, text, (startX, startY - 20),
		cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 0, 255), 3)

	# update the FPS counter
	fps.update()

	# show the output frame with the OCR'd text and bounding box drawn
	cv2.imshow("Text Detection", output)
	key = cv2.waitKey(1) & 0xFF

	# if the "s" key was pressed, break from the loop
	if key == ord("s"):
		break

# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
Example #46
                    to.counted = True

                # if the direction is positive (indicating the object
                # is moving down) AND the centroid is below the
                # center line, count the object
                elif direction > 0 and centroid[1] > H // 2:
                    totalDown += 1
                    to.counted = True

        # store the trackable object in our dictionary
        trackableObjects[objectID] = to

        # draw both the ID of the object and the centroid of the
        # object on the output frame
        text = "ID {}".format(objectID)
        cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

    # construct a tuple of information we will be displaying on the
    # frame
    info = [
        ("Up", totalUp),
        ("Down", totalDown),
        ("Status", status),
    ]

    # loop over the info tuples and draw them on our frame
    for (i, (k, v)) in enumerate(info):
        text = "{}: {}".format(k, v)
        cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
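
For context, direction and to.counted follow the common centroid-tracking pattern: direction is the current centroid's y minus the mean y of the object's centroid history, so negative values indicate upward motion. A sketch under that assumption (np is numpy):

# sketch: how direction is typically derived for a trackable object `to`
y = [c[1] for c in to.centroids]
direction = centroid[1] - np.mean(y)
to.centroids.append(centroid)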
Example #47
        elif (initTracking):
            cv2.rectangle(frame, (ix, iy), (ix + w, iy + h), (0, 255, 255), 2)

            tracker.init([ix, iy, w, h], frame)

            initTracking = False
            onTracking = True
        elif (onTracking):
            t0 = time()
            boundingbox = tracker.update(
                frame)  #frame had better be contiguous
            t1 = time()

            boundingbox = list(map(int, boundingbox))  # map() is lazy in Python 3; materialize for indexing
            cv2.rectangle(frame, (boundingbox[0], boundingbox[1]),
                          (boundingbox[0] + boundingbox[2],
                           boundingbox[1] + boundingbox[3]), (0, 255, 255), 1)

            duration = 0.8 * duration + 0.2 * (t1 - t0)
            #duration = t1-t0
            cv2.putText(frame, 'FPS: ' + str(1 / duration)[:4].strip('.'),
                        (8, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        cv2.imshow('tracking', frame)
        c = cv2.waitKey(inteval) & 0xFF
        if c == 27 or c == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
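
The tracker.init([ix, iy, w, h], frame) call matches the pure-Python KCFpy tracker rather than OpenCV's built-in one; a hedged construction sketch under that assumption:

import kcftracker  # assumption: the KCFpy module
tracker = kcftracker.KCFTracker(True, True, True)  # hog, fixed_window, multiscale
# note: OpenCV's cv2.TrackerKCF_create() instead uses tracker.init(frame, bbox)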
Example #48
def test_main():
    # initialize the shared game state once, open the camera once (not on
    # every frame), and record the start of the 60-second round
    global score, rect_count
    score = 0
    rect_count = 0
    cap = cv2.VideoCapture(0)
    start = time.perf_counter()
    while(1):
        # Take each frame
        _, frame = cap.read()

        # Convert BGR to HSV
        hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)

        # define range of blue color in HSV
        lower_blue = np.array([75,75,50])
        upper_blue = np.array([130,200,255])

        # Threshold the HSV image to get only blue colors
        mask = cv2.inRange(hsv, lower_blue, upper_blue)

        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame,frame, mask= mask)
        img = np.zeros((639, 479), dtype=np.uint8)  # replaces legacy cv2.cv.CreateMat, removed in OpenCV 3
        
        # score and rect_count are initialized once, before the loop above
        # R = rectangle((380,234),50,50,100,27,"green")

        # print img_np
        # print img_np.shape
        # print type(img_np)
        # print img_np.size
        flip = cv2.flip(frame,180)
        seconds = 60 - (time.perf_counter() - start)  # time.clock() was removed in Python 3.8

        if seconds <= 0:
            break

        cv2.putText(flip, "Score %s" %(score), (5, 30), cv2.FONT_HERSHEY_TRIPLEX, 1.0, (0,0,0),thickness=4, lineType=cv2.CV_AA)
        cv2.putText(flip, "Time %d" %(seconds), (495, 30), cv2.FONT_HERSHEY_TRIPLEX, 1.0, (0,0,0),thickness=4, lineType=cv2.CV_AA)

        blur = cv2.GaussianBlur(res, (5, 5), 0)
        rect = Rectangle()
        if rect_count == 0:
            # spawn between one and four rectangles from generated parameters
            number = random.randint(1, 4)
            rects = [rectangle(*rect.generate()) for _ in range(number)]
            rect_count += number
        else:
            # animate the rectangles spawned on an earlier frame
            for R in rects:
                R.draw()
                R.move()
                R.regenerate()

        cv2.imshow('frame',flip)
        #cv2.imshow('mask',mask)
        #cv2.imshow('res',blur)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:
            break

    cv2.destroyAllWindows()
  if len(contours) > 0:
    #Selects the largest contour by area (variable name kept from the original)
    centroid = max(contours, key=cv2.contourArea)
    
    #Finds the x, y, and radius of the max contour
    ((x, y), radius) = cv2.minEnclosingCircle(centroid)
    
    #Output x and y coordinates
    #print(int(x), int(y))
    
    #Process to draw rectangle around tracked object
    if radius > 5:
      #Draws a yellow rectangle around tracked object
      cv2.rectangle(frame, (int(x)-int(radius), int(y)-int(radius)),(int(x)+int(radius), int(y)+int(radius)), (0, 255, 255), 6)
      #Outputs text "ACTIVE"
      cv2.putText(frame, 'TRACKING SYSTEM: ACTIVE', (10,650), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 3)
    else:
      #Outputs text "INACTIVE" since the radius of the contour is too small to read
      cv2.putText(frame, 'TRACKING SYSTEM: INACTIVE', (10,650), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 3)
  else:
    #Outputs text "INACTIVE" since no contours are read
    cv2.putText(frame, 'TRACKING SYSTEM: INACTIVE', (10,650), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 3)
 
  #Shows the current frame to the screen
  cv2.imshow("Live Video", frame)
 
  #Shows the HSV frame
  #cv2.imshow("HSV", mask)
 
  #If q is pressed, the video stream stops
  if cv2.waitKey(1) == ord("q"):