Code Example #1
File: main.py Project: johnden/rio
def power(src, angle, a, b):
    length = math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)
    dx = (b[0]-a[0])/length
    dy = (b[1]-a[1])/length
    total = 0.0  # avoid shadowing the built-in sum()
    n = 0
    out = copy.copy(src)

    for i in range(int(length)):
        y = int(a[1] + i*dy)  # pixel indices must be integers
        x = int(a[0] + i*dx)

        if -1 < angle[y][x]*(180.0/math.pi) < 361:
            e = math.acos(abs(dx*math.cos(angle[y][x]) + dy*math.sin(angle[y][x])))*(180.0/math.pi)
            total += e
            n += 1
            ddx = int(30*math.cos(angle[y][x]))
            ddy = int(30*math.sin(angle[y][x]))

            cv2.circle(out, (x, y), 2, (0, 255, 255), 2, 8)
            cv2.line(out, (x, y), (x+ddx, y+ddy), (0, 255, 255), 2, 8)
            cv2.line(out, a, b, (255, 255, 0), 2, 8)
    if n == 0:
        return 90
    else:
        return total/float(n)
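A minimal usage sketch (not from the original project): it assumes the math, copy, cv2, and numpy imports the snippet relies on, and feeds a synthetic gradient-angle map in radians.

import math, copy
import cv2
import numpy as np

src = np.zeros((200, 200, 3), np.uint8)    # blank BGR canvas for the drawing side effects
angle = np.full((200, 200), math.pi / 4)   # hypothetical gradient-angle map, radians
mean_err = power(src, angle, (20, 20), (180, 180))
print("mean angular deviation (degrees):", mean_err)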
Code Example #2
 def image_core(self):
    val, im = self.vid.read()
    posX, posY = 0, 0
    if val:
        im2 = self.image_filter(im)
        contours, hierarchy = cv2.findContours(im2, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        print(contours)
        for h, cnt in enumerate(contours):
            area = cv2.contourArea(cnt)  # contourArea misbehaved in old OpenCV builds; 2.4.2 (on Linux) was suggested
            if area > 1000:
                moments = cv2.moments(cnt)  # compute the moments once instead of four times
                posX = int(moments['m10'] / moments['m00'])
                posY = int(moments['m01'] / moments['m00'])
                cv2.circle(im, (posX, posY), 40, (0, 0, 255), 2, 1)
                cv2.circle(im, (posX + 5, posY + 5), 40, (0, 0, 255), 2, 1)
                cv2.circle(im, (posX - 5, posY - 5), 40, (0, 0, 255), 2, 1)
                cv2.circle(im, (posX + 5, posY - 5), 40, (0, 0, 255), 2, 1)
                cv2.circle(im, (posX - 5, posY + 5), 40, (0, 0, 255), 2, 1)
            else:
                posX, posY = 0, 0
        im1 = cv.fromarray(im)  # legacy cv image object expected by the caller
        cv2.waitKey(10)
        return im1, posX, posY
Code Example #3
File: pose_estimation.py Project: CoffeRobot/fato
    def draw_motion(self, im=None, draw_outliers=False):

        show_image = False

        if im is None:
            im = self.result_image
            show_image = True

        if draw_outliers:
            print("points shape: " + str(self.prev_points.shape[0]) + " outliers " +
                  str(self.outliers.shape))

        for i in range(0, self.prev_points.shape[0]):
            prev_pt = self.prev_points[i]
            next_pt = self.next_points[i]

            color = np.array([0, 255, 0])

            if draw_outliers:
                idx = 2 * i  # two outlier flags per point (x and y)
                if self.outliers[idx] or self.outliers[idx + 1]:
                    color = np.array([0, 0, 255])
                else:
                    color = np.array([0, 255, 0])

            cv2.circle(im, (int(prev_pt[0]), int(prev_pt[1])), 2, color, -1)
            cv2.line(im, (int(prev_pt[0]), int(prev_pt[1])), (int(next_pt[0]), int(next_pt[1])), np.array([255, 0, 0]), 1)

        if show_image:
            cv2.imshow("", im)
            cv2.waitKey(0)

        return im
Code Example #4
def create_circle(thresh,cnt,cx,cy):
    thresh = cv2.circle(thresh,(cx,cy), 5, (0,0,255), -1)        
    (x,y),radius = cv2.minEnclosingCircle(cnt)
    center = (int(x),int(y))
    radius = int(radius)
    thresh = cv2.circle(thresh,center,radius,(0,255,0),2)
    return thresh
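A short usage sketch (hypothetical synthetic input, not from the original project; note that cv2.findContours returns two values on OpenCV 4.x and three on 3.x):

import cv2
import numpy as np

canvas = np.zeros((200, 200, 3), np.uint8)
cv2.ellipse(canvas, (100, 100), (60, 40), 0, 0, 360, (255, 255, 255), -1)  # white blob
gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
contours, _ = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x
cnt = max(contours, key=cv2.contourArea)
M = cv2.moments(cnt)
cx, cy = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
marked = create_circle(canvas, cnt, cx, cy)
cv2.imwrite('marked.png', marked)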
Code Example #5
def isInCircle(x, y, circle):
	dist = math.sqrt(math.pow(x - circle[0], 2) + math.pow(y - circle[1], 2))
	if dist <= 100:  # fixed radius of 100 px
		return True
	else:
		cv2.circle(original, (x, y), 1, (255, 255, 255))  # mark misses on the global image
		return False
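A usage sketch (values hypothetical; the function reads a module-level `original` image and hard-codes the 100 px radius):

import math
import cv2
import numpy as np

original = np.zeros((300, 300, 3), np.uint8)   # global canvas the function draws misses on
circle = (150, 150)                            # only circle[0] and circle[1] are used
print(isInCircle(160, 160, circle))            # True: within 100 px of the center
print(isInCircle(10, 10, circle))              # False: also marks the point on `original`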
Code Example #6
def featureMatching(input_keypoint_list, input_xy_list, input_img, template_img):
  # Load the template's keypoint binaries and xy coordinates
  template_keypoint_list = []
  template_keypoint = csv.reader(open('template_keypoint_binary.csv', 'r'))
  for temp_key in template_keypoint:
    template_keypoint_list.append(list(map(int, temp_key)))
  template_xy_list = []
  template_xy = csv.reader(open('template_keypoint_xy.csv', 'r'))
  for temp_xy in template_xy:
    template_xy_list.append(list(map(int, temp_xy)))
  sum_img = np.hstack((input_img, template_img))  # input and template side by side
  template_add_xsize = []
  # A Hamming distance of zero counts as a match
  for temp_y in range(len(template_keypoint_list)):
    for inp_y in range(len(input_keypoint_list)):
      calc_list = np.array(template_keypoint_list[temp_y]) - np.array(input_keypoint_list[inp_y])
      if all(x == 0 for x in calc_list):
        # On a match, map the x,y stored with the matching keypoint binaries onto the image plane and output the result
        print('get match')
        temp_xy = template_xy_list[temp_y]
        inp_xy = input_xy_list[inp_y]
        template_add_xsize = [temp_xy[1] + len(input_img[0]), temp_xy[0]]
        input_change_xy = [inp_xy[1], inp_xy[0]]
        add_input = tuple(input_change_xy)
        add_template = tuple(template_add_xsize)
        cv2.circle(sum_img, add_input, 3, (0, 0, 0), -1)
        cv2.circle(sum_img, add_template, 3, (0, 0, 255), -1)
        #cv2.line(sum_img, add_input, add_template, (255, 255, 0), 1)
  print('quit calc')
  cv2.imwrite('sum_img.tif', sum_img)
Code Example #7
File: plane_tracker.py Project: MCobias/opencv
    def run(self):
        while True:
            playing = not self.paused and not self.rect_sel.dragging
            if playing or self.frame is None:
                ret, frame = self.cap.read()
                if not ret:
                    break
                self.frame = frame.copy()

            vis = self.frame.copy()
            if playing:
                tracked = self.tracker.track(self.frame)
                for tr in tracked:
                    cv2.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2)
                    for (x, y) in np.int32(tr.p1):
                        cv2.circle(vis, (x, y), 2, (255, 255, 255))

            self.rect_sel.draw(vis)
            cv2.imshow('plane', vis)
            ch = cv2.waitKey(1)
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.tracker.clear()
            if ch == 27:
                break
Code Example #8
File: Assignment2.py Project: MartinFaartoft/sigb
def showImageandPlot(N):
    #A simple attempt to get mouse inputs and display images using matplotlib
    I = cv2.imread('groundfloor.bmp')
    drawI = I.copy()
    #make figure and two subplots
    fig = figure(1)
    ax1  = subplot(1,2,1)
    ax2  = subplot(1,2,2)
    ax1.imshow(I)
    ax2.imshow(drawI)
    ax1.axis('image')
    ax1.axis('off')
    points = fig.ginput(5)
    fig.hold('on')

    for p in points:
        #Draw on figure
        subplot(1,2,1)
        plot(p[0],p[1],'rx')
        #Draw in image
        cv2.circle(drawI,(int(p[0]),int(p[1])),2,(0,255,0),10)
    ax2.cla()  # cla is a method and must be called
    ax2.imshow(drawI)
    draw() #update display: updates are usually deferred
    show()
    savefig('somefig.jpg')
    cv2.imwrite("drawImage.jpg", drawI)
Code Example #9
  def spin(self):
    time.sleep(1.0)
    started = time.time()
    counter = 0
    cvim = numpy.zeros((480, 640, 1), numpy.uint8)
    ball_xv = 10
    ball_yv = 10
    ball_x = 100
    ball_y = 100

    cvb = CvBridge()

    while not rospy.core.is_shutdown():

      cvim.fill(0)
      cv2.circle(cvim, (ball_x, ball_y), 10, 255, -1)

      ball_x += ball_xv
      ball_y += ball_yv
      if ball_x in [10, 630]:
        ball_xv = -ball_xv
      if ball_y in [10, 470]:
        ball_yv = -ball_yv

      self.pub.publish(cvb.cv2_to_imgmsg(cvim))

      time.sleep(0.03)
Code Example #10
def show_circle(img, img_file_name):
    temp_img = cv2.medianBlur(img, 5)
    c_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    circles = find_circle(temp_img, 10, 100, 30)
    ix, iy = 0, 0

    if circles is None:
        circle_out_str = "_circle_%d-%d.png" % (ix, iy)
        circle_out_file = re.sub(r'\.jpg', circle_out_str, img_file_name)
        cv2.imwrite(circle_out_file, c_img)
        return False

    for i in circles[0, :]:
        center = (i[0], i[1])
        ix, iy = i[0], i[1]
        radius = i[2]
        circle_color = (0, 255, 0)
        cv2.circle(c_img, center, radius, circle_color, 1)
        center_color = (0, 0, 255)
        cv2.circle(c_img, center, 2, center_color, 1)

    circle_out_str = "_circle_%d-%d.png" % (ix, iy)
    circle_out_file = re.sub(r'\.jpg', circle_out_str, img_file_name)
    cv2.imwrite(circle_out_file, c_img)
    return True
Code Example #11
File: find_robot.py Project: Zenohm/ACORL_MazeSolver
def draw_largest_contour(image, center, radius, min_radius=10, color=(0, 0, 0)):
    # only proceed if the radius meets a minimum size
    if radius > min_radius:
        # draw the circle and centroid on the frame,
        # then update the list of tracked points
        cv2.circle(image, center, radius, color, 2)
    return image
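`center` and `radius` would typically come from cv2.minEnclosingCircle; a hedged sketch with synthetic input (not from the original project):

import cv2
import numpy as np

image = np.zeros((240, 320, 3), np.uint8)
cv2.circle(image, (160, 120), 40, (255, 255, 255), -1)  # synthetic blob
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
contours, _ = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x
(x, y), radius = cv2.minEnclosingCircle(max(contours, key=cv2.contourArea))
image = draw_largest_contour(image, (int(x), int(y)), int(radius))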
Code Example #12
File: image.py Project: joemirizio/WENDE
    def process(self):
        self.last_frame = self.img_source.read()

        if self.avg_frame is None:
            self.avg_frame = self.last_frame

        # Find objects from the image source
        self.last_frame, self.avg_frame, img_data = self.odm.findObjects(
            self.last_frame, self.avg_frame, self.frame_type)

        # Display calibration points 
        if self.cal_data and self.frame_type == 'main':
            for num, cal_point in enumerate(self.cal_data.image_points, 1):
                point = (cal_point[0], cal_point[1])
                color_intensity = ((num - 1) % 3) / 3.0 * 200 + 55
                color = (0, 0, color_intensity) if num > 3 else (0, color_intensity, 0)
                cv.circle(self.last_frame, point, 5, color, thickness=-1)
                cv.circle(self.last_frame, point, 5, [0, 0, 0], thickness=2)

        # Display calibration status
        cal_status_color = [0, 255, 0] if self.cal_data else [0, 0, 255]
        cv.circle(self.last_frame, (self.img_source.width - 25, 25), 20, [0, 0, 0], thickness=5) 
        cv.circle(self.last_frame, (self.img_source.width - 25, 25), 20, cal_status_color, thickness=-1)
        cv.circle(self.last_frame, (self.img_source.width - 25, 25), 20, [255, 255, 255], thickness=2) 

        self.data_proc.process(img_data, self)

        return self.last_frame
Code Example #13
def img_test(forest, feature_extractor, points, responses, colors, filename,
             size=512, radius=3, proba=True):
    # `responses` added to the signature: it is used below but was previously undefined
    img = np.zeros((size, size, 3))
    v_min = points.min()
    v_max = points.max()
    step = float(v_max - v_min) / img.shape[0]
    grid = np.arange(v_min, v_max, step)

    xy = np.array(list(itertools.product(grid, grid)))
    features = feature_extractor.apply_all(xy)

    if proba:
        r = forest.predict_proba(features)
        col = np.dot(r, colors)
    else:
        r = forest.predict(features).astype('int32')
        col = colors[r]
    img[((xy[:, 1] - v_min) / step).astype('int32'),
        ((xy[:, 0] - v_min) / step).astype('int32')] = col

    points = ((points - v_min) / step).astype('int')
    for p, r in zip(points, responses):
        col = tuple(colors[int(r)])
        cv2.circle(img, tuple(p), radius + 1, (0, 0, 0), thickness=-1)
        cv2.circle(img, tuple(p), radius, col, thickness=-1)

    cv2.imwrite(filename, img)
Code Example #14
File: globalWB.py Project: crzyjcky/globalWB
    def draw_circles(self, circle_radius=15):
        single_channel = np.ones((self.height, self.width), np.uint8) * 255

        for cp in self.circle_positions:
            cv2.circle(single_channel, (cp[0], cp[1]), circle_radius, [0, 0, 0], -1)

        self.frame = cv2.merge((single_channel, single_channel, single_channel))
Code Example #15
    def show_match(self, image_test, descriptors_all):
        """
        :type image_test: Image
        :type matches: np.array
        :type distances: np.array
        """
        distances, matches = self._classifier.kneighbors(descriptors_all, return_distance=True, n_neighbors=1)

        image_test_rgb = image_test.get_rgb()  # type: Image

        for feature, matchs, distancess in zip(image_test.get_all_features(), matches, distances):
            xy1, w1 = feature.get_global_xy_w()
            for m in matchs:
                other_feature = self._features_all[m]
                image_train_rgb = other_feature.get_image().get_rgb()
                xy2, w2 = other_feature.get_global_xy_w()

                offset = image_test_rgb.shape[1]
                size = offset + image_train_rgb.shape[1]
                xy2 += [offset, 0]

                showoff = np.zeros((Image.DEFAULT_HEIGHT, size, 3), np.uint8)

                showoff[0:image_test_rgb.shape[0], 0:image_test_rgb.shape[1], :] = image_test_rgb
                showoff[0:image_train_rgb.shape[0], 0 + offset:image_train_rgb.shape[1] + offset, :] = image_train_rgb

                cv2.line(showoff, tuple(xy1), tuple(xy2), (0, 0, 255), thickness=1)
                cv2.circle(showoff, tuple(xy1), w1, (0, 0, 255), thickness=1)
                cv2.circle(showoff, tuple(xy2), w2, (0, 0, 255), thickness=1)
                plt.imshow(cv2.cvtColor(showoff, cv2.COLOR_RGB2BGR)), plt.show()
Code Example #16
def get_polyline(image,window_name):
    cv2.namedWindow(window_name)
    class GetPoly:
        xys = []        
        done = False
        def callback(self,event, x, y, flags, param):
            if self.done == True:
                pass
            elif event == cv2.EVENT_LBUTTONDOWN:
                self.xys.append((x,y))
            elif event == cv2.EVENT_MBUTTONDOWN:
                self.done = True
    gp = GetPoly()
    cv2.setMouseCallback(window_name,gp.callback)
    print "press middle mouse button or 'c' key to complete the polygon"
    while not gp.done:
        im_copy = image.copy()
        for (x,y) in gp.xys:
            cv2.circle(im_copy,(x,y),2,(0,255,0))
        if len(gp.xys) > 1 and not gp.done:
            cv2.polylines(im_copy,[np.array(gp.xys).astype('int32')],False,(0,255,0),1)
        cv2.imshow(window_name,im_copy)
        key = cv2.waitKey(50)
        if key == ord('c'): gp.done = True
    #cv2.destroyWindow(window_name)
    return gp.xys
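Interactive usage might look like the following (the image path is a placeholder, not from the original source):

import cv2
import numpy as np

image = cv2.imread('scene.jpg')          # hypothetical input image
if image is None:                        # fall back to a blank canvas for testing
    image = np.zeros((480, 640, 3), np.uint8)
xys = get_polyline(image, 'polygon')     # left-click to add points, middle button or 'c' to finish
print(xys)
cv2.destroyAllWindows()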
Code Example #17
def bot_position(hsv):
    
    bot_lower = np.array([0,45,255])
    bot_upper = np.array([40,255,255])
    
    #####front end masking and centroid
    mask = cv2.inRange(hsv,bot_lower, bot_upper)
    contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    contours=sorted(contours, key = cv2.contourArea, reverse = True)[:2]
    #contours,length=areacon(contours,700,300)
    #contours=sorted(contours, key = cv2.contourArea, reverse = True)[:length]
    #cv2.drawContours(frame,contours,-1,(100,100,255),1)
    cv2.imshow('bot',mask)
    #print "len ",len(contours)
    M = cv2.moments(contours[0])
    cx1 = int(M['m10']/M['m00'])
    cy1 = int(M['m01']/M['m00'])
    cv2.circle(frame,(cx1,cy1), 5, (255,0,255), -1)
    Bot_position[0][0]=cx1
    Bot_position[0][1]=cy1
    #print cx1,cy1
    #print Bot_position[0][0],
    M = cv2.moments(contours[1])
    cx2 = int(M['m10']/M['m00'])
    cy2 = int(M['m01']/M['m00'])
    cv2.circle(frame,(cx2,cy2), 5, (0,0,255), -1)
    Bot_position[1][0]=cx2
    Bot_position[1][1]=cy2

    print(cx1, cy1, "1")
    print(cx2, cy2, "2")
Code Example #18
def circularHough(gray):
    ''' Performs a circular Hough transform of the image `gray` and shows the detected circles.
    The circle with the most votes is shown in red and the rest in green. '''
    #See http://opencv.itseez.com/modules/imgproc/doc/feature_detection.html?highlight=houghcircle#cv2.HoughCircles
    blur = cv2.GaussianBlur(gray, (31,31), 11)

    dp = 6; minDist = 30
    highThr = 20 #High threshold for Canny
    accThr = 850 #Accumulator threshold for circle centers at the detection stage; the smaller it is, the more false circles may be detected
    minRadius = 50   #the original swapped these names but passed them in min/max order
    maxRadius = 155
    circles = cv2.HoughCircles(blur, cv2.cv.CV_HOUGH_GRADIENT, dp, minDist, None, highThr, accThr, minRadius, maxRadius)

    #Make a color image from gray for display purposes
    gColor = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
    if circles is not None:
        all_circles = circles[0]
        M, N = all_circles.shape
        k = 1
        for c in all_circles:
            cv2.circle(gColor, (int(c[0]), int(c[1])), int(c[2]), (int(k*255/M), k*128, 0))
            k = k + 1
        c = all_circles[0, :]  # the strongest circle, drawn only when circles were found
        cv2.circle(gColor, (int(c[0]), int(c[1])), int(c[2]), (0, 0, 255), 5)
    cv2.imshow("hough", gColor)
Code Example #19
 def draw_field_circles(self):
     img = np.zeros((420, 620, 4), np.uint8)
     cv2.line(
         img,
         (110, 110),
         (510, 110),
         (255, 255, 255),
         3)
     cv2.line(
         img,
         (110, 110),
         (110, 410),
         (255, 255, 255),
         3)
     cv2.line(
         img,
         (510, 110),
         (510, 410),
         (255, 255, 255),
         3)
     cv2.line(
         img,
         (110, 410),
         (510, 410),
         (255, 255, 255),
         3)
     cv2.circle(
         img,
         (305, 410),
         60,
         (255, 255, 255),
         3)
     return img
Code Example #20
def GetPupil(gray,thr):
    tempResultImg = cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR) #used to draw temporary results

    props = RegionProps()
    val,binI = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY_INV)
    
    #Combining Closing and Opening to the thresholded image
    st7 = cv2.getStructuringElement(cv2.MORPH_CROSS,(7,7))
    st9 = cv2.getStructuringElement(cv2.MORPH_CROSS,(9,9))
    st15 = cv2.getStructuringElement(cv2.MORPH_CROSS,(15,15))
             
    binI = cv2.morphologyEx(binI, cv2.MORPH_CLOSE, st9) #Close 
    binI= cv2.morphologyEx(binI, cv2.MORPH_OPEN, st15) #Open
    binI = cv2.morphologyEx(binI, cv2.MORPH_DILATE, st7, iterations=2) #Dilate
    
    cv2.imshow("ThresholdPupil",binI)
    #Calculate blobs
    sliderVals = getSliderVals() #Getting slider values
    contours, hierarchy = cv2.findContours(binI, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) #Finding contours/candidates for pupil blob
    pupils = []
    pupilEllipses = []
    for cnt in contours:
        values = props.CalcContourProperties(cnt,['Area','Length','Centroid','Extend','ConvexHull']) #BUG - Add cnt.astype('int') in Windows
        if values['Area'] < sliderVals['maxSizePupil'] and values['Area'] > sliderVals['minSizePupil'] and values['Extend'] < 0.9:
            pupils.append(values)
            centroid = (int(values['Centroid'][0]),int(values['Centroid'][1]))
            cv2.circle(tempResultImg,centroid, 2, (0,0,255),4)
            pupilEllipses.append(cv2.fitEllipse(cnt))
    cv2.imshow("TempResults",tempResultImg)
    return pupilEllipses 
Code Example #21
def GetGlints(gray,thr):
    tempResultImg = cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR) #used to draw temporary results

    props = RegionProps()
    val,binI = cv2.threshold(gray, thr, 255, cv2.THRESH_BINARY) #Using non inverted binary image
    
    #Combining opening and dilating seems to work best, though other glints remain visible too
    st7 = cv2.getStructuringElement(cv2.MORPH_CROSS,(7,7))
    st9 = cv2.getStructuringElement(cv2.MORPH_CROSS,(7,7)) #note: despite the name, this is a 7x7 kernel
    
    binI= cv2.morphologyEx(binI, cv2.MORPH_OPEN, st7)
    binI = cv2.morphologyEx(binI, cv2.MORPH_DILATE, st9, iterations=2)
    
    cv2.imshow("ThresholdGlints",binI)
    #Calculate blobs
    sliderVals = getSliderVals() #Getting slider values
    contours, hierarchy = cv2.findContours(binI, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) #Finding contours/candidates for pupil blob
    glints = []
    glintEllipses = []
    for cnt in contours:
        values = props.CalcContourProperties(cnt,['Area','Length','Centroid','Extend','ConvexHull']) #BUG - Add cnt.astype('int') in Windows
        if values['Area'] < sliderVals['maxSizeGlints'] and values['Area'] > sliderVals['minSizeGlints']:
            glints.append(values)
            centroid = (int(values['Centroid'][0]),int(values['Centroid'][1]))
            cv2.circle(tempResultImg,centroid, 2, (0,0,255),4)
            glintEllipses.append(cv2.fitEllipse(cnt))
    cv2.imshow("TempResults",tempResultImg)
    return glintEllipses
Code Example #22
File: main.py Project: DukasGuo/pycv
  def update(self, frame):
    # print "updating %d " % self.id
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    back_project = cv2.calcBackProject([hsv],[0], self.roi_hist,[0,180],1)
    
    if args.get("algorithm") == "c":
      ret, self.track_window = cv2.CamShift(back_project, self.track_window, self.term_crit)
      pts = cv2.boxPoints(ret)
      pts = np.int0(pts)
      self.center = center(pts)
      cv2.polylines(frame,[pts],True, 255,1)
      
    if not args.get("algorithm") or args.get("algorithm") == "m":
      ret, self.track_window = cv2.meanShift(back_project, self.track_window, self.term_crit)
      x,y,w,h = self.track_window
      self.center = center([[x,y],[x+w, y],[x,y+h],[x+w, y+h]])  
      cv2.rectangle(frame, (x,y), (x+w, y+h), (255, 255, 0), 2)

    self.kalman.correct(self.center)
    prediction = self.kalman.predict()
    cv2.circle(frame, (int(prediction[0]), int(prediction[1])), 4, (255, 0, 0), -1)
    # fake shadow
    cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center), (11, (self.id + 1) * 25 + 1),
        font, 0.6,
        (0, 0, 0),
        1,
        cv2.LINE_AA)
    # actual info
    cv2.putText(frame, "ID: %d -> %s" % (self.id, self.center), (10, (self.id + 1) * 25),
        font, 0.6,
        (0, 255, 0),
        1,
        cv2.LINE_AA)
Code Example #23
File: hand.py Project: WWPOL/CV-Pong
	def captureImage(self):
		position = [0, 0]
		velocity = [0, 0]

		frame = self.cap.read()[1]

		if frame is not None:  # comparing an array with != would be ambiguous
			frame = cv2.flip(frame, 1)

			self.frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

			threshold_mask = self.createMultipleThresholds(self.frame_hsv)


			contour = self.getLargestContour(threshold_mask)
			if type(contour) != int:
				cv2.drawContours(frame, contour, -1, (0, 255, 255), 2)
				position = self.getContourMoment(contour)
				cv2.circle(frame, (position[0], position[1]), 5, (0,0,255), -1)

			# calculate velocity
			velocity = [position[0] - self.oldPosition[0], position[1] - self.oldPosition[1]]
			# print velocity

			cv2.imshow("Frame", frame)
			cv2.waitKey(10)

		self.oldPosition = position
		return [position, velocity]
Code Example #24
File: Orbit.py Project: wildweasel/UAVsim
	def calcFlightPath(self):

		# Don't calculate for pre-load parameter changes
		if self.rawOverhead is None:			
			return
		
		# Ellipse parameters
		a = float(self.majorAxis.get())
		b = float(self.minorAxis.get())
		offsetX = self.rawOverhead.shape[1]/2 + int(self.centerX.get())
		offsetY = self.rawOverhead.shape[0]/2 - int(self.centerY.get())
		alpha = float(self.axisYawAngle.get())/180*np.pi		
		height = float(self.height.get())
		
		# Flight path equation - parametric equation of an ellipse
		#  x = x0 + a * cos t * cos alpha - b * sin t * sin alpha 
		#  y = y0 + a * cos t * sin alpha + b * sin t * cos alpha 		
		self.flightPath = [(int(offsetX+a*np.cos(t)*np.cos(alpha)+b*np.sin(t)*np.sin(alpha)), 
						   int(offsetY+a*np.cos(t)*np.sin(alpha)-b*np.sin(t)*np.cos(alpha)), height) for t in np.linspace(0,2*np.pi,self.resolution)]
		
		# Instantaneous flight headings
		# dx/dt = - a * sin t * cos alpha - b * cos t * sin alpha 
		# dy/dt = - a * sin t * sin alpha + b * cos t * cos alpha 
		self.flightHeadings = [np.arctan2(-a*np.sin(t)*np.sin(alpha)+b*np.cos(t)*np.cos(alpha),
							   -a*np.sin(t)*np.cos(alpha)-b*np.cos(t)*np.sin(alpha)) for t in np.linspace(0,2*np.pi,self.resolution)]
		
		# We will re-use the overhead image, so make a copy to draw the flight path on
		self.overheadFlightPath = self.rawOverhead.copy()			
		for pos in self.flightPath:
			cv2.circle(self.overheadFlightPath, pos[0:2], 10, (255, 255, 0), -1)

		# Set up the camera and draw its stare
		overheadDetail = self.orientCamera()	
Code Example #25
def MomentDescriptor(name, thres):
    img1=cv2.imread(name)   
#     img=img1
    img=cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
#   edges=cv2.Canny(img, thres, thres*2) 
    #Image to draw the contours
    drawing=np.zeros(img.shape[:2], np.uint8)
    ret,thresh = cv2.threshold(img,thres,255,0)
   
    contours, hierarchy=cv2.findContours(thresh,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    MomentVector=[]
    
    for cnt in contours:
        M=cv2.moments(cnt) #Calculate moments
        if M['m00']!=0:
            Cx=int(M['m10']/M['m00'])
            Cy=int(M['m01']/M['m00'])
            
            Moments_Area=M['m00'] # Contour area moment
            Contours_Area=cv2.contourArea(cnt) # Contour area using in_built function
           #Draw moment
            rect = cv2.minAreaRect(cnt)
            box = cv2.cv.BoxPoints(rect)
            box = np.int0(box)
#           cv2.drawContours(img1,contours, 0, (0,255,0),3) #draw contours in green
            cv2.drawContours(img1,[box],0,(0,0,255),1)
            cv2.circle(img1, (Cx,Cy), 3,(0,255,0), -1) #draw centroids in green
            MomentVector.append([M['m00'],Cx,Cy])
            cv2.imshow('winname',img1)
            cv2.waitKey(5000)
    print(MomentVector)
Code Example #26
File: consumer.py Project: simonfojtu/cv-utils
def detectCircles(image):
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (13, 13), 6)

    circles = cv2.HoughCircles(blur,cv2.HOUGH_GRADIENT,1,20,
                                        param1=50,param2=30,minRadius=9,maxRadius=0)


    margin = 5
    img = image.copy()
    points = None
    if circles is not None:
        points = []
        circles = np.uint16(np.around(circles))
        for circle in circles[0]:
            c = (circle[0], circle[1]) # x, y:
            r = circle[2]
            cv2.circle(img, c, r, (0, 200, 0), 4)

            pxs = img[c[1]-margin:c[1]+margin, c[0]-margin:c[0]+margin]
            v = np.amax(pxs)
            cv2.putText(img, str(v), c, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (200, 255, 0))
            points.append((c, v))

    cv2.imshow("circles", img)

    return points
Code Example #27
def draw_match(img1, img2, p1, p2, status = None, H = None):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1+w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if H is not None:
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
        cv2.polylines(vis, [corners], True, (255, 255, 255))
    
    if status is None:
        status = np.ones(len(p1), np.bool_)
    green = (0, 255, 0)
    red = (0, 0, 255)
    for (x1, y1), (x2, y2), inlier in zip(np.int32(p1), np.int32(p2), status):
        col = [red, green][inlier]
        if inlier:
            cv2.line(vis, (x1, y1), (x2+w1, y2), col)
            cv2.circle(vis, (x1, y1), 2, col, -1)
            cv2.circle(vis, (x2+w1, y2), 2, col, -1)
        else:
            r = 2
            thickness = 3
            cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
            cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
            cv2.line(vis, (x2+w1-r, y2-r), (x2+w1+r, y2+r), col, thickness)
            cv2.line(vis, (x2+w1-r, y2+r), (x2+w1+r, y2-r), col, thickness)
    return vis
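A self-contained sketch that feeds draw_match two grayscale images and a couple of hand-made correspondences (all values hypothetical):

import cv2
import numpy as np

img1 = np.zeros((240, 320), np.uint8)
img2 = np.zeros((240, 320), np.uint8)
p1 = np.array([[50, 60], [100, 120]])   # points in img1
p2 = np.array([[55, 62], [98, 118]])    # matching points in img2
status = np.array([True, False])        # second pair marked as an outlier
vis = draw_match(img1, img2, p1, p2, status)
cv2.imwrite('matches.png', vis)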
Code Example #28
File: main.py Project: DarkAvenger36/BallFollower
def findCircles(thresholded, hsvFrame):
    global found, ball_state, maxRadius
    #apply Hough
    circles = cv2.HoughCircles(thresholded, cv2.cv.CV_HOUGH_GRADIENT, dp=2, minDist=60, param1=100, param2=40, minRadius=5, maxRadius=60)

    #NOTE: circles is a 1 x n x 3 matrix
    #print circles
    found = False
    x = 0
    if circles is not None:
            maxRadius = 0
            y = 0

            for i in range(circles.size // 3):  # integer division for Python 3
                    circle=circles[0,i]
                    cv2.circle(hsvFrame, (circle[0],circle[1]), circle[2], (255,0,0),2)
                    if circle[2]>maxRadius:
                            radius=int(circle[2])
                            maxRadius=int(radius)
                            x=int(circle[0])
                            y=int(circle[1])
            found=True
            ball_state += 1
            cv2.circle(hsvFrame, (x,y), maxRadius, (0,255,0),2)
            print "Le coordinate del centro sono: ("+ str(x) +"," + str(y)+")"
    else:
        ball_state = 0

    return x
Code Example #29
File: imgproc.py Project: benlansdell/hydra
def drawPoints(img, pts, types = None):
	npts = len(pts)
	colors = [[0, 255, 0], [255, 0, 0], [0, 0, 255]]
	if types is None:
		types = [0]*npts 
	for (i,pt) in enumerate(pts):
		cv2.circle(img,tuple(pt.astype(int)),3,colors[types[i]],-1)
Code Example #30
    # print(boxes[0, :, :])

    new_im, new_pts, heatmap, offset, mask = gen_gt(im, pts)
    
    print('-'*20)
    print(heatmap.shape)
    print(offset.shape)
    print(mask.shape)
    print(np.where(mask==1))
    

    # print(boxes.shape)
    for i in range(new_pts.shape[0]):
        pt = new_pts[i]
        # print(pt[0, ])
        cv2.circle(new_im, tuple(pt), 1, (0, 0, 255), -1)

    for i in range(pts.shape[0]):
        pt = pts[i]
        # print(pt[0, ])
        cv2.circle(im, tuple(pt), 1, (0, 0, 255), -1)
    
    # heatmap = np.squeeze(heatmap, axis=2)
    max_value = np.max(heatmap)
    heatmap = (heatmap*255/max_value).astype('uint8')

    max_value_ = np.max(offset)
    offset = (offset*255/max_value_).astype('uint8')
    offset[offset>0]=100
    # cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
Code Example #31
File: ball.py Project: Terminaator/robot
while True:

    start = time.time()
    frame = pipeline.wait_for_frames()
    color_frame = frame.get_color_frame()
    frame = np.asanyarray(color_frame.get_data())
    cv2.getTrackbarPos("1", "Trackbars")
    ball = segment_colour(frame)
    rec, area = find_blob(ball)
    (x, y, w, h) = rec
    if (w * h) < 10:
        found = 0
    else:
        found = 1
        simg2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
        centre_x = x + ((w) / 2)
        centre_y = y + ((h) / 2)
        if 280 < centre_x < 360:
            print(centre_x)
        cv2.circle(frame, (int(centre_x), int(centre_y)), 3, (0, 110, 255), -1)
        centre_x -= 80
        centre_y = 6 - -centre_y
    cv2.imshow('Processed', frame)
    cv2.imshow('treshold', ball)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
Code Example #32
File: my.py Project: kgargdun/gesture_recoginition
def count_fingers(thresholded, hand_segment):
    
    
    # Calculated the convex hull of the hand segment
    conv_hull = cv2.convexHull(hand_segment)
    
    # Now the convex hull will have at least 4 most outward points, on the top, bottom, left, and right.
    # Let's grab those points by using argmin and argmax. Keep in mind, this would require reading the documentation
    # And understanding the general array shape returned by the conv hull.

    # Find the top, bottom, left , and right.
    # Then make sure they are in tuple format
    top    = tuple(conv_hull[conv_hull[:, :, 1].argmin()][0])
    bottom = tuple(conv_hull[conv_hull[:, :, 1].argmax()][0])
    left   = tuple(conv_hull[conv_hull[:, :, 0].argmin()][0])
    right  = tuple(conv_hull[conv_hull[:, :, 0].argmax()][0])

    # In theory, the center of the hand is half way between the top and bottom and halfway between left and right
    cX = (left[0] + right[0]) // 2
    cY = (top[1] + bottom[1]) // 2

    # find the maximum euclidean distance between the center of the palm
    # and the most extreme points of the convex hull
    
    # Calculate the Euclidean Distance between the center of the hand and the left, right, top, and bottom.
    distance = pairwise.euclidean_distances([(cX, cY)], Y=[left, right, top, bottom])[0]
    
    # Grab the largest distance
    max_distance = distance.max()
    
    # Create a circle whose radius is 80% of the max euclidean distance
    radius = int(0.8 * max_distance)
    circumference = (2 * np.pi * radius)

    # Now grab an ROI of only that circle
    circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
    
    # draw the circular ROI
    cv2.circle(circular_roi, (cX, cY), radius, 255, 10)
    
    
    # Using bit-wise AND with the circle ROI as a mask.
    # This then returns the cut out obtained using the mask on the thresholded hand image.
    circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)

    # Grab contours in circle ROI
    image, contours, hierarchy = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    # Finger count starts at 0
    count = 0

    # loop through the contours to see if we count any more fingers.
    for cnt in contours:
        
        # Bounding box of contour
        (x, y, w, h) = cv2.boundingRect(cnt)

        # Increment count of fingers based on two conditions:
        
        # 1. Contour region is not the very bottom of hand area (the wrist)
        out_of_wrist = ((cY + (cY * 0.25)) > (y + h))
        
        # 2. Number of points along the contour does not exceed 25% of the circumference of the circular ROI (otherwise we're counting points off the hand)
        limit_points = ((circumference * 0.25) > cnt.shape[0])
        
        
        if  out_of_wrist and limit_points:
            count += 1

    return count
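A rough driver for count_fingers (synthetic blob standing in for a thresholded hand, not from the original project; the three-value cv2.findContours inside the function pins it to OpenCV 3.x):

import cv2
import numpy as np
from sklearn.metrics import pairwise   # dependency used inside count_fingers

thresholded = np.zeros((300, 300), np.uint8)
cv2.ellipse(thresholded, (150, 180), (60, 90), 0, 0, 360, 255, -1)  # stand-in "hand"
_, contours, _ = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL,
                                  cv2.CHAIN_APPROX_SIMPLE)          # OpenCV 3.x signature
hand_segment = max(contours, key=cv2.contourArea)
print("fingers counted:", count_fingers(thresholded, hand_segment))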
Code Example #33
import os
import cv2 as cv
import numpy as np  

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

photos_path = os.path.join(BASE_DIR, 'photos')
videos_path = os.path.join(BASE_DIR, 'videos')

img = cv.imread(os.path.join(photos_path, 'cats.jpg'))
cv.imshow('cats', img)

height = img.shape[0]
width = img.shape[1]

blank = np.zeros(shape=(height, width), dtype='uint8')
cv.imshow('blank', blank)

rectangle = cv.rectangle(blank.copy(), (width//4, height//4), ((width//4)*3, (height//4)*3), 255, -1)
circle = cv.circle(blank.copy(), (width//2, height//2), 150, 255, -1)
cv.imshow('rectangle', rectangle)

print('rectangle shape', rectangle.shape)

cropped_circle = cv.bitwise_and(img, img, mask=circle)
cv.imshow('cropped_circle', cropped_circle)

cv.waitKey(0)
Code Example #34
    ret1,th2=cv2.threshold(th1,127,255,cv2.THRESH_TOZERO) 
    _,contours,hierarchy=cv2.findContours(th2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #find the contours in the image

    
    if len(contours)>0:
        centroid=max(contours,key=cv2.contourArea) #pick the contour with the largest area
        M=cv2.moments(centroid) 
        area=cv2.contourArea(centroid)
        if area>3000:
            cx=int(M['m10']/M['m00']) #compute the x and y coordinates of the centroid
            cy=int(M['m01']/M['m00'])

            cv2.line(image,(cx,0),(cx,720),(255,0,0),1) #draw guide lines through these coordinates
            cv2.line(image,(0,cy),(1280,cy),(255,0,0),1)
            cv2.line(image,(320,0),(320,640),(255,0,0),1)
            cv2.circle(image,(cx,cy),3,(0,0,255),-1)
            cv2.drawContours(image,contours,-1,(0,255,0),2) #draw the contour area
            if cx<500:
                print("turn right") #if the reference value (x coordinate) is below 500, turn right
            if cx>520:
                print("TURN LEFT") #if the reference value (x coordinate) is above 520, turn left
            if 500<cx<520:
                print("go straight") #if the reference value (x coordinate) is between 500 and 520, go straight
    cv2.imshow('Processed Image',image) #show the frames we processed
    cv2.imshow('th1',th1)
    cv2.imshow('th2',th2)
    cv2.imshow("input", res)
    key = cv2.waitKey(10) #close all frames if 'ESC' was pressed
    if key == 27:
        break
cv2.destroyAllWindows() #kill all windows
Code Example #35
import numpy as np
import cv2

#img = cv2.imread('lena.jpg', 1)
img = np.zeros([512, 512, 3], np.uint8)

img = cv2.line(img, (0, 0), (255, 255), (147, 96, 44), 10)
img = cv2.arrowedLine(img, (0, 255), (255, 255), (255, 0, 0), 10)
img = cv2.rectangle(img, (384, 0), (510, 128), (0, 0, 255), 5)
img = cv2.circle(img, (447, 63), 63, (0, 255, 0), -1)

font = cv2.FONT_HERSHEY_SIMPLEX
img = cv2.putText(img, 'OpenCV', (10, 500), font, 4,
                  (0, 255, 255), 10, cv2.LINE_AA)

cv2.imshow('image', img)

cv2.waitKey(0)
cv2.destroyAllWindows()
Code Example #36
File: benchmark.py Project: songys96/DeepLabCut-live
def benchmark(
    model_path,
    video_path,
    tf_config=None,
    resize=None,
    pixels=None,
    cropping=None,
    dynamic=(False, 0.5, 10),
    n_frames=1000,
    print_rate=False,
    display=False,
    pcutoff=0.0,
    display_radius=3,
    cmap="bmy",
    save_poses=False,
    save_video=False,
    output=None,
) -> typing.Tuple[np.ndarray, tuple, bool, dict]:
    """ Analyze DeepLabCut-live exported model on a video:
    Calculate inference time,
    display keypoints, or
    get poses/create a labeled video

    Parameters
    ----------
    model_path : str
        path to exported DeepLabCut model
    video_path : str
        path to video file
    tf_config : :class:`tensorflow.ConfigProto`
        tensorflow session configuration
    resize : int, optional
        resize factor. Can only use one of resize or pixels. If both are provided, will use pixels. by default None
    pixels : int, optional
        downsize image to this number of pixels, maintaining aspect ratio. Can only use one of resize or pixels. If both are provided, will use pixels. by default None
    cropping : list of int
        cropping parameters in pixel number: [x1, x2, y1, y2]
    dynamic : triple containing (state, detection threshold, margin)
        If the state is true, dynamic cropping is performed: if an object is detected (i.e. any body part > detection threshold),
        object boundaries are computed from the smallest/largest x and y positions of all body parts. This window is
        expanded by the margin, and from then on only the posture within this crop is analyzed (until the object is lost, i.e. < detection threshold). The
        current position is used to update the crop window for the next frame (this is why the margin is important and should be set large
        enough given the movement of the animal)
    n_frames : int, optional
        number of frames to run inference on, by default 1000
    print_rate : bool, optional
        flag to print the inference rate frame by frame, by default False
    display : bool, optional
        flag to display keypoints on images. Useful for checking the accuracy of exported models.
    pcutoff : float, optional
        likelihood threshold to display keypoints
    display_radius : int, optional
        size (radius in pixels) of keypoint to display
    cmap : str, optional
        a string indicating the :package:`colorcet` colormap, `options here <https://colorcet.holoviz.org/>`, by default "bmy"
    save_poses : bool, optional
        flag to save poses to an hdf5 file. If True, operates similar to :function:`DeepLabCut.benchmark_videos`, by default False
    save_video : bool, optional
        flag to save a labeled video. If True, operates similar to :function:`DeepLabCut.create_labeled_video`, by default False
    output : str, optional
        path to directory to save pose and/or video file. If not specified, will use the directory of video_path, by default None

    Returns
    -------
    :class:`numpy.ndarray`
        vector of inference times
    tuple
        (image width, image height)
    bool
        tensorflow inference flag
    dict
        metadata for video

    Example
    -------
    Return a vector of inference times for 10000 frames:
    dlclive.benchmark('/my/exported/model', 'my_video.avi', n_frames=10000)

    Return a vector of inference times, resizing images to half the width and height for inference
    dlclive.benchmark('/my/exported/model', 'my_video.avi', n_frames=10000, resize=0.5)

    Display keypoints to check the accuracy of an exported model
    dlclive.benchmark('/my/exported/model', 'my_video.avi', display=True)

    Analyze a video (save poses to hdf5) and create a labeled video, similar to :function:`DeepLabCut.benchmark_videos` and :function:`create_labeled_video`
    dlclive.benchmark('/my/exported/model', 'my_video.avi', save_poses=True, save_video=True)
    """

    ### load video

    cap = cv2.VideoCapture(video_path)
    ret, frame = cap.read()
    n_frames = (n_frames if (n_frames > 0) and
                (n_frames < cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1) else
                (cap.get(cv2.CAP_PROP_FRAME_COUNT) - 1))
    n_frames = int(n_frames)
    im_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
               cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    ### get resize factor

    if pixels is not None:
        resize = np.sqrt(pixels / (im_size[0] * im_size[1]))
    if resize is not None:
        im_size = (int(im_size[0] * resize), int(im_size[1] * resize))

    ### create video writer

    if save_video:
        colors = None
        out_dir = (output if output is not None else os.path.dirname(
            os.path.realpath(video_path)))
        out_vid_base = os.path.basename(video_path)
        out_vid_file = os.path.normpath(
            f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_LABELED.avi"
        )
        fourcc = cv2.VideoWriter_fourcc(*"DIVX")
        fps = cap.get(cv2.CAP_PROP_FPS)
        vwriter = cv2.VideoWriter(out_vid_file, fourcc, fps, im_size)

    ### check for pandas installation if using save_poses flag

    if save_poses:
        try:
            import pandas as pd

            use_pandas = True
        except ImportError:
            use_pandas = False
            warnings.warn(
                "Could not find installation of pandas; saving poses as a numpy array with the dimensions (n_frames, n_keypoints, [x, y, likelihood])."
            )

    ### initialize DLCLive and perform inference

    inf_times = np.zeros(n_frames)
    poses = []

    live = DLCLive(
        model_path,
        tf_config=tf_config,
        resize=resize,
        cropping=cropping,
        dynamic=dynamic,
        display=display,
        pcutoff=pcutoff,
        display_radius=display_radius,
        display_cmap=cmap,
    )

    poses.append(live.init_inference(frame))
    TFGPUinference = len(live.outputs) == 1

    iterator = range(n_frames) if (print_rate) or (display) else tqdm(
        range(n_frames))
    for i in iterator:

        ret, frame = cap.read()

        if not ret:
            warnings.warn(
                "Did not complete {:d} frames. There probably were not enough frames in the video {}."
                .format(n_frames, video_path))
            break

        start_pose = time.time()
        poses.append(live.get_pose(frame))
        inf_times[i] = time.time() - start_pose

        if save_video:

            if colors is None:
                all_colors = getattr(cc, cmap)
                colors = [
                    ImageColor.getcolor(c, "RGB")[::-1] for c in
                    all_colors[::int(len(all_colors) / poses[-1].shape[0])]
                ]

            this_pose = poses[-1]
            for j in range(this_pose.shape[0]):
                if this_pose[j, 2] > pcutoff:
                    x = int(this_pose[j, 0])
                    y = int(this_pose[j, 1])
                    frame = cv2.circle(frame, (x, y),
                                       display_radius,
                                       colors[j],
                                       thickness=-1)

            if resize is not None:
                frame = cv2.resize(frame, im_size)
            vwriter.write(frame)

        if print_rate:
            print("pose rate = {:d}".format(int(1 / inf_times[i])))

    if print_rate:
        print("mean pose rate = {:d}".format(int(np.mean(1 / inf_times))))

    ### gather video and test parameterization

    # don't want to fail here, so gracefully failing on exception --
    # e.g. some builds of cv2 don't have CAP_PROP_CODEC_PIXEL_FORMAT
    try:
        fourcc = decode_fourcc(cap.get(cv2.CAP_PROP_FOURCC))
    except:
        fourcc = ""

    try:
        fps = round(cap.get(cv2.CAP_PROP_FPS))
    except:
        fps = None

    try:
        pix_fmt = decode_fourcc(cap.get(cv2.CAP_PROP_CODEC_PIXEL_FORMAT))
    except:
        pix_fmt = ""

    try:
        frame_count = round(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    except:
        frame_count = None

    try:
        orig_im_size = (
            round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        )
    except:
        orig_im_size = None

    meta = {
        "video_path": video_path,
        "video_codec": fourcc,
        "video_pixel_format": pix_fmt,
        "video_fps": fps,
        "video_total_frames": frame_count,
        "original_frame_size": orig_im_size,
        "dlclive_params": live.parameterization,
    }

    ### close video and tensorflow session

    cap.release()
    live.close()

    if save_video:
        vwriter.release()

    if save_poses:

        cfg_path = os.path.normpath(f"{model_path}/pose_cfg.yaml")
        ruamel_file = ruamel.yaml.YAML()
        dlc_cfg = ruamel_file.load(open(cfg_path, "r"))
        bodyparts = dlc_cfg["all_joints_names"]
        poses = np.array(poses)

        if use_pandas:

            poses = poses.reshape(
                (poses.shape[0], poses.shape[1] * poses.shape[2]))
            pdindex = pd.MultiIndex.from_product(
                [bodyparts, ["x", "y", "likelihood"]],
                names=["bodyparts", "coords"])
            pose_df = pd.DataFrame(poses, columns=pdindex)

            out_dir = (output if output is not None else os.path.dirname(
                os.path.realpath(video_path)))
            out_vid_base = os.path.basename(video_path)
            out_dlc_file = os.path.normpath(
                f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_POSES.h5"
            )
            pose_df.to_hdf(out_dlc_file, key="df_with_missing", mode="w")

        else:

            out_dir = (output if output is not None else os.path.dirname(
                os.path.realpath(video_path)))  # out_dir was otherwise undefined on this branch
            out_vid_base = os.path.basename(video_path)
            out_dlc_file = os.path.normpath(
                f"{out_dir}/{os.path.splitext(out_vid_base)[0]}_DLCLIVE_POSES.npy"
            )
            np.save(out_dlc_file, poses)

    return inf_times, im_size, TFGPUinference, meta
Code Example #37
File: draw_line.py Project: kingtub/OpencvExercise
''' Draw a line '''
import cv2
import numpy as np

createdImg = np.zeros((512, 512, 3), np.uint8)
cv2.line(createdImg, (0, 0), (511, 511), color=(0, 0, 255), thickness=5)
cv2.imshow('createdImg', createdImg)

img_dir = 'C:\\D\\testImgs\\'
img = cv2.imread(img_dir + 'aa.jpg')
cv2.line(img, (0, 0), (img.shape[1] - 1, img.shape[0] - 1), color=(0, 255, 0), thickness=5)  # points are (x, y): width first, height second

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'OpenCV', (10, 500), font, 4, (255, 255, 255), 2)

cv2.circle(img, (200, 300), 50, (0, 0, 255), 1)
cv2.imshow('img', img)

cv2.waitKey()
cv2.destroyAllWindows()
Code Example #38
File: handPoseVideo.py Project: AnujPathare/Vaani
    # Empty list to store the detected keypoints
    points = []

    

    for i in range(nPoints):
        # Confidence map of the corresponding body part.
        probMap = output[0, i, :, :]
        probMap = cv2.resize(probMap, (frameWidth, frameHeight))

        # Find global maxima of the probMap.
        minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)

        if prob > threshold :
            cv2.circle(frameCopy, (int(point[0]), int(point[1])), 6, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
            cv2.putText(frameCopy, "{}".format(i), (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, .8, (0, 0, 255), 2, lineType=cv2.LINE_AA)

            # Add the point to the list if the probability is greater than the threshold
            points.append((int(point[0]), int(point[1])))
        else :
            points.append(None)

    frame  = background
    # Draw Skeleton
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]

        if points[partA] and points[partB]:
            cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2, lineType=cv2.LINE_AA)
Code Example #39
def draw_point(point, img, radius=5, color=(0, 0, 255)):
    cv2.circle(img, tuple(point), radius, color, -1)
Code Example #40
File: paintBrush.py Project: gcmontilla/car
def draw_circle(event, x, y, flags, param):
    if event == cv2.EVENT_RBUTTONDOWN:
        cv2.circle(img, (x, y), 10, (255, 100, 0), -1)
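Wiring the callback into a window might look like this (window name and loop are illustrative, not from the original project):

import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)   # module-level image the callback paints on
cv2.namedWindow('paint')
cv2.setMouseCallback('paint', draw_circle)
while True:
    cv2.imshow('paint', img)
    if cv2.waitKey(20) & 0xFF == 27:      # Esc quits
        break
cv2.destroyAllWindows()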
Code Example #41
            center = (0, 0)
        else:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        PixCoordX = (center[0])
        PixCoordY = (center[1])
        print("PiX coordinate: {:.2f}".format(PixCoordX),
              "  PiY coordinate: {:.2f}".format(PixCoordY))
        #                pixcoordinate

        # write data to arduino
        #                    writeDataArduino()
        # only proceed if the radius meets a minimum size
        if radius > 0.5:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius), (15, 186, 2), 10)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # update the points queue
    try:
        print("Contour radius: {:.2f}".format(radius))
        PixRadius = radius
        #                    serRasp = serial.Serial('/dev/ttyUSB0',57600)
        #                    serRasp.write(PixCoordX)
        #                    serRasp.write(PixCoordY)
        pts.appendleft(center)
    except:
        print("No radius detected ...")
    # loop over the set of tracked points
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
Code Example #42
File: play_mockup.py Project: mudziok/TensorPet
model = create_model(keep_prob=1)
model.load_weights('model_weights.h5')

frame = 0
for data in x_train:
    big = cv2.resize(data, (int(width), int(height)), interpolation = cv2.INTER_AREA)
    
    vec = np.expand_dims(data, axis=0)
    prediction = model.predict(vec, batch_size=1)[0]
    circle_x = prediction[0]
    circle_y = prediction[1]

    coords = (int(640/4 + (circle_x * radius)), int(480/2 - (circle_y * radius)))
    final = cv2.line(big, (int(width/4), int(height/2)), coords, black, 5) 
    final = cv2.circle(final, coords, 20, white, -1)

    circle_x = y_train[frame][0]
    circle_y = y_train[frame][1]
    coords = (int(640/4*3 + (circle_x * radius)), int(480/2 - (circle_y * radius)))
    final = cv2.line(big, (int(width/4*3), int(height/2)), coords, black, 5) 
    final = cv2.circle(final, coords, 20, white, -1)


    
    cv2.imshow('test', final)
    
    frame += 1
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
Code Example #43
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    """Visualizes keypoints (adapted from vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
    """
    dataset_keypoints = PersonKeypoints.NAMES
    kp_lines = PersonKeypoints.CONNECTIONS

    # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)

    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (kps[:2, dataset_keypoints.index('right_shoulder')] +
                    kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
               kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(kps[2, dataset_keypoints.index('right_hip')],
                            kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(kp_mask,
                 tuple(mid_shoulder.astype(np.int32)),
                 tuple(kps[:2, nose_idx].astype(np.int32)),
                 color=colors[len(kp_lines)],
                 thickness=2,
                 lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(kp_mask,
                 tuple(mid_shoulder.astype(np.int32)),
                 tuple(mid_hip.astype(np.int32)),
                 color=colors[len(kp_lines) + 1],
                 thickness=2,
                 lineType=cv2.LINE_AA)

    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = int(kps[0, i1]), int(kps[1, i1])  # cv2 drawing functions need integer coordinates
        p2 = int(kps[0, i2]), int(kps[1, i2])
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(kp_mask,
                     p1,
                     p2,
                     color=colors[l],
                     thickness=2,
                     lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(kp_mask,
                       p1,
                       radius=3,
                       color=colors[l],
                       thickness=-1,
                       lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(kp_mask,
                       p2,
                       radius=3,
                       color=colors[l],
                       thickness=-1,
                       lineType=cv2.LINE_AA)

    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
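A hedged usage sketch (PersonKeypoints comes from the surrounding project; the frame and keypoint values here are made up):

import numpy as np

img = np.zeros((480, 640, 3), np.uint8)         # any BGR frame
n_kps = len(PersonKeypoints.NAMES)              # project-specific keypoint list
kps = np.vstack([
    np.random.uniform(0, 640, n_kps),           # x
    np.random.uniform(0, 480, n_kps),           # y
    np.full(n_kps, 5.0),                        # logit, above kp_thresh=2
    np.full(n_kps, 0.9),                        # prob
])
blended = vis_keypoints(img, kps, kp_thresh=2, alpha=0.7)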
Code Example #44
    def findLinePointsFast(self,image):
        '''
        Find lane lines points in video frame - fast.
        Works starting from second frame.
        '''
        points=([],[])
        shape=image.shape
        #Prepare images for visualization

        img=image.copy()
        red=np.zeros_like(img)
        blue=np.zeros_like(img)

        #For every 10th row starting from bottom
        for y,lx,rx,line in list(zip(self.all_y,self.left_fitx,self.right_fitx,image))[::-10]:
            lxmin=int(lx-20-0.2*(img.shape[0]-y))
            lxmax=int(lx+20+0.2*(img.shape[0]-y))
            rxmin=int(rx-20-0.2*(img.shape[0]-y))
            rxmax=int(rx+20+0.2*(img.shape[0]-y))
            cv2.circle(red,(lxmin,int(y)),1,(255,255,255))
            cv2.circle(red,(lxmax,int(y)),1,(255,255,255))
            cv2.circle(red,(rxmin,int(y)),1,(255,255,255))
            cv2.circle(red,(rxmax,int(y)),1,(255,255,255))
            x_val_hist = [x for x, v in enumerate(line) if v > 0]  # column indices with nonzero pixels
            if len(x_val_hist)>5:
                #split points to left/right
                left=[(x,y) for x in x_val_hist if x<=lxmax and x>=lxmin]
                right=[(x,y) for x in x_val_hist if x>=rxmin and x<=rxmax]
                l=None
                r=None
                #Compute means for left/right
                if len(left):
                    l=np.mean(np.array(left),axis=0)
                if len(right):
                    r=np.mean(np.array(right),axis=0)
                if l is None or r is None or r[0]-l[0]>200:
                    if l is not None and lxmin < l[0] < lxmax:
                        cv2.circle(blue,(int(l[0]),int(l[1])),1,(255,255,255))
                        points[0].append(l)
                    if r is not None and rxmin < r[0] < rxmax:
                        cv2.circle(blue,(int(r[0]),int(r[1])),1,(255,255,255))
                        points[1].append(r)

        if len(points[0])<10 or len(points[1])<10:
            self.retry_counter=self.retry_counter+1


        #Show roi for video frame
        img=cv2.resize(np.dstack((blue,red,red)),(shape[1],int(shape[0])),fx=0,fy=0)
        cv2.imshow('lines-video',img)
        return points
Code Example #45
def step1():
    cell_area_hist_list = []
    print("============Step 1 Start============")
    #-----read-----
    root = tk.Tk()
    root.withdraw()

    file_path = filedialog.askopenfilename()
    #img=cv.imread("G:\\2020summer\\Project\\Chromophobe_dataset1\\4.jpg")
    img = cv.imread(file_path)
    img_original = img
    print("Img size: [Width :", img.shape[0], "]", "[Height :", img.shape[1],
          "]")

    img = cv.copyMakeBorder(img,
                            80,
                            450,
                            60,
                            60,
                            cv.BORDER_CONSTANT,
                            value=[255, 255, 255])
    #img=cv.cvtColor(img,cv.COLOR_BGR2BGRA)

    img_masked = img.copy()
    img_nucleus_white_img = img.copy()

    #-----preprocess-----
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    #cv.imshow("gray", gray)

    gauss = cv.GaussianBlur(gray, (5, 5), 5)
    #cv.imshow("gauss1",gauss)

    ret, thresh = cv.threshold(gauss, 190, 255, 0)
    cv.imwrite("temp_file\\figure3_left.jpg", thresh)
    #cv.imshow("thresh",thresh)

    erode = cv.erode(thresh, None, iterations=1)
    #cv.imshow("erode",erode)

    #-----remove outlines-----

    #cv.imshow("erode",erode)
    erode[0, :] = 255
    #-----find contours-----
    cnts, hierarchy = cv.findContours(erode.copy(), cv.RETR_LIST,
                                      cv.CHAIN_APPROX_NONE)

    def cnt_area(cnt):
        area = cv.contourArea(cnt)
        return area

    counter_number = 0
    location_cells_center = {}
    area_of_cells_nucleus = []
    Whole_pic_cell_area_ave_percent = []
    Whole_pic_cell_color_ave = []
    for i in range(0, len(cnts)):
        if 250 <= cnt_area(cnts[i]) <= 0.2 * (img.shape[0] * img.shape[1]):
            cell_area_hist_list.append(cnt_area(cnts[i]))
            #print(cnts[i])
            #cell_area_hist_list.append(area_calculate_from_points(cnts[i]))
            counter_number += 1
            #print(cnts[i])
            #print("======")
            cv.drawContours(img_masked, cnts[i], -1, (0, 0, 255),
                            2)  #draw contours
            cv.drawContours(img_nucleus_white_img, [cnts[i]], -1,
                            (255, 255, 255), -1)  #masked white
            M = cv.moments(cnts[i])
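            # contour moments: the centroid used below is (m10/m00, m01/m00)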

            # inspect the interior color of each cell
            x, y, w, h = cv.boundingRect(cnts[i])

            #cv.imshow('single_cell', newimage)

            cell_area_percent = 0

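            # pointPolygonTest returns -1 for points outside the contour:
            # whiten every bbox pixel outside the nucleus and count them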
            for row in range(y, y + h):
                for col in range(x, x + w):
                    result = cv.pointPolygonTest(cnts[i], (col, row), False)
                    if result == -1:

                        cv.circle(gray, (col, row), 1, 255, -1)
                        cv.circle(img, (col, row), 1, (255, 255, 255), -1)
                        cell_area_percent += 1
            cv.rectangle(img, (x, y), (x + w, y + h), (153, 153, 0), 1)
            newimage_gray = gray[y:y + h, x:x + w]

            Single_Cell_Color_Distrution = []
            for row in range(h):
                for col in range(w):
                    if newimage_gray[row, col] != 255:
                        Single_Cell_Color_Distrution.append(newimage_gray[row,
                                                                          col])
            '''
            plt.hist(Single_Cell_Color_Distrution,bins=50)
            plt.title(str(counter_number))
            plt.show()
            '''
            #print("this cell area percent= ",str(cell_area_percent/(w*h)))
            numpy.set_printoptions(precision=3)
            Whole_pic_cell_area_ave_percent.append(cell_area_percent / (w * h))
            Whole_pic_cell_color_ave.append(
                numpy.mean(Single_Cell_Color_Distrution))
            """#找出masked细胞内点的坐标
            rect = cv.minAreaRect(cnts[i])
            cx, cy = rect[0]
            box = cv.boxPoints(rect)
            box = np.int0(box)
            cv.drawContours(img_masked, [box], 0, (0, 0, 255), 2)
            #cv.circle(img_masked, (np.int32(cx), np.int32(cy)), 2, (255, 0, 0), 2, 8, 0)

            box_gray_color=[]
            for by in range(box[2][1],box[0][1]+1):
                for bx in range(box[1][0],box[3][0]+1):
                    #print(bx,by)
                    #cv.circle(img_masked,(bx, by), 1, (255, 0, 0), 2, 8, 0)
                    box_gray_color.append(gray[bx,by])
            plt.hist(box_gray_color)

            plt.hist(box_gray_color,bins=50)
            plt.title(str(counter_number))
            plt.show()

            dist=cv.pointPolygonTest(cnts[i],(50,50),True)
            """
            try:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                cv.circle(img_masked, (cX, cY), 3, (255, 255, 255), -1)
                cv.putText(img_masked, str(counter_number), (cX - 20, cY - 20),
                           cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                area_of_cells_nucleus.append(cnt_area(cnts[i]))
                location_cells_center[counter_number] = [cX, cY]
            except ZeroDivisionError:
                pass
            if counter_number == 1:
                x1 = cX
                y1 = cY
            if counter_number == 2:
                x2 = cX
                y2 = cY
            if counter_number == 20:
                x_sample = cX
                y_sample = cY

            #cv.drawContours(img_masked, [cnts[i]], -1, (255, 255, 255), -1)#mask contours
    print("Whole pic average cell nucleus area percent: ",
          Whole_pic_cell_area_ave_percent)
    print("Whole pic average cell nucleus area percent_ave: ",
          numpy.mean(Whole_pic_cell_area_ave_percent))
    print("Whole pic average cell nucleus color deep percent: ",
          Whole_pic_cell_color_ave)
    print("Whole pic average cell nucleus color deep percent_ave: ",
          numpy.mean(Whole_pic_cell_color_ave))
    cv.imshow('single_cell', img)
    #-----put Text-----
    print("total cells number : ", counter_number)

    cv.line(img_masked, (x1, y1), (x2, y2), (0, 0, 255), 2)

    list_of_two_points = pixel_between_two_points(x1, x2, y1, y2)

    #-----output information on the line
    height_of_two_points = []
    height_of_two_points_B = []
    height_of_two_points_G = []
    height_of_two_points_R = []

    for m in range(0, len(list_of_two_points)):
        height = img[list_of_two_points[m][1], list_of_two_points[m][0]]
        try:
            height_B = img[list_of_two_points[m][1],
                           list_of_two_points[m][0]][0]
            height_G = img[list_of_two_points[m][1],
                           list_of_two_points[m][0]][1]
            height_R = img[list_of_two_points[m][1],
                           list_of_two_points[m][0]][2]
            height_of_two_points_B.append(height_B)
            height_of_two_points_G.append(height_G)
            height_of_two_points_R.append(height_R)
        except:
            pass
        #print(height)
        height_of_two_points.append(height)
    img_sample = img.copy()
    cv.circle(img_sample, (x_sample, y_sample), 3, (0, 0, 255), -1)
    font = cv.FONT_HERSHEY_SIMPLEX
    cv.putText(img_sample, "Sample_Point", (x_sample - 20, y_sample - 20),
               font, 0.7, (255, 255, 255), 2)
    #cv.imshow("img_sample_location_RED_DOT", img_sample)

    # save to local
    f = open("G:\\2020summer\\Project\\Cell_classfication_1.0.0\\dict.txt",
             'w')
    f.write(str(location_cells_center))
    f.close()

    # < list save
    file1 = open('area_of_nucleus.txt', 'w')
    for fp in area_of_cells_nucleus:
        file1.write(str(fp))
        file1.write('\n')
    file1.close()
    # list save >

    cv.imwrite("G:\\2020summer\\Project\\Cell_classfication_1.0.0\\temp.bmp",
               img_masked)
    cv.imwrite("G:\\2020summer\\Project\\Cell_classfication_1.0.0\\temp_1.bmp",
               img_nucleus_white_img)
    cv.imwrite(
        "G:\\2020summer\\Project\\Chromophobe_dataset1\\figure3_right.jpg",
        img_masked)
    #================hist of cells area==================
    #plt.hist(cell_area_hist_list)
    #plt.show()

    #=================================
    #-----
    #=================================UI/
    cv.putText(img_masked, "Overview", (80, 40), cv.FONT_HERSHEY_SIMPLEX, 1,
               (0, 0, 0), 2)
    image_size_text = "Image size: [Width :" + str(
        img_original.shape[1]) + "]" + "[Height :" + str(
            img_original.shape[0]) + "]"
    cv.putText(img_masked, image_size_text, (80, img.shape[0] - 400),
               cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
    cv.putText(img_masked, "Total cells number: " + str(counter_number),
               (80, img.shape[0] - 350), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0),
               2)
    Cells_density = ('%.2f' % (10000 * counter_number /
                               (img.shape[0] * img.shape[1])))
    print("Cells density : ", Cells_density, " / 100*100 pixels")
    cv.putText(img_masked,
               "Cells density : " + str(Cells_density) + " / 100*100 pixels",
               (80, img.shape[0] - 300), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0),
               2)
    cv.putText(img_masked, "Close window to continue",
               (80, img.shape[0] - 250), cv.FONT_HERSHEY_SIMPLEX, 0.8,
               (0, 0, 0), 1)
    #=================================/UI
    cv.imshow('img_copy', img_masked)
    cv.imwrite("result\\overview_result1.bmp", img_masked)
    print("============Step 1 End============")
    cv.waitKey()
    return counter_number
コード例 #46
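# The loop below assumes the usual MediaPipe Hands setup, which is not part
# of this snippet; a minimal sketch would be:
#   import time
#   import cv2
#   import mediapipe as mp
#   cap = cv2.VideoCapture(0)
#   mpHands = mp.solutions.hands
#   hands = mpHands.Hands()
#   mpDraw = mp.solutions.drawing_utils
#   pTime = 0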
while True:
    success, img = cap.read()
    
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Because hands use RGB images
    results = hands.process(imgRGB)

    if results.multi_hand_landmarks:
        for handLms in results.multi_hand_landmarks:
            #mpDraw.draw_landmarks(img, handLms) # draws only the landmark points, without connecting lines
            mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)
            
            for id, lm in enumerate(handLms.landmark):
                h, w, c = img.shape
                # Convert landmark into real pixel position
                cx, cy = int(lm.x * w), int(lm.y * h)
                # Draw a circle at landmark 4 (the thumb tip)
                if id == 4: # Use the id to track any finger
                    cv2.circle(img, (cx, cy), 25, (255, 0, 255), cv2.FILLED)
        
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 2)
    
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
コード例 #47
 def __drawTruth__(self, image, fileName):
     annotated, (x, y) = self.db.getTruth(fileName)
     if annotated:
         cv2.circle(image, (x, y), 5, YELLOW, -1)
     return image
コード例 #48
def process (input_image, params, model_params):

    oriImg = cv2.imread(input_image)  # B,G,R order
    multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in params['scale_search']]

    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
    paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))

    for m in range(len(multiplier)):
        scale = multiplier[m]

        imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
        imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_params['stride'],
                                                          model_params['padValue'])

        input_img = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,0,1,2)) # required shape (1, width, height, channels)

        output_blobs = model.predict(input_img)

        # extract outputs, resize, and remove padding
        heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
        heatmap = cv2.resize(heatmap, (0, 0), fx=model_params['stride'], fy=model_params['stride'],
                             interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3],
                  :]
        heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)

        paf = np.squeeze(output_blobs[0])  # output 0 is PAFs
        paf = cv2.resize(paf, (0, 0), fx=model_params['stride'], fy=model_params['stride'],
                         interpolation=cv2.INTER_CUBIC)
        paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)

        heatmap_avg = heatmap_avg + heatmap / len(multiplier)
        paf_avg = paf_avg + paf / len(multiplier)

    all_peaks = []
    peak_counter = 0

    for part in range(18):
        map_ori = heatmap_avg[:, :, part]
        map = gaussian_filter(map_ori, sigma=3)

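        # shift the smoothed map by one pixel in each direction: a pixel is a
        # peak if it is >= all four neighbours and above threshold thre1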
        map_left = np.zeros(map.shape)
        map_left[1:, :] = map[:-1, :]
        map_right = np.zeros(map.shape)
        map_right[:-1, :] = map[1:, :]
        map_up = np.zeros(map.shape)
        map_up[:, 1:] = map[:, :-1]
        map_down = np.zeros(map.shape)
        map_down[:, :-1] = map[:, 1:]

        peaks_binary = np.logical_and.reduce(
            (map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > params['thre1']))
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
        peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
        id = range(peak_counter, peak_counter + len(peaks))
        peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)

    connection_all = []
    special_k = []
    mid_num = 10

    for k in range(len(mapIdx)):
        score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
        candA = all_peaks[limbSeq[k][0] - 1]
        candB = all_peaks[limbSeq[k][1] - 1]
        nA = len(candA)
        nB = len(candB)
        indexA, indexB = limbSeq[k]
        if (nA != 0 and nB != 0):
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # failure case when 2 body parts overlaps
                    if norm == 0:
                        continue
                    vec = np.divide(vec, norm)

                    startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                   np.linspace(candA[i][1], candB[j][1], num=mid_num)))

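                    # sample the PAF at mid_num points along the candidate
                    # limb and project each sample onto the limb direction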
                    vec_x = np.array(
                        [score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                         for I in range(len(startend))])
                    vec_y = np.array(
                        [score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                         for I in range(len(startend))])

                    score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                    score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
                        0.5 * oriImg.shape[0] / norm - 1, 0)
                    criterion1 = len(np.nonzero(score_midpts > params['thre2'])[0]) > 0.8 * len(
                        score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    if criterion1 and criterion2:
                        connection_candidate.append([i, j, score_with_dist_prior,
                                                     score_with_dist_prior + candA[i][2] + candB[j][2]])

            connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
            connection = np.zeros((0, 5))
            for c in range(len(connection_candidate)):
                i, j, s = connection_candidate[c][0:3]
                if (i not in connection[:, 3] and j not in connection[:, 4]):
                    connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                    if (len(connection) >= min(nA, nB)):
                        break

            connection_all.append(connection)
        else:
            special_k.append(k)
            connection_all.append([])

    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    subset = -1 * np.ones((0, 20))
    candidate = np.array([item for sublist in all_peaks for item in sublist])

    for k in range(len(mapIdx)):
        if k not in special_k:
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(limbSeq[k]) - 1

            for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):  # 1:size(subset,1):
                    if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                        subset_idx[found] = j
                        found += 1

                if found == 1:
                    j = subset_idx[0]
                    if (subset[j][indexB] != partBs[i]):
                        subset[j][indexB] = partBs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                elif found == 2:  # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)
                    else:  # as like found == 1
                        subset[j1][indexB] = partBs[i]
                        subset[j1][-1] += 1
                        subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

                # if find no partA in the subset, create a new subset
                elif not found and k < 17:
                    row = -1 * np.ones(20)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + \
                              connection_all[k][i][2]
                    subset = np.vstack([subset, row])

    # delete some rows of subset which has few parts occur
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)

    canvas = cv2.imread(input_image)  # B,G,R order
    for i in range(18):
        for j in range(len(all_peaks[i])):
            cv2.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)
            
    cv2.imwrite("keypoints.jpg", canvas)
    #print(all_peaks) #keypoints
    all_peaks = translation(all_peaks)
    #print(all_peaks)
    stickwidth = 4

    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            cur_canvas = canvas.copy()
            Y = candidate[index.astype(int), 0]
            X = candidate[index.astype(int), 1]
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0,
                                       360, 1)
            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)

    return canvas,all_peaks
コード例 #49
            status: set to 1 for each feature whose match was found, 0 otherwise

            err: output vector of tracking errors, one per feature

            winSize: size of the search window at each pyramid level

            maxLevel: maximal pyramid level number (0 means a single level)

            flags: optional flags: OPTFLOW_USE_INITIAL_FLOW, OPTFLOW_LK_GET_MIN_EIGENVALS
	'''
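	# lk_params is defined earlier in the script; a typical sketch would be:
	#   lk_params = dict(winSize=(15, 15), maxLevel=2,
	#                    criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))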
	p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
	good_new = p1[st == 1]
	good_old = p0[st == 1]

	# draw the tracks
	for i, (new, old) in enumerate(zip(good_new, good_old)):
		a, b = new.ravel()
		c, d = old.ravel()
		mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
		frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
	img = cv2.add(frame, mask)
	cv2.imshow('frame', img)

	k = cv2.waitKey(30)
	if k == 27:
		break

	old_gray = frame_gray.copy()
	p0 = good_new.reshape(-1, 1, 2)
cv2.destroyAllWindows()
cap.release()
コード例 #50
    def process_image(self, image):

        left, right = [], []

        thresh = self.preprocess(image)
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # All potential smaller-end needle protrusions
        residuals = [c for c in contours if self.residual_lower < cv2.contourArea(c) < self.residual_upper]

        not_found = True

        # for r in residuals:
        #     cv2.drawContours(image, [r], 0, (0, 255, 0), 2)

        for c in contours:
            # Get moments and area for given contour
            M = cv2.moments(c)
            area = cv2.contourArea(c)

            # Throw out all non-needle contours
            if not_found and (self.area_lower < area < self.area_upper):

                # Compute the centroid (center of mass) and center of the given needle
                centroid_x, centroid_y = self.compute_centroid(c, M)
                closest = np.vstack(self.center(c, centroid_x, centroid_y)).squeeze()
                cx, cy = closest[0], closest[1]
                center = (cx, cy)

                # Fit an ellipse to the contour
                ellipse, ellipse_aspect, ellipse_area = self.get_ellipse(c)

                """Contour is the big protruding part of the needle"""
                if self.ellipse_lower < ellipse_area < self.ellipse_upper:

                    not_found = False

                    # Report/display the large residual
                    cv2.putText(image, "centroid", (centroid_x - 20, centroid_y - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                    cv2.circle(image, center, 10, (255, 0, 0), -1)
                    # cv2.circle(image, (centroid_x, centroid_y), 10, (255, 255, 255), -1)
                    self.report(area, centroid_x, centroid_y, cx, cy, ellipse_area, 'LARGE RESIDUAL')
                    # cv2.ellipse(image, ellipse, (0, 0, 255), 2)
                    cv2.drawContours(image, [c], 0, (180, 30, 170), 5)
                    
                    # Find the corresponding small residual and markup
                    residual = self.find_residual(center, residuals)
                    if residual is not None:
                        print("SMALL RESIDUAL", cv2.contourArea(residual))
                        residual_centroid = self.compute_centroid(residual)
                        cv2.putText(image, "residual", residual_centroid, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
                        cv2.drawContours(image, [residual], 0, (0, 255, 0), 5)
                        cv2.circle(image, residual_centroid, 10, (255, 0, 0), -1)
                        
                        # Fit a line to the small residual
                        [vx, vy, x, y] = cv2.fitLine(residual, cv2.DIST_L2,0,0.01,0.01)
                        dx, dy = float(vx), float(vy)
                        # rows, cols = image.shape[:2]
                        # lefty = int((-x*vy/vx) + y)
                        # righty = int(((cols-x)*vy/vx)+y)
                        # cv2.line(image,(cols-1,righty),(0,lefty),(0,255,0),2)

                        """Finds a pull point (relative to contour center) in the direction
                        of the best fit line of the smaller residual and opposite 
                        (not towards) the smaller residual """
                        if self.distance(residual_centroid, center) > \
                           self.distance(residual_centroid, (cx + dx, cy + dy)):
                            dx, dy = -dx, -dy
                        pull_x = int(cx + 350*dx)
                        pull_y = int(cy + 350*dy)
                        cv2.circle(image, (pull_x, pull_y), 10, (0, 0, 0), -1)
                        cv2.line(image, center, (pull_x, pull_y), (0, 0, 0), 2)

                        # Compute points in right camera frame (residual center, contour center, pull point)
                        left_center = np.matrix([cx, cy, 0])
                        left_pull = np.matrix([pull_x, pull_y, 0])
                        right_center = transform.transform_data("Left Frame", "Right Frame", left_center, self.TL_R, verbose=False)
                        right_pull = transform.transform_data("Left", "Right", left_pull, self.TL_R, verbose=False)
                        right_cx = int(right_center[0, 0])
                        right_cy = int(right_center[0, 1])
                        right_pull_x = int(right_pull[0, 0])
                        right_pull_y = int(right_pull[0, 1])
                        cv2.circle(self.right_image, (right_cx, right_cy), 10, (0, 0, 0), -1)
                        cv2.circle(self.right_image, (right_pull_x, right_pull_y), 10, (0, 0, 0), -1)
                        cv2.line(self.right_image, (right_cx, right_cy), (right_pull_x, right_pull_y), (0, 0, 0), 2)

                        left.append(center)
                        left.append((pull_x, pull_y))
                        right.append((right_cx, right_cy))
                        right.append((right_pull_x, right_pull_y))
            # elif 250 < area < 500:
            #     cv2.drawContours(image, [c], 0, (0, 255, 255), 2)
        if len(right) > 0 and len(right) == len(left):
            pts3d = self.get_points_3d(left, right)
            print("Found")
            self.pts = [(p.point.x, p.point.y, p.point.z) for p in pts3d]
            pprint.pprint(self.pts)
            with open('needle_data/needle_points.p', "wb") as f:
                pickle.dump(self.pts, f)
            rospy.signal_shutdown("Finished.")
コード例 #51
ファイル: top_down.py プロジェクト: vivounicorn/mmpose
    def show_result(self,
                    img,
                    result,
                    skeleton=None,
                    kpt_score_thr=0.3,
                    bbox_color='green',
                    pose_kpt_color=None,
                    pose_limb_color=None,
                    radius=4,
                    text_color=(255, 0, 0),
                    thickness=1,
                    font_scale=0.5,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.

        Args:
            img (str or Tensor): The image to be displayed.
            result (list[dict]): The results to draw over `img`
                (bbox_result, pose_result).
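            skeleton (list[list[int]]): Pairs of 1-based keypoint indices
                defining the limbs to draw. If None, do not draw limbs.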
            kpt_score_thr (float, optional): Minimum score of keypoints
                to be shown. Default: 0.3.
            bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
            pose_kpt_color (np.array[Nx3]): Color of N keypoints.
                If None, do not draw keypoints.
            pose_limb_color (np.array[Mx3]): Color of M limbs.
                If None, do not draw limbs.
            text_color (str or tuple or :obj:`Color`): Color of texts.
            thickness (int): Thickness of lines.
            font_scale (float): Font scales of texts.
            win_name (str): The window name.
            wait_time (int): Value of waitKey param.
                Default: 0.
            out_file (str or None): The filename to write the image.
                Default: None.

        Returns:
            Tensor: Visualized img, only if not `show` or `out_file`.
        """

        img = mmcv.imread(img)
        img = img.copy()
        img_h, img_w, _ = img.shape

        bbox_result = []
        pose_result = []
        for res in result:
            bbox_result.append(res['bbox'])
            pose_result.append(res['keypoints'])

        if len(bbox_result) > 0:
            bboxes = np.vstack(bbox_result)
            # draw bounding boxes
            mmcv.imshow_bboxes(
                img,
                bboxes,
                colors=bbox_color,
                top_k=-1,
                thickness=thickness,
                show=False,
                win_name=win_name,
                wait_time=wait_time,
                out_file=None)

            for _, kpts in enumerate(pose_result):
                # draw each point on image
                if pose_kpt_color is not None:
                    assert len(pose_kpt_color) == len(kpts)
                    for kid, kpt in enumerate(kpts):
                        x_coord, y_coord, kpt_score = int(kpt[0]), int(
                            kpt[1]), kpt[2]
                        if kpt_score > kpt_score_thr:
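                            # draw on a copy, then alpha-blend it back so the
                            # keypoint opacity matches its confidence score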
                            img_copy = img.copy()
                            r, g, b = pose_kpt_color[kid]
                            cv2.circle(img_copy, (int(x_coord), int(y_coord)),
                                       radius, (int(r), int(g), int(b)), -1)
                            transparency = max(0, min(1, kpt_score))
                            cv2.addWeighted(
                                img_copy,
                                transparency,
                                img,
                                1 - transparency,
                                0,
                                dst=img)

                # draw limbs
                if skeleton is not None and pose_limb_color is not None:
                    assert len(pose_limb_color) == len(skeleton)
                    for sk_id, sk in enumerate(skeleton):
                        pos1 = (int(kpts[sk[0] - 1, 0]), int(kpts[sk[0] - 1,
                                                                  1]))
                        pos2 = (int(kpts[sk[1] - 1, 0]), int(kpts[sk[1] - 1,
                                                                  1]))
                        if (pos1[0] > 0 and pos1[0] < img_w and pos1[1] > 0
                                and pos1[1] < img_h and pos2[0] > 0
                                and pos2[0] < img_w and pos2[1] > 0
                                and pos2[1] < img_h
                                and kpts[sk[0] - 1, 2] > kpt_score_thr
                                and kpts[sk[1] - 1, 2] > kpt_score_thr):
                            img_copy = img.copy()
                            X = (pos1[0], pos2[0])
                            Y = (pos1[1], pos2[1])
                            mX = np.mean(X)
                            mY = np.mean(Y)
                            length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5
                            angle = math.degrees(
                                math.atan2(Y[0] - Y[1], X[0] - X[1]))
                            stickwidth = 2
                            polygon = cv2.ellipse2Poly(
                                (int(mX), int(mY)),
                                (int(length / 2), int(stickwidth)), int(angle),
                                0, 360, 1)

                            r, g, b = pose_limb_color[sk_id]
                            cv2.fillConvexPoly(img_copy, polygon,
                                               (int(r), int(g), int(b)))
                            transparency = max(
                                0,
                                min(
                                    1, 0.5 *
                                    (kpts[sk[0] - 1, 2] + kpts[sk[1] - 1, 2])))
                            cv2.addWeighted(
                                img_copy,
                                transparency,
                                img,
                                1 - transparency,
                                0,
                                dst=img)

        if show:
            imshow(img, win_name, wait_time)

        if out_file is not None:
            imwrite(img, out_file)

        return img
コード例 #52
        cv2.putText(frame, 'Right', (50, 300), font, 2, (255, 255, 255), 2)
        print('Right')
    elif (cy - prev[1] > 0 and abs(cy - prev[1]) > 10):
        # print('diffY' + str(cy - prev[1]))
        # print('abs diffY : ' + str(abs(cy - prev[1])))
        print('Down')
        cv2.putText(frame, 'Down', (50, 400), font, 2, (255, 255, 255), 2)
    elif (cy - prev[1] < 0 and abs(cy - prev[1]) > 10):
        # print('diffY' + str(cy - prev[1]))
        # print('abs diffY : ' + str(abs(cy - prev[1])))
        print('Up')
        cv2.putText(frame, 'Up', (50, 500), font, 2, (255, 255, 255), 2)
    prev[0] = cx
    prev[1] = cy

    cv2.circle(frame, centerMass, 7, [100, 0, 255],
               2)  # to display center mass in window
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(frame, 'Center', tuple(centerMass), font, 2, (255, 255, 255),
                2)  #put text at Center Mass
    #draw bounding rectangle for hand
    x, y, w, h = cv2.boundingRect(cnts)
    img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.drawContours(frame, [hull], -1, (255, 255, 255), 2)
    cv2.imshow('Image', frame)
    k = cv2.waitKey(5) & 0xFF  # Close window on esc key
    if k == 27:  #27 is esc key code
        break

cap.release()
cv2.destroyAllWindows()
コード例 #53
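# The loop body below assumes the usual MTCNN webcam setup, which is not part
# of this snippet; a minimal sketch would be:
#   import cv2
#   from mtcnn.mtcnn import MTCNN
#   detector = MTCNN()
#   cap = cv2.VideoCapture(0)
#   while True:
#       ret, frame = cap.read()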
    
    #Use MTCNN to detect faces
    result = detector.detect_faces(frame)
    if result:
        for person in result:
            bounding_box = person['box']
            keypoints = person['keypoints']
    		
    		#'''
            cv2.rectangle(frame,
                          (bounding_box[0], bounding_box[1]),
                          (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]),
                          (0,155,255),
                          2)

            cv2.circle(frame,(keypoints['left_eye']), 2, (0,155,255), 2)
            cv2.circle(frame,(keypoints['right_eye']), 2, (0,155,255), 2)
            cv2.circle(frame,(keypoints['nose']), 2, (0,155,255), 2)
            cv2.circle(frame,(keypoints['mouth_left']), 2, (0,155,255), 2)
            cv2.circle(frame,(keypoints['mouth_right']), 2, (0,155,255), 2)
            #'''

            print(bounding_box[0], bounding_box[1]) # Print x,y coordinates of face detected
    
    cv2.imshow('frame',cv2.flip(frame,1))

    # Wait for the "q" key to quit the program
    if cv2.waitKey(1) &0xFF == ord('q'):
        break

# Releases the capture
コード例 #54
if bgr_img.shape[-1] == 3:  # color image
    b, g, r = cv2.split(bgr_img)  # get b,g,r
    rgb_img = cv2.merge([r, g, b])  # switch it to rgb
    copy_img = rgb_img.copy()
    gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
else:
    gray_img = bgr_img

img = cv2.medianBlur(gray_img, 5)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

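# param1 is the upper Canny edge threshold; param2 is the accumulator
# threshold - higher values yield fewer, more reliable circles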
circles = cv2.HoughCircles(img,
                           cv2.HOUGH_GRADIENT,
                           1,
                           20,
                           param1=90,
                           param2=60,
                           minRadius=0,
                           maxRadius=0)

# HoughCircles returns None when no circles are found
if circles is not None:
    circles = np.uint16(np.around(circles))

    for i in circles[0, :]:
        # draw the outer circle
        cv2.circle(copy_img, (i[0], i[1]), i[2], (0, 255, 0), 2)

plt.subplot(121), plt.imshow(rgb_img)
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(copy_img)
plt.title('Hough Transform'), plt.xticks([]), plt.yticks([])
plt.show()
コード例 #55
    hull = cv2.convexHull(cnts)

    #Find convex defects
    hull2 = cv2.convexHull(cnts, returnPoints=False)
    defects = cv2.convexityDefects(cnts, hull2)

    #Get defect points and draw them in the original image
    FarDefect = []
    for i in range(defects.shape[0]):
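        # each defect row: (start index, end index, farthest-point index, approximate depth)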
        s, e, f, d = defects[i, 0]
        start = tuple(cnts[s][0])
        end = tuple(cnts[e][0])
        far = tuple(cnts[f][0])
        FarDefect.append(far)
        cv2.line(frame, start, end, [0, 255, 0], 1)
        cv2.circle(frame, far, 10, [100, 255, 255], 3)

    #Find moments of the largest contour
    moments = cv2.moments(cnts)

    #Central mass of first order moments
    cx, cy = 0, 0
    if moments['m00'] != 0:
        cx = int(moments['m10'] / moments['m00'])  # cx = M10/M00
        cy = int(moments['m01'] / moments['m00'])  # cy = M01/M00
    centerMass = (cx, cy)

    #Draw center mass
    cv2.circle(frame, centerMass, 7, [100, 0, 255], 2)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(frame, 'Center', tuple(centerMass), font, 2, (255, 255, 255),
                2)
コード例 #56
ファイル: ball_tracking.py プロジェクト: ProfJust/rtc
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # Find contours => cnts
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # If contours were found
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        # Find the smallest enclosing circle
        # https://docs.opencv.org/3.4/dd/d49/tutorial_py_contour_features.html
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        # Is the radius big enough to be our ball?
        if radius > 10:
            # Draw a yellow circle
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)

    cv2.imshow("Frame", frame)
    cv2.imshow("Mask", mask)

    key = cv2.waitKey(1) & 0xFF

    if key == ord("q"):
        break
    time.sleep(0.1)

camera.release()
cv2.destroyAllWindows()
コード例 #57
def extract(imageName, face):
	im = cv2.imread(imageName)
	height, width = im.shape[:2]
	print(width, height)
	#im = cv2.equalizeHist(im)
	#kernel = np.ones((5,5),np.float32)/25
	#dst = cv2.filter2D(im,-1,kernel)
	#im = cv2.GaussianBlur(im,(5,5),100)
	im = cv2.bilateralFilter(im,9,75,75)
	#im = cv2.blur(im,(5,5))
	im = cv2.fastNlMeansDenoisingColored(im,None,10,10,7,24)

	#im = cv2.medianBlur(im,7)
	#cam = cv2.VideoCapture(0)
	#s, im = cam.read()

	position = {}
	#s, im = cam.read()
	hsv_img = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)	# HSV image

	#lower_white = np.array([70,70,190], dtype=np.uint8)
	#upper_white = np.array([150,130,250], dtype=np.uint8)
	lower_white = np.array([70,20,130], dtype=np.uint8)
	upper_white = np.array([180,110,255], dtype=np.uint8)
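	# note: OpenCV hue ranges over 0-179 (half-degrees); S and V over 0-255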
	
	frame_threshed1 = cv2.inRange(hsv_img, lower_white, upper_white)
	imgray1 = frame_threshed1
	cv2.imshow('white', frame_threshed1)
	ret,thresh1 = cv2.threshold(frame_threshed1,127,255,0)

	contours1, hierarchy1 = cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
	areas = [cv2.contourArea(c) for c in contours1]
	for elem in range(len(areas)):
		areas[elem] = int(areas[elem])
		print(areas[elem])
	max_area = 0
	print('-'*50)
	for elem in areas:
		if elem > max_area:
			max_area = elem
	#print max_index
	get = []
	#max_area = areas[max_index]
	for a in range(len(areas)):
		#print areas[a] - max_area
		if areas[a] - max_area in range(-1000, 1000) and areas[a] >= 1500:
			print(areas[a])
			get.append(contours1[a])
		else:
			pass
	#cnt=contours1[max_index]
		
	for elem in get:
		for t in elem:
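			# note: x,y,w,h depend only on elem, so every pass of this
			# inner loop redraws the same box over the same centroid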
			x,y,w,h = cv2.boundingRect(elem)
			print(x,y,w,h)
			cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
			centroid_x = (x + x+w)/2
			centroid_y = (y + y+h)/2
			if centroid_x > 0 and centroid_x < 66:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'1'] = 'white'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'4'] = 'white'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'7'] = 'white'
			if centroid_x > 66 and centroid_x < 133:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'2'] = 'white'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'5'] = 'white'
				elif centroid_y > 133 and centroid_y < 200:
					#position['white'].append(face+'8')
					position[face+'8'] = 'white'
			if centroid_x > 133 and centroid_x < 200:
				if centroid_y > 0 and centroid_y < 66:
					#position['white'].append(face+'3')
					position[face+'3'] = 'white'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'6'] = 'white'
				elif centroid_y > 133 and centroid_y < 200:
					#position['white'].append(face+'9')
					position[face+'9'] = 'white'		
			cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)
			
	"""
	for cnt in contours1:
		x,y,w,h = cv2.boundingRect(cnt)
		#print x,
		#print y
		cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
	"""
	
	#print '------------------------'

	#COLOR_MIN = np.array([15, 90, 130],np.uint8)		# HSV color code lower and upper bounds
	#COLOR_MAX = np.array([50, 160, 200],np.uint8)		# color yellow 

	COLOR_MIN = np.array([15, 90, 130],np.uint8)		# HSV color code lower and upper bounds
	COLOR_MAX = np.array([60, 245, 245],np.uint8)		# color yellow 
	
	frame_threshed = cv2.inRange(hsv_img, COLOR_MIN, COLOR_MAX)		# Thresholding image
	imgray = frame_threshed

	ret,thresh = cv2.threshold(frame_threshed,127,255,cv2.THRESH_BINARY)
	#thresh = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
	#            cv2.THRESH_BINARY,11,2)
	cv2.imshow('yellow', thresh)
	contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

	areas = [cv2.contourArea(c) for c in contours]
	for elem in range(len(areas)):
		areas[elem] = int(areas[elem])
		print(areas[elem])
	max_area = 0
	print('-'*50)
	for elem in areas:
		if elem > max_area:
			max_area = elem
	#print max_index
	get = []
	#max_area = areas[max_index]
	for a in range(len(areas)):
		
		if areas[a] - max_area in range(-1000, 1000) and areas[a] >= 1500:
			print(areas[a])	
			get.append(contours[a])
			

		else:
			pass
	#cnt=contours1[max_index]
	for elem in get:
		for t in elem:
			#print 'hey'
			x,y,w,h = cv2.boundingRect(elem)
			#print x,
			#print y
			cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
			centroid_x = (x + x+w)/2
			centroid_y = (y + y+h)/2
			cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)
			if centroid_x > 0 and centroid_x < 66:
				if centroid_y > 0 and centroid_y < 66:
					position[face+'1'] = 'yellow'

				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'4'] = 'yellow'
				elif centroid_y > 133 and centroid_y < 200:

					position[face+'7'] = 'yellow'
			if centroid_x > 66 and centroid_x < 133:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'2'] = 'yellow'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'5'] = 'yellow'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'8'] = 'yellow'
			if centroid_x > 133 and centroid_x < 200:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'3'] = 'yellow'
				elif centroid_y > 66 and centroid_y < 133:
					position[face+'6'] = 'yellow'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'9'] = 'yellow'	

	print(type(contours))
	"""for cnt in contours:
		x,y,w,h = cv2.boundingRect(cnt)
		#print x,
		#print y
		cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
		cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
		centroid_x = (x + x+w)/2
		centroid_y = (y + y+h)/2
		cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)"""


	#lower_blue = np.array([80,180,140], dtype=np.uint8)
	#upper_blue = np.array([140,245,205], dtype=np.uint8)

	lower_blue = np.array([80,180,190], dtype=np.uint8)
	upper_blue = np.array([120,255,255], dtype=np.uint8)

	frame_threshed3 = cv2.inRange(hsv_img, lower_blue, upper_blue)		# Thresholding image
	imgray3 = frame_threshed3
	ret,thresh3 = cv2.threshold(frame_threshed3,127,255,3)

	cv2.imshow('blue', frame_threshed3)

	contours3, hierarchy3 = cv2.findContours(thresh3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

	areas = [cv2.contourArea(c) for c in contours3]
	for elem in range(len(areas)):
		areas[elem] = int(areas[elem])
		#print areas[elem]
	max_area = 0
	for elem in areas:
		if elem > max_area:
			max_area = elem
	#print max_index
	get = []
	print('-'*50)
	for a in range(len(areas)):
		#print areas[a] - max_area
		if areas[a] - max_area in range(-1200, 1200) and areas[a] >= 1500:
			#print areas[a]
			get.append(contours3[a])
		else:
			pass
	#cnt=contours1[max_index]
	for elem in get:
		for t in elem:
			x,y,w,h = cv2.boundingRect(elem)
			#print x,
			#print y
			cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
			centroid_x = (x + x+w)/2
			centroid_y = (y + y+h)/2
			cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)
			if centroid_x > 0 and centroid_x < 66:
				if centroid_y > 0 and centroid_y < 66:
					position[face+'1'] = 'blue'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'4'] = 'blue'
				elif centroid_y > 133 and centroid_y < 200:

					position[face+'7'] = 'blue'
			if centroid_x > 66 and centroid_x < 133:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'2'] = 'blue'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'5'] = 'blue'
				elif centroid_y > 133 and centroid_y < 200:
					position[face+'8'] = 'blue'
			if centroid_x > 133 and centroid_x < 200:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'3'] = 'blue'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'6'] = 'blue'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'9'] = 'blue'	

	"""for cnt in contours3:
		x,y,w,h = cv2.boundingRect(cnt)
		#print x,
		#print y
		cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
		centroid_x = (x + x+w)/2
		centroid_y = (y + y+h)/2
		cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)"""


	#lower_orange = np.array([0, 130, 90],np.uint8)		# HSV color code lower and upper bounds
	#upper_orange = np.array([20, 210, 170],np.uint8)		# color orange
	lower_orange = np.array([5, 150, 150],np.uint8)		# HSV color code lower and upper bounds
	upper_orange = np.array([15, 235, 250],np.uint8)		# color orange

	frame_threshed2 = cv2.inRange(hsv_img, lower_orange, upper_orange)		# Thresholding image
	imgray2 = frame_threshed2
	ret,thresh2 = cv2.threshold(frame_threshed2,127,255,2)
	cv2.imshow('Orange', frame_threshed2)
	contours2, hierarchy2 = cv2.findContours(thresh2,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

	areas = [cv2.contourArea(c) for c in contours2]
	for elem in range(len(areas)):
		areas[elem] = int(areas[elem])
		#print areas[elem]
	max_area = 0
	for elem in areas:
		if elem > max_area:
			max_area = elem
	#print max_index
	get = []
	print('-'*50)
	for a in range(len(areas)):
		if areas[a] - max_area in range(-1000, 1000) and areas[a] >= 1500:

			#print areas[a]
			get.append(contours2[a])
		else:
			pass
	#cnt=contours1[max_index]
	for elem in get:
		for t in elem:
			x,y,w,h = cv2.boundingRect(elem)
			#print x,
			#print y
			cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
			centroid_x = (x + x+w)/2
			centroid_y = (y + y+h)/2
			cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)
			if centroid_x > 0 and centroid_x < 66:
				if centroid_y > 0 and centroid_y < 66:
					position[face+'1'] = 'orange'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'4'] = 'orange'
				elif centroid_y > 133 and centroid_y < 200:

					position[face+'7'] = 'orange'
			if centroid_x > 66 and centroid_x < 133:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'2'] = 'orange'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'5'] = 'orange'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'8'] = 'orange'
			if centroid_x > 133 and centroid_x < 200:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'3'] = 'orange'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'6'] = 'orange'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'9'] = 'orange'	

	"""for cnt in contours2:
		x,y,w,h = cv2.boundingRect(cnt)
		#print x,
		#print y
		cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)"""

	#lower_green = np.array([60, 120, 80],np.uint8)		# HSV color code lower and upper bounds
	#upper_green = np.array([100, 170, 120],np.uint8)		# color green
	lower_green = np.array([60, 110, 110],np.uint8)		# HSV color code lower and upper bounds
	upper_green = np.array([100, 220, 250],np.uint8)		# color green
	

	frame_threshed4 = cv2.inRange(hsv_img, lower_green, upper_green)		# Thresholding image
	imgray4 = frame_threshed4
	ret,thresh4 = cv2.threshold(frame_threshed4,127,255,0)
	cv2.imshow('green', frame_threshed4)
	contours4, hierarchy4 = cv2.findContours(thresh4,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

	areas = [cv2.contourArea(c) for c in contours4]
	for elem in range(len(areas)):
		areas[elem] = int(areas[elem])
		#print areas[elem]
	max_area = 0
	for elem in areas:
		if elem > max_area:
			max_area = elem
	#print max_index
	get = []
	#max_area = areas[max_index]
	for a in range(len(areas)):
		#print areas[a] - max_area
		if areas[a] - max_area in range(-1000, 1000) and areas[a] >= 1500:

			get.append(contours4[a])
		else:
			pass
	#cnt=contours1[max_index]
	for elem in get:
		for t in elem:
			x,y,w,h = cv2.boundingRect(elem)
			#print x,
			#print y
			cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
			centroid_x = (x + x+w)/2
			centroid_y = (y + y+h)/2
			cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)
			if centroid_x > 0 and centroid_x < 66:
				if centroid_y > 0 and centroid_y < 66:
					position[face+'1'] = 'green'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'4'] = 'green'
				elif centroid_y > 133 and centroid_y < 200:

					position[face+'7'] = 'green'
			if centroid_x > 66 and centroid_x < 133:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'2'] = 'green'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'5'] = 'green'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'8'] = 'green'
			if centroid_x > 133 and centroid_x < 200:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'3'] = 'green'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'6'] = 'green'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'9'] = 'green'	
	"""for cnt in contours4:
		x,y,w,h = cv2.boundingRect(cnt)
		#print x,
		#print y
		cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
		centroid_x = (x + x+w)/2
		centroid_y = (y + y+h)/2
		cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)"""


	#lower_red = np.array([140, 120, 70],np.uint8)		# HSV color code lower and upper bounds
	#upper_red = np.array([210, 220, 170],np.uint8)		# color red
	lower_red = np.array([120, 120, 140],np.uint8)		# HSV color code lower and upper bounds
	upper_red = np.array([180, 250, 200],np.uint8)		# color red

	frame_threshed5 = cv2.inRange(hsv_img, lower_red, upper_red)		# Thresholding image
	imgray5 = frame_threshed5
	ret,thresh5 = cv2.threshold(frame_threshed5,127,255,0)
	cv2.imshow('red', frame_threshed5)
	contours5, hierarchy5 = cv2.findContours(thresh5,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

	areas = [cv2.contourArea(c) for c in contours5]
	for elem in range(len(areas)):
		areas[elem] = int(areas[elem])
		#print areas[elem]
	max_area = 0
	for elem in areas:
		if elem > max_area:
			max_area = elem
	#print max_index
	get = []
	#max_area = areas[max_index]
	for a in range(len(areas)):
		if areas[a] - max_area in range(-1500, 1500) and areas[a] >= 1500:
			print(areas[a])

			get.append(contours5[a])
		else:
			pass
	#cnt=contours1[max_index]
	for elem in get:
		for t in elem:
			x,y,w,h = cv2.boundingRect(elem)
			#print x,
			#print y
			cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
			centroid_x = (x + x+w)/2
			centroid_y = (y + y+h)/2
			cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)
			if centroid_x > 0 and centroid_x < 66:
				if centroid_y > 0 and centroid_y < 66:
					position[face+'1'] = 'red'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'4'] = 'red'
				elif centroid_y > 133 and centroid_y < 200:

					position[face+'7'] = 'red'
			if centroid_x > 66 and centroid_x < 133:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'2'] = 'red'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'5'] = 'red'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'8'] = 'red'
			if centroid_x > 133 and centroid_x < 200:
				if centroid_y > 0 and centroid_y < 66:
					
					position[face+'3'] = 'red'
				elif centroid_y > 66 and centroid_y < 133:
					
					position[face+'6'] = 'red'
				elif centroid_y > 133 and centroid_y < 200:
					
					position[face+'9'] = 'red'	

	"""for cnt in contours5:
		x,y,w,h = cv2.boundingRect(cnt)
		#print x,
		#print y
		cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
		centroid_x = (x + x+w)/2
		centroid_y = (y + y+h)/2
		cv2.circle(im, (int(centroid_x), int(centroid_y)), 2, (255,0,0), 2)"""
	

	cv2.line(im,(0,66),(200,66),(255,0,0),2)
	cv2.line(im,(0,133),(200,133),(255,0,0),2)
	cv2.line(im,(66,0),(66,200),(255,0,0),2)
	cv2.line(im,(133,0),(133,200),(255,0,0),2)
	#cv2.imshow("Show",im)
	cv2.imshow(imageName, im)
	cv2.imwrite(imageName+'_extracted.jpg', im)

	#cv2.waitKey()
	#cv2.destroyAllWindows()

	return position, im
コード例 #58
def main():
    capWebcam = cv2.VideoCapture(1)  # declare a VideoCapture object and associate to webcam, 1 => use 2nd webcam (0 would be the 1st)

    # show original resolution
    print("default resolution = " + str(capWebcam.get(cv2.CAP_PROP_FRAME_WIDTH)) + "x" + str(
        capWebcam.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    capWebcam.set(cv2.CAP_PROP_FRAME_WIDTH, 320.0)  # change resolution to 320x240 for faster processing
    capWebcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 240.0)

    # show updated resolution
    print("updated resolution = " + str(capWebcam.get(cv2.CAP_PROP_FRAME_WIDTH)) + "x" + str(
        capWebcam.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    if capWebcam.isOpened() == False:  # check if VideoCapture object was associated to webcam successfully
        print("error: capWebcam not accessed successfully\n\n")  # if not, print error message to std out
        os.system("pause")  # pause until user presses a key so user can see error message
        return  # and exit function (which exits program)
    # end if

    while cv2.waitKey(1) != 27 and capWebcam.isOpened():  # until the Esc key is pressed or webcam connection is lost
        blnFrameReadSuccessfully, imgOriginal = capWebcam.read()  # read next frame

        if not blnFrameReadSuccessfully or imgOriginal is None:  # if frame was not read successfully
            print("error: frame not read from webcam\n")  # print error message to std out
            os.system("pause")  # pause until user presses a key so user can see error message
            break  # exit while loop (which exits program)
        # end if

        imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)

        imgThreshLow = cv2.inRange(imgHSV, np.array([0, 135, 135]), np.array([18, 255, 255]))
        imgThreshHigh = cv2.inRange(imgHSV, np.array([165, 135, 135]), np.array([179, 255, 255]))

        imgThresh = cv2.add(imgThreshLow, imgThreshHigh)

        imgThresh = cv2.GaussianBlur(imgThresh, (3, 3), 2)

        imgThresh = cv2.dilate(imgThresh, np.ones((5, 5), np.uint8))
        imgThresh = cv2.erode(imgThresh, np.ones((5, 5), np.uint8))

        intRows, intColumns = imgThresh.shape

        circles = cv2.HoughCircles(imgThresh, cv2.HOUGH_GRADIENT, 5,
                                   intRows / 4)  # fill variable circles with all circles in the processed image

        if circles is not None:  # this line is necessary to keep program from crashing on next line if no circles were found
            for circle in circles[0]:  # for each circle
                x, y, radius = circle  # break out x, y, and radius
                print("ball position x = " + str(x) + ", y = " + str(y) + ", radius = " + str(
                    radius))  # print ball position and radius
                cv2.circle(imgOriginal, (x, y), 3, (0, 255, 0),
                           -1)  # draw small green circle at center of detected object
                cv2.circle(imgOriginal, (x, y), radius, (0, 0, 255), 3)  # draw red circle around the detected object
                # end for
        # end if

        cv2.namedWindow("imgOriginal", cv2.WINDOW_AUTOSIZE)  # create windows, use WINDOW_AUTOSIZE for a fixed window size
        cv2.namedWindow("imgThresh", cv2.WINDOW_AUTOSIZE)  # or use WINDOW_NORMAL to allow window resizing

        cv2.imshow("imgOriginal", imgOriginal)  # show windows
        cv2.imshow("imgThresh", imgThresh)
    # end while

    cv2.destroyAllWindows()  # remove windows from memory

    return
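
The two inRange calls in main() exist because red straddles the wrap-around of OpenCV's 0-179 hue axis, so it needs one window near 0 and one near 179, combined with cv2.add. A minimal standalone sketch of the same masking step, reusing the bounds from main() (the helper name red_mask is an assumption):

import cv2
import numpy as np

def red_mask(img_bgr):
    # red wraps around the ends of the 0-179 hue axis, so two windows
    # are needed: one near 0 and one near 179, OR-ed together
    hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
    low = cv2.inRange(hsv, np.array([0, 135, 135]), np.array([18, 255, 255]))
    high = cv2.inRange(hsv, np.array([165, 135, 135]), np.array([179, 255, 255]))
    return cv2.bitwise_or(low, high)  # equivalent to cv2.add on binary masks
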
     
     
     # side lengths of the triangle formed by two hull points (start, end)
     # and the deepest defect point (far)
     a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
     b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
     c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)

     # Heron's formula: triangle area from the semi-perimeter s
     s = (a + b + c) / 2
     ar = math.sqrt(s * (s - a) * (s - b) * (s - c))

     # distance of the defect point from the hull edge (height of the triangle)
     d = (2 * ar) / a

     # angle at the defect point via the law of cosines;
     # 57 ~= 180/pi converts radians to degrees
     angle = math.acos((b**2 + c**2 - a**2) / (2*b*c)) * 57

     # a narrow, deep defect is the valley between two extended fingers
     if angle <= 90 and d > 30:
         l += 1
         cv2.circle(roi, far, 3, [255,0,0], -1)

     cv2.line(roi, start, end, [0,255,0], 2)


 # number of fingers = number of counted defects + 1
 l += 1
 
 font = cv2.FONT_HERSHEY_SIMPLEX
 # overlay the recognised count on the frame
 if l == 1:
     if areaCnt < 2000:
         cv2.putText(frame, 'Put your hand in the box', (0,50), font, 1, (0,0,255), 3, cv2.LINE_AA)
     else:
         if areaRatio < 12:
             cv2.putText(frame, '0', (0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
         elif areaRatio < 17.5:
             cv2.putText(frame, 'Best luck', (0,50), font, 2, (0,0,255), 3, cv2.LINE_AA)
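
The fragment above begins inside a loop whose header is not part of this excerpt; the start/end/far geometry is the usual cv2.convexityDefects finger counter. A minimal self-contained sketch of that surrounding loop, under the assumption that approx is the approximated hand contour (count_defects, approx and roi are illustrative names, not recovered from the source):

import math
import cv2

def count_defects(roi, approx):
    # for each convexity defect, test whether the valley between two hull
    # points is narrow and deep enough to be the gap between two fingers
    hull = cv2.convexHull(approx, returnPoints=False)
    defects = cv2.convexityDefects(approx, hull)
    l = 0
    if defects is not None:
        for i in range(defects.shape[0]):
            s, e, f, _ = defects[i, 0]
            start = tuple(approx[s][0])
            end = tuple(approx[e][0])
            far = tuple(approx[f][0])
            a = math.dist(start, end)
            b = math.dist(start, far)
            c = math.dist(end, far)
            if a == 0 or b == 0 or c == 0:
                continue  # degenerate triangle, skip
            s_half = (a + b + c) / 2
            area = math.sqrt(max(s_half * (s_half - a) * (s_half - b) * (s_half - c), 0))
            d = 2 * area / a  # height of the defect triangle
            cos_angle = max(-1.0, min(1.0, (b**2 + c**2 - a**2) / (2 * b * c)))
            angle = math.degrees(math.acos(cos_angle))
            if angle <= 90 and d > 30:
                l += 1
                cv2.circle(roi, far, 3, (255, 0, 0), -1)
            cv2.line(roi, start, end, (0, 255, 0), 2)
    return l + 1  # fingers = counted defects + 1
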
コード例 #60
def hueFinder(image, verbosity=0):
	"""Given an image of a fruit, it finds the center of the fruit and draws a radius from the center to approximate the hue."""
	image_bw = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
	th1 = cv2.adaptiveThreshold(image_bw.copy(), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
	image_copy = image.copy()

	radMin = 999
	dpMin = 1  # dp=0 is not a valid HoughCircles parameter, so fall back to dp=1 if no circle is ever found
	for dp in range(1, 10):
		if verbosity > 1:
			image = image_copy.copy()
		circ = cv2.HoughCircles(image_bw.copy(), cv2.HOUGH_GRADIENT, dp, minDist=400, minRadius=80)
		if circ is not None:
			for c in circ[0]:  # circ has shape (1, n, 3); iterate over the detected circles
				x, y, r = c.astype("int")
				if radMin > r and r < 200:  # remember the smallest plausible radius and the dp that produced it
					radMin = r
					dpMin = dp
				if verbosity > 1:
					print(dp)
					cv2.circle(image, (x, y), r, (0, 255, 0), 2)
					showImage(image, title=str(dp), waitTime=500)
		elif verbosity > 1:
			print("no circles found for dp =", dp)
	if verbosity > 1:
		image = image_copy.copy()

	circ = cv2.HoughCircles(image_bw.copy(), cv2.HOUGH_GRADIENT, dpMin, minDist=400, minRadius=80)

	if circ is not None:
		imageHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
		x, y, r = circ[0, 0].astype("int")
		print(radMin)
		if radMin > 110:
			radMin = 70  # clamp an implausibly large radius to a safe default
		hues = []
		values = []
		imageMasked = np.zeros(imageHSV.shape[:2])
		for i in range(imageHSV.shape[0]):
			for j in range(imageHSV.shape[1]):
				dx = i - y  # i indexes rows (the y axis), j indexes columns (the x axis)
				dy = j - x
				# sample only pixels inside the slightly shrunk circle whose hue
				# falls in the yellow-to-green band (23-60 on OpenCV's 0-179 hue scale)
				if (dx**2 + dy**2) <= (radMin - 10)**2 and 23 < imageHSV[i][j][0] < 60:
					imageMasked[i][j] = imageHSV[i][j][0]
					hues.append(imageHSV[i][j][0])
					values.append(imageHSV[i][j][2])

		if verbosity > 0:
			plt.imshow(imageMasked)
			plt.colorbar()
			plt.show()

		if not hues:  # no qualifying pixels were sampled inside the circle
			return ("UNKNOWN", -1, -1)

		# empirically fitted linear decision rule over mean value and mean hue
		label = "GREEN" if (0.26307 * np.mean(values) - 1.76579 * np.mean(hues)) < -0.00985 else "YELLOW"
		return (label, np.mean(hues), np.mean(values))

	else:
		cv2.destroyAllWindows()
		return ("UNKNOWN",-1,-1)