def describe(self, image):
     # Convert the image to HSV color space and initialize the features
     image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
     features = []
 
     # get the dimensions of the image and compute the center of the image
     (h, w)   = image.shape[:2]
     (cX, cY) = (int(w * 0.5), int(h * 0.5))
 
     # Divide the image into four segments
     segments = [(0,cX,0,cY),(cX,w,0,cY),(cX,w,cY,h),(0,cX,cY,h)]
 
     # Construct elliptical mask representing center of the image
     (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
     ellipMask = np.zeros(image.shape[:2], dtype="uint8")
     cv2.ellipse(ellipMask,(cX,cY),(axesX,axesY),0,0,360,255,-1)
  
     # loop over all the segments
     for (startX, endX, startY, endY) in segments:
         # construct a mask for each corner of the image, subtracting
         # the elliptical center from it
         cornerMask = np.zeros(image.shape[:2], dtype = "uint8")
         cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
         cornerMask = cv2.subtract(cornerMask, ellipMask)
     
         # Extract a color histogram from the image then update the feature vector
         hist = self.histogram(image, cornerMask)
         features.extend(hist)
     
     # Extract a color histogram from elliptical region and update the feature vector
     hist = self.histogram(image, ellipMask)
     features.extend(hist)
     return features
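The describe() implementations in these examples all call a histogram() helper that the snippets omit. A minimal sketch of what that helper usually looks like in this CBIR pattern, assuming the descriptor stores its per-channel bin counts in a self.bins attribute such as (8, 12, 3):

    def histogram(self, image, mask):
        # compute a 3D HSV color histogram over the masked region only,
        # using the bin counts stored on the descriptor (assumed attribute)
        hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,
                            [0, 180, 0, 256, 0, 256])
        # normalize so that images of different sizes are comparable
        hist = cv2.normalize(hist, hist).flatten()
        return hist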
Example #2
  def describe(self, image):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    features = []

    (h, w) = image.shape[:2]
    (cX, cY) = (int(w * 0.5), int(h * 0.5))

    segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h), (0, cX, cY, h)]

    (xL, yL) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
    elli = np.zeros(image.shape[:2], dtype = 'uint8')
    cv2.ellipse(elli, (cX, cY), (xL, yL), 0, 0, 360, 255, -1)

    for (x0, x1, y0, y1) in segments:
      rect = np.zeros(image.shape[:2], dtype = 'uint8')
      cv2.rectangle(rect, (x0, y0), (x1, y1), 255, -1)
      rect = cv2.subtract(rect, elli)

      hist = self.histogram(image, rect)
      features.extend(hist)

    hist = self.histogram(image, elli)
    features.extend(hist)

    return features
Example #3
File: Fly.py Project: isk2/Flylab
    def UpdateWingMask(self, npfRoiMean):
        #global globalLock
        
        self.npMaskWings = N.zeros([self.heightRoi, self.widthRoi], dtype=N.uint8)


        # Coordinates of the body ellipse.
        centerBody = (self.widthRoi // 2,
                      self.heightRoi // 2)
        sizeBody = (self.lengthBody, 
                    self.widthBody)
        angleBody = 0.0
        
        # Left wing ellipse.
        sizeLeft = (self.lengthBody * 10 // 10,
                    self.lengthBody * 7 // 10)
        centerLeft = (centerBody[0] - sizeLeft[0] // 2 + self.lengthBody * 1 // 10,
                      centerBody[1] - sizeLeft[1] // 2 - 1)
        angleLeft = 0.0

        # Right wing ellipse.
        sizeRight = (self.lengthBody * 10 // 10,
                     self.lengthBody * 7 // 10)
        centerRight = (centerBody[0] - sizeRight[0] // 2 + self.lengthBody * 1 // 10,
                       centerBody[1] + sizeRight[1] // 2)
        angleRight = 0.0


        # Draw ellipses on the mask.
        #cv2.ellipse(self.npMaskWings,
        #            (centerBody, sizeBody, angleBody),
        #            255, cv.CV_FILLED)
        cv2.ellipse(self.npMaskWings,
                    (centerLeft, sizeLeft, angleLeft*180.0/N.pi),
                    255, cv.CV_FILLED)
        cv2.ellipse(self.npMaskWings,
                    (centerRight, sizeRight, angleRight*180.0/N.pi),
                    255, cv.CV_FILLED)
            
        if (npfRoiMean is not None):
            # Mean Fly Body Mask.
            #with globalLock:
            #    self.thresholdForeground = rospy.get_param('tracking/thresholdForeground', 25.0)
            (threshOut, npMaskBody) = cv2.threshold(npfRoiMean.astype(N.uint8), self.thresholdForeground, 255, cv2.THRESH_BINARY_INV)
            self.npMaskWings = cv2.bitwise_and(self.npMaskWings, npMaskBody)
        
    
            # Publish non-essential stuff.
            if globalNonessential:
                npMaskWings1 = copy.copy(self.npMaskWings)
                npMaskWings1.resize(npMaskWings1.size)
                imgMaskWings  = self.cvbridge.cv_to_imgmsg(cv.fromarray(self.npMaskWings), 'passthrough')
                imgMaskWings.data = list(npMaskWings1)
                self.pubImageMask.publish(imgMaskWings)
        
                npMaskBody1 = copy.copy(npMaskBody)
                npMaskBody1.resize(npMaskBody1.size)
                imgMaskBody  = self.cvbridge.cv_to_imgmsg(cv.fromarray(npMaskBody), 'passthrough')
                imgMaskBody.data = list(npMaskBody1)
                self.pubImageMaskBody.publish(imgMaskBody)
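Note the tuple form of cv2.ellipse used above: besides the explicit (center, axes, angle, startAngle, endAngle) signature, cv2.ellipse also accepts a single ((cx, cy), (width, height), angle) box, as returned by cv2.fitEllipse or cv2.minAreaRect. A small sketch contrasting the two call forms:

import cv2
import numpy as np

mask = np.zeros((200, 200), dtype=np.uint8)

# explicit form: center, HALF-axes, rotation, arc start/end, color, thickness
cv2.ellipse(mask, (100, 100), (60, 30), 0, 0, 360, 255, -1)

# box form: ((cx, cy), (width, height), angle); note the box carries
# FULL axis lengths, not half-axes, and always draws the whole ellipse
box = ((100.0, 100.0), (120.0, 60.0), 0.0)
cv2.ellipse(mask, box, 255, 2)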
Example #4
    def describe(self, img):
        image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        features = []

        (h, w) = image.shape[:2]
        (cX, cY) = (int(w * 0.5), int(h * 0.5))

        segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h), (0, cX, cY, h)]
        (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
        ellipMask = np.zeros(image.shape[:2], dtype="uint8")
        cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)

        for (startX, endX, startY, endY) in segments:
            # construct a mask for each corner of the image, subtracting
            # the elliptical center from it
            cornerMask = np.zeros(image.shape[:2], dtype="uint8")
            cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
            cornerMask = cv2.subtract(cornerMask, ellipMask)

            # extract a color histogram from the image, then update the
            # feature vector
            hist = self.histogram(image, cornerMask)
            features.extend(hist)

        # extract a color histogram from the elliptical region and
        #  update the feature vector
        hist = self.histogram(image, ellipMask)
        features.extend(hist)
        # return the feature vector
        return features
Example #5
 def ransac(self, ntrials, contour, small_gray, draw):
     # RANSAC implementation starts
     r2centerx = []
     r2centery = []
     r2majrad = []
     r2minrad = []
     r2angle = []
     for i in range(ntrials):
         if len(contour) > 60:
             samples = contour[np.random.choice(len(contour), int(len(contour) / 10))]
             ellipse = cv2.fitEllipse(samples)
             if draw:
                 cv2.ellipse(small_gray, ellipse, (0, 0, 255), 2)
             r2centerx.append(ellipse[0][0])
             r2centery.append(ellipse[0][1])
             r2majrad.append(ellipse[1][1])
             r2minrad.append(ellipse[1][0])
             r2angle.append(ellipse[2])
         else:
             r2centerx.append(100 * (i % 2))
             r2centery.append(100 * (i % 2))
             r2majrad.append(100 * (i % 2))
             r2minrad.append(100 * (i % 2))
             r2angle.append(100 * (i % 2))
     r2centerx = np.asarray(r2centerx)
     r2centery = np.asarray(r2centery)
     r2majrad = np.asarray(r2majrad)
     r2minrad = np.asarray(r2minrad)
     r2angle = np.asarray(r2angle)
     return r2centerx, r2centery, r2majrad, r2minrad, r2angle, small_gray
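A typical way to consume these per-trial estimates is to take the median of each returned array, which is robust to trials that happened to fit outlier-heavy samples. A hedged usage sketch (the tracker object and parameter values are illustrative):

import cv2
import numpy as np

cx, cy, major, minor, ang, vis = tracker.ransac(50, contour, small_gray, draw=False)
center = (int(np.median(cx)), int(np.median(cy)))
axes = (int(np.median(minor) / 2), int(np.median(major) / 2))  # half-axes for cv2.ellipse
cv2.ellipse(vis, center, axes, float(np.median(ang)), 0, 360, (0, 255, 0), 2)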
Example #6
 def process(self, img):
     img = super(GoodFeaturesProcessor, self).process(img)
     corners = cv2.goodFeaturesToTrack(img, int(self.num), int(self.distance), float(self.quality))
     if corners is not None:
         for c in corners:
             cv2.ellipse(img, (int(c[0][0]), int(c[0][1])), (5, 5), 0, 0, 360, 255, -1)
     return img
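Drawing a filled 5-pixel ellipse with equal axes is just a filled circle, so this loop is often written with cv2.circle instead; a sketch of the equivalent, including the None check that goodFeaturesToTrack requires:

corners = cv2.goodFeaturesToTrack(img, 25, 0.01, 10)
if corners is not None:
    for x, y in corners.reshape(-1, 2):
        cv2.circle(img, (int(x), int(y)), 5, 255, -1)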
Example #7
    def detect_motion_new(self, winName, interval=1):
        #Implemented after talking with Dr Smart about image averaging and background extraction

        min_contour_area = 25
        max_contour_area = 1250
        retval = False
        threshold = 65
        try:
            _image_static = None
            _image_static = self.save_image(persist=False)
            _image_static = cv2.cvtColor(numpy.array(_image_static), cv2.COLOR_RGB2GRAY)
            _image_static = cv2.GaussianBlur(_image_static, (21, 21), 0)

            accumulator = numpy.float32(_image_static)
            while True:
                sleep(interval)
                _image_static = self.save_image(persist=False)
                _image_static = cv2.cvtColor(numpy.array(_image_static), cv2.COLOR_RGB2GRAY)
                _image_static = cv2.GaussianBlur(_image_static, (21, 21), 0)

                cv2.accumulateWeighted(numpy.float32(_image_static), accumulator, 0.1)

                _image_static = cv2.convertScaleAbs(accumulator)

                _image_dynamic = self.save_image(persist=False)
                _image_dynamic1 = cv2.cvtColor(numpy.array(_image_dynamic), cv2.COLOR_RGB2GRAY)
                _image_dynamic1 = cv2.GaussianBlur(_image_dynamic1, (21, 21), 0)

                # ideas from http://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html#gsc.tab=0
                _delta = cv2.absdiff(_image_dynamic1, _image_static)

                _threshold = cv2.threshold(_delta, 17, 255, cv2.THRESH_BINARY)[1]
                # dilate the thresholded image to fill in holes, then find contours on the thresholded image
                _threshold = cv2.dilate(_threshold, None, iterations=5)

                (img, contours, _) = cv2.findContours(_threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

                dyn = cv2.cvtColor(numpy.array(_image_dynamic), cv2.COLOR_RGB2GRAY)
                # loop over the contours
                for contour in contours:
                    # if the contour is too small, ignore it
                    _area = cv2.contourArea(contour)
                    if _area < min_contour_area: # or _area > max_contour_area:
                        continue  # skip to the next

                    # compute the bounding box for the contour, draw it on the frame,

                    (x, y, w, h) = cv2.boundingRect(contour)
                    #cv2.rectangle(dyn, (x, y), (x + w, y + h), (0, 12, 255), 2)
                    cv2.ellipse(dyn, (x+5, y+25), (10, 20), 90, 0, 360, (255, 0, 0), 2)

                cv2.imshow(winName, numpy.hstack([dyn, _threshold]))

                key = cv2.waitKey(10)
                if key == 27:
                    cv2.destroyWindow(winName)
                    break

        except Exception as ex:
            print(ex)
Example #8
File: video.py Project: nico-baudoin/video
def getContours(img, fit_type='ellipse', AREA_EXCLUSION_LIMITS=(200, 2000), CELL_RADIUS_THRESHOLD = 4):
    contours, hierarchy = cv2.findContours(img,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    coord_list = []
    for i in range(len(contours)):
        if fit_type == 'circle':
            radius_list = []
            center_list = []
            (x,y),radius = cv2.minEnclosingCircle(contours[i])
            if radius > CELL_RADIUS_THRESHOLD:
                center = (int(x), int(y))
                center_list.append(center)
                radius_list.append(radius)
                cv2.circle(img,center,int(radius),(0,255,0),-1)
            coord_list.append([center_list, radius_list])
            
        elif fit_type == 'ellipse':
            if len(contours[i]) >= 5:
                ellipse = cv2.fitEllipse(contours[i])
                area = np.pi*np.prod(ellipse[1])
                if area >= AREA_EXCLUSION_LIMITS[0] and area < AREA_EXCLUSION_LIMITS[1]:
                    cv2.ellipse(img,ellipse,(0,255,0),-1)
    return img, contours, coord_list
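A hedged usage sketch for getContours(), assuming a BGR frame as the starting point (names are illustrative). The function expects a single-channel binary image, since it passes the input straight to cv2.findContours:

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
vis, contours, coords = getContours(binary, fit_type='ellipse')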
Example #9
def processEye(eyesubrect, vis_roi, gray_roi, vis, gray):
    """threshold
    get contours
    find largest
    fit ellipse
    """
    # apply threshold
    thresh = gray_roi.copy()
    #cv2.adaptiveThreshold(leftthresh, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 25, 25, leftthresh)
    thresh = thresholdByPercentage(thresh, .075)
    
    # find contours from thresholded img
    contours, hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    
    maxarea = 0
    maxidx = 0
    
    # find contour with largest area
    for idx in range(len(contours)):
        a = cv2.contourArea(contours[idx])
        if maxarea < a:
            maxarea = a
            maxidx = idx
    
    cv2.drawContours(vis_roi, contours, maxidx, (0,0,255), -1)
    
    # fit ellipse to the detected contour
    ellipseBox = cv2.fitEllipse(contours[maxidx])
    cv2.ellipse(vis_roi, ellipseBox, (50, 150, 255))
    
    return thresh, ellipseBox
Example #10
	def describe(self, image):
		# convert to HSV color space
		image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
		features = []

		# grab the dimensions and compute the center of the image
		(h, w) = image.shape[:2]
		(cX, cY) = (int(w * 0.5), int(h * 0.5))

		# divide the image into four segments
		segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h),
			(0, cX, cY, h)]

		# elliptical mask for the image center
		(axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
		ellipMask = np.zeros(image.shape[:2], dtype = "uint8")
		# draw the filled ellipse onto the mask
		cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0, 0, 360, 255, -1)

		for (startX, endX, startY, endY) in segments:

			cornerMask = np.zeros(image.shape[:2], dtype = "uint8")
			cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
			cornerMask = cv2.subtract(cornerMask, ellipMask)


			hist = self.histogram(image, cornerMask)
			features.extend(hist)

		# extract a histogram from the central elliptical region
		hist = self.histogram(image, ellipMask)
		features.extend(hist)

		return features
Example #11
def track_using_trajectories( cur, prev ):
    global curr_loc_ 
    global static_features_img_
    p0 = cv2.goodFeaturesToTrack( cur, 200, 0.01, 5 )
    insert_int_corners( p0 )

    draw_point( cur, p0, 1 )

    ellipse, p1 = update_mouse_location( p0 )
    if p1 is not None:
        for p in p1:
            cv2.circle( cur, p, 10, 20, 2 )
    cv2.ellipse( cur, ellipse, 1 )
    cv2.circle( cur, curr_loc_, 10, 255, 3)
    display_frame( cur, 1 )
    # cv2.imshow( 'static features', static_features_img_ )
     return  # everything below this early return is intentionally disabled
    # Find a contour
    prevE = find_edges( prev )
    curE = find_edges( cur )
    img = curE - prevE
    cnts, hier = cv2.findContours( img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE )
    cnts = filter( ismouse, cnts )
    cv2.drawContours( img, cnts, -1, 255, 3 )
    display_frame( img, 1)
    return 
    p1, status, err = cv2.calcOpticalFlowPyrLK( prev, cur, p0 )
    mat = cv2.estimateRigidTransform( p0, p1, False )
    # print cv2.warpAffine( curr_loc_, mat, dsize=(2,1) )
    if mat is not None:
        dx, dy = mat[:,2]
        da = math.atan2( mat[1,0], mat[0,0] )
        trajectory_.append( (dx, dy, da) )
        print( "Transformation", dx, dy, da )
        curr_loc_ = (curr_loc_[0] - int(dy), curr_loc_[1] - int(dx))
Example #12
File: views.py Project: Topbantsman/cbir
def splitImage(image):
    masks = []

    # grab the dimensions and compute the center of the image
    (h, w) = image.shape[:2]
    (cX, cY) = (int(w * 0.5), int(h * 0.5))

    # divide the image into four rectangles/segments (top-left,
    # top-right, bottom-right, bottom-left)
    segments = [(0, cX, 0, cY), (cX, w, 0, cY), (cX, w, cY, h), (0, cX, cY, h)]

    # construct an elliptical mask representing the center of the image
    (axesX, axesY) = (int(w * 0.75) // 2, int(h * 0.75) // 2)
    ellipMask = np.zeros(image.shape[:2], dtype="uint8")
    # ellipse(image, centre, axes, rotation angle, start angle, end angle, colour, thickness)
    cv2.ellipse(ellipMask, (int(cX), int(cY)), (int(axesX), int(axesY)), 0, 0, 360, 255, -1)
    masks.append(ellipMask)

    # loop over the segments
    for (startX, endX, startY, endY) in segments:
        # construct a mask for each corner of the image, subtracting
        # the elliptical center from it
        cornerMask = np.zeros(image.shape[:2], dtype="uint8")
        cv2.rectangle(cornerMask, (startX, startY), (endX, endY), 255, -1)
        masks.append(cv2.subtract(cornerMask, ellipMask))

    return masks
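A plausible caller for splitImage(), computing one histogram per mask and concatenating them into a single feature vector (bin counts assumed, along the lines of the helper sketched under Example #1):

hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
features = []
for mask in splitImage(hsv):
    hist = cv2.calcHist([hsv], [0, 1, 2], mask, [8, 12, 3],
                        [0, 180, 0, 256, 0, 256])
    features.extend(cv2.normalize(hist, hist).flatten())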
Example #13
def display_objects(idx):
    _idx = (winsize - 1) // 2 + idx
    ellipses, dilated, morphed = detect_objects(images[_idx], imdiffs[idx], blursize=(3,3))
    for e in ellipses:
        cv2.ellipse(images[_idx], e, (255,255,0), 1)
    print("Detected %i Cars" % len(ellipses))
    display_image(images[_idx])
Example #14
    def gaussian(self, mean, covariance, label=None):
        """Draw 95% confidence ellipse of a 2-D Gaussian distribution.

        Parameters
        ----------
        mean : array_like
            The mean vector of the Gaussian distribution (ndim=1).
        covariance : array_like
            The 2x2 covariance matrix of the Gaussian distribution.
        label : Optional[str]
            A text label that is placed at the center of the ellipse.

        """
        # chi2inv(0.95, 2) = 5.9915
        vals, vecs = np.linalg.eigh(5.9915 * covariance)
        indices = vals.argsort()[::-1]
        vals, vecs = np.sqrt(vals[indices]), vecs[:, indices]

        center = int(mean[0] + .5), int(mean[1] + .5)
        axes = int(vals[0] + .5), int(vals[1] + .5)
        angle = int(180. * np.arctan2(vecs[1, 0], vecs[0, 0]) / np.pi)
        cv2.ellipse(
            self.image, center, axes, angle, 0, 360, self._color, 2)
        if label is not None:
            cv2.putText(self.image, label, center, cv2.FONT_HERSHEY_PLAIN,
                        2, self.text_color, 2)
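The scaling factor 5.9915 is the chi-square quantile chi2inv(0.95, 2), which turns the unit Mahalanobis ellipse into a 95% confidence region. A standalone sketch of the same computation with made-up numbers:

import cv2
import numpy as np

img = np.zeros((300, 300, 3), dtype=np.uint8)
mean = np.array([150.0, 150.0])
covariance = np.array([[400.0, 120.0], [120.0, 100.0]])

# eigendecomposition of the scaled covariance gives squared axis lengths
# and the orientation of the principal axis
vals, vecs = np.linalg.eigh(5.9915 * covariance)
order = vals.argsort()[::-1]
vals, vecs = np.sqrt(vals[order]), vecs[:, order]

center = (int(mean[0] + .5), int(mean[1] + .5))
axes = (int(vals[0] + .5), int(vals[1] + .5))
angle = int(180. * np.arctan2(vecs[1, 0], vecs[0, 0]) / np.pi)
cv2.ellipse(img, center, axes, angle, 0, 360, (0, 255, 0), 2)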
Example #15
def drawStuff(centerCordinates, image):
    # http://opencvpython.blogspot.no/2012/06/contours-2-brotherhood.html
    # http://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html#gsc.tab=0pyth
   ############## creating a minimum rectangle around the object ######################
    rect = cv2.minAreaRect(points=centerCordinates)
    box = cv2.cv.BoxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(image,[box],0,(255,255,255),2)

    ########### circle around object ###########

    (x, y),radius = cv2.minEnclosingCircle(centerCordinates)
    center = (int(x),int(y))
    radius = int(radius)
    cv2.circle(image, center, radius, (255,255,255),2)

    ########### fitting an ellipse ##############

    ellipse = cv2.fitEllipse(centerCordinates)
    cv2.ellipse(image,ellipse,(255,255,255),2)

    ##### fitting a line ###########

    rows,cols = image.shape[:2]
    [vx,vy,x,y] = cv2.fitLine(points=centerCordinates, distType=cv2.cv.CV_DIST_L2, param=0, reps=0.01, aeps=0.01)
    lefty = int((-x*vy/vx) + y)
    righty = int(((cols-x)*vy/vx)+y)
    cv2.line(image,(cols-1,righty),(0,lefty),(255,255,255),2)

    pixelSizeOfObject = radius  # an okay estimate for testing
    return image, pixelSizeOfObject
Example #16
File: OptiposLib.py Project: heathzj/moped
 def fieldMask(self, field, numberOfFieldsPerCircle):
     """
     Returns a square matrix of size 3 * self.markerSizePixels, where the elements corresponding to the given field are 1, and all other elements are 0.
     """
     
     if (field, numberOfFieldsPerCircle) in self.fieldMaskCache:
         return self.fieldMaskCache[(field, numberOfFieldsPerCircle)]
     else:
         halfSize = 3 * self.markerSizePixels // 2
         result = np.zeros((halfSize * 2, halfSize * 2), dtype = np.uint8)
         fillColor = 255
         if field == 0: # Background field, return a rectangle around the circles 
             result = np.bitwise_not(self.markerMask())
         elif 0 < field and field <= 2 * numberOfFieldsPerCircle:
             if field <= numberOfFieldsPerCircle:
                 # First circle
                 y = - 3 * self.markerSizePixels // 4
                 rotationAngle = (-90 + (field - 1) * 360 // numberOfFieldsPerCircle) % 360
             else:
                 # Second circle
                 y = 3 * self.markerSizePixels // 4
                 rotationAngle = (90 - (field - numberOfFieldsPerCircle) * 360 // numberOfFieldsPerCircle) % 360
             cv2.ellipse(result, (halfSize, halfSize + y), (self.markerSizePixels // 2, self.markerSizePixels // 2), 
                         rotationAngle, 0, 360 // numberOfFieldsPerCircle, fillColor, cv2.FILLED)
         else:
             raise Exception("MarkerCandidate.fieldMask: invalid field: " + str(field))
         self.fieldMaskCache[(field, numberOfFieldsPerCircle)] = result
         return result
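The key trick in fieldMask() is using the start/end-angle arguments of cv2.ellipse to fill only one sector of the circle (0 to 360 // numberOfFieldsPerCircle degrees), rotated into place via the angle parameter. A standalone sketch of filling a single 60-degree pie slice:

import cv2
import numpy as np

mask = np.zeros((200, 200), dtype=np.uint8)
# fill a 60-degree sector of a circle of radius 80,
# rotated 30 degrees from the horizontal axis
cv2.ellipse(mask, (100, 100), (80, 80), 30, 0, 60, 255, cv2.FILLED)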
Example #17
def main():
    np_img = cv2.imread('8.bmp', 0)
    
    blur = cv2.GaussianBlur(np_img,(5,5),0)
    ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    kernel = np.ones((3, 3), np.uint8)
    closing = cv2.morphologyEx(th3, cv2.MORPH_CLOSE, kernel, iterations=4)
    
    cont_img = closing.copy()
    contours, hierarchy = cv2.findContours(cont_img, cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
    
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < 200 or area > 8000:
            continue

        if len(cnt) < 5:
            continue

        ellipse = cv2.fitEllipse(cnt)
        cv2.ellipse(np_img, ellipse, (155,155,0), 2)    
    
    while True:
        cv2.imshow('final result', np_img)    
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #18
    def draw(self, image):
        color = (0, 0, 100)
        if self.id is not None:
            color = COLOR_TABLE[self.id % NUM_COLORS]

        cv2.drawContours(image, [self.contour], 
                        0, # draw the only contour
                        color = color, 
                        thickness = -1, # filled
                        lineType = cv2.CV_AA)
        cv2.ellipse(image, box=((self.centroid_x, self.centroid_y), (6,6), 0), color=(0,0,255),
                    thickness = -1)
        if self.z_w is not None:
            cv2.putText(image, "%s %s %s" % (int(self.x_w), int(self.y_w), int(self.z_w)),
                        (self.centroid_x + 5, self.centroid_y + 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, # font_scale
                        (0, 100, 180), # color
                        2 # thickness
                        )
            cv2.putText(image, "d:%s a:%s" % (int(self.median_depth), int(self.area)),
                        (self.centroid_x + 5, self.centroid_y + 25),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, # font_scale
                        (0, 100, 180), # color
                        2 # thickness
                        )            
Example #19
def debug_iter(box, frame, hull, timestamp):
    if box is not None:
        cv2.circle(frame, (int(np.round(box[0][0])), int(np.round(box[0][1]))), 10, (0, 255, 0))
        cv2.ellipse(frame, box, (0, 255, 0))
        cv2.polylines(frame, [hull], 1, (0, 0, 255))
    if random.random() < .5:
        cv2.imshow("Eye", frame)
    key = cv2.waitKey(20)
    # No user input
    if key == -1:
        return
    elif key == 27:
        return 'QUIT'
    elif key == 97: # a
        PARAMS['_delta'] += 5
    elif key == 122: # z
        PARAMS['_delta'] -= 5
    elif key == 115: # s
        PARAMS['_max_variation'] += .1
    elif key == 120: # x
        PARAMS['_max_variation'] -= .1
    elif key == 100: # d
        PARAMS['_min_diversity'] += .1
    elif key == 99: # c
        PARAMS['_min_diversity'] -= .1
    elif key == 102: # f
        PARAMS['pupil_intensity'] += 5
    elif key == 118: # v
        PARAMS['pupil_intensity'] -= 5
    if 97 <= key <= 122:
        print('Got key[%d]' % key)
        return 'RELOAD'
Example #20
def find_blob(im):    
    im[:30, :] = 0
    im[-30:, :] = 0
    im[:, :10] = 0
    im[:, -10:] = 0
    
#    thresh, im_bw = cv2.threshold(im, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
#    print thresh
    thresh, im_bw = cv2.threshold(im, 100, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(im_bw.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
#    moments = [cv2.moments(i).nu20 for i in contours]
#    all_bbox = [cv2.boundingRect(i) for i in contours]
    all_area = np.array([cv2.contourArea(c) for c in contours])
#    all_aspect_ratio = np.array([float(b[2]) / b[3] for b in all_bbox])    
    print(all_area)
    
    big_indices = np.nonzero(all_area > config.AREA_THRESH)[0]
    print(big_indices)
    
    ellipses = [cv2.fitEllipse(contours[i]) for i in big_indices]
    color = [0, 255, 0]
    im_rgb = cv2.cvtColor(im_bw, cv2.COLOR_GRAY2RGB)
    for ell in ellipses:
        center, dimension, angle = ell
        h, w = dimension
        print(h, w)
        cv2.ellipse(im_rgb, ell, color, 2, 8)
        
    cv2.namedWindow('threshold')
    cv2.moveWindow('threshold', 800, 100)
    cv2.imshow('threshold', im_rgb)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #21
 def scan(self, frame, hsv_frame, mask):
     """Updates all of the trackers for identified objects and updates the searcher, which is looking for new objects."""
     bproj = cv2.calcBackProject([hsv_frame], [0], self.hist, [0,255], 1)        
     bproj &= mask        
     for index, tracker in enumerate(self.tracking):
         original_bproj = bproj.copy()
         box, bproj, split = tracker.update(bproj)
         coords, dims, theta = box
         w,h = dims
         if split:
             self.splitTracker(tracker)
             del self.tracking[index]
             bproj = original_bproj
         if tracker.hasFound() and w > 0 and h > 0:
             cv2.ellipse(frame, box, self.averageColor, 2)
         else:
             del self.tracking[index]                
     box, bproj, split = self.searcher.update(bproj.copy())        
     if split:
         self.splitTracker(self.searcher)
         self.searcher = Tracker(self.frameDims, self.full_track_window, found = False)
     if self.searcher.hasFound(): # If searcher finds an object, start tracking that object and make a new searcher
         self.tracking.append(self.searcher)
         self.searcher = Tracker(self.frameDims, self.full_track_window, found = False)            
     return frame
Example #22
def fit_ellipses(img):
    import cv2
    
    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 5)
    #ret, thresh = cv2.threshold(img, 169, 255, 0)
    thresh = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    #print contours
    cntLengths = [len(i) for i in contours]
    cntMax = cntLengths.index(max(cntLengths))

    for i in range(len(contours)):
        if 10 < len(contours[i]) < 1500:
            cnt = contours[i]
            M = cv2.moments(cnt)
            #print M

            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            print("cx: ", cx)
            print("cy: ", cy)
             
            ellipse = cv2.fitEllipse(cnt)
            cv2.ellipse(img, ellipse, (0, 255, 0), 2)

    cv2.imshow('img', img)
    cv2.waitKey()
    cv2.destroyAllWindows()
Example #23
File: CvHelp.py Project: sosey/ginga
 def ellipse(self, pt, xr, yr, theta, pen, brush):
     x, y = pt
     if (brush is not None) and brush.fill:
         cv2.ellipse(self.canvas, (x, y), (xr, yr), theta, 0.0, 360.0,
                     brush.color, -1)
     cv2.ellipse(self.canvas, (x, y), (xr, yr), theta, 0.0, 360.0,
                 pen.color, pen.linewidth)
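The pen and brush arguments here are ginga drawing primitives carrying color, line width, and fill information. For a quick test outside ginga, simple stand-ins suffice (hypothetical names, not part of ginga's API; helper is assumed to be an instance of the class containing this method):

from collections import namedtuple

# hypothetical stand-ins for ginga's pen/brush objects
Pen = namedtuple('Pen', ['color', 'linewidth'])
Brush = namedtuple('Brush', ['color', 'fill'])

helper.ellipse((100, 100), 40, 20, 30.0,
               Pen((0, 255, 0), 2), Brush((0, 80, 0), True))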
Example #24
def draw_props(properties):
    global img, img_src

    img = img_src.copy()

    for props in properties:
        if props['Ellipse'] is not None:
            cv2.circle(img, props['Centroid'], 5, BLUE, 1)
            cv2.ellipse(img, props['Ellipse'], CYAN)
            cv2.drawContours(img, [props['ConvexHull']], 0, RED, 1)

            major_axis = ft.angled_line(props['Centroid'], props['Orientation'], props['MajorAxisLength']/2)
            cv2.line(img, tuple(major_axis[0]), tuple(major_axis[1]), RED)

            minor_axis = ft.angled_line(props['Centroid'], props['Orientation'] + 90, props['MinorAxisLength']/2)
            cv2.line(img, tuple(minor_axis[0]), tuple(minor_axis[1]), BLUE)

        box = cv2.cv.BoxPoints(props['BoundingBox'])
        box = np.int0(box)
        cv2.drawContours(img, [box], 0, CYAN, 1)

        for p in props['Extrema']:
            cv2.circle(img, p, 5, CYAN, 1)

    cv2.imshow('image', img)
Example #25
    def frame_track(self, cur_frame):
        vis = cur_frame.copy()
        hsv = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))

        prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
        prob &= mask
        term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
        # get rotated bounding box
        track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
        # get bounding box
        track_rect_rotate = cv2.boxPoints(track_box)
        # get rectangle 
        track_rect = cv2.boundingRect(track_rect_rotate)

        # store them in yaml compatible format
        track_rect_yaml=track_rect

        if self.show_backproj:
            vis[:] = prob[...,np.newaxis]

        pt1 = (track_rect_yaml[0],track_rect_yaml[1])
        pt2 = (track_rect_yaml[0] + track_rect_yaml[2], track_rect_yaml[1] + track_rect_yaml[3])
        cv2.rectangle(vis, pt1, pt2, (0, 255, 0), 2)
        cv2.ellipse(vis, track_box, (0, 0, 255), 2)

        self.add_frame_to_dataset_based_on_difference(cur_frame, track_rect_yaml)
        return vis
Example #26
def drawEllipses(bgrimg, ellipses):
    for ell in ellipses:
        center, axes, angle = ell
        center = tuple( map(int, center) )
        axes = tuple( map(int, axes) )
        angle = int(angle)
        cv2.ellipse(bgrimg, center, axes, angle, 0, 360, (255,0,0), 2)
Example #27
def test_all():
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument("confocal_file", help="File containing confocal data")
    parser.add_argument("output_dir", help="Output directory")

    args = parser.parse_args()
    if not os.path.isdir(args.output_dir):
        os.mkdir(args.output_dir)
    AutoName.directory = args.output_dir
    AutoWrite.on = False

    image_collection = unpack_data(args.confocal_file)

    for i in range(len(STOMATA)):
        stomata_timeseries = stomata_timeseries_lookup(i)
        for stomate in stomata_timeseries:
            fname = 'annotated_projection_stomate_{}_series_{}.png'.format(
                stomate.stomate_id, stomate.timepoint_id)
            fpath = os.path.join(AutoName.directory, fname)
            raw_zstack = image_collection.zstack_array(s=stomate.series, c=0)
            projected = max_intensity_projection(raw_zstack)
            gray_uint8 = normalise(projected) * 255
            annotation_array = np.dstack([gray_uint8, gray_uint8, gray_uint8])
            box = find_stomate_ellipse_box(raw_zstack, stomate.x, stomate.y)
            cv2.ellipse(annotation_array, box, (255, 0, 0))
            scipy.misc.imsave(fpath, annotation_array)
Example #28
def draw_bellipse(im,ell,c,t):
    #im = image to draw ell onto
    #ell= the ellipse to draw (x,y,axes,O,startO,EndO)
    #c  = the color to draw
    #t  = thickness of the line to draw
    #side effect: draws on the window
    cv2.ellipse(im, ell, c, t)
Example #29
 def draw_ellipse(self, minor_axis, major_axis, angle, x, y, image, color):
     if minor_axis > 0:
         print(minor_axis, major_axis, angle, x, y)
         center = (int(round(x*self.scale)), int(round(y*self.scale)))
         axis = (int(round(minor_axis*self.scale/2)), int(round(major_axis*self.scale/2)))
         angle_deg = np.rad2deg(angle)
         cv2.ellipse(image, center, axis, angle_deg, 0, 360, color, 1)
Example #30
 def draw_ellipse(self, initial_roi, roi, color_img, x_i, y_i, x_vertex, y_vertex, color=(0,255,0)):
     """Draw ellipse surrounding the hand object.
     
     Returns the position of the vertex of the best fit ellipse.        
     """
     purple = (153, 0, 153)        
     box = self.fit_ellipse(color_img, x_i, y_i)
     ( (x, y), (width, height), angle ) = box
     h, w = (height / 2, width / 2)
     theta = math.radians(angle)
     
     #Top-right ellipse vertex            
     x_ch1 = h * math.sin(theta)
     y_ch1 = -h * math.cos(theta)
     x1_f, y1_f = (int(x+x_ch1), int(y+y_ch1))
     dist1 = math.sqrt((x_vertex - x1_f)**2 + (y_vertex - y1_f)**2)            
     #Bottom-left ellipse vertex            
     x_ch2 = -x_ch1
     y_ch2 = -y_ch1
     x2_f, y2_f = (int(x+x_ch2), int(y+y_ch2))
     dist2 = math.sqrt((x_vertex - x2_f)**2 + (y_vertex - y2_f)**2)   
     if dist1 < dist2:
         x_vertex, y_vertex = x1_f, y1_f                
     else:
         x_vertex, y_vertex = x2_f, y2_f   
     cv2.circle(color_img, (x_vertex, y_vertex), 5, purple, thickness=-1)                      
     cv2.ellipse(color_img, box, color, thickness=2)
     return x_vertex, y_vertex
Example #31
def draw_gaussain(img, mean, cov, color):
    x, y = np.int32(mean)
    w, u, _vt = cv.SVDecomp(cov)
    ang = np.arctan2(u[1, 0], u[0, 0]) * (180 / np.pi)
    s1, s2 = np.sqrt(w) * 3.0
    cv.ellipse(img, (x, y), (int(s1), int(s2)), ang, 0, 360, color, 1, cv.LINE_AA)
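This helper resembles the one in OpenCV's gaussian_mix.py sample. A quick way to exercise it is to estimate the mean and covariance from a random point cloud (made-up data, shown purely for illustration):

import numpy as np

# made-up sample data: estimate mean/covariance, then draw the 3-sigma ellipse
img = np.zeros((400, 400, 3), dtype=np.uint8)
pts = np.random.multivariate_normal((200, 200), [[900, 300], [300, 400]], 500)
draw_gaussain(img, pts.mean(axis=0), np.cov(pts.T), (0, 255, 255))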
Example #32
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

img = np.zeros((512, 512, 3), np.uint8)
cv.line(img, (0, 0), (511, 511), (255, 0, 0), 5)

cv.rectangle(img, (384, 5), (510, 128), (0, 255, 0), 3)

cv.circle(img, (447, 63), 55, (0, 0, 255), -1)

cv.ellipse(img, (256, 256), (100, 50), 0, 0, 180, 255, -1)

pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
pts = pts.reshape((-1, 1, 2))
cv.polylines(img, [pts], True, (0, 255, 255))

font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(img, 'OpenCV', (10, 500), font, 4, (255, 255, 255), 2, cv.LINE_AA)

# note: img is BGR, while matplotlib expects RGB, so colors will appear swapped
plt.imshow(img, cmap='gray', interpolation='bicubic')
plt.xticks([]), plt.yticks([])
plt.show()
Example #33
import cv2
import numpy as np

# the original snippet starts mid-file; a plausible canvas initialization (assumed):
canvas = np.zeros((400, 400, 3), dtype=np.uint8)

# for a filled rectangle, pass thickness=-1
cv2.rectangle(canvas, (20, 20), (40, 40), (240, 120, 156), thickness=-1)

# circle takes center, radius, color, thickness
cv2.circle(canvas, (225, 225), 40, (220, 120, 50), thickness=-1)

#triangle
p1 = (100, 100)
p2 = (200, 300)
p3 = (120, 40)
cv2.line(canvas, p1, p2, (20, 120, 120), thickness=2)
cv2.line(canvas, p2, p3, (80, 100, 170), thickness=2)
cv2.line(canvas, p3, p1, (30, 120, 220), thickness=2)

# for an arbitrary polygon
points = np.array([[[110, 150], [240, 320], [50, 140], [145, 128]]], np.int32)
cv2.polylines(canvas, [points], True, (0, 0, 100),
              thickness=5)  # pass True to close the polygon

# ellipse takes the center, axes (horizontal and vertical radii, i.e. width and height), and the angle to the horizontal axis
cv2.ellipse(canvas, (160, 160), (60, 100),
            10,
            0,
            360, (255, 0, 0),
            thickness=2)  # sweep from 0 to 360 degrees, then color, thickness

cv2.imshow("Canvas", canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #34
        eye_roi_color = roi_color

    thresh = None
    print(str(thresh1) + ", " + str(thresh2))
    ret, thresh = cv2.threshold(eye_roi_gray, thresh1, 255, cv2.THRESH_BINARY)
    # thresh = cv2.adaptiveThreshold(eye_roi_gray, 255,
    #                                cv2.ADAPTIVE_THRESH_MEAN_C,
    #                                cv2.THRESH_BINARY, 115, thresh1)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(roi_color, contours, -1, (0,0,255), 3)
    for cont in contours:
        if len(cont) > 5 and 30000 > cv2.contourArea(cont) > 1000:
            conv = cv2.convexHull(cont)
            ellipse = cv2.fitEllipse(conv)
            cv2.ellipse(eye_roi_color, ellipse, (0, 0, 255), 2)
            cv2.circle(eye_roi_color, (int(ellipse[0][0]), int(ellipse[0][1])),
                       2, (255, 0, 0), 3)

    if thresh is not None:
        if calibrated:
            roi_gray[roic[1]:roic[3], roic[0]:roic[2]] = thresh
        else:
            roi_gray = thresh

    cv2.imshow("preview", roi_color)
    cv2.imshow("preview2", roi_gray)
    # if vout:
    #     vout.write(frame)
    nf = nf + 1
    if time() - ptime > 5:
Example #35
def updateimage():
    camaracontroller = CamaraController()
    ti = time.time()
    while (not camaracontroller.killthread):
        hasFrame, frame = camaracontroller.cap.read()
        if not hasFrame:
            return
        frameWidth = frame.shape[1]
        frameHeight = frame.shape[0]
        camaracontroller.net.setInput(
            cv2.dnn.blobFromImage(
                frame,
                1.0, (camaracontroller.inWidth, camaracontroller.inHeight),
                (127.5, 127.5, 127.5),
                swapRB=True,
                crop=False))
        out = camaracontroller.net.forward()
        out = out[:, :
                  19, :, :]  # MobileNet output [1, 57, -1, -1], we only need the first 19 elements

        assert (len(camaracontroller.BODY_PARTS) == out.shape[1])

        points = []
        for i in range(len(camaracontroller.BODY_PARTS)):
            # Slice heatmap of corresponding body part.
            heatMap = out[0, i, :, :]

            # Originally, we try to find all the local maximums. To simplify a sample
            # we just find a global one. However only a single pose at the same time
            # could be detected this way.
            _, conf, _, point = cv2.minMaxLoc(heatMap)
            x = (frameWidth * point[0]) / out.shape[3]
            y = (frameHeight * point[1]) / out.shape[2]
            # Add a point if it's confidence is higher than threshold.
            points.append((int(x),
                           int(y)) if conf > camaracontroller.thr else None)

        # Code to determine the body position
        munecaderecha = points[camaracontroller.BODY_PARTS['RWrist']]
        munecaizquierda = points[camaracontroller.BODY_PARTS['LWrist']]
        cododerecho = points[camaracontroller.BODY_PARTS['RElbow']]
        codoizquierdo = points[camaracontroller.BODY_PARTS['LElbow']]
        hombroderecho = points[camaracontroller.BODY_PARTS['RShoulder']]
        cuello = points[camaracontroller.BODY_PARTS['Neck']]
        if ((munecaderecha is None) or (munecaizquierda is None)
                or (cododerecho is None) or (codoizquierdo is None)
                or (hombroderecho is None) or (cuello is None)):
            if (time.time() - ti) > 15:
                camaracontroller.pose = PosicionBrazos.TRASLAESPALDA
                ti = time.time()
        else:
            distanciabrazos = distanciaentrelineas(munecaderecha, cododerecho,
                                                   munecaizquierda,
                                                   codoizquierdo)
            distanciahombrocuello = math.sqrt(
                (cuello[0] - hombroderecho[0])**2 +
                (cuello[1] - hombroderecho[1])**2)
            ratio = (distanciabrazos / distanciahombrocuello)
            if ratio > 3.5:
                camaracontroller.pose = PosicionBrazos.EXTENDIDOS
                ti = time.time()
            elif ratio < 2:
                camaracontroller.pose = PosicionBrazos.CRUZADOS
                ti = time.time()
            else:
                camaracontroller.pose = PosicionBrazos.NEUTRALES
                ti = time.time()
            print("Distance between arms:")
            print(distanciabrazos)
            print("Shoulder-to-neck distance:")
            print(distanciahombrocuello)
        # Code to draw the detected points and lines on the image
        for pair in camaracontroller.POSE_PAIRS:
            partFrom = pair[0]
            partTo = pair[1]
            assert (partFrom in camaracontroller.BODY_PARTS)
            assert (partTo in camaracontroller.BODY_PARTS)

            idFrom = camaracontroller.BODY_PARTS[partFrom]
            idTo = camaracontroller.BODY_PARTS[partTo]

            if points[idFrom] and points[idTo]:
                cv2.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
                cv2.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360,
                            (0, 0, 255), cv2.FILLED)
                cv2.ellipse(frame, points[idTo], (3, 3), 0, 0, 360,
                            (0, 0, 255), cv2.FILLED)

        t, _ = camaracontroller.net.getPerfProfile()
        freq = cv2.getTickFrequency() / 1000
        cv2.putText(frame, '%.2fms' % (t / freq), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
        buf1 = cv2.flip(frame, 0)
        buf = buf1.tobytes()
        camaracontroller.last_texture = buf
Example #36
import cv2
import numpy as np

square = np.zeros((300, 300), np.uint8)
cv2.rectangle(square, (50, 50), (250, 250), 255, -1)
cv2.imshow("square", square)
cv2.waitKey(0)

ellipse = np.zeros((300, 300), np.uint8)
cv2.ellipse(ellipse, (150, 150), (150, 150), 30, 0, 180, 255, -1)
cv2.imshow("Ellipse", ellipse)
cv2.waitKey()

And = cv2.bitwise_and(square, ellipse)
cv2.imshow("And", And)
cv2.waitKey(0)

Or = cv2.bitwise_or(square, ellipse)
cv2.imshow("OR", Or)
cv2.waitKey(0)

Not = cv2.bitwise_not(square)
cv2.imshow("Not", Not)
cv2.waitKey(0)

Xor = cv2.bitwise_xor(square, ellipse)
cv2.imshow("Xor", Xor)
cv2.waitKey(0)
Example #37
def ransac_ellipse_fit(points,
                       bgr_img,
                       roi_pos,
                       ransac_iters_max=50,
                       refine_iters_max=3,
                       max_err=2,
                       debug=False):

    if points.size == 0: raise NoEllipseFound()

    blurred_grey_img = cv2.blur(cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY),
                                (3, 3))

    image_dx = cv2.Sobel(blurred_grey_img,
                         ddepth=cv2.CV_32F,
                         dx=1,
                         dy=0,
                         ksize=5)
    image_dy = cv2.Sobel(blurred_grey_img,
                         ddepth=cv2.CV_32F,
                         dx=0,
                         dy=1,
                         ksize=5)

    pts_x, pts_y = np.split(points, 2, axis=1)
    pts_x, pts_y = np.squeeze(pts_x), np.squeeze(pts_y)

    if debug:
        img_points = np.copy(bgr_img)
        draw_points(img_points, points, (0, 0, 255), 1, 2)
        cv2.imshow(winname, img_points)
        cv2.waitKey()

    best_ellipse = None
    best_support = float('-inf')
    best_inliers = None

    # Points on right and left of predicted pupil location (center of ROI-img)
    r_inds, l_inds = np.squeeze([pts_x > (bgr_img.shape[1] / 2)]), np.squeeze(
        [pts_x < (bgr_img.shape[1] / 2)])

    # Not enough points to start process
    if r_inds.size < 3 or l_inds.size < 3: raise NoEllipseFound()

    points_r = points[r_inds]
    points_l = points[l_inds]

    if len(points_r) < 3 or len(points_l) < 3: raise NoEllipseFound()

    # Perform N RANSAC iterations
    for ransac_iter in range(ransac_iters_max):

        try:

            sample = random.sample(list(points_r), 3) + random.sample(list(points_l), 3)
            ellipse = fit_ellipse(sample, bgr_img.shape[:2])

            if debug:
                img_least_sqs = np.copy(bgr_img)
                draw_points(img_least_sqs, points, (0, 0, 255), 1, 2)
                draw_points(img_least_sqs, sample, (0, 255, 255), 6, 2)
                cv2.ellipse(img_least_sqs, ellipse.rotated_rect, (0, 255, 255),
                            1)
                print('initial fit: ' + str(ransac_iter + 1))
                cv2.imshow(winname, img_least_sqs)
                cv2.imwrite("ransac_initial" + str(ransac_iter) + ".png",
                            img_least_sqs)
                cv2.waitKey()

            # Image-aware sample rejection
            for (p_x, p_y) in sample:
                grad_x, grad_y = ellipse.algebraic_gradient_dir((p_x, p_y))
                dx, dy = image_dx[p_y][p_x], image_dy[p_y][p_x]

                # If sample and ellipse gradients don't agree, move to next set of samples
                if dx * grad_x + dy * grad_y <= 0:
                    if debug:
                        print('Sample and ellipse gradients do not agree, reject')
                    break

            else:  # Only continues for else-block did not break on above line (image-aware sample rejection)

                # Iteratively refine inliers further
                for _ in range(refine_iters_max):

                    pts_distances = ellipse.distances(pts_x, pts_y)
                    inlier_inds = np.squeeze([
                        np.abs(get_err_scale(ellipse) * pts_distances) <
                        max_err
                    ])
                    inliers = points[inlier_inds]

                    if len(inliers) < 5: raise NotEnoughInliers()

                    ellipse = fit_ellipse(inliers, bgr_img.shape[:2])

                    if debug:
                        img_refined = np.copy(bgr_img)
                        draw_points(img_refined, points, (0, 0, 255), 1, 2)
                        draw_points(img_refined, sample, (0, 255, 255), 6, 2)
                        cv2.ellipse(img_refined, ellipse.rotated_rect,
                                    (0, 255, 255), 1)
                        draw_points(img_refined, inliers, (0, 255, 0), 1, 2)
                        cv2.imshow(winname, img_refined)
                        cv2.waitKey()

                # Calculate the image aware support of the ellipse
                if image_aware_support:

                    inliers_pts_x, inliers_pts_y = np.split(inliers, 2, axis=1)
                    inliers_pts_x, inliers_pts_y = np.squeeze(
                        inliers_pts_x), np.squeeze(inliers_pts_y)

                    # Ellipse gradients at inlier points
                    grads_x, grads_y = ellipse.algebraic_gradient_dirs(
                        inliers_pts_x, inliers_pts_y)

                    # Image gradients at inlier points
                    dxs = image_dx[inliers_pts_y.astype(int),
                                   inliers_pts_x.astype(int)]
                    dys = image_dy[inliers_pts_y.astype(int),
                                   inliers_pts_x.astype(int)]

                    support = np.sum(dxs.dot(grads_x) + dys.dot(grads_y))

                else:
                    support = len(inliers)

                if support > best_support:
                    best_ellipse = ellipse
                    best_support = support
                    best_inliers = inliers

                # Early termination for > 95% inliers
                print(len(inliers) / float(len(points)))
                if len(inliers) / float(len(points)) > 0.95:
                    print("Early Termination")
                    break

        except NotEnoughInliers:
            if debug: print('Not Enough Inliers')

        except BadEllipseShape as e:
            if debug: print('Bad Ellipse Shape: %s' % e.msg)

    if best_ellipse is None:
        raise NoEllipseFound()

    coverage = calculate_coverage(best_ellipse, best_inliers)
    if coverage < min_coverage:
        raise CoverageTooLow(
            'Minimum inlier coverage: %d, actual coverage: %d' %
            (min_coverage, coverage))

    if debug:
        img_bgr_img = np.copy(bgr_img)
        cv2.ellipse(img_bgr_img, best_ellipse.rotated_rect, (0, 255, 0), 2)
        cv2.imshow(winname, img_bgr_img)
        cv2.waitKey()

    return best_ellipse
Example #38
    def process_image(self):
        """Process the image using OpenCV DNN

        This code is run for each image
        """

        #if self.roi_x != None:
        #  self.image = self.image[self.roi_y:self.roi_y+self.roi_height, self.roi_x:self.roi_x+self.roi_width]

        imageWidth = self.image.shape[1]
        imageHeight = self.image.shape[0]

        # resizing, mean subtraction, normalizing, and channel swapping of the image
        #image_resized = self.image
        image_resized = cv2.resize(self.image, (self.inWidth, self.inHeight))
        self.net.setInput(
            cv2.dnn.blobFromImage(image_resized,
                                  1.0, (self.inWidth, self.inHeight),
                                  (127.5, 127.5, 127.5),
                                  swapRB=True,
                                  crop=False))
        # prediction
        out = self.net.forward()
        out = out[:, :
                  19, :, :]  # MobileNet output [1, 57, -1, -1], we only need the first 19 elements

        pose_msg = BodyPose()
        pose_msg.header.stamp = rospy.Time.now()
        #pose_msg.part = self.BODY_PART_NAMES

        assert (len(self.BODY_PARTS) == out.shape[1])

        points = []
        for i in range(len(self.BODY_PARTS)):
            # Slice heatmap of corresponding body part.
            heatMap = out[0, i, :, :]

            # Originally, we try to find all the local maximums. To simplify a sample
            # we just find a global one. However only a single pose at the same time
            # could be detected this way.
            _, conf, _, point = cv2.minMaxLoc(heatMap)
            x = (imageWidth * point[0]) / out.shape[3]
            y = (imageHeight * point[1]) / out.shape[2]
            pose_msg.part.append(self.BODY_PART_NAMES[i])
            pose_msg.x.append(int(x))
            pose_msg.y.append(int(y))
            pose_msg.confidence.append(conf)
            # Add a point if it's confidence is higher than threshold.
            points.append((int(x), int(y)) if conf > self.threshold else None)

        if self.display:
            for pair in self.POSE_PAIRS:
                partFrom = pair[0]
                partTo = pair[1]
                assert (partFrom in self.BODY_PARTS)
                assert (partTo in self.BODY_PARTS)

                idFrom = self.BODY_PARTS[partFrom]
                idTo = self.BODY_PARTS[partTo]
                if points[idFrom] and points[idTo]:
                    if self.display:
                        cv2.line(self.image, points[idFrom], points[idTo],
                                 (0, 255, 0), 3)
                        cv2.ellipse(self.image, points[idFrom], (3, 3), 0, 0,
                                    360, (0, 0, 255), cv2.FILLED)
                        cv2.ellipse(self.image, points[idTo], (3, 3), 0, 0,
                                    360, (0, 0, 255), cv2.FILLED)
            cv2.imshow("image", self.image)
            self.video.write(self.image)
            cv2.waitKey(1)

        self.pose_publisher.publish(pose_msg)
Example #39
def rosRGBDCallBack(rgb_data, depth_data):
    draw_contours = True
    detect_shape = False
    calculate_size = False

    try:
        cv_image = cv_bridge.imgmsg_to_cv2(rgb_data, "bgr8")
        cv_depthimage = cv_bridge.imgmsg_to_cv2(depth_data, "32FC1")
        cv_depthimage2 = np.array(cv_depthimage, dtype=np.float32)
    except CvBridgeError as e:
        print(e)

    contours_blue, mask_image_blue = HSVObjectDetection(cv_image,
                                                        toPrint=False)

    for cnt in contours_blue:
        if not draw_contours:
            xp, yp, w, h = cv2.boundingRect(cnt)

            cv2.rectangle(cv_image, (xp, yp), (xp + w, yp + h), [0, 255, 255],
                          2)
            cv2.circle(cv_image, (int(xp + w / 2), int(yp + h / 2)), 5,
                       (55, 255, 155), 4)
            if not math.isnan(
                    cv_depthimage2[int(yp + h / 2)][int(xp + w / 2)]):
                zc = cv_depthimage2[int(yp + h / 2)][int(xp + w / 2)]
                X1 = getXYZ(xp + w / 2, yp + h / 2, zc, fx, fy, cx, cy)
                print("x:", X1[0], "y:", X1[1], "z:", X1[2])
            else:
                continue
        else:
            #################Draw contours#####################################
            # In task1, you need to call the function "cv2.drawContours" to show
            # object contour in RVIZ
            #
            #
            cv2.drawContours(cv_image, contours_blue, -1, (130, 25, 60), -1)
            #x_str_blue, y_str_blue = cnt[0][0][:]
            #font_blue = cv2.FONT_HERSHEY_SIMPLEX
            #cv2.putText(cv_image, "Blue", (x_str_blue, y_str_blue), font_blue, 1, (0, 255, 255), 2, cv2.LINE_AA)
            M_blue = cv2.moments(cnt)
            cX_blue = int(M_blue["m10"] / M_blue["m00"])
            cY_blue = int(M_blue["m01"] / M_blue["m00"])
            #cv2.circle(cv_image, (cX_blue, cY_blue), 10, (1, 227, 254), -1)

            c = max(contours_blue, key=cv2.contourArea)
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            #cv2.circle(cv_image, extRight, 5, (0, 255, 0), -1)

            if (len(cnt) >= 5):
                ellipse = cv2.fitEllipse(cnt)
                cv2.ellipse(cv_image, ellipse, (0, 255, 0), 2)

                (x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
                if (angle >= 90) and (angle <= 180):
                    angle = angle - 90
                    ell_x = int(x + 100 * math.cos(angle * 0.0174532925))
                    ell_y = int(y + 100 * math.sin(angle * 0.0174532925))
                    ell_x_short = int(x + 100 * math.sin(angle * 0.0174532925))
                    ell_y_short = int(y - 100 * math.cos(angle * 0.0174532925))
                else:
                    ell_x = int(x + 100 * math.sin(angle * 0.0174532925))
                    ell_y = int(y - 100 * math.cos(angle * 0.0174532925))
                    ell_x_short = int(x - 100 * math.cos(angle * 0.0174532925))
                    ell_y_short = int(y - 100 * math.sin(angle * 0.0174532925))

                cv2.line(cv_image, (cX_blue, cY_blue), (ell_x, ell_y),
                         (0, 255, 0), 3)  # x-axis
                cv2.line(cv_image, (cX_blue, cY_blue),
                         (cX_blue, cY_blue - 100), (255, 0, 0), 3)  # z-axis
                cv2.line(cv_image, (cX_blue, cY_blue),
                         (ell_x_short, ell_y_short), (0, 0, 255), 3)  # y-axis

                if (cX_blue < len(depth) and cY_blue < len(depth[0])):
                    cZ_blue = depth[cX_blue][cY_blue]
                    xyz_blue = getXYZ(cX_blue, cY_blue, cZ_blue / 1000, fx, fy,
                                      cx, cy)

                    # rosrun tf tf_echo /arm_base_link /head_tilt_link
                    matrix = quaternion_matrix([0.937, 0.001, 0.349, -0.004])
                    matrix[0][3] = -0.117
                    matrix[1][3] = 0.000
                    matrix[2][3] = 0.488

                    # rosrun tf tf_echo /base_link /head_tilt_link
                    #matrix = quaternion_matrix([0.937, 0.001, 0.349, -0.004])
                    #matrix[0][3] = -0.02
                    #matrix[1][3] = 0.000
                    #matrix[2][3] = 0.585
                    xyz = np.array(
                        [xyz_blue[2], -xyz_blue[0], -xyz_blue[1], 1])
                    final_xyz = matrix.dot(xyz)

                    #matrix_rot_x = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
                    #final_xyz_rot_x = matrix_rot_x.dot(final_xyz)
                    if (final_xyz[0] <= 0.45):
                        final_xyz_re = np.array(
                            [final_xyz[0] + 0.1, -final_xyz[1] + 0.02, 0.05])
                        print(final_xyz_re)

                    command = Pose()
                    command.position.x = xyz_blue[0]
                    command.position.y = xyz_blue[1]
                    command.position.z = xyz_blue[2]
                    command.orientation.x = 0
                    command.orientation.y = 0.707
                    command.orientation.z = 0
                    command.orientation.w = 0.707
                    #print(command)
                    robot_command_pub.publish(command)
            ###################################################################

            if detect_shape:

                peri = cv2.arcLength(cnt, True)
                obj_num = cv2.approxPolyDP(cnt, 0.04 * peri, True)

                if len(obj_num) == 3:
                    obj_shape = "triangle"
                elif len(obj_num) == 4:
                    obj_shape = "square"
                else:
                    obj_shape = "circle"

                if len(cnt) < 1:
                    continue
                x_str, y_str = cnt[0][0][:]
                font = cv2.FONT_HERSHEY_SIMPLEX
                if obj_shape == "triangle":
                    cv2.putText(cv_image, "Triangle", (x_str, y_str), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)
                elif obj_shape == "square":
                    cv2.putText(cv_image, "Square", (x_str, y_str), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)
                elif obj_shape == "circle":
                    cv2.putText(cv_image, "Circle", (x_str, y_str), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)

            if calculate_size:
                ##################################################
                # In task 3, we provide methods to calculate area.
                # If you want to use the method the TA offers, you will need
                # to assign the vertex list of a single object to "vtx_list"
                # and finish the functions "get_side_length" and "get_radius".
                # You can also write your own code to calculate the area;
                # if so, please ignore and comment out lines 193 - 218,
                # and write your code there.
                #
                #
                # vtx_list = ?? #hint: vtx_list is similar to cnt
                ##################################################
                vtx_list_tri = cv2.approxPolyDP(cnt, 0.2 * peri, True)
                vtx_list_squ = cv2.approxPolyDP(cnt, 0.1 * peri, True)
                vtx_list_cir = cv2.approxPolyDP(cnt, 0.2 * peri, True)
                #print(vtx_list)
                if obj_shape == "triangle":
                    tri_side_len = get_side_length(vtx_list_tri,
                                                   cv_depthimage2)
                    if tri_side_len is None:
                        continue
                    tri_area = tri_side_len**2 * math.sqrt(3) / 4
                    string = "%.3e" % tri_area
                    cv2.putText(cv_image, string, (x_str, y_str + 30), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)

                elif obj_shape == "square":
                    squ_side_len = get_side_length(vtx_list_squ,
                                                   cv_depthimage2)
                    if squ_side_len is None:
                        continue
                    squ_area = squ_side_len**2
                    string = "%.3e" % squ_area
                    cv2.putText(cv_image, string, (x_str, y_str + 30), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)

                elif obj_shape == "circle":
                    circle_radius = get_radius(vtx_list_cir, cv_depthimage2)
                    if circle_radius is None:
                        continue
                    circle_area = circle_radius**2 * math.pi
                    string = "%.3e" % circle_area
                    cv2.putText(cv_image, string, (x_str, y_str + 30), font, 1,
                                (0, 255, 255), 2, cv2.LINE_AA)

    img_result_pub.publish(
        cv_bridge.cv2_to_imgmsg(cv_image, encoding="passthrough"))
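
The helper functions get_side_length and get_radius used above are left unfinished in the assignment and are not part of this excerpt. A minimal sketch, assuming getXYZ and the intrinsics fx, fy, cx, cy from earlier in the file are in scope, that the depth image is indexed [row][col], and that its values are in millimeters:

import numpy as np

def get_side_length(vtx_list, depth_img):
    # back-project the first two vertices and return their 3D distance
    pts = []
    for vtx in vtx_list[:2]:
        u, v = vtx[0]
        z = depth_img[v][u] / 1000.0  # hypothetical: depth given in mm
        if z == 0:
            return None  # invalid depth reading
        pts.append(np.array(getXYZ(u, v, z, fx, fy, cx, cy)))
    return float(np.linalg.norm(pts[0] - pts[1]))

def get_radius(vtx_list, depth_img):
    # take half of the largest pairwise 3D distance between vertices as the radius
    pts = []
    for vtx in vtx_list:
        u, v = vtx[0]
        z = depth_img[v][u] / 1000.0
        if z == 0:
            continue
        pts.append(np.array(getXYZ(u, v, z, fx, fy, cx, cy)))
    if len(pts) < 2:
        return None
    return max(float(np.linalg.norm(p - q)) for p in pts for q in pts) / 2.0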
Example #40
def main():
    # Begin capturing video. This script captures through pyuvc rather than
    # cv2.VideoCapture; the device list printed below shows the available cameras.
    
    # print device list
    dev_list = uvc.device_list()
    print(dev_list)

    worldCapture = uvc.Capture(dev_list[1]["uid"])
    eyeCapture = uvc.Capture(dev_list[0]["uid"])

    # Resolution
    width = 1280
    height = 720
    midX = int(width / 2)
    midY = int(height / 2)
    worldCapture.frame_mode = (width, height, 60)
    eyeCapture.frame_mode = (640, 480, 60)

    # Blink detection variables
    eyeState = 1
    eyeTime = None
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    while True:
        # To quit this program press q.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

        frame = worldCapture.get_frame_robust()

        # Eye tracking stuff per eyeFrame
        eyeFrame = eyeCapture.get_frame_robust()
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        retval, thresholded = cv2.threshold(eyeFrame.gray, 40, 255, 0)
        cv2.imshow("threshold", thresholded)
        closed = cv2.erode(cv2.dilate(thresholded, kernel, iterations=1), kernel, iterations=1)
        cv2.imshow("closed", closed)
        thresholded, contours, hierarchy = cv2.findContours(closed, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        drawing = np.copy(eyeFrame.bgr)
        cv2.drawContours(drawing, contours, -1, (255, 0, 0), 2)
        i = 0
        # Calc and draw contours
        for contour in contours:
            area = cv2.contourArea(contour)
            if area < 100:
                continue
            bounding_box = cv2.boundingRect(contour)
            extend = area / (bounding_box[2] * bounding_box[3])
            # reject contours that fill too much of their bounding box
            if extend > 0.8:
                continue
            i += 1
            # calculate the contour center and draw a dot there
            m = cv2.moments(contour)
            if m['m00'] != 0:
                center = (int(m['m10'] / m['m00']), int(m['m01'] / m['m00']))
                cv2.circle(drawing, center, 3, (0, 255, 0), -1)
            # fit an ellipse around the contour and draw it into the image
            try:
                ellipse = cv2.fitEllipse(contour)
                cv2.ellipse(drawing, box=ellipse, color=(0, 255, 0))
            except cv2.error:
                # cv2.fitEllipse needs at least five contour points
                pass
        if eyeState == 1 and i == 0:
            eyeState = 0
            eyeTime = time.time()
        elif eyeState == 0 and i == 1:
            eyeState = 1
            t = time.time() - eyeTime
            if t > 0.3:
                print("BLINK DETECTION!")
                sock.sendto('SELECT'.encode(), ('127.0.0.1', 8898))


        # show the frame
        cv2.imshow("Drawing", drawing)

        image_gray = np.array(frame.gray)
        image = np.array(frame.bgr)
        qr_codes = decode(image)
        
        # Draw the center
        cv2.rectangle(image, (midX - 1, midY - 1), (midX + 1, midY + 1), (255, 255, 255), 3)

        # Pupil tracking

        # QR Code processing
        if len(qr_codes) > 0:
            qrs = []
            for qr in qr_codes:
                tmp = qr.rect
                cv2.rectangle(image, (tmp.left, tmp.top), (tmp.left + tmp.width, tmp.top + tmp.height), (255, 0, 0), 3)
                
                # Draw the middle of the qr code
                qrX = tmp.left + int(tmp.width / 2)
                qrY = tmp.top + int(tmp.height / 2)
                cv2.rectangle(image, (qrX - 1, qrY - 1), (qrX + 1, qrY + 1), (255, 255, 255), 3)
                distance = round(math.hypot(qrX - midX, qrY - midY))
                print(qr)
                qrs.append((distance, qr))
            selected = sorted(qrs, key=lambda x: x[0])[0][1]
            print(selected.data)
            # Send color data of the selected qr code to helm.
            qr_values = str(selected.data).split(',')
            sock.sendto(qr_values[1].encode(), ('127.0.0.1', 8899))
            sock.sendto(qr_values[0].encode(), ('127.0.0.1', 8898))

            print(selected)
            cv2.rectangle(image, (selected.rect.left, selected.rect.top),
                          (selected.rect.left + selected.rect.width, selected.rect.top + selected.rect.height),
                          (0,255,0), 3)
        # Displays the current frame
        cv2.imshow('Current', image)
Example #41
import numpy as np
import cv2

img = np.zeros((512, 512, 3), np.uint8)  # create a black image canvas

cv2.line(img, (0, 0), (512, 512), (255, 0, 0), 5)
cv2.rectangle(img, (100, 0), (510, 128), (255, 255, 0), 3)
cv2.circle(img, (447, 100), 20, (0, 255, 0), 5)
cv2.circle(img, (200, 100), 20, (0, 255, 0), -1)
cv2.ellipse(img, (200, 300), (80, 50), 0, 0, 180, 255, -1)

font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'OpenCV', (10, 500), font, 3, (255, 255, 255), 2, cv2.LINE_AA)

cv2.imshow('draw_line', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #42
    def calcul_traitement(self, frame, thres):
        """ Amélioration de l'image par binarisation d'Otsu """
        # find contours in the binary image
        contours, hierarchy = cv2.findContours(thres, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        contours = sorted(contours, key=cv2.contourArea, reverse=True)[:1]
        #print(contours)

        for c in contours:
            # fit an ellipse to every shape identified in the image
            if len(c) < 5:  # cv2.fitEllipse needs at least 5 points
                break

            area = cv2.contourArea(c)
            if area <= 10:  # skip contours smaller than this area
                continue
            #cv2.drawContours(frame, [c], 0, (0,255,0), 3)
            # calculate moments for each contour
            M = cv2.moments(c)

            # calculate x,y coordinate of center
            if M["m00"] != 0:
                self.cX = int(M["m10"] / M["m00"])
                self.cY = int(M["m01"] / M["m00"])
            else:
                self.cX, self.cY = 0, 0

            # Fit an ellipse to the beam(s)
            self.ellipse = cv2.fitEllipse(c)
            #print('Ellipse : ', self.ellipse)

            # Fit a rectangle around the region of interest so it can be zoomed in on later
            self.x, self.y, self.w, self.h = cv2.boundingRect(c)
            #rectangle = cv2.rectangle(frame,(self.x,self.y),(self.x+self.w,self.y+self.h),(0,175,175),1)
            #print('Rectangle : Position = ', self.x,',',self.y,'; Size = ',self.w,',',self.h)

        av_fond = self.fond(frame)  # returns the average of the background

        av_img = self.img - av_fond  # subtract the background from the image
        img_indices = self.img - av_img < 0  # check the background-subtracted image for pixels below 0
        self.img[img_indices] = 0  # clamp pixels below 0 to 0

        av_frame = np.array(frame - av_fond).astype(
            np.uint8
        )  # subtract the background and cast to 8-bit integers so the frame can be converted to color
        frame_indices = frame - av_frame < 0
        frame[frame_indices] = 0

        # Convert the image back to RGB so the shapes can be drawn on it in color
        frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

        # Draw a circle on each blob in the image (white shapes)
        cv2.circle(frame, (self.cX, self.cY), 2, (0, 0, 255), -1)

        # Draw the ellipse
        thresh = cv2.ellipse(frame, self.ellipse, (0, 255, 0), 1)

        # Draw the shapes on the image
        cv2.line(frame, (self.cX, 0), (self.cX, frame.shape[0]), (255, 0, 0),
                 1)  # draw a cross through the barycenter of the image
        cv2.line(frame, (0, self.cY), (frame.shape[1], self.cY), (255, 0, 0),
                 1)

        # crop the image to the ROI
        crop_img = self.crop(frame)
        self.crop_img = self.crop(self.img)

        return crop_img, self.ellipse, self.cX, self.cY
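
The helpers self.fond and self.crop are not included in this excerpt. A plausible sketch of crop, assuming it simply slices its argument to the bounding box stored by cv2.boundingRect above:

    def crop(self, image):
        # cut the image down to the region of interest found above
        return image[self.y:self.y + self.h, self.x:self.x + self.w]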
Example #43
def inscribe_ellipse(result, ellipse, color, thickness):
    cv2.ellipse(result, ellipse, color, thickness)
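
A minimal usage sketch for this wrapper (the point set below is made up for illustration; cv2.fitEllipse needs at least five points):

import cv2
import numpy as np

canvas = np.zeros((200, 200, 3), np.uint8)
pts = np.array([[[50, 100]], [[100, 40]], [[150, 100]], [[100, 160]], [[60, 60]]],
               dtype=np.int32)
inscribe_ellipse(canvas, cv2.fitEllipse(pts), (0, 255, 0), 2)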
Example #44
def job2(frame_queue, b):
    print("open pose")

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        help='Path to image or video. Skip to capture frames from camera')
    parser.add_argument('--thr',
                        default=0.2,
                        type=float,
                        help='Threshold value for pose parts heat map')
    parser.add_argument('--width',
                        default=368,
                        type=int,
                        help='Resize input to specific width.')
    parser.add_argument('--height',
                        default=368,
                        type=int,
                        help='Resize input to specific height.')

    args = parser.parse_args()

    BODY_PARTS = {
        "Nose": 0,
        "Neck": 1,
        "RShoulder": 2,
        "RElbow": 3,
        "RWrist": 4,
        "LShoulder": 5,
        "LElbow": 6,
        "LWrist": 7,
        "RHip": 8,
        "RKnee": 9,
        "RAnkle": 10,
        "LHip": 11,
        "LKnee": 12,
        "LAnkle": 13,
        "REye": 14,
        "LEye": 15,
        "REar": 16,
        "LEar": 17,
        "Background": 18
    }

    POSE_PAIRS = [["Neck", "RShoulder"], ["Neck", "LShoulder"],
                  ["RShoulder", "RElbow"], ["RElbow", "RWrist"],
                  ["LShoulder", "LElbow"], ["LElbow", "LWrist"],
                  ["Neck", "RHip"], ["RHip", "RKnee"], ["RKnee", "RAnkle"],
                  ["Neck", "LHip"], ["LHip", "LKnee"], ["LKnee", "LAnkle"],
                  ["Neck", "Nose"], ["Nose", "REye"], ["REye", "REar"],
                  ["Nose", "LEye"], ["LEye", "LEar"]]

    inWidth = args.width
    inHeight = args.height

    net = cv2.dnn.readNetFromTensorflow(r".\pose\graph_opt.pb")

    while cv2.waitKey(1) < 0:

        if frame_queue.empty():
            cv2.waitKey(1)
            continue

        frame = frame_queue.get()
        print("job2222222222")

        frameWidth = frame.shape[1]
        frameHeight = frame.shape[0]

        net.setInput(
            cv2.dnn.blobFromImage(frame,
                                  1.0, (inWidth, inHeight),
                                  (127.5, 127.5, 127.5),
                                  swapRB=True,
                                  crop=False))
        out = net.forward()
        out = out[:, :19, :, :]  # MobileNet output is [1, 57, -1, -1]; we only need the first 19 channels

        assert (len(BODY_PARTS) == out.shape[1])

        points = []
        for i in range(len(BODY_PARTS)):
            # Slice the heatmap of the corresponding body part.
            heatMap = out[0, i, :, :]

            # Originally, we would find all the local maxima. To keep the sample
            # simple we just find the global one; as a result, only a single pose
            # can be detected at a time.
            _, conf, _, point = cv2.minMaxLoc(heatMap)
            x = (frameWidth * point[0]) / out.shape[3]
            y = (frameHeight * point[1]) / out.shape[2]
            # Add a point if its confidence is higher than the threshold.
            points.append((int(x), int(y)) if conf > args.thr else None)

        for pair in POSE_PAIRS:
            partFrom = pair[0]
            partTo = pair[1]
            assert (partFrom in BODY_PARTS)
            assert (partTo in BODY_PARTS)

            idFrom = BODY_PARTS[partFrom]
            idTo = BODY_PARTS[partTo]

            if points[idFrom] and points[idTo]:
                cv2.line(frame, points[idFrom], points[idTo], (0, 255, 0), 3)
                cv2.ellipse(frame, points[idFrom], (3, 3), 0, 0, 360,
                            (0, 0, 255), cv2.FILLED)
                cv2.ellipse(frame, points[idTo], (3, 3), 0, 0, 360,
                            (0, 0, 255), cv2.FILLED)

        t, _ = net.getPerfProfile()
        freq = cv2.getTickFrequency() / 1000
        cv2.putText(frame, '%.2fms' % (t / freq), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

        cv2.imshow('OpenPose using OpenCV', frame)

        key = cv2.waitKey(100) & 0xFF
        if key == ord('q'):
            break
Example #45
import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)
# cv2.line(img, (10, 20), (300, 400), (255, 136, 83), 5, 16)
# cv2.rectangle(img, (10, 10), (100, 100), (255, 136, 83), -1)
cv2.circle(img, (320, 240), 100, (255, 136, 83))
cv2.circle(img, (320, 240), 5, (255, 136, 83), -1, 16)
cv2.ellipse(img, (320, 240), (100, 50), 15, 0, 360, (255, 136, 83))
pts = np.array([(300, 10), (150, 100), (450, 100)], np.int32)
cv2.polylines(img, [pts], True, (255, 136, 83))
cv2.fillPoly(img, [pts], (255, 136, 83))
cv2.putText(img, "Hello OpenCV~", (10, 400), cv2.FONT_HERSHEY_PLAIN, 3,
            (255, 136, 83))

cv2.imshow('draw', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #46
# cv2.rectangle(img1, (384, 0), (510, 128), (0, 255, 0), 3)
# cv_show("img", img)

# 3. Draw a circle: cv2.circle()
# specify the center coordinates and the radius
cv2.circle(img, (447, 63), 63, (0, 0, 255), -1)
# # cv_show("img", img)

# 4. Draw an ellipse: cv2.ellipse()
# Parameters:
#     1. center coordinates
#     2. lengths of the major and minor axes
#     3. rotation angle of the ellipse, counter-clockwise
#     4. start and end angles of the elliptic arc, measured clockwise; 0 and 360 draw the full ellipse
# ellipse = cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 180, 255, -1)
cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 360, (0, 255, 0), -1,
            cv2.LINE_AA)
# # cv_show("img", img)

# 5. Draw a polygon: cv2.polylines(img, pts, isClosed, color[, thickness[, lineType[, shift]]])
# isClosed: whether the polygon is closed
pts = np.array([[10, 5], [20, 30], [70, 20], [50, 10]], np.int32)
pts = pts.reshape((-1, 1, 2))
# The -1 passed to reshape means that dimension's length is inferred from the remaining dimensions.
cv2.polylines(img, [pts], True, (0, 255, 0), 5, cv2.LINE_AA)

# Add text to the image
# cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]])
#     img: the image
#     text: the text to draw on the image
#     org: starting coordinates of the text (bottom-left corner as the origin)
#     position: coordinates of the output position
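
# The snippet breaks off before actually calling putText; a minimal call
# matching the parameters described above would be:
cv2.putText(img, 'OpenCV', (10, 400), cv2.FONT_HERSHEY_SIMPLEX, 2,
            (0, 255, 0), 2, cv2.LINE_AA)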
Example #47
    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])
#plt.show()

# Contours ------------------------------------

contours, hierarchy = cv2.findContours(th1, 1, 2)
# print(len(contours))  # 266
for i in range(0, len(contours) - 1):
    cnt = contours[i]
    area = cv2.contourArea(cnt)
    if area > 500 and area < 2000:
        #M = cv2.moments(cnt)
        ellipse = cv2.fitEllipse(cnt)
        im = cv2.ellipse(img, ellipse, (0, 255, 0), 2)

# -------------------------------------
# Blob detector for th1

params = cv2.SimpleBlobDetector_Params()

# Change thresholds
#params.minThreshold = 50  # Darker (lightness, as in HSL)
#params.maxThreshold = 100  # Lighter

# Filter by Area.
params.filterByArea = True
params.minArea = 500
params.maxArea = 3000
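
# The snippet ends after configuring the detector parameters; creating the
# detector and running it on th1 would typically continue like this
# (a sketch, not part of the original code):
detector = cv2.SimpleBlobDetector_create(params)
keypoints = detector.detect(th1)
img_blobs = cv2.drawKeypoints(img, keypoints, np.array([]), (0, 0, 255),
                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)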
Example #48
def get_blob_center(image, color, camera):
    """ Computes the center of the dot for a picture from camera 0.
        Metric to detect ellipses: Area of the enclosing ellipse minus area of the contour in ratio to the contour area.

        Args:
            image: Image from camera
            boundaries: Boundaries for the specified color for cv2.inrange

        Returns:
              center: X/Y coordinates of center of dot
    """

    # color boundaries in HSV (the image is converted to HSV below)
    boundaries_red = ([0, 10, 10], [17, 255, 255])
    boundaries_red_1 = ([0, 10, 10], [30, 255, 255])

    #boundaries_blue = ([90, 50, 50], [120, 255, 255])
    boundaries_blue = ([100, 10, 10], [180, 255, 255])

    mask = 0
    img = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2HSV)

    if camera == 0:
        #delete noise in the upper part of the image for the top camera
        overlay = np.zeros((160, 640, 3), np.uint8)
        img[:overlay.shape[0], :overlay.shape[1]] = overlay

    #get the contours of the right color
    if color == "red":
        if camera == 0:
            lower_boundary = np.array(boundaries_red[0])
            upper_boundary = np.array(boundaries_red[1])
        else:
            lower_boundary = np.array(boundaries_red_1[0])
            upper_boundary = np.array(boundaries_red_1[1])

        mask = mask + cv2.inRange(img, lower_boundary, upper_boundary)

    if color == "blue":
        lower_boundary = np.array(boundaries_blue[0])
        upper_boundary = np.array(boundaries_blue[1])

        mask = cv2.inRange(img, lower_boundary, upper_boundary)

    _, cnts, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    if camera == 0:
        # area1 and area2 are the range of contour area
        area1 = 250
        area2 = 5000
        diff_best = area2 + 1
        max_diff = 4.5
        max_ellipse_stretch = 5
        best_ellipse = None
        center = -1

        #iterate through the contours
        for cnt in cnts:
            if area1 < cv2.contourArea(cnt) < area2:
                temp_ellipse = cv2.fitEllipse(cnt)
                (x, y), (MA, ma), angle = temp_ellipse
                # compute metrics
                area_ellipse = (np.pi * MA * ma)  # MA and ma are full axis lengths, so this is 4x the true ellipse area
                area_cnt = cv2.contourArea(cnt)
                diff = ((area_ellipse - cv2.contourArea(cnt)) / area_cnt)
                if min(MA, ma) != 0:
                    ellipse_stretch = (max(MA, ma) / min(MA, ma))
                else:
                    ellipse_stretch = max_ellipse_stretch + 1
                #check metrics
                if diff < diff_best and ellipse_stretch < max_ellipse_stretch and diff < max_diff:
                    diff_best = diff
                    best_ellipse = temp_ellipse

        if best_ellipse is not None:
            (x, y), (MA, ma), angle = best_ellipse
            center = (int(x), int(y))
            cv2.circle(img,
                       center,
                       1, (0, 255, 0),
                       thickness=1,
                       lineType=8,
                       shift=0)
            cv2.ellipse(img, best_ellipse, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.imshow('ellipse', img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

            cv2.imwrite("pic_found" + str(random.randint(0, 1000)) + ".png",
                        image)

            print("center:", center)
        else:
            cv2.imwrite(
                "pic_not_found" + str(random.randint(0, 1000)) + ".png", image)

            print("no dots found!")

    else:  #camera == 1
        # area1 and area2 are the range of contour area
        area1 = 200
        area2 = 200000
        max_cnt = None
        max_area = 0
        center = -1

        # iterate through the contours
        for cnt in cnts:
            if area1 < cv2.contourArea(cnt) < area2:
                if cv2.contourArea(cnt) > max_area:
                    max_cnt = cnt
                    max_area = cv2.contourArea(cnt)

        if max_cnt is not None:
            ((x, y), radius) = cv2.minEnclosingCircle(max_cnt)
            center = (int(x), int(y))
            cv2.circle(img,
                       center,
                       int(radius), (0, 255, 0),
                       thickness=1,
                       lineType=8,
                       shift=0)
            cv2.imshow('circle', img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

            cv2.imwrite("pic_found" + str(random.randint(0, 1000)) + ".png",
                        image)

            print("center:", center)
        else:
            cv2.imwrite(
                "pic_not_found" + str(random.randint(0, 1000)) + ".png", image)

            print("no dots found!")

    #cv2.imwrite("pic"+str(random.randint(0,1000))+".png", image)
    return center
Example #49
File: base.py  Project: lxy5513/python
# create a black image
img = np.zeros((512, 512, 3), np.uint8)

# Draw an ellipse
'''
params
     image
     center location (x, y)
     axes lengths (major axis length, minor axis length)
     angle: rotation angle of the ellipse, counter-clockwise
     startAngle and endAngle denote the start and end of the ellipse arc, measured clockwise from the major axis
     color
     thickness
'''
cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 360, 255, -1)
plt.imshow(img)
plt.show()

# In[11]:

# Create a black image
img = np.zeros((512, 512, 3), np.uint8)

# Draw a diagonal blue line with thickness of 5 px
img = cv2.line(img, (0, 0), (511, 511), (255, 0, 0), 5)

#Drawing Rectangle
img = cv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)

# Drawing Circle
Example #50
Date: Jan 24
Title: openCV
Purpose: editing pictures
@author: CTL
"""

import numpy as np
import cv2

img = np.zeros((1000, 1777, 3),
               np.uint8)  #This line is to create a black image
cv2.line(img, (0, 0), (511, 511), (255, 0, 0), 5)  #This line draws a blue line
cv2.rectangle(img, (900, 0), (343, 656), (0, 255, 0),
              3)  #This line draws a rectangle
cv2.circle(img, (447, 63), 63, (0, 0, 255), -1)  #This line draws a circle
cv2.ellipse(img, (256, 256), (100, 50), 0, 0, 180, 255,
            -1)  #This line draws an ellipse
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'shutDown', (10, 500), font, 4, (255, 255, 255), 2,
            cv2.LINE_AA)  #This line puts text on the image

#This line is to show the image
#cv2.imshow('image', img)
'''
#This line is to save the image
cv2.imwrite('black.png', img)
'''

img1 = cv2.imread('black.png')
img2 = cv2.imread('glassess.png')

#This is to blend two pictures from 0% to 100%
Example #51
import cv2 as cv
# Haar cascade detectors (face, eyes, nose)
fd = cv.CascadeClassifier('haar/face.xml')
ed = cv.CascadeClassifier('haar/eye.xml')
nd = cv.CascadeClassifier('haar/nose.xml')

vc = cv.VideoCapture(0)
while True:
    frame = vc.read()[1]
    faces = fd.detectMultiScale(frame, 1.3, 5)
    for l, t, w, h in faces:
        a, b = int(w / 2), int(h / 2)
        cv.ellipse(frame, (l + a, t + b), (a, b), 0, 0, 360, (255, 0, 255), 2)
        # Crop out the face, then locate the eyes and the nose within it
        face = frame[t:t + h, l:l + w]
        eyes = ed.detectMultiScale(face, 1.3, 5)
        for l, t, w, h in eyes:
            a, b = int(w / 2), int(h / 2)
            cv.ellipse(face, (l + a, t + b), (a, b), 0, 0, 360, (0, 255, 0), 2)
        noses = nd.detectMultiScale(face, 1.3, 5)
        for l, t, w, h in noses:
            a, b = int(w / 2), int(h / 2)
            cv.ellipse(face, (l + a, t + b), (a, b), 0, 0, 360, (0, 255, 255),
                       2)
    cv.imshow('VideoCapture', frame)
    if cv.waitKey(33) == 27:
        break

vc.release()
cv.destroyAllWindows()
Example #52
def evaluate_droplet(img,
                     y_base,
                     mask: Tuple[int, int, int, int] = None) -> Droplet:
    """ 
    Analyze an image for a droplet and determine the contact angles

    :param img: the image to be evaluated as np.ndarray
    :param y_base: the y coordinate of the surface the droplet sits on
    :returns: a Droplet() object with all the informations
    """
    drplt = Droplet()
    # crop img from baseline down (contains no useful information)
    crop_img = img[:y_base, :]
    shape = img.shape
    height = shape[0]
    width = shape[1]
    if USE_GPU:
        crop_img = cv2.UMat(crop_img)
    # calculate thresholds
    thresh_high, thresh_im = cv2.threshold(img, 0, 255,
                                           cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    thresh_low = 0.5 * thresh_high
    # thresh_high = 179
    # thresh_low = 76

    # values only for 8bit images!

    # apply canny filter to image
    # FIXME adjust Canny params, currently detects too many edges
    bw_edges = cv2.Canny(crop_img, thresh_low, thresh_high)

    # block detection of syringe
    if mask is not None:
        x, y, w, h = mask
        bw_edges[:, x:x + w] = 0
        #img[:, x:x+w] = 0
        masked = True
    else:
        masked = False

    edge = find_contour(bw_edges, masked)

    if USE_GPU:
        # fetch contours from gpu memory
        # cntrs = [cv2.UMat.get(c) for c in contours]
        edge = cv2.UMat.get(edge)
        if DEBUG & DBG_SHOW_CONTOURS:
            # img = cv2.drawContours(img,cntrs,-1,(100,100,255),2)
            img = cv2.drawContours(img, edge, -1, (255, 0, 0), 2)

    # apply ellipse fitting algorithm to droplet
    (x0, y0), (maj_ax, min_ax), phi_deg = cv2.fitEllipse(edge)
    phi = radians(phi_deg)
    a = maj_ax / 2
    b = min_ax / 2

    # maybe get this fit working: https://scikit-image.org/docs/0.15.x/api/skimage.measure.html
    #points = edge.reshape(-1,2)
    #points[:,[0,1]] = points[:,[1,0]]
    # ell = EllipseModel()
    # if not ell.estimate(points): raise RuntimeError('Couldn\'t fit ellipse')
    # x0, y0, a, b, phi = ell.params
    # maj_ax = 2*a
    # min_ax = 2*b
    # phi_deg = degrees(phi)

    if DEBUG & DBG_DRAW_ELLIPSE:
        img = cv2.ellipse(img, (int(round(x0)), int(round(y0))),
                          (int(round(a)), int(round(b))),
                          int(round(phi * 180 / pi)),
                          0,
                          360, (255, 0, 255),
                          thickness=1,
                          lineType=cv2.LINE_AA)
        #img = cv2.ellipse(img, (int(round(x0)),int(round(y0))), (int(round(a)),int(round(b))), 0, 0, 360, (0,0,255), thickness=1, lineType=cv2.LINE_AA)

    # calculate intersections of ellipse with baseline
    intersection = calc_intersection_line_ellipse((x0, y0, a, b, phi),
                                                  (0, y_base))

    if intersection is None or not isinstance(intersection, list):
        raise ContourError('No valid intersections found')
    x_int_l = min(intersection)
    x_int_r = max(intersection)

    foc_len = sqrt(abs(a**2 - b**2))

    # calc slope and angle of tangent at intersections
    m_t_l = calc_slope_of_ellipse((x0, y0, a, b, phi), x_int_l, y_base)
    m_t_r = calc_slope_of_ellipse((x0, y0, a, b, phi), x_int_r, y_base)

    # calc angle from inclination of tangents
    angle_l = (pi - atan2(m_t_l, 1)) % pi
    angle_r = (atan2(m_t_r, 1) + pi) % pi

    # calc area of droplet
    area = calc_area_of_droplet((x_int_l, x_int_r), (x0, y0, a, b, phi),
                                y_base)

    # calc height of droplet
    drplt_height = calc_height_of_droplet((x0, y0, a, b, phi), y_base)

    # write values to droplet object
    drplt.angle_l = degrees(angle_l)
    drplt.angle_r = degrees(angle_r)
    drplt.maj = maj_ax
    drplt.min = min_ax
    drplt.center = (x0, y0)
    drplt.phi = phi
    drplt.tilt_deg = phi_deg
    drplt.tan_l_m = m_t_l
    drplt.tan_r_m = m_t_r
    drplt.line_l = (x_int_l - y_base / m_t_l, 0,
                    x_int_l + (height - y_base) / m_t_l, height)
    drplt.line_r = (x_int_r - y_base / m_t_r, 0,
                    x_int_r + (height - y_base) / m_t_r, height)
    drplt.int_l = (x_int_l, y_base)
    drplt.int_r = (x_int_r, y_base)
    drplt.foc_pt1 = (x0 + foc_len * cos(phi), y0 + foc_len * sin(phi))
    drplt.foc_pt2 = (x0 - foc_len * cos(phi), y0 - foc_len * sin(phi))
    drplt.base_diam = x_int_r - x_int_l
    drplt.area = area
    drplt.height = drplt_height
    drplt.is_valid = True

    if DEBUG & DBG_DRAW_TAN_ANGLE:
        # painting
        y_int = int(round(y_base))
        img = cv2.line(img, (int(round(x_int_l - (y_int / m_t_l))), 0), (int(
            round(x_int_l + ((height - y_int) / m_t_l))), int(round(height))),
                       (255, 0, 255),
                       thickness=1,
                       lineType=cv2.LINE_AA)
        img = cv2.line(img, (int(round(x_int_r - (y_int / m_t_r))), 0), (int(
            round(x_int_r + ((height - y_int) / m_t_r))), int(round(height))),
                       (255, 0, 255),
                       thickness=1,
                       lineType=cv2.LINE_AA)
        img = cv2.ellipse(img, (int(round(x_int_l)), y_int), (20, 20),
                          0,
                          0,
                          -int(round(angle_l * 180 / pi)), (255, 0, 255),
                          thickness=1,
                          lineType=cv2.LINE_AA)
        img = cv2.ellipse(img, (int(round(x_int_r)), y_int), (20, 20),
                          0,
                          180,
                          180 + int(round(angle_r * 180 / pi)), (255, 0, 255),
                          thickness=1,
                          lineType=cv2.LINE_AA)
        img = cv2.line(img, (0, y_int), (width, y_int), (255, 0, 0),
                       thickness=2,
                       lineType=cv2.LINE_AA)
        img = cv2.putText(img, '<' + str(round(angle_l * 180 / pi, 1)),
                          (5, y_int - 5), cv2.FONT_HERSHEY_COMPLEX, .5,
                          (0, 0, 0))
        img = cv2.putText(img, '<' + str(round(angle_r * 180 / pi, 1)),
                          (width - 80, y_int - 5), cv2.FONT_HERSHEY_COMPLEX,
                          .5, (0, 0, 0))
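
The geometry helpers used above (calc_intersection_line_ellipse, calc_slope_of_ellipse, calc_area_of_droplet, calc_height_of_droplet, find_contour) are not part of this excerpt. A minimal sketch of the intersection helper, consistent with the (x0, y0, a, b, phi) tuple evaluate_droplet passes in and with its expectation of a list of x coordinates (or None):

from math import atan2, acos, cos, sin, sqrt

def calc_intersection_line_ellipse(ellipse_pars, line_pars):
    # Intersect the rotated ellipse (x0, y0, a, b, phi) with the horizontal
    # line y = y_line; return the x coordinates of the hits, or None on a miss.
    x0, y0, a, b, phi = ellipse_pars
    _, y_line = line_pars
    # Parametrically, y(t) = y0 + a*sin(phi)*cos(t) + b*cos(phi)*sin(t),
    # so y(t) = y_line reduces to A*cos(t) + B*sin(t) = C:
    A = a * sin(phi)
    B = b * cos(phi)
    C = y_line - y0
    R = sqrt(A * A + B * B)
    if R == 0 or abs(C) > R:
        return None  # the line does not reach the ellipse
    delta = atan2(B, A)
    offs = acos(C / R)
    return [x0 + a * cos(t) * cos(phi) - b * sin(t) * sin(phi)
            for t in (delta + offs, delta - offs)]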
Example #53
import cv2
import numpy as np


img = np.zeros((512,512,3), np.uint8)
# Draw a diagonal blue line with thickness of 5px
img = cv2.line(img,(0,0),(511,511),(255,0,0),5)  # First parameter is the image, then come the starting and ending
                                                # points followed by BGR color scheme and line thickness

# Drawing a rectangle
img = cv2.rectangle(img,(384,0),(510,128),(0,255,0),3)

# Drawing a circle
img = cv2.circle(img,(447,63),63,(0,0,255),-1)

# Drawing ellipse

img = cv2.ellipse(img,(256,256),(100,50),30,0,180,(50,255,100),-1)

# Drawing a polygon
pts = np.array([[10,5],[20,30],[70,20],[50,10]],np.int32)
pts = pts.reshape((-1,1,2))
img = cv2.polylines(img,[pts],False,(0,255,255))

# Adding text to images
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'OpenCV',(10,500),font,4,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('OpenCV',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

Example #54
File: draw2.py  Project: mathhat/unik4690
def draw_torso(npimg, Centers, col):
    #i = 0
    torso = []
    tryvar = lambda varpos: Centers[varpos] if varpos in Centers.keys(
    ) else None
    lshould = tryvar(5)
    rshould = tryvar(2)
    neck = tryvar(1)
    minus = -1
    angle = 0
    groin = []

    if lshould and rshould and neck:
        dx = (lshould[0] - rshould[0])
        dy = (lshould[1] - rshould[1])
        d = int(np.sqrt(dx * dx + dy * dy))
        if dx and dy:
            angle = np.arctan(dy * 1. / dx) * 180 / np.pi
        cv2.ellipse(npimg, (neck[0], (lshould[1] + rshould[1]) / 2),
                    (d / 2, int(d * 1.4)), angle, 0, 180, col, -20)
        cv2.ellipse(npimg, (neck[0] - dy,
                            (lshould[1] + rshould[1]) / 2 + int(d * 1.4)),
                    (d / 2, int(d * 1.5)), angle, 180, 360, col, -20)

        groin.append([lshould[0] - int(dy * 1.4), lshould[1] + int(dx * 1.4)])
        groin.append([groin[0][0] - dx, groin[0][1] - dy])
        groin.append([groin[1][0] - dy, groin[1][1] + dx])
        groin.append([(groin[2][0] + groin[0][0] - dy) / 2,
                      (groin[0][1] + dx + groin[0][1]) / 2])
        groin.append([groin[0][0] - dy, groin[0][1] + dx])

        groin = np.asarray(groin)
        groin = groin.reshape((-1, 1, 2), )
        cv2.fillPoly(npimg, [groin], col)

        #cv2.line(npimg,(groin[0][0],groin[0][1]) ,(groin[1][0],groin[1][1]),[0,255,255],d/6)
        #cv2.line(npimg,(groin[2][0],groin[2][1]) ,(groin[3][0],groin[3][1]),[0,255,255],d/6)

        torso.append([lshould[0], lshould[1]])
        torso.append([rshould[0], rshould[1]])
        torso.append([neck[0] + dy / 2, neck[1] - int(dx * 0.35)])
        torso = np.asarray(torso)
        torso = torso.reshape((-1, 1, 2), )

        cv2.fillPoly(npimg, [torso], col)
        cv2.line(npimg, (rshould[0] - dy / 15, rshould[1] + dx / 15),
                 (lshould[0] - dy / 15, lshould[1] + dx / 15), col, d / 6)
        return npimg

    elif neck and lshould:
        should = lshould
        minus = 1
    elif neck and rshould:
        should = rshould
    else:
        return npimg
    #this code runs if only one shoulder is observed
    dx = (should[0] - neck[0]) * 2
    dy = (should[1] - neck[1]) * 2
    d = int(np.sqrt(dx * dx + dy * dy))
    if dx and dy:
        angle = np.arctan(dy * 1. / dx) * 180 / np.pi
    cv2.ellipse(npimg, (neck[0], neck[1]), (d / 2, int(d * 1.5)), angle, 0,
                180, col, -1)
    cv2.ellipse(npimg, (neck[0] - dy, neck[1] + int(d * 1.5)),
                (d / 2, int(d * 1.5)), angle, 180, 360, col, -1)

    torso.append([should[0], should[1]])
    torso.append([should[0] - dx, should[1] - dy])
    torso.append([neck[0] + dy / 2 * minus, neck[1] - int(dx * 0.35) * minus])

    torso = np.asarray(torso)
    torso = torso.reshape((-1, 1, 2), )
    cv2.fillPoly(npimg, [torso], col)
    cv2.line(npimg, (should[0], should[1]), (should[0] - dx, should[1] - dy),
             col, d / 7)

    return npimg
Example #55
cap = cv2.VideoCapture(0)

while (True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    #Use face detection to find  locations of faces in frame
    faces = face_cascade.detectMultiScale(frame,
                                          scaleFactor=1.2,
                                          minSize=(20, 20))
    for (x, y, w, h) in faces:
        frame[y:y + h, x:x + w, :] = cv2.dilate(frame[y:y + h, x:x + w, :],
                                                kernel)  #blurs face
        #draw weird looking face over blurred face
        # use integer division so the coordinates stay ints under Python 3
        cv2.circle(frame, (x + w // 3, y + h // 3), h // 12, (255, 255, 255),
                   -1, 8)
        cv2.circle(frame, (x + 2 * w // 3, y + h // 3), h // 12,
                   (255, 255, 255), -1, 8)
        cv2.circle(frame, (x + w // 3, y + h // 3), h // 24, (255, 150, 0), -1,
                   8)
        cv2.circle(frame, (x + 2 * w // 3, y + h // 3), h // 24, (255, 150, 0),
                   -1, 8)
        cv2.ellipse(frame, (x + w // 2, y + 3 * h // 4), (w // 6, h // 12), 0,
                    0, 180, (0, 0, 255), 8)

    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Example #56
File: draw2.py  Project: mathhat/unik4690
def draw_head(npimg, Centers, col, bol, k=[0]):
    tryvar = lambda varpos: Centers[varpos] if varpos in Centers.keys(
    ) else None
    lear = tryvar(17)
    rear = tryvar(16)
    leye = tryvar(15)
    reye = tryvar(14)
    nose = tryvar(0)
    neck = tryvar(1)

    if bol:
        k1, k2, k6 = read.k1()
        k3, k4, k5, k7, k8 = read.k2()
    else:
        k1, k2, k3, k4, k5, k6, k7, k8 = k

    if (neck and nose):

        if (lear and rear):  #head circle if both ears are present
            lx = lear[0]
            rx = rear[0]
            hx = (lx + rx) / 2  #Nope
            angle = 0
            dy = lear[1] - rear[1]
            dx = float(lx - rx)
            if dy and dx:
                taneyes = dy / dx  # use the tangent here to get a relative angle
                angle = np.arctan(taneyes) * 180 / np.pi

            # Afterwards, place the "midpoint" "above" based on the angle.
            dx = int(np.linalg.norm(np.array(lear) - np.array(rear)))
            #^relation for headsize, distance between ears
            hy = int((rear[1] + lear[1]) / 2)  #-dx/k2) #nope  #fight me
            hy2 = hy - int(dx / 2.5)
            #limblen= int(dx/4)+1
            #l =8.*dx/(abs(nose[1]-neck[1])+1)
            #cv2.circle(npimg, (hx,hy), int(abs(dx*k1)), col, thickness=-limblen, lineType=8, shift=0)
            cv2.ellipse(npimg, (hx, hy),
                        (int(dx * k1 / 1.3), int(dx * k1 * 1.5)), angle, 180,
                        360, col, -1)
            cv2.ellipse(npimg, (hx + int(angle), hy2),
                        (int(dx * k1 / 1.2), int(dx * k1 / 1.3)), angle, 0,
                        360, col, -1)
            # Addition; jawline from helper funct.
            eye_jaw(npimg, [leye, reye], [False, False], col)
        #head pointing left (left eye hidden)
        elif rear and leye:  #head circle if right ear is present + faceline
            rx = rear[0]
            dx = leye[0] - rx  #/10+1 #dist between ear n eye
            dy = (leye[1] - rear[1])
            angle = 0
            if dy and dx:
                tan = dy / float(dx)
                angle = np.arctan(tan) * 180 / np.pi
            dx = int(np.linalg.norm(np.array(rear) - np.array(leye)))

            hx = int(rx + dx / 10. * 4 * k7 * 0.95)  #- dx*20 / (rx-leye[0]))
            #Jacob's magic circle coord (next to ear) ?? wtf. fight me
            hy = int((rear[1] + leye[1]) / 2 + dx / 10. * k3 * 1.6)
            hy2 = hy - dx / 10

            #limblen = int((dx+abs(rx-leye[0])/2)*0.4)+1
            #cv2.circle(npimg, (hx,hy), int(abs(nose[0]-rx)*k4), col, thickness=-limblen, lineType=8, shift=0)
            cv2.ellipse(npimg, (hx, hy),
                        (int(dx * k4 * 0.85), int(dx * k4 * 0.73)),
                        -angle - 20, 0, 360, col, -1)
            #cv2.ellipse(npimg,(hx,hy2),(int(abs(nose[0]-rx)*k4*0.85),int(abs(nose[0]-rx)*k4*0.65))  ,-20,0,360,col,-1)
            eye_jaw(npimg, [leye, reye], [False, rear], col)
        elif lear and reye:  #head circle if left ear is present + faceline
            lx = lear[0]
            reyex = reye[0]
            dx = (lx - reyex)  #/10+1
            dy = (lear[1] - reye[1])
            angle = 0
            if dy and dx:
                tan = dy / float(dx)
                angle = np.arctan(tan) * 180 / np.pi
            dx = int(np.linalg.norm(np.array(lear) - np.array(reye)))
            hx = int(lx - dx / 10. * 4 * k7 *
                     0.9)  #+ dx*20. /(lx-reyex))#circle center, next to ear
            hy = int((lear[1] + reye[1]) / 2 +
                     dx / 10. * k3 * 1.6)  #circle center, slightly above ear
            hy2 = hy - dx / 10
            #limblen = int((abs(lx-reyex)/2+dx)*k5)
            #cv2.circle(npimg, (hx,hy), int(abs(nose[0]-lx)*k4), col, thickness=-limblen, lineType=8, shift=0)
            cv2.ellipse(npimg, (hx, hy),
                        (int(dx * k4 * 0.85), int(dx * k4 * 0.73)),
                        -angle + 20, 0, 360, col, -1)
            #cv2.ellipse(npimg,(hx,hy2),(int(abs(nose[0]-lx)*k4*0.85),int(abs(nose[0]-lx)*k4*0.65))  ,20,0,360,col,-1)
            eye_jaw(npimg, [leye, reye], [lear, False], col)

    return npimg
Example #57
                # applying kalman filter on image position data
                kalman_filter.input_latest_noisy_measurement(image_position)
                posteri_estimate_graph.append(
                    kalman_filter.get_latest_estimated_measurement())
                # separating estimated values for easier plotting
                estimation = np.zeros(shape=(len(posteri_estimate_graph), 2))
                for i in range(0, len(posteri_estimate_graph)):
                    temp2 = posteri_estimate_graph[i]
                    estimation[i, 0] = temp2[0]
                    estimation[i, 1] = temp2[1]
                # video frame size

                height, width = image.shape[:2]

                opencv.ellipse(image, (int(estimation[-1, 0] * width),
                                       int(estimation[-1, 1] * height + 15)),
                               (70, 90), -180, 0, 360, (255, 0, 0), 2)
                # plotting trajectory points
                for j in range(2, len(estimation)):
                    opencv.circle(image, (int(estimation[j, 0] * width),
                                          int(estimation[j, 1] * height + 15)),
                                  5, (0, 0, 255), -1)

            opencv.putText(image, "Object", (10, 70),
                           opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
            opencv.putText(image, "tracking", (10, 140),
                           opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)

            #opencv.putText(image, "Object tracking", (100, 100), opencv.FONT_HERSHEY_DUPLEX, 2.0, (0, 0, 255))

            opencv.imshow("Robot camera feed", image)