Code Example #1
File: dieprofile.py Project: dcmouser/dicer
    def applyForegroundExtractionMask_Square(self, img, mask, centroid, rotatedRect, marginAdjust):
        imgAnd = dicerfuncs.makeBinaryImageMaskForImg(mask)
        color = 255
        rotatedRect2 = (rotatedRect[0], (rotatedRect[1][0] * marginAdjust, rotatedRect[1][1] * marginAdjust), rotatedRect[2])
        boxpoints = cv2.boxPoints(rotatedRect2)
        boxpoints = boxpoints.astype(int)
        cv2.fillConvexPoly(imgAnd, boxpoints, color)
        mask = cv2.bitwise_and(imgAnd, mask)

        # mask crop image itself
        if (True):
            imgAnd = dicerfuncs.makeBinaryImageMaskForImg(mask)
            rotatedRect2 = (rotatedRect[0], (rotatedRect[1][0] * marginAdjust + 2, rotatedRect[1][1] * marginAdjust + 2), rotatedRect[2])
            boxpoints = cv2.boxPoints(rotatedRect2)
            boxpoints = boxpoints.astype(int)
            cv2.fillConvexPoly(imgAnd, boxpoints, color)
            img = cv2.bitwise_and(imgAnd, img)
            # actually crop? Note: this causes problems, I think, because of size changes to the shape params
            if (False):
                # now lets find where we can crop
                x, y, w, h = cv2.boundingRect(boxpoints)
                img = img[y:y + h, x:x + w]
                mask = mask[y:y + h, x:x + w]

        return (img, mask)
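For orientation, a minimal self-contained sketch of the rotated-rect masking pattern used above; the image, rectangle, and sizes here are hypothetical:

import cv2
import numpy as np

img = np.full((200, 200), 128, dtype=np.uint8)   # hypothetical grayscale image
rect = ((100.0, 100.0), (80.0, 50.0), 30.0)      # ((cx, cy), (w, h), angle)

mask = np.zeros(img.shape[:2], dtype=np.uint8)   # blank single-channel mask
box = cv2.boxPoints(rect).astype(np.int32)       # 4 corners of the rotated rect
cv2.fillConvexPoly(mask, box, 255)               # paint the rectangle white
masked = cv2.bitwise_and(img, mask)              # keep only pixels inside the rect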
Code Example #2
    def test_calc_hist_in_area(self):
        ip = Image_operations()

        p0 = (0, 0)
        p1 = (10, 10)
        p2 = (8, 2)
        poly = [p0, p1, p2]

        img_black = np.zeros((20, 20, 3))
        cv2.fillConvexPoly(img_black, np.array(poly, 'int32'), (0, 0, 0))

        color = (255, 255, 255)
        img_color = np.zeros((20, 20, 3))
        cv2.fillConvexPoly(img_color, np.array(poly, 'int32'), color)

        hist_black = ip.calc_hist_in_area(img_black, poly)
        hist_color = ip.calc_hist_in_area(img_color, poly)

        match = ip.compare_hists(hist_black, hist_color)
        print(match)

        match = ip.compare_hists(hist_color, hist_color)
        print(match)

        match = ip.compare_hists(hist_black, hist_black)
        print(match)
Code Example #3
File: faceAverage.py Project: vovanmozg/average-face
def warpTriangle(img1, img2, t1, t2) :

    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))

    # Offset points by left top corner of the respective rectangles
    t1Rect = [] 
    t2Rect = []
    t2RectInt = []

    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append((int(t2[i][0] - r2[0]), int(t2[i][1] - r2[1])))


    # Get mask by filling triangle
    mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)

    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    
    size = (r2[2], r2[3])

    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    
    img2Rect = img2Rect * mask

    # Copy triangular region of the rectangular patch to the output image
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ( (1.0, 1.0, 1.0) - mask )
     
    img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = img2[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Rect
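warpTriangle relies on an applyAffineTransform helper that is not shown; a minimal sketch consistent with how it is called here:

def applyAffineTransform(src, srcTri, dstTri, size):
    # estimate the affine map taking srcTri onto dstTri
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
    # warp the source patch to the destination patch size (width, height)
    return cv2.warpAffine(src, warpMat, (size[0], size[1]), None,
                          flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_REFLECT_101)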
Code Example #4
File: shape.py Project: AlexSchwank/CV_Game
 def generate_img_by_polygon(self, poly, fill=False):
     if fill:
         cv2.fillConvexPoly(self.img, np.array(poly, 'int32'), self.color)
     else:
         if self.triangle is not None:
             cv2.fillConvexPoly(self.img, np.array(self.triangle, 'int32'), self.triangle_color)
         cv2.drawContours(self.img, np.array([poly], 'int32'), 0, self.color, thickness=3)
Code Example #5
def __compare_segmentations(img_a, img_b):
    # image, contours, hierarchy = cv.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) [opencv3]
    # contours, hierarchy = cv.findContours(image, mode, method[, contours[, hierarchy[, offset]]]) [opencv2]
    out_img_a, ct_img_a, h_a = cv.findContours(img_a, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    out_img_b, ct_img_b, h_b = cv.findContours(img_b, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)

    error = []

    height, width = img_a.shape[:2]

    for comp_a in ct_img_a:

        mask_a = np.zeros((height, width), np.uint8)
        mask_b = np.zeros((height, width), np.uint8)

        first_point = comp_a[0][0]

        cv.fillConvexPoly(mask_a, comp_a, (255))

        for comp_b in ct_img_b:
            if cv.pointPolygonTest(comp_b, (first_point[0], first_point[1]), False) >= 0:
                cv.fillConvexPoly(mask_b, comp_b, (50))
                break

        set_difference = mask_a - mask_b
        set_difference_count = (set_difference == 255).sum()

        error.append(set_difference_count)

    return error
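A caveat worth noting: cv2.fillConvexPoly is only guaranteed correct for convex (or at least non-self-intersecting) polygons, while contours from cv.findContours are generally concave. A drop-in alternative that fills arbitrary contours, as a sketch:

cv.drawContours(mask_a, [comp_a], -1, 255, thickness=cv.FILLED)  # fills any contour shape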
Code Example #6
def draw_hand(full_img, joint_coords, is_loss_track):
    if is_loss_track:
        joint_coords = FLAGS.default_hand

    # Plot joints
    for joint_num in range(FLAGS.num_of_joints):
        color_code_num = (joint_num // 4)
        joint_color = list(map(lambda x: x + 35 * (joint_num % 4), FLAGS.joint_color_code[color_code_num]))
        cv2.circle(full_img, center=(int(joint_coords[joint_num][1]), int(joint_coords[joint_num][0])), radius=3,
                   color=joint_color, thickness=-1)

    # Plot limbs
    for limb_num in range(len(FLAGS.limbs)):
        x1 = int(joint_coords[int(FLAGS.limbs[limb_num][0])][0])
        y1 = int(joint_coords[int(FLAGS.limbs[limb_num][0])][1])
        x2 = int(joint_coords[int(FLAGS.limbs[limb_num][1])][0])
        y2 = int(joint_coords[int(FLAGS.limbs[limb_num][1])][1])
        length = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
        if length < 150 and length > 5:
            deg = math.degrees(math.atan2(x1 - x2, y1 - y2))
            polygon = cv2.ellipse2Poly((int((y1 + y2) / 2), int((x1 + x2) / 2)),
                                       (int(length / 2), 3),
                                       int(deg),
                                       0, 360, 1)
            color_code_num = limb_num // 4
            limb_color = list(map(lambda x: x + 35 * (limb_num % 4), FLAGS.joint_color_code[color_code_num]))
            cv2.fillConvexPoly(full_img, polygon, color=limb_color)
Code Example #7
def morphTriangle(img1, img2, img, t1, t2, t, alpha):
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    r = cv2.boundingRect(np.float32([t]))

    # Offset points by left top corner of the respective rectangles
    t1Rect = []
    t2Rect = []
    tRect = []

    for i in range(0, 3):
        tRect.append(((t[i][0] - r[0]), (t[i][1] - r[1])))
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))

    # Get mask by filling triangle
    mask = np.zeros((r[3], r[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(tRect), (1.0, 1.0, 1.0), 16, 0)

    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    img2Rect = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]

    size = (r[2], r[3])
    warpImage1 = applyAffineTransform(img1Rect, t1Rect, tRect, size)
    warpImage2 = applyAffineTransform(img2Rect, t2Rect, tRect, size)

    # Alpha blend rectangular patches
    imgRect = (1.0 - alpha) * warpImage1 + alpha * warpImage2

    # Copy triangular region of the rectangular patch to the output image
    img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]] * (1 - mask) + imgRect * mask
Code Example #8
File: flux_to_mc.py Project: Mipanox/Astroph
    def _sm(self):
    ###--sum of pixel values--###
        if self.header["naxis"] == 3: da = self.data
        else                        : da = self.data[0]

        if self.vr:
            vii = int(self.header["crpix3"]+ \
                     (self.vr[0]-self.header["crval3"]/1000.) \
                      /(self.header["cdelt3"]/1000.))-1
            vff = int(self.header["crpix3"]+ \
                     (self.vr[1]-self.header["crval3"]/1000.) \
                      /(self.header["cdelt3"]/1000.))-1
            rg = range(vii,vff)
        else: rg = range(da.shape[-3])

        fx = []
        if self.pg and len(self.pg) > 1:
        #mask of the polygon
            mk = np.zeros((da.shape[-2],da.shape[-1]))
            cv2.fillConvexPoly(mk, np.array(self.pg), 1)
            
            mk  = mk.astype(bool)
            out = np.zeros_like(da[0])

            for zz in rg:
                out[mk] = da[zz][mk]
                fx.append(np.sum(out))
        else:
            for zz in rg:
                fx.append(np.sum(da[zz]))
        return np.array(fx)
Code Example #9
File: Convert_Masked.py Project: Nioy/faceswap
    def get_image_mask(self, image, new_face, landmarks, mat, image_size):

        face_mask = numpy.zeros(image.shape,dtype=float)
        if 'rect' in self.mask_type:
            face_src = numpy.ones(new_face.shape,dtype=float)
            cv2.warpAffine( face_src, mat, image_size, face_mask, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )

        hull_mask = numpy.zeros(image.shape,dtype=float)
        if 'hull' in self.mask_type:
            hull = cv2.convexHull( numpy.array( landmarks ).reshape((-1,2)).astype(int) ).flatten().reshape( (-1,2) )
            cv2.fillConvexPoly( hull_mask,hull,(1,1,1) )

        if self.mask_type == 'rect':
            image_mask = face_mask
        elif self.mask_type == 'facehull':
            image_mask = hull_mask
        else:
            image_mask = face_mask * hull_mask


        if self.erosion_kernel is not None:
            if self.erosion_kernel_size > 0:
                image_mask = cv2.erode(image_mask,self.erosion_kernel,iterations = 1)
            elif self.erosion_kernel_size < 0:
                dilation_kernel = abs(self.erosion_kernel)
                image_mask = cv2.dilate(image_mask,dilation_kernel,iterations = 1)

        if self.blur_size!=0:
            image_mask = cv2.blur(image_mask,(self.blur_size,self.blur_size))

        return image_mask
Code Example #10
	def image_callback(self, image_in):
		# Import and convert
		image_cv = self.bridge.imgmsg_to_cv(image_in, 'bgr8')
		image_cv2 = numpy.array(image_cv, dtype=numpy.uint8)
		image_hsv = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2HSV)
		image_hsv = cv2.blur(image_hsv, (5, 5))

		# Make binary image of pinkness
		lowerb = numpy.array((50, 100,100))
		upperb = numpy.array((80,255, 255))
		# lowerb = numpy.array((130, 50, 0))
		# upperb = numpy.array((175, 255, 255))
		# is_pink = cv2.inRange(image_hsv, numpy.array((130, 50, 0)), numpy.array((175, 255, 255)))


		# Make binary image of pinkness
		# is_pink = cv2.inRange(image_hsv, numpy.array((50, 92, 50)), numpy.array((132, 231, 187)))
		is_green = cv2.inRange(image_hsv, lowerb, upperb)

		green = copy.deepcopy(image_cv2)
		for dim in range(3): green[:, :, dim] *= is_green / 255
		green_avg = numpy.sum(numpy.sum(green, 0), 0) / numpy.sum(numpy.sum(is_green / 255, 0), 0)
		green_avg = tuple([int(green_avg[0]), int(green_avg[1]), int(green_avg[2])])
		# print green_avg
		#code.interact(local=locals())
		# Manipulate binary image
		contours, hierarchy = cv2.findContours(is_green, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
		#code.interact(local=locals())
		#print contours
		print(contours)
		max_area = 0
		for index, contour in enumerate(contours):
			area = cv2.contourArea(contour)
			if area > max_area:
				max_area = area
				best_index = index

		try:
			best_contour = contours[best_index]
			# rospy.loginfo('Best contour was contour #{0} with area of {1}'.format(best_index, max_area))
			cv2.fillConvexPoly(image_cv2, best_contour, -1)
			# cv2.drawContours(image_cv2, contours, best_index, green_avg, thickness=-1)  # fill in the largest pink blob
		except(UnboundLocalError):
			pass
		#cv2.drawContours(image_cv2, contours, best_index, (0,0,0))  # draw black line around largest pink blob

		
		# Apply binary image to full image
		# image_hsv[:,:] = green_avg
		for dim in range(3): image_hsv[:,:,dim] *= is_green / 255

		# Convert back to ROS Image msg
		# image_hsv_float = (pink.astype(float) + 1) / 256
		# image_rgb = ((matplotlib.colors.hsv_to_rgb(image_hsv_float) * 256) - 1).astype('uint8')
		# image_cv2 = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)


		image_cv = cv.fromarray(image_cv2)
		image_out = self.bridge.cv_to_imgmsg(image_cv, 'bgr8')
		self.pub.publish(image_out)
Code Example #11
    def get_background(self, vehicle_roll, vehicle_pitch):

        # create sky coloured image
        image = numpy.zeros((balloon_video.img_height, balloon_video.img_width, 3),numpy.uint8)
        image[:] = self.background_sky_colour_bgr
    
        # create large rectangle which will become the ground
        top_left = balloon_utils.rotate_xy(balloon_video.img_center_x-1000, balloon_video.img_center_y, -vehicle_roll) 
        top_right = balloon_utils.rotate_xy(balloon_video.img_center_x+1000, balloon_video.img_center_y, -vehicle_roll)
        bot_left = balloon_utils.rotate_xy(balloon_video.img_center_x-1000,balloon_video.img_center_y+1000, -vehicle_roll)
        bot_right = balloon_utils.rotate_xy(balloon_video.img_center_x+1000,balloon_video.img_center_y+1000, -vehicle_roll)
    
        # calculate vertical pixel shift
        pitch_pixel_shift = balloon_video.angle_to_pixels_y(vehicle_pitch)
    
        # add pitch adjustment
        top_left = balloon_utils.shift_pixels_down(top_left, pitch_pixel_shift)
        top_right = balloon_utils.shift_pixels_down(top_right, pitch_pixel_shift)
        bot_left = balloon_utils.shift_pixels_down(bot_left, pitch_pixel_shift)
        bot_right = balloon_utils.shift_pixels_down(bot_right, pitch_pixel_shift)
    
        # draw horizon
        box = numpy.array([top_left, top_right, bot_right, bot_left],numpy.int32)
        cv2.fillConvexPoly(image, box, self.background_ground_colour_bgr_scalar)
    
        return image
Code Example #12
File: dieextract.py Project: dcmouser/dicer
def cropImageGivenHull(img, imgMask, hull):
    """Given a hull on an image, mask everything else to black, and CROP to smallest bounding rectangle."""

    if (hull is None):
        return (img, img, hull)

    # ok first of all, lets create a mask that is black outside hull
    imgMask = dicerfuncs.makeBinaryImageMaskForImg(img)
    cv2.fillConvexPoly(imgMask, hull, 255)

    img = cv2.bitwise_and(img, img, mask=imgMask)

    if (False):
        # now minarea rotated rect
        (img, imgMask) = imgRotateToMinAreaBoundingBox(hull, img, imgMask)
        # and now regrab new hull given mask
        hull = imgGetHullGivenMask(imgMask, option_maxpercentagesize=100)

    if (hull is None):
        return (img, img, hull)

    if (True):
        # now get bounding rectangle and crop
        x, y, w, h = cv2.boundingRect(hull)
        img = img[y:y + h, x:x + w]
        imgMask = imgMask[y:y + h, x:x + w]
        # now we want to adjust hull to be valid in the cropped dimensions by subtracting x,y from each point in hull
        hull = adjustHullForCrop(hull, x, y)

    return (img, imgMask, hull)
Code Example #13
File: AtomDetector.py Project: Leucipp-us/Leucippus
    def __grabAtoms(self, image):
        from scipy.spatial import ConvexHull

        segImg = self.segmenter.segment(image)
        contours, _ = cv2.findContours(segImg.copy(),
                                        cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_NONE)

        for cnt in contours:
            M = cv2.moments(cnt)
            if M['m00'] > 0.0:
                c = np.squeeze(cnt)
                cv2.fillConvexPoly(segImg, c[ConvexHull(c).vertices], 255)
            else:
                cv2.fillConvexPoly(segImg, cnt, 0)

        contours, _ = cv2.findContours(segImg.copy(),
                                        cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_NONE)

        conts = []
        centers = []
        for cnt in contours:
            M = cv2.moments(cnt)
            if M['m00'] > 0.0:
                centers.append(np.array((int(M['m10']/M['m00']), int(M['m01']/M['m00']))))
                conts.append(cnt)

        self.segImg = segImg
        self.points = np.array(centers)
        self.contours = np.array(conts)
        return self.points
Code Example #14
File: machine-learning.py Project: lesterfan/Image
def getMoments(shape, img_size, border = 20):
	huMnts = [1,2,3,4,5,6]
	img_size +=  border * 2
	img = np.zeros((img_size,img_size), np.uint8)
	cv2.fillConvexPoly(img, shape, 255)
	contours, hier = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
	if contours:
		contour = cv2.approxPolyDP(contours[0], 10, True)
		mnts = cv2.moments(contour)
		huMnts = cv2.HuMoments(mnts)

		'''
		#number of sides that the shape has
		h7 = len(contour)
		
		#eccentricity of the shape compared to its bounding box
		boundArea = boundingArea(cv2.boundingRect(contour))
		contArea = cv2.contourArea(contour);
		h8 = (boundArea - contArea) / contArea;

		#print huMnts

		huMnts = huMnts[:6] + [h7, h8]

		print huMnts		
		'''
		huMnts = list(itertools.chain.from_iterable(huMnts))
		huMnts = huMnts[:6]
	return huMnts
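The two-value unpacking of cv2.findContours in this example matches OpenCV 2.x and 4.x; OpenCV 3.x returns three values (image, contours, hierarchy). A version-agnostic sketch:

res = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours, hier = res if len(res) == 2 else res[1:]   # tolerate both return signatures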
Code Example #15
File: expts.py Project: bensondaled/eyeblink
    def acquire_masks(self):
        im1 = self.cam2.get()
        pl.imshow(im1, cmap='gray')
        pl.title('Select Eye')
        pts_eye = pl.ginput(n=0, timeout=0)
        pts_eye = np.array(pts_eye, dtype=np.int32)
        mask_eye = np.zeros(im1.shape, dtype=np.int32)
        cv2.fillConvexPoly(mask_eye, pts_eye, (1,1,1), lineType=cv2.LINE_AA)

        pl.clf()
        
        im2 = self.cam2.get()
        pl.imshow(im2, cmap='gray')
        pl.title('Select Wheel')
        pl.gcf().canvas.draw()
        pts_wheel = pl.ginput(n=0, timeout=0)
        pts_wheel = np.array(pts_wheel, dtype=np.int32)
        mask_wheel = np.zeros(im2.shape, dtype=np.int32)
        cv2.fillConvexPoly(mask_wheel, pts_wheel, (1,1,1), lineType=cv2.LINE_AA)

        pl.close()

        self.mask = np.array([mask_eye, mask_wheel])
        self.mask_flat = self.mask.reshape((2,-1))
        return self.mask
Code Example #16
File: Bo2S.py Project: mistrzunio/bo2s-angu
def triangleColor(img,triangle):
    mask = np.zeros(img.shape,np.uint8)
    cv2.fillConvexPoly(mask,np.int32([triangle]),[1,1,1])
    mask = mask[:,:,1]
    mean_val = cv2.mean(img,mask = mask)
    threshold = 125
    return np.mean(mean_val[0:3])<threshold
Code Example #17
File: tools.py Project: jsren/sdp-vision
def mask_pitch(frame, points):
    mask = frame.copy()
    points = np.array(points, np.int32)
    cv2.fillConvexPoly(mask, points, BLACK)
    hsv_mask = cv2.cvtColor(mask, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_mask, (0, 0, 0), (0, 0, 0))
    return cv2.bitwise_and(frame, frame, mask=mask)
Code Example #18
File: recovery.py Project: zhaohongqiang/software
    def table(self, mat):
        """
        Detect anything (including multiple things) that looks remotely
        like a table, just to create a mask of where to not look for stacks or
        the tower.
        """
        self.results.table_visible = False

        # blurred = cv2.GaussianBlur(self.luv_v, (self.options['table blur size'] * 2 + 1,) * 2, 0)
        # self.post('table blurred', blurred)
        adapted = cv2.adaptiveThreshold(
            self.luv_v,
            127,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY_INV,
            self.options['table block size'] * 2 + 1,
            self.options['table c'],
        )
        self.post('table adapted', adapted)

        # morph_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (self.options['table morph size'],) * 2)
        # morphed = cv2.morphologyEx(adapted, cv2.MORPH_ELLIPSE, morph_kernel)
        # self.post('table morphed', morphed)

        _, contours, hierarchy = cv2.findContours(adapted.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        table_mask = np.zeros(mat.shape[:2], np.uint8)
        if len(contours) > 0:
            for contour in contours:
                if cv2.contourArea(contour) < self.options['table min area']:
                    continue
                hull = cv2.convexHull(contour)
                cv2.fillConvexPoly(table_mask, hull, 255)

        self.post('table mask', table_mask)
        return table_mask
Code Example #19
File: patterns.py Project: radjkarl/imgProcessor
def patSiemensStar(s0, n=72, vhigh=255, vlow=0, antiasing=False):
    '''Draw a Siemens star test pattern.'''
    arr = np.full((s0,s0),vlow, dtype=np.uint8)
    c = int(round(s0/2.))
    s = 2*np.pi/(2*n)
    step =  0
    for i in range(2*n): 
        p0 = round(c+np.sin(step)*2*s0)
        p1 = round(c+np.cos(step)*2*s0)
       
        step += s

        p2 = round(c+np.sin(step)*2*s0)
        p3 = round(c+np.cos(step)*2*s0)

        pts = np.array(((c,c), 
                        (p0,p1),
                        (p2,p3) ), dtype=int)

        cv2.fillConvexPoly(arr, pts,
                           color=vhigh if i%2 else vlow, 
                           lineType=cv2.LINE_AA  if antiasing else 0)
    arr[c,c]=0
    
    return arr.astype(float)
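A hypothetical call, writing a 512x512 anti-aliased star to disk:

star = patSiemensStar(512, n=36, antiasing=True)
cv2.imwrite('siemens_star.png', star.astype(np.uint8))  # values are 0/255, safe to cast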
Code Example #20
File: behavior.py Project: agiovann/CalBlitz
def select_roi(img,n_rois=1):
    """
    Create a mask from the convex polygon enclosed by the selected points
    
    Parameters
    ----------
    img: 2D ndarray
        image used to select the points for the mask
    n_rois: int
        number of rois to select
    
    Returns
    -------
    masks: list
        each element is the mask of one selected ROI
    """
    
    masks=[];
    for n in range(n_rois):
        fig=pl.figure()
        pl.imshow(img,cmap=pl.cm.gray)
        pts = fig.ginput(0, timeout=0)
        mask = np.zeros(np.shape(img), dtype=np.int32)
        pts = np.asarray(pts, dtype=np.int32)
        cv2.fillConvexPoly(mask, pts, (1,1,1), lineType=cv2.LINE_AA)
        masks.append(mask)
        #data=np.float32(data)
        pl.close()
        
    return masks
Code Example #21
    def _visualize(self, img, joint_cands_indices, all_peaks, candidate):

        cmap = matplotlib.cm.get_cmap('hsv')
        for i in range(len(self.index2limbname)-1):
            rgba = np.array(cmap(1 - i / 18. - 1. / 36))
            rgba[0:3] *= 255
            for j in range(len(all_peaks[i])):
                cv2.circle(img, (int(all_peaks[i][j][0]), int(
                    all_peaks[i][j][1])), 4, self.colors[i], thickness=-1)

        stickwidth = 4
        for i in range(len(self.index2limbname) - 2):
            for joint_cand_indices in joint_cands_indices:
                index = joint_cand_indices[np.array(self.limb_sequence[i],
                                                    dtype=np.int32) - 1]
                if -1 in index:
                    continue
                cur_img = img.copy()
                Y = candidate[index.astype(int), 0]
                X = candidate[index.astype(int), 1]
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
                polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(
                    length / 2), stickwidth), int(angle), 0, 360, 1)
                cv2.fillConvexPoly(cur_img, polygon, self.colors[i])
                img = cv2.addWeighted(img, 0.4, cur_img, 0.6, 0)

        return img
Code Example #22
File: mser.py Project: presocratics/widebaseline
def getPatch(img,con):
	"""Returns a patch from the contour"""
	mask=np.zeros(img.shape,dtype=img.dtype)
	cv2.fillConvexPoly(mask,con,(255,255,255))
	mask&=img
	x,y,w,h=cv2.boundingRect(con)
	return mask[y:y+h,x:x+w]
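A usage sketch under stated assumptions: img is a hypothetical BGR image, and each contour is passed through cv2.convexHull first, since getPatch fills the region with cv2.fillConvexPoly (which expects a convex polygon):

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
contours = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
patches = [getPatch(img, cv2.convexHull(c)) for c in contours]  # one patch per region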
Code Example #23
File: shape.py Project: afisherkatz/stuff
	def getShape(self, xRotation=0, yRotation=0, x=150, y=150, z=0, display=np.zeros((300,300,3))):
		polygons = []
		for poly in self.polygons:
			points = []
			for (X,Y,Z) in poly['coords']:
				(X,Y,Z) = objectRotation(xRotation, yRotation, X, Y, Z)
				(pX,pY) = scale(X,Y,Z+z)
				points.append((pX+x,pY+y,Z+z))
			points.insert(0,max(points, key=itemgetter(2))[2])
			points.insert(1,min(points[1:], key=itemgetter(2))[2])
			points.append(poly['color'])
			polygons.append(points)
		polygons = sorted(polygons, key=itemgetter(1))
		polygons = sorted(polygons, key=itemgetter(0))
		polygons = [poly[2:] for poly in polygons]
		for points in polygons:
			color = points.pop(-1)
			points = [p[:-1] for p in points]
			lines = zip(points, np.roll(points,1,axis=0))
			points = np.array(points, dtype = 'int32')
			cv2.fillConvexPoly(display, points, color)
			for ((p1,p2)) in lines:
				p1 = tuple(int(i) for i in p1)
				p2 = tuple(int(i) for i in p2)
				cv2.line(display,p1,p2,color=(0,0,0))
		return copy.deepcopy(display)
Code Example #24
File: cmyk.py Project: IEEERobotics/high-level
def drawRects(img, ctrs):
	i = 1
	rectList = []
	for ct in ctrs[0]:
		x, y, w, h = cv2.boundingRect(ct)

		#process only vertical rectangles (i.e., w < h) with w and h > 10
		if w < h and w > 10 and h > 10:
			#print i, ". ", len(ct), " -- ", cv2.boundingRect(ct), (x+w/2), cv2.minAreaRect(ct)
			rectList.append([cv2.boundingRect(ct), cv2.minAreaRect(ct)])
			clr=(random.randrange(0,255),random.randrange(0,255),random.randrange(0,255))
			#cv2.drawContours(image=img, contours=ct, contourIdx=-1, color=clr , thickness=-1)
			cv2.rectangle(img, (x,y), (x+w,y+h), clr, 5)
			cv2.fillConvexPoly(img, ct, clr)
			cv2.rectangle(img, (x+w/2-3,y), (x+w/2+3,y+h), (255,255,255), -1)
			cv2.rectangle(img, (x,y+h/2-3), (x+w,y+h/2+3), (255,255,255), -1)
			
			rotRect = cv2.minAreaRect(ct)
			box = cv2.cv.BoxPoints(rotRect)
			box = np.int0(box)
			print(box)
			cv2.drawContours(img, [box], 0, (0,0,255),2)
			#cv2.imshow("asdsdasdadasdasd",img)
			#key = cv2.waitKey(1000)
			i = i + 1
	cv2.rectangle(img, (318,0), (322,640), (255,255,255), -1)
	cv2.imshow("Output",img)
	print "done"
	return rectList
Code Example #25
File: alignment.py Project: fonfonx/RSC_python
def warp_image(img, triangulation, base_points, coord):
    """
    Realize the mesh warping phase

    triangulation is the Delaunay triangulation of the base points
    base_points are the coordinates of the landmark points of the reference image

    code inspired from http://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/
    """
    all_points, coordinates = preprocess_image_before_triangulation(img)
    img_out = 255 * np.ones(img.shape, dtype=img.dtype)
    for t in triangulation:
        # triangles to map one another
        src_tri = np.array([[all_points[x][0], all_points[x][1]] for x in t]).astype(np.float32)
        dest_tri = np.array([[base_points[x][0], base_points[x][1]] for x in t]).astype(np.float32)
        # bounding boxes
        src_rect = cv2.boundingRect(np.array([src_tri]))
        dest_rect = cv2.boundingRect(np.array([dest_tri]))

        # crop images
        src_crop_tri = np.zeros((3, 2), dtype=np.float32)
        dest_crop_tri = np.zeros((3, 2))
        for k in range(0, 3):
            for dim in range(0, 2):
                src_crop_tri[k][dim] = src_tri[k][dim] - src_rect[dim]
                dest_crop_tri[k][dim] = dest_tri[k][dim] - dest_rect[dim]

        src_crop_img = img[src_rect[1]:src_rect[1] + src_rect[3], src_rect[0]:src_rect[0] + src_rect[2]]

        # affine transformation estimation
        mat = cv2.getAffineTransform(
            np.float32(src_crop_tri),
            np.float32(dest_crop_tri)
        )
        dest_crop_img = cv2.warpAffine(
            src_crop_img,
            mat,
            (dest_rect[2], dest_rect[3]),
            None,
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_REFLECT_101
        )

        # Use a mask to keep only the triangle pixels
        # Get mask by filling triangle
        mask = np.zeros((dest_rect[3], dest_rect[2], 3), dtype=np.float32)
        cv2.fillConvexPoly(mask, np.int32(dest_crop_tri), (1.0, 1.0, 1.0), 16, 0)

        # Apply mask to cropped region
        dest_crop_img = dest_crop_img * mask

        # Copy triangular region of the rectangular patch to the output image
        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] * (
                (1.0, 1.0, 1.0) - mask)

        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] + dest_crop_img

    return img_out[coord[2]:coord[3], coord[0]:coord[1]]
Code Example #26
 def drawOn(self, canvas, color=(0, 0, 255), degreeDelta=20):
     points = cv2.ellipse2Poly(tuple(map(int, self.position)),
                               tuple(map(int, self.axes)),
                               self.angle*180/math.pi,
                               0, 360,
                               degreeDelta)
     cv2.fillConvexPoly(canvas, points, color, cv2.CV_AA)
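cv2.CV_AA is the OpenCV 2.x anti-aliasing constant and is gone from the cv2 namespace in OpenCV 3+; the equivalent modern call would be:

cv2.fillConvexPoly(canvas, points, color, lineType=cv2.LINE_AA)  # LINE_AA replaces CV_AA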
Code Example #27
File: synth_shape.py Project: lrjconan/img-count
def _draw_triangle(img, center, radius, fill, border=None, thickness=None):
    """Draw a equilateral triangle given center and radius.

    Args:
        img: numpy.ndarray, [H, W, 3], dtype=uint8, [0, 255]
        center: tuple, (x, y)
        radius: float
        fill: tuple, (B, G, R)
        border: tuple, (B, G, R)
        thickness: float
    """
    p1 = (int(center[0]), int(np.floor(center[1] - radius)))
    p2 = (int(np.floor(center[0] - radius * np.sin(60.0 * np.pi / 180.0))),
          int(np.floor(center[1] + radius * np.sin(30.0 * np.pi / 180.0))))
    p3 = (int(np.floor(center[0] + radius * np.sin(60.0 * np.pi / 180.0))),
          int(np.floor(center[1] + radius * np.sin(30.0 * np.pi / 180.0))))
    pts = np.array([p1, p2, p3])
    # log.info(pts)
    cv2.fillConvexPoly(img, points=pts, color=fill)

    if border:
        cv2.line(img, pt1=p1, pt2=p2, color=border, thickness=thickness)
        cv2.line(img, pt1=p2, pt2=p3, color=border, thickness=thickness)
        cv2.line(img, pt1=p3, pt2=p1, color=border, thickness=thickness)

    pass
Code Example #28
    def update_image2(self): #updates images with boid locations and responsibility vectors
        self.reset_image()
        
        if (self.debug_mode):
            debug_colors = [[255, 50, 50], [50, 255, 50], [50, 50, 255]]
            for boid in self.world.boids:
                poly = self.create_boid_poly([boid.location.y, boid.location.x], boid.theta)#create boid polygon in world coord
                #print poly
                poly = np.float32([ poly ]).reshape(-1,1,2)
                
                for i in range(self.num_proj):
                    boid_pix = cv2.perspectiveTransform(poly, self.homog[i])            
                    boid_pix = np.int32(boid_pix).reshape(1,-1,2)
                    cv2.fillConvexPoly(self.image[i], np.fliplr(boid_pix[0]), debug_colors[i])

        else:
            for boid in self.world.boids:
                poly = self.create_boid_poly([boid.location.y, boid.location.x], boid.theta)#create boid polygon in world coord
                
                #figure out which projector owns it
                proj_own = -1
                for i in range(self.num_proj):
                    if self.resp[i].contains(ShpPoint(boid.location.y, boid.location.x)):
                        proj_own = i
                        break
                        
                #transform to proj coordinates
                if (proj_own >= 0):
                    poly = np.float32([ poly ]).reshape(-1,1,2)
                    boid_pix = cv2.perspectiveTransform(poly, self.homog[proj_own])            
                    boid_pix = np.int32(boid_pix).reshape(1,-1,2)
                    cv2.fillConvexPoly(self.image[proj_own], np.fliplr(boid_pix[0]), boid.color)

        self.draw()
Code Example #29
def get_colors(image,coords,approx):
    mask = np.zeros((image.shape[0], image.shape[1]))
    cv2.fillConvexPoly(mask, approx, 1)
    mask = mask.astype(bool)

    out = np.zeros_like(image)
    out[mask] = image[mask]
    
##    cv2.imshow('Extracted Image', out)
##    cv2.waitKey(0)
##    cv2.destroyAllWindows()
    out = out[coords[1]:coords[1]+coords[3], coords[0]:coords[0]+coords[2]]
    out_ = np.ma.masked_where(out == 0, out)
    R = out_[:,:,2]
    B = out_[:,:,0]
    G = out_[:,:,1]
    dict = {'red':np.mean(R), 'green':np.mean(G), 'blue':np.mean(B)};

##    
##    color_list = image2.reshape((image2.shape[0] * image2.shape[1], 3))
##    dict = {'red':color_list[:,2].mean(), 'green':color_list[:,1].mean(), 'blue':color_list[:,0].mean()};
##    output = cv2.resize(image2,(649,486))
#     cv2.imshow('image',output)
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()
    return dict
Code Example #30
File: interface.py Project: Pfahlf/Da-Cam
def drawPyramid(img, imgpoints):
    imgpoints = np.int32(imgpoints).reshape(-1,2)
##    img = cv2.line(img, tuple(imgpoints[0]), tuple(imgpoints[6]), (255, 0, 0),5)
##    img = cv2.line(img, tuple(imgpoints[0]), tuple(imgpoints[42]), (255, 0, 0),6)
##    img = cv2.line(img, tuple(imgpoints[42]), tuple(imgpoints[48]), (255, 0, 0),6)
##    img = cv2.line(img, tuple(imgpoints[48]), tuple(imgpoints[6]), (255, 0, 0),6)
##    img = cv2.line(img, tuple(imgpoints[0]), tuple(imgpoints[-34]-300,), (255, 0, 0),6)
##    img = cv2.line(img, tuple(imgpoints[6]), tuple(imgpoints[-34]-300), (255, 0, 0),6)
##    img = cv2.line(img, tuple(imgpoints[42]), tuple(imgpoints[-34]-300), (255, 0, 0),6)
##    img = cv2.line(img, tuple(imgpoints[48]), tuple(imgpoints[-34]-300), (255, 0, 0),6)
    lst = []
    coordinates = list(imgpoints[24])
    x = coordinates[0]
    y = coordinates[1]-300
    lst.append(x)
    lst.append(y)
    triangle1 = np.array([ list(imgpoints[0]), list(imgpoints[42]), lst ], np.int32)
    triangle2 = np.array([ list(imgpoints[0]), list(imgpoints[6]), lst ], np.int32)
    triangle3 = np.array([ list(imgpoints[42]), list(imgpoints[48]), lst ], np.int32)
    triangle4 = np.array([ list(imgpoints[48]), list(imgpoints[6]), lst ], np.int32)
##    rectangle = np.array([ list(imgpoints[0]), list(imgpoints[6]), list(imgpoints[42]), list(imgpoints[48]) ], np.int32)
    img = cv2.fillConvexPoly(img, triangle1,(0, 0, 255))
    img = cv2.fillConvexPoly(img, triangle2,(0, 0, 0))
    img = cv2.fillConvexPoly(img, triangle3,(255, 0, 0))
    img = cv2.fillConvexPoly(img, triangle4,(0, 50, 0))
##    img = cv2.fillConvexPoly(img, rectangle, (255, 255, 255))
    return img
Code Example #31
    im_dst = cv2.imread(
        r'C:\Users\arkma\PycharmProjects\karomiOpenCVClass\Resources\Images\times-square.jpg'
    )

    # Get four corners of the billboard
    # noinspection PyUnresolvedReferences
    print('Click on four corners of a billboard and then press ENTER')
    pts_dst = get_four_points(im_dst)

    # Calculate Homography between source and destination points
    # noinspection PyUnresolvedReferences
    h, status = cv2.findHomography(pts_src, pts_dst)

    # Warp source image
    # noinspection PyUnresolvedReferences
    im_temp = cv2.warpPerspective(im_src, h,
                                  (im_dst.shape[1], im_dst.shape[0]))

    # Black out polygonal area in destination image.
    # noinspection PyUnresolvedReferences
    cv2.fillConvexPoly(im_dst, pts_dst.astype(int), 0, 16)

    # Add warped source image to destination image.
    im_dst = im_dst + im_temp

    # Display image.
    # noinspection PyUnresolvedReferences
    cv2.imshow("Image", im_dst)
    # noinspection PyUnresolvedReferences
    cv2.waitKey(0)
Code Example #32
def render(pc,
           face_index,
           t_u,
           t_v,
           x,
           y,
           z,
           a,
           b,
           c,
           camera_mat,
           width,
           height,
           part_bboxs,
           is_render_part=True):

    color_table = [[247, 77, 149], [32, 148, 9], [166, 104, 6], [7, 212, 133],
                   [1, 251, 1], [2, 2, 188], [219, 251, 1], [96, 94, 92],
                   [229, 114, 84], [216, 166, 255], [113, 165, 231],
                   [8, 78, 183], [112, 252, 57], [5, 28, 126], [100, 111, 156],
                   [140, 60, 39], [75, 13, 159], [188, 110, 83]]
    # print(face_index)
    tem_depth = np.ones((height, width, 3), dtype=np.uint16) * 20000
    depth_map = np.ones((height, width, 3), dtype=np.uint16) * 20000
    res = np.zeros((height, width, 3), dtype=np.uint8)
    tem = np.zeros((height, width, 3), dtype=np.uint8)
    # for pc, face_index, t_u, t_v, x, y, z, a, b, c in zip(pcs, face_indexs, t_us, t_vs, xs, ys, zs, aas, bs, cs):
    if True:
        rot_mat = get_rotation_mat(a, b, c)
        pc2 = np.dot(rot_mat, pc)
        pc2[0, :] += x
        pc2[1, :] += y
        pc2[2, :] += z

        pc2 = np.dot(camera_mat, pc2)
        u = np.int32(pc2[0, :] / pc2[2, :]).reshape(1, -1)
        v = np.int32(pc2[1, :] / pc2[2, :]).reshape(1, -1)
        zz = pc2[2, :].reshape(1, -1)
        u_item = (u[0, face_index])  # shape is num_face * 3
        v_item = (v[0, face_index])  # shape is num_face * 3
        z_item = (zz[0, face_index])  # shape is num_face * 3

        max_us = np.max(u_item, axis=1)  # num_face * 1
        max_vs = np.max(v_item, axis=1)
        min_us = np.min(u_item, axis=1)
        min_vs = np.min(v_item, axis=1)
        face_depth = np.average(z_item, axis=1)

        t_u = t_u.reshape(1, -1)
        t_v = t_v.reshape(1, -1)
        t_u_item = (t_u[0, face_index])  # shape is num_face * 3
        t_v_item = (t_v[0, face_index])  # shape is num_face * 3

        max_tus = np.max(t_u_item, axis=1)
        max_tvs = np.max(t_v_item, axis=1)
        min_tus = np.min(t_u_item, axis=1)
        min_tvs = np.min(t_v_item, axis=1)

        for us, vs, min_v, max_v, min_u, max_u, des, min_tv, max_tv, min_tu, max_tu in zip(
                u_item, v_item, min_vs, max_vs, min_us, max_us, face_depth,
                min_tvs, max_tvs, min_tus, max_tus):
            part_index = 0
            for p, bbox in enumerate(part_bboxs):

                if (min_tv + max_tv) / 2 > bbox[0] and (min_tu + max_tu) / 2 > bbox[1] and (min_tv + max_tv) / 2 < \
                        bbox[2] and (min_tu + max_tu) / 2 < bbox[3]:
                    part_index = p
            triangle = np.array(
                [[us[0], vs[0]], [us[1], vs[1]], [us[2], vs[2]]], np.int32)

            des = int(des * 100)

            cv2.fillConvexPoly(tem_depth, triangle, (des, des, des))
            cv2.fillConvexPoly(
                tem, triangle,
                (color_table[part_index][2], color_table[part_index][1],
                 color_table[part_index][0]))
            append_mask = tem_depth[min_v:max_v, min_u:max_u,
                                    0:1] < depth_map[min_v:max_v, min_u:max_u,
                                                     0:1]
            depth_map[min_v:max_v, min_u:max_u, 0:1][append_mask] = des

            #
            if is_render_part:
                res[min_v:max_v, min_u:max_u,
                    0:1][append_mask] = part_index + 1
                res[min_v:max_v, min_u:max_u,
                    1:2][append_mask] = part_index + 1
                res[min_v:max_v, min_u:max_u,
                    2:3][append_mask] = part_index + 1
            else:
                res[min_v:max_v, min_u:max_u, 0:1][append_mask] = 1
                res[min_v:max_v, min_u:max_u, 1:2][append_mask] = 1
                res[min_v:max_v, min_u:max_u, 2:3][append_mask] = 1

    mask = ((res[:, :, 0] > 0) & (res[:, :, 1] > 0))

    return res, mask
Code Example #33
 img1 = cv2.imread('../img/boy_face.jpg')
 img2 = cv2.imread('../img/girl_face.jpg')
 cv2.imshow('img1', img1)
 cv2.imshow('img2', img2)
 img_draw = img2.copy()
 # Get the face landmark coordinates from each image --- ⑥
 points1 = getPoints(img1)
 points2 = getPoints(img2)
 # Compute the convex hull from the landmark coordinates --- ⑦
 hullIndex = cv2.convexHull(np.array(points2), returnPoints = False)
 hull1 = [points1[int(idx)] for idx in hullIndex]
 hull2 = [points2[int(idx)] for idx in hullIndex]
 # Compute the Delaunay triangle coordinates inside the convex hull --- ⑧
 triangles = getTriangles(img2, hull2)
 
 # Affine-warp the triangles using each triangle's coordinates --- ⑨
 for i in range(0, len(triangles)):
     t1 = [hull1[triangles[i][j]] for j in range(3)]
     t2 = [hull2[triangles[i][j]] for j in range(3)]
     warpTriangle(img1, img_draw, t1, t2)
 # Blend the faces using the convex hull as a mask --- ⑩
 mask = np.zeros(img2.shape, dtype = img2.dtype)  
 cv2.fillConvexPoly(mask, np.int32(hull2), (255, 255, 255))
 r = cv2.boundingRect(np.float32([hull2]))    
 center = ((r[0]+int(r[2]/2), r[1]+int(r[3]/2)))
 output = cv2.seamlessClone(np.uint8(img_draw), img2, mask, center, cv2.NORMAL_CLONE)
 
 cv2.imshow("Face Swapped", output)
 cv2.waitKey(0)
 cv2.destroyAllWindows()
     
Code Example #34
def img_process(user_img, q):
    img = user_img  # cut out face
    # cv2.imshow('f**k', img)
    # cv2.waitKey(0)

    img2 = cv2.imread('./img/main.png')  # back face and model face

    height2, width2, chan2 = img2.shape
    height1, width1, chan1 = img.shape

    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    mask = np.zeros_like(img_gray)

    img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # face and point dectector
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        "./shapes/shape_predictor_68_face_landmarks.dat")
    height, width, channels = img2.shape
    img2_new_face = np.zeros((height, width, channels), np.uint8)

    # Face 1
    faces = detector(img_gray)

    # Fail Check
    if len(faces) == 0:
        q.put('fail')
        return ''

    for face in faces:
        landmarks = predictor(img_gray, face)
        landmarks_points = []
        for n in range(0, 68):
            x = landmarks.part(n).x
            y = landmarks.part(n).y
            landmarks_points.append((x, y))

        points = np.array(landmarks_points, np.int32)
        convexhull = cv2.convexHull(points)
        # cv2.polylines(img, [convexhull], True, (255, 0, 0), 3)
        cv2.fillConvexPoly(mask, convexhull, 255)

        face_image_1 = cv2.bitwise_and(img, img, mask=mask)

        # Delaunay triangulation
        rect = cv2.boundingRect(convexhull)
        subdiv = cv2.Subdiv2D(rect)
        subdiv.insert(landmarks_points)
        triangles = subdiv.getTriangleList()
        triangles = np.array(triangles, dtype=np.int32)

        indexes_triangles = []
        for t in triangles:
            pt1 = (t[0], t[1])
            pt2 = (t[2], t[3])
            pt3 = (t[4], t[5])

            index_pt1 = np.where((points == pt1).all(axis=1))
            index_pt1 = extract_index_nparray(index_pt1)

            index_pt2 = np.where((points == pt2).all(axis=1))
            index_pt2 = extract_index_nparray(index_pt2)

            index_pt3 = np.where((points == pt3).all(axis=1))
            index_pt3 = extract_index_nparray(index_pt3)

            if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:
                triangle = [index_pt1, index_pt2, index_pt3]
                indexes_triangles.append(triangle)

    # Face 2
    faces2 = detector(img2_gray)
    for face in faces2:
        landmarks = predictor(img2_gray, face)
        landmarks_points2 = []
        for n in range(0, 68):
            x = landmarks.part(n).x
            y = landmarks.part(n).y
            landmarks_points2.append((x, y))

        points2 = np.array(landmarks_points2, np.int32)
        convexhull2 = cv2.convexHull(points2)

    lines_space_mask = np.zeros_like(img_gray)
    lines_space_new_face = np.zeros_like(img2)

    # Triangulation of both faces
    for triangle_index in indexes_triangles:
        # Triangulation of the first face
        tr1_pt1 = landmarks_points[triangle_index[0]]
        tr1_pt2 = landmarks_points[triangle_index[1]]
        tr1_pt3 = landmarks_points[triangle_index[2]]
        triangle1 = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32)

        rect1 = cv2.boundingRect(triangle1)
        (x, y, w, h) = rect1
        cropped_triangle = img[y:y + h, x:x + w]
        cropped_tr1_mask = np.zeros((h, w), np.uint8)

        points = np.array([[tr1_pt1[0] - x, tr1_pt1[1] - y],
                           [tr1_pt2[0] - x, tr1_pt2[1] - y],
                           [tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32)

        cv2.fillConvexPoly(cropped_tr1_mask, points, 255)

        # Lines space
        lines_space = cv2.bitwise_and(img, img, mask=lines_space_mask)

        # Triangulation of second face
        tr2_pt1 = landmarks_points2[triangle_index[0]]
        tr2_pt2 = landmarks_points2[triangle_index[1]]
        tr2_pt3 = landmarks_points2[triangle_index[2]]
        triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32)

        rect2 = cv2.boundingRect(triangle2)
        (x, y, w, h) = rect2

        cropped_tr2_mask = np.zeros((h, w), np.uint8)

        points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y],
                            [tr2_pt2[0] - x, tr2_pt2[1] - y],
                            [tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32)

        cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)

        # Warp triangles
        points = np.float32(points)
        points2 = np.float32(points2)
        M = cv2.getAffineTransform(points, points2)
        warped_triangle = cv2.warpAffine(cropped_triangle, M, (w, h))
        warped_triangle = cv2.bitwise_and(warped_triangle,
                                          warped_triangle,
                                          mask=cropped_tr2_mask)

        # Reconstructing destination face
        img2_new_face_rect_area = img2_new_face[y:y + h, x:x + w]
        img2_new_face_rect_area_gray = cv2.cvtColor(img2_new_face_rect_area,
                                                    cv2.COLOR_BGR2GRAY)
        _, mask_triangles_designed = cv2.threshold(
            img2_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV)
        warped_triangle = cv2.bitwise_and(warped_triangle,
                                          warped_triangle,
                                          mask=mask_triangles_designed)

        img2_new_face_rect_area = cv2.add(img2_new_face_rect_area,
                                          warped_triangle)
        img2_new_face[y:y + h, x:x + w] = img2_new_face_rect_area

    # Face swapped (putting 1st face into 2nd face)
    img2_face_mask = np.zeros_like(img2_gray)
    img2_head_mask = cv2.fillConvexPoly(img2_face_mask, convexhull2, 255)
    img2_face_mask = cv2.bitwise_not(img2_head_mask)

    img2_head_noface = cv2.bitwise_and(img2, img2, mask=img2_face_mask)
    result = cv2.add(img2_head_noface, img2_new_face)

    (x, y, w, h) = cv2.boundingRect(convexhull2)
    center_face2 = (int((x + x + w) / 2), int((y + y + h) / 2))

    # can change it to Mix_clone
    seamlessclone = cv2.seamlessClone(result, img2, img2_head_mask,
                                      center_face2, cv2.NORMAL_CLONE)

    seamlessclone = cv2.resize(seamlessclone, (width2, height2))

    retval, buffer = cv2.imencode('.png', seamlessclone)
    pngBase64 = base64.b64encode(buffer)

    # print(pngBase64)

    dataBase64 = "data:image/png;base64," + \
        str(pngBase64).replace("b'", ' ').replace("'", " ")

    q.put(dataBase64)
Code Example #35
            M = cv.getRotationMatrix2D((largerBbox[2] / 2, largerBbox[3] / 2),
                                       angle, 1)

            # rotate the envelope points
            ePoints = np.array(e['points']) - np.array(
                [largerBbox[0], largerBbox[1]])
            ePointsRotated = cv.transform(np.array([ePoints]), M)[0]

            # get new bounding box after rotation
            bboxAfterRotation = cv.boundingRect(np.array(ePointsRotated))
            templates[counter]['bbox'] = bboxAfterRotation

            # create envelope mask
            eMask = np.zeros((largerBbox[3], largerBbox[2]),
                             dtype=np.uint8)  # envelope mask
            cv.fillConvexPoly(eMask, ePointsRotated, 255)
            eMask = eMask[bboxAfterRotation[1]:bboxAfterRotation[1] +
                          bboxAfterRotation[3],
                          bboxAfterRotation[0]:bboxAfterRotation[0] +
                          bboxAfterRotation[2]]

            # write envelope
            templates[counter]['e'] = {}
            templates[counter]['e']['mask'] = eMask
            templates[counter]['e']['points'] = np.array(
                ePointsRotated) - np.array(
                    [bboxAfterRotation[0], bboxAfterRotation[1]])
            templates[counter]['eSize'] = cv.countNonZero(eMask)

            # tissue
            tPoints = np.array(t['points']) - np.array(
Code Example #36
File: preprocess.py Project: xaamin/photo-id-ocr
image = cropped.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)

canny = cv2.Canny(image, 30, 150)
kernel = np.ones((2,2),np.uint8)
(_, cnts, _) = cv2.findContours(canny.copy(), cv2.RETR_EXTERNAL,
       cv2.CHAIN_APPROX_SIMPLE)
im = image.copy()

rect = cv2.minAreaRect(cnts[-1]) # using the largest contour in the image
box = cv2.boxPoints(rect)
box = np.int0(box)
im = cv2.drawContours(im,[box],0,(0,0,255),2)
mask = np.zeros(image.shape[:2], dtype = "uint8")
cv2.fillConvexPoly(mask, box, (255, 255, 255))
masked = cv2.bitwise_and(image, image, mask = mask)

x0,y0 = box[0]
x1,y1 = box[1]
x2,y2 = box[2]

# calculation of skew angle

angle = 90.0 - (atan(float(x0-x1)/(y0-y1)) * 180/3.14)
center = (((x0+x2)//2),((y0+y2)//2))
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(masked, M, (image.shape[1], image.shape[0])) # de-skewing,
# can otherwise be performed using warpPerspective() and 4-point Transform

Code Example #37
                                                     upper_left_pos[0])
                x3, x4 = int(x3 - left +
                             upper_left_pos[0]), int(x4 - left +
                                                     upper_left_pos[0])
                y1, y2 = int(y1 - up +
                             upper_left_pos[1]), int(y2 - up +
                                                     upper_left_pos[1])
                y3, y4 = int(y3 - up +
                             upper_left_pos[1]), int(y4 - up +
                                                     upper_left_pos[1])
                bbox = [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]

                if len(text_region) == 0:
                    im = np.zeros([W, H], dtype="uint8")
                    text_region.append(bbox)
                    mask = cv2.fillConvexPoly(im, np.array(bbox), 10)
                    break
                else:
                    mask_copy = mask
                    im1 = np.zeros([W, H], dtype="uint8")
                    mask1 = cv2.fillConvexPoly(im1, np.array(bbox), 10)
                    masked_and = mask_copy + mask1
                    and_area = np.sum(
                        np.float32(np.greater(masked_and, 10))
                    )  # use and_area to check whether masked_and has an overlap area

                    # choose whether to allow overlap
                    if False and and_area > 1.0:
                        successFlag = False
                        break
                    elif x1 > H or x2 > H or x3 > H or x4 > H or y1 > W or y2 > W or y3 > W or y4 > W:  # not exceed the boundary
Code Example #38
def _create_rbbox_mask(size, rbbox):
    mask = np.zeros(size, dtype=np.uint8)
    points = annotation_utils.rbox2points(rbbox)
    points = points.astype(int)
    cv2.fillConvexPoly(mask, points, 255)
    return mask
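annotation_utils.rbox2points is project-specific and not shown. Assuming the rotated box follows the ((cx, cy), (w, h), angle) convention of cv2.minAreaRect, a minimal hypothetical stand-in:

def rbox2points(rbbox):
    # corners of a rotated rect as a float32 (4, 2) array; the caller casts to int
    return cv2.boxPoints(rbbox)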
Code Example #39
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
i = 0
for c in contours:
    if cv2.contourArea(c) > 3000:
        i = i + 1
        # calculate moments for each contour
        M = cv2.moments(c)
        if M["m00"] != 0:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
        else:
            cX, cY = 0, 0
        # calculate x,y coordinate of center
        cv2.circle(final, (cX, cY), 5, (255, 255, 255), -1)
        cv2.fillConvexPoly(arr, c, [255, 255, 255])
        cv2.putText(final, 'Centroid', (cX - 25, cY - 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        # print(cv2.arcLength(c, True)) # ARC LENGTH
        # print(cv2.contourArea(c)) # Unit is pixel
        # Formula used: 1 mm = 26 pixels -> area of 1 sq. pixel = 0.00147 mm^2 (i.e., 1/(26*26))
        # Therefore the area is (number of pixels) * (area of 1 sq. pixel) * 1616
        area_mm = round(0.00147 * cv2.contourArea(c) * 1616)  #convert to mm^2
        area = area_mm / 100  #convert to cm^2
        print(cv2.contourArea(c), 'pixels')
        print('Area of the Pothole', i, 'is ', area, 'square cm')

#showing image with foreground objects
cv2.imshow('image', final)
cv2.imwrite('area.jpg', final)
Code Example #40
 def build_masks(poly, dims):
     w, h = dims
     mask = np.zeros((h, w), np.uint8)
     cv2.fillConvexPoly(mask, poly, 255)
     mask_3ch = np.dstack((mask, mask, mask))
     return mask, mask_3ch
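A hypothetical call, masking a triangle out of a 640x480 frame:

poly = np.array([[100, 50], [400, 80], [250, 300]], dtype=np.int32)  # convex polygon
mask, mask_3ch = build_masks(poly, (640, 480))  # dims are (width, height)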
Code Example #41
 def _draw_q_value(self, x, y, action, q_value_norm):
     # First, convert state space to image space for the "up-down" axis, because the world space origin is the bottom left, whereas the image space origin is the top left
     y = 1 - y
     # Compute the image coordinates of the centre of the triangle for this action
     centre_x = x * self.magnification
     centre_y = y * self.magnification
     # Compute the colour for this q value
     colour_r = int((1 - q_value_norm) * 255)
     colour_g = int(q_value_norm * 255)
     colour_b = 0
     colour = (colour_b, colour_g, colour_r)
     # Depending on the particular action, the triangle representing the action will be drawn in a different position on the image
     if action == 0:  # Move right
         point_1_x = centre_x + self.half_cell_length
         point_1_y = centre_y + self.half_cell_length
         point_2_x = point_1_x
         point_2_y = centre_y - self.half_cell_length
         points = np.array([[centre_x, centre_y], [point_1_x, point_1_y],
                            [point_2_x, point_2_y]],
                           dtype=np.int32)
         cv2.fillConvexPoly(self.q_values_image, points, colour)
         cv2.polylines(self.q_values_image, [points],
                       True, (0, 0, 0),
                       thickness=2,
                       lineType=cv2.LINE_AA)
     elif action == 1:  # Move up
         point_1_x = centre_x + self.half_cell_length
         point_1_y = centre_y - self.half_cell_length
         point_2_x = centre_x - self.half_cell_length
         point_2_y = point_1_y
         points = np.array([[centre_x, centre_y], [point_1_x, point_1_y],
                            [point_2_x, point_2_y]],
                           dtype=np.int32)
         cv2.fillConvexPoly(self.q_values_image, points, colour)
         cv2.polylines(self.q_values_image, [points],
                       True, (0, 0, 0),
                       thickness=2,
                       lineType=cv2.LINE_AA)
     elif action == 2:  # Move left
         point_1_x = centre_x - self.half_cell_length
         point_1_y = centre_y - self.half_cell_length
         point_2_x = point_1_x
         point_2_y = centre_y + self.half_cell_length
         points = np.array([[centre_x, centre_y], [point_1_x, point_1_y],
                            [point_2_x, point_2_y]],
                           dtype=np.int32)
         cv2.fillConvexPoly(self.q_values_image, points, colour)
         cv2.polylines(self.q_values_image, [points],
                       True, (0, 0, 0),
                       thickness=2,
                       lineType=cv2.LINE_AA)
     elif action == 3:  # Move down
         point_1_x = centre_x - self.half_cell_length
         point_1_y = centre_y + self.half_cell_length
         point_2_x = centre_x + self.half_cell_length
         point_2_y = point_1_y
         points = np.array([[centre_x, centre_y], [point_1_x, point_1_y],
                            [point_2_x, point_2_y]],
                           dtype=np.int32)
         cv2.fillConvexPoly(self.q_values_image, points, colour)
         cv2.polylines(self.q_values_image, [points],
                       True, (0, 0, 0),
                       thickness=2,
                       lineType=cv2.LINE_AA)
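
The colour ramp above is a linear red-to-green blend in BGR order; a compact equivalent, assuming q_value_norm is already in [0, 1]:

def q_colour(q_value_norm):
    # 0.0 -> (0, 0, 255) pure red, 0.5 -> (0, 127, 127), 1.0 -> (0, 255, 0) pure green
    return (0, int(q_value_norm * 255), int((1 - q_value_norm) * 255))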
コード例 #42
0
 def draw_quads(self, img, quads, color=(0, 255, 0)):
     img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec,
                                  self.tvec, self.K, self.dist_coef)[0]
     img_quads.shape = quads.shape[:2] + (2, )
     for q in img_quads:
         # coordinates scaled by 4 with shift=2 give 2 bits of sub-pixel precision
         cv.fillConvexPoly(img, np.int32(q * 4), color, cv.LINE_AA, shift=2)
コード例 #43
0
ファイル: computer_vision.py プロジェクト: arede3/Draco-Py
 def fillConvexPoly(self, pts, color, lineType=cv.LINE_8, shift=0):
     self.newImage = cv.fillConvexPoly(self.oldImage.data, pts, color,
                                       lineType, shift)
     self.oldImage = Matrix(self.newImage)
コード例 #44
0
def draw_convex_hull(im, points, color):
    points = cv2.convexHull(points)
    cv2.fillConvexPoly(im, points, color=color)
コード例 #45
0
im2, contours, _ = cv2.findContours(img, cv2.RETR_LIST,
                                    cv2.CHAIN_APPROX_SIMPLE)  # find contours (OpenCV 3 returns 3 values)
# cv2.drawContours(dicom_np, contours, -1, (0, 255, 255), 2)  # draw the contour outlines
# plt.subplot(1, 4, 2)
# plt.title('contours')
# plt.imshow(dicom_np, cmap='gray')

distance = []
for i in range(len(contours)):
    # point-in-contour test for (250, 250) near the image centre:
    # -1 = outside the contour, 0 = on the contour, 1 = inside
    distance.append(cv2.pointPolygonTest(contours[i], (250, 250), False))
max_index = np.argmax(distance)  # index of the maximum value

# build a 0/1 mask containing only the large, centred contour
max_contours_mask = np.zeros(img_origin.shape)
cv2.fillConvexPoly(max_contours_mask, contours[max_index], 1)

plt.subplot(1, 3, 2)
plt.title('max_contours_mask')
plt.imshow(max_contours_mask, cmap='gray')

# extract the image within the abdominal window
WL2, WW2 = 40, 350
img_abdoment2 = (img_origin - (WL2 - WW2 / 2)) / WW2 * 255  # (x - min) / (max - min)
img_abdoment2[img_abdoment2 < 0] = 0
img_abdoment2[img_abdoment2 > 255] = 255
plt.subplot(1, 3, 3)
plt.title('final_save_max_contours')
plt.imshow(img_abdoment2 * max_contours_mask, cmap='gray')
plt.show()
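
The abdominal-window step is the standard CT window level/width (WL/WW) mapping: value -> (value - (WL - WW/2)) / WW * 255, clipped to [0, 255]. A small helper with the same arithmetic (a sketch, not part of the original script):

import numpy as np

def apply_ct_window(img_hu, wl, ww):
    # map Hounsfield units into 0-255 over the window [wl - ww/2, wl + ww/2]
    out = (img_hu - (wl - ww / 2)) / ww * 255
    return np.clip(out, 0, 255)

# apply_ct_window(img_origin, 40, 350) reproduces img_abdoment2 above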
コード例 #46
0
    def warp(s_image, s_landmarks, d_image, d_landmarks, warped_size, target_size, debug):
        h,w,c = s_image.shape
        if (h != w) or (w != 64 and w != 128 and w != 256 and w != 512 and w != 1024):
            raise ValueError('FullFaceTrainingDataGenerator accepts only square power-of-two images.')
            
        s_landmarks_original = s_landmarks
        d_landmarks_original = d_landmarks
        
        rotation = np.random.uniform(-10, 10)
        scale = np.random.uniform(1 - 0.05, 1 + 0.05)
        tx = np.random.uniform(-0.05, 0.05)
        ty = np.random.uniform(-0.05, 0.05)    
        
        idx_list = np.array(range(0, 61))  # all landmarks except the teeth
         
        # drop jaw landmarks that lie inside the hull of the non-jaw landmarks, or outside the image
        face_contour = cv2.convexHull( s_landmarks[idx_list[np.argwhere ( idx_list >= 17 )[:,0] ]] )        
        for idx in idx_list[ np.argwhere ( idx_list < 17 )[:,0] ][:]:
            s_l = s_landmarks[idx]
            d_l = d_landmarks[idx]
   
            if not (s_l[0] >= 1 and s_l[0] <= w-2 and s_l[1] >= 1 and s_l[1] <= w-2) or \
               not (d_l[0] >= 1 and d_l[0] <= w-2 and d_l[1] >= 1 and d_l[1] <= w-2) or \
               cv2.pointPolygonTest(face_contour, tuple(s_l[::-1]),False) >= 0 or \
               cv2.pointPolygonTest(face_contour, tuple(d_l[::-1]),False) >= 0:
                idx_list = np.delete (idx_list, np.argwhere (idx_list == idx)[:,0] )

        s_landmarks = s_landmarks[idx_list]
        d_landmarks = d_landmarks[idx_list]

        #4 anchors for warper
        edgeAnchors = np.array ( [ (0,0), (0,h-1), (w-1,h-1), (w-1,0)] )
        s_landmarks_anchored = np.concatenate ((edgeAnchors, s_landmarks)) 
        d_landmarks_anchored = np.concatenate ((edgeAnchors, d_landmarks)) 

        if debug:
            debug_image = image_utils.morph_by_points (s_image, s_landmarks_anchored, d_landmarks_anchored )
        else:
            debug_image = None

        warped = image_utils.morph_by_points (s_image, s_landmarks_anchored, d_landmarks_anchored)

        # embed the convex-hull mask as a 4th channel
        s_image = np.concatenate( (s_image,
                                    cv2.fillConvexPoly( np.zeros(s_image.shape[0:2]+(1,),dtype=np.float32), cv2.convexHull (s_landmarks_original), (1,) ))
                                   , -1 )
        
        warped = np.concatenate( (warped,
                                    cv2.fillConvexPoly( np.zeros(warped.shape[0:2]+(1,),dtype=np.float32), cv2.convexHull (d_landmarks_original), (1,) ))
                                   , -1 )
     
        #random warp by grid
        cell_size = 32
        cell_count = w // cell_size + 1
        
        grid_points = np.linspace( 0, w, cell_count)
        mapx = np.broadcast_to(grid_points, (cell_count, cell_count)).copy()
        mapy = mapx.T
        
        mapx[1:-1,1:-1] = mapx[1:-1,1:-1] + np.random.uniform(low=-cell_size*0.2, high=cell_size*0.2, size=(cell_count-2, cell_count-2))
        mapy[1:-1,1:-1] = mapy[1:-1,1:-1] + np.random.uniform(low=-cell_size*0.2, high=cell_size*0.2, size=(cell_count-2, cell_count-2))

        half_cell_size = cell_size // 2
        
        mapx = cv2.resize(mapx, (w+cell_size,)*2 )[half_cell_size:-half_cell_size-1,half_cell_size:-half_cell_size-1].astype(np.float32)
        mapy = cv2.resize(mapy, (w+cell_size,)*2 )[half_cell_size:-half_cell_size-1,half_cell_size:-half_cell_size-1].astype(np.float32)
        warped = cv2.remap(warped, mapx, mapy, cv2.INTER_LINEAR )
        
        #random transform                                   
        random_transform_mat = cv2.getRotationMatrix2D((w // 2, w // 2), rotation, scale)
        random_transform_mat[:, 2] += (tx*w, ty*w)

        target_image = cv2.warpAffine( s_image, random_transform_mat, (w, w), borderMode=cv2.BORDER_REPLICATE )        
        warped       = cv2.warpAffine( warped,  random_transform_mat, (w, w), borderMode=cv2.BORDER_REPLICATE )
            
        target_image = cv2.resize(target_image, target_size, interpolation=cv2.INTER_LINEAR)
        warped = cv2.resize(warped, warped_size, interpolation=cv2.INTER_LINEAR)
        
        return warped, target_image, debug_image
コード例 #47
0
ファイル: AIMakeup.py プロジェクト: Wenyi1995/VideoBeauty
 def _draw_convex_hull(self, im, points, color):
     """
     勾画多凸边形
     """
     points = cv2.convexHull(points)
     cv2.fillConvexPoly(im, points, color=color)
コード例 #48
0
def eye_on_mask(mask, side):
    points = [shape[i] for i in side]
    points = np.array(points, dtype=np.int32)
    mask = cv2.fillConvexPoly(mask, points, 255)  # points must be np.int32; fills the eye region with 255 (white) and returns the mask
    return mask
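
eye_on_mask reads the landmark array from a module-level shape; with dlib's standard 68-point layout the left eye is indices 36-41 and the right eye 42-47, so a typical call looks like this (gray is an assumed input frame):

# Illustrative call, assuming `shape` holds dlib's 68 (x, y) landmarks
left_eye = list(range(36, 42))
right_eye = list(range(42, 48))
mask = np.zeros(gray.shape[:2], dtype=np.uint8)
mask = eye_on_mask(mask, left_eye)
mask = eye_on_mask(mask, right_eye)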
コード例 #49
0
                m += 1

            counter = 0
            for item in aligned_shape:
                # clamp negative landmark coordinates to zero
                aligned_shape[counter][0] = max(aligned_shape[counter][0], 0)
                aligned_shape[counter][1] = max(aligned_shape[counter][1], 0)
                counter += 1

            remapped_shape = face_remap(aligned_shape)

            c = remapped_shape[0:27]

            cv2.fillConvexPoly(feature_mask, c, 1)

            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            extTop = tuple(c[c[:, :, 1].argmin()][0])
            extBot = tuple(c[c[:, :, 1].argmax()][0])

            feature_mask = feature_mask.astype(bool)
            out_face[feature_mask] = aligned_face[feature_mask]

            crop = out_face[extTop[1]:extBot[1], extLeft[0]:extRight[0]]
            cv2.imwrite(output_path, crop)

            print('cropped', file_path)
コード例 #50
0
ファイル: morphing.py プロジェクト: Turmac/video_morphing
def create_tri_mask(sz, pts):
    mask = np.zeros(sz)
    mask = cv2.fillConvexPoly(mask, pts, 1.0, 16, 0)
    return mask
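
The 16 passed as lineType here is cv2.LINE_AA, so the float mask gets anti-aliased edges and can be used directly as a blending weight. A sketch, with dst and warped_patch standing in for same-sized images:

tri = np.int32([[30, 30], [90, 40], [60, 100]])
mask = create_tri_mask(dst.shape[:2], tri)  # 2-D float mask, 1.0 inside the triangle
blended = dst * (1.0 - mask[..., None]) + warped_patch * mask[..., None]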
コード例 #51
0
def describe(image, p_segments):
    (h, w) = image.shape[:2]
    control = image[0:h, 0:w // 2]  # left half of the image
    hC = h
    wC = w // 2
    segments = 2**p_segments

    # Mask to only keep the centre
    mask = np.zeros(control.shape[:2], dtype="uint8")

    (h, w) = control.shape[:2]
    (cX, cY) = (w // 2, h // 2)
    masks = [mask.copy() for i in range(0, 8 * segments)]
    # Generating the different annulus masks
    for i in range(0, 8 * segments):
        cv2.circle(masks[i], (cX, cY),
                   min(90 - 10 * (i % 8), control.shape[1]) // 2, 255, -1)
        cv2.circle(masks[i], (cX, cY),
                   min(80 - 10 * (i % 8), control.shape[1]) // 2, 0, -1)

    if (p_segments == 2):
        points = np.array(
            [[cX, cY], [cX, 0], [0, 0], [0, h], [w, h], [w, cY], [cX, cY]],
            np.int32)
        points = points.reshape((-1, 1, 2))
        for i in range(0, 8):
            cv2.fillConvexPoly(masks[i], points, 0)
    else:
        for k in range(0, 2**(p_segments - 2)):
            alpha = (math.pi / 2**(p_segments - 1)) * (k + 1)
            beta = (math.pi / 2**(p_segments - 1)) * k
            if alpha <= math.pi / 4:
                points = np.array(
                    [[cX, cY], [w, h / 2 - w / 2 * math.tan(alpha)], [w, 0],
                     [0, 0], [0, h], [w, h],
                     [w, h / 2 - w / 2 * math.tan(beta)], [cX, cY]], np.int32)
                points = points.reshape((-1, 1, 2))
                points2 = np.array(
                    [[cX, cY], [w, cY], [w, h / 2 - w / 2 * math.tan(beta)],
                     [cX, cY]], np.int32)
                points2 = points2.reshape((-1, 1, 2))
                for i in range(0, 8):
                    cv2.fillConvexPoly(masks[8 * k + i], points, 0)
                    cv2.fillConvexPoly(masks[8 * k + i], points2, 0)

            else:
                points = np.array(
                    [[cX, cY], [cX + (h / 2) / math.tan(alpha), 0], [0, 0],
                     [0, h], [w, h], [w, 0],
                     [cX + (h / 2) / math.tan(beta), 0], [cX, cY]], np.int32)
                points = points.reshape((-1, 1, 2))
                points2 = np.array(
                    [[cX, cY], [cX + (h / 2) / math.tan(beta), 0], [w, 0],
                     [w, cY], [cX, cY]], np.int32)
                points2 = points2.reshape((-1, 1, 2))
                for i in range(0, 8):
                    cv2.fillConvexPoly(masks[8 * k + i], points, 0)
                    cv2.fillConvexPoly(masks[8 * k + i], points2, 0)

    M90 = cv2.getRotationMatrix2D((cX, cY), 90, 1.0)
    M180 = cv2.getRotationMatrix2D((cX, cY), 180, 1.0)
    M270 = cv2.getRotationMatrix2D((cX, cY), 270, 1.0)

    for i in range(0, 8 * (2**(p_segments - 2))):
        masks[8 * (2**(p_segments - 2)) + i] = cv2.warpAffine(
            masks[i], M90, (w, h))
        masks[2 * 8 * (2**(p_segments - 2)) + i] = cv2.warpAffine(
            masks[i], M180, (w, h))
        masks[3 * 8 * (2**(p_segments - 2)) + i] = cv2.warpAffine(
            masks[i], M270, (w, h))

    rows = segments
    cols = 8
    figure = np.zeros((rows * hC, cols * wC))
    for i in range(rows):
        for j in range(cols):

            figure[i * hC:(i + 1) * hC,
                   j * wC:(j + 1) * wC] = masks[cols * i + j]

    cv2.imwrite("test.jpg", figure)
コード例 #52
0
def detectarPlaca(img):
    I = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    u, _ = cv2.threshold(I, 0, 255, cv2.THRESH_OTSU)
    mascara = np.uint8(255 * (I > u))
    output = cv2.connectedComponentsWithStats(mascara, 4, cv2.CV_32S)
    cantObj = output[0]
    labels = output[1]
    stats = output[2]
    maskObj = []
    maskConv = []
    diferenciaArea = []
    for i in range(1, cantObj):
        if stats[i, 4] > stats[:, 4].mean():
            mascara = ndimage.binary_fill_holes(labels == i)
            mascara = np.uint8(255 * mascara)
            maskObj.append(mascara)
            # compute the convex hull
            _, contours, _ = cv2.findContours(mascara, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
            cnt = contours[0]
            hull = cv2.convexHull(cnt)
            puntosConvex = hull[:, 0, :]
            m, n = mascara.shape
            ar = np.zeros((m, n))
            mascaraCovex = np.uint8(255 *
                                    cv2.fillConvexPoly(ar, puntosConvex, 1))
            maskConv.append(mascaraCovex)
            # compare the convex-hull area with the object area
            areaObj = np.sum(mascara) / 255
            areaConv = np.sum(mascaraCovex) / 255
            diferenciaArea.append(np.abs(areaObj - areaConv))

    maskPlaca = maskConv[np.argmin(diferenciaArea)]
    # perspective correction

    vertices = cv2.goodFeaturesToTrack(maskPlaca, 4, 0.01, 10)
    x = vertices[:, 0, 0]
    y = vertices[:, 0, 1]
    vertices = vertices[:, 0, :]
    xo = np.sort(x)
    yo = np.sort(y)

    xn = np.zeros((1, 4))
    yn = np.zeros((1, 4))
    n = (np.max(xo) - np.min(xo))
    m = (np.max(yo) - np.min(yo))

    xn = (x == xo[2]) * n + (x == xo[3]) * n

    yn = (y == yo[2]) * m + (y == yo[3]) * m
    verticesN = np.zeros((4, 2))
    verticesN[:, 0] = xn
    verticesN[:, 1] = yn

    vertices = np.int64(vertices)
    verticesN = np.int64(verticesN)

    h, _ = cv2.findHomography(vertices, verticesN)

    placa = cv2.warpPerspective(img, h, (np.max(verticesN[:, 0]),
                                         (np.max(verticesN[:, 1]))))
    return placa
コード例 #53
0
    def callback(self, rgb_img, depth_img, points):

        global count, point_pub, c, grasp, old_x, old_y, im, count_3
        try:
            rgb_image = self.bridge.imgmsg_to_cv2(rgb_img, "bgr8")
            #cv2.imwrite('/home/chandan_main/img/original.png', rgb_image)
            depth_image = self.bridge.imgmsg_to_cv2(depth_img, "passthrough")
            #depth_image.show()
            #print depth_img
            p = np.asarray(points.data)
            #print p
        except CvBridgeError as e:
            print(e)
        rgb_pil = PIL.Image.fromarray(rgb_image[:, :, ::-1].copy())
        im_depth = PIL.Image.fromarray(depth_image)
        #depth_pil.show()
        depth_array = np.array(
            depth_image, dtype=np.float32)  #convert depth img to numpy array
        #pointcloud_Image=array_to_pointcloud2(depth_array,stamp=None, frame_id=None)
        #depth_copy = np.asarray(depth_image)
        frame = cv2.normalize(
            depth_array, depth_array, 0, 255,
            cv2.NORM_MINMAX)  #normalize the depth to range of 0-255
        rgb_image[:, :, 0] = frame  #replace blue by depth
        #cv2.imwrite('/home/tncong/img/bluetodepth.png', rgb_image)
        mask = np.zeros(rgb_image.shape, dtype=np.uint8)
        #roi_corners = np.array([(p[8],p[9]), (p[10],p[11]), (p[14],p[15]), (p[12],p[13]) ], dtype=np.int32) #bounding box vertices, last 4 points, upper of the bounding cube
        #points in bounding cube
        roi_corners = np.array(
            [[(p[0], p[1]), (p[2], p[3]), (p[6], p[7]), (p[4], p[5]),
              (p[8], p[9]), (p[10], p[11]), (p[14], p[15]), (p[12], p[13])]],
            dtype=np.int32)  #bounding box vertices, first 4 points
        hull = cv2.convexHull(
            roi_corners)  #automatically get the convex hull from the points
        channel_count = rgb_image.shape[
            2]  #Shape contains [row,column,numberOfChannel] => shape[2] = 3 (i.e. RGB)
        ignore_mask_color = (255, ) * channel_count

        cv2.fillConvexPoly(mask, hull, ignore_mask_color)
        masked_image1 = cv2.bitwise_and(rgb_image, mask)

        mask2 = cv2.bitwise_not(mask)
        masked_image2 = cv2.bitwise_and(bg, mask2)

        image_final = masked_image1 + masked_image2  #object + background

        #image_merge = image_final+ im_depth

        # origin of the bounding box
        origin_x = (p[0] + p[2] + p[4] + p[6]) / 4
        origin_y = (p[1] + p[3] + p[5] + p[7]) / 4
        #print(origin_x,origin_y)
        #print(img_crop.size)
        offset = 17
        #update every 17 pixel = 5mm
        #print(count)
        #poses=[]
        #predict all object
        if (count < 4):
            #print(count)

            img_pil = PIL.Image.fromarray(image_final)
            #img_pil.show()
            img_crop = img_pil.crop(
                (origin_x - 113, origin_y - 113, origin_x + 114,
                 origin_y + 114))  #crop img to 227x227 using PIL
            size = 32, 32
            img_crop_dexnet = img_crop.resize(size, PIL.Image.ANTIALIAS)
            #img_crop_dexnet = img_pil.crop((origin_x-15, origin_y-15, origin_x+16, origin_y+16)) #crop img to 32x32 using PIL
            #img_crop_dexnet.show()
            #print img_crop
            #img_crop.show()
            img_crop_opencv = np.array(img_crop)  # convert back to opencv type
            #print img_crop_opencv
            #point_cloud=array_to_pointcloud2(img_crop_opencv)
            #point_cloud.show()
            img_crop_opencv = img_crop_opencv[:, :, ::-1].copy(
            )  # Convert RGB to BGR
            img_crop_opencv = PIL.Image.fromarray(img_crop_opencv)

            #img_crop_opencv.show()
            #cv2.imwrite('/home/chandan_main/img/test'+str(c)+'.png', img_crop_opencv)
            #c = c+1
            grasp_rect, grasp_angle, rect_center = predict_grasp(
                model, img_crop_opencv)

            #draw rectangle
            #translate back to original image
            grasp_rect[:, 0] = grasp_rect[:, 0] + origin_x - 113
            grasp_rect[:, 1] = grasp_rect[:, 1] + origin_y - 113
            grasp_angle_degree = (grasp_angle / np.pi) * 180
            #find point with minimum depth inside the rectangle
            poly = Polygon(grasp_rect)
            min_dis = 10000000.
            min_depth = 1000000.
            grasp_rect_points[count].layout.dim.append(MultiArrayDimension())
            grasp_rect_points[count].layout.dim.append(MultiArrayDimension())
            grasp_rect_points[count].layout.dim[0].label = "height"
            grasp_rect_points[count].layout.dim[1].label = "width"
            grasp_rect_points[count].layout.dim[0].size = 4
            grasp_rect_points[count].layout.dim[1].size = 2
            grasp_rect_points[count].layout.dim[0].stride = 4 * 2
            grasp_rect_points[count].layout.dim[1].stride = 2
            grasp_rect_points[count].layout.data_offset = 0
            grasp_rect_points[count].data = [0] * 8
            dstride0 = grasp_rect_points[count].layout.dim[0].stride
            dstride1 = grasp_rect_points[count].layout.dim[1].stride
            offset = grasp_rect_points[count].layout.data_offset
            grasp_rect_points[count].data[0] = grasp_rect[0][0]
            grasp_rect_points[count].data[1] = grasp_rect[0][1]
            grasp_rect_points[count].data[2] = grasp_rect[1][0]
            grasp_rect_points[count].data[3] = grasp_rect[1][1]
            grasp_rect_points[count].data[4] = grasp_rect[2][0]
            grasp_rect_points[count].data[5] = grasp_rect[2][1]
            grasp_rect_points[count].data[6] = grasp_rect[3][0]
            grasp_rect_points[count].data[7] = grasp_rect[3][1]
            #grasp_rect_points[count].data[6]=grasp_rect[i][j]
            #for i in range(4):
            #for j in range(2):
            #grasp_rect_points[count].data[0 + i +dstride1*j]=grasp_rect[i][j]
            #print grasp_rect_points[count].data
            grasp_angle_points[count] = grasp_angle
            #self.min_depth = 100000.
            #self.min_dis   = 1000000.
            for pt in get_points_inside_polygon(poly):
                #print(depth_image[pt[0],pt[1]])
                pt_shapely = Point(pt[0], pt[1])
                #print poly.centroid
                #print pt
                #cross=np.cross(pt,np.array(poly.centroid))
                dis = np.linalg.norm(pt - np.array(poly.centroid))
                #self.min_depth =depth_image[pt[0],pt[1]]
                #print(depth_image[pt[0],pt[1]])
                #print (depth_image[pt[0],pt[1]].size())
                if ((depth_image[pt[0], pt[1]] <= min_depth) &
                    (dis < min_dis)) & (min_depth > 0):
                    self.min_depth = depth_image[pt[0], pt[1]]
                    #print(self.min_depth)
                    point_pub[count].x = pt[0]
                    point_pub[count].y = pt[1]
                    point_pub[count].z = self.min_depth
                    min_dis = dis
                #print self.min_depth

                #zcross=np.cross(point_pub.x,point_pub.y)

#zvec= np.linalg.norm(zcross)
#znorm=zcross/zvec
#znorm1=(grasp_rect[1]-grasp_rect[0])
#znorm2=(grasp_rect[2]-grasp_rect[0])
#print znorm1
#print znorm2
#zcross=np.cross(znorm1,znorm2)
#print zcross

#zvec=cross/znorm
#print zvec
#print count
            print(point_pub)
            #grasp_rect_points[count]=grasp_rect
            #grasp_rect_pub.publish(grasp_rect_points[count])
            #pos.append(point_pub)

            if (count == 0):
                im = rgb_pil

            count = count + 1

            #print grasp_rect_points
            draw = PIL.ImageDraw.Draw(im)
            #draw2 = PIL.ImageDraw.Draw(im_depth)
            draw_box(grasp_rect, draw, blue, red)
            #draw_box(grasp_rect, draw2, blue, red)
            r = 1
            draw.ellipse(
                (point_pub[count - 1].x - r, point_pub[count - 1].y - r,
                 point_pub[count - 1].x + r, point_pub[count - 1].y + r),
                fill=red)
            #draw2.ellipse((point_pub[count-1].x-r, point_pub[count-1].y-r, point_pub[count-1].x+r, point_pub[count-1].y+r), fill=red)
            #pub.publish(point_pub)
            print('position of', self.model_names[count - 1])
            print(self.model_names[count - 1])
            print(self.min_depth)
            print(point_pub[count - 1])
            #poses.append(pos[count-1])
            # print poses

            #print pos

            #pub[self.model_names[count-1]].publish(point_pub[count-1])
            print(rect_center)
            print(grasp_angle)
            print(grasp_rect)
            #point_pub=[geometry_msgs.msg.Point(0,0,0),geometry_msgs.msg.Point(0,0,0),geometry_msgs.msg.Point(0,0,0)]
            #rospy.loginfo(point_pub)
            #pub.publish(point_pub)
            # print zcross
            # print zvec
            #print znorm
            #print point_pub
            if (count == 4):

                #count = 0
                #point_pub=[geometry_msgs.msg.Point(0,0,0),geometry_msgs.msg.Point(0,0,0),geometry_msgs.msg.Point(0,0,0)]
                #print point_pub
                #count_3=count_3+1
                #if(count_3==1):
                #print im
                #print im_depth
                im.show()
                #im = rgb_pil
                #count_3=0
                #cv2.imwrite('/home/chandan_main/img/final.png', image_final)
                #im_depth.show()
                #cv2.imwrite('/home/chandan_main/img/final_Depth.png', depth_image)
                #print poses
                #return poses
                #print point_pub
コード例 #54
0
def neuron_to_img(neuron,
                  absolute_size=None,
                  min_width=None,
                  min_height=None,
                  pixel_per_segment=None,
                  draw_segment=True,
                  draw_node=True,
                  node_rad=None,
                  seg_rad=None,
                  pad_percent=0,
                  node_color=255,
                  seg_color=127):
    segments = get_edges(neuron)
    abs_coords = False
    if not absolute_size is None:
        img_dim = np.uint16((absolute_size[1], absolute_size[0]))
        img = np.zeros((img_dim[1], img_dim[0], 1), np.uint16)
        abs_coords = True
    else:
        origin, bbox_size = bbox_dimensions(neuron)
        if not min_width is None:
            img_dim = np.uint16((min_width / bbox_size[0]) * bbox_size)
        elif not min_height is None:
            img_dim = np.uint16((min_height / bbox_size[0]) * bbox_size)
        elif not pixel_per_segment is None:
            img_dim = get_image_dimension(neuron, pixel_per_segment)
        else:
            img_dim = np.uint16(bbox_size)
        img = np.zeros((img_dim[1], img_dim[0], 1), np.uint16)
        if not pad_percent == 0:
            y_pad = np.int16(img.shape[0] * (pad_percent / 100.0))
            x_pad = np.int16(img.shape[1] * (pad_percent / 100.0))
            img = np.squeeze(img)
            img = np.pad(img, ((x_pad, x_pad), (y_pad, y_pad)), 'constant')
            img = img[:, :, np.newaxis]
        else:
            y_pad = 0
            x_pad = 0
        if not seg_rad is None:
            seg_rad = int(seg_rad * (img_dim[0] / bbox_size[0]))
            if seg_rad == 0:
                seg_rad = 1
            scaled_seg_rad = int(seg_rad * (img_dim[0] / bbox_size[0]))
    for segment in segments:
        if draw_segment:
            if seg_rad is None:
                polygon_points = segment_to_polygon(segment)
                if not abs_coords:
                    points = []
                    for point in polygon_points:
                        scaled_point = scale_to_image(point,
                                                      (origin, bbox_size),
                                                      img_dim)
                        scaled_point += np.uint16(np.array([y_pad, x_pad]))
                        points.append(scaled_point)
                else:
                    points = polygon_points
                points = np.int32(np.array(points))
                points = points.reshape(-1, 1, 2)
                img = cv2.fillConvexPoly(img, points, seg_color)
            else:
                points = []
                for node in segment:
                    point = (np.uint16(node['x']), np.uint16(node['y']))
                    if not abs_coords:
                        scaled_point = scale_to_image(point,
                                                      (origin, bbox_size),
                                                      img_dim)
                        scaled_point += np.uint16(np.array([y_pad, x_pad]))
                        point = (scaled_point[0], scaled_point[1])
                    points.append(point)
                img = cv2.line(img, points[0], points[1], seg_color, seg_rad)
    if draw_node:
        for index, node in neuron.nodes.iterrows():
            point = (np.uint16(node['x']), np.uint16(node['y']))
            if not abs_coords:
                scaled_node_point = scale_to_image(point, (origin, bbox_size),
                                                   img_dim)
                scaled_node_point += np.uint16(np.array([y_pad, x_pad]))
                point = tuple(scaled_node_point)
            if abs_coords:
                radius = np.int32(node['radius'])
            else:
                if not node_rad is None:
                    radius = np.int32(node_rad * img_dim[0] / bbox_size[0])
                else:
                    radius = np.int32(node['radius'] * img_dim[0] /
                                      bbox_size[0])
            img = cv2.circle(img, point, radius, node_color, -1)
    return img
コード例 #55
0
ファイル: fn.py プロジェクト: tiahflorens/ict2019
def vis_frame(frame, im_res, format='coco'):
    '''
    frame: frame image
    im_res: im_res of predictions
    format: coco or mpii

    return rendered image
    '''
    if format == 'coco':
        l_pair = [
            (0, 1),
            (0, 2),
            (1, 3),
            (2, 4),  # Head
            (5, 6),
            (5, 7),
            (7, 9),
            (6, 8),
            (8, 10),
            (17, 11),
            (17, 12),  # Body
            (11, 13),
            (12, 14),
            (13, 15),
            (14, 16)
        ]

        p_color = [
            (0, 255, 255),
            (0, 191, 255),
            (0, 255, 102),
            (0, 77, 255),
            (0, 255, 0),
            # Nose, LEye, REye, LEar, REar
            (77, 255, 255),
            (77, 255, 204),
            (77, 204, 255),
            (191, 255, 77),
            (77, 191, 255),
            (191, 255, 77),
            # LShoulder, RShoulder, LElbow, RElbow, LWrist, RWrist
            (204, 77, 255),
            (77, 255, 204),
            (191, 77, 255),
            (77, 255, 191),
            (127, 77, 255),
            (77, 255, 127),
            (0, 255, 255)
        ]  # LHip, RHip, LKnee, Rknee, LAnkle, RAnkle, Neck
        line_color = [(0, 215, 255), (0, 255, 204), (0, 134, 255),
                      (0, 255, 50), (77, 255, 222), (77, 196, 255),
                      (77, 135, 255), (191, 255, 77), (77, 255, 77),
                      (77, 222, 255), (255, 156, 127), (0, 127, 255),
                      (255, 127, 77), (0, 77, 255), (255, 77, 36)]
    elif format == 'mpii':
        l_pair = [(8, 9), (11, 12), (11, 10), (2, 1), (1, 0), (13, 14),
                  (14, 15), (3, 4), (4, 5), (8, 7), (7, 6), (6, 2), (6, 3),
                  (8, 12), (8, 13)]
        p_color = [
            PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE,
            PURPLE, RED, RED, BLUE, BLUE
        ]
        line_color = [
            PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED, RED, PURPLE, PURPLE,
            RED, RED, BLUE, BLUE
        ]
    else:
        raise NotImplementedError

    # im_name = im_res['imgname'].split('/')[-1]
    img = frame
    height, width = img.shape[:2]
    img = cv2.resize(img, (int(width / 2), int(height / 2)))
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        kp_preds = torch.cat(
            (kp_preds, torch.unsqueeze((kp_preds[5, :] + kp_preds[6, :]) / 2,
                                       0)))
        kp_scores = torch.cat(
            (kp_scores,
             torch.unsqueeze((kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            if kp_scores[n] <= 0.05:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (int(cor_x / 2), int(cor_y / 2))
            bg = img.copy()
            cv2.circle(bg, (int(cor_x / 2), int(cor_y / 2)), 2, p_color[n], -1)
            # Now create a mask of logo and create its inverse mask also
            transparency = max(0, min(1, kp_scores[n]))
            img = cv2.addWeighted(bg, transparency, img, 1 - transparency, 0)
        # Draw limbs
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                bg = img.copy()

                X = (start_xy[0], end_xy[0])
                Y = (start_xy[1], end_xy[1])
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5
                angle = math.degrees(math.atan2(Y[0] - Y[1], X[0] - X[1]))
                stickwidth = int(kp_scores[start_p] + kp_scores[end_p]) + 1  # ellipse2Poly needs integer axes
                polygon = cv2.ellipse2Poly((int(mX), int(mY)),
                                           (int(length / 2), stickwidth),
                                           int(angle), 0, 360, 1)
                cv2.fillConvexPoly(bg, polygon, line_color[i])
                # cv2.line(bg, start_xy, end_xy, line_color[i], (2 * (kp_scores[start_p] + kp_scores[end_p])) + 1)
                transparency = max(
                    0, min(1, 0.5 * (kp_scores[start_p] + kp_scores[end_p])))
                img = cv2.addWeighted(bg, transparency, img, 1 - transparency,
                                      0)
    img = cv2.resize(img, (width, height), interpolation=cv2.INTER_CUBIC)
    return img
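
The limb-drawing trick above (reused in the later pose examples) rasterises a rotated ellipse that spans two keypoints; a minimal standalone version:

import math
import cv2
import numpy as np

def limb_polygon(p1, p2, half_width=3):
    # filled ellipse spanning the segment p1-p2
    mx, my = (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
    length = math.hypot(p1[0] - p2[0], p1[1] - p2[1])
    angle = math.degrees(math.atan2(p1[1] - p2[1], p1[0] - p2[0]))
    return cv2.ellipse2Poly((int(mx), int(my)), (int(length / 2), half_width),
                            int(angle), 0, 360, 1)

canvas = np.zeros((100, 100, 3), np.uint8)
cv2.fillConvexPoly(canvas, limb_polygon((20, 20), (80, 60)), (0, 255, 0))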
コード例 #56
0
    if frame is None:
        print("none")
        break

    height, width, channels = frame.shape  # shape is (rows, cols, channels)

    framecount += 1
    framecopy = frame.copy()
    
    #getting ROI ==============================================================
    # mask defaulting to black for 3-channel and transparent for 4-channel
    mask = np.zeros(frame.shape, dtype=np.uint8)
    # fill the ROI so it doesn't get wiped out when the mask is applied
    channel_count = frame.shape[2]  # i.e. 3 or 4 depending on your image
    ignore_mask_color = (255,)*channel_count
    cv2.fillConvexPoly(mask, ROI_CORNERS, ignore_mask_color)
    # apply the mask
    roi = cv2.bitwise_and(frame, mask)
    rectangle=cv2.boundingRect(ROI_CORNERS)
    #cv2.rectangle(roi,(rectangle[0],rectangle[1]),(rectangle[0]+rectangle[2],rectangle[1]+rectangle[3]),(0,0,255),3)
    x1=rectangle[0]
    x2=rectangle[0]+rectangle[2]
    y1=rectangle[1]
    y2=rectangle[1]+rectangle[3]
    roi=roi[y1:y2,x1:x2]
    roi = cv2.resize(roi, (width // 2, height // 2))  # dsize is (width, height)

    #==========================================================================
    
コード例 #57
0
    #img = mpimg.imread(path + image_names[-4])
    #img = mpimg.imread(path + 'orig.jpg')
    rectangle = (corn1, corn2, corn3, corn4)

    return rectangle


masks = []
isRefreshable = []
percentages = ((1000 - 250) // 10) * [0]
drawablePercentages = ((1000 - 250) // 10) * [10]

currentX = 1000
while currentX > 250:
    rectangle = getRectangle(currentX, 70)
    mask = np.zeros((Height, Width), np.uint8)
    rect = np.array(rectangle, np.int32)
    cv2.fillConvexPoly(mask, rect, 255)
    masks.append(mask)
    isRefreshable.append(False)
    currentX -= 10


def calculatePercentagesBack(processedImage, diffImage):
    newPercentage = ((1000 - 250) // 10) * [0]  # one slot per 10-pixel strip, matching percentages above
    currentX = 1000
    i = 0

    while currentX > 250:
        perc = getNotzeroPixels(processedImage,
                                groundMask=masks[i],
                                percentage=True)
        percMoving = getNotzeroPixels(diffImage,
コード例 #58
0
    def _draw_frame(self,
                    frame: ma.MaskedArray,
                    frame_confidence: np.ndarray,
                    img: np.ndarray) -> np.ndarray:
        avg_color = np.mean(img, axis=(0, 1))
        print("avg_color", avg_color)

        for person, person_confidence in zip(frame, frame_confidence):
            c = person_confidence.tolist()
            idx = 0
            for component in self.pose.header.components:
                colors = [c[::-1] for c in component.colors]

                def _point_color(p_i: int):
                    opacity = c[p_i + idx]
                    np_color = colors[p_i %
                                      len(component.colors)] * opacity + (
                                          1 - opacity) * avg_color
                    return tuple([int(c) for c in np_color])

                # Draw points
                for i in range(len(component.points)):
                    if c[i + idx] > 0:
                        cv2.circle(img=img,
                                   center=tuple(person[i + idx]),
                                   radius=3,
                                   color=_point_color(i),
                                   thickness=-1)

                if self.pose.header.is_bbox:
                    point1 = tuple(person[0 + idx].tolist())
                    point2 = tuple(person[1 + idx].tolist())
                    color = tuple(
                        np.mean(
                            [_point_color(0), _point_color(1)], axis=0))

                    cv2.rectangle(img=img,
                                  pt1=point1,
                                  pt2=point2,
                                  color=color,
                                  thickness=2)
                else:
                    int_person = person.astype(np.int32)
                    # Draw Limbs
                    for (p1, p2) in component.limbs:
                        if c[p1 + idx] > 0 and c[p2 + idx] > 0:
                            point1 = tuple(int_person[p1 + idx].tolist())
                            point2 = tuple(int_person[p2 + idx].tolist())

                            length = ((point1[0] - point2[0])**2 +
                                      (point1[1] - point2[1])**2)**0.5

                            color = tuple(
                                np.mean([_point_color(p1),
                                         _point_color(p2)],
                                        axis=0))

                            deg = math.degrees(
                                math.atan2(point1[1] - point2[1],
                                           point1[0] - point2[0]))
                            polygon = cv2.ellipse2Poly(
                                (int((point1[0] + point2[0]) / 2),
                                 int((point1[1] + point2[1]) / 2)),
                                (int(length / 2), 3), int(deg), 0, 360, 1)
                            cv2.fillConvexPoly(img=img,
                                               points=polygon,
                                               color=color)

                idx += len(component.points)

        return img
コード例 #59
0
]

balls = np.dot(balls, cell_2_meters)  # Convert cells into meters
print "balls=", balls

for i in np.arange(0, w_cells):
    for j in np.arange(0, h_cells):
        if ((i + j) % 2 == 0):
            pose = np.dot(np.array([(0.5 + i), -(0.5 + j), 0.0]),
                          cell_2_meters)
            poly = np.int32(
                rotate(pose, np.array([0, 0]), cell_poly,
                       pixel_size)).reshape(-1, 1, 2)
            #print "-----",i,", ",j
            #print poly
            cv2.fillConvexPoly(grid_map, poly,
                               (208, 208, 208))  #(248,248,248))

for box in boxes:
    poly = np.int32(rotate(box, origin, box_poly,
                           pixel_size)).reshape(-1, 1, 2)
    cv2.fillConvexPoly(grid_map, poly, (0, 0, 0))

for box in walls:
    print(box)
    poly = np.int32(rotate(box, origin, wall_poly,
                           pixel_size)).reshape(-1, 1, 2)
    cv2.fillConvexPoly(grid_map, poly, (0, 0, 0))

# Draw the grayscale map without axes for use with AMCL
cv2.imwrite("game_map.pgm", grid_map)
コード例 #60
0
        for i in range(17):
            for n in range(len(subset)):
                index = subset[n][np.array(limbSeq[i]) - 1]
                if -1 in index:
                    continue
                cur_canvas = canvas.copy()
                Y = candidate[index.astype(int), 0]
                X = candidate[index.astype(int), 1]
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
                angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
                polygon = cv.ellipse2Poly((int(mY), int(mX)),
                                          (int(length / 2), stickwidth),
                                          int(angle), 0, 360, 1)
                cv.fillConvexPoly(cur_canvas, polygon, colors[i])
                canvas = cv.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)

        plt.imshow(canvas[:, :, [2, 1, 0]])
        fig = matplotlib.pyplot.gcf()
        fig.set_size_inches(12, 12)
    # plt.show()
    except:
        traceback.print_exc()
    else:
        number += 1
        cv.imwrite('results/' + "skeleton" + str(number) + ".jpg",
                   canvas[:, :, [0, 1, 2]])
        with open('results/' + "skeleton" + str(number) + '.txt', 'w') as f:
            for z in range(len(subset)):
                for q in range(18):