def LumConDrift(bgImg,fundusMask): 
    m,n = bgImg.shape
    tsize = 50
    indx=0
    indy=0
    i = tsize
   
    # note: np.float was removed from NumPy 1.24+; use an explicit dtype
    ldrift = np.zeros((int(m/tsize),int(n/tsize)),np.float64)
    cdrift = np.zeros((int(m/tsize),int(n/tsize)),np.float64)
    while(i<m):
        j = tsize
        while(j<n):           
            if (i+tsize>=m and j+tsize<n):
                block = bgImg[i-tsize:m, j-tsize:j+tsize]
            elif (i+tsize<m and j+tsize>=n):
                block = bgImg[i-tsize:i+tsize, j-tsize:n]
            elif (i+tsize>=m and j+tsize>=n):
                block = bgImg[i-tsize:m, j-tsize:n]
            else :
                block = bgImg[i-tsize:i+tsize, j-tsize:j+tsize]
            mean,std = cv2.meanStdDev(block)
            ldrift[indx,indy] = mean[0,0]   # meanStdDev returns 1x1 arrays
            cdrift[indx,indy] = std[0,0]
            indy = indy+1
            j = j+tsize
        indy = 0
        indx = indx+1
        i = i+tsize
    ldrift = cv2.resize(ldrift,(n,m),interpolation = cv2.INTER_CUBIC)
    cdrift = cv2.resize(cdrift,(n,m),interpolation = cv2.INTER_CUBIC)
    ldrift = cv2.multiply(ldrift,fundusMask.astype(float))
    cdrift = cv2.multiply(cdrift,fundusMask.astype(float))
    return ldrift,cdrift
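A minimal usage sketch (synthetic inputs assumed; LumConDrift as defined above) to show the expected shapes:

import numpy as np
import cv2

bg = (np.random.rand(200, 300) * 255).astype(np.float64)  # stand-in background image
mask = np.ones((200, 300), np.uint8)                      # full-frame fundus mask
ldrift, cdrift = LumConDrift(bg, mask)
print(ldrift.shape, cdrift.shape)  # both resized back to (200, 300)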
Code Example #2
File: evocv.py Project: eatoin5hrdlu/PACE
 def emphasis(self, img, scale=2, fraction=0.5) :
     """Return monochrome image with doubled(scale) selected color
        minus half of the other two colors(/fraction) added together.
        Where color is Blue (0), Green (1-default), or Red (2)"""
     return cv2.subtract(cv2.multiply(img[:,:,self.color],scale),
                         cv2.multiply(cv2.add( img[:,:,(self.color+1)%3],
                                               img[:,:,(self.color+2)%3]),fraction))
Code Example #3
def online_variance(new_data,curr_var,curr_iter,curr_mean):
	# Welford-style online update of the running mean and sample
	# variance (ddof=1) from one new frame
	if curr_iter==1:
		new_mean = new_data
		new_var = cv2.multiply(new_data,0)
		return new_mean,new_var
	else:
		# new_mean = curr_mean + (new_data - curr_mean)/curr_iter
		pa=cv2.subtract(new_data,curr_mean)
		pa=cv2.divide(pa,curr_iter)
		new_mean=cv2.add(pa,curr_mean)
		# recover the previous sum of squared deviations:
		# S_{n-1} = var_{n-1} * (n - 2)
		prev_S = cv2.multiply(curr_var,(curr_iter - 2))

		# new_S = prev_S + (new_data - curr_mean) .* (new_data - new_mean)
		pd1=cv2.subtract(new_data,curr_mean)
		pd2=cv2.subtract(new_data,new_mean)
		pd=cv2.multiply(pd1,pd2)
		new_S=cv2.add(pd,prev_S)

		# new_var = new_S/(curr_iter - 1)
		new_var=cv2.divide(new_S,curr_iter-1)

		return new_mean,new_var
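A quick sanity check (hypothetical data): streaming frames through online_variance should match NumPy's batch sample variance.

import numpy as np

frames = [np.random.rand(4, 4).astype(np.float32) for _ in range(10)]
mean = var = None
for it, f in enumerate(frames, start=1):
    mean, var = online_variance(f, var, it, mean)
print(np.allclose(var, np.var(np.stack(frames), axis=0, ddof=1), atol=1e-4))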
Code Example #4
    def do(self, img):
        # intended as a min-max contrast stretch: shift by the grayscale
        # minimum, then scale so [minVal, maxVal] maps onto [0, 255]
        dstImage = img
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
        cv2.multiply(cv2.add(img, -minVal), 255 / (maxVal - minVal), dstImage)

        return dstImage
Code Example #5
File: blob.py Project: eatoin5hrdlu/PACE
 def emphasis(self, img, color) :
     """Create a monochrome image by doubling the selected color
        and subtracting half of the other two colors added together.
        Where color is Blue (0), Green (1), or Red (2)"""
     return cv2.subtract(cv2.multiply(img[:,:,color],2),
                         cv2.multiply(cv2.add( img[:,:,(color+1)%3]/3,
                                               img[:,:,(color+2)%3]/3),0.5))
Code Example #6
File: Convert_Masked.py Project: nhu2000/faceswap
    def apply_new_face(self, image, new_face, image_mask, mat, image_size, size):
        base_image = numpy.copy( image )
        new_image = numpy.copy( image )

        cv2.warpAffine( new_face, mat, image_size, new_image, cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )

        if self.seamless_clone:
            unitMask = numpy.clip( image_mask * 365, 0, 255 ).astype(numpy.uint8)
      
            maxregion = numpy.argwhere(unitMask==255)
      
            if maxregion.size > 0:
              miny,minx = maxregion.min(axis=0)[:2]
              maxy,maxx = maxregion.max(axis=0)[:2]
              lenx = maxx - minx;
              leny = maxy - miny;
              masky = int(minx+(lenx//2))
              maskx = int(miny+(leny//2))
              outimage = cv2.seamlessClone(new_image.astype(numpy.uint8),base_image.astype(numpy.uint8),unitMask,(masky,maskx) , cv2.NORMAL_CLONE )
              
              return outimage
              
        foreground = cv2.multiply(image_mask, new_image.astype(float))
        background = cv2.multiply(1.0 - image_mask, base_image.astype(float))
        outimage = cv2.add(foreground, background)

        return outimage
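The fall-through branch above is a plain alpha blend; a minimal standalone sketch of the same pattern with synthetic data:

import numpy
import cv2

base = numpy.full((64, 64, 3), 200, numpy.uint8)   # background frame
face = numpy.full((64, 64, 3), 50, numpy.uint8)    # warped new face
mask = numpy.zeros((64, 64, 3), numpy.float64)
mask[16:48, 16:48] = 1.0                           # region to blend in
foreground = cv2.multiply(mask, face.astype(float))
background = cv2.multiply(1.0 - mask, base.astype(float))
blended = cv2.add(foreground, background).astype(numpy.uint8)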
Code Example #7
def find_marker(image, red_thres, green_thres, sat_thres):

    # h,w, channels = img.shape

    # get red and sat
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    blue, green, red = cv2.split(image)
    hue, sat, val = cv2.split(hsv)

    # find the marker by looking for red, with high saturation
    sat = cv2.inRange(sat, np.array((sat_thres[0])), np.array((sat_thres[1])))
    red = cv2.inRange(red, np.array((red_thres[0])), np.array((red_thres[1])))
    green = cv2.inRange(green, np.array((green_thres[0])), np.array((green_thres[1])))
    # AND the three thresholds together (multiplying 0/255 masks saturates
    # back to 255, so this acts as a logical AND), isolating the car
    car = cv2.multiply(red, sat)
    car = cv2.multiply(car, green)

    # remove noise (not doing it now because the POIs are very small)
    # elem = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
    # car = cv2.erode(car,elem, iterations=1)
    # car = cv2.dilate(car,elem, iterations=3)
    # return cv2.boundingRect(car)

    # OpenCV 3.x findContours returns (image, contours, hierarchy);
    # OpenCV 4.x returns only (contours, hierarchy)
    img, contours, hierarchy = cv2.findContours(car.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # import ipdb; ipdb.set_trace()

    # return cv2.boundingRect(contours[1])
    return map(lambda x: cv2.boundingRect(x), contours)
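A hypothetical call (the threshold tuples are placeholders, and list() forces the lazy map on Python 3):

frame = cv2.imread('frame.png')  # hypothetical input image
boxes = list(find_marker(frame,
                         red_thres=(150, 255),
                         green_thres=(0, 100),
                         sat_thres=(100, 255)))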
Code Example #8
def ColormapBoundry(image, mean, std, low, high):
    newMin = mean - 0.5*(8-low)*std
    newMax = mean + 0.5*high*std
    newSlope = 255.0/(newMax-newMin)
    cv2.subtract(image, newMin, image)
    cv2.multiply(image, newSlope, image)
    return image.astype("uint8", copy=False)
Code Example #9
def draw_mouth(mouth, character, x, y, width, height):
    fit_image = fit_character(mouth[0], width, height)
    fit_mask = fit_character(mouth[1], width, height)
    fit_mask2 = fit_character(mouth[2], width, height)
    fit_height, fit_width = fit_image.shape[0:2]
    y_offset = y + fit_height // 6      # integer division so slice bounds stay ints
    y_offset = max(0, min(y_offset, character.shape[0] - fit_height))
    x_offset = x + (width - fit_width) // 2
    x_offset = max(0, min(x_offset, character.shape[1] - fit_width))
    y0, y1 = y_offset, (y_offset+fit_height)
    x0, x1 = x_offset, (x_offset+fit_width)
    fit_mask = numpy.float32(fit_mask) / 255.0
    fit_mask2 = numpy.float32(fit_mask2) / 255.0
    char_region = numpy.float32(character[y0:y1,x0:x1])
    inverse_fit_mask = fit_mask * -1 + 1.0
    mul = cv2.multiply(char_region, fit_mask)
    m1 = cv2.mean(mul)
    m2 = cv2.mean(fit_mask)
    avg = numpy.float32(list(map(lambda x, y: x/(y * 255.0) if y else 0.0, m1, m2)))
    # use (rows, cols) = (fit_height, fit_width) so shapes match the masks
    r = numpy.ones((fit_height,fit_width),numpy.float32) * avg[0]
    g = numpy.ones((fit_height,fit_width),numpy.float32) * avg[1]
    b = numpy.ones((fit_height,fit_width),numpy.float32) * avg[2]
    rgb = cv2.merge((r,g,b))
    rgb += (rgb * -1.0 + 0.8) * fit_mask2
    fit_image = cv2.multiply(numpy.float32(fit_image), rgb)
    fit_image = cv2.multiply(fit_image, inverse_fit_mask)
    character[y0:y1,x0:x1] = numpy.uint8(mul + fit_image)
Code Example #10
File: observe.py Project: eatoin5hrdlu/PACE
def contrast(image) :
	# three rounds of "scale then shift", followed by a binary threshold
	step1 = cv2.add(cv2.multiply(image, 2.0), -60)
	step2 = cv2.add(cv2.multiply(step1, 2.0), -60)
	step3 = cv2.add(cv2.multiply(step2, 2.1), -100)
	(ret, img) = cv2.threshold(step3, 127, 255, cv2.THRESH_BINARY)
	return img
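Ignoring the per-step uint8 saturation, the three rounds compose into a single affine map, pixel -> 8.4*pixel - 478; a sketch of the flattened equivalent:

def contrast_flat(image):
	scaled = cv2.multiply(image, 8.4)    # 2.0 * 2.0 * 2.1
	shifted = cv2.add(scaled, -478.0)    # -(60*2 + 60)*2.1 - 100
	(ret, img) = cv2.threshold(shifted, 127, 255, cv2.THRESH_BINARY)
	return img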
Code Example #11
File: sprite.py Project: joanaferreira0011/ml-in-cv
 def overlay_on(self, frame):
     float_frame = frame.astype(float)
     float_frame[self.get_valid_backdrop_slices_for_shape(frame.shape)] = \
         cv2.multiply(1.0 - self.alpha_channel[self.get_valid_sprite_slices_for_shape(frame.shape)],
                      float_frame[self.get_valid_backdrop_slices_for_shape(frame.shape)])
     float_frame[self.get_valid_backdrop_slices_for_shape(frame.shape)] += \
         cv2.multiply(self.alpha_channel, self.image.astype(float))[self.get_valid_sprite_slices_for_shape(frame.shape)]
     return float_frame.astype(np.uint8)
Code Example #12
File: gabor.py Project: Cytryn31/PracaMagisterska
def build_filters(sigma, gamma, ksize, thetaRange, lmdaRange, thetaDivider, lmdaDivider):
    filters = []
    for theta in np.arange(0, thetaRange, thetaRange / thetaDivider):
        for lmda in np.arange(1, lmdaRange, lmdaDivider):
            kern = cv2.getGaborKernel((ksize, ksize), sigma, theta, lmda, gamma, ktype=cv2.CV_64F)
            cv2.multiply(kern, kern, kern)  # square the kernel in place
            filters.append(kern)
    return filters
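A sketch of applying the bank (parameter values are assumptions) with cv2.filter2D, keeping the maximum response per pixel:

import numpy as np
import cv2

img = np.random.rand(64, 64).astype(np.float64)  # placeholder input
filters = build_filters(sigma=4.0, gamma=0.5, ksize=21,
                        thetaRange=np.pi, lmdaRange=10.0,
                        thetaDivider=4, lmdaDivider=2)
response = np.zeros_like(img)
for kern in filters:
    np.maximum(response, cv2.filter2D(img, cv2.CV_64F, kern), out=response)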
Code Example #13
File: Metrics.py Project: IamSVP/MPTC
def compute_DELTAE(imagename1,imagename2):
	img1 = cv2.imread(imagename1)
	img2 = cv2.imread(imagename2)

	
	

	img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2LAB)
	img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2LAB)
	#cv2.imwrite(imagename2+".png",img1)

	# s1 = cv2.absdiff(img1,img2)
	# s1 = np.float32(s1)
	# s1 = cv2.multiply(s1,s1)
	# s1 = cv2.sqrt(s1)

	L1,a1,b1 = cv2.split(img1)
	L2,a2,b2 = cv2.split(img2)
	# cast to float so the squares and differences below don't
	# saturate or wrap around in uint8
	L1, a1, b1, L2, a2, b2 = [c.astype(np.float64) for c in (L1, a1, b1, L2, a2, b2)]

	dL = L1 - L2
	da = a1 - a2
	db = b1 - b2
	# cv2.imwrite(imagename2+".png",dL)
	
	# dL_2 = cv2.multiply(dL,dL)
	# da_2 = cv2.multiply(da,da)
	# db_2 = cv2.multiply(db,db)
	# dL_2 = np.float32(dL_2)
	# da_2 = np.float32(da_2)
	# db_2 = np.float32(db_2)
	# dE = cv2.sqrt( (dL_2) + (da_2) + (db_2))
	# mde = cv2.mean(dE)
	# print mde


	c1 = np.sqrt(cv2.multiply(a1,a1) + cv2.multiply(b1,b1))
	c2 = np.sqrt(cv2.multiply(a2,a2) + cv2.multiply(b2,b2))
	dCab = c1-c2
	# CIE94 hue difference: dH^2 = da^2 + db^2 - dCab^2; np.abs guards
	# against tiny negative values from floating-point rounding
	dH = np.sqrt(np.abs(cv2.multiply(da,da) + cv2.multiply(db,db) - cv2.multiply(dCab,dCab)))
	sL = 1
	K1 = 0.045 #can be changed
	K2 = 0.015 #can be changed
	sC = 1+K1*c1
	sH = 1+K2 *c1
	kL = 1 #can be changed

	t1 = cv2.divide(dL,kL*sL)
	t2 = cv2.divide(dCab,sC)
	t3 = cv2.divide(dH,sH)
	t1 = cv2.multiply(t1,t1)
	t2 = cv2.multiply(t2,t2)
	t3 = cv2.multiply(t3,t3)
	t1 = np.float32(t1)
	t2 = np.float32(t2)
	t3 = np.float32(t3)
	dE = cv2.sqrt(t1+t2+t3)
	mde = cv2.mean(dE)
	return "{0:.4f}".format(mde[0])
Code Example #14
def get_object_grass(image):

#     outputx = cv2.resize(output,(649,486))
#     cv2.imshow('image',outputx)
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()
    
    B = image[:,:,0]
    G = image[:,:,1]
    R = image[:,:,2]
    R_ = cv2.multiply(R, 0.25)
    G = cv2.multiply(G, 0.25)
    B = cv2.multiply(B, 0.5)
    R = cv2.subtract(R, R_)
    R = cv2.subtract(R, G)
    new_image = cv2.subtract(R, B)
    
    
    #gray = cv2.add(cv2.add(image[:,:,2],-image[:,:,1]/2),-image[:,:,0]/2)
   
#     output = cv2.resize(new_image,(649,486))
#     cv2.imshow('image',output)
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()
    
    gray = cv2.medianBlur(new_image,3)
    
    #gray = cv2.bilateralFilter(gray,5,75,75)

    # find the contours in the edged image and keep the largest one;
    _, cnts, _ = cv2.findContours(gray.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]

    for c in cnts:
	# approximate the contour
        
        
        marker = cv2.minAreaRect(c)
        x,y,w,h = cv2.boundingRect(c)
        coords = (x,y,w,h)
        M = cv2.moments(c)
        cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
        center = (cx,cy)
        colors = get_colors(image,coords)

        #print(marker)
        area = marker[1][0]*marker[1][1]
        if ((abs(marker[2]) > 60 or abs(marker[2]) < 40) and area > 7000 and area < 250000
            and marker[1][0]> 50 and marker[1][1] > 50 and colors['blue']<200
            ):

             return (marker,coords,center)
        else:
            continue
        # if our approximated contour has four points, then
        # we can assume that we have found our screen

    return (((0, 0), (0, 0), 0),(0,0,0,0),(0,0))
Code Example #15
File: vector_final.py Project: austinglaser/tdfw
def doMDScalibration(debug):
	retVal = (0, 0)
	cap = cv2.VideoCapture(1) 
	cap.set(10,-0.5) #set brightness (property 10 = CAP_PROP_BRIGHTNESS)
	cap.set(12,0.8) #set saturation (property 12 = CAP_PROP_SATURATION)
	ret, frame = cap.read()

	#unsharp mask
	unsharp_mask = cv2.blur(frame, (2, 2))
	frame = cv2.addWeighted(frame, 1.5, unsharp_mask, -0.5, 0.0)

	#contrast enhancement
	array_alpha = np.array([1.2])
	array_beta = np.array([-30.0])
    # add a beta value to every pixel 
	cv2.add(frame, array_beta, frame)                    
    # multiply every pixel value by alpha
	cv2.multiply(frame, array_alpha, frame)  

	boundaries = [([0, 150, 180], [10, 205, 230])]	#very rough color estimation, no absolute color detection
	# loop over the boundaries which actually doesn't matter right now, it only runs once
	for (lower, upper) in boundaries:
		# create NumPy arrays from the boundaries
		lower = np.array(lower, dtype = "uint8")
		upper = np.array(upper, dtype = "uint8")
	 
		# find the colors within the specified boundaries and apply
		# the mask
		mask = cv2.inRange(frame, lower, upper)
		kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(6,8))
		mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
		kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
		mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
		output = cv2.bitwise_and(frame, frame, mask = mask)

		Omoments = cv2.moments(mask)
		dM01 = Omoments['m01']
		dM10 = Omoments['m10']
		dArea = Omoments['m00']

		if dArea > 8000: #the dot is on the screen
			posX = int(dM10/dArea)
			posY = int(dM01/dArea)
			retVal = (posX, posY)
			#draw a circle if the indicator was detected
			cv2.circle(frame, (posX, posY), 8, (0, 255, 255), -1)

	if(debug):
		#debug: show the image to ensure the puck is detected
		cv2.imshow("images", np.hstack([frame, output]))
		cv2.waitKey(0)

	cap.release()
	cv2.destroyAllWindows()
	print(retVal)
	return retVal
Code Example #16
File: test.py Project: HapeMask/opencv
    def test_umat_construct(self):
        data = np.random.random([512, 512])
        # UMat constructors
        data_um = cv2.UMat(data)  # from ndarray
        data_sub_um = cv2.UMat(data_um, [0, 256], [0, 256])  # from UMat
        data_dst_um = cv2.UMat(256, 256, cv2.CV_64F)  # from size/type

        # simple test
        cv2.multiply(data_sub_um, 2., dst=data_dst_um)
        assert np.allclose(2. * data[:256, :256], data_dst_um.get())
Code Example #17
File: pongGame.py Project: ErikMayrhofer/Poce
    def get_blurred_field(video, upper_left, lower_right, blur, brightness):
        middle_mat = np.zeros(video.shape, dtype=np.float32)
        cv2.rectangle(middle_mat, upper_left, lower_right, (1.0, 1, 1), thickness=-1)
        middle_mat = cv2.blur(middle_mat, (20, 20))

        middle_field_vid = cv2.blur(video, (int(blur), int(blur)))
        middle_field_vid = cv2.multiply(middle_field_vid, brightness * middle_mat, dtype=3)  # dtype 3 = cv2.CV_16S
        rest_vid = cv2.multiply(video, 1 - middle_mat, dtype=3)

        return cv2.add(middle_field_vid, rest_vid)
Code Example #18
def scaleMul(img, image,wavelet):
    print('1')
    cA1,cH1,cV1,cD1= WaveletTransform(image,wavelet)
    print('2')
    cA2,cH2,cV2,cD2= WaveletTransform(cA1,wavelet)
    print('3')
    cA3,cH3,cV3,cD3= WaveletTransform(cA2,wavelet)
    print('4')
    cA4,cH4,cV4,cD4= WaveletTransform(cA3,wavelet)
    
    cHH1=cv2.multiply(cH1,cH2)
    cVV1=cv2.multiply(cV1,cV2)
    cDD1=cv2.multiply(cD1,cD2)
    
    cHH2=cv2.multiply(cH2,cH3)
    cVV2=cv2.multiply(cV2,cV3)
    cDD2=cv2.multiply(cD2,cD3)
    
    cHH3=cv2.multiply(cH3,cH4)
    cVV3=cv2.multiply(cV3,cV4)
    cDD3=cv2.multiply(cD3,cD4)
    
    #####------Adding the horizontal, vertical and diagonal details to form a combined edge map------#####
    final1=cHH1+cVV1+cDD1
    final2=cHH2+cVV2+cDD2
    final3=cHH3+cVV3+cDD3
    
    imsave('results/'+img+'level_1and2.png',final1)
    imsave('results/'+img+'level_2and3.png',final2)
    imsave('results/'+img+'level_3and4.png',final3)
Code Example #19
def imageenhancement(image):
    hsv=cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
    bgImg,fundusMask = computebgimg(image)
    bgImg = cv2.multiply(image[:,:,1].astype(float),bgImg.astype(float))
    ldrift,cdrift = LumConDrift(bgImg,fundusMask)

    g = image[:,:,1].astype(float)

    imgCorr = cv2.divide(cv2.subtract(g,ldrift),(cv2.add(cdrift,0.0001)))
    imgCorr = cv2.multiply(imgCorr,fundusMask.astype(float))

    imgCorr = cv2.add(imgCorr,np.abs(np.min(imgCorr)))
    imgCorr = cv2.divide(imgCorr,np.max(imgCorr))
    imgCorr = cv2.multiply(imgCorr,fundusMask.astype(float))


    image = image.astype(float)
    image[:,:,0] = cv2.divide(cv2.multiply(imgCorr,image[:,:,0]),hsv[:,:,2].astype(float))
    image[:,:,1] = cv2.divide(cv2.multiply(imgCorr,image[:,:,1]),hsv[:,:,2].astype(float))
    image[:,:,2] = cv2.divide(cv2.multiply(imgCorr,image[:,:,2]),hsv[:,:,2].astype(float))


    fundusMask = fundusMask.astype(float)
    image[:,:,0] = cv2.multiply(image[:,:,0],fundusMask)
    image[:,:,1] = cv2.multiply(image[:,:,1],fundusMask)
    image[:,:,2] = cv2.multiply(image[:,:,2],fundusMask)
    out = image[:,:,1]*255
    return out 
Code Example #20
def process_frame(frame):
    """ Process frame based on user input """
    center_val = cv2.getTrackbarPos(tbar_hue_select_name, win_debug_name)
    span_val = cv2.getTrackbarPos(tbar_span_name, win_debug_name)
    
    
    #get mirror point from values
    offset_val = 255 // 2 - center_val
    
    #get bounds

    #convert to hsv
    frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    #convert grayscale
    frame = get_channel(frame_hsv, 0)
    
    #set up debug window
    temp = cv2.resize(frame,None,fx=3,fy=1,interpolation=cv2.INTER_NEAREST)
    (h,w) = temp.shape
    x1 = 0
    x2 = w // 3          # integer division so the slice indices stay ints
    x3 = 2 * w // 3
    output_frame = np.zeros(temp.shape+(3,), np.uint8)
    
    #print input frame (#1)
    output_frame[0:h,0:x2] = cv2.cvtColor(cv2.multiply(2,frame), cv2.COLOR_GRAY2BGR) ####################
    
    #print rough threshold frame (#2)
    output_frame[0:h,x2:x3] = cv2.cvtColor(
        cv2.inRange(frame,center_val-span_val/2,center_val+span_val/2),
        cv2.COLOR_GRAY2BGR
        ) #############################
    
    
    
    #normalize
    normalized_hue_frame = frame 
    normalized_hue_frame += offset_val
    if ABS_DIFF:
        distance_frame = cv2.absdiff(normalized_hue_frame,127)
        normalized_hue_frame = cv2.subtract(127,distance_frame)
    
    #print final frame (#3)
    output_frame[0:h,x3:w] = cv2.cvtColor(
        cv2.multiply(2,normalized_hue_frame),
        cv2.COLOR_GRAY2BGR) ################
    #frame_hsv[:,:,0] = normalized_hue_frame
    #output_frame[0:h,x3:w] = cv2.cvtColor(frame_hsv,cv2.COLOR_HSV2BGR) ################
    
    
    
    
    print("Hue Shift: frame += %d. Hue Sel = %d +/- %d" % (offset_val,center_val,span_val/2))
    return output_frame
Code Example #21
File: test_umat.py Project: ArkaJU/opencv
 def test_umat_construct(self):
     data = np.random.random([512, 512])
     # UMat constructors
     data_um = cv.UMat(data)  # from ndarray
     data_sub_um = cv.UMat(data_um, (128, 256), (128, 256))  # from UMat
     data_dst_um = cv.UMat(128, 128, cv.CV_64F)  # from size/type
     # test continuous and submatrix flags
     assert data_um.isContinuous() and not data_um.isSubmatrix()
     assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
     # test operation on submatrix
     cv.multiply(data_sub_um, 2., dst=data_dst_um)
     assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())
Code Example #22
File: skinscan.py Project: rogvold/skinry
def change_constrast(image, alpha, beta):
    """
    Change the contrast of image, return processed image

    Input: image, contrast coefficient and brightness coefficient
    """

    array_alpha = np.array([alpha])
    array_beta = np.array([beta])

    cv2.add(image, array_beta, image)
    cv2.multiply(image, array_alpha, image)

    return image
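Example usage (values are placeholders); note the function modifies the input array in place as well as returning it:

img = cv2.imread('photo.jpg')  # hypothetical path
img = change_constrast(img, alpha=1.2, beta=15.0)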
Code Example #23
File: fsmedia.py Project: Nioy/faceswap
    def detect_blurry_faces(self, face, t_mat, resized_image, filename):
        """ Detect and move blurry face """
        if not hasattr(self.args, 'blur_thresh') or not self.args.blur_thresh:
            return None

        blurry_file = None
        aligned_landmarks = self.extractor.transform_points(
            face.landmarks_as_xy(),
            t_mat,
            256,
            48)
        feature_mask = self.extractor.get_feature_mask(aligned_landmarks / 256,
                                                       256,
                                                       48)
        feature_mask = cv2.blur(feature_mask, (10, 10))
        isolated_face = cv2.multiply(
            feature_mask,
            resized_image.astype(float)).astype(np.uint8)
        blurry, focus_measure = is_blurry(isolated_face, self.args.blur_thresh)

        if blurry:
            print("{}'s focus measure of {} was below the blur threshold, "
                  "moving to \"blurry\"".format(Path(filename).stem,
                                                focus_measure))
            blurry_file = get_folder(Path(self.output_dir) /
                                     Path("blurry")) / Path(filename).stem
        return blurry_file
Code Example #24
def __tutorial_hough_circle_detection_cv(img_path, min_dim=40, max_dim=60):
    img_color = cv2.imread(img_path)
    img_filtered = cv2.pyrMeanShiftFiltering(img_color, 10, 10)
    # pyrMeanShiftFiltering returns a 3-channel image, so convert with BGR2GRAY
    img_filtered = cv2.cvtColor(img_filtered, cv2.COLOR_BGR2GRAY)
    img_filtered = img_filtered.astype(float)
    img_blurred = cv2.GaussianBlur(img_filtered, (7, 7), sigmaX=0)
    img_laplacian = cv2.Laplacian(img_blurred, ddepth=cv2.CV_64F)

    weight = 0.01 * 40
    scale = 0.01 * 20
    img_sharpened = (1.5 * img_filtered) - (0.5 * img_blurred) - (weight * cv2.multiply(img_filtered, scale * img_laplacian))
    img_sharpened = img_sharpened.astype("uint8")

    min_r = int(min_dim / 2)
    max_r = int(max_dim / 2)
    circles = cv2.HoughCircles(img_sharpened, cv2.HOUGH_GRADIENT, 1, 1, param1=50, param2=30, minRadius=min_r, maxRadius=max_r)

    if circles is not None:
        circles = np.around(circles).astype(int)
        for i in circles[0, :]:
            # draw the outer circle
            cv2.circle(img_color, (i[0], i[1]), i[2], (0, 255, 0), 2)
            # draw the center of the circle
            cv2.circle(img_color, (i[0], i[1]), 2, (0, 0, 255), 3)

    cv2.imwrite("D://_Dataset//GTSDB//Test_Regions//_img2_1.png", img_sharpened)
    cv2.imwrite("D://_Dataset//GTSDB//Test_Regions//_img2_2.png", img_color)
Code Example #25
File: tantriggs.py Project: Nambu14/Ringo
def tantriggs(image):
    # Convert to float
    image = np.float32(image)

    image = cv2.pow(image, GAMMA)
    image = difference_of_gaussian(image)

    # mean 1
    tmp = cv2.pow(cv2.absdiff(image, 0), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # mean 2
    tmp = cv2.pow(cv2.min(cv2.absdiff(image, 0), TAU), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # tanh
    exp_x = cv2.exp(cv2.divide(image, TAU))
    exp_negx = cv2.exp(cv2.divide(-image, TAU))
    image = cv2.divide(cv2.subtract(exp_x, exp_negx), cv2.add(exp_x, exp_negx))
    image = cv2.multiply(image, TAU)

    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    return image
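For reference, the "tanh" block above computes TAU * tanh(image / TAU) from exponentials; a one-line NumPy sketch of the same step (TAU as defined in the file):

image = TAU * np.tanh(image / TAU)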
Code Example #26
File: ImageUtils.py Project: IreneFidone/TUC-Team
def ContrastCorrection(img, correction):
    # multiply every pixel by the (integer) correction factor
    x,y=img.shape
    bright= np.ndarray( shape=(x,y), dtype="uint8" )
    bright.fill(correction)
    img = cv2.multiply(img, bright)
    return img
Code Example #27
File: fsmedia.py Project: stonezuohui/faceswap
    def process(self, output_item):
        """ Detect and move blurry face """
        extractor = AlignerExtract()

        for idx, detected_face in enumerate(output_item["detected_faces"]):
            frame_name = detected_face["file_location"].parts[-1]
            face = detected_face["face"]
            logger.trace("Checking for blurriness. Frame: '%s', Face: %s", frame_name, idx)
            aligned_landmarks = face.aligned_landmarks
            resized_face = face.aligned_face
            size = face.aligned["size"]
            padding = int(size * 0.1875)
            feature_mask = extractor.get_feature_mask(
                aligned_landmarks / size,
                size, padding)
            feature_mask = cv2.blur(  # pylint: disable=no-member
                feature_mask, (10, 10))
            isolated_face = cv2.multiply(  # pylint: disable=no-member
                feature_mask,
                resized_face.astype(float)).astype(np.uint8)
            blurry, focus_measure = self.is_blurry(isolated_face)

            if blurry:
                blur_folder = detected_face["file_location"].parts[:-1]
                blur_folder = get_folder(Path(*blur_folder) / Path("blurry"))
                detected_face["file_location"] = blur_folder / Path(frame_name)
                logger.verbose("%s's focus measure of %s was below the blur threshold, "
                               "moving to 'blurry'", frame_name, "{0:.2f}".format(focus_measure))
Code Example #28
File: main.py Project: yycho0108/LearnOpenCV
def dropout(i,p):
    # randomly zero out roughly a fraction p of the pixels in a float image
    mask = np.empty(i.shape, dtype=np.int16)
    mask = cv2.randu(mask,0,255)

    val, mask = cv2.threshold(mask,p*255,255,cv2.THRESH_BINARY)
    mask = np.asarray(mask,dtype=np.float64) / 255.0
    return cv2.multiply(i,mask)
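Usage sketch: cv2.multiply requires matching types, so the input should already be a float64 image here; the data below is hypothetical.

img = np.random.rand(32, 32)   # float64 in [0, 1)
out = dropout(img, 0.3)        # roughly 30% of pixels zeroed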
Code Example #29
def blurshift(img_blur,sigma0,rhoi,phii):
    img_blur = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)
    rows,cols = img_blur.shape
    alpha = 1
    #the std deviation of the gaussian kernel
    sigma1 = int(sigma0)+alpha*(int(rhoi))

    ##img_blur = cv2.GaussianBlur(img,(sigma1,sigma1),0)
    filter_gaussian = gauss2D((rows,cols),sigma1)
    ##plt.imshow(filter_gaussian, cmap=plt.get_cmap('jet'), interpolation='nearest')
    ##plt.colorbar()
    ##plt.show()

    # the shift in x and y direction for DOG response
    delta_xi = -1*int(rhoi)*np.cos(int(phii))
    delta_yi = -1*int(rhoi)*np.sin(int(phii))
    x1 = sigma1
    y1 = sigma1
    rows1,cols1 =img_blur.shape
    M = np.float32([[1,0,delta_xi+x1],[0,1,delta_yi+y1]])
    #translating the response 
    Dogresp_trans = cv2.warpAffine(img_blur,M,(cols1,rows1))
    #multiplying the coefficients of the gaussian and the translated DOG response
    Dog_blur_shifted = cv2.multiply(Dogresp_trans.astype(float),filter_gaussian)
    return Dog_blur_shifted
Code Example #30
def thresholdColor(img, colattr):
    (domchan, dommin, first, second) = colattr
    channels = cv2.split(img)#red, green, blue
    width, height, cha = img.shape
    mult = np.empty((width,height)).astype(np.uint8)
    mult.fill(255)
    red = channels[2].astype(np.uint8)
    green = channels[1].astype(np.uint8)
    blue = channels[0].astype(np.uint8)
    firsttype = np.zeros(img.shape)
    secondtype = np.zeros(img.shape)

    if domchan == "r":
        zerotype = (red > dommin)
        firsttype = np.true_divide(red,green)#r/g
        secondtype = np.true_divide(red,blue)#r/b
    elif domchan == "g":
        zerotype = (green > dommin)
        firsttype = np.true_divide(green,red)#g/r
        secondtype = np.true_divide(green,blue)#g/b

    # np.int was removed from NumPy; use uint8, which cv2.bitwise_and accepts
    zerotype = zerotype.astype(np.uint8)
    firsttype = (firsttype > first).astype(np.uint8)# & (firsttype < first[1])
    secondtype = (secondtype > second).astype(np.uint8)# & (secondtype < second[1])
    combined = cv2.bitwise_and(cv2.bitwise_and(zerotype, secondtype), firsttype)
    combined = cv2.multiply(combined.astype(np.uint8), mult)

    return combined
Code Example #31
def process_output(image, output, raw_img, threshold, min_area):
    height, width, _ = image.shape
    pred_x = output[0, :]
    pred_y = output[1, :]

    magnitude, angle = cv2.cartToPolar(pred_x, pred_y)

    thr = np.array([threshold])
    mask = cv2.compare(magnitude, thr, cv2.CMP_GT)
    mask = mask.astype(np.float32)

    parent = np.zeros((height, width, 2), np.float32)
    ending = np.zeros((height, width), np.float32)
    merged_ending = np.zeros((height, width), np.float32)
    PI = np.pi
    for row in range(0, height):
        for col in range(0, width):
            if mask[row][col] == 255:
                if angle[row][col] < PI/8 or angle[row][col] >= 15*PI/8:
                    parent[row][col] = [1,0]
                    if row+1 <= height-1 and mask[row+1][col] == 0:
                        ending[row][col] = 1
                elif angle[row][col] >= PI/8 and angle[row][col] < 3*PI/8:
                    parent[row][col] = [1,1]
                    if row+1 <= height-1 and col+1 <= width-1 and mask[row+1][col+1] == 0:
                        ending[row][col] = 1
                elif angle[row][col] >= 3*PI/8 and angle[row][col] < 5*PI/8:
                    parent[row][col] = [0,1]
                    if col+1 <= width-1 and mask[row][col+1] == 0:
                        ending[row][col] = 1
                elif angle[row][col] >= 5*PI/8 and angle[row][col] < 7*PI/8:
                    parent[row][col] = [-1,1]
                    if row-1 >= 0 and col+1 <= width-1 and mask[row-1][col+1] == 0:
                        ending[row][col] = 1
                elif angle[row][col] >= 7*PI/8 and angle[row][col] < 9*PI/8:
                    parent[row][col] = [-1,0]
                    if row-1 >= 0 and mask[row-1][col] == 0:
                        ending[row][col] = 1
                elif angle[row][col] >= 9*PI/8 and angle[row][col] < 11*PI/8:
                    parent[row][col] = [-1,-1]
                    if row-1 >= 0 and col-1 >= 0 and mask[row-1][col-1] == 0:
                        ending[row][col] = 1
                elif angle[row][col] >= 11*PI/8 and angle[row][col] < 13*PI/8:
                    parent[row][col] = [0,-1]
                    if col-1 >= 0 and mask[row][col-1] == 0:
                        ending[row][col] = 1
                elif angle[row][col] >= 13*PI/8 and angle[row][col] < 15*PI/8:
                    parent[row][col] = [1,-1]
                    if row+1 <= height-1 and col-1 >= 0 and mask[row+1][col-1] == 0:
                        ending[row][col] = 1

    p = Coordinate()
    pc = Coordinate()
    pt = Coordinate()
    visited = np.zeros((height, width), np.float32)
    dict_id = np.zeros((height, width, 2), np.float32)
    # blob lableing to construct trees encoded by P
    # get depth each pixel in text instance
    sup_idx = 1
    for row in range(0, height):
        for col in range(0, width):
            if mask[row][col] == 255 and visited[row][col] == 0:
                p.x = row
                p.y = col
                Q = queue.Queue()
                Q.put(p)
                while not Q.empty():
                    pc = Q.get()
                    dict_id[pc.x][pc.y][0] = sup_idx
                    visited[pc.x][pc.y] = 1
                    for dx in range(-1, 2):
                        for dy in range(-1, 2):
                            pt.x = pc.x + dx
                            pt.y = pc.y + dy
                            if pt.x >= 0 and pt.x <= height-1 and pt.y >= 0 and pt.y <= width-1:
                                if visited[pt.x][pt.y] == 0 and (parent[pt.x][pt.y][0] != 0 or parent[pt.x][pt.y][1] != 0):
                                    if parent[pt.x][pt.y][0] == -1*dx and parent[pt.x][pt.y][1] == -1*dy:
                                        Q.put(pt.copy())
                                        dict_id[pc.x][pc.y][1] = max(
                                            dict_id[pc.x][pc.y][1], dict_id[pt.x][pt.y][1]+1)
                                    elif parent[pc.x][pc.y][0] == 1*dx and parent[pc.x][pc.y][1] == 1*dy:
                                        Q.put(pt.copy())
                                        dict_id[pt.x][pt.y][1] = max(
                                            dict_id[pt.x][pt.y][1], dict_id[pc.x][pc.y][1]+1)
                sup_idx += 1

    element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # fill hole in ending
    merged_ending = cv2.dilate(
        ending, element, iterations=5).astype(np.float32)
    # dilate ending
    for row in range(0, height):
        for col in range(0, width):
            if ending[row][col] == 1:
                for dilDepth in range(1, min(int(1*dict_id[row][col][1]-16), 12)+1):
                    p.x = row+int(parent[row][col][0]*dilDepth)
                    p.y = col+int(parent[row][col][1]*dilDepth)
                    if p.x >= 0 and p.x <= height-1 and p.y >= 0 and p.y <= width-1:
                        merged_ending[p.x][p.y] = 1

    # find connected Components
    cctmp = merged_ending.astype(np.uint8)
    # ccnum: num component, cctmp: mask component
    ccnum, cctmp = cv2.connectedComponents(
        cctmp, connectivity=8, ltype=cv2.CV_16U)
    label = cctmp.astype(np.float32)

    sup_map_cc = np.zeros((sup_idx), np.int32)
    stat = np.zeros((ccnum, 8), np.int32)
    # calculate num stat each label and assign label each sup_idx
    for row in range(0, height):
        for col in range(0, width):
            if ending[row][col] == 1:
                dx = int(parent[row][col][0])
                dy = int(parent[row][col][1])
                cc_idx = int(label[row][col])
                sup_map_cc[int(dict_id[row][col][0])] = cc_idx
                if dx == 1 and dy == 0:  # down
                    stat[cc_idx][0] += 1
                if dx == 1 and dy == 1:  # down right
                    stat[cc_idx][1] += 1
                if dx == 0 and dy == 1:  # right
                    stat[cc_idx][2] += 1
                if dx == -1 and dy == 1:  # up right
                    stat[cc_idx][3] += 1
                if dx == -1 and dy == 0:  # up
                    stat[cc_idx][4] += 1
                if dx == -1 and dy == -1:  # up left
                    stat[cc_idx][5] += 1
                if dx == 0 and dy == -1:  # left
                    stat[cc_idx][6] += 1
                if dx == 1 and dy == -1:  # down left
                    stat[cc_idx][7] += 1

    cc_map_filted = np.zeros((ccnum), np.int32)
    filted_idx = 1
    # Filter unblanced Text
    for cc_idx in range(1, ccnum):
        dif1 = np.abs(stat[cc_idx][0] - stat[cc_idx][4])  # abs(down - up)
        # abs(down_right - up_left)
        dif2 = np.abs(stat[cc_idx][1] - stat[cc_idx][5])
        dif3 = np.abs(stat[cc_idx][2] - stat[cc_idx][6])  # abs(right - left)
        # abs(down_left - up_right)
        dif4 = np.abs(stat[cc_idx][3] - stat[cc_idx][7])
        sum1 = stat[cc_idx][0]+stat[cc_idx][1]+stat[cc_idx][2]+stat[cc_idx][3]
        sum2 = stat[cc_idx][4]+stat[cc_idx][5]+stat[cc_idx][6]+stat[cc_idx][7]
        difsum = np.abs(sum1-sum2)
        sum_total = sum1 + sum2
        ratio1 = float(difsum) / float(sum_total)
        ratio2 = float(dif1+dif2+dif3+dif4) / float(sum_total)
        # keep candidate have low ratio (high opposite directions)
        # <=0.6 mean >40% opposite directions
        if ratio1 <= 0.6 and ratio2 <= 0.6:
            cc_map_filted[cc_idx] = filted_idx
            filted_idx += 1

    # filter candidate
    for row in range(0, height):
        for col in range(0, width):
            if label[row][col] == 0:
                label[row][col] = cc_map_filted[int(
                    sup_map_cc[int(dict_id[row][col][0])])]
            else:
                label[row][col] = cc_map_filted[int(label[row][col])]

    res = np.zeros((height, width), np.float32)
    element_ = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))
    # get result mask
    for i in range(1, filted_idx):
        clstmp = cv2.compare(label, np.array([i]), cv2.CMP_EQ)
        clstmp = cv2.dilate(clstmp, element_, iterations=1)
        clstmp = cv2.erode(clstmp, element_, iterations=1)
        clstmp = cv2.compare(clstmp, np.array([0]), cv2.CMP_GT)
        clstmp = clstmp.astype(np.float32)
        res = cv2.multiply(res, 1-clstmp/255)
        res = cv2.add(res, clstmp/255*i)

    return seg2bbox(res, cv2.resize(raw_img, (width, height)), raw_img, min_area)
Code Example #32
    def process_image(self, blur, threshold, adjustment, erode, iterations):

        self.img = self.original.copy()

        debug_images = []

        alpha = float(2.5)

        debug_images.append(('Original', self.original))

        # Adjust the exposure
        exposure_img = cv2.multiply(self.img, np.array([alpha]))
        debug_images.append(('Exposure Adjust', exposure_img))

        # Convert to grayscale
        img2gray = cv2.cvtColor(exposure_img, cv2.COLOR_BGR2GRAY)
        debug_images.append(('Grayscale', img2gray))

        # Blur to reduce noise
        img_blurred = cv2.GaussianBlur(img2gray, (blur, blur), 0)
        debug_images.append(('Blurred', img_blurred))

        cropped = img_blurred

        # Threshold the image
        cropped_threshold = cv2.adaptiveThreshold(
            cropped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
            threshold, adjustment)
        debug_images.append(('Cropped Threshold', cropped_threshold))

        # Erode the lcd digits to make them continuous for easier contouring
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (erode, erode))
        eroded = cv2.erode(cropped_threshold, kernel, iterations=iterations)
        debug_images.append(('Eroded', eroded))

        # Reverse the image to so the white text is found when looking for the contours
        inverse = inverse_colors(eroded)
        debug_images.append(('Inversed', inverse))

        # Find the lcd digit contours
        _, contours, _ = cv2.findContours(
            inverse, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # get contours

        # Assuming we find some, we'll sort them in order left -> right
        if len(contours) > 0:
            contours, _ = sort_contours(contours)

        potential_decimals = []
        potential_digits = []

        total_digit_height = 0
        total_digit_y = 0

        # Aspect ratio for all non 1 character digits
        desired_aspect = 0.6
        # Aspect ratio for the "1" digit
        digit_one_aspect = 0.3
        # The allowed buffer in the aspect when determining digits
        aspect_buffer = 0.15

        # Loop over all the contours collecting potential digits and decimals
        for contour in contours:
            # get rectangle bounding contour
            [x, y, w, h] = cv2.boundingRect(contour)

            aspect = float(w) / h
            size = w * h

            # It's a square, save the contour as a potential digit
            if size > 100 and aspect >= 1 - .3 and aspect <= 1 + .3:
                potential_decimals.append(contour)

            # If it's small and it's not a square, kick it out
            if size < 20 * 100 and (aspect < 1 - aspect_buffer
                                    or aspect > 1 + aspect_buffer):
                continue

            # Ignore any rectangles where the width is greater than the height
            if w > h:
                if self.debug:
                    cv2.rectangle(self.img, (x, y), (x + w, y + h),
                                  (0, 0, 255), 2)
                continue

            # If the contour is of decent size and fits the aspect ratios we want, we'll save it
            if ((size > 2000 and aspect >= desired_aspect - aspect_buffer
                 and aspect <= desired_aspect + aspect_buffer) or
                (size > 1000 and aspect >= digit_one_aspect - aspect_buffer
                 and aspect <= digit_one_aspect + aspect_buffer)):
                # Keep track of the height and y position so we can run averages later
                total_digit_height += h
                total_digit_y += y
                potential_digits.append(contour)
            else:
                if self.debug:
                    cv2.rectangle(self.img, (x, y), (x + w, y + h),
                                  (0, 0, 255), 2)

        avg_digit_height = 0
        avg_digit_y = 0
        potential_digits_count = len(potential_digits)
        left_most_digit = 0
        right_most_digit = 0
        digit_x_positions = []

        # Calculate the average digit height and y position so we can determine what we can throw out
        if potential_digits_count > 0:
            avg_digit_height = float(
                total_digit_height) / potential_digits_count
            avg_digit_y = float(total_digit_y) / potential_digits_count
            if self.debug:
                print "Average Digit Height and Y: " + str(
                    avg_digit_height) + " and " + str(avg_digit_y)

        output = ''
        ix = 0

        # Loop over all the potential digits and see if they are candidates to run through KNN to get the digit
        for pot_digit in potential_digits:
            [x, y, w, h] = cv2.boundingRect(pot_digit)

            # Does this contour match the averages
            if h <= avg_digit_height * 1.2 and h >= avg_digit_height * 0.2 and y <= avg_digit_y * 1.2 and y >= avg_digit_y * 0.2:
                # Crop the contour off the eroded image
                cropped = eroded[y:y + h, x:x + w]
                # Draw a rect around it
                cv2.rectangle(self.img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                debug_images.append(('digit' + str(ix), cropped))

                # Call into the KNN to determine the digit
                digit = self.predict_digit(cropped)
                if self.debug:
                    print "Digit: " + digit
                output += digit

                # Helper code to write out the digit image file for use in KNN training
                if self.write_digits:
                    _, full_file = os.path.split(self.file_name)
                    file_name = full_file.split('.')
                    crop_file_path = CROP_DIR + '/' + digit + '_' + file_name[
                        0] + '_crop_' + str(ix) + '.png'
                    cv2.imwrite(crop_file_path, cropped)

                # Track the x positions of where the digits are
                if left_most_digit == 0 or x < left_most_digit:
                    left_most_digit = x

                if right_most_digit == 0 or x > right_most_digit:
                    right_most_digit = x + w

                digit_x_positions.append(x)

                ix += 1
            else:
                if self.debug:
                    cv2.rectangle(self.img, (x, y), (x + w, y + h),
                                  (66, 146, 244), 2)

        decimal_x = 0
        # Loop over the potential digits and find a square that's between the left/right digit x positions on the
        # lower half of the screen
        for pot_decimal in potential_decimals:
            [x, y, w, h] = cv2.boundingRect(pot_decimal)

            if x < right_most_digit and x > left_most_digit and y > (
                    self.height / 2):
                cv2.rectangle(self.img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                decimal_x = x

        # Once we know the position of the decimal, we'll insert it into our string
        for ix, digit_x in enumerate(digit_x_positions):
            if digit_x > decimal_x:
                # insert
                output = output[:ix] + '.' + output[ix:]
                break

        # Debugging to show the left/right digit x positions
        if self.debug:
            cv2.rectangle(
                self.img, (left_most_digit, int(avg_digit_y)),
                (left_most_digit + right_most_digit - left_most_digit,
                 int(avg_digit_y) + int(avg_digit_height)), (66, 244, 212), 2)

        # Log some information
        if self.debug:
            print "Potential Digits " + str(len(potential_digits))
            print "Potential Decimals " + str(len(potential_decimals))
            print "String: " + output

        return debug_images, output
Code Example #33
def farben(hue1, hue2, satu, vis, farbe, titel):

    img2 = zuschneiden()

    #check whether the image was cropped correctly
    if (not geklappt):
        return

    else:
        cv2.imshow("zugescnitten", img2)

    # color conversion
    hsv = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)

    # split the video into its three color channels
    h, s, v = cv2.split(hsv)

    # compute the masks
    h_mask1 = cv2.inRange(h, hue1 - 25, hue1 + 25)
    h_mask2 = cv2.inRange(h, hue2 - 25, hue2 + 25)
    h_mask = cv2.add(h_mask1, h_mask2)
    s_mask = cv2.inRange(s, satu - 25, satu + 25)
    v_mask = cv2.inRange(v, vis - 25, vis + 25)

    # multiply the individual masks together
    mask1 = cv2.multiply(v_mask, s_mask)
    mask = cv2.multiply(mask1, h_mask)

    #filter out the stones and determine their positions
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    #find the stones of the given color
    for index in range(len(contours)):
        area = cv2.contourArea(contours[index])
        x, y, w, h = cv2.boundingRect(contours[index])
        if (w > (w2 * 0.6)) and (h > (h2 * 0.6) or area > swSteine):
            x, y, w, h = cv2.boundingRect(contours[index])
            cv2.rectangle(mask, (x, y), (x + w, y + h), (255, 255, 225), 3)
            if w > (w2 * 1.3):
                anzahlNebeneinander = w / w2
                print(anzahlNebeneinander)
            x_pos = (x + (w * 0.5)) - (10 + w1)
            position = 0
            if (fl0 < x_pos < fl1):
                position = 1

            elif (fl1 < x_pos < fl2):
                position = 2

            elif (fl2 < x_pos < fl3):
                position = 3

            elif (fl3 < x_pos < fl4):
                position = 4

            elif (fl4 < x_pos < fl5):
                position = 5

            elif (fl5 < x_pos < fl6):
                position = 6

            elif (fl6 < x_pos < fl7):
                position = 7

            elif (fl7 < x_pos < fl8):
                position = 8

            #print(position, farbe, x_pos)
            #print(fl1,fl2,fl3,fl4,fl5,fl6,fl7,fl8)

            sendNoteOn(farbe, position)
            #print("jetzt wird das Bild erzeugt")
            cv2.imshow(titel, mask)
Code Example #34
File: roi_objects.py Project: mohithc/mohi
def roi_objects(img,
                roi_type,
                roi_contour,
                roi_hierarchy,
                object_contour,
                obj_hierarchy,
                device,
                debug=None):
    """Find objects partially inside a region of interest or cut objects to the ROI.

    Inputs:
    img            = img to display kept objects
    roi_type       = 'cutto' or 'partial' (for partially inside)
    roi_contour    = contour of roi, output from "View and Adjust ROI" function
    roi_hierarchy  = hierarchy of roi, output from "View and Adjust ROI" function
    object_contour = contours of objects, output from "Identifying Objects" function
    obj_hierarchy  = hierarchy of objects, output from "Identifying Objects" function
    device         = device number.  Used to count steps in the pipeline
    debug          = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device         = device number
    kept_cnt       = kept contours
    hierarchy      = contour hierarchy list
    mask           = mask image
    obj_area       = total object pixel area

    :param img: numpy array
    :param roi_type: str
    :param roi_contour: list
    :param roi_hierarchy: list
    :param object_contour: list
    :param obj_hierarchy: list
    :param device: int
    :param debug: str
    :return device: int
    :return kept_cnt: list
    :return hierarchy: list
    :return mask: numpy array
    :return obj_area: int
    """

    device += 1
    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)

    size = ix, iy, 3
    background = np.zeros(size, dtype=np.uint8)
    ori_img = np.copy(img)
    w_back = background + 255
    background1 = np.zeros(size, dtype=np.uint8)
    background2 = np.zeros(size, dtype=np.uint8)

    # Allows user to find all objects that are completely inside or overlapping with ROI
    if roi_type == 'partial':
        for c, cnt in enumerate(object_contour):
            length = (len(cnt) - 1)
            stack = np.vstack(cnt)
            test = []
            keep = False
            for i in range(0, length):
                pptest = cv2.pointPolygonTest(roi_contour[0],
                                              (stack[i][0], stack[i][1]),
                                              False)
                if int(pptest) != -1:
                    keep = True
            if keep:
                if obj_hierarchy[0][c][3] > -1:
                    cv2.drawContours(w_back,
                                     object_contour,
                                     c, (255, 255, 255),
                                     -1,
                                     lineType=8,
                                     hierarchy=obj_hierarchy)
                else:
                    cv2.drawContours(w_back,
                                     object_contour,
                                     c, (0, 0, 0),
                                     -1,
                                     lineType=8,
                                     hierarchy=obj_hierarchy)
            else:
                cv2.drawContours(w_back,
                                 object_contour,
                                 c, (255, 255, 255),
                                 -1,
                                 lineType=8,
                                 hierarchy=obj_hierarchy)

        kept = cv2.cvtColor(w_back, cv2.COLOR_RGB2GRAY)
        kept_obj = cv2.bitwise_not(kept)
        mask = np.copy(kept_obj)
        obj_area = cv2.countNonZero(kept_obj)
        kept_cnt, hierarchy = cv2.findContours(kept_obj, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(ori_img,
                         kept_cnt,
                         -1, (0, 255, 0),
                         -1,
                         lineType=8,
                         hierarchy=hierarchy)
        cv2.drawContours(ori_img,
                         roi_contour,
                         -1, (255, 0, 0),
                         5,
                         lineType=8,
                         hierarchy=roi_hierarchy)

    # Allows user to cut objects to the ROI (all objects completely outside ROI will not be kept)
    elif roi_type == 'cutto':
        cv2.drawContours(background1,
                         object_contour,
                         -1, (255, 255, 255),
                         -1,
                         lineType=8,
                         hierarchy=obj_hierarchy)
        roi_points = np.vstack(roi_contour[0])
        cv2.fillPoly(background2, [roi_points], (255, 255, 255))
        obj_roi = cv2.multiply(background1, background2)
        kept_obj = cv2.cvtColor(obj_roi, cv2.COLOR_RGB2GRAY)
        mask = np.copy(kept_obj)
        obj_area = cv2.countNonZero(kept_obj)
        kept_cnt, hierarchy = cv2.findContours(kept_obj, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(w_back, kept_cnt, -1, (0, 0, 0), -1)
        cv2.drawContours(ori_img,
                         kept_cnt,
                         -1, (0, 255, 0),
                         -1,
                         lineType=8,
                         hierarchy=hierarchy)
        cv2.drawContours(ori_img,
                         roi_contour,
                         -1, (255, 0, 0),
                         5,
                         lineType=8,
                         hierarchy=roi_hierarchy)

    else:
        fatal_error('ROI Type' + str(roi_type) +
                    ' is not "cutto" or "partial"!')

    if debug == 'print':
        print_image(w_back, (str(device) + '_roi_objects.png'))
        print_image(ori_img, (str(device) + '_obj_on_img.png'))
        print_image(mask, (str(device) + '_roi_mask.png'))
    elif debug == 'plot':
        plot_image(w_back)
        plot_image(ori_img)
        plot_image(mask, cmap='gray')
        # print ('Object Area=', obj_area)

    return device, kept_cnt, hierarchy, mask, obj_area
Code Example #35
File: test_cudaarithm.py Project: ciwei100000/opencv
    def test_arithmetic(self):
        npMat1 = np.random.random((128, 128, 3)) - 0.5
        npMat2 = np.random.random((128, 128, 3)) - 0.5

        cuMat1 = cv.cuda_GpuMat()
        cuMat2 = cv.cuda_GpuMat()
        cuMat1.upload(npMat1)
        cuMat2.upload(npMat2)
        cuMatDst = cv.cuda_GpuMat(cuMat1.size(), cuMat1.type())

        self.assertTrue(
            np.allclose(
                cv.cuda.add(cuMat1, cuMat2).download(), cv.add(npMat1,
                                                               npMat2)))

        cv.cuda.add(cuMat1, cuMat2, cuMatDst)
        self.assertTrue(
            np.allclose(cuMatDst.download(), cv.add(npMat1, npMat2)))

        self.assertTrue(
            np.allclose(
                cv.cuda.subtract(cuMat1, cuMat2).download(),
                cv.subtract(npMat1, npMat2)))

        cv.cuda.subtract(cuMat1, cuMat2, cuMatDst)
        self.assertTrue(
            np.allclose(cuMatDst.download(), cv.subtract(npMat1, npMat2)))

        self.assertTrue(
            np.allclose(
                cv.cuda.multiply(cuMat1, cuMat2).download(),
                cv.multiply(npMat1, npMat2)))

        cv.cuda.multiply(cuMat1, cuMat2, cuMatDst)
        self.assertTrue(
            np.allclose(cuMatDst.download(), cv.multiply(npMat1, npMat2)))

        self.assertTrue(
            np.allclose(
                cv.cuda.divide(cuMat1, cuMat2).download(),
                cv.divide(npMat1, npMat2)))

        cv.cuda.divide(cuMat1, cuMat2, cuMatDst)
        self.assertTrue(
            np.allclose(cuMatDst.download(), cv.divide(npMat1, npMat2)))

        self.assertTrue(
            np.allclose(
                cv.cuda.absdiff(cuMat1, cuMat2).download(),
                cv.absdiff(npMat1, npMat2)))

        cv.cuda.absdiff(cuMat1, cuMat2, cuMatDst)
        self.assertTrue(
            np.allclose(cuMatDst.download(), cv.absdiff(npMat1, npMat2)))

        self.assertTrue(
            np.allclose(
                cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE).download(),
                cv.compare(npMat1, npMat2, cv.CMP_GE)))

        cuMatDst1 = cv.cuda_GpuMat(cuMat1.size(), cv.CV_8UC3)
        cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE, cuMatDst1)
        self.assertTrue(
            np.allclose(cuMatDst1.download(),
                        cv.compare(npMat1, npMat2, cv.CMP_GE)))

        self.assertTrue(
            np.allclose(cv.cuda.abs(cuMat1).download(), np.abs(npMat1)))

        cv.cuda.abs(cuMat1, cuMatDst)
        self.assertTrue(np.allclose(cuMatDst.download(), np.abs(npMat1)))

        self.assertTrue(
            np.allclose(
                cv.cuda.sqrt(cv.cuda.sqr(cuMat1)).download(),
                cv.cuda.abs(cuMat1).download()))

        cv.cuda.sqr(cuMat1, cuMatDst)
        cv.cuda.sqrt(cuMatDst, cuMatDst)
        self.assertTrue(
            np.allclose(cuMatDst.download(),
                        cv.cuda.abs(cuMat1).download()))

        self.assertTrue(
            np.allclose(cv.cuda.log(cv.cuda.exp(cuMat1)).download(), npMat1))

        cv.cuda.exp(cuMat1, cuMatDst)
        cv.cuda.log(cuMatDst, cuMatDst)
        self.assertTrue(np.allclose(cuMatDst.download(), npMat1))

        self.assertTrue(
            np.allclose(cv.cuda.pow(cuMat1, 2).download(), cv.pow(npMat1, 2)))

        cv.cuda.pow(cuMat1, 2, cuMatDst)
        self.assertTrue(np.allclose(cuMatDst.download(), cv.pow(npMat1, 2)))
Code Example #36
def remove_background(path):
    basewidth = 512
    img = Image.open(path)
    # wpercent = (basewidth / float(img.size[0]))
    # hsize = int((float(img.size[1]) * float(wpercent)))
    # img = img.resize((basewidth, hsize), Image.ANTIALIAS)
    img = img.resize((basewidth, basewidth), Image.ANTIALIAS)
    img.save(path)

    src = cv2.imread(path, 1)
    blurred = cv2.GaussianBlur(src, (5, 5), 0)
    blurred_float = blurred.astype(np.float32) / 255.0
    edgeDetector = cv2.ximgproc.createStructuredEdgeDetection("model.yml")
    edges = edgeDetector.detectEdges(blurred_float) * 255.0
    # cv2.imwrite('images/edge-raw.png', edges)

    edges_8u = np.asarray(edges, np.uint8)
    filterOutSaltPepperNoise(edges_8u)
    # cv2.imwrite('images/edge.png', edges_8u)

    contour = findSignificantContour(edges_8u)
    contourImg = np.copy(src)
    cv2.drawContours(contourImg, [contour], 0, (0, 255, 0), 2, cv2.LINE_AA, maxLevel=1)
    # cv2.imwrite('images/contour.png', contourImg)

    mask = np.zeros_like(edges_8u)
    cv2.fillPoly(mask, [contour], 255)

    # calculate sure foreground area by eroding the mask
    mapFg = cv2.erode(mask, np.ones((5, 5), np.uint8), iterations=10)

    # mark the initial mask as "probably background"
    # and mapFg as sure foreground
    trimap = np.copy(mask)
    trimap[mask == 0] = cv2.GC_BGD
    trimap[mask == 255] = cv2.GC_PR_BGD
    trimap[mapFg == 255] = cv2.GC_FGD

    # visualize trimap
    trimap_print = np.copy(trimap)
    trimap_print[trimap_print == cv2.GC_PR_BGD] = 128
    trimap_print[trimap_print == cv2.GC_FGD] = 255
    # cv2.imwrite('images/trimap.png', trimap_print)

    # run grabcut

    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    rect = (0, 0, mask.shape[1] - 1, mask.shape[0] - 1)  # (x, y, w, h); ignored with GC_INIT_WITH_MASK
    cv2.grabCut(src, trimap, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)

    # create mask again
    mask2 = np.where(
        (trimap == cv2.GC_FGD) | (trimap == cv2.GC_PR_FGD),
        255,
        0
    ).astype('uint8')
    # cv2.imwrite('images/mask2.png', mask2)

    contour2 = findSignificantContour(mask2)
    mask3 = np.zeros_like(mask2)
    cv2.fillPoly(mask3, [contour2], 255)

    # blended alpha cut-out
    mask3 = np.repeat(mask3[:, :, np.newaxis], 3, axis=2)
    mask4 = cv2.GaussianBlur(mask3, (3, 3), 0)
    alpha = mask4.astype(float) * 1.1  # making blend stronger
    alpha[mask3 > 0] = 255
    alpha[alpha > 255] = 255  # clip the 1.1 gain back into range

    foreground = np.copy(src).astype(float)
    foreground[mask4 == 0] = 0
    background = np.ones_like(foreground, dtype=float) * 255

    # cv2.imwrite('images/foreground.png', foreground)
    # cv2.imwrite('images/background.png', background)
    # cv2.imwrite('images/alpha.png', alpha)

    # Normalize the alpha mask to keep intensity between 0 and 1
    alpha = alpha / 255.0
    # Multiply the foreground with the alpha matte
    foreground = cv2.multiply(alpha, foreground)

    cv2.imwrite("images/foreground.png", foreground)

    src = cv2.imread("images/foreground.png", 1)
    os.remove("images/foreground.png")
    tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    _, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
    b, g, r = cv2.split(src)
    rgba = [b, g, r, alpha]
    dst = cv2.merge(rgba)  # merge into a 4-channel BGRA image
    cv2.imwrite(path, dst)
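
# A hedged usage sketch: remove_background overwrites the file at `path` in
# place with an RGBA cut-out. It relies on helpers not shown in this snippet
# (filterOutSaltPepperNoise, findSignificantContour) and on a structured-edge
# "model.yml" next to the script; "images/input.png" is a hypothetical path.
remove_background("images/input.png")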
コード例 #37
0
ファイル: tabletop.py プロジェクト: fbottarel/Mask_RCNN
def apply_detection_results(image,
                            masks,
                            bboxes,
                            class_ids,
                            class_names,
                            colors,
                            scores=None):
    """
    Draws detection results (masks, boxes, and captions) onto a copy of the input image.
    :param image (np.ndarray): 3-channel, 8-bit RGB image. Size must be according to CONFIG file (default 640x480)
    :param masks (np.ndarray): binary array of size [height, width, no_of_detections]
    :param bboxes (list): list of bboxes. Each bbox is a tuple (y1, x1, y2, x2)
    :param class_ids (list): one numerical ID for each detection
    :param class_names (list): string of class names, as given by Dataset.class_names
    :param colors (dict): keys are class names, values are float [0 : 1] 3d tuples representing RGB color
    :param scores (list): list of scores, one for each detection
    :return: result (image): image with detection results splashed on it, 3-channel, 8-bit RGB image
    """

    # Opacity of masks: 0.5
    opacity = 0.5

    result = image.astype(float) / 255

    for detection_idx in range(masks.shape[2]):

        if not np.any(bboxes[detection_idx]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue

        # Get the color in float form
        color = colors[class_names[class_ids[detection_idx]]]

        # Draw the segmentation mask
        mask = masks[:, :, detection_idx]
        alpha_mask = np.stack((mask, mask, mask), axis=2)
        alpha_mask = alpha_mask.astype(float) * opacity
        assert alpha_mask.shape == image.shape

        foreground = np.ones(image.shape, dtype=float) * color
        _background = cv2.multiply(1.0 - alpha_mask, result)
        _foreground = cv2.multiply(alpha_mask, foreground)

        result = cv2.add(_foreground, _background)

        # Draw the bounding box
        y1, x1, y2, x2 = bboxes[detection_idx]
        cv2.rectangle(result, (x1, y1), (x2, y2), color, thickness=1)

        # Caption time
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 0.3
        lineType = 2
        offset_x_text = 2
        offset_y_text = -4
        label = class_names[class_ids[detection_idx]]
        caption = "{} {:.3f}".format(
            label, scores[detection_idx]) if scores.any() else label

        cv2.putText(result,
                    caption, (x1 + offset_x_text, y2 + offset_y_text),
                    fontFace=font,
                    fontScale=fontScale,
                    color=(1.0, 1.0, 1.0),
                    lineType=lineType)

    result *= 255
    result = result.astype(np.uint8)

    return result
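
# A hedged usage sketch: every input below is synthetic and hypothetical,
# shaped only to satisfy the docstring above (one fake detection on a blank
# 480x640 RGB frame).
import numpy as np

demo_image = np.zeros((480, 640, 3), np.uint8)
demo_masks = np.zeros((480, 640, 1), bool)
demo_masks[100:200, 100:200, 0] = True
demo_bboxes = [(100, 100, 200, 200)]                 # (y1, x1, y2, x2)
demo_colors = {'mug': (0.0, 1.0, 0.0)}               # hypothetical class color
demo_result = apply_detection_results(demo_image, demo_masks, demo_bboxes,
                                      class_ids=[1],
                                      class_names=['BG', 'mug'],
                                      colors=demo_colors,
                                      scores=np.array([0.98]))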
コード例 #38
0
    def decode_segmap(image, source, nc=21):

        label_colors = np.array([
            (0, 0, 0),  # 0=background
            # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
            (128, 0, 0),
            (0, 128, 0),
            (128, 128, 0),
            (0, 0, 128),
            (128, 0, 128),
            # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
            (0, 128, 128),
            (128, 128, 128),
            (64, 0, 0),
            (192, 0, 0),
            (64, 128, 0),
            # 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person
            (192, 128, 0),
            (64, 0, 128),
            (192, 0, 128),
            (64, 128, 128),
            (192, 128, 128),
            # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
            (0, 64, 0),
            (128, 64, 0),
            (0, 192, 0),
            (128, 192, 0),
            (0, 64, 128)
        ])

        # create zero arrays shaped like image for the r, g, b channels
        r = np.zeros_like(image).astype(np.uint8)
        g = np.zeros_like(image).astype(np.uint8)
        b = np.zeros_like(image).astype(np.uint8)
        for l in range(0, nc):
            idx = image == l
            r[idx] = label_colors[l, 0]
            g[idx] = label_colors[l, 1]
            b[idx] = label_colors[l, 2]

        # stack the channels into an RGB segmentation map
        rgb = np.stack([r, g, b], axis=2)

        # load the image containing the object to extract
        foreground = cv2.imread(source)

        # convert the image to RGB and resize it to match the map
        foreground = cv2.cvtColor(foreground, cv2.COLOR_BGR2RGB)
        foreground = cv2.resize(foreground, (r.shape[1], r.shape[0]))

        # create a white background array the same size as the rgb map
        background = 255 * np.ones_like(rgb).astype(np.uint8)

        # convert uint8 to float
        foreground = foreground.astype(float)
        background = background.astype(float)

        # create a binary mask from the rgb output map
        th, alpha = cv2.threshold(np.array(rgb), 0, 255, cv2.THRESH_BINARY)

        # blur the mask slightly to soften the edges
        alpha = cv2.GaussianBlur(alpha, (7, 7), 0)

        # normalize the alpha mask to [0, 1]
        alpha = alpha.astype(float) / 255

        # multiply the foreground by the alpha mask
        foreground = cv2.multiply(alpha, foreground)

        # multiply the background by (1 - alpha)
        background = cv2.multiply(1.0 - alpha, background)
        # combine the masked foreground and background
        outImage = cv2.add(foreground, background)
        # save the image
        # plt.imsave('blog/../static/test/test{}.png'.format(i+1), outImage/255)
        image_name = 'blog/../media/process/2020/11/result{}.png'.format(
            str(dt.datetime.now()).replace(' ',
                                           '').replace(':',
                                                       '_').replace('.', '_'))
        plt.imsave(image_name, outImage / 255)

        return image_name
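
    # A hedged usage sketch: `image` is a 2-D array of VOC class indices
    # (e.g. torch.argmax over the model output) and `source` is the path of
    # the photo to cut out; the hard-coded blog/../media/... directory above
    # must already exist for plt.imsave to succeed.
    #
    #   om = torch.argmax(output.squeeze(), dim=0).cpu().numpy()
    #   saved_path = decode_segmap(om, "person.jpg")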
コード例 #39
0
ファイル: alpha_bending.py プロジェクト: aneekdas96/ESPAIL
        # Convert uint8 to float
        foreground = foreground.astype(float)
        background = background.astype(float)

        # Normalize the alpha mask to keep intensity between 0 and 1
        alpha = alpha.astype(float) / 255
        alpha_b = alpha_b.astype(float) / 255
        # Resize everything to a common size. These are NumPy arrays at this
        # point, so cv2.resize is used; ndarray.resize does not accept a PIL
        # filter argument.
        alpha = cv2.resize(alpha, (256, 256))
        foreground = cv2.resize(foreground, (256, 256))
        background = cv2.resize(background, (256, 256))

        # Swap the channel order (RGB <-> BGR)
        alpha = alpha[:, :, ::-1].copy()
        foreground = foreground[:, :, ::-1].copy()
        background = background[:, :, ::-1].copy()

        # Multiply the foreground with the alpha matte
        foreground = cv2.multiply(alpha, foreground)

        # Multiply the background with ( 1 - alpha )
        background = cv2.multiply(1.0 - alpha, background)

        # Add the masked foreground and background.
        outImage = cv2.add(foreground, background)

        cv2.imwrite('out_' + imgi[-6:], outImage)  # imgi: input filename from the elided context
コード例 #40
0
def mask_average(input_param_file, input_mask_file, output_dir):

    try:

        os.chdir(output_dir)

        print(input_mask_file)
        print(type(input_mask_file))
        print(input_mask_file.split("\n"))
        mask_files = input_mask_file.split("\n")
        # print("---")
        # print(mask_files[0])
        # print(mask_files[1])
        # print(len(mask_files))
        nROI = len(mask_files)

        # Load param image
        param_image = nib.load(input_param_file)
        size_x_param = param_image.shape[0]
        size_y_param = param_image.shape[1]
        try:
            size_z_param = param_image.shape[2]
        except:
            size_z_param = None
        param_hdr = param_image.header
        param_qform = param_hdr['qform_code']
        param_sform = param_hdr['sform_code']
        canonical_param_image = nib.as_closest_canonical(param_image)
        param_img_orientation = nib.aff2axcodes(canonical_param_image.affine)
        param_image_data = param_image.get_fdata()
        param_image_original = param_image.get_fdata()
        param_filename = os.path.basename(input_param_file).split('.')[0]

        # Load mask
        mask_image = nib.load(mask_files[0])
        mask_image.affine.shape
        size_x_mask = mask_image.shape[0]
        size_y_mask = mask_image.shape[1]
        try:
            size_z_mask = mask_image.shape[2]
        except:
            size_z_mask = None
        mask_hdr = mask_image.header
        mask_qform = mask_hdr['qform_code']
        mask_sform = mask_hdr['sform_code']
        canonical_mask = nib.as_closest_canonical(mask_image)
        mask_img_orientation = nib.aff2axcodes(canonical_mask.affine)
        mask = np.zeros((size_x_mask, size_y_mask, size_z_mask, nROI))
        for i in range(nROI):
            mask_image = nib.load(mask_files[i])
            mask_image_data = mask_image.get_fdata()
            mask[:, :, :, i] = mask_image_data

        # Check image orientation
        # print(param_hdr.get_sform(coded=True))
        # print(param_hdr.get_qform(coded=True))
        # print(mask_hdr.get_sform(coded=True))
        # print(mask_hdr.get_qform(coded=True))

        # mask_hdr.set_qform(param_hdr.get_sform, code='scanner')
        # param_hdr.set_qform(mask_hdr.get_sform, code='scanner')

        # if mask_qform == param_qform and mask_sform == param_sform:
        #     pass
        # elif mask_qform != param_qform and mask_sform != param_sform:
        #     mask_hdr.set_sform(param_hdr.get_sform, code='scanner')

        # affine = param_hdr.get_sform

        # img = nib.load(input_mask_file)
        # img.header.set_qform(affine, 1)
        # img.header.set_sform(affine, 1)
        # img.affine = affine
        # img_data = img.get_fdata()
        # plt.imshow(img_data,cmap='gray')
        # img.to_filename('tryout.nii')

        if size_x_mask != size_x_param and size_y_mask != size_y_param and size_z_mask != 1:
            full_resized_mask = np.zeros(
                (size_x_param, size_y_param, size_z_mask))
            with open("statistics.txt", "w+") as f:
                f.write(
                    "***************************************************************************** \n"
                )
                f.write("XNAT-PIC Pipeline: Mask Average\n")
                f.write(
                    "Apply a mask to a parametric map and compute mean value in the ROIs\n"
                )
                f.write("\n")
                f.write("Author: Sara Zullino \n")
                f.write("Mailto: [email protected] \n")
                f.write(
                    "***************************************************************************** \n"
                )
                f.write("\n")
                resized_mask = np.zeros(
                    (size_x_param, size_y_param, size_z_mask, nROI))
                for i in range(nROI):
                    mask_roi = mask[:, :, :, i]
                    #mask_copy_bin = np.sign(mask_copy)
                    mask_roi_resized = cv2.resize(
                        mask_roi,
                        dsize=(size_x_param, size_y_param),
                        interpolation=cv2.INTER_LINEAR)
                    mask_roi_resized_int = (mask_roi_resized != 0).astype(int)
                    resized_mask[:, :, :, i] = mask_roi_resized_int
                    temp = resized_mask[:, :, :, i]
                    full_resized_mask = full_resized_mask + temp

                    # Applying multiply method
                    param_image_mask = cv2.multiply(param_image_data,
                                                    resized_mask[:, :, :, i],
                                                    dtype=cv2.CV_32F)

                    param_image_mask_nonzero_array = param_image_mask[
                        param_image_mask > 0]
                    mean_T2 = param_image_mask_nonzero_array.mean()
                    std_T2 = param_image_mask_nonzero_array.std()
                    median_T2 = np.median(param_image_mask_nonzero_array)

                    print("param_image_mask_nonzero_array max is",
                          np.amax(param_image_mask_nonzero_array))
                    print("param_image_mask_nonzero_array min is",
                          np.amin(param_image_mask_nonzero_array))

                    R2_image_mask_nonzero_array = 1000 / param_image_mask_nonzero_array
                    mean_R2 = R2_image_mask_nonzero_array.mean()
                    std_R2 = R2_image_mask_nonzero_array.std()
                    median_R2 = np.median(R2_image_mask_nonzero_array)

                    # Count the number of nonzero pixels in the thresholded image
                    roi_area = cv2.countNonZero(resized_mask[:, :, :, i])

                    print("ROI number: {}".format(i + 1), file=f)
                    print("ROI file name: %s" %
                          str(os.path.basename(mask_files[i])),
                          file=f)
                    print("ROI Area: {:0.2f} ".format(roi_area), file=f)
                    print("Mean T2: {:0.2f} ms".format(mean_T2), file=f)
                    print("STD T2: {:0.2f} ms".format(std_T2, 2), file=f)
                    print("Median T2: {:0.2f} ms".format(median_T2), file=f)
                    print("Mean R2: {:0.2f} 1/s ".format(mean_R2, 2), file=f)
                    print("STD R2: {:0.2f} 1/s".format(std_R2, 2), file=f)
                    print("Median R2: {:0.2f} 1/s".format(median_R2), file=f)
                    f.write(
                        "----------------------------------------------------------------------------- \n"
                    )
            f.close()

            # Save nifti figures
            param_image_full_mask = np.zeros(
                (size_x_param, size_y_param, size_z_mask))
            for j in range(0, size_z_mask):
                param_image_full_mask[:, :,
                                      j] = cv2.multiply(param_image_data[:, :,
                                                                         j],
                                                        full_resized_mask[:, :,
                                                                          j],
                                                        dtype=cv2.CV_32F)
                #param_image_full_mask[:,:,j] = cv2.multiply(full_resized_mask[:,:,j], full_resized_mask[:,:,j], dtype=cv2.CV_32F)
            full_resized_param_img = nib.Nifti1Image(param_image_full_mask,
                                                     param_image.affine)
            full_resized_param_img.header.get_data_shape()
            nib.save(full_resized_param_img, '%s_masked.nii' % param_filename)

            full_resized_mask_img = nib.Nifti1Image(full_resized_mask,
                                                    mask_image.affine)
            full_resized_mask_img.header.get_data_shape()
            nib.save(full_resized_mask_img, 'mask.nii')

            # # Figure: PARAMETRIC MAP
            # def display_multiple_img(images, rows = 1, cols=1):
            #     figure, ax = plt.subplots(nrows=rows,ncols=cols )
            #     for ind,title in enumerate(images):
            #         ax.ravel()[ind].imshow(images[title], cmap='gray')
            #         ax.ravel()[ind].set_title(title)
            #         ax.ravel()[ind].set_axis_off()
            #     plt.tight_layout()
            #     #plt.suptitle('%s masked [s]'% param_filename)
            #     plt.savefig('%s_masked.png' % param_filename)
            #     plt.show()

            # total_images = size_z_mask
            # images = {str(i+1): param_image_full_mask[:,:,i] for i in range(total_images)}

            # display_multiple_img(images, 1, size_z_mask)

            # # Figure: MASK
            # def display_multiple_img(images, rows = 1, cols=1):
            #     figure, ax = plt.subplots(nrows=rows,ncols=cols )
            #     for ind,title in enumerate(images):
            #         ax.ravel()[ind].imshow(images[title], cmap='gray')
            #         ax.ravel()[ind].set_title(title)
            #         ax.ravel()[ind].set_axis_off()
            #     plt.tight_layout()
            #     #plt.suptitle('mask')
            #     plt.savefig('mask.png')
            #     plt.show()

            # total_images = size_z_mask
            # images = {str(i+1): full_resized_mask[:,:,i] for i in range(total_images)}

            # display_multiple_img(images, 1, size_z_mask)

        elif size_x_mask != size_x_param and size_y_mask != size_y_param and size_z_mask == 1:
            full_resized_mask = np.zeros((size_x_param, size_y_param))
            if size_x_mask != size_x_param and size_y_mask != size_y_param:
                resized_mask = np.zeros((size_x_param, size_y_param, nROI))
                with open("statistics.txt", "w+") as f:
                    f.write(
                        "***************************************************************************** \n"
                    )
                    f.write("XNAT-PIC Pipeline: Mask Average\n")
                    f.write(
                        "Apply a mask to a parametric map and compute mean value in the ROIs\n"
                    )
                    f.write("\n")
                    f.write("Author: Sara Zullino \n")
                    f.write("Mailto: [email protected] \n")
                    f.write(
                        "***************************************************************************** \n"
                    )
                    f.write("\n")
                    for i in range(nROI):
                        mask_roi = mask[:, :, :, i]
                        #mask_copy_bin = np.sign(mask_copy)
                        mask_roi_resized = cv2.resize(
                            mask_roi,
                            dsize=(size_x_param, size_y_param),
                            interpolation=cv2.INTER_LINEAR)
                        mask_roi_resized_int = (mask_roi_resized !=
                                                0).astype(int)
                        resized_mask[:, :, i] = mask_roi_resized_int
                        temp = resized_mask[:, :, i]
                        full_resized_mask = full_resized_mask + temp

                        # Applying multiply method
                        param_image_mask = cv2.multiply(param_image_data,
                                                        resized_mask[:, :, i],
                                                        dtype=cv2.CV_32F)

                        param_image_mask_nonzero_array = param_image_mask[
                            param_image_mask > 0]
                        mean_T2 = param_image_mask_nonzero_array.mean()
                        std_T2 = param_image_mask_nonzero_array.std()
                        median_T2 = np.median(param_image_mask_nonzero_array)

                        print("param_image_mask_nonzero_array max is",
                              np.amax(param_image_mask_nonzero_array))
                        print("param_image_mask_nonzero_array min is",
                              np.amin(param_image_mask_nonzero_array))

                        R2_image_mask_nonzero_array = 1000 / param_image_mask_nonzero_array
                        mean_R2 = R2_image_mask_nonzero_array.mean()
                        std_R2 = R2_image_mask_nonzero_array.std()
                        median_R2 = np.median(R2_image_mask_nonzero_array)

                        # Count the number of nonzero pixels in the thresholded image
                        roi_area = cv2.countNonZero(resized_mask[:, :, i])

                        print("ROI number: {}".format(i + 1), file=f)
                        print("ROI file name: %s" %
                              str(os.path.basename(mask_files[i])),
                              file=f)
                        print("ROI Area: {:0.2f} ".format(roi_area), file=f)
                        print("Mean T2: {:0.2f} ms".format(mean_T2), file=f)
                        print("STD T2: {:0.2f} ms".format(std_T2, 2), file=f)
                        print("Median T2: {:0.2f} ms".format(median_T2),
                              file=f)
                        print("Mean R2: {:0.2f} 1/s ".format(mean_R2, 2),
                              file=f)
                        print("STD R2: {:0.2f} 1/s".format(std_R2, 2), file=f)
                        print("Median R2: {:0.2f} 1/s".format(median_R2),
                              file=f)
                        f.write(
                            "----------------------------------------------------------------------------- \n"
                        )

                f.close()

            # Save nifti figures
            param_image_full_mask = cv2.multiply(param_image_data,
                                                 full_resized_mask,
                                                 dtype=cv2.CV_32F)

            full_resized_param_img = nib.Nifti1Image(param_image_full_mask,
                                                     param_image.affine)
            full_resized_param_img.header.get_data_shape()
            nib.save(full_resized_param_img, '%s_masked.nii' % param_filename)

            full_resized_mask_img = nib.Nifti1Image(full_resized_mask,
                                                    mask_image.affine)
            full_resized_mask_img.header.get_data_shape()
            nib.save(full_resized_mask_img, 'mask.nii')

    except Exception as e:
        print(e)
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback)
        sys.exit(1)
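
# A hedged usage sketch: input_mask_file is a newline-separated string of ROI
# mask paths (see the split("\n") above), not a list; the file names below
# are hypothetical.
# mask_average("T2map.nii", "roi1.nii\nroi2.nii", "/tmp/out")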
コード例 #41
0
imgCannyErosion = cv2.erode(imgCanny, kernel, iterations=1)
imgCannyDilation = cv2.dilate(imgCanny, kernel, iterations=1)
imgCannyDialThenErosion = cv2.erode(imgCannyDilation, kernel, iterations=1)

#operations
imgand = cv2.bitwise_and(img2Gray, imgGray, mask=None)
imgor = cv2.bitwise_or(img2Gray, imgGray, mask=None)
imgxor = cv2.bitwise_xor(img2Gray, imgGray, mask=None)
imgxnor = cv2.bitwise_not(imgxor, mask=None)
imgnand = cv2.bitwise_not(imgand, mask=None)
not1 = cv2.bitwise_not(imgGray, mask=None)
not2 = cv2.bitwise_not(img2Gray, mask=None)
coloradd = cv2.add(img, img2, mask=None)
colorsub = cv2.subtract(img, img2, mask=None)
colormulti = cv2.multiply(img, img2)
colordiv = cv2.divide(img, img2)

print("Image Dimension(h, w, 3): " + str(img.shape))
print("Resized Image Dimension(h, w, 3): " + str(imgResize.shape))
print("Cropped Image Dimension(h, w, 3): " + str(imgCropped.shape))

cv2.imwrite("Gray.jpg", imgGray)
cv2.imwrite("Blur.jpg", imgBlur)
cv2.imwrite("Canny.jpg", imgCanny)
cv2.imwrite("Dilation.jpg", imgDilation)
cv2.imwrite("Erosion.jpg", imgErosion)
cv2.imwrite("Resize.jpg", imgResize)
cv2.imwrite("Cropped.jpg", imgCropped)
cv2.imwrite("CannyDilation.jpg", imgCannyDilation)
cv2.imwrite("CannyErosion.jpg", imgCannyErosion)
コード例 #42
0
        if (np.sum(I) == 0):
            mean_sub[counter_row, counter_col] = 0
            std_sub[counter_row, counter_col] = 1
        else:
            means = np.mean(temp)
            stddevs = np.sqrt(np.var(temp))  #Computation of mean and std
            mean_sub[counter_row, counter_col] = means
            std_sub[counter_row, counter_col] = stddevs

row_img, col_img = img.shape
mean_full = np.zeros((row_img, col_img), dtype=np.double)
cv2.resize(mean_sub, (col_img, row_img),
           mean_full,
           interpolation=cv2.INTER_CUBIC)  # Interpolate the mean_dev
mask = mask.astype(float)
cv2.multiply(mean_full, mask, mean_full)  #multiplication by mask
std_full = cv2.resize(std_sub, (col_img, row_img))  #interpolate stddev
cv2.multiply(std_full, mask, std_full)  #multiplication by mask
img_sub = cv2.subtract(img, mean_full)
pcm_dist = cv2.divide(img_sub, std_full)

pcm_dist = np.abs(
    pcm_dist
)  # Compute the absolute value of the distance as it should be positive
cv2.multiply(pcm_dist, mask, pcm_dist)
pcm_dist[pcm_dist < 1] = 1  #choose the threshold globally as 1
pcm_dist[pcm_dist != 1] = 0

block_r = 512
block_c = 512
for counter_row in range(sz_r // block_r):  # integer division for Python 3
コード例 #43
0
    height, width = img_color.shape[:2]
    img_color = cv.resize(img_color, (width, height), interpolation=cv.INTER_AREA)

    # Convert the original image to HSV.
    img_hsv = cv.cvtColor(img_color, cv.COLOR_BGR2HSV)

    # Create masks from the HSV image using the range values.
    img_mask1 = cv.inRange(img_hsv, lower_blue1, upper_blue1)
    img_mask2 = cv.inRange(img_hsv, lower_blue2, upper_blue2)
    img_mask3 = cv.inRange(img_hsv, lower_blue3, upper_blue3)
    img_mask = img_mask1 | img_mask2 | img_mask3
    
    img_mask = cv.bitwise_not(img_mask)
    blurred = cv.GaussianBlur(img_mask, (11, 11), 0)
    blurred1 = cv.cvtColor(blurred, cv.COLOR_GRAY2BGR)
    blurred1 = blurred1.astype(float)/255
    img_color1 = img_color.astype(float)
    img_result = cv.multiply(blurred1, img_color1)

    cv.imshow('img_color', img_color)
    cv.imshow('img_mask', img_mask)
    img_result = np.uint8(img_result)
    cv.imshow('img_result', img_result)


    # Exit when the ESC key is pressed
    if cv.waitKey(1) & 0xFF == 27:
        break


cv.destroyAllWindows()
コード例 #44
0
    # D = avg(Xpow) -----------------------------------------------------------------------------------------------

    cv2.reduce(matXpow, 1, cv2.REDUCE_AVG, vetD, -1)

    # Y = inv(D)*X-------------------------------------------------------------------------------------------------

    for i in range(0, size_img):
        matY[i, :, 0] = (1 / vetD[i]) * matX[i, :, 0]

    for i in range(0, size_img):
        matY[i, :, 1] = (1 / vetD[i]) * matX[i, :, 1]

    # X = conjugate(X)--------------------------------------------------------------------------------------------

    for i in range(0, size_img):
        cv2.multiply(matX[i, :, 1], np.ones((total_img, 1)), matImX[i, :], -1,
                     -1)

    matX[:, :, 1] = matImX

    # print("--- X ---  size: {} ".format(matX.shape))
    # print matX[:, :, 1]

    # X'-----------------------------------------------------------

    matXtr[:, :, 0] = cv2.transpose(matX[:, :, 0])
    matXtr[:, :, 1] = cv2.transpose(matX[:, :, 1])

    # print("\n\n--- X' ---  size: {} ".format(matXtr.shape))
    # print matXtr

    # Z = X'*Y ----------------------------------------------------
コード例 #45
0
                                 np.uint8(gaussian_star_finder.mask))

                result = image.copy()

                cv2.drawKeypoints(result, kp, result, (0, 0, 255), 1)
        else:
            log_star_detector.kernel_size = kernel_size
            log_star_detector.block_size = block_size
            log_star_detector.threshold = threshold

            candidate_finder.setImage(image)

            if image_switch == 2:
                result = log_star_detector.debug

                masked = cv2.multiply(result, 1 - saturated)
                result = cv2.multiply(masked, gaussian_star_finder.mask) * 255
            else:
                result = image.copy()
                candidate_finder.drawCandidates(result)

        cv2.imshow(window, result)

        k = cv2.waitKey(30) & 0xFF
        if k == 27:
            break
        if k == ord('s'):
            filename = 'out.png'
            print('Saving ' + filename)
            cv2.imwrite(filename, result)
コード例 #46
0
import cv2
import numpy as np

img = cv2.imread("lena.jpg")

cv2.imshow("Origianl image", img)

cv2.waitKey(0)
#M=np.ones(img.shape,dtype="uint8")*150
M = np.zeros(img.shape, dtype="uint8") + 150
added = cv2.add(img, M)

cv2.imshow("Added", added)

cv2.waitKey(0)

subtracted = cv2.subtract(img, M)
cv2.imshow("Subtract", subtracted)
cv2.waitKey(0)

mul = cv2.multiply(img, M)
cv2.imshow("Multiply", mul)
cv2.waitKey(0)

cv2.destroyAllWindows()
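
# Worth noting about the calls above: OpenCV arithmetic saturates on uint8,
# while NumPy's own operators wrap modulo 256. A small self-contained check:
a = np.array([[250]], dtype=np.uint8)
b = np.array([[10]], dtype=np.uint8)
print(cv2.add(a, b))       # [[255]] -- saturates at 255
print(a + b)               # [[4]]   -- NumPy wraps: (250 + 10) % 256
print(cv2.multiply(a, b))  # [[255]] -- 2500 clipped to 255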
コード例 #47
0
    def __drawDNA(self, DNA, inImg):
        #get DNA data
        color = DNA[0]
        posX = int(
            DNA[2]) + self.padding  #add padding since indices have shifted
        posY = int(DNA[1]) + self.padding
        size = DNA[3]
        rotation = DNA[4]
        brushNumber = int(DNA[5])

        #load brush alpha
        brushImg = self.brushes[brushNumber]
        #resize the brush
        brushImg = cv2.resize(brushImg,
                              None,
                              fx=size,
                              fy=size,
                              interpolation=cv2.INTER_CUBIC)
        #rotate
        brushImg = self.__rotateImg(brushImg, rotation)
        #brush img data
        brushImg = cv2.cvtColor(brushImg, cv2.COLOR_BGR2GRAY)
        rows, cols = brushImg.shape

        #create a colored canvas
        myClr = np.copy(brushImg)
        myClr[:, :] = color

        #find ROI
        inImg_rows, inImg_cols = inImg.shape
        y_min = int(posY - rows / 2)
        y_max = int(posY + (rows - rows / 2))
        x_min = int(posX - cols / 2)
        x_max = int(posX + (cols - cols / 2))

        # Convert uint8 to float
        foreground = myClr[0:rows, 0:cols].astype(float)
        background = inImg[y_min:y_max, x_min:x_max].astype(float)  #get ROI
        # Normalize the alpha mask to keep intensity between 0 and 1
        alpha = brushImg.astype(float) / 255.0

        try:
            # Multiply the foreground with the alpha matte
            foreground = cv2.multiply(alpha, foreground)

            # Multiply the background with ( 1 - alpha )
            background = cv2.multiply(np.clip((1.0 - alpha), 0.0, 1.0),
                                      background)
            # Add the masked foreground and background.
            outImage = (np.clip(cv2.add(foreground, background), 0.0,
                                255.0)).astype(np.uint8)

            inImg[y_min:y_max, x_min:x_max] = outImage
        except:
            print('------ \n', 'in image ', inImg.shape)
            print('pivot: ', posY, posX)
            print('brush size: ', self.brushSide)
            print('brush shape: ', brushImg.shape)
            print(" Y range: ", rangeY, 'X range: ', rangeX)
            print('bg coord: ', posY, posY + rangeY, posX, posX + rangeX)
            print('fg: ', foreground.shape)
            print('bg: ', background.shape)
            print('alpha: ', alpha.shape)

        return inImg
コード例 #48
0
import cv2
import numpy as np

ksize_morph = np.ones((7, 7), np.uint8)
ksize_filter = (15, 15)
sigmaX = 2.4
sigmaY = 2.4

filename = '/home/yashmanian/images/Imgs/ironman.jpg'
img = cv2.imread(filename)
height, width, channels = img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

gray = cv2.GaussianBlur(gray, (11, 11), sigmaX=2, sigmaY=2, borderType=cv2.BORDER_CONSTANT)
ret, bin = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
masked = cv2.multiply(gray, bin)
masked = np.float32(masked)

dst = cv2.cornerHarris(masked, 5, 7, 0.04)
dst = cv2.dilate(dst, ksize_morph, iterations=1)

cv2.imshow('dst', dst)  # imshow needs a window name

if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
コード例 #49
0
background = cv2.imread("radar_bg.png", -1)
alpha = foreground[..., 3]
# alpha = cv2.imread("puppets_alpha.png")

# Convert uint8 to float
foreground = foreground.astype(float)
background = background.astype(float)

# Normalize the alpha mask to keep intensity between 0 and 1
alpha = alpha.astype(float) / 255

t1 = time.time()
for _ in range(times):

    # Multiply the foreground with the alpha matte. Work on copies so each
    # timed iteration starts from the original data instead of re-scaling
    # the previous result.
    fg = foreground.copy()
    fg[..., 0] = cv2.multiply(alpha, foreground[..., 0])
    fg[..., 1] = cv2.multiply(alpha, foreground[..., 1])
    fg[..., 2] = cv2.multiply(alpha, foreground[..., 2])

    # Multiply the background with ( 1 - alpha )
    bg = background.copy()
    bg[..., 0] = cv2.multiply(1.0 - alpha, background[..., 0])
    bg[..., 1] = cv2.multiply(1.0 - alpha, background[..., 1])
    bg[..., 2] = cv2.multiply(1.0 - alpha, background[..., 2])

    # Add the masked foreground and background.
    outImage = cv2.add(fg, bg)

t2 = time.time()
print(1e3 * (t2 - t1))

# # Display image
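
# The three per-channel multiplies above can also be collapsed into one
# vectorized NumPy expression by broadcasting the alpha plane; a sketch under
# the same variable names:
#
#   alpha3 = alpha[..., None]            # (H, W) -> (H, W, 1)
#   outImage = alpha3 * foreground + (1.0 - alpha3) * background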
コード例 #50
0
ファイル: analyze_object.py プロジェクト: npp97-field/plantcv
def analyze_object(img,
                   imgname,
                   obj,
                   mask,
                   device,
                   debug=False,
                   filename=False):
    # Outputs numeric properties for an input object (contour or grouped contours)
    # Also color classification?
    # img = image object (most likely the original), color(RGB)
    # imgname= name of image
    # obj = single or grouped contour object
    # device = device number. Used to count steps in the pipeline
    # debug= True/False. If True, print image
    # filename= False or image name. If defined print image
    device += 1
    ori_img = np.copy(img)
    if len(np.shape(img)) == 3:
        ix, iy, iz = np.shape(img)
    else:
        ix, iy = np.shape(img)
    size = ix, iy, 3
    size1 = ix, iy
    background = np.zeros(size, dtype=np.uint8)
    background1 = np.zeros(size1, dtype=np.uint8)
    background2 = np.zeros(size1, dtype=np.uint8)

    # Check is object is touching image boundaries (QC)
    frame_background = np.zeros(size1, dtype=np.uint8)
    frame = frame_background + 1
    frame_contour, frame_hierarchy = cv2.findContours(frame, cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_NONE)
    ptest = []
    vobj = np.vstack(obj)
    for i, c in enumerate(vobj):
        xy = tuple(c)
        pptest = cv2.pointPolygonTest(frame_contour[0], xy, measureDist=False)
        ptest.append(pptest)
    in_bounds = all(c == 1 for c in ptest)

    # Convex Hull
    hull = cv2.convexHull(obj)
    hull_vertices = len(hull)
    # Moments
    #  m = cv2.moments(obj)
    m = cv2.moments(mask, binaryImage=True)
    ## Properties
    # Area
    area = m['m00']

    if area:
        # Convex Hull area
        hull_area = cv2.contourArea(hull)
        # Solidity
        solidity = area / hull_area
        # Perimeter
        perimeter = cv2.arcLength(obj, closed=True)
        # x and y position (bottom left?) and extent x (width) and extent y (height)
        x, y, width, height = cv2.boundingRect(obj)
        # Centroid (center of mass x, center of mass y)
        cmx, cmy = (m['m10'] / m['m00'], m['m01'] / m['m00'])
        # Ellipse
        center, axes, angle = cv2.fitEllipse(obj)
        major_axis = np.argmax(axes)
        minor_axis = 1 - major_axis
        major_axis_length = axes[major_axis]
        minor_axis_length = axes[minor_axis]
        eccentricity = np.sqrt(1 - (axes[minor_axis] / axes[major_axis])**2)

        #Longest Axis: line through center of mass and point on the convex hull that is furthest away
        cv2.circle(background, (int(cmx), int(cmy)), 4, (255, 255, 255), -1)
        center_p = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
        ret, centerp_binary = cv2.threshold(center_p, 0, 255,
                                            cv2.THRESH_BINARY)
        centerpoint, cpoint_h = cv2.findContours(centerp_binary, cv2.RETR_TREE,
                                                 cv2.CHAIN_APPROX_NONE)

        dist = []
        vhull = np.vstack(hull)

        for i, c in enumerate(vhull):
            xy = tuple(c)
            pptest = cv2.pointPolygonTest(centerpoint[0], xy, measureDist=True)
            dist.append(pptest)

        abs_dist = np.absolute(dist)
        max_i = np.argmax(abs_dist)

        caliper_max_x, caliper_max_y = list(tuple(vhull[max_i]))
        caliper_mid_x, caliper_mid_y = [int(cmx), int(cmy)]

        xdiff = float(caliper_max_x - caliper_mid_x)
        ydiff = float(caliper_max_y - caliper_mid_y)

        if xdiff != 0:
            slope = (float(ydiff / xdiff))
        if xdiff == 0:
            slope = 1
        b_line = caliper_mid_y - (slope * caliper_mid_x)

        if slope == 0:
            xintercept = 0
            xintercept1 = 0
            yintercept = 'none'
            yintercept1 = 'none'
            cv2.line(background1, (iy, caliper_mid_y), (0, caliper_mid_y),
                     (255), 1)
        else:
            xintercept = int(-b_line / slope)
            xintercept1 = int((ix - b_line) / slope)
            yintercept = 'none'
            yintercept1 = 'none'
            if 0 <= xintercept <= iy and 0 <= xintercept1 <= iy:
                cv2.line(background1, (xintercept1, ix), (xintercept, 0),
                         (255), 1)
            elif xintercept < 0 or xintercept > iy or xintercept1 < 0 or xintercept1 > iy:
                if xintercept < 0 and 0 <= xintercept1 <= iy:
                    yintercept = int(b_line)
                    cv2.line(background1, (0, yintercept), (xintercept1, ix),
                             (255), 1)
                elif xintercept > iy and 0 <= xintercept1 <= iy:
                    yintercept1 = int((slope * iy) + b_line)
                    cv2.line(background1, (iy, yintercept1), (xintercept1, ix),
                             (255), 1)
                elif 0 <= xintercept <= iy and xintercept1 < 0:
                    yintercept = int(b_line)
                    cv2.line(background1, (0, yintercept), (xintercept, 0),
                             (255), 1)
                elif 0 <= xintercept <= iy and xintercept1 > iy:
                    yintercept1 = int((slope * iy) + b_line)
                    cv2.line(background1, (iy, yintercept1), (xintercept, 0),
                             (255), 1)
                else:
                    yintercept = int(b_line)
                    yintercept1 = int((slope * iy) + b_line)
                    cv2.line(background1, (0, yintercept), (iy, yintercept1),
                             (255), 1)

        ret1, line_binary = cv2.threshold(background1, 0, 255,
                                          cv2.THRESH_BINARY)
        #print_image(line_binary,(str(device)+'_caliperfit.png'))

        cv2.drawContours(background2, [hull], -1, (255), -1)
        ret2, hullp_binary = cv2.threshold(background2, 0, 255,
                                           cv2.THRESH_BINARY)
        #print_image(hullp_binary,(str(device)+'_hull.png'))

        caliper = cv2.multiply(line_binary, hullp_binary)
        #print_image(caliper,(str(device)+'_caliperlength.png'))

        caliper_y, caliper_x = np.array(caliper.nonzero())
        caliper_matrix = np.vstack((caliper_x, caliper_y))
        caliper_transpose = np.transpose(caliper_matrix)
        caliper_length = len(caliper_transpose)

        caliper_transpose1 = np.lexsort((caliper_y, caliper_x))
        caliper_transpose2 = [(caliper_x[i], caliper_y[i])
                              for i in caliper_transpose1]
        caliper_transpose = np.array(caliper_transpose2)

    else:
        hull_area, solidity, perimeter, width, height, cmx, cmy = 'ND', 'ND', 'ND', 'ND', 'ND', 'ND', 'ND'

    #Store Shape Data
    shape_header = ('HEADER_SHAPES', 'area', 'hull-area', 'solidity',
                    'perimeter', 'width', 'height', 'longest_axis',
                    'center-of-mass-x', 'center-of-mass-y', 'hull_vertices',
                    'in_bounds')

    shape_data = ('SHAPES_DATA', area, hull_area, solidity, perimeter, width,
                  height, caliper_length, cmx, cmy, hull_vertices, in_bounds)

    #Draw properties
    if area and filename:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), 1)
        cv2.drawContours(ori_img, [hull], -1, (0, 0, 255), 1)
        cv2.line(ori_img, (x, y), (x + width, y), (0, 0, 255), 1)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (0, 0, 255),
                 1)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])),
                 (tuple(caliper_transpose[0])), (0, 0, 255), 1)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (0, 0, 255), 1)
        # Output images with convex hull, extent x and y
        extention = filename.split('.')[-1]
        out_file = str(filename[0:-4]) + '_shapes.' + extention
        print_image(ori_img, out_file)
        print('\t'.join(map(str, ('IMAGE', 'shapes', out_file))))
    else:
        pass

    if debug:
        cv2.drawContours(ori_img, obj, -1, (255, 0, 0), 1)
        cv2.drawContours(ori_img, [hull], -1, (0, 0, 255), 1)
        cv2.line(ori_img, (x, y), (x + width, y), (0, 0, 255), 1)
        cv2.line(ori_img, (int(cmx), y), (int(cmx), y + height), (0, 0, 255),
                 1)
        cv2.circle(ori_img, (int(cmx), int(cmy)), 10, (0, 0, 255), 1)
        cv2.line(ori_img, (tuple(caliper_transpose[caliper_length - 1])),
                 (tuple(caliper_transpose[0])), (0, 0, 255), 1)
        print_image(ori_img, (str(device) + '_shapes.png'))

    return device, shape_header, shape_data, ori_img
コード例 #51
0
def letsHat(img, hat):

    # Get alpha channel for the hat img
    r, g, b, a = cv2.split(hat)
    hat_rgb = cv2.merge((r, g, b))

    cv2.imwrite("hat_alpha.jpg", a)
    cv2.imwrite("hat_rgb.jpg", hat_rgb)

    # Trained dlib face key points detector
    predictor_path = "shape_predictor_5_face_landmarks.dat"
    predictor = dlib.shape_predictor(predictor_path)

    # Dlib front face detector
    detector = dlib.get_frontal_face_detector()

    # Front face detection result
    face_detect = detector(img, 1)

    # If face detected
    if len(face_detect) > 0:
        for d in face_detect:  # for each face detected
            x, y, w, h = d.left(), d.top(), (d.right() -
                                             d.left()), (d.bottom() - d.top())
            # imgRect = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2, 8, 0)

            # 5 key points detection
            shape = predictor(img, d)
            # for point in shape.parts():
            # face_pts = cv2.circle(img, (point.x, point.y), 3, color = (0, 255, 0))
            # Draw 5 feature pts one by one
            # cv2.imshow('image', face_pts)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

            ## Draw detection retangle and pts on face
            # cv2.imshow('image', img)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

            # Select outermost feature pts on left and right eyes
            pt1 = shape.part(0)
            pt2 = shape.part(2)
            # print pt1, pt2

            # Calculate centre pt of eye
            centre_pt = ((pt1.x + pt2.x) // 2, (pt1.y + pt2.y) // 2)
            # face_centrept = cv2.circle(img, centre_pt, 3, color = (0, 255, 0))

            ## Draw centre pts on face
            # cv2.imshow('image', img)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

            # Adjust hat size according to face size
            # # shape[0]=width, shape[1]=height
            # print img.shape[0], img.shape[1]
            factor = 1.5
            resize_hat_h = int(
                round(hat_rgb.shape[0] * w / hat_rgb.shape[1] * factor))
            resize_hat_w = int(
                round(hat_rgb.shape[1] * w / hat_rgb.shape[1] * factor))

            if resize_hat_h > y:
                resize_hat_h = y - 1

            resize_hat = cv2.resize(hat_rgb, (resize_hat_w, resize_hat_h))
            # cv2.imshow('image', resize_hat)
            # cv2.imshow('image2', hat_rgb)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

            # Make resize mask from alpha channel
            mask = cv2.resize(a, (resize_hat_w, resize_hat_h))
            mask_inv = cv2.bitwise_not(mask)

            # Hat skew wrt face detection rectangle
            dh = 0
            dw = 0

            # ROI of figure image
            roi = img[(y + dh - resize_hat_h):(y + dh),
                      (x + dw):(x + resize_hat_w + dw)]
            # imgRect = cv2.rectangle(img, (x + dw, y + dh - resize_hat_h), (x + resize_hat_w + dw, y + dh), (255, 0, 0), 2, 8, 0)
            # imgRect = cv2.rectangle(img, (x + dw, y + dh), (x + dw, y + dh), (0, 2, 0), 2, 8, 0)
            # cv2.imshow('image', img)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
            roi = img[(y + dh - resize_hat_h) : (y + dh), \
               (centre_pt[0] - resize_hat_w // 3) : (centre_pt[0] + resize_hat_w // 3 * 2)]

            # Extract hat space in ROI
            roi = roi.astype(float)
            # print mask_inv
            mask_inv = cv2.merge((mask_inv, mask_inv, mask_inv))
            alpha = mask_inv.astype(float) / 255
            # print alpha
            if alpha.shape != roi.shape:
                alpha = cv2.resize(alpha, (roi.shape[1], roi.shape[0]))

            bg = cv2.multiply(alpha, roi)
            bg = bg.astype('uint8')

            # cv2.imshow('imge', bg)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

            # Extract hat region
            hat_region = cv2.bitwise_and(resize_hat, resize_hat, mask=mask)
            cv2.imwrite('hat.jpg', hat_region)
            # cv2.imshow('image', hat_region)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

            # print bg.shape, hat_region.shape
            if bg.shape != hat_region.shape:
                hat_region = cv2.resize(hat_region, (bg.shape[1], bg.shape[0]))

            # Add the two ROI (add hat to background image)
            add_hat = cv2.add(bg, hat_region)
            # cv2.imshow('addhat',add_hat)
            # cv2.imshow('hat', hat_region)
            # cv2.imshow('bg', bg)
            # cv2.imshow('original', img)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

            # Put the hat added region back to original image
            img[(y + dh - resize_hat_h) : (y + dh), \
              (centre_pt[0] - resize_hat_w // 3) : (centre_pt[0] + resize_hat_w // 3 * 2)]\
            = add_hat

        # Show the result and save
        cv2.imshow('original', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        return img

    else:
        print "No Face Detected!!!"
コード例 #52
0
def multiply_demo(m1, m2):
    # element-wise multiplication
    dst = cv.multiply(m1, m2)
    cv.imshow("multiply_demo", dst)
コード例 #53
0
import numpy as np
import cv2
import cv2.cv as cv
from PIL import Image
import scipy
import scipy.io
from matplotlib import pyplot as plt
from computebgimg import computebgimg
from LumConDrift import LumConDrift

image = cv2.imread(
    r'C:\Users\AK PUJITHA\Desktop\iiit h\semester 6\honors project 2\Image Enahancement-Matlab\output\image010.png',
    1)  # raw string so the backslashes are not treated as escapes
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
bgImg, fundusMask = computebgimg(image)
bgImg = cv2.multiply(image[:, :, 1].astype(float), bgImg.astype(float))
ldrift, cdrift = LumConDrift(bgImg, fundusMask)

g = image[:, :, 1].astype(float)

imgCorr = cv2.divide(cv2.subtract(g, ldrift), (cv2.add(cdrift, 0.0001)))
imgCorr = cv2.multiply(imgCorr, fundusMask.astype(float))

imgCorr = cv2.add(imgCorr, np.abs(np.min(imgCorr)))
imgCorr = cv2.divide(imgCorr, np.max(imgCorr))
imgCorr = cv2.multiply(imgCorr, fundusMask.astype(float))

image = image.astype(float)
image[:, :, 0] = cv2.divide(cv2.multiply(imgCorr, image[:, :, 0]),
                            hsv[:, :, 2].astype(float))
image[:, :, 1] = cv2.divide(cv2.multiply(imgCorr, image[:, :, 1]),
コード例 #54
0
        ##nose=nose[0]
    
        pts.append(chin[0])
        pts.append(chin[16])
        pts.append(nose[2])
        pts=np.float32(pts)
        ## nosepts=(np.array(nosepts).reshape((-1,1,2)))

        ##for cnt in nosepts:
          ##  cv2.draw(frame, [cnt], -1, (0,0,255), -1)
        
        glasstemp=transform(pts, glass_idx)
        alpha=glasstemp[:,:,3]/255.0
        glasstemp=glasstemp[:,:,0:3]
        for c in range(3):
            # cast to float so both cv2.multiply operands share a type
            frame[:,:,c]=cv2.multiply(1-alpha,frame[:,:,c].astype(float))
        for c in range(3):
            glasstemp[:,:,c]=cv2.multiply(alpha, glasstemp[:,:,c].astype(float))
        frame=cv2.add(frame, glasstemp)
        
    
        '''
        ## for drawing outlines of the eyes
        eye1=cv2.convexHull(np.array(lefteye))
        eye2=cv2.convexHull(np.array(righteye))
        cv2.drawContours(frame, [eye1], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [eye2], -1, (0, 255, 0), 1)
        
        '''
        
    
コード例 #55
0
import os
import cv2
import matplotlib.pyplot as plt

path = "Images/"

image1 = cv2.imread(path + "image.jpg", 1)  # 1 for RGB, 0 for B&W
image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)

image2 = cv2.imread(path + "lena.jpg", 1)
image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)

add = cv2.add(image2, image1)
sub = cv2.subtract(image2, image1)
mul = cv2.multiply(image2, image1)
div = cv2.divide(image2, image1)

plt.subplot(2, 3, 1)
plt.imshow(image1)
plt.title("First Image")

plt.subplot(2, 3, 2)
plt.imshow(image2)
plt.title("Second Image")

plt.subplot(2, 3, 3)
plt.imshow(add)
plt.title("Addition")

plt.subplot(2, 3, 4)
plt.imshow(sub)
コード例 #56
0
cv2.destroyAllWindows()

out_vid = []
trf = T.Compose([T.Resize(640),T.ToTensor(),T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ])
for i in range(len(frames)):
    ip = trf(frames[i]).unsqueeze(0)
    op = dlab(ip)['out']
    om = torch.argmax(op.squeeze(), dim=0).detach().cpu().numpy()
    print(om.shape)
    rgb = decode_segmap(om)
    output = rgb*frame_np[i]
    out_vid.append(output)
final_img = []
back = np.array(Image.open("./background.jpg").resize((640,640), Image.BILINEAR), np.float32)/255
for i in range(len(frames)):
    temp = out_vid[i].astype(np.float32)  # cv2.threshold supports 8-bit or 32-bit float, not float64
    thr, alpha = cv2.threshold(temp, 0, 255, cv2.THRESH_BINARY)
    alpha = cv2.GaussianBlur(alpha, (7,7), 0)
    alpha = alpha/255
    fore = cv2.multiply(alpha,temp)
    backg = cv2.multiply((1-alpha),back)
    outp = cv2.add(fore,backg)
    final_img.append(255*outp)


out = cv2.VideoWriter("./output.avi",cv2.VideoWriter_fourcc('M','P','E','G'), 30, (640,640))
for i in range(len(final_img)):
    out.write(np.uint8(final_img[i])) 
out.release()

コード例 #57
0
def rechteck():
    schwarz = False

    begrenzung = []

    #img = fotoMachen()
    # Color conversion
    # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Split the frame into its three color channels
    # h, s, v= cv2.split(hsv)
    b, g, r = cv2.split(img)

    # compute the masks
    satu = 30
    vis = 20
    # s_mask = cv2.inRange(s, (44*2.55), (77*2.55))
    # v_mask = cv2.inRange(v, 0, (13.7*2.55))
    b_mask = cv2.inRange(b, 0, 45)
    g_mask = cv2.inRange(g, 0, 25)
    r_mask = cv2.inRange(r, 0, 45)

    # Multiply the individual masks
    mask = cv2.multiply(g_mask, r_mask)
    mask = cv2.multiply(mask, b_mask)

    # Dilation kernel
    kernel = np.ones((2, 2), np.uint8)
    mask = cv2.dilate(mask, kernel, iterations=2)

    # Find contour areas
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    maxArea = 0

    # largest black area
    for index in range(len(contours)):
        area = cv2.contourArea(contours[index])
        if area > maxArea:
            maxArea = area
            i = index
            schwarz = True

    # Mark it
    if (schwarz):
        x1, y1, w1, h1 = cv2.boundingRect(contours[i])
        cv2.rectangle(mask, (x1 - 10, y1 - 10), (x1 + w1, y1 + h1),
                      (100, 255, 100), 3)
        vergleichsH = h1 * 0.9
    else:
        print("Etwas ist schief gelaufen, es gibt keine schwarze Fläche")
        cv2.imshow("Schief gelaufen", mask)
        sendNoteOn(6, 0)
        return

    # Outline the positions of the black stones in the "contours" array
    maxArea1 = 0

    for index in range(len(contours)):
        area = cv2.contourArea(contours[index])
        x, y, w, h = cv2.boundingRect(contours[index])
        if h > vergleichsH:
            print("Es gibt eine Flaeche, die ins Raster faellt")
            begrenzung.append(index)
            cv2.rectangle(mask, (x - 10, y - 10), (x + w, y + h),
                          (100, 255, 100), 3)
            i1 = index
            if h != h1:
                print("2. Fläche gefunden")
                x2 = x
                y2 = y
                w2 = w
                h2 = h
                maxArea1 = area

    gefundeneSteine = len(begrenzung)
    cv2.imshow('black stones', mask)

    # if no height is comparable, look for the second-largest area
    if gefundeneSteine < 2:
        print("Es gibt nur 1 Flaeche, die ins Raster gefallen ist")
        for index in range(len(contours)):
            area = cv2.contourArea(contours[index])
            x, y, w, h = cv2.boundingRect(contours[index])
            if (area > maxArea1 and area != maxArea):
                maxArea1 = area
                i1 = index

        x2, y2, w2, h2 = cv2.boundingRect(contours[i1])
        cv2.rectangle(mask, (x2 - 10, y2 - 10), (x2 + w2, y2 + h2),
                      (100, 255, 100), 3)

        if (y2 + h2 - 100) < (y2 + h2) < (y2 + h2 + 100):
            begrenzung.append(i1)

        cv2.imshow('black stones', mask)
    print(h1, vergleichsH, h2)
    print(begrenzung)

    #begrenzung.append(i1)
    #print (maxArea, maxArea1)

    if (len(begrenzung) != 2):
        print(
            "Something went wrong, there is only one black area or more than two"
        )
        cv2.imshow("Went wrong", mask)
        sendNoteOn(6, 0)
        return

    position = [x1, y1, w1, h1, x2, y2, w2, h2, maxArea1]
    return (position)
コード例 #58
0
def add_hat(img, hat_img):
    # Split the RGBA channels, merge RGB into a 3-channel hat image; the alpha channel is used later as a mask
    r, g, b, a = cv2.split(hat_img)
    rgb_hat = cv2.merge((r, g, b))

    cv2.imwrite("hat_alpha.jpg", a)

    # ------------------- Use dlib's face detection instead of OpenCV's -------------------
    # # Grayscale conversion
    # gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # # Detect faces with OpenCV's built-in cascade detector
    # face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    # faces = face_cascade.detectMultiScale(gray,1.05,3,cv2.CASCADE_SCALE_IMAGE,(50,50))

    # ------------------- Use dlib's face detection instead of OpenCV's -------------------

    # dlib facial landmark predictor
    predictor_path = "shape_predictor_5_face_landmarks.dat"
    predictor = dlib.shape_predictor(predictor_path)

    # dlib frontal face detector
    detector = dlib.get_frontal_face_detector()

    # Detect frontal faces
    dets = detector(img, 1)

    # If any faces were detected
    if len(dets) > 0:
        for d in dets:
            x, y, w, h = d.left(), d.top(
            ), d.right() - d.left(), d.bottom() - d.top()
            # x,y,w,h = faceRect
            # cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2,8,0)

            # Landmark detection: 5 keypoints
            shape = predictor(img, d)
            # for point in shape.parts():
            #     cv2.circle(img,(point.x,point.y),3,color=(0,255,0))

            # cv2.imshow("image",img)
            # cv2.waitKey()

            # Take the outer corner points of the left and right eyes
            point1 = shape.part(0)
            point2 = shape.part(2)

            # Midpoint of the two points
            eyes_center = ((point1.x + point2.x) // 2,
                           (point1.y + point2.y) // 2)

            # cv2.circle(img,eyes_center,3,color=(0,255,0))
            # cv2.imshow("image",img)
            # cv2.waitKey()

            # Scale the hat to the size of the face
            factor = 1.5
            resized_hat_h = int(
                round(rgb_hat.shape[0] * w / rgb_hat.shape[1] * factor))
            resized_hat_w = int(
                round(rgb_hat.shape[1] * w / rgb_hat.shape[1] * factor))

            if resized_hat_h > y:
                resized_hat_h = y - 1

            # 根据人脸大小调整帽子大小
            resized_hat = cv2.resize(rgb_hat, (resized_hat_w, resized_hat_h))

            # Use the alpha channel as the mask
            mask = cv2.resize(a, (resized_hat_w, resized_hat_h))
            mask_inv = cv2.bitwise_not(mask)

            # Offsets of the hat relative to the top edge of the face box
            dh = 0
            dw = 0
            # ROI in the original image
            # bg_roi = img[y+dh-resized_hat_h:y+dh, x+dw:x+dw+resized_hat_w]
            bg_roi = img[y + dh - resized_hat_h:y + dh,
                         (eyes_center[0] -
                          resized_hat_w // 3):(eyes_center[0] +
                                               resized_hat_w // 3 * 2)]

            # Extract the background part of the ROI where the hat will sit
            bg_roi = bg_roi.astype(float)
            mask_inv = cv2.merge((mask_inv, mask_inv, mask_inv))
            alpha = mask_inv.astype(float) / 255

            # Make sure both operands have the same size before multiplying
            # (the rounding above may leave them off by a pixel)
            alpha = cv2.resize(alpha, (bg_roi.shape[1], bg_roi.shape[0]))
            # print("alpha size: ", alpha.shape)
            # print("bg_roi size: ", bg_roi.shape)
            bg = cv2.multiply(alpha, bg_roi)
            bg = bg.astype('uint8')

            cv2.imwrite("bg.jpg", bg)
            # cv2.imshow("image", img)
            # cv2.waitKey()

            # Extract the hat region
            hat = cv2.bitwise_and(resized_hat, resized_hat, mask=mask)
            cv2.imwrite("hat.jpg", hat)

            # cv2.imshow("hat", hat)
            # cv2.imshow("bg", bg)

            # print("bg size: ", bg.shape)
            # print("hat size: ", hat.shape)

            # Make sure both operands have the same size before adding
            hat = cv2.resize(hat, (bg_roi.shape[1], bg_roi.shape[0]))
            # Add the two ROI regions
            add_hat = cv2.add(bg, hat)
            # cv2.imshow("add_hat", add_hat)

            # Put the composited region back into the original image
            img[y + dh - resized_hat_h:y + dh,
                (eyes_center[0] -
                 resized_hat_w // 3):(eyes_center[0] +
                                      resized_hat_w // 3 * 2)] = add_hat

            # Show the result
            # cv2.imshow("img", img)
            # cv2.waitKey(0)

            return img
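
A minimal driver for the function above. This is a sketch: the filenames are illustrative, and the hat image must carry an alpha channel, so it is read with cv2.IMREAD_UNCHANGED.

import cv2

img = cv2.imread("person.jpg")                          # any BGR photo
hat_img = cv2.imread("hat.png", cv2.IMREAD_UNCHANGED)   # RGBA hat, alpha kept

output = add_hat(img, hat_img)
if output is not None:                                  # None if no face found
    cv2.imwrite("person_with_hat.jpg", output)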
コード例 #59
0
import cv2
import numpy as np

img = cv2.imread('C:/Users/yogesh yadav/Desktop/lena.jpg')
cv2.imshow('LENA_original', img)
cv2.waitKey(0)

# Brighten: add a constant 100 to every pixel (cv2.add saturates at 255)
a = np.ones(img.shape, dtype="uint8") * 100
added = cv2.add(img, a)
cv2.imshow("added", added)
cv2.waitKey(0)

# Scale: per-pixel multiply, also saturating, so most pixels clip to white
multi = cv2.multiply(img, a)
cv2.imshow("multiply", multi)
cv2.waitKey(0)
cv2.destroyAllWindows()
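
The point of the demo above is that OpenCV arithmetic saturates while NumPy arithmetic wraps. A minimal sketch of the difference:

import cv2
import numpy as np

x = np.uint8([[250]])
y = np.uint8([[10]])
print(cv2.add(x, y))   # [[255]] -- OpenCV saturates at 255
print(x + y)           # [[4]]   -- NumPy wraps around modulo 256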
コード例 #60
0
def addHat(img, hat_img):
    # Split the RGBA channels and merge an RGB three-channel hat image;
    # the alpha channel is used later as a mask
    r, g, b, a = cv2.split(hat_img)
    rgbHat = cv2.merge((r, g, b))

    # dlib frontal face detection ("detector" is assumed to be a module-level
    # dlib.get_frontal_face_detector(); see the sketch after this function)
    dets = detector(img, 1)

    # If any face was detected
    if len(dets) > 0:
        for d in dets:
            x, y, w, h = d.left(), d.top(), d.right() - d.left(), d.bottom() - d.top()

            # Landmark detection with the 5-point model ("predictor" is
            # assumed to be a module-level dlib.shape_predictor)
            shape = predictor(img, d)

            # Pick the corner points of the left and right eyes
            point1 = shape.part(0)
            point2 = shape.part(2)

            # Midpoint between the two eye corners
            eyes_center = ((point1.x + point2.x) // 2,
                           (point1.y + point2.y) // 2)

            # Scale the hat to the face width, preserving its aspect ratio
            factor = 1.5
            resizedHatH = int(
                round(rgbHat.shape[0] * w / rgbHat.shape[1] * factor))
            resizedHatW = int(
                round(rgbHat.shape[1] * w / rgbHat.shape[1] * factor))

            # Keep the hat from extending above the top of the frame
            if resizedHatH > y:
                resizedHatH = y - 1

            resizedHat = cv2.resize(rgbHat, (resizedHatW, resizedHatH))

            # Use the alpha channel as the mask
            mask = cv2.resize(a, (resizedHatW, resizedHatH))
            maskInv = cv2.bitwise_not(mask)

            # Offset of the hat relative to the top edge of the face box
            dh = 0
            bgRoi = img[y + dh - resizedHatH:y + dh,
                        (eyes_center[0] -
                         resizedHatW // 3):(eyes_center[0] +
                                            resizedHatW // 3 * 2)]

            # Extract the background part of the ROI where the hat will sit
            bgRoi = bgRoi.astype(float)
            maskInv = cv2.merge((maskInv, maskInv, maskInv))
            alpha = maskInv.astype(float) / 255

            # Make sure both operands have the same size before multiplying
            # (the rounding above may leave them off by a pixel)
            alpha = cv2.resize(alpha, (bgRoi.shape[1], bgRoi.shape[0]))
            bg = cv2.multiply(alpha, bgRoi)
            bg = bg.astype('uint8')

            # Extract the hat region
            hat = cv2.bitwise_and(resizedHat, cv2.bitwise_not(maskInv))

            # Make sure both operands have the same size before adding
            hat = cv2.resize(hat, (bgRoi.shape[1], bgRoi.shape[0]))
            # Add the two ROI regions
            addHat = cv2.add(bg, hat)

            # Put the composited region back into the original image
            img[y + dh - resizedHatH:y + dh,
                (eyes_center[0] -
                 resizedHatW // 3):(eyes_center[0] +
                                    resizedHatW // 3 * 2)] = addHat

            return img
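
The function above uses two names that the snippet never defines. A minimal sketch of the module-level setup it assumes (the .dat filename matches dlib's standard 5-point landmark release; the image paths are illustrative):

import cv2
import dlib

# Module-level globals assumed by addHat()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_5_face_landmarks.dat")

img = cv2.imread("person.jpg")
hat_img = cv2.imread("hat.png", cv2.IMREAD_UNCHANGED)   # keep the alpha channel
result = addHat(img, hat_img)
if result is not None:
    cv2.imwrite("person_with_hat.jpg", result)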