def getGradientImageInfo(gray):
	temp1=gray
	gx=cv2.Sobel(temp1, cv2.CV_16S, 1, 0, None, 3, 1, 0, cv2.BORDER_DEFAULT)
	gy=cv2.Sobel(temp1, cv2.CV_16S, 0, 1, None, 3, 1, 0, cv2.BORDER_DEFAULT)
	gm=cv2.add(cv2.pow(gx, 2), cv2.pow(gy, 2))	# squared gradient magnitude
	gm=pylab.sqrt(gm)	# gradient magnitude
	gd=cv2.add(np.arctan(gx), np.arctan(gy))*(180/math.pi)	# rough per-axis direction estimate, in degrees
	# subsample (and flip vertically) for the quiver plot
	resolution=5
	gx=gx[::resolution*-1,::resolution]
	gy=gy[::resolution*-1,::resolution]
	gm=gm[::resolution*-1,::resolution]
	gd=gd[::resolution*-1,::resolution]
	# demo quiver grid (unused by the plot below)
	X,Y = np.meshgrid( np.arange(0,2*math.pi,.2),np.arange(0,2*math.pi,.2))
	U = pylab.cos(X)
	V = pylab.sin(Y)
	q=matplotlib.pyplot.quiver(gx,gy)
#	key=matplotlib.pyplot.quiverkey(q, 1, 1, 5, 'test', coordinates='data', color='b')
	#matplotlib.pyplot.show()
	matplotlib.pyplot.close()
	#cv2.imshow('gd', gd)
	return gx,gy,gm,gd, resolution
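# A minimal usage sketch for getGradientImageInfo (an assumption, not from the
# source); 'input.png' is a placeholder for any image readable as grayscale.
import cv2

gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)
gx, gy, gm, gd, res = getGradientImageInfo(gray)
print(gm.shape, res)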
Example #2
def mergeImages(images, transforms):
    # Calculate the size of the final merged image
    minP, maxP = ( np.array([0, 0]), np.array([0, 0]) )
    for trans in transforms:
        width, height = images[trans.getSourceIndex()].width, images[trans.getSourceIndex()].height
        for point in ([0, 0], [0, height], [width, 0], [width, height]):
            current = trans.transform(point)
            for j in (0, 1):
                if current[j] < minP[j]:
                    minP[j] = current[j]
                if current[j] > maxP[j]:
                    maxP[j] = current[j]
    if minP[0] < 0 or minP[1] < 0:
        delta = -minP.clip(None, 0)
        for trans in transforms:
            trans.addDelta( delta )
    size = [ maxP[0] - minP[0], maxP[1] - minP[1] ]
    canvas = np.zeros( ( size[1], size[0], 3), np.uint8 )
    for i in range( 0, len(images) ):
        print "\tMerging image " + str(i)
        warped = cv2.warpPerspective( images[i].imgData, transforms[i].mat, ( size[0], size[1] ) )
        ret, mask = cv2.threshold( cv2.cvtColor( warped, cv2.COLOR_BGR2GRAY ), 0, 255, cv2.THRESH_BINARY )
        canvas_bg = cv2.bitwise_and( canvas, canvas, mask = cv2.bitwise_not(mask) )
        warped_fg = cv2.bitwise_and( warped, warped, mask = mask )
        cv2.add(canvas_bg, warped_fg, canvas)
    return canvas
Example #3
def stitch(img1, img2):

    MIN_MATCH_COUNT = 10

    # Initiate SIFT detector
    sift = cv2.SIFT()  # use cv2.SIFT_create() on OpenCV 4.4+


    # find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)

    flann_matcher = get_matcher()
    matches = flann_matcher.knnMatch(des1, des2, k=2)

    # store all the good matches as per Lowe's ratio test.
    good = []
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)

    if len(good)>MIN_MATCH_COUNT:

        src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

        H, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)

        (o_size, offset) = calc_size(img1.shape, img2.shape, H)

        # y
        dst_h = o_size[0]
        # x
        dst_w = o_size[1]

        offset_h = np.matrix(np.identity(3), np.float32)
        offset_h[0,2] = offset[0]
        offset_h[1,2] = offset[1]

        warped_1 = cv2.warpPerspective(
                    img1,
                    offset_h,
                    (dst_w, dst_h)
                )

        warped_2 = cv2.warpPerspective(
                    img2,
                    H,
                    (dst_w, dst_h)
                )

        out = np.zeros((dst_h, dst_w), np.uint8)
        out = cv2.add(out, warped_1, dtype=cv2.CV_8U)
        out = cv2.add(out, warped_2, dtype=cv2.CV_8U)

        return out
    else:
        print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
        matchesMask = None
def addmask2frame(mask, frame):
	frame2 = np.copy(frame)
	mask_all = np.zeros_like(mask[:,:,:,0])
	for i in range(0, 19):
		frame2 = cv2.add(frame2,mask[:,:,:,i])
		mask_all = cv2.add(mask_all,mask[:,:,:,i])
	return frame2, mask_all
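# A minimal usage sketch for addmask2frame (an assumption, not from the source):
# the indexing above implies `mask` is a uint8 array of shape (H, W, 3, 19) with
# one BGR overlay per last-axis index; 'frame.png' is a placeholder file name.
import cv2
import numpy as np

frame = cv2.imread('frame.png')
h, w = frame.shape[:2]
mask = np.zeros((h, w, 3, 19), dtype=np.uint8)   # 19 empty overlays
overlaid, mask_all = addmask2frame(mask, frame)
cv2.imwrite('overlaid.png', overlaid)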
def GetIrisUsingThreshold(gray,pupil):
	tempResultImg = cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR) #used to draw temporary results
	props = RegionProps()
	val,binI =cv2.threshold(gray, 110, 255, cv2.THRESH_BINARY_INV)
	n=3
	n2=3
	ker=np.ones((n,n))/(n**2)		
	ker2=np.ones((n2,n2))/(n2**2)			
	temp1=cv2.dilate(binI, ker2)
	temp2=cv2.erode(binI, ker)
	binI=cv2.add(temp1,temp2)	# combine the dilated and eroded masks
	cv2.imshow("Threshold",binI)	
	contours, hierarchy = cv2.findContours(binI, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)	
	a=500.0
	b=6000.0
	c=0.30
	glintElipseArray=[]	
	for i in contours:
		vals = props.CalcContourProperties(i.astype('int'),['Area','Length','Centroid','Extend','ConvexHull'])
		if a<vals['Area'] and b>vals['Area']:
			if c<vals['Extend']:
				x,y=vals['Centroid']
				cv2.circle(tempResultImg,(int(x),int(y)), 2, (0,255,0),4) #draw a circle
				if (len(i.astype(int))>=5):
					temp=cv2.fitEllipse(i.astype(int))
					ax=int(temp[0][0])
					bx=int(temp[0][1])
					cx=int((temp[1][0]+temp[1][1])/4)
					glintElipseArray.append(cv2.fitEllipse(i.astype(int)))
	cv2.imshow("TempResults",tempResultImg)
	return glintElipseArray
Example #6
def do_frame():
    global current_capture
    current_capture = get_image()
    cv2.imshow('capture', current_capture)
    blur_img = cv2.blur(current_capture, (5, 5))
    hsv_img = cv2.cvtColor(blur_img, cv2.COLOR_BGR2HSV)

    hue_min, hue_max = get_hues()
    thresh_img = cv2.inRange(hsv_img, hue_min, hue_max)
    out = np.zeros(current_capture.shape, np.uint8)
    cv2.add(out, current_capture, dst=out, mask=thresh_img)
    hsv_pixel = current_hue_pix.copy()
    hsv_pixel[1] = hsv_pixel[2] = 255
    bgr_hue = col(hsv_pixel, hsv=True)
    hsv_pixel[0] += 90
    hsv_pixel[0] %= 180
    bgr_complement = col(hsv_pixel, hsv=True)
    cv2.rectangle(out, (0, 0), (10, 10), bgr_hue, -1)
    contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        big_contours = [c for c in contours if cv2.contourArea(c) > 1000]
        cv2.drawContours(out, big_contours, -1, bgr_complement)
        moments = [cv2.moments(c) for c in big_contours]
        central_moments = [calc_central_moments(m) for m in moments]
        for x, y in central_moments:
            cv2.circle(out, (int(x), int(y)), 15, bgr_complement, -1)

    cv2.imshow('output', out)
    key = cv2.waitKey(50)
    if key == ESCAPE_KEY:
        sys.exit()
def process_image(img):
    img = cv2.GaussianBlur(img, (1, 1), 0)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    contourImgs = []
    for i in range(len(hTracklist)):
        # build the HSV window around the tracked colour; clamp S/V to [0, 255]
        # and wrap H into [0, 180] (computed on plain ints to avoid uint8 wraparound)
        h, s, v = int(hTracklist[i]), int(sTracklist[i]), int(vTracklist[i])
        tempLow = np.array([(h - 7) % 180, max(s - 70, 0), max(v - 70, 0)], dtype=np.uint8)
        tempHigh = np.array([(h + 7) % 180, min(s + 70, 255), min(v + 70, 255)], dtype=np.uint8)
        tempImg = cv2.inRange(hsv, tempLow, tempHigh)
        if tempLow[0]>tempHigh[0]:
            u1 = np.array([180, tempHigh[1], tempHigh[2]], dtype=np.uint8)
            tempImg = cv2.add(tempImg, cv2.inRange(hsv, tempLow, u1))
            l1 = np.array([0, tempLow[1], tempLow[2]], dtype=np.uint8)
            tempImg = cv2.add(tempImg, cv2.inRange(hsv, l1, tempHigh))
        tempImg = cv2.morphologyEx(tempImg, cv2.MORPH_OPEN, (5, 5), iterations=5)
        image, contours, hierarchy = cv2.findContours(tempImg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        
        contourImgs.append(contours)
    return contourImgs, hsv
def _colour_approach(person, background):

	# splitting the image into its blue, green & red components (OpenCV BGR order)

	person_c = [person[:,:,0], person[:,:,1], person[:,:,2]]
	background_c = [background[:,:,0], background[:,:,1], background[:,:,2]]

	# subtracting images by component

	diff_c = []

	diff_c.append(cv.absdiff(person_c[0], background_c[0]))
	diff_c.append(cv.absdiff(person_c[1], background_c[1]))
	diff_c.append(cv.absdiff(person_c[2], background_c[2]))

	# applying Gaussian blur to each component (reducing noise)

	diff_c[0] = cv.GaussianBlur(diff_c[0], (5, 5), 0)
	diff_c[1] = cv.GaussianBlur(diff_c[1], (5, 5), 0)
	diff_c[2] = cv.GaussianBlur(diff_c[2], (5, 5), 0)

	# merging components to a grey image 
	# cv.add() is a saturated operation (250 + 10 = 260 => 255)

	diff = cv.add(cv.add(diff_c[0], diff_c[1]), diff_c[2])

	# applying Gaussian blur again

	diff_b = cv.GaussianBlur(diff, (11, 11), 0)

	return diff_b
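# A minimal usage sketch for _colour_approach (an assumption, not from the
# source): `cv` is OpenCV imported under that alias, the two placeholder files
# are same-sized BGR frames, and thresholding the result gives a rough
# foreground mask.
import cv2 as cv

person = cv.imread('person.png')
background = cv.imread('background.png')
diff = _colour_approach(person, background)
_, fg_mask = cv.threshold(diff, 30, 255, cv.THRESH_BINARY)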
def side_binarization_adaptive_thresh(image, configuration):
    img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    result1 = b_algorithm.adaptive_thresh_mean_c(
        img,
        41,
        20,
        mask=configuration.roi_main.mask)

    result2 = b_algorithm.adaptive_thresh_mean_c(
        img,
        399,
        45,
        mask=configuration.roi_main.mask)

    result3 = b_algorithm.adaptive_thresh_gaussian_c(
        img,
        41,
        20,
        mask=configuration.roi_main.mask)

    result = cv2.add(result1, result2)
    result = cv2.add(result, result3)

    return result
def online_variance(new_data,curr_var,curr_iter,curr_mean):
	if curr_iter==1:
		new_mean = new_data
		new_var = np.zeros_like(new_data)	# variance is zero after one sample
		return new_mean,new_var
	else:
		pa=cv2.subtract(new_data,curr_mean)
		pa=pa/float(curr_iter)
		new_mean=cv2.add(pa,curr_mean)
		#new_mean = curr_mean + (new_data - curr_mean)/curr_iter

		prev_S = curr_var * (curr_iter - 2)

		pd1=cv2.subtract(new_data,curr_mean)
		pd2=cv2.subtract(new_data,new_mean)
		pd=cv2.multiply(pd1,pd2)
		new_S=cv2.add(pd,prev_S)
		#new_S = prev_S + (new_data - curr_mean) .* (new_data - new_mean)

		new_var=new_S/float(curr_iter-1)
		return new_mean,new_var
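# A minimal sketch (an assumption, not from the source) of keeping a running
# per-pixel mean and variance with online_variance over video frames:
import cv2

cap = cv2.VideoCapture('video.avi')   # placeholder source
mean, var = None, None
it = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    it += 1
    mean, var = online_variance(frame.astype('float32'), var, it, mean)
cap.release()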
Example #11
def side_binarization(image, mean_image, configuration):
    """
    Binarization of side image based on mean shift difference

    :param image: BGR image
    :param mean_image: Mean image
    :param configuration: Object BinarizeConfiguration
    :return: Binary image
    """
    roi_main = configuration.roi_main

    mask = cv2.add(roi_main.mask,
                   configuration.roi_orange_band.mask)

    mask = cv2.add(mask,
                   configuration.roi_panel.mask)

    result = b_algorithm.mixed_binarization(
        image,
        mean_image,
        configuration.roi_main.hsv_min,
        configuration.roi_main.hsv_max,
        configuration.meanshift_binarization_factor.threshold,
        configuration.meanshift_binarization_factor.dark_background,
        mask,
        configuration.roi_main.mask)

    mask_clean_noise = cv2.add(configuration.roi_orange_band.mask,
                               configuration.roi_panel.mask)

    result = b_processing.clean_noise(result, mask_clean_noise)

    return result
def imageenhancement(image):
    hsv=cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
    bgImg,fundusMask = computebgimg(image)
    bgImg = cv2.multiply(image[:,:,1].astype(float),bgImg.astype(float))
    ldrift,cdrift = LumConDrift(bgImg,fundusMask)

    g = image[:,:,1].astype(float)

    imgCorr = cv2.divide(cv2.subtract(g,ldrift),(cv2.add(cdrift,0.0001)))
    imgCorr = cv2.multiply(imgCorr,fundusMask.astype(float))

    imgCorr = cv2.add(imgCorr,np.abs(np.min(imgCorr)))
    imgCorr = cv2.divide(imgCorr,np.max(imgCorr))
    imgCorr = cv2.multiply(imgCorr,fundusMask.astype(float))


    image = image.astype(float)
    image[:,:,0] = cv2.divide(cv2.multiply(imgCorr,image[:,:,0]),hsv[:,:,2].astype(float))
    image[:,:,1] = cv2.divide(cv2.multiply(imgCorr,image[:,:,1]),hsv[:,:,2].astype(float))
    image[:,:,2] = cv2.divide(cv2.multiply(imgCorr,image[:,:,2]),hsv[:,:,2].astype(float))


    fundusMask = fundusMask.astype(float)
    image[:,:,0] = cv2.multiply(image[:,:,0],fundusMask)
    image[:,:,1] = cv2.multiply(image[:,:,1],fundusMask)
    image[:,:,2] = cv2.multiply(image[:,:,2],fundusMask)
    out = image[:,:,1]*255
    return out 
def mirror4(img):
    """
    Create 4 mirrored images and return a merged one.

    img: a gray-scaled image
    """
    height, width = img.shape

    # the upper left
    affine = np.array([[0.5, 0.0, 0.0],
                       [0.0, 0.5, 0.0]])
    img_tmp1 = cv2.warpAffine(img, affine, (width, height))

    # the upper right
    affine = np.array([[-0.5, 0.0, width-1],
                       [ 0.0, 0.5,     0.0]])
    img_tmp2 = cv2.warpAffine(img, affine, (width, height))

    # the lower right
    affine = np.array([[-0.5,  0.0,  width-1],
                       [ 0.0, -0.5, height-1]])
    img_tmp3 = cv2.warpAffine(img, affine, (width, height))

    # the lower left
    affine = np.array([[0.5,  0.0,      0.0],
                       [0.0, -0.5, height-1]])
    img_tmp4 = cv2.warpAffine(img, affine, (width, height))

    return cv2.add(cv2.add(img_tmp1, img_tmp2), cv2.add(img_tmp3, img_tmp4))
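# A minimal usage sketch for mirror4 (an assumption, not from the source);
# 'input.png' is a placeholder for any image readable as grayscale.
import cv2

img = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)
merged = mirror4(img)
cv2.imwrite('mirror4.png', merged)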
		def setROI():
			global cut, refPt, lin, size
			
			frame1 = self.camera.getImage()
			gray_frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
			#gray_frame1 = gray_frame1[0:420, 0:640]
			size = gray_frame1.shape
			print(size)
			lin = np.zeros(size, dtype=np.uint8)

			img = cv2.add(gray_frame1, lin)
			if not cut:			
				cv2.imshow('ROI SELECTION', img)
				cv2.setMouseCallback('ROI SELECTION', self.click_and_crop)


			while (not cut):
				frame = self.camera.getImage()
				gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
				#gray_frame = gray_frame[60:420, 0:640]
				
				img_tru = cv2.add(gray_frame, lin)
				cv2.imshow('ROI SELECTION', img_tru)
				key = cv2.waitKey(1) & 0xFF
				if len(refPt) == 2 and not cut:
					cv2.destroyWindow('ROI SELECTION')
					cut = True
					break
				else:
					continue
def reconstruction(output,fps,laplacian,gaussian,origGaussian,levels):
	print "Reconstructing the video..."
	l,w,h,c = origGaussian[0].shape
	vidType = cv2.VideoWriter_fourcc('M','J','P','G')
	vid = cv2.VideoWriter(output,vidType,fps,(h,w),1)
	for i in range(l):
		for j in range(levels):
			# Get the lowest level as the filtered gaussian and scale it up 
			# at every interval
			if j==0:
				imgFiltered = cv2.convertScaleAbs(gaussian[levels-1][i].real)
			else:
				imgFiltered = cv2.pyrUp(imgFiltered)

			# Take the orignal gaussian
			imgOriginal = origGaussian[levels-j-1][i]

			# And the original Laplacian
			imgLap = laplacian[j+1][i]

			# Add them together for most layers
			img = cv2.add(imgOriginal,imgLap)

			# For the important layer, add in the filtered result
			if j==levels-1:
				img = cv2.add(imgFiltered[:img.shape[0],:img.shape[1]],img[:imgFiltered.shape[0],:imgFiltered.shape[1]])
				img = cv2.convertScaleAbs(img)

		vid.write(img)
	vid.release()
	def getThresholdedImg(self):
		#basic structure taken from color ball tracking code - have modified parameters
		green = cv2.inRange(self.hsv,self.greenMin,self.greenMax)
		blue = cv2.inRange(self.hsv,self.blueMin,self.blueMax)
		red = cv2.inRange(self.hsv,self.redMin,self.redMax)
		self.colors = cv2.add(green,blue)
		self.colors = cv2.add(red,self.colors)
		return self.colors
Example #17
def contrast(image):
	# three linear stretch passes (gain, then negative offset), then binarize
	img = cv2.add(cv2.multiply(image, 2.0), -60)
	img = cv2.add(cv2.multiply(img, 2.0), -60)
	img = cv2.add(cv2.multiply(img, 2.1), -100)
	(ret, img) = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
	return img
Example #18
def add(img, img1):
    # element-wise saturated addition of the two input images
    return cv2.add(img, img1)
def img_adj(frame):
    """On-screen live adjustment of brightness and contrast."""
    global brightness, contrast
    
    brightness = 1.5
    contrast = -2.
    sat = 0.
    inc = 0.5

    finished = False
    scale = 1300./frame.shape[1]
    frame = cv2.resize(frame,None,fx=scale,fy=scale,interpolation = cv2.INTER_CUBIC)

    mod_img = frame[upperPt[1]:lowerPt[1],upperPt[0]:lowerPt[0]]
    prime_img = frame[:]
        
    cv2.imshow('mod',mod_img)
    
    while not finished:
        mod_img = adjBrtCont(prime_img,brightness, contrast)
        drawMatchColor(mod_img,brightness,contrast)
        cv2.imshow('mod',mod_img)

        k = cv2.waitKey(0)
        if k == ord('w'): #w increases contrast
            contrast -= 2.
            mod_img = cv2.add(prime_img,np.array([contrast]))

        elif k == ord('s'): #s decreases contrast
            contrast += 2.
            mod_img = cv2.add(prime_img,np.array([contrast]))

        elif k == ord('a'): #a decreases brightness
            brightness -= 0.05
            mod_img = cv2.multiply(prime_img,np.array([brightness]))

        elif k == ord('d'): #d increases brightness
            brightness += 0.05
            mod_img = cv2.multiply(prime_img,np.array([brightness]))

        elif k == ord('q'): #q increases saturation
            sat += inc
            mod_img[:,:,1] = mod_img[:,:,1]+inc

        elif k == ord('e'): #e decreases saturation
            sat -= inc
            mod_img[:,:,1] = mod_img[:,:,1]-inc

        elif k == ord('o'): #show original
            cv2.imshow('original',frame)
            #k = cv2.waitKey()
            
        elif k == ord('x'): #x exits adjustment
            finished = True
            cv2.destroyAllWindows()

    cv2.destroyAllWindows()
    return brightness, contrast
Example #20
def doMDScalibration(debug):
	retVal = (0, 0)
	cap = cv2.VideoCapture(1) 
	cap.set(10,-0.5) #set brightness (CAP_PROP_BRIGHTNESS)
	cap.set(12,0.8) #set saturation (CAP_PROP_SATURATION)
	ret, frame = cap.read()

	#unsharp mask
	unsharp_mask = cv2.blur(frame, (2, 2))
	frame = cv2.addWeighted(frame, 1.5, unsharp_mask, -0.5, 0.0)

	#contrast enhancement
	array_alpha = np.array([1.2])
	array_beta = np.array([-30.0])
	# add a beta value to every pixel
	cv2.add(frame, array_beta, frame)
	# multiply every pixel value by alpha
	cv2.multiply(frame, array_alpha, frame)

	boundaries = [([0, 150, 180], [10, 205, 230])]	#very rough color estimation, no absolute color detection
	# loop over the boundaries which actually doesn't matter right now, it only runs once
	for (lower, upper) in boundaries:
		# create NumPy arrays from the boundaries
		lower = np.array(lower, dtype = "uint8")
		upper = np.array(upper, dtype = "uint8")
	 
		# find the colors within the specified boundaries and apply
		# the mask
		mask = cv2.inRange(frame, lower, upper)
		kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(6,8))
		mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
		kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
		mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
		output = cv2.bitwise_and(frame, frame, mask = mask)

		Omoments = cv2.moments(mask)
		dM01 = Omoments['m01']
		dM10 = Omoments['m10']
		dArea = Omoments['m00']

		if dArea > 8000: #the dot is on the screen
			posX = int(dM10/dArea)
			posY = int(dM01/dArea)
			retVal = (posX, posY)
			#print a circle if the indicator was detected
			cv2.circle(frame, (posX, posY), 8, (0, 255, 255), -1)

	if(debug):
		#debug for showing the image to ensure the puck is detected
		cv2.imshow("images", np.hstack([frame, output]))
		cv2.waitKey(0)

	cap.release()
	cv2.destroyAllWindows()
	print(retVal)
	return retVal
Example #21
from functools import reduce  # needed on Python 3

def cv_mean2(image_iterator, n_images):
    """ Compute the element-wise mean over an image iterator containing n_images
    """
    if n_images > 1000:
        return None
    img = next(image_iterator)
    img_sum = numpy.zeros(img.shape, 'int32')   # accumulate in int32 to avoid uint8 overflow
    img_sum = reduce(lambda x, y: cv2.add(x, numpy.int32(y)), image_iterator,
                     cv2.add(img_sum, numpy.int32(img)))
    return numpy.uint8(numpy.round(numpy.divide(img_sum, n_images)))
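# A minimal usage sketch for cv_mean2 (an assumption, not from the source),
# averaging a few equally sized frames; the paths are placeholders and cv2 /
# numpy are assumed imported as in the snippet above.
import cv2

paths = ['frame0.png', 'frame1.png', 'frame2.png']
frames = iter(cv2.imread(p) for p in paths)
mean_img = cv_mean2(frames, len(paths))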
Example #22
    def stitch(self, base_img, img_to_stitch, homography=None):
        """
        Stitch img_to_stitch to base_img.
        :param base_img: The base image to which the img_to_stitch is going to be stitched on.
        :param img_to_stitch: The image to be stitched on base_img.
        :return: The warped image of the base_img and img_to_stitch.

        Note that the black part of the warped image will be chopped after stitching.
        """
        if homography is None:
            H = self.find_homography(base_img, img_to_stitch)
        else:
            H = homography
        H = H / H[2, 2]
        H_inv = la.inv(H)

        (min_x, min_y, max_x, max_y) = self.find_dimensions(img_to_stitch, H_inv)
        max_x = max(max_x, base_img.shape[1])
        max_y = max(max_y, base_img.shape[0])

        move_h = np.matrix(np.identity(3), np.float32)

        if (min_x < 0):
            move_h[0, 2] += -min_x
            max_x += -min_x

        if (min_y < 0):
            move_h[1, 2] += -min_y
            max_y += -min_y

        mod_inv_h = move_h * H_inv

        img_w = int(math.ceil(max_x))
        img_h = int(math.ceil(max_y))

        # Warp the new image given the homography from the old images.
        base_img_warp = cv2.warpPerspective(base_img, move_h, (img_w, img_h))

        img_to_stitch_warp = cv2.warpPerspective(img_to_stitch, mod_inv_h, (img_w, img_h))

        # Put the base image on an enlarged palette.
        enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)

        # Create a mask from the warped image for constructing masked composite.
        (ret, data_map) = cv2.threshold(cv2.cvtColor(img_to_stitch_warp, cv2.COLOR_BGR2GRAY),
                                        0, 255, cv2.THRESH_BINARY)

        enlarged_base_img = cv2.add(enlarged_base_img, base_img_warp,
                                    mask=np.bitwise_not(data_map),
                                    dtype=cv2.CV_8U)

        final_img = cv2.add(enlarged_base_img, img_to_stitch_warp,
                            dtype=cv2.CV_8U)

        return final_img
  def trackCallback(self, data, *args) :
    self.lock.acquire()
    id = args[0]
    q = (data.header.stamp.secs, id, data.data)
    heapq.heappush(self.currentBlobs, q)


    allseen = True
    for x in self.maskImages :
      allseen = allseen and (x is not None)

    if allseen :

      counts = []
      for i,dsti in enumerate(self.maskImages) :
        t = dsti.astype(float)
        t.fill(0) 
        counts.append(t)
      

      for p in self.currentBlobs :
        cam = p[1]
        z = p[2]
        for q in z :
          t = counts[cam].copy()
          t.fill(0)
          cv2.circle(t, (int(q.x+q.z/2), int(q.y+q.z/2)), int(q.z/2), 1, thickness=-1)  # -1 = filled (CV_FILLED in the old cv API)
          counts[cam] = cv2.add(counts[cam], t)


      common = []
      for i,dsti in enumerate(counts) :
        tmp = cv2.warpPerspective(dsti,self.calib[i],self.dim, borderMode= cv2.BORDER_CONSTANT,borderValue=0)
        common.append(tmp)

      combined = common[0].copy()
      combined.fill(0)
      for i,img in enumerate(common) :
        combined = cv2.add(combined, img)

      v = cv2.minMaxLoc(combined)
      v = v[1]
      if v == 0:
        v = 1.0

      combined = cv2.multiply(combined, 255/v)
      combined = np.uint8(combined)

      try :
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(combined, "mono8"))
       
      except CvBridgeError as e:
        print(e)
   
      gc.collect()
    def feed(self, image):
        if self.backImg is None:
            self.backImg = image.copy()
            self.prevImg = image.copy()
            self.height, self.width = image.shape
            self.bestRun = np.ones((self.height,self.width), np.uint8)
            self.currentRun = np.ones((self.height,self.width), np.uint8)
            self.minFixedPixel = int(image.size*self.PERCENTAGE)
            #print self.minFixedPixel
            if self.showBackImage:
                self.createTrackbars()
                cv2.imshow("backImage", self.backImg)
            return self.backImg

        self.checkSettings()
        
        diffImage = cv2.absdiff(self.prevImg,image)
        ret, threshold1 = cv2.threshold(diffImage, self.THRESHOLD, 1, cv2.THRESH_BINARY_INV)
        ret, threshold255 = cv2.threshold(diffImage, self.THRESHOLD, 255, cv2.THRESH_BINARY_INV)
        
        nonZero = cv2.countNonZero(threshold1)
        nonZeroRatio = nonZero / float(image.size)
        perfection = self.PERFECTION
        if nonZeroRatio < self.PERCENTAGE:
            perfection = 5
            print(perfection)
        
        nonChanged = cv2.bitwise_and(self.currentRun, threshold255)
        self.currentRun = cv2.add(threshold1,nonChanged)

        newBestsMask = cv2.compare(self.currentRun, self.bestRun, cv2.CMP_GE)
        oldBestsMask = cv2.compare(self.currentRun, self.bestRun, cv2.CMP_LT)

        newBestRuns = cv2.bitwise_and(self.currentRun, self.currentRun, mask = newBestsMask)
        oldBestRuns = cv2.bitwise_and(self.bestRun, self.bestRun, mask = oldBestsMask)
        self.bestRun = cv2.add(newBestRuns, oldBestRuns)

        newBackImgPoints = cv2.bitwise_and(image, image,mask = newBestsMask)
        oldBackImgPoints = cv2.bitwise_and(self.backImg, self.backImg, mask = oldBestsMask)
        self.backImg = cv2.add(newBackImgPoints, oldBackImgPoints)

        stablePoints = cv2.compare(self.bestRun, perfection, cv2.CMP_GT)
        unstablePoints = cv2.bitwise_not(stablePoints)
        stablePoints = cv2.bitwise_and(stablePoints, perfection)
        unstablePoints = cv2.bitwise_and(unstablePoints, self.bestRun)
        self.bestRun = cv2.add(stablePoints, unstablePoints)
        
        self.nonZeroPoints = cv2.countNonZero(stablePoints)
        self.prevImg = image.copy()

        if self.showBackImage:
            cv2.imshow("backImage", self.backImg)
            
        return self.backImg
    def image_callback(self, msg):
        if not self.thread_lock.acquire(False):
            return
        #converts rosmsg to 8bit bgr image
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        #converts the bgr image to hsv
        imageHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        #creates color masks using bounds
        redMask = cv2.inRange(imageHSV, self.RED_BOUNDS[0], self.RED_BOUNDS[1]).astype('uint8')
        yellowMask = cv2.inRange(imageHSV, self.YELLOW_BOUNDS[0], self.YELLOW_BOUNDS[1])
        greenMask = cv2.inRange(imageHSV, self.GREEN_BOUNDS[0], self.GREEN_BOUNDS[1])

        #final mask to be used
        finalMask = cv2.add(cv2.add(redMask, yellowMask), greenMask)
        #finalMask = yellowMask
        #creates hsv image with mask
        filteredHSV = cv2.bitwise_and(imageHSV, imageHSV, mask=finalMask)
        #converts filtered hsv image to bgr image
        imageBGR = cv2.cvtColor(imageHSV, cv2.COLOR_HSV2BGR)
        #converts bgr image to grayscale
        grayscale = cv2.cvtColor(filteredHSV, cv2.COLOR_BGR2GRAY)

        #finds contours
        contours, h = cv2.findContours(grayscale, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
        rospy.loginfo(len(contours))

        #creates box for every contour
        for j in range(0, len(contours)):
            hull = cv2.convexHull(contours[j])
            rect = cv2.minAreaRect(hull)

            box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2
            box = np.int0(box)

            #creates tuple for the center of the blob
            rectx = int(rect[0][0])
            recty = int(rect[0][1])
            center = (rectx, recty)

            #calculates the area of the blob
            area = rect[1][0] * rect[1][1]

            if  area > 1000:
                cv2.drawContours(imageBGR, [box], 0, (0, 0, 255), 2)
                cv2.circle(imageBGR, center, 10, (0, 0, 255), thickness=1)




        #cv2.imshow("window", imageBGR)

        self.pub_image.publish(self.bridge.cv2_to_imgmsg(imageBGR, "bgr8"))
        self.thread_lock.release()
Example #26
def process_motion_frame(q, f, tick, ts, mfa=False, rotateAng=False, width=False, gBlur=(9, 9)):
    '''
    This function defines the image processing techniques that are applied,
    on a new thread, to each frame retrieved from the camera.
    '''
    rects_sal = []
    fgmask = None
    f_copy = f.copy()
    if rotateAng is not False and rotateAng != 0:
        f = imutils.rotate(f, angle=rotateAng)
    if width is not False:
        f = imutils.resize(f, width=width)
    # blur & bg sub
    try:
        fgmask = fgbg.apply(cv2.GaussianBlur(f, gBlur, 0), learningRate=config.computing.learning_rate)
    except Exception:
        print("-"*60)
        traceback.print_exc(file=sys.stdout)
        print("-"*60)
        raise

    # get our frame outlines
    f_rects, rects_mot = get_motions(f, fgmask, thickness=1)
    rects_sal.extend(rects_mot)
    num_motion = len(rects_mot)

    if True:
        # don't do anything else if there's no motion of any kind detected
        # if num_motion > 0 or mfa is True:
        num_bodies = 0
        num_faces = 0
        if config.computing.body_detection_en or config.computing.face_detection_en:
            # generate a histogram equalized bw image if we're doing processing
            # that needs it
            f_bw = cv2.equalizeHist(cv2.cvtColor(f, cv2.COLOR_BGR2GRAY))
            if config.computing.body_detection_en:
                fBody, rectsBody = detectPerson(f, color=(255, 0, 0))
                if len(rectsBody) > 0:
                    f_rects = cv2.add(f_rects, fBody)
                    num_bodies = len(rectsBody)
                    rects_sal.extend(rectsBody)

            if config.computing.face_detection_en:
                fFace, rectsFace = detectFace(f_bw, color=(0, 255, 0))
                if len(rectsFace) > 0:
                    f_rects = cv2.add(f_rects, fFace)
                    num_faces = len(rectsFace)
                    rects_sal.extend(rectsFace)

        f_rects = imutils.resize(f_rects, width=f_copy.shape[1])
        q.put({"f": f_copy, "ts": ts, "rects_sal": rects_sal, "sz_scaled": getsize(
            f), "num_motion": num_motion, "num_bodies": num_bodies, "num_faces": num_faces})

    return f_copy, f_rects, fgmask, rects_sal, tick, ts
Example #27
def addImage(img, name, x, y) :
    global lasttime
    thistime = time.time()
    images = animes[name]
    if (thistime - lasttime > 0.1):
        aframe[name] = (aframe[name]+1)%len(images)
#        print str(aframe[name]) + "  of " + name + " numimages=" + str(len(images))
        lasttime = thistime
    (w,h) = images[aframe[name]].shape
    img[x:x+w,y:y+h,0] = cv2.add(images[aframe[name]],img[x:x+w,y:y+h,0])
    img[x:x+w,y:y+h,1] = cv2.add(images[aframe[name]],img[x:x+w,y:y+h,1])
    img[x:x+w,y:y+h,2] = cv2.add(images[aframe[name]],img[x:x+w,y:y+h,2])
def warpImages(img1, img2, H):
    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]

    # corners of both images; img2's corners are sent through the homography
    list_of_points_1 = np.float32([[0,0], [0,rows1], [cols1,rows1], [cols1,0]]).reshape(-1, 1, 2)
    temp_points = np.float32([[0,0], [0,rows2], [cols2,rows2], [cols2,0]]).reshape(-1, 1, 2)
    list_of_points_2 = cv2.perspectiveTransform(temp_points, H)
    list_of_points = np.concatenate((list_of_points_1, list_of_points_2), axis=0)

    [x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
    [x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)

    print(list_of_points)
    print(x_min, y_min)

    # translate everything into positive coordinates
    translation_dist = [-x_min, -y_min]
    H_translation = np.array([[1, 0, translation_dist[0]],
                              [0, 1, translation_dist[1]],
                              [0, 0, 1]], dtype=np.float64)

    size = (x_max - x_min, y_max - y_min)  # (width, height) for warpPerspective

    output_img = cv2.warpPerspective(img2, H_translation.dot(H), size)
    img1_large = cv2.warpPerspective(img1, H_translation, size)

    print(output_img.shape[:2])

    base_image = np.zeros((size[1], size[0], 3), np.uint8)

    print(base_image.shape[:2])

    # mask of the warped img2 content, used to composite the pair without overlap
    (ret, data_map) = cv2.threshold(cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY), 0, 255, cv2.THRESH_BINARY)

    base_image = cv2.add(base_image, img1_large, mask=np.bitwise_not(data_map), dtype=cv2.CV_8U)

    final_img = cv2.add(base_image, output_img, dtype=cv2.CV_8U)

    return final_img
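# A minimal usage sketch (an assumption, not from the source): estimate a
# homography mapping img2 into img1's frame with ORB + RANSAC, then composite
# the pair with warpImages; the file names are placeholders.
import cv2
import numpy as np

img1 = cv2.imread('left.png')
img2 = cv2.imread('right.png')
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY), None)
kp2, des2 = orb.detectAndCompute(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY), None)
matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des1, des2)
src = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
dst = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
pano = warpImages(img1, img2, H)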
def find_beads(path_to_red_img, path_to_blue_img, radius):

    # loading images color & grayscale
    red_img, red_ch = get_grayscale_channel(path_to_red_img, 'red')
    blue_img, blue_ch = get_grayscale_channel(path_to_blue_img, 'blue')

    # summing
    sum_ch = cv2.add(blue_ch, red_ch)
    sum_color = cv2.add(blue_img, red_img)

    # finding beads via contours applying different threshold with each step
    beads_com_list, beads_centers = [], []
    for i in range(10, 255, 1):
        ret, thresh = cv2.threshold(sum_ch, i, 255, cv2.THRESH_BINARY)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        for cnt in contours:
            # checking if the contour area is close to the assumed (pi*r^2) bead area
            if np.pi*radius*radius-10 < cv2.contourArea(cnt) < np.pi*radius*radius+10:
                # calculating center of mass
                moments = cv2.moments(cnt)
                center_of_mass_y = int(moments['m01'] / moments['m00'])
                center_of_mass_x = int(moments['m10'] / moments['m00'])
                # check that the center is not at the border of image
                if sum_color.shape[0] - (radius+5) > center_of_mass_y > radius+5 and \
                                        sum_color.shape[1] > center_of_mass_x > radius+5:
                    # drawing found contours
                    cv2.drawContours(sum_color, [cnt], 0, (0, 255, 0), 1)
                    # check for possible duplicates
                    if (center_of_mass_x, center_of_mass_y) not in beads_com_list:
                        beads_com_list.append((center_of_mass_x, center_of_mass_y))
    for i in range(len(beads_com_list)):
        for j in range(i, len(beads_com_list)):
            x = beads_com_list[j][0] - beads_com_list[i][0]
            y = beads_com_list[j][1] - beads_com_list[i][1]
            dist = x*x + y*y
            if dist <= radius*radius+1:
                av_cent_x = int((beads_com_list[j][0] + beads_com_list[i][0])/2)
                av_cent_y = int((beads_com_list[j][1] + beads_com_list[i][1])/2)
                if (av_cent_x, av_cent_y) not in beads_centers:
                    beads_centers.append((av_cent_x, av_cent_y))
            elif beads_com_list[i] not in beads_centers:
                beads_centers.append(beads_com_list[i])
            elif beads_com_list[j] not in beads_centers:
                beads_centers.append(beads_com_list[j])
    #cv2.imshow('contours', sum_color)
    cv2.imwrite('./output/latest_data/2015-07-02_Beads_PE_12samples_output/found_contours_'+path_to_red_img[-29:-9]+'.png', sum_color)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    #print beads_com_list
    #print beads_centers
    print(len(beads_centers), 'centers found')

    return sum_color, beads_centers
def shift_hsv(img, hue_shift, sat_shift, val_shift):
    dtype = img.dtype
    img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.int32)
    hue, sat, val = cv2.split(img)
    hue = cv2.add(hue, hue_shift)
    hue = np.where(hue < 0, hue + 180, hue)
    hue = np.where(hue > 180, hue - 180, hue)
    hue = hue.astype(dtype)
    sat = clip(cv2.add(sat, sat_shift), dtype, 255 if dtype == np.uint8 else 1.)
    val = clip(cv2.add(val, val_shift), dtype, 255 if dtype == np.uint8 else 1.)
    img = cv2.merge((hue, sat, val)).astype(dtype)
    img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
    return img
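# A minimal usage sketch for shift_hsv (an assumption, not from the source):
# it expects an RGB image, per the cv2.COLOR_RGB2HSV call, and relies on the
# `clip` helper defined alongside it.
import cv2

bgr = cv2.imread('photo.png')   # placeholder file name
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
shifted = shift_hsv(rgb, hue_shift=20, sat_shift=30, val_shift=0)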
Example #31
    async def run_cv(self):
        print("Running CV")
        # define a video capture object
        vid = cv2.VideoCapture(1)

        # def onMouse(event, x, y, flags, param):
        #     if event == cv2.EVENT_LBUTTONDOWN:
        #         print('x = %d, y = %d'%(x, y))

        # lower_blue = np.array([88, 240, 64])
        # upper_blue = np.array([255, 255, 112])

        # Actual run
        # lower_blue = np.array([96, 128, 64])
        # upper_blue = np.array([255, 255, 255])

        # lower_blue = np.array([92, 180, 50])
        # upper_blue = np.array([128, 255, 255])

        # Cyan
        lower_blue = np.array([100, 112, 80])
        upper_blue = np.array([128, 255, 255])

        # lower_green = np.array([64, 128, 32])
        # upper_green = np.array([96, 255, 128])

        # Actual run
        lower_green = np.array([40, 40, 20])
        upper_green = np.array([98, 255, 255])

        # Yellow
        # lower_green = np.array([16, 0, 0])
        # upper_green = np.array([32, 255, 225])

        blue_x = []
        blue_y = []
        blue_count = 0
        blue_pos = [0, 0]

        green_x = []
        green_y = []
        green_count = 0
        green_pos = [0, 0]

        median_filt = 5
        angle = 0

        while (True):
            # print("CV")
            ret, frame = vid.read()

            if frame is not None:

                gaus_blur_frame = cv2.GaussianBlur(frame, (3, 3), 0)
                # gaus_blur_frame = cv2.medianBlur(frame, 3)

                # Homography correction
                # pts_actual = np.array([[47, 20], [47, 390], [560, 20],[560, 390]]) # from top down image
                # pts_camera = np.array([[130, 156], [32, 382],[471, 154],[573, 388]]) # from camera feed

                # pts_actual = np.array([[132, 391], [564, 381], [520, 43],[158, 39]]) # from top down image
                # pts_camera = np.array([[98, 351], [570, 349],[481, 78],[162, 67]]) # from camera feed

                # Actual run
                height, width, channels = frame.shape
                homographized = cv2.warpPerspective(gaus_blur_frame, self.h,
                                                    (width, height))

                hsv_frame = cv2.cvtColor(homographized, cv2.COLOR_BGR2HSV)

                # Blue mask
                blue_mask = cv2.inRange(hsv_frame, lower_blue, upper_blue)
                blue = cv2.bitwise_and(homographized,
                                       homographized,
                                       mask=blue_mask)

                # red_mask = cv2.inRange(hsv_frame, lower_red, upper_red)
                # red = cv2.bitwise_and(homographized, homographized, mask=red_mask)

                # Green mask
                green_mask = cv2.inRange(hsv_frame, lower_green, upper_green)
                green = cv2.bitwise_and(homographized,
                                        homographized,
                                        mask=green_mask)

                # Both colors
                colors = cv2.add(blue, green)

                blue_grayscale = cv2.cvtColor(blue, cv2.COLOR_BGR2HSV)
                blue_canny = cv2.Canny(blue_grayscale, 50, 240)
                blue_circles = cv2.HoughCircles(blue_canny,
                                                cv2.HOUGH_GRADIENT,
                                                dp=1,
                                                minDist=100,
                                                param1=10,
                                                param2=20,
                                                minRadius=1,
                                                maxRadius=120)
                blue_cen = []

                if blue_circles is not None:
                    blue_cen = blue_circles[0, :][0][:2]
                    if (len(blue_x) == median_filt):
                        blue_x[blue_count] = blue_cen[0]
                        blue_y[blue_count] = blue_cen[1]
                    else:
                        blue_x.append(blue_cen[0])
                        blue_y.append(blue_cen[1])

                    if (blue_count == median_filt - 1):
                        blue_count = 0
                    else:
                        blue_count += 1

                green_grayscale = cv2.cvtColor(green, cv2.COLOR_BGR2HSV)
                green_canny = cv2.Canny(green_grayscale, 50, 240)
                green_circles = cv2.HoughCircles(green_canny,
                                                 cv2.HOUGH_GRADIENT,
                                                 dp=1,
                                                 minDist=100,
                                                 param1=10,
                                                 param2=20,
                                                 minRadius=1,
                                                 maxRadius=120)
                green_cen = []

                if green_circles is not None:
                    green_cen = green_circles[0, :][0][:2]
                    if (len(green_x) == median_filt):
                        green_x[green_count] = green_cen[0]
                        green_y[green_count] = green_cen[1]
                    else:
                        green_x.append(green_cen[0])
                        green_y.append(green_cen[1])

                    if (green_count == median_filt - 1):
                        green_count = 0
                    else:
                        green_count += 1

                if (len(blue_x) == median_filt):
                    blue_pos = [int(np.median(blue_x)), int(np.median(blue_y))]
                    if len(blue_cen) == 0:
                        cv2.circle(homographized, (blue_pos[0], blue_pos[1]),
                                   2, (255, 255, 255), 3)
                    else:
                        cv2.circle(homographized, (blue_pos[0], blue_pos[1]),
                                   2, (0, 0, 255), 3)
                if (len(green_x) == median_filt):
                    green_pos = [int(np.median(green_x)), int(np.median(green_y))]
                    if len(green_cen) == 0:
                        cv2.circle(homographized, (green_pos[0], green_pos[1]),
                                   2, (255, 255, 255), 3)
                    else:
                        cv2.circle(homographized, (green_pos[0], green_pos[1]),
                                   2, (255, 255, 0), 3)

                if ((len(blue_x) == median_filt)
                        and (len(green_x) == median_filt)):
                    y_dist = green_pos[1] - blue_pos[1]
                    x_dist = green_pos[0] - blue_pos[0]
                    angle = np.arctan2(y_dist, x_dist) * 180 / np.pi
                    # print(angle)

                # self.data.current_pos = [self.data.current_pos[0]+1, self.data.current_pos[1] + 1]
                self.data.current_pos = [(blue_pos[0] + green_pos[0]) // 2,
                                         (blue_pos[1] + green_pos[1]) // 2]
                # self.data.current_pos = blue_pos
                self.data.angle = angle
                # cv2.namedWindow('frame',cv2.WINDOW_NORMAL)
                # # cv2.setMouseCallback("frame", onMouse)
                # cv2.imshow('frame', frame)

                # cv2.imshow("Blue", blue)
                # cv2.imshow("Green", green)
                # cv2.imshow("Colors", colors)
                # cv2.circle(homographized, (345, 206), 2, (255,255,255), 3)
                # cv2.circle(homographized, (345, 236), 2, (255,255,255), 3)
                # cv2.circle(homographized, (345, 266), 2, (255,255,255), 3)
                for i, target in enumerate(self.data.target_pos):
                    # cv2.circle(homographized, target, 2, (255, 255, 255), 3)
                    cv2.putText(homographized, str(i + 1), target,
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255),
                                2)

                if self.data.obs_detection:
                    for x, y, w, h in self.data.obstacle_pos:
                        cv2.rectangle(homographized, (x, y), (x + w, y + h),
                                      (36, 255, 12), 2)

                cv2.imshow("Camera Feed (Homographized)", homographized)

            # dst = cv2.Canny(frame, 100, 150, None, 3)
            # linesP = cv2.HoughLinesP(dst, 1, np.pi / 180, 50, 100, 50, 10)
            # cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
            # if linesP is not None:
            #     for i in range(0, len(linesP)):
            #         l = linesP[i][0]
            #         cv2.line(cdst, (l[0], l[1]), (l[2], l[3]), (0,0,255), 3, cv2.LINE_AA)
            # cv2.imshow("detected lines", cdst)
            # # the 'q' button is set as the quitting button
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            await asyncio.sleep(0.02)

            # Line detection
            # a = np.array([[225, 113]], dtype='float32')
            # a = np.array([a])
            # adjCoord = cv2.perspectiveTransform(frame, h) # convert camera coords to actual coords
            # print(adjCoord) # print actual coordinate for converted point (ie robot position?)

        # After the loop release the cap object
        vid.release()
        # Destroy all the windows
        cv2.destroyAllWindows()
Example #32
def detect_red_light(I):
    '''
    This function takes a numpy array <I> and returns a list <bounding_boxes>.
    The list <bounding_boxes> should have one element for each red light in the
    image. Each element of <bounding_boxes> should itself be a list, containing
    four integers that specify a bounding box: the row and column index of the
    top left corner and the row and column index of the bottom right corner (in
    that order). See the code below for an example.

    Note that PIL loads images in RGB order, so:
    I[:,:,0] is the red channel
    I[:,:,1] is the green channel
    I[:,:,2] is the blue channel
    '''

    bounding_boxes = [
    ]  # This should be a list of lists, each of length 4, in the format described in the docstring above.

    # Format the image
    image = cv2.cvtColor(I, cv2.COLOR_RGB2BGR)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # Hue thresholds
    min_sat = min(90, int(cv2.mean(hsv)[2]))
    lower_red1 = np.array([0, min_sat, min_sat])
    upper_red1 = np.array([10, 255, 255])
    lower_red2 = np.array([160, min_sat, min_sat])
    upper_red2 = np.array([180, 255, 255])
    lower_not_red = np.array([30, min_sat, min_sat])
    upper_not_red = np.array([150, 255, 255])

    # Mask generation
    mask1 = cv2.inRange(hsv, lower_red1, upper_red1)
    mask2 = cv2.inRange(hsv, lower_red2, upper_red2)
    maskr = cv2.add(mask1, mask2)

    maskbg = cv2.bitwise_not(cv2.inRange(hsv, lower_not_red, upper_not_red))
    maskr = cv2.bitwise_and(maskr, maskbg)

    # Mask filtering
    kernele = np.ones((2, 2), np.uint8)
    kernel = np.ones((1, 1), np.uint8)
    kerneld = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    maskr = cv2.erode(maskr, kernel, iterations=1)
    maskr = cv2.morphologyEx(maskr, cv2.MORPH_CLOSE, kerneld, iterations=1)
    maskr = cv2.dilate(cv2.erode(maskr, kernele, iterations=1),
                       kernele,
                       iterations=1)

    # get contours
    contours, hierarchy = cv2.findContours(maskr, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    # Check if contour is a circle
    for con in contours:
        perimeter = cv2.arcLength(con, True)
        area = cv2.contourArea(con)
        if 10 > area or area > 250:
            continue
        if perimeter == 0:
            break
        circularity = 4 * math.pi * (area / (perimeter * perimeter))
        if 0.8 < circularity < 1.15:
            mask = np.zeros(maskr.shape, np.uint8)
            cv2.drawContours(mask, [con], -1, 255, -1)
            if cv2.mean(image, mask=mask)[2] >= 100 * min_sat / 90:
                mean_val = cv2.mean(image, mask=mask)
                if (mean_val[2] / (mean_val[1] + mean_val[0])) > 0.8:
                    bbox = cv2.boundingRect(con)
                    bounding_boxes.append([
                        bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]
                    ])

    return bounding_boxes
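# A minimal usage sketch (an assumption, not from the source): load the image
# with PIL, as the docstring assumes, and run the detector on the RGB array.
import numpy as np
from PIL import Image

I = np.asarray(Image.open('scene.jpg'))   # placeholder file name
for box in detect_red_light(I):
    print(box)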
Example #33
from __future__ import print_function
import numpy as np
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
cv2.imshow("Original", image)

print("max of 255: {}".format(cv2.add(np.uint8([200]), np.uint8([100]))))
print("min of 0: {}".format(cv2.subtract(np.uint8([50]), np.uint8([100]))))
print("wrap around: {}".format(np.uint8([200] + np.uint8([100]))))
print("wrap around: {}".format(np.uint8([50]) - np.uint8([100])))
import cv2

img1 = cv2.imread("1.jpg")
img2 = cv2.imread("2.png")

r1 = cv2.resize(img1, (720, 720))
r2 = cv2.resize(img2, (720, 720))

s = cv2.add(r1, r2)                      # saturated per-pixel sum
s = cv2.addWeighted(r1, 3, r2, 0.5, 0)   # weighted blend; overwrites the plain sum
# s=cv2.subtract(r1,r2)

cv2.imshow('add', s)
cv2.waitKey(0)
cv2.destroyAllWindows()
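# For reference: cv2.addWeighted computes dst = src1*alpha + src2*beta + gamma
# with saturation, so the blend above is r1*3 + r2*0.5 clipped to [0, 255].
# An equivalent NumPy check (an illustration, not from the source):
import numpy as np

manual = np.clip(np.rint(r1.astype(np.float64)*3 + r2.astype(np.float64)*0.5), 0, 255).astype(np.uint8)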
def create_pig_nose_filter(image_path='',
                           nose_image_type=config.__PIG_NOSE_FILTER__):
    create_pig_nose_filter_logger = logging.getLogger(
        'selfie_with_filters.create_pig_nose_filter')

    create_pig_nose_filter_logger.info(
        "In the create_pig_nose_filter method ...")

    # 0. check if there is a valid file
    if not image_path:
        create_pig_nose_filter_logger.error(
            "Error! No Image provided! Can't create filter if there is no image!!"
        )
        return ""

    # 1. read image (selfie)

    frame = cv2.imread(image_path)

    create_pig_nose_filter_logger.info("The image is at: %s" % image_path)
    image_height, image_width = frame.shape[:2]
    create_pig_nose_filter_logger.info("Image Height: %d, Image Width: %d" %
                                       (image_height, image_width))

    # 2. now run a detect faces on this image

    faces = get_facial_landmarks(image_path=image_path)

    for face_idx, face in enumerate(faces):

        # Now get the nose positions for each face
        if "Landmarks" in face:
            center_nose = get_landmark_value(landmarks=face["Landmarks"],
                                             parameter_name="nose",
                                             image_width=image_width,
                                             image_height=image_height)
            left_nose = get_landmark_value(landmarks=face["Landmarks"],
                                           parameter_name="noseLeft",
                                           image_width=image_width,
                                           image_height=image_height)
            right_nose = get_landmark_value(landmarks=face["Landmarks"],
                                            parameter_name="noseRight",
                                            image_width=image_width,
                                            image_height=image_height)
        else:
            create_pig_nose_filter_logger.warning(
                "No Landmarks found in face!")
            continue

        create_pig_nose_filter_logger.info(
            "Retrieved Nose positions for face: %d" % (face_idx + 1))

        nose_width = int(
            hypot(left_nose[0] - right_nose[0], left_nose[1] - right_nose[1]) *
            1.7)
        nose_height = int(nose_width * 0.77)

        # New nose position
        top_left = (int(center_nose[0] - nose_width / 2),
                    int(center_nose[1] - nose_height / 2))
        bottom_right = (int(center_nose[0] + nose_width / 2),
                        int(center_nose[1] + nose_height / 2))

        # 3. apply effects of pig nose

        nose_image = cv2.imread(nose_image_type)
        rows, cols, _ = frame.shape
        nose_mask = np.zeros((rows, cols), np.uint8)

        nose_mask.fill(0)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Adding the new nose
        nose_pig = cv2.resize(nose_image, (nose_width, nose_height))
        nose_pig_gray = cv2.cvtColor(nose_pig, cv2.COLOR_BGR2GRAY)
        _, nose_mask = cv2.threshold(nose_pig_gray, 25, 255,
                                     cv2.THRESH_BINARY_INV)

        nose_area = frame[top_left[1]:top_left[1] + nose_height,
                          top_left[0]:top_left[0] + nose_width]
        nose_area_no_nose = cv2.bitwise_and(nose_area,
                                            nose_area,
                                            mask=nose_mask)
        final_nose = cv2.add(nose_area_no_nose, nose_pig)

        frame[top_left[1]:top_left[1] + nose_height,
              top_left[0]:top_left[0] + nose_width] = final_nose

        create_pig_nose_filter_logger.info("Added the pig nose for face: %d" %
                                           (face_idx + 1))

    # 4. display the frame thus computed
    # 5. save the image
    filtered_image = "%s/%s" % (config.__CEREBRO_MEDIA_DIR__,
                                config.__FILTERED_IMAGE_NAME__)
    cv2.imwrite(filtered_image, frame)

    return filtered_image
Example #36
import cv2

logoimg = cv2.imread("image.png")
if logoimg is None:
    print("Cannot find logo image")
    exit(-1)

sceneimg = cv2.imread("lenna.png")
if sceneimg is None:
    print("Cannot find scene image")
    exit(-1)

# logoimg = cv2.resize(logoimg, (512, 512))
logoimg = cv2.resize(logoimg, (0, 0), fx=1, fy=1)
logo_mask = cv2.cvtColor(logoimg, cv2.COLOR_BGR2GRAY)
# cv2.imshow("Result", logo_mask)
# cv2.waitKey(0)

retval, logo_mask = cv2.threshold(logo_mask, 180, 255, cv2.THRESH_BINARY_INV)

roi = sceneimg[0:logo_mask.shape[0],
               sceneimg.shape[1] - logo_mask.shape[1]:sceneimg.shape[1]]
cv2.add(logoimg, roi, roi, logo_mask)
cv2.imshow("Result", sceneimg)
cv2.waitKey(0)

cv2.destroyAllWindows()
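# Note on the masked add above (an explanatory aside, not from the source):
# cv2.add(src1, src2, dst, mask) writes saturate(src1 + src2) only where mask
# is non-zero and leaves the other dst pixels untouched, so the logo pixels
# are composited into the scene ROI in place.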
Example #37
    frame = imutils.resize(frame, width=800)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    kernel = np.ones((5, 5), 'int')
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, kernel, iterations=2)

    blueMask = cv2.inRange(hsv, blueLower, blueUpper)
    blueMask = cv2.erode(blueMask, None, iterations=2)
    blueMask = cv2.dilate(blueMask, kernel, iterations=2)

    centroidMask = cv2.inRange(hsv, centroidLower, centroidUpper)  # red
    centroidMask2 = cv2.inRange(hsv, (0, 145, 100), (10, 210, 160))  # red2
    centroidMask = cv2.add(centroidMask,
                           centroidMask2)  #combine 2 limits for red
    centroidMask = cv2.dilate(centroidMask, kernel, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    #blue contours
    blueCnts = cv2.findContours(blueMask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    blueCnts = imutils.grab_contours(blueCnts)
    blueCenter = None
Example #38
            mask_inv = cv2.resize(orig_mask_inv,
                                  (mustacheWidth, mustacheHeight),
                                  interpolation=cv2.INTER_AREA)

            # take ROI for mustache from background equal to size of mustache image
            roi = roi_color[y1:y2, x1:x2]

            # roi_bg contains the original image only where the mustache is not
            # in the region that is the size of the mustache.
            roi_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)

            # roi_fg contains the image of the mustache only where the mustache is
            roi_fg = cv2.bitwise_and(mustache, mustache, mask=mask)

            # join the roi_bg and roi_fg
            dst = cv2.add(roi_bg, roi_fg)

            # place the joined image, saved to dst back over the original image
            roi_color[y1:y2, x1:x2] = dst

            break

    #     cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    #     angry, fear, happy, sad, surprise, neutral = predict_emotion(face_image_gray)
    #     text1 = 'Angry: {}     Fear: {}   Happy: {}'.format(angry, fear, happy)
    #     text2 = '  Sad: {} Surprise: {} Neutral: {}'.format(sad, surprise, neutral)

    # cv2.putText(frame, text1, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 3)
    # cv2.putText(frame, text2, (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 3)

    # Display the resulting frame
Exemple #39
0
def number_recognition(cut_image):
    # --Read Input Image--
    src = cv2.imread(cut_image)  # load the image

    print("Image loaded")
    '''
    dst = src.copy()  # crop the image in half (to speed up plate recognition)
    dst = src[480:960, 50:670]
    cv2.imshow("half img", dst)
    cv2.waitKey(0)
    '''

    prevtime = time.time()  # start of the elapsed-time measurement

    # variable declarations
    height, width, channel = src.shape  # image dimensions

    numcheck = 0  # counts digits found while validating the plate string
    charsok = 0  # set to 1 once the plate characters read correctly
    add_w_padding, add_h_padding = 0, 0  # extra padding to apply
    w_padding_max, h_padding_max = 0, 0  # flags that stop the loop once the padding hits its limit

    # --Convert Image to Grayscale--

    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)  # convert to grayscale

    # --Maximize Contrast (Optional)--

    structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, structuringElement)
    imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT,
                                   structuringElement)

    imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat)
    gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)

    # --Adaptive Thresholding-- (Gaussian blur to remove noise, then threshold)

    img_blurred = cv2.GaussianBlur(gray, ksize=(5, 5),
                                   sigmaX=0)  # apply GaussianBlur

    img_thresh = cv2.adaptiveThreshold(  # apply adaptiveThreshold
        img_blurred,
        maxValue=255.0,
        adaptiveMethod=cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        thresholdType=cv2.THRESH_BINARY_INV,
        blockSize=19,
        C=9)

    # --Find Contours--

    contours, hierarchy = cv2.findContours(  # store the contours found by cv2.findContours
        img_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    temp_result = np.zeros((height, width, channel),
                           dtype=np.uint8)  # blank canvas for drawing the contours

    cv2.drawContours(temp_result, contours, -1, (255, 255, 255))  # draw the contours

    # --Prepare Data-- (inspect the contours; draw rectangles around likely character regions)

    temp_result = np.zeros(
        (height, width, channel), dtype=np.uint8
    )  # reset the canvas drawn on above (otherwise some contours would remain)

    contours_dict = []  # list that will hold every contour's info

    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)  # position and size of the contour
        cv2.rectangle(temp_result,
                      pt1=(x, y),
                      pt2=(x + w, y + h),
                      color=(255, 255, 255),
                      thickness=2)  # bounding rectangle around the contour

        # insert to dict
        contours_dict.append({  # store all contour info
            'contour': contour,
            'x': x,
            'y': y,
            'w': w,
            'h': h,
            'cx': x + (w / 2),
            'cy': y + (h / 2)
        })

    # --Select Candidates by Char Size-- (find character-like regions)

    MIN_AREA = 80  # minimum bounding-rect area
    MIN_WIDTH, MIN_HEIGHT = 2, 8  # minimum bounding-rect width and height
    MIN_RATIO, MAX_RATIO = 0.25, 1.0  # allowed width-to-height ratio: min 0.25, max 1.0

    possible_contours = []  # list of contours that look like characters

    cnt = 0  # counter
    for d in contours_dict:  # append to possible_contours when the criteria below match
        area = d['w'] * d['h']
        ratio = d['w'] / d['h']

        if area > MIN_AREA \
                and d['w'] > MIN_WIDTH and d['h'] > MIN_HEIGHT \
                and MIN_RATIO < ratio < MAX_RATIO:
            d['idx'] = cnt
            cnt += 1
            possible_contours.append(d)

    # visualize possible contours
    temp_result = np.zeros((height, width, channel), dtype=np.uint8)

    for d in possible_contours:
        #     cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
        cv2.rectangle(temp_result,
                      pt1=(d['x'], d['y']),
                      pt2=(d['x'] + d['w'], d['y'] + d['h']),
                      color=(255, 255, 255),
                      thickness=2)  # draw rectangles only around the likely character regions

    # --Select Candidates by Arrangement of Contours-- (find runs of characters, i.e. likely plate regions)

    MAX_DIAG_MULTIPLYER = 4.7  # 5  max gap between contours (tune by testing)
    MAX_ANGLE_DIFF = 13  # 12.0  max angle (theta of the right triangle between two contours)
    MAX_AREA_DIFF = 0.5  # 0.5  max relative difference in area
    MAX_WIDTH_DIFF = 0.8  # 0.8  max relative difference in width
    MAX_HEIGHT_DIFF = 0.2  # 0.2  max relative difference in height
    MIN_N_MATCHED = 4  # 3  minimum number of matched characters (3+ suggests a plate)

    def find_chars(contour_list):  # recursively keeps searching for plate candidates
        matched_result_idx = []  # indices of the final results

        for d1 in contour_list:  # compare every pair of contours (d1, d2)
            matched_contours_idx = []
            for d2 in contour_list:
                if d1['idx'] == d2['idx']:
                    continue

                dx = abs(d1['cx'] - d2['cx'])
                dy = abs(d1['cy'] - d2['cy'])

                diagonal_length1 = np.sqrt(d1['w']**2 + d1['h']**2)

                distance = np.linalg.norm(
                    np.array([d1['cx'], d1['cy']]) -
                    np.array([d2['cx'], d2['cy']]))  # distance between d1 and d2
                if dx == 0:  # d1 and d2 share the same x coordinate
                    angle_diff = 90
                else:
                    angle_diff = np.degrees(np.arctan(dy / dx))  # arctangent, converted to degrees
                area_diff = abs(d1['w'] * d1['h'] - d2['w'] * d2['h']) / (
                    d1['w'] * d1['h'])  # relative area difference
                width_diff = abs(d1['w'] - d2['w']) / d1['w']  # relative width difference
                height_diff = abs(d1['h'] - d2['h']) / d1['h']  # relative height difference

                if distance < diagonal_length1 * MAX_DIAG_MULTIPLYER \
                        and angle_diff < MAX_ANGLE_DIFF and area_diff < MAX_AREA_DIFF \
                        and width_diff < MAX_WIDTH_DIFF and height_diff < MAX_HEIGHT_DIFF:
                    matched_contours_idx.append(
                        d2['idx'])  # keep only the indices that satisfy the thresholds above

            # append this contour
            matched_contours_idx.append(d1['idx'])  # d1 itself was skipped above, so add it too

            if len(matched_contours_idx
                   ) < MIN_N_MATCHED:  # too few characters for a plate, skip
                continue

            matched_result_idx.append(
                matched_contours_idx)  # accept as a final candidate

            unmatched_contour_idx = []  # compare the leftovers against each other once more
            for d4 in contour_list:
                if d4['idx'] not in matched_contours_idx:  # contours outside matched_contours_idx
                    unmatched_contour_idx.append(d4['idx'])

            unmatched_contour = np.take(
                possible_contours, unmatched_contour_idx
            )  # collect them with numpy.take

            # recursive
            recursive_contour_list = find_chars(unmatched_contour)  # run again on the leftovers

            for idx in recursive_contour_list:
                matched_result_idx.append(
                    idx)  # fold the recursive results back into matched_result_idx

            break

        return matched_result_idx

    result_idx = find_chars(possible_contours)

    matched_result = []  # contour info for the candidate plates
    for idx_list in result_idx:
        matched_result.append(np.take(possible_contours, idx_list))

    # visualize possible contours (draw the plate candidates)
    temp_result = np.zeros((height, width, channel), dtype=np.uint8)

    for r in matched_result:  # draw the regions that look like a plate
        for d in r:
            #         cv2.drawContours(temp_result, d['contour'], -1, (255, 255, 255))
            cv2.rectangle(temp_result,
                          pt1=(d['x'], d['y']),
                          pt2=(d['x'] + d['w'], d['y'] + d['h']),
                          color=(255, 255, 255),
                          thickness=2)

    # --Rotate Plate Images--

    plate_imgs = []  # list of candidate plate images
    plate_infos = []  # list of candidate plate info

    longest_idx, longest_text = -1, 0  # initialize the index values
    plate_chars = []  # list of plate strings

    while charsok == 0:  # repeat until a plausible plate string is read
        PLATE_WIDTH_PADDING = 1.2 + add_w_padding  # horizontal padding (tutorial default 1.3)
        PLATE_HEIGHT_PADDING = 1.51 + add_h_padding  # vertical padding (tutorial default 1.5)
        MIN_PLATE_RATIO = 3  # 3  minimum plate aspect ratio
        MAX_PLATE_RATIO = 10  # 10  maximum plate aspect ratio

        for i, matched_chars in enumerate(matched_result):
            sorted_chars = sorted(matched_chars, key=lambda x: x['cx'])

            plate_cx = (sorted_chars[0]['cx'] + sorted_chars[-1]['cx']) / 2
            plate_cy = (sorted_chars[0]['cy'] + sorted_chars[-1]['cy']) / 2

            plate_width = (sorted_chars[-1]['x'] + sorted_chars[-1]['w'] -
                           sorted_chars[0]['x']) * PLATE_WIDTH_PADDING

            sum_height = 0
            for d in sorted_chars:
                sum_height += d['h']

            plate_height = int(sum_height / len(sorted_chars) *
                               PLATE_HEIGHT_PADDING)

            triangle_height = sorted_chars[-1]['cy'] - sorted_chars[0][
                'cy']  # the tilt angle theta comes from the triangle spanned by the characters
            triangle_hypotenus = np.linalg.norm(
                np.array([sorted_chars[0]['cx'], sorted_chars[0]['cy']]) -
                np.array([sorted_chars[-1]['cx'], sorted_chars[-1]['cy']]))

            angle = np.degrees(np.arcsin(
                triangle_height / triangle_hypotenus))  # arcsin gives radians; convert to degrees

            rotation_matrix = cv2.getRotationMatrix2D(
                center=(plate_cx, plate_cy), angle=angle,
                scale=1.0)  # rotation matrix

            img_rotated = cv2.warpAffine(img_thresh,
                                         M=rotation_matrix,
                                         dsize=(width, height))  # de-skew the image

            img_cropped = cv2.getRectSubPix(  # crop the desired patch from the rotated image
                img_rotated,
                patchSize=(int(plate_width), int(plate_height)),
                center=(int(plate_cx), int(plate_cy)))

            ratio = img_cropped.shape[1] / img_cropped.shape[0]
            if ratio < MIN_PLATE_RATIO or ratio > MAX_PLATE_RATIO:  # skip if the aspect ratio is out of range
                continue

            plate_imgs.append(img_cropped)  # append to plate_imgs

            plate_infos.append({  # append to plate_infos
                'x': int(plate_cx - plate_width / 2),
                'y': int(plate_cy - plate_height / 2),
                'w': int(plate_width),
                'h': int(plate_height)
            })

        # --Another Thresholding to Find Chars-- (threshold again inside the found region)

        for i, plate_img in enumerate(plate_imgs):
            if numcheck > 3:  # more than 3 digits found in the candidate region: very likely a plate
                break

            plate_img = cv2.resize(plate_img, dsize=(0, 0), fx=1.6, fy=1.6)
            _, plate_img = cv2.threshold(plate_img,
                                         thresh=0.0,
                                         maxval=255.0,
                                         type=cv2.THRESH_BINARY
                                         | cv2.THRESH_OTSU)  # threshold

            # find contours again (same as above)
            contours, hierarchy = cv2.findContours(
                plate_img, cv2.RETR_LIST,
                cv2.CHAIN_APPROX_SIMPLE)  # find the contours again

            plate_min_x, plate_min_y = plate_img.shape[1], plate_img.shape[0]
            plate_max_x, plate_max_y = 0, 0

            for contour in contours:
                x, y, w, h = cv2.boundingRect(
                    contour)  # recompute boundingRect for each contour

                area = w * h  # area
                ratio = w / h  # aspect ratio

                if area > MIN_AREA \
                        and w > MIN_WIDTH and h > MIN_HEIGHT \
                        and MIN_RATIO < ratio < MAX_RATIO:  # re-check against the same criteria as before
                    if x < plate_min_x:  # track the min/max of x and y
                        plate_min_x = x
                    if y < plate_min_y:
                        plate_min_y = y
                    if x + w > plate_max_x:
                        plate_max_x = x + w
                    if y + h > plate_max_y:
                        plate_max_y = y + h

            img_result = plate_img[plate_min_y:plate_max_y, plate_min_x:
                                   plate_max_x]  # crop the image down to just the plate

            img_result = cv2.GaussianBlur(img_result, ksize=(3, 3),
                                          sigmaX=0)  # GaussianBlur (noise removal)
            _, img_result = cv2.threshold(img_result,
                                          thresh=0.0,
                                          maxval=255.0,
                                          type=cv2.THRESH_BINARY
                                          | cv2.THRESH_OTSU)  # threshold once more
            img_result = cv2.copyMakeBorder(
                img_result,
                top=10,
                bottom=10,
                left=10,
                right=10,
                borderType=cv2.BORDER_CONSTANT,  # pad the image with a border
                value=(0, 0, 0))  # black

            cv2.imwrite('00.jpg', img_result)
            chars = pytesseract.image_to_string(
                Image.open('00.jpg'), config='--psm 7 --oem 0',
                lang='kor')  # reload the saved image and read it with pytesseract
            nowtime = time.time()
            #print("Text right after reading the image: " + chars)

            result_chars = ''  # recognized plate string
            has_digit = False
            for c in chars:  # keep only Hangul characters and digits, dropping special characters
                if ord('가') <= ord(c) <= ord('힣') or c.isdigit(
                ):  # is it a digit or a Hangul character?
                    if c.isdigit():
                        has_digit = True  # remember whether any digit was seen
                    result_chars += c

            for n in range(len(result_chars)):  # re-check the plate format and trim the string
                if len(result_chars) < 7:  # shorter than 7 characters (the minimum plate length)
                    break
                elif result_chars[0].isdigit(
                ) == False:  # trim the first character if it is a letter (not a digit)
                    result_chars = result_chars[1:result_chars.__len__()]

                elif result_chars[
                        len(result_chars) -
                        1].isdigit() == False:  # trim the last character if it is Hangul (not a digit)
                    result_chars = result_chars[0:(result_chars.__len__() - 1)]

            plate_chars.append(
                result_chars)  # append result_chars to plate_chars

            for j in range(
                    len(result_chars)
            ):  # check the plate layout, e.g. 12가3456 (7 chars) or 123가4567 (8 chars)
                if len(result_chars) < 7:  # shorter than 7 characters (the minimum plate length)
                    break
                elif (j == 2 and result_chars[j].isdigit()
                      == True) and result_chars[j + 1].isdigit(
                      ) == True:  # the 3rd and 4th characters are both digits (no letter)
                    break
                elif (j != 2 and j != 3) and result_chars[j].isdigit(
                ) == False:  # a letter outside the 3rd/4th (letter) positions
                    break
                elif (j == 2
                      and result_chars[j].isdigit() == False) and result_chars[
                          j + 1].isdigit() == False:  # the 3rd and 4th characters are both letters
                    break
                if 6 <= j and result_chars[j].isdigit(
                ) == True:  # digits where expected through position 7: looks like a plate
                    charsok = 1  # stop the loop
                    break

            if has_digit and len(result_chars) > longest_text:  # track the longest digit-bearing result
                longest_text = len(result_chars)  # update the length too, otherwise the comparison never changes
                longest_idx = i  # remember its index

            for numch in result_chars:  # 3+ digits means this plate_img is very likely the plate, so keep the image fixed and only tune the padding
                if numch.isdigit() == True:
                    numcheck += 1

        # --Result--

        info = plate_infos[longest_idx]  # plate coordinates
        chars = plate_chars[longest_idx]  # plate string

        # Grow the horizontal padding by 0.1 -> reset it and grow the vertical padding by 0.1
        # -> grow both by 0.1; once everything reaches 0.6, give up.
        if add_w_padding <= 0.6 and w_padding_max == 0:  # try growing only the horizontal padding first
            add_w_padding += 0.1  # grow the horizontal padding by 0.1

        elif w_padding_max == 1 and add_h_padding <= 0.6 and h_padding_max == 0:  # horizontal padding maxed out, vertical still below the limit
            add_w_padding = 0  # reset the horizontal padding to its default (grow only the vertical)
            add_h_padding += 0.1  # grow the vertical padding by 0.1

        if round(add_w_padding, 1) == 0.6:  # horizontal padding reached its limit (rounded: repeated += 0.1 accumulates float error)
            w_padding_max = 1
        if round(add_h_padding, 1) == 0.6:  # vertical padding reached its limit
            h_padding_max = 1
            add_w_padding = 0
            add_h_padding = 0

        if w_padding_max == 1 and h_padding_max == 1:  # grow both paddings by 0.1
            add_w_padding += 0.1
            add_h_padding += 0.1
            if round(add_w_padding, 1) == 0.6 and round(add_h_padding, 1) == 0.6:  # both grown all the way to 0.6 (no plate found)
                break
        # reset for the next pass
        numcheck = 0
        plate_imgs = []
        plate_chars = []

    sec = nowtime - prevtime
    print("Elapsed time: %0.5f" % sec)
    print("Final result: " + chars)

    img_out = src.copy()
    cv2.rectangle(img_out,
                  pt1=(info['x'], info['y']),
                  pt2=(info['x'] + info['w'], info['y'] + info['h']),
                  color=(255, 0, 0),
                  thickness=2)  # draw the plate region on the original image

    cv2.imwrite('result.jpg', img_out)  # the original image with the plate region marked

    return chars  # return the result
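# A minimal usage sketch for the function above (the file name is hypothetical;
# it assumes cv2, numpy, time, pytesseract and PIL.Image are imported and a
# Korean-capable Tesseract is installed):
#     plate = number_recognition("car_crop.jpg")
#     print(plate)  # e.g. "12가3456"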
Exemple #40
0
    def cutout(
        pageimg: np.ndarray,
        coordstring: str,
        mode: CutMode,
        angle=0,
        cval=None,
        scale=1,
    ):
        """Cut region from image
        Parameters
        ----------
        pageimg : page image
        coordstring : coordinates from PAGE in the form "c1_1,c1_2 c2_1,c2_2 ..."
        mode :
            CutMode.BOX : cut straight rectangle around coordinates
            CutMode.POLYGON : cut polygon around coordinates
            CutMode.MBR : cut minimum bounding rectangle around coordinates
        angle :
            float : rotate angle in clockwise direction
            None : calculate angle from minimum bounding rectangle
        cval :
            colour : mask and fill empty regions with
            None : calculate via maximum pixel
        scale : factor to scale the coordinates with
        """

        coords = [p.split(",") for p in coordstring.split()]
        coords = [(int(scale * int(c[1])), int(scale * int(c[0])))
                  for c in coords]
        coords = np.array(coords, np.int32).reshape((-1, 1, 2))
        maxX, maxY = np.amax(coords, 0).squeeze()
        minX, minY = np.amin(coords, 0).squeeze()
        cut = pageimg[minX:maxX + 1, minY:maxY + 1]
        if cut.size == 0:
            return cut  # empty image
        coords -= (minX, minY)
        maxX, maxY = (maxX - minX, maxY - minY)
        minX, minY = (0, 0)

        # calculate angle if needed
        if angle is None:
            mbr = cv.minAreaRect(coords)
            angle = mbr[2] if maxX <= maxY else mbr[2] - 90

        # set cval if needed
        if cval is None:
            if cut.ndim == 2:
                cval = np.amax(cut).item()
            else:
                x, y = np.unravel_index(np.argmax(np.mean(cut, axis=2)),
                                        cut.shape[:2])
                cval = cut[x, y, :].tolist()

        # rotate cut
        if angle:
            (h, w) = cut.shape[:2]
            (cX, cY) = (w // 2, h // 2)
            M = cv.getRotationMatrix2D((cX, cY), -angle, 1.0)
            cos = np.abs(M[0, 0])
            sin = np.abs(M[0, 1])
            # compute the new bounding dimensions of the image
            nW = np.ceil((h * sin) + (w * cos)).astype(int)
            nH = np.ceil((h * cos) + (w * sin)).astype(int)
            # adjust the rotation matrix to take into account translation
            M[0, 2] += (nW / 2) - cX
            M[1, 2] += (nH / 2) - cY
            # rotate coords
            coords = cv.transform(coords[..., ::-1], M)
            minX, minY = np.amin(coords, 0).squeeze()
            maxX, maxY = np.amax(coords, 0).squeeze()
            # rotate image
            cut = cv.warpAffine(
                cut,
                M,
                (nW, nH),
                flags=cv.INTER_LINEAR,
                borderMode=cv.BORDER_CONSTANT,
                borderValue=cval,
            )
        else:
            coords = coords[..., ::-1]
            minX, minY = minY, minX
            maxX, maxY = maxY, maxX

        # simplify coordinates with MBR
        if mode is CutMode.MBR:
            mbr = cv.minAreaRect(coords)
            coords = cv.boxPoints(mbr).astype(int).reshape(-1, 1, 2)

        # mask pixels outside coords
        if mode in (CutMode.POLYGON, CutMode.MBR):
            box = (np.ones(cut.shape) * cval).astype(cut.dtype)
            mask = np.zeros(cut.shape, dtype=np.uint8)
            mask = cv.fillPoly(mask, [coords], color=[255] * cut.ndim)
            mask_inv = cv.bitwise_not(mask)
            fg = cv.bitwise_and(cut, mask)
            bg = cv.bitwise_and(box, mask_inv)
            cut = cv.add(fg, bg)

        return cut[minY:maxY + 1, minX:maxX + 1]
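# A hedged usage sketch for cutout (the file name and coordinate string are
# hypothetical; assumes cv for OpenCV, numpy and the CutMode enum are in scope):
#     page = cv.imread("page.png")
#     line = cutout(page, "10,20 200,20 200,60 10,60", CutMode.POLYGON,
#                   angle=None, cval=None, scale=1)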
def create_eye_mask_filter(image_path=''):
    create_eye_mask_filter_logger = logging.getLogger(
        'selfie_with_filters.create_eye_mask_filter')

    create_eye_mask_filter_logger.info(
        "In the create_eye_mask_filter method ...")

    filtered_image = ''

    # 0. check if there is a valid file
    if not image_path:
        create_eye_mask_filter_logger.error(
            "Error! No Image provided! Can't create filter if there is no image!!"
        )
        return ""

    # 1. read image (selfie)

    frame = cv2.imread(image_path)

    create_eye_mask_filter_logger.info("The image is at: %s" % image_path)
    image_height, image_width = frame.shape[:2]
    create_eye_mask_filter_logger.info("Image Height: %d, Image Width: %d" %
                                       (image_height, image_width))

    # 2. now run a detect faces on this image

    faces = get_facial_landmarks(image_path=image_path)

    for face_idx, face in enumerate(faces):

        create_eye_mask_filter_logger.info(face["BoundingBox"])
        bb_left = face["BoundingBox"]["Left"]
        bb_left_px = int(bb_left * image_width)

        bb_top = face["BoundingBox"]["Top"]
        bb_top_px = int(bb_top * image_height)

        bb_height = face["BoundingBox"]["Height"]
        bb_height_px = int(bb_height * image_height)

        bb_width = face["BoundingBox"]["Width"]
        bb_width_px = int(bb_width * image_width)

        create_eye_mask_filter_logger.info("%f, %f" % (bb_left, bb_left_px))
        create_eye_mask_filter_logger.info("%f, %f" % (bb_top, bb_top_px))
        create_eye_mask_filter_logger.info("%f, %f" %
                                           (bb_height, bb_height_px))
        create_eye_mask_filter_logger.info("%f, %f" % (bb_width, bb_width_px))

        bb_left_top = (bb_left_px, bb_top_px)
        bb_bottom_right = (bb_left_px + bb_width_px, bb_top_px + bb_height_px)
        cv2.rectangle(frame, bb_left_top, bb_bottom_right, (0, 255, 0), 2)

        # Now get the nose positions for each face
        if "Landmarks" in face:
            upper_jawline_left = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="upperJawlineLeft",
                image_width=image_width,
                image_height=image_height)
            upper_jawline_right = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="upperJawlineRight",
                image_width=image_width,
                image_height=image_height)

            leftEyeBrowLeft = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="leftEyeBrowLeft",
                image_width=image_width,
                image_height=image_height)
            leftEyeBrowUp = get_landmark_value(landmarks=face["Landmarks"],
                                               parameter_name="leftEyeBrowUp",
                                               image_width=image_width,
                                               image_height=image_height)
            leftEyeBrowRight = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="leftEyeBrowRight",
                image_width=image_width,
                image_height=image_height)

            rightEyeBrowLeft = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="rightEyeBrowLeft",
                image_width=image_width,
                image_height=image_height)
            rightEyeBrowUp = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="rightEyeBrowUp",
                image_width=image_width,
                image_height=image_height)
            rightEyeBrowRight = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="rightEyeBrowRight",
                image_width=image_width,
                image_height=image_height)

        else:
            create_eye_mask_filter_logger.warning(
                "No Landmarks found in face!")
            continue

        create_eye_mask_filter_logger.info(
            "Retrieved Jaw positions for face: %d" % (face_idx + 1))

        create_eye_mask_filter_logger.info(upper_jawline_left)
        cv2.circle(frame, upper_jawline_left, 3, (255, 0, 0), -1)
        cv2.circle(frame, upper_jawline_right, 3, (255, 0, 0), -1)

        cv2.circle(frame, leftEyeBrowLeft, 3, (0, 255, 0), -1)
        cv2.circle(frame, leftEyeBrowUp, 3, (0, 255, 0), -1)
        cv2.circle(frame, leftEyeBrowRight, 3, (0, 255, 0), -1)

        cv2.circle(frame, rightEyeBrowLeft, 3, (0, 0, 255), -1)
        cv2.circle(frame, rightEyeBrowUp, 3, (0, 0, 255), -1)
        cv2.circle(frame, rightEyeBrowRight, 3, (0, 0, 255), -1)

        eye_mask_width = int(bb_width_px * 1.25)
        eye_mask_height = int(bb_height_px / 2.5)
        create_eye_mask_filter_logger.info(eye_mask_width)
        create_eye_mask_filter_logger.info(eye_mask_height)

        #em_left_top = (int(bb_left_px-eye_mask_width/7), int(bb_top_px-eye_mask_height))
        em_left_top = (upper_jawline_left[0],
                       int(upper_jawline_left[1] - eye_mask_height / 2))
        #em_bottom_right = (int(bb_left_px+(eye_mask_width*1)), int(bb_top_px-eye_mask_height/2))
        em_bottom_right = (upper_jawline_right[0],
                           int(upper_jawline_left[1] + eye_mask_height / 10))

        create_eye_mask_filter_logger.info(em_left_top)
        create_eye_mask_filter_logger.info(em_bottom_right)

        cv2.rectangle(frame, em_left_top, em_bottom_right, (0, 0, 255), 2)

        # 3. apply the eye mask overlay

        eyemask_image = cv2.imread(config.__EYE_MASK_FILTER__)
        rows, cols, _ = frame.shape
        eyemask_mask = np.zeros((rows, cols), np.uint8)

        eyemask_mask.fill(0)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Adding the eye mask
        eyemask = cv2.resize(eyemask_image, (eye_mask_width, eye_mask_height))
        eyemask_gray = cv2.cvtColor(eyemask, cv2.COLOR_BGR2GRAY)
        _, eyemask_mask = cv2.threshold(eyemask_gray, 25, 255,
                                        cv2.THRESH_BINARY_INV)

        eyemask_area = frame[em_left_top[1]:em_left_top[1] + eye_mask_height,
                             em_left_top[0]:em_left_top[0] + eye_mask_width]
        eyemask_area_no_eyemask = cv2.bitwise_and(eyemask_area,
                                                  eyemask_area,
                                                  mask=eyemask_mask)
        final_eyemask = cv2.add(eyemask_area_no_eyemask, eyemask)

        #cv2.imshow("eyemask", final_eyemask)

        frame[em_left_top[1]:em_left_top[1] + eye_mask_height,
              em_left_top[0]:em_left_top[0] + eye_mask_width] = final_eyemask

        create_eye_mask_filter_logger.info("Added the eyemask for face: %d" %
                                           (face_idx + 1))
        '''
		eye_mask_image = cv2.imread(__EYE_MASK_FILTER__)
		rows, cols, _ = frame.shape
		eyemask_mask = np.zeros((rows, cols), np.uint8)

		eyemask_mask.fill(0)
		gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

		# Adding the new crown
		eye_mask = cv2.resize(eye_mask_image, (eye_mask_width, eye_mask_height))
		eye_mask_gray = cv2.cvtColor(eye_mask, cv2.COLOR_BGR2GRAY)
		_, eyemask_mask = cv2.threshold(eye_mask_gray, 25, 255, cv2.THRESH_BINARY_INV)

		eyemask_area = frame[em_left_top[1]: em_left_top[1] + eye_mask_height,
			em_left_top[0]: em_left_top[0] + eye_mask_width]
		eyemask_area_no_eyemask = cv2.bitwise_and(eyemask_area, eyemask_area, mask=eyemask_mask)
		final_eyemask = cv2.add(eyemask_area_no_eyemask, eye_mask)

		frame[em_left_top[1]: em_left_top[1] + eye_mask_height,
			em_left_top[0]: em_left_top[0] + eye_mask_width] = final_eyemask

		create_eye_mask_filter_logger.info("Added the eyemask for face: %d" % (face_idx+1))
		'''

    # 4. display the frame thus computed
    cv2.imshow("ImageFrame", frame)
    while True:
        key = cv2.waitKey(0)

        print(key)
        print(int(key))
        test = int(key) == 27
        print(test)

        if int(key) == 27:
            cv2.destroyAllWindows()
            print("Destroyed the Window")
            break

    return "TESTONLY"
    '''
	# 5. save the image
	filtered_image = "%s/%s" % (__CEREBRO_MEDIA_DIR__, __FILTERED_IMAGE_NAME__)
	cv2.imwrite(filtered_image, frame)
	'''

    return filtered_image
def create_flower_crown_filter(image_path=''):
    create_flower_crown_filter_logger = logging.getLogger(
        'selfie_with_filters.create_flower_crown_filter')

    create_flower_crown_filter_logger.info(
        "In the create_flower_crown_filter method ...")

    # 0. check if there is a valid file
    if not image_path:
        create_flower_crown_filter_logger.error(
            "Error! No Image provided! Can't create filter if there is no image!!"
        )
        return ""

    # 1. read image (selfie)

    frame = cv2.imread(image_path)

    create_flower_crown_filter_logger.info("The image is at: %s" % image_path)
    image_height, image_width = frame.shape[:2]
    create_flower_crown_filter_logger.info(
        "Image Height: %d, Image Width: %d" % (image_height, image_width))

    # 2. now run a detect faces on this image

    faces = get_facial_landmarks(image_path=image_path)

    for face_idx, face in enumerate(faces):

        create_flower_crown_filter_logger.info(face["BoundingBox"])
        bb_left = face["BoundingBox"]["Left"]
        bb_left_px = int(bb_left * image_width)

        bb_top = face["BoundingBox"]["Top"]
        bb_top_px = int(bb_top * image_height)

        bb_height = face["BoundingBox"]["Height"]
        bb_height_px = int(bb_height * image_height)

        bb_width = face["BoundingBox"]["Width"]
        bb_width_px = int(bb_width * image_width)

        create_flower_crown_filter_logger.info(
            "BB - Left: %f, BB - Left (in px): %d" % (bb_left, bb_left_px))
        create_flower_crown_filter_logger.info(
            "BB - Top: %f, BB - Top (in px): %d" % (bb_top, bb_top_px))
        create_flower_crown_filter_logger.info(
            "BB - Height: %f, BB - Height (in px): %d" %
            (bb_height, bb_height_px))
        create_flower_crown_filter_logger.info(
            "BB - Width: %f, BB - Width (in px): %d" % (bb_width, bb_width_px))

        bb_left_top = (bb_left_px, bb_top_px)
        bb_bottom_right = (bb_left_px + bb_width_px, bb_top_px + bb_height_px)
        #cv2.rectangle(frame,bb_left_top, bb_bottom_right, (0,255,0), 2)

        # Now get the nose positions for each face
        if "Landmarks" in face:
            upper_jawline_left = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="upperJawlineLeft",
                image_width=image_width,
                image_height=image_height)
            upper_jawline_right = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="upperJawlineRight",
                image_width=image_width,
                image_height=image_height)
        else:
            create_flower_crown_filter_logger.warning(
                "No Landmarks found in face!")
            continue

        create_flower_crown_filter_logger.info(
            "Retrieved Jaw positions for face: %d" % (face_idx + 1))

        #cv2.circle(frame, upper_jawline_left, 3, (255,0,0), -1)
        #cv2.circle(frame, upper_jawline_right, 3, (255,0,0), -1)

        head_crown_width = int(bb_width_px * 1.25)
        head_crown_height = int(bb_height_px / 2.5)
        create_flower_crown_filter_logger.info(head_crown_width)
        create_flower_crown_filter_logger.info(head_crown_height)

        hc_left_top = (int(bb_left_px - head_crown_width / 7),
                       int(bb_top_px - head_crown_height))
        hc_bottom_right = (int(bb_left_px + (head_crown_width * 3)),
                           int(bb_top_px - head_crown_height / 2))
        create_flower_crown_filter_logger.info(hc_left_top)
        create_flower_crown_filter_logger.info(hc_bottom_right)

        #cv2.rectangle(frame, hc_left_top, hc_bottom_right, (0,0,255), 2)

        # 3. apply effects of flower crown

        crown_image = cv2.imread(config.__FLOWER_CROWN_FILTER__)
        rows, cols, _ = frame.shape
        crown_mask = np.zeros((rows, cols), np.uint8)

        crown_mask.fill(0)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Adding the new crown
        flower_crown = cv2.resize(crown_image,
                                  (head_crown_width, head_crown_height))
        flower_crown_gray = cv2.cvtColor(flower_crown, cv2.COLOR_BGR2GRAY)
        _, crown_mask = cv2.threshold(flower_crown_gray, 25, 255,
                                      cv2.THRESH_BINARY_INV)

        crown_area = frame[hc_left_top[1]:hc_left_top[1] + head_crown_height,
                           hc_left_top[0]:hc_left_top[0] + head_crown_width]
        crown_area_no_crown = cv2.bitwise_and(crown_area,
                                              crown_area,
                                              mask=crown_mask)
        final_crown = cv2.add(crown_area_no_crown, flower_crown)

        frame[hc_left_top[1]:hc_left_top[1] + head_crown_height,
              hc_left_top[0]:hc_left_top[0] + head_crown_width] = final_crown

        create_flower_crown_filter_logger.info("Added the crown for face: %d" %
                                               (face_idx + 1))

    # 4. display the frame thus computed
    '''
	cv2.imshow("ImageFrame", frame)
	while True:
		key = cv2.waitKey(0)

		print(key)
		print(int(key))
		test = int(key) == 27 
		print(test)

		if int(key) == 27:
			cv2.destroyAllWindows()
			print("Destroyed the Window")
			break

	return "TESTONLY"
	'''

    # 5. save the image
    filtered_image = "%s/%s" % (config.__CEREBRO_MEDIA_DIR__,
                                config.__FILTERED_IMAGE_NAME__)
    cv2.imwrite(filtered_image, frame)

    return filtered_image
Exemple #43
0
    # Blend new frame and the rest of the pano
    new_frame = np.zeros(pano_size, dtype=np.uint8)
    new_frame[h.tolist(), theta.tolist(), :] = frame

    empty_pano = np.zeros(pano_size, dtype=np.uint8)

    # get the intersecting region
    ret1, mask1 = cv2.threshold(cv2.cvtColor(new_frame, cv2.COLOR_BGR2GRAY), 0,
                                255, cv2.THRESH_BINARY)
    ret2, mask2 = cv2.threshold(cv2.cvtColor(pano, cv2.COLOR_BGR2GRAY), 0, 255,
                                cv2.THRESH_BINARY)

    # empty_pano: the new frame only where it does not overlap, with the old pano added on top
    empty_pano = cv2.add(empty_pano,
                         new_frame,
                         mask=(cv2.bitwise_and(mask1, cv2.bitwise_not(mask2))))
    empty_pano = cv2.add(empty_pano, pano)

    # pano has the newest frame on top
    pano[h.tolist(), theta.tolist(), :] = frame

    # blend the two
    pano = cv2.addWeighted(empty_pano, 0.5, pano, 0.5, 0)

# Crop the final pano
# source: http://codereview.stackexchange.com/questions/132914/crop-black-border-of-image-using-numpy
ret, mask = cv2.threshold(cv2.cvtColor(pano, cv2.COLOR_BGR2GRAY), 0, 255,
                          cv2.THRESH_BINARY)
pano = pano[np.ix_(mask.any(1), mask.any(0))]
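# The crop above keeps only the rows and columns that contain any non-zero
# pixel: mask.any(1) flags non-empty rows, mask.any(0) flags non-empty columns,
# and np.ix_ builds the index grid selecting their intersection. On a toy array:
#     m = np.array([[0, 0], [0, 9]])
#     m[np.ix_(m.any(1), m.any(0))]  # -> array([[9]])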
      ->imread => image(s) read
      ->add => add images (addition)
      ->imshow => show the image(s)
      
Script description:
    1. Download 2 different images.
    2. Apply basic math operations.
     -> Add two images.
     -> Subtract two images.
'''

#Import library (ies)
import cv2

#def add_images(x, y):
#    #Here the images are added.
#    new_image = cv2.add(x, y)
#    cv2.imshow('New image', new_image)

#Main :::::::::::::::::::::::::::::
img_1 = cv2.imread('images/car1.jpg')
img_2 = cv2.imread('images/car1.jpg')  # note: the same file as img_1, so the add simply brightens the image

new_image = cv2.add(img_1, img_2)
cv2.imshow('New image', new_image)

#add_images(img_1, img_2)

cv2.waitKey(0)
cv2.destroyAllWindows()
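# The docstring above also promises a subtraction step that the script never
# performs; a minimal sketch completing it (same images, assumed loaded as above):
#     sub_image = cv2.subtract(img_1, img_2)
#     cv2.imshow('Subtracted image', sub_image)
#     cv2.waitKey(0)
#     cv2.destroyAllWindows()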
Exemple #45
0
def predict():

    shirtno = int(request.form["shirt"])
    # pantno = int(request.form["pant"])

    cv.waitKey(1)
    cap = cv.VideoCapture(0)
    ih = shirtno
    # i=pantno
    while True:
        imgarr = ["shirt1.png", 'shirt2.png', 'shirt51.jpg', 'shirt6.png']

        #ih=input("Enter the shirt number you want to try")
        imgshirt = cv.imread(imgarr[ih - 1], 1)  #original img in bgr
        if ih == 3:
            shirtgray = cv.cvtColor(imgshirt,
                                    cv.COLOR_BGR2GRAY)  #grayscale conversion
            ret, orig_masks_inv = cv.threshold(
                shirtgray, 200, 255, cv.THRESH_BINARY
            )  #there may be some issues with image threshold...depending on the color/contrast of image
            orig_masks = cv.bitwise_not(orig_masks_inv)

        else:
            shirtgray = cv.cvtColor(imgshirt,
                                    cv.COLOR_BGR2GRAY)  #grayscale conversion
            ret, orig_masks = cv.threshold(
                shirtgray, 0, 255, cv.THRESH_BINARY
            )  #there may be some issues with image threshold...depending on the color/contrast of image
            orig_masks_inv = cv.bitwise_not(orig_masks)
        origshirtHeight, origshirtWidth = imgshirt.shape[:2]

        # imgarr=["pant7.jpg",'pant21.png']
        # #i=input("Enter the pant number you want to try")
        # imgpant = cv.imread(imgarr[i-1],1)
        # imgpant=imgpant[:,:,0:3]#original img in bgr
        # pantgray = cv.cvtColor(imgpant,cv.COLOR_BGR2GRAY) #grayscale conversion
        # if i==1:
        #     ret, orig_mask = cv.threshold(pantgray,100 , 255, cv.THRESH_BINARY) #there may be some issues with image threshold...depending on the color/contrast of image
        #     orig_mask_inv = cv.bitwise_not(orig_mask)
        # else:
        #     ret, orig_mask = cv.threshold(pantgray,50 , 255, cv.THRESH_BINARY)
        #     orig_mask_inv = cv.bitwise_not(orig_mask)
        # origpantHeight, origpantWidth = imgpant.shape[:2]

        face_cascade = cv.CascadeClassifier(
            'haarcascade_frontalface_default.xml')

        ret, img = cap.read()

        img_h = img.shape[0]  # shape is (rows, cols): height first
        img_w = img.shape[1]
        # img_w = int(width*0.75)
        # img_h = int(height*0.75)
        # img = cv.resize(img[:,:,0:3],(1000,1000), interpolation = cv.INTER_AREA)
        cv.namedWindow("img", cv.WINDOW_NORMAL)
        # cv.setWindowProperty('img',cv.WND_PROP_FULLSCREEN,cv.cv.CV_WINDOW_FULLSCREEN)
        cv.resizeWindow("img", frame_width, frame_height)
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray,
                                              scaleFactor=1.1,
                                              minNeighbors=4)

        for (x, y, w, h) in faces:
            cv.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 1)

            face_w = w
            face_h = h
            face_x1 = x
            face_x2 = face_x1 + face_w
            face_y1 = y
            face_y2 = face_y1 + face_h

            # set the shirt size in relation to tracked face
            shirtWidth = int(2.9 * face_w + offset)
            shirtHeight = int((shirtWidth * origshirtHeight / origshirtWidth) +
                              offset / 3)
            cv.putText(img, (str(shirtWidth) + " " + str(shirtHeight)),
                       (x + w, y + h), cv.FONT_HERSHEY_COMPLEX, 1,
                       (255, 255, 255), 1)

            shirt_x1 = face_x2 - int(face_w / 2) - int(
                shirtWidth / 2)  # setting shirt centered wrt recognized face
            shirt_x2 = shirt_x1 + shirtWidth
            shirt_y1 = face_y2 + 5  # some padding between face and upper shirt. Depends on the shirt img
            shirt_y2 = shirt_y1 + shirtHeight

            # Check for clipping
            if shirt_x1 < 0:
                shirt_x1 = 0
            if shirt_y1 < 0:
                shirt_y1 = 0
            if shirt_x2 > img_w:
                shirt_x2 = img_w
            if shirt_y2 > img_h:
                shirt_y2 = img_h

            shirtWidth = shirt_x2 - shirt_x1
            shirtHeight = shirt_y2 - shirt_y1
            if shirtWidth < 0 or shirtHeight < 0:
                continue

        # Re-size the original image and the masks to the shirt sizes
            shirt = cv.resize(imgshirt, (shirtWidth, shirtHeight),
                              interpolation=cv.INTER_AREA)
            mask = cv.resize(orig_masks, (shirtWidth, shirtHeight),
                             interpolation=cv.INTER_AREA)
            mask_inv = cv.resize(orig_masks_inv, (shirtWidth, shirtHeight),
                                 interpolation=cv.INTER_AREA)

            # take ROI for shirt from background equal to size of shirt image
            roi = img[shirt_y1:shirt_y2, shirt_x1:shirt_x2]

            # roi_bg contains the original image only where the shirt is not
            # in the region that is the size of the shirt.
            roi_bg = cv.bitwise_and(roi, roi, mask=mask_inv)
            roi_fg = cv.bitwise_and(shirt, shirt, mask=mask)
            dst = cv.add(roi_bg, roi_fg)
            img[shirt_y1:shirt_y2, shirt_x1:shirt_x2] = dst

            # kernel = np.ones((5, 5), np.float32) / 25
            # imgshirtt = cv2.filter2D(dst, -1, kernel)

            # if face_y1 + shirtHeight +face_h< frame_height:
            #     #cv2.putText(frame, "press 'n' key for next item and 'p' for previous item", (x, y),cv2.FONT_HERSHEY_COMPLEX, .8, (255, 255, 255),1)
            #     img[shirt_y1:shirt_y2, shirt_x1:shirt_x2] = dst

            # else:
            #     text = 'Too close to Screen'
            #     #cv2.putText(frame, "press 'n'  key for next item and 'p' for previous item", (x-200, y-200),cv2.FONT_HERSHEY_COMPLEX, .8, (255, 255, 255), 1)
            #     cv.putText(img, text, (int(face_x1-face_w/4.3), int(face_y1)), cv.FONT_HERSHEY_COMPLEX, 1,(0, 0, 250), 1)

            # if keyboard.is_pressed('m' or 'M'):
            #     ID= 0

            # if keyboard.is_pressed('W' or 'w'):
            #     ID= 1

            # if keyboard.is_pressed('i'):
            #     if offset>100:
            #         print("THIS IS THE MAX SIZE AVAILABLE")
            #     else:
            #         offset+=50
            #         print('+ pressed')

            # if keyboard.is_pressed('d'):
            #     if offset <0:
            #         print("THIS IS THE MIN SIZE AVAILABLE")
            #     else:
            #         offset -= 50
            #         print('- pressed')

        # # Re-size the original image and the masks to the shirt sizes
        # shirt = cv.resize(imgshirt, (shirtWidth,shirtHeight), interpolation = cv.INTER_AREA) #resize all,the masks you made,the originla image,everything
        # mask = cv.resize(orig_masks, (shirtWidth,shirtHeight), interpolation = cv.INTER_AREA)
        # masks_inv = cv.resize(orig_masks_inv, (shirtWidth,shirtHeight), interpolation = cv.INTER_AREA)
        # # take ROI for shirt from background equal to size of shirt image
        # rois = img[y1s:y2s, x1s:x2s]
        #     # roi_bg contains the original image only where the shirt is not
        #     # in the region that is the size of the shirt.
        # num=rois
        # roi_bgs = cv.bitwise_and(rois,num,mask = masks_inv)
        # # roi_fg contains the image of the shirt only where the shirt is
        # roi_fgs = cv.bitwise_and(shirt,shirt,mask = mask)
        # # join the roi_bg and roi_fg
        # dsts = cv.add(roi_bgs,roi_fgs)
        # img[y1s:y2s, x1s:x2s] = dsts # place the joined image, saved to dst back over the original image
        # #print "blurring"

        # break
        cv.imshow("img", img)
        #cv.setMouseCallback('img',change_dress)
        if cv.waitKey(100) == ord('q'):
            break

    cap.release()  # Destroys the cap object
    cv.destroyAllWindows()  # Destroys all the windows created by imshow

    return render_template('index.html')
Exemple #46
0
import cv2
import numpy as np
import argparse

ap=argparse.ArgumentParser()
ap.add_argument("-i","--image",required=True)
args=vars(ap.parse_args())

image = cv2.imread(args["image"])
cv2.imshow("original",image)
cv2.waitKey(0)

print("max : {}".format(cv2.add(np.uint8([200]), np.uint8([100]))))
print("min : {}".format(cv2.subtract(np.uint8([200]),np.uint8([200]))))
print("numpy add : {}".format(np.uint8([200])+np.uint8([100])))
print("numpy sub : {}".format(np.uint8([100])-np.uint8([200])))

M = np.ones(image.shape,dtype="uint8")*100
added = cv2.add(image,M)
cv2.imshow("added",added)
cv2.waitKey(0)

M = np.ones(image.shape,dtype="uint8")*50
subtract = cv2.subtract(image,M)
cv2.imshow("subtract",subtract)
cv2.waitKey(0)
Exemple #47
0
        plt.close()
    else:
        img_name = "./walk01/walk01" + str(i + 1) + ".jpg"
        img_color = cv2.imread(img_name, cv2.IMREAD_COLOR)

        img_name1 = "./walk01/walk01" + str(i) + ".jpg"
        img_name2 = "./walk01/walk01" + str(i + 1) + ".jpg"
        #print(i+2,"-YES")
        img_color_1 = cv2.imread(img_name1)  # read the frame (BGR color)
        img_color_2 = cv2.imread(img_name2)  # read the frame (BGR color)
        img_sub = cv2.subtract(img_color_1, img_color_2)
        # accumulate the frame differences after subtracting
        if i == 1:
            add_img_sub = img_sub
        else:
            add_img_sub = cv2.add(add_img_sub, img_sub)

        # binarize after subtracting
        img1 = cv2.imread(img_name1, 0)  # read directly as grayscale
        img2 = cv2.imread(img_name2, 0)  # read directly as grayscale
        img_sub = cv2.subtract(img1, img2)
        ret_2, thresh3 = cv2.threshold(img_sub, 127, 255, cv2.THRESH_TRUNC)
        # save thresh3

        output_tru_name = "./walk01-out/tru/walk01-out-tru-" + str(i) + ".jpg"
        cv2.imwrite(output_tru_name, thresh3)
        # accumulate the binarized differences
        if i == 1:
            tru_add_img_sub = thresh3
        else:
            tru_add_img_sub = cv2.add(tru_add_img_sub, thresh3)
Exemple #48
0
# Goal
#
#     Learn several arithmetic operations on images, such as addition, subtraction and bitwise operations.
#     You will learn the following functions: cv.add(), cv.addWeighted(), etc.
import numpy as np
import cv2 as cv

x = np.uint8([250])
y = np.uint8([10])
print(cv.add(x, y))

# image blending
img_1 = cv.imread('./images/test.jpg')
img_2 = cv.imread('./images/copy_test.jpg')
dst = cv.addWeighted(img_1, 0.7, img_2, 0.3, 0)
img_1.shape  # no effect as a statement; the two shapes must match for addWeighted
img_2.shape
cv.imshow('dst', dst)
cv.waitKey(0)
cv.destroyAllWindows()
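# cv.addWeighted computes dst = saturate(src1*alpha + src2*beta + gamma), so the
# call above blends 70% of img_1 with 30% of img_2. A sketch of the same blend
# done by hand (float math to avoid uint8 overflow, then cast back):
#     manual = (img_1.astype(np.float32) * 0.7 +
#               img_2.astype(np.float32) * 0.3).clip(0, 255).astype(np.uint8)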
Exemple #49
0
        #       ORIGINAL GREEN
        #        lw = np.array([65,60,60])
        #        up = np.array([80,255,255])

        lw = np.array([40, 30, 60])  #40 30 60
        up = np.array([80, 255, 255])

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lw, up)
        #        mask_blur = cv2.GaussianBlur(mask, (5,5), 0)
        mask_blur = cv2.medianBlur(mask, 5)
        mask_inv = cv2.bitwise_not(mask_blur)

        # keep the frame everywhere except the green region
        twibbonMasked = cv2.bitwise_and(frame, frame, mask=mask_inv)
        selImgMasked = cv2.bitwise_and(selImgRes, selImgRes, mask=mask_blur)

        # add the two masked images together
        result = cv2.add(twibbonMasked, selImgMasked)
        out.write(result)

        # cv2.imshow('res',result)
        if cv2.waitKey(1) & 0XFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
Exemple #50
0
        w_f = int(max(x_f, w3) - min(x_i, 0))
        h_f = int(max(y_f, h3) - min(y_i, 0))

        img2_temp = np.zeros((h_f, w_f, d), np.uint8)
        img2_temp[max(0, -y_i):int(max(0, -y_i)+h3), max(0, -x_i):int(max(0, -x_i)+w3), :] = img2

        img1_temp = np.zeros((h_f, w_f, d), np.uint8)
        img1_temp[max(0, y_i):(max(0, y_i)+h2), max(0, x_i):(max(0, x_i)+w2), :] = t_img1

        img1gray = cv.cvtColor(img1_temp, cv.COLOR_BGR2GRAY)
        ret, mask = cv.threshold(img1gray, 1, 255, cv.THRESH_BINARY)
        mask_inv = cv.bitwise_not(mask)

        img1_fg = cv.bitwise_and(img1_temp, img1_temp, mask = mask)
        img2_bg = cv.bitwise_and(img2_temp, img2_temp, mask = mask_inv)

        img2 = cv.add(img2_bg, img1_fg)
        #img2 = cv.polylines(img2,[np.int32(dst_2)],True,255,3, cv.LINE_AA) #test
        cv.imwrite('result.png', img2)
    else:
        print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
        matchesMask = None

    # draw_params = dict(matchColor = (0,255,0),
    #                    singlePointColor = (255,0,0),
    #                    matchesMask = matchesMask,
    #                    flags = 2)

    # img3 = cv.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
Exemple #51
0
'''


#IMAGE LOGIC

rows, cols, channels = img2.shape
roi = img1[0:rows, 0:cols]

img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 220, 255, cv2.THRESH_BINARY_INV)
# (image, threshold, max value, type): with THRESH_BINARY_INV, pixels above the
# threshold become 0 and the rest become the max value

mask_inv = cv2.bitwise_not(mask)  # invert the mask

img1_bg = cv2.bitwise_and(roi, roi, mask = mask_inv)
img2_fg = cv2.bitwise_and(img2, img2, mask = mask)

dst = cv2.add(img1_bg, img2_fg)
img1[0:rows, 0:cols] = dst

cv2.imshow('img2', img2)
cv2.imshow('mask', mask)
cv2.imshow('res', img1)
cv2.imshow('mask_inv', mask_inv)
cv2.imshow('img1_bg', img1_bg)
cv2.imshow('img2_fg', img2_fg)
cv2.imshow('dst', dst)

#cv2.imshow('mask',mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
Exemple #52
0
for i in range(6):
    G = cv.pyrDown(G)
    gpB.append(G)
# build the Laplacian pyramid for A
lpA = [gpA[5]]
for i in range(5, 0, -1):
    GE = cv.pyrUp(gpA[i])
    L = cv.subtract(gpA[i - 1], GE)
    lpA.append(L)
# build the Laplacian pyramid for B
lpB = [gpB[5]]
for i in range(5, 0, -1):
    GE = cv.pyrUp(gpB[i])
    L = cv.subtract(gpB[i - 1], GE)
    lpB.append(L)
# now join the left and right halves at every level
LS = []
for la, lb in zip(lpA, lpB):
    rows, cols, dpt = la.shape
    ls = np.hstack((la[:, 0:cols // 2], lb[:, cols // 2:]))  # // : slice indices must be integers
    LS.append(ls)
# now reconstruct
ls_ = LS[0]
for i in range(1, 6):
    ls_ = cv.pyrUp(ls_)
    ls_ = cv.add(ls_, LS[i])
# for comparison, the two halves joined directly
real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))
cv.imwrite('Pyramid_blending2.jpg', ls_)
cv.imwrite('Direct_blending.jpg', real)
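# The snippet above starts mid-way through the classic OpenCV pyramid-blending
# tutorial; a sketch of the setup it assumes (file names hypothetical) would be:
#     A = cv.imread('apple.jpg')
#     B = cv.imread('orange.jpg')
#     G = A.copy()
#     gpA = [G]
#     for i in range(6):
#         G = cv.pyrDown(G)
#         gpA.append(G)
#     G = B.copy()
#     gpB = [G]  # the visible loop then downsamples G six more times into gpB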
##
                        # cv2.imshow('win',img_cv2_bgr)
                        # cv2.waitKey(0)
                        # cv2.destroyAllWindows()
                    #     break
                    # break
                #print(boxes,scores)
                # augmented_whole_image_box[prefix_file_augment][0] += boxes
                # augmented_whole_image_box[prefix_file_augment][1] += scores
                #count += 1
                # boxes,scores = non_max_suppression_fast(np_boxes,0.5,np_scores)
                whole_img = np.zeros(whole_image_dim, dtype=np.uint8)
                for xmin, ymin, xmax, ymax in boxes:
                    cv2.rectangle(whole_img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)
                mask_img_path = os.path.join('/media/htic/NewVolume1/murali/mitosis/mitotic_count/test_masks_augmented', file_name)  # augmented
                mask_img = cv2.imread(mask_img_path)
                #yellow_img = cv2.rotate(yellow_img,cv2.ROTATE_90_COUNTERCLOCKWISE)
                res_img = cv2.add(mask_img, whole_img)
                cv2.imwrite(os.path.join(detection_out_path, file_name), res_img)



            # print (augmented_whole_image_box.keys())
            # print (len(augmented_whole_image_box.keys()))
         
            # for each in augmented_whole_image_box.keys():
            #     whole_img = np.zeros(whole_image_dim,dtype=np.uint8)
            #     if not len(augmented_whole_image_box[each][0]):
            #         continue

                #np_boxes = np.array(augmented_whole_image_box[each][0])
                #np_scores = np.array(augmented_whole_image_box[each][1])
                # boxes = (augmented_whole_image_box[each][0])
Exemple #54
0
def mask(img, mask):
    # copy img onto a black canvas only where mask is nonzero
    return cv2.add(np.zeros_like(img), img, mask=mask)
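# An equivalent and arguably more conventional way to write the helper above
# (same result: img where mask is set, black elsewhere):
#     cv2.bitwise_and(img, img, mask=mask)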
def apply_tongue(frame=None,
                 face=None,
                 image_width=0,
                 image_height=0,
                 tongue_filter_image=''):
    apply_tongue_logger = logging.getLogger('selfie_with_filters.apply_tongue')
    apply_tongue_logger.info("In the apply_tongue method ...")

    # Now get the mouth positions for each face
    if "Landmarks" in face:
        center_tongue = get_landmark_value(landmarks=face["Landmarks"],
                                           parameter_name="mouthDown",
                                           image_width=image_width,
                                           image_height=image_height)
        left_tongue = get_landmark_value(landmarks=face["Landmarks"],
                                         parameter_name="mouthLeft",
                                         image_width=image_width,
                                         image_height=image_height)
        right_tongue = get_landmark_value(landmarks=face["Landmarks"],
                                          parameter_name="mouthRight",
                                          image_width=image_width,
                                          image_height=image_height)
    else:
        apply_tongue_logger.warning("No Landmarks found in face!")
        return None

    apply_tongue_logger.info("Retrieved Nose positions for face ...")

    tongue_width = int(
        hypot(left_tongue[0] - right_tongue[0],
              left_tongue[1] - right_tongue[1]) * 1.25)
    tongue_height = int(tongue_width * 0.75)

    # New tongue position
    #top_left = (int(center_nose[0] - nose_width / 2),
    #                      int(center_nose[1] - nose_height / 2))
    top_left = (int(center_tongue[0] - tongue_width / 2),
                int(center_tongue[1] - tongue_height / 3))
    bottom_right = (int(center_tongue[0] + tongue_width / 2),
                    int(center_tongue[1] + tongue_height / 2))

    # 3. apply the tongue overlay

    tongue_image = cv2.imread(tongue_filter_image)
    rows, cols, _ = frame.shape
    tongue_mask = np.zeros((rows, cols), np.uint8)

    tongue_mask.fill(0)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Adding the new tongue
    tongue_animal = cv2.resize(tongue_image, (tongue_width, tongue_height))
    tongue_animal_gray = cv2.cvtColor(tongue_animal, cv2.COLOR_BGR2GRAY)
    _, tongue_mask = cv2.threshold(tongue_animal_gray, 25, 255,
                                   cv2.THRESH_BINARY_INV)

    tongue_area = frame[top_left[1]:top_left[1] + tongue_height,
                        top_left[0]:top_left[0] + tongue_width]
    tongue_area_no_tongue = cv2.bitwise_and(tongue_area,
                                            tongue_area,
                                            mask=tongue_mask)
    final_tongue = cv2.add(tongue_area_no_tongue, tongue_animal)

    frame[top_left[1]:top_left[1] + tongue_height,
          top_left[0]:top_left[0] + tongue_width] = final_tongue

    return frame
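A hedged usage sketch for apply_tongue: the face dict below mimics one entry of an Amazon Rekognition detect_faces response, since the landmark names above (mouthDown, mouthLeft, mouthRight) come from that API. The exact shape consumed by get_landmark_value, the file names, and every literal value are assumptions for illustration.

frame = cv2.imread('selfie.jpg')  # hypothetical input frame
face = {"Landmarks": [
    {"Type": "mouthDown", "X": 0.50, "Y": 0.72},
    {"Type": "mouthLeft", "X": 0.44, "Y": 0.66},
    {"Type": "mouthRight", "X": 0.56, "Y": 0.66},
]}
out = apply_tongue(frame=frame,
                   face=face,
                   image_width=frame.shape[1],
                   image_height=frame.shape[0],
                   tongue_filter_image='tongue.png')
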
def apply_ear(frame=None,
              face=None,
              image_width=0,
              image_height=0,
              ear_direction="",
              ear_filter_image=''):
    apply_ear_logger = logging.getLogger('selfie_with_filters.apply_ear')
    apply_ear_logger.info("In the apply_ear method ...")

    # Now get the ear anchor positions for this face
    if "Landmarks" in face:
        if ear_direction == "left":
            center_ear = get_landmark_value(landmarks=face["Landmarks"],
                                            parameter_name="upperJawlineLeft",
                                            image_width=image_width,
                                            image_height=image_height)
            # get the left eye dimensions
            left_eye_left = get_landmark_value(landmarks=face["Landmarks"],
                                               parameter_name="leftEyeLeft",
                                               image_width=image_width,
                                               image_height=image_height)
            left_eye_right = get_landmark_value(landmarks=face["Landmarks"],
                                                parameter_name="leftEyeRight",
                                                image_width=image_width,
                                                image_height=image_height)

        elif ear_direction == "right":
            center_ear = get_landmark_value(landmarks=face["Landmarks"],
                                            parameter_name="upperJawlineRight",
                                            image_width=image_width,
                                            image_height=image_height)
            # get the right eye dimensions
            right_eye_left = get_landmark_value(landmarks=face["Landmarks"],
                                                parameter_name="rightEyeLeft",
                                                image_width=image_width,
                                                image_height=image_height)
            right_eye_right = get_landmark_value(
                landmarks=face["Landmarks"],
                parameter_name="rightEyeRight",
                image_width=image_width,
                image_height=image_height)

    else:
        apply_ear_logger.warning("No Landmarks found in face!")
        return None

    apply_ear_logger.info("Retrieved Ear positions for face ...")

    # Rekognition returns no ear landmarks, so use the eye width as a
    # proxy for face scale
    if ear_direction == "left":
        ear_width = int(
            hypot(left_eye_left[0] - left_eye_right[0],
                  left_eye_left[1] - left_eye_right[1]) * 2.0)
    elif ear_direction == "right":
        ear_width = int(
            hypot(right_eye_left[0] - right_eye_right[0],
                  right_eye_left[1] - right_eye_right[1]) * 2.0)
    else:
        apply_ear_logger.warning("Unknown ear_direction: %s", ear_direction)
        return None

    ear_height = int(ear_width * 1.25)

    # New ear position (per-side offsets tuned by hand)
    if ear_direction == "left":
        top_left = (int(center_ear[0] - ear_width / 1.25),
                    int(center_ear[1] - ear_height / 0.75))
        bottom_right = (int(center_ear[0] + ear_width / 2),
                        int(center_ear[1] + ear_height / 2))
    elif ear_direction == "right":
        top_left = (int(center_ear[0]), int(center_ear[1] - ear_height / 0.75))
        bottom_right = (int(center_ear[0] + ear_width),
                        int(center_ear[1] + ear_height))

    # 3. apply the ear overlay

    ear_image = cv2.imread(ear_filter_image)

    # Build a background mask from the resized ear image: near-black pixels
    # (the assumed background of the filter image) become white
    ear_animal = cv2.resize(ear_image, (ear_width, ear_height))
    ear_animal_gray = cv2.cvtColor(ear_animal, cv2.COLOR_BGR2GRAY)
    _, ear_mask = cv2.threshold(ear_animal_gray, 25, 255,
                                cv2.THRESH_BINARY_INV)

    ear_area = frame[top_left[1]:top_left[1] + ear_height,
                     top_left[0]:top_left[0] + ear_width]
    ear_area_no_ear = cv2.bitwise_and(ear_area, ear_area, mask=ear_mask)
    final_ear = cv2.add(ear_area_no_ear, ear_animal)

    frame[top_left[1]:top_left[1] + ear_height,
          top_left[0]:top_left[0] + ear_width] = final_ear

    return frame
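The resize/threshold/bitwise_and/cv2.add sequence is identical in apply_tongue and apply_ear, so it could be factored out. A hedged refactoring sketch, assuming the overlay images keep their near-black background (overlay_filter is a name introduced here, not part of the original code):

def overlay_filter(frame, overlay, top_left, width, height):
    # paste `overlay` onto `frame` at `top_left`, treating near-black
    # overlay pixels (<= 25) as transparent background
    overlay = cv2.resize(overlay, (width, height))
    gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)
    _, bg_mask = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY_INV)
    roi = frame[top_left[1]:top_left[1] + height,
                top_left[0]:top_left[0] + width]
    background = cv2.bitwise_and(roi, roi, mask=bg_mask)
    frame[top_left[1]:top_left[1] + height,
          top_left[0]:top_left[0] + width] = cv2.add(background, overlay)
    return frame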
Exemple #57
0
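# `prior`, `video_masks_large`, `video_masks_small` and `video_frames` are
# assumed to be defined in earlier cells of this notebook-style snippet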
common.p_histogram(prior.flatten())

for ix in range(400, 500):
    past = np.zeros_like(video_masks_large[ix], dtype=np.float32)
    future = np.zeros_like(video_masks_large[ix], dtype=np.float32)
    past += (1.0 * video_masks_large[ix - 3] +
             3.0 * video_masks_large[ix - 2] +
             6.0 * video_masks_large[ix - 1])
    future += (6.0 * video_masks_large[ix + 1] +
               3.0 * video_masks_large[ix + 2] +
               1.0 * video_masks_large[ix + 3])
    probability = prior * (past + future + 0.05)
    #probability = cv2.GaussianBlur(probability, (25, 25), 0)
    common.p_heat(np.hstack([past, probability, future]))

    #ix = 100
    to_print = np.copy(video_frames[ix])
    to_print[:, :, 0] = cv2.add(to_print[:, :, 0], video_masks_large[ix])
    to_print[:, :, 1] = cv2.add(to_print[:, :, 1], video_masks_large[ix])
    to_print[:, :, 2] = cv2.add(to_print[:, :, 2], video_masks_large[ix])
    common.p_bgr(to_print)

past = np.zeros_like(video_masks_small[ix], dtype=np.float32)
future = np.zeros_like(video_masks_small[ix], dtype=np.float32)
past += (1.0 * video_masks_small[ix - 3] +
         3.0 * video_masks_small[ix - 2] +
         6.0 * video_masks_small[ix - 1])
future += (6.0 * video_masks_small[ix + 1] +
           3.0 * video_masks_small[ix + 2] +
           1.0 * video_masks_small[ix + 3])
probability = prior * (past + future)
#probability = cv2.GaussianBlur(probability, (25, 25), 0)
common.p_heat(np.hstack([past, probability, future]))

common.p_bgr(video_frames[ix])
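The 1-3-6 / 6-3-1 weighting above is just a small temporal kernel over the neighbouring masks. A hedged generalisation of the loop body (temporal_context is a name introduced here):

def temporal_context(masks, ix, weights=(1.0, 3.0, 6.0)):
    # weights run from the farthest neighbour (ix +/- 3) to the nearest
    n = len(weights)
    past = sum(w * masks[ix - (n - k)] for k, w in enumerate(weights))
    future = sum(w * masks[ix + (n - k)] for k, w in enumerate(weights))
    return np.float32(past), np.float32(future)

# equivalent to the loop body above:
# past, future = temporal_context(video_masks_large, ix)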
Exemple #58
0
def main():
    #input
    np.set_printoptions(
        formatter={'float_kind': lambda x: "{0:0.3f}".format(x)})
    img_cover = cv2.imread(COVER_IMG_PATH, cv2.IMREAD_GRAYSCALE)
    img_desk = cv2.imread(DESK_IMG_PATH, cv2.IMREAD_GRAYSCALE)
    hp_cover = cv2.imread(HP_IMG_PATH, cv2.IMREAD_GRAYSCALE)
    img_dia1 = cv2.imread(DIAMOND_HEAD_1, cv2.IMREAD_GRAYSCALE)
    img_dia2 = cv2.imread(DIAMOND_HEAD_2, cv2.IMREAD_GRAYSCALE)

    #2-1
    orb = cv2.ORB_create()
    kp_cover = orb.detect(img_cover, None)
    kp_cover, des_cover = orb.compute(img_cover, kp_cover)
    kp_desk = orb.detect(img_desk, None)
    kp_desk, des_desk = orb.compute(img_desk, kp_desk)

    np_best_match = BFMatcher(des_cover, des_desk)
    matches = []
    #2-1
    for i in range(len(np_best_match)):
        matches.append(
            cv2.DMatch(np_best_match[i]['toKP'], np_best_match[i]['fromKP'],
                       0))
    res = cv2.drawMatches(img_desk,
                          kp_desk,
                          img_cover,
                          kp_cover,
                          matches[:10],
                          None,
                          flags=2)
    cv2.imshow('Feature Matching', res)
    cv2.imwrite('2_1_Feature Matching.png', res)
    cv2.waitKey()
    cv2.destroyAllWindows()

    #2-2
    #srcP=cover / destP = desk
    matches_normal = 15
    srcP = np.empty(shape=(matches_normal, 2), dtype=float)
    destP = np.empty(shape=(matches_normal, 2), dtype=float)
    for i in range(matches_normal):
        srcP[i][0] = kp_cover[matches[i].trainIdx].pt[0]
        srcP[i][1] = kp_cover[matches[i].trainIdx].pt[1]
        destP[i][0] = kp_desk[matches[i].queryIdx].pt[0]
        destP[i][1] = kp_desk[matches[i].queryIdx].pt[1]

    #normalize and get transformation matrix
    H_normal = compute_homography(srcP, destP)
    # # warp the cover's corner points through H
    # height, width = img_cover.shape
    dst_normal = cv2.warpPerspective(img_cover, H_normal,
                                     (img_desk.shape[1], img_desk.shape[0]))

    roi = np.copy(img_desk)
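    # Composite: threshold the warped cover to get a mask of its non-empty
    # pixels, keep the desk image where the warp is empty, then cv2.add the
    # foreground and background halves together.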
    img2gray = dst_normal
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    img1_fg = cv2.bitwise_and(dst_normal, dst_normal, mask=mask)
    img2_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)

    dst_normal = cv2.add(img1_fg, img2_bg)

    cv2.imshow("2_2_H_dst_normal", dst_normal)
    cv2.imwrite('2_2_H_normal.png', dst_normal)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    #2-3
    #srcP=cover / destP = desk
    matches_ransac = 25
    srcP = np.empty(shape=(matches_ransac, 2), dtype=float)
    destP = np.empty(shape=(matches_ransac, 2), dtype=float)
    for i in range(matches_ransac):
        srcP[i][0] = kp_cover[matches[i].trainIdx].pt[0]
        srcP[i][1] = kp_cover[matches[i].trainIdx].pt[1]
        destP[i][0] = kp_desk[matches[i].queryIdx].pt[0]
        destP[i][1] = kp_desk[matches[i].queryIdx].pt[1]
    H_ransac = compute_homography_ransac(srcP, destP, th=4)
    roi = np.copy(img_desk)
    dst_ransac = cv2.warpPerspective(img_cover, H_ransac,
                                     (img_desk.shape[1], img_desk.shape[0]))
    img2gray = dst_ransac
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    img1_fg = cv2.bitwise_and(dst_ransac, dst_ransac, mask=mask)
    img2_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)

    dst_ransac = cv2.add(img1_fg, img2_bg)
    cv2.imshow('2_3_H_ransac', dst_ransac)
    cv2.imwrite('2_3_H_ransac.png', dst_ransac)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    hp_cover = cv2.resize(hp_cover, (img_cover.shape[1], img_cover.shape[0]),
                          interpolation=cv2.INTER_AREA)
    roi = np.copy(img_desk)
    dst_ransac = cv2.warpPerspective(hp_cover, H_ransac,
                                     (img_desk.shape[1], img_desk.shape[0]))
    img2gray = dst_ransac
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    img1_fg = cv2.bitwise_and(dst_ransac, dst_ransac, mask=mask)
    img2_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    dst_ransac = cv2.add(img1_fg, img2_bg)

    cv2.imshow('2_4_H_ransac', dst_ransac)
    cv2.imwrite('2_4_H_ransac.png', dst_ransac)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    #2-5
    orb = cv2.ORB_create()
    kp_dia1 = orb.detect(img_dia1, None)
    kp_dia1, des_dia1 = orb.compute(img_dia1, kp_dia1)
    kp_dia2 = orb.detect(img_dia2, None)
    kp_dia2, des_dia2 = orb.compute(img_dia2, kp_dia2)
    np_best_match_dia = BFMatcher(des_dia2, des_dia1)
    matches = []
    for i in range(len(np_best_match_dia)):
        matches.append(
            cv2.DMatch(np_best_match_dia[i]['toKP'],
                       np_best_match_dia[i]['fromKP'], 0))

    matches_ransac = 35
    srcP = np.empty(shape=(matches_ransac, 2), dtype=float)
    destP = np.empty(shape=(matches_ransac, 2), dtype=float)
    for i in range(matches_ransac):
        srcP[i][0] = kp_dia2[matches[i].trainIdx].pt[0]
        srcP[i][1] = kp_dia2[matches[i].trainIdx].pt[1]
        destP[i][0] = kp_dia1[matches[i].queryIdx].pt[0]
        destP[i][1] = kp_dia1[matches[i].queryIdx].pt[1]

    H_ransac = compute_homography_ransac(srcP, destP, th=4)
    img_dia1_bigger = np.copy(img_dia1)
    img_dia1_bigger = np.concatenate(
        (img_dia1_bigger, np.zeros(shape=img_dia1.shape, dtype=np.uint8)),
        axis=1)
    img_dia1_bigger = np.concatenate(
        (img_dia1_bigger,
         np.zeros(shape=(img_dia1.shape[0], img_dia1.shape[1] * 2),
                  dtype=np.uint8)),
        axis=0)

    dst_ransac = cv2.warpPerspective(
        img_dia2, H_ransac, (img_dia1.shape[1] * 2, img_dia2.shape[0] * 2))
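    # keep an unmasked copy of the warp; the seam blending in 2-5(b) below
    # reads its pixel columns directly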
    B = np.copy(dst_ransac)
    roi = np.copy(dst_ransac)
    img2gray = img_dia1_bigger
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    img1_fg = cv2.bitwise_and(img_dia1_bigger, img_dia1_bigger, mask=mask)
    img2_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    img_dia1_bigger = cv2.add(img1_fg, img2_bg)

    # scan the bottom row for a run of 15 black pixels to find where the
    # stitched content ends
    dia_size = img_dia1.shape
    img_edge = img_dia1_bigger.shape[1]  # fallback if no blank run is found
    for i in range(dia_size[1] * 2):
        if np.sum(img_dia1_bigger[dia_size[0] - 1, i:i + 15]) == 0:
            img_edge = i
            break
    dia_stitched = np.copy(img_dia1_bigger[:dia_size[0], :img_edge])
    cv2.imshow('2_5_a_H_ransac', dia_stitched)
    cv2.imwrite('2_5_a_H_ransac.png', dia_stitched)
    cv2.waitKey()
    cv2.destroyAllWindows()

    # cross-fade the last 100 columns between img_dia1 and the warped copy B
    length = 100
    for i in range(length):
        rate = i / float(length)  # explicit float division (Python 2 safe)
        col = dia_size[1] - length + i
        dia_stitched[:, col] = (rate * img_dia1[:, col] +
                                (1 - rate) * B[:dia_size[0], col])
    cv2.imshow('2_5_b_gradation', dia_stitched)
    cv2.imwrite('2_5_b_gradation.png', dia_stitched)
    cv2.waitKey()
    cv2.destroyAllWindows()
cap = cv2.VideoCapture(0)  # assumption: the original capture source was lost
feature_params = dict(maxCorners=100,  # assumed: matches the 100 colors below
                      qualityLevel=0.7,
                      minDistance=10,
                      blockSize=7)
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))
color = np.random.randint(0, 255, (100, 3))
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
mask = np.zeros_like(old_frame)
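# note: this `mask` is a drawing canvas that accumulates the flow tracks;
# it is not an OpenCV mask argument like in the snippets above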
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                           **lk_params)
    if p1 is None:
        break
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel().astype(int)
        c, d = old.ravel().astype(int)
        mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
        frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imshow('frame', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)
cv2.destroyAllWindows()
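One property every snippet above relies on: cv2.add saturates at the dtype maximum instead of wrapping the way raw numpy addition does, which is why it is the safe choice for compositing. A quick contrast (values are illustrative only):

a = np.uint8([250])
b = np.uint8([10])
print(cv2.add(a, b))  # [[255]] -- saturated at 255
print(a + b)          # [4]     -- wrapped modulo 256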