Example #1
    def find_match(node, img):
        # for root node
        if node.parent is None:
            for k, v in node.items():
                res = find_match(v, img)
                if res is not None:
                    return res
            return node

        # find in this node
        if node.tmpl is not None:
            s_bgr = cv2.split(node.tmpl) # Blue Green Red
            i_bgr = cv2.split(img)
            weight = (0.3, 0.3, 0.4)
            resbgr = [0, 0, 0]
            for i in range(3): # bgr
                resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
            match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]

            # match = cv2.matchTemplate(img, node.tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
            # found!
            if max_val > 0.7:
                x, y = max_loc
                h, w = node.tmpl.shape[:2]
                cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
                # find in children
                for k, v in node.items():
                    res = find_match(v, img)
                    if res is not None: 
                        return res
                return node
Example #2
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

        if mask is not None:
            img = img.copy()
            img[mask!=0] = 0
            tmpl = tmpl.copy()
            tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl) # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3): # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
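
# A minimal usage sketch, not part of the original example; the file names
# are hypothetical and cv2 is assumed to be imported at module level.
screen = cv2.imread('screen.png')   # BGR image to search in
tmpl = cv2.imread('button.png')     # BGR template to look for
confidence, rect = find_match(screen, tmpl)
print('confidence=%.3f, rect=%s' % (confidence, rect))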
Example #3
def get_diffs( file_img1, file_img2, min_pts ):
    # CHECK: file existence
    if not (os.path.isfile(file_img1) and os.path.isfile(file_img2)):
        return -1

    # read in images
    img1 = cv2.imread( file_img1 )
    img2 = cv2.imread( file_img2 )

    # CHECK: img size equality
    if img1.shape != img2.shape:
        return -2

    # threshold the absolute difference between the images
    # (plain uint8 subtraction would wrap around for negative differences)
    ret, im_diff = cv2.threshold(cv2.absdiff(img1, img2), 0, 255, cv2.THRESH_BINARY)

    # collapse image into a single channel
    b, g, r = cv2.split(im_diff)
    im_diff = cv2.add(cv2.add(b, g), r)

    # find groups of contiguous points where images are different
    cont, _ = cv2.findContours( im_diff, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE )

    # put all contours with more than min_pts points into JSON 
    d = {'diff':[]}
    for c in cont:
        if len(c) > min_pts:
            momnt = cv2.moments(c)
            d['diff'].append([momnt['m10']/momnt['m00'],momnt['m01']/momnt['m00']])

    # dump JSON to stdout
    json.dump(d,sys.stdout)
Example #4
    def foregroundMask(color, background):
        b = cv2.split(background)
        im = cv2.split(color)
        mask = None
        out = None
        # For each channel (B-G-R) in the image:
        for i in range(len(b)):
            # Channel c gets a median blur of same size as the kernel
            # used in the preprocessed background
            c = cv2.medianBlur(im[i], 5)
            # Get the difference between background and image (channel-wise),
            # using the blurred channel computed above
            imgs = cv2.absdiff(c, b[i])
            # Run a binary Otsu threshold to separate background from
            # foreground based on the absolute difference (the fixed 250
            # threshold is ignored when THRESH_OTSU is set).
            (thresh, im_bw) = cv2.threshold(imgs, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

            # Close the Thresholded image to remove
            # small noises in the threshold
            # Reopen the Threshold
            im_bw = cv2.morphologyEx(im_bw, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
            im_bw = cv2.morphologyEx(im_bw, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
            # Merge Channel Masks
            if mask is None:
                # If it's the first channel being processed
                mask = im_bw
            else:
                # Else, merge channels by running an OR
                # (wherever it's above the threshold, it's an object)
                mask = cv2.bitwise_or(mask, im_bw)
        return mask
Example #5
def computeColourPSNR(image1, image2):
    lumaImage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2YCR_CB)
    lumaImage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2YCR_CB)
    cv2.imwrite("luma1.jpg",lumaImage1)
    Y1, Cr1, Cb1 = cv2.split(lumaImage1)
    Y2, Cr2, Cb2 = cv2.split(lumaImage2)
    return computePSNR(Y1,Y2)    
Example #6
	def visualize(self, input_img):
		in_shape = input_img.shape
		if len(in_shape) == 2:
			in_channels = 1
		elif len(in_shape) == 3:
			in_channels = in_shape[2]

		if in_channels != self.nb_input_features:
			print('Number of channels does not match the network input')
			sys.exit(-1)
		if in_shape[0] != self.img_height or in_shape[1] != self.img_width:
			print('Image dimensions do not match the network; resizing')
			input_img = cv2.resize(input_img, (self.img_width, self.img_height))
		out_imgs=[]	
		# if self.nb_input_features == 1:
		# 	for i in self.filters:
		# 		out_imgs.append(cv2.filter2D(input_img, -1, i[:,:,0]))
		# if self.nb_input_features == 3:
		for i in self.filters:
			#i = i/i.sum()
			channels = cv2.split(input_img)
			#channels = [i/i.sum() for i in channels]
			filters_ch = cv2.split(i)
			out = cv2.merge([ cv2.filter2D(l,-1,m) for l,m in zip(channels, filters_ch) ])
			out_imgs.append(out)
		return out_imgs
Example #7
    def process(self, mat):
        start_time = time.time()

        self.results = shm.recovery_vision.get()
        self.fill_single_camera_direction(self.results)

        self.post('original', mat)

        self.lab_l, self.lab_a, self.lab_b = cv2.split(cv2.cvtColor(mat, cv2.COLOR_BGR2LAB))
        self.post('lab a', self.lab_a)
        self.post('lab b', self.lab_b)

        self.luv_l, self.luv_u, self.luv_v = cv2.split(cv2.cvtColor(mat, cv2.COLOR_BGR2LUV))
        self.post('luv v', self.luv_v)

        # table_mask = self.table(mat)
        table_mask = np.zeros(mat.shape[:2], np.uint8)
        mark_mask = self.marks(mat)
        tower_mask = cv2.bitwise_not(cv2.bitwise_or(table_mask, mark_mask))
        self.tower(mat, tower_mask)

        shm.recovery_vision.set(self.results)

        runtime = time.time() - start_time
        min_runtime = 1 / self.options['max fps']
        if min_runtime > runtime:
            time.sleep(min_runtime - runtime)
            runtime = min_runtime
        print('FPS: {}'.format(1 / (runtime)))
Example #8
def HistEqual(imgBGR):
    # histogram equalization in BGR
    b,g,r = cv2.split(imgBGR)
    b = cv2.equalizeHist(b)
    g = cv2.equalizeHist(g)
    r = cv2.equalizeHist(r)
    imgBGRHist = cv2.merge((b,g,r))

    # histogram equalization in HSV
    imgHSV = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2HSV)
    h,s,v = cv2.split(imgHSV)
##    h = cv2.equalizeHist(h)
##    s = cv2.equalizeHist(s)
    v = cv2.equalizeHist(v)
    imgHSV = cv2.cvtColor(cv2.merge((h,s,v)), cv2.COLOR_HSV2BGR)

    # histogram equalization in LAB
    imgLAB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2LAB)
    l,a,b = cv2.split(imgLAB)
    l = cv2.equalizeHist(l)
##    a = cv2.equalizeHist(a)
##    b = cv2.equalizeHist(b)
    imgLAB = cv2.cvtColor(cv2.merge((l,a,b)), cv2.COLOR_LAB2BGR)

    # normalization
##    imgBGRL1 = imgBGR.copy().astype(np.float)
##    imgBGRL2 = imgBGR.copy().astype(np.float)
##    imgBGRINF = imgBGR.copy().astype(np.float)
##    cv2.normalize(imgBGRL1, imgBGRL1, 255, 0, cv2.NORM_L1)
##    cv2.normalize(imgBGRL2, imgBGRL2, 255, 0, cv2.NORM_L2)
##    cv2.normalize(imgBGRINF, imgBGRINF, 255, 0, cv2.NORM_INF)

    return imgBGRHist, imgHSV, imgLAB
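
# Usage sketch (hypothetical file names; cv2 assumed imported): compare the
# three equalization variants produced by HistEqual().
img = cv2.imread('input.jpg')
eq_bgr, eq_hsv, eq_lab = HistEqual(img)
cv2.imwrite('eq_bgr.jpg', eq_bgr)
cv2.imwrite('eq_hsv.jpg', eq_hsv)
cv2.imwrite('eq_lab.jpg', eq_lab)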
Example #9
def init_back():
  global green_mask
  green_mask = cv2.imread("green_mask.jpg")
  green_mask = cv2.cvtColor(green_mask,cv2.COLOR_BGR2GRAY)
  ret, green_mask = cv2.threshold(green_mask, 40, 255, cv2.THRESH_BINARY)
  
  for f in os.listdir('learn_1'):
    if f.endswith('jpg'):
      frame = cv2.imread('learn_1/'+f)
      hsv = cv2.split(cv2.cvtColor(frame,cv2.COLOR_BGR2HSV))
      diff_h.acc_back(hsv[0])
      
      gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
      diff_g.acc_back(gray)
      
  diff_h.create_models_from_stats()
  diff_g.create_models_from_stats()

  for f in os.listdir('learn_2'):
    if f.endswith('jpg'):
      frame = cv2.imread('learn_2/'+f)
      hsv = cv2.split(cv2.cvtColor(frame,cv2.COLOR_BGR2HSV))
      diff_dark_h.acc_back(hsv[0])
      
      gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
      diff_dark_g.acc_back(gray)
      
  diff_dark_h.create_models_from_stats()
  diff_dark_g.create_models_from_stats()
Example #10
def blend(foregroundSrc, backgroundSrc, dst, alphaMask):
    
    # Calculate the normalized alpha mask.
    maxAlpha = numpy.iinfo(alphaMask.dtype).max
    normalizedAlphaMask = (1.0 / maxAlpha) * alphaMask
    
    # Calculate the normalized inverse alpha mask.
    normalizedInverseAlphaMask = \
        numpy.ones_like(normalizedAlphaMask)
    normalizedInverseAlphaMask[:] = \
        normalizedInverseAlphaMask - normalizedAlphaMask
    
    # Split the channels from the sources.
    foregroundChannels = cv2.split(foregroundSrc)
    backgroundChannels = cv2.split(backgroundSrc)
    
    # Blend each channel.
    for i in range(len(foregroundChannels)):
        backgroundChannels[i][:] = \
            normalizedAlphaMask * foregroundChannels[i] + \
            normalizedInverseAlphaMask * backgroundChannels[i]
    
    # Merge the blended channels into the destination.
    cv2.merge(backgroundChannels, dst)
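
# Usage sketch with synthetic data (not from the original source). blend()
# writes its result into a preallocated dst array of the same shape.
import numpy
fg = numpy.full((100, 100, 3), 255, numpy.uint8)  # white foreground
bg = numpy.zeros((100, 100, 3), numpy.uint8)      # black background
dst = numpy.empty_like(bg)
alpha = numpy.zeros((100, 100), numpy.uint8)
alpha[25:75, 25:75] = 255                         # fully opaque square
blend(fg, bg, dst, alpha)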
Example #11
  def isCamFlash(self, origin_im):
      grayim = cv2.cvtColor(origin_im, cv2.COLOR_BGR2GRAY)
      if time.time() - self.Rtime >= 3 or not self.camFLAG:
          self.Rimg = origin_im
          self.Rtime = time.time()
          self.camFLAG = 1

      blurred = cv2.GaussianBlur(grayim, (25, 25), 0)
      ret, tr = cv2.threshold(blurred, 230, 255, 0)
      (cnts, _) = cv2.findContours(tr.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
      for c in cnts:
          if cv2.contourArea(c) > 250:
              x, y, w, h = cv2.boundingRect(c)

              ref = self.Rimg[y:y + h, x:x + w]
              rhsv = cv2.cvtColor(ref, cv2.COLOR_BGR2HSV)
              rh, rs, rv = cv2.split(rhsv)
              size = np.size(rhsv)

              cur = origin_im[y:y + h, x:x + w]
              chsv = cv2.cvtColor(cur, cv2.COLOR_BGR2HSV)
              ch, cs, cv = cv2.split(chsv)
              diff = ((float(np.sum(cv)) / size)) - (float(np.sum(rv)) / size)
              if float(np.sum(cv)) >= 60 and diff > 30:
                  return 1
      return 0
Example #12
def Harris_match(img1, img2):
    """算法主程序"""
    # 下面执行一点颜色空间的转换
    r, g, b = cv2.split(img1)
    img1 = cv2.merge((b, g, r))
    r, g, b = cv2.split(img2)
    img2 = cv2.merge((b, g, r))
    G_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    G_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # compute and describe the corner points
    harrisim1 = Harris.Compute_Harris_Response(G_img1)
    filtered_coords1 = numpy.array(Harris.Get_Harris_Points(harrisim1), dtype=int)
    patches1 = Harris.get_descriptors(G_img1, filtered_coords1)

    harrisim2 = Harris.Compute_Harris_Response(G_img2)
    filtered_coords2 = numpy.array(Harris.Get_Harris_Points(harrisim2), dtype=int)
    patches2 = Harris.get_descriptors(G_img2, filtered_coords2)

    # Harris.Plot_harris_points(img1, filtered_coords1)
    # Harris.Plot_harris_points(img2, filtered_coords2)
    matches = Harris.match_twosided(patches1, patches2)

    plt.figure()
    plt.gray()
    Harris.plot_matches(img1, img2, filtered_coords1, filtered_coords2, matches, show_below=False)
    plt.show()
Example #13
def process_frame(frame, thresh, param1, param2, min_radius, max_radius, 
			min_distance, gauss_kernel_size, dilate_kernel_size, detection_threshold):
	#obtaining red channel - 3rd element of tuple returned by split() function:
	red = cv2.split(frame)[2]
	#converting BGR image to HSV scale and obtaining Saturation channel:
	saturation = cv2.split(cv2.cvtColor(frame, cv2.COLOR_BGR2HSV))[1] 
	#thresholding with chosen method:
	saturation = cv2.threshold(saturation,thresh,255,cv2.THRESH_BINARY)[1] 
	#gaussian blur for better yolk detection:
	red = cv2.GaussianBlur(red, (gauss_kernel_size,gauss_kernel_size), 0) 
	#detecting yolks with Hough Circles Transform:
	circles = cv2.HoughCircles(red, cv2.HOUGH_GRADIENT, 1, min_distance,
							   param1=param1, param2=param2,
							   minRadius=min_radius, maxRadius=max_radius)
	#if circles are found, they are excluded from the saturation channel:
	if isinstance(circles, np.ndarray): 
		circles = np.uint16(np.around(circles))
		for i in circles[0,:]:
			#rectangle (with an offset) drawn filled to erase yellow light
			#reflected from the egg yolk ('offset' is assumed to be a module-level constant):
			cv2.rectangle(saturation,(i[0]-i[2]-offset,0),(i[0]+i[2]+offset,480),0,-2)
	#morphological opening (erosion then dilation) to reduce noise:
	saturation = cv2.morphologyEx(saturation, cv2.MORPH_OPEN, 
			cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(dilate_kernel_size,dilate_kernel_size)))
	#summing the remaining egg yolk pixels:
	total = int(np.sum(saturation)/255)
	#decision whether or not the batch is acceptable:
	return 0 if total > detection_threshold else 1
Example #14
def calibrateVal(handFolder):
    print(handFolder)
    fnames = os.listdir(handFolder)
    fnames.sort()
    fnames = [f for f in fnames if f.find('image_') >= 0]
    n = len(fnames) // 2
    i = 0
    while (i < n):
        depPath = os.path.join(handFolder, 'image_'+str(i)+'_dep.png')
        imgPath = os.path.join(handFolder, 'image_'+str(i)+'_rgb.png')

        image = cv2.imread(imgPath)
        depth = cv2.imread(depPath, -1)

        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # cv2.split returns the channels directly; no preallocation needed
        hue, sat, val = cv2.split(hsv)

        print(cv2.mean(val)[0])
        i = i + 1
Example #15
def make_fg_bg_hist_plot(fg, bg):
    # make a plot comparing color histograms of foreground to background
    f, axarr = plt.subplots(2, 2)
    b, g, r, a = cv2.split(fg)
    bData = np.extract(a>0, b)
    gData = np.extract(a>0, g)
    rData = np.extract(a>0, r)
    axarr[0,0].set_title("Foreground")
    axarr[0,0].set_ylabel("Normalized # of pixels")
    for chan, col in zip([rData, gData, bData], ['red', 'green', 'blue']):
        hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
        hist /= hist.sum() # normalize to compare images of different sizes
        axarr[0,0].plot(hist, color = col)
        axarr[0,0].set_xlim([0, 256])

    b, g, r, a = cv2.split(bg)
    bData = np.extract(a>0, b)
    gData = np.extract(a>0, g)
    rData = np.extract(a>0, r)
    axarr[0,1].set_title("Background")
    for chan, col in zip([rData, gData, bData], ['red', 'green', 'blue']):
        hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
        hist /= hist.sum() # normalize to compare images of different sizes
        axarr[0,1].plot(hist, color = col)
        axarr[0,1].set_xlim([0, 256])
    axarr[1,0].imshow(cv2.cvtColor(fg, cv2.COLOR_BGRA2RGBA))
    axarr[1,1].imshow(cv2.cvtColor(bg, cv2.COLOR_BGRA2RGBA))
    plt.show()
Example #16
def ORBit(local_colour_camera_image, local_colour_training_image):
	# Convert Original Image of Scene into Grayscale
	b,g,r = cv2.split(local_colour_camera_image)
	local_colour_camera_image = cv2.merge([r,g,b])
	local_camera_image = cv2.cvtColor(local_colour_camera_image, cv2.COLOR_RGB2GRAY)

	# Convert Training Image into Grayscale
	b,g,r = cv2.split(local_colour_training_image)
	local_colour_training_image = cv2.merge([r,g,b])
	local_training_image = cv2.cvtColor(local_colour_training_image, cv2.COLOR_RGB2GRAY)

	# Initiate ORB detector (cv2.ORB_create in OpenCV 3+)
	orb = cv2.ORB_create(100, 1.2)

	# Find the keypoints and descriptors with ORB
	kp1, des1 = orb.detectAndCompute(local_training_image,None)
	kp2, des2 = orb.detectAndCompute(local_camera_image, None)

	# create BFMatcher object
	bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

	# Match descriptors.
	matches = bf.match(des1,des2)

	# Sort them in the order of their distance.
	matches = sorted(matches, key = lambda x:x.distance)

	# Draw first 30 matches.
	out = drawMatches(local_colour_training_image,kp1,local_colour_camera_image,kp2,matches[:30])
	return out
Example #17
def find_marker(image, red_thres, green_thres, sat_thres):

    # h,w, channels = img.shape

    # get red and sat
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    blue, green, red = cv2.split(image)
    hue, sat, val = cv2.split(hsv)

    # find the marker by looking for red, with high saturation
    sat = cv2.inRange(sat, np.array((sat_thres[0])), np.array((sat_thres[1])))
    red = cv2.inRange(red, np.array((red_thres[0])), np.array((red_thres[1])))
    green = cv2.inRange(green, np.array((green_thres[0])), np.array((green_thres[1])))
    # AND the two thresholds, finding the car
    car = cv2.multiply(red, sat)
    car = cv2.multiply(car, green)

    # remove noise (not doing it now because the POIs are very small)
    # elem = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
    # car = cv2.erode(car,elem, iterations=1)
    # car = cv2.dilate(car,elem, iterations=3)
    # return cv2.boundingRect(car)

    img, contours, hierarchy = cv2.findContours(car.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # import ipdb; ipdb.set_trace()

    # return cv2.boundingRect(contours[1])
    return [cv2.boundingRect(c) for c in contours]
Example #18
def process(imgroi):
    global img

    cv2.imshow("roi",imgroi)
    imgB, imgG, imgR = cv2.split(imgroi) 
    imgHSV = cv2.cvtColor(imgroi, cv2.COLOR_RGB2HSV)
    imgH, imgS, imgV = cv2.split(imgHSV) 
    filteredS = filter_high(imgS,gl_S)
    filteredV = filter_high(imgV,gl_V)
    filteredG = filter_high(imgG,gl_G)
    filteredR = filter_high(imgR,gl_R)
    filteredB = filter_high(imgB,gl_B)
    res = cv2.bitwise_and(filteredS,filteredV)
    res = cv2.bitwise_and(res,filteredG)
    res = cv2.bitwise_and(res,filteredR)
    res = cv2.bitwise_and(res,filteredB)
    res = cv2.blur(res,(5,5))
    res1 = res.copy()
    contours, hierarchy = cv2.findContours(res,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    show_contours(contours)
    
    cv2.imshow("ori",img)
    cv2.imshow("res",res)
    cv2.imshow("res1",res1)
Example #19
def changeColor(i, indices, pixel, labels, color):
	global imageOnCanvas
	global canvas
	global img
	img = np.zeros((80,80,3), np.uint8)
	img[:,:] = color
	img = Image.fromarray(img)
	img = ImageTk.PhotoImage(img)
	labels[i].configure(image = img)
	labels[i].image = img

	img = cv2.imread('curr_img.jpg')
	height, width, channel = img.shape

	for j in range(len(coordinates[indices[i]])):
		img[coordinates[indices[i]][j][1],coordinates[indices[i]][j][0]] = color

	if(i != (len(indices)-1)):
		img_old = cv2.imread('curr_img.jpg')
		hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
		h, s, v = cv2.split(hsv)
		hsv2 = cv2.cvtColor(img_old, cv2.COLOR_BGR2HSV)
		h1, s1, v1 = cv2.split(hsv2)
		final_hsv = cv2.merge((h, s, v1))
		img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)

	cv2.imwrite('curr_img.jpg', img)

	img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
	img = ImageTk.PhotoImage(img)

	canvas.delete(imageOnCanvas)
	canvas.create_image(0, 0, image=img, anchor="nw")
Example #20
    def preprocess(self):
        self.raw_image = self.image

        self.image = self.image[5:-5,5:-5]
        channels = cv2.split(self.image)
        self.bg_colors = tuple(map(lambda x:np.average(np.hstack((x[:5], x[-5:]))), channels))
        self.image = self.image[5:-5,5:-5]

        gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 0, 350, apertureSize=5)
        self.canny = edges
        contours, h = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        rect = cv2.boundingRect(np.vstack(contours))
        self.rect = rect
        rect_height = rect[3]
        count = rect_height // 100 + 1
        self.count = str(count)

        self.image = self.image[rect[1]:rect[1]+70, 13:-13]

        channels = cv2.split(self.image)
        fg_colors = tuple(map(np.average, channels))
        self.color_diff = tuple(map(lambda x,y:x-y, fg_colors, self.bg_colors))

        if max(self.color_diff) == self.color_diff[1]:
            self.color = 'g'
        elif max(self.color_diff) == self.color_diff[2]:
            self.color = 'r'
        else:
            self.color = 'p'
Example #21
 def circles(self,cv_image):
     cv_image=cv2.resize(cv_image,dsize=(self.screen['width'],self.screen['height']))
     #if self.blur:
     #    cv_image=cv2.GaussianBlur(cv_image,ksize=[5,5],sigmaX=0)
     
     channels=cv2.split(cv_image)
     channels[0] = cv2.equalizeHist(channels[0])
     channels[1] = cv2.equalizeHist(channels[1])
     #channels[2] = cv2.equalizeHist(channels[2])
     img = cv2.merge(channels, cv_image)
     img=cv2.bilateralFilter(img, -1, 5, 0.1)
     kern = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
     img=cv2.morphologyEx(img, cv2.MORPH_CLOSE, kern)
     hsvImg=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
     luvImg=cv2.cvtColor(img,cv2.COLOR_BGR2LUV)
     gauss = cv2.GaussianBlur(luvImg, ksize=(5,5), sigmaX=10)
     sharpened = cv2.addWeighted(luvImg, 1.5, gauss, -0.6, 0)
     enhancedImg = cv2.medianBlur(sharpened, 3)
     ch=cv2.split(enhancedImg)
     mask = cv2.inRange(ch[2],self.highThresh[2],self.lowThresh[2])
     mask1=cv2.inRange(ch[1],self.highThresh[0],self.lowThresh[0])
     mask2=cv2.inRange(ch[2],self.highThresh[1],self.lowThresh[1])
     
    # cv2.imshow(mask)
     #cv2.imshow(mask1)
     #cv2.imshow(mask2)
     mask_out=cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)
     try:
         self.image_filter_pub.publish(self.bridge.cv2_to_imgmsg(mask_out, encoding="bgr8"))
     except CvBridgeError as e:
         rospy.logerr(e)
Example #22
def hvv(img):
	hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
	yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
	h_, s_, v_ = cv2.split(hsv)
	y, u, v = cv2.split(yuv)
	hvv = cv2.merge((h_, v, v_))
	return hvv
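
# Usage sketch (hypothetical file name): build the H/V(yuv)/V(hsv)
# false-color image from a BGR input.
out = hvv(cv2.imread('photo.jpg'))
cv2.imwrite('hvv.jpg', out)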
Example #23
def generateHistFromHLL(imgNames):
    hlLF = open(rootPath + '\\' + "hll_hist_unify.txt", "a")
    count = 0
    process = 0
    for imgName in imgNames:
        print('%d/%d' % (process, len(imgNames)))
        process += 1
        point = getPoint(imgName)
        img = cv2.imread(imgName)
        if img is None:
            continue
        img = adjustSize(img)
        if img is None:
            continue
        b, g, r = cv2.split(img)
        if b.tolist() == g.tolist() and g.tolist() == r.tolist():
            print('gray image')
            continue
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        h, l, s = cv2.split(hls)
        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
        L, a, b = cv2.split(lab)
        height = img.shape[0]
        width = img.shape[1]
        hist = getHistogramFromLayer(h, 180, 180.0) + getHistogramFromLayer(l) + getHistogramFromLayer(L)
        hist = [round(float(x)/(height*width), 3) for x in hist]
        if len(hist) == 0:
            print('unexpected error!')
            continue
        write(process, hlLF, hist, point)
        count += 1
        if count > 200:
            hlLF.flush()
            count = 0
    hlLF.close()
Example #24
def color_distort (image0):

  # save alpha channel separately
  assert is_bgra(image0), image0.shape
  b, g, r, a = cv2.split(image0)
  image0 = cv2.merge((b, g, r))

  hsv = cv2.cvtColor(image0, cv2.COLOR_BGR2HSV)

  def make_lut(a, n=256):
    '''create a lookup table that will saturate the image histogram,
       either into white (for a > 1), or into black (for 0 < a < 1)'''
    assert a > 0
    if a >= 1:
      lut = np.power(np.arange(n, dtype=float) / (n-1), a) * (n-1)
    else:
      lut = (n-1) - make_lut(1/a, n)[::-1]
    return lut.astype(np.uint8)

  # change hue pixelwise (OpenCV 8-bit hue range is [0, 180))
  dhue = (np.random.rand() - 0.5) * COEF_HUE
  hsv[:,:,0] = np.mod(hsv[:,:,0].astype(int) + dhue, 180).astype(np.uint8)
  # change histogram of values
  dval = np.exp((np.random.rand() - 0.5) * COEF_INTENSITY)
  lut = make_lut(dval)
  hsv[:,:,2] = cv2.LUT(hsv[:,:,2], lut)

  image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

  # restore saved alpha channel
  b, g, r = cv2.split(image)
  image = cv2.merge((b, g, r, a))

  return image
Example #25
def hist_similarity(image_1, image_2):
    """color hist based image similarity
    
    @param image_1: np.array(the first input image)
    @param image_2: np.array(the second input image)
    @return similarity: float (in [0, 1]; larger means more similar)
    """
    if image_1.ndim == 2 and image_2.ndim == 2:
        hist_1 = cv2.calcHist([image_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([image_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CORREL)
    elif image_1.ndim == 3 and image_2.ndim == 3:
        """R,G,B split"""
        b_1, g_1, r_1 = cv2.split(image_1)
        b_2, g_2, r_2 = cv2.split(image_2)
        hist_b_1 = cv2.calcHist([b_1], [0], None, [256], [0.0, 255.0])
        hist_g_1 = cv2.calcHist([g_1], [0], None, [256], [0.0, 255.0])
        hist_r_1 = cv2.calcHist([r_1], [0], None, [256], [0.0, 255.0])
        hist_b_2 = cv2.calcHist([b_2], [0], None, [256], [0.0, 255.0])
        hist_g_2 = cv2.calcHist([g_2], [0], None, [256], [0.0, 255.0])
        hist_r_2 = cv2.calcHist([r_2], [0], None, [256], [0.0, 255.0])
        similarity_b = cv2.compareHist(hist_b_1, hist_b_2, cv2.HISTCMP_CORREL)
        similarity_g = cv2.compareHist(hist_g_1, hist_g_2, cv2.HISTCMP_CORREL)
        similarity_r = cv2.compareHist(hist_r_1, hist_r_2, cv2.HISTCMP_CORREL)
        sum_bgr = similarity_b + similarity_g + similarity_r
        similarity = sum_bgr/3.
    else:
        gray_1 = cv2.cvtColor(image_1, cv2.COLOR_RGB2GRAY)
        gray_2 = cv2.cvtColor(image_2, cv2.COLOR_RGB2GRAY)
        hist_1 = cv2.calcHist([gray_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([gray_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CORREL)
    return similarity
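
# Usage sketch (hypothetical file names): both images are read as 3-channel
# BGR, so the per-channel correlation branch is taken.
img_a = cv2.imread('frame_a.png')
img_b = cv2.imread('frame_b.png')
print('similarity: %.3f' % hist_similarity(img_a, img_b))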
Example #26
def CannyEdge(frame, color='all'):
	#http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_imgproc/py_canny/py_canny.html
	frame = cv2.medianBlur(frame,5)
	if color == 'all':
		b,g,r = cv2.split(frame)
		b= cv2.Canny(b,70,200)
		g= cv2.Canny(g,70,200)
		r= cv2.Canny(r,70,200)
		return cv2.merge((b,g,r))
	if color == 'b':
		b,g,r = cv2.split(frame)
		b= cv2.Canny(b,70,200)
		#g= cv2.Canny(g,70,200)
		#r= cv2.Canny(r,70,200)
		return cv2.merge((b,g,r))	
	if color == 'g':
		b,g,r = cv2.split(frame)
		#b= cv2.Canny(b,70,200)
		g= cv2.Canny(g,70,200)
		#r= cv2.Canny(r,70,200)
		return cv2.merge((b,g,r))
	if color == 'r':
		b,g,r = cv2.split(frame)
		#b= cv2.Canny(b,70,200)
		#g= cv2.Canny(g,70,200)
		r= cv2.Canny(r,70,200)
		return cv2.merge((b,g,r))
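
# Usage sketch (hypothetical file name): run Canny on the green channel only.
frame = cv2.imread('frame.jpg')
edges_g = CannyEdge(frame, color='g')
cv2.imwrite('edges_green.jpg', edges_g)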
Example #27
def get_mask_YCbCr(im, mask_YCbCr):
    minH = 80;    maxH = 220 
    minS = 65;    maxS = 220 
    minV = 65;    maxV = 220 

    tmp = cv2.cvtColor(im, cv2.COLOR_BGR2YCrCb)  # BGR -> YCrCb

    p_src = cv2.split(tmp)  # channels of the converted image (use im instead to work on the original BGR)
    p_dst = cv2.split(tmp)  # channels that will be modified to build the mask

    # plain HSV-style thresholds (not the modified-HSV variant)
    H = p_src[0]    # 0 to 180
    S = p_src[1]
    V = p_src[2]

    in_range = 255*(minH <= H)*(H <= maxH)*(minS <= S)*(S <= maxS)*(minV <= V)*(V <= maxV)
    p_dst[0] = in_range
    p_dst[1] = in_range
    p_dst[2] = in_range

    mask_YCbCr[:,:,0] = p_dst[0]
    mask_YCbCr[:,:,1] = p_dst[1]
    mask_YCbCr[:,:,2] = p_dst[2]

    # remove fine noise
    element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    mask_YCbCr[:] = cv2.dilate(np.uint8(mask_YCbCr), element)
    mask_YCbCr[:] = cv2.erode(np.uint8(mask_YCbCr), element)
Example #28
 def detectMotion(self, minpx=40, maxpx=800,showimg=False):
     self.light(True)
     time.sleep(0.2)
     self.light(True)
     time.sleep(0.2)
     image1 = self.captureImage()
     self.dwell(800)
     image2 = self.captureImage()
     self.light(False)
     image1 = cv2.resize(image1, (1280, 960))
     h1, s1, v1 = cv2.split(cv2.cvtColor(image1, cv2.COLOR_BGR2HSV))
     image2 = cv2.resize(image2, (1280, 960))
     h2, s2, v2 = cv2.split(cv2.cvtColor(image2, cv2.COLOR_BGR2HSV))
     image = cv2.subtract(v1,v2)
     ret,gray = cv2.threshold(image,25,255,0)
     gray2 = cv2.morphologyEx(gray.copy(), cv2.MORPH_OPEN, np.ones((5,5), np.uint8))
     gray2 = cv2.Canny(gray2, 30, 100)
     if showimg:
         cv2.imshow('image', gray2)
         cv2.waitKey(0)
     # count edge pixels in the difference image
     edge_pixels = len(np.nonzero(gray2)[0])
     if minpx < edge_pixels < maxpx:
         return True
     else:
         return False
Example #29
def warmer(img, amount):

	if (amount < 0 or amount > 1):
		raise NameError('amount must be between 0 and 1')

	incr_ch_lut = create_LUT_8UC1([0, 64,      192,      256],
	                              [0, 64 + 40*amount, 192 + 45*amount, 256])
	decr_ch_lut = create_LUT_8UC1([0, 64,      192,      256],
                       	          [0, 64 - 52*amount, 192 - 85*amount, 192])

	img_rgb = cv2.imread("images/" + filename + ".jpg")
	 
	c_r, c_g, c_b = cv2.split(img_rgb)
	c_r = cv2.LUT(c_r, incr_ch_lut).astype(np.uint8)
	c_b = cv2.LUT(c_b, decr_ch_lut).astype(np.uint8)
	img_rgb = cv2.merge((c_r, c_g, c_b))

	c_b = cv2.LUT(c_b, decr_ch_lut).astype(np.uint8)
	 

	c_h, c_s, c_v = cv2.split(cv2.cvtColor(img_rgb,
	    cv2.COLOR_RGB2HSV))
	c_s = cv2.LUT(c_s, incr_ch_lut).astype(np.uint8)
	 
	img_warmer = cv2.cvtColor(cv2.merge(
	                      (c_h, c_s, c_v)),
	                       cv2.COLOR_HSV2RGB)

	return img_warmer
Example #30
def find_all_template(im_source, im_search, threshold=0.5, maxcnt=0, rgb=False, bgremove=False):
    '''
    Locate image positions with cv2.matchTemplate.

    Uses pixel matching to find pictures.

    Args:
        im_source(string): the source image (haystack)
        im_search(string): the picture to look for
        threshold: matches whose similarity is below this value are ignored

    Returns:
        A tuple of found [(point, score), ...]

    Raises:
        IOError: when file read error
    '''
    # method = cv2.TM_CCORR_NORMED
    # method = cv2.TM_SQDIFF_NORMED
    method = cv2.TM_CCOEFF_NORMED

    if rgb:
        s_bgr = cv2.split(im_search) # Blue Green Red
        i_bgr = cv2.split(im_source)
        weight = (0.3, 0.3, 0.4)
        resbgr = [0, 0, 0]
        for i in range(3): # bgr
            resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], method)
        res = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
    else:
        s_gray = cv2.cvtColor(im_search, cv2.COLOR_BGR2GRAY)
        i_gray = cv2.cvtColor(im_source, cv2.COLOR_BGR2GRAY)
        # edge extraction (implements background removal)
        if bgremove:
            s_gray = cv2.Canny(s_gray, 100, 200)
            i_gray = cv2.Canny(i_gray, 100, 200)

        res = cv2.matchTemplate(i_gray, s_gray, method)
    w, h = im_search.shape[1], im_search.shape[0]

    result = []
    while True:
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        if DEBUG:
            print('templmatch_value(thresh:%.1f) = %.3f' % (threshold, max_val))
        if max_val < threshold:
            break
        # calculator middle point
        middle_point = (top_left[0] + w//2, top_left[1] + h//2)
        result.append((middle_point, max_val))
        if maxcnt and len(result) >= maxcnt:
            break
        # floodfill the already found area
        cv2.floodFill(res, None, max_loc, (-1000,), max_val-threshold+0.1, 1, flags=cv2.FLOODFILL_FIXED_RANGE)
    return result
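
# Usage sketch (hypothetical file names). The module-level DEBUG flag used
# above is assumed to exist; it is set here for illustration only.
DEBUG = False
source = cv2.imread('screenshot.png')
search = cv2.imread('icon.png')
for point, score in find_all_template(source, search, threshold=0.8, rgb=True):
    print(point, score)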
Example #31
def reinhard_color_transfer(target, source, clip=False, preserve_paper=False, source_mask=None, target_mask=None):
	"""
	Transfers the color distribution from the source to the target
	image using the mean and standard deviations of the L*a*b*
	color space.

	This implementation is (loosely) based on the "Color Transfer
	between Images" paper by Reinhard et al., 2001.

	Parameters:
	-------
	source: NumPy array
		OpenCV image in BGR color space (the source image)
	target: NumPy array
		OpenCV image in BGR color space (the target image)
	clip: Should components of L*a*b* image be scaled by np.clip before
		converting back to BGR color space?
		If False then components will be min-max scaled appropriately.
		Clipping will keep target image brightness truer to the input.
		Scaling will adjust image brightness to avoid washed out portions
		in the resulting color transfer that can be caused by clipping.
	preserve_paper: Should color transfer strictly follow the methodology
		laid out in the original paper? The method does not always produce
		aesthetically pleasing results.
		If False then the L*a*b* components will be scaled using the reciprocal of
		the scaling factor proposed in the paper. This method seems to produce
		more consistently aesthetically pleasing results

	Returns:
	-------
	transfer: NumPy array
		OpenCV image (w, h, 3) NumPy array (uint8)
	"""


	# convert the images from the BGR to the L*a*b* color space, being
	# sure to utilize the floating point data type (note: OpenCV
	# expects floats to be 32-bit, so use that instead of 64-bit)
	source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype(np.float32)
	target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype(np.float32)

	# compute color statistics for the source and target images
	src_input = source if source_mask is None else source*source_mask
	tgt_input = target if target_mask is None else target*target_mask
	(lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = lab_image_stats(src_input)
	(lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = lab_image_stats(tgt_input)

	# subtract the means from the target image
	(l, a, b) = cv2.split(target)
	l -= lMeanTar
	a -= aMeanTar
	b -= bMeanTar

	if preserve_paper:
		# scale by the standard deviations using paper proposed factor
		l = (lStdTar / lStdSrc) * l
		a = (aStdTar / aStdSrc) * a
		b = (bStdTar / bStdSrc) * b
	else:
		# scale by the standard deviations using reciprocal of paper proposed factor
		l = (lStdSrc / lStdTar) * l
		a = (aStdSrc / aStdTar) * a
		b = (bStdSrc / bStdTar) * b

	# add in the source mean
	l += lMeanSrc
	a += aMeanSrc
	b += bMeanSrc

	# clip/scale the pixel intensities to [0, 255] if they fall
	# outside this range
	l = _scale_array(l, clip=clip)
	a = _scale_array(a, clip=clip)
	b = _scale_array(b, clip=clip)

	# merge the channels together and convert back to the BGR color
	# space, being sure to utilize the 8-bit unsigned integer data
	# type
	transfer = cv2.merge([l, a, b])
	transfer = cv2.cvtColor(transfer.astype(np.uint8), cv2.COLOR_LAB2BGR)

	# return the color transferred image
	return transfer
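
# Usage sketch (hypothetical file names). lab_image_stats() and _scale_array()
# are helpers assumed to be defined elsewhere in the same module.
source = cv2.imread('style.jpg')   # image whose color statistics are borrowed
target = cv2.imread('photo.jpg')   # image being recolored
out = reinhard_color_transfer(target, source, clip=True)
cv2.imwrite('transfer.jpg', out)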
Example #32
 def getRedBlue(self):
     b, g, r = cv2.split(self.image)
     self.image = cv2.merge((b, g - g, r))
     self.mostrarImagen()
Example #33
def main():
    path = "/home/g_r00t/PRIYANK/ex_images/"
    imgpath1 = path + "4.2.01.tiff"

    img1 = cv2.imread(imgpath1, 1)
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)

    r, g, b = cv2.split(img1)

    imgpath2 = path + "4.2.01.tiff"
    img3 = cv2.imread(imgpath2, 0)
    img2 = cv2.cvtColor(cv2.imread(imgpath2, 1), cv2.COLOR_BGR2RGB)
    r1, g1, b1 = cv2.split(img2)

    img_titles = [
        "Red", "Green", "Blue", "gray_r", "gray_g", "gray_b", "Plasma_r",
        "Plasma_g", "Plasma_b"
    ]
    cmaps = [
        'Reds', 'Greens', 'Blues', 'gray', 'gray', 'gray', 'hsv', 'hsv', 'hsv'
    ]
    show_image = [r, g, b, r, g, b, r, g, b]

    #-----matplotlib na original color/gray/hsv-----------------------------------
    plt.subplot(5, 3, 13)
    plt.imshow(cv2.merge((r, g, b)))
    plt.title("Original")
    plt.xticks([])
    plt.yticks([])  # to remove the scales from the graph

    plt.subplot(5, 3, 14)
    plt.imshow(img3, cmap='gray')
    plt.title("Original_gray")
    plt.xticks([])
    plt.yticks([])  # to remove the scales from the graph

    plt.subplot(5, 3, 15)
    plt.imshow(img3, cmap='hsv')
    plt.title("Original_hsv")
    plt.xticks([])
    plt.yticks([])  # to remove the scales from the graph

    #------matplotlib na original na 3 splits = r / g /  b------------------------
    plt.subplot(5, 3, 10)
    plt.imshow(r)
    plt.title("r")
    plt.xticks([])
    plt.yticks([])  # to remove the scales from the graph

    plt.subplot(5, 3, 11)
    plt.imshow(g)
    plt.title("g")
    plt.xticks([])
    plt.yticks([])  # to remove the scales from the graph

    plt.subplot(5, 3, 12)
    plt.imshow(b)
    plt.title("b")
    plt.xticks([])
    plt.yticks([])  # to remove the scales from the graph

    #--------r/g/b splits with different cmap color theme.---------------------------
    for i in range(9):
        plt.subplot(5, 3, i + 1)
        plt.imshow(show_image[i], cmaps[i])
        plt.title(img_titles[i])
        plt.xticks([])
        plt.yticks([])  # to remove the scales from the graph

    plt.show()
Example #34
import cv2
import os
#-----Reading the image-----------------------------------------------------
path_to_data = 'data/sample_drive/cam_0'
images = os.listdir(path_to_data)
save_path = 'results/contrast/100'
# image = "average_img_cam2.jpg"
i = 0
for image in images:
    print("image", image)
    if i > 100:
        break
    image_path = os.path.join(path_to_data, image)
    img = cv2.imread(image_path, 1)
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)

    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)

    limg = cv2.merge((cl, a, b))

    final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)

    cv2.imwrite(os.path.join(save_path, f'gaussian_blur{i}.jpg'), final)  # note: output is CLAHE-enhanced, despite the file name
    i += 1
Example #35
 def getRedGreen(self):
     b, g, r = cv2.split(self.image)
     self.image = cv2.merge((b - b, g, r))
     self.mostrarImagen()
Example #36
import cv2
img = cv2.imread(r'D:\teleguience\teleguidence\time\piuture\1.png')
# img = cv2.GaussianBlur(img,(5,5),5)

# convert the color space to HSV and equalize the histogram of the V channel

img_2 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(img_2)
# cv2.imshow('1',h)
# cv2.waitKey()
v_2 = cv2.equalizeHist(v)
img_3 = cv2.merge([h, s, v_2])
img_4 = cv2.cvtColor(img_3, cv2.COLOR_HSV2BGR)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', img_4)
cv2.waitKey()
Example #37
import os
import cv2
import numpy as np
#-------------image names
# name of images you want to load
format1 = ".bmp"
format2 = ".jpg"
mosaic = "Resources/crayons_mosaic"
org = "Resources/crayons"

#------------PART1

#---image split and seprate R G B pattern Channels

image = cv2.imread(mosaic + format1)
image = np.float32(image)

b, g, r = cv2.split(image)

for i in range(b.shape[0]):
    for j in range(b.shape[1]):
        # keep blue samples only at (even row, even column)
        if not (i % 2 == 0 and j % 2 == 0):
            b[i][j] = 0
for i in range(r.shape[0]):
    for j in range(r.shape[1]):
        # keep red samples at (even row, odd column) and (odd row, even column)
        if not ((i % 2 == 0 and j % 2 == 1) or (i % 2 == 1 and j % 2 == 0)):
            r[i][j] = 0
Example #38
 def __rearrange(self):
     b, g, r = cv2.split(self.__frame)
     self.__frame = cv2.merge((r, g, b))
     self.__rearranged = True
     return self.__frame
Example #39
import cv2
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.image as im

example_grid = '../calibration_images/example_grid1.jpg'

img = im.imread(example_grid)

img = img[:, :, ::-1].copy()  # RGB -> BGR; copy() keeps the array contiguous for cv2
b, g, r = cv2.split(img)

plt.imshow(g)
plt.show()

plt.imshow(r)
plt.show()

plt.imshow(b)
plt.show()

ret1, thresh1 = cv2.threshold(img, 180, 255, cv2.THRESH_TOZERO)

plt.imshow(thresh1)
plt.show()
Example #40
# -*- coding: latin-1 -*-
import numpy as np
import cv2
from matplotlib import pyplot as plt

perfil = cv2.imread('temperatura.jpg')
forno = cv2.imread('forno-pre.jpg')

col_perfil, lin_perfil, _ = perfil.shape
col_forno, lin_forno, _ = forno.shape
print('perfil antes:', lin_perfil, col_perfil, 'forno:', lin_forno, col_forno)

perfil = perfil[7:-25, 27:]

b, g, r = cv2.split(perfil)  # get b,g,r
perfil = cv2.merge([r, g, b])  # switch it to rgb

perfil = cv2.resize(perfil, (lin_forno, col_forno))
col_perfil, lin_perfil, _ = perfil.shape
print('perfil depois:', lin_perfil, col_perfil)

lin = lin_perfil
col = col_perfil

pts1 = np.float32([[0, 0], [0, col], [lin, col], [lin, 0]])

p1, p2, p3, p4 = [70, 120], [320, 200], [780, 55], [600, 20]
pts2 = np.float32([p1, p2, p3, p4])

M = cv2.getPerspectiveTransform(pts1, pts2)
Example #41
    def segmentation(self, LpRegion):
        LpRegion = self.clean_border(LpRegion)
        # cv2.imshow("edge", edged)

        V = cv2.split(cv2.cvtColor(LpRegion, cv2.COLOR_BGR2HSV))[2]
        # adaptive threshold
        T = threshold_local(V, 15, offset=10, method="gaussian")
        thresh = (V > T).astype("uint8") * 255
        # convert black pixel of digits to white pixel
        thresh = cv2.bitwise_not(thresh)
        thresh = imutils.resize(thresh, width=400)
        thresh = clear_border(thresh)
        # cv2.imwrite("step2_2.png", thresh)
        cv2.imshow("thresh", thresh)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        # try:
        #     lines = cv2.HoughLinesP(image=thresh,rho=1,theta=np.pi/180, threshold=200,lines=np.array([]), minLineLength=200,maxLineGap=20)
        #     angle = 0
        #     num = 0
        #     thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
        #     for line in lines:
        #         my_degree = math.degrees(math.atan2(line[0][3]-line[0][1], line[0][2]-line[0][0]))
        #         if -45 < my_degree < 45:
        #             angle += my_degree
        #             num += 1
        #         cv2.line(thresh, (line[0][0], line[0][1]), (line[0][2], line[0][3]), (255, 0, 0))
        #     angle /= num

        #     cv2.imshow("draw", thresh)
        #     cv2.waitKey(0)
        #     cv2.destroyAllWindows()
        #     # cv2.imwrite("draw.png", thresh)
        #     # Rotate image to deskew
        #     (h, w) = thresh.shape[:2]
        #     center = (w // 2, h // 2)
        #     M = cv2.getRotationMatrix2D(center, angle, 1.0)
        #     thresh = cv2.warpAffine(thresh, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
        # except:
        #     pass

        # edges = cv2.Canny(thresh,100,200)
        # thresh = cv2.medianBlur(thresh, 5)
        # cv2.imshow("thresh", edges)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # cv2.imwrite("thresh.png", thresh)
        # connected components analysis
        labels = measure.label(thresh, connectivity=2, background=0)

        # loop over the unique components
        for label in np.unique(labels):
            # if this is background label, ignore it
            if label == 0:
                continue

            # init mask to store the location of the character candidates
            mask = np.zeros(thresh.shape, dtype="uint8")
            mask[labels == label] = 255
            # find contours from mask
            contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            if len(contours) > 0:
                contour = max(contours, key=cv2.contourArea)
                (x, y, w, h) = cv2.boundingRect(contour)

                # rule to determine characters
                aspectRatio = w / float(h)
                solidity = cv2.contourArea(contour) / float(w * h)
                heightRatio = h / float(LpRegion.shape[0])

                if h * w > MIN_PIXEL_AREA and 0.25 < aspectRatio < 1.0 and solidity > 0.2 and 0.35 < heightRatio < 2.0:
                    # extract characters
                    candidate = np.array(mask[y:y + h, x:x + w])
                    square_candidate = convert2Square(candidate)
                    square_candidate = cv2.resize(square_candidate, (28, 28),
                                                  cv2.INTER_AREA)
                    # cv2.imwrite('./characters/' + str(y) + "_" + str(x) + ".png", cv2.resize(square_candidate, (56, 56), cv2.INTER_AREA))
                    square_candidate = square_candidate.reshape((28, 28, 1))
                    # cv2.imshow("square_candidate", square_candidate)
                    # cv2.waitKey(0)
                    # cv2.destroyAllWindows()
                    self.candidates.append((square_candidate, (y, x)))
Example #42
def custom_split3(img):
    # NB: cv.split return list but g-api requires tuple in multiple output case
    return tuple(cv.split(img))
Example #43
import os
import cv2
import dlib

# Images_Path is assumed to be defined elsewhere (directory with input pictures)
pictures = os.listdir(Images_Path)

detector = dlib.get_frontal_face_detector()

print(pictures)

def rotate(img):
    rows,cols,_ = img.shape
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -90, 1)
    dst = cv2.warpAffine(img, M, (cols, rows))
    return dst

for f in pictures:
    img = cv2.imread(os.path.join(Images_Path,f), cv2.IMREAD_COLOR)
    b, g, r = cv2.split(img)
    img2 = cv2.merge([r, g, b])
    img = rotate(img)

    dets = detector(img, 1)
    #print("Number of faces detected: {}".format(len(dets)))

    for idx, face in enumerate(dets):
        # print('face{}; left{}; top {}; right {}; bot {}'.format(idx, face.left(). face.top(), face.right(), face.bottom()))

        left = face.left()
        top = face.top()
        right = face.right()
        bot = face.bottom()
        #print(left, top, right, bot)
        #cv2.rectangle(img, (left, top), (right, bot), (0, 255, 0), 3)
Example #44
def run():
    with picamera.PiCamera() as camera:
        # Set the camera resolution
        x = 400
        camera.resolution = (int(1.33 * x), x)
        # Various optional camera settings below:
        # camera.framerate = 5
        # camera.awb_mode = 'off'
        # camera.awb_gains = (0.5, 0.5)

        # Need to sleep to give the camera time to get set up properly
        time.sleep(1)

        with picamera.array.PiRGBArray(camera) as stream:
            # Loop constantly
            while True:
                # Grab data from the camera, in colour format
                # NOTE: This comes in BGR rather than RGB, which is important
                # for later!
                camera.capture(stream, format='bgr', use_video_port=True)
                image = stream.array

                # Get the individual colour components of the image
                b, g, r = cv2.split(image)

                # Calculate the NDVI

                # Bottom of fraction
                bottom = (r.astype(float) + b.astype(float))
                bottom[bottom == 0] = 0.01  # Make sure we don't divide by zero!

                ndvi = (r.astype(float) - b) / bottom
                ndvi = contrast_stretch(ndvi)
                ndvi = ndvi.astype(np.uint8)

                # Do the labelling
                label(b, 'Blue')
                label(g, 'Green')
                label(r, 'NIR')
                label(ndvi, 'NDVI')

                # Combine ready for display
                combined = disp_multiple(b, g, r, ndvi)

                # Display
                cv2.imshow('image', combined)

                stream.truncate(0)

                # If we press ESC then break out of the loop
                c = cv2.waitKey(7) % 0x100
                if c == 27:
                    break

    # Important cleanup here!
    cv2.imwrite('images/r.jpg', r)
    cv2.imwrite('images/g.jpg', g)
    cv2.imwrite('images/b.jpg', b)
    cv2.imwrite('images/ndvi.jpg', ndvi)
    cv2.destroyAllWindows()
Example #45
    # fragment: 'filename' and 'original_img' (an OpenSlide handle) come from the enclosing scope
    t_img = tifffile.imread('/media/disk/han/dataset/testset/256/{0}_t.tif'.format(filename[-7:-4]))
    v_img = tifffile.imread('/media/disk/han/dataset/testset/256/{0}.tif'.format(filename[-7:-4]))

    h, w = t_img.shape

    original_result = np.zeros((h, w))

    t_img = np.array(t_img)
    v_img = np.array(v_img)

    for i in range(0, h - 1024, 1024):

        for j in range(0, w - 1024, 1024):
            patch = original_img.read_region((j, i), 0, (1024, 1024)).convert('RGB')
            patch=np.array(patch)
            R, G, B = cv2.split(patch)
            _, R = cv2.threshold(R, 235, 1, cv2.THRESH_BINARY)
            _, B = cv2.threshold(B, 235, 1, cv2.THRESH_BINARY)
            _, G = cv2.threshold(G, 210, 1, cv2.THRESH_BINARY)

            background_label_img = R * B * G
            forground_label_img = np.ones((1024, 1024)) - background_label_img
            original_result[i:i + 1024, j:j + 1024] = forground_label_img

    for i in range(0, h - 1024, 1024):
        patch = original_img.read_region((w - 1024, i), 0, (1024, 1024)).convert('RGB')
        patch=np.array(patch)

        R, G, B = cv2.split(patch)
        _, R = cv2.threshold(R, 235, 1, cv2.THRESH_BINARY)
        _, B = cv2.threshold(B, 235, 1, cv2.THRESH_BINARY)
Example #46
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Read Image

img = cv2.imread('test.png')

# Denoising
dst = cv2.fastNlMeansDenoisingColored(img, None, 15, 15, 7, 21)

b, g, r = cv2.split(dst)  # get b,g,r
rgb_dst = cv2.merge([r, g, b])  # switch it to rgb
cv2.imwrite("Images/Denoising_1_copyTest.jpg", rgb_dst)

# Sharpening image
imgIn = cv2.imread("Images/Denoising_1_copyTest.jpg", cv2.IMREAD_GRAYSCALE)

# Create the identity filter, but with the 1 shifted to the right!
#kernel = np.zeros( (9,9), np.float32)

#Identity, times two!
#kernel[4,4] = 2.0

# Create a box filter:
#boxFilter = np.ones( (9,9), np.float32) / 81.0

# Subtract the two:
#kernel = kernel - boxFilter

#sharp_1 = cv2.filter2D(imgIn, -1, kernel)
Example #47
    def __init__(self, image, parent=None):
        super(PcaWidget, self).__init__(parent)

        self.comp_combo = QComboBox()
        self.comp_combo.addItems(
            [self.tr('#{}'.format(i + 1)) for i in range(3)])

        self.distvect_radio = QRadioButton(self.tr('Vector Distance'))
        self.cross_radio = QRadioButton(self.tr('Cross Correlation'))
        self.distvect_radio.setChecked(True)
        self.last_radio = self.distvect_radio

        self.image = image
        self.components = []
        rows, cols, dims = self.image.shape
        bgr = np.reshape(self.image, (rows * cols, dims)).astype(np.float32)
        m, eigen_vec, eigen_val = cv.PCACompute2(bgr, np.array([]))
        p = self.image.astype(np.float32) - m
        for v in eigen_vec:
            c = np.cross(p, v)
            d = np.linalg.norm(c, axis=2) / np.linalg.norm(v)
            distance = normalize_mat(d, to_bgr=True)
            cross = cv.merge([normalize_mat(x) for x in cv.split(c)])
            self.components.extend([distance, cross])

        table_data = [[m[0, 2], m[0, 1], m[0, 0]],
                      [eigen_vec[0, 2], eigen_vec[0, 1], eigen_vec[0, 0]],
                      [eigen_vec[1, 2], eigen_vec[1, 1], eigen_vec[1, 0]],
                      [eigen_vec[2, 2], eigen_vec[2, 1], eigen_vec[2, 0]],
                      [eigen_val[2, 0], eigen_val[1, 0], eigen_val[0, 0]]]
        table_widget = QTableWidget(5, 4)
        table_widget.setHorizontalHeaderLabels([
            self.tr('Element'),
            self.tr('Red'),
            self.tr('Green'),
            self.tr('Blue')
        ])
        table_widget.setItem(0, 0, QTableWidgetItem(self.tr('Mean color')))
        table_widget.setItem(1, 0, QTableWidgetItem(self.tr('Eigen vect 1')))
        table_widget.setItem(2, 0, QTableWidgetItem(self.tr('Eigen vect 2')))
        table_widget.setItem(3, 0, QTableWidgetItem(self.tr('Eigen vect 3')))
        table_widget.setItem(4, 0, QTableWidgetItem(self.tr('Eigen values')))
        for i in range(len(table_data)):
            modify_font(table_widget.item(i, 0), bold=True)
            for j in range(len(table_data[i])):
                table_widget.setItem(i, j + 1,
                                     QTableWidgetItem(str(table_data[i][j])))
        # item = QTableWidgetItem()
        # item.setBackgroundColor(QColor(m[0, 2], m[0, 1], m[0, 0]))
        # table_widget.setItem(0, 4, item)
        # table_widget.resizeRowsToContents()
        # table_widget.resizeColumnsToContents()
        table_widget.setEditTriggers(QAbstractItemView.NoEditTriggers)
        table_widget.setSelectionMode(QAbstractItemView.SingleSelection)

        self.viewer = ImageViewer(self.image, self.image, None)
        self.process()

        self.comp_combo.currentIndexChanged.connect(self.process)
        self.distvect_radio.toggled.connect(self.process)
        self.cross_radio.toggled.connect(self.process)

        top_layout = QHBoxLayout()
        top_layout.addWidget(QLabel(self.tr('Component:')))
        top_layout.addWidget(self.comp_combo)
        top_layout.addWidget(QLabel(self.tr('Projection:')))
        top_layout.addWidget(self.distvect_radio)
        top_layout.addWidget(self.cross_radio)
        top_layout.addStretch()
        bottom_layout = QHBoxLayout()
        bottom_layout.addWidget(table_widget)

        main_layout = QVBoxLayout()
        main_layout.addLayout(top_layout)
        main_layout.addWidget(self.viewer)
        main_layout.addLayout(bottom_layout)
        self.setLayout(main_layout)
Example #48
def segment(image, plot_seg, plot_hist):

    image = image[
        400:1400,
        500:1600, :]  #cropping the fundus image to get the region of interest

    Abo, Ago, Aro = cv2.split(image)  #splitting into 3 channels
    #Aro = clahe.apply(Aro)
    Ago = clahe.apply(Ago)
    M = 60  #filter size
    filter = signal.gaussian(M, std=6)  #Gaussian Window
    filter = filter / sum(filter)
    STDf = filter.std()  #its standard deviation

    Ar = Aro - Aro.mean() - Aro.std()  #Preprocessing Red

    Mr = Ar.mean()  #Mean of preprocessed red
    SDr = Ar.std()  #SD of preprocessed red
    Thr = 0.5 * M - STDf - Ar.std()  #Optic disc Threshold
    #print(Thr)

    Ag = Ago - Ago.mean() - Ago.std()  #Preprocessing Green
    Mg = Ag.mean()  #Mean of preprocessed green
    SDg = Ag.std()  #SD of preprocessed green
    Thg = 0.5 * Mg + 2 * STDf + 2 * SDg + Mg  #Optic Cup Threshold
    #print(Thg)

    hist, bins = np.histogram(
        Ag.ravel(), 256, [0, 256])  #Histogram of preprocessed green channel
    histr, binsr = np.histogram(
        Ar.ravel(), 256, [0, 256])  #Histogram of preprocessed red channel

    smooth_hist_g = np.convolve(filter, hist)  #Histogram Smoothing Green
    smooth_hist_r = np.convolve(filter, histr)  #Histogram Smoothing Red

    #plot histogram if input is true
    if plot_hist:
        plt.subplot(2, 2, 1)
        plt.plot(hist)
        plt.title("Preprocessed Green Channel")

        plt.subplot(2, 2, 2)
        plt.plot(smooth_hist_g)
        plt.title("Smoothed Histogram Green Channel")

        plt.subplot(2, 2, 3)
        plt.plot(histr)
        plt.title("Preprocessed Red Channel")

        plt.subplot(2, 2, 4)
        plt.plot(smooth_hist_r)
        plt.title("Smoothed Histogram Red Channel")

        plt.show()

    r, c = Ag.shape
    Dd = np.zeros(shape=(r, c))  #Segmented disc image initialization
    Dc = np.zeros(shape=(r, c))  #Segmented cup image initialization

    #Using obtained threshold for thresholding of the fundus image
    for i in range(1, r):
        for j in range(1, c):
            if Ar[i, j] > Thr:
                Dd[i, j] = 255
            else:
                Dd[i, j] = 0

    for i in range(1, r):
        for j in range(1, c):

            if Ag[i, j] > Thg:
                Dc[i, j] = 1
            else:
                Dc[i, j] = 0

    #Saving the segmented image in the same place as the code folder
    cv2.imwrite('disk.png', Dd)
    plt.imsave('cup.png', Dc)

    if plot_seg:
        plt.imshow(Dd, cmap='gray', interpolation='bicubic')
        plt.axis("off")
        plt.title("Optic Disk")
        plt.show()

        plt.imshow(Dc, cmap='gray', interpolation='bicubic')
        plt.axis("off")
        plt.title("Optic Cup")
        plt.show()
Example #49
#!/usr/bin/env python

import cv2 as opencv

# image = 3D array
image = opencv.imread("wrench.jpg")

opencv.imshow("wrench", image)
opencv.waitKey()

B, G, R = opencv.split(image)
opencv.imshow("Red", R)
opencv.waitKey()
opencv.imshow("Green", G)
opencv.waitKey()
opencv.imshow("Blue", B)
opencv.waitKey()

# Boost the blue channel by 100 (cv2.add saturates at 255 instead of wrapping)
merged = opencv.merge([opencv.add(B, 100), G, R])
opencv.imshow("Merged image", merged)
opencv.waitKey()

# Convert the image to grayscale
gray_image = opencv.cvtColor(image, opencv.COLOR_BGR2GRAY)
opencv.imshow("gs_wrench", gray_image)
opencv.waitKey()

print("BGR value:", B, G, R)
print(f"Color image: {image.shape} -> 3 == channel(RGB)")
print(f"Grayscale image: {gray_image.shape}")
Beispiel #50
0
import cv2

imagem = cv2.imread("fruits.jpg")
imagem = cv2.cvtColor(imagem, cv2.COLOR_BGR2HSV)
matiz, saturacao, valor = cv2.split(imagem)

cv2.imshow("Canal	H", matiz)
cv2.imshow("Canal	S", saturacao)
cv2.imshow("Canal	V", valor)

imagem = cv2.merge((matiz, saturacao, valor))
# our monitors display images in RGB, hence the conversion back ;)
imagem = cv2.cvtColor(imagem, cv2.COLOR_HSV2BGR)

cv2.imshow("Imagem", imagem)

cv2.waitKey(0)
cv2.destroyAllWindows()
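Merging unmodified channels simply reproduces the original image; the split/merge pattern pays off when a channel is changed in between. A hedged sketch (the boost of 40 is an arbitrary choice) that raises saturation before converting back to BGR:

import cv2

imagem = cv2.imread("fruits.jpg")
hsv = cv2.cvtColor(imagem, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
s = cv2.add(s, 40)  # saturating add keeps values within [0, 255]
vivid = cv2.cvtColor(cv2.merge((h, s, v)), cv2.COLOR_HSV2BGR)
cv2.imwrite("fruits_vivid.jpg", vivid)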
Beispiel #51
0
def add_alpha(image):
    # Split the BGR channels and append a fully opaque alpha plane
    b, g, r = cv2.split(image)
    alpha = np.ones(b.shape, dtype=b.dtype) * 255
    return cv2.merge((b, g, r, alpha))
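A possible usage sketch for `add_alpha`, assuming the function above plus `cv2` and `numpy` imports; the file names are hypothetical, and PNG is used because JPEG cannot store an alpha channel:

img = cv2.imread("input.jpg")    # 3-channel BGR
bgra = add_alpha(img)            # 4-channel BGRA, fully opaque
cv2.imwrite("output.png", bgra)  # PNG preserves the alpha plane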
Beispiel #52
0
mbllen_gen = Network.build_mbllen((32, 32, 3))
mbllen_gen.load_weights('weight/mbllen/LOL_img_lowlight.h5')

# Load test image
img = readImage(r'C:\Users\ywqqq\Documents\PRS_prj\maskdetection\test.jpg')

# If the image is noisy, denoise it.
gaussian_noise = False
salt_and_pepper_noise = False

if gaussian_noise:
    img = denoise(img, 'gaussian')

if salt_and_pepper_noise:
    img = denoise(img, 'salt-and-pepper')

# Get the luminance of the image.
# If luminance < 70, then apply illumination improvement
imgHSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
H, S, V = cv2.split(imgHSV)
if V.mean() < 70:  # V is a 2-D array; compare its mean, not the array itself
    img = resolve_single(mbllen_gen, 'mbllen', img)

# Get the resolution of the image.
# If the resolution < 10000, then apply super resolution

r = img.shape[0] * img.shape[1]
if r < 10000:
    img = resolve_single(sr_gen, 'srgan', img)
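The luminance gate only works with a scalar, which is why the mean of `V` is compared above. The same check as a small standalone helper (the threshold of 70 is taken from the example):

import cv2

def is_lowlight(img_rgb, threshold=70):
    # Mean of the HSV value channel as a scalar brightness measure
    hsv = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV)
    return hsv[:, :, 2].mean() < threshold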
Beispiel #53
0
    def getBlueGreen(self):
        # Zero out the red channel, keeping only blue and green
        b, g, r = cv2.split(self.image)
        self.image = cv2.merge((b, g, r - r))
        self.mostrarImagen()
def crop_img_channel(img_path: str, total_number: int, output_dir: str):
    img = cv2.imread(img_path)
    # h,w,c = img.shape
    #split image
    b, g, r = cv2.split(img)
    h, w = b.shape
    test_img = img.copy()
    test_img_score = img.copy()
    cv2.imshow("preview_blue_channel", b)
    cv2.waitKey(5000)
    crop_height = h / total_number
    crop_width = w / total_number
    img_crop_list = []
    bgr_color_list = []
    count = 0
    temp_input_img = "/home/nabu/workspace/pytorch_env/deepNN_py/carpet_script/bw_convert_train/inference_result_aug/anomaly_val_data/hole014.png"
    temp_img = cv2.imread(temp_input_img)
    temp_img_copy = temp_img.copy()
    b1, g1, r1 = cv2.split(temp_img)
    for i in range(total_number):
        for j in range(total_number):
            # crop_box=(j*crop_width,i*crop_height,(j+1)*crop_width,(i+1)*crop_height)
            # print(int(i*crop_height))
            # print(j*crop_width)
            # print((i+1)*crop_height)
            # print((j+1)*crop_width)
            # img1 = img[int(j*crop_width):int((j+1)*crop_width),int(i*crop_height):int((i+1)*crop_height),:]
            # print(j,i)
            # exit()
            #draw rect
            # test_img = cv2.rectangle(test_img,(int(j*crop_width),int((j+1)*crop_width)),(int(i*crop_height),int((i+1)*crop_height)),(0,255,0),3)

            # cv2.rectangle takes two corner points; a single 4-tuple would be
            # read as (x, y, width, height) and draw the wrong boxes
            test_img = cv2.rectangle(
                test_img, (int(j * crop_width), int(i * crop_height)),
                (int((j + 1) * crop_width), int((i + 1) * crop_height)),
                (0, 255, 0), 1)

            test_img_score = cv2.rectangle(
                test_img_score, (int(j * crop_width), int(i * crop_height)),
                (int((j + 1) * crop_width), int((i + 1) * crop_height)),
                (0, 255, 0), 1)
            temp_img_copy = cv2.rectangle(
                temp_img_copy, (int(j * crop_width), int(i * crop_height)),
                (int((j + 1) * crop_width), int((i + 1) * crop_height)),
                (0, 255, 0), 1)

            # test_img = cv2.rectangle(test_img, (x, y), (x + 64, y + 64), (36,255,12), 1)
            cv2.putText(test_img, f"{count}",
                        (int(j * crop_width) + 5, int(i * crop_height) + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (36, 255, 12), 1)

            # rows pair with crop_height (i), columns with crop_width (j)
            img1 = r[int(i * crop_height):int((i + 1) * crop_height),
                     int(j * crop_width):int((j + 1) * crop_width)]

            # img1 = b[int(j*crop_width):int((j+1)*crop_width),int(i*crop_height):int((i+1)*crop_height)]
            # img1 =img.crop(crop_box)
            print(img1)
            bgr_color_list.append(sum(sum(img1)))
            print(sum(sum(img1)))
            if sum(sum(img1)) > 3500:
                cv2.putText(
                    test_img_score, f"{sum(sum(img1))}",
                    (int(j * crop_width) + 5, int(i * crop_height) + 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (36, 255, 12), 1)
                cv2.putText(
                    temp_img_copy, f"{sum(sum(img1))}",
                    (int(j * crop_width) + 5, int(i * crop_height) + 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (36, 255, 12), 1)

            print(img1.shape)
            img1 = cv2.putText(img1, f'{count}', (0, 20),
                               cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2,
                               cv2.LINE_AA)
            cv2.imwrite(os.path.join(output_dir, f"{count}.png"), img1)
            count += 1
            cv2.imshow("preview", img1)
            # cv2.waitKey(0)
            img_crop_list.append(img1)
    cv2.imwrite(f"bbox-img.png", test_img)
    cv2.imwrite(f"bbox-img_score.png", test_img_score)
    cv2.imwrite(f"temp-bbox-img_score.png", temp_img_copy)
    print("Count", count)
    # print(bgr_color_list)
    return bgr_color_list
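The index arithmetic above is easy to get wrong: rows pair with `crop_height` and the loop variable `i`, columns with `crop_width` and `j`. A compact sketch of the same grid tiling with consistent indexing, assuming a 2-D single-channel array `chan`:

def grid_tiles(chan, n):
    # Yield (row, col, tile) over an n x n grid of a 2-D array
    h, w = chan.shape
    th, tw = h // n, w // n
    for i in range(n):          # i indexes rows -> y / height
        for j in range(n):      # j indexes columns -> x / width
            yield i, j, chan[i * th:(i + 1) * th, j * tw:(j + 1) * tw]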
Beispiel #55
0
key = cv2.waitKey()
if key == 27:
    cv2.destroyAllWindows()
# show the color image and its individual channels
print(img)
print(img.shape)  # h, w, c

# image crop
img_crop = img[0:100, 0:200]
cv2.imshow('img_crop', img_crop)
key = cv2.waitKey()
if key == 27:
    cv2.destroyAllWindows()

# color split
B, G, R = cv2.split(img)
cv2.imshow('B', B)
cv2.imshow('G', G)
cv2.imshow('R', R)
key = cv2.waitKey()
if key == 27:
    cv2.destroyAllWindows()


# change color
def random_light_color(img):
    # brightness
    B, G, R = cv2.split(img)

    b_rand = random.randint(-50, 50)
    if b_rand == 0:
Beispiel #56
0
def add_hat(img, hat):
    # Split the hat image's RGBA channels: merge R, G, B back into a
    # 3-channel hat image and keep the alpha channel as a mask for later use
    r, g, b, a = cv2.split(hat)
    rgb_hat = cv2.merge((r, g, b))

    cv2.imwrite("hat_alpha.png", a)

    # dlib frontal face detector
    detector = dlib.get_frontal_face_detector()
    dets = detector(img, 1)

    # dlib facial landmark predictor
    predictor_path = "shape_predictor_5_face_landmarks.dat"
    predictor = dlib.shape_predictor(predictor_path)

    # If at least one face was detected
    if len(dets) > 0:
        for i in dets:
            x, y = i.left(), i.top()
            w, h = i.right() - i.left(), i.bottom() - i.top()
            shape = predictor(img, i)

            # Pick the corner points of the left and right eyes
            point1 = shape.part(0)
            point2 = shape.part(2)

            # Midpoint between the two points
            eyes_center = ((point1.x + point2.x) // 2,
                           (point1.y + point2.y) // 2)

            #  Scale the hat according to the face size;
            #  factor is a manual scaling factor
            factor = 1
            resized_hat_h = int(
                round(rgb_hat.shape[0] * h / rgb_hat.shape[1] * factor))
            resized_hat_w = int(
                round(rgb_hat.shape[1] * w / rgb_hat.shape[1] * factor))
            # print(resized_hat_h, resized_hat_w)
            # print(rgb_hat.shape[1], rgb_hat.shape[0])

            if resized_hat_h > y:
                resized_hat_h = y - 1

            # Resize the hat to the size computed above
            resized_hat = cv2.resize(rgb_hat, (resized_hat_w, resized_hat_h))

            # Use the alpha channel as a mask
            mask = cv2.resize(a, (resized_hat_w, resized_hat_h))
            # cv2.imshow("mask",mask)
            mask_inv = cv2.bitwise_not(mask)
            mask_inv = cv2.merge((mask_inv, mask_inv, mask_inv))
            cv2.imshow("mask_inv", mask_inv)

            # Offset of the hat relative to the top of the face box; 0 works well for this hat
            dh = 0
            dw = 0

            # Region of interest (ROI) in the original image
            # the divisor 4 is image-dependent; change it to shift how the hat sits on the head
            bg_roi = img[y + dh - resized_hat_h:y + dh,
                         (eyes_center[0] -
                          resized_hat_w // 4):(eyes_center[0] +
                                               resized_hat_w // 4 * 3)]

            # The area of the original ROI where the hat will go
            bg_roi = bg_roi.astype(float)
            alpha = mask_inv.astype(float) / 255

            # Make sure both have the same size before multiplying (rounding)
            alpha = cv2.resize(alpha, (bg_roi.shape[1], bg_roi.shape[0]))
            bg = cv2.multiply(alpha, bg_roi)
            bg = bg.astype('uint8')

            cv2.imwrite("bg.jpg", bg)

            # Extract the hat region
            hat = cv2.bitwise_and(resized_hat, resized_hat, mask=mask)
            cv2.imwrite("hat.jpg", hat)

            # Make sure both have the same size before adding (rounding)
            hat = cv2.resize(hat, (bg_roi.shape[1], bg_roi.shape[0]))
            # Add the two ROI regions
            add_hat = cv2.add(bg, hat)

            # Put the hat-composited region back into the original image
            img[y + dh - resized_hat_h:y + dh,
                (eyes_center[0] -
                 resized_hat_w // 4):(eyes_center[0] +
                                      resized_hat_w // 4 * 3)] = add_hat

            return img
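The mask-and-add steps in `add_hat` amount to a standard alpha composite. A condensed sketch of the same idea, assuming `fg` and `bg` are same-sized BGR arrays and `a` is an 8-bit alpha mask:

import numpy as np

def alpha_composite(fg, bg, a):
    # Blend fg over bg using alpha mask a (255 = fully foreground)
    alpha = (a.astype(np.float32) / 255.0)[:, :, None]  # HxWx1 for broadcasting
    out = fg.astype(np.float32) * alpha + bg.astype(np.float32) * (1.0 - alpha)
    return out.astype(np.uint8)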
Beispiel #57
0
    def gen_vid_bg_none(self):

        self.text_pos = self.generate_text_pos()

        filename = self.title + "." + self.extension
        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        video = cv2.VideoWriter(self.path + "/" + filename, fourcc, self.fps,
                                self.resolution)

        sticker = None
        txt = None

        # Loop through all product images
        for img in self.product:
            # Resize each image
            resized = self.resize_image(img, self.resolution)

            # Check whether any stickers remain
            if len(self.stickers) != 0:
                sticker = self.stickers.pop()
                res_sticker = self.resize_image(
                    sticker,
                    (self.resolution[0] // 6, self.resolution[1] // 4), False)

                stick_pos = self.generate_sticker_pos(res_sticker.shape)

            if len(self.texts) > 0:
                txt = self.texts.pop()
                length = random.choice(range(72, 120))
            else:
                length = random.choice(range(54, 84))

            if sticker is not None:
                resized = self.overlay_sticker(resized, res_sticker, stick_pos)

            # If the text is positioned at the bottom left
            if self.text_pos[0] <= self.resolution[0] // 3:
                # Shift from right to left
                text_animation = [
                    (self.resolution[0] - self.text_pos[0]) // length, 0
                ]
            else:
                # Shift from the center towards the edge
                text_animation = [-(self.text_pos[0] // length), 0]

            # Remove alpha channel
            b_channel, g_channel, r_channel = cv2.split(resized)[:3]
            resized = cv2.merge((b_channel, g_channel, r_channel))

            # Save the bottom half of the frame
            cropped_img = copy.deepcopy(
                resized[self.resolution[1] // 2:self.resolution[1],
                        0:self.resolution[0]])

            for i in range(0, length):

                # If there is text, animate its position
                if txt is not None:

                    # Restore the bottom half of the frame
                    resized[self.resolution[1] // 2:self.resolution[1],
                            0:self.resolution[0]] = cropped_img

                    self.text_pos = [
                        self.text_pos[0] + text_animation[0],
                        self.text_pos[1] + text_animation[1]
                    ]

                    resized = self.overlay_text(resized, txt, self.text_pos)

                video.write(resized)

            sticker = None
            txt = None
        video.release()
Beispiel #58
0
# create tmp images
bbb = np.zeros((h,w),np.uint8)
ggg = np.zeros((h,w),np.uint8)
rrr = np.zeros((h,w),np.uint8)
processed = np.zeros((h,w) ,np.uint8)
storage = np.zeros(size, dtype=np.float32)
#Create Filters
close_k = np.ones((20, 20), np.uint8)
open_k = np.ones((10, 10), np.uint8)

def channel_processing(channel):
    # Close, then open, to clean up noise; blur and binarize with Otsu
    c = cv2.morphologyEx(channel, cv2.MORPH_CLOSE, close_k)
    o = cv2.morphologyEx(c, cv2.MORPH_OPEN, open_k)
    gauss = cv2.GaussianBlur(o, (7, 7), 15)
    # cv2.threshold returns (retval, dst); keep only the thresholded image
    _, th = cv2.threshold(gauss, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow("1", th)
    return th


bbb, ggg, rrr = cv2.split(orig)
#cv2.split(orig,rrr,ggg,bbb,None)
#process each component and keep the returned binary channels
rrr = channel_processing(rrr)
ggg = channel_processing(ggg)
bbb = channel_processing(bbb)


processed = cv2.merge((bbb, ggg, rrr))
cv2.imwrite('combined.jpg', processed)
cv2.imshow("r",rrr)
Beispiel #59
0
import cv2
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

image = cv2.imread("16.jpg")
imagePlt = plt.imread('16.jpg')
'''
cv2.namedWindow('Image')
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''

# OpenCV stores channels in BGR order, so index 0 is Blue, not Red
B = cv2.split(image)[0]
G = cv2.split(image)[1]
R = cv2.split(image)[2]
'''
cv2.imshow("Image", image)
cv2.imshow("Red", R)
cv2.imshow("Green", G)
cv2.imshow("Blue", B)
cv2.waitKey(0)
'''


def calcAndDrawHist(image, color):
    hist = cv2.calcHist([image], [0], None, [256], [0.0, 255.0])
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(hist)
    histImage = np.zeros([256, 256, 3], np.uint8)
Beispiel #60
0
    def process_frame(self, frame_num, frame_img):
        # type: (int, numpy.ndarray) -> List[int]
        """ Similar to ThresholdDetector, but using the HSV colour space DIFFERENCE instead
        of single-frame RGB/grayscale intensity (thus cannot detect slow fades with this method).

        Arguments:
            frame_num (int): Frame number of frame that is being passed.

            frame_img (Optional[numpy.ndarray]): Decoded frame image (numpy.ndarray) to perform shot
                detection on. Can be None *only* if the self.is_processing_required() method
                (inherited from the base shotDetector class) returns True.

        Returns:
            List[int]: List of frames where shot cuts have been detected. There may be 0
            or more frames in the list, and not necessarily the same as frame_num.
        """
        cut_list = []
        metric_keys = self._metric_keys
        _unused = ''

        if self.last_frame is not None:
            # Change in average of HSV (hsv), (h)ue only, (s)aturation only, (l)uminance only.
            delta_hsv_avg, delta_h, delta_s, delta_v = 0.0, 0.0, 0.0, 0.0

            if (self.stats_manager is not None
                    and self.stats_manager.metrics_exist(
                        frame_num, metric_keys)):
                delta_hsv_avg, delta_h, delta_s, delta_v = self.stats_manager.get_metrics(
                    frame_num, metric_keys)

            else:
                num_pixels = frame_img.shape[0] * frame_img.shape[1]
                curr_hsv = cv2.split(cv2.cvtColor(frame_img,
                                                  cv2.COLOR_BGR2HSV))
                last_hsv = self.last_hsv
                if not last_hsv:
                    last_hsv = cv2.split(
                        cv2.cvtColor(self.last_frame, cv2.COLOR_BGR2HSV))

                delta_hsv = [0, 0, 0, 0]
                for i in range(3):
                    num_pixels = curr_hsv[i].shape[0] * curr_hsv[i].shape[1]
                    curr_hsv[i] = curr_hsv[i].astype(numpy.int32)
                    last_hsv[i] = last_hsv[i].astype(numpy.int32)
                    delta_hsv[i] = numpy.sum(
                        numpy.abs(curr_hsv[i] -
                                  last_hsv[i])) / float(num_pixels)
                delta_hsv[3] = sum(delta_hsv[0:3]) / 3.0
                delta_h, delta_s, delta_v, delta_hsv_avg = delta_hsv

                if self.stats_manager is not None:
                    self.stats_manager.set_metrics(
                        frame_num, {
                            metric_keys[0]: delta_hsv_avg,
                            metric_keys[1]: delta_h,
                            metric_keys[2]: delta_s,
                            metric_keys[3]: delta_v
                        })

                self.last_hsv = curr_hsv
            # pdb.set_trace()
            if delta_hsv_avg >= self.threshold:
                if self.last_shot_cut is None or (
                    (frame_num - self.last_shot_cut) >= self.min_shot_len):
                    cut_list.append(frame_num)
                    self.last_shot_cut = frame_num

            if self.last_frame is not None and self.last_frame is not _unused:
                del self.last_frame

        # If we have the next frame computed, don't copy the current frame
        # into last_frame since we won't use it on the next call anyways.
        if (self.stats_manager is not None and
                self.stats_manager.metrics_exist(frame_num + 1, metric_keys)):
            self.last_frame = _unused
        else:
            self.last_frame = frame_img.copy()
        # if len(cut_list) > 0:
        #     print(frame_num,cut_list)
        return cut_list
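The per-channel loop above reduces to a few lines with NumPy broadcasting. A minimal sketch of the same HSV-difference metric for two same-sized BGR frames (a simplification of the detector's logic, without the stats-manager caching):

import cv2
import numpy as np

def hsv_delta(frame_a, frame_b):
    # Mean absolute per-pixel difference of each HSV channel, plus their average
    a = cv2.cvtColor(frame_a, cv2.COLOR_BGR2HSV).astype(np.int32)
    b = cv2.cvtColor(frame_b, cv2.COLOR_BGR2HSV).astype(np.int32)
    dh, ds, dv = np.abs(a - b).reshape(-1, 3).mean(axis=0)
    return dh, ds, dv, (dh + ds + dv) / 3.0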