def __init__(self, iplimage):
    # Rough-n-ready but it works dammit
    alpha = np.zeros((iplimage.shape[0], iplimage.shape[1]), np.uint8)
    # Fill the alpha plane with 255 (fully opaque)
    cv2.rectangle(alpha, (0, 0), (iplimage.shape[1], iplimage.shape[0]), 255, -1)
    rgba = np.zeros((iplimage.shape[0], iplimage.shape[1], 4), np.uint8)
    cv2.mixChannels([iplimage, alpha], [rgba],
                    [0, 0,   # bgr[0] -> rgba[0]
                     1, 1,   # bgr[1] -> rgba[1]
                     2, 2,   # bgr[2] -> rgba[2]
                     3, 3])  # alpha[0] -> rgba[3]
    self.__imagedata = rgba.tostring()
    super(IplQImage, self).__init__(self.__imagedata,
                                    iplimage.shape[1], iplimage.shape[0],
                                    QtGui.QImage.Format_RGB32)
def Hist_and_Backproj(val, target, hue):
    ## [initialize]
    bins = val
    histSize = max(bins, 2)
    ranges = [0, 180]  # hue_range
    ## [initialize]

    # Extract the hue plane of the target frame
    hsvt = cv.cvtColor(target, cv.COLOR_BGR2HSV)
    ch = (0, 0)
    hue2 = np.empty(hsvt.shape, hsvt.dtype)
    cv.mixChannels([hsvt], [hue2], ch)

    # Histogram of the model hue, normalized, then back-projected onto the target
    hist = cv.calcHist([hue], [0], None, [histSize], ranges, accumulate=False)
    cv.normalize(hist, hist, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
    backproj = cv.calcBackProject([hue2], [0], hist, ranges, scale=1)

    # Draw the histogram
    w = 400
    h = 400
    bin_w = int(round(w / histSize))
    histImg = np.zeros((h, w, 3), dtype=np.uint8)
    for i in range(bins):
        cv.rectangle(histImg, (i * bin_w, h),
                     ((i + 1) * bin_w, h - int(round(hist[i] * h / 255.0))),
                     (0, 0, 255), cv.FILLED)
    cv.imshow('Histogram', histImg)
    return backproj
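# A minimal driver for Hist_and_Backproj above -- a sketch, not from the
# original source: the image paths are placeholders and the imports follow
# the snippet's aliases.
import cv2 as cv
import numpy as np

model = cv.imread('model.jpg')    # placeholder path
target = cv.imread('target.jpg')  # placeholder path
hsv = cv.cvtColor(model, cv.COLOR_BGR2HSV)
hue = np.empty(hsv.shape, hsv.dtype)
cv.mixChannels([hsv], [hue], (0, 0))
backproj = Hist_and_Backproj(25, target, hue)  # 25 hue bins
cv.imshow('Backprojection', backproj)
cv.waitKey(0)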
def main(argv):
    global src
    src = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)
    if src is None:
        print('Usage:\ncalc_back_project.py <path_to_image>')
        return -1
    global hsv
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    global hue
    hue = np.zeros(hsv.shape, dtype=np.uint8)
    ch = [0, 0]
    cv2.mixChannels([hsv], [hue], ch)
    window_image = 'Source image'
    cv2.namedWindow(window_image, cv2.WINDOW_AUTOSIZE)
    bins = 25  # initial bin count (assumed default; the original left `bins` undefined)
    cv2.createTrackbar('* Hue bins: ', window_image, bins, 180, Hist_and_Backproj)
    Hist_and_Backproj(bins)
    cv2.imshow(window_image, src)
    cv2.waitKey(0)
    return 0
def __init__(self, drawingOverlay, last_gframe):
    # Convert red pixels to yellow for the overlay: copy the red channel
    # into both the green and red channels of the output
    self.drawingOverlay = np.zeros_like(drawingOverlay)
    cv2.mixChannels([drawingOverlay], [self.drawingOverlay], (0, 0, 2, 1, 2, 2))
    self.shape = (drawingOverlay.shape[1], drawingOverlay.shape[0])
    # Grab all pixels of the overlay
    xs, ys, zs = np.where(drawingOverlay > 0)
    self.overlayPts = list(zip(xs, ys))
    # Grab the bounding rect
    ptsMat = np.float32([[p] for p in self.overlayPts])
    y, x, h, w = cv2.boundingRect(ptsMat)
    x1, y1 = x, y
    x2, y2 = x + w, y + h
    # Create a dense optical-flow tracker on the rect corners
    self.first_flow_pts = np.float32([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
    self.flow_tracker = OpticalFlowHomographyTracker(last_gframe, self.first_flow_pts)
def find_squares(image):
    squares = []
    # Blur with a down/up pyramid pass to remove noise
    pyr = cv2.pyrDown(image)
    timg = cv2.pyrUp(pyr)
    gray0 = np.zeros(image.shape[:2], np.uint8)
    # Look for squares in every colour plane
    for c in range(3):
        cv2.mixChannels([timg], [gray0], [c, 0])
        for l in range(N):  # N = number of threshold levels (module-level constant)
            if l == 0:
                gray = cv2.Canny(gray0, thresh, 100)
                kernel = np.ones((5, 5), np.uint8)
                gray = cv2.dilate(gray, kernel, iterations=2)
            else:
                gray = np.uint8(gray0 >= (l + 1) * 255 / N) * 255
            # OpenCV 4 signature; OpenCV 3 returns (ret, contours, hierarchy)
            contours, _ = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                approx = cv2.approxPolyDP(cnt, 3, True)
                # area threshold assumed; the original left the comparison incomplete
                if (len(approx) == 4
                        and math.fabs(cv2.contourArea(approx)) > 1000
                        and cv2.isContourConvex(approx)):
                    maxCosine = 0
                    for j in range(2, 5):
                        cosine = math.fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]))
                        maxCosine = max(maxCosine, cosine)
                    if maxCosine < 0.3:
                        print("square")
                        squares.append(approx)
    return squares
def switch_by_mixchannels(src):
    """Swap the first and third channels (e.g. BGR <-> RGB) via cv2.mixChannels.

    :param src: 3-channel input image
    :return: image with channels 0 and 2 swapped
    """
    dst = np.zeros_like(src)
    cv2.mixChannels([src], [dst], fromTo=[0, 2, 1, 1, 2, 0])
    return dst
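# A quick sanity check for switch_by_mixchannels -- a hedged sketch assuming a
# 3-channel BGR image; the path is a placeholder. The swap must agree with
# NumPy's reversed channel slice.
import cv2
import numpy as np

bgr = cv2.imread("example.jpg")  # placeholder path
rgb = switch_by_mixchannels(bgr)
assert np.array_equal(rgb, bgr[..., ::-1])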
def processFrames(socket, frameWidth, frameHeight):
    global dlibObject, counter, svc
    frameSize = frameWidth * frameHeight * 4  # assuming ARGB
    while True:
        timestamp, frameNumber, frameName, frame, err = readFrame(socket, frameSize)
        if not err:
            print(" > read frame " + str(frameNumber) + ": " + frameName +
                  " (" + str(len(frame)) + " bytes)")
            imgARGB = np.frombuffer(frame, 'uint8').reshape(frameHeight, frameWidth, 4)
            imgAlpha = np.zeros((frameHeight, frameWidth, 1), 'uint8')
            imgBGR = np.zeros((frameHeight, frameWidth, 3), 'uint8')
            # ARGB -> BGR + alpha: R->ch2, G->ch1, B->ch0, A->alpha
            cv2.mixChannels([imgARGB], [imgBGR, imgAlpha], [1, 2, 2, 1, 3, 0, 0, 3])
            # cv2.imwrite('test-frame'+str(counter)+'.jpg', imgBGR)
            # counter += 1
            # continue
            # sys.exit(0)
            p1 = datetime.datetime.now()
            rects = dlibObject.getAllFaceBoundingBoxes(imgBGR)
            if len(rects) > 0:
                print(" > DETECTED " + str(len(rects)) + " faces")
                facesArray = []
                for r in rects:
                    faceAnnotation = getAnnotationFromRect(r, frameWidth, frameHeight)
                    # run classifier; if probability is less than 50%, ignore the classifier's data
                    faceAnnotation = runClassifier(imgBGR, faceAnnotation, r)
                    p2 = datetime.datetime.now()
                    delta = p2 - p1
                    processingMs = int(delta.total_seconds() * 1000)
                    print(" > open face processing took " + str(processingMs) + " ms")
                    facesArray.append(faceAnnotation)
                    drawBox(imgBGR, r, str(faceAnnotation['label']))
                dumpAnnotations(timestamp, frameNumber, frameName, facesArray)
            else:
                print(" > no faces detected")
            dumpImage(imgBGR)
            # handy code to slice the image into separate channels
            # imgR = np.zeros((frameHeight, frameWidth, 1), 'uint8')
            # imgG = np.zeros((frameHeight, frameWidth, 1), 'uint8')
            # imgB = np.zeros((frameHeight, frameWidth, 1), 'uint8')
            # imgA = np.zeros((frameHeight, frameWidth, 1), 'uint8')
            # cv2.mixChannels([imgARGB], [imgA, imgR, imgG, imgB], [0,0, 1,1, 2,2, 3,3])
            # cv2.imwrite('test-frame-A.jpg', imgA)
            # cv2.imwrite('test-frame-R.jpg', imgR)
            # cv2.imwrite('test-frame-G.jpg', imgG)
            # cv2.imwrite('test-frame-B.jpg', imgB)
        else:
            print(" > error reading frame: " + str(err))
def swap_channels(self, channels):
    image_format = self.image_format
    if ((channels is ImageFormat.BGR and image_format.channels is ImageFormat.RGB)
            or (channels is ImageFormat.RGB and image_format.channels is ImageFormat.BGR)):
        output = self.__class__(image_format, channels=channels)
        # Swap channels 0 and 2 (BGR <-> RGB)
        cv2.mixChannels([self], [output], (0, 2, 1, 1, 2, 0))
        return output
    else:
        raise NotImplementedError
def on_btn2_3_click(self):
    img = cv2.imread("../images/q2_train.jpg")
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hue = np.zeros(hsv.shape, dtype=np.uint8)
    cv2.mixChannels([hsv], [hue], [0, 0])
    # Build the histogram from the hue plane, not the raw BGR image;
    # hue spans [0, 180) in OpenCV, and the back-projection channels must
    # match the histogram dimensions (the original mixed a 1-D histogram
    # with two channels)
    hist = cv2.calcHist([hue], [0], None, [180], [0, 180])
    cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX, -1)
    backproj = cv2.calcBackProject([hue], [0], hist, [0, 180], 1)
    cv2.imshow("backproj", backproj)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def backproject():
    srcImg = cv.imread(img_path + 'hu.jpg')
    hsvImg = cv.cvtColor(srcImg, cv.COLOR_BGR2HSV)
    hueImg = np.zeros(hsvImg.shape, np.uint8)
    hueImg = cv.mixChannels([hsvImg], [hueImg], [0, 0])[0]  # take the first output array

    # Trackbar callback: recompute the histogram and the back projection
    def on_BinChg(v):
        if v < 2:
            v = 2
        # sample histogram
        hist = cv.calcHist([hueImg], [0], None, [v], [0, 180])
        hist = cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX, -1)
        # back-project onto the target
        backImg = cv.calcBackProject([hueImg], [0], hist, [0, 180], 1)
        cv.imshow('BackImg', backImg)
        # draw the histogram
        histImg = np.zeros((400, 400, 3), np.uint8)
        bin_w = int(400 / v)
        for i in range(v):
            cv.rectangle(histImg, (i * bin_w, 400),
                         ((i + 1) * bin_w, 400 - int(hist[i] * 400 / 255)),
                         (100, 223, 255), -1)
        cv.imshow('histImg', histImg)

    cv.namedWindow('srcImg', cv.WINDOW_AUTOSIZE)
    cv.createTrackbar('HueVal', 'srcImg', 0, 180, on_BinChg)
    cv.imshow('srcImg', srcImg)
    cv.waitKey(0)
    cv.destroyAllWindows()
def generate_boxes(self):
    record = set()
    image = self.array
    gray0 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    timg = cv2.medianBlur(image, 9)
    for c in range(3):
        ch = [c, 0]
        cv2.mixChannels([timg], [gray0], ch)
        for l in range(self.N):
            if l == 0:
                # aperture size must be passed by keyword; positionally it
                # would land in the `edges` slot
                gray = cv2.Canny(gray0, self.canny1, self.canny2,
                                 apertureSize=self.canny3 * 2 + 1)
                gray = cv2.dilate(gray, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
            else:
                _, gray = cv2.threshold(gray0, (l + 1) * 255 / self.N, 255,
                                        cv2.THRESH_BINARY)
            # OpenCV 3 signature; OpenCV 4 returns (contours, hierarchy)
            _, contour, _ = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for i in range(len(contour)):
                approx = cv2.approxPolyDP(
                    contour[i], cv2.arcLength(contour[i], True) * self.approx, True)
                if len(approx) == 4 \
                        and 30000 < abs(cv2.contourArea(approx)) < self.image.size[0] * self.image.size[1] * 0.95 \
                        and cv2.isContourConvex(approx):
                    maxCosine = 0.0
                    for j in range(2, 5):
                        cosine = abs(self.angle(approx[j % 4], approx[j - 2], approx[j - 1]))
                        maxCosine = max(maxCosine, cosine)
                    if maxCosine < 0.3:
                        box = tuple((it[0][0], it[0][1]) for it in approx)
                        sorted_box = tuple(sorted(box))
                        if not self.sorted_box_in_record(sorted_box, record):
                            record.add(sorted_box)
                            yield box
def got_frame(self, cvimg):
    # Convert BGR -> RGB for PIL display
    rgbimg = np.empty_like(cvimg)
    cv2.mixChannels([cvimg], [rgbimg], [0, 2, 1, 1, 2, 0])
    tmpimg = PImage.fromarray(rgbimg)
    tmpimg.thumbnail((300, 300), PImage.ANTIALIAS)
    tkimage = ImageTk.PhotoImage(tmpimg)
    self.img_label.config({'image': tkimage})
    self.img_label.photo = tkimage  # prevent garbage collection
    # Resize the image for display (or maybe should resize each output image)
    if self.vid_writer:
        self.vid_writer.write(rgbimg)
    for tkfilter in self.filter_list:
        tkfilter.got_frame(rgbimg)
def find_thresh(comp_img, target, bins):
    ## [Transform it to HSV]
    hsv = cv.cvtColor(comp_img, cv.COLOR_BGR2HSV)
    ## [Transform it to HSV]

    ## [Use only the Hue value]
    ch = (0, 0)
    hue = np.empty(hsv.shape, hsv.dtype)
    cv.mixChannels([hsv], [hue], ch)

    bj = Hist_and_Backproj(bins, target, hue)
    cv.imshow('bj', bj)
    print(np.unique(bj))  # distinct back-projection values (debug)

    ret, thresh = cv.threshold(bj, 250, 255, cv.THRESH_BINARY)
    return thresh
def apply_hist_mask(self, src, hist):
    """
    Parameters
    ----------
    src : np.ndarray
        Image frame.
    hist : array-like
        Hue probability distribution.

    Returns
    -------
    np.ndarray
        Back-projection mask of the image.
    """
    ch = (0, 0)
    hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)
    hue = np.empty(hsv.shape, hsv.dtype)
    cv.mixChannels([hsv], [hue], ch)
    # use the hist argument (the original ignored it in favour of self.hist)
    backproj = cv.calcBackProject([hue], [0], hist, self.ranges, scale=1)
    return backproj
def Go_31(self, src, device='cpu'):
    if device == 'cpu':
        # CPU version: split a 4-channel image into colour + alpha.
        dst = cv2.mixChannels([src], [self.color_cpu, self.alpha_cpu],
                              (0, 0, 1, 1, 2, 2, 3, 3))
    elif device == 'numpy':
        # NumPy version.
        dst = [src[..., 0:3], src[..., 3]]
    else:
        # GPU version: the GPU path does not support mixChannels.
        dst = [self.color_gpu, self.alpha_gpu]
    return dst
def Go31(self, src_color, src_alpha, device='cpu'):
    if device == 'cpu':
        # CPU version: merge colour + alpha back into a 4-channel image.
        dst = cv2.mixChannels([src_color, src_alpha], [self.merged_cpu],
                              (0, 0, 1, 1, 2, 2, 3, 3))[0]
    elif device == 'numpy':
        # NumPy version.
        self.merged_cpu[..., 0:3] = src_color
        self.merged_cpu[..., 3] = src_alpha
        dst = self.merged_cpu
    else:
        # GPU version: the GPU path does not support mixChannels.
        dst = self.merged_gpu
    return dst
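# A standalone round trip of the two mixChannels patterns above (split a
# 4-channel image into colour + alpha, then merge back) -- a sketch with a
# hypothetical buffer shape, not part of the original class.
import cv2
import numpy as np

src = np.random.randint(0, 256, (4, 4, 4), np.uint8)
color = np.empty((4, 4, 3), np.uint8)
alpha = np.empty((4, 4, 1), np.uint8)
cv2.mixChannels([src], [color, alpha], (0, 0, 1, 1, 2, 2, 3, 3))

merged = np.empty_like(src)
cv2.mixChannels([color, alpha], [merged], (0, 0, 1, 1, 2, 2, 3, 3))
assert np.array_equal(merged, src)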
import cv2 as cv
import numpy as np

src = cv.imread("../../CPP_OpenCV/img/Lena.jpg")
dst = np.zeros(src.shape, dtype=np.uint8)

# Zeroing all three planes before merging yields an all-black image
# (cv.split returns copies, so src itself is untouched)
mv = cv.split(src)
mv[0][:, :] = 0
mv[1][:, :] = 0
mv[2][:, :] = 0
merged = cv.merge(mv)
cv.imshow("merged", merged)

# Identity mapping: each source channel goes to the same destination channel,
# so dst ends up a plain copy of src
cv.mixChannels([src], [dst], [0, 0, 1, 1, 2, 2])
cv.imshow("mixChannel", dst)
cv.waitKey(0)
def __getitem__(self, index_and_mode):
    if self.disentangled:
        index, sampling_mode = index_and_mode
    else:
        index = index_and_mode
    mat_attr = self.mat_attrs[index]
    # mat = self.mats[index]
    # geom = self.geoms[index]
    # illum = self.illums[index]

    ##### OpenCV version
    # read image
    image_rgb = cv2.cvtColor(
        cv2.imread(os.path.join(self.root, "renderings", self.files[index]), 1),
        cv2.COLOR_BGR2RGB)
    size = image_rgb.shape[0]
    # read normals
    normals_bgra = cv2.imread(
        os.path.join(self.root, "normals", self.files[index][:-3] + "png"), -1)
    if type(normals_bgra) is np.ndarray:
        # if the normals exist, resize them and transform to RGB (they are BGR when read)
        if normals_bgra.shape[0] != size:
            normals_bgra = cv2.resize(normals_bgra, (size, size))
        normals = np.ndarray((size, size, 4), dtype=np.uint8)
        cv2.mixChannels([normals_bgra], [normals], [0, 2, 1, 1, 2, 0, 3, 3])
    else:
        # otherwise, use the image itself plus a full mask
        mask = np.ones((size, size, 1), np.uint8) * 255
        normals = np.ndarray((size, size, 4), dtype=np.uint8)
        cv2.mixChannels([image_rgb, mask], [normals], [0, 0, 1, 1, 2, 2, 3, 3])
    if self.mode == "test":
        # slightly erode the mask so that the results are nicer
        element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        normals[:, :, 3] = cv2.dilate(normals[:, :, 3], element)
    # add the mask to the image
    image = np.ndarray(normals.shape, dtype=np.uint8)
    cv2.mixChannels([image_rgb, normals], [image], [0, 0, 1, 1, 2, 2, 6, 3])

    if self.use_illum:
        illum = cv2.imread(os.path.join(self.root, "illum", self.files[index]), -1)
        if not type(illum) is np.ndarray:
            illum = extract_highlights(image)
            illum = np.concatenate([illum, illum, illum], axis=2)  # 3 channels?
        else:
            if illum.ndim == 3:  # RGB image
                illum = cv2.cvtColor(illum, cv2.COLOR_BGR2RGB)  # or cv2.COLOR_BGR2GRAY
            else:  # image is already B&W
                illum = illum[:, :, np.newaxis]
                illum = np.concatenate([illum, illum, illum], axis=2)
    else:
        illum = torch.Tensor()

    ##### PIL version: faster but applies the alpha channel when resizing
    # image = Image.open(os.path.join(self.root, "renderings", self.files[index]))
    # try:
    #     normals = Image.open(os.path.join(self.root, "normals", self.files[index][:-3] + "png"))
    #     mask = get_alpha_channel(normals)
    # except FileNotFoundError:
    #     # put the original image in place of the normals + full mask
    #     normals = image
    #     mask = Image.new('L', normals.size, 255)
    # normals.putalpha(mask)
    # image.putalpha(mask)

    # apply the transforms
    if self.transform is not None:
        if self.use_illum:
            image, normals, illum = self.transform(image, normals, illum)
        else:
            image, normals = self.transform(image, normals)
    # mask the normals
    normals = normals * normals[3:]
    # mask the input image if asked
    if self.mask_input_bg:
        image = image * image[3:]
        if self.use_illum:
            illum = illum * image[3:]
    if self.disentangled:
        return image, normals, illum, torch.FloatTensor(mat_attr), sampling_mode
    else:
        return image, normals, self.files[index][:-4].split("/")[-1], torch.FloatTensor(mat_attr)
import cv2 as cv

src = cv.imread("./test.png")
cv.namedWindow("input", cv.WINDOW_AUTOSIZE)
cv.imshow("input", src)

# Zero the blue channel
mv = cv.split(src)
mv[0][:, :] = 0
dst1 = cv.merge(mv)
cv.imshow("output1", dst1)

# Zero the green channel
mv = cv.split(src)
mv[1][:, :] = 0
dst2 = cv.merge(mv)
cv.imshow("output2", dst2)

# Zero the red channel
mv = cv.split(src)
mv[2][:, :] = 0
dst3 = cv.merge(mv)
cv.imshow("output3", dst3)

# Copy the red channel of src into the blue channel of dst3
# (note the list brackets around src and dst3)
cv.mixChannels([src], [dst3], [2, 0])
cv.imshow("output4", dst3)

cv.waitKey(0)
cv.destroyAllWindows()
mv = cv.split(src)
mv[0][:, :] = 0
dst1 = cv.merge(mv)
cv.imshow("output1", dst1)

# Zero the green channel
mv = cv.split(src)
mv[1][:, :] = 0
dst2 = cv.merge(mv)
cv.imshow("output2", dst2)

# Zero the red channel
mv = cv.split(src)
mv[2][:, :] = 0
dst3 = cv.merge(mv)
cv.imshow("output3", dst3)

# cv.mixChannels(src, dst3, [2, 0])
# cv.imshow("output4", dst3)

dst = np.zeros(src.shape, dtype=np.uint8)
print(src.shape)
print(dst.shape)
cv.mixChannels([src], [dst], fromTo=[2, 0, 1, 1, 0, 2])  # swap the first and third channels
cv.imshow("output4", dst)

cv.waitKey(0)
cv.destroyAllWindows()
"""
mixChannels swaps and extracts channels: the pair [2, 0] copies the third
channel of the input matrix into the first channel of the output matrix.
"""
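# To make the fromTo pairing concrete -- a minimal sketch on a synthetic
# 3-channel array (names hypothetical), checking the swap against plain
# NumPy indexing.
import cv2 as cv
import numpy as np

demo = np.dstack([np.full((2, 2), v, np.uint8) for v in (10, 20, 30)])
out = np.zeros_like(demo)
cv.mixChannels([demo], [out], fromTo=[2, 0, 1, 1, 0, 2])
assert np.array_equal(out, demo[..., ::-1])  # channels 0 and 2 swapped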
def find_squares(img):
    blurred = cv2.medianBlur(img, 9)
    height, width, depth = blurred.shape
    gray0 = np.zeros((height, width), np.uint8)
    squares = []

    # find squares in every color plane of the image
    for c in range(0, 3):
        ch = [c, 0]
        cv2.mixChannels([blurred], [gray0], ch)  # note the list brackets

        # try several threshold levels
        threshold_level = 8
        for l in range(0, threshold_level):
            # Use Canny instead of a zero threshold level!
            # Canny helps to catch squares with gradient shading
            if l == 0:
                gray = cv2.Canny(gray0, 10, 20, apertureSize=3)
                # Dilate helps to remove potential holes between edge segments
                gray = cv2.dilate(gray, np.ones((11, 11), "uint8"))
            else:
                _, gray = cv2.threshold(gray0, int(l / float(threshold_level) * 255),
                                        255, cv2.THRESH_BINARY_INV)

            # Find contours and store them in a list (OpenCV 4 signature)
            contours, _ = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

            # Test contours
            for i in range(0, len(contours)):
                # approximate contour with accuracy proportional
                # to the contour perimeter
                arclength = cv2.arcLength(contours[i], True) * 0.02
                approx = cv2.approxPolyDP(contours[i], arclength, True)

                # Note: absolute value of the area is used because
                # the area may be positive or negative, in accordance with
                # the contour orientation
                if (len(approx) == 4
                        and abs(cv2.contourArea(approx)) > 1000
                        and cv2.isContourConvex(approx)):
                    maxCosine = 0
                    for j in range(2, 5):
                        cosine = abs(angle(approx[j % 4], approx[j - 2], approx[j - 1]))
                        maxCosine = max([maxCosine, cosine])
                    # all corner angles close to 90 degrees => a square
                    # (the original tested `> math.pi`, which a cosine can never satisfy)
                    if maxCosine < 0.3:
                        squares.append(approx)
    return squares
args = parser.parse_args()

## [Read the image]
src = cv.imread(args.input)
if src is None:
    print('Could not open or find the image:', args.input)
    exit(0)
## [Read the image]

## [Transform it to HSV]
hsv = cv.cvtColor(src, cv.COLOR_BGR2HSV)
## [Transform it to HSV]

## [Use only the Hue value]
ch = (0, 0)
hue = np.empty(hsv.shape, hsv.dtype)
cv.mixChannels([hsv], [hue], ch)
## [Use only the Hue value]

## [Create Trackbar to enter the number of bins]
window_image = 'Source image'
cv.namedWindow(window_image)
bins = 25
cv.createTrackbar('* Hue bins: ', window_image, bins, 180, Hist_and_Backproj)
Hist_and_Backproj(bins)
## [Create Trackbar to enter the number of bins]

## [Show the image]
cv.imshow(window_image, src)
cv.waitKey()
## [Show the image]
# image0[:, :, 0] = 255
# image0[:, :, 1] = 255
# image0[:, :, 2] = 255
cv2.namedWindow('image0', cv2.WINDOW_AUTOSIZE)
cv2.imshow('image0', image0)

# Zero the blue channel
mv = cv2.split(image0)
mv[0][:, :] = 0
# mv[2][:, :] = 0
dst1 = cv2.merge(mv)
cv2.imshow('dst1', dst1)

# Zero the green channel
mv = cv2.split(image0)
mv[1][:, :] = 0
dst2 = cv2.merge(mv)
cv2.imshow('dst2', dst2)

# Zero the red channel
mv = cv2.split(image0)
mv[2][:, :] = 0
dst3 = cv2.merge(mv)
cv2.imshow('dst3', dst3)

# Copy channel 0 of dst1 into channel 0 of dst3;
# be sure to wrap the input and output in lists
cv2.mixChannels([dst1], [dst3], [0, 0])
cv2.imshow('image1', dst3)

cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2 as cv
import numpy as np

src = cv.imread("../data/images/cos.jpg")
cv.imshow("source image", src)

## Channel split
b, g, r = cv.split(src)
cv.imshow("blue", b)
cv.imshow("green", g)
cv.imshow("red", r)

## Channel merge: set all values of one channel to 0, then merge
for i in range(3):
    bgr = cv.split(src)
    bgr[i][:, :] = 0  # zero the i-th channel
    dst = cv.merge(bgr)
    cv.imshow("merge channel without {}".format(i), dst)

## mixChannels copies specified channels of the input array to specified
## channels of the output array
# The output matrix must be pre-allocated
dst = np.zeros_like(src)
# Pairs of two: (0,1), (1,2), (2,0) -- src channel 0 goes to dst channel 1,
# src channel 1 to dst channel 2, src channel 2 to dst channel 0
from_to = [0, 1, 1, 2, 2, 0]
# Wrap src and dst in lists ([...]), otherwise the result is wrong; see:
# https://stackoverflow.com/questions/42329901/opencv-python-cv2-mixchannels
cv.mixChannels([src], [dst], from_to)
cv.imshow("mixChannels", dst)

cv.waitKey(0)
cv.destroyAllWindows()
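# The same rotation expressed in plain NumPy, as a cross-check of the from_to
# pairing described above (a sketch, not part of the original script):
# dst channel 1 <- src channel 0, dst channel 2 <- src channel 1,
# dst channel 0 <- src channel 2.
dst_np = src[..., [2, 0, 1]]
assert np.array_equal(dst_np, dst)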
def main():
    Posiciones = []  # store the initial configuration positions
    featuringRead = np.loadtxt('featuring.out', delimiter=' ')
    featuringRead = np.float32(featuringRead)

    # Apply KMeans
    compactness, labels, centers = cv2.kmeans(
        featuringRead, K=3, bestLabels=None,
        criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 20, 0),
        attempts=1, flags=cv2.KMEANS_RANDOM_CENTERS)
    A = featuringRead[labels.ravel() == 0]
    B = featuringRead[labels.ravel() == 1]
    C = featuringRead[labels.ravel() == 2]

    MIN_OBJECT_AREA = 20 * 20
    MAX_OBJECT_AREA = 40 * 40

    Multi = cv2.imread("multi700x490.jpg", 1)
    Multihsv = cv2.cvtColor(Multi, cv2.COLOR_BGR2HSV)
    Multihue = np.zeros(Multihsv.shape, dtype=np.uint8)
    cv2.mixChannels([Multihsv], [Multihue], [0, 0])

    rospy.init_node("Analisis_de_Color")
    """reset_cameras()
    close_camera("left")
    close_camera("right")
    close_camera("head")"""
    open_camera("right", 960, 600)
    subscribe_to_camera("right")
    # screen_pub = rospy.Publisher('/robot/xdisplay', Image, queue_size=10)
    endpoint_sub = rospy.Subscriber('/robot/limb/right/endpoint_state',
                                    EndpointState, callback=endpoint_callback)
    state_sub = rospy.Subscriber('robot/navigators/right_navigator/state',
                                 NavigatorState, callback=on_state)
    # rate = rospy.Rate(3)

    print('Now record positions')
    # Position layout:
    # ########3########
    # #################
    # #2######0#######1
    # #################
    # ########4########
    while 1:
        if button0:
            Posiciones.append([x_ini, y_ini])
            while button0:
                continue
        if button1 or button2:
            break
    print('positions recorded')

    blank_image = np.zeros((600, 960, 3), np.uint8)
    for i in range(20):
        cv2.line(blank_image, (int((960 / 20) * (i + 1)), 0),
                 (int((960 / 20) * (i + 1)), 600), (255, 0, 0), 5)
        cv2.line(blank_image, (0, int(600 / 20) * (i + 1)),
                 (960, int((600 / 20) * (i + 1))), (255, 0, 0), 5)
    cv2.circle(blank_image, (int(centers[0][0]), int(centers[0][1])), 20, (0, 255, 0), -1)
    cv2.circle(blank_image, (int(centers[1][0]), int(centers[1][1])), 20, (0, 255, 0), -1)
    cv2.circle(blank_image, (int(centers[2][0]), int(centers[2][1])), 20, (0, 255, 0), -1)
    for i in range(len(A)):
        cv2.circle(blank_image, (int(A[i][0]), int(A[i][1])), 4, (0, 255, 255), -1)
    for i in range(len(B)):
        cv2.circle(blank_image, (int(B[i][0]), int(B[i][1])), 4, (0, 255, 255), -1)
    for i in range(len(C)):
        cv2.circle(blank_image, (int(C[i][0]), int(C[i][1])), 4, (0, 255, 255), -1)

    Muevee = Mueve('right', Posiciones)
    erodeElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))  # kernel type

    while 1:
        Etiquetas = []  # store the KMeans labels
        Angulo = []
        Dimensiones = []
        Coordenadas = []
        SetFrames = []
        Grays = []
        if img.getImg() is None:
            continue
        while len(SetFrames) < 20:
            SetFrames.append(np.copy(img.getImg()))
            Grays.append(0)
        Color = img.getImg()
        gray = cv2.cvtColor(Color, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        gray = cv2.GaussianBlur(gray, (5, 5), 0, 0)
        """sobel64f = cv2.Sobel(gray,cv2.CV_64F,1,1,ksize=5)
        abs_sobel64f = np.absolute(sobel64f)
        sobel_8u = np.uint8(abs_sobel64f)
        sobel_8u = cv2.erode(sobel_8u,erodeElement)"""
        gray = cv2.Canny(gray, 100, 150, 3)
        """for m in range(gray.shape[:2][0]):
            for n in range(gray.shape[:2][1]):
                if gray[m][n]==0:
                    gray[m][n]=sobel_8u[m][n]"""
        for i in range(len(SetFrames)):
            Grays[i] = cv2.cvtColor(SetFrames[i], cv2.COLOR_BGR2GRAY)
            Grays[i] = cv2.equalizeHist(Grays[i])
            Grays[i] = cv2.GaussianBlur(Grays[i], (5, 5), 0, 0)
            Grays[i] = cv2.Canny(Grays[i], 100, 150, 3)
        for i in range(len(Grays)):
            gray = cv2.bitwise_or(gray, Grays[i])
        cv2.imshow("gray", gray)
        (contours, hierarchy) = \
            cv2.findContours(gray.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        if len(hierarchy) > 0:
            index = 0
            while index != -1:
                moment = cv2.moments(contours[index])
                area = moment['m00']
                if area > MIN_OBJECT_AREA and area < MAX_OBJECT_AREA:
                    rect = cv2.minAreaRect(contours[index])
                    roi = crop_minAreaRect(Color, rect)
                    if roi.shape[:2][1] > 0 and roi.shape[:2][0] > 0:
                        hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
                        hue = np.zeros(hsv.shape, dtype=np.uint8)
                        cv2.mixChannels([hsv], [hue], [0, 0])
                        hist = cv2.calcHist([hue], [0], None, [180], [0, 180])
                        cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
                        backproj = cv2.calcBackProject([Multihue], [0], hist, [0, 180], 1)
                        ret, backproj = cv2.threshold(backproj, 127, 255, cv2.THRESH_BINARY)
                        receiver = CentroMasa(backproj, Color)
                        dist = [math_calc_dist(receiver, centers[0]),
                                math_calc_dist(receiver, centers[1]),
                                math_calc_dist(receiver, centers[2])]
                        min_index, min_value = min(enumerate(dist), key=operator.itemgetter(1))
                        # print('center:', centers[min_index], 'distance:', dist[min_index])
                        Coordenadas.append([int(moment['m10'] / area), int(moment['m01'] / area)])
                        Etiquetas.append(min_index)
                        Angulo.append(rect[2])
                        Dimensiones.append(rect[1])
                        box = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in old OpenCV
                        box = np.int0(box)
                        cv2.circle(blank_image, (int(receiver[0]), int(receiver[1])), 5, (0, 0, 255), -1)
                        cv2.drawContours(Color, [box], 0, (0, 0, 255), 2)
                index = hierarchy[0][index][0]
        cv2.imshow("Imagen Filtrada", Color)
        cv2.imshow("Enrejado", blank_image)
        # msgsub = cv_bridge.CvBridge().cv2_to_imgmsg(blank_image, encoding="8UC3")
        # screen_pub.publish(msgsub)
        Color1 = []
        Color2 = []
        Color3 = []
        print(Etiquetas)
        for i in range(len(Coordenadas)):
            if Etiquetas[i] == 0:
                Color1.append(i)
            elif Etiquetas[i] == 1:
                Color2.append(i)
            else:
                Color3.append(i)
        for i in Color1:
            Muevee.CA(Coordenadas[i][0], Coordenadas[i][1], Etiquetas[i], Angulo[i], Dimensiones[i])
        for i in Color2:
            Muevee.CA(Coordenadas[i][0], Coordenadas[i][1], Etiquetas[i], Angulo[i], Dimensiones[i])
        for i in Color3:
            Muevee.CA(Coordenadas[i][0], Coordenadas[i][1], Etiquetas[i], Angulo[i], Dimensiones[i])
        Muevee.randomm()
        Muevee.mover_baxter('base', Muevee.pose[:3], Muevee.pose[3:6])
        if cv2.waitKey(1) & 0xFF == ord('q'):  # pressing "q" closes the program
            break
        # rate.sleep()
    cv2.destroyAllWindows()
import cv2
import numpy as np

if __name__ == '__main__':
    roi = cv2.imread('hope.jpg')
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

    target = cv2.imread('img3.jpg')
    hsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)

    # mix channels: extract the hue plane (src and dst must be wrapped in lists)
    hue = np.empty(hsv.shape, hsv.dtype)
    cv2.mixChannels([hsv], [hue], [0, 0])

    # calculate the object histogram over hue only
    # (the original mixed a 2-D histogram with a single plane)
    roihist = cv2.calcHist([hue], [0], None, [180], [0, 180])

    # normalize the histogram and apply backprojection onto the target
    cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
    dst = cv2.calcBackProject([hsvt], [0], roihist, [0, 180], 1)

    # my mean shift; track_window must be initialised elsewhere
    term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    ret, track_window = cv2.meanShift(dst, track_window, term_crit)

    # Draw it on the image
    x, y, w, h = track_window
    img2 = cv2.rectangle(target, (x, y), (x + w, y + h), 255, 2)
    cv2.imshow('img2', img2)
    # Now convolve with a circular disc
src = 'cartoon.jpg'
input_image = cv.imread(src)
if input_image is None:
    print('Could not load image:', src)
    exit(0)

# Split the image into its B, G, R channels:
blue, green, red = cv.split(input_image)
print(blue.shape)

# Create dummy 3-channel arrays
blue_channel = np.zeros(input_image.shape, input_image.dtype)
green_channel = np.zeros(input_image.shape, input_image.dtype)
red_channel = np.zeros(input_image.shape, input_image.dtype)

# Route each colour plane into its own 3-channel rendering:
# Blue rendering:  [blue; 0; 0]
# Green rendering: [0; green; 0]
# Red rendering:   [0; 0; red]
cv.mixChannels([blue, green, red], [blue_channel], [0, 0])
cv.mixChannels([blue, green, red], [green_channel], [1, 1])
cv.mixChannels([blue, green, red], [red_channel], [2, 2])

cv.imshow('Blue Channel', blue_channel)
cv.imshow('Green Channel', green_channel)
cv.imshow('Red Channel', red_channel)
cv.waitKey(0)
cv.destroyAllWindows()
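# Cross-check for the per-channel renderings above -- a sketch using the
# variables already in scope: channel c of each rendering must equal the
# corresponding split plane, and the other channels must stay zero.
assert np.array_equal(blue_channel[..., 0], blue)
assert not blue_channel[..., 1:].any()
assert np.array_equal(green_channel[..., 1], green)
assert np.array_equal(red_channel[..., 2], red)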
def get_feed(self):
    global a_time_to_die, feed_data, feed_width, feed_height, feed_ready
    scanner = zbar.ImageScanner()
    scanner.parse_config('enable')
    shift = 1
    while 1:
        if a_time_to_die:
            return
        try:
            HOST = ''     # The remote host
            PORT = 12345  # The same port as used by the server
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((HOST, PORT))
            s.send(b'Yo!')
            data = recvall(s)
            s.close()
            data = pickle.loads(data)
            # print('Received img')
            height, width, depth = data.shape
            if effect == smiley_face:
                try:
                    data = cv2.flip(detect_faces(cv2.flip(data, 0)), 0)
                except:
                    print("face fail")
            elif effect == target_face:
                try:
                    data = cv2.flip(detect_faces_2(cv2.flip(data, 0)), 0)
                except:
                    print("face 2 fail")
            elif effect == trippy_colours:
                try:
                    shift += 1
                    data += shift % 256
                except:
                    print("shift fail")
            elif effect == trippy_colours_2:
                try:
                    shift += 1
                    for x in range(shift % 3):
                        data2 = data.copy()
                        # fromTo must be a flat list, and src/dst wrapped in lists
                        cv2.mixChannels([data2], [data], [0, 1, 1, 2, 2, 1])
                except:
                    print("mario fail")
            elif effect == vertical_hold:
                try:
                    shift += 10
                    M = numpy.float32([[1, 0, 0], [0, 1, shift % height]])
                    data1 = cv2.warpAffine(data, M, (width, height))
                    M = numpy.float32([[1, 0, 0], [0, 1, (shift % height) - height]])
                    data2 = cv2.warpAffine(data, M, (width, height))
                    data = data1 + data2
                    M = numpy.float32([[1, 0, shift % width], [0, 1, 0]])
                    data1 = cv2.warpAffine(data, M, (width, height))
                    M = numpy.float32([[1, 0, (shift % width) - width], [0, 1, 0]])
                    data2 = cv2.warpAffine(data, M, (width, height))
                    data = data1 + data2
                except:
                    print("vhold fail")
            elif effect == colour_band:
                try:
                    shift += 5
                    b = height // 20
                    top = shift % (width - b)
                    # band of colour
                    data_g = cv2.cvtColor(cv2.cvtColor(data, cv2.COLOR_BGR2GRAY),
                                          cv2.COLOR_GRAY2BGR)
                    data_g[0:width, top:top + b] = data[0:width, top:top + b]
                    data = data_g
                    # inversion
                    data_i = 255 - data
                    data[0:width, top:top + b] = data_i[0:width, top:top + b]
                except:
                    print("band fail")
            data = cv2.cvtColor(data, cv2.COLOR_BGR2RGB)
            feed_data = data.tostring()
            feed_width = width
            feed_height = height
            feed_ready = True
            sleep(.001)
        except:
            s.close()
            print("fail")
            sleep(25)
src = cv.imread("../images/1.png") cv.namedWindow("input", cv.WINDOW_AUTOSIZE) cv.imshow("input", src) # 蓝色通道为零 mv = cv.split(src) mv[0][:, :] = 0 dst1 = cv.merge(mv) cv.imshow("output1", dst1) # 绿色通道为零 mv = cv.split(src) mv[1][:, :] = 0 dst2 = cv.merge(mv) cv.imshow("output2", dst2) # 红色通道为零 mv = cv.split(src) mv[2][:, :] = 0 dst3 = cv.merge(mv) cv.imshow("output3", dst3) dst = np.zeros(src.shape, dtype=np.uint8) print(src.shape) print(dst.shape) cv.mixChannels([src], [dst], fromTo=[2, 0, 1, 1, 0, 2]) cv.imshow("output4", dst) cv.waitKey(0) cv.destroyAllWindows()
import cv2
import numpy as np

src = cv2.imread("d://pics//212121.jpg")

windowImage = 'inputImage'
cv2.namedWindow(windowImage, cv2.WINDOW_NORMAL)
cv2.namedWindow('backProject', cv2.WINDOW_NORMAL)
# cv2.namedWindow('histogram', cv2.WINDOW_NORMAL)

hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)

# Extract the hue plane; src and dst must be wrapped in lists
# (the original aliased hue to hsv and passed bare arrays)
hue = np.zeros(hsv.shape, dtype=np.uint8)
cv2.mixChannels([hsv], [hue], [0, 0])

bins = 12
cv2.imshow(windowImage, src)

# Histogram over the hue plane, normalized, then back-projected
histBase = cv2.calcHist([hue], [0], None, [bins], [0, 180])
cv2.normalize(histBase, histBase, 0, 255, cv2.NORM_MINMAX, -1)
backProject = cv2.calcBackProject([hue], [0], histBase, [0, 180], 1)
cv2.imshow('backProject', backProject)
cv2.waitKey(0)

# def histAndBackProjection(pos, tmp):
#     hist = cv2.calcHist([hue], [0], None, [pos], [0, 180])
#     cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX, -1)
#     backProject = cv2.calcBackProject([hue], [0], hist, [0, 180], 1)