def analyzeImage(original):
    scaleImage = cv.cvCreateImage(cv.cvSize(int(original.width * scale),
                                            int(original.height * scale)), 8, 3)
    cv.cvResize(original, scaleImage)
    # Create a 1-channel image for the edges
    edgeImage = cv.cvCreateImage(cv.cvGetSize(scaleImage), 8, 1)
    # Retrieve edges
    edgeDetector.findBWEdges(scaleImage, edgeImage, edgeThreshold1, edgeThreshold2)
    # Get cuts
    cuts = lib.findGoldenMeans(cv.cvGetSize(scaleImage))
    # Run along each cut and collect the components found there
    allComponents = []
    for cut in cuts:
        cutComponents = analyzeCut(scaleImage, edgeImage, cut)
        allComponents.append(cutComponents)
    # Draw the collected component dictionaries
    for componentDict in allComponents:
        lib.drawBoundingBoxes(original, componentDict, scale)
    # Draw the margins
    for cut in cuts:
        lib.drawMargin(original, cut, margin, scale)
        # Uncomment if the super margin needs to be drawn
        #lib.drawMargin(original, cut, superMargin, scale)
    return (original, allComponents)
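# A minimal usage sketch for analyzeImage, assuming the module-level settings
# (scale, edgeThreshold1, edgeThreshold2, margin) and the lib/edgeDetector
# helpers it references are already defined; the file names are hypothetical.
def demo_analyzeImage():
    from opencv import highgui
    img = highgui.cvLoadImage("input.jpg")
    annotated, components = analyzeImage(img)
    highgui.cvSaveImage("annotated.jpg", annotated)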
def _cv_to_pygame(self, frame, channel=-1):
    # Scale the image to the size of the window
    cvt_scale = cv.cvCreateImage(cv.cvSize(self.image_dims[0], self.image_dims[1]),
                                 frame.depth, frame.nChannels)
    #cv.cvResize(frame, cvt_scale, cv.CV_INTER_LINEAR)
    cv.cvResize(frame, cvt_scale, cv.CV_INTER_NN)
    # The colorspace conversion depends on where the image came from
    cvt_color = cv.cvCreateImage(cv.cvSize(cvt_scale.width, cvt_scale.height),
                                 cvt_scale.depth, 3)
    if frame.nChannels == 3:
        # The frame is in BGR format; convert it to RGB so the sky isn't orange
        cv.cvCvtColor(cvt_scale, cvt_color, cv.CV_BGR2RGB)
    elif frame.nChannels == 1:
        # The image has only one channel, i.e. one color; place the scaled
        # plane into the requested output channel
        if channel == 0:
            cv.cvMerge(cvt_scale, None, None, None, cvt_color)
        elif channel == 1:
            cv.cvMerge(None, cvt_scale, None, None, cvt_color)
        elif channel == 2:
            cv.cvMerge(None, None, cvt_scale, None, cvt_color)
        elif channel == 3:
            cv.cvMerge(None, None, None, cvt_scale, cvt_color)
        else:
            cv.cvCvtColor(cvt_scale, cvt_color, cv.CV_GRAY2RGB)
    # Create a pygame surface from the RGB image data
    frame_surface = pygame.image.frombuffer(cvt_color.imageData, self.image_dims, 'RGB')
    return frame_surface
def detect_faces(self, img_grey):
    """ Detect faces within an image, then draw around them.
        The default parameters (scale_factor=1.1, min_neighbors=3, flags=0)
        are tuned for accurate yet slow object detection. For faster operation
        on real video images the settings are:
        scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
        min_size=<minimum possible face size>
    """
    min_size = cv.cvSize(20, 20)
    self.image_scale = 1.3
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0
    faces = None
    # Create a small image for better performance
    small_size = cv.cvSize(cv.cvRound(img_grey.width / self.image_scale),
                           cv.cvRound(img_grey.height / self.image_scale))
    small_img = cv.cvCreateImage(small_size, 8, 1)
    cv.cvResize(img_grey, small_img, cv.CV_INTER_LINEAR)
    cv.cvEqualizeHist(small_img, small_img)
    cv.cvClearMemStorage(self.faces_storage)
    if self.cascade:
        t = cv.cvGetTickCount()
        faces = cv.cvHaarDetectObjects(small_img, self.cascade, self.faces_storage,
                                       haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.cvGetTickCount() - t
    cv.cvReleaseImage(small_img)
    #print "detection time = %gms" % (t / (cv.cvGetTickFrequency() * 1000.))
    return faces
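# A hedged usage sketch: cvHaarDetectObjects ran on the downscaled image, so
# each rectangle must be scaled back up by image_scale before it is drawn on
# the full-size frame (the detector instance and color frame are hypothetical).
def demo_detect_faces(detector, img_grey, img_color):
    faces = detector.detect_faces(img_grey)
    if faces:
        for r in faces:
            pt1 = cv.cvPoint(int(r.x * detector.image_scale),
                             int(r.y * detector.image_scale))
            pt2 = cv.cvPoint(int((r.x + r.width) * detector.image_scale),
                             int((r.y + r.height) * detector.image_scale))
            cv.cvRectangle(img_color, pt1, pt2, cv.CV_RGB(0, 255, 0), 2, 8, 0)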
def depthmatrix(leftimage, rightimage, precision=4, mask=0):
    """Compute a downsampled depth matrix from a stereo pair.

    The pair is sampled once every 2**precision pixels. If mask is set,
    depth is only computed where the downsampled mask is nonzero. The x and
    y coordinate matrices are filled in as well, but only the z (depth)
    matrix is returned."""
    info = cv.cvGetSize(leftimage)
    width = info.width
    height = info.height
    precision_pixels = 2 ** precision
    downsampled_size = cv.cvSize(width / precision_pixels, height / precision_pixels)
    print "Precision of", downsampled_size.width, downsampled_size.height, "px"
    if mask:
        downsampled_mask = cv.cvCreateImage(downsampled_size, 8, 1)
        cv.cvResize(mask, downsampled_mask)
    matx = cv.cvCreateImage(downsampled_size, 8, 1)
    maty = cv.cvCreateImage(downsampled_size, 8, 1)
    matz = cv.cvCreateImage(downsampled_size, 8, 1)
    for i in xrange(width / precision_pixels):
        for j in xrange(height / precision_pixels):
            if mask:
                if not cv.cvGetReal2D(downsampled_mask, j, i):
                    continue
            # Map the downsampled index back to full-resolution pixels and
            # match at the center of each block
            x = i * precision_pixels
            y = j * precision_pixels
            depth = depthmatch(x + precision_pixels / 2, y + precision_pixels / 2,
                               leftimage, rightimage,
                               roi=precision_pixels, buf=precision_pixels * 2)
            #print i, j
            # Fill in the result matrices (X, Y, Z)
            cv.cvSetReal2D(matx, j, i, int(depth[0][0]))
            cv.cvSetReal2D(maty, j, i, int(depth[0][1]))
            cv.cvSetReal2D(matz, j, i, int(depth[0][2]))
    return matz
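# A small sanity check of the sampling arithmetic above: with precision=4 the
# pair is sampled every 2**4 = 16 pixels, so a 640x480 stereo pair yields a
# 40x30 depth matrix (the image file names are hypothetical).
def demo_depthmatrix():
    left = highgui.cvLoadImage("left.png")
    right = highgui.cvLoadImage("right.png")
    depth = depthmatrix(left, right, precision=4)
    print "depth matrix:", depth.width, "x", depth.height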
def _get_scaled_frame(self, frame=None):
    if frame is None:
        frame = self._get_cv_frame()
    scale = (480, 320)
    scaled = cv.cvCreateImage(cv.cvSize(scale[0], scale[1]), frame.depth, frame.nChannels)
    #cv.cvResize(frame, scaled, cv.CV_INTER_LINEAR)
    cv.cvResize(frame, scaled, cv.CV_INTER_NN)
    return scaled
def putoriginal(fname, img):
    # Paste a quarter-size thumbnail of the original image into the
    # top-left corner of img, pixel by pixel
    ori_img = highgui.cvLoadImage(fname)
    ori_img_thumb = cv.cvCreateImage(
        cv.cvSize(ori_img.width / 4, ori_img.height / 4), 8, 3)
    cv.cvResize(ori_img, ori_img_thumb)
    for x in range(ori_img_thumb.height):
        for y in range(ori_img_thumb.width):
            cv.cvSet2D(img, x, y, cv.cvGet2D(ori_img_thumb, x, y))
    return
def _detect(image):
    """ Detects faces on `image`
    Parameters:
        @image: image file path

    Returns:
        [((x1, y1), (x2, y2)), ...]
        List of coordinates for the top-left and bottom-right corners
    """
    capture = cvCreateFileCapture(image)
    if not capture:
        return []
    frame = cvQueryFrame(capture)
    if not frame:
        return []
    img = cvCreateImage(cvSize(frame.width, frame.height),
                        IPL_DEPTH_8U, frame.nChannels)
    cvCopy(frame, img)
    # Allocate temporary images
    gray = cvCreateImage((img.width, img.height), COPY_DEPTH, COPY_CHANNELS)
    width, height = (cvRound(img.width / IMAGE_SCALE),
                     cvRound(img.height / IMAGE_SCALE))
    small_img = cvCreateImage((width, height), COPY_DEPTH, COPY_CHANNELS)
    # Convert the color input image to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY)
    # Scale the input image for faster processing
    cvResize(gray, small_img, CV_INTER_LINEAR)
    cvEqualizeHist(small_img, small_img)
    cvClearMemStorage(STORAGE)
    coords = []
    for haar_file in CASCADES:
        # The OpenCV API says this function is obsolete, but we can't cast
        # the output of cvLoad to a HaarClassifierCascade, so use it anyway;
        # the size parameter is ignored
        cascade = cvLoadHaarClassifierCascade(haar_file, cvSize(1, 1))
        if cascade:
            faces = cvHaarDetectObjects(small_img, cascade, STORAGE, HAAR_SCALE,
                                        MIN_NEIGHBORS, HAAR_FLAGS, MIN_SIZE) or []
            for face_rect in faces:
                # The input to cvHaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                x, y = face_rect.x, face_rect.y
                pt1 = (int(x * IMAGE_SCALE), int(y * IMAGE_SCALE))
                pt2 = (int((x + face_rect.width) * IMAGE_SCALE),
                       int((y + face_rect.height) * IMAGE_SCALE))
                coords.append((pt1, pt2))
    return coords
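# A hedged usage sketch for _detect; the image path is hypothetical and the
# module-level constants (CASCADES, STORAGE, IMAGE_SCALE, ...) are assumed to
# be configured as in the surrounding module.
def demo_detect():
    for (x1, y1), (x2, y2) in _detect("group_photo.jpg"):
        print "face at (%d, %d)-(%d, %d)" % (x1, y1, x2, y2)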
def resizeImage(self, image_location, output_location, size):
    """ Resizes the image to a square with the given side length and saves it """
    width = size
    height = size
    # flag > 0: the loaded image is forced to be a 3-channel color image
    input_image = highgui.cvLoadImage(image_location, 1)
    output_image = cv.cvCreateImage(cv.cvSize(cv.cvRound(width), cv.cvRound(height)), 8, 3)
    cv.cvResize(input_image, output_image, cv.CV_INTER_LINEAR)
    # Save the image to file
    highgui.cvSaveImage(output_location, output_image)
def read(self):
    """Capture the current frame from OpenCV; returns a cvMat object"""
    # Capture the current frame
    frame = highgui.cvQueryFrame(self._capture)
    # Do we need to scale the captures?
    if self.scale:
        scaled_frame = cv.cvCreateImage(self.cv_capture_dims, frame.depth, frame.nChannels)
        cv.cvResize(frame, scaled_frame, self.interplation_method)
        frame = scaled_frame
    return frame
def resize(self, width, height):
    """
    Image resizing function.

    Arguments:
    - self: The main object pointer.
    - width: The new image width.
    - height: The new image height.
    """
    tmp = cv.cvCreateImage(cv.cvSize(width, height), 8, 3)
    cv.cvResize(self.__image, tmp, cv.CV_INTER_AREA)
    return tmp
def __normImage(self, img, length):
    #print "Generating norm image..."
    width = length
    height = length
    gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
    small_img = cv.cvCreateImage(cv.cvSize(cv.cvRound(width), cv.cvRound(height)), 8, 1)
    # Convert the color input image to grayscale
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    # Scale the input image for faster processing
    cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.cvEqualizeHist(small_img, small_img)
    #cvClearMemStorage(self.storage)
    # Save the 'normalized' image
    norm_image = small_img
    return norm_image
def __findedge(self, filename):
    tmpimg = highgui.cvLoadImage(filename)
    self.img = cv.cvCreateImage(
        cv.cvSize(int(tmpimg.width * self.enlarge),
                  int(tmpimg.height * self.enlarge)), 8, 3)
    cv.cvResize(tmpimg, self.img, cv.CV_INTER_LINEAR)
    if self.drawimage:
        self.drawimg = cv.cvCloneImage(self.img)
    else:
        self.drawimg = cv.cvCreateImage(cv.cvGetSize(self.img), 8, 3)
    greyimg = cv.cvCreateImage(cv.cvSize(self.img.width, self.img.height), 8, 1)
    cv.cvCvtColor(self.img, greyimg, cv.CV_BGR2GRAY)
    self.allcurve = []
    # Threshold at several levels and collect the curves found at each one
    for i in range(80, 200, 20):
        bimg = cv.cvCloneImage(greyimg)
        cv.cvSmooth(bimg, bimg, cv.CV_MEDIAN, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BILATERAL, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
        # cv.cvSmooth(bimg, bimg, cv.CV_BLUR, 9)
        cv.cvThreshold(greyimg, bimg, i, 255, cv.CV_THRESH_BINARY)
        self.__findcurve(bimg)
def detect_face(self, img):
    """ Detect faces within an image, then draw around them.
        The default parameters (scale_factor=1.1, min_neighbors=3, flags=0)
        are tuned for accurate yet slow object detection. For faster operation
        on real video images the settings are:
        scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
        min_size=<minimum possible face size>
    """
    min_size = cv.cvSize(20, 20)
    image_scale = 1.3
    haar_scale = 1.2
    min_neighbors = 2
    haar_flags = 0
    gray = cv.cvCreateImage(cv.cvSize(img.width, img.height), 8, 1)
    small_img = cv.cvCreateImage(
        cv.cvSize(cv.cvRound(img.width / image_scale),
                  cv.cvRound(img.height / image_scale)), 8, 1)
    cv.cvCvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.cvResize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.cvEqualizeHist(small_img, small_img)
    cv.cvClearMemStorage(self.storage)
    if self.cascade:
        t = cv.cvGetTickCount()
        faces = cv.cvHaarDetectObjects(small_img, self.cascade, self.storage,
                                       haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.cvGetTickCount() - t
        #print "detection time = %gms" % (t / (cv.cvGetTickFrequency() * 1000.))
        if faces:
            for r in faces:
                # Scale the detections back up to the original image size
                pt1 = cv.cvPoint(int(r.x * image_scale), int(r.y * image_scale))
                pt2 = cv.cvPoint(int((r.x + r.width) * image_scale),
                                 int((r.y + r.height) * image_scale))
                cv.cvRectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    return img
def read(self):
    frame = self.input.read()
    scaled_frame = cv.cvCreateImage(cv.cvSize(self.new_size[0], self.new_size[1]),
                                    frame.depth, frame.nChannels)
    cv.cvResize(frame, scaled_frame, self.interplation_method)
    return scaled_frame
def get_eye(self):
    eyes = False
    face = self.cap.get_area(commons.haar_cds['Face'])
    if face:
        cvtile = cv.cvCreateMat(128, 128, cv.CV_8UC3)
        bwtile = cv.cvCreateMat(128, 128, cv.CV_8U)
        # Keep only the largest detected face
        areas = [(pt[1].x - pt[0].x) * (pt[1].y - pt[0].y) for pt in face]
        startF = face[areas.index(max(areas))][0]
        endF = face[areas.index(max(areas))][1]
        facerect = self.cap.rect(startF.x, startF.y,
                                 endF.x - startF.x, endF.y - startF.y)
        if not facerect:
            return
        cv.cvResize(facerect, cvtile)
        cv.cvCvtColor(cvtile, bwtile, cv.CV_BGR2GRAY)
        leye, reye, lcp, rcp = self.fel.locateEyes(bwtile)
        leye = pv.Point(leye)
        reye = pv.Point(reye)
        # Map the eye positions from the 128x128 tile back to frame coordinates
        leye_x = int((float(leye.X()) * facerect.width / cvtile.width) + startF.x)
        leye_y = int((float(leye.Y()) * facerect.height / cvtile.height) + startF.y)
        reye_x = int((float(reye.X()) * facerect.width / cvtile.width) + startF.x)
        reye_y = int((float(reye.Y()) * facerect.height / cvtile.height) + startF.y)
        eye_rect = {"startX": leye_x - 5,
                    "startY": leye_y - 5,
                    "endX": leye_x + 5,
                    "endY": leye_y + 5}
        #self.cap.image(self.cap.rect(leye_x - 5, leye_y - 5, 20, 20))
        if not hasattr(self.cap, "leye"):
            self.cap.add(Point("point", "leye", [int(leye_x), int(leye_y)],
                               parent=self.cap, follow=True))
        else:
            self.cap.add(Point("point", "reye", [int(reye_x), int(reye_y)],
                               parent=self.cap, follow=True))
        # Shows the face rectangle
        #self.cap.add( Graphic("rect", "Face", (startF.x, startF.y), (endF.x, endF.y), parent=self.cap) )
    self.foreheadOrig = None
    return False
def opencv_scale(filename, width, height):
    im = highgui.cvLoadImage(filename)
    newim = cv.cvCreateImage(cv.cvSize(width, height), 8, 3)
    cv.cvResize(im, newim, cv.CV_INTER_AREA)
    highgui.cvSaveImage("outcv.jpg", newim)
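# CV_INTER_AREA above is the usual choice when shrinking an image (it
# resamples using the pixel-area relation); for enlarging, CV_INTER_LINEAR or
# CV_INTER_CUBIC generally looks better. A hedged example call (file name
# hypothetical; the result is written to outcv.jpg):
def demo_opencv_scale():
    opencv_scale("input.jpg", 320, 240)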
def blob_identification(binary_image):
    from opencv.highgui import cvSaveImage, cvLoadImageM
    from opencv.cv import cvCreateImage, cvGetSize, cvCreateMat, cvSet, CV_RGB, cvResize
    from Blob import CBlob
    from BlobResult import CBlobResult
    from classification import classification
    from os import chdir, environ
    path = environ.get("HOME")
    frame_size = cvGetSize(binary_image)
    blo = cvCreateImage(frame_size, 8, 1)
    resblo = cvCreateMat(240, 320, binary_image.type)
    mask = cvCreateImage(frame_size, 8, 1)
    cvSet(mask, 255)
    myblobs = CBlobResult(binary_image, mask, 0, True)
    myblobs.filter_blobs(325, 2000)
    blob_count = myblobs.GetNumBlobs()
    count = 0
    pixr = []
    pixrm = []
    for i in range(blob_count):
        value = []
        rowval = []
        colval = []
        cvSet(blo, 0)
        my_enum_blob = myblobs.GetBlob(i)
        my_enum_blob.FillBlob(blo, CV_RGB(255, 0, 255), 0, 0)
        cvSet(resblo, 0)
        cvResize(blo, resblo, 1)
        # Collect the nonzero pixels of the resized blob
        for rowitem in range(resblo.rows):
            for colitem in range(resblo.cols):
                if resblo[rowitem, colitem] != 0:
                    rowval.append(rowitem)
                    colval.append(colitem)
                    value.append(resblo[rowitem, colitem])
        pixr.append(rowval[0])
        pixrm.append(rowval[-1])
        # Shift the blob so it sits 5 px from the top-left corner
        rowmin = min(rowval)
        rowedit = []
        for item in rowval:
            rowedit.append(item - rowmin)
        coledit = []
        colmin = min(colval)
        for item in colval:
            coledit.append(int(item) - colmin)
        rowmax = max(rowedit)
        colmax = max(colval) - colmin
        moved = cvCreateMat(rowmax + 10, colmax + 10, blo.type)
        cvSet(moved, 0)
        for k in range(len(rowval)):
            moved[int(rowedit[k]) + 5, int(coledit[k]) + 5] = int(value[k])
        chdir(path + "/alpr/latest/blobs")
        cvSaveImage("pic" + str(count) + ".png", moved)
        count += 1
    avoid = classification(pixr, pixrm)
    blob_image = cvCreateImage(frame_size, 8, 1)
    cvSet(blob_image, 0)
    # Redraw only the blobs that the classifier did not reject
    for i in range(blob_count):
        if i not in avoid:
            my_enum_blob = myblobs.GetBlob(i)
            my_enum_blob.FillBlob(blob_image, CV_RGB(255, 0, 255), 0, 0)
    cvSaveImage("blob.jpg", blob_image)
    return
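# A hedged usage sketch for blob_identification; the input file name is
# hypothetical, and the function expects a binarized single-channel image
# (it writes per-blob PNGs and a composite blob.jpg under $HOME/alpr/latest/blobs).
def demo_blob_identification():
    from opencv.highgui import cvLoadImageM
    binary = cvLoadImageM("plate_binary.png", 0)  # flag 0: load as single-channel
    blob_identification(binary)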
highgui.cvSetMouseCallback("depthmatch - left", mousecb) highgui.cvCreateTrackbar("ROI", "depthmatch - left", variable_roi, size.width, cb_roi) highgui.cvCreateTrackbar("Buffer", "depthmatch - left", variable_buf, size.width, cb_buf) highgui.cvCreateTrackbar("Focal Length", "depthmatch - left", variable_focal, 1000, cb_focal) highgui.cvCreateTrackbar("Baseline/10", "depthmatch - left", variable_base, 1000, cb_base) leftdraw = cv.cvCreateImage(size, 8, 3) rightdraw = cv.cvCreateImage(size, 8, 3) while 1: depth = depthmatch(xmatch, ymatch, left, right, roi=variable_roi, buf=variable_buf,baseline=variable_base, focal_length=variable_focal) cv.cvCopy(left, leftdraw) cv.cvCopy(right, rightdraw) cv.cvLine(leftdraw, depth[1], depth[2], (0,255,0), 2) cv.cvPutText(leftdraw, "%2f(m) at (%2f,%2f)" % (depth[0][2],depth[0][0],depth[0][1]), (xmatch,ymatch), font, (0,0,255)) cv.cvLine(rightdraw, depth[2], depth[2], (0,0,255), 5) highgui.cvShowImage("depthmatch - left", leftdraw) highgui.cvShowImage("depthmatch - right", rightdraw) print depth highgui.cvWaitKey(10) if __name__ == "__main__" and test_number == 2: left = highgui.cvLoadImage(str(sys.argv[1])) right = highgui.cvLoadImage(str(sys.argv[2])) highgui.cvNamedWindow("Depth") depth = depthmatrix(left, right, 4) depth_full = cv.cvCreateImage(cv.cvGetSize(left), 8, 1) cv.cvResize(depth, depth_full) highgui.cvShowImage("Depth", depth_full) while 1: highgui.cvWaitKey(10)
def timerEvent(self, ev):
    # Fetch a frame from the video camera
    frame = highgui.cvQueryFrame(self.cap)
    img_orig = cv.cvCreateImage(cv.cvSize(frame.width, frame.height),
                                cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.cvCopy(frame, img_orig)
    else:
        cv.cvFlip(frame, img_orig, 0)
    # Create a grey frame to clarify data
    img_grey = cv.cvCreateImage(cv.cvSize(img_orig.width, img_orig.height), 8, 1)
    cv.cvCvtColor(img_orig, img_grey, cv.CV_BGR2GRAY)
    # Detect objects within the frame
    self.faces_storage = cv.cvCreateMemStorage(0)
    faces = self.detect_faces(img_grey)
    self.circles_storage = cv.cvCreateMemStorage(0)
    circles = self.detect_circles(img_grey)
    self.squares_storage = cv.cvCreateMemStorage(0)
    squares = self.detect_squares(img_grey, img_orig)
    self.lines_storage = cv.cvCreateMemStorage(0)
    lines = self.detect_lines(img_grey, img_orig)
    # Draw faces
    if faces:
        for face in faces:
            pt1, pt2 = self.face_points(face)
            cv.cvRectangle(img_orig, pt1, pt2, cv.CV_RGB(255, 0, 0), 3, 8, 0)
    # Draw lines
    if lines:
        for line in lines:
            cv.cvLine(img_orig, line[0], line[1], cv.CV_RGB(255, 255, 0), 3, 8)
    # Draw circles
    if circles:
        for circle in circles:
            cv.cvCircle(img_orig,
                        cv.cvPoint(cv.cvRound(circle[0]), cv.cvRound(circle[1])),
                        cv.cvRound(circle[2]), cv.CV_RGB(0, 0, 255), 3, 8, 0)
    # Draw squares
    if squares:
        i = 0
        while i < squares.total:
            # Read the 4 vertices
            pt = []
            pt.append(squares[i])
            pt.append(squares[i + 1])
            pt.append(squares[i + 2])
            pt.append(squares[i + 3])
            # Draw the square as a closed polyline
            cv.cvPolyLine(img_orig, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv.CV_AA, 0)
            i += 4
    # Resize the image to display properly within the window:
    #   CV_INTER_NN     - nearest-neighbor interpolation
    #   CV_INTER_LINEAR - bilinear interpolation (used by default)
    #   CV_INTER_AREA   - resampling using pixel area relation (preferred for image decimation)
    #   CV_INTER_CUBIC  - bicubic interpolation
    img_display = cv.cvCreateImage(cv.cvSize(self.width(), self.height()), 8, 3)
    cv.cvResize(img_orig, img_display, cv.CV_INTER_NN)
    img_pil = adaptors.Ipl2PIL(img_display)
    s = StringIO()
    img_pil.save(s, "PNG")
    s.seek(0)
    q_img = QImage()
    q_img.loadFromData(s.read())
    bitBlt(self, 0, 0, q_img)
def compute_saliency(image):
    global thresh
    global scale
    saliency_scale = int(math.pow(2, scale))
    # Convert to grayscale and resize to the (square) analysis scale
    bw_im1 = cv.cvCreateImage(cv.cvGetSize(image), cv.IPL_DEPTH_8U, 1)
    cv.cvCvtColor(image, bw_im1, cv.CV_BGR2GRAY)
    bw_im = cv.cvCreateImage(cv.cvSize(saliency_scale, saliency_scale), cv.IPL_DEPTH_8U, 1)
    cv.cvResize(bw_im1, bw_im)
    highgui.cvShowImage("BW", bw_im)
    realInput = cv.cvCreateImage(cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1)
    imaginaryInput = cv.cvCreateImage(cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 1)
    complexInput = cv.cvCreateImage(cv.cvGetSize(bw_im), cv.IPL_DEPTH_32F, 2)
    cv.cvScale(bw_im, realInput, 1.0, 0.0)
    cv.cvZero(imaginaryInput)
    cv.cvMerge(realInput, imaginaryInput, None, None, complexInput)
    dft_M = saliency_scale  #cv.cvGetOptimalDFTSize(bw_im.height - 1)
    dft_N = saliency_scale  #cv.cvGetOptimalDFTSize(bw_im.width - 1)
    dft_A = cv.cvCreateMat(dft_M, dft_N, cv.CV_32FC2)
    image_Re = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    image_Im = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    # Copy A to dft_A and pad dft_A with zeros
    tmp = cv.cvGetSubRect(dft_A, cv.cvRect(0, 0, bw_im.width, bw_im.height))
    cv.cvCopy(complexInput, tmp, None)
    if dft_A.width > bw_im.width:
        tmp = cv.cvGetSubRect(dft_A, cv.cvRect(bw_im.width, 0,
                                               dft_N - bw_im.width, bw_im.height))
        cv.cvZero(tmp)
    cv.cvDFT(dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height)
    cv.cvSplit(dft_A, image_Re, image_Im, None, None)
    # Compute the magnitude and phase of the spectrum
    image_Mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    image_Phase = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    cv.cvCartToPolar(image_Re, image_Im, image_Mag, image_Phase, 0)
    log_mag = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    cv.cvLog(image_Mag, log_mag)
    # Box filter the log magnitude, then take the difference (the spectral residual)
    image_Mag_Filt = cv.cvCreateImage(cv.cvSize(dft_N, dft_M), cv.IPL_DEPTH_32F, 1)
    filt = cv.cvCreateMat(3, 3, cv.CV_32FC1)
    cv.cvSet(filt, cv.cvScalarAll(-1.0 / 9.0))
    cv.cvFilter2D(log_mag, image_Mag_Filt, filt, cv.cvPoint(-1, -1))
    cv.cvAdd(log_mag, image_Mag_Filt, log_mag, None)
    cv.cvExp(log_mag, log_mag)
    # Recombine with the original phase and invert the DFT
    cv.cvPolarToCart(log_mag, image_Phase, image_Re, image_Im, 0)
    cv.cvMerge(image_Re, image_Im, None, None, dft_A)
    cv.cvDFT(dft_A, dft_A, cv.CV_DXT_INVERSE, complexInput.height)
    tmp = cv.cvGetSubRect(dft_A, cv.cvRect(0, 0, bw_im.width, bw_im.height))
    cv.cvCopy(tmp, complexInput, None)
    cv.cvSplit(complexInput, realInput, imaginaryInput, None, None)
    min_val, max_val = cv.cvMinMaxLoc(realInput)
    #cv.cvScale(realInput, realInput, 1.0 / (max_val - min_val), 1.0 * (-min_val) / (max_val - min_val))
    cv.cvSmooth(realInput, realInput)
    # Threshold relative to the mean response
    threshold = thresh / 100.0 * cv.cvAvg(realInput)[0]
    cv.cvThreshold(realInput, realInput, threshold, 1.0, cv.CV_THRESH_BINARY)
    # Resize the map back to the input size and rescale to 8-bit
    tmp_img = cv.cvCreateImage(cv.cvGetSize(bw_im1), cv.IPL_DEPTH_32F, 1)
    cv.cvResize(realInput, tmp_img)
    cv.cvScale(tmp_img, bw_im1, 255, 0)
    return bw_im1
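# compute_saliency follows the spectral-residual recipe: take the
# log-magnitude spectrum, subtract a 3x3 box-filtered copy of it, recombine
# with the original phase, invert the DFT, then threshold relative to the
# mean response. A hedged usage sketch (file name hypothetical; the thresh
# and scale globals are assumed to be set):
def demo_saliency():
    img = highgui.cvLoadImage("scene.jpg")
    saliency_map = compute_saliency(img)
    highgui.cvShowImage("Saliency", saliency_map)
    highgui.cvWaitKey(0)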
import sys
import math

from opencv import highgui, cv

mask = highgui.cvLoadImage(str(sys.argv[1]))
cap = highgui.cvCreateCameraCapture(1)
IMGW = 640
IMGH = 400
highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_WIDTH, IMGW)
highgui.cvSetCaptureProperty(cap, highgui.CV_CAP_PROP_FRAME_HEIGHT, IMGH)
tmp = highgui.cvQueryFrame(cap)
# Resize the mask to the capture size
size = cv.cvGetSize(tmp)
mask_r = cv.cvCreateImage(size, 8, 3)
cv.cvResize(mask, mask_r)
mask_bw = cv.cvCreateImage(size, 8, 1)
cv.cvCvtColor(mask_r, mask_bw, cv.CV_RGB2GRAY)
# Count the fraction of nonzero mask pixels
total_pixels = size.width * size.height
sample_pixels = 0.0
for x in xrange(size.width):
    for y in xrange(size.height):
        if cv.cvGetReal2D(mask_bw, y, x) > 0:
            sample_pixels = sample_pixels + 1
print "Sample region: %f%%" % (100 * sample_pixels / total_pixels)
del tmp

h_bins = 20
h_limit = 180
s_bins = 32
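# A hedged sketch of how the bin settings above are typically used: build a
# 2-D hue/saturation histogram with cvCreateHist. The saturation upper bound
# of 255 is an assumption, as is the uniform-bins flag.
hist = cv.cvCreateHist([h_bins, s_bins], cv.CV_HIST_ARRAY,
                       [[0, h_limit], [0, 255]], 1)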