def url_jpg_contours(url, position=100, scale=10000.0):
    """Download a JPEG and return its external contour points.

    The image is fetched from *url*, edge-detected with Canny and the
    external contours are extracted; every contour point is divided by
    *scale* and collected into one flat list.

    :param url: image URL to fetch
    :param position: Canny low threshold; the high threshold is 3x this
        (previously hard-coded to 100)
    :param scale: divisor applied to each pixel coordinate
        (previously hard-coded to 10000.0)
    :return: list of [lng_offset, lat_offset] pairs
    """
    filedata = urllib2.urlopen(url).read()
    # wrap the raw bytes in a 1-row mat header so cv can decode them
    imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
    cv.SetData(imagefiledata, filedata, len(filedata))
    im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)
    # convert to grayscale
    gray_im = cv.CreateImage((im.width, im.height), 8, 1)
    edge_im = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray_im, cv.CV_BGR2GRAY)
    cv.Canny(gray_im, edge_im, position, position * 3, 3)
    cv.SetZero(col_edge)
    # copy edge points
    cv.Copy(im, col_edge, edge_im)
    edge_im_array = np.asarray(edge_im[:])
    # binarise before contour extraction
    ret, edge_im_array = cv2.threshold(edge_im_array, 127, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(edge_im_array, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    points = []
    for contour in contours:
        for i in contour:
            for j in i:
                lng_offset = j[0] / scale
                lat_offset = j[1] / scale
                points.append([lng_offset, lat_offset])
    return points
def load_sample(name=None):
    """Load a sample image as a colour cv image.

    Resolution order: ``argv[1]`` if present, then *name* on disk, then
    a download of *name*'s basename from the OpenCV samples repository.

    :param name: path (or repo-relative name) of the sample image
    :return: decoded colour image
    :raises ValueError: when neither argv[1] nor *name* is supplied
        (previously this fell through to an UnboundLocalError)
    """
    if len(argv) > 1:
        return cv.LoadImage(argv[1], cv.CV_LOAD_IMAGE_COLOR)
    if name is None:
        raise ValueError("no image specified: pass a name or give argv[1]")
    try:
        img0 = cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR)
    except IOError:
        # not on disk: fetch the file's basename from the samples repo
        urlbase = 'https://raw.github.com/Itseez/opencv/master/samples/c/'
        fname = name.split('/')[-1]  # renamed from `file` (shadowed builtin)
        filedata = urllib2.urlopen(urlbase + fname).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    return img0
def load_sample(name=None):
    """Load a sample image as a colour cv image.

    Resolution order: ``argv[1]`` if present, then *name* on disk, then
    a download of *name*'s basename from the OpenCV samples repository.

    :param name: path (or repo-relative name) of the sample image
    :return: decoded colour image
    :raises ValueError: when neither argv[1] nor *name* is supplied
        (previously this fell through to an UnboundLocalError)
    """
    if len(argv) > 1:
        return cv.LoadImage(argv[1], cv.CV_LOAD_IMAGE_COLOR)
    if name is None:
        raise ValueError("no image specified: pass a name or give argv[1]")
    try:
        img0 = cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR)
    except IOError:
        # not on disk: fetch the file's basename from the samples repo
        urlbase = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/'
        fname = name.split('/')[-1]  # renamed from `file` (shadowed builtin)
        filedata = urllib2.urlopen(urlbase + fname).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    return img0
def mp_icon(filename):
    '''load an icon from the data directory'''
    # we have to jump through a lot of hoops to get an OpenCV image
    # when we may be in a package zip file
    try:
        import pkg_resources
        raw = pkg_resources.resource_stream(__name__, "data/%s" % filename).read()
    except Exception:
        # BUG FIX: the fallback joined onto __file__ itself, producing a
        # path like ".../module.py/data/icon" that can never exist; join
        # onto the module's directory instead. Also open in binary mode
        # and close the handle deterministically.
        path = os.path.join(os.path.dirname(__file__), 'data', filename)
        with open(path, 'rb') as f:
            raw = f.read()
    # wrap the raw bytes in a 1-row mat header so cv can decode them
    imagefiledata = cv.CreateMatHeader(1, len(raw), cv.CV_8UC1)
    cv.SetData(imagefiledata, raw, len(raw))
    img = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    return img
def get_sample(self, filename, iscolor=cv.CV_LOAD_IMAGE_COLOR):
    """Return a decoded sample image, memoised in ``self.image_cache``.

    A local checkout (``OpenCVTests.repoPath``) is consulted first; when
    the file is absent there, the bytes are downloaded from
    ``OpenCVTests.repoUrl`` instead.
    """
    if filename in self.image_cache:
        return self.image_cache[filename]
    raw = None
    # prefer the local test-data checkout when one is configured
    if OpenCVTests.repoPath is not None:
        local_path = OpenCVTests.repoPath + '/' + filename
        if os.path.isfile(local_path):
            with open(local_path, 'rb') as fh:
                raw = fh.read()
    if raw is None:
        raw = urllib.urlopen(OpenCVTests.repoUrl + '/' + filename).read()
    # wrap the raw bytes in a 1-row mat header so cv can decode them
    header = cv.CreateMatHeader(1, len(raw), cv.CV_8UC1)
    cv.SetData(header, raw, len(raw))
    self.image_cache[filename] = cv.DecodeImageM(header, iscolor)
    return self.image_cache[filename]
def convert_tif_to_png(self, buffer):
    """Re-encode a TIFF byte buffer as PNG using the legacy cv API.

    Returns *buffer* unchanged (after logging) when OpenCV is not
    available; otherwise returns the PNG bytes and updates
    ``self.extension`` from the resulting mimetype.
    """
    if not cv:
        msg = """[PILEngine] convert_tif_to_png failed: opencv not imported"""
        logger.error(msg)
        return buffer
    # can not use cv2 here, because ubuntu precise shipped with python-opencv 2.3 which has bug with imencode
    # requires 3rd parameter buf which could not be created in python. Could be replaced with these lines:
    # img = cv2.imdecode(numpy.fromstring(buffer, dtype='uint16'), -1)
    # buffer = cv2.imencode('.png', img)[1].tostring()
    header = cv.CreateMatHeader(1, len(buffer), cv.CV_8UC1)
    cv.SetData(header, buffer, len(buffer))
    decoded = cv.DecodeImage(header, -1)
    buffer = cv.EncodeImage(".png", decoded).tostring()
    mime = self.get_mimetype(buffer)
    self.extension = EXTENSION.get(mime, '.jpg')
    return buffer
def compute(self):
    """Poll the RGBD bus proxy for the "default" camera image and show it.

    Errors from the Ice middleware are caught, printed with a traceback,
    and swallowed so the worker loop keeps running.
    """
    print 'SpecificWorker.compute...'
    try:
        images = self.rgbdbus_proxy.getImages(["default"])
        if len(images) > 0:
            image = images["default"]
            #nparr = np.fromstring(image.colorImage, np.uint8).reshape( image.camera.colorHeight, image.camera.colorWidth, 3 )
            #img = cv2.imdecode(nparr, cv2.CV_LOAD_IMAGE_COLOR)
            # wrap the raw colour bytes in a 3-channel mat header
            # (no pixel copy); CV_AUTOSTEP lets cv infer the row stride
            header = cv.CreateMatHeader(image.camera.colorHeight, image.camera.colorWidth, cv.CV_8UC3)
            cv.SetData(header, image.colorImage, cv.CV_AUTOSTEP)
            cv2.imshow("img", np.asarray(header))
        else:
            print "Camera not found in server"
    except Ice.Exception, e:
        traceback.print_exc()
        print e
def __showPic(self):
    """Fetch the renren.com captcha image, display it, and submit the
    user's typed answer back to the validation endpoint.

    Returns the string "Need OpenCV" when cv2.cv cannot be imported;
    otherwise blocks on the OpenCV window and stdin prompt.
    """
    try:
        import cv2.cv as cv
    except:
        return "Need OpenCV"
    content = urllib2.urlopen(
        "http://www.renren.com/validateuser.do").read()
    # slice the captcha image URL out of the page markup by locating the
    # known URL prefix and the closing tag after it
    place = content.find("http://icode.renren.com/getcode.do?t=ninki&rnd=")
    place2 = content.find("\"/>", place)
    content = content[place:place2]
    filedata = urllib2.urlopen(content).read()
    # wrap the raw bytes in a 1-row mat header so cv can decode them
    imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
    cv.SetData(imagefiledata, filedata, len(filedata))
    img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    cv.ShowImage("captchas", img0)
    cv.WaitKey()
    captchas = raw_input("Please enter the captchas:")
    cv.DestroyAllWindows()
    # POST the answer together with this account's id
    postdata = urllib.urlencode({"id": self.__id, "icode": captchas})
    req = urllib2.Request(url="http://www.renren.com/validateuser.do", data=postdata)
    content = urllib2.urlopen(req).read()
if __name__ == "__main__": cv.NamedWindow("win") if len(sys.argv) > 1: filename = sys.argv[1] im = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE) im3 = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_COLOR) else: try: # try opening local copy of image fileName = '../cpp/left01.jpg' im = cv.LoadImageM(fileName, False) im3 = cv.LoadImageM(fileName, True) except: # if local copy cannot be opened, try downloading it url = 'https://raw.github.com/opencv/opencv/master/samples/cpp/left01.jpg' filedata = urllib2.urlopen(url).read() imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) cv.SetData(imagefiledata, filedata, len(filedata)) im = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE) im3 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) chessboard_dim = (9, 6) found_all, corners = cv.FindChessboardCorners(im, chessboard_dim) print found_all, len(corners) cv.DrawChessboardCorners(im3, chessboard_dim, corners, found_all) cv.ShowImage("win", im3) cv.WaitKey() cv.DestroyAllWindows()
def extract_features(filename, is_url=False): '''Extracts features to be used in text image classifier. :param filename: input image :param is_url: is input image a url or a file path on disk :return: tuple of features: (average_slope, median_slope, average_tilt, median_tilt, median_differences, average_differences, nr_straight_lines) Most relevant ones are average_slope, average_differences and nr_straight_lines. ''' if is_url: filedata = urllib2.urlopen(filename).read() imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) cv.SetData(imagefiledata, filedata, len(filedata)) src = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE) else: src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE) # normalize size normalized_size = 400 # smaller dimension will be 400, longer dimension will be proportional orig_size = cv.GetSize(src) max_dim_idx = max(enumerate(orig_size), key=lambda l: l[1])[0] min_dim_idx = [idx for idx in [0, 1] if idx != max_dim_idx][0] new_size = [0, 0] new_size[min_dim_idx] = normalized_size new_size[max_dim_idx] = int( float(orig_size[max_dim_idx]) / orig_size[min_dim_idx] * normalized_size) dst = cv.CreateImage(new_size, 8, 1) cv.Resize(src, dst) # cv.SaveImage("/tmp/resized.jpg",dst) src = dst dst = cv.CreateImage(cv.GetSize(src), 8, 1) color_dst = cv.CreateImage(cv.GetSize(src), 8, 3) storage = cv.CreateMemStorage(0) cv.Canny(src, dst, 50, 200, 3) cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR) slopes = [] # difference between xs or ys - variant of slope tilts = [] # x coordinates of horizontal lines horizontals = [] # y coordinates of vertical lines verticals = [] if USE_STANDARD: coords = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 50, 50, 10) lines = [] for coord in coords: (rho, theta) = coord a = cos(theta) b = sin(theta) x0 = a * rho y0 = b * rho pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a))) pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a))) lines += [(pt1, pt2)] else: lines = 
cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1, pi / 180, 50, 50, 10) # eliminate duplicates - there are many especially with the standard version # first round the coordinates to integers divisible with 5 (to eliminate different but really close ones) # TODO # lines = list(set(map(lambda l: tuple([int(p) - int(p)%5 for p in l]), lines))) nr_straight_lines = 0 for line in lines: (pt1, pt2) = line # compute slope, rotate the line so that the slope is smallest # (slope is either delta x/ delta y or the reverse) # add smoothing term in denominator in case of 0 slope = min( abs(pt1[1] - pt2[1]), (abs(pt1[0] - pt2[0]))) / (max(abs(pt1[1] - pt2[1]), (abs(pt1[0] - pt2[0]))) + 0.01) # if slope < 0.1: # if slope < 5: if slope < 0.05: if abs(pt1[0] - pt2[0]) < abs(pt1[1] - pt2[1]): # means it's a horizontal line horizontals.append(pt1[0]) else: verticals.append(pt1[1]) if slope < 0.05: # if slope < 5: # if slope < 0.1: nr_straight_lines += 1 slopes.append(slope) tilts.append(min(abs(pt1[1] - pt2[1]), (abs(pt1[0] - pt2[0])))) # print slope average_slope = sum(slopes) / float(len(slopes)) median_slope = npmedian(nparray(slopes)) average_tilt = sum(tilts) / float(len(tilts)) median_tilt = npmedian(nparray(tilts)) differences = [] horizontals = sorted(horizontals) verticals = sorted(verticals) print "x_differences:" for (i, x) in enumerate(horizontals): if i > 0: # print abs(horizontals[i] - horizontals[i-1]) differences.append(abs(horizontals[i] - horizontals[i - 1])) print "y_differences:" for (i, y) in enumerate(verticals): if i > 0: # print abs(verticals[i] - verticals[i-1]) differences.append(abs(verticals[i] - verticals[i - 1])) print filename print "average_slope:", average_slope print "median_slope:", median_slope print "average_tilt:", average_tilt print "median_tilt:", median_tilt median_differences = npmedian(nparray(differences)) print "median_differences:", median_differences if not differences: # big random number for average difference 
average_differences = 50 else: average_differences = sum(differences) / float(len(differences)) print "average_differences:", average_differences print "nr_lines:", nr_straight_lines # print "sorted xs:", sorted(lines) return (average_slope, median_slope, average_tilt, median_tilt, median_differences, average_differences, nr_straight_lines)
def url_jpg_contours(): url = 'http://i12.tietuku.com/05ef0b29030fa46c.jpg' filedata = urllib2.urlopen(url).read() imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1) print imagefiledata #<cvmat(type=42424000 8UC1 rows=1 cols=48230 step=48230 )> cv.SetData(imagefiledata, filedata, len(filedata)) im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR) col_edge = cv.CreateImage((im.width, im.height), 8, 3) # convert to grayscale gray_im = cv.CreateImage((im.width, im.height), 8, 1) edge_im = cv.CreateImage((im.width, im.height), 8, 1) cv.CvtColor(im, gray_im, cv.CV_BGR2GRAY) cv.Canny(gray_im, edge_im, position, position * 3, 3) cv.SetZero(col_edge) # copy edge points cv.Copy(im, col_edge, edge_im) #ret, edge_jpg = cv2.imencode('.jpg', edge_im, [int(cv.CV_IMWRITE_JPEG_QUALITY), 80]) edge_im_array = np.asarray(edge_im[:]) print type(edge_im_array) #edge_jpg_gray = cv2.cvtColor(edge_im_array,cv2.COLOR_BGR2GRAY) ret, edge_im_array = cv2.threshold(edge_im_array, 127, 255, cv2.THRESH_BINARY) print type(edge_im_array) contours, hierarchy = cv2.findContours( edge_im_array, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE ) #压缩水平方向,垂直方向,对角线方向的元素,只保留该方向的终点坐标,例如一个矩形轮廓只需4个点来保存轮廓信息 contours_img = cv2.cvtColor(edge_im_array, cv2.COLOR_GRAY2BGR) url_str_len_contours = str(len(contours)) #取轮廊数量 str_len_contours = str(len(contours)) #取轮廊数量 #数据处理 first_contours = contours[0] #第一条轨迹坐标集合,数据格式为numpy.ndarry first_contours_list = first_contours.tolist() #print contours #输出所有轨迹坐标集合 #print contours[-1] #输出最后一条轨迹坐标,数据格式为numpy.ndarry #print contours[0][0].tolist()[0] #输出第一条轨迹起始点坐标[[375 241]]并转化成list格式[[375,241]] |**.tolist()[0] 可以省掉一个中括号输出[375,241] #print contours[0][0].tolist()[0][0] #输出第一条轨迹起始点坐标的X坐标值。 #print contours[0][0].tolist()[0][1] #输出第一条轨迹起始点坐标的Y坐标值。 #print [i[0][0] for i in contours] #print [i[0][0] for i in contours[0]] scale = 1 #不缩放 contours_img = cv2.resize(contours_img, (0, 0), fx=scale, fy=scale) print "Url_jpg_contours_num:%s" % url_str_len_contours for cnt in 
contours: color = np.random.randint(0, 255, (3)).tolist() cv2.drawContours(contours_img, [cnt * scale], 0, color, 1) cv2.imshow("URL_canny_img", edge_im_array) cv2.imshow("URL_contours_img", contours_img) #轮廊清单转文本输出 edge_im_array_pix = str(np.size(edge_im_array)) contours_img_pix = str(np.size(contours_img)) ss = open("Contours" + ".log", 'w') ss.write("edge_im_array_pix nums:" + "%s" % edge_im_array_pix + "\n") ss.write("contours_img_pix nums:" + "%s" % contours_img_pix + "\n") ss.write("_url_contours num:" + "%s" % str_len_contours + "\n") for ele in contours: ss.write("%s" % ele) ss.write("**" * 50 + "\n") ss.close() #return contours cv2.waitKey(0)