def CreateStereoBag(left_imgs, right_imgs, bagname):
    '''Creates a bag file containing stereo image pairs.

    left_imgs / right_imgs are parallel lists of image file paths (assumed
    equal length); bagname is the output rosbag path.
    '''
    def _load_image(path):
        # Incrementally parse the image file.  Open in binary mode ("rb") --
        # the original used text mode, which corrupts image bytes on some
        # platforms -- and close the handle deterministically.
        parser = ImageFile.Parser()
        with open(path, "rb") as fp:
            while True:
                chunk = fp.read(1024)
                if not chunk:
                    break
                parser.feed(chunk)
        return parser.close()

    def _to_msg(im, frame_id, stamp):
        # Build a sensor_msgs/Image from a PIL RGB image.
        msg = Image()
        msg.header.stamp = stamp
        msg.header.frame_id = frame_id
        msg.width = im.size[0]
        msg.height = im.size[1]
        msg.encoding = "rgb8"
        # Flatten (r, g, b) pixel tuples into one flat byte list.
        msg.data = [pix for pixdata in im.getdata() for pix in pixdata]
        return msg

    bag = rosbag.Bag(bagname, 'w')
    try:
        for i in range(len(left_imgs)):
            print("Adding %s" % left_imgs[i])
            im_left = _load_image(left_imgs[i])
            print("Adding %s" % right_imgs[i])
            im_right = _load_image(right_imgs[i])

            # One shared timestamp keeps the stereo pair synchronised.
            Stamp = roslib.rostime.Time.from_sec(time.time())
            bag.write('camera/left/image_raw',
                      _to_msg(im_left, "camera/left", Stamp), Stamp)
            bag.write('camera/right/image_raw',
                      _to_msg(im_right, "camera/right", Stamp), Stamp)
    finally:
        bag.close()
def CreateMonoBag(imgs, bagname):
    '''Creates a bag file with camera images (mono8 encoding).'''
    bag = rosbag.Bag(bagname, 'w')
    try:
        for i in range(len(imgs)):
            print("Adding %s" % imgs[i])
            # Binary mode and a with-block: the original opened in text
            # mode ("r") and never closed the file.
            p = ImageFile.Parser()
            with open(imgs[i], "rb") as fp:
                while True:
                    s = fp.read(1024)
                    if not s:
                        break
                    p.feed(s)
            im = p.close()

            Stamp = rospy.Time.from_sec(time.time())
            Img = Image()
            Img.header.stamp = Stamp
            Img.width = im.size[0]
            Img.height = im.size[1]
            Img.encoding = "mono8"
            Img.header.frame_id = "camera"
            # "mono8" means one byte per pixel.  The original flattened RGB
            # tuples (3 bytes/pixel) under a mono8 header, and crashed on
            # true greyscale input (ints are not iterable).  Convert to
            # greyscale so the data matches the declared encoding.
            if im.mode != "L":
                im = im.convert("L")
            Img.data = list(im.getdata())
            bag.write('camera/image_raw', Img, Stamp)
    finally:
        bag.close()
def _fetch_image_size(url, referer):
    """Return the size of an image by URL downloading as little as possible."""
    req = _initialize_request(url, referer)
    if not req:
        return None

    parser = ImageFile.Parser()
    resp = None
    try:
        resp = urllib2.urlopen(req)
        # Feed the parser 1 KiB at a time; stop as soon as the header
        # contains enough information to expose the dimensions.
        for chunk in iter(lambda: resp.read(1024), ''):
            parser.feed(chunk)
            if parser.image:
                return parser.image.size
    except urllib2.URLError:
        return None
    finally:
        if resp:
            resp.close()
def recog(data, threshold=140):
    """OCR the raw image bytes in *data* after binarising at *threshold*."""
    parser = ImageFile.Parser()
    parser.feed(data)
    image = parser.close()
    # Greyscale, then threshold: pixels below the cutoff become black (0),
    # everything else white (255).
    binary = image.convert('L').point(lambda v: 0 if v < threshold else 255)
    return image_to_string(binary)
def siteconfig(request):
    """Site configuration admin view.

    GET renders the config page; POST validates the form, parses the
    uploaded login/banner images, and saves a new Site record.
    """
    if request.method == 'GET':
        return render_to_response('admin/siteconfig.html')
    if request.method == 'POST':
        sitename = request.POST.get('sitename')
        keywords = request.POST.get('keywords')
        descriptions = request.POST.get('descriptions')
        copyrightinfo = request.POST.get('copyrightinfo')
        form = PictureForm(request.POST, request.FILES)
        if form.is_valid():
            # BUG FIX: the original condition was
            # "if 'login' and 'banner' in request.FILES", which only tested
            # 'banner' ('login' is a truthy literal).  Both must be present.
            if 'login' in request.FILES and 'banner' in request.FILES:
                login = request.FILES["login"]
                banner = request.FILES["banner"]
                # BUG FIX: the original iterated
                # "login.chunks() and banner.chunks()", which fed only the
                # banner file (and crashed when the uploads were missing).
                # Validate each upload with its own parser.
                for upload in (login, banner):
                    parser = ImageFile.Parser()
                    for chunk in upload.chunks():
                        parser.feed(chunk)
            else:
                login = None
                banner = None
            site = Site()
            site.sitename = sitename
            site.keywords = keywords
            site.descriptions = descriptions
            site.login = login
            site.banner = banner
            site.copyrightinfo = copyrightinfo
            site.save()
    return render_to_response('admin/siteconfig.html')
def CreateBag(imgs, bagname):
    '''Creates a bag file with camera images.'''
    bag = rosbag.Bag(bagname, 'w')
    try:
        for i in range(len(imgs)):
            print("Adding %s" % imgs[i])
            # Binary mode and a with-block: the original opened the file in
            # text mode ("r") and never closed it.
            p = ImageFile.Parser()
            with open(imgs[i], "rb") as fp:
                while True:
                    s = fp.read(1024)
                    if not s:
                        break
                    p.feed(s)
            im = p.close()
            if im.mode != "RGB":
                im = im.convert("RGB")

            Stamp = rospy.rostime.Time.from_sec(time.time())
            Img = Image()
            Img.header.stamp = Stamp
            Img.width = im.size[0]
            Img.height = im.size[1]
            Img.encoding = "rgb8"
            Img.step = Img.width * 3  # bytes per row for rgb8
            Img.header.frame_id = "camera"
            # Flatten (r, g, b) tuples into a flat byte list.
            Img.data = [pix for pixdata in im.getdata() for pix in pixdata]
            bag.write('/camera/image', Img, Stamp)
    finally:
        bag.close()
def get_picture(self):
    """Read one frame from the multipart image stream as a pygame surface.

    Returns None (dropping the stream) when no stream is open or the
    Content-Length header cannot be parsed.
    """
    if self.stream_file is None:
        return None
    # Frame layout on the wire: boundary line, Content-Type line,
    # Content-Length line, blank line, <n> bytes of image, two EOLs.
    parser = ImageFile.Parser()
    self.stream_file.readline()                  # boundary
    self.stream_file.readline()                  # Content-Type
    length_line = self.stream_file.readline()    # Content-Length
    try:
        # The value sits between the first space and the trailing CRLF.
        byte_count = int(length_line[length_line.find(' ') + 1:-2])
    except ValueError:
        self.stream_file = None
        return None
    self.stream_file.readline()                  # blank separator
    parser.feed(self.stream_file.read(byte_count))
    frame = parser.close()
    surface = pygame.image.fromstring(
        frame.tostring(), frame.size, frame.mode)
    self.stream_file.readline()                  # trailing eol
    self.stream_file.readline()                  # trailing eol
    return surface
def getimagesize(url):
    """
    Attempts to determine an image's width and height, and returns a string
    suitable for use in an <img> tag, or None in case of failure.
    Requires that PIL is installed.

    >>> getimagesize("http://www.google.com/intl/en_ALL/images/logo.gif")
    ... #doctest: +ELLIPSIS, +SKIP
    'width="..." height="..."'
    """
    try:
        import ImageFile
        import urllib2
    except ImportError:
        return None

    try:
        parser = ImageFile.Parser()
        stream = urllib2.urlopen(url)
        # Pull 1 KiB chunks; bail out as soon as the parser has seen
        # enough header bytes to know the dimensions.
        while True:
            block = stream.read(1024)
            if not block:
                break
            parser.feed(block)
            if parser.image:
                return 'width="%i" height="%i"' % parser.image.size
    except (IOError, ValueError):
        return None
def analyse_png(self, png_name):
    # Decide whether png_name is worth converting to JPEG; returns False on
    # every rejection path.  NOTE(review): no success-path return is visible
    # in this view -- the function may continue beyond this chunk.
    f = self.get_data(png_name)
    buf = f.read()
    f.close()
    # Parse the raw bytes into a PIL image.
    p = ImageFile.Parser()
    p.feed(buf)
    im = p.close()
    # Running total of pixels inspected across calls.
    self.total_pixels += im.size[0] * im.size[1]
    if png_name.endswith('.jpg'):
        notice('File %s is already compressed' % png_name)
        return False
    # Skip files too small to be worth recompressing.
    if len(buf) < self.size_threshold:
        notice('File %s does not meet size threshold (size is %d)' % (png_name, len(buf)))
        return False
    if self.force_regex.match(png_name):
        # Forced files are converted even if transparent.
        warning('File %s will be forced' % png_name)
    elif self.has_transparency(im):
        # JPEG has no alpha channel, so transparent images are rejected.
        notice('File %s has transparent pixels' % png_name)
        if len(buf) >= self.size_warning_threshold:
            warning('Transparent image is quite large: %s (%d bytes)' % (png_name, len(buf)))
        return False
    # Trial-encode to JPEG in memory at the configured quality.
    ss = StringIO.StringIO()
    try:
        im.save(ss, 'JPEG', quality=self.compression_level)
    except IOError, ex:
        warning('Unable to convert %s to JPEG: %s' % (png_name, ex))
        return False
def fetchTile(server, z, x, y):
    """Download one background-map tile from *server* and return it as a PIL image."""
    query = ('%s/ctile?style=7&v=1&scale=1&size=1' % server
             + '&md=backgroundMapnik2&z=%d&x=%d&y=%d' % (z, x, y))
    logging.info('Fetching tile at: %s' % query)
    response = urllib2.urlopen(query)
    parser = ImageFile.Parser()
    parser.feed(response.read())
    return parser.close()
def LoadCompressed(data):
    """Decode a JPEG-compressed BLP payload into an RGB PIL image.

    The shared JPEG header stored just after the BLP header is fed to the
    parser first, followed by the mip level 0 payload.  The decoded channels
    arrive swapped (BGR), so they are reordered before returning.
    """
    header_len = ReadUInt32(data, _BLP_HEADER_SIZE)
    offset, size = MipMap(data, 0)
    parser = ImageFile.Parser()
    header_start = _BLP_HEADER_SIZE + 4
    parser.feed(data[header_start:header_start + header_len])
    parser.feed(data[offset:offset + size])
    decoded = parser.close().convert('RGB')
    r, g, b = decoded.split()
    return Image.merge("RGB", (b, g, r))
def fetch_url(url, referer=None, retries=1, dimension=False):
    # Fetch *url*, returning (content_type, content) -- or, when
    # dimension=True, just the parsed image size.  Failure value is None
    # (dimension mode) or (None, None).
    cur_try = 0
    log.debug('fetching: %s' % url)
    nothing = None if dimension else (None, None)
    url = clean_url(url)
    #just basic urls
    if not url.startswith('http://'):
        return nothing
    while True:
        try:
            req = Request(url)
            if useragent:
                req.add_header('User-Agent', useragent)
            if referer:
                req.add_header('Referer', referer)
            open_req = urlopen(req)
            #if we only need the dimension of the image, we may not
            #need to download the entire thing
            if dimension:
                content = open_req.read(chunk_size)
            else:
                content = open_req.read()
            content_type = open_req.headers.get('content-type')
            if not content_type:
                return nothing
            if 'image' in content_type:
                # Feed chunks until the parser knows the size or the
                # stream is exhausted.
                p = ImageFile.Parser()
                new_data = content
                while not p.image and new_data:
                    p.feed(new_data)
                    new_data = open_req.read(chunk_size)
                    content += new_data
                #return the size, or return the data
                if dimension and p.image:
                    return p.image.size
                elif dimension:
                    return nothing
            elif dimension:
                #expected an image, but didn't get one
                return nothing
            return content_type, content
        except (URLError, HTTPError, InvalidURL), e:
            # Retry up to *retries* times before giving up.
            cur_try += 1
            if cur_try >= retries:
                log.debug('error while fetching: %s referer: %s' % (url, referer))
                log.debug(e)
                return nothing
        # NOTE(review): SOURCE is truncated here -- the finally: suite is
        # missing from this view (presumably it closes open_req; confirm
        # against the full file).
        finally:
def load(self):
    # Parse the previously-fetched image fragments (self.object) into a PIL
    # image.  Returns (False, message) on failure.  NOTE(review): no success
    # return is visible in this view -- the method may continue beyond this
    # chunk; confirm against the full file.
    if not self.handle() == self.SUCCESS:
        return False, 'Loading failed from url in ImageParser.'
    try:
        parser = ImageFile.Parser()
        # self.object is iterable -- presumably the downloaded chunks.
        for fragment in self.object:
            parser.feed(fragment)
        img = parser.close()
    except IOError, e:
        return False, "Error in Paring Image: {0}".format(e)
def _pgm2bmp(self, _name):
    """Convert <map_dir>/<_name>.pgm into <map_dir>/<_name>.bmp."""
    base = os.path.join(self._map_file_dir_path, _name)
    parser = ImageFile.Parser()
    # with-block fixes the original's leaked file handle.
    with open(base + ".pgm", "rb") as fp:
        while True:
            chunk = fp.read(1024)
            if not chunk:
                break
            parser.feed(chunk)
    image = parser.close()
    image.save(base + ".bmp")
def upload(request):
    # Classify an uploaded image with the preloaded Keras model and return
    # the 1-based class index as JSON.  Non-POST requests get a plain
    # failure response.  Heavy debug printing left in place.
    if request.method == 'POST':
        print 'request : ',request
        print 'request.FILES : ',request.FILES
        form = UploadFileForm(request.POST, request.FILES)
        print 'form : ',form
        print 'type(form) : ',type(form)
        #form.save('mypic.jpg')
        #f = open('file_test.txt', 'w')
        #f.write(form)
        print 'request.FILES[file] : ',request.FILES['0']
        print(form.is_valid())
        #if form.is_valid():
        # NOTE(review): the upload is read from key '0', not 'file' --
        # presumably set by the client; confirm against the frontend.
        pic = request.FILES['0']
        #pic_np = np.array(pic)
        #img = Image.fromarray(pic_np)#
        #image_resize = img.resize((224,224))
        # Parse the upload chunk-by-chunk into a PIL image.
        parser = ImageFile.Parser()
        for chunk in pic.chunks():
            parser.feed(chunk)
        img = parser.close()
        img = img.convert('RGB')
        # Model expects a (1, 224, 224, 3) batch.
        image_resize = img.resize((224,224),Image.ANTIALIAS)
        greyscale_map = list(image_resize.getdata())
        greyscale_map = np.array(greyscale_map)
        greyscale_map = greyscale_map.reshape((1,224, 224,3))
        print 'input type : ',type(greyscale_map)
        print 'inpur type shape : ',greyscale_map.shape
        # Run prediction inside the shared TF graph context.
        with graph.as_default():
            score = load_model.model.predict(greyscale_map)
            score = score[0]
        #print 'score : ',score
        #print 'score_type : ',type(score)
        #final_score = heapq.nlargest(1,score)
        #print 'final score : ',final_score[0]
        #print 'argmax :',np.argmax(score, axis=0)
        #peakIndex = np.where(score==final_score[0])
        # Winning class index; +1 below converts to a 1-based label.
        final_score = np.argmax(score,axis=0)
        #print 'peakIndex : ',peakIndex
        #score = ourmodel.predict(greyscale_map)
        print 'score : ',final_score+1
        #score = 100
        #handle_uploaded_file(request.FILES['file'])
        result = {'code':'1', 'data':final_score+1, 'message':'successful!' }
        #return HttpResponse(result)
        return JsonResponse(result)
        #return HttpResponseRedirect('/success/url/')
    else:
        form = UploadFileForm()
        return HttpResponse("failed.")
def get_image_size(uri):
    """Return (width, height) of the image at *uri*, or None if unparseable.

    Fix vs. original: the response handle is now always closed -- the
    original leaked it on both the empty-data and the success paths.
    """
    stream = urllib.urlopen(uri)
    try:
        data = stream.read()
        if not data:
            return None
        p = ImageFile.Parser()
        p.feed(data)
        if p.image:
            return p.image.size
        # not an image
        return None
    finally:
        stream.close()
def get_image_dimensions(path):
    """Returns the (width, height) of an image at a given path, or None.

    Reads incrementally and stops as soon as the parser has seen enough
    header bytes to know the size.  Fix vs. original: the with-block closes
    the file even on the early-return path (the original leaked the handle
    whenever a size was found, since `return` skipped `fp.close()`).
    """
    p = ImageFile.Parser()
    with open(path, 'rb') as fp:
        while True:
            data = fp.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                return p.image.size
    return None
def upload(request):
    """Save an uploaded 'docfile' image as <Img_dir>/liu.jpg, then show the form."""
    uploaded = request.FILES.get('docfile')
    if uploaded is None:
        return HttpResponseRedirect("/load_image/")
    # Parse the upload chunk-by-chunk into a PIL image.
    parser = ImageFile.Parser()
    for chunk in uploaded.chunks():
        parser.feed(chunk)
    image = parser.close()
    target = os.path.join(settings.Img_dir, 'liu.jpg')
    image.save(target)
    return render_to_response('load_image.html',
                              context_instance=RequestContext(request))
def getsizes(self, uri):
    """Return the image size at *uri* as (width, height), or None if unknown."""
    # Blatantly stolen from http://effbot.org/zone/pil-image-size.htm
    # get file size *and* image size (None if not known)
    stream = urllib.urlopen(uri)
    try:
        p = ImageFile.Parser()
        while 1:
            data = stream.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                # Fix: the original returned here before file.close(),
                # leaking the connection on every successful parse (the
                # old break after return was unreachable).
                return p.image.size
    finally:
        stream.close()
    return None
def getsizes(fname):
    """Return (file_size_in_bytes, image_size_or_None) for *fname*.

    Fixes vs. original: the file is opened in binary mode (text mode can
    corrupt image bytes) and always closed via the with-block, and the
    success path now returns the documented (file size, image size) pair --
    the original returned the bare image size there, contradicting both its
    own comment and its `return size, None` fallback.
    """
    size = os.path.getsize(fname)
    p = ImageFile.Parser()
    with open(fname, 'rb') as stream:
        while True:
            data = stream.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                return size, p.image.size
    return size, None
def getDataFromImage(self, open_url, img_info=None):
    """Populate *img_info* with the dimensions parsed from the image URL.

    :param open_url: urlinfo opened urllib2 object for the image url.
    :param img_info: ImageInfo to fill in.
    :return: the img_info object.
    """
    parser = ImageFile.Parser()
    stream = urllib2.urlopen(open_url.geturl())
    # Feed each block; once the parser knows the size, keep recording it
    # (the stream is still consumed to the end, as in the original).
    for block in self.readImageDataPerByte(stream):
        parser.feed(block)
        if parser.image:
            img_info.width, img_info.height = parser.image.size
    return img_info
def getsizes(url):
    """Return (content_length_or_None, image_size) for *url*.

    The content length comes from the HTTP headers; the image size is
    parsed incrementally so only the leading bytes are downloaded.
    """
    stream = urllib.urlopen(url)
    try:
        size = stream.headers.get("content-length")
        if size:
            size = int(size)
        p = ImageFile.Parser()
        while 1:
            data = stream.read(1024)
            if not data:
                break
            p.feed(data)
            if p.image:
                # Fix: the original returned here before file.close(),
                # leaking the connection on every successful parse (its
                # break after return was unreachable).
                return size, p.image.size
    finally:
        stream.close()
    return size, None
def parse(cls, tfile): """ 解析远程图片文件, 生成Image类型对象返回 params: file: 从request.FILES中获取的数据对象 returns: img: PIL Image Object """ try: parser = ImageFile.Parser() for chunk in tfile.chunks(): parser.feed(chunk) except Exception, e: return False, str(e)
def RetrieveImageFromCamera(self):
    # Fetch one frame over HTTP from the camera and return it as a ROS
    # sensor_msgs/Image (bgr8), or None on any connection/HTTP failure.
    cameraConnection = self.ConnectToCamera()
    try:
        image = None
        # Define the settings for the camera
        params = urllib.urlencode({ 'res': self.res, 'x0': self.x0, 'y0': self.y0, 'x1': self.x1, 'y1': self.y1, 'quality': self.quality, 'doublescan': 1 })
        # Request an image from the camera
        try:
            cameraConnection.request("GET", "/image?%s" % params)
            #cameraConnection.request("GET", "/h264f?res=full&x0=640&x1=1280&y0=352&y1=768&qp=16&doublescan=1&ssn=33&iframe=1")
            response = cameraConnection.getresponse()
        except httplib.HTTPException as e:
            rospy.logerr('Cannot connect to the camera: %s' % e)
            return None
        except socket.error as e:
            rospy.logerr('Cannot connect to the camera: %s' % e)
            return None
        if response.status != 200:
            # There was an error reading from the camera
            rospy.logerr('Received an error code from the camera %i, %s' % (response.status, response.reason))
        else:
            # We have a response from the camera so parse it out into a
            # message format
            parser = ImageFile.Parser()
            # Read exactly the advertised body length, then decode via PIL.
            rawBytes = response.read(response.getheader('content-length'))
            parser.feed(rawBytes)
            pilImage = parser.close()
            # Wrap the PIL buffer in an OpenCV header (3-channel, 8-bit),
            # row stride = width * 3 bytes.
            cvImage = cv.CreateImageHeader(pilImage.size, cv.IPL_DEPTH_8U, 3)
            cv.SetData(cvImage, pilImage.tostring(), pilImage.size[0] * 3)
            image = self.cvBridge.cv_to_imgmsg(cvImage, "bgr8")
    finally:
        # Always release the HTTP connection, even on the early returns.
        cameraConnection.close()
    return image
def edit_topic(request, group_id, topic_id):
    """Edit a topic.  @fanlintao

    GET renders the edit form; POST saves the topic and any newly uploaded
    image.  NOTE(review): pending images are accumulated in the
    module-level global `image_obj`, which is shared across requests --
    confirm this is intentional.
    """
    try:
        group = Group.objects.get(id=group_id)
        t_topic = Topic.objects.get(id=topic_id)
        global image_obj
        # Pre-seed the pending-image list with the topic's existing images.
        for i in t_topic.image.all():
            if i not in image_obj:
                image_obj.append(i)
        if request.method == 'POST':
            form = topicForm(request.POST, instance=t_topic)
            if "image" in request.FILES:
                # An image was uploaded with the form.
                imageForm = topicImageForm(request.POST, request.FILES)
                if imageForm.is_valid():
                    import ImageFile
                    f = request.FILES["image"]
                    # Parse the upload chunk-by-chunk, then persist it.
                    parser = ImageFile.Parser()
                    for chunk in f.chunks():
                        parser.feed(chunk)
                    img = parser.close()
                    img.save(settings.TOPIC_IMAGE_PATH + f.name)
                    i = imageForm.save()
                    image_obj.append(i)
                    print image_obj[0].image
            if form.is_valid():
                g = form.save(commit=False)
                g.creator = request.user
                g.save()
                # Attach all pending images, then reset the global list.
                if len(image_obj):
                    for i in image_obj:
                        g.image.add(i)
                image_obj = []
                g_id = int(group.id)
                return redirect( reverse("group_detail", kwargs={'group_id': g_id}) + '?type=recent')
        ctx = { 'form': topicForm(instance=t_topic), 'g': group, 'imageForm': topicImageForm(), 'images': image_obj }
        return render(request, 'topics/new/topic.html', ctx)
    except ObjectDoesNotExist:
        # Missing group/topic: fall through (implicit None response).
        pass
def getJpgSizesFromUrl(uri):
    """Return (content_length, (width, height)) for the JPEG at *uri*.

    Only the leading bytes of the response are downloaded -- parsing stops
    as soon as the headers reveal the dimensions.

    Fixes vs. original: the connection is now always closed (the old
    file.close() sat after an infinite loop whose only exits were returns,
    so it never ran), and the unreachable `break` statements after the
    returns are gone.  The (None, (None, None)) value returned when the
    stream ends before a size is found is preserved as-is, even though it
    discards the already-known content length -- callers may rely on it.
    """
    stream = urllib.urlopen(uri)
    try:
        # use content-length to know the bytesize
        size = stream.headers.get("content-length")
        if size:
            size = int(size)
        # we parse as we receive the image (to avoid downloading it full)
        p = ImageFile.Parser()
        while 1:
            # ONLY read 1024 bytes of data, to get the jpg headers
            data = stream.read(1024)
            if not data:
                return (None, (None, None))
            p.feed(data)
            if p.image:
                return size, p.image.size
    finally:
        stream.close()
def save_image(self, image_url, image_url_sh, quality_size):
    # Download the image at image_url (with size/aspect validation) and
    # save it under a hash-derived name; returns 1 on success, 0 on
    # rejection.  NOTE(review): no explicit return after the retry loop --
    # exhausting all retries returns None implicitly; confirm callers
    # treat that as failure.
    if quality_size == 1:
        min_size = 200  # high-quality tier requires larger images
    else:
        min_size = 80
    # try 4 times to download the image
    width = 1
    height = 1
    for i in range(4):
        try:
            if image_url != None:
                imagefile = urllib2.urlopen(image_url)
            else:
                return 0
            # Parse incrementally so the dimensions are known early.
            p = ImageFile.Parser()
            while 1:
                data = imagefile.read(1024)
                if not data:
                    break
                p.feed(data)
            if p.image:
                width = p.image.size[0]
                height = p.image.size[1]
                # Reject undersized images.
                if width < min_size or height < min_size:
                    return 0
                # Reject extreme aspect ratios for the high-quality tier.
                if quality_size == 1 and (width * 1.0 / height > 3.5 or height * 1.0 / width > 3.5):
                    return 0
            else:
                # Data could not be parsed as an image at all.
                return 0
            im = p.close()
            # Files are sharded into subdirectories by name prefix.
            imagefile_name = image_url_sh + ".jpg"
            sub_dir = imagefile_name[0:3] + "/"
            save_re = self.save_original_image(
                im, self.original_image_folder + sub_dir + imagefile_name)
            if save_re == 1:
                return save_re
        except Exception, e:
            # Log and back off briefly before the next retry.
            self.logger.error("save image failed url error: %s - %s" % (e, image_url))
            time.sleep(0.25)
def __init__(self, data, filter=None):
    """Decode *data* into a scratch image file for OCR, after an optional
    *filter* pass, choosing platform-appropriate scratch paths."""
    if cons.OS_WINDOWS:
        # Fixed scratch names next to the executable on Windows.
        self.image_name = os.path.join(sys.path[0], "tmp.tif")
        self.text_name = os.path.join(sys.path[0], "tmp")
    else:
        if cons.OS_OSX:
            os.environ["TESSDATA_PREFIX"] = os.path.join(
                sys.path[0], "tesseract", "")
        # Temp files on POSIX; text_name is the path minus its suffix.
        self.text = tempfile.NamedTemporaryFile(suffix=TEXT_SUFFIX)
        self.image = tempfile.NamedTemporaryFile(suffix=IMAGE_SUFFIX)
        self.image_name = self.image.name
        self.text_name = self.text.name.rsplit(TEXT_SUFFIX, 1)[0]

    parser = ImageFile.Parser()
    parser.feed(data)
    decoded = parser.close()
    if filter:
        decoded = filter(decoded)
    decoded.save(self.image_name)
def __init__(self, data, filter=None):
    """Decode *data* into a temp image file for OCR; blank the scratch
    names when anything environment-related fails."""
    try:
        # create temporary image-files; delete=False keeps them on disk
        # after the handles close so they can be reopened later.
        with tempfile.NamedTemporaryFile(suffix=TEXT_SUFFIX, delete=False) as tmp_text:
            self.text_name = tmp_text.name
        with tempfile.NamedTemporaryFile(suffix=IMAGE_SUFFIX, delete=False) as tmp_img:
            self.image_name = tmp_img.name

        parser = ImageFile.Parser()
        parser.feed(data)
        decoded = parser.close()
        if filter:
            decoded = filter(decoded)
        decoded.save(self.image_name)
    except EnvironmentError as err:
        logger.exception(err)
        self.text_name = ""
        self.image_name = ""
def parse(cls, file):
    """Parse a remote image file into a PIL Image object.

    params:
        file: upload object from request.FILES providing .chunks()
    returns:
        img: PIL Image object, converted to RGBA if needed.
    """
    # Fix: closing the parser belongs on the success path only.  The
    # original called parser.close() inside a `finally`, so a failure
    # while feeding chunks would close a half-fed parser and raise a
    # second, misleading error that masked the first.
    parser = ImageFile.Parser()
    for chunk in file.chunks():
        parser.feed(chunk)
    image = parser.close()
    if image.mode != 'RGBA':
        image = image.convert('RGBA')
    return image