def requests_image(
    file_url,
    s,
    o,
    k,
    path,
):
    """Download the image behind *file_url* into <path>/<k>/<o>/.

    Parameters:
        file_url: URL that may 302-redirect to the real image location.
        s: a requests-style session used for both requests.
        o: leaf folder name (truncated to 50 chars, ':' replaced).
        k: parent folder name (':' replaced).
        path: root directory of the download tree.

    Returns False on a failed download, None otherwise.
    (Python 2 code: `print` statement and `string.replace`.)
    """
    # Cap the leaf folder name at 50 characters to keep the path short.
    while len(o) > 50:
        o = o[:-1]
    # ':' is not a legal character in Windows file names.
    o = string.replace(o, ':', '-')
    k = string.replace(k, ':', '-')
    thepath = path + '/' + k + '/' + o + '/'
    if not os.path.isdir(thepath):
        os.makedirs(thepath)
    # First request without following redirects: a 302 reveals the real URL.
    i = s.get(file_url, allow_redirects=False)
    if i.status_code == 302:
        urlpath = i.headers['location']
    else:
        urlpath = i.url
    # The last path segment of the resolved URL becomes the local file name.
    name = urlsplit(urlpath)[2].split('/')
    name = name[len(name)-1]
    # Only download files we do not already have.
    if not os.path.exists(thepath + name):
        print urlpath
        i = s.get(urlpath)
        if i.status_code == requests.codes.ok:
            with iopen(thepath + name, 'wb') as file:
                file.write(i.content)
        else:
            return False
def fetch_video(file_url, directory, unit_name, file_name, path):
    """Download *file_url* as an .m4v into <path>/<unit_name>/<directory>/.

    Returns False on a failed HTTP request, None otherwise.
    (Python 2 code: `print` statement and `string.replace`.)
    """
    session = requests.Session()
    file_name = string.replace(file_name, ':', '-')
    # Remember the extension so the truncation loop below can re-append it.
    if '.' in file_name:
        format = '.' + file_name.split('.')[1]
    else:
        format = ' '
    # NOTE(review): this loop recomputes the exact same value, so it never
    # terminates when the extension is longer than 6 characters — confirm.
    while len(format) > 7:
        format = '.' + file_name.split('.')[1]
    # Keep the directory component <= 50 chars and strip ':' (illegal on Windows).
    while len(directory) > 50:
        directory = directory[:-1]
    directory = string.replace(directory, ':', '-')
    unit_name = string.replace(unit_name, ':', '-')
    thepath = path + '/' + unit_name + '/' + directory + '/'
    # Shorten the file name until the full path fits within 256 characters.
    while len(thepath + file_name) > 256:
        file_name = file_name[:-9] + format
    if not os.path.isdir(thepath):
        os.makedirs(thepath)
    # NOTE(review): existence test uses `path + file_name`, but the file is
    # written to `thepath + file_name + '.m4v'` — looks like it always
    # re-downloads; confirm intent.
    if not os.path.exists(path + file_name):
        print file_url
        i = session.get(file_url)
        if i.status_code == requests.codes.ok:
            with iopen(thepath + file_name + '.m4v', 'wb') as file:
                file.write(i.content)
        else:
            return False
def requests_video(
    file_url,
    s,
    o,
    k,
    file_name,
    path,
):
    """Download *file_url* as an .m4v into <path>/<k>/<o>/.

    s: requests-style session; o/k: leaf/parent folder names.
    Returns False on a failed HTTP request, None otherwise.
    (Python 2 code: `print` statement and `string.replace`.)
    """
    file_name = string.replace(file_name, ':', '-')
    # Remember the extension so the truncation loop below can re-append it.
    if '.' in file_name:
        format = '.' + file_name.split('.')[1]
    else:
        format = ' '
    # NOTE(review): recomputes the same value — never terminates when the
    # extension is longer than 6 characters. Confirm intent.
    while len(format) > 7:
        format = '.' + file_name.split('.')[1]
    # Cap the leaf folder name at 50 characters.
    while len(o) > 50:
        o = o[:-1]
    o = string.replace(o, ':', '-')
    k = string.replace(k, ':', '-')
    thepath = path + '/' + k + '/' + o + '/'
    # Shorten the file name until the full path fits within 256 characters.
    while len(thepath + file_name) > 256:
        file_name = file_name[:-9] + format
    if not os.path.isdir(thepath):
        os.makedirs(thepath)
    # NOTE(review): existence test uses `path + file_name` but the file is
    # written to `thepath + file_name + '.m4v'` — likely always re-downloads.
    if not os.path.exists(path + file_name):
        print file_url
        i = s.get(file_url)
        if i.status_code == requests.codes.ok:
            with iopen(thepath + file_name + '.m4v', 'wb') as file:
                file.write(i.content)
        else:
            return False
def saveimage(self, url):
    """Download a poster image from *url* into the hidden resource folder.

    On success, records the relative poster path in self.postersrc and logs
    the file via self.addfiletolog.  On a bad status or unknown content type
    the failure is logged and nothing is written.
    """
    imgreq = requests.get(url, stream=True)
    imgreq.raw.decode_content = True  # handle spurious Content-Encoding
    print(".", end='', flush=True)
    logging.debug('Fetched ' + imgreq.url)
    # .get() avoids a KeyError when the server omits the header entirely.
    content_type = imgreq.headers.get('content-type')
    if (content_type in self.suffix_match
            and imgreq.status_code == requests.codes.ok):
        resfolder = self.higher.getstructurenames()['hiddenresfolder']
        # Make path if not there.
        # NOTE(review): the existence check is on the *relative* resfolder,
        # while the file below is written under self.folderpath — confirm
        # whether this should be os.path.join(self.folderpath, resfolder).
        if not os.path.exists(resfolder):
            os.makedirs(resfolder)
        suffix = self.suffix_match[content_type]
        # Build the relative name once so the file path and self.postersrc
        # cannot drift apart.
        relname = resfolder + '/' + self.movie.id + ' POSTER' + '.' + suffix
        outname = self.folderpath + '/' + relname
        with iopen(outname, 'wb') as file:
            file.write(imgreq.content)
        self.postersrc = relname
        self.addfiletolog(self.archivetypes['poster'], outname)
    else:
        # BUG FIX: the original referenced an undefined `title` (NameError)
        # and concatenated the int status code to a str (TypeError).
        logging.error("Bad image request for url:" + url)
        logging.debug("Got code:" + str(imgreq.status_code)
                      + " and content-type:" + str(content_type))
def main(args, reload=False):
    """Build/refresh The Plant List species cache and tag matching Parameters.

    args: app object providing the data_file() path helper.
    reload: when True, re-scrape the per-family CSV files first.
    Prints the number of Parameter rows that received a tpl_id.
    """
    species = {}
    db = args.data_file('theplantlist', 'db.json')
    if reload:
        # Scrape every family link from the browse page and store its CSV dump.
        for a in bs(get('/1.1/browse/-/')).find('ul', id='nametree').find_all('a'):
            with iopen(args.data_file('theplantlist', a.text + '.csv'), 'w', encoding='utf8') as fp:
                fp.write(get(a['href'] + a.text + '.csv'))
    if db.exists():
        # Reuse the cached mapping slug(Genus+Species) -> TPL ID.
        with open(db) as fp:
            species = json.load(fp)
    else:
        # Build the mapping from the downloaded CSVs, accepted names only.
        for p in args.data_file('theplantlist').files('*.csv'):
            for row in reader(p, namedtuples=True, delimiter=','):
                if row.Taxonomic_status_in_TPL == 'Accepted':
                    id_ = slug(row.Genus + row.Species)
                    species[id_] = row.ID
        with open(db, 'w') as fp:
            json.dump(species, fp)
    # Attach the TPL id to every Parameter whose slug matches a species.
    with transaction.manager:
        found = 0
        for p in DBSession.query(Parameter):
            id_ = slug(p.name)
            if id_ in species:
                found += 1
                p.tpl_id = species[id_]
        print(found)
def write_file(img_url, title):
    """Save the media at *img_url* to a local file named 'cat.<ext>'.

    Prints the post title for the text message, refuses v.redd.it sources,
    and removes any previous cat.* file first (needed by the AppleScript).
    """
    if not img_url:
        sys.stderr.write('Something went wrong - no url\n')
        return
    if 'v.redd.it' in img_url:
        sys.stderr.write(
            'The source of this file is v.redd.it. Unfortunately, reddit recognizes requests to this source as being from a script and blocks them. Apologies.\n'
        )
        return
    # Last matching extension wins; default to .jpg when none is found.
    hits = [ext for ext in ('mp4', 'png', 'gif', 'jpeg', 'jpg') if ext in img_url]
    extension = '.' + hits[-1] if hits else '.jpg'
    # Remove former cat file, necessary for applescript.
    for stale in glob.glob('cat*'):
        os.unlink(stale)
    fname = 'cat' + extension
    r = requests.get(img_url, stream=True)
    # Print title for text msg.
    print('Title: {}'.format(title))
    with iopen(fname, 'wb') as file:
        file.write(r.content)
def fetch_video(file_url, directory, unit_name, file_name, path):
    """Download *file_url* into <path>/<unit_name>/<directory>/<file_name>.m4v.

    Folder and file names are sanitized (':' -> '-') and truncated so the
    full path stays under 256 characters.  The download is skipped when the
    target file already exists.  Returns False on a failed HTTP request,
    None otherwise.
    """
    session = requests.Session()
    file_name = file_name.replace(':', '-')
    # Keep the extension (at most 7 chars including the dot) so truncated
    # names below still end with it.
    # BUG FIX: the original `while len(format) > 7:` recomputed the same
    # value forever when the extension exceeded 6 characters; truncate once.
    if '.' in file_name:
        format = ('.' + file_name.split('.')[1])[:7]
    else:
        format = ' '
    # Cap the directory component at 50 chars and strip ':' (illegal on Windows).
    directory = directory[:50].replace(':', '-')
    unit_name = unit_name.replace(':', '-')
    thepath = path + '/' + unit_name + '/' + directory + '/'
    # Shorten the file name until the full path fits within 256 characters.
    while len(thepath + file_name) > 256:
        file_name = file_name[:-9] + format
    if not os.path.isdir(thepath):
        os.makedirs(thepath)
    # BUG FIX: the original tested `path + file_name`, which never matches
    # the file actually written, so every call re-downloaded the video.
    if not os.path.exists(thepath + file_name + '.m4v'):
        print(file_url)
        i = session.get(file_url)
        if i.status_code == requests.codes.ok:
            with iopen(thepath + file_name + '.m4v', 'wb') as file:
                file.write(i.content)
        else:
            return False
def requests_image(file_url, for_path):
    """Fetch *file_url* and write the response body to *for_path*.

    Returns False on a non-OK HTTP status, None on success.
    """
    response = requests.get(file_url)
    if response.status_code != requests.codes.ok:
        return False
    with iopen(for_path, 'wb') as out:
        out.write(response.content)
def fetch_video(file_url, directory, unit_name, file_name, path): print('ILectureUnit.fetch_video() directory: {directory!r}, unit_name: {unit_name!r}, file_name: {file_name!r}, path: {path!r}'.format(directory=directory, unit_name=unit_name, file_name=file_name, path=path)) #return session = requests.Session() file_name = string.replace(file_name, ':', '-') if '.' in file_name: format = '.' + file_name.split('.')[1] else: format = ' ' while len(format) > 7: format = '.' + file_name.split('.')[1] while len(directory) > 50: directory = directory[:-1] directory = string.replace(directory, ':', '-') unit_name = string.replace(unit_name, ':', '-') thepath = path + '/' + unit_name + '/' + directory + '/' while len(thepath + file_name) > 256: file_name = file_name[:-9] + format if not os.path.isdir(thepath): os.makedirs(thepath) if not os.path.exists(path + file_name): print('fetch_video() file_url: {0!r}'.format(file_url)) return# REMOVEME i = session.get(file_url) if i.status_code == requests.codes.ok: with iopen(thepath + file_name + '.m4v', 'wb') as file: file.write(i.content) else: return False
def _download(url, path):
    """Download *url* to *path*, sniff the image type with file(1), and
    rename the file to carry the right extension (.png or .jpg).

    On any failure the partial file is removed and a placeholder image is
    fetched instead.
    """
    from io import open as iopen
    try:
        i = requests.get(url)
        if i.status_code == requests.codes.ok:
            with iopen(path, 'wb') as file:
                file.write(i.content)
        # Ask file(1) what we actually downloaded; servers lie about types.
        cmd = "file {}".format(path)
        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
        output, error = process.communicate()
        output = str(output)
        if re.search(r'PNG image', output):
            ext = '.png'
        elif re.search(r'JPEG image', output):
            ext = '.jpg'
        else:
            raise ValueError("Invalid image ({})".format(output))
        _path = path + ext
        os.rename(path, _path)
    except Exception as e:
        if os.path.exists(path):
            os.remove(path)
        print("ERROR: {})\n".format(e))
        # Fall back to a generic placeholder image.
        # NOTE(review): if the placeholder itself keeps failing this recurses
        # forever — consider a retry cap.
        url = "http://placehold.it/300x300"
        # BUG FIX: the original did `path.split('.')[:-1] + '.png'`, i.e.
        # list + str -> TypeError; build the fallback path as a string.
        path = os.path.splitext(path)[0] + '.png'
        _download(url, path)
def loadImages(): print("Loading the files", file=sys.stderr) # Request to gat all the USERS in the app r = requests.get('https://co-workers.herokuapp.com/api/cw-api/profiles', headers={'Authorization': 'Bearer '+TOKEN}) rep = r.json() for x in rep['data']: #Prepare the saving of the image o = x['photo'].split('/') src = o[-1] #Query to obtain the file img i = requests.get(x['photo'], stream = True) #Save the img with iopen(src, 'wb') as file: file.write(i.content) #Load the saved img with face-recognition img = face_recognition.load_image_file(src) #Array of all the faces found in the img (normally one) allRecFace = face_recognition.face_encodings(img) #If a face is recognised if (len(allRecFace) > 0): #Add to ENCODED_USR_IMG the user and his img img_encoded = face_recognition.face_encodings(img)[0] ENCODED_USR_IMG.append({'user':x['user'], 'img':img_encoded}) else: #Put NULL in the img ENCODED_USR_IMG.append({'user':x['user'], 'img':'null'})
def save_image(file_url, file_save):
    """Download *file_url* and store the body at *file_save*.

    Returns False on a non-OK HTTP status, None on success.
    """
    response = requests.get(file_url)
    if response.status_code != requests.codes.ok:
        return False
    with iopen(file_save, 'wb') as out:
        out.write(response.content)
def requests_image(
    file_url,
    s,
    o,
    k,
    path,
):
    """Download the image behind *file_url* into <path>/<k>/<o>/.

    s: requests-style session; o/k: leaf/parent folder names (truncated,
    ':' replaced).  Returns False on a failed download, None otherwise.
    (Python 2 code: `print` statement and `string.replace`.)
    """
    # Cap the leaf folder name at 50 characters to keep the path short.
    while len(o) > 50:
        o = o[:-1]
    # ':' is not a legal character in Windows file names.
    o = string.replace(o, ':', '-')
    k = string.replace(k, ':', '-')
    thepath = path + '/' + k + '/' + o + '/'
    if not os.path.isdir(thepath):
        os.makedirs(thepath)
    # First request without following redirects: a 302 reveals the real URL.
    i = s.get(file_url, allow_redirects=False)
    if i.status_code == 302:
        urlpath = i.headers['location']
    else:
        urlpath = i.url
    # The last path segment of the resolved URL becomes the local file name.
    name = urlsplit(urlpath)[2].split('/')
    name = name[len(name) - 1]
    # Only download files we do not already have.
    if not os.path.exists(thepath + name):
        print urlpath
        i = s.get(urlpath)
        if i.status_code == requests.codes.ok:
            with iopen(thepath + name, 'wb') as file:
                file.write(i.content)
        else:
            return False
def download_image(file_url):
    """Download *file_url* to a local 'tmp.<ext>' file.

    The extension is derived from the Content-Type header when present,
    otherwise from the URL itself ('jpeg' is normalized to 'jpg').
    Returns (remote_file_name, extension) on success, ('', '') on timeout,
    connection error, unsupported type, or a non-OK status.
    """
    file_url = unquote(file_url)
    try:
        resp = requests.get(file_url, timeout=10)
    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as err:
        # Network trouble: signal failure with empty fields.
        return '', ''
    file_name_from_web = urllib.parse.urlsplit(file_url)[2].split('/')[-1]
    ext_from_url = file_name_from_web.split(".")[-1]
    content_type = resp.headers.get('Content-Type')
    # Prefer the server-declared type; fall back to the URL's suffix.
    if content_type:
        image_type = content_type.split('/')[1]
    else:
        image_type = ext_from_url
    if image_type == 'jpeg':
        image_extension = "." + 'jpg'
    else:
        image_extension = "." + image_type
    if image_type in image_type_list and resp.status_code == requests.codes.ok:
        with iopen("tmp" + image_extension, 'wb') as out:
            out.write(resp.content)
        return file_name_from_web, image_extension
    return '', ''
def store_page(page, page_url, path, debug):
    """Persist a fetched page (or file) under *path*, mirroring the URL path.

    page: either an already-parsed page as a str, or a response-like object
          exposing .content/.text.
    page_url: URL the page came from; its path decides the local layout.
    path: local root directory.
    debug: when True, print failure diagnostics.
    Returns True on success (or deliberate skip), False on any I/O failure.
    """
    # Store page
    if isinstance(page, str):
        # a parsed page
        page_text = page
    parsed_url = urlparse(page_url)
    full_path = parsed_url.path
    # Collapse accidental double slashes.
    full_path = re.sub('//', '/', full_path)
    filename = full_path.split('/')[-1]
    if '.html/' in full_path:
        # it's a parameter, skip it, but pretend it was stored.
        return True
    if '.' not in filename:
        # it's a path, not a filename:
        filename = 'index.html'
        directory_path = full_path
    else:
        directory_path = full_path.rsplit('/',1)[0]
    # Join root + directory + file, avoiding a doubled or missing '/'.
    if not directory_path:
        local_path = path + directory_path + '/' + filename
    elif directory_path[-1] != '/':
        local_path = path + directory_path + '/' + filename
    else:
        local_path = path + directory_path + filename
    if directory_path:
        local_directory_path = path + directory_path
        if not os.path.exists(local_directory_path):
            try:
                os.makedirs(local_directory_path, exist_ok=True)
            except OSError as e:
                if debug:
                    print(f"Couldn't make the directory! {local_directory_path}: {e}")
                return False
    image_suffix_list = ['jpg', 'gif', 'png', 'jpeg']
    file_ext = filename.split('.')[-1]
    if file_ext in image_suffix_list:
        # image file save
        # NOTE(review): if *page* is a str this branch raises AttributeError
        # (.content only exists on response objects) — confirm callers never
        # pass image bodies as str.
        try:
            with iopen(local_path, 'wb') as f:
                f.write(page.content)
        except Exception as e:
            if debug:
                print(f"Couldn't write file! {e}")
            return False
    elif not isinstance(page, str):
        # other file save: store the response's decoded text
        try:
            with open(local_path, 'w') as f:
                f.write(page.text)
        except Exception as e:
            if debug:
                print(f"Couldn't write file! {e}")
            return False
    else:
        # plain string page captured at the top of the function
        try:
            with open(local_path, 'w') as f:
                f.write(page_text)
        except Exception as e:
            if debug:
                print(f"Couldn't write file! {e}")
            return False
    return True
def write_file(img_url, title):
    """Save the media at *img_url* to a local file named 'cat.<ext>'.

    Prints the post title for the text message, refuses v.redd.it sources,
    and removes any previous cat.* file first (needed by the AppleScript).
    """
    if not img_url:
        sys.stderr.write("Something went wrong - no url\n")
        return
    if "v.redd.it" in img_url:
        sys.stderr.write(
            "The source of this file is v.redd.it. Unfortunately, reddit recognizes requests to this source as being from a script and blocks them. Apologies.\n"
        )
        return
    # Last matching extension wins; default to .jpg when none is found.
    hits = [ext for ext in ("mp4", "png", "gif", "jpeg", "jpg") if ext in img_url]
    extension = "." + hits[-1] if hits else ".jpg"
    # Remove former cat file, necessary for applescript.
    for stale in glob.glob("cat*"):
        os.unlink(stale)
    fname = "cat" + extension
    r = requests.get(img_url, stream=True)
    # Print title for text msg.
    print("Title: {}".format(title))
    with iopen(fname, "wb") as file:
        file.write(r.content)
def slangify_test_json(tweet_json, name, urban, urban_low):
    """Annotate each TweetQA test item with detected slang and dump the
    result to 'tweet_<name>_slang_automatic.json'.

    tweet_json must be a list of dicts; anything else aborts the program.
    (*urban* is accepted for signature compatibility but unused here.)
    """
    if not isinstance(tweet_json, list):
        logger.error(
            "Something went wrong with the tweet qa json file %s",
            args.tweet_test)
        exit(1)
    questions = []
    for item in tweet_json:
        if not isinstance(item, dict):
            logger.error(
                "Something went wrong with the tweet qa json file %s",
                args.tweet_test)
            exit(1)
        context = item[TWEET_QA_TWEET_KEY]
        # One output record per question, slang detected on the lowercased tweet.
        questions.append({
            "Question": item[TWEET_QA_QUESTION_KEY],
            "Tweet": context,
            'Slang': find_slang(urban_low, context.lower()),
            "qid": item[TWEET_QA_QID_KEY],
        })
    with iopen('tweet_' + name + '_slang_automatic.json', 'w') as fp:
        dump(questions, fp)
def downloadfile(url: str, filepath="./data", filename="noname") -> str:
    '''
    Takes a url of a file that is assumed to be activated.
    TODO: put in error check for activated
    Downloads the file, preferring the name the server supplies via the
    Content-Disposition header over the *filename* argument, and saves it
    under *filepath* when the response is a plain 200.  Raises on HTTP 429.
    Returns the name the file was saved under.
    '''
    result = requests.get(url, auth=auth_header)
    if result.status_code == 429:
        raise Exception("rate limit error")
    # Try to get the filename from the header.
    if "Content-Disposition" in result.headers:
        params = parse_header(result.headers["Content-Disposition"])[1]
    else:
        params = {"Content-Disposition": "filename=" + str(filename)}
    if "filename" in params:
        filename = params["filename"]
    # Save the payload in the data folder.
    target = filepath + '/' + filename
    if result.status_code == 200:
        with iopen(target, 'wb') as f:
            f.write(result.content)
    return filename
def requests_video(file_url, s, o, k, file_name, path):
    """Download *file_url* as an .m4v into <path>/<k>/<o>/.

    s: requests-style session; o/k: leaf/parent folder names.
    Returns False on a failed HTTP request, None otherwise.
    (Python 2 code: `print` statement and `string.replace`.)
    """
    file_name = string.replace(file_name, ":", "-")
    # Remember the extension so the truncation loop below can re-append it.
    if "." in file_name:
        format = "." + file_name.split(".")[1]
    else:
        format = " "
    # NOTE(review): recomputes the same value — never terminates when the
    # extension is longer than 6 characters. Confirm intent.
    while len(format) > 7:
        format = "." + file_name.split(".")[1]
    # Cap the leaf folder name at 50 characters.
    while len(o) > 50:
        o = o[:-1]
    o = string.replace(o, ":", "-")
    k = string.replace(k, ":", "-")
    thepath = path + "/" + k + "/" + o + "/"
    # Shorten the file name until the full path fits within 256 characters.
    while len(thepath + file_name) > 256:
        file_name = file_name[:-9] + format
    if not os.path.isdir(thepath):
        os.makedirs(thepath)
    # NOTE(review): existence test uses `path + file_name` but the file is
    # written to `thepath + file_name + '.m4v'` — likely always re-downloads.
    if not os.path.exists(path + file_name):
        print file_url
        i = s.get(file_url)
        if i.status_code == requests.codes.ok:
            with iopen(thepath + file_name + ".m4v", "wb") as file:
                file.write(i.content)
        else:
            return False
def load(self):
    """Fetch this week's canteen menu image and store it as 'menu.jpg'.

    Picks the week/day to display (next Monday on weekends, tomorrow after
    noon) and sets self.ret to the HTTP status code, or -1 when the request
    itself failed.
    """
    dt = dateConv.DateConv()
    self.jourSem = dt.jourSem()
    print("Jour de la semaine : ", self.jourSem)
    if self.jourSem > 5:
        # On the weekend, show Monday's menu of the following week.
        self.sem = str(dt.numSem()+1)
        self.jourSem = 1
    else:
        d = datetime.datetime.now()
        # Past noon, move on to the next day's menu.
        if d.hour > 12:
            self.jourSem += 1
        self.sem = str(dt.numSem())
    # Zero-pad the week number to exactly two digits (e.g. '07').
    self.sem= "0"+self.sem
    self.sem = self.sem[-2:]
    url = "http://www.macantineetmoi.com/images/menu/sainte-clotilde/sainte-clotilde_S"+self.sem+".jpg"
    file_name = "menu.jpg"
    print("Recuperation de ", url)
    # NOTE(review): stripped-down headers (no encoding, no User-Agent) —
    # presumably required by this server; confirm before changing.
    headers = {
        'Accept':'text/html',
        'Accept-Encoding': '',
        'User-Agent': None
    }
    try:
        self.resp = requests.get(url, headers= headers, timeout=10)
    except:
        # Any network failure is reported via the sentinel -1.
        self.ret = -1
        return
    self.ret = self.resp.status_code
    print("Apres requests ", self.ret)
    j=0
    if self.ret == requests.codes.ok:
        # Stream the image to disk in 1 KiB chunks.
        with iopen(file_name, 'wb') as file:
            for chunk in self.resp.iter_content(1024):
                #print("> ", j)
                j = j+1
                file.write(chunk)
            file.close()
    print("Terminé ")
def articleCreation():
    """Flask view: render the article form and create an Article on a valid POST.

    The optional front image is stored via img_set, re-read from disk and
    embedded into the Article row as a base64 string ("None" when absent).
    """
    form = articleForm()
    if request.method == "POST" and form.validate_on_submit():
        img_file = form.front_image.data
        if isinstance(img_file, type(None)):
            # No upload: store the literal string "None" as the title image.
            del img_file
            img = "None"
        else:
            filename = secure_filename(img_file.filename)
            img_set.save(img_file, name=f"(unknown)")
            # Re-read the stored upload and embed it as base64 text.
            with iopen(f'{PATH}\\static\\assets\\uploads\\images\\(unknown)', 'rb') as image:
                img = str(base64.b64encode(image.read()), 'utf-8')
        current_date = datetime.now()
        date_util = DateUtil(current_date)
        # NOTE(review): `date_re` is not defined in this function — confirm
        # it is a module-level global.
        creation_date = date_util.datetimeSubDate(date_re)
        del current_date
        # Raw HTML body produced by the rich-text editor widget.
        body = request.form["editordata"]
        new_article = Article(title=form.title.data,
                              author=form.author.data,
                              create_date=creation_date,
                              short_desc=form.short_desc.data,
                              title_img=img,
                              body=body)
        db.session.add(new_article)
        db.session.commit()
        alert.setAlert('success', 'Article has been Created.')
        return redirect(url_for(".homePage"))
    else:
        return render_template("public/articles/articleform.html", form=form)
def handle_unsplash_plasma_desktopskill_intent(self, message):
    """Mycroft intent handler: fetch a random Unsplash photo matching the
    spoken category and set it as the KDE Plasma desktop wallpaper."""
    utterance = message.data.get('utterance').lower()
    # Strip the trigger keyword; what remains is the search category.
    utterance = utterance.replace(
        message.data.get('UnsplashPlasmaDesktopSkillKeyword'), '')
    searchString = utterance
    rawrinpt = str(searchString)
    category = rawrinpt
    size = '1920x1080'
    # source.unsplash.com returns a random photo for the given query.
    file_url = 'https://source.unsplash.com/' + size + '/' + '?' + category
    # Random suffix keeps successive downloads from overwriting each other.
    suffix_random = str(random.randint(1111, 9999))
    file_name = category + suffix_random
    i = requests.get(file_url)
    if i.status_code == requests.codes.ok:
        ossep = os.path.sep
        directory = os.path.realpath(os.getcwd() + ossep + "pictures" + ossep)
        if not os.path.exists(directory):
            time.sleep(1)
            os.makedirs(directory)
        with iopen(directory + ossep + file_name + '.jpg', 'wb') as file:
            file.write(i.content)
            file.close()
        currdir = os.getcwd()
        # Ask PlasmaShell over D-Bus to point every desktop at the new image.
        bus = dbus.SessionBus()
        remote_object = bus.get_object("org.kde.plasmashell", "/PlasmaShell")
        remote_object.evaluateScript(
            'var allDesktops = desktops();print (allDesktops);for (i=0;i<allDesktops.length;i++) {d = allDesktops[i];d.wallpaperPlugin = "org.kde.image";d.currentConfigGroup = Array("Wallpaper", "org.kde.image", "General");d.writeConfig("Image", "file://' + currdir + '/pictures/' + file_name + '.jpg' + '")}',
            dbus_interface="org.kde.PlasmaShell")
def reading_satellite(csvforimport, APIkey, zoom, size):
    """Download a Google static-map satellite tile for every point in a CSV.

    csvforimport: CSV of (index, lat, lng) rows.
    APIkey/zoom/size: Google Static Maps request parameters.
    Each tile is saved as '<ind>_<lat>_<lng>_<zoom>.png'; returns False on
    the first failed request, None when all tiles were fetched.
    """
    base = "https://maps.googleapis.com/maps/api/staticmap?scale=2&size="
    # Read every location up front before issuing any request.
    with open(csvforimport, 'rt') as f:
        locations = [(int(row[0]), float(row[1]), float(row[2]))
                     for row in csv.reader(f)]
    for ind, lat, lng in locations:
        latlng = "center={},{}".format(lat, lng)
        view = "zoom={}&maptype=satellite".format(zoom)
        keys = "key={}".format(APIkey)
        url = "{}{}x{}&{}&{}&{}".format(base, size, size, latlng, view, keys)
        filename = "{}_{}_{}_{}.png".format(ind, lat, lng, zoom)
        res = requests.get(url)
        if res.status_code != requests.codes.ok:
            return False
        with iopen(filename, 'wb') as file:
            file.write(res.content)
        print(filename)
def fetch_image(img_ur, save_filename):
    """Download *img_ur* to *save_filename*; print the status on failure."""
    response = requests.get(img_ur)
    if response.status_code != 200:
        print('Received error: {}'.format(response.status_code))
        return
    with iopen(save_filename, 'wb') as handle:
        handle.write(response.content)
def __write_resource_from_request(self, req):
    """Write the body of response *req* to '<dest_folder>/<file_name>.jpg'.

    (Python 2 code: `print` statement with a unicode literal.)
    """
    file_name = '%s.jpg' % self.file_name
    file_path = os.path.join(self.dest_folder, file_name)
    print u'Writing %s' % file_name
    with iopen(file_path, 'wb') as file:
        file.write(req.content)
def connect(self, read_test=True):
    """Open read/write handles on the I2C device and bind the slave address.

    read_test: when True, read a single byte to verify the device responds.
    Propagates any open/ioctl/read failure to the caller.
    """
    PTLogger.debug("I2C: Connecting to address " + hex(self._device_address) + " on " + self._device_path)
    self._lock_file_handle = open(self._lock_file_name, 'w')
    # buffering=0: presumably each read/write must map onto one device
    # transaction — confirm against the kernel i2c-dev interface.
    self._read_device = iopen(self._device_path, "rb", buffering=0)
    self._write_device = iopen(self._device_path, "wb", buffering=0)
    # Select the slave address on both handles.
    fcntl.ioctl(self._read_device, self.I2C_SLAVE, self._device_address)
    fcntl.ioctl(self._write_device, self.I2C_SLAVE, self._device_address)
    if (read_test is True):
        PTLogger.debug("I2C: Test read 1 byte")
        self._read_device.read(1)
        PTLogger.debug("I2C: OK")
def download_image(count_request):
    """Fetch the image referenced by *count_request* and stash it under /tmp.

    The temp name combines the congestion id with the source file name.
    """
    endpoint = count_request.get_endpoint()
    response = requests.get(url=endpoint)
    tmp_filename = '/tmp/{bus_stop_congestion_id}_{source_filename}'.format(
        bus_stop_congestion_id=count_request.bus_stop_congestion_id,
        source_filename=count_request.source_filename)
    with iopen(tmp_filename, 'wb') as out:
        out.write(response.content)
def requests_image(file_url, file_name):
    """Download *file_url* into *file_name* if the name carries a known image suffix.

    Returns False when the suffix is unrecognized or the HTTP status is not
    OK; returns None on success.
    """
    suffix_list = ['jpg', 'gif', 'png', 'tif', 'svg', ]
    # BUG FIX: the original `split('.')[1]` raised IndexError for dot-less
    # names and chose the wrong piece for multi-dot names; take the final
    # suffix instead.
    pieces = file_name.rsplit('.', 1)
    file_suffix = pieces[1] if len(pieces) == 2 else ''
    # Reject unsupported names before spending a network round-trip.
    if file_suffix not in suffix_list:
        return False
    i = requests.get(file_url)
    if i.status_code == requests.codes.ok:
        with iopen(file_name, 'wb') as file:
            file.write(i.content)
    else:
        return False
def connect(self, read_test=True):
    """Open unbuffered read/write handles on the I2C device and select the
    slave address on both.

    read_test: when True, read one byte to check the device answers.
    Propagates any open/ioctl/read failure to the caller.
    """
    logger.debug(
        "I2C: Connecting to address "
        + hex(self._device_address)
        + " on "
        + self._device_path
    )
    # buffering=0: presumably each read/write must map onto one device
    # transaction — confirm against the kernel i2c-dev interface.
    self._read_device = iopen(self._device_path, "rb", buffering=0)
    self._write_device = iopen(self._device_path, "wb", buffering=0)
    # Select the slave address on both handles.
    ioctl(self._read_device, self.I2C_SLAVE, self._device_address)
    ioctl(self._write_device, self.I2C_SLAVE, self._device_address)
    if read_test is True:
        logger.debug("I2C: Test read 1 byte")
        self._read_device.read(1)
        logger.debug("I2C: OK")
def makemasterindex(self):
    """Render the master index.htm listing every movie in self.indexlist."""
    html = ''
    # One <tr> per movie; $%{...} placeholders are filled in by TemplateX.
    tablerow = (
        '<tr class="">' +
        '<td><a class="rowlink" href="$%{foldername}/index.htm">' +
        '<img class="thumbnail" src="$%{foldername}/$%{postersrc}"></a></td>' +
        '<td class="rowtitle">$%{title}</td>' +
        '<td class="rowyear">$%{year}</td>' +
        '<td class="rowruntime">$%{runtime}</td>' +
        '<td class="rowroger">$%{roger}</td>' +
        '<td class="rowimdb">$%{imdb}</td>' +
        '<td class="rowrotten">$%{rotten}</td>' +
        '<td class="rowmeta">$%{meta}</td>' +
        '</a></tr>')
    tablerows = ''
    template = templatex.TemplateX(tablerow)
    for f in self.indexlist:
        # Defaults guarantee every placeholder resolves even for sparse entries.
        movieattributes = {
            'foldername': '',
            'postersrc': '',
            'title': '',
            'year': '',
            'runtime': '',
            'roger': '',
            'imdb': '',
            'rotten': '',
            'meta': ''
        }
        movieattributes.update(f)
        tablerows += template.substitute(movieattributes)
    res_path = getmoduleresources() + '/'
    # The page template ships with the module; newlines/tabs are stripped
    # before substitution.
    with open(res_path + 'masterindex.html', 'r') as templatefile:
        formatstr = templatefile.read().replace('\n', '')
        formatstr = formatstr.replace('\t', '')
    html = templatex.TemplateX(formatstr)
    replacedict = {
        'headertitle': self.configdict['searchfolder'].split('/')[-1],
        'cf_res': self.structnames['hiddenresfolder'],
        'table_rows': tablerows,
        'tmdb_logo': '/images/tmdb-attribution-rectangle.png',
    }
    writeouthtml = html.substitute(replacedict)
    htmlfilename = self.configdict['searchfolder'] + '/index.htm'
    # Remove any previous index first: the exclusive 'x' mode below would
    # fail if the file still existed.
    if (os.path.isfile(htmlfilename)):
        os.remove(htmlfilename)
        logging.info('Deleted ' + htmlfilename)
    with iopen(htmlfilename, 'x') as file:
        file.write(writeouthtml)
    logging.info('Wrote new ' + htmlfilename)
async def save(url):
    """Fetch *url* asynchronously and write the payload to a local file
    named after the last path segment of the URL.

    Does nothing (beyond logging) when the fetch yields no data.
    """
    print('fetch and save', url)
    data = await fetch_async(url)
    if not data:
        print('err fetch ', url, ' no data')
        return
    name = url.split('/')[-1]
    #print(name,' ',len(r))
    # BUG FIX: the original wrote an undefined name `r` (NameError on every
    # successful fetch); write the fetched payload instead.
    with iopen(name, 'wb') as fw:
        fw.write(data)
def savemetadata(self):
    """Serialize this object's attributes to '<res_path>/metadata.txt',
    one 'key>value' line per attribute, replacing any existing file."""
    metafile = self.res_path + 'metadata.txt'
    attr = self.getattributes()
    payload = ''.join(key + '>' + attr[key] + '\n' for key in attr)
    # Recreate the file from scratch: the exclusive 'x' mode below would
    # fail if it still existed.
    if os.path.isfile(metafile):
        os.remove(metafile)
    with iopen(metafile, 'x') as fh:
        fh.write(payload)
def deltmp(self):
    """Best-effort removal of the temporary file self.tmp; schedule a retry
    via a Qt single-shot timer when the removal fails."""
    try:
        # Probe that the file can be opened before attempting removal.
        f = iopen(self.tmp)
        f.close()
    except:
        pass
    try:
        remove(self.tmp)
        self.tmp = None
    except:
        # NOTE(review): QTimer.singleShot expects a callable; passing
        # self.tmp (a path string) looks wrong — presumably this meant
        # self.deltmp. Confirm before changing.
        QTimer.singleShot(1000, self.tmp)
def downloader(image_url, name):
    """Best-effort download of *image_url* to '<name>.jpg'.

    Silently returns on any request failure, on the 'photo_unavailable'
    placeholder redirect, or on a non-OK status.
    """
    ext = 'jpg'
    target = str(name) + '.' + str(ext)
    try:
        response = requests.get(image_url)
    except Exception:
        # Network failure is deliberately swallowed (best effort).
        return
    if 'photo_unavailable' in response.url:
        # The final URL signals a removed/unavailable photo.
        return
    if response.status_code == requests.codes.ok:
        with iopen(target, 'wb') as out:
            out.write(response.content)
def requests_image(file_url, DIR):
    """Download the image at *file_url* into directory *DIR*.

    The local name is the last path segment of the URL; only 'jpg'/'png'
    suffixes are accepted.  Returns True on success, False otherwise.
    """
    suffix_list = ['jpg', 'png']
    file_name = urlsplit(file_url)[2].split('/')[-1]
    # BUG FIX: the original `split('.')[1]` raised IndexError for
    # extension-less names and chose the wrong piece for multi-dot names;
    # take the final suffix instead.
    pieces = file_name.rsplit('.', 1)
    file_suffix = pieces[1] if len(pieces) == 2 else ''
    # Reject unsupported suffixes before spending a network round-trip.
    if file_suffix not in suffix_list:
        return False
    i = requests.get(file_url)
    if i.status_code == requests.codes.ok:
        with iopen(DIR + file_name, 'wb') as file:
            file.write(i.content)
        return True
    return False
def imageDownloader(images, directory, limit=0):
    """Download every URL in *images* into *directory*.

    limit: stop after this many downloads; 0 means no cap.
    Each file is named after the URL's last path segment and progress is
    printed after every download.
    """
    pool = urllib3.PoolManager()
    downloaded = 0
    for img in images:
        # A non-zero limit caps the number of downloads.
        if downloaded == limit and downloaded != 0:
            break
        file_name = urlsplit(img)[2].split('/')[-1]
        # Browser-like User-Agent: some hosts reject the default one.
        resp = pool.request('GET', img, headers={'User-Agent': 'Mozilla/5.0'})
        downloaded += 1
        with iopen(directory + "/" + file_name, 'wb') as out:
            out.write(resp.data)
        print("> Image {} downloaded ({}/{})".format(file_name, downloaded, len(images) if limit == 0 else limit))
def requests_image(file_url, file_name):
    """Download *file_url* to *file_name* when the name has a known image suffix.

    Returns False when the suffix is unknown or the HTTP status is not OK;
    returns None on success.
    """
    suffix_list = ['jpg', 'gif', 'png', 'tif', 'svg', ]
    # BUG FIX: the original `split('.')[1]` raised IndexError for dot-less
    # names and chose the wrong piece for multi-dot names; take the final
    # suffix instead.
    pieces = file_name.rsplit('.', 1)
    file_suffix = pieces[1] if len(pieces) == 2 else ''
    # Reject unsupported names before spending a network round-trip.
    if file_suffix not in suffix_list:
        return False
    i = requests.get(file_url)
    if i.status_code == requests.codes.ok:
        with iopen(file_name, 'wb') as file:
            file.write(i.content)
    else:
        return False
def save_all_gravatars(size):
    """Download every site user's gravatar at *size* px to '<first>_<last>.jpg'.

    (Python 2 code: `print` statements; imports are local so the module can
    be loaded without Django/requests configured.)
    """
    from apps.userprofile.models import SiteUser
    import requests
    from io import open as iopen
    users = SiteUser.objects.all()
    for u in users:
        # NOTE(review): the first argument "derp" looks like a placeholder —
        # confirm what get_gravatar_url expects there.
        url = get_gravatar_url("derp", u, size)
        print url
        r = requests.get(url)
        if r.status_code == requests.codes.ok:
            print r.status_code
            with iopen(u.first_name + "_" + u.last_name + ".jpg", 'wb') as file:
                file.write(r.content)
def fetch_document(self, file_url, folder_name, path):
    """Download a course document into <path>/<self.name>/<folder_name>/.

    Follows a single 302 manually, derives the local file name from the
    resolved URL, sanitizes/truncates names for the filesystem, and skips
    files that already exist.  Returns False on a failed download, None
    otherwise.  (Python 2 code: `print` statement, `string.replace`,
    `urllib2`.)
    """
    # Keep folder and course names at <= 50 characters.
    while len(folder_name) > 50:
        folder_name = folder_name[:-1]
    while len(self.name) > 50:
        self.name = self.name[:-1]
    folder_name = string.replace(folder_name, ':', ' ')
    self.name = string.replace(self.name, ':', ' ')
    folder_name = sanitize(folder_name)
    self.name = sanitize(self.name)
    # Resolve one level of redirect to find the real file URL.
    urlResponse = self.session.get(file_url, allow_redirects=False)
    if urlResponse.status_code == 302:
        urlpath = urlResponse.headers['location']
    else:
        urlpath = urlResponse.url
    if len(folder_name) > 0:
        thepath = path + '/' + self.name + '/' + folder_name + '/'
    else:
        thepath = path + '/' + self.name + '/'
    # Local file name = last segment of the resolved URL, percent-decoded.
    name = urlsplit(urlpath)[2].split('/')
    name = name[len(name)-1]
    name = urllib2.unquote(name).decode('utf8')
    # Shrink the name (preserving its extension) until both the full path
    # (<= 240) and the name itself (<= 50) fit.
    while ((len(thepath + name) > 240) or (len(name) > 50)):
        if "." in name:
            filename = name.split('.')
            ext = filename[len(filename)-1]
            prefix = ''
            for x in filename[:-1]:
                prefix = prefix + x
            name = prefix[:-1] + '.' + ext
        else:
            name = name[:-1]
    name = sanitize(name)
    # Skip 'defaultTab' targets and HTML responses.
    if name != 'defaultTab' and '.html' not in name:
        if not os.path.isdir(thepath):
            os.makedirs(thepath)
        if (not os.path.exists(thepath + name)):
            print urlpath
            urlResponse = self.session.get(urlpath)
            if urlResponse.status_code == requests.codes.ok:
                with iopen(thepath + name, 'wb') as file:
                    file.write(urlResponse.content)
            else:
                return False
def requests_image(
    file_url,
    s,
    o,
    k,
    path,
):
    """Download the (possibly redirected) image at *file_url* into
    <path>/<k>/<o>/, sanitizing and truncating names for the filesystem.

    Returns False on a failed download, None otherwise.
    (Python 2 code: `print` statement, `string.replace`, `urllib2`.)
    """
    # Keep both folder names at <= 50 characters.
    while len(o) > 50:
        o = o[:-1]
    while len(k) > 50:
        k = k[:-1]
    o = string.replace(o, ':', ' ')
    k = string.replace(k, ':', ' ')
    o = sanitize(o)
    k = sanitize(k)
    thepath = path + '/' + k + '/' + o + '/'
    if not os.path.isdir(thepath):
        os.makedirs(thepath)
    # One manual redirect hop reveals the real file URL.
    i = s.get(file_url, allow_redirects=False)
    if i.status_code == 302:
        urlpath = i.headers['location']
    else:
        urlpath = i.url
    # Local file name = last segment of the resolved URL, percent-decoded.
    name = urlsplit(urlpath)[2].split('/')
    name = name[len(name)-1]
    name = urllib2.unquote(name).decode('utf8')
    # Shrink the name (preserving its extension) until limits are met.
    while ((len(thepath + name) > 240) or (len(name) > 50)):
        if "." in name:
            filename = name.split('.')
            ext = filename[len(filename)-1]
            prefix = ''
            for x in filename[:-1]:
                prefix = prefix + x
            name = prefix[:-1] + '.' + ext
        else:
            name = name[:-1]
    name = sanitize(name)
    # Only download files we do not already have.
    if not os.path.exists(thepath + name):
        print urlpath
        i = s.get(urlpath)
        if i.status_code == requests.codes.ok:
            with iopen(thepath + name, 'wb') as file:
                file.write(i.content)
        else:
            return False
def work_func(task):
    """ Download an image from a url
        return success/failure and the task to caller

    task: dict with 'url' and 'filename' keys; the file is written under
    the module-level DATA_DIR.  Returns (True, task) on success,
    (False, task) on any request failure or non-OK status.
    """
    try:
        # experiment with time
        # too big gives laggy results
        socket.setdefaulttimeout(1)
        i = requests.get(task['url'])
    # BUG FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return((False, task))
    if i.status_code == requests.codes.ok:
        filename = path.join(DATA_DIR, '{0}'.format(task['filename']))
        # Context manager guarantees the handle is closed even if write fails.
        with iopen(filename, 'wb') as f:
            f.write(i.content)
        return((True, task))
    else:
        return((False, task))
def requests_image(file_url, s, o, k, path):
    """Download the (possibly redirected) image at *file_url* into
    <path>/<k>/<o>/, sanitizing and truncating names for the filesystem.

    Returns False on a failed download, None otherwise.
    (Python 2 code: `print` statement, `string.replace`, `urllib2`.)
    """
    # Keep both folder names at <= 50 characters.
    while len(o) > 50:
        o = o[:-1]
    while len(k) > 50:
        k = k[:-1]
    o = string.replace(o, ":", " ")
    k = string.replace(k, ":", " ")
    o = sanitize(o)
    k = sanitize(k)
    thepath = path + "/" + k + "/" + o + "/"
    if not os.path.isdir(thepath):
        os.makedirs(thepath)
    # One manual redirect hop reveals the real file URL.
    i = s.get(file_url, allow_redirects=False)
    if i.status_code == 302:
        urlpath = i.headers["location"]
    else:
        urlpath = i.url
    # Local file name = last segment of the resolved URL, percent-decoded.
    name = urlsplit(urlpath)[2].split("/")
    name = name[len(name) - 1]
    name = urllib2.unquote(name).decode("utf8")
    # Shrink the name (preserving its extension) until limits are met.
    while (len(thepath + name) > 240) or (len(name) > 50):
        if "." in name:
            filename = name.split(".")
            ext = filename[len(filename) - 1]
            prefix = ""
            for x in filename[:-1]:
                prefix = prefix + x
            name = prefix[:-1] + "." + ext
        else:
            name = name[:-1]
    name = sanitize(name)
    # Only download files we do not already have.
    if not os.path.exists(thepath + name):
        print urlpath
        i = s.get(urlpath)
        if i.status_code == requests.codes.ok:
            with iopen(thepath + name, "wb") as file:
                file.write(i.content)
        else:
            return False
def read_csv(fname):
    """Yield rows (lists of strings) from the CSV file *fname*.

    The utf-8-sig encoding transparently strips a UTF-8 BOM; newline=''
    lets the csv module handle line endings itself.
    """
    with iopen(fname, newline='', encoding='utf-8-sig') as handle:
        yield from reader(handle)
# Python 2 proof-of-concept script: fetch an EMS captcha image, threshold it
# to a two-tone palette image, and OCR it with pytesser.
import requests
from PIL import Image
from io import open as iopen
from pytesser import *
i = requests.get('http://www.ems.com.cn/ems/rand?0.33681450065245544')
# Just a proof of concept, if you wanted to actually use this you'd have to scrape the page and input it correctly
with iopen('temp.png', 'wb') as file:
    file.write(i.content)
im = Image.open('temp.png')
# Blank palette image (all 255) that will receive the thresholded pixels.
im2 = Image.new("P", im.size, 255)
im3 = im2.load()
# NOTE(review): the loop reuses the name `i`, clobbering the response object.
for i in range(0,im.size[0]):
    for j in range(0,im.size[1]):
        pixel = im.getpixel((i,j))
        # Pixels whose channel sum exceeds 400 are set to palette index 0.
        if sum(pixel) > 400:
            im3[i,j] = 0
im2.save('result.png', 'PNG')
answer = image_to_string(im2)
print answer
def open(self, filename):
    """Open *filename* for binary reading on self.fhwnd and clear the EOF flag."""
    self.eof = False
    self.fhwnd = iopen(filename, "rb")