def save(self, *args, **kwargs):
    # Resize a directly-uploaded image to at most 500px wide and/or fetch a
    # remote image referenced by `image_slurp`, resize it, and store it on
    # the `image` field before delegating to the normal model save.
    if self.image:
        #open image
        pil_image_obj = Image.open(self.image)
        img_size = pil_image_obj.size
        # Only shrink when wider than 500px; smaller images are left as-is.
        if float(img_size[0]) > 500:
            new_image = resizeimage.resize_width(pil_image_obj, 500)
            new_image_io = BytesIO()
            new_image.save(new_image_io, format='JPEG')
            temp_name = self.image.name
            # Replace the stored file in place; save=False defers the DB write
            # to the super().save() call below.
            self.image.delete(save=False)
            self.image.save(temp_name, content=ContentFile(new_image_io.getvalue()), save=False)
    if self.image_slurp:
        print("image slurp")
        imgRequest = request.urlopen(self.image_slurp)
        if imgRequest.status == 200:
            file_name = self.image_slurp.split('/')[-1]
            img_temp = NamedTemporaryFile()
            img_temp.write(imgRequest.read())
            img_temp.flush()
            img_file = File(img_temp)
            pil_image_obj = Image.open(img_temp)
            img_size = pil_image_obj.size
            if float(img_size[0]) > 500:
                new_image = resizeimage.resize_width(pil_image_obj, 500)
            else:
                new_image = pil_image_obj
            new_image_io = BytesIO()
            # NOTE(review): saving as JPEG will fail for RGBA/P-mode source
            # images (e.g. many PNGs) — confirm inputs are RGB.
            new_image.save(new_image_io, format='JPEG')
            temp_name = file_name
            # Clear the slurp URL so the download is not repeated next save.
            self.image_slurp = None
            self.image.delete(save=False)
            self.image.save(temp_name, content=ContentFile(new_image_io.getvalue()), save=False)
    super(Article, self).save(*args, **kwargs)
def upload_fb(request):
    """Uploads the user's picture from Facebook."""
    # POST only; any other method (or an invalid form) falls through to 404.
    if request.method == "POST":
        form = FacebookPictureForm(request.POST)
        if form.is_valid():
            # Need to download the image from the url and save it.
            photo_temp = NamedTemporaryFile(delete=True)
            fb_url = form.cleaned_data["facebook_photo"]
            photo_temp.write(urllib2.urlopen(fb_url).read())
            photo_temp.flush()
            # Rewind so storage.save() below reads from the beginning.
            photo_temp.seek(0)
            # Delete old avatars if they exist
            avatars = Avatar.objects.filter(user=request.user)
            for avatar in avatars:
                # Remove the backing file from storage, the field value,
                # and finally the Avatar row itself.
                avatar.avatar.storage.delete(avatar.avatar.name)
                avatar.avatar.delete()
                avatar.delete()
            path = avatar_file_path(user=request.user, filename="fb_photo.jpg")
            avatar = Avatar(
                user=request.user,
                primary=True,
                avatar=path,
            )
            # print "saving facebook photo to " + path
            avatar.avatar.storage.save(path, File(photo_temp))
            avatar.save()
            return HttpResponseRedirect(
                reverse("profile_index") + "?changed_avatar=True")
    raise Http404
def scrap_items():
    """Scrape item rows from each wiki item-list page and persist them.

    Skips header rows and rows without an item name; when HOOKED is falsy
    the items are parsed but not stored.
    """
    for itemlist in ITEMLIST:
        soup = BS(urllib2.urlopen(''.join([LOLWIKI, itemlist])).read())
        item_table = soup.find('table', class_='stdt sortable')
        for tr in item_table.find_all('tr'):
            tds = tr.find_all('td')
            if len(tds) < 1:
                continue
            # Rows without a <p> element carry no item name.
            # (Fixed: compare to None with `is`, not `==`.)
            if tr.find('p') is None:
                continue
            item_name = tr.find('p').text.strip()
            item_url = tr.find('img')['src']
            # Lazy-loaded images put a data: URI in src; the real URL is
            # in the data-src attribute.
            if item_url.split(':')[0] == 'data':
                item_url = tr.find('img')['data-src']
            if not HOOKED:
                continue
            #store item in database
            d_item = Item()
            d_item.name = item_name
            t_img = NamedTemporaryFile(delete=True)
            t_img.write(urllib2.urlopen(item_url).read())
            t_img.flush()
            t_img.name = '.'.join([item_name, 'jpg'])
            d_item.picture = File(t_img)
            d_item.save()
def save_url_image(field, url, name):
    """Download *url* and store its bytes on the file *field* under *name*."""
    response = requests.get(url)
    buffer_file = NamedTemporaryFile(delete=True)
    buffer_file.write(response.content)
    buffer_file.flush()
    field.save(name, File(buffer_file), save=True)
def pairwise_protein(query_name, query_seq, query_frame, subject_name, subject_seq, subject_frame):
    # Translate both DNA sequences in their given reading frames (a negative
    # frame means reverse-complement first), align the two proteins with
    # MAFFT, and return the clustal-format alignment text from stdout.
    if query_frame < 0:
        query_name = query_name + "(" + str(query_frame) + ")"
        # NOTE(review): .tostring() is the legacy Biopython Seq API (removed
        # in modern releases) — this code targets old Biopython/Python 2.
        query_seq = Seq(query_seq).reverse_complement()[-query_frame - 1 :].translate().tostring()
    elif query_frame > 0:
        query_name = query_name + "(" + str(query_frame) + ")"
        query_seq = Seq(query_seq)[query_frame - 1 :].translate().tostring()
    if subject_frame < 0:
        subject_name = subject_name + "(" + str(subject_frame) + ")"
        subject_seq = Seq(subject_seq).reverse_complement()[-subject_frame - 1 :].translate().tostring()
    elif subject_frame > 0:
        subject_name = subject_name + "(" + str(subject_frame) + ")"
        subject_seq = Seq(subject_seq)[subject_frame - 1 :].translate().tostring()
    # Write a two-record FASTA file for mafft to consume.
    input_file = NamedTemporaryFile(prefix="mafft_")
    input_file.write("\n".join([">" + query_name, query_seq.upper(), ">" + subject_name, subject_seq.upper()]))
    input_file.flush()
    # --namelength must cover the longest name plus the frame suffix.
    namelength = max([len(query_name), len(subject_name)]) + 4
    mafft_cmd = "mafft --preservecase --clustalout --namelength " + str(namelength) + " " + input_file.name
    mafft_proc = Popen(mafft_cmd, stdout=PIPE, stderr=PIPE, shell=True)
    stdout, stderr = mafft_proc.communicate()
    return stdout
def save_image(profile, url):
    """Fetch the image at *url* and store it as the profile's avatar."""
    tmp = NamedTemporaryFile(delete=True)
    tmp.write(urllib.request.urlopen(url).read())
    tmp.flush()
    profile.avatar_image.save(str(profile.id), File(tmp))
def _save_image(self, url):
    """Download *url* and save it as this object's user avatar image."""
    tmp = NamedTemporaryFile(delete=True)
    tmp.write(urllib.request.urlopen(url).read())
    tmp.flush()
    self.user.avatar_image.save(str(self.user.id), File(tmp))
def _download_file(url, obj, obj_fieldname='executed_file', filename=None, update_obj=True):
    """
    Task to download a file from a url and save it to a model field
    """
    if url not in [None, '']:
        # Validate the url.
        # BUG FIX: URLValidator(url) only *constructs* a validator (url was
        # being passed as a constructor argument, never checked); a validator
        # instance must be called with the value to validate it.
        URLValidator()(url)
        if filename is None:
            filename = urlparse.urlparse(url).path
        filename_no_ext, ext = os.path.splitext(filename.split('/')[-1])
        #
        # @BUSINESSRULE must have a file .suffix
        #
        # BUG FIX: os.path.splitext returns '' (never None) when there is no
        # extension, so the original `ext is None` check could never fire.
        if not ext:
            raise Exception('Cannot download a file with no filename.extension: %s' % url)
        filename = '%s%s' % (filename_no_ext, ext)
        #try:
        img_temp = NamedTemporaryFile(delete=True, suffix=ext)
        img_temp.write(urllib2.urlopen(url).read())
        img_temp.flush()
        #
        # SAVE THE FILE LOCALLY
        #
        # use the upload_to function to name and place the file appropriately
        filename = obj._meta.get_field(obj_fieldname).upload_to(instance=obj, filename=filename)
        file_object = File(img_temp)
        # return both the filename and the file_object for saving to the model
        return (default_storage.save(filename, file_object), file_object,)
def user_join(request):
    # Register a user (optionally coming from a social login), create their
    # profile (downloading an avatar if a URL was supplied), authenticate,
    # and log them in with a one-year session.
    login_from = int(request.POST.get("login_from", 0))
    user_id = request.POST.get("user_id")
    password = request.POST.get("password")
    user_name = request.POST.get("user_name")
    user_email = request.POST.get("user_email")
    phone = request.POST.get("user_phone")
    gender = int(request.POST.get("user_gender", 0))
    img_url = request.POST.get("img_url", "")
    profile_img = None
    if login_from != 0:
        # NOTE(review): "******" contains no %-placeholders, so this line
        # raises TypeError ("not all arguments converted") for any social
        # login. The format string looks redacted — restore the original
        # pattern before relying on this path.
        password = "******" % (login_from, password)
    if img_url:
        profile_img = NamedTemporaryFile(delete=True)
        profile_img.write(urllib2.urlopen(img_url).read())
        profile_img.flush()
    try:
        user = User.objects.create_user(username=user_id, password=password, email=user_email, first_name=user_name)
        if user:
            profile = UserProfile.objects.create(user=user, gender=gender, login_from=login_from, phone=phone)
            if profile_img:
                profile.src.save("%s.jpg" % user_id, File(profile_img))
            user = authenticate(username=user_id, password=password)
            if user:
                login(request, user)
                # Keep the session alive for one year (in seconds).
                request.session.set_expiry(31536000)
                return HttpResponseRedirect("/")
    except Exception as e:
        # NOTE(review): all registration failures are silently swallowed and
        # redirect to /home/ — consider logging `e`.
        pass
    return HttpResponseRedirect("/home/")
def download_media(self, media_url):
    """
    Fetches the recording and stores it with the provided recording_id
    :param media_url: the url where the media lives
    :return: the url for our downloaded media with full content type prefix
    """
    response = requests.get(media_url, stream=True, auth=self.auth)
    disposition = response.headers.get('Content-Disposition', None)
    content_type = response.headers.get('Content-Type', None)
    if content_type:
        extension = None
        if disposition:
            # BUG FIX: guard the regex result — a Content-Disposition header
            # without a quoted filename used to raise IndexError on the
            # unconditional [0].
            matches = re.findall(r'filename="(.+)"', disposition)
            if matches:
                extension = matches[0].rpartition('.')[2]
        elif content_type == 'audio/x-wav':
            extension = 'wav'
        temp = NamedTemporaryFile(delete=True)
        temp.write(response.content)
        temp.flush()
        return '%s:%s' % (content_type, self.org.save_media(File(temp), extension))
    return None
def import_entries(self, feed_entries):
    """Import entries"""
    for feed_entry in feed_entries:
        self.write_out("> %s... " % feed_entry.title)
        creation_date = datetime(*feed_entry.date_parsed[:6])
        slug = slugify(feed_entry.title)[:255]
        # Skip entries already imported (same slug published the same day).
        if Entry.objects.filter(
            creation_date__year=creation_date.year,
            creation_date__month=creation_date.month,
            creation_date__day=creation_date.day,
            slug=slug,
        ):
            self.write_out(self.style.NOTICE("SKIPPED (already imported)\n"))
            continue
        categories = self.import_categories(feed_entry)
        entry_dict = {
            "title": feed_entry.title[:255],
            "content": feed_entry.description,
            "excerpt": feed_entry.get("summary"),
            "status": PUBLISHED,
            "creation_date": creation_date,
            "start_publication": creation_date,
            "last_update": datetime.now(),
            "slug": slug,
        }
        if not entry_dict["excerpt"] and self.auto_excerpt:
            # BUG FIX: Truncator takes the text to shorten; words()' second
            # argument is the truncation *suffix*. The original call had the
            # two swapped (it truncated the literal "..." and appended the
            # whole description as the suffix).
            entry_dict["excerpt"] = Truncator(strip_tags(feed_entry.description)).words(50)
        if self.tags:
            entry_dict["tags"] = self.import_tags(categories)
        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)
        if self.image_enclosure:
            # Attach the first image enclosure as the entry image.
            for enclosure in feed_entry.enclosures:
                if "image" in enclosure.get("type") and enclosure.get("href"):
                    img_tmp = NamedTemporaryFile(delete=True)
                    img_tmp.write(urlopen(enclosure["href"]).read())
                    img_tmp.flush()
                    entry.image.save(slug, File(img_tmp))
                    break
        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get("author_detail"):
            try:
                user = User.objects.create_user(
                    slugify(feed_entry.author_detail.get("name")),
                    feed_entry.author_detail.get("email", ""),
                )
            except IntegrityError:
                # Author already exists: reuse the account.
                user = User.objects.get(username=slugify(feed_entry.author_detail.get("name")))
            entry.authors.add(user)
        self.write_out(self.style.ITEM("OK\n"))
def create_user_profile(sender,instance,created,**kwargs):
    """
    Creates a Profile model for each User that is created.
    This function is called on the post_save signal from User.
    """
    u = instance
    if created:
        p = Profile.objects.create(user=u)
        # Split a combined "First Last" first_name into first/last when the
        # last name is missing.
        if not u.last_name:
            names = u.first_name.split(" ")
            if len(names)>1:
                u.last_name = names[1]
                u.save()
        # No profile image yet: try to fetch the Gravatar advertised by the
        # user's GitHub account (via their social-auth access token).
        if not u.get_profile().image:
            # get gravatar
            user_sa = UserSocialAuth.objects.filter(user=u)
            if user_sa:
                user_sa = user_sa[0]
                token = user_sa.extra_data['access_token']
                g = Github(token).get_user()
                gravatar_id = g.gravatar_id
                url = "https://secure.gravatar.com/avatar/" + gravatar_id + "?s=500"
                img_temp = NamedTemporaryFile(delete=True)
                img_temp.write(urllib2.urlopen(url).read())
                img_temp.flush()
                u.get_profile().image.save("gravatar_image_" + str(u.pk), File(img_temp))
def process(filename): data = json.loads( open(filename, 'r').read() ) # pprint.pprint( data ) print "%s (%s) - %s" % (data['name'], data['slug'], filename) slug = data['slug'] try: person = models.Person.objects.get(slug=slug) return # don't try to update the person except models.Person.DoesNotExist: person = models.Person(slug=slug) person.legal_name = data['name'] person.summary = data['summary'] person.date_of_birth = data['date_of_birth'] person.save() content_type = ContentType.objects.get_for_model(person) if data.get('profile_url'): models.Contact.objects.get_or_create( content_type = content_type, object_id = person.id, value = re.sub('\s', '%20', data['profile_url'] ), kind = profile_url_kind, ) if data.get('email'): models.Contact.objects.get_or_create( content_type = content_type, object_id = person.id, value = data['email'], kind = email_kind, ) # import image if data.get('image') and 'img_not_found' not in data['image']: image_url = re.sub('\s', '%20', data['image'] ); photo, created = Image.objects.get_or_create( content_type = content_type, object_id = person.id, source = image_url, ) if created: print " Fetching " + image_url try: img_temp = NamedTemporaryFile(delete=True) img_temp.write( urllib2.urlopen(image_url).read() ) img_temp.flush() photo.image.save( person.slug, File(img_temp) ) photo.save() except urllib2.HTTPError: print " ...failed!"
def import_images(logo_dir):
    # Upload organisation logo files named <identifier>.<ext> from logo_dir,
    # resolving each identifier to an Organisation through Cordaid's
    # InternalOrganisationID mapping. (Python 2 code: `except Exception, e`.)
    report_data = dict(failed=0, succeeded=0)
    for logo_name in os.listdir(logo_dir):
        error_message = ""
        identifier, extension = splitext(logo_name)
        # Only recognised image extensions are processed.
        if extension.lower() in (".png", ".jpg", ".jpeg", ".gif"):
            try:
                internal_org_id = InternalOrganisationID.objects.get(
                    recording_org=Organisation.objects.get(id=CORDAID_ORG_ID),
                    identifier=identifier
                )
                org = internal_org_id.referenced_org
                filename = model_and_instance_based_filename("Organisation", org.pk, "logo", logo_name)
                with open(os.path.join(logo_dir, logo_name), "rb") as f:
                    logo_data = f.read()
                logo_tmp = NamedTemporaryFile(delete=True)
                logo_tmp.write(logo_data)
                logo_tmp.flush()
                org.logo.save(filename, File(logo_tmp), save=True)
                action = "succeeded"
            except Exception, e:
                action = "failed"
                error_message = "with the following error message: {error_message}".format(error_message=e.message)
            report_data[action] += 1
            # NOTE(review): if the ID lookup above raised, `org` is unbound
            # (or left over from a previous iteration) here — verify.
            log_and_alert(
                u"Upload of image to organisation {org_id} {action} {error_message}".format(
                    org_id=org.id, action=action, error_message=error_message
                )
            )
def _update_image(facebook_id, image_url):
    '''
    Updates the user profile's image to the given image url
    Unfortunately this is quite a pain to get right with Django
    Suggestions to improve this are welcome
    '''
    image_name = 'fb_image_%s.jpg' % facebook_id
    image_temp = NamedTemporaryFile()
    # Dual Python 2/3 path: urllib2 on py2, urllib.request on py3.
    # NOTE(review): catching AttributeError here implies `urllib2` resolves
    # to something without urlopen on py3 — confirm the import shim.
    try:
        image_response = urllib2.urlopen(image_url)
    except AttributeError:
        image_response = urllib.request.urlopen(image_url)
    image_content = image_response.read()
    image_temp.write(image_content)
    http_message = image_response.info()
    image_size = len(image_content)
    # py2 mimetools.Message exposes .type; py3 email.message.Message uses
    # get_content_type().
    try:
        content_type = http_message.type
    except AttributeError:
        content_type = http_message.get_content_type()
    image_file = InMemoryUploadedFile(
        file=image_temp, name=image_name, field_name='image',
        content_type=content_type, size=image_size, charset=None
    )
    # Rewind so the consumer reads the file from the start.
    image_file.seek(0)
    image_temp.flush()
    return image_name, image_file
def saveProject(self, args):
    # Create a Project from scraped data unless one with the same name and
    # URL already exists (returns None in that case). Downloads the project
    # image when a URL is provided. (Python 2 code: `print u'...'`.)
    print u'%s' % args
    try:
        obj = Project.objects.get(project_name = args['name'],url = args['url'])
        return None
    except Project.DoesNotExist:
        obj = Project(
            submitted_event = self.event,
            project_name = args['name'],
            short_description = args['desc'],
            url = args['url'],
            project_type = args['type']
        )
        img_temp = NamedTemporaryFile(delete=True)
        if args['image']:
            img_temp.write(urllib2.urlopen(args['image']).read())
            img_temp.flush()
            # Rewind so the File wrapper reads from the beginning.
            img_temp.seek(0)
            # Use the last path component of the image URL as the filename.
            img_filepath = urlparse(args['image']).path.split('/')[-1]
            obj.image.save(img_filepath, File(img_temp))
            obj.save()
        else :
            obj.save()
        return obj
def do_export(self):
    """
    Does actual export. Called from a celery task.
    """
    workbook = Workbook()
    self.render_book(workbook)

    # Spill the workbook into a temp file so it can be handed to storage.
    tmp = NamedTemporaryFile(delete=True)
    workbook.save(tmp)
    tmp.flush()

    org_root = getattr(settings, 'SITE_ORGS_STORAGE_ROOT', 'orgs')
    filename = '%s/%d/%s/%s.xls' % (org_root, self.org_id, self.directory, random_string(20))
    default_storage.save(filename, File(tmp))

    self.filename = filename
    self.save(update_fields=('filename',))

    subject = "Your export is ready"
    download_url = self.org.make_absolute_url(reverse(self.download_view, args=[self.pk]))
    send_email([self.created_by], subject, 'utils/email/export', {'download_url': download_url})

    # force a gc
    import gc
    gc.collect()
def upload_image_url(request):
    """Download the image at POST['image_url'] and attach it to a new
    Post_photo; returns the stored photo's URL as JSON."""
    if request.method != 'POST':
        return HttpResponse(status=403)
    image_url = request.POST.get('image_url', None)
    source_domain = request.POST.get('source_domain', None)
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        # 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        # 'Accept-Encoding': 'none',
        # 'Accept-Language': 'en-US,en;q=0.8',
        # 'Connection': 'keep-alive',
        'referer': source_domain,
    }
    # BUG FIX: guess_type returns (None, None) for unrecognised URLs and
    # guess_extension(None) raises; fall back to no extension in that case.
    mime_type = mimetypes.guess_type(image_url)[0]
    ext = mimetypes.guess_extension(mime_type) if mime_type else ''
    req = urllib2.Request(image_url, headers=headers)
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(urllib2.urlopen(req).read())
    img_temp.flush()
    post_photo = Post_photo()
    # Random UUID filename avoids collisions between uploads.
    post_photo.photo.save('%s%s'%(uuid.uuid4(), ext), File(img_temp))
    post_photo.save()
    res = {
        'link': post_photo.photo.url,
    }
    return JsonResponse(res, safe=False)
def get_cached(cls, key):
    """
    Look the image up in the in-memory cache; on a miss, load it from the
    database and materialise it as a temporary file on disk.

    * key: the image's cache key

    Returns the temp-file path, or None when the image does not exist or
    is empty.
    """
    # BUG FIX (portability): dict.has_key() was removed in Python 3; use
    # the `in` operator instead (works on Python 2 as well).
    if key not in cls._cache:
        obj = ImageConstant.objects.with_id(key)
        if not obj:
            return
        f = NamedTemporaryFile(
            delete=False, suffix='.%s' % obj.image.format)
        buf = obj.image.read()
        if not buf:
            return
        f.write(buf)
        # Keep a reference to the handle so the file object stays alive.
        cls._open_images[key] = f
        f.flush()
        cls._cache[key] = f.name
    return cls._cache[key]
def save(self, *args, **kwargs):
    # On first save, download the pin's source image (from self.url) and
    # build a 200px-wide thumbnail; both are stored under a random hex name.
    # Python 2 only: str.encode('hex') does not exist on Python 3.
    hash_name = os.urandom(32).encode('hex')
    if not self.image:
        temp_img = NamedTemporaryFile()
        temp_img.write(urllib2.urlopen(self.url).read())
        temp_img.flush()
        # Re-encode whatever was downloaded as JPEG in place.
        image = Image.open(temp_img.name)
        image.save(temp_img.name, 'JPEG')
        self.image.save(''.join([hash_name, '.jpg']), File(temp_img))
    if not self.thumbnail:
        if not self.image:
            # NOTE(review): after the branch above, self.image is always set,
            # so this path looks unreachable in practice — verify.
            image = Image.open(temp_img.name)
        else:
            # Persist first so self.image.path exists on disk.
            super(Pin, self).save()
            image = Image.open(self.image.path)
        size = image.size
        # Scale to a fixed 200px width, preserving aspect ratio.
        prop = 200.0 / float(image.size[0])
        size = (int(prop*float(image.size[0])), int(prop*float(image.size[1])))
        image.thumbnail(size, Image.ANTIALIAS)
        temp_thumb = NamedTemporaryFile()
        image.save(temp_thumb.name, 'JPEG')
        self.thumbnail.save(''.join([hash_name, '.jpg']), File(temp_thumb))
    super(Pin, self).save(*args, **kwargs)
def fetch_photos_from_msg(self, album, msg=None):
    # Process one "awaiting" Message: pull the next page of a Facebook
    # album's photos (cursor comes from msg.next_page), download each photo,
    # and enqueue a new Message for the following page.
    u = album.user
    token = get_access_token(u)
    graph = facebook.GraphAPI(token)
    if msg.status == 'awaiting':
        # Extract the `after` paging cursor from the stored next-page URL.
        parts = urlparse.urlparse(msg.next_page)
        qs = urlparse.parse_qs(parts.query)
        after = qs.get('after')[0]
        photos = graph.get_object(album.fb_album_id + "/photos", fields='id,source', limit=2, after=after)
        # Queue the next page before processing this one.
        new_next_page = photos.get('paging').get('next')
        new_msg = Message.objects.create(next_page=new_next_page, user=u, status='awaiting')
        for photo in photos.get('data'):
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(urlopen(photo.get('source')).read())
            img_temp.flush()
            photo_object = Photo.objects.create(title=photo.get('id'), description=photo.get('created_time'),
                                                album=album, file=File(img_temp))
            pprint(photo_object.filename)
            self.stdout.write('Successfully fetched photo for source "%s"\n' % photo.get('source'))
        msg.status = 'done'
        msg.save()
        self.stdout.write('Finished this queue "%s"\n' % new_msg.next_page)
def image(self):
    """Return the processed PIL image for self.remote_path.

    Uses the locally cached copy when present (and caching is enabled);
    otherwise downloads, scales/crops, converts colorspace, re-encodes,
    and stores the result. Raises a generic Exception when the download
    itself fails.
    """
    try:
        r = requests.get(self.remote_path)
    except Exception as e:
        raise Exception('Exception %s raised '
                        'during loading image %s' % (e, self.remote_path))
    if self.storage.exists(self.full_name) and self.if_cache:
        # Cached copy exists: reload and re-encode it only.
        im = Image.open(self.storage.path(self.full_name))
        im = processors.save_image(im, format=self.file_extension)
    else:
        img_temp = NamedTemporaryFile(delete=True)
        img_temp.write(r.content)
        img_temp.flush()
        im = Image.open(img_temp.name)
        # Fixed idiom: `not x is None` -> `x is not None`.
        if self.final_size is not None:
            im = processors.scale_and_crop(
                im, self.final_size, self.method)
        im = processors.colorspace(im)
        im = processors.save_image(im, format=self.file_extension)
        self.storage.save(self.full_name, im)
    return im
def get_image_from_url(url):
    """
    Get and save images from urls
    """
    response = requests.get(url)
    tmp = NamedTemporaryFile(delete=True)
    tmp.write(response.content)
    tmp.flush()
    return File(tmp)
def create_thumbnail(model_instance):
    """Build a Wagtail image from the video's thumbnail URL and attach it
    (tagged 'video-thumbnail') to model_instance."""
    # CREATING IMAGE FROM THUMBNAIL
    backend = detect_backend(model_instance.url)
    thumbnail_url = backend.get_thumbnail_url()
    if backend.__class__.__name__ == 'YoutubeBackend':
        # hqdefault.jpg is YouTube's fallback; probe for better resolutions.
        if thumbnail_url.endswith('hqdefault.jpg'):
            for resolution in YOUTUBE_RESOLUTIONS:
                temp_thumbnail_url = thumbnail_url.replace(
                    'hqdefault.jpg', resolution)
                if checkUrl(temp_thumbnail_url):
                    thumbnail_url = temp_thumbnail_url
                    break
    img_temp = NamedTemporaryFile(delete=True)
    try:
        img_temp.write(urllib2.urlopen(thumbnail_url).read())
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
        # SystemExit are no longer swallowed; on any fetch error fall back
        # to urllib3.
        http = urllib3.PoolManager()
        img_temp.write(http.request('GET', thumbnail_url).data)
    img_temp.flush()
    image = WagtailImage(title=model_instance.title)
    image.file.save(model_instance.title + '.jpg', File(img_temp))
    model_instance.thumbnail = image
    model_instance.thumbnail.tags.add('video-thumbnail')
    model_instance.save()
def handle(self, *args, **options):
    """Fetch the Django Girls blog RSS feed and store each new post/story."""
    rss_url = 'http://blog.djangogirls.org/rss'
    response = requests.get(rss_url)
    rss = ElementTree.fromstring(response.content)
    for item in rss.iter('item'):
        title = item.find('title').text
        # "Your Django Story: Meet X" posts are stories; anything else is a
        # plain blog post.
        if 'Your Django Story: Meet' in title:
            name = title.replace('Your Django Story: Meet ', '')
            is_story = True
        else:
            name = title
            is_story = False
        if Story.objects.filter(name=name).exists():
            continue
        post_url = item.find('link').text
        content = pq(item.find('description').text)
        image_url = content('img').attr.src
        story = Story(name=name, post_url=post_url, content=content, is_story=is_story)
        if image_url:
            img = NamedTemporaryFile(delete=True)
            img.write(urlopen(image_url).read())
            img.flush()
            story.image.save(image_url.split('/')[-1], File(img))
        story.save()
        if is_story:
            print('Story of %s has been fetched' % name)
        else:
            print('Blogpost "%s" has been fetched' % name)
def post(self, request, *args, **kwargs):
    """Attach remote files (and, when available, their thumbnails) to the
    parent object and return a JSON status response."""
    parent = self.get_object()
    files = json.loads(request.POST.get('files[]'))
    need_upload = request.POST.get('need_upload') == 'true'
    for f in files:
        obj = self.model(parent=parent)
        if need_upload:
            r = requests.get(f.get('link'), stream=True)
            img_temp = NamedTemporaryFile(delete=True)
            for chunk in r.iter_content(8192):
                img_temp.write(chunk)
            obj.file.save(f.get('name'), File(img_temp))
            img_temp.flush()
            thumbnail_link = f.get('thumbnailLink')
            # BUG FIX: the .replace()/GET used to run unconditionally before
            # checking thumbnailLink, raising AttributeError when it is None.
            if thumbnail_link:
                link = thumbnail_link.replace('bounding_box=75', 'bounding_box=256')
                r = requests.get(link, stream=True)
                img_temp = NamedTemporaryFile(delete=True)
                for chunk in r.iter_content(8192):
                    img_temp.write(chunk)
                obj.thumbnail.save(f.get('name'), File(img_temp))
                img_temp.flush()
        if f.get('thumbnailLink'):
            obj.outer_thumbnail_url = f.get('thumbnailLink')
        obj.content_type = mimetypes.guess_type(f.get('name'))[0]  # or 'image/png',
        obj.outer_url = f.get('link')
        obj.save()
    response = JSONResponse({'status': 'ok'}, mimetype=response_mimetype(request))
    response['Content-Disposition'] = 'inline; filename=files.json'
    return response
def multiple_dna(*args):
    """
    List of tuples: (seq_name, seq_frame, seq)
    """
    # Align several DNA sequences with MAFFT (E-INS-i-style options);
    # negative-frame sequences are reverse-complemented first and names get
    # a (+)/(-) strand suffix. Returns the clustal-format alignment text.
    seq_name_lengths = []
    input_file = NamedTemporaryFile(prefix="mafft_")
    for arg in args:
        seq_name, seq_frame, seq = arg
        if seq_frame < 0:
            seq_name = "%s(%s)" % (seq_name, "-")
            # NOTE(review): .tostring() is the legacy Biopython Seq API
            # (removed in modern releases) — this targets old Biopython.
            seq = Seq(seq).reverse_complement().tostring()
        elif seq_frame > 0:
            seq_name = "%s(%s)" % (seq_name, "+")
        input_file.write(">%s\n%s\n" % (seq_name, seq.upper()))
        seq_name_lengths.append(len(seq_name))
    input_file.flush()
    # --namelength must cover the longest name plus the strand suffix.
    namelength = max(seq_name_lengths) + 4
    mafft_cmd = (
        "mafft --genafpair --maxiterate 1000 --preservecase --clustalout --namelength "
        + str(namelength)
        + " "
        + input_file.name
    )
    mafft_proc = Popen(mafft_cmd, stdout=PIPE, stderr=PIPE, shell=True)
    stdout, stderr = mafft_proc.communicate()
    input_file.close()
    return stdout
def post(self, request, *args, **kwargs):
    """Download the file at POST['url'] (optionally over Tor) and store it
    in the current project's database, tagged with the given tag list."""
    # Set Project
    project = request.POST.get('project', 'default')
    open_db(project)
    url = request.POST.get('url')
    tags = request.POST.get('tag_list')
    tags = "url," + tags
    if request.POST.get('tor'):
        downloaded_file = network.download(url, tor=True)
    else:
        downloaded_file = network.download(url, tor=False)
    if downloaded_file is None:
        messages.error(request, "server can't download from URL")
        return redirect(reverse("main-page-project", kwargs={"project": project}))
    tf = NamedTemporaryFile()
    tf.write(downloaded_file)
    # BUG FIX: removed the original `if not tf:` re-check here —
    # NamedTemporaryFile objects are always truthy, so that error branch
    # could never execute (download failure is already handled above).
    tf.flush()
    sha_256 = add_file(tf.name, name=url.split('/')[-1], tags=tags)
    if sha_256:
        messages.success(request, "stored file in database: {}".format(tf.name))
        return redirect(reverse('main-page-project', kwargs={'project': project}))
    else:
        messages.error(request, "Unable to Store The File, already in database")
        return redirect(reverse("main-page-project", kwargs={"project": project}))
def save_to_model(file_field, file_name):
    """Copy a file from MEDIA_ROOT onto *file_field* (without saving the
    model), then remove the source file."""
    img_temp = NamedTemporaryFile(delete=True)
    # BUG FIX: open the source in binary mode ('rb') — text mode corrupts
    # binary data and fails outright on Python 3 for non-UTF-8 content.
    # The `with` block also closes the handle the original leaked.
    with open(os.path.join(settings.MEDIA_ROOT, file_name), 'rb') as src:
        img_temp.write(src.read())
    img_temp.flush()
    file_field.save(os.path.basename(file_name), File(img_temp), save=False)
    # delete files after saving in models
    delete_file(file_name)
def get_image_cover(self, path):
    """Download a cover image from stratege.ru and return a
    (filename, tempfile) pair."""
    response = requests.get(u"http://stratege.ru{}".format(path))
    tmp = NamedTemporaryFile(delete=True)
    tmp.write(response.content)
    tmp.flush()
    return path.split("/")[-1], tmp
def do_import(self):
    """
    :return: List; contains fields that have changed
    """
    from . import same_data
    changes = []
    # Pull the project photo out of the IATI zip archive by its photo-id.
    photo_id = self.get_attrib(self.parent_elem, akvo_ns('photo-id'), 'current_image')
    current_image = file_from_zip_archive(
        self.iati_import_job.iati_xml_file, "out_proj/{}.jpg".format(photo_id))
    if current_image:
        # Materialise the zip member as a temp file so it can be wrapped in
        # a Django File.
        tmp_file = NamedTemporaryFile()
        for line in current_image.readlines():
            tmp_file.write(line)
        tmp_file.flush()
        # update current image if it's different from the existing one
        try:
            old_file = self.project.current_image.file
        except (IOError, ValueError):
            # No existing image (or file missing on disk).
            old_file = None
        new_file = File(tmp_file)
        if not same_data(old_file, new_file):
            filename = model_and_instance_based_filename(
                'Project', self.project.pk, 'current_image', 'image.jpg')
            # Rewind: same_data() has consumed the file.
            new_file.seek(0)
            self.project.current_image.save(filename, new_file)
            changes += ['current_image']
    current_image_caption = self.get_attrib(self.parent_elem, akvo_ns('image-caption'),
                                            'current_image_caption')
    if current_image_caption:
        changes += self.update_project_field('current_image_caption', current_image_caption)
    current_image_credit = self.get_attrib(self.parent_elem, akvo_ns('photo-credit'),
                                           'current_image_credit')
    if current_image_credit:
        changes += self.update_project_field('current_image_credit', current_image_credit)
    return changes
def photos_list(request, id):
    # Show a Facebook album's photos; on POST, import the selected photos
    # into the user's local Photo collection and redirect back.
    func_return = _init_facebook_app(request)
    # _init_facebook_app returns False when no redirect/other response is
    # needed; anything else is returned to the client directly.
    if (func_return != False):
        return func_return
    try:
        graph, user, sys_user = _get_facebook_app(request)
        if sys_user is None:
            return HttpResponseRedirect("/facebook/setup/")
    except facebook.GraphAPIError:
        # Stale token: drop it and bounce back to the app canvas page.
        del request.session["access_token"]
        return HttpResponseRedirect("http://apps.facebook.com/" + settings.FACEBOOK_APP_ID + "/")
    if request.method == 'POST':
        photos_ids = request.POST.getlist("object_photos")
        for photo_id in photos_ids:
            photo = graph.get_object(photo_id)
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(urllib2.urlopen(photo["source"]).read())
            img_temp.flush()
            user_photo = Photo(user=request.user, )
            # NOTE(review): the "extension" here is everything after the last
            # dot of the source URL and may include query parameters — verify.
            user_photo.image.save(
                photo["id"] + "." + (photo["source"].split(".")[-1]), File(img_temp))
            user_photo.save()
        if len(photos_ids) > 0:
            _add_flash_message(request, "Your photos has been added")
        return HttpResponseRedirect(reverse('facebook_photos_list', args=[id]))
    album = graph.get_object(id)
    photos = graph.get_connections(album["id"], "photos")
    return dict(
        user=user,
        album=album,
        photos=photos,
        message=_get_flash_message(request),
    )
def generate_batch(self):
    # Build a SEPA direct-debit (pain.008.001.02) XML batch from this
    # batch's successful results, stamp invoice data on each payment, store
    # the XML on self.sepa_file and mark the payments as debited.
    sepa = SepaDD(settings.SEPA_CONFIG, schema="pain.008.001.02", clean=True)
    payments = []
    for batch_result in SepaBatchResult.objects.filter(batch=self):
        payment = batch_result.payment
        if batch_result.success:
            pay = {
                "name": payment.account.display_name,
                "IBAN": payment.account.iban_code,
                # SEPA amounts are expressed in cents.
                "amount": int(payment.amount * 100),
                "BIC": batch_result.bic_code,
                "type": "RCUR",
                "collection_date": datetime.date.today(),
                "mandate_id": payment.account.cif,
                "execution_date": datetime.date.today(),
                "mandate_date": datetime.date.today(),
                "description": payment.concept,
                # End-to-end id must not contain dashes.
                "endtoend_id": str(payment.reference).replace('-', ''),
            }
            sepa.add_payment(pay)
            payments.append(batch_result.payment)
            payment.invoice_prefix = self.invoice_prefix
            payment.invoice_number = batch_result.invoice_number
            payment.invoice_date = self.attempt
            payment.save()
    if (len(payments) > 0):
        sepa_xml = sepa.export(validate=True)
        # NOTE(review): NamedTemporaryFile is binary-mode — confirm
        # sepa.export() returns bytes, not str.
        xml_temp = NamedTemporaryFile()
        xml_temp.write(sepa_xml)
        xml_temp.flush()
        self.sepa_file.save(f"sepa_batch_{self.pk}.xml", File(xml_temp))
        self.save()
        # We update the payments date
        for payment in payments:
            #payment.added = datetime.datetime.now()
            payment.type = DEBIT
            payment.save()
def before_import_row(self, row, row_number=None, **kwargs):
    # Normalise import-row headers to snake_case, title-case the subject
    # list (creating missing Subject rows), resolve the profile picture
    # against the static images folder and replace it with a downloaded
    # File object before the row is imported.
    new_row = OrderedDict()
    for item in row.items():
        old_key = item[0]
        new_key = str(old_key).lower().replace(' ', '_')
        # Only recognised export columns get the normalised key.
        if str(item[0]).lower().replace(' ', '_') in self._meta.export_order:
            new_row[new_key] = item[1]
        else:
            new_row[old_key] = item[1]
    subjects = [
        s.strip().title() for s in new_row.get('subjects_taught').split(',')
    ]
    new_row['subjects_taught'] = ','.join(subjects)
    row = new_row
    images_dir = STATICFILES_DIRS[0] + '/images'
    image_list = []
    for file in os.listdir(images_dir):
        image_list.append(file)
    # Create subjects if not found
    for subject in subjects:
        Subject.objects.get_or_create(name__iexact=subject, defaults={'name': subject})
    images_folder_path = '%s/media/static/images/' % SITE_URL
    # NOTE(review): images_folder_path already ends with '/', so the
    # '%s/%s' joins below produce a double slash — verify the URLs resolve.
    if str(row['profile_picture']).strip(
    ) and row['profile_picture'] in image_list:
        row['profile_picture'] = "%s/%s" % (images_folder_path,
                                            row['profile_picture'])
    else:
        row['profile_picture'] = "%s/placeholder.png" % images_folder_path
    #
    # --------------------------------------
    # Generate temporary file and download image from provided URL
    tmp_file = NamedTemporaryFile(delete=True, dir=f'{settings.MEDIA_ROOT}')
    tmp_file.write(requests.get(row['profile_picture']).content)
    tmp_file.flush()
    image_name = row['profile_picture'].split('/')[-1]
    # Add file object to row
    row['profile_picture'] = File(tmp_file, image_name)
    return row
def get_product_description(link, category):
    # Scrape a tainabox.com.ua product page and persist each product (name,
    # price, composition, image) unless it contains a stop word.
    URL = 'https://tainabox.com.ua'
    print('Start importing from %s' % URL + link)
    # rez = requests.get(URL + link, verify=False)
    rez = requests.get(URL + link)
    soup = BeautifulSoup(rez.text, 'html.parser')
    for desc in soup.findAll('div', {'class': 'product__big-item'}):
        image = desc.find('div', {
            'class': 'product__big-item_right'
        }).find('img')
        consist = desc.find('div', {'class': 'product__item__composition__value'})
        price = desc.find('div', {'class': 'to-order__value'})
        name = desc.find('div', {'product__big-item__name'})
        # Skip products whose name or composition contains a stop word.
        in_stop = False
        for w in STOP_WORDS:
            if name.text.find(w) > -1:
                in_stop = True
            if consist.text.find(w) > -1:
                in_stop = True
        if not in_stop:
            p = Product()
            p.name = re.sub('\n', '', name.text)
            # Keep only the first line of the price text.
            p.price = re.split(r'\n', price.text)[0]
            p.consist = consist.text
            p.category = category
            img_url = URL + image['src']
            img_temp = NamedTemporaryFile(delete=True)
            # Spoof a browser User-Agent; the site blocks default clients.
            req = urllib.request.Request(
                img_url,
                data=None,
                headers={
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
                })
            img_temp.write(urllib.request.urlopen(req).read())
            img_temp.flush()
            # Reuse the source image's extension for the stored file.
            img_res = re.split(r'\.', image['src'])
            p.image.save("image_.{}".format(img_res[-1]), File(img_temp))
            p.save()
def get_file_from_url(url):
    """Download *url* into a temporary file and return it wrapped in a
    Django File.

    Returns None when the download fails (the error is logged, not raised).
    """
    log.info('try to get file from url: %s' % url)
    file_obj = None
    try:
        response = urllib2.urlopen(url)
        temp_file = NamedTemporaryFile(delete=True)
        temp_file.write(response.read())
        temp_file.flush()
        file_obj = File(temp_file)
    except Exception as e:
        # Collapsed the original's redundant nested try/except pair (both
        # layers did exactly the same logging).
        log.warning('%s' % e)
    # BUG FIX: the original never returned file_obj, so callers always got
    # None even on a successful download.
    return file_obj
def start(self, image_url, **kwargs):
    """Submit the image at *image_url* to the plate-solving service and
    return the submission id, or 0 on failure."""
    self.login(settings.ASTROMETRY_NET_API_KEY)
    response = requests.get(
        image_url,
        verify=False,
        allow_redirects=True,
        headers={'User-Agent': 'Mozilla/5.0'},
    )
    tmp = NamedTemporaryFile(delete=True)
    tmp.write(response.content)
    tmp.flush()
    tmp.seek(0)
    result = self.upload(File(tmp), **kwargs)
    if result['status'] == 'success':
        return result['subid']
    return 0
def create_pdf(self):
    # Transform the XML at self.temppath to HTML via the XSLT stylesheet,
    # then render the HTML to PDF with headless Chrome; the resulting PDF
    # temp file is kept on self.newfile.
    xslt_root = etree.parse(STYLESHEET)
    transform = etree.XSLT(xslt_root)
    payload = etree.parse(self.temppath)
    temp = NamedTemporaryFile()
    temp_html = NamedTemporaryFile(suffix='.html')
    html_file = str(transform(payload))
    temp_html.write(html_file.encode())
    temp_html.flush()
    cpdf = ChromePDF(PATH_TO_CHROME_EXE)
    cpdf._chrome_options = [
        '--headless', '--no-gpu', '--print-to-pdf-no-header'
    ]
    # NOTE(review): html_to_pdf receives the HTML *string*, while temp_html
    # is written but apparently never passed on — confirm against the
    # ChromePDF API whether temp_html.name should be used instead.
    cpdf.html_to_pdf(html_file, temp)
    temp.flush()
    temp_html.close()
    # Keep a reference so the PDF temp file is not garbage-collected.
    self.newfile = temp
    return None
def get_image_for_issue(service, issue):
    """Fetch the project logo for a Google Code issue.

    Returns (filename, File) on success, or (False, False) when the service
    is not Google Code or the logo cannot be fetched.
    """
    if service.name != "Google Code":
        # BUG FIX: the original fell through to img_temp.flush() with
        # img_temp undefined (NameError) and implicitly returned None,
        # which broke 2-tuple unpacking at call sites.
        return False, False
    image_path = "/p/" + issue['project'] + "/logo"
    img_temp = NamedTemporaryFile()
    try:
        url_opened = urllib2.urlopen("http://" + service.domain + image_path)
    except Exception:
        return False, False
    if url_opened:
        buffer = url_opened.read()
        if len(buffer) > 0:
            img_temp.write(buffer)
            img_temp.flush()
            return issue['project'] + ".jpg", File(img_temp)
    # Empty response body: treat as a failed fetch.
    return False, False
def write_temp(self, payload):
    """Write a temporary file to disk, necessary for certain payload
    types that are not stored on file.

    Args:
        payload (bytes): ingested payload

    Returns:
        NamedTemporaryFile: flushed temporary file containing *payload*
    """
    logger.debug(
        f"{self.processor} importer: creating temporary file"
    )
    tmp = NamedTemporaryFile()
    tmp.write(payload)
    tmp.flush()
    return tmp
def value_from_datadict(self, data, files, name):
    """Return the regular uploaded file, or — when a '<name>_url' value was
    posted — download that URL and return it as an InMemoryUploadedFile.

    Falls back to the regular upload whenever the download fails.

    Fix: the bare ``except:`` is narrowed to ``except Exception`` so that
    SystemExit/KeyboardInterrupt are no longer swallowed; the best-effort
    fallback behaviour is otherwise unchanged.
    """
    upload = super(UrlOrFileInput, self).value_from_datadict(data, files, name)
    url = data.get('%s_url' % name, None)
    if url:
        #TODO support for non-jpgs (via PIL?)
        img_temp = NamedTemporaryFile(delete=True)
        try:
            img_temp.write(urllib2.urlopen(url).read())
        except Exception:
            return upload
        img_temp.flush()
        img_temp.file.seek(0)
        size = os.fstat(img_temp.file.fileno()).st_size
        return InMemoryUploadedFile(img_temp, None, img_temp.name + '.jpg',
                                    'image/jpeg', size, None)
    return upload
def fb_profile_image_save(login):
    """
    Take the user id from the Facebook user info and use it to fetch the
    profile picture, requesting the large size variant.
    Save the image onto the user's profile.
    :return: login
    """
    image_url = 'https://graph.facebook.com/' + login.user.username + '/picture?type=large&width=200&height=200'
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(urlopen(image_url).read())
    img_temp.flush()
    try:
        login.user.profile_image.save(
            'image_{}.{}'.format(login.user.username, 'jpg'), File(img_temp))
    except IntegrityError as IE:
        # Saving may violate a DB constraint; log it and return login anyway.
        print(IE)
    return login
def image(self):
    """Return the web image field, lazily populating it from ``s3_url`` on
    first access.

    When ``image_url`` is empty and ``s3_url`` holds an http(s) location,
    the original file's contents are downloaded and saved into
    ``image_url``; a non-http ``s3_url`` name is copied across as-is. The
    model is saved after either population path.

    Fix: the original ``else`` branch dereferenced ``self.s3_url.name``
    unconditionally, raising AttributeError whenever ``s3_url`` was None;
    that case is now skipped.
    """
    if self.image_url is None or self.image_url.name is None or self.image_url.name == '':
        if self.s3_url is not None and self.s3_url.name is not None and self.s3_url.name.startswith(
                'http'):
            # We need to copy the contents of original file to new one used by web
            image_name = self.s3_url.name.split('/')[-1]
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(urllib2.urlopen(self.s3_url.name).read())
            img_temp.flush()
            self.image_url.save(image_name, File(img_temp))
        elif self.s3_url is not None:
            self.image_url = self.s3_url.name
        self.save()
    return self.image_url
def home(request):
    # Main admin page: lists offers and handles two POST actions —
    # "upload" downloads missing offer photos from their source URLs;
    # otherwise an uploaded price file is validated and imported.
    offers = Offers.objects.all().order_by("-created")
    field = [f.name for f in Offers._meta.get_fields()]
    if request.POST:
        if "upload" in request.POST:
            for i in offers:
                # Only fetch when there is a source URL but no stored photo yet.
                if i.offer_image_url and not i.offer_photo:
                    r = requests.get(i.offer_image_url, verify=False)
                    if r.status_code == requests.codes.ok:
                        img_temp = NamedTemporaryFile()
                        img_temp.write(r.content)
                        img_temp.flush()
                        # Filename comes from the URL path, leading '/' stripped.
                        img_filename = urlsplit(i.offer_image_url).path[1:]
                        i.offer_photo.save(img_filename, File(img_temp),
                                           save=True)
                        continue
            messages.success(request, "Фото загружено")
            return render(request, 'home.html', locals())
        else:
            try:
                file = request.FILES['file']
                format_file = request.POST.get("file_format", False)
                # Reject files whose extension does not match the chosen format.
                if file.name.split(".")[-1].lower() != format_file:
                    messages.error(request, "Формат файла не подходит!")
                else:
                    uploading_file = UploadingProducts({
                        'file': file,
                        'format_file': format_file
                    })
                    if uploading_file.parsing():
                        messages.success(
                            request, "Загружено и обновлено. {}".format(
                                uploading_file.add
                                if uploading_file.add else ""))
                    else:
                        messages.error(
                            request,
                            "Ошибка. Нет поля: {}".format(uploading_file.err))
            except MultiValueDictKeyError:
                messages.error(request, "Выберите файл!")
    return render(request, 'home.html', locals())
def _process_message(self, message):
    # Convert a parsed email.message.Message into our Message model and
    # store its non-skipped attachments as MessageAttachment records.
    msg = Message()
    msg.mailbox = self
    msg.subject = message['subject'][0:255]  # truncate to column length
    msg.message_id = message['message-id'][0:255]
    msg.from_header = message['from']
    msg.to_header = message['to']
    message = self._filter_message_body(message)
    msg.body = message.as_string()
    if message['in-reply-to']:
        try:
            msg.in_reply_to = Message.objects.filter(
                message_id=message['in-reply-to'])[0]
        except IndexError:
            # Parent message not (yet) in the DB; leave in_reply_to unset.
            pass
    msg.save()
    if message.is_multipart():
        for part in message.walk():
            # Containers themselves carry no payload worth storing.
            if part.get_content_maintype() == 'multipart':
                continue
            if part.get('Content-Disposition') is None:
                continue
            filename = part.get_filename()
            # ignore SMIME extension
            # NOTE(review): get_filename() can return None for anonymous
            # parts, which would crash splitext/len below — confirm the
            # Content-Disposition filter above guarantees a filename.
            filename_basename, filename_extension = os.path.splitext(filename)
            buffer_space = 40
            if len(filename) > 100 - buffer_space:
                # Ensure that there're at least a few chars available afterward
                # for duplication things like _1, _2 ... _99 and the FileField's
                # upload_to path.
                filename_basename = filename_basename[0:100 - len(filename_extension) - buffer_space]
                filename = filename_basename + filename_extension
            if filename_extension in SKIPPED_EXTENSIONS:
                continue
            data = part.get_payload(decode=True)
            if not data:
                continue
            temp_file = NamedTemporaryFile(delete=True)
            temp_file.write(data)
            temp_file.flush()
            attachment = MessageAttachment()
            attachment.document.save(filename, File(temp_file))
            attachment.save()
            msg.attachments.add(attachment)
    return msg
def setup_profile(request): """Display page 4 (profile) of the first login wizard.""" # Fields with file uploads are not AJAX requests. if request.method == "POST": form = ProfileForm(request.POST, user=request.user) profile = request.user.profile if form.is_valid(): profile.name = form.cleaned_data["display_name"].strip() if not profile.setup_profile: profile.setup_profile = True profile.add_points(score_mgr.setup_points(), datetime.datetime.today(), "Set up profile") profile.save() if form.cleaned_data["pic_method"] == 0: name = request.user for avatar in Avatar.objects.filter(user=name): avatar.delete() elif form.cleaned_data["pic_method"] == 2 and form.cleaned_data["facebook_photo"]: # Need to download the image from the url and save it. photo_temp = NamedTemporaryFile(delete=True) fb_url = form.cleaned_data["facebook_photo"] photo_temp.write(urllib2.urlopen(fb_url).read()) photo_temp.flush() photo_temp.seek(0) path = avatar_file_path(user=request.user, filename="fb_photo.jpg") avatar = Avatar( user=request.user, primary=True, avatar=path, ) avatar.avatar.storage.save(path, File(photo_temp)) avatar.save() return HttpResponseRedirect(reverse("setup_activity")) return _get_profile_form(request, form=form, non_xhr=False) return _get_profile_form(request)
def add_twitter_image_to_queue(self, person, image_url, user_id):
    # Queue a person's Twitter avatar for human moderation, unless they
    # already have any queued image.
    if person.queuedimage_set.exists():
        # Don't add an image to the queue if there is one already
        # in the queue. It doesn't matter if that queued image has
        # been moderated or not, or whether it's been rejected or
        # not. At the moment we just want to be really careful not
        # to make people check the same Twitter avatar twice.
        verbose(
            _(" That person already had an image in the queue, so skipping."
              ))
        return
    verbose(
        _(" Adding that person's Twitter avatar to the moderation queue"))
    # Add a new queued image
    # Request the full-size avatar instead of the small "_normal" variant.
    image_url = image_url.replace('_normal.', '.')
    img_temp = NamedTemporaryFile(delete=True)
    r = requests.get(image_url)
    if r.status_code != 200:
        msg = _(" Ignoring an image URL with non-200 status code "
                "({status_code}): {url}")
        verbose(msg.format(status_code=r.status_code, url=image_url))
        return
    img_temp.write(r.content)
    img_temp.flush()
    # Trying to get the image extension checks that this really is
    # an image:
    if get_image_extension(img_temp.name) is None:
        msg = _(" The image at {url} wasn't of a known type")
        verbose(msg.format(url=image_url))
        return
    justification_for_use = "Auto imported from Twitter: " \
        "https://twitter.com/intent/user?user_id={user_id}".format(
            user_id=user_id)
    qi = QueuedImage(decision=QueuedImage.UNDECIDED,
                     why_allowed=CopyrightOptions.PROFILE_PHOTO,
                     justification_for_use=justification_for_use,
                     person=person)
    qi.save()
    # NOTE(review): the full URL is used as the stored filename here —
    # confirm this is intentional rather than a basename.
    qi.image.save(image_url, File(img_temp))
    qi.save()
def save(self, *args, **kwargs):
    """Persist the film; when the record is new, or its slug/poster URL
    changed, regenerate the slug, re-download the IMDB poster, and notify
    via self.nouveau() after saving."""
    if self.pk is None:
        needs_refresh = True
    else:
        previous = Film.objects.get(pk=self.pk)
        needs_refresh = (previous.slug != self.slug
                         or previous.imdb_poster_url != self.imdb_poster_url)
    if needs_refresh:
        self.slug = slugify(self.titre)
        poster = requests.get(self.imdb_poster_url)
        if poster.status_code == requests.codes.ok:
            tmp = NamedTemporaryFile(delete=True)
            tmp.write(poster.content)
            tmp.flush()
            # save=False: the model row is written once, by super().save().
            self.imdb_poster.save(self.slug, File(tmp), save=False)
            tmp.close()
    super(Film, self).save(*args, **kwargs)
    if needs_refresh:
        self.nouveau()
def handle(self, *args, **options):
    """Import items from the remote food API; for each newly created Item,
    download its picture into the image field."""
    payload = requests.get(url=URL).json()
    for entry in payload:
        obj, was_created = Item.objects.get_or_create(
            id=entry['id'],
            defaults={
                'title': entry['title'],
                'description': entry['description'],
                'image': entry['image'],
                'weight': entry['weight_grams'],
                'price': entry['price'],
            })
        if not was_created:
            # Existing items keep their current image.
            continue
        tmp = NamedTemporaryFile(delete=True)
        tmp.write(urlopen(entry['image']).read())
        tmp.flush()
        obj.image.save(f"foodb{obj.pk}", File(tmp))
    return
def get_file_from_url(image_url):
    """
    @summary: download file from given url
    @param image_url: string
    @return: File instance
    """
    # SECURITY NOTE: hostname checking and certificate verification are
    # disabled below — only use against trusted hosts.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    # Last path component of the URL becomes the File's name.
    name = urllib.parse.urlparse(image_url).path.split('/')[-1]
    request_obj = urllib.request.Request(image_url, )
    payload = urlopen(request_obj, context=ctx).read()  # Get Response
    tmp = NamedTemporaryFile(delete=True)
    tmp.write(payload)
    tmp.flush()
    return File(tmp, name=name)
def image_upload_view(request):
    """Create an image either from an uploaded file or from a posted URL.

    Redirects to the resize view on success; on a failed URL fetch shows an
    info message and re-renders the form.

    Fix: ``response.headers.get('content-type')`` replaces direct indexing,
    which raised KeyError when the server omitted the Content-Type header;
    a missing header now simply fails the type check.
    """
    form = ImageUploadForm()
    if request.method == 'POST':
        form = ImageUploadForm(request.POST, request.FILES)
        if form.is_valid():
            if 'upload_file' in request.FILES:
                new_image = form.save(commit=False)
                new_image.img = request.FILES['upload_file']
                new_image.name = str(request.FILES['upload_file'])
                new_image.save()
                return redirect("images:resize", pk=new_image.pk)
            else:
                image_url = request.POST['url_file']
                response = requests.get(image_url, stream=True)
                if response.ok and response.headers.get('content-type') in (
                        'image/png', 'image/jpeg'):
                    img_temp = NamedTemporaryFile()
                    img_temp.write(response.content)
                    img_temp.flush()
                    new_image = form.save()
                    new_image.name = path.basename(image_url)
                    new_image.img.save(new_image.name, File(img_temp),
                                       save=True)
                    return redirect("images:resize", pk=new_image.pk)
                else:
                    messages.info(
                        request,
                        'Не удалось найти изображение. Попробуйте снова.')
        else:
            print(form.errors)
    return render(request, 'images/image_create.html', {'form': form})
def download_label(self, username, password):
    """Fetch this shipment's label PDF from Canada Post and store it on
    self.label.

    Raises:
        Shipment.Wait: when the label is not ready yet (HTTP 202).
        requests.HTTPError: for any other non-OK response.
    """
    link = self.shipmentlink_set.get(type='label')
    response = time_f(requests.get, 'canada-post-dp-shipping.get-label',
                      link.data['href'], auth=(username, password))
    if response.status_code == 202:
        # 202 Accepted = label still being generated; caller retries later.
        raise Shipment.Wait
    if not response.ok:
        response.raise_for_status()
    tmp = NamedTemporaryFile(delete=True)
    tmp.write(response.content)
    tmp.flush()
    label_path = requests.utils.urlparse(link.data['href']).path
    label_name = path.basename(label_path)
    if not label_name.lower().endswith('.pdf'):
        label_name = label_name + ".pdf"
    self.label = File(tmp, label_name)
    self.save()
def save(self, *args, **kwargs): hash_name = os.urandom(32).encode('hex') #create image if not self.image: print 'model - if not self.image' temp_img = NamedTemporaryFile() if self.uImage: print 'model - self.uImage' image = Image.open(settings.TMP_ROOT + self.uImage) self.imgName = self.uImage if self.imgUrl: print 'model - self.image' temp_img.write(urllib2.urlopen(self.imgUrl).read()) temp_img.flush() image = Image.open(temp_img.name) self.imgName = self.imgUrl if image.mode != "RGB": print 'model - image.mode' image = image.convert("RGB") image.save(temp_img.name, 'JPEG') self.image.save(''.join([hash_name, '.jpg']), File(temp_img)) #create image thumbnail print 'model - starting thumbnail' temp_thumb = NamedTemporaryFile() size = image.size prop = 200.0 / float(image.size[0]) size = (int(prop * float(image.size[0])), int(prop * float(image.size[1]))) image.thumbnail(size, Image.ANTIALIAS) image.save(temp_thumb.name, 'JPEG') self.thumbnail.save(''.join([hash_name, '.jpg']), File(temp_thumb)) #super(Pin, self).save() if self.uImage: print 'model - delete_uplaod called' delete_upload(None, self.uImage) media_url = settings.MEDIA_URL if not self.srcUrl: print 'model - if not srcUrl' self.srcUrl = media_url + self.image.name #always link to our saved image to prevent linking back to dead images. self.imgUrl = media_url + self.image.name super(Pin, self).save(*args, **kwargs)
def save_user_profile(backend, user, response, *args, **kwargs):
    """python-social-auth pipeline step: copy Google OAuth2 profile fields
    (gender, tagline, about, avatar, language) onto the user.

    Raises:
        AuthForbidden: when the reported age range starts below 18 (the
        user record is deleted first).

    Fix: the language block guarded on ``'language' in response`` but read
    ``response['locale']``, raising KeyError whenever 'language' was present
    without 'locale'; the guard now tests the key that is actually read.
    ``.keys()`` membership tests are also simplified to plain ``in``.
    """
    if backend.name == 'google-oauth2':
        if 'gender' in response:
            if response['gender'] == 'male':
                user.shopuserprofile.gender = 'M'
            else:
                user.shopuserprofile.gender = 'W'
        if 'tagline' in response:
            user.shopuserprofile.tagline = response['tagline']
        if 'aboutMe' in response:
            user.shopuserprofile.about_me = response['aboutMe']
        if 'ageRange' in response:
            minAge = response['ageRange']['min']
            if int(minAge) < 18:
                user.delete()
                raise AuthForbidden('social_core.backends.google.GoogleOAuth2')
        if 'picture' in response:
            # Random 16-char stem so repeated logins don't collide on disk.
            seed = ''.join(
                random.choices(string.ascii_letters + string.digits, k=16))
            url = response['picture']
            r = requests.get(url)
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(r.content)
            img_temp.flush()
            user.avatar.save(seed + '.' + response['picture'].split('.')[-1],
                             File(img_temp), save=True)
        if 'locale' in response:
            if response['locale'] == 'ru':
                user.shopuserprofile.language = 'ru'
            else:
                user.shopuserprofile.language = 'en'
        user.save()
    return
def main_parsing(resp, data):
    # Parse an IMDB title page from *resp* into *data*, create the Movie
    # (with downloaded poster) unless it already exists, and dump the parsed
    # row to a CSV export. Returns a DRF Response either way.
    soap = BeautifulSoup(resp.text, "lxml")
    movieName = soap.find("div", {'class': "titleBar"}).find("h1")
    # \xa0 is the non-breaking space IMDB appends after the title text.
    data["name"] = movieName.contents[0].strip().replace(u'\xa0', u'')
    movieDesc = soap.find("div", {"class": "summary_text"})
    data["desc"] = movieDesc.string.strip()
    movieGenre = soap.find("div", {"class": "subtext"})
    data["genre"] = ""
    movieRating = soap.find("span", {"itemprop": "ratingValue"})
    data["rating"] = float(movieRating.string)
    movieImg = soap.find("div", {"class": "poster"}).find("img").get("src")
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(urlrst.urlopen(movieImg).read())
    img_temp.flush()
    data["photo"] = movieImg
    # Genres are concatenated with no separator (original behaviour).
    for i in movieGenre.contents:
        data["genre"] += i.string.strip()
    print(data)
    mov_check = None
    try:
        mov_check = Movie.objects.get(name=data["name"])
        return Response(
            {"data": f"Movie named {mov_check} is already parsed!"})
    except Movie.DoesNotExist:
        movie = Movie.objects.create(name=data["name"],
                                     desc=data["desc"],
                                     genre=data["genre"],
                                     rating=data['rating'])
        movie.photo.save('image.png', File(img_temp))
        items_lst = []
        items_lst.append(data)
        # Re-write the CSV export with the freshly parsed movie row.
        with open(CSV, 'w', newline='') as file:
            writer = csv.writer(file, delimiter=';')
            writer.writerow([
                "Movie name", "Description", "Poster link", "Genre", "Rating"
            ])
            for item in items_lst:
                writer.writerow([
                    item['name'], item['desc'], item['photo'], item['genre'],
                    item['rating']
                ])
        return Response({"data": "OK"})