def save(self, *args, **kwargs):
    if self.image:
        # open image
        pil_image_obj = Image.open(self.image)
        img_size = pil_image_obj.size
        if float(img_size[0]) > 500:
            new_image = resizeimage.resize_width(pil_image_obj, 500)
            new_image_io = BytesIO()
            new_image.save(new_image_io, format='JPEG')
            temp_name = self.image.name
            self.image.delete(save=False)
            self.image.save(temp_name,
                            content=ContentFile(new_image_io.getvalue()),
                            save=False)
    if self.image_slurp:
        print("image slurp")
        imgRequest = request.urlopen(self.image_slurp)
        if imgRequest.status == 200:
            file_name = self.image_slurp.split('/')[-1]
            img_temp = NamedTemporaryFile()
            img_temp.write(imgRequest.read())
            img_temp.flush()
            img_file = File(img_temp)
            pil_image_obj = Image.open(img_temp)
            img_size = pil_image_obj.size
            if float(img_size[0]) > 500:
                new_image = resizeimage.resize_width(pil_image_obj, 500)
            else:
                new_image = pil_image_obj
            new_image_io = BytesIO()
            new_image.save(new_image_io, format='JPEG')
            temp_name = file_name
            self.image_slurp = None
            self.image.delete(save=False)
            self.image.save(temp_name,
                            content=ContentFile(new_image_io.getvalue()),
                            save=False)
    super(Article, self).save(*args, **kwargs)
def fetch_photos_from_msg(self, album, msg=None):
    u = album.user
    token = get_access_token(u)
    graph = facebook.GraphAPI(token)
    if msg.status == 'awaiting':
        parts = urlparse.urlparse(msg.next_page)
        qs = urlparse.parse_qs(parts.query)
        after = qs.get('after')[0]
        photos = graph.get_object(album.fb_album_id + "/photos",
                                  fields='id,source', limit=2, after=after)
        new_next_page = photos.get('paging').get('next')
        new_msg = Message.objects.create(next_page=new_next_page, user=u,
                                         status='awaiting')
        for photo in photos.get('data'):
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(urlopen(photo.get('source')).read())
            img_temp.flush()
            photo_object = Photo.objects.create(
                title=photo.get('id'),
                description=photo.get('created_time'),
                album=album,
                file=File(img_temp))
            pprint(photo_object.filename)
            self.stdout.write('Successfully fetched photo for source "%s"\n'
                              % photo.get('source'))
        msg.status = 'done'
        msg.save()
        self.stdout.write('Finished this queue "%s"\n' % new_msg.next_page)
def handle(self, *args, **options):
    rss_url = 'http://blog.djangogirls.org/rss'
    response = requests.get(rss_url)
    rss = ElementTree.fromstring(response.content)
    for post in rss.iter('item'):
        title = post.find('title').text
        if 'Your Django Story: Meet' in title:
            name = title.replace('Your Django Story: Meet ', '')
            is_story = True
        else:
            name = title
            is_story = False
        if not Story.objects.filter(name=name).exists():
            post_url = post.find('link').text
            post = pq(post.find('description').text)
            image_url = post('img').attr.src
            story = Story(name=name, post_url=post_url, content=post,
                          is_story=is_story)
            if image_url:
                img = NamedTemporaryFile(delete=True)
                img.write(urlopen(image_url).read())
                img.flush()
                story.image.save(image_url.split('/')[-1], File(img))
            story.save()
            if is_story:
                print('Story of %s has been fetched' % name)
            else:
                print('Blogpost "%s" has been fetched' % name)
def get_image_cover(self, path):
    r = requests.get(u"http://stratege.ru{}".format(path))
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(r.content)
    img_temp.flush()
    name_file = path.split("/")[-1]
    return name_file, img_temp
def pairwise_protein(query_name, query_seq, query_frame,
                     subject_name, subject_seq, subject_frame):
    if query_frame < 0:
        query_name = query_name + "(" + str(query_frame) + ")"
        query_seq = Seq(query_seq).reverse_complement()[-query_frame - 1:].translate().tostring()
    elif query_frame > 0:
        query_name = query_name + "(" + str(query_frame) + ")"
        query_seq = Seq(query_seq)[query_frame - 1:].translate().tostring()
    if subject_frame < 0:
        subject_name = subject_name + "(" + str(subject_frame) + ")"
        subject_seq = Seq(subject_seq).reverse_complement()[-subject_frame - 1:].translate().tostring()
    elif subject_frame > 0:
        subject_name = subject_name + "(" + str(subject_frame) + ")"
        subject_seq = Seq(subject_seq)[subject_frame - 1:].translate().tostring()
    input_file = NamedTemporaryFile(prefix="mafft_")
    input_file.write("\n".join([">" + query_name, query_seq.upper(),
                                ">" + subject_name, subject_seq.upper()]))
    input_file.flush()
    namelength = max([len(query_name), len(subject_name)]) + 4
    mafft_cmd = ("mafft --preservecase --clustalout --namelength " +
                 str(namelength) + " " + input_file.name)
    mafft_proc = Popen(mafft_cmd, stdout=PIPE, stderr=PIPE, shell=True)
    stdout, stderr = mafft_proc.communicate()
    return stdout
def post(self, request, *args, **kwargs):
    # Set Project
    project = request.POST.get('project', 'default')
    open_db(project)
    url = request.POST.get('url')
    tags = request.POST.get('tag_list')
    tags = "url," + tags
    if request.POST.get('tor'):
        downloaded_file = network.download(url, tor=True)
    else:
        downloaded_file = network.download(url, tor=False)
    if downloaded_file is None:
        messages.error(request, "server can't download from URL")
        return redirect(reverse("main-page-project", kwargs={"project": project}))
    tf = NamedTemporaryFile()
    tf.write(downloaded_file)
    if not tf:
        messages.error(request, "server can't download from URL")
        return redirect(reverse("main-page-project", kwargs={"project": project}))
    tf.flush()
    sha_256 = add_file(tf.name, name=url.split('/')[-1], tags=tags)
    if sha_256:
        messages.success(request, "stored file in database: {}".format(tf.name))
        return redirect(reverse('main-page-project', kwargs={'project': project}))
    else:
        messages.error(request, "Unable to Store The File, already in database")
        return redirect(reverse("main-page-project", kwargs={"project": project}))
def user_join(request):
    login_from = int(request.POST.get("login_from", 0))
    user_id = request.POST.get("user_id")
    password = request.POST.get("password")
    user_name = request.POST.get("user_name")
    user_email = request.POST.get("user_email")
    phone = request.POST.get("user_phone")
    gender = int(request.POST.get("user_gender", 0))
    img_url = request.POST.get("img_url", "")
    profile_img = None
    if login_from != 0:
        password = "******" % (login_from, password)
    if img_url:
        profile_img = NamedTemporaryFile(delete=True)
        profile_img.write(urllib2.urlopen(img_url).read())
        profile_img.flush()
    try:
        user = User.objects.create_user(username=user_id, password=password,
                                        email=user_email, first_name=user_name)
        if user:
            profile = UserProfile.objects.create(user=user, gender=gender,
                                                 login_from=login_from,
                                                 phone=phone)
            if profile_img:
                profile.src.save("%s.jpg" % user_id, File(profile_img))
            user = authenticate(username=user_id, password=password)
            if user:
                login(request, user)
                request.session.set_expiry(31536000)
                return HttpResponseRedirect("/")
    except Exception as e:
        pass
    return HttpResponseRedirect("/home/")
def do_export(self):
    """
    Does actual export. Called from a celery task.
    """
    book = Workbook()
    self.render_book(book)
    temp = NamedTemporaryFile(delete=True)
    book.save(temp)
    temp.flush()
    org_root = getattr(settings, 'SITE_ORGS_STORAGE_ROOT', 'orgs')
    filename = '%s/%d/%s/%s.xls' % (org_root, self.org_id, self.directory,
                                    random_string(20))
    default_storage.save(filename, File(temp))
    self.filename = filename
    self.save(update_fields=('filename',))
    subject = "Your export is ready"
    download_url = self.org.make_absolute_url(
        reverse(self.download_view, args=[self.pk]))
    send_email([self.created_by], subject, 'utils/email/export',
               {'download_url': download_url})
    # force a gc
    import gc
    gc.collect()
def _download_file(url, obj, obj_fieldname='executed_file', filename=None, update_obj=True):
    """ Task to download a file from a url and save it to a model field """
    if url not in [None, '']:
        # Validate the url
        URLValidator(url)
        if filename is None:
            filename = urlparse.urlparse(url).path
        filename_no_ext, ext = os.path.splitext(filename.split('/')[-1])
        #
        # @BUSINESSRULE must have a file .suffix
        #
        if ext is None:
            raise Exception('Cannot download a file with no filename.extension: %s' % url)
        filename = '%s%s' % (filename_no_ext, ext)
        #try:
        img_temp = NamedTemporaryFile(delete=True, suffix=ext)
        img_temp.write(urllib2.urlopen(url).read())
        img_temp.flush()
        #
        # SAVE THE FILE LOCALLY
        #
        # use the upload_to function to name and place the file appropriately
        filename = obj._meta.get_field(obj_fieldname).upload_to(instance=obj, filename=filename)
        file_object = File(img_temp)
        # return both the filename and the file_object for saving to the model
        return (default_storage.save(filename, file_object), file_object,)
def get_image_from_url(url):
    """ Get and save images from urls """
    r = requests.get(url)
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(r.content)
    img_temp.flush()
    return File(img_temp)
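# A minimal usage sketch for get_image_from_url above. Photo and its
# ImageField "image" are hypothetical stand-ins; any Django model with a
# FileField/ImageField accepts the returned django File the same way.
def attach_remote_image(photo, url):
    django_file = get_image_from_url(url)
    # field.save() writes the bytes to storage under the given name;
    # save=True also persists the model instance
    photo.image.save(url.split('/')[-1], django_file, save=True)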
def shrinkImageFromData(data):
    api_key = settings.TINYPNG_API_KEY
    info, new_data = shrink_data(data, api_key)
    img_shrunked = NamedTemporaryFile(delete=False)
    img_shrunked.write(new_data)
    img_shrunked.flush()
    return ImageFile(img_shrunked)
def save(self, *args, **kwargs):
    if not self.image:
        temp_img = NamedTemporaryFile()
        temp_img.write(urllib2.urlopen(self.url).read())
        temp_img.flush()
        self.image.save(self.url.split('/')[-1], File(temp_img))
    if not self.thumbnail:
        if not self.image:
            image = Image.open(temp_img.name)
        else:
            super(Pin, self).save()
            image = Image.open(self.image.path)
        size = image.size
        # scale to a 200px-wide thumbnail; use float math so the ratio is not
        # truncated to zero by integer division, and keep the sizes integers
        prop = 200.0 / image.size[0]
        size = (int(prop * image.size[0]), int(prop * image.size[1]))
        # Image.resize returns a new image rather than resizing in place
        image = image.resize(size, Image.ANTIALIAS)
        temp_thumb = NamedTemporaryFile()
        image.save(temp_thumb.name, 'JPEG')
        if self.url:
            name = self.url.split('/')[-1]
        else:
            name = self.image.name
        self.thumbnail.save(name, File(temp_thumb))
    super(Pin, self).save(*args, **kwargs)
def newpin(request):
    if request.method == 'GET':
        data = dict()
        board = Board.objects.get(id=request.GET['b'])
        data['board'] = board
        return render_to_response('newpin.html', data,
                                  context_instance=RequestContext(request))
    else:
        if len(request.FILES) != 0:
            im = request.FILES['pic']
            picture = Picture.objects.create(first_piner=request.user,
                                             active=True, web_url='/', image=im)
            picture.save()
            bid = request.POST['board']
            board = Board.objects.get(id=bid)
            pin = Pin.objects.create(to_board=board, picture=picture,
                                     description=request.POST['description'])
            pin.save()
        else:
            # get image from url
            url = request.POST['pic_url']
            web_url = request.POST['web_url']
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(urllib2.urlopen(url).read())
            img_temp.flush()
            picture = Picture.objects.create(first_piner=request.user,
                                             active=True, web_url=web_url,
                                             image=File(img_temp))
            picture.save()
            bid = request.POST['board']
            board = Board.objects.get(id=bid)
            pin = Pin.objects.create(to_board=board, picture=picture,
                                     description=request.POST['description'])
            pin.save()
        request.method = 'GET'
        return boardpage(request, bid)
def import_entries(self, feed_entries):
    """Import entries"""
    for feed_entry in feed_entries:
        self.write_out("> %s... " % feed_entry.title)
        creation_date = datetime(*feed_entry.date_parsed[:6])
        slug = slugify(feed_entry.title)[:255]
        if Entry.objects.filter(creation_date__year=creation_date.year,
                                creation_date__month=creation_date.month,
                                creation_date__day=creation_date.day,
                                slug=slug):
            self.write_out(self.style.NOTICE("SKIPPED (already imported)\n"))
            continue
        categories = self.import_categories(feed_entry)
        entry_dict = {
            "title": feed_entry.title[:255],
            "content": feed_entry.description,
            "excerpt": feed_entry.get("summary"),
            "status": PUBLISHED,
            "creation_date": creation_date,
            "start_publication": creation_date,
            "last_update": datetime.now(),
            "slug": slug,
        }
        if not entry_dict["excerpt"] and self.auto_excerpt:
            # truncate the stripped description to 50 words for the excerpt
            entry_dict["excerpt"] = Truncator(
                strip_tags(feed_entry.description)).words(50)
        if self.tags:
            entry_dict["tags"] = self.import_tags(categories)
        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)
        if self.image_enclosure:
            for enclosure in feed_entry.enclosures:
                if "image" in enclosure.get("type") and enclosure.get("href"):
                    img_tmp = NamedTemporaryFile(delete=True)
                    img_tmp.write(urlopen(enclosure["href"]).read())
                    img_tmp.flush()
                    entry.image.save(slug, File(img_tmp))
                    break
        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get("author_detail"):
            try:
                user = User.objects.create_user(
                    slugify(feed_entry.author_detail.get("name")),
                    feed_entry.author_detail.get("email", ""))
            except IntegrityError:
                user = User.objects.get(
                    username=slugify(feed_entry.author_detail.get("name")))
            entry.authors.add(user)
        self.write_out(self.style.ITEM("OK\n"))
def save_image(profile, url):
    img = NamedTemporaryFile(delete=True)
    img.write(urllib.request.urlopen(url).read())
    img.flush()
    profile.avatar_image.save(str(profile.id), File(img))
def get_cached(cls, key):
    """
    Look up an image in the cache; if it is not found, try to fetch it
    from the database.

    * key: key of the image
    """
    if not cls._cache.has_key(key):
        obj = ImageConstant.objects.with_id(key)
        if not obj:
            return
        f = NamedTemporaryFile(delete=False, suffix='.%s' % obj.image.format)
        buf = obj.image.read()
        if not buf:
            return
        f.write(buf)
        cls._open_images[key] = f
        f.flush()
        cls._cache[key] = f.name
    return cls._cache[key]
def save_to_model(file_field, file_name):
    img_temp = NamedTemporaryFile(delete=True)
    # read as binary so image bytes are not mangled
    with open(os.path.join(settings.MEDIA_ROOT, file_name), 'rb') as source:
        img_temp.write(source.read())
    img_temp.flush()
    file_field.save(os.path.basename(file_name), File(img_temp), save=False)
    # delete files after saving in models
    delete_file(file_name)
def post(self, request, *args, **kwargs):
    parent = self.get_object()
    files = json.loads(request.POST.get('files[]'))
    need_upload = request.POST.get('need_upload') == 'true'
    for f in files:
        obj = self.model(parent=parent)
        if need_upload:
            r = requests.get(f.get('link'), stream=True)
            img_temp = NamedTemporaryFile(delete=True)
            for chunk in r.iter_content(8192):
                img_temp.write(chunk)
            obj.file.save(f.get('name'), File(img_temp))
            img_temp.flush()
            # only fetch the thumbnail when a link is present; building the
            # URL first would raise AttributeError on a missing link
            if f.get('thumbnailLink'):
                # request the larger 256px variant of the thumbnail
                link = f.get('thumbnailLink').replace('bounding_box=75',
                                                      'bounding_box=256')
                r = requests.get(link, stream=True)
                img_temp = NamedTemporaryFile(delete=True)
                for chunk in r.iter_content(8192):
                    img_temp.write(chunk)
                obj.thumbnail.save(f.get('name'), File(img_temp))
                img_temp.flush()
        if f.get('thumbnailLink'):
            obj.outer_thumbnail_url = f.get('thumbnailLink')
        obj.content_type = mimetypes.guess_type(f.get('name'))[0]  # or 'image/png'
        obj.outer_url = f.get('link')
        obj.save()
    response = JSONResponse({'status': 'ok'},
                            mimetype=response_mimetype(request))
    response['Content-Disposition'] = 'inline; filename=files.json'
    return response
def _update_image(facebook_id, image_url):
    '''
    Updates the user profile's image to the given image url
    Unfortunately this is quite a pain to get right with Django
    Suggestions to improve this are welcome
    '''
    image_name = 'fb_image_%s.jpg' % facebook_id
    image_temp = NamedTemporaryFile()
    try:
        image_response = urllib2.urlopen(image_url)
    except AttributeError:
        image_response = urllib.request.urlopen(image_url)
    image_content = image_response.read()
    image_temp.write(image_content)
    http_message = image_response.info()
    image_size = len(image_content)
    try:
        content_type = http_message.type
    except AttributeError:
        content_type = http_message.get_content_type()
    image_file = InMemoryUploadedFile(
        file=image_temp,
        name=image_name,
        field_name='image',
        content_type=content_type,
        size=image_size,
        charset=None
    )
    image_file.seek(0)
    image_temp.flush()
    return image_name, image_file
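# A usage sketch for _update_image above (hedged): "profile" is a hypothetical
# model instance with an ImageField named "image". The returned
# InMemoryUploadedFile behaves like an uploaded file, so it can be handed
# straight to the field's save() method.
def sync_facebook_avatar(profile, facebook_id, image_url):
    image_name, image_file = _update_image(facebook_id, image_url)
    profile.image.save(image_name, image_file, save=True)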
def download_mixdown(self):
    """
    download generated mixdown from api & store locally (in `mixdown_file` field)
    """
    if not self.mixdown:
        log.info('mixdown not available on api')
        return
    if not self.mixdown['status'] == 3:
        log.info('mixdown not ready on api')
        return
    url = self.mixdown['mixdown_file']
    log.debug('download mixdown from api: {} > {}'.format(url, self.name))
    f_temp = NamedTemporaryFile(delete=True)
    f_temp.write(urlopen(url).read())
    f_temp.flush()
    # wipe existing file
    try:
        self.mixdown_file.delete(False)
    except IOError:
        pass
    self.mixdown_file.save(url.split('/')[-1], File(f_temp))
    return MixdownAPIClient().request_for_playlist(self)
def save_url_image(field, url, name):
    r = requests.get(url)
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(r.content)
    img_temp.flush()
    field.save(name, File(img_temp), save=True)
def download_media(self, media_url):
    """
    Fetches the recording and stores it with the provided recording_id

    :param media_url: the url where the media lives
    :return: the url for our downloaded media with full content type prefix
    """
    response = requests.get(media_url, stream=True, auth=self.auth)
    disposition = response.headers.get('Content-Disposition', None)
    content_type = response.headers.get('Content-Type', None)
    if content_type:
        extension = None
        if disposition:
            filename = re.findall("filename=\"(.+)\"", disposition)[0]
            extension = filename.rpartition('.')[2]
        elif content_type == 'audio/x-wav':
            extension = 'wav'
        temp = NamedTemporaryFile(delete=True)
        temp.write(response.content)
        temp.flush()
        return '%s:%s' % (content_type,
                          self.org.save_media(File(temp), extension))
    return None
def create_user_profile(sender, instance, created, **kwargs):
    """
    Creates a Profile model for each User that is created.
    This function is called on the post_save signal from User.
    """
    u = instance
    if created:
        p = Profile.objects.create(user=u)
        if not u.last_name:
            names = u.first_name.split(" ")
            if len(names) > 1:
                u.last_name = names[1]
                u.save()
        if not u.get_profile().image:
            # get gravatar
            user_sa = UserSocialAuth.objects.filter(user=u)
            if user_sa:
                user_sa = user_sa[0]
                token = user_sa.extra_data['access_token']
                g = Github(token).get_user()
                gravatar_id = g.gravatar_id
                url = "https://secure.gravatar.com/avatar/" + gravatar_id + "?s=500"
                img_temp = NamedTemporaryFile(delete=True)
                img_temp.write(urllib2.urlopen(url).read())
                img_temp.flush()
                u.get_profile().image.save("gravatar_image_" + str(u.pk),
                                           File(img_temp))
def process(filename):
    data = json.loads(open(filename, 'r').read())
    # pprint.pprint(data)
    print "%s (%s) - %s" % (data['name'], data['slug'], filename)
    slug = data['slug']
    try:
        person = models.Person.objects.get(slug=slug)
        return  # don't try to update the person
    except models.Person.DoesNotExist:
        person = models.Person(slug=slug)
    person.legal_name = data['name']
    person.summary = data['summary']
    person.date_of_birth = data['date_of_birth']
    person.save()
    content_type = ContentType.objects.get_for_model(person)
    if data.get('profile_url'):
        models.Contact.objects.get_or_create(
            content_type=content_type,
            object_id=person.id,
            value=re.sub('\s', '%20', data['profile_url']),
            kind=profile_url_kind,
        )
    if data.get('email'):
        models.Contact.objects.get_or_create(
            content_type=content_type,
            object_id=person.id,
            value=data['email'],
            kind=email_kind,
        )
    # import image
    if data.get('image') and 'img_not_found' not in data['image']:
        image_url = re.sub('\s', '%20', data['image'])
        photo, created = Image.objects.get_or_create(
            content_type=content_type,
            object_id=person.id,
            source=image_url,
        )
        if created:
            print " Fetching " + image_url
            try:
                img_temp = NamedTemporaryFile(delete=True)
                img_temp.write(urllib2.urlopen(image_url).read())
                img_temp.flush()
                photo.image.save(person.slug, File(img_temp))
                photo.save()
            except urllib2.HTTPError:
                print " ...failed!"
def save(self, *args, **kwargs):
    hash_name = os.urandom(32).encode('hex')
    if not self.image:
        temp_img = NamedTemporaryFile()
        temp_img.write(urllib2.urlopen(self.url).read())
        temp_img.flush()
        image = Image.open(temp_img.name)
        image.save(temp_img.name, 'JPEG')
        self.image.save(''.join([hash_name, '.jpg']), File(temp_img))
    if not self.thumbnail:
        if not self.image:
            image = Image.open(temp_img.name)
        else:
            super(Pin, self).save()
            image = Image.open(self.image.path)
        size = image.size
        prop = 200.0 / float(image.size[0])
        size = (int(prop * float(image.size[0])),
                int(prop * float(image.size[1])))
        image.thumbnail(size, Image.ANTIALIAS)
        temp_thumb = NamedTemporaryFile()
        image.save(temp_thumb.name, 'JPEG')
        self.thumbnail.save(''.join([hash_name, '.jpg']), File(temp_thumb))
    super(Pin, self).save(*args, **kwargs)
def upload_image_url(request):
    if request.method != 'POST':
        return HttpResponse(status=403)
    image_url = request.POST.get('image_url', None)
    source_domain = request.POST.get('source_domain', None)
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        # 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        # 'Accept-Encoding': 'none',
        # 'Accept-Language': 'en-US,en;q=0.8',
        # 'Connection': 'keep-alive',
        'referer': source_domain,
    }
    ext = mimetypes.guess_extension(mimetypes.guess_type(image_url)[0])
    req = urllib2.Request(image_url, headers=headers)
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(urllib2.urlopen(req).read())
    img_temp.flush()
    post_photo = Post_photo()
    post_photo.photo.save('%s%s' % (uuid.uuid4(), ext), File(img_temp))
    post_photo.save()
    res = {
        'link': post_photo.photo.url,
    }
    return JsonResponse(res, safe=False)
def import_images(logo_dir):
    report_data = dict(failed=0, succeeded=0)
    for logo_name in os.listdir(logo_dir):
        error_message = ""
        identifier, extension = splitext(logo_name)
        if extension.lower() in (".png", ".jpg", ".jpeg", ".gif"):
            try:
                internal_org_id = InternalOrganisationID.objects.get(
                    recording_org=Organisation.objects.get(id=CORDAID_ORG_ID),
                    identifier=identifier
                )
                org = internal_org_id.referenced_org
                filename = model_and_instance_based_filename(
                    "Organisation", org.pk, "logo", logo_name)
                with open(os.path.join(logo_dir, logo_name), "rb") as f:
                    logo_data = f.read()
                logo_tmp = NamedTemporaryFile(delete=True)
                logo_tmp.write(logo_data)
                logo_tmp.flush()
                org.logo.save(filename, File(logo_tmp), save=True)
                action = "succeeded"
            except Exception, e:
                action = "failed"
                error_message = "with the following error message: {error_message}".format(
                    error_message=e.message)
            report_data[action] += 1
            log_and_alert(
                u"Upload of image to organisation {org_id} {action} {error_message}".format(
                    org_id=org.id, action=action, error_message=error_message
                )
            )
def collect_context(movie_tree, movie_html, cast_crew_tree):
    movie_title_xpath = '//*[@id="overview-top"]/h1/span[1]/text()'
    movie_title = movie_tree.xpath(movie_title_xpath)[0]
    print movie_title
    if "<h2>Episodes" not in movie_html:
        movie_year_xpath = '//*[@id="main"]/div[1]/div[1]/div/h3/span/text()'
        genre_xpath = '//*[@id="overview-top"]/div[2]/a/span/text()'
    else:
        # movie_year_xpath = '//*[@id="overview-top"]/h1/span[2]/text()'
        # genre_xpath = '//*[@id="overview-top"]/div[1]/a/span/text()'
        print "Yep. TV show. Do not want."
    movie_year = cast_crew_tree.xpath(movie_year_xpath)
    if len(movie_year) == 1:
        movie_year_final = Scraper.clean_int_from_ugly_string(movie_year)
    else:
        # not my favorite to hard-code it. temporary as I worked out an issue with
        # TV shows -- of which there is only one involved here.
        # movie_year_final = 1962
        print "Like I said about that whole TV show thing."
    # print type(movie_year_final)
    movie_genre = movie_tree.xpath(genre_xpath)
    all_genres = [genre for genre in movie_genre]
    description_xpath = '//*[@id="overview-top"]/p[2]/text()'
    description = movie_tree.xpath(description_xpath)[0]
    description = description.strip()
    # print type(description)
    # print description
    all_context = {}
    all_context[movie_title] = [movie_year_final, all_genres, description]
    movie, created = Movie.objects.get_or_create(
        title=movie_title,
        year_released=movie_year_final,
        genre=all_genres,
        description=description
    )
    poster_xpath = '//*[@id="img_primary"]/div[1]/a/img/@src'
    poster = movie_tree.xpath(poster_xpath)
    if len(poster) == 1:
        poster = poster[0]
        poster_response = urllib.urlopen(poster).read()
        poster_temp = NamedTemporaryFile(delete=True)
        poster_temp.write(poster_response)
        movie.poster.save('%s.jpg' % movie_title, File(poster_temp))
        movie.save()
    else:
        # three movies with no poster. what even is life.
        print "Ain't no poster here."
        # movie.save()
    return all_context, movie
def _save_image(self, url):
    img = NamedTemporaryFile(delete=True)
    img.write(urllib.request.urlopen(url).read())
    img.flush()
    self.user.avatar_image.save(str(self.user.id), File(img))
def image(self):
    try:
        r = requests.get(self.remote_path)
    except Exception as e:
        raise Exception('Exception %s raised '
                        'during loading image %s' % (e, self.remote_path))
    if self.storage.exists(self.full_name) and self.if_cache:
        im = Image.open(self.storage.path(self.full_name))
        im = processors.save_image(im, format=self.file_extension)
    else:
        img_temp = NamedTemporaryFile(delete=True)
        img_temp.write(r.content)
        img_temp.flush()
        im = Image.open(img_temp.name)
        if self.final_size is not None:
            im = processors.scale_and_crop(im, self.final_size, self.method)
        im = processors.colorspace(im)
        im = processors.save_image(im, format=self.file_extension)
        self.storage.save(self.full_name, im)
    return im
def export_handler(request, course_key_string):
    """
    The restful handler for exporting a course.

    GET
        html: return html page for import page
        application/x-tgz: return tar.gz file containing exported course
        json: not supported

    Note that there are 2 ways to request the tar.gz file. The request header can specify
    application/x-tgz via HTTP_ACCEPT, or a query parameter can be used (?_accept=application/x-tgz).

    If the tar.gz file has been requested but the export operation fails, an HTML page will be returned
    which describes the error.
    """
    course_key = CourseKey.from_string(course_key_string)
    if not has_course_access(request.user, course_key):
        raise PermissionDenied()

    course_module = modulestore().get_course(course_key)

    # an _accept URL parameter will be preferred over HTTP_ACCEPT in the header.
    requested_format = request.REQUEST.get('_accept', request.META.get('HTTP_ACCEPT', 'text/html'))

    export_url = reverse_course_url('export_handler', course_key) + '?_accept=application/x-tgz'
    if 'application/x-tgz' in requested_format:
        name = course_module.url_name
        export_file = NamedTemporaryFile(prefix=name + '.', suffix=".tar.gz")
        root_dir = path(mkdtemp())

        try:
            export_to_xml(modulestore(), contentstore(), course_module.id, root_dir, name)

            logging.debug(u'tar file being generated at {0}'.format(export_file.name))
            with tarfile.open(name=export_file.name, mode='w:gz') as tar_file:
                tar_file.add(root_dir / name, arcname=name)
        except SerializationError as exc:
            log.exception(u'There was an error exporting course %s', course_module.id)
            unit = None
            failed_item = None
            parent = None
            try:
                failed_item = modulestore().get_item(exc.location)
                parent_loc = modulestore().get_parent_location(failed_item.location)

                if parent_loc is not None:
                    parent = modulestore().get_item(parent_loc)
                    if parent.location.category == 'vertical':
                        unit = parent
            except:  # pylint: disable=bare-except
                # if we have a nested exception, then we'll show the more generic error message
                pass

            return render_to_response('export.html', {
                'context_course': course_module,
                'in_err': True,
                'raw_err_msg': str(exc),
                'failed_module': failed_item,
                'unit': unit,
                'edit_unit_url': reverse_usage_url("container_handler", parent.location) if parent else "",
                'course_home_url': reverse_course_url("course_handler", course_key),
                'export_url': export_url
            })
        except Exception as exc:
            log.exception('There was an error exporting course %s', course_module.id)
            return render_to_response('export.html', {
                'context_course': course_module,
                'in_err': True,
                'unit': None,
                'raw_err_msg': str(exc),
                'course_home_url': reverse_course_url("course_handler", course_key),
                'export_url': export_url
            })
        finally:
            shutil.rmtree(root_dir / name)

        wrapper = FileWrapper(export_file)
        response = HttpResponse(wrapper, content_type='application/x-tgz')
        response['Content-Disposition'] = 'attachment; filename=%s' % os.path.basename(export_file.name.encode('utf-8'))
        response['Content-Length'] = os.path.getsize(export_file.name)
        return response

    elif 'text/html' in requested_format:
        return render_to_response('export.html', {
            'context_course': course_module,
            'export_url': export_url
        })

    else:
        # Only HTML or x-tgz request formats are supported (no JSON).
        return HttpResponse(status=406)
def post(self, request, *args, **kwargs):
    solution = get_object_or_404(Solution, pk=kwargs.pop('pk'))
    solver = Solver()
    status = solver.status(solution.submission_id)
    if status == Solver.SUCCESS:
        info = solver.info(solution.submission_id)
        solution.objects_in_field = ', '.join(info['objects_in_field'])
        solution.ra = "%.3f" % info['calibration']['ra']
        solution.dec = "%.3f" % info['calibration']['dec']
        solution.orientation = "%.3f" % info['calibration']['orientation']
        solution.radius = "%.3f" % info['calibration']['radius']
        solution.pixscale = "%.3f" % corrected_pixscale(
            solution, info['calibration']['pixscale'])
        try:
            target = solution.content_type.get_object_for_this_type(
                pk=solution.object_id)
        except solution.content_type.model_class().DoesNotExist:
            # Target image was deleted meanwhile
            context = {'status': Solver.FAILED}
            return HttpResponse(simplejson.dumps(context),
                                content_type='application/json')
        # Annotate image
        try:
            annotations_obj = solver.annotations(solution.submission_id)
            solution.annotations = simplejson.dumps(annotations_obj)
            annotator = Annotator(solution)
            annotated_image = annotator.annotate()
        except RequestError as e:
            solution.status = Solver.FAILED
            solution.save()
            context = {'status': solution.status, 'error': str(e)}
            return HttpResponse(simplejson.dumps(context),
                                content_type='application/json')
        except ThumbnailNotReadyException:
            solution.status = Solver.PENDING
            solution.save()
            context = {'status': solution.status}
            return HttpResponse(simplejson.dumps(context),
                                content_type='application/json')
        filename, ext = os.path.splitext(target.image_file.name)
        annotated_filename = "%s-%d%s" % (filename, int(time.time()), ext)
        if annotated_image:
            solution.image_file.save(annotated_filename, annotated_image)
        # Get sky plot image
        url = solver.sky_plot_zoom1_image_url(solution.submission_id)
        if url:
            img = NamedTemporaryFile(delete=True)
            img.write(urllib.request.urlopen(url).read())
            img.flush()
            img.seek(0)
            f = File(img)
            try:
                solution.skyplot_zoom1.save(target.image_file.name, f)
            except IntegrityError:
                pass
    solution.status = status
    solution.save()
    context = {'status': solution.status}
    return HttpResponse(simplejson.dumps(context),
                        content_type='application/json')
def start(request):
    context = {}
    if request.method == 'POST':
        form = forms.StartForm(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            information = fetch_user(name, is_username='******' not in name)
            # print "INFORMATION"
            # from pprint import pprint
            # pprint(information)
            assert information  # must have something
            description = []
            if information.get('bio'):
                description.append(information['bio'])
            if information.get('city'):
                description.append('City: %s' % information['city'])
            if information.get('ircname'):
                description.append('IRC nick: %s' % information['ircname'])
            # lastly make it a string
            description = '\n'.join(description)
            additional_links = []
            if information.get('url'):
                additional_links.append(information['url'])
            for each in information.get('accounts', []):
                if '://' in each.get('identifier', ''):
                    additional_links.append(each['identifier'])
            # lastly make it a string
            additional_links = '\n'.join(additional_links)
            now = timezone.now()
            title = (
                information.get('full_name') or
                information.get('username') or
                name
            )
            event = Event.objects.create(
                status=Event.STATUS_INITIATED,
                creator=request.user,
                mozillian=information['username'],
                slug=slugify('mozillian-%s' % information['username']),
                title=title,
                privacy=Event.PRIVACY_CONTRIBUTORS,
                short_description="%s is a Mozillian!" % title,
                description=description,
                additional_links=additional_links,
                start_time=now,
            )
            if information.get('photo'):
                # download it locally and
                photo_name = urlparse(information['photo']).path.split('/')[-1]
                img_temp = NamedTemporaryFile(delete=True)
                img_temp.write(urllib2.urlopen(information['photo']).read())
                img_temp.flush()
                event.placeholder_img.save(photo_name, File(img_temp), save=True)
            mozillians_channel, __ = Channel.objects.get_or_create(
                name=settings.MOZILLIANS_CHANNEL_NAME,
                slug=settings.MOZILLIANS_CHANNEL_SLUG,
            )
            event.channels.add(mozillians_channel)
            messages.info(
                request,
                "That's great! Now have a look and try to make your profile "
                "more complete.")
            return redirect('webrtc:details', event.id)
    else:
        form = forms.StartForm()
    qs = (Event.objects.filter(mozillian__isnull=False).filter(
        creator=request.user))
    for event in qs:
        context['event'] = event
    context['form'] = form
    return render(request, 'webrtc/start.html', context)
def callback(request):
    def startButton(reply_token, send_text):
        try:
            reply = line_bot_api.reply_message(reply_token, [
                TextSendMessage(send_text),
                TemplateSendMessage(
                    alt_text='メニューを更新',
                    template=ButtonsTemplate(
                        title='メニューを更新',
                        text='更新ボタンを押してください',
                        actions=[
                            PostbackTemplateAction(label='メニューを更新',
                                                   data='start'),
                        ]))
            ])
        except LineBotApiError:
            return HttpResponseBadRequest()
        return reply

    def confirmButton(reply_token, send_text):
        try:
            reply = line_bot_api.reply_message(reply_token, [
                TextSendMessage(send_text),
                TemplateSendMessage(
                    alt_text='確認',
                    template=ButtonsTemplate(
                        title='確認',
                        text='選択してください',
                        actions=[
                            PostbackTemplateAction(label='更新', data='save'),
                            PostbackTemplateAction(label='中止', data='quit'),
                        ]))
            ])
        except LineBotApiError:
            return HttpResponseBadRequest()
        return reply

    if request.method == 'POST':
        body = request.body.decode('utf-8')
        signature = request.META['HTTP_X_LINE_SIGNATURE']
        try:
            events = parser.parse(body, signature)
        except InvalidSignatureError:
            return HttpResponseForbidden()
        except LineBotApiError:
            return HttpResponseBadRequest()
        for event in events:
            posts = Post.objects.values('user_id').order_by('-created_at')
            users = []
            for post in posts:
                users.append(post['user_id'])
            has_created = event.source.user_id in users
            if has_created == True:
                post = Post.objects.filter(
                    user_id=event.source.user_id).order_by('-created_at')[0]
            if isinstance(event, FollowEvent):
                startButton(
                    event.reply_token,
                    'フォローありがとうございます!\nメニューを更新したい場合、下のボタンを押してください\nなお、以下がサイトのURLです\n\nhttps://{}'
                    .format(settings.DOMAIN_NAME))
            elif isinstance(event, PostbackEvent):
                if event.postback.data == 'start':
                    if has_created == False:
                        post = Post(user_id=event.source.user_id, status=1)
                        post.save()
                        try:
                            line_bot_api.reply_message(
                                event.reply_token,
                                TextSendMessage('メニューの更新ですね\nまずは料理名を入力してください'))
                        except LineBotApiError:
                            return HttpResponseBadRequest()
                    else:
                        if post.status == 0:
                            post = Post(user_id=event.source.user_id, status=1)
                            post.save()
                            try:
                                line_bot_api.reply_message(
                                    event.reply_token,
                                    TextSendMessage('メニューの更新ですね\nまずは料理名を入力してください'))
                            except LineBotApiError:
                                return HttpResponseBadRequest()
                elif event.postback.data == 'save':
                    if has_created == True:
                        if post.status == 5:
                            post.status = 0
                            post.save()
                            try:
                                line_bot_api.reply_message(
                                    event.reply_token,
                                    TextSendMessage('サイトが更新されました\n\nhttps://{}'.format(
                                        settings.DOMAIN_NAME)))
                            except LineBotApiError:
                                return HttpResponseBadRequest()
                elif event.postback.data == 'quit':
                    if has_created == True:
                        if post.status == 5:
                            post.image_path.delete()
                            post.delete()
                            try:
                                line_bot_api.reply_message(
                                    event.reply_token,
                                    TextSendMessage('操作中のデータを削除しました'))
                            except LineBotApiError:
                                return HttpResponseBadRequest()
            elif isinstance(event, MessageEvent):
                if isinstance(event.message, TextMessage):
                    if event.message.text == 'ニッポンポン':
                        del_posts = Post.objects.exclude(
                            Q(image_path__contains='ebi-fry') |
                            Q(image_path__contains='katsu-don') |
                            Q(image_path__contains='beef-stew'))
                        for del_post in del_posts:
                            del_post.image_path.delete()
                        Post.objects.all().delete()
                        try:
                            line_bot_api.reply_message(
                                event.reply_token,
                                TextSendMessage('合衆国ニッポンポン!!!'))
                        except LineBotApiError:
                            return HttpResponseBadRequest()
                    elif event.message.text == 'ナナリー':
                        del_posts = Post.objects.exclude(
                            Q(image_path__contains='ebi-fry') |
                            Q(image_path__contains='katsu-don') |
                            Q(image_path__contains='beef-stew'))
                        for del_post in del_posts:
                            del_post.image_path.delete()
                        Post.objects.all().delete()
                        Post.objects.bulk_create([
                            Post(title='エビフライ',
                                 description='プリプリのエビをカリカリの衣で包んだ至高の一品',
                                 price='850',
                                 image_path='img/post/ebi-fry.jpg',
                                 status=0),
                            Post(title='シチュー',
                                 description='バラ肉をルーとともにホロホロになるまで煮込んだビーフシチュー',
                                 price='1200',
                                 image_path='img/post/beef-stew.jpg',
                                 status=0),
                            Post(title='かつ丼',
                                 description='揚げたてのカツをふんわり半熟卵でとじた定番の一品',
                                 price='750',
                                 image_path='img/post/katsu-don.jpg',
                                 status=0),
                        ])
                        try:
                            line_bot_api.reply_message(
                                event.reply_token,
                                TextSendMessage('ナナリーーーーーッッ!!!!!'))
                        except LineBotApiError:
                            return HttpResponseBadRequest()
                    if has_created == True:
                        if post.status == 0:
                            startButton(
                                event.reply_token,
                                'ご利用ありがとうございます\nメニューを更新したい場合、下のボタンを押してください\nなお、以下がサイトのURLです\n\nhttps://{}'
                                .format(settings.DOMAIN_NAME))
                        elif post.status == 1:
                            post.title = event.message.text
                            post.status = 2
                            post.save()
                            try:
                                line_bot_api.reply_message(
                                    event.reply_token,
                                    TextSendMessage('次はメニューの説明文を入力してください'),
                                )
                            except LineBotApiError:
                                return HttpResponseBadRequest()
                        elif post.status == 2:
                            post.description = event.message.text
                            post.status = 3
                            post.save()
                            try:
                                line_bot_api.reply_message(
                                    event.reply_token,
                                    TextSendMessage('次は値段を入力してください'),
                                )
                            except LineBotApiError:
                                return HttpResponseBadRequest()
                        elif post.status == 3:
                            try:
                                post.price = int(event.message.text)
                                post.status = 4
                                post.save()
                            except:
                                try:
                                    line_bot_api.reply_message(
                                        event.reply_token,
                                        TextSendMessage('半角数字を入力してください'),
                                    )
                                except LineBotApiError:
                                    return HttpResponseBadRequest()
                            try:
                                line_bot_api.reply_message(
                                    event.reply_token,
                                    TextSendMessage('次は掲載する画像を送信してください'),
                                )
                            except LineBotApiError:
                                return HttpResponseBadRequest()
                        elif post.status == 4:
                            try:
                                line_bot_api.reply_message(
                                    event.reply_token,
                                    TextSendMessage('画像を送信してください'),
                                )
                            except LineBotApiError:
                                return HttpResponseBadRequest()
                        else:
                            startButton(
                                event.reply_token,
                                'ご利用ありがとうございます\nメニューを更新したい場合、下のボタンを押してください\nなお、以下がサイトのURLです\n\nhttps://{}'
                                .format(settings.DOMAIN_NAME))
                elif isinstance(event.message, ImageMessage):
                    if has_created == True:
                        if post.status == 4:
                            try:
                                message_content = line_bot_api.get_message_content(
                                    event.message.id)
                                with NamedTemporaryFile(mode='w+b') as f:
                                    for chunk in message_content.iter_content():
                                        f.write(chunk)
                                    post.image_path.save(
                                        event.message.id + '.jpg',
                                        File(f), save=True)
                                post.status = 5
                                post.save()
                            except:
                                traceback.print_exc()
                                try:
                                    line_bot_api.reply_message(
                                        event.reply_token,
                                        TextSendMessage('画像を保存できませんでした'),
                                    )
                                except LineBotApiError:
                                    return HttpResponseBadRequest()
                            confirmButton(
                                event.reply_token,
                                '項目は以上です\nこれでよろしければ「保存」、更新を中止したい場合は「中止」を押してください')
                        elif post.status == 0:
                            startButton(
                                event.reply_token,
                                'ご利用ありがとうございます\nメニューを更新したい場合、下のボタンを押してください\nなお、以下がサイトのURLです\n\nhttps://{}'
                                .format(settings.DOMAIN_NAME))
                        elif post.status == 5:
                            confirmButton(event.reply_token,
                                          'すでに画像は送信されています\n更新または中止を選択してください')
                    else:
                        try:
                            line_bot_api.reply_message(
                                event.reply_token,
                                TextSendMessage('テキストを入力してください'),
                            )
                        except LineBotApiError:
                            return HttpResponseBadRequest()
                else:
                    startButton(
                        event.reply_token,
                        'ご利用ありがとうございます\nメニューを更新したい場合、下のボタンを押してください\nなお、以下がサイトのURLです\n\nhttps://{}'
                        .format(settings.DOMAIN_NAME))
        return HttpResponse()
    else:
        return HttpResponseBadRequest()
class CompilerFilter(FilterBase):
    """
    A filter subclass that is able to filter content via external commands.
    """
    command = None
    options = ()
    default_encoding = settings.FILE_CHARSET

    def __init__(self, content, command=None, **kwargs):
        super(CompilerFilter, self).__init__(content, **kwargs)
        self.cwd = None

        if command:
            self.command = command
        if self.command is None:
            raise FilterError("Required attribute 'command' not given")

        if isinstance(self.options, dict):
            # turn dict into a tuple
            new_options = ()
            for item in self.options.items():
                new_options += (item,)
            self.options = new_options

        # append kwargs to self.options
        for item in kwargs.items():
            self.options += (item,)

        self.stdout = self.stdin = self.stderr = subprocess.PIPE
        self.infile = self.outfile = None

    def input(self, **kwargs):
        encoding = self.default_encoding
        options = dict(self.options)

        if self.infile is None and "{infile}" in self.command:
            # create temporary input file if needed
            if self.filename is None:
                self.infile = NamedTemporaryFile(mode='wb')
                self.infile.write(self.content.encode(encoding))
                self.infile.flush()
                options["infile"] = self.infile.name
            else:
                # we use source file directly, which may be encoded using
                # something different than utf8. If that's the case file will
                # be included with charset="something" html attribute and
                # charset will be available as filter's charset attribute
                encoding = self.charset  # or self.default_encoding
                self.infile = open(self.filename)
                options["infile"] = self.filename

        if "{outfile}" in self.command and "outfile" not in options:
            # create temporary output file if needed
            ext = self.type and ".%s" % self.type or ""
            self.outfile = NamedTemporaryFile(mode='r+', suffix=ext)
            options["outfile"] = self.outfile.name

        # Quote infile and outfile for spaces etc.
        if "infile" in options:
            options["infile"] = shell_quote(options["infile"])
        if "outfile" in options:
            options["outfile"] = shell_quote(options["outfile"])

        try:
            command = self.command.format(**options)
            proc = subprocess.Popen(command, shell=True, cwd=self.cwd,
                                    stdout=self.stdout, stdin=self.stdin,
                                    stderr=self.stderr)
            if self.infile is None:
                # if infile is None then send content to process' stdin
                filtered, err = proc.communicate(self.content.encode(encoding))
            else:
                filtered, err = proc.communicate()
            filtered, err = filtered.decode(encoding), err.decode(encoding)
        except (IOError, OSError) as e:
            raise FilterError('Unable to apply %s (%r): %s' %
                              (self.__class__.__name__, self.command, e))
        else:
            if proc.wait() != 0:
                # command failed, raise FilterError exception
                if not err:
                    err = ('Unable to apply %s (%s)' %
                           (self.__class__.__name__, self.command))
                    if filtered:
                        err += '\n%s' % filtered
                raise FilterError(err)
            if self.verbose:
                self.logger.debug(err)
            outfile_path = options.get('outfile')
            if outfile_path:
                with io.open(outfile_path, 'r', encoding=encoding) as file:
                    filtered = file.read()
        finally:
            if self.infile is not None:
                self.infile.close()
            if self.outfile is not None:
                self.outfile.close()
        return smart_text(filtered)
def generate_export_course(request, org, course, name):
    """
    This method will serialize out a course to a .tar.gz file which contains
    a XML-based representation of the course
    """
    location = get_location_and_verify_access(request, org, course, name)
    course_module = modulestore().get_instance(location.course_id, location)
    loc = Location(location)
    export_file = NamedTemporaryFile(prefix=name + '.', suffix=".tar.gz")
    root_dir = path(mkdtemp())

    try:
        export_to_xml(modulestore('direct'), contentstore(), loc, root_dir,
                      name, modulestore())
    except SerializationError, e:
        logging.exception(
            'There was an error exporting course {0}. {1}'.format(
                course_module.location, unicode(e)))
        unit = None
        failed_item = None
        parent = None
        try:
            failed_item = modulestore().get_instance(
                course_module.location.course_id, e.location)
            parent_locs = modulestore().get_parent_locations(
                failed_item.location, course_module.location.course_id)
            if len(parent_locs) > 0:
                parent = modulestore().get_item(parent_locs[0])
                if parent.location.category == 'vertical':
                    unit = parent
        except:
            # if we have a nested exception, then we'll show the more generic error message
            pass

        return render_to_response('export.html', {
            'context_course': course_module,
            'successful_import_redirect_url': '',
            'in_err': True,
            'raw_err_msg': str(e),
            'failed_module': failed_item,
            'unit': unit,
            'edit_unit_url': reverse('edit_unit', kwargs={
                'location': parent.location
            }) if parent else '',
            'course_home_url': reverse('course_index', kwargs={
                'org': org,
                'course': course,
                'name': name
            })
        })
def do_import(self):
    """
    Retrieve and store the current image, as well as the image caption and credit.

    The image will be extracted from the 'url' attribute of the first
    'document-link' element containing a file with one of the extensions of
    VALID_IMAGE_EXTENSIONS.

    If an image is successfully retrieved, the image caption will be based on
    the underlying 'title' element and the image credit will be based on the
    akvo photo-credit attribute of the 'document-link' element.

    :return: List; contains fields that have changed
    """
    changes = []
    image_meta_changes = []
    for document_link_element in self.parent_elem.findall('document-link'):
        url = self.get_attrib(document_link_element, 'url', 'current_image')
        if url:
            filename, extension = file_info_from_url(url)
            if extension not in VALID_IMAGE_EXTENSIONS:
                continue
            # get content length of uncompressed cargo
            header_query = requests.head(
                url, headers={'Accept-Encoding': 'identity'})
            content_length = int(
                header_query.headers.get('content-length', '0'))
            # If we have no image or the size of the image URL differs from the stored one,
            # we go get. This _may_ in unlucky cases lead to a new image not being fetched.
            # TODO: add a timestamp to the image for better comparison criteria
            if not self.project.current_image or (
                    self.project.current_image.size != content_length):
                request = requests.get(url, stream=True)
                if request.status_code == 200:
                    tmp_file = NamedTemporaryFile()
                    for chunk in request.iter_content(1024):
                        if not chunk:
                            break
                        tmp_file.write(chunk)
                    tmp_file.flush()
                    self.project.current_image.save(filename, File(tmp_file))
                    changes.append('current_image')
                else:
                    self.add_log(
                        'document-link', 'current_image',
                        'Error trying to fetch image: {}'.format(url))
            current_image_caption = self.get_child_element_text(
                document_link_element, 'title', 'current_image_caption')
            if current_image_caption:
                self.project.current_image_caption = current_image_caption
                image_meta_changes.append('current_image_caption')
            current_image_credit = self.get_attrib(
                document_link_element, akvo_ns('photo-credit'),
                'current_image_credit')
            if current_image_credit:
                self.project.current_image_credit = current_image_credit
                image_meta_changes.append('current_image_credit')
    if image_meta_changes:
        self.project.save(update_fields=image_meta_changes)
    return changes + image_meta_changes
def create_eks_cluster(challenge):
    """
    Called when Challenge is approved by the EvalAI admin
    calls the create_eks_nodegroup function

    Arguments:
        sender {type} -- model field called the post hook
        instance {<class 'django.db.models.query.QuerySet'>} -- instance of the model calling the post hook
    """
    from .models import ChallengeEvaluationCluster

    for obj in serializers.deserialize("json", challenge):
        challenge_obj = obj.object
    cluster_name = "{0}-cluster".format(challenge_obj.title.replace(" ", "-"))
    if challenge_obj.approved_by_admin and challenge_obj.is_docker_based:
        client = get_boto3_client("eks", aws_keys)
        try:
            response = client.create_cluster(
                name=cluster_name,
                version="1.15",
                roleArn=settings.EKS_CLUSTER_ROLE_ARN,
                resourcesVpcConfig={
                    "subnetIds": [VPC_DICT["SUBNET_1"], VPC_DICT["SUBNET_2"]],
                    "securityGroupIds": [VPC_DICT["SUBNET_SECURITY_GROUP"]],
                },
            )
            waiter = client.get_waiter("cluster_active")
            waiter.wait(name=cluster_name)
            # creating kubeconfig
            cluster = client.describe_cluster(name=cluster_name)
            cluster_cert = cluster["cluster"]["certificateAuthority"]["data"]
            cluster_ep = cluster["cluster"]["endpoint"]
            cluster_config = {
                "apiVersion": "v1",
                "kind": "Config",
                "clusters": [{
                    "cluster": {
                        "server": str(cluster_ep),
                        "certificate-authority-data": str(cluster_cert),
                    },
                    "name": "kubernetes",
                }],
                "contexts": [{
                    "context": {
                        "cluster": "kubernetes",
                        "user": "******",
                    },
                    "name": "aws",
                }],
                "current-context": "aws",
                "preferences": {},
                "users": [{
                    "name": "aws",
                    "user": {
                        "exec": {
                            "apiVersion": "client.authentication.k8s.io/v1alpha1",
                            "command": "heptio-authenticator-aws",
                            "args": ["token", "-i", cluster_name],
                        }
                    },
                }],
            }
            # Write in YAML.
            config_text = yaml.dump(cluster_config, default_flow_style=False)
            config_file = NamedTemporaryFile(delete=True)
            config_file.write(config_text.encode())
            ChallengeEvaluationCluster.objects.create(
                challenge=challenge_obj,
                name=cluster_name,
                cluster_endpoint=cluster_ep,
                cluster_ssl=cluster_cert,
            )
            # Creating nodegroup
            create_eks_nodegroup.delay(challenge, cluster_name)
            return response
        except ClientError as e:
            logger.exception(e)
            return
image = ImageImporter(self.activity.current_image())
try:
    image.get_image()
except Exception, e:
    log(
        "Error trying to fetch image to project. Image URL: {extra}",
        dict(rsr_id=self.activity.rsr_id(),
             internal_id=self.activity.internal_id(),
             iati_id=self.activity.iati_id(),
             event=ERROR_IMAGE_UPLOAD,
             extra=self.activity.current_image()))
if image.image:
    filename = model_and_instance_based_filename(
        'Project', self.project.pk, 'current_image', image.filename)
    image_temp = NamedTemporaryFile(delete=True)
    image_temp.write(image.image)
    image_temp.flush()
    self.project.current_image.save(filename, File(image_temp), save=True)
    log(
        "Save project image: {extra}",
        dict(rsr_id=self.activity.rsr_id(),
             internal_id=self.activity.internal_id(),
             iati_id=self.activity.iati_id(),
             event=ACTION_SET_IMAGE,
             extra=filename))
else:
    log(
        "No image found for project: {rsr_id}",
def field_image_from_link(instance, field_name, image_name, image_url):
    img_temp = NamedTemporaryFile()
    img_temp.write(urlopen(image_url).read())
    img_temp.flush()
    getattr(instance, field_name).save(image_name + '.jpg', File(img_temp))
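# A usage sketch for field_image_from_link above (hedged): Author and its
# ImageField "portrait" are hypothetical stand-ins. Because the helper
# resolves the field by name with getattr, any model/field pair works.
author = Author.objects.get(pk=1)
field_image_from_link(author, 'portrait', 'jane-doe',
                      'https://example.com/media/jane-doe.jpg')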
def post(self, request, *args, **kwargs):
    token_header = request.META.get('HTTP_AUTHORIZATION')
    auth_token = re.sub(r'Token', '', token_header)
    auth_token = auth_token.strip()
    attempt = request.FILES['attempt']
    extension = request.POST['extension']
    expected_sound = request.POST['expected_sound']
    expected_tone = request.POST['expected_tone']
    is_native_text = request.POST.get('is_native', 'false')
    is_native = False if is_native_text.lower() == 'false' else True
    user = Token.objects.get(key=auth_token).user
    s = PinyinSyllable.objects.get(sound=expected_sound, tone=expected_tone)
    rs = RecordedSyllable(native=is_native, user=user, syllable=s,
                          file_extension=extension)
    original_path = rs.create_audio_path('original')
    attempt_data = attempt.read()
    m = md5()
    m.update(attempt_data)
    attempt_md5 = m.hexdigest()
    attempt_claimed_md5 = request.POST.get('attempt_md5', '')
    logger.info('MD5 Claimed: {}, Actual: {}'.format(attempt_claimed_md5,
                                                     attempt_md5))
    with open(original_path, 'wb') as f:
        f.write(attempt_data)
    rs.audio_original = original_path
    mp3_path = rs.create_audio_path('mp3')
    convert_file_format(original_path, mp3_path)
    rs.audio_mp3 = mp3_path
    rs.save()
    with open(original_path, 'rb') as original:
        with NamedTemporaryFile(suffix='.wav') as normalized:
            normalize_pipeline(original_path, normalized.name)
            sample_rate, wave_data = scipy.io.wavfile.read(normalized.name)
    # --- Deal with sample that's too short to accurately analyze
    # minimum length (seconds)
    min_length = 0.15
    attempt_length = len(wave_data) / sample_rate
    logger.info('Attempt length {}{}: {}'.format(
        expected_sound, expected_tone, attempt_length))
    if attempt_length < min_length:
        tone = None
    else:
        sample_characteristics = generate_all_characteristics(wave_data,
                                                              sample_rate)
        tr = ToneRecognizer()
        tone = tr.get_tone(sample_characteristics)
    mp3_filename = os.path.basename(mp3_path)
    mp3_url_path = os.path.join(settings.MEDIA_URL,
                                settings.SYLLABLE_AUDIO_DIR, mp3_filename)
    result = {
        'status': True,
        'tone': tone,
        'attempt_path': mp3_url_path,
    }
    json_result = json.dumps(result)
    logger.debug(json_result)
    resp = HttpResponse(json_result)
    return resp
def handle(self, *args, **options):
    help = 'automatiseer het toevoegen van fotos aan een product'
    if settings.DEV:
        '''
        THIS IS THE DEV METHOD
        ###################################################################
        '''
        path = os.path.join(settings.MEDIA_ROOT + '/custom_image_list')
        for index, product in enumerate(Product.objects.all()):
            product_code = product.upc
            image_path = path + '/' + str(product_code) + '.jpg'
            try:
                img = open(image_path, 'r')
            except IOError:
                print('IO ERROR')
                continue
            # image exists!
            extended_path = 'file://' + image_path
            name = str(product_code) + '.jpg'
            img_temp = NamedTemporaryFile(delete=True)
            img_temp.write(urlopen(extended_path).read())
            img_temp.flush()
            new = ProductImage(product=product)
            try:
                new.original.save(name, File(img_temp), True)
            except:
                continue
            new.save()
            print('SAVING')
        self.stdout.write('--Het is gefixt!--')
    else:
        path = 'https://dsshop.s3.eu-central-1.amazonaws.com/media/custom_image_list'
        for index, product in enumerate(Product.objects.all()):
            product_code = product.upc
            file_name = str(product_code) + '.JPG'
            image_path = path + '/' + file_name
            try:
                myfile = urlopen(image_path)
            except:
                self.stdout.write('--ERROR: file bestaat wsl niet-- %s' % product_code)
                continue
            image_content = ContentFile(requests.get(image_path).content)
            new_image = ProductImage(product=product)
            self.stdout.write('VERDER: %s' % product_code)
            try:
                new_image.original.save(file_name, image_content)
            except IntegrityError:
                self.stdout.write('--DUPLICATE KEY--')
                continue
            except DjangoIntegrityError:
                self.stdout.write('--DUPLICATE KEY-- van django')
                continue
            new_image.save()
            self.stdout.write('--SAVING--')
        self.stdout.write('--Het is gefixt!--')
def process_locale_dir(self, locale_dir, files):
    """
    Extract translatable literals from the specified files, creating or
    updating the POT file for a given locale directory.

    Use the xgettext GNU gettext utility.
    """
    build_files = []
    for translatable in files:
        if self.verbosity > 1:
            self.stdout.write("processing file %s in %s" % (
                translatable.file, translatable.dirpath))
        if self.domain not in ("djangojs", "django"):
            continue
        build_file = self.build_file_class(self, self.domain, translatable)
        try:
            build_file.preprocess()
        except UnicodeDecodeError as e:
            self.stdout.write(
                "UnicodeDecodeError: skipped file %s in %s (reason: %s)" % (
                    translatable.file,
                    translatable.dirpath,
                    e,
                ))
            continue
        except BaseException:
            # Cleanup before exit.
            for build_file in build_files:
                build_file.cleanup()
            raise
        build_files.append(build_file)

    if self.domain == "djangojs":
        is_templatized = build_file.is_templatized
        args = [
            "xgettext",
            "-d", self.domain,
            "--language=%s" % ("C" if is_templatized else "JavaScript",),
            "--keyword=gettext_noop",
            "--keyword=gettext_lazy",
            "--keyword=ngettext_lazy:1,2",
            "--keyword=pgettext:1c,2",
            "--keyword=npgettext:1c,2,3",
            "--output=-",
        ]
    elif self.domain == "django":
        args = [
            "xgettext",
            "-d", self.domain,
            "--language=Python",
            "--keyword=gettext_noop",
            "--keyword=gettext_lazy",
            "--keyword=ngettext_lazy:1,2",
            "--keyword=pgettext:1c,2",
            "--keyword=npgettext:1c,2,3",
            "--keyword=pgettext_lazy:1c,2",
            "--keyword=npgettext_lazy:1c,2,3",
            "--output=-",
        ]
    else:
        return

    input_files = [bf.work_path for bf in build_files]
    with NamedTemporaryFile(mode="w+") as input_files_list:
        input_files_list.write("\n".join(input_files))
        input_files_list.flush()
        args.extend(["--files-from", input_files_list.name])
        args.extend(self.xgettext_options)
        msgs, errors, status = popen_wrapper(args)

    if errors:
        if status != STATUS_OK:
            for build_file in build_files:
                build_file.cleanup()
            raise CommandError(
                "errors happened while running xgettext on %s\n%s"
                % ("\n".join(input_files), errors))
        elif self.verbosity > 0:
            # Print warnings
            self.stdout.write(errors)

    if msgs:
        if locale_dir is NO_LOCALE_DIR:
            for build_file in build_files:
                build_file.cleanup()
            file_path = os.path.normpath(build_files[0].path)
            raise CommandError(
                "Unable to find a locale path to store translations for "
                "file %s. Make sure the 'locale' directory exists in an "
                "app or LOCALE_PATHS setting is set." % file_path)
        for build_file in build_files:
            msgs = build_file.postprocess_messages(msgs)
        potfile = os.path.join(locale_dir, "%s.pot" % self.domain)
        write_pot_file(potfile, msgs)

    for build_file in build_files:
        build_file.cleanup()
def generate_token(self, instance, invitation_url):
    # token imports
    from PIL import Image, ImageFont, ImageDraw
    from django.core.files.temp import NamedTemporaryFile
    from django.core.files import File
    import urllib2
    from urlparse import urlparse, urlunparse

    _, root_url = get_site()

    def stamp(image, text, offset):
        f = ImageFont.load_default()
        txt_img = Image.new('RGBA', f.getsize(text))
        d = ImageDraw.Draw(txt_img)
        d.text((0, 0), text, font=f, fill="#888")
        exp_img_r = txt_img.rotate(0, expand=1)
        iw, ih = image.size
        tw, th = txt_img.size
        x = iw / 2 - tw / 2
        y = ih / 2 - th / 2
        image.paste(exp_img_r, (x, y + offset), exp_img_r)
        return offset + th

    # normalize static url
    r_parse = urlparse(root_url, 'http')
    s_parse = urlparse(settings.STATIC_URL, 'http')
    s_parts = (s_parse.scheme, s_parse.netloc or r_parse.netloc, s_parse.path,
               s_parse.params, s_parse.query, s_parse.fragment)
    static_url = urlunparse(s_parts)
    # open base token image
    img_url = static_url + 'notification/img/token-invite.png'
    temp_img = NamedTemporaryFile()
    temp_img.write(urllib2.urlopen(img_url).read())
    temp_img.flush()
    image = Image.open(temp_img.name)
    # stamp expiration date
    delta = datetime.timedelta(days=settings.ACCOUNT_INVITATION_DAYS)
    expiration_date = instance.date_invited + delta
    exp_text = expiration_date.strftime("%x")
    stamp(image, exp_text, 18)
    # stamp recipient name
    if instance.recipient[1]:
        offset = stamp(image, instance.recipient[1], -16)
        if instance.recipient[2]:
            offset = stamp(image, instance.recipient[2], offset)
    image.save(temp_img.name, "PNG", quality=95)
    if not default_storage.exists('tokens/%s.png' % instance.key):
        default_storage.save('tokens/%s.png' % instance.key, File(temp_img))
    get_token_url = root_url + reverse('invitation_token',
                                       kwargs={'key': instance.key})
    token_html = ''.join(['<a style="display: inline-block;" href="',
                          invitation_url,
                          '"><img width="100" height="100" class="token"',
                          ' src="', get_token_url,
                          '" alt="invitation token"></a>'])
    return token_html
def create_export_tarball(course_module, course_key, context):
    """
    Generates the export tarball, or returns None if there was an error.

    Updates the context with any error information if applicable.
    """
    name = course_module.url_name
    export_file = NamedTemporaryFile(prefix=name + '.', suffix=".tar.gz")
    root_dir = path(mkdtemp())

    try:
        if isinstance(course_key, LibraryLocator):
            export_library_to_xml(modulestore(), contentstore(), course_key, root_dir, name)
        else:
            export_course_to_xml(modulestore(), contentstore(), course_module.id, root_dir, name)

        logging.debug(u'tar file being generated at %s', export_file.name)
        with tarfile.open(name=export_file.name, mode='w:gz') as tar_file:
            tar_file.add(root_dir / name, arcname=name)
    except SerializationError as exc:
        log.exception(u'There was an error exporting %s', course_key)
        unit = None
        failed_item = None
        parent = None
        try:
            failed_item = modulestore().get_item(exc.location)
            parent_loc = modulestore().get_parent_location(failed_item.location)
            if parent_loc is not None:
                parent = modulestore().get_item(parent_loc)
                if parent.location.category == 'vertical':
                    unit = parent
        except:  # pylint: disable=bare-except
            # if we have a nested exception, then we'll show the more generic error message
            pass
        context.update({
            'in_err': True,
            'raw_err_msg': str(exc),
            'failed_module': failed_item,
            'unit': unit,
            'edit_unit_url': reverse_usage_url("container_handler", parent.location) if parent else "",
        })
        raise
    except Exception as exc:
        log.exception('There was an error exporting %s', course_key)
        context.update({'in_err': True, 'unit': None, 'raw_err_msg': str(exc)})
        raise
    finally:
        shutil.rmtree(root_dir / name)

    return export_file
"error": "We failed to reach a server, reason: %s" % e.reason }, context_instance=RequestContext(request)) elif hasattr(e, "code"): return render_to_response("error.html", { "error": "The remote server couldn't fulfill the request, HTTP error code %s" % e.code }, context_instance=RequestContext( request)) # Store temp file. url_temp = NamedTemporaryFile(delete=True) url_temp.write(url.read()) url_temp.flush() # Convert to File object. url_file = File(url_temp).name # Check content type. mime = magic.Magic(mime=True) content_type = mime.from_file(url_file) if not check_allowed_content(content_type): return render_to_response( "error.html", {"error": "File type not supported"}, context_instance=RequestContext(request)) # Create analysis task.
def input(self, **kwargs):
    encoding = self.default_encoding
    options = dict(self.options)

    if self.infile is None and "{infile}" in self.command:
        # create temporary input file if needed
        if self.filename is None:
            self.infile = NamedTemporaryFile(mode='wb')
            self.infile.write(self.content.encode(encoding))
            self.infile.flush()
            options["infile"] = self.infile.name
        else:
            # we use source file directly, which may be encoded using
            # something different than utf8. If that's the case file will
            # be included with charset="something" html attribute and
            # charset will be available as filter's charset attribute
            encoding = self.charset  # or self.default_encoding
            self.infile = open(self.filename)
            options["infile"] = self.filename

    if "{outfile}" in self.command and "outfile" not in options:
        # create temporary output file if needed
        ext = self.type and ".%s" % self.type or ""
        self.outfile = NamedTemporaryFile(mode='r+', suffix=ext)
        options["outfile"] = self.outfile.name

    # Quote infile and outfile for spaces etc.
    if "infile" in options:
        options["infile"] = shell_quote(options["infile"])
    if "outfile" in options:
        options["outfile"] = shell_quote(options["outfile"])

    try:
        command = self.command.format(**options)
        proc = subprocess.Popen(command, shell=True, cwd=self.cwd,
                                stdout=self.stdout, stdin=self.stdin,
                                stderr=self.stderr)
        if self.infile is None:
            # if infile is None then send content to process' stdin
            filtered, err = proc.communicate(self.content.encode(encoding))
        else:
            filtered, err = proc.communicate()
        filtered, err = filtered.decode(encoding), err.decode(encoding)
    except (IOError, OSError) as e:
        raise FilterError('Unable to apply %s (%r): %s' %
                          (self.__class__.__name__, self.command, e))
    else:
        if proc.wait() != 0:
            # command failed, raise FilterError exception
            if not err:
                err = ('Unable to apply %s (%s)' %
                       (self.__class__.__name__, self.command))
                if filtered:
                    err += '\n%s' % filtered
            raise FilterError(err)
        if self.verbose:
            self.logger.debug(err)
        outfile_path = options.get('outfile')
        if outfile_path:
            with io.open(outfile_path, 'r', encoding=encoding) as file:
                filtered = file.read()
    finally:
        if self.infile is not None:
            self.infile.close()
        if self.outfile is not None:
            self.outfile.close()
    return smart_text(filtered)
def generate_export(export_type, extension, username, id_string,
                    export_id=None, filter_query=None, group_delimiter='/',
                    split_select_multiples=True, binary_select_multiples=False,
                    sync_to_gsuit=False, user=None):
    """
    Create appropriate export object given the export type
    """
    time.sleep(5)
    export_type_func_map = {
        Export.XLS_EXPORT: 'to_xls_export',
        Export.CSV_EXPORT: 'to_flat_csv_export',
        Export.CSV_ZIP_EXPORT: 'to_zipped_csv',
        Export.SAV_ZIP_EXPORT: 'to_zipped_sav',
        Export.ANALYSER_EXPORT: 'to_analyser_export'
    }
    xform = XForm.objects.get(
        user__username__iexact=username, id_string__exact=id_string)

    # query mongo for the cursor
    records = query_mongo(username, id_string, filter_query)

    export_builder = ExportBuilder()
    export_builder.GROUP_DELIMITER = group_delimiter
    export_builder.SPLIT_SELECT_MULTIPLES = split_select_multiples
    export_builder.BINARY_SELECT_MULTIPLES = binary_select_multiples

    __version__ = "0"
    try:
        __version__ = filter_query['$and'][0]['__version__']
    except Exception as e:
        print(str(e))
    if __version__:
        survey = build_survey_from_history(xform, __version__)
        if not survey:
            export_builder.set_survey(xform.data_dictionary().survey)
        else:
            export_builder.set_survey(survey)
    else:
        export_builder.set_survey(xform.data_dictionary().survey)

    prefix = slugify('{}_export__{}__{}'.format(export_type, username, id_string))
    temp_file = NamedTemporaryFile(prefix=prefix, suffix=("." + extension))

    # get the export function by export type
    func = getattr(export_builder, export_type_func_map[export_type])
    func.__call__(temp_file.name, records, username, id_string, filter_query)

    # generate filename
    basename = "%s_%s" % (
        id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    if export_type == Export.ANALYSER_EXPORT:
        # Analyser exports should be distinguished by more than just their file extension.
        basename = '{}_ANALYSER_{}'.format(
            id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
    filename = basename + "." + extension

    # check filename is unique
    while not Export.is_filename_unique(xform, filename):
        filename = increment_index_in_filename(filename)

    file_path = os.path.join(username, 'exports', id_string, export_type, filename)

    # TODO: if s3 storage, make private - how will we protect local storage??
    storage = get_storage_class()()
    print sync_to_gsuit, 'file_url--------->', temp_file, filter_query
    try:
        if sync_to_gsuit == True and '__version__' not in filter_query['$and'][0]:
            if not os.path.exists("media/forms/"):
                os.makedirs("media/forms/")
            temporarylocation = "media/forms/submissions_{}.xls".format(id_string)
            import shutil
            shutil.copy(temp_file.name, temporarylocation)
            fxf_form = FieldSightXF.objects.get(pk=filter_query['$and'][0]['fs_project_uuid'])
            upload_to_drive(temporarylocation, str(fxf_form.id) + '_' + id_string,
                            None, fxf_form.project, user)
            os.remove(temporarylocation)
    except Exception as e:
        print e.__dict__

    # seek to the beginning as required by storage classes
    temp_file.seek(0)
    export_filename = storage.save(file_path, File(temp_file, file_path))
    dir_name, basename = os.path.split(export_filename)
    temp_file.close()

    # get or create export object
    if export_id:
        export = Export.objects.get(id=export_id)
    else:
        fsxf = filter_query.values()[0]
        # print("fsxf", fsxf)
        export = Export(xform=xform, export_type=export_type, fsxf_id=fsxf)

    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    # dont persist exports that have a filter
    if filter_query is None:
        export.save()
    export.save()
    return export
def process_locale_dir(self, locale_dir, files):
    """
    Extract translatable literals from the specified files, creating or
    updating the POT file for a given locale directory.

    Use the xgettext GNU gettext utility.
    """
    build_files = []
    for translatable in files:
        if self.verbosity > 1:
            self.stdout.write('processing file %s in %s\n' % (
                translatable.file, translatable.dirpath
            ))
        if self.domain not in ('djangojs', 'django'):
            continue
        build_file = self.build_file_class(self, self.domain, translatable)
        try:
            build_file.preprocess()
        except UnicodeDecodeError as e:
            self.stdout.write(
                'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (
                    translatable.file, translatable.dirpath, e,
                )
            )
            continue
        build_files.append(build_file)

    if self.domain == 'djangojs':
        is_templatized = build_file.is_templatized
        args = [
            'xgettext',
            '-d', self.domain,
            '--language=%s' % ('C' if is_templatized else 'JavaScript',),
            '--keyword=gettext_noop',
            '--keyword=gettext_lazy',
            '--keyword=ngettext_lazy:1,2',
            '--keyword=pgettext:1c,2',
            '--keyword=npgettext:1c,2,3',
            '--output=-',
        ]
    elif self.domain == 'django':
        args = [
            'xgettext',
            '-d', self.domain,
            '--language=Python',
            '--keyword=gettext_noop',
            '--keyword=gettext_lazy',
            '--keyword=ngettext_lazy:1,2',
            '--keyword=ugettext_noop',
            '--keyword=ugettext_lazy',
            '--keyword=ungettext_lazy:1,2',
            '--keyword=pgettext:1c,2',
            '--keyword=npgettext:1c,2,3',
            '--keyword=pgettext_lazy:1c,2',
            '--keyword=npgettext_lazy:1c,2,3',
            '--output=-',
        ]
    else:
        return

    input_files = [bf.work_path for bf in build_files]
    with NamedTemporaryFile(mode='w+') as input_files_list:
        input_files_list.write('\n'.join(input_files))
        input_files_list.flush()
        args.extend(['--files-from', input_files_list.name])
        args.extend(self.xgettext_options)
        msgs, errors, status = popen_wrapper(args)

    if errors:
        if status != STATUS_OK:
            for build_file in build_files:
                build_file.cleanup()
            raise CommandError(
                'errors happened while running xgettext on %s\n%s' %
                ('\n'.join(input_files), errors)
            )
        elif self.verbosity > 0:
            # Print warnings
            self.stdout.write(errors)

    if msgs:
        if locale_dir is NO_LOCALE_DIR:
            file_path = os.path.normpath(build_files[0].path)
            raise CommandError(
                'Unable to find a locale path to store translations for '
                'file %s' % file_path
            )
        for build_file in build_files:
            msgs = build_file.postprocess_messages(msgs)
        potfile = os.path.join(locale_dir, '%s.pot' % self.domain)
        write_pot_file(potfile, msgs)

    for build_file in build_files:
        build_file.cleanup()
def get_media_temp(self, url):
    img_temp = NamedTemporaryFile(delete=True)
    img_temp.write(urlopen(url).read())
    img_temp.flush()
    return img_temp
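A typical caller wraps the returned handle in django.core.files.File and hands it to a FileField/ImageField, as several snippets in this collection do. A minimal sketch; the article object and its image field are hypothetical stand-ins:

from django.core.files import File

img_temp = self.get_media_temp("https://example.com/photo.jpg")
# `article` and its `image` field are hypothetical.
article.image.save("photo.jpg", File(img_temp), save=True)
# delete=True means the temp file disappears once the handle is closed,
# which is safe here because Django has already copied the content.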
def google_products(self, request):
    def prettify_xml(elem):
        """
        Return a pretty-printed XML string for the Element.
        """
        rough_string = tostring(elem)
        reparsed = minidom.parseString(rough_string)
        return reparsed.toprettyxml(indent='\t').encode('utf-8', 'replace')

    products = get_product_model().objects.filter(feed_google=True)

    root = Element('rss')
    root.attrib['xmlns:g'] = 'http://base.google.com/ns/1.0'
    root.attrib['version'] = '2.0'
    channel = SubElement(root, 'channel')

    title = SubElement(channel, 'title')
    title.text = request.settings.name
    link = SubElement(channel, 'link')
    link.text = settings.DOMAIN_NAME
    description = SubElement(channel, 'description')

    for p in products:
        # availability
        if p.is_available and not p.pre_order:
            txt_availability = 'in stock'
        elif p.pre_order:
            txt_availability = 'preorder'
        else:
            txt_availability = 'out of stock'

        # determine delivery charge by placing the product onto the basket
        basket = Basket()
        basket.add_item(p, None, 1)
        delivery_charge = basket.delivery

        # determine feed item attributes
        txt_id = unicode(p.id)
        txt_title = clean_unicode(p.title).strip()
        txt_link = p.get_absolute_url()
        txt_description = text_from_html(p.description, 5000)
        txt_condition = 'new'
        txt_price = '%.2f GBP' % p.price
        txt_google_category = p.category.google_product_category \
            if p.category and p.category.google_product_category else None
        txt_category = p.category.get_taxonomy_path() if p.category else None
        txt_country = 'GB'
        txt_delivery_price = '%s %s' % (delivery_charge, 'GBP')
        txt_barcode = p.barcode.strip() if p.barcode else None
        txt_part_number = p.part_number.strip() if p.part_number else None
        txt_brand = p.get_brand_title()

        # create item
        item = SubElement(channel, 'item')

        # id
        _id = SubElement(item, 'g:id')
        _id.text = txt_id

        # title
        title = SubElement(item, 'title')
        title.text = txt_title

        # link/url
        link = SubElement(item, 'link')
        link.text = txt_link

        # main text
        description = SubElement(item, 'description')
        description.text = txt_description

        # condition
        condition = SubElement(item, 'g:condition')
        condition.text = txt_condition

        # price
        price = SubElement(item, 'g:price')
        price.text = txt_price

        # availability
        availability = SubElement(item, 'g:availability')
        availability.text = txt_availability

        # google shopping category
        if txt_google_category:
            gcategory = SubElement(item, 'g:google_product_category')
            gcategory.text = txt_google_category

        # product type
        if txt_category:
            category = SubElement(item, 'g:product_type')
            category.text = txt_category

        # shipping
        shipping = SubElement(item, 'g:shipping')

        # country
        country = SubElement(shipping, 'g:country')
        country.text = txt_country

        # delivery price
        delivery_price = SubElement(shipping, 'g:price')
        delivery_price.text = txt_delivery_price

        # barcode, must be a valid UPC-A (GTIN-12), EAN/JAN (GTIN-13)
        # or GTIN-14, so we need to have at least 12 characters.
        if txt_barcode:
            gtin = SubElement(item, 'g:gtin')
            gtin.text = txt_barcode

        # part number
        if txt_part_number:
            _mpn = SubElement(item, 'g:mpn')
            _mpn.text = txt_part_number

        # brand
        if txt_brand:
            brand = SubElement(item, 'g:brand')
            brand.text = txt_brand

        # image
        if p.image:
            image = SubElement(item, 'g:image_link')
            image.text = p.image.large_url

        # additional images
        if len(p.gallery) > 0:
            for m in p.gallery[:10]:
                additional_image_link = SubElement(item, 'g:additional_image_link')
                additional_image_link.text = m.large_url

    # get temp. filename
    f = NamedTemporaryFile(delete=False)
    tmp_filename = f.name
    f.close()

    # create tmp file (utf-8)
    f = open(tmp_filename, 'w+b')
    f.write(prettify_xml(root))
    f.seek(0)

    # send response
    filename = 'google_products_%s.xml' % datetime.date.today().strftime('%d_%m_%Y')
    response = HttpResponse(FileWrapper(f), content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    return response
def restore_documents(restore_file: io.BytesIO, user: User, skip_ocr=False):
    restore_file.seek(0)

    with tarfile.open(fileobj=restore_file, mode="r") as restore_archive:
        backup_json = restore_archive.extractfile('backup.json')
        backup_info = json.load(backup_json)

        leading_user_in_path = False
        _user = user
        if not user:
            leading_user_in_path = True
            # user was not specified. It is assumed that
            # backup.json contains a list of users.
            # Thus recreate users first.
            for backup_user in backup_info['users']:
                user = User.objects.create(
                    username=backup_user['username'],
                    email=backup_user['email'],
                    is_active=backup_user['is_active'],
                    is_superuser=backup_user['is_superuser']
                )
                # in case --include-user-password switch was used
                # update user (raw digest of) password field
                password = backup_user.get('password')
                if password:
                    user.password = password
                    user.save()

        for restore_file in restore_archive.getnames():
            if restore_file == "backup.json":
                continue
            logger.debug(f"Restoring file {restore_file}...")
            splitted_path = PurePath(restore_file).parts
            base, ext = os.path.splitext(
                remove_backup_filename_id(splitted_path[-1])
            )
            # if there is leading username, remove it.
            if leading_user_in_path:
                username = splitted_path[0]
                _user = User.objects.get(username=username)
                splitted_path = splitted_path[1:]

            if backup_info.get('documents', False):
                backup_info_documents = backup_info['documents']
            else:
                backup_info_documents = _get_json_user_documents_list(
                    backup_info, _user
                )
                leading_user_in_path = True

            for info in backup_info_documents:
                document_info = info
                if info['path'] == restore_file:
                    break

            parent = None
            # variables used only to shorten debug message
            _sp = splitted_path
            _rf = restore_file
            logger.debug(
                f"{_rf}: splitted_path={_sp} len(splitted_path)={len(_sp)}"
            )

            # we first have to create a folder structure
            if len(splitted_path) > 1:
                for folder in splitted_path[:-1]:
                    folder_object = Folder.objects.filter(
                        title=folder, user=_user
                    ).filter(parent=parent).first()
                    if folder_object is None:
                        new_folder = Folder.objects.create(
                            title=folder, parent=parent, user=_user
                        )
                        parent = new_folder
                    else:
                        parent = folder_object

            with NamedTemporaryFile("w+b", suffix=ext) as temp_output:
                logger.debug(f"Extracting {restore_file}...")
                ff = restore_archive.extractfile(restore_file)
                temp_output.write(ff.read())
                temp_output.seek(0)
                size = os.path.getsize(temp_output.name)
                page_count = get_pagecount(temp_output.name)
                if parent:
                    parent_id = parent.id
                else:
                    parent_id = None

                new_doc = Document.objects.create_document(
                    user=_user,
                    title=document_info['title'],
                    size=size,
                    lang=document_info['lang'],
                    file_name=remove_backup_filename_id(splitted_path[-1]),
                    parent_id=parent_id,
                    notes="",
                    page_count=page_count,
                    rebuild_tree=False  # speeds up 100x
                )

                tag_attributes = document_info.get('tags', [])
                for attrs in tag_attributes:
                    attrs['user'] = _user
                    tag, created = Tag.objects.get_or_create(**attrs)
                    new_doc.tags.add(tag)

                default_storage.copy_doc(
                    src=temp_output.name,
                    dst=new_doc.path().url()
                )

            if not skip_ocr:
                for page_num in range(1, page_count + 1):
                    ocr_page.apply_async(kwargs={
                        'user_id': _user.id,
                        'document_id': new_doc.id,
                        'file_name': new_doc.file_name,
                        'page_num': page_num,
                        'lang': document_info['lang']
                    })
def facebook_extra_details(backend, user, *args, **kwargs):
    """Populate user object with extra details available with Facebook API"""
    logger.debug(pprint.pformat(kwargs))
    logger.debug(backend.name)
    if backend.name != 'facebook':
        return
    social = kwargs.get('social')
    if not social:
        return

    url = FB_API_URL.format(token=social.extra_data['access_token'])
    response = requests.get(url)
    data = response.json()
    logger.debug(pprint.pformat(data))

    if not user.email and 'email' in data:
        user.email = data['email']

    if 'gender' in data:
        user.gender = {
            'male': user.MALE,
            'female': user.FEMALE
        }[data['gender']]

    # TODO: Facebook API gives you users current UTC offset as a
    # "timezone" and getting an actual timezone from it is non-trivial
    # so setting it for later
    # if 'timezone' in data:
    #     user.timezone = data['timezone']

    if not user.birthday and 'birthday' in data:
        birthday = list(map(int, data['birthday'].split('/')))
        day = month = 1
        year = 1900
        # it's either MM/DD/YYYY or MM/DD or YYYY so:
        try:
            year = birthday[-1]
            month, day = birthday[0:2]
            month, day, year = birthday
        except ValueError:
            # When there's not enough values to unpack
            pass
        user.birthday = date(year=year, month=month, day=day)

    # Getting profile and cover images
    # TODO: consider making a celery task for getting images
    # Since this is a one-time thing that happens after the user signs up,
    # we will fetch images in place for now
    if not user.img.name and 'picture' in data:
        img_url = data['picture']['data']['url']
        img_filename = img_url.split('/')[-1]
        img_filename = img_filename.split('?', 1)[0]
        img_temp = NamedTemporaryFile(delete=True)
        img_temp.write(requests.get(img_url).content)
        img_temp.flush()
        user.img.save(img_filename, File(img_temp))

    if not user.cover.name and 'cover' in data:
        cover_url = data['cover']['source']
        cover_filename = cover_url.split('/')[-1]
        cover_filename = cover_filename.split('?', 1)[0]
        cover_temp = NamedTemporaryFile(delete=True)
        cover_temp.write(requests.get(cover_url).content)
        cover_temp.flush()
        user.cover.save(cover_filename, File(cover_temp))

    user.save()
def handle(self, **options):
    if not settings.MEDIA_ROOT:
        raise ImproperlyConfigured('Please set MEDIA_ROOT in your settings file')

    remote_url = options['remote_url']

    try:
        val = URLValidator()
        val(remote_url)
    except ValidationError:
        raise CommandError('Please enter a valid URL')

    exercise_api = "{0}/api/v2/exercise/?limit=999&status=2"
    image_api = "{0}/api/v2/exerciseimage/?exercise={1}"
    thumbnail_api = "{0}/api/v2/exerciseimage/{1}/thumbnails/"
    headers = {'User-agent': default_user_agent('wger/{} + requests'.format(get_version()))}

    # Get all exercises
    result = requests.get(exercise_api.format(remote_url), headers=headers).json()
    for exercise_json in result['results']:
        exercise_name = exercise_json['name']
        exercise_uuid = exercise_json['uuid']
        exercise_id = exercise_json['id']

        self.stdout.write('')
        self.stdout.write("*** Processing {0} (ID: {1}, UUID: {2})".format(
            exercise_name, exercise_id, exercise_uuid))

        try:
            exercise = Exercise.objects.get(uuid=exercise_uuid)
        except Exercise.DoesNotExist:
            self.stdout.write('    Remote exercise not found in local DB, skipping...')
            continue

        # Get all images
        images = requests.get(image_api.format(remote_url, exercise_id), headers=headers).json()
        if images['count']:
            for image_json in images['results']:
                image_id = image_json['id']
                result = requests.get(thumbnail_api.format(remote_url, image_id),
                                      headers=headers).json()

                image_name = os.path.basename(result['original'])
                self.stdout.write('    Fetching image {0} - {1}'.format(image_id, image_name))

                try:
                    image = ExerciseImage.objects.get(pk=image_id)
                    self.stdout.write('    --> Image already present locally, skipping...')
                    continue
                except ExerciseImage.DoesNotExist:
                    self.stdout.write('    --> Image not found in local DB, creating now...')
                    image = ExerciseImage()
                    image.pk = image_id

                # Save the downloaded image, see link for details
                # http://stackoverflow.com/questions/1308386/programmatically-saving-image-to-
                retrieved_image = requests.get(result['original'], headers=headers)
                img_temp = NamedTemporaryFile(delete=True)
                img_temp.write(retrieved_image.content)
                img_temp.flush()

                image.exercise = exercise
                image.is_main = image_json['is_main']
                image.status = image_json['status']
                image.image.save(
                    os.path.basename(image_name),
                    File(img_temp),
                )
                image.save()
        else:
            self.stdout.write('    No images for this exercise, nothing to do')
i = 0
# looping through each tweet, creating a new "Tweet" object, and saving it
for tweet in tweet_list:
    # creating the "Tweet" object
    image_url = tweet.get('user').get('profile_image_url_https')
    # getting the username and tweet text out of the tweet from the tweet list
    twitter_user = tweet.get('user').get('screen_name')
    tweet_text = tweet.get('text')
    # setting the user, text and search term attributes on the "Tweet" object
    new_tweet = Tweet.objects.create(text=unidecode(tweet_text))
    new_tweet.user = twitter_user
    new_tweet.search = SEARCH_TERM
    # creating a temporary 'in memory' file to stream the twitter image into
    temp_image = NamedTemporaryFile(delete=True)
    # getting the image file/data from the twitter server
    image_link = requests.get(image_url)
    # writing the file/data from the twitter server to the temporary file
    temp_image.write(image_link.content)
    # naming the file
    filename = "tweetimage_%s.jpg" % i
    # saving the "Tweet" object to the database
    new_tweet.image.save(filename, File(temp_image))
    i += 1  # was `i + 1`, which discarded the result and never advanced the counter
class PugCompilerFilter(CompilerFilter):
    binary = './node_modules/pug-cli/index.js'
    args = '-c -D'
    command = "{binary} {infile} {args} -o {outfile}"
    options = (
        ("binary", binary),
        ("args", args),
    )

    def input(self, **kwargs):
        encoding = self.default_encoding
        options = dict(self.options)
        relative_path = self.filename.split('static/templates/')[1][:-4]
        basename = os.path.basename(self.filename)[:-3]

        if self.infile is None and "{infile}" in self.command:
            # we use source file directly, which may be encoded using
            # something different than utf8. If that's the case file will
            # be included with charset="something" html attribute and
            # charset will be available as filter's charset attribute
            encoding = self.charset  # or self.default_encoding
            self.infile = open(self.filename)
            options["infile"] = self.filename

        if "{outfile}" in self.command and "outfile" not in options:
            # create temporary output file if needed
            ext = self.type and ".%s" % self.type or ""
            self.outfile = NamedTemporaryFile(mode='r+', suffix=ext)
            options["outfile"] = os.path.dirname(self.outfile.name)

        # Quote infile and outfile for spaces etc.
        if "infile" in options:
            options["infile"] = shell_quote(options["infile"])
        if "outfile" in options:
            options["outfile"] = shell_quote(options["outfile"])

        try:
            command = self.command.format(**options)
            proc = subprocess.Popen(command, shell=True, cwd=self.cwd,
                                    stdout=self.stdout, stdin=self.stdin,
                                    stderr=self.stderr)
            if self.infile is None:
                # if infile is None then send content to process' stdin
                filtered, err = proc.communicate(self.content.encode(encoding))
            else:
                filtered, err = proc.communicate()
            filtered, err = filtered.decode(encoding), err.decode(encoding)
        except (IOError, OSError) as e:
            raise FilterError('Unable to apply %s (%r): %s' %
                              (self.__class__.__name__, self.command, e))
        else:
            if proc.wait() != 0:
                # command failed, raise FilterError exception
                if not err:
                    err = ('Unable to apply %s (%s)' %
                           (self.__class__.__name__, self.command))
                    if filtered:
                        err += '\n%s' % filtered
                raise FilterError(err)
            if self.verbose:
                self.logger.debug(err)
            outfile_path = '{}/{}js'.format(options.get('outfile'), basename)
            if outfile_path:
                with io.open(outfile_path, 'r', encoding=encoding) as file:
                    filtered = file.read()
                filtered = '{}window.templates["{}"] = {};'.format(
                    'window.templates = window.templates || {};',
                    relative_path,
                    filtered,
                )
        finally:
            if self.infile is not None:
                self.infile.close()
            if self.outfile is not None:
                self.outfile.close()
        return smart_text(filtered)
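For a CompilerFilter subclass like this to run, django-compressor must map a mimetype to it via the COMPRESS_PRECOMPILERS setting; a plausible wiring, where the 'text/pug' mimetype string and the dotted module path are assumptions:

# settings.py
COMPRESS_PRECOMPILERS = (
    # Mimetype and module path are illustrative; match whatever type your
    # {% compress %} blocks declare and wherever this filter class lives.
    ('text/pug', 'myapp.filters.PugCompilerFilter'),
)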
def generate_export(export_type, xform, export_id=None, options=None):
    """
    Create appropriate export object given the export type.

    param: export_type
    param: xform
    params: export_id: ID of export object associated with the request
    param: options: additional parameters required for the lookup.
        binary_select_multiples: boolean flag
        end: end offset
        ext: export extension type
        dataview_pk: dataview pk
        group_delimiter: "/" or "."
        query: filter_query for custom queries
        remove_group_name: boolean flag
        split_select_multiples: boolean flag
        index_tag: ('[', ']') or ('_', '_')
        show_choice_labels: boolean flag
        language: language labels as in the XLSForm/XForm
    """
    username = xform.user.username
    id_string = xform.id_string
    end = options.get("end")
    extension = options.get("extension", export_type)
    filter_query = options.get("query")
    remove_group_name = options.get("remove_group_name", False)
    start = options.get("start")

    export_type_func_map = {
        Export.XLS_EXPORT: 'to_xls_export',
        Export.CSV_EXPORT: 'to_flat_csv_export',
        Export.CSV_ZIP_EXPORT: 'to_zipped_csv',
        Export.SAV_ZIP_EXPORT: 'to_zipped_sav',
        Export.GOOGLE_SHEETS_EXPORT: 'to_google_sheets',
    }

    if xform is None:
        xform = XForm.objects.get(
            user__username__iexact=username, id_string__iexact=id_string)

    dataview = None
    if options.get("dataview_pk"):
        dataview = DataView.objects.get(pk=options.get("dataview_pk"))
        records = dataview.query_data(dataview, all_data=True,
                                      filter_query=filter_query)
        total_records = dataview.query_data(dataview,
                                            count=True)[0].get('count')
    else:
        records = query_data(xform, query=filter_query, start=start, end=end)
        if filter_query:
            total_records = query_data(xform, query=filter_query, start=start,
                                       end=end, count=True)[0].get('count')
        else:
            total_records = xform.num_of_submissions

    if isinstance(records, QuerySet):
        records = records.iterator()

    export_builder = ExportBuilder()
    export_builder.TRUNCATE_GROUP_TITLE = True \
        if export_type == Export.SAV_ZIP_EXPORT else remove_group_name
    export_builder.GROUP_DELIMITER = options.get(
        "group_delimiter", DEFAULT_GROUP_DELIMITER
    )
    export_builder.SPLIT_SELECT_MULTIPLES = options.get(
        "split_select_multiples", True
    )
    export_builder.BINARY_SELECT_MULTIPLES = options.get(
        "binary_select_multiples", False
    )
    export_builder.INCLUDE_LABELS = options.get('include_labels', False)
    export_builder.INCLUDE_LABELS_ONLY = options.get(
        'include_labels_only', False
    )
    export_builder.INCLUDE_HXL = options.get('include_hxl', False)
    export_builder.INCLUDE_IMAGES = options.get(
        "include_images", settings.EXPORT_WITH_IMAGE_DEFAULT)
    export_builder.VALUE_SELECT_MULTIPLES = options.get(
        'value_select_multiples', False)
    export_builder.REPEAT_INDEX_TAGS = options.get(
        "repeat_index_tags", DEFAULT_INDEX_TAGS
    )
    export_builder.SHOW_CHOICE_LABELS = options.get('show_choice_labels', False)
    export_builder.language = options.get('language')

    # 'win_excel_utf8' is only relevant for CSV exports
    if 'win_excel_utf8' in options and export_type != Export.CSV_EXPORT:
        del options['win_excel_utf8']

    export_builder.set_survey(xform.survey, xform)

    temp_file = NamedTemporaryFile(suffix=("." + extension))

    columns_with_hxl = export_builder.INCLUDE_HXL and get_columns_with_hxl(
        xform.survey_elements)

    # get the export function by export type
    func = getattr(export_builder, export_type_func_map[export_type])
    try:
        func.__call__(
            temp_file.name, records, username, id_string, filter_query,
            start=start, end=end, dataview=dataview, xform=xform,
            options=options, columns_with_hxl=columns_with_hxl,
            total_records=total_records
        )
    except NoRecordsFoundError:
        pass
    except SPSSIOError as e:
        export = get_or_create_export(export_id, xform, export_type, options)
        export.error_message = str(e)
        export.internal_status = Export.FAILED
        export.save()
        report_exception("SAV Export Failure", e, sys.exc_info())
        return export

    # generate filename
    basename = "%s_%s" % (
        id_string, datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f"))

    if remove_group_name:
        # add 'remove group name' flag to filename
        basename = "{}-{}".format(basename, GROUPNAME_REMOVED_FLAG)
    if dataview:
        basename = "{}-{}".format(basename, DATAVIEW_EXPORT)

    filename = basename + "." + extension

    # check filename is unique
    while not Export.is_filename_unique(xform, filename):
        filename = increment_index_in_filename(filename)

    file_path = os.path.join(username, 'exports', id_string, export_type, filename)

    # seek to the beginning as required by storage classes
    temp_file.seek(0)
    export_filename = default_storage.save(file_path, File(temp_file, file_path))
    temp_file.close()

    dir_name, basename = os.path.split(export_filename)

    # get or create export object
    export = get_or_create_export(export_id, xform, export_type, options)

    export.filedir = dir_name
    export.filename = basename
    export.internal_status = Export.SUCCESSFUL
    # do not persist exports that have a filter
    # Get URL of the exported sheet.
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        export.export_url = export_builder.url

    # if we should create a new export is true, we should not save it
    if start is None and end is None:
        export.save()
    return export
def add_thumbnails(video_id, in_w, in_h, folder):
    if DEBUG:
        print "ADD THUMBNAILS"
    video = Pod.objects.get(id=video_id)
    video.encoding_status = "ADD THUMBNAILS"
    video.save()

    tempfile = NamedTemporaryFile()
    scale = get_scale(in_w, in_h, DEFAULT_THUMBNAIL_OUT_SIZE_HEIGHT)
    thumbnails = int(video.duration / 3)
    com = ADD_THUMBNAILS_CMD % {
        'ffmpeg': FFMPEG,
        'src': video.video.path,
        'thumbnail': thumbnails,
        'scale': scale,
        'out': tempfile.name,
        'num': "%d"
    }
    if DEBUG:
        print "%s" % com
    thumbresult = commands.getoutput(com)

    output = "\n\nTHUMBNAILS"
    output += 80 * "~"
    output += "\n"
    output += thumbresult
    output += "\n"
    output += 80 * "~"
    f = open(os.path.join(settings.MEDIA_ROOT, VIDEOS_DIR,
                          video.owner.username, "%s" % video.id,
                          "encode.log"), 'w')
    f.write(output)
    output = ""
    f.close()

    video = None
    video = Pod.objects.get(id=video_id)
    for i in range(2, 5):
        if os.access("%s_%s.png" % (tempfile.name, i), os.F_OK):
            if DEBUG:
                print "THUMBNAILS %s" % i
            upc_image, created = Image.objects.get_or_create(
                folder=folder, name="%d_%s.png" % (video.id, i))
            upc_image.file.save("%d_%s.png" % (video.id, i),
                                File(open("%s_%s.png" % (tempfile.name, i))),
                                save=True)
            upc_image.owner = video.owner
            upc_image.save()
            try:
                os.remove("%s_%s.png" % (tempfile.name, i))
            except:
                pass
            if i == 2:
                video.thumbnail = upc_image
        else:
            msg = "\n [add_thumbnails] error accessing %s_%s.png" % (
                tempfile.name, i)
            log.error(msg)
            addInfoVideo(video, msg)
            send_email(msg, video)
    video.save()
    try:
        os.remove("%s_1.png" % (tempfile.name))
        os.remove("%s_5.png" % (tempfile.name))
    except:
        pass
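ADD_THUMBNAILS_CMD is defined elsewhere in the project; judging by the placeholders it is fed (ffmpeg binary, source path, thumbnail batch size, scale, output prefix, and a literal "%d"), it is an ffmpeg template roughly like the sketch below. This reconstruction is an assumption, not the project's actual constant:

# Hypothetical reconstruction: pick a representative frame from every
# `thumbnail` frames, scale it, and emit numbered PNGs; the "%d" passed
# as `num` yields <tempfile>_1.png .. <tempfile>_5.png as the caller expects.
ADD_THUMBNAILS_CMD = ('%(ffmpeg)s -i "%(src)s" '
                      '-vf "thumbnail=%(thumbnail)s,scale=%(scale)s" '
                      '-frames:v 5 -vsync vfr "%(out)s_%(num)s.png"')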
def import_entries(self, feed_entries):
    """
    Import entries.
    """
    for feed_entry in feed_entries:
        self.write_out('> %s... ' % feed_entry.title)
        if feed_entry.get('published_parsed'):
            creation_date = datetime(*feed_entry.published_parsed[:6])
            if settings.USE_TZ:
                creation_date = timezone.make_aware(
                    creation_date, timezone.utc)
        else:
            creation_date = timezone.now()
        slug = slugify(feed_entry.title)[:255]

        if Entry.objects.filter(creation_date__year=creation_date.year,
                                creation_date__month=creation_date.month,
                                creation_date__day=creation_date.day,
                                slug=slug):
            self.write_out(self.style.NOTICE('SKIPPED (already imported)\n'))
            continue

        categories = self.import_categories(feed_entry)
        entry_dict = {
            'title': feed_entry.title[:255],
            'content': feed_entry.description,
            'excerpt': strip_tags(feed_entry.get('summary')),
            'status': PUBLISHED,
            'creation_date': creation_date,
            'start_publication': creation_date,
            'last_update': timezone.now(),
            'slug': slug
        }

        if not entry_dict['excerpt'] and self.auto_excerpt:
            entry_dict['excerpt'] = Truncator(
                strip_tags(feed_entry.description)).words(50)
        if self.tags:
            entry_dict['tags'] = self.import_tags(categories)

        entry = Entry(**entry_dict)
        entry.save()
        entry.categories.add(*categories)
        entry.sites.add(self.SITE)

        if self.image_enclosure:
            for enclosure in feed_entry.enclosures:
                if ('image' in enclosure.get('type')
                        and enclosure.get('href')):
                    img_tmp = NamedTemporaryFile(delete=True)
                    img_tmp.write(urlopen(enclosure['href']).read())
                    img_tmp.flush()
                    entry.image.save(os.path.basename(enclosure['href']),
                                     File(img_tmp))
                    break

        if self.default_author:
            entry.authors.add(self.default_author)
        elif feed_entry.get('author_detail'):
            try:
                author = Author.objects.create_user(
                    slugify(feed_entry.author_detail.get('name')),
                    feed_entry.author_detail.get('email', ''))
            except IntegrityError:
                author = Author.objects.get(
                    username=slugify(feed_entry.author_detail.get('name')))
            entry.authors.add(author)

        self.write_out(self.style.ITEM('OK\n'))
def classify_api(request):
    data = {"success": False}
    clean_directory()

    if request.method == "POST":
        model = request.POST.get("model", None)
        if model == 'imagenet':
            tmp_f = NamedTemporaryFile()
            tmp_adver = NamedTemporaryFile()
            if request.FILES.get("image", None) is not None:
                image_request = request.FILES["image"]
                image_bytes = image_request.read()
                # NOTE: the original used `image` here before ever assigning it;
                # opening the uploaded bytes first makes this branch work.
                image = Image.open(io.BytesIO(image_bytes))
                image.save(tmp_f, image.format)
            elif request.POST.get("image64", None) is not None:
                base64_data = request.POST.get("image64", None).split(',', 1)[1]
                plain_data = base64.b64decode(base64_data)
                image = Image.open(io.BytesIO(plain_data))
                image.save(os.path.join(current_dir, 'imagenet/dataset/images/testtest.png'))
                tmp_f.write(plain_data)
                tmp_f.close()

            # Backend session for attack
            print('Building Backend Session.')
            K.set_learning_phase(0)
            sess = tf.Session()
            backend.set_session(sess)

            # Image preprocess
            print('Modifying image')
            x = np.expand_dims(preprocess(image.resize((299, 299))), axis=0)
            img_shape = [1, 299, 299, 3]
            x_input = tf.placeholder(tf.float32, shape=img_shape)

            # Define model
            d = discriminator()

            # Prediction of original image
            print('prediction of original image')
            classify_result = get_predictions(d, x, 10)

            # Select attack algorithm and iteration
            attack_algorithm = request.POST.get("attack", None)
            n = int(request.POST.get("iterate", None))

            # Start attack
            result, attack_speed = attack(attack_algorithm, n, d, x_input, x, sess)
            print("attack speed: %s seconds" % (round(attack_speed, 5)))
            print('original image:', classify_result[0][1])
            print('adversarial example is classified by', result[0][1])

            # Print image to web site
            with open(os.path.join(current_dir, 'imagenet/output/testtest.png'), 'rb') as img_file:
                img_str = base64.b64encode(img_file.read())
            tmp_adver.write(base64.b64decode(img_str))
            tmp_adver.close()

        elif model == 'mnist':
            tmp_adver = NamedTemporaryFile()
            tmp_f = NamedTemporaryFile()

            mnist_sample = int(request.POST.get("sample", None))
            mnist_target = int(request.POST.get("target", None))
            mnist_algorithm = request.POST.get("mnist_algorithm", None)
            result, attack_speed = mnist_attack_func(mnist_sample, mnist_target, mnist_algorithm)
            print("attack speed: %s seconds" % (round(attack_speed, 5)))
            print('original class:', mnist_sample, 'target class:', mnist_target)
            print('adversarial example is classified by', np.argmax(result))
            result = result.tolist()

            with open(os.path.join(current_dir, 'mnist/dataset/images/testtest.png'), 'rb') as input_file:
                input_str = base64.b64encode(input_file.read())
            tmp_f.write(base64.b64decode(input_str))
            tmp_f.close()

            with open(os.path.join(current_dir, 'mnist/output/testtest.png'), 'rb') as img_file:
                img_str = base64.b64encode(img_file.read())
            tmp_adver.write(base64.b64decode(img_str))
            tmp_adver.close()

        # Make Graph
        data["attack_speed"] = attack_speed
        data["success"] = True
        data["confidence"] = {}

        if model == 'imagenet':
            data["model"] = 'imagenet'
            for i in range(len(classify_result)):
                data["confidence"][classify_result[i][1]] = float(classify_result[i][2])
            data["adverimage"] = 'data:image/png;base64,' + img_str.decode('utf-8')
            data["adversarial"] = {}
            for i in range(len(result)):
                data["adversarial"][result[i][1]] = float(result[i][2])
                # print('iter:', i, 'name:', result[i][1], 'pred:', result[i][2])
            sess.close()
        elif model == 'mnist':
            data["model"] = 'mnist'
            for i in range(10):
                if i == mnist_sample:
                    data["confidence"][str(i)] = float(1)
                else:
                    data["confidence"][str(i)] = float(0)
            data["input_image"] = 'data:image/png;base64,' + input_str.decode('utf-8')
            data["adverimage"] = 'data:image/png;base64,' + img_str.decode('utf-8')
            data["adversarial"] = {}
            for i in range(len(result[0])):
                data["adversarial"][str(i)] = float(result[0][i])

    # Close the session
    # sess.close()
    return JsonResponse(data)