def personalizacao(request):
    """ View that allows customizing the text of the home page """
    dominio = getDominio()
    if request.user.usuario.isAdministradorDominio(dominio):
        message = ""
        if request.POST:
            texto = request.POST["textoPaginaInicial"]
            dominio.textoPaginaInicial = texto
            dominio.save()
            message = "Alterações realizadas com sucesso"
            try:
                data = request.FILES['css']
                path = default_storage.save("egressos/logica/static/logica/default.css2",
                                            ContentFile(data.read()))
            except Exception:
                print sys.exc_info()[0]
            try:
                data = request.FILES['favicon']
                path = default_storage.save("egressos/logica/static/logica/images/favicon.ico",
                                            ContentFile(data.read()))
            except Exception:
                print sys.exc_info()[0]
            try:
                data = request.FILES['logotipo']
                # NOTE: this reuses the CSS destination path above; it looks
                # like a copy-paste error and probably should point at the
                # logo file instead.
                path = default_storage.save("egressos/logica/static/logica/default.css2",
                                            ContentFile(data.read()))
            except Exception:
                print sys.exc_info()[0]
        return render_to_response("personalizacao.html",
                                  {"pagina": "administracao",
                                   "variaveis": dominio.getVariaveisExplicacao(),
                                   "marcacoes": dominio.getBBCodeExplicacacao(),
                                   "dominio": dominio,
                                   "message": message},
                                  context_instance=RequestContext(request))
    else:
        return HttpResponseRedirect("/home/")

def do_export(self):
    """ Does actual export. Called from a celery task. """
    book = Workbook()
    self.render_book(book)

    temp = NamedTemporaryFile(delete=True)
    book.save(temp)
    temp.flush()

    org_root = getattr(settings, 'SITE_ORGS_STORAGE_ROOT', 'orgs')
    filename = '%s/%d/%s/%s.xls' % (org_root, self.org_id, self.directory, random_string(20))
    default_storage.save(filename, File(temp))

    self.filename = filename
    self.save(update_fields=('filename',))

    subject = "Your export is ready"
    download_url = self.org.make_absolute_url(reverse(self.download_view, args=[self.pk]))

    send_email([self.created_by], subject, 'utils/email/export', {'download_url': download_url})

    # force a gc
    import gc
    gc.collect()

def pos_create_ec(sender, instance, **kwargs):
    establishment = instance
    isave = False

    if 'establishment/0/' in str(establishment.img_logo):
        image = ImageUpload.objects.get(upload=establishment.img_logo)
        img_path = default_storage.location + '/' + str(establishment.img_logo)
        img_logo = File(open(img_path, 'rb'))  # binary mode: this is an image file
        default_storage.delete(img_path)
        default_storage.save(img_path.replace('/0/', '/' + str(establishment.id) + '/'), img_logo)
        new_img = str(establishment.img_logo).replace('/0/', '/' + str(establishment.id) + '/')
        image.upload = new_img
        image.save()
        establishment.img_logo = new_img
        isave = True

    if 'establishment/0/' in str(establishment.img_vitrin):
        image = ImageUpload.objects.get(upload=establishment.img_vitrin)
        img_path = default_storage.location + '/' + str(establishment.img_vitrin)
        img_vitrin = File(open(img_path, 'rb'))  # binary mode: this is an image file
        default_storage.delete(img_path)
        default_storage.save(img_path.replace('/0/', '/' + str(establishment.id) + '/'), img_vitrin)
        new_img = str(establishment.img_vitrin).replace('/0/', '/' + str(establishment.id) + '/')
        image.upload = new_img
        image.save()
        establishment.img_vitrin = new_img
        isave = True

    if isave:
        establishment.save()

def get_full_filename(self, url):
    if not self._full_filename:
        driver = self.get_driver()
        driver.get(url)
        elem = driver.find_element_by_class_name("js-phone-show__insert")
        if not elem:
            return None
        elem.click()
        sleep(2)
        res = driver.execute_script("""
            var phone_imgs = document.getElementsByClassName("description__phone-img");
            var canvas = document.createElement("canvas");
            canvas.width = 102;
            // the height value was garbled in the source ("16Partner");
            // "16" is the legible part
            canvas.height = 16;
            var ctx = canvas.getContext("2d");
            ctx.drawImage(phone_imgs[0], 0, 0);
            return canvas.toDataURL("image/png").split(",")[1];
        """)
        plaindata = base64.b64decode(res)
        today = datetime.date.today()
        directory = os.path.join(self.IMAGE_ROOT, today.strftime('%d%m%Y'))
        if not os.path.exists(directory):
            os.makedirs(directory)
        filename = '%s.png' % md5.new(url).hexdigest()
        self._full_filename = os.path.join(directory, filename)
        default_storage.save(self._full_filename, ContentFile(plaindata))
        subprocess.call('convert %s -transparent "#FFFFFF" -alpha background %s'
                        % (self._full_filename, self._full_filename), shell=True)
    return self._full_filename

def upload_video(request):
    if request.method == 'POST':
        id = request.POST['id']
        member = Member.objects.get(id=id)
        label = member.recognize_label
        video_data = request.FILES['video-train']
        video = ImageFile(video_data)
        video_name = request.POST['video-filename']
        video_path = 'video/' + str(label) + "/" + video_name

        if default_storage.exists(video_path):
            default_storage.delete(video_path)
        default_storage.save(video_path, video)

        number_of_faces = face_detection.face_detect(member.recognize_label)
        member.number_of_train_images += number_of_faces

        if member.number_of_train_images > 150:
            http_response = {
                'status': 'success',
                'message': 'Get enough {0} images'.format(member.number_of_train_images)
            }
        else:
            http_response = {
                'status': 'warning',
                'message': 'Get {0} images, please add more video!'.format(member.number_of_train_images)
            }
        return JsonResponse(http_response)

def save_feed(feed_filename, feed_xml):
    path = 'feeds/{}'.format(feed_filename)
    if default_storage.exists(path):
        default_storage.delete(path)
    default_storage.save(path, ContentFile(feed_xml))

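# A note on the delete-then-save pattern above, which recurs throughout these
# snippets: Django's Storage.save() never overwrites an existing file; if the
# name is taken it stores under a modified, unique name and returns that name.
# Deleting first is what forces an in-place overwrite at a stable path. A
# minimal sketch of the same idea as a reusable helper (the overwrite_save
# name is an assumption, not from the source):
def overwrite_save(path, content):
    if default_storage.exists(path):
        default_storage.delete(path)
    return default_storage.save(path, content)
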
def __init__(self, path):
    self.path = path
    if default_storage.exists(self.path + '.tmp'):
        default_storage.delete(self.path + '.tmp')
    default_storage.save(self.path + '.tmp', BytesIO())
    self.fp = default_storage.open(self.path + '.tmp', 'w')
    self.writer = csv.writer(self.fp)

def save_to_disk(f, instance):
    """
    Takes a file object and an instance (or model).
    Returns the relative path of the saved file.
    """
    file_name = re.sub(r'[^a-z0-9._]+', '_', f.name.lower())

    # make dir with app and module name
    relative_directory = os.path.join(
        'files',
        instance._meta.app_label,
        instance._meta.module_name,
    )

    # make directory with pk
    if isinstance(instance.pk, (int, long)):
        relative_directory = os.path.join(relative_directory, unicode(instance.pk))

    default_storage.save(os.path.join(relative_directory, file_name), f)

    # absolute_directory = os.path.join(settings.MEDIA_ROOT, relative_directory)
    #
    # if not os.path.exists(absolute_directory):
    #     os.makedirs(absolute_directory)
    #
    # destination = open(os.path.join(absolute_directory, file_name), 'wb+')
    # for chunk in f.chunks():
    #     destination.write(chunk)
    # destination.close()

    # relative path
    return os.path.join(relative_directory, file_name)

def upload_assignment(self, request, suffix=''):  # pylint: disable=unused-argument, protected-access
    """
    Save a student's submission file.
    """
    require(self.upload_allowed())
    user = self.get_real_user()
    require(user)
    upload = request.params['assignment']
    sha1 = get_sha1(upload.file)
    if self.file_size_over_limit(upload.file):
        raise JsonHandlerError(
            413, 'Unable to upload file. Max size limit is {size}'.format(
                size=self.student_upload_max_size()
            )
        )
    # Uploading an assignment represents a change of state with this user in this block,
    # so we need to ensure that the user has a StudentModule record, which represents that state.
    self.get_or_create_student_module(user)
    answer = {
        "sha1": sha1,
        "filename": upload.file.name,
        "mimetype": mimetypes.guess_type(upload.file.name)[0],
        "finalized": False
    }
    student_item_dict = self.get_student_item_dict()
    submissions_api.create_submission(student_item_dict, answer)
    path = self.file_storage_path(sha1, upload.file.name)
    log.info("Saving file: %s at path: %s for user: %s", upload.file.name, path, user.username)
    if default_storage.exists(path):
        # save latest submission
        default_storage.delete(path)
    default_storage.save(path, File(upload.file))
    return Response(json_body=self.student_state())

def staff_upload_annotated(self, request, suffix=''):  # pylint: disable=unused-argument
    """
    Save annotated assignment from staff.
    """
    require(self.is_course_staff())
    upload = request.params['annotated']
    sha1 = get_sha1(upload.file)
    if self.file_size_over_limit(upload.file):
        raise JsonHandlerError(
            413, 'Unable to upload file. Max size limit is {size}'.format(
                size=self.student_upload_max_size()
            )
        )
    module = self.get_student_module(request.params['module_id'])
    state = json.loads(module.state)
    state['annotated_sha1'] = sha1
    state['annotated_filename'] = filename = upload.file.name
    state['annotated_mimetype'] = mimetypes.guess_type(upload.file.name)[0]
    state['annotated_timestamp'] = utcnow().strftime(
        DateTime.DATETIME_FORMAT
    )
    path = self.file_storage_path(sha1, filename)
    if not default_storage.exists(path):
        default_storage.save(path, File(upload.file))
    module.state = json.dumps(state)
    module.save()
    log.info(
        "staff_upload_annotated for course:%s module:%s student:%s ",
        module.course_id,
        module.module_state_key,
        module.student.username
    )
    return Response(json_body=self.staff_grading_data())

def save_thumbnail(self, filename, image):
    upload_to = 'thumbs/'
    upload_path = os.path.join(upload_to, filename)

    if storage.exists(upload_path):
        # Delete if it exists, otherwise the (FileSystemStorage) implementation
        # will create a new file with a unique name
        storage.delete(upload_path)

    storage.save(upload_path, ContentFile(image))

    url_path = os.path.join(settings.MEDIA_URL, upload_to, filename).replace('\\', '/')
    url = urljoin(settings.SITEURL, url_path)

    Link.objects.get_or_create(
        resource=self,
        url=url,
        defaults=dict(
            name='Thumbnail',
            extension='png',
            mime='image/png',
            link_type='image',
        ))

    ResourceBase.objects.filter(id=self.id).update(thumbnail_url=url)

def log_silent_post(request, payment):
    now_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # log the post
    output = """
    %s \n
    Referrer: %s \n
    Remote Address: %s \n
    Content-Type: %s \n
    User-Agent: %s \n\n
    Query-String: \n %s \n
    Remote-Addr: %s \n\n
    Remote-Host: %s \n
    Remote-User: %s \n
    Request-Method: %s \n
    """ % (now_str,
           request.META.get('HTTP_REFERER', ''),
           request.META.get('REMOTE_ADDR', ''),
           request.META.get('CONTENT_TYPE', ''),
           request.META.get('HTTP_USER_AGENT', ''),
           request.META.get('QUERY_STRING', ''),
           request.META.get('REMOTE_ADDR', ''),
           request.META.get('REMOTE_HOST', ''),
           request.META.get('REMOTE_USER', ''),
           request.META.get('REQUEST_METHOD', ''))

    log_file_name = "silentpost_%d.log" % payment.id
    log_path = os.path.join('silentposts', log_file_name)
    default_storage.save(log_path, ContentFile(output))

def _dump():
    s_time = time.time()

    from django.apps import apps
    from django.db.models import get_model

    excluded_models = set()
    for exclude in EXCLUDED_MODELS:
        app_label, model_name = exclude.split('.', 1)
        model_obj = get_model(app_label, model_name)
        excluded_models.add(model_obj)

    app_list = [(c, None) for c in apps.get_app_configs() if c.label in set(APPS_TO_HANDLE)]

    objects = []
    for model in serializers.sort_dependencies(app_list):
        if model in excluded_models:
            continue
        if not model._meta.proxy and router.allow_migrate(DEFAULT_DB_ALIAS, model):
            objects.extend(model._default_manager.using(DEFAULT_DB_ALIAS).all())

    f_name = DUMP_NAME.format(datetime.now())
    with closing(StringIO()) as compressed_data:
        # the gzip stream must be closed before the buffer is read back,
        # so the seek and save happen outside the inner with-block
        with gzip.GzipFile(filename=f_name, mode='wb', fileobj=compressed_data) as compressor:
            compressor.write(serializers.serialize('json', objects, indent=2,
                                                   use_natural_foreign_keys=True))
        compressed_data.seek(0)
        default_storage.save(DUMP_PATH + f_name + '.gz', compressed_data)

    return '%d objects exported to %s in %d seconds' % (len(objects), f_name, time.time() - s_time)

def update_data(self, data, files):
    """ Validate and update a data record with new data """
    # TODO: maybe remove old files??
    fields = self.fields()

    print "in repos/models update_data for ", self.fields()

    validated_data, valid_files = validate_and_format(fields, data, files)

    db.data.update(
        {"_id": ObjectId(data['detail_data_id'])},
        {"$set": {'data': validated_data, 'timestamp': datetime.utcnow()}})

    # Once we save the repo data, save the files to S3
    if len(valid_files.keys()) > 0:
        # If we have media data, save it to this repo's data folder
        if not settings.DEBUG:
            storage.bucket_name = settings.AWS_MEDIA_STORAGE_BUCKET_NAME

        for key in valid_files.keys():
            file_to_upload = valid_files.get(key)
            # NOTE: new_data_id is not defined anywhere in this function;
            # it was most likely meant to be data['detail_data_id'].
            s3_url = '%s/%s/%s' % (self.mongo_id, new_data_id, file_to_upload.name)

            print "in repos/models update_data going to store in s3"
            print s3_url, file_to_upload

            storage.save(s3_url, file_to_upload)

def close(self):
    self.fp.close()
    if default_storage.exists(self.path):
        default_storage.delete(self.path)
    tmp_fp = default_storage.open(self.path + '.tmp', 'r')
    default_storage.save(self.path, tmp_fp)
    default_storage.delete(self.path + '.tmp')

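# Hypothetical usage of the storage-backed CSV writer whose __init__ (earlier
# in this section) and close (above) methods are shown. The wrapping class
# name StorageCSVWriter and this driver function are illustrative
# assumptions; only the two methods themselves come from the source.
def export_users_csv(rows):
    writer = StorageCSVWriter('exports/users.csv')  # assumed class name
    try:
        writer.writer.writerow(['id', 'email'])  # header row
        for row in rows:
            writer.writer.writerow(row)
    finally:
        # swaps the finished .tmp object into place at exports/users.csv
        writer.close()
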
def migrate_export_tasks(apps, schema_editor):
    task_model = apps.get_model('flows', 'ExportFlowResultsTask')
    store = AssetType.results_export.store

    copied_task_ids = []
    failed_task_ids = []

    for task in task_model.objects.exclude(filename=None):
        identifier = task.pk
        extension = os.path.splitext(task.filename)[1][1:]
        try:
            existing_file = default_storage.open(task.filename)
            new_path = store.derive_path(task.org, identifier, extension)
            default_storage.save(new_path, existing_file)
            copied_task_ids.append(task.pk)
            task.filename = None
            task.save()
        except Exception:
            print "Unable to copy %s" % task.filename
            failed_task_ids.append(task.pk)

    # clear filename for tasks that were successfully copied so we don't try
    # to migrate them again
    task_model.objects.filter(pk__in=copied_task_ids).update(filename=None)

    if len(copied_task_ids) + len(failed_task_ids) > 0:
        print 'Copied %d export task files (%d could not be copied)' % (
            len(copied_task_ids), len(failed_task_ids))

def create_temp_file(filebytes, preset='document', ext='pdf', original_filename=None):
    """
    Create a file and store it in Django's object db temporarily for tests.

    :param filebytes: The data to be stored in the file (as bytes)
    :param preset: String identifying the format preset (defaults to ``document``)
    :param ext: File extension, omitting the initial period
    :param original_filename: Original filename (needed for exercise_images)
    :return: A dict containing the keys name (filename), data (actual bytes),
             file (StringIO obj) and db_file (File object in db) of the temp file.
    """
    fileobj = StringIO(filebytes)
    hash = hashlib.md5(filebytes)
    checksum = hash.hexdigest()
    filename = "{}.{}".format(checksum, ext)
    storage_file_path = cc.generate_object_storage_name(checksum, filename)

    # 1. Write out the file bytes on to object storage
    default_storage.save(storage_file_path, fileobj)
    assert default_storage.exists(storage_file_path)

    # 2. Get the minimum required Studio meta fields for a File object
    preset = cc.FormatPreset.objects.get(id=preset)
    file_format = cc.FileFormat.objects.get(extension=ext)
    if original_filename is None:
        original_filename = 'somefile.' + ext

    # 3. Create a File object
    db_file_obj = mixer.blend(cc.File,
                              checksum=checksum,
                              file_format=file_format,
                              preset=preset,
                              original_filename=original_filename,
                              file_on_disk=storage_file_path)

    return {'name': os.path.basename(storage_file_path),
            'data': filebytes,
            'file': fileobj,
            'db_file': db_file_obj}

def create_images(self, image_types=None):
    if image_types is None:
        image_types = self.IMAGE_TYPES
    if not image_types:
        return
    if not self.image:
        return

    # Load the original image
    if self.image.closed:
        self.image.open()
    self.image.seek(0)
    image = reset(Image.open(self.image))

    for image_type in image_types:
        image_type = unicode(image_type)
        is_square = image_type[-1].lower() == 's'
        width, height = self.get_image_size(image_type)
        if is_square:
            new_image = thumbnail(image, size=[width, height], crop='middle')
        else:
            new_image = resize(image, [width, height])
        data = Buffer()
        new_image.save(data, 'jpeg', quality=100)
        default_storage.save(self.get_upload_path(image_type), File(data))

def upload(request, image=False):
    """
    Uploads a file and sends back its URL to CKEditor.

    TODO: Validate uploads
    """
    # Get the uploaded file from request.
    upload = request.FILES['upload']

    # check for image
    if image and is_image(upload.name):
        # Verify that the file is a valid image
        backend = image_processing.get_backend()
        try:
            backend.image_verify(upload)
        except utils.NotAnImageException:
            return HttpResponse("""
                <script type='text/javascript'>
                    alert('Invalid image file.')
                    window.parent.CKEDITOR.tools.callFunction({0});
                </script>""".format(request.GET['CKEditorFuncNum']))

        # Open output file in which to store upload.
        upload_filename = get_upload_filename(upload.name, request.user, prefix='images')
        saved_path = default_storage.save(upload_filename, upload)

        if backend.should_create_thumbnail(saved_path):
            backend.create_thumbnail(saved_path)

        url = utils.get_media_url(saved_path)

        # Respond with Javascript sending ckeditor upload url.
        return HttpResponse("""
            <script type='text/javascript'>
                window.parent.CKEDITOR.tools.callFunction({0}, '{1}');
            </script>""".format(request.GET['CKEditorFuncNum'], url))

    # check for document
    elif image != True and is_document(upload.name):
        # Open output file in which to store upload.
        upload_filename = get_upload_filename(upload.name, request.user, prefix='documents')
        saved_path = default_storage.save(upload_filename, upload)

        url = utils.get_media_url(saved_path)

        # Respond with Javascript sending ckeditor upload url.
        return HttpResponse("""
            <script type='text/javascript'>
                window.parent.CKEDITOR.tools.callFunction({0}, '{1}');
            </script>""".format(request.GET['CKEditorFuncNum'], url))

    # bad file format
    else:
        return HttpResponse("""
            <script type='text/javascript'>
                alert('Invalid file format.')
                window.parent.CKEDITOR.tools.callFunction({0});
            </script>""".format(request.GET['CKEditorFuncNum']))

def create(data, file):
    post_data = {
        'chf': 'bg,s,00000000',  # Transparent background
    }
    # Update defaults with given chart data
    post_data.update(data)

    # Setup request
    req = urllib2.Request(url='http://chart.apis.google.com/chart',
                          data=urllib.urlencode(post_data))
    path = 'charts/%s' % file

    try:
        # Do the call to Google Charts
        response = urllib2.urlopen(req)
        # Delete previous chart
        if default_storage.exists(path):
            default_storage.delete(path)
        # Save new chart
        default_storage.save(path, ContentFile(response.read()))
    except HTTPError, e:
        # TODO: Do some logging
        raise e

def directory_export(request, template_name="directories/export.html"):
    """Export Directories"""
    if not request.user.profile.is_superuser:
        raise Http403

    form = DirectoryExportForm(request.POST or None)

    if request.method == "POST" and form.is_valid():
        export_fields = form.cleaned_data['export_fields']
        export_status_detail = form.cleaned_data['export_status_detail']
        identifier = int(time.time())
        temp_file_path = 'export/directories/%s_temp.csv' % identifier
        default_storage.save(temp_file_path, ContentFile(''))

        # start the process
        subprocess.Popen(["python", "manage.py",
                          "directory_export_process",
                          '--export_fields=%s' % export_fields,
                          '--export_status_detail=%s' % export_status_detail,
                          '--identifier=%s' % identifier,
                          '--user=%s' % request.user.id])

        # log an event
        EventLog.objects.log()

        return HttpResponseRedirect(reverse('directory.export_status', args=[identifier]))

    context = {'form': form}
    return render_to_response(template_name, context, RequestContext(request))

def group_members_export(request, group_slug, export_target='all'):
    """
    Export members for a specific group
    """
    group = get_object_or_404(Group, slug=group_slug)
    # if they can edit it, they can export it
    if not has_perm(request.user, 'user_groups.change_group', group):
        raise Http403

    identifier = '%s_%s' % (int(ttime.time()), request.user.id)
    file_dir = 'export/groups/'
    temp_export_path = '%sgroup_%d_%s_%s_temp.csv' % (file_dir, group.id, export_target, identifier)
    default_storage.save(temp_export_path, ContentFile(''))

    # start the process
    subprocess.Popen(["python", "manage.py",
                      "group_members_export",
                      '--group_id=%d' % group.id,
                      '--export_target=%s' % export_target,
                      '--identifier=%s' % identifier,
                      '--user_id=%s' % request.user.id])

    # log an event
    EventLog.objects.log()

    return redirect(reverse('group.members_export_status',
                            args=[group.slug, export_target, identifier]))

def handle(self, *args, **options):
    from tendenci.apps.files.models import File as tFile
    ct_user = ContentType.objects.get_for_model(User)
    tfiles = tFile.objects.filter(content_type=ct_user,
                                  object_id__isnull=False,
                                  status=True,
                                  status_detail='active')
    for tfile in tfiles:
        if default_storage.exists(tfile.file.name):
            # guess_type can return None for an unknown extension, so guard
            # against calling .startswith on None
            is_image = (mimetypes.guess_type(tfile.file.name)[0] or '').startswith('image')
            if is_image:
                [user] = User.objects.filter(id=tfile.object_id)[:1] or [None]
                if user:
                    [user_avatar] = user.avatar_set.filter(primary=True)[:1] or [None]
                    if not user_avatar:
                        avatar_path = avatar_file_path(
                            user=user,
                            filename=(tfile.file.name.split('/'))[-1])
                        # copy the file to the avatar directory
                        default_storage.save(
                            avatar_path,
                            ContentFile(default_storage.open(tfile.file.name).read()))
                        # create an avatar object for the user
                        Avatar.objects.create(
                            user=user,
                            primary=True,
                            avatar=avatar_path
                        )
                        print 'Avatar created for ', user
    print 'Done'

def save(self, delete_zip_import=True, *args, **kwargs):
    """
    If a zip file is uploaded, extract any images from it and add
    them to the gallery, before removing the zip file.
    """
    super(Gallery, self).save(*args, **kwargs)
    if self.zip_import:
        zip_file = ZipFile(self.zip_import)
        from PIL import Image
        for name in zip_file.namelist():
            data = zip_file.read(name)
            try:
                image = Image.open(StringIO(data))
                image.load()
                image = Image.open(StringIO(data))
                image.verify()
            except Exception:
                continue
            path = os.path.join(GALLERIES_UPLOAD_DIR, self.slug, name.decode("utf-8"))
            try:
                saved_path = default_storage.save(path, ContentFile(data))
            except UnicodeEncodeError:
                from warnings import warn
                warn("A file was saved that contains unicode "
                     "characters in its path, but somehow the current "
                     "locale does not support utf-8. You may need to set "
                     "'LC_ALL' to a correct value, eg: 'en_US.UTF-8'.")
                path = os.path.join(GALLERIES_UPLOAD_DIR, self.slug,
                                    unicode(name, errors="ignore"))
                saved_path = default_storage.save(path, ContentFile(data))
            self.images.add(GalleryImage(file=saved_path))
        if delete_zip_import:
            zip_file.close()
            self.zip_import.delete(save=True)

def photo_size(request, id, size, crop=False, quality=90, download=False, constrain=False):
    """
    Renders image and returns response.
    Does not use a template.
    Saves the resized image within the cache system.
    Returns 404 if image rendering fails.
    """
    if isinstance(quality, unicode) and quality.isdigit():
        quality = int(quality)

    cache_key = generate_image_cache_key(file=id,
                                         size=size,
                                         pre_key=PHOTO_PRE_KEY,
                                         crop=crop,
                                         unique_key=id,
                                         quality=quality,
                                         constrain=constrain)
    cached_image = cache.get(cache_key)
    if cached_image:
        return redirect(cached_image)

    photo = get_object_or_404(Image, id=id)
    size = [int(s) for s in size.split('x')]
    size = aspect_ratio(photo.image_dimensions(), size, constrain)

    # check permissions
    if not has_perm(request.user, 'photos.view_image', photo):
        raise Http403

    attachment = ''
    if download:
        attachment = 'attachment;'

    if not photo.image or not default_storage.exists(photo.image.name):
        raise Http404

    # gets resized image from cache or rebuild
    image = get_image(photo.image, size, PHOTO_PRE_KEY, crop=crop,
                      quality=quality, unique_key=str(photo.pk), constrain=constrain)

    # if image not rendered; quit
    if not image:
        raise Http404

    response = HttpResponse(mimetype='image/jpeg')
    response['Content-Disposition'] = '%s filename=%s' % (attachment, photo.image.file.name)
    image.save(response, "JPEG", quality=quality)

    if photo.is_public_photo() and photo.is_public_photoset():
        file_name = photo.image_filename()
        file_path = 'cached%s%s' % (request.path, file_name)
        default_storage.delete(file_path)
        default_storage.save(file_path, ContentFile(response.content))
        full_file_path = "%s%s" % (settings.MEDIA_URL, file_path)
        cache.set(cache_key, full_file_path)

        cache_group_key = "photos_cache_set.%s" % photo.pk
        cache_group_list = cache.get(cache_group_key)
        if cache_group_list is None:
            cache.set(cache_group_key, [cache_key])
        else:
            cache_group_list += [cache_key]
            cache.set(cache_group_key, cache_group_list)

    return response

def regenerate_organizer_css(organizer_id: int):
    organizer = Organizer.objects.get(pk=organizer_id)

    # main.scss
    css, checksum = compile_scss(organizer)
    fname = 'pub/{}/presale.{}.css'.format(organizer.slug, checksum[:16])
    if organizer.settings.get('presale_css_checksum', '') != checksum:
        newname = default_storage.save(fname, ContentFile(css.encode('utf-8')))
        organizer.settings.set('presale_css_file', newname)
        organizer.settings.set('presale_css_checksum', checksum)

    # widget.scss
    css, checksum = compile_scss(organizer, file='widget.scss', fonts=False)
    fname = 'pub/{}/widget.{}.css'.format(organizer.slug, checksum[:16])
    if organizer.settings.get('presale_widget_css_checksum', '') != checksum:
        newname = default_storage.save(fname, ContentFile(css.encode('utf-8')))
        organizer.settings.set('presale_widget_css_file', newname)
        organizer.settings.set('presale_widget_css_checksum', checksum)

    non_inherited_events = set(Event_SettingsStore.objects.filter(
        object__organizer=organizer, key__in=affected_keys
    ).values_list('object_id', flat=True))

    for event in organizer.events.all():
        if event.pk not in non_inherited_events:
            regenerate_css.apply_async(args=(event.pk,))

def import_from_stream(source, list):
    tmp_file = settings.MEDIA_ROOT + '/csv.csv'
    default_storage.save(tmp_file, source)
    try:
        file = open(tmp_file, 'rb')
        testReader = csv.reader(file, delimiter=';', quotechar='"')
        inserted = 0
        for row in testReader:
            if len(row) < 2:
                continue
            email = row[0].strip()
            if not email_is_valid(email):
                continue
            created = False
            try:
                contact = Contact.objects.get(email=email)
            except Contact.DoesNotExist:
                # NOTE: the original set created=True when the contact was
                # *found*, which would have counted existing contacts as
                # inserts; flagging newly created contacts matches the
                # variable names and the returned count.
                contact = Contact()
                created = True
            contact.email = email
            contact.first_name = row[1].decode('ISO-8859-1')
            contact.last_name = row[2].decode('ISO-8859-1')
            contact.save()
            if list:
                list.subscribers.add(contact)
            inserted += int(created)
        return inserted
    finally:
        file.close()
        os.remove(tmp_file)

def staff_upload_annotated(self, request, suffix=''):  # pylint: disable=unused-argument
    """
    Save annotated assignment from staff.
    """
    require(self.is_course_staff())
    upload = request.params['annotated']
    module = StudentModule.objects.get(pk=request.params['module_id'])
    state = json.loads(module.state)
    state['annotated_sha1'] = sha1 = _get_sha1(upload.file)
    state['annotated_filename'] = filename = upload.file.name
    state['annotated_mimetype'] = mimetypes.guess_type(upload.file.name)[0]
    state['annotated_timestamp'] = _now().strftime(
        DateTime.DATETIME_FORMAT
    )
    path = self._file_storage_path(sha1, filename)
    if not default_storage.exists(path):
        default_storage.save(path, File(upload.file))
    module.state = json.dumps(state)
    module.save()
    log.info(
        "staff_upload_annotated for course:%s module:%s student:%s ",
        module.course_id,
        module.module_state_key,
        module.student.username
    )
    return Response(json_body=self.staff_grading_data())

def upload_file(self, filelist, upload):
    """Saves a file to a list of files. """
    if upload.file is None:
        raise ExceptionResponse.BadRequest(
            detail='No file in body.',
            comment='The body of the request must include a file.'
        )

    upload_key = _get_key(upload.file)
    metadata = FileMetaData(
        upload.file.name,
        mimetypes.guess_type(upload.file.name)[0],
        str(_now())
    )
    filelist[upload_key] = metadata

    path = _file_storage_path(
        self.location.to_deprecated_string(),
        upload_key,
        upload.file.name
    )
    if not default_storage.exists(path):
        default_storage.save(path, File(upload.file))

    # Need to return key and metadata so staff can append it to the list.
    return (upload_key, metadata)

def backup_database(request):
    if request.method == 'POST':
        output = Popen(['which', 'mysqldump'], stdout=PIPE, close_fds=True).communicate()[0]
        mysqldump_bin = output.replace('\n', '')
        # NOTE: the original used getattr() on the DATABASES['default'] dict,
        # which always fell back to 'localhost'; dict .get() is what was intended.
        cmd = mysqldump_bin + ' -h %s --opt --compact --skip-add-locks -u %s -p%s %s' % \
            (settings.DATABASES['default'].get('HOST') or 'localhost',
             settings.DATABASES['default']['USER'],
             settings.DATABASES['default']['PASSWORD'],
             settings.DATABASES['default']['NAME'])
        pop1 = Popen(cmd.split(" "), stdout=PIPE, close_fds=True)
        pop2 = Popen(["bzip2", "-c"], stdin=pop1.stdout, stdout=PIPE, close_fds=True)
        output = pop2.communicate()[0]
        default_storage.save(
            BACKUP_DIR + "/" + datetime.today().strftime("%Y-%m-%d_%H:%M:%S") + "_db.sql.bz2",
            ContentFile(output))

    files = default_storage.listdir(BACKUP_DIR)[1]
    files.sort(reverse=True)
    return render_to_response('diagnostic/backupdb.html',
                              {'files': files},
                              context_instance=RequestContext(request))

def thumbnail(image_url, width, height, quality=95):
    """
    Given the URL to an image, resizes the image using the given width
    and height on the first time it is requested, and returns the URL
    to the new resized image. If width or height is zero then the
    original ratio is maintained.
    """
    if not image_url:
        return ""

    image_url = unquote(unicode(image_url))
    if image_url.startswith(settings.MEDIA_URL):
        image_url = image_url.replace(settings.MEDIA_URL, "", 1)
    image_dir, image_name = os.path.split(image_url)
    image_prefix, image_ext = os.path.splitext(image_name)
    filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
    thumb_name = "%s-%sx%s%s" % (image_prefix, width, height, image_ext)
    thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
                             settings.THUMBNAILS_DIR_NAME)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    thumb_path = os.path.join(thumb_dir, thumb_name)
    thumb_url = "%s/%s" % (settings.THUMBNAILS_DIR_NAME,
                           quote(thumb_name.encode("utf-8")))
    image_url_path = os.path.dirname(image_url)
    if image_url_path:
        thumb_url = "%s/%s" % (image_url_path, thumb_url)

    try:
        thumb_exists = os.path.exists(thumb_path)
    except UnicodeEncodeError:
        # The image was saved to a filesystem with utf-8 support, but
        # somehow the locale has changed and the filesystem does not
        # support utf-8.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()

    if thumb_exists:
        # Thumbnail exists, don't generate it.
        return thumb_url
    elif not default_storage.exists(image_url):
        # Requested image does not exist, just return its URL.
        return image_url

    f = default_storage.open(image_url)
    try:
        image = Image.open(f)
    except Exception:
        # Invalid image format
        return image_url

    image_info = image.info
    width = int(width)
    height = int(height)

    # If already right size, don't do anything.
    if width == image.size[0] and height == image.size[1]:
        return image_url

    # Set dimensions.
    if width == 0:
        width = image.size[0] * height / image.size[1]
    elif height == 0:
        height = image.size[1] * width / image.size[0]
    if image.mode not in ("L", "RGBA"):
        image = image.convert("RGBA")
    # Required for progressive jpgs.
    ImageFile.MAXBLOCK = image.size[0] * image.size[1]

    try:
        image = ImageOps.fit(image, (width, height), Image.ANTIALIAS)
        image = image.save(thumb_path, filetype, quality=quality, **image_info)
        # Push a remote copy of the thumbnail if MEDIA_URL is absolute.
        if "://" in settings.MEDIA_URL:
            with open(thumb_path, "r") as f:
                default_storage.save(thumb_url, File(f))
    except Exception:
        # If an error occurred, a corrupted image may have been saved,
        # so remove it, otherwise the check for it existing will just
        # return the corrupted image next time it's requested.
        try:
            os.remove(thumb_path)
        except Exception:
            pass
        return image_url
    return thumb_url

def edit_entry(title, content):
    filename = f"entries/{title}.md"
    title_and_content = f"# {title}\n{content}".encode("utf-8")
    if default_storage.exists(filename):
        default_storage.delete(filename)
    default_storage.save(filename, ContentFile(title_and_content))

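# A hypothetical companion sketch for edit_entry above: reading an entry back
# out of default_storage. The get_entry name is an illustrative assumption,
# not part of the source; it relies only on the standard storage API.
def get_entry(title):
    filename = f"entries/{title}.md"
    if not default_storage.exists(filename):
        return None
    with default_storage.open(filename) as f:
        # stored as UTF-8 bytes by edit_entry above
        return f.read().decode("utf-8")
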
def parse_replay_netstream(replay_id):
    from .models import PLATFORMS, BoostData, Goal, Player, Replay
    replay_obj = Replay.objects.get(pk=replay_id)

    try:
        if settings.DEBUG or platform == 'darwin':
            if not os.path.isfile(replay_obj.file.path):
                # Download the file.
                command = 'wget https://media.rocketleaguereplays.com/{} -qO {}'.format(
                    replay_obj.file.name,
                    replay_obj.file.path,
                )
                os.system(command)

            replay = json.loads(subprocess.check_output(
                'rattletrap-binaries/rattletrap-*-osx -i {}'.format(replay_obj.file.path),
                shell=True).decode('utf-8'))
        else:
            replay_name = replay_obj.file.name.split('/')[-1]
            command = 'wget {} -qO /tmp/{}'.format(
                replay_obj.file.url,
                replay_name,
            )
            os.system(command)

            replay = json.loads(subprocess.check_output(
                'rattletrap-binaries/rattletrap-*-linux -i /tmp/{}'.format(replay_name),
                shell=True).decode('utf-8'))

            command = 'rm /tmp/{}'.format(replay_name)
            os.system(command)
    except subprocess.CalledProcessError:
        # Parsing the file failed.
        replay_obj.processed = False
        replay_obj.save()
        return

    replay_obj, replay, header = _parse_header(replay_obj, replay)

    goals = {
        get_value(goal, 'frame'): {
            'PlayerName': get_value(goal, 'PlayerName'),
            'PlayerTeam': get_value(goal, 'PlayerTeam')
        }
        for goal in get_value(header, 'Goals', [])
    }

    last_hits = {0: None, 1: None}

    actors = {}  # All actors
    player_actors = {}  # XXX: This will be used to make the replay.save() easier.
    match_goals = {}
    teaminfo_score = {}
    goal_actors = {}
    team_data = {}
    actor_positions = {}  # The current position data for all actors. Do we need this?
    player_cars = {}  # Car -> Player actor ID mappings.
    boost_components = {}  # Archetypes.CarComponents.CarComponent_Boost objects
    ball_angular_velocity = None  # The current angular velocity of the ball.
    ball_possession = None  # The team currently in possession of the ball.
    cars_frozen = False  # Whether the cars are frozen in place (3.. 2.. 1..)
    shot_data = []  # The locations of the player and the ball when goals were scored.
    unknown_boost_data = {}  # Holding dict for boosts without player data.
    ball_actor_id = None

    location_data = []  # Used for the location JSON.
    boost_data = {}  # Used for the boost stats.
    boost_objects = []
    heatmap_data = {}
    seconds_mapping = {}  # Frame -> seconds remaining mapping.

    heatmap_json_filename = 'uploads/replay_json_files/{}.json'.format(
        replay_obj.replay_id)
    location_json_filename = 'uploads/replay_location_json_files/{}.json'.format(
        replay_obj.replay_id)

    for index, frame in enumerate(replay['content']['frames']):
        # Add an empty location list for this frame.
        location_data.append([])

        ball_hit = False
        confirmed_ball_hit = False
        ball_spawned = False

        if index in goals:
            # Get the ball position.
            ball_actor_id = list(filter(
                lambda x: actors[x]['class_name'] in ['TAGame.Ball_TA', 'TAGame.Ball_Breakout_TA'],
                actors))[0]
            ball_position = actor_positions[ball_actor_id]

            # XXX: Update this to also register the hitter?
            hit_position = last_hits[goals[index]['PlayerTeam']]

            shot_data.append({
                'player': hit_position,
                'ball': ball_position,
                'frame': index
            })

            # Reset the last hits.
            last_hits = {0: None, 1: None}

        # Handle any new actors.
        for replication in frame['replications']:
            actor_id = int(replication['actor_id']['value'])
            replication_type = list(replication['value'].keys())[0]
            value = replication['value'][replication_type]
            flattened_value = flatten_value(value)

            if replication_type == 'spawned_replication_value':
                if actor_id not in actors:
                    actors[actor_id] = value

                    if 'Engine.Pawn:PlayerReplicationInfo' in flattened_value:
                        player_actor_id = value['Engine.Pawn:PlayerReplicationInfo']['value']
                        player_cars[player_actor_id] = actor_id

                if value['class_name'] == 'TAGame.Ball_TA':
                    ball_spawned = True
                elif value['class_name'] == 'TAGame.PRI_TA':
                    player_actors[actor_id] = value
                    player_actors[actor_id]['joined'] = index
                elif value['class_name'] == 'TAGame.Team_Soccar_TA':
                    team_data[actor_id] = value['object_name'].replace('Archetypes.Teams.Team', '')

            # Handle any updates to existing actors.
            elif replication_type == 'updated_replication_value':
                if ('Engine.PlayerReplicationInfo:Team' in flattened_value and
                        not flattened_value['Engine.PlayerReplicationInfo:Team']['value']):
                    del flattened_value['Engine.PlayerReplicationInfo:Team']

                # If an actor is getting their team value nuked, store what it
                # was so we can use it later on.
                if ('Engine.PlayerReplicationInfo:Team' in flattened_value and
                        flattened_value['Engine.PlayerReplicationInfo:Team']['value'] == -1 and
                        actors[actor_id]['Engine.PlayerReplicationInfo:Team']['value'] != -1):
                    actors[actor_id]['Engine.PlayerReplicationInfo:CachedTeam'] = \
                        actors[actor_id]['Engine.PlayerReplicationInfo:Team']

                # Merge the new properties with the existing.
                if actors[actor_id] != value:
                    actors[actor_id] = {**actors[actor_id], **flattened_value}

                    if actor_id in player_actors:
                        player_actors[actor_id] = actors[actor_id]

                if 'Engine.Pawn:PlayerReplicationInfo' in flattened_value:
                    player_actor_id = flattened_value['Engine.Pawn:PlayerReplicationInfo']['value']
                    player_cars[player_actor_id] = actor_id

            # Handle removing any destroyed actors.
            elif replication_type == 'destroyed_replication_value':
                del actors[actor_id]

                if actor_id in player_actors:
                    player_actors[actor_id]['left'] = index
            else:
                raise Exception('Unhandled replication_type: {}'.format(replication_type))

        # Loop over actors which have changed in this frame.
        for replication in frame['replications']:
            actor_id = int(replication['actor_id']['value'])
            replication_type = list(replication['value'].keys())[0]
            value = replication['value'][replication_type]
            flattened_value = flatten_value(value)

            if replication_type not in ['spawned_replication_value', 'updated_replication_value']:
                continue

            # Look for any position data.
            if 'TAGame.RBActor_TA:ReplicatedRBState' in flattened_value:
                location = flattened_value['TAGame.RBActor_TA:ReplicatedRBState']['value']['location']
                rotation = flattened_value['TAGame.RBActor_TA:ReplicatedRBState']['value']['rotation']

                actor_positions[actor_id] = [location['x'], location['y'], location['z']]

                # Get the player actor id.
                real_actor_id = actor_id
                for player_actor_id, car_actor_id in player_cars.items():
                    if actor_id == car_actor_id:
                        real_actor_id = player_actor_id
                        break

                if real_actor_id == actor_id:
                    real_actor_id = 'ball'

                data_dict = {'id': real_actor_id}
                data_dict['x'] = location['x']
                data_dict['y'] = location['y']
                data_dict['z'] = location['z']
                # print(rotation)
                data_dict['roll'] = rotation['x']['value']
                data_dict['pitch'] = rotation['y']['value']
                data_dict['yaw'] = rotation['z']['value']

                location_data[index].append(data_dict)

            # If this property exists, the ball has changed possession.
            if 'TAGame.Ball_TA:HitTeamNum' in flattened_value:
                ball_hit = confirmed_ball_hit = True
                hit_team_num = flattened_value['TAGame.Ball_TA:HitTeamNum']['value']
                ball_possession = hit_team_num

                # Clean up the actor positions.
                actor_positions_copy = actor_positions.copy()
                for actor_position in actor_positions_copy:
                    found = False
                    for car in player_cars:
                        if actor_position == player_cars[car]:
                            found = True
                    if not found and actor_position != ball_actor_id:
                        del actor_positions[actor_position]

            # Store the boost data for each actor at each frame where it changes.
            if 'TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount' in flattened_value:
                boost_value = flattened_value['TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount']['value']
                assert 0 <= boost_value <= 255, 'Boost value {} is not in range 0-255.'.format(boost_value)

                if actor_id not in boost_data:
                    boost_data[actor_id] = {}

                # Sometimes we have a boost component without a reference to
                # a car. We don't want to lose that data, so stick it into a
                # holding dictionary until we can figure out who it belongs to.
                if 'TAGame.CarComponent_TA:Vehicle' not in actors[actor_id]:
                    if actor_id not in unknown_boost_data:
                        unknown_boost_data[actor_id] = {}
                    unknown_boost_data[actor_id][index] = boost_value
                else:
                    car_id = actors[actor_id]['TAGame.CarComponent_TA:Vehicle']['value']

                    # Find out which player this car belongs to.
                    try:
                        player_actor_id = [
                            player_actor_id
                            for player_actor_id, car_actor_id in player_cars.items()
                            if car_actor_id == car_id
                        ][0]

                        if player_actor_id not in boost_data:
                            boost_data[player_actor_id] = {}

                        boost_data[player_actor_id][index] = boost_value

                        # Attach any floating data (if we can).
                        if actor_id in unknown_boost_data:
                            for frame_index, boost_value in unknown_boost_data[actor_id].items():
                                boost_data[player_actor_id][frame_index] = boost_value
                            del unknown_boost_data[actor_id]
                    except IndexError:
                        pass

            # Store the mapping of frame -> clock time.
            if 'TAGame.GameEvent_Soccar_TA:SecondsRemaining' in flattened_value:
                seconds_mapping[index] = flattened_value['TAGame.GameEvent_Soccar_TA:SecondsRemaining']['value']

            # See if the cars are frozen in place.
            if 'TAGame.GameEvent_TA:ReplicatedGameStateTimeRemaining' in flattened_value:
                if flattened_value['TAGame.GameEvent_TA:ReplicatedGameStateTimeRemaining']['value'] == 3:
                    cars_frozen = True
                elif flattened_value['TAGame.GameEvent_TA:ReplicatedGameStateTimeRemaining']['value'] == 0:
                    cars_frozen = False

            # Get the camera details.
            if 'TAGame.CameraSettingsActor_TA:ProfileSettings' in flattened_value:
                if actors[actor_id]['class_name'] == 'TAGame.CameraSettingsActor_TA':
                    # Define some short variable names to stop the next line
                    # being over 200 characters long. This block of code
                    # makes new replays have a camera structure which is
                    # similar to that of the old replays - where the camera
                    # settings are directly attached to the player rather
                    # than a CameraActor (which is what the actor in this
                    # current loop is).
                    csa = 'TAGame.CameraSettingsActor_TA:PRI'
                    ps = 'TAGame.CameraSettingsActor_TA:ProfileSettings'
                    cs = 'TAGame.PRI_TA:CameraSettings'

                    if csa in flattened_value:
                        player_actor_id = flattened_value[csa]['value']
                        actors[player_actor_id][cs] = flattened_value[ps]['value']

            if 'Engine.GameReplicationInfo:ServerName' in flattened_value:
                replay_obj.server_name = flattened_value['Engine.GameReplicationInfo:ServerName']['value']

            if 'ProjectX.GRI_X:ReplicatedGamePlaylist' in flattened_value:
                replay_obj.playlist = flattened_value['ProjectX.GRI_X:ReplicatedGamePlaylist']['value']

            if 'TAGame.GameEvent_Team_TA:MaxTeamSize' in flattened_value:
                replay_obj.team_sizes = flattened_value['TAGame.GameEvent_Team_TA:MaxTeamSize']['value']

            if 'TAGame.PRI_TA:MatchGoals' in flattened_value:
                # Get the closest goal to this frame.
                mg = flattened_value['TAGame.PRI_TA:MatchGoals']
                mg_increased = False

                if mg['value'] > match_goals.get(actor_id, 0):
                    match_goals[actor_id] = mg['value']
                    mg_increased = True

                if index not in match_goals and mg_increased:
                    goal_actors[index] = actor_id
                    match_goals[actor_id] = mg['value']

            if 'Engine.TeamInfo:Score' in flattened_value:
                tis = flattened_value['Engine.TeamInfo:Score']
                tis_increased = False

                if tis['value'] > teaminfo_score.get(actor_id, 0):
                    teaminfo_score[actor_id] = tis['value']
                    tis_increased = True

                if index not in goal_actors and tis_increased:
                    goal_actors[index] = actor_id

        # Work out which direction the ball is travelling and if it has
        # changed direction or speed.
        ball = None
        ball_actor_id = None
        for actor_id, value in actors.items():
            if value['class_name'] == 'TAGame.Ball_TA':
                ball_actor_id = actor_id
                ball = value
                break

        ball_hit = False

        # Take a look at the ball this frame, has anything changed?
        if (ball and
                'TAGame.RBActor_TA:ReplicatedRBState' in ball and
                'angular_velocity' in ball['TAGame.RBActor_TA:ReplicatedRBState']['value']):
            new_ball_angular_velocity = ball['TAGame.RBActor_TA:ReplicatedRBState']['value']['angular_velocity']

            # The ball has *changed direction*, but not necessarily been hit
            # (it may have bounced).
            if ball_angular_velocity != new_ball_angular_velocity:
                ball_hit = True

            ball_angular_velocity = new_ball_angular_velocity

        # Calculate the current distances between cars and the ball.
        # Do we have position data for the ball?
        if ball_hit and not ball_spawned and ball_actor_id in actor_positions:
            # Iterate over the cars to get the players.
            lowest_distance = None
            lowest_distance_car_actor = None

            for player_id, car_actor_id in player_cars.items():
                # Get the team.
                if (player_id in actors and
                        'Engine.PlayerReplicationInfo:Team' in actors[player_id] and
                        actors[player_id]['Engine.PlayerReplicationInfo:Team']['value']):
                    team_id = actors[player_id]['Engine.PlayerReplicationInfo:Team']['value']

                    try:
                        team_actor = actors[team_id]
                        team = int(team_actor['object_name'].replace(
                            'Archetypes.Teams.Team', '').replace('GameEvent_Soccar_TA_', ''))
                    except KeyError:
                        team = -1
                else:
                    team = -1

                # Make sure this actor is on the team which is currently
                # in possession.
                if team != ball_possession:
                    continue

                if car_actor_id in actor_positions:
                    actor_distance = distance(
                        actor_positions[car_actor_id],
                        actor_positions[ball_actor_id])

                    if not confirmed_ball_hit:
                        if actor_distance > 350:
                            # Value taken from the max confirmed distance.
                            continue

                    # Get the player on this team with the lowest distance.
                    if lowest_distance is None or actor_distance < lowest_distance:
                        lowest_distance = actor_distance
                        lowest_distance_car_actor = car_actor_id

            if lowest_distance_car_actor:
                last_hits[ball_possession] = actor_positions[lowest_distance_car_actor]

        # Generate the heatmap data for this frame. Get all of the players
        # and the ball.
        if not cars_frozen:
            moveable_actors = [
                (actor_id, value)
                for actor_id, value in actors.items()
                if value['class_name'] in ['TAGame.Ball_TA', 'TAGame.PRI_TA', 'TAGame.Car_TA'] and
                ('TAGame.RBActor_TA:ReplicatedRBState' in value or 'location' in value)
            ]

            for actor_id, value in moveable_actors:
                if value['class_name'] == 'TAGame.Ball_TA':
                    actor_id = 'ball'
                elif value['class_name'] == 'TAGame.Car_TA':
                    if 'Engine.Pawn:PlayerReplicationInfo' not in value:
                        continue
                    actor_id = value['Engine.Pawn:PlayerReplicationInfo']['value']

                if 'TAGame.RBActor_TA:ReplicatedRBState' in value:
                    key = '{},{}'.format(
                        value['TAGame.RBActor_TA:ReplicatedRBState']['value']['location']['x'],
                        value['TAGame.RBActor_TA:ReplicatedRBState']['value']['location']['y'],
                    )
                elif 'location' in value:
                    key = '{},{}'.format(
                        value['location']['x'],
                        value['location']['y'],
                    )

                if actor_id not in heatmap_data:
                    heatmap_data[actor_id] = {}

                if key in heatmap_data[actor_id]:
                    heatmap_data[actor_id][key] += 1
                else:
                    heatmap_data[actor_id][key] = 1

    def get_team(actor_id):
        if actor_id == -1:
            return -1
        return int(actors[actor_id]['object_name'].replace('Archetypes.Teams.Team', ''))

    player_objects = {}

    # Make a dict of all the player actors and then do a bulk_create?
    for actor_id, value in player_actors.items():
        if 'Engine.PlayerReplicationInfo:UniqueId' in value:
            system = value['Engine.PlayerReplicationInfo:UniqueId']['value']['system_id']
            local_id = value['Engine.PlayerReplicationInfo:UniqueId']['value']['local_id']
            online_id = value['Engine.PlayerReplicationInfo:UniqueId']['value']['remote_id']

            unique_id = '{system}-{remote}-{local}'.format(
                system=system,
                remote=online_id,
                local=local_id,
            )
        else:
            system = 'Unknown'
            unique_id = ''
            online_id = ''

        team = -1
        if ('Engine.PlayerReplicationInfo:Team' in value and
                value['Engine.PlayerReplicationInfo:Team']['value']):
            team = get_team(value['Engine.PlayerReplicationInfo:Team']['value'])

        # Attempt to get the team ID from our cache.
        if team == -1 and 'Engine.PlayerReplicationInfo:CachedTeam' in value:
            team = get_team(value['Engine.PlayerReplicationInfo:CachedTeam']['value'])

        if team == -1:
            # If this is a 1v1 and the other player has a team, then put this
            # player on the opposite team.
            if len(player_actors) == 2:
                pak = list(player_actors.keys())
                other_player = player_actors[pak[(pak.index(actor_id) - 1) * -1]]

                other_team = -1
                if ('Engine.PlayerReplicationInfo:Team' in other_player and
                        other_player['Engine.PlayerReplicationInfo:Team']['value']):
                    other_team = other_player['Engine.PlayerReplicationInfo:Team']['value']

                # Attempt to get the team ID from our cache.
                if other_team == -1 and 'Engine.PlayerReplicationInfo:CachedTeam' in other_player:
                    other_team = other_player['Engine.PlayerReplicationInfo:CachedTeam']['value']

                if other_team != -1:
                    # There's nothing more we can do.
                    tdk = list(team_data.keys())
                    team_id = tdk[(tdk.index(other_team) - 1) * 1]
                    team = get_team(team_id)

                    player_actors[actor_id]['Engine.PlayerReplicationInfo:Team'] = {
                        'Type': 'FlaggedInt',
                        'Value': {
                            'Flag': True,
                            'Int': team_id,
                        }
                    }

        player_objects[actor_id] = Player.objects.create(
            replay=replay_obj,
            player_name=value['Engine.PlayerReplicationInfo:PlayerName']['value'],
            team=team,
            score=value.get('TAGame.PRI_TA:MatchScore', {'value': 0})['value'],
            goals=value.get('TAGame.PRI_TA:MatchGoals', {'value': 0})['value'],
            shots=value.get('TAGame.PRI_TA:MatchShots', {'value': 0})['value'],
            assists=value.get('TAGame.PRI_TA:MatchAssists', {'value': 0})['value'],
            saves=value.get('TAGame.PRI_TA:MatchSaves', {'value': 0})['value'],
            platform=PLATFORMS.get(system, system),
            online_id=online_id,
            bot=value.get('Engine.PlayerReplicationInfo:bBot', {'value': False})['value'],
            spectator='Engine.PlayerReplicationInfo:Team' not in value,
            actor_id=actor_id,
            unique_id=unique_id,
            camera_settings=value.get('TAGame.PRI_TA:CameraSettings', None),
            vehicle_loadout=value.get('TAGame.PRI_TA:ClientLoadout', {'value': {}})['value'],
            total_xp=value.get('TAGame.PRI_TA:TotalXP', {'value': 0})['value'],
        )

        # Store the boost data for this player.
        for boost_frame, boost_value in boost_data.get(actor_id, {}).items():
            boost_objects.append(BoostData(
                replay=replay_obj,
                player=player_objects[actor_id],
                frame=boost_frame,
                value=boost_value,
            ))

    BoostData.objects.bulk_create(boost_objects)

    # Create the goals.
    goal_objects = []
    goal_actors = OrderedDict(sorted(goal_actors.items()))

    for index, actor_id in goal_actors.items():
        # Use the player_objects dict rather than the full actors dict as
        # players who leave the game get removed from the latter.
        if actor_id in player_actors:
            goal_objects.append(Goal(
                replay=replay_obj,
                number=len(goal_objects) + 1,
                player=player_objects[actor_id],
                frame=index,
            ))
        # This actor is most likely the team object, meaning the goal was an
        # own goal scored without any of the players on the benefiting team
        # hitting the ball.
        elif actor_id in actors:
            if actors[actor_id]['class_name'] == 'TAGame.Team_Soccar_TA':
                own_goal_player, _ = Player.objects.get_or_create(
                    replay=replay_obj,
                    player_name='Unknown player (own goal?)',
                    team=get_team(actor_id),
                )

                goal_objects.append(Goal(
                    replay=replay_obj,
                    number=len(goal_objects) + 1,
                    player=own_goal_player,
                    frame=index,
                ))

    Goal.objects.bulk_create(goal_objects)

    # Generate heatmap and location JSON files.
    # Put together the heatmap file.
    replay_obj.heatmap_json_file = default_storage.save(
        heatmap_json_filename,
        ContentFile(json.dumps(heatmap_data, separators=(',', ':'))))

    # Put together the location JSON file.
    # Get rid of any boost data keys which have an empty value.
    for actor_id, data in boost_data.copy().items():
        if not data:
            del boost_data[actor_id]

    goal_data = [{
        'PlayerName': get_value(goal, 'PlayerName'),
        'PlayerTeam': get_value(goal, 'PlayerTeam'),
        'frame': get_value(goal, 'frame'),
    } for goal in get_value(header, 'Goals', [])]

    # Trim down the actors to just the information we care about.
    player_data = {
        actor_id: {
            'type': 'player',
            'join': data['joined'],
            'left': data.get('left', get_value(header, 'NumFrames')),
            'team': data['Engine.PlayerReplicationInfo:Team']['value'],
            'name': data['Engine.PlayerReplicationInfo:PlayerName']['value']
        }
        for actor_id, data in player_actors.items()
        if 'Engine.PlayerReplicationInfo:Team' in data
    }

    final_data = {
        'frame_data': location_data,
        'goals': goal_data,
        'boost': boost_data,
        'seconds_mapping': seconds_mapping,
        'actors': player_data,
        'teams': team_data,
    }

    replay_obj.location_json_file = default_storage.save(
        location_json_filename,
        ContentFile(json.dumps(final_data, separators=(',', ':'))))

    replay_obj.shot_data = shot_data
    replay_obj.processed = True
    replay_obj.show_leaderboard = True
    replay_obj.crashed_heatmap_parser = False
    replay_obj.excitement_factor = replay_obj.calculate_excitement_factor()
    replay_obj.average_rating = replay_obj.calculate_average_rating()
    replay_obj.save()

    get_storage_path = getattr(module, parts[-1])
else:
    def get_storage_path(instance, filename):
        fn = unicodedata.normalize('NFKD', force_text(filename)).encode(
            'ascii', 'ignore').decode('ascii')
        return os.path.join(PHOTOLOGUE_DIR, 'photos', fn)

# Support CACHEDIR.TAG spec for backups for ignoring cache dir.
# See http://www.brynosaurus.com/cachedir/spec.html
PHOTOLOGUE_CACHEDIRTAG = os.path.join(PHOTOLOGUE_DIR, "photos", "cache", "CACHEDIR.TAG")
if not default_storage.exists(PHOTOLOGUE_CACHEDIRTAG):
    default_storage.save(PHOTOLOGUE_CACHEDIRTAG,
                         ContentFile("Signature: 8a477f597d28d172789f06886806bc55"))

# Exif Orientation values
#     Value   0thRow    0thColumn
#       1     top       left
#       2     top       right
#       3     bottom    right
#       4     bottom    left
#       5     left      top
#       6     right     top
#       7     right     bottom
#       8     left      bottom

# Image Orientations (according to EXIF information) that need to be
# transposed and appropriate action

def _upload(self):
    with open(self.filepath, 'rb') as f:
        return default_storage.save(self.filepath, f)

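# Note on the helper above: default_storage.save() returns the name the
# storage backend actually used, which may differ from the requested name
# when that name is already taken (Django generates an alternative rather
# than overwriting). A hypothetical caller should therefore keep the
# returned value:
#
#     stored_name = uploader._upload()  # may not equal uploader.filepath
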
def thumbnail(image_url, width, height, upscale=True, quality=95, left=.5, top=.5,
              padding=False, padding_color="#fff"):
    """
    Given the URL to an image, resizes the image using the given width
    and height on the first time it is requested, and returns the URL
    to the new resized image. If width or height are zero then original
    ratio is maintained. When ``upscale`` is False, images smaller than
    the given size will not be grown to fill that size. The given width
    and height thus act as maximum dimensions.
    """

    if not image_url:
        return ""
    try:
        from PIL import Image, ImageFile, ImageOps
    except ImportError:
        return ""

    image_url = unquote(str(image_url)).split("?")[0]
    if image_url.startswith(settings.MEDIA_URL):
        image_url = image_url.replace(settings.MEDIA_URL, "", 1)
    image_dir, image_name = os.path.split(image_url)
    image_prefix, image_ext = os.path.splitext(image_name)
    filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
    thumb_name = "%s-%sx%s" % (image_prefix, width, height)
    if not upscale:
        thumb_name += "-no-upscale"
    if left != .5 or top != .5:
        left = min(1, max(0, left))
        top = min(1, max(0, top))
        thumb_name = "%s-%sx%s" % (thumb_name, left, top)
    thumb_name += "-padded-%s" % padding_color if padding else ""
    thumb_name = "%s%s" % (thumb_name, image_ext)

    # `image_name` is used here for the directory path, as each image
    # requires its own sub-directory using its own name - this is so
    # we can consistently delete all thumbnails for an individual
    # image, which is something we do in filebrowser when a new image
    # is written, allowing us to purge any previously generated
    # thumbnails that may match a new image name.
    thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
                             settings.THUMBNAILS_DIR_NAME, image_name)
    if not os.path.exists(thumb_dir):
        try:
            os.makedirs(thumb_dir)
        except OSError:
            pass

    thumb_path = os.path.join(thumb_dir, thumb_name)
    thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
                              quote(image_name.encode("utf-8")),
                              quote(thumb_name.encode("utf-8")))
    image_url_path = os.path.dirname(image_url)
    if image_url_path:
        thumb_url = "%s/%s" % (image_url_path, thumb_url)

    try:
        thumb_exists = os.path.exists(thumb_path)
    except UnicodeEncodeError:
        # The image was saved to a filesystem with utf-8 support, but
        # somehow the locale has changed and the filesystem does not
        # support utf-8.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()

    if thumb_exists:
        # Thumbnail exists, don't generate it.
        return thumb_url
    elif not default_storage.exists(image_url):
        # Requested image does not exist, just return its URL.
        return image_url

    f = default_storage.open(image_url)
    try:
        image = Image.open(f)
    except Exception:
        # Invalid image format.
        return image_url

    image_info = image.info

    # Transpose to align the image to its orientation if necessary.
    # If the image is transposed, delete the exif information as
    # not all browsers support the CSS image-orientation:
    # - http://caniuse.com/#feat=css-image-orientation
    try:
        orientation = image._getexif().get(0x0112)
    except Exception:
        orientation = None
    if orientation:
        methods = {
            2: (Image.FLIP_LEFT_RIGHT,),
            3: (Image.ROTATE_180,),
            4: (Image.FLIP_TOP_BOTTOM,),
            5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
            6: (Image.ROTATE_270,),
            7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
            8: (Image.ROTATE_90,),
        }.get(orientation, ())
        if methods:
            image_info.pop('exif', None)
            for method in methods:
                image = image.transpose(method)

    to_width = int(width)
    to_height = int(height)
    from_width = image.size[0]
    from_height = image.size[1]

    if not upscale:
        to_width = min(to_width, from_width)
        to_height = min(to_height, from_height)

    # Set dimensions.
    if to_width == 0:
        to_width = from_width * to_height // from_height
    elif to_height == 0:
        to_height = from_height * to_width // from_width
    if image.mode not in ("P", "L", "RGBA"):
        try:
            image = image.convert("RGBA")
        except Exception:
            return image_url
    # Required for progressive jpgs.
    ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)

    # Padding.
    if padding and to_width and to_height:
        from_ratio = float(from_width) / from_height
        to_ratio = float(to_width) / to_height
        pad_size = None
        if to_ratio < from_ratio:
            pad_height = int(to_height * (float(from_width) / to_width))
            pad_size = (from_width, pad_height)
            pad_top = (pad_height - from_height) // 2
            pad_left = 0
        elif to_ratio > from_ratio:
            pad_width = int(to_width * (float(from_height) / to_height))
            pad_size = (pad_width, from_height)
            pad_top = 0
            pad_left = (pad_width - from_width) // 2
        if pad_size is not None:
            pad_container = Image.new("RGBA", pad_size, padding_color)
            pad_container.paste(image, (pad_left, pad_top))
            image = pad_container

    # Create the thumbnail.
    to_size = (to_width, to_height)
    to_pos = (left, top)
    try:
        image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
        image = image.save(thumb_path, filetype, quality=quality, **image_info)
        # Push a remote copy of the thumbnail if MEDIA_URL is absolute.
        if "://" in settings.MEDIA_URL:
            with open(thumb_path, "rb") as f:
                default_storage.save(unquote(thumb_url), File(f))
    except Exception:
        # If an error occurred, a corrupted image may have been saved,
        # so remove it, otherwise the check for it existing will just
        # return the corrupted image next time it's requested.
        try:
            os.remove(thumb_path)
        except Exception:
            pass
        return image_url
    return thumb_url

def lock_import_dir():
    msg = "locked: %s" % timezone.localtime(timezone.now())
    default_storage.save(settings.ARP_IMPORT_LOCK, ContentFile(msg))
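# Companion sketch for `lock_import_dir` above: the check/release helpers one
# would typically pair with a storage-based lock marker. Both functions are
# assumptions for illustration, not part of the original code; they use only
# the standard default_storage API.
def import_dir_is_locked():
    # The import directory counts as locked while the marker file exists.
    return default_storage.exists(settings.ARP_IMPORT_LOCK)

def unlock_import_dir():
    # Releasing the lock is just deleting the marker file.
    if default_storage.exists(settings.ARP_IMPORT_LOCK):
        default_storage.delete(settings.ARP_IMPORT_LOCK)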
def _create_original(name):
    src = os.path.join(os.path.dirname(__file__), 'test_media', name)
    with open(src, 'rb') as src_file:
        orig_path = default_storage.save(os.path.join('test_images', name),
                                         ImageFile(src_file))
    return OriginalImage.objects.create(image_file=orig_path)
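# A minimal sketch of how a test could use `_create_original` above, assuming
# a standard Django TestCase; the fixture name "landscape.jpg" under
# test_media is an assumption.
from django.test import TestCase

class OriginalImageTest(TestCase):
    def test_create_original_stores_file(self):
        original = _create_original('landscape.jpg')
        # default_storage may rename the file to avoid collisions, so check
        # existence through the stored name rather than the input name.
        self.assertTrue(default_storage.exists(str(original.image_file)))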
def save_file(in_memory_file, suffix=''):
    path = default_storage.save('tmp/' + str(uuid.uuid1()) + suffix,
                                ContentFile(in_memory_file.read()))
    return os.path.join(settings.MEDIA_ROOT, path)
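# Usage sketch for `save_file` above: persist the upload to a temporary
# location, hand the absolute path to a consumer, then clean up. The view
# name, the field name and the `parse_spreadsheet` consumer are assumptions.
import os

def handle_upload(request):
    tmp_path = save_file(request.FILES['file'], suffix='.xlsx')
    try:
        result = parse_spreadsheet(tmp_path)  # hypothetical downstream step
    finally:
        # Each call embeds a fresh uuid1 in the name, so the tmp/ copy is
        # never reused and can safely be removed once processed.
        os.remove(tmp_path)
    return result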
def process_export(export_fields='all_fields', export_status_detail='',
                   identifier=u'', user_id=0):
    from tendenci.core.perms.models import TendenciBaseModel

    if export_fields == 'main_fields':
        field_list = [
            'headline', 'slug', 'summary', 'body', 'source',
            'first_name', 'last_name', 'address', 'address2',
            'city', 'state', 'zip_code', 'country', 'phone',
            'phone2', 'fax', 'email', 'email2', 'website',
            'list_type', 'requested_duration', 'activation_dt',
            'expiration_dt', 'tags', 'enclosure_url',
            'enclosure_type', 'enclosure_length', 'status',
            'status_detail']
    else:
        # base ------------
        base_field_list = [
            smart_str(field.name) for field in TendenciBaseModel._meta.fields
            if not field.__class__ == AutoField]
        field_list = [
            smart_str(field.name) for field in Directory._meta.fields
            if not field.__class__ == AutoField]
        field_list = [
            name for name in field_list if name not in base_field_list]
        field_list.remove('guid')
        # append base fields at the end
        field_list = field_list + base_field_list

    identifier = identifier or int(ttime.time())
    file_name_temp = 'export/directories/%s_temp.csv' % identifier

    with default_storage.open(file_name_temp, 'wb') as csvfile:
        csv_writer = UnicodeWriter(csvfile, encoding='utf-8')
        csv_writer.writerow(field_list)

        directories = Directory.objects.all()
        if export_status_detail:
            directories = directories.filter(
                status_detail__icontains=export_status_detail)
        for directory in directories:
            items_list = []
            for field_name in field_list:
                item = getattr(directory, field_name)
                if item is None:
                    item = ''
                if item:
                    if isinstance(item, datetime):
                        item = item.strftime('%Y-%m-%d %H:%M:%S')
                    elif isinstance(item, date):
                        item = item.strftime('%Y-%m-%d')
                    elif isinstance(item, time):
                        item = item.strftime('%H:%M:%S')
                    elif isinstance(item, basestring):
                        item = item.encode("utf-8")
                    elif field_name == 'invoice':
                        # display total vs balance
                        item = 'Total: %d / Balance: %d' % (item.total,
                                                            item.balance)
                    item = smart_str(item).decode('utf-8')
                items_list.append(item)
            csv_writer.writerow(items_list)

    # rename the file name
    file_name = 'export/directories/%s.csv' % identifier
    default_storage.save(file_name, default_storage.open(file_name_temp, 'rb'))

    # delete the temp file
    default_storage.delete(file_name_temp)

    # notify user that the export is ready to download
    [user] = User.objects.filter(pk=user_id)[:1] or [None]
    if user and user.email:
        download_url = reverse('directory.export_download', args=[identifier])
        site_url = get_setting('site', 'global', 'siteurl')
        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        parms = {
            'download_url': download_url,
            'user': user,
            'site_url': site_url,
            'site_display_name': site_display_name,
            'export_status_detail': export_status_detail,
            'export_fields': export_fields}

        subject = render_to_string(
            'directories/notices/export_ready_subject.html', parms)
        subject = subject.strip('\n').strip('\r')

        body = render_to_string(
            'directories/notices/export_ready_body.html', parms)

        email = Email(
            recipient=user.email,
            subject=subject,
            body=body)
        email.send()
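# A hedged sketch of the download side that `process_export` above implies:
# the notification email links to `directory.export_download` with the same
# identifier, so a view along these lines could stream the stored CSV back.
# The view itself is an assumption for illustration (the real project's
# version may differ); it uses only standard django.http and storage APIs.
from django.http import HttpResponse, Http404

def export_download(request, identifier):
    file_name = 'export/directories/%s.csv' % identifier
    if not default_storage.exists(file_name):
        raise Http404
    with default_storage.open(file_name, 'rb') as f:
        response = HttpResponse(f.read(), content_type='text/csv')
    response['Content-Disposition'] = (
        'attachment; filename="%s.csv"' % identifier)
    return response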
def copy_data_from(self, other):
    from . import ItemAddOn, ItemCategory, Item, Question, Quota
    from ..signals import event_copy_data

    self.plugins = other.plugins
    self.is_public = other.is_public
    self.save()

    tax_map = {}
    for t in other.tax_rules.all():
        tax_map[t.pk] = t
        t.pk = None
        t.event = self
        t.save()

    category_map = {}
    for c in ItemCategory.objects.filter(event=other):
        category_map[c.pk] = c
        c.pk = None
        c.event = self
        c.save()

    item_map = {}
    variation_map = {}
    for i in Item.objects.filter(event=other).prefetch_related('variations'):
        vars = list(i.variations.all())
        item_map[i.pk] = i
        i.pk = None
        i.event = self
        if i.picture:
            i.picture.save(i.picture.name, i.picture)
        if i.category_id:
            i.category = category_map[i.category_id]
        if i.tax_rule_id:
            i.tax_rule = tax_map[i.tax_rule_id]
        i.save()
        for v in vars:
            variation_map[v.pk] = v
            v.pk = None
            v.item = i
            v.save()

    for ia in ItemAddOn.objects.filter(
            base_item__event=other).prefetch_related(
            'base_item', 'addon_category'):
        ia.pk = None
        ia.base_item = item_map[ia.base_item.pk]
        ia.addon_category = category_map[ia.addon_category.pk]
        ia.save()

    for q in Quota.objects.filter(
            event=other, subevent__isnull=True).prefetch_related(
            'items', 'variations'):
        items = list(q.items.all())
        vars = list(q.variations.all())
        q.pk = None
        q.event = self
        q.save()
        for i in items:
            if i.pk in item_map:
                q.items.add(item_map[i.pk])
        for v in vars:
            q.variations.add(variation_map[v.pk])

    for q in Question.objects.filter(event=other).prefetch_related(
            'items', 'options'):
        items = list(q.items.all())
        opts = list(q.options.all())
        q.pk = None
        q.event = self
        q.save()
        for i in items:
            q.items.add(item_map[i.pk])
        for o in opts:
            o.pk = None
            o.question = q
            o.save()

    for s in other.settings._objects.all():
        s.object = self
        s.pk = None
        if s.value.startswith('file://'):
            fi = default_storage.open(s.value[7:], 'rb')
            nonce = get_random_string(length=8)
            fname = '%s/%s/%s.%s.%s' % (
                self.organizer.slug, self.slug, s.key, nonce,
                s.value.split('.')[-1])
            newname = default_storage.save(fname, fi)
            s.value = 'file://' + newname
            s.save()
        elif s.key == 'tax_rate_default':
            try:
                if int(s.value) in tax_map:
                    s.value = tax_map.get(int(s.value)).pk
                    s.save()
                else:
                    s.delete()
            except ValueError:
                s.delete()
        else:
            s.save()

    event_copy_data.send(sender=self, other=other)
def SaveFile(request):
    file = request.FILES['uploadedFile']
    file_name = default_storage.save(file.name, file)
    return JsonResponse(file_name, safe=False)
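# Exercise sketch for `SaveFile` above using Django's test client; the URL
# "/save-file/" is an assumption, while the field name "uploadedFile" matches
# what the view reads from request.FILES.
from django.test import Client
from django.core.files.uploadedfile import SimpleUploadedFile

def demo_save_file():
    client = Client()
    upload = SimpleUploadedFile("notes.txt", b"hello",
                                content_type="text/plain")
    resp = client.post("/save-file/", {"uploadedFile": upload})
    # The view returns the storage name as a bare JSON string, e.g.
    # "notes.txt", or a suffixed variant if that name already existed.
    return resp.json()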
def handle_config(request):
    """
    List all code snippets, or create a new snippet.
    """
    if request.method == 'POST':
        action = request.POST.get("_action")
        if validate_input(action):
            # valid containers, and running containers
            all_docker_ids = subprocess.check_output(
                common.CMD_ALL_VALID_DOCKER_ID, shell=True,
                timeout=10).decode("utf-8").strip().split('\n')
            running_docker_ids = subprocess.check_output(
                common.CMD_RUNNING_DOCKER_ID, shell=True,
                timeout=10).decode("utf-8").strip().split('\n')

            if action == 'getSchema':
                return JsonResponse({
                    "version": version,
                    "build_id": build_id,
                    "platform": platform,
                    "wifi_support": wifi_support
                }, safe=False)
            elif action == 'getIfConfigured':
                configured = get_ifconfigured()
                # queryset = BoxDetails.objects.all()
                # serializer = BoxDetailsSerializer(queryset, many=True)
                return JsonResponse({"configState": configured}, safe=False)
            elif action == 'getAllAPs':
                wifi_aps = get_allAPs()
                return JsonResponse(wifi_aps, safe=False)
            elif action == 'saveUserDetails':
                print(action)
                boxname = request.POST.get("boxname")
                username = request.POST.get("username")
                password = request.POST.get("password")
                if (validate_input(boxname) and validate_input(username)
                        and not get_ifconfigured()):
                    # lock the default root:titania account
                    subprocess.Popen(['usermod', '--lock', 'root']).wait()
                    # set boxname and user
                    set_boxname(boxname)
                    add_user(username, password)
                    # add wifi connection
                    if wifi_support:
                        wifi_pass = request.POST.get("wifi_password")
                        wifi_name = request.POST.get("wifi_ap")
                        wifi_encrpt = request.POST.get("wifi_encrpt")
                        if len(wifi_name) > 0:
                            add_newWifiConn(wifi_name, wifi_encrpt, wifi_pass)
                    return JsonResponse({"STATUS": "SUCCESS"}, safe=False)
            elif action == 'login':
                print(action)
                username = request.POST.get("username")
                password = request.POST.get("password")
                if validate_input(username):
                    output = ''
                    # Tries to authenticate a user. `output` stays empty if
                    # the authentication succeeds, else it holds the reason.
                    try:
                        enc_pwd = spwd.getspnam(username)[1]
                        if enc_pwd in ["NP", "!", "", None]:
                            output = "User '%s' has no password set" % username
                        if enc_pwd in ["LK", "*"]:
                            output = "account is locked"
                        if enc_pwd == "!!":
                            output = "password has expired"
                        # Encryption happens here: the hash is stripped from
                        # enc_pwd and the algorithm id and salt are used to
                        # encrypt the password.
                        if crypt.crypt(password, enc_pwd) == enc_pwd:
                            output = ''
                        else:
                            output = "login failed"
                    except KeyError:
                        output = "login failed"
                    if len(output) == 0:
                        # insert session code here
                        if not request.session.exists(
                                request.session.session_key):
                            request.session.create()
                        print(request.session.session_key)
                        print(get_client_ip(request))
                        setSessionRow = SessionDetails(
                            session_key=request.session.session_key,
                            username=username,
                            client_ip=get_client_ip(request))
                        setSessionRow.save()
                        return JsonResponse({
                            "username": username,
                            "session_key": request.session.session_key
                        }, safe=False)
                    else:
                        return JsonResponse(output, safe=False)

            if validate_session(request):
                if action == 'logout':
                    print(action)
                    username = request.POST.get("username")
                    if validate_input(username):
                        # delete the session rows for this user; delete()
                        # removes them directly, so no save() is needed
                        SessionDetails.objects.filter(
                            username=username).delete()
                        return JsonResponse({
                            "STATUS": "SUCCESS",
                            "username": username
                        }, safe=False)
                elif action == 'getDashboardCards':
                    print(action)
                    con = sqlite3.connect(dashboard_db)
                    cursor = con.cursor()
                    cursor.execute(common.Q_DASHBOARD_CARDS)
                    rows = cursor.fetchall()
                    return JsonResponse(rows, safe=False)
                elif action == 'getDashboardChart':
                    print(action)
                    con = sqlite3.connect(dashboard_db)
                    cursor = con.cursor()
                    # cursor.execute(common.Q_GET_CONTAINER_ID)
                    # rows = cursor.fetchall()
                    p = subprocess.check_output(common.CMD_DOCKER_MASTER,
                                                shell=True, timeout=10)
                    p = p.decode("utf-8")
                    p = p.split('\n')
                    lenofoutput = len(p)
                    finalset = []
                    for row in range(lenofoutput - 1):
                        y = p[row].split('\t')
                        cursor.execute(common.Q_GET_DASHBOARD_CHART, [y[0], ])
                        datasets = cursor.fetchall()
                        # print(datasets)
                        data = {'container_name': y[1], 'data': datasets}
                        finalset.append(data)
                    return JsonResponse(finalset, safe=False)
                elif action == 'getDockerOverview':
                    print(action)
                    con = sqlite3.connect(dashboard_db)
                    cursor = con.cursor()
                    cursor.execute(common.Q_GET_DOCKER_OVERVIEW)
                    rows = cursor.fetchall()
                    # print(rows)
                    finalset = []
                    for row in rows:
                        if row[1] in all_docker_ids:
                            data = {
                                'state': row[0],
                                'container_id': row[1],
                                'name': row[2],
                                'image': row[3],
                                'running_for': row[4],
                                'command': row[5],
                                'ports': row[6],
                                'status': row[7],
                                'networks': row[8]
                            }
                            finalset.append(data)
                    return JsonResponse(finalset, safe=False)
                elif action == 'getContainerStats':
                    print(action)
                    con = sqlite3.connect(dashboard_db)
                    cursor = con.cursor()
                    # cursor.execute(common.Q_GET_CONTAINER_ID)
                    # rows = cursor.fetchall()
                    # print(rows)
                    finalset = []
                    datasets_io = []
                    datasets_mem = []
                    datasets_perc = []
                    p = subprocess.check_output(common.CMD_DOCKER_MASTER,
                                                shell=True, timeout=10)
                    p = p.decode("utf-8")
                    p = p.split('\n')
                    lenofoutput = len(p)
                    finalset = []
                    for row in range(lenofoutput - 1):
                        y = p[row].split('\t')
                        datasets_io = []
                        datasets_mem = []
                        datasets_perc = []
                        # values with % appended to them
                        for iter in range(0, 2):
                            cursor.execute(common.Q_GET_CONTAINER_STATS_CPU,
                                           [y[0], iter + 1])
                            counter_val = cursor.fetchall()
                            datasets_perc.append(counter_val)
                        # values w/o % appended to them
                        for iter in range(2, 4):
                            cursor.execute(common.Q_GET_CONTAINER_STATS,
                                           [y[0], iter + 1])
                            counter_val = cursor.fetchall()
                            datasets_mem.append(counter_val)
                        # values w/o % appended to them
                        for iter in range(4, 8):
                            cursor.execute(common.Q_GET_CONTAINER_STATS,
                                           [y[0], iter + 1])
                            counter_val = cursor.fetchall()
                            datasets_io.append(counter_val)
                        data = {
                            'container_id': y[0],
                            'container_name': y[1],
                            'data_io': datasets_io,
                            'data_mem': datasets_mem,
                            'data_perc': datasets_perc
                        }
                        finalset.append(data)
                    return JsonResponse(finalset, safe=False)
                elif action == 'getThreads':
                    rows = []
                    ps = subprocess.Popen(
                        ['top', '-b', '-n', '1'],
                        stdout=subprocess.PIPE).communicate()[0]
                    processes = ps.decode().split('\n')
                    # this specifies the number of splits, so the split lines
                    # will have (nfields + 1) elements
                    nfields = len(processes[0].split()) - 1
                    for row in processes[4:]:
                        rows.append(row.split(None, nfields))
                    return JsonResponse(rows, safe=False)
                elif action == 'getContainerTop':
                    print(action)
                    resultset = []
                    for i in running_docker_ids:
                        data = {}
                        datasets = []
                        ps = subprocess.Popen(
                            ['docker', 'top', i],
                            stdout=subprocess.PIPE).communicate()[0]
                        processes = ps.decode().split('\n')
                        # this specifies the number of splits, so the split
                        # lines will have (nfields + 1) elements
                        nfields = len(processes[0].split()) - 1
                        for p in processes[1:]:
                            datasets.append(p.split(None, nfields))
                        data = {
                            'container_id': i[0],
                            'container_name': i[1],
                            'data': datasets
                        }
                        resultset.append(data)
                    return JsonResponse(resultset, safe=False)
                elif action == 'getSettings':
                    print(action)
                    ps = subprocess.Popen(
                        ['grep', '/etc/group', '-e', 'docker'],
                        stdout=subprocess.PIPE).communicate()[0].decode(
                            'utf-8').split('\n')[0]
                    # sample ps
                    # docker:x:992:pooja,asdasd,aaa,cow,dsds,priya,asdas,cowwwwww,ramm,asdasdasdasd,asdasdas,adam,run
                    userlist = ps.split(':')[3].split(',')
                    configuredwifi = get_allconfiguredwifi()
                    wifi_aps = get_allAPs()
                    return JsonResponse([{
                        'users': userlist,
                        'wifi': configuredwifi,
                        'allwifiaps': wifi_aps
                    }], safe=False)
                elif action == 'deleteUser':
                    print(action)
                    username = request.POST.get("username")
                    if validate_input(username):
                        ps = subprocess.Popen(
                            ['userdel', username],
                            stdout=subprocess.PIPE).communicate()
                        fetchusers = subprocess.Popen(
                            ['grep', '/etc/group', '-e', 'docker'],
                            stdout=subprocess.PIPE).communicate()[0].decode(
                                'utf-8').split('\n')[0]
                        userlist = fetchusers.split(':')[3].split(',')
                        configuredwifi = get_allconfiguredwifi()
                        wifi_aps = get_allAPs()
                        return JsonResponse([{
                            'users': userlist,
                            'wifi': configuredwifi,
                            'allwifiaps': wifi_aps,
                            'reqtype': 'deleteuser',
                            'endpoint': username
                        }], safe=False)
                elif action == 'addNewUser':
                    print(action)
                    username = request.POST.get("username")
                    password = request.POST.get("password")
                    if validate_input(username):
                        add_user(username, password)
                        fetchusers = subprocess.Popen(
                            ['grep', '/etc/group', '-e', 'docker'],
                            stdout=subprocess.PIPE).communicate()[0].decode(
                                'utf-8').split('\n')[0]
                        userlist = fetchusers.split(':')[3].split(',')
                        configuredwifi = get_allconfiguredwifi()
                        wifi_aps = get_allAPs()
                        return JsonResponse([{
                            'users': userlist,
                            'wifi': configuredwifi,
                            'allwifiaps': wifi_aps,
                            'reqtype': 'adduser',
                            'endpoint': username
                        }], safe=False)
                elif action == 'addWifi':
                    # connect to the wifi ap the user selected
                    wifi_pass = request.POST.get("wifi_password")
                    wifi_name = request.POST.get("wifi_ap")
                    wifi_encrpt = request.POST.get("wifi_encrpt")
                    if len(wifi_name) > 0:
                        add_newWifiConn(wifi_name, wifi_encrpt, wifi_pass)
                    fetchusers = ''
                    fetchusers = subprocess.Popen(
                        ['grep', '/etc/group', '-e', 'docker'],
                        stdout=subprocess.PIPE).communicate()[0].decode(
                            'utf-8').split('\n')[0]
                    userlist = fetchusers.split(':')[3].split(',')
                    configuredwifi = get_allconfiguredwifi()
                    print(configuredwifi)
                    wifi_aps = get_allAPs()
                    print(wifi_aps)
                    return JsonResponse([{
                        'users': userlist,
                        'wifi': configuredwifi,
                        'allwifiaps': wifi_aps,
                        'reqtype': 'addwifi',
                        'endpoint': wifi_name
                    }], safe=False)
                elif action == 'deleteWifi':
                    print(action)
                    # disconnect from the wifi ap the user selected
                    wifi_name = request.POST.get("wifi_ap")
                    delete_WifiConn(wifi_name)
                    fetchusers = subprocess.Popen(
                        ['grep', '/etc/group', '-e', 'docker'],
                        stdout=subprocess.PIPE).communicate()[0].decode(
                            'utf-8').split('\n')[0]
                    userlist = fetchusers.split(':')[3].split(',')
                    configuredwifi = get_allconfiguredwifi()
                    wifi_aps = get_allAPs()
                    return JsonResponse([{
                        'users': userlist,
                        'wifi': configuredwifi,
                        'allwifiaps': wifi_aps,
                        'reqtype': 'deletewifi',
                        'endpoint': wifi_name
                    }], safe=False)
                elif action == 'editWifi':
                    print(action)
                    # update the wifi ap the user selected
                    wifi_name = request.POST.get("wifi_ap")
                    wifi_pass = request.POST.get("wifi_password")
                    if validate_input(wifi_name) and validate_input(wifi_pass):
                        edit_WifiConn(wifi_name, wifi_pass)
                    fetchusers = subprocess.Popen(
                        ['grep', '/etc/group', '-e', 'docker'],
                        stdout=subprocess.PIPE).communicate()[0].decode(
                            'utf-8').split('\n')[0]
                    userlist = fetchusers.split(':')[3].split(',')
                    configuredwifi = get_allconfiguredwifi()
                    wifi_aps = get_allAPs()
                    return JsonResponse([{
                        'users': userlist,
                        'wifi': configuredwifi,
                        'allwifiaps': wifi_aps,
                        'reqtype': 'editwifi',
                        'endpoint': wifi_name
                    }], safe=False)
                elif action == 'fetchAlldApps':
                    print(action)
                    dapps_list = get_dappsdetails()
                    return JsonResponse({
                        'STATUS': 'SUCCESS',
                        'dapps_store': dapps_list
                    }, safe=False)
                elif action == 'fetchUpdatableDapps':
                    update_list = get_containerswithavailableupdate()
                    print(update_list)
                    return JsonResponse({
                        'STATUS': 'SUCCESS',
                        'update_list': update_list
                    }, safe=False)
                elif action == 'disableDapp':
                    print(action)
                    dappid = request.POST.get("id")
                    service = common.SERVICE_DISABLE.format(dappid)
                    os.system(service)
                    return JsonResponse({'STATUS': 'SUCCESS'}, safe=False)
                elif action == 'enableDapp':
                    print(action)
                    dappid = request.POST.get("id")
                    service = common.SERVICE_ENABLE.format(dappid)
                    os.system(service)
                    return JsonResponse({'STATUS': 'SUCCESS'}, safe=False)
                elif action == 'restartDapp':
                    print(action)
                    dappid = request.POST.get("id")
                    service = common.SERVICE_RESTART.format(dappid)
                    os.system(service)
                    return JsonResponse({'STATUS': 'SUCCESS'}, safe=False)
                elif action == 'removeDapp':
                    print(action)
                    # docker rm world.libertaria.nginx
                    # docker rmi libertaria/nginx:armv7
                    dappid = request.POST.get("id")
                    image = request.POST.get("image")
                    service = common.DOCKER_RM_DAPP.format(dappid, image)
                    print(service)
                    os.system(service)
                    return JsonResponse({'STATUS': 'SUCCESS'}, safe=False)
                elif action == 'downloadDapp':
                    print(action)
                    # docker pull <image>
                    # image = request.POST.get("image")
                    dappid = request.POST.get("id")
                    service = common.DAPP_DOWNLOAD.format(dappid)
                    print(service)
                    ps = subprocess.Popen(
                        service, shell=True,
                        stdout=subprocess.PIPE).communicate()[0]
                    print(ps)
                    return JsonResponse({'STATUS': 'SUCCESS'}, safe=False)
                elif action == 'updateDapp':
                    print(action)
                    dappid = request.POST.get("id")
                    service = common.SERVICE_UPDATE.format(dappid)
                    print(service)
                    ps = subprocess.Popen(
                        service, shell=True,
                        stdout=subprocess.PIPE).communicate()[0]
                    print(ps)
                    return JsonResponse({'STATUS': 'SUCCESS'}, safe=False)
                elif action == 'updateOSImage':
                    print(action)
                    data = request.FILES['file']
                    print(data)
                    if data:
                        # delete existing files before saving the new swu file
                        rm_file_regex = (settings.MEDIA_ROOT +
                                         common.SWU_FILE_FORMAT)
                        for filename in glob.glob(rm_file_regex):
                            try:
                                os.remove(filename)
                            except OSError as e:
                                # if it failed, report it back
                                print(e)
                        # save file from persistent store to /tmp
                        path = default_storage.save(data.name,
                                                    ContentFile(data.read()))
                        tmp_file = os.path.join(settings.MEDIA_ROOT, path)
                        # update call, e.g.:
                        # systemctl start swupdate@$(systemd-escape -p /tmp/titania-arm-rpi-v0.0-152-g3668500.swu).service
                        file_path = settings.MEDIA_ROOT + data.name
                        update_cmd = ('systemctl start swupdate@'
                                      '$(systemd-escape -p {}).service'
                                      ).format(file_path)
                        print(update_cmd)
                        os.system(update_cmd)
                    return JsonResponse({'STATUS': 'SUCCESS'}, safe=False)
                elif action == 'getUpdateStatus':
                    print(action)
                    image_name = request.POST.get("image_name")
                    file_path = settings.MEDIA_ROOT + image_name
                    print(action)
                    update_service = ('swupdate@$(systemd-escape -p {})'
                                      '.service').format(file_path)
                    status, data = get_updatestatus(update_service)
                    print(status)
                    print(data)
                    return JsonResponse({
                        'STATUS': status,
                        'data': data
                    }, safe=False)
                elif action == 'getNatpmpStatus':
                    print(action)
                    natpmp_status = check_ifnatpmpenabled()
                    return JsonResponse({'STATUS': natpmp_status}, safe=False)
                elif action == 'rebootSystem':
                    print(action)
                    os.system('systemd-run --on-active=1 '
                              '--timer-property=AccuracySec=100ms '
                              '/sbin/shutdown -r now')
                    return JsonResponse({'STATUS': 'SUCCESS'}, safe=False)
                return JsonResponse({'STATUS': 'FAILURE'}, safe=False)
            elif action == 'getUpdateStatus' or action == 'getNatpmpStatus':
                # TODO: come up with a better solution to handle the output
                # of these calls when there is no valid session
                return JsonResponse({'STATUS': 'FAILURE'}, safe=False)
            else:
                return JsonResponse({'STATUS': 'REDIRECT'}, status=302)
    return JsonResponse({'STATUS': 'FAILURE'}, safe=False)
def adlinks(request):
    # fetch link defaults from DB
    link_dict = request.user.company.linkdefault.ad_link_dict
    ad_templates = request.user.company.templates.all()
    ad_link_base_url = request.user.company.linkdefault.ad_base_url

    # empty dictionaries '{}' are treated as strings
    if isinstance(link_dict, str):
        link_dict = dict()
    if ad_link_base_url:
        link_dict['base_url'] = ad_link_base_url

    if (request.method == 'POST' and len(request.FILES) > 0
            and request.FILES['uploaded_file']):
        file = request.FILES['uploaded_file']
        urls = None  # to hold urls to output
        # handle extra inputs
        post_dict = request.POST.dict()
        pairs = get_key_value_pairs_from_html(post_dict)
        export_to_file = bool(request.POST.get('fileExport', False))
        # check if a template is to be used
        template_id = request.POST.get('template_name', None)
        if file.name.endswith('.csv'):
            # store the file temporarily
            path = default_storage.save(os.path.join('tmp', file.name),
                                        ContentFile(file.read()))
            urls, outfile_name = process_csv(request, path, template_id,
                                             pairs, export_to_file)
            if outfile_name:
                response = download_file(outfile_name)
                return response
        return render(request, 'index.html', {
            'urls': urls,
            'uploaded_file_url': True,
            'user': request.user,
            'ad_templates': ad_templates,
            'link_dict_items': link_dict.items()
        })
    elif request.method == 'POST':
        post_dict = request.POST.dict()
        pairs = get_key_value_pairs_from_html(post_dict)
        # check if a template is to be used
        template_id = request.POST.get('template_name', None)
        url = process_kv_only(pairs, template_id, request)
        return render(request, 'index.html', {
            'urls': [url],
            'uploaded_file_url': True,
            'user': request.user,
            'ad_templates': ad_templates,
            'link_dict_items': link_dict.items()
        })
    return render(request, 'index.html', {
        'user': request.user,
        'ad_templates': ad_templates,
        'link_dict_items': link_dict.items()
    })
def generate_token(self, instance, invitation_url):
    # token imports
    from PIL import Image, ImageFont, ImageDraw
    from django.core.files.temp import NamedTemporaryFile
    from django.core.files import File
    import urllib2
    from urlparse import urlparse, urlunparse

    _, root_url = get_site()

    def stamp(image, text, offset):
        f = ImageFont.load_default()
        txt_img = Image.new('RGBA', f.getsize(text))
        d = ImageDraw.Draw(txt_img)
        d.text((0, 0), text, font=f, fill="#888")
        exp_img_r = txt_img.rotate(0, expand=1)
        iw, ih = image.size
        tw, th = txt_img.size
        x = iw / 2 - tw / 2
        y = ih / 2 - th / 2
        image.paste(exp_img_r, (x, y + offset), exp_img_r)
        return offset + th

    # normalize the static url
    r_parse = urlparse(root_url, 'http')
    s_parse = urlparse(settings.STATIC_URL, 'http')
    s_parts = (s_parse.scheme, s_parse.netloc or r_parse.netloc,
               s_parse.path, s_parse.params, s_parse.query, s_parse.fragment)
    static_url = urlunparse(s_parts)

    # open base token image
    img_url = static_url + 'notification/img/token-invite.png'
    temp_img = NamedTemporaryFile()
    temp_img.write(urllib2.urlopen(img_url).read())
    temp_img.flush()
    image = Image.open(temp_img.name)

    # stamp expiration date
    delta = datetime.timedelta(days=settings.ACCOUNT_INVITATION_DAYS)
    expiration_date = instance.date_invited + delta
    exp_text = expiration_date.strftime("%x")
    stamp(image, exp_text, 18)

    # stamp recipient name
    if instance.recipient[1]:
        offset = stamp(image, instance.recipient[1], -16)
        if instance.recipient[2]:
            offset = stamp(image, instance.recipient[2], offset)

    image.save(temp_img.name, "PNG", quality=95)
    if not default_storage.exists('tokens/%s.png' % instance.key):
        default_storage.save('tokens/%s.png' % instance.key, File(temp_img))

    get_token_url = root_url + reverse('invitation_token',
                                       kwargs={'key': instance.key})
    token_html = ''.join([
        '<a style="display: inline-block;" href="', invitation_url,
        '"><img width="100" height="100" class="token"',
        ' src="', get_token_url, '" alt="invitation token"></a>'])
    return token_html
def save_tmp_file(myfile):
    name = myfile.name
    path = default_storage.save("tmp/" + name, ContentFile(myfile.read()))
    tmp_file = str(os.path.join(settings.MEDIA_ROOT, path))
    return tmp_file
def _save_file(request, uploaded_file):
    filename = get_upload_filename(uploaded_file.name, request.user)
    saved_path = default_storage.save(filename, uploaded_file)
    return saved_path
def process_export(group_id, export_target='all', identifier=u'', user_id=0):
    """
    Process export for group members and/or group subscribers.
    """
    [group] = Group.objects.filter(id=group_id)[:1] or [None]
    if not group:
        return

    # pull 100 rows per query
    # be careful of the memory usage
    rows_per_batch = 100

    identifier = identifier or str(time.time())
    file_dir = 'export/groups/'
    file_path_temp = '%sgroup_%d_%s_%s_temp.csv' % (
        file_dir, group.id, export_target, identifier)

    # labels
    user_fields = ['id', 'first_name', 'last_name', 'email', 'username',
                   'is_active', 'is_staff', 'is_superuser']
    profile_fields = ['direct_mail', 'company', 'department',
                      'position_title', 'address', 'address2', 'city',
                      'state', 'zipcode', 'country', 'region', 'phone',
                      'notes', 'referral_source', 'create_dt']
    labels = user_fields + profile_fields
    field_dict = OrderedDict([(label.lower().replace(" ", "_"), '')
                              for label in labels])

    with default_storage.open(file_path_temp, 'w') as csvfile:
        csv_writer = csv.DictWriter(csvfile,
                                    fieldnames=list(field_dict.keys()))
        csv_writer.writeheader()

        # process regular group members
        count_members = group.members.filter(
            group_member__status=True,
            group_member__status_detail='active').count()
        num_rows_processed = 0
        while num_rows_processed < count_members:
            users = group.members.filter(
                group_member__status=True,
                group_member__status_detail='active'
            ).select_related('profile'
            ).order_by('group_member__member_id')[
                num_rows_processed:(num_rows_processed + rows_per_batch)]
            num_rows_processed += rows_per_batch

            row_dict = field_dict.copy()
            for user in users:
                if hasattr(user, 'profile'):
                    profile = user.profile
                else:
                    profile = Profile.objects.create_profile(user)

                for field_name in user_fields:
                    if hasattr(user, field_name):
                        row_dict[field_name] = getattr(user, field_name)
                for field_name in profile_fields:
                    if hasattr(profile, field_name):
                        row_dict[field_name] = getattr(profile, field_name)

                for k, v in row_dict.items():
                    if not isinstance(v, str):
                        if isinstance(v, datetime):
                            row_dict[k] = v.strftime('%Y-%m-%d %H:%M:%S')
                        elif isinstance(v, date):
                            row_dict[k] = v.strftime('%Y-%m-%d')
                        else:
                            row_dict[k] = smart_str(v)
                csv_writer.writerow(row_dict)

    # rename the file name
    file_path = '%sgroup_%d_%s_%s.csv' % (
        file_dir, group.id, export_target, identifier)
    default_storage.save(file_path, default_storage.open(file_path_temp, 'rb'))

    # delete the temp file
    default_storage.delete(file_path_temp)

    # notify user that the export is ready to download
    [user] = User.objects.filter(id=user_id)[:1] or [None]
    if user and user.email:
        download_url = reverse('group.members_export_download',
                               args=[group.slug, export_target, identifier])
        site_url = get_setting('site', 'global', 'siteurl')
        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        parms = {
            'group': group,
            'download_url': download_url,
            'user': user,
            'site_url': site_url,
            'site_display_name': site_display_name}

        subject = render_to_string(
            template_name='user_groups/exports/export_ready_subject.html',
            context=parms)
        subject = subject.strip('\n').strip('\r')

        body = render_to_string(
            template_name='user_groups/exports/export_ready_body.html',
            context=parms)

        email = Email(
            recipient=user.email,
            subject=subject,
            body=body)
        email.send()
def copy_data_from(self, other):
    from . import ItemCategory, Item, Question, Quota

    self.plugins = other.plugins
    self.save()

    category_map = {}
    for c in ItemCategory.objects.filter(event=other):
        category_map[c.pk] = c
        c.pk = None
        c.event = self
        c.save()

    item_map = {}
    variation_map = {}
    for i in Item.objects.filter(event=other).prefetch_related('variations'):
        vars = list(i.variations.all())
        item_map[i.pk] = i
        i.pk = None
        i.event = self
        if i.picture:
            i.picture.save(i.picture.name, i.picture)
        if i.category_id:
            i.category = category_map[i.category_id]
        i.save()
        for v in vars:
            variation_map[v.pk] = v
            v.pk = None
            v.item = i
            v.save()

    for q in Quota.objects.filter(event=other).prefetch_related(
            'items', 'variations'):
        items = list(q.items.all())
        vars = list(q.variations.all())
        q.pk = None
        q.event = self
        q.save()
        for i in items:
            q.items.add(item_map[i.pk])
        for v in vars:
            q.variations.add(variation_map[v.pk])

    for q in Question.objects.filter(event=other).prefetch_related(
            'items', 'options'):
        items = list(q.items.all())
        opts = list(q.options.all())
        q.pk = None
        q.event = self
        q.save()
        for i in items:
            q.items.add(item_map[i.pk])
        for o in opts:
            o.pk = None
            o.question = q
            o.save()

    for s in EventSetting.objects.filter(object=other):
        s.object = self
        s.pk = None
        if s.value.startswith('file://'):
            fi = default_storage.open(s.value[7:], 'rb')
            nonce = get_random_string(length=8)
            fname = '%s/%s/%s.%s.%s' % (
                self.organizer.slug, self.slug, s.key, nonce,
                s.value.split('.')[-1]
            )
            newname = default_storage.save(fname, fi)
            s.value = 'file://' + newname
        s.save()
def post(self, request, *args, **kwargs):
    # check whether the file already exists locally
    def is_local_file(local_file_dir, get_file_name):
        for root, dirs, files in os.walk(local_file_dir):
            # print('root_dir:', root)   # current directory path
            # print('sub_dirs:', dirs)   # subdirectories under this path
            print('files:', files)       # non-directory files under this path
            local_files = files
            for item in local_files:
                print('local file check:', get_file_name, item)
                if get_file_name == item:
                    return True
        return False

    # response payload
    ret = {'code1': 1000, 'msg': None}
    # read the submitted form data
    name = request.POST.get('name')
    number = request.POST.get('number')
    file_name = request.POST.get('file_name')
    upload_time = request.POST.get('upload_time')
    # the uploaded file itself
    homework = request.FILES.get('file')
    print('name: {}, student number: {}\nhomework: {} time: {}'.format(
        name, number, homework.name, upload_time))

    # directory holding the files for the upload date
    local_file_dir = os.path.join(settings.MEDIA_ROOT,
                                  upload_time.split(' ')[0])
    obj = models.Homework.objects.filter(name=name, number=number).first()

    # validate the submission and set the return values
    print('submitted before 20:00:', upload_time,
          int(upload_time.split(' ')[-1].split(':')[0]))
    if int(upload_time.split(' ')[-1].split(':')[0]) < 20:
        if obj:
            if is_local_file(local_file_dir, file_name):
                ret['code1'] = 4000
                ret['msg'] = "Duplicate upload; this file was already submitted"
            else:
                ret['code1'] = 2000
                ret['msg'] = "Upload successful"
                models.Homework.objects.update_or_create(
                    name=name, number=number,
                    defaults={
                        'is_upload': file_name,
                        'upload_time': upload_time
                    })
                # save the file
                tmp_file = os.path.join(settings.MEDIA_ROOT,
                                        upload_time.split(' ')[0],
                                        homework.name)
                path = default_storage.save(tmp_file,
                                            ContentFile(homework.read()))
                print('tmp_file: {}'.format(path))
        else:
            ret['code1'] = 8000
            ret['msg'] = "User does not exist"
    else:
        ret['code1'] = 8000
        ret['msg'] = "Uploads are not allowed at the current time"
    return JsonResponse(ret)
def post(request):
    post_auth_key = request.POST.get('post_auth_key')
    docs_api_key = os.getenv('DOCS_API_KEY', '')
    es_index_prefix = settings.ES_INDEX_PREFIX

    root_path = request.POST.get('root_path', '').strip('/')
    root_path_segs = root_path.split('/')
    if (root_path is None or len(root_path_segs) != 2):
        return Response({'root_path': 'ROOT_PATH is required'}, status=400)

    obj, created = LibraryVersion.objects.get_or_create(
        name=root_path_segs[0],
        version=root_path_segs[1],
        defaults={'isActive': True}
    )

    uploaded_file = request.FILES.get('file')
    if post_auth_key is None:
        return Response(
            {'post_auth_key': 'required. Use DOCS_API_KEY env var'},
            status=400)
    elif post_auth_key != docs_api_key:
        return Response({'post_auth_key': 'DOCS_API_KEY required'},
                        status=401)

    if uploaded_file and uploaded_file.name.endswith('.zip'):
        zipf = zipfile.ZipFile(uploaded_file)
        index_ext_types = ['.json', ]
        index_dirs = ['docs', ]
        for zipped_file in zipf.namelist():
            path = os.path.join(*('docs', root_path, zipped_file.lower()))
            content = ContentFile(zipf.read(zipped_file))
            try:
                read_contents_bytes = content.read()
                read_contents_str = read_contents_bytes.decode('utf-8')
            except Exception as e:
                print("exception: " + str(e))

            # Store file in AWS
            default_storage.save(path, content)

            # Continue parsing files to index in ES.
            # Only index 'files'; based on whether they have an
            # extension, not a dir
            if "." in zipped_file:
                f, ext = os.path.splitext(zipped_file)
                indexer = DocsIndexer(ES_HOST_URL, ES_PORT, 'docs',
                                      ES_INDEX_PREFIX)
                doc = {}
                path_split = path.split('/')
                dir_root = path_split[3]
                doc_slug = f.split('/', 1)[-1]
                if ext in index_ext_types and dir_root in index_dirs:
                    try:
                        content_obj = json.loads(read_contents_str)
                    except Exception as e:
                        print("error: " + str(e))
                    try:
                        # @NOTE: if you add any data to be indexed here
                        # you also need to add it in `src/app/index_s3.py`
                        doc['content'] = strip_tags(content_obj['body'])
                        if 'api' in content_obj:
                            doc['api'] = strip_tags(content_obj['api'])
                        doc['title'] = content_obj['title']
                        doc['library'] = path_split[1]
                        doc['version'] = path_split[2]
                        doc['slug'] = doc_slug
                        doc['path'] = path
                    except Exception as err:
                        print("Exception... {} on {}".format(err, path))
                        continue
                    indexer.index_doc(doc)
    else:
        return Response({'file': 'ZIP file not found'},
                        status=status.HTTP_400_BAD_REQUEST)
    return Response()
def details(request, id, size=None, crop=False, quality=90, download=False,
            constrain=False, template_name="files/details.html"):
    """
    Return an image response after parameters have been applied.
    """
    file = get_object_or_404(File, pk=id)

    cache_key = generate_image_cache_key(
        file=id, size=size, pre_key=FILE_IMAGE_PRE_KEY, crop=crop,
        unique_key=id, quality=quality, constrain=constrain)
    cached_image = cache.get(cache_key)
    if cached_image:
        if file.type() != 'image':
            # log an event
            EventLog.objects.log(instance=file)
        return redirect('%s%s' % (
            get_setting('site', 'global', 'siteurl'), cached_image))

    # basic permissions
    if not has_view_perm(request.user, 'files.view_file', file):
        raise Http403

    # extra permission
    if not file.is_public:
        if not request.user.is_authenticated:
            raise Http403

    # if string and digit convert to integer
    if isinstance(quality, str) and quality.isdigit():
        quality = int(quality)

    # get image binary
    try:
        data = file.file.read()
        file.file.close()
    except IOError:  # no such file or directory
        raise Http404

    if download:
        # log download
        attachment = u'attachment;'
        EventLog.objects.log(**{
            'event_id': 185000,
            'event_data': '%s %s (%d) downloaded by %s' % (
                file.type(), file._meta.object_name, file.pk, request.user),
            'description': '%s downloaded' % file._meta.object_name,
            'user': request.user,
            'request': request,
            'instance': file,
        })
    else:
        # log view
        attachment = u''
        if file.type() != 'image':
            EventLog.objects.log(**{
                'event_id': 186000,
                'event_data': '%s %s (%d) viewed by %s' % (
                    file.type(), file._meta.object_name, file.pk,
                    request.user),
                'description': '%s viewed' % file._meta.object_name,
                'user': request.user,
                'request': request,
                'instance': file,
            })

    # if image size specified
    if file.type() == 'image' and size:
        if file.ext() in ('.tif', '.tiff'):
            raise Http404  # tifs cannot (currently) be viewed via browsers

        size = [int(s) if s.isdigit() else 0 for s in size.split('x')]
        size = aspect_ratio(file.image_dimensions(), size, constrain)

        # check for dimensions greater than zero
        if not all(size):
            raise Http404

        # gets resized image from cache or rebuilds
        image = get_image(file.file, size, FILE_IMAGE_PRE_KEY, cache=True,
                          crop=crop, quality=quality, unique_key=None)
        response = HttpResponse(content_type=file.mime_type())
        response['Content-Disposition'] = '%s filename="%s"' % (
            attachment, file.get_name())

        params = {'quality': quality}
        if image.format == 'GIF':
            params['transparency'] = 0

        try:
            image.save(response, image.format, **params)
        except AttributeError:
            return response

        if file.is_public_file():
            file_name = "%s%s" % (file.get_name(), ".jpg")
            file_path = 'cached%s%s' % (request.path, file_name)
            if not default_storage.exists(file_path):
                default_storage.save(file_path, ContentFile(response.content))
            full_file_path = "%s%s" % (settings.MEDIA_URL, file_path)
            cache.set(cache_key, full_file_path)

            cache_group_key = "files_cache_set.%s" % file.pk
            cache_group_list = cache.get(cache_group_key)
            if cache_group_list is None:
                cache.set(cache_group_key, [cache_key])
            else:
                cache_group_list += [cache_key]
                cache.set(cache_group_key, cache_group_list)

        return response

    if file.is_public_file():
        cache.set(cache_key, file.get_file_public_url())
        set_s3_file_permission(file.file, public=True)

        cache_group_key = "files_cache_set.%s" % file.pk
        cache_group_list = cache.get(cache_group_key)
        if cache_group_list is None:
            cache.set(cache_group_key, [cache_key])
        else:
            cache_group_list += [cache_key]
            cache.set(cache_group_key, cache_group_list)

    # set mimetype
    if file.mime_type():
        response = HttpResponse(data, content_type=file.mime_type())
    else:
        raise Http404

    # return response
    if file.get_name().endswith(file.ext()):
        response['Content-Disposition'] = '%s filename="%s"' % (
            attachment, file.get_name())
    else:
        response['Content-Disposition'] = '%s filename="%s"' % (
            attachment, file.get_name_ext())
    return response
def save(self):
    upload_file = self.cleaned_data['file']
    file_name = default_storage.save(upload_file.name, upload_file)
    return default_storage.url(file_name)
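# A minimal sketch of wiring the upload form above into a view; the form
# class name UploadForm and the JSON response shape are assumptions for
# illustration.
from django.http import JsonResponse

def upload_view(request):
    if request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)  # assumed form class
        if form.is_valid():
            # save() writes the file via default_storage and returns its
            # public URL, which can go straight back to the client.
            return JsonResponse({'url': form.save()})
    return JsonResponse({'error': 'invalid upload'}, status=400)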
def document_thumb_upload(request, docid,
                          template='documents/document_thumb_upload.html'):
    document = None
    try:
        document = _resolve_document(request, docid,
                                     'base.change_resourcebase',
                                     _PERMISSION_MSG_MODIFY)
    except Http404:
        return HttpResponse(loader.render_to_string(
            '404.html', context={}, request=request), status=404)
    except PermissionDenied:
        return HttpResponse(loader.render_to_string(
            '401.html', context={
                'error_message': _("You are not allowed to edit this document.")
            }, request=request), status=403)

    if document is None:
        return HttpResponse('An unknown error has occurred.',
                            content_type="text/plain", status=401)

    site_url = settings.SITEURL.rstrip('/') if settings.SITEURL.startswith(
        'http') else settings.SITEURL
    if request.method == 'GET':
        return render(request, template, context={
            "resource": document,
            "docid": docid,
            'SITEURL': site_url
        })
    elif request.method == 'POST':
        status_code = 401
        out = {'success': False}
        if docid and request.FILES:
            data = request.FILES.get('base_file')
            if data:
                filename = 'document-{}-thumb.png'.format(document.uuid)
                path = default_storage.save('tmp/' + filename,
                                            ContentFile(data.read()))
                f = os.path.join(settings.MEDIA_ROOT, path)
                try:
                    image_path = f
                except BaseException:
                    image_path = document.find_placeholder()
                thumbnail_content = None
                try:
                    thumbnail_content = generate_thumbnail_content(image_path)
                except MissingPILError:
                    logger.error(
                        'Pillow not installed, could not generate thumbnail.')
                if not thumbnail_content:
                    logger.warning(
                        "Thumbnail for document #{} empty.".format(docid))
                document.save_thumbnail(filename, thumbnail_content)
                logger.debug(
                    "Thumbnail for document #{} created.".format(docid))
                status_code = 200
                out['success'] = True
                out['resource'] = docid
        else:
            out['success'] = False
            out['errors'] = 'An unknown error has occurred.'
        out['url'] = reverse('document_detail', args=[docid])
        return HttpResponse(json.dumps(out), content_type='application/json',
                            status=status_code)
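# Exercise sketch for `document_thumb_upload` above via Django's test client.
# The URL path and the PNG payload are assumptions for illustration; the
# field name "base_file" matches what the view reads from request.FILES.
def demo_thumb_upload(client, docid, png_bytes):
    from django.core.files.uploadedfile import SimpleUploadedFile
    upload = SimpleUploadedFile('thumb.png', png_bytes,
                                content_type='image/png')
    # On success the view answers with JSON: a success flag, the document id
    # under "resource", and the document detail URL under "url".
    return client.post('/documents/%s/thumb_upload' % docid,
                       {'base_file': upload})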
def save(self, delete_zip_import=True, *args, **kwargs):
    """
    If a zip file is uploaded, extract any images from it and add
    them to the gallery, before removing the zip file.
    """
    super(BaseGallery, self).save(*args, **kwargs)
    if self.zip_import:
        zip_file = ZipFile(self.zip_import)
        for name in zip_file.namelist():
            data = zip_file.read(name)
            try:
                from PIL import Image
                image = Image.open(BytesIO(data))
                image.load()
                image = Image.open(BytesIO(data))
                image.verify()
            except ImportError:
                pass
            except:
                continue
            name = os.path.split(name)[1]

            # In python3, name is a string. Convert it to bytes.
            if not isinstance(name, bytes):
                try:
                    name = name.encode('cp437')
                except UnicodeEncodeError:
                    # File name includes characters that aren't in cp437,
                    # which isn't supported by most zip tooling. They will
                    # not appear correctly.
                    tempname = name

            # Decode byte-name.
            if isinstance(name, bytes):
                encoding = charsetdetect(name)['encoding']
                tempname = name.decode(encoding)

            # A gallery with a slug of "/" tries to extract files
            # to / on disk; see os.path.join docs.
            slug = self.slug if self.slug != "/" else ""
            path = os.path.join(GALLERIES_UPLOAD_DIR, slug, tempname)
            try:
                saved_path = default_storage.save(path, ContentFile(data))
            except UnicodeEncodeError:
                from warnings import warn
                warn("A file was saved that contains unicode "
                     "characters in its path, but somehow the current "
                     "locale does not support utf-8. You may need to set "
                     "'LC_ALL' to a correct value, eg: 'en_US.UTF-8'.")
                # The native() call is needed here around str because
                # os.path.join() in Python 2.x (in posixpath.py)
                # mixes byte-strings with unicode strings without
                # explicit conversion, which raises a TypeError as it
                # would on Python 3.
                path = os.path.join(GALLERIES_UPLOAD_DIR, slug,
                                    native(str(name, errors="ignore")))
                saved_path = default_storage.save(path, ContentFile(data))
            self.images.create(file=saved_path)
        if delete_zip_import:
            zip_file.close()
            self.zip_import.delete(save=True)
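# A small sketch of feeding the zip-import path above: build an in-memory
# zip with one image and assign it to `zip_import` before calling save().
# The helper below is illustrative; the field name comes from the method,
# everything else is assumed.
from io import BytesIO
from zipfile import ZipFile as _ZipFile

def make_gallery_zip(image_bytes, name="photo.jpg"):
    buf = BytesIO()
    with _ZipFile(buf, "w") as zf:
        # Zip tooling historically stores non-UTF-8 names as cp437, which is
        # why save() round-trips names through cp437 before decoding them.
        zf.writestr(name, image_bytes)
    buf.seek(0)
    return buf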
def photo_size(request, id, size, crop=False, quality=90, download=False,
               constrain=False):
    """
    Renders image and returns response.
    Does not use template.
    Saves resized image within cache system.
    Returns 404 if image rendering fails.
    """
    if isinstance(quality, unicode) and quality.isdigit():
        quality = int(quality)

    cache_key = generate_image_cache_key(
        file=id, size=size, pre_key=PHOTO_PRE_KEY, crop=crop,
        unique_key=id, quality=quality, constrain=constrain)
    cached_image = cache.get(cache_key)
    if cached_image:
        return redirect(cached_image)

    photo = get_object_or_404(Image, id=id)
    size = [int(s) for s in size.split('x')]
    size = aspect_ratio(photo.image_dimensions(), size, constrain)

    # check permissions
    if not has_perm(request.user, 'photos.view_image', photo):
        raise Http403

    attachment = ''
    if download:
        attachment = 'attachment;'

    if not photo.image or not default_storage.exists(photo.image.name):
        raise Http404

    # gets resized image from cache or rebuilds
    image = get_image(photo.image, size, PHOTO_PRE_KEY, crop=crop,
                      quality=quality, unique_key=str(photo.pk),
                      constrain=constrain)

    # if image not rendered; quit
    if not image:
        raise Http404

    response = HttpResponse(mimetype='image/jpeg')
    response['Content-Disposition'] = '%s filename=%s' % (
        attachment, photo.image.file.name)
    image.save(response, "JPEG", quality=quality)

    if photo.is_public_photo() and photo.is_public_photoset():
        file_name = photo.image_filename()
        file_path = 'cached%s%s' % (request.path, file_name)
        default_storage.save(file_path, ContentFile(response.content))
        full_file_path = "%s%s" % (settings.MEDIA_URL, file_path)
        cache.set(cache_key, full_file_path)

        cache_group_key = "photos_cache_set.%s" % photo.pk
        cache_group_list = cache.get(cache_group_key)
        if cache_group_list is None:
            cache.set(cache_group_key, [cache_key])
        else:
            cache_group_list += [cache_key]
            cache.set(cache_group_key, cache_group_list)

    return response
def parse_submission(request):
    raw_text = request.read()
    data = deb822.Deb822(raw_text)
    raw_text_gpg_stripped = data.dump()

    ## Parse GPG info #########################################################

    uid = None
    data.raw_text = raw_text
    gpg_info = data.get_gpg_info()
    for x in ('VALIDSIG', 'NO_PUBKEY'):
        try:
            uid = gpg_info[x][0]
            break
        except (KeyError, IndexError):
            pass
    if uid is None:
        raise InvalidSubmission("Could not determine GPG uid")

    ## Check whether .buildinfo already exists ################################

    def create_submission(buildinfo):
        submission = buildinfo.submissions.create(
            key=Key.objects.get_or_create(uid=uid)[0],
        )
        default_storage.save(
            submission.get_storage_name(),
            ContentFile(raw_text),
        )
        return submission

    ## Parse new .buildinfo ###################################################

    def get_or_create(model, field):
        try:
            return model.objects.get_or_create(name=data[field])[0]
        except KeyError:
            raise InvalidSubmission("Missing required field: {}".format(field))

    if data.get('Format') not in SUPPORTED_FORMATS:
        raise InvalidSubmission(
            "Only {} 'Format:' versions are supported".format(
                ', '.join(sorted(SUPPORTED_FORMATS)),
            ))

    sha1 = hashlib.sha1(raw_text_gpg_stripped.encode('utf-8')).hexdigest()
    try:
        with transaction.atomic():
            buildinfo = Buildinfo.objects.create(
                sha1=sha1,
                source=get_or_create(Source, 'Source'),
                architecture=get_or_create(Architecture, 'Architecture'),
                version=data['version'],
                build_path=data.get('Build-Path', ''),
                build_date=parse(data.get('Build-Date', '')),
                build_origin=get_or_create(Origin, 'Build-Origin'),
                build_architecture=get_or_create(Architecture,
                                                 'Build-Architecture'),
                environment=data.get('Environment', ''),
            )
    except IntegrityError:
        # Already exists; just attach a new Submission instance
        return create_submission(Buildinfo.objects.get(sha1=sha1)), False

    default_storage.save(
        buildinfo.get_storage_name(),
        ContentFile(raw_text_gpg_stripped),
    )

    ## Parse binaries #########################################################

    try:
        binary_names = set(data['Binary'].split(' '))
    except KeyError:
        raise InvalidSubmission("Missing 'Binary' field")

    if not binary_names:
        raise InvalidSubmission("Invalid 'Binary' field")

    binaries = {}
    for x in binary_names:
        # Save instances for lookup later
        binaries[x] = buildinfo.binaries.create(
            binary=Binary.objects.get_or_create(name=x)[0],
        )

    ## Parse checksums ########################################################

    hashes = ('Md5', 'Sha1', 'Sha256')

    checksums = {}
    for x in hashes:
        for y in data['Checksums-%s' % x].strip().splitlines():
            checksum, size, filename = y.strip().split()

            # Check size
            try:
                size = int(size)
                if size < 0:
                    raise ValueError()
            except ValueError:
                raise InvalidSubmission(
                    "Invalid size for {}: {}".format(filename, size),
                )

            checksums.setdefault(filename, {
                'size': size,
                'binary': None,
            })['checksum_{}'.format(x.lower())] = checksum

            existing = checksums[filename]['size']
            if size != existing:
                raise InvalidSubmission(
                    "Mismatched file size in Checksums-{}: {} != {}".format(
                        x, existing, size))

    ## Create Checksum instances ##############################################

    for k, v in sorted(checksums.items()):
        # Match with Binary instances if possible
        m = re_binary.match(k)
        if m is not None:
            v['binary'] = binaries.get(m.group('name'))
        buildinfo.checksums.create(filename=k, **v)

    ## Validate Installed-Build-Depends #######################################

    for x in data['Installed-Build-Depends'].strip().splitlines():
        m = re_installed_build_depends.match(x.strip())
        if m is None:
            raise InvalidSubmission(
                "Invalid entry in Installed-Build-Depends: {}".format(x),
            )

    return create_submission(buildinfo), True
def store_in_file(input):
    filename = str(uuid.uuid4())
    filepath = settings.BASE_DIR + "/files/tmp/" + filename + ".satin"
    default_storage.save(filepath, ContentFile(input))
    return filename, filepath
def create(self, validated_data):
    image_file = validated_data["image"]
    path = generate_unique_upload_filename(IMAGES_UPLOAD_DIRECTORY, 'jpg')
    default_storage.save(os.path.join(settings.MEDIA_ROOT, path), image_file)
    return path
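# Usage sketch for the serializer `create` above: it returns the relative
# storage path rather than a model instance, so a caller typically echoes
# the path back to the client. The serializer and view names are assumptions.
from rest_framework.response import Response

def upload_image_view(request):
    serializer = ImageUploadSerializer(data=request.data)  # assumed class
    serializer.is_valid(raise_exception=True)
    stored_path = serializer.save()  # invokes create() and returns the path
    return Response({'path': stored_path})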
def add_lecture(request, document_id):
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        # get the file extension
        extension = os.path.splitext(
            request.FILES['docfile'].name)[1][0:].lower()  # extension
        original_name = os.path.splitext(
            request.FILES['docfile'].name)[0][0:]  # name without extension
        # Check extension. Must be .zip
        if extension == '.zip':
            # manually save the file
            f = request.FILES['docfile']
            path_to_file = settings.MEDIA_ROOT + "temp/" + str(f)
            temp_directory_path = settings.MEDIA_ROOT + "temp/"
            path = default_storage.save("temp/" + str(f),
                                        ContentFile(f.read()))
            # Unzip
            unzip(path_to_file, temp_directory_path)
            # Check the number of items uploaded
            not_files = ([
                name for name in os.listdir(path_to_file[0:-4] + "/")
                if os.path.isdir(path_to_file[0:-4] + "/" + name)
            ])
            if not not_files:
                # rename
                doc = Presentation.objects.get(pk=document_id)
                destination = settings.MEDIA_ROOT + str(doc.docfile)
                files = [x for x in os.listdir(destination) if '.' not in x]
                files.sort()
                name = 'z' + files[-1]
                old_name = path_to_file[0:-4]  # without .zip
                new_name = temp_directory_path + name
                os.rename(old_name, new_name)
                # move the directory
                shutil.move(new_name, destination)
                # remove the temporary directory
                shutil.rmtree(temp_directory_path)
                # update json
                try:
                    json = generate_json(doc.docfile.name, doc.title)
                    doc.json = json
                    doc.save()
                    # No errors
                    error = ''
                except Exception as e:
                    print(e)
                    # Display error + delete files
                    error = 'Import error.'
                    path = destination + '/' + name
                    shutil.rmtree(path)
            else:
                error = ('Import error. It seems that you are uploading '
                         'several lectures. Check your zip file; it must '
                         'contain a single folder with slides and '
                         'details.csv.')
        else:
            error = "Wrong file format. You must use .zip only."
    else:
        form = DocumentForm()  # Empty
        error = ""  # Empty
    # Render the list page with the documents, the form, and an error if needed
    documents = Presentation.objects.all()
    return render(request, 'upload.html', {
        'documents': documents,
        'form': form,
        'error': error
    })