def test(request):
    """Save the uploaded "track[video]" file under MEDIA_ROOT and return an
    empty 200 response.

    The original read the whole upload into memory via a ContentFile and then
    chunked that copy; UploadedFile already supports chunked iteration, so we
    stream it straight to disk.
    """
    upload = request.FILES["track[video]"]
    # NOTE(review): filename is built by simple concatenation from the
    # client-supplied name -- confirm upstream sanitization.
    video_filename = MEDIA_ROOT + "/" + "121212" + upload.name
    with open(video_filename, "wb") as fp:
        for chunk in upload.chunks():
            fp.write(chunk)
    return HttpResponse()
def docente_edit(req, docente_id):
    """Render the admin page for a CursoDocente, or apply a POSTed update
    and answer with a JSON status payload."""
    docente = get_object_or_404(CursoDocente, id=docente_id)
    if req.method != "POST":
        return render_to_response("edmin/docente/admin.html", {"docente": docente})

    fields = {key: req.POST.get(key)
              for key in ("nombre", "twitter", "perfil", "imagen", "imagen_filename")}
    if not (fields["nombre"] and fields["twitter"] and fields["perfil"]):
        return HttpResponse(jstatus_err())

    docente.nombre = fields["nombre"]
    docente.twitter = fields["twitter"]
    docente.perfil = fields["perfil"]
    if fields["imagen"] and fields["imagen_filename"]:
        # Image upload: decode the base64 payload of the data URI and attach
        # it under the client-provided filename.
        decoded = ContentFile(base64.b64decode(fields["imagen"].split(",")[1]))
        decoded.name = fields["imagen_filename"]
        docente.imagen = decoded
    docente.save()
    return HttpResponse(jstatus_ok(serialize_docente(docente)))
def test_post_with_collections(self):
    """Posting a document with a collection id stores it in that collection."""
    root_collection = Collection.get_first_root_node()
    evil_plans_collection = root_collection.add_child(name="Evil plans")

    # Fake upload payload
    fake_file = ContentFile(b("A boring example document"))
    fake_file.name = 'test.txt'

    response = self.client.post(
        reverse('wagtaildocs:add'),
        {
            'title': "Test document",
            'file': fake_file,
            'collection': evil_plans_collection.id,
        },
    )

    # A redirect back to the index signals success
    self.assertRedirects(response, reverse('wagtaildocs:index'))

    # The document exists and lives in the Evil Plans collection
    self.assertTrue(models.Document.objects.filter(title="Test document").exists())
    root_collection = Collection.get_first_root_node()
    self.assertEqual(
        models.Document.objects.get(title="Test document").collection,
        evil_plans_collection
    )
def createSVGView(request, filename):
    """
    This view receives the svg information from the workspace
    and saves the file.

    Bug fixed: the original seeded the ContentFile with the *filename* string
    (and passed 'w' as its name) and then wrote the decoded SVG over it,
    which could leave leftover filename bytes in the stored file. The file
    is now built directly from the decoded SVG content.
    """
    if request.is_ajax():
        filenameRegex = re.search(
            r'(?P<filename>[a-zA-Z]+[\d\.]*)\.(?P<extension>[a-zA-Z]{1,4}$)',
            filename)
        cleanFileName = filenameRegex.group('filename')
        cleanFileExtension = filenameRegex.group('extension')

        # Decode the base64-encoded SVG markup sent by the workspace
        fileContent = base64.b64decode(request.POST['svg']).decode('utf-8')
        newFile = ContentFile(fileContent)
        newFile.name = cleanFileName + '.svg'

        newFileDB = UploadSVGFile(file=newFile)
        newFileDB.save()

        response_data = {
            'success': 1,
            'url': newFileDB.file.url,
            'filename': filename,
            'extension': cleanFileExtension
        }
        return HttpResponse(json.dumps(response_data), content_type="application/json")
def edit_document(self, **params):
    """Create a document, POST an edit merged with *params*, verify the
    redirect, and return the updated document."""
    def make_file():
        # Fresh fake upload each time it is needed
        doc_file = ContentFile(b("A boring example document"))
        doc_file.name = 'test.txt'
        return doc_file

    # Create a document without tags to edit
    document = models.Document.objects.create(title="Test document", file=make_file())

    # Submit the edit with a replacement file; params may override any field
    post_data = dict({'title': "Test document changed!", 'file': make_file()}, **params)
    response = self.client.post(reverse('wagtaildocs:edit', args=(document.id,)), post_data)

    # User should be redirected back to the index
    self.assertRedirects(response, reverse('wagtaildocs:index'))

    # Document should be changed
    updated = models.Document.objects.filter(title=post_data['title'])
    self.assertTrue(updated.exists())
    return updated.first()
def test_post(self):
    """Posting without a collection places the document in the root
    collection and records its file size."""
    fake_file = ContentFile(b("A boring example document"))
    fake_file.name = 'test.txt'

    response = self.client.post(
        reverse('wagtaildocs:add'),
        {'title': "Test document", 'file': fake_file},
    )

    # Redirect back to the index signals success
    self.assertRedirects(response, reverse('wagtaildocs:index'))

    # Created, and placed in the root collection
    document = models.Document.objects.get(title="Test document")
    root_collection = Collection.get_first_root_node()
    self.assertEqual(document.collection, root_collection)

    # Check that the file_size field was set
    self.assertTrue(document.file_size)
def setUp(self):
    """Create a document inside a collection and log in a user who may
    change (but not add) documents."""
    # Fake upload used as the document's file
    fake_file = ContentFile(b("A boring example document"))
    fake_file.name = 'test.txt'

    self.root_collection = Collection.get_first_root_node()
    self.evil_plans_collection = self.root_collection.add_child(name="Evil plans")
    self.nice_plans_collection = self.root_collection.add_child(name="Nice plans")

    # The document the tests will edit
    self.document = models.Document.objects.create(
        title="Test document",
        file=fake_file,
        collection=self.nice_plans_collection,
    )

    # User with change_document permission but not add_document
    user = get_user_model().objects.create_user(
        username='******',
        email='*****@*****.**',
        password='******',
    )
    needed = {
        codename: Permission.objects.get(
            content_type__app_label=app_label, codename=codename)
        for app_label, codename in (
            ('wagtaildocs', 'change_document'),
            ('wagtailadmin', 'access_admin'),
        )
    }

    self.changers_group = Group.objects.create(name='Document changers')
    GroupCollectionPermission.objects.create(
        group=self.changers_group,
        collection=self.root_collection,
        permission=needed['change_document'],
    )
    user.groups.add(self.changers_group)
    user.user_permissions.add(needed['access_admin'])

    self.assertTrue(self.client.login(username='******', password='******'))
def test_basic_actions(self):
    """Round-trip a unicode-named file through CloudStorage: save, inspect,
    fetch over HTTP, read back, delete."""
    storage = CloudStorage()
    name = u"tmp.ąćęłńóśźż.马铃薯.zip"

    filename = storage.save(name, ContentFile("content", name="my_file"))
    self.assertIsInstance(filename, basestring)
    self.assertTrue(filename.endswith(name))

    self.assertTrue(storage.exists(filename))
    self.assertEqual(storage.size(filename), len("content"))

    url = storage.url(filename)
    self.assertIsInstance(url, basestring)
    self.assertNotEqual(url, "")

    # The URL must actually serve the stored bytes
    abs_url = urlparse.urlunparse(
        ("http", os.environ["HTTP_HOST"], url, None, None, None))
    response = urlfetch.fetch(abs_url)
    self.assertEqual(response.status_code, httplib.OK)
    self.assertEqual(response.content, "content")

    # Read it back through the storage API
    stored = storage.open(filename)
    self.assertIsInstance(stored, File)
    self.assertEqual(stored.read(), "content")

    # Delete it
    storage.delete(filename)
    self.assertFalse(storage.exists(filename))
def test_basic_actions(self):
    """Exercise BlobstoreStorage: save, exists/size/url, HTTP fetch, read,
    delete."""
    storage = BlobstoreStorage()

    # Save a new file
    filename = storage.save("tmp", ContentFile("content", name="my_file"))
    self.assertIsInstance(filename, basestring)
    self.assertTrue(filename.endswith("tmp"))

    # Check .exists(), .size() and .url()
    self.assertTrue(storage.exists(filename))
    self.assertEqual(storage.size(filename), len("content"))
    url = storage.url(filename)
    self.assertIsInstance(url, basestring)
    self.assertNotEqual(url, "")

    # Check URL can be fetched
    absolute = urlparse.urlunparse(
        ("http", os.environ["HTTP_HOST"], url, None, None, None))
    fetched = urlfetch.fetch(absolute)
    self.assertEqual(fetched.status_code, httplib.OK)
    self.assertEqual(fetched.content, "content")

    # Open it, read it
    # NOTE: Blobstore doesn't support updating existing files.
    handle = storage.open(filename)
    self.assertIsInstance(handle, File)
    self.assertEqual(handle.read(), "content")

    # Delete it
    storage.delete(filename)
    self.assertFalse(storage.exists(filename))
def test_valid_dump_import_with_logo(client, settings):
    """Importing a valid project dump carrying a logo creates the project
    and exposes both logo URLs.

    Idiom fix: ``!= None`` replaced with ``is not None`` (identity check,
    PEP 8).
    """
    settings.CELERY_ENABLED = False

    user = f.UserFactory.create()
    client.login(user)

    url = reverse("importer-load-dump")
    data = ContentFile(bytes(json.dumps({
        "slug": "valid-project",
        "name": "Valid project",
        "description": "Valid project desc",
        "is_private": False,
        "logo": {
            "name": "logo.bmp",
            "data": base64.b64encode(DUMMY_BMP_DATA).decode("utf-8")
        }
    }), "utf-8"))
    data.name = "test"

    response = client.post(url, {'dump': data})

    assert response.status_code == 201
    response_data = response.data
    assert "id" in response_data
    assert response_data["name"] == "Valid project"
    assert "logo_small_url" in response_data
    assert response_data["logo_small_url"] is not None
    assert "logo_big_url" in response_data
    assert response_data["logo_big_url"] is not None
def test_basic_actions(self):
    """CloudStorage smoke test: save, metadata checks, HTTP fetch, read
    back, delete."""
    storage = CloudStorage()
    payload = 'content'

    filename = storage.save('tmp', ContentFile(payload, name='my_file'))
    self.assertIsInstance(filename, basestring)
    self.assertTrue(filename.endswith('tmp'))

    self.assertTrue(storage.exists(filename))
    self.assertEqual(storage.size(filename), len(payload))

    url = storage.url(filename)
    self.assertIsInstance(url, basestring)
    self.assertNotEqual(url, '')

    # Fetch the file over HTTP and compare contents
    abs_url = urlparse.urlunparse(
        ('http', os.environ['HTTP_HOST'], url, None, None, None)
    )
    response = urlfetch.fetch(abs_url)
    self.assertEqual(response.status_code, httplib.OK)
    self.assertEqual(response.content, payload)

    # Read it back through the storage API
    stored = storage.open(filename)
    self.assertIsInstance(stored, File)
    self.assertEqual(stored.read(), payload)

    # Delete it
    storage.delete(filename)
    self.assertFalse(storage.exists(filename))
def up_view(request):
    """Handle a raw-body file upload and answer with a JSON status dict.

    Fixes: the original used a bare ``raise`` with no active exception
    (which raises a confusing TypeError/RuntimeError instead of the intended
    control-flow jump) and a bare ``except:`` that swallowed everything
    including SystemExit/KeyboardInterrupt.
    """
    content = request.raw_post_data
    response = False
    if not content:
        response = {'success': False, 'message': 'Empty file!'}
        return HttpResponse(simplejson.dumps(response))
    try:
        contentFile = ContentFile(content)
        contentFile.name = request.META['HTTP_UP_FILENAME']
        contentFile.type = request.META['HTTP_UP_TYPE']
        if int(contentFile.size) > MAX_UPLOAD_SIZE:
            response = {'success': False, 'error': 'File is too big!'}
            # Raise a concrete exception to jump to the handler below;
            # the size-limit response set above is preserved there.
            raise ValueError('file exceeds MAX_UPLOAD_SIZE')
        f = File(pub_date=datetime.now(), secret=random() * 100,
                 name=random() * 100, type=contentFile.type)
        f.file.save(contentFile.name, contentFile)
        f.save()
        f = File.objects.get(pk=f.pk)
        response = {'success': True, 'name': f.name, 'delete': f.secret}
    except Exception:
        # Only report a generic failure if no specific message was set.
        if not response:
            response = {'success': False, 'error': 'Upload failed!'}
    return HttpResponse(simplejson.dumps(response))
def post(self, request):
    """Save an uploaded chapter PDF under static/chapter_pdf/ with a
    timestamped name, record a Chapterdetails row, and return a JSON status.

    Fixes: the output file handle leaked when a write or the DB save raised
    (``fout`` was opened before the try and never closed on failure); the
    upload was also needlessly copied into an in-memory ContentFile before
    chunking. Dead ``html`` locals and the py2 ``print`` were dropped in
    favor of the existing logging call.
    """
    folder = 'chapter_pdf'
    uploaded_filename = request.FILES['upload'].name
    try:
        os.mkdir(os.path.join(BASE_DIR + "/static/", folder))
    except Exception as e:
        # Directory most likely exists already; not fatal.
        logging.debug(e)
    # Timestamped name avoids collisions between uploads.
    uploaded_filename = datetime.datetime.now().strftime(
        "%y_%m_%d_%H_%M_%S_%f_file.pdf")
    full_filename = os.path.join(BASE_DIR + "/static/", folder, uploaded_filename)
    store_path = folder + '/' + uploaded_filename
    try:
        # ``with`` guarantees the handle is closed even if a write fails.
        with open(full_filename, 'wb+') as fout:
            for chunk in request.FILES['upload'].chunks():
                fout.write(chunk)
        data = Chapterdetails(chapter_name=request.POST['chaptername'])
        data.cat = request.POST['cat']
        data.chapter_path = store_path
        data.save()
        return HttpResponse(json.dumps({'status': 200}))
    except Exception as e:
        logging.debug(e)
        return HttpResponse(json.dumps({'status': 400}))
def test_post_video_with_collections(self):
    """Posting a video with a collection id stores it there with type
    'video'."""
    root_collection = Collection.get_first_root_node()
    evil_plans_collection = root_collection.add_child(name="Evil plans")

    # Fake upload payload
    fake_file = ContentFile(b("A boring example movie"))
    fake_file.name = 'movie.mp3'

    response = self.client.post(
        reverse('wagtailmedia:add', args=('video', )),
        {
            'title': "Test media",
            'file': fake_file,
            'duration': 100,
            'collection': evil_plans_collection.id,
        },
    )

    # Redirect back to the index signals success
    self.assertRedirects(response, reverse('wagtailmedia:index'))

    # Created, placed in Evil Plans, and typed as video
    self.assertTrue(models.Media.objects.filter(title="Test media").exists())
    media = models.Media.objects.get(title="Test media")
    self.assertEqual(media.collection, evil_plans_collection)
    self.assertEqual(media.type, 'video')
def load_field(self, fieldname, val, zf):
    """Deserialize *val* for model field *fieldname*, reading file payloads
    from the open zipfile *zf*.

    Returns a ``(value, defer)`` pair; ``defer`` is True when assignment
    must wait until after the instance is saved (m2m-style relations).

    NOTE(review): uses pre-1.8 Django field APIs (``field.rel.to``,
    ``field.related.parent_model``) -- confirm the Django version in use.
    """
    if val is None:
        # Nothing to convert, nothing to defer.
        return val, False
    try:
        field = self.model._meta.get_field(fieldname)
    except models.fields.FieldDoesNotExist:
        # Not a declared field (e.g. a reverse accessor); handled below.
        field = None
    deferr = False
    if field:
        if isinstance(field, models.DateTimeField):
            val = datetime.datetime.strptime(val, DATETIME_FORMAT)
        elif isinstance(field, models.DateField):
            val = datetime.date.strptime(val, DATE_FORMAT)
        elif isinstance(field, models.ManyToManyField):
            # Related objects must be attached after save.
            val = self.load_many(field.related.parent_model, val, zf)
            deferr = True
        elif isinstance(field, models.FileField):
            # ``val`` is the archive member name holding the file's bytes.
            f = ContentFile(zf.read(val))
            f.name = val  # we hack in a name, to force django to automatically save the ContentFile on Model.save()
            val = f
        elif isinstance(field, models.ForeignKey):
            val = load_model_instance(field.rel.to, val, zf)
        elif isinstance(field, generic.GenericRelation):
            val = self.load_many(field.rel.to, val, zf, commit=False)
            deferr = True
    elif isinstance(val, list):
        # No declared field but a list payload: treat as a reverse relation.
        rel_model = getattr(self.model, fieldname).related.model
        val = self.load_many(rel_model, val, zf, commit=False)
        deferr = True
    return val, deferr
def curso_add(req):
    """Create a Curso from POSTed form data ('OK'/'ERR' plain responses);
    GET renders the admin form."""
    if req.method != 'POST':
        return render_to_response('edmin/curso/admin.html')

    required = ('nombre', 'slug', 'pais', 'precio', 'descripcion',
                'direccion', 'mapa', 'imagen', 'imagen_filename', 'info_pago')
    datos = dict((campo, req.POST.get(campo)) for campo in required)
    if not all(datos.values()):
        return HttpResponse('ERR')

    # Image upload: decode the base64 payload of the data URI
    archivo = ContentFile(base64.b64decode(datos['imagen'].split(',')[1]))
    archivo.name = datos['imagen_filename']

    curso = Curso(nombre=datos['nombre'], slug=datos['slug'],
                  pais=datos['pais'], precio=datos['precio'],
                  descripcion=datos['descripcion'],
                  direccion=datos['direccion'],
                  info_pago=datos['info_pago'], mapa=datos['mapa'],
                  imagen=archivo)
    curso.save()
    return HttpResponse('OK')
def baked_image_from_abi(abi):
    """Return a baked badge image for *abi*.

    If the badge instance's own image URL serves an already-baked image,
    return that response; otherwise fetch the badge's unbaked image and bake
    the instance data into it.

    Fixes: replaced the private ``Response._content`` attribute with the
    public ``Response.content``, and narrowed the bare ``except:`` to
    ``except Exception``.
    """
    image_url = abi.badge_instance.get('image')
    if image_url is not None:
        try:
            image = requests.get(image_url)
            unbake(image)
        except Exception:
            # Fetch/unbake failed; fall through and re-bake from the
            # badge's unbaked image below.
            pass
        else:
            return image
    try:
        image_url = abi.badge.get('image')
        unbaked_image = ContentFile(
            requests.get(image_url).content, "unbaked_image.png"
        )
        unbaked_image.open()
        baked_image = bake(
            unbaked_image, json.dumps(dict(abi.badge_instance), indent=2)
        )
    except requests.exceptions.SSLError as e:
        raise ValidationError("SSL failure retrieving image " + image_url)
    except Exception as e:
        raise e
    else:
        return baked_image
def test_compress_content_len(self):
    """
    A file returned by _compress_content() must be readable and non-empty.
    """
    compressed = self.storage._compress_content(ContentFile("I should be gzip'd"))
    self.assertTrue(len(compressed.read()) > 0)
def docente_edit(req, docente_id):
    """Render or update a single CursoDocente; POST answers with a JSON
    status payload."""
    docente = get_object_or_404(CursoDocente, id=docente_id)
    if req.method == 'POST':
        post = req.POST
        nombre = post.get('nombre')
        twitter = post.get('twitter')
        perfil = post.get('perfil')
        if not (nombre and twitter and perfil):
            return HttpResponse(jstatus_err())

        docente.nombre = nombre
        docente.twitter = twitter
        docente.perfil = perfil

        imagen = post.get('imagen')
        imagen_n = post.get('imagen_filename')
        if imagen and imagen_n:
            # Image upload: decode the base64 data-URI payload and attach
            # it with the client-supplied filename.
            decoded = ContentFile(base64.b64decode(imagen.split(',')[1]))
            decoded.name = imagen_n
            docente.imagen = decoded

        docente.save()
        return HttpResponse(jstatus_ok(serialize_docente(docente)))
    return render_to_response('edmin/docente/admin.html', {'docente': docente})
def process(self, verbosity):
    """Download ``self.source_url`` into a temp file under
    ../media/utils/temp_files/ and feed it to the parser registered for the
    document's root tag; the temp file is removed after parsing.

    NOTE(review): Python 2 code (``except Exception, e`` syntax, urllib2);
    all errors are ultimately swallowed by the outer handler.
    """
    # Build a unique temp-file name derived from self.ref.
    dirname, filename = os.path.split(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../media/utils/temp_files/') + self.ref)
    prefix, suffix = os.path.splitext(filename)
    fd, filename = tempfile.mkstemp(suffix, prefix + "_", dirname)
    file_url = self.source_url
    try:
        try:
            # python >= 2.7
            import requests
            r = requests.get(file_url)
            f = StringIO(r.content)
        except ImportError:
            # python <= 2.6
            import urllib2
            r = urllib2.urlopen(file_url)
            f = r
        # Wrap the downloaded bytes (note: shadows the ``file`` builtin).
        file = ContentFile(f.read(), filename)
        file.close()
        try:
            tree = objectify.parse(file)
            # Pick the parser registered for the document's root element.
            parser_cls = parsers[tree.getroot().tag]
            try:
                parser_cls(tree, True, verbosity).parse()
                os.remove(file.name)
            except Exception, e:
                # Clean up the temp file before re-raising.
                os.remove(file.name)
                raise Exception, e
                #TODO log error
        except KeyError:
            # No parser registered for this root tag.
            raise ImportError(u"Undefined document structure")
    except Exception, e:
        # Swallow all failures (including the re-raise above).
        pass
        #TODO log error
def setUp(self):
    """Create a media item inside a collection and log in a user who may
    change (but not add) media."""
    # Fake upload used as the media's file
    fake_file = ContentFile(b("A boring example song"))
    fake_file.name = 'song.mp3'

    self.root_collection = Collection.get_first_root_node()
    self.evil_plans_collection = self.root_collection.add_child(name="Evil plans")
    self.nice_plans_collection = self.root_collection.add_child(name="Nice plans")

    # The media item the tests will edit
    self.media = models.Media.objects.create(
        title="Test media",
        file=fake_file,
        collection=self.nice_plans_collection,
        duration=100,
    )

    # User with change_media permission but not add_media
    user = get_user_model().objects.create_user(
        username='******',
        email='*****@*****.**',
        password='******',
    )
    change_media = Permission.objects.get(
        content_type__app_label='wagtailmedia',
        codename='change_media',
    )
    access_admin = Permission.objects.get(
        content_type__app_label='wagtailadmin',
        codename='access_admin',
    )

    self.changers_group = Group.objects.create(name='Media changers')
    GroupCollectionPermission.objects.create(
        group=self.changers_group,
        collection=self.root_collection,
        permission=change_media,
    )
    user.groups.add(self.changers_group)
    user.user_permissions.add(access_admin)

    self.assertTrue(self.client.login(username='******', password='******'))
def test_storage(self):
    """ Storage testing: upload a random payload, verify URL/size/content,
    then delete it. """
    storage = S3Storage(host='s3.amazonaws.com')

    # Random payload and filename
    text = get_string(random.randrange(300, 1300))
    filename = get_string(random.randrange(5, 12))
    self.assertFalse(storage.exists(filename))

    upload_file = ContentFile(text)
    upload_file.name = filename
    uploaded_url = upload(upload_file, host='s3.amazonaws.com')
    self.assertTrue(storage.exists(filename))

    expected_url = 'http://' + BOTO_S3_BUCKET + '.s3.amazonaws.com/' + filename
    self.assertEqual(uploaded_url, expected_url)

    # The public URL must serve the exact payload
    page = urllib2.urlopen(uploaded_url)
    self.assertEqual(text, page.read())
    self.assertEqual(len(text), storage.size(filename))
    self.assertEqual(expected_url, storage.url(filename))

    # Cleanup
    storage.delete(filename)
    self.assertFalse(storage.exists(filename))
def generate(self):
    """Copy the source field's image into ``self.image`` under a unique,
    hash-based name and save the model.

    Bug fixed: the original computed an md5 hash of the source filename but
    never used it -- the format string contained a literal "(unknown)" where
    the ``{filename}`` placeholder (already passed as a kwarg) belonged.
    """
    obj = self.content_object
    field = getattr(obj, self.field)
    image = ContentFile(field.file.read())

    # Get extension (default to jpg when the name has none)
    if "." in field.file.name:
        split_file_name = field.file.name.split('.')
        split_file_name.reverse()
        extension = split_file_name[0]
    else:
        extension = "jpg"

    # Make hash filename to ensure uniqueness
    filename_hash = hashlib.md5(field.file.name)
    filename_hash = filename_hash.hexdigest()

    # Build filename
    image.name = "{model}.{obj}.{field}.{filename}.{ext}".format(
        model=obj._meta.db_table,
        obj=self.object_id,
        field=self.field,
        ext=extension,
        filename=filename_hash)

    # Save to model
    self.image = image
    self.save()
    # NOTE(review): the crop step appears commented out in the original --
    # confirm whether it should run after save():
    # if self.coordinates:
    #     self._crop_image()
def get_db_prep_value(self, value, connection, prepared=False):
    """Convert pickle object to a string"""
    # Persist the payload under a random unique name and return its storage
    # reference converted to the expected DB format.
    temp_name = uuid.uuid4().hex
    stored = ContentFile(value)
    stored.name = temp_name
    saved_name = self.fs.save(temp_name, stored)
    return change_format(saved_name)
def get(self, request, *args, **kwargs):
    """Bundle the files from ``self.get_files()`` into an in-memory tar
    archive and return it as an attachment download.

    Fixes: the original swallowed UnicodeDecodeError from ``file_.read()``
    and then used the unbound ``data``/buffer variables, crashing with a
    NameError; unreadable entries are now skipped cleanly.
    """
    temp_file = ContentFile(b(""), name=self.tarfile_name)
    with tarfile.TarFile(fileobj=temp_file, mode='w', debug=3) as tar_file:
        for file_ in self.get_files():
            file_name = file_.name
            try:
                data = file_.read()
            except UnicodeDecodeError:
                # Entry can't be read as text; skip it rather than crash
                # on an unbound variable below.
                continue
            file_.seek(0, os.SEEK_SET)
            size = len(data)
            payload = BytesIO(data if isinstance(data, bytes) else data.encode())
            try:
                info = tar_file.gettarinfo(fileobj=file_)
            except UnsupportedOperation:
                # Non-filesystem file objects can't be stat'ed; build the
                # tar header manually.
                info = tarfile.TarInfo(name=file_name)
            info.size = size
            tar_file.addfile(tarinfo=info, fileobj=payload)
    file_size = temp_file.tell()
    temp_file.seek(0)
    response = HttpResponse(temp_file, content_type='application/x-tar')
    response['Content-Disposition'] = 'attachment; filename=%s' % self.tarfile_name
    response['Content-Length'] = file_size
    return response
def test_basic_actions(self):
    """BlobstoreStorage round trip: save, metadata, HTTP fetch, read,
    delete (six-based variant)."""
    storage = BlobstoreStorage()

    # Save a new file
    filename = storage.save('tmp', ContentFile('content', name='my_file'))
    self.assertIsInstance(filename, six.string_types)
    self.assertTrue(filename.endswith('tmp'))

    # Check .exists(), .size() and .url()
    self.assertTrue(storage.exists(filename))
    self.assertEqual(storage.size(filename), len('content'))
    url = storage.url(filename)
    self.assertIsInstance(url, six.string_types)
    self.assertNotEqual(url, '')

    # Check URL can be fetched
    absolute = six.moves.urllib.parse.urlunparse(
        ('http', os.environ['HTTP_HOST'], url, None, None, None)
    )
    fetched = urlfetch.fetch(absolute)
    self.assertEqual(fetched.status_code, six.moves.http_client.OK)
    self.assertEqual(fetched.content, 'content')

    # Open it, read it
    # NOTE: Blobstore doesn't support updating existing files.
    handle = storage.open(filename)
    self.assertIsInstance(handle, File)
    self.assertEqual(handle.read(), 'content')

    # Delete it
    storage.delete(filename)
    self.assertFalse(storage.exists(filename))
def test_basic_actions(self):
    """CloudStorage round trip with a unicode filename (six-based
    variant)."""
    storage = CloudStorage()
    name = u'tmp.ąćęłńóśźż.马铃薯.zip'

    filename = storage.save(name, ContentFile('content', name='my_file'))
    self.assertIsInstance(filename, six.string_types)
    self.assertTrue(filename.endswith(name))

    self.assertTrue(storage.exists(filename))
    self.assertEqual(storage.size(filename), len('content'))

    url = storage.url(filename)
    self.assertIsInstance(url, six.string_types)
    self.assertNotEqual(url, '')

    # The URL must actually serve the stored bytes
    absolute = six.moves.urllib.parse.urlunparse(
        ('http', os.environ['HTTP_HOST'], url, None, None, None)
    )
    fetched = urlfetch.fetch(absolute)
    self.assertEqual(fetched.status_code, six.moves.http_client.OK)
    self.assertEqual(fetched.content, 'content')

    # Read it back through the storage API
    handle = storage.open(filename)
    self.assertIsInstance(handle, File)
    self.assertEqual(handle.read(), 'content')

    # Delete it
    storage.delete(filename)
    self.assertFalse(storage.exists(filename))
def export_documents(export):
    """Zip all exportable contacts & invoicing documents for *export*,
    attach the archive as a VosaeFile, and email the issuer.

    NOTE(review): the whole export runs inside the language context so the
    archive name and the notification email are localized to the export's
    language -- confirm this scope is intended.
    """
    from contacts.imex import get_exportable_documents as contacts_documents
    from invoicing.imex import get_exportable_documents as invoicing_documents
    with respect_language(export.language):
        archive_name = _('Vosae export.zip')
        # In-memory zip built on top of an empty ContentFile.
        zipped = ContentFile('', archive_name)
        f = zipfile.ZipFile(zipped, mode='w', compression=zipfile.ZIP_DEFLATED)
        for documents, path_func, doc_func in contacts_documents(export):
            for document in documents:
                f.writestr(path_func(document), doc_func(document))
        for documents, path_func, doc_func in invoicing_documents(export):
            for document in documents:
                f.writestr(path_func(document), doc_func(document))
        f.close()
        zipped.content_type = "application/zip"
        # Store the archive and link it to the export record.
        export.zipfile = VosaeFile(
            tenant=export.tenant,
            uploaded_file=zipped,
            issuer=export.issuer
        )
        export.zipfile.save()
        export.update(set__zipfile=export.zipfile)
        context = {
            'tenant': export.tenant,
            'file': export.zipfile,
            'site': {'name': settings.SITE_NAME, 'url': settings.SITE_URL}
        }
        # Email to issuer
        subject = _('Your Vosae export is available')
        message = render_to_string('data_liberation/emails/export_finished.txt', context)
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [export.issuer.email])
def test_put_twice():
    """Saving the same name twice must reuse the existing file and keep its
    content readable."""
    payload = ContentFile(b'This is test content')
    first_name = default_storage.save('twice.txt', payload)
    second_name = default_storage.save('twice.txt', payload)
    assert first_name == second_name
    with default_storage.open(first_name) as stored:
        assert stored.read() == b'This is test content'
def from_native(self, base64_data):
    """Accept raw base64 image data (decoded into a named ContentFile),
    pass file-like inputs through after an extension check, and delegate
    to the parent field."""
    if base64_data is None:
        data = base64_data
    elif isinstance(base64_data, basestring):
        # Try to decode the file. Return validation error if it fails.
        try:
            decoded_file = base64.b64decode(base64_data)
        except TypeError:
            raise serializers.ValidationError(_(u"Please upload a valid image."))
        # 12 random characters are more than enough for a unique name.
        file_name = str(uuid.uuid4())[:12]
        # Infer and validate the extension from the decoded content.
        file_extension = self.get_file_extension(file_name, decoded_file)
        self.check_file_extension(file_extension)
        data = ContentFile(decoded_file,
                           name=file_name + "." + file_extension)
    else:
        # Already a file-like object; validate its extension and rewind.
        data = base64_data
        file_extension = self.get_file_extension(data.name, data.read())
        self.check_file_extension(file_extension)
        data.seek(0)
    return super(Base64ImageField, self).from_native(data)
def instructStudentChallenge_Submit(request):
    """Record a student's challenge assessment (optionally with a video
    recording) and redirect back to challenge selection for the same
    student and curriculum.

    Refactor: the original duplicated the ``StudentChallengeEvent`` create
    call four times (pass/fail x with/without recording); the shared kwargs
    are now built once.
    """
    challengeid = request.POST.get('challenge')
    curriculumid = request.POST.get('curriculum')
    progressionid = request.POST.get('progression')
    studentid = request.POST.get('student')
    assessment_comments = request.POST.get('assessment_comments')

    # Resolve the related records for the event
    progression = Progression.objects.get(id=progressionid)
    student = User.objects.get(id=studentid)
    curriculum = Curriculum.objects.get(id=curriculumid)
    challenge = Challenge.objects.get(id=challengeid)

    event_kwargs = dict(
        progressionid=progression,
        studentid=student,
        curriculumid=curriculum,
        challengeid=challenge,
        instructorid=request.user,
        resultcode=(request.POST.get('resultCode') == "pass"),
        comment=assessment_comments,
    )

    # Save the recording, if one was uploaded
    recording = request.FILES.get('recordingBlob')
    if recording is not None:
        videofilepath = 'ChallengeRecordings/' + recording.name
        default_storage.save(videofilepath, ContentFile(recording.read()))
        event_kwargs['videofile'] = videofilepath

    StudentChallengeEvent.objects.create(**event_kwargs)

    # Reroute back to ChallengeSelection for that same Student + Curriculum
    base_url = reverse('instructStudentChallenge_select')
    query_string = urlencode({'student': studentid, 'curriculum': curriculumid})
    return redirect('{}?{}'.format(base_url, query_string))
import base64

import pytest
from django.core.files.base import ContentFile

from mysite.helpers import base64_to_file

BASE64_GIF_IMAGE = 'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7'


@pytest.mark.parametrize('data_uri,expected', [
    (
        'data:image/gif;base64,{}'.format(BASE64_GIF_IMAGE),
        ContentFile(base64.b64decode(BASE64_GIF_IMAGE), name='image.gif')
    ),
    ('bad data', None),
])
def test_base64_to_file(data_uri, expected):
    """base64_to_file returns a ContentFile for a valid data URI and None
    for garbage input.

    The parametrize argname was renamed from ``base64`` so it no longer
    shadows the stdlib ``base64`` module imported above.
    """
    assert isinstance(base64_to_file(data_uri), type(expected))
def test_file_upload_directory_permissions(self):
    """Intermediate upload directories must be created with mode 0o765."""
    self.storage = FileSystemStorage(self.storage_dir)
    saved = self.storage.save("the_directory/the_file", ContentFile("data"))
    directory = os.path.dirname(self.storage.path(saved))
    self.assertEqual(os.stat(directory)[0] & 0o777, 0o765)
def test_file_upload_default_permissions(self):
    """Without explicit permissions, saved files get 0o666 masked by the
    process umask."""
    self.storage = FileSystemStorage(self.storage_dir)
    saved = self.storage.save("some_file", ContentFile("data"))
    file_mode = os.stat(self.storage.path(saved))[0] & 0o777
    self.assertEqual(file_mode, 0o666 & ~self.umask)
def test_file_upload_permissions(self):
    """Uploaded files must be created with mode 0o654."""
    self.storage = FileSystemStorage(self.storage_dir)
    saved = self.storage.save("the_file", ContentFile("data"))
    observed_mode = os.stat(self.storage.path(saved))[0] & 0o777
    self.assertEqual(observed_mode, 0o654)
def put_file(file_url, content):
    """
    Writes out the content to the file_url using config info from user_settings.
    Note that content is bytecodes.

    Dispatches on the URL scheme: http (POST), ssh (paramiko RemoteStorage),
    tardis (MyTardis datafile) or file (local storage). Returns *content*.

    NOTE(review): Python 2 code (urllib/urllib2).
    """
    logger.debug("file_url=%s" % file_url)
    #logger.debug('content=%s' % content)
    if '..' in file_url:
        # .. allow url to potentially leave the user filesys. This would be bad.
        raise InvalidInputError(".. not allowed in urls")
    scheme = urlparse(file_url).scheme
    http_file_url = _get_http_url(file_url)
    # TODO: replace with parse_bdp_url()
    o = urlparse(http_file_url)
    mypath = o.path
    location = o.netloc
    if mypath[0] == os.path.sep:
        # Strip the leading separator so the path is storage-relative.
        mypath = mypath[1:]
    logger.debug("mypath=%s" % mypath)
    query = parse_qsl(o.query)
    query_settings = dict(x[0:] for x in query)
    if '@' in location:
        # Drop any user@ prefix from the host.
        location = location.split('@')[1]
    if scheme == 'http':
        # TODO: test
        # NOTE(review): hard-coded demo POST payload -- presumably a
        # placeholder; ``content`` is not sent here. Confirm.
        import urllib
        import urllib2
        values = {'name': 'Michael Foord',
                  'location': 'Northampton',
                  'language': 'Python'}
        data = urllib.urlencode(values)
        req = urllib2.Request(file_url, data)
        response = urllib2.urlopen(req)
        res = response.read()
        logger.debug("response=%s" % res)
    elif scheme == "ssh":
        key_file = get_value('key_file', query_settings)
        if not key_file:
            key_file = None  # require None for ssh_settings to skip keys
        username = get_value('username', query_settings)
        password = get_value('password', query_settings)
        root_path = get_value('root_path', query_settings)
        logger.debug("key_file=%s" % key_file)
        logger.debug("root_path=%s" % root_path)
        paramiko_settings = {'username': username,
                             'password': password}
        if key_file:
            paramiko_settings['key_filename'] = key_file
        ssh_settings = {'params': paramiko_settings,
                        'host': location,
                        'root': str(root_path) + "/"}
        logger.debug("ssh_settings=%s" % ssh_settings)
        fs = RemoteStorage(settings=ssh_settings)
        # FIXME: does this overwrite?
        fs.save(mypath, ContentFile(content))  # NB: ContentFile only takes bytes
        logger.debug("File to be written on %s" % location)
    elif scheme == "tardis":
        # TODO: do a POST of a new datafile into existing exp and dataset
        # parse file_url to extract tardis host, exp_id and dataset_id
        from chiminey.mytardis import create_datafile
        create_datafile(file_url, content)
    elif scheme == "file":
        root_path = get_value('root_path', query_settings)
        logger.debug("remote_fs_path=%s" % root_path)
        fs = LocalStorage(location=root_path)
        dest_path = fs.save(mypath, ContentFile(content))  # NB: ContentFile only takes bytes
        logger.debug("dest_path=%s" % dest_path)
    return content
def newfile(self, name, contents):
    """Create *name* with *contents* in storage and return its resolved
    path."""
    destination = self.path(name)
    storage.save(destination, ContentFile(contents))
    return destination
def insertDataImage(request):
    """Handle the geo-referenced photo upload form.

    POST flow, in order:
    1. valid station form -> re-render the form with the station's
       coordinates filled into the geometry field;
    2. valid nominatim form -> geocode the free-text address (Italy only)
       and re-render with the resulting point;
    3. valid image form -> embed user/comment/position EXIF data into the
       photo, store it as a GeorefencedImage, and redirect to the user's
       image list.
    Any invalid form re-renders with ``invalid`` set; GET renders unbound
    forms.
    """
    if request.method == 'POST':  # If the form has been submitted...
        # Forms bound to the POST data
        stationform = StationForm(request.user.get_username(), request.POST, request.FILES)
        nominatimform = NominatimForm(request.POST)
        form = ImageForm(request.POST, request.FILES)

        if stationform.is_valid():  # All validation rules pass
            slug = stationform.cleaned_data['station_slug']
            if slug:
                station = StationMetadata.objects.get(ident__username=request.user.username, slug=slug)
                #stationlat=station.lat
                #stationlon=station.lon
                # Re-bind the forms with the station's coordinates as geometry.
                POST = request.POST.copy()
                POST['geom'] = str(Point(station.lon, station.lat))
                stationform = StationForm(request.user.get_username(), POST, request.FILES)  # A form bound to the new data
                form = ImageForm(POST, request.FILES)  # A form bound to the new data
                return render(request, 'insertdata/form.html',
                              {'form': form, 'stationform': stationform, 'nominatimform': nominatimform})
        else:
            stationform = StationForm(request.user.get_username())
            return render(request, 'insertdata/form.html',
                          {'form': form, 'stationform': stationform, 'nominatimform': nominatimform, "invalid": True})

        if nominatimform.is_valid():  # All validation rules pass
            address = nominatimform.cleaned_data['address']
            if address:
                # Geocode the free-text address (best match, Italy only).
                nom = Nominatim(base_url="http://nominatim.openstreetmap.org", referer=get_current_site(request))
                result = nom.query(address, limit=1, countrycodes="IT")
                if len(result) >= 1:
                    lat = result[0]["lat"]
                    lon = result[0]["lon"]
                    address = result[0]["display_name"]
                    POST = request.POST.copy()
                    POST['geom'] = str(Point(float(lon), float(lat)))
                    POST['address'] = address
                    # Forms bound to the new data
                    stationform = StationForm(request.user.get_username(), POST, request.FILES)
                    nominatimform = NominatimForm(POST)
                    form = ImageForm(POST, request.FILES)
                    return render(request, 'insertdata/form.html',
                                  {'form': form, 'stationform': stationform, 'nominatimform': nominatimform})
            else:
                nominatimform = NominatimForm()
                return render(request, 'insertdata/form.html',
                              {'form': form, 'stationform': stationform, 'nominatimform': nominatimform, "invalid": True})

        if form.is_valid():  # All validation rules pass
            if True:
                # Active path: write EXIF (position, user, comment) via
                # rmap's helper.
                from rmap import exifutils
                comment = form.cleaned_data['comment']
                geom = form.cleaned_data['geom']
                image = request.FILES['image']
                dt = datetime.utcnow().replace(microsecond=0)
                lon = geom['coordinates'][0]
                lat = geom['coordinates'][1]
                image = image.read()
                body = exifutils.setgeoimage(image, lat, lon, imagedescription=request.user.username, usercomment=comment)
            else:
                # Dead alternative kept from the original: pexif-based
                # EXIF tagging.
                import pexif
                img = pexif.JpegFile.fromString(handle_uploaded_file(image).encode("utf8"))
                exif = img.get_exif()
                if exif:
                    primary = exif.get_primary()
                if not exif is None or not primary is None:
                    primary.ImageDescription = str(request.user.username)
                    #primary.ExtendedEXIF.UserComment = "UNICODE"+chr(0x00)+str(comment)
                    primary.ExtendedEXIF.UserComment = chr(0x55) + chr(0x4E) + chr(0x49) + chr(0x43) + chr(0x4F) + chr(0x44) + chr(0x45) + chr(0x00) + str(comment)
                    img.set_geo(lat, lon)
                    # try:
                    #     print primary.DateTime
                    # except:
                    #     print "DateTime not present"
                    primary.DateTime = datetime.utcnow().strftime("%Y:%m:%d %H:%M:%S")
                    # print primary.DateTime
                    body = img.writeString()

            #grimages=GeorefencedImage.objects.filter(ident__username=ident)
            #grimages=GeorefencedImage.objects.filter(id=1)
            #f = NamedTemporaryFile(delete=False)
            #image = File(f)
            #image.write(body)
            #f.close()
            #os.unlink(f.name)

            if True:
                # Direct DB insert of the tagged image.
                geoimage = GeorefencedImage(active=True, geom=geom, comment=comment,
                                            ident=request.user, date=dt,
                                            category=CATEGORY_CHOICES[1])
                geoimage.image.save('geoimage.jpg', ContentFile(body))
                geoimage.save()
            else:
                # Dead alternative: publish the photo over AMQP.
                # Which user to use for AMQP? We have the username but not
                # the password; all admins would need publish rights to use
                # amqpuser here.
                #user=request.user.username,
                user = rmap.settings.amqpuser
                password = rmap.settings.amqppassword
                rmap.rmap_core.send2amqp(body=body,
                                         user=user,
                                         password=password,
                                         host="localhost",
                                         exchange="photo",
                                         routing_key="photo")

            #return HttpResponseRedirect(reverse('geoimage-ident-id', args=[request.user.username,geoimage.pk]))
            return HttpResponseRedirect(
                reverse('geoimage-ident', args=[request.user.username]))
        else:
            form = ImageForm()  # An unbound form
            return render(request, 'insertdata/form.html',
                          {'form': form, 'stationform': stationform, 'nominatimform': nominatimform, "invalid": True})
    else:
        stationform = StationForm(request.user.get_username())  # An unbound form
        nominatimform = NominatimForm()  # An unbound form
        form = ImageForm()  # An unbound form
        return render(request, 'insertdata/form.html',
                      {'form': form, 'stationform': stationform, 'nominatimform': nominatimform})
def save_entry(title, content):
    """Persist a markdown encyclopedia entry under ``entries/<title>.md``.

    ``default_storage.save()`` never overwrites — it deduplicates the name
    instead — so any existing file is deleted first to get true
    replace-on-save semantics.
    """
    path = f"entries/{title}.md"
    if default_storage.exists(path):
        default_storage.delete(path)
    default_storage.save(path, ContentFile(content))
def prepare_ballot(e, total, n, emails, keyemails, intpdf):
    """Generate, store and distribute every ballot for election ``e``.

    Python 2 code (uses ``long`` and ``cStringIO``).  Side effects, in order:
      1. Creates ``total`` Ballot rows (serials 100..total+99).  Each ballot
         has two halves (A/B): a candidate permutation produced by an
         external shell script, its ElGamal ciphers/plains read back from
         disk files, and ``n`` HMAC-derived (votecode, receipt) pairs.
      2. Marks ``e.prepared``, assigns one ballot per address in ``emails``
         and mails it via an external sendmail script.
      3. Builds ``intpdf`` PDF ballots (ReportLab + QR codes), zips them onto
         a Pdfballot record and mails the download link to ``e.c_email``.
      4. Generates key ``k1``, mails it to ``keyemails``, writes an
         encrypted CSV of all ballots and POSTs it to the ABB server.

    Returns the ``requests`` response of the final CSV upload.

    NOTE(review): assumes ``total >= len(emails) + intpdf`` (no bounds check
    on ``unused[counter]``) and ``intpdf >= 1`` (``new_pdf`` and ``stoken``
    are referenced after the PDF loop) — confirm with callers.  The
    ``unused`` queryset is *not* filtered on the ``used`` flag despite the
    comment; correctness relies on ``counter`` never revisiting a row.
    """
    #print "test...creating ballot.."
    #print total
    #print n
    #create ballots
    for v in range(100,total+100):
        serial = str(v)
        # Per-ballot HMAC key; all votecodes/receipts derive from it.
        key = os.urandom(RSIZE)
        skey = base64.b64encode(key)
        codes = ["",""]
        recs = ["",""]
        votes = ["",""]
        ciphers = ["",""]
        plains = ["",""]
        for ab in range(2):  # the two ballot halves, A and B
            #print "script run"
            # External script prints the candidate permutation on stdout and
            # writes EC_cipher.txt / EC_plain.txt as a side effect.
            p = subprocess.Popen(["sh","/var/www/finer/EC-ElGamal/GenPerm.sh", str(n), str(total)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
            output,err = p.communicate()
            votes[ab] = output
            #read from the disk file for ciphers
            f = open('/var/www/finer/EC-ElGamal/EC_cipher.txt')
            lines = f.readlines()
            f.close()
            flag = 0  # NOTE(review): unused
            i = 0
            # Join the file's lines using " " and "," as alternating
            # separators (pairs separated by spaces, pairs joined by commas).
            for enc in lines:
                i+=1
                if i >= 2:
                    if i%2 == 0:
                        ciphers[ab]+=" "
                    else:
                        #" " and "," alternating
                        ciphers[ab]+=","
                ciphers[ab]+=enc.strip()
            #read from the disk file for plains
            f = open('/var/www/finer/EC-ElGamal/EC_plain.txt')
            lines = f.readlines()
            f.close()
            i = 0
            for decommit in lines:
                i+=1
                if i >= 2:
                    if i%2 == 0:
                        plains[ab]+=" "
                    else:
                        #" " and "," alternating
                        plains[ab]+=","
                plains[ab]+=decommit.strip()
            # Derive one (votecode, receipt) pair per candidate from
            # HMAC-SHA256(key, serial||half||index).
            for i in range(n):
                message = bytes(serial+str(ab)+str(i)).encode('utf-8')
                c = hmac.new(key, message, digestmod=hashlib.sha256).digest()
                c1 = long(binascii.hexlify(c[0:8]), 16) #convert 64 bit string to long
                c1 &= 0x3fffffffffffffff # 64 --> 62 bits
                sc1 = base36encode(c1)
                while len(sc1)<12:#length padding
                    sc1 = "0"+sc1
                r1 = long(binascii.hexlify(c[8:12]), 16) #convert 32 bit string to long
                r1 &= 0x7fffffff # 32 --> 31 bits
                sr1 = base36encode(r1)
                while len(sr1)<6:#length padding
                    sr1 = "0"+sr1
                if i > 0:
                    codes[ab]+=","
                    recs[ab]+=","
                codes[ab]+=addbars(sc1)
                recs[ab]+=sr1
        new_b = Ballot(election = e, serial = serial, key = skey, votes1 = votes[0],votes2 = votes[1],plain1 = plains[0],plain2 = plains[1] ,cipher1 = ciphers[0],cipher2 = ciphers[1], codes1 = codes[0],codes2 = codes[1],rec1 = recs[0],rec2 = recs[1])
        new_b.save()
    #mark as prepared
    e.prepared = True
    e.save()
    # assign email ballots
    #get choices
    options = e.choice_set.order_by('id').values('text')
    opts = [x['text'] for x in options]
    #get all the unassigned ballots
    unused = Ballot.objects.filter(election = e)# all are not used
    counter = 0
    for voter in emails:
        #generate random token
        token = long(binascii.hexlify(os.urandom(16)), 16)
        stoken = base36encode(token)#no padding 128 bit
        b = unused[counter]
        counter += 1
        email = voter.rstrip()
        assign = Assignment(election = e, vID = stoken+email, serial = b.serial)
        assign.save()
        #mark as used
        b.used = True
        b.save()
        #store token
        new_t = Tokens(election = e, token = stoken, email = email)
        new_t.save()
        #get codes and options
        codes1 = b.codes1.split(',')
        codes2 = b.codes2.split(',')
        rec1 = b.rec1.split(',')
        rec2 = b.rec2.split(',')
        # Cast the stored permutations to int before sorting: a lexicographic
        # string sort would order "10" before "2" (the "String sort bug").
        perm1 = [int(x) for x in b.votes1.split(',')]
        perm2 = [int(x) for x in b.votes2.split(',')]
        #sort according to perm1
        sorted1 = sorted(zip(perm1,codes1,rec1))
        sorted2 = sorted(zip(perm2,codes2,rec2))
        ballot_code1 = [y for (x,y,z) in sorted1]
        ballot_code2 = [y for (x,y,z) in sorted2]
        ballot_rec1 = [z for (x,y,z) in sorted1]
        ballot_rec2 = [z for (x,y,z) in sorted2]
        #send email for the first time
        emailbody = "Hello,\n\nHere is your ballot.\n"
        emailbody+= "================================================\nSerial Number: "+b.serial+"\n"
        emailbody+= "================================================\nBallot A: \n"
        for i in range(len(opts)):
            emailbody+= "Votecode: "+ballot_code1[i]+" Receipt: "+ballot_rec1[i]+ " Option: "+opts[i]+"\n"
        emailbody+= "================================================\nBallot B: \n"
        for i in range(len(opts)):
            emailbody+= "Votecode: "+ballot_code2[i]+" Receipt: "+ballot_rec2[i]+ " Option: "+opts[i]+"\n"
        emailbody+= "================================================\n"
        emailbody+= "\nVBB url: "+BB_URL+"vbb/"+e.EID+"/\n"
        emailbody+= "ABB url: "+BB_URL+"abb/"+e.EID+"/\n"
        emailbody+= "Client url: "+CLIENT_URL+e.EID+"/"+stoken+"/\n"
        emailbody+= "\nFINER Ballot Distribution Server\n"
        #send email
        p = subprocess.Popen(["sudo","/var/www/finer/bingmail.sh","Ballot for Election: "+e.question, emailbody,email],stdout=subprocess.PIPE,stderr=subprocess.PIPE, env=env)
        output,err = p.communicate()
    ###################
    #pdf ballots
    zip_buffer = cStringIO.StringIO()
    zfile = zipfile.ZipFile(zip_buffer,'w')
    for i in range(intpdf):
        #generate random token
        token = long(binascii.hexlify(os.urandom(16)), 16)
        stoken = base36encode(token)#no padding 128 bit
        b = unused[counter]
        counter += 1
        email = "pdf"+str(i)
        assign = Assignment(election = e, vID = stoken+email, serial = b.serial)
        assign.save()
        #mark as used
        b.used = True
        b.save()
        #store token
        new_t = Tokens(election = e, token = stoken, email = email)
        new_t.save()
        #get codes and options
        codes1 = b.codes1.split(',')
        codes2 = b.codes2.split(',')
        rec1 = b.rec1.split(',')
        rec2 = b.rec2.split(',')
        # Same int-cast as above to avoid lexicographic permutation sorting.
        perm1 = [int(x) for x in b.votes1.split(',')]
        perm2 = [int(x) for x in b.votes2.split(',')]
        #sort according to perm1
        sorted1 = sorted(zip(perm1,codes1,rec1))
        sorted2 = sorted(zip(perm2,codes2,rec2))
        ballot_code1 = [y for (x,y,z) in sorted1]
        ballot_code2 = [y for (x,y,z) in sorted2]
        ballot_rec1 = [z for (x,y,z) in sorted1]
        ballot_rec2 = [z for (x,y,z) in sorted2]
        #generate the pdf
        buffer = cStringIO.StringIO()
        # Create the PDF object, using the IO object as its "file."
        #register ttf fonts
        ttffont='/usr/share/fonts/truetype/ttf-liberation/'
        pdfmetrics.registerFont(TTFont('LiberationSans', ttffont+'LiberationSans-Regular.ttf'))
        pdfmetrics.registerFont(TTFont('LiberationSansBd', ttffont+'LiberationSans-Bold.ttf'))
        pdfmetrics.registerFont(TTFont('LiberationSansIt', ttffont+'LiberationSans-Italic.ttf'))
        pdfmetrics.registerFont(TTFont('LiberationSansBI', ttffont+'LiberationSans-BoldItalic.ttf'))
        #create pdf doc
        doc = SimpleDocTemplate(buffer, pagesize=A4,leftMargin=0.1*inch,rightMargin=0.1*inch)
        style = ParagraphStyle(
            name='Normal',
            #firstLineIndent = 0,
            #leftIndent = 0,
            fontName='LiberationSansBd',
            fontSize=14,
            leftMargin=0.3*inch,
            firstLineIndent = 0.3*inch,
        )
        style_warning = ParagraphStyle(
            name='Normal',
            fontName='LiberationSans',
            fontSize=12,
            leftMargin=0.5*inch,
            firstLineIndent = 0.5*inch,
        )
        # Prepare table data: two columns of (party, votecode, receipt)
        # triples per row, for halves A and B.
        data = [['Πολιτικό κόμμα', 'Κωδικός A', 'Απόδειξη A','','Πολιτικό κόμμα', 'Κωδικός A', 'Απόδειξη A']]
        data2 = [['Πολιτικό κόμμα', 'Κωδικός B', 'Απόδειξη B','','Πολιτικό κόμμα', 'Κωδικός B', 'Απόδειξη B']]
        # NOTE(review): Python 2 integer division; assumes an even number of
        # options — an odd count silently drops the last one.
        for ii in range(len(opts)/2):
            tempname1 = opts[2*ii].split(';')
            tempname2 = opts[2*ii+1].split(';')
            temprow = [tempname1[0],ballot_code1[2*ii], ballot_rec1[2*ii],'',tempname2[0],ballot_code1[2*ii+1],ballot_rec1[2*ii+1]]
            data.append(temprow)
            temprow = [tempname1[0],ballot_code2[2*ii], ballot_rec2[2*ii],'',tempname2[0],ballot_code2[2*ii+1],ballot_rec2[2*ii+1]]
            data2.append(temprow)
        serial = [['Σειριακός αριθμός:',b.serial,'Σειριακός αριθμός:',b.serial]]
        #pdf part
        parts = []
        table_serial = Table(serial, [2*inch,1.65 * inch, 2*inch,1.65* inch])
        table_serial.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, 0), 'LiberationSansBd'),
            ('FONTSIZE', (0, 0), (-1, -1), 14),
            ('ALIGN', (0, 0), (-1, 0), 'CENTER'),
        ]))
        parts.append(table_serial)
        parts.append(Spacer(1, 0.2 * inch))
        table_with_style = Table(data, [1.5 * inch, 1.3 * inch, 0.8*inch,0.1*inch, 1.5*inch,1.3 * inch, 0.8*inch])
        table_with_style.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, -1), 'LiberationSans'),
            ('FONT', (0, 0), (-1, 0), 'LiberationSansBd'),
            ('FONTSIZE', (0, 0), (-1, -1), 9),
            ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
            ('BOX', (0, 0), (-1, 0), 0.25, colors.green),
            ('ALIGN', (0, 0), (-1, 0), 'CENTER'),
            ('BOX',(0,0),(-1,-1),2,colors.black),
            ('BOX', (1, 0), (2, -1),2, colors.black),
            ('BOX', (4, 0), (-1, -1),2, colors.black),
        ]))
        parts.append(table_with_style)
        parts.append(Spacer(1, 0.4 * inch))
        #drawimage
        img = qrcode.make(SAMPLE_URL+e.EID+"/"+stoken+"/0/")
        output = cStringIO.StringIO() ## temp QR file
        img.save(output,'PNG')
        output.seek(0) #rewind the data
        I = Image(output, width = 150, height = 150)
        I.hAlign = 'LEFT'
        table_img = Table([[I,"A"]], [4*inch,4* inch])
        table_img.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, 0), 'LiberationSansBd'),
            ('FONTSIZE', (0, 0), (-1, -1), 120),
            ('ALIGN', (0, 0), (-1, 0), 'LEFT'),
            ('VALIGN', (0, 0), (-1, 0), 'TOP'),
        ]))
        parts.append(table_img)
        #parts.append(I)
        parts.append(Spacer(1, 0.3 * inch))
        parts.append(Paragraph("Εξυπηρετητής Ψηφοδελτίων FINER          "+BB_URL+"abb/"+e.EID+"/", style))
        parts.append(Spacer(1, 0.55 * inch))
        parts.append( Paragraph("Παρακαλούμε χρησιμοποιήστε οποιαδήποτε από τις δύο πλευρές αυτού του φύλλου.",style_warning))
        #########append url for debug
        #parts.append(Paragraph(SAMPLE_URL+e.EID+"/"+stoken+"/",style_warning))
        # Second page: ballot half B, same layout.
        parts.append(table_serial)
        parts.append(Spacer(1, 0.2 * inch))
        table_with_style = Table(data2, [1.5 * inch, 1.3 * inch, 0.8*inch,0.1*inch, 1.5*inch,1.3 * inch, 0.8*inch])
        table_with_style.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, -1), 'LiberationSans'),
            ('FONT', (0, 0), (-1, 0), 'LiberationSansBd'),
            ('FONTSIZE', (0, 0), (-1, -1), 9),
            ('INNERGRID', (0, 0), (-1, -1), 0.25, colors.black),
            ('BOX', (0, 0), (-1, 0), 0.25, colors.green),
            ('ALIGN', (0, 0), (-1, 0), 'CENTER'),
            ('BOX',(0,0),(-1,-1),2,colors.black),
            ('BOX', (1, 0), (2, -1),2, colors.black),
            ('BOX', (4, 0), (-1, -1),2, colors.black),
        ]))
        parts.append(table_with_style)
        parts.append(Spacer(1, 0.4 * inch))
        #drawimage
        img = qrcode.make(SAMPLE_URL+e.EID+"/"+stoken+"/1/")
        output = cStringIO.StringIO() ## temp QR file
        img.save(output,'PNG')
        output.seek(0) #rewind the data
        I = Image(output, width = 150, height = 150)
        I.hAlign = 'LEFT'
        table_img = Table([[I,"B"]], [4*inch,4* inch])
        table_img.setStyle(TableStyle([
            ('FONT', (0, 0), (-1, 0), 'LiberationSansBd'),
            ('FONTSIZE', (0, 0), (-1, -1), 120),
            ('ALIGN', (0, 0), (-1, 0), 'LEFT'),
            ('VALIGN', (0, 0), (-1, 0), 'TOP'),
        ]))
        parts.append(table_img)
        #parts.append(I)
        parts.append(Spacer(1, 0.3 * inch))
        parts.append(Paragraph("Εξυπηρετητής Ψηφοδελτίων FINER          "+BB_URL+"abb/"+e.EID+"/", style))
        parts.append(Spacer(1, 0.55 * inch))
        parts.append( Paragraph("Παρακαλούμε χρησιμοποιήστε οποιαδήποτε από τις δύο πλευρές αυτού του φύλλου.",style_warning))
        doc.build(parts)
        output.close()
        #save pdf
        zfile.writestr("Ballots/"+str(i)+".pdf", buffer.getvalue())
        buffer.close()
        new_pdf = Pdfballot(election = e, token = stoken)
        new_pdf.save()
    zfile.close()
    # NOTE(review): attaches the whole zip to the *last* Pdfballot created in
    # the loop; NameError here if intpdf == 0.
    new_pdf.pdf.save("Ballots"+e.EID+".zip",ContentFile(zip_buffer.getvalue()))
    zip_buffer.close()
    #send the PDF ballot link
    emailbody = "Hello,\n\nYour ballots are generated. You can download them now.\n"
    emailbody+= "URL: "+Ballot_URL+e.EID+"/"+stoken+"/\n"
    emailbody+= "\nFINER Ballot Distribution Server\n"
    #send email
    p = subprocess.Popen(["sudo","/var/www/finer/bingmail.sh","PDF Ballots for Election: "+e.question, emailbody,e.c_email],stdout=subprocess.PIPE,stderr=subprocess.PIPE, env=env)
    output,err = p.communicate()
    ########################################
    #send ABB CSV data
    #random key for column 1
    k1 = os.urandom(KSIZE)
    sk1 = base64.b64encode(k1)
    new_r = Randomstate(election = e, notes = "k1",random = sk1)
    new_r.save()
    ###send key to key holders
    emailbody = "Dear Key Holder,\n\n Your private key is:\n"
    emailbody+= "================================================\n"
    emailbody+= sk1+"\n"
    emailbody+= "================================================\n"
    emailbody+= "\nYour Tally URL: "+BB_URL+"keyholder/"+e.EID+"/\n"
    emailbody+= "\nFINER Election Authority\n"
    email = keyemails
    #send email
    p = subprocess.Popen(["sudo","/var/www/finer/bingmail.sh","Private Key for Election Definition "+e.EID, emailbody,email],stdout=subprocess.PIPE,stderr=subprocess.PIPE, env=env)
    output,err = p.communicate()
    ##########################
    #create csv file and encrypt the codes
    output = cStringIO.StringIO() ## temp output file
    writer = csv.writer(output, dialect='excel')
    #first row n, k1.
    writer.writerow([str(n),sk1])
    #get all the ballots
    all_ballots = Ballot.objects.filter(election = e)
    for each in all_ballots:
        writer.writerow([each.serial,each.key])#second row serial , key.
        #encrypt codes
        temp_list = each.codes1.split(',')
        enc_list = []
        for temp in temp_list:
            enc_list.append(base64.b64encode(encrypt(temp,k1,key_size=128)))
        writer.writerow(enc_list)
        #write cipher
        temp_list = each.cipher1.split(',')
        writer.writerow(temp_list)
        #write plain
        temp_list = each.plain1.split(',')
        writer.writerow(temp_list)
        #do the same for ballot 2
        #encrypt codes
        temp_list = each.codes2.split(',')
        enc_list = []
        for temp in temp_list:
            enc_list.append(base64.b64encode(encrypt(temp,k1,key_size=128)))
        writer.writerow(enc_list)
        #write cipher
        temp_list = each.cipher2.split(',')
        writer.writerow(temp_list)
        #write plain
        temp_list = each.plain2.split(',')
        writer.writerow(temp_list)
    #post
    reply = requests.post(BB_URL+'abb/'+e.EID+'/upload/',files = {'inputfile':ContentFile(output.getvalue(),name = "init.csv")}, verify=False)
    #close
    output.close()
    return reply
def test_content_saving(self):
    """
    A ContentFile built from either bytes or unicode text must be
    storable with the filesystem storage backend.
    """
    for destination, payload in (('bytes.txt', b"content"),
                                 ('unicode.txt', "español")):
        self.storage.save(destination, ContentFile(payload))
def validate_file(question, value):
    """Validate a JSON-submitted file answer and return it as a ContentFile.

    The JSON-serializable data structure for a file is a dict like:

        {
          "content": [ array of Base64-encoded strings ],
          "type": "text/plain",
        }

    Raises ValueError for malformed input or empty content. If the question
    declares ``file-type: image``, the content is verified and sanitized by
    round-tripping through Pillow (this deliberately drops image metadata to
    protect the user), with an optional ``image.max-size`` bound applied.
    """
    # Check data type.
    if not isinstance(value, dict):
        raise ValueError("Invalid data type (%s)." % type(value))
    if not isinstance(value.get("content"), list):
        raise ValueError("Invalid data type.")
    if not isinstance(value.get("type"), str):
        raise ValueError("Invalid data type.")

    # Fetch content: join the Base64 chunks and decode to raw bytes.
    from base64 import b64decode
    content = b64decode("".join(
        chunk for chunk in value["content"]).encode("ascii"))

    # The file must have content.
    if len(content) == 0:
        raise ValueError("File is empty.")

    if question.spec.get("file-type") == "image":
        # Load with Pillow to ensure it really is an image, and sanitize it
        # by re-encoding (drops metadata).
        from io import BytesIO
        from PIL import Image
        try:
            im = Image.open(BytesIO(content))
            # Force a read from the buffer so decode errors surface now.
            im.load()
        except Exception:
            raise ValueError("That's not an image file.")

        imspec = question.spec.get("image", {})

        # Apply a size constraint and resize the image in-place.
        if imspec.get("max-size"):
            # TODO: Validate the size width/height fields are integers.
            size = imspec["max-size"]
            # BUGFIX: the height bound previously read size["width"], so a
            # configured max height was never honored.
            im.thumbnail((
                size.get("width", im.size[0]),
                size.get("height", im.size[1]),
            ))

        # Write the sanitized image back out as PNG.
        buf = BytesIO()
        im.save(buf, "PNG")
        content = buf.getvalue()

    # Turn it into a Django ContentFile instance.
    from django.core.files.base import ContentFile
    value = ContentFile(content)
    value.name = "unknown.dat"  # needs a name for the storage backend
    return value
def lock_import_dir():
    """Write a timestamped lock marker for the ARP import directory."""
    # Called for its side effects; the returned root path is not needed here.
    get_arp_root()
    stamp = "locked: %s" % timezone.localtime(timezone.now())
    default_storage.save(settings.ARP_IMPORT_LOCK, ContentFile(stamp))
GenericRelation) from django.contrib.contenttypes.models import ContentType from django.core.files.base import ContentFile from django.core.validators import (validate_ipv4_address, validate_ipv6_address) from django.db import models from .. import generators as g, mix_types as t, _compat as _ from ..main import (SKIP_VALUE, TypeMixerMeta as BaseTypeMixerMeta, TypeMixer as BaseTypeMixer, GenFactory as BaseFactory, Mixer as BaseMixer, _Deffered) get_contentfile = ContentFile if VERSION < (1, 4): get_contentfile = lambda content, name: ContentFile(content) MOCK_FILE = path.abspath( path.join(path.dirname(path.dirname(__file__)), 'resources', 'file.txt')) MOCK_IMAGE = path.abspath( path.join(path.dirname(path.dirname(__file__)), 'resources', 'image.jpg')) def get_file(filepath=MOCK_FILE, **kwargs): """ Generate a content file. :return ContentFile: """ with open(filepath, 'rb') as f: name = path.basename(filepath)
def cmd_message(self, content, for_user=None, group_invite=None):
    """Handle an inbound chat "message" command from the websocket.

    ``content['payload']`` is either a plain string (the message text) or a
    dict carrying ``message``, an optional client-side ``pendingId`` for
    de-duplication, a ``conversation_id``, and an optional ``attachment``
    dict of parallel ``file``/``name``/``size`` lists where each ``file``
    entry is a base64 data-URL.

    Creates the Message (and any Attachment rows), then fans the rendered
    message out to the relevant channel groups, sending push notifications
    to participants who are not currently online.
    """
    payload = content.get('payload')
    if type(payload) is dict:
        message_text = payload.get('message')
        pendingId = payload.get('pendingId', None)
        conversation_pk = payload.get('conversation_id', None)
        attachments = payload.get('attachment', None)
        if attachments:
            # Parallel lists: file_uries[i] pairs with file_names[i]/file_sizes[i].
            file_uries = attachments.get('file')
            file_names = attachments.get('name')
            file_sizes = attachments.get('size')
    else:
        # Bare-string payload: the whole payload is the message text.
        message_text = payload
        pendingId = None
        conversation_pk = None
        attachments = None
    # De-duplicate client retries via the pendingIds already accepted in
    # this channel session.
    accepted_uuid = self.message.channel_session.get(
        'accepted_messages_uuid', list())
    if pendingId and pendingId in accepted_uuid:
        logging.info('Drop duplicated message %r', content)
        return
    if not for_user:
        conversation = self.get_conversation(conversation_pk)
    else:
        conversation = self._get_or_create_conversation(for_user)
    message = Message.objects.create(text=message_text,
                                     author=self.message.user,
                                     conversation=conversation,
                                     group_invite=group_invite)
    if pendingId:
        # Remember this pendingId so a retry of the same send is dropped.
        accepted_uuid.append(pendingId)
        self.message.channel_session[
            'accepted_messages_uuid'] = accepted_uuid
        self.message.channel_session.save()
    if attachments:
        for i in range(len(file_uries)):
            # Data-URL: "data:<mime>;base64,<payload>" — the mime subtype
            # becomes the stored file extension.
            format, imgstr = file_uries[i].split(';base64,')
            ext = format.split('/')[-1]
            data = ContentFile(base64.b64decode(imgstr), name='attach.'
                               + ext)
            attachment = Attachment.objects.create(message=message,
                                                   file=data,
                                                   file_name=file_names[i],
                                                   file_size=file_sizes[i])
    response = {
        'type': 'newMessage',
        'payload': self._create_message_data_dict(message),
        'context': content.get('context'),
    }
    if pendingId:
        response['payload']['pendingId'] = pendingId
    if not for_user:
        # Broadcast to every accepted participant of the conversation; push
        # only to those who are offline and are not the author.
        for participant in conversation.participants.filter(
                status=Participant.PARTICIPANT_ACCEPTED):
            self.group_send(str(participant.user.id), response)
            if participant.user.online(
            ) != User.STATUS_ONLINE and participant.user.id != message.author.id:
                send_push_notification(message, participant.user)
    else:
        self.group_send(str(for_user.id), response)
        if for_user.online(
        ) != User.STATUS_ONLINE and for_user.id != message.author.id:
            send_push_notification(message, for_user)
def sign_in(self, request, pk):
    """Multi-phase sign-up endpoint.

    The JSON body selects the phase via the optional ``phase`` key:
      * no ``phase``              — create the User account (status as given)
      * ``update_profile_pic``    — attach profile picture, status -> 2
      * ``update_official_document``   — attach document set, status -> 3
      * ``update_situatial_document``  — attach document, status -> 4,
                                         stamp ``sign_in_date``

    Images arrive as base64 data-URLs ("data:image/<ext>;base64,<payload>").

    BUGFIX: the original guard was ``if ('phase' in signInData == False):``,
    a chained comparison equivalent to
    ``('phase' in signInData) and (signInData == False)`` — always False —
    so the account-creation branch was unreachable and a request without
    ``phase`` crashed with KeyError in the first ``elif``.
    """
    signInData = json.loads(request.body)

    def _decode_image(data_url, name):
        # "data:image/<ext>;base64,<payload>" -> ContentFile "<name>.<ext>"
        header, imgstr = data_url.split(";base64,")
        ext = header.split("/")[-1]
        return ContentFile(base64.b64decode(imgstr), name=name + "." + ext)

    if 'phase' not in signInData:
        # Phase 0: create the account itself.
        User.objects.create_user(
            first_name=signInData['first_name'],
            last_name=signInData['last_name'],
            email=signInData['email'],
            password=signInData['password'],
            usertype=signInData['usertype'],
            CPF=signInData['CPF'],
            birthdate=datetime.strptime(signInData['birthdate'],
                                        "%d/%m/%Y").date(),
            sign_in_status=signInData['sign_in_status'],
            user_code=signInData['user_code'])
        return Response('User created!')
    elif signInData['phase'] == 'update_profile_pic':
        image = _decode_image(signInData['pic'], "profile_pic")
        user = User.objects.get(pk=signInData['userId'])
        user.profile_pic = image
        user.sign_in_status = 2
        user.save()
        serializer = UserSerializer(user, context={"request": request})
        return Response(serializer.data['profile_pic'])
    elif signInData['phase'] == 'update_official_document':
        for pic in signInData['picSet']:
            image = _decode_image(pic['document'], pic['document_type'])
            document = OfficialDocumentPic(
                user_id=User.objects.get(pk=signInData['userId']),
                document_type=pic['document_type'],
                document_pic=image)
            document.save()
        user = User.objects.get(pk=signInData['userId'])
        user.sign_in_status = 3
        user.save()
        documents = OfficialDocumentPic.objects.filter(
            user_id=signInData['userId'])
        serializer = DocumentSerializer(documents,
                                        many=True,
                                        context={"request": request})
        return Response(serializer.data)
    elif signInData['phase'] == 'update_situatial_document':
        pic = signInData['pic']
        image = _decode_image(pic['document'], pic['document_type'])
        document = SituationalDocumentPic(
            user_id=User.objects.get(pk=signInData['userId']),
            document_type=pic['document_type'],
            document_pic=image)
        document.save()
        user = User.objects.get(pk=signInData['userId'])
        user.sign_in_status = 4
        user.sign_in_date = datetime.now()
        user.save()
        documents = SituationalDocumentPic.objects.filter(
            user_id=signInData['userId'])
        serializer = DocumentSerializer(documents,
                                        many=True,
                                        context={"request": request})
        return Response(serializer.data[0])
def test_file_path(self):
    """Storage backends without real filesystem paths raise
    NotImplementedError from .path()."""
    saved_name = self.storage.save('test.file',
                                   ContentFile('custom contents'))
    self.assertRaises(NotImplementedError, self.storage.path, saved_name)
def get_file(self):
    """Return :class:`django.core.files.base.ContentFile` object."""
    payload = b"Hello world!\n"
    return ContentFile(payload, name="hello-world.txt")
def save(self, *args, **kwargs):
    """Persist the model, then regenerate its rendered TeX file.

    The empty filename lets the FileField's upload_to/storage pick the
    name; save=False avoids a second UPDATE since the row was just written.
    """
    super().save(*args, **kwargs)
    rendered = ContentFile(exam_tex(self))
    self.tex_file.save("", rendered, save=False)
def update_submission(request, challenge_pk):
    """
    API endpoint to update submission related attributes

    Query Parameters:

     - ``challenge_phase``: challenge phase id, e.g. 123 (**required**)
     - ``submission``: submission id, e.g. 123 (**required**)
     - ``stdout``: Stdout after evaluation, e.g. "Evaluation completed in 2 minutes" (**required**)
     - ``stderr``: Stderr after evaluation, e.g. "Failed due to incorrect file format" (**required**)
     - ``submission_status``: Status of submission after evaluation
        (can take one of the following values: `FINISHED`/`CANCELLED`/`FAILED`), e.g. FINISHED (**required**)
     - ``result``: contains accuracies for each metric, (**required**) e.g.
            [
                {
                    "split": "split1-codename",
                    "show_to_participant": True,
                    "accuracies": {"metric1": 90}
                },
                {
                    "split": "split2-codename",
                    "show_to_participant": False,
                    "accuracies": {"metric1": 50, "metric2": 40}
                }
            ]
    - ``metadata``: Contains the metadata related to submission
        (only visible to challenge hosts) e.g:
            {"average-evaluation-time": "5 sec", "foo": "bar"}
    """
    # Only challenge hosts may push evaluation results.
    if not is_user_a_host_of_challenge(request.user, challenge_pk):
        response_data = {
            "error": "Sorry, you are not authorized to make this request!"
        }
        return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

    if request.method == "PUT":
        challenge_phase_pk = request.data.get("challenge_phase")
        submission_pk = request.data.get("submission")
        submission_status = request.data.get("submission_status", "").lower()
        stdout_content = request.data.get("stdout", "")
        stderr_content = request.data.get("stderr", "")
        submission_result = request.data.get("result", "")
        metadata = request.data.get("metadata", "")
        submission = get_submission_model(submission_pk)
        # Accuracies flagged show_to_participant are mirrored into the
        # participant-visible result file.
        public_results = []
        successful_submission = submission_status == Submission.FINISHED
        if submission_status not in [
                Submission.FAILED,
                Submission.CANCELLED,
                Submission.FINISHED,
        ]:
            response_data = {"error": "Sorry, submission status is invalid"}
            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)

        if successful_submission:
            try:
                results = json.loads(submission_result)
            except (ValueError, TypeError) as exc:
                response_data = {
                    "error":
                    "`result` key contains invalid data with error {}. "
                    "Please try again with correct format.".format(str(exc))
                }
                return Response(response_data,
                                status=status.HTTP_400_BAD_REQUEST)

            # Validate every split before saving anything, so a bad split
            # leaves the leaderboard untouched.
            leaderboard_data_list = []
            for phase_result in results:
                split = phase_result.get("split")
                accuracies = phase_result.get("accuracies")
                show_to_participant = phase_result.get(
                    "show_to_participant", False)
                try:
                    challenge_phase_split = ChallengePhaseSplit.objects.get(
                        challenge_phase__pk=challenge_phase_pk,
                        dataset_split__codename=split,
                    )
                except ChallengePhaseSplit.DoesNotExist:
                    response_data = {
                        "error":
                        "Challenge Phase Split does not exist with phase_id: {} and "
                        "split codename: {}".format(challenge_phase_pk, split)
                    }
                    return Response(response_data,
                                    status=status.HTTP_400_BAD_REQUEST)

                leaderboard_metrics = challenge_phase_split.leaderboard.schema.get(
                    "labels")
                missing_metrics = []
                malformed_metrics = []
                for metric, value in accuracies.items():
                    if metric not in leaderboard_metrics:
                        missing_metrics.append(metric)
                    if not isinstance(value, (float, int)):
                        malformed_metrics.append((metric, type(value)))

                if len(missing_metrics):
                    response_data = {
                        "error":
                        "Following metrics are missing in the "
                        "leaderboard data: {}".format(missing_metrics)
                    }
                    return Response(response_data,
                                    status=status.HTTP_400_BAD_REQUEST)

                if len(malformed_metrics):
                    response_data = {
                        "error":
                        "Values for following metrics are not of "
                        "float/int: {}".format(malformed_metrics)
                    }
                    return Response(response_data,
                                    status=status.HTTP_400_BAD_REQUEST)

                data = {"result": accuracies}
                serializer = CreateLeaderboardDataSerializer(
                    data=data,
                    context={
                        "challenge_phase_split": challenge_phase_split,
                        "submission": submission,
                        "request": request,
                    },
                )
                if serializer.is_valid():
                    leaderboard_data_list.append(serializer)
                else:
                    return Response(serializer.errors,
                                    status=status.HTTP_400_BAD_REQUEST)

                # Only after checking if the serializer is valid, append the
                # public split results to results file
                if show_to_participant:
                    public_results.append(accuracies)

            # All-or-nothing write of the leaderboard rows.
            try:
                with transaction.atomic():
                    for serializer in leaderboard_data_list:
                        serializer.save()
            except IntegrityError:
                logger.exception(
                    "Failed to update submission_id {} related metadata".
                    format(submission_pk))
                response_data = {
                    "error":
                    "Failed to update submission_id {} related metadata".
                    format(submission_pk)
                }
                return Response(response_data,
                                status=status.HTTP_400_BAD_REQUEST)

        submission.status = submission_status
        submission.completed_at = timezone.now()
        submission.stdout_file.save("stdout.txt", ContentFile(stdout_content))
        submission.stderr_file.save("stderr.txt", ContentFile(stderr_content))
        submission.submission_result_file.save("submission_result.json",
                                               ContentFile(
                                                   str(public_results)))
        submission.submission_metadata_file.save(
            "submission_metadata_file.json", ContentFile(str(metadata)))
        submission.save()
        response_data = {
            "success": "Submission result has been successfully updated"
        }
        return Response(response_data, status=status.HTTP_200_OK)

    if request.method == "PATCH":
        # Worker marks the submission as RUNNING and stamps started_at.
        submission_pk = request.data.get("submission")
        submission_status = request.data.get("submission_status", "").lower()
        submission = get_submission_model(submission_pk)
        if submission_status not in [Submission.RUNNING]:
            response_data = {"error": "Sorry, submission status is invalid"}
            return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
        data = {"status": submission_status, "started_at": timezone.now()}
        serializer = SubmissionSerializer(submission,
                                          data=data,
                                          partial=True,
                                          context={"request": request})
        if serializer.is_valid():
            # BUGFIX: was `submission.save()`, which wrote the unmodified
            # instance and silently discarded the validated status and
            # started_at values.
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def upload_file(self, request, *args, **kwargs):
    """Store an uploaded file encrypted under the user's media directory,
    index it in Elasticsearch, record it via ``crud.add`` and log the action.

    NOTE(review): this body only works on Python 2 — ``map(...)`` is
    mutated by index and later joined, and ``base64.b16encode`` is fed a
    ``str`` path; on Python 3 both would fail.  Confirm the runtime.
    """
    file = request.data.get('file')
    if request.data.get('directory_id') != 'null':
        # Sanitize each dot-separated name part: strip quotes, replace
        # spaces with underscores; then splice the directory id into the
        # name before the extension.
        modified_file_name = map(
            lambda x: x.replace("\'", "").strip()
            if x.find("'") != -1 else x.replace(' ', '_').strip(),
            file.name.split('.'))
        modified_file_name[-1] = '.' + modified_file_name[-1]
        modified_file_name.insert(-2, request.data.get('directory_id'))
    else:
        modified_file_name = map(
            lambda x: x.replace("\'", "").strip()
            if x.find("'") != -1 else x.replace(' ', '_').strip(),
            file.name.split('.'))
        modified_file_name[-1] = '.' + modified_file_name[-1]
    try:
        # Per-user directory under MEDIA_ROOT, created on first upload.
        if not os.path.isdir(settings.MEDIA_ROOT + str(request.user.id)):
            os.mkdir(settings.MEDIA_ROOT + str(request.user.id))
        upload_dir = default_storage.save(''.join(modified_file_name),
                                          ContentFile(file.read()))
        user_name = request.user.first_name + request.user.last_name
        user_id = request.user.id
        # Save plaintext via default_storage, encrypt it into the user's
        # directory (name base16-encoded), then remove the plaintext copy.
        # NOTE(review): hard-coded passphrase '123' — security issue to fix
        # at the encrypt_file call sites.
        tmp_file = os.path.join(settings.MEDIA_ROOT, upload_dir)
        encrypt_file(
            os.path.join(settings.MEDIA_ROOT, upload_dir),
            os.path.join(settings.MEDIA_ROOT + str(request.user.id),
                         base64.b16encode(upload_dir)), '123')
        os.remove(tmp_file)
        # ``c`` is presumably a column-name constants module — verify.
        file_data = {
            c.name: file.name,
            "modified_file_name": ''.join(modified_file_name),
            "file_type": file.name.split('.')[-1] or 'n/a',
            "size": file.size,
            "file_content_type": file.content_type,
            "created_by_id": request.user.id,
            "owner_id": request.user.id,
            "created_date": datetime.now(),
            "modified_date": datetime.now()
        }
        # Mirror basic metadata into Elasticsearch for search.
        connection_es.index(index='dms_test',
                            doc_type='post',
                            body={
                                'name': file.name,
                                'type': 'File',
                                'content_type': file.content_type,
                                'owner_id': request.user.id
                            })
        if request.data.get('directory_id') != 'null':
            file_data.update(
                {c.directory_id: request.data.get('directory_id')})
        param = {
            'field': 'file_id',
            'label': 'File',
            'user_name': user_name
        }
        # NOTE(review): act_log_data aliases file_data (same dict object),
        # so the updates below land in both.
        act_log_data = file_data
        response = crud.add(self.table, file_data)
        file_data.update({"id": response})
        act_log_data.update({'user_id': user_id})
        log_view = LogView()
        log_view.generate_log(act_log_data, param)
        return Response(response)
    except Exception as e:
        # NOTE(review): returns the exception object itself, which DRF may
        # fail to serialize; str(e) would be safer.  Also swallows all
        # errors indiscriminately.
        return Response({"error": e})
def get_fake_file(filename):
    """Build an in-memory stand-in upload carrying *filename*."""
    stub = ContentFile('file data')
    stub.name = filename
    return stub
def save_file(self, path, content):
    """Encode *content* to bytes and store it at *path*; returns the
    actual name the storage backend used."""
    payload = ContentFile(smart_bytes(content))
    return self.storage.save(path, payload)
if not image_link: continue try: person = models.Person.objects.get(original_id=member_id) except models.Person.DoesNotExist: print "Could not find %s - ignoring" % person continue url = 'http://mzalendo.com/Images/%s' % image_link source_string = "Original Mzalendo.com website (%s)" % image_link # check to see if this photo has already been used if Image.objects.filter(source=source_string).count(): print "Skipping %s - image already used" % person continue print "Fetching image for '%s': '%s'" % (person, url) person_image = Image( content_object=person, source=source_string, ) person_image.image.save( name=image_link, content=ContentFile(urllib.urlopen(url).read()), ) # break time.sleep(2)
# Scratch script: render a UPC barcode to an in-memory PNG and attach it to
# a Barcode model instance's image field.
from io import BytesIO

import barcode
from barcode.writer import ImageWriter
from django.core.files.base import ContentFile

# NOTE(review): the lines creating `bc` are commented out, so `bc` below is
# undefined and this script raises NameError as written.  (The commented
# line also has a typo: `Barcode.obejcts` should be `Barcode.objects`.)
#from scan.models import Barcode
#bc = Barcode.obejcts.latest('id')

# NOTE(review): python-barcode generally expects the code as a string; an
# int here works only if the installed version coerces it — confirm.
upc = barcode.get('upc', 123456789, writer=ImageWriter())
i = upc.render()
# i is a PIL.Image.Image (e.g. mode=RGB, ~523x280)
image_io = BytesIO()
i.save(image_io, format='PNG')
image_name = 'test.png'
# save=False attaches the file without writing the row; bc.save() commits.
bc.img.save(image_name, content=ContentFile(image_io.getvalue()), save=False)
bc.save()
def get_archiveimage(self):
    """Create and return an ArchiveImage fixture with a 200x100 test
    image attached."""
    image_record = baker.make('cradmin_imagearchive.ArchiveImage')
    payload = ContentFile(create_image(200, 100))
    image_record.image.save('testimage.png', payload)
    return image_record
def test_content_file_custom_name(self):
    """
    Test that the constructor of ContentFile accepts 'name' (#16590).
    """
    custom_name = "I can have a name too!"
    content_file = ContentFile(b"content", name=custom_name)
    self.assertEqual(content_file.name, custom_name)
def merge_attachment_page_data(
    court: Court,
    pacer_case_id: int,
    pacer_doc_id: int,
    # NOTE(review): may be None for bankruptcy pages (see branch below) —
    # effectively Optional[int]; confirm annotation with callers.
    document_number: int,
    text: str,
    attachment_dicts: List[Dict[str, Union[int, str]]],
    debug: bool = False,
) -> Tuple[List[RECAPDocument], DocketEntry]:
    """Merge attachment page data into the docket

    :param court: The court object we're working with
    :param pacer_case_id: A PACER case ID
    :param pacer_doc_id: A PACER document ID
    :param document_number: The docket entry number
    :param text: The text of the attachment page
    :param attachment_dicts: A list of Juriscraper-parsed dicts for each
    attachment.
    :param debug: Whether to do saves during this process.
    :return: A list of RECAPDocuments modified or created during the process,
    and the DocketEntry object associated with the RECAPDocuments
    :raises: RECAPDocument.MultipleObjectsReturned, RECAPDocument.DoesNotExist
    """
    # Locate the main document this attachment page belongs to. The case id
    # filter is only added when we actually have one.
    try:
        params = {
            "pacer_doc_id": pacer_doc_id,
            "docket_entry__docket__court": court,
        }
        if pacer_case_id:
            params["docket_entry__docket__pacer_case_id"] = pacer_case_id
        main_rd = RECAPDocument.objects.get(**params)
    except RECAPDocument.MultipleObjectsReturned as exc:
        # Unclear how to proceed and we don't want to associate this data with
        # the wrong case. We must punt.
        raise exc
    except RECAPDocument.DoesNotExist as exc:
        # Can't find the docket to associate with the attachment metadata
        # It may be possible to go look for orphaned documents at this stage
        # and to then add them here, as we do when adding dockets. This need is
        # particularly acute for those that get free look emails and then go to
        # the attachment page.
        raise exc

    # We got the right item. Update/create all the attachments for
    # the docket entry.
    de = main_rd.docket_entry
    if document_number is None:
        # Bankruptcy attachment page. Use the document number from the Main doc
        document_number = main_rd.document_number

    # In debug mode nothing is saved; bail out before any writes.
    if debug:
        return [], de

    # Save the old HTML to the docket entry.
    pacer_file = PacerHtmlFiles(
        content_object=de, upload_type=UPLOAD_TYPE.ATTACHMENT_PAGE)
    pacer_file.filepath.save(
        "attachment_page.html",  # Irrelevant b/c UUIDFileSystemStorage
        ContentFile(text),
    )

    # Create/update the attachment items.
    rds_created = []
    rds_affected = []
    for attachment in attachment_dicts:
        # Skip attachments missing any required field (all must be truthy).
        sanity_checks = [
            attachment["attachment_number"],  # Missing on sealed items.
            attachment.get("pacer_doc_id", False),  # Missing on some restricted docs (see Juriscraper)
            attachment["page_count"] is not None,
            attachment["description"],
        ]
        if not all(sanity_checks):
            continue

        rd, created = RECAPDocument.objects.update_or_create(
            docket_entry=de,
            document_number=document_number,
            attachment_number=attachment["attachment_number"],
            document_type=RECAPDocument.ATTACHMENT,
        )
        if created:
            rds_created.append(rd)
        rds_affected.append(rd)

        # Copy over textual fields only when the scrape provided a value.
        for field in ["description", "pacer_doc_id"]:
            if attachment[field]:
                setattr(rd, field, attachment[field])

        # Only set page_count and file_size if they're blank, in case
        # we got the real value by measuring.
        if rd.page_count is None:
            rd.page_count = attachment["page_count"]
        if rd.file_size is None and attachment["file_size_str"]:
            try:
                rd.file_size = convert_size_to_bytes(
                    attachment["file_size_str"])
            except ValueError:
                # Unparseable size string: leave file_size unset.
                pass
        rd.save()

        # Do *not* do this async — that can cause race conditions.
        add_items_to_solr([rd.pk], "search.RECAPDocument")

    # Post-merge bookkeeping: flag the docket for IA upload and try to
    # attach any orphaned documents created during this run.
    mark_ia_upload_needed(de.docket, save_docket=True)
    process_orphan_documents(
        rds_created, court.pk, main_rd.docket_entry.docket.date_filed)
    return rds_affected, de
def test_content_file_default_name(self): self.assertEqual(ContentFile(b"content").name, None)
def test_load_should_raise_if_file_is_not_a_zip(): with pytest.raises(ValueError) as excinfo: Backup.load(ContentFile(b'xxx', name='musasa_0.1.0_201501241620.zip')) assert 'Not a zip file' in str(excinfo.value)