def test_code_body(self):
    """
    Tests when codefile was uploaded, read file and set as body.
    """
    category = Category.objects.create(label=u"ソースコード")
    f = File(tempfile.NamedTemporaryFile(
        mode="r+w+t", suffix=".py",
        dir=settings.TEST_TEMPORARY_FILE_DIR
    ))
    body = """
import this
print "this is a sample code file"
hoge = 1 + 1
"""
    f.write(body)
    material_file = MaterialFile.objects.create(file=f)
    code = Material.objects.create(
        _file=material_file,
        description="description",
        category=category
    )
    # (removed stray Python-2 debug statement `print code.body`)
    f.close()
    eq_(code.body, body)
    code.delete()
def _conditional_compress_file(self, name, content, content_encoding):
    """
    Attempt to gzip-compress the given file.

    Yields a single ``(content, content_encoding)`` pair: the compressed
    file when gzip actually shrank it, otherwise the original file.
    """
    if content_encoding != CONTENT_ENCODING_GZIP:
        # Compression was not requested for this file.
        content.seek(0)
        yield content, None
        return
    # Ideally we would compress incrementally, but boto cannot upload a
    # key from an iterator, so spool into a temporary file instead.
    with self._temporary_file() as temp_file:
        with closing(gzip.GzipFile(name, "wb", 9, temp_file)) as zipfile:
            for chunk in content.chunks():
                zipfile.write(chunk)
        # Only use the compressed copy if it is actually smaller.
        if temp_file.tell() < content.tell():
            temp_file.seek(0)
            yield File(temp_file, name), CONTENT_ENCODING_GZIP
            return
    # Gzip made it bigger - fall back to the original bytes.
    content.seek(0)
    yield content, None
def __init__(self, instance, field, name):
    """Bind this descriptor to ``instance`` via ``field`` under ``name``."""
    # No underlying file object yet; content is resolved lazily.
    File.__init__(self, None, name)
    self.instance = instance
    self.field = field
    # Freshly constructed descriptors refer to already-committed storage.
    self._committed = True
    self.storages = field.storages
    self.thumbnail_storages = field.thumbnail_storages
def merge_in_pdf(fa, fb):
    """Merge two uploaded files into a single PDF named ``id_card.pdf``.

    Image inputs are first converted into single-page PDFs (any alpha
    channel is dropped); the two documents are then concatenated.
    """
    for f in [fa, fb]:
        if _is_img(f.name):
            img = Image.open(f.file)
            try:
                # Strip the alpha channel if present.
                r, g, b, a = img.split()
                img = Image.merge('RGB', (r, g, b))
            except ValueError:
                # Fewer than four bands (e.g. plain RGB, L, P): nothing to
                # strip; save the image as-is instead of crashing.
                pass
            temp_file = TemporaryFile()
            img.save(temp_file, "PDF", resolution=100, transparency=0)
            temp_file.seek(0)
            f.file = temp_file
    merger = PdfFileMerger()
    for f in [fa, fb]:
        merger.append(PdfFileReader(f.file))
    temp_file = TemporaryFile()
    merger.write(temp_file)
    temp_file.seek(0)
    pdf_file = File(temp_file)
    pdf_file.name = 'id_card.pdf'
    return pdf_file
def processFileUploads(filelist):
    '''
    processes a file upload list, unzipping all zips
    returns a new list with unzipped files
    '''
    newlist = []
    for upload in filelist:
        if not zipfile.is_zipfile(upload):
            newlist.append(upload)
            continue
        # Unzip and append each member to the list. The context manager
        # closes the archive handle (the original leaked it), and the
        # renamed locals no longer shadow the builtins `zip` and `file`.
        with zipfile.ZipFile(upload, "r") as archive:
            for member in archive.namelist():
                logger.debug("Extracting ZipFile: %s" % member)
                if member.endswith('/'):
                    logger.debug("Skipping directory entry: %s" % member)
                    continue
                if "__MACOSX" in member or ".DS_Store" in member:
                    logger.debug("Skipping MAC OS X resource file artifact: %s" % member)
                    continue
                data = archive.open(member).read()
                newfile = File(io.BytesIO(data))
                newfile.name = member
                newlist.append(newfile)
    return newlist
def test_forward_dependency(self):
    """Forward-dependent fields are reprocessed on save and cleaned up."""
    instance = DependencyTesting.objects.create()
    source = File(open(add_base("static/images/lenna_rect.jpg"), 'rb'))
    instance.image_3 = source
    instance.image_4 = source
    instance.save()
    image_3_path = instance.image_3.path
    image_4_path = instance.image_4.path
    self.assertEqual(instance.image_3.width, 400)
    self.assertEqual(instance.image_4.width, 400)
    self.assertEqual(
        instance.image_3.url,
        "/media/tests/dependencytesting/%s/image_3.jpg" % instance.pk)
    self.assertEqual(
        instance.image_4.url,
        "/media/tests/dependencytesting/%s/image_4.jpg" % instance.pk)
    # Assigning image_2 triggers reprocessing of its dependents on save.
    instance.image_2 = source
    self.assertTrue(os.path.isfile(image_3_path))
    self.assertTrue(os.path.isfile(image_4_path))
    instance.save()
    source.close()
    self.assertEqual(instance.image_3.width, 100)
    self.assertEqual(instance.image_4.width, 150)
    # forward dependencies on django's FileFields will also do the cleanup
    self.assertFalse(os.path.isfile(image_3_path))
    self.assertFalse(os.path.isfile(image_4_path))
    instance.delete()
def test_image_processor(self):
    """Conversion processors run on save, survive reattach, clean up on delete."""
    instance = ImageTesting.objects.create()
    lenna_rect = File(open(add_base("media/static/images/lenna_rect.jpg"), 'rb'))
    instance.image_3 = lenna_rect
    instance.save()
    # make sure conversion went through properly
    # (assertEquals is a deprecated alias; assertEqual is the supported API)
    self.assertEqual(instance.image_3_png.width, 200)
    self.assertEqual(instance.image_3_png.height, 112)
    # save instance, so files get commited to storage
    path = instance.image_3.path
    path_png = instance.image_3_png.path
    # check to see that files got commited
    self.assertTrue(os.path.isfile(path))
    self.assertTrue(os.path.isfile(path_png))
    # make sure dependency gets reattached as expected
    instance = ImageTesting.objects.get(pk=instance.pk)
    self.assertEqual(instance.image_3_png.width, 200)
    self.assertEqual(instance.image_3_png.height, 112)
    self.assertTrue(os.path.isfile(path))
    self.assertTrue(os.path.isfile(path_png))
    # test problematic processor (JPEG2000 is missing a required library)
    instance.image_4 = lenna_rect
    instance.save()
    # check to see that files got commited
    # It is possible than `libjpeg` isn't installed which will cause the test to fail
    self.assertEqual(instance.image_4_jpeg2000.width, 400)
    self.assertEqual(instance.image_4_jpeg2000.height, 225)
    lenna_rect.close()
    # delete instance and check if everything is cleaned up
    instance.delete()
    self.assertFalse(os.path.isfile(path))
    self.assertFalse(os.path.isfile(path_png))
def hashed_name(self, name, content=None, filename=None):
    """Return ``name`` with a content hash inserted before its extension."""
    parsed_name = urlsplit(unquote(name))
    clean_name = parsed_name.path.strip()
    opened = False
    if content is None:
        # Locate the file on disk via the staticfiles finders.
        absolute_path = finders.find(clean_name)
        try:
            content = open(absolute_path, 'rb')
        except (IOError, OSError) as e:
            if e.errno != errno.ENOENT:
                raise
            raise ValueError(
                "The file '%s' could not be found with %r." % (clean_name, self))
        content = File(content)
        opened = True
    try:
        file_hash = self.file_hash(clean_name, content)
    finally:
        # Only close handles we opened ourselves.
        if opened:
            content.close()
    path, filename = os.path.split(clean_name)
    root, ext = os.path.splitext(filename)
    if file_hash is not None:
        file_hash = ".%s" % file_hash
    hashed_name = os.path.join(path, "%s%s%s" % (root, file_hash, ext))
    unparsed_name = list(parsed_name)
    unparsed_name[2] = hashed_name
    # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
    # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
    if '?#' in name and not unparsed_name[3]:
        unparsed_name[2] += '?'
    return urlunsplit(unparsed_name)
def generateGhostDocumentFile(product, document, locker):
    """
    Generate an empty, non-indexed ghost :class:`.DocumentFile` (.stp).

    :param product: :class:`.Product` that represents the arborescense
    :param document: document the generated file is attached to
    :param locker: user who holds the lock on the generated file

    Updates the attributes **doc_id** and **doc_path** of the
    :class:`.Product` (**product**) in relation to the generated
    :class:`.DocumentFile`, and returns the file's path.
    """
    doc_file = pmodels.DocumentFile()
    name = doc_file.file.storage.get_available_name(product.name + ".stp")
    path = os.path.join(doc_file.file.storage.location, name)
    # Create the empty placeholder on disk; the context manager guarantees
    # the handle is released (the original wrapped a closed handle in a
    # django File just to read its size afterwards).
    with open(path, 'w'):
        pass
    doc_file.no_index = True
    doc_file.filename = "Ghost.stp"
    doc_file.size = os.path.getsize(path)  # always 0 for the empty stub
    doc_file.file = name
    doc_file.document = document
    doc_file.locked = True
    doc_file.locker = locker
    doc_file.save()
    product.doc_id = doc_file.id
    product.doc_path = doc_file.file.path
    return doc_file.file.path
def test_manage_update_with_existing_version(self):
    """Check if creation of a new document succeeds even if there
    is a version to update
    """
    from django.core.files.base import File

    document = Document()
    document.title = "Wachtmeister Studer"
    document.author = "Friedrich Glauser"
    document.source_publisher = "Diogenes"
    document.save()

    user = User.objects.get(pk=1)
    # The context manager closes the fixture handle once the version
    # content has been stored.
    with open(os.path.join(TEST_DATA_DIR, 'test.xml')) as xml_handle:
        version_file = File(xml_handle)
        version = Version.objects.create(
            comment="testing 123",
            document=document,
            created_by=user)
        version.content.save("updated_version.xml", version_file)

    self.client.login(username='******', password='******')
    response = self.client.post(
        reverse('manage_update', args=[document.pk]),
        {'title': 'testing 456', 'language': 'de'})
    self.assertTemplateNotUsed(response, 'documents/manage_update.html')
    self.assertRedirects(response, reverse('manage_index'))
def student_register_page(request):
    """Register a new student account with the default student avatar."""
    if request.method == "POST":
        form = RegistrationForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            user = User.objects.create_user(
                username=data["username"],
                password=data["password1"],
                email=data["email"],
            )
            # Every new student gets the bundled default avatar image,
            # renamed after the new username.
            avatar = File(open("%s/%s" % (STATIC_ROOT, "images/student.gif"), "rb"))
            extension = avatar.name.split(".")[-1]
            avatar.name = user.username + "_avatar_student" + "." + extension
            user_profile = UserProfile.objects.create(
                user=user,
                fullname=data["fullname"],
                typeUser="******",
                avatar=avatar,
            )
            avatar.close()
            user_profile.save()
            LessonReference.objects.create(user=user_profile)
            return render_to_response("registration/teacher_signup_success.html",
                                      RequestContext(request))
    else:
        form = RegistrationForm()
    variables = RequestContext(request, {"form": form})
    return render_to_response("registration/student_signup.html", variables)
def get_steps(*names):
    """Open each named data file and return them wrapped as ``File`` objects.

    The handles are intentionally left open for the caller to consume.
    """
    steps = []
    for step_name in names:
        wrapped = File(open(os.path.join(DATA_PATH, step_name)))
        wrapped.name = step_name
        steps.append(wrapped)
    return steps
def store_file(self, filename):
    """Save the file at ``filename`` into this instance's storage
    under its basename, always closing the handle afterwards."""
    storage = self.get_storage()
    # Binary mode: storage backends consume byte chunks, and text mode
    # would corrupt or reject non-UTF-8 content on Python 3.
    f = File(open(filename, 'rb'))
    try:
        storage.save(os.path.basename(filename), f)
    finally:
        f.close()
def save(self, storage, name, meta):
    """Saves this asset to the given storage.

    Resizes the source image to the display size from ``meta`` when
    needed; otherwise defers to the parent implementation unchanged.
    Saving goes through ``storage.path(name)`` when the backend exposes
    a filesystem path, else through an in-memory buffer.
    """
    method = self._method
    # Calculate sizes.
    display_size = meta["size"]
    image_data, original_size = self._image_data_and_size
    data_size = method.get_data_size(display_size, display_size.intersect(original_size))
    # Check whether we need to make a thumbnail.
    if data_size == original_size:
        # Already the right size - no resampling required.
        super(ThumbnailAsset, self).save(storage, name, meta)
    else:
        # Use efficient image loading.
        image_data.draft(None, data_size)
        # Resize the image data.
        try:
            image_data = method.do_resize(image_data, original_size, display_size, data_size)
        except Exception as ex:
            # HACK: PIL raises all sorts of Exceptions :(
            raise ThumbnailError(str(ex))
        # Parse the image format from the target filename extension.
        _, extension = os.path.splitext(name)
        format = extension.lstrip(".").upper().replace("JPG", "JPEG") or "PNG"
        # If we're saving to PNG, make sure we're not in CMYK.
        if image_data.mode == "CMYK" and format == "PNG":
            image_data = image_data.convert("RGB")
        # If the storage has a path, then save it efficiently.
        try:
            thumbnail_path = storage.path(name)
        except NotImplementedError:
            # No path for the storage, so save it in a memory buffer.
            buffer = StringIO()
            try:
                image_data.save(buffer, format)
            except Exception as ex:
                # HACK: PIL raises all sorts of Exceptions :(
                raise ThumbnailError(str(ex))
            # Write the file. Measure the buffer so File.size is correct.
            buffer.seek(0, os.SEEK_END)
            buffer_length = buffer.tell()
            buffer.seek(0)
            file = File(buffer)
            file.size = buffer_length
            storage.save(name, file)
        else:
            # We can do an efficient streaming save.
            try:
                os.makedirs(os.path.dirname(thumbnail_path))
            except OSError:
                # Directory already exists (or is not creatable).
                pass
            try:
                image_data.save(thumbnail_path, format)
            except Exception as ex:
                # HACK: PIL raises all sorts of Exceptions :(
                try:
                    raise ThumbnailError(str(ex))
                finally:
                    # Remove an incomplete file, if present.
                    # NOTE(review): this appears to be the end of the
                    # visible block - confirm nothing was truncated after
                    # the cleanup handler.
                    try:
                        os.unlink(thumbnail_path)
                    except:
                        pass
def __init__(self, connection, container_name, name, *args, **kwargs):
    """Wrap a remote object as a file-like value.

    Reads happen remotely, so the current position is tracked client-side.
    """
    self.connection = connection
    self.container_name = container_name
    self._pos = 0
    File.__init__(self, file=None, name=name, *args, **kwargs)
def __init__(self, content='', parent=None, name=None):
    """In-memory file node holding ``content`` under ``parent``."""
    # InMemoryNode state.
    self.parent = parent
    # File state: pick a stream type matching interpreter and content type.
    if not six.PY3:
        content = force_bytes(content)
        stream = six.BytesIO(content)
    elif isinstance(content, six.text_type):
        stream = StringIO(content)
    else:
        stream = BytesIO(content)
    File.__init__(self, stream, name=name)
def job_save(request):
    """Persist a new mail job and write its executable shell script."""
    if request.POST:
        form = add_job_form(request.POST)
        # Renamed from `Job` so the local no longer shadows the model name.
        job = form.save(request.POST)
        script_path = '%s/%s' % (settings.SCRIPTS_DIR, job.name)
        # The context manager guarantees a single close even on error
        # (the original closed the same handle twice).
        with open(script_path, 'w') as script:
            myfile = File(script)
            content = '#!/bin/sh \nRecips=" %s " \nSubj=" %s " \nBody="%s" \nFromDir="%s" \n source sys/mailer.sh \n ' % (job.recips, job.subj, job.body, job.fromdir)
            myfile.write(content)
        # Make the generated script executable by its owner.
        os.chmod(script_path, stat.S_IRWXU)
        return redirect('/mailer/viewjob/')
def export_xml_by_source(request, dataset_id):
    """Call export API with this dataset_id, combine paginated responses"""
    if not dataset_id:
        return
    base_url = request.build_absolute_uri(
        reverse('export:activity-export')
    ) + "?dataset={dataset_id}&format=xml&page_size=100&page={page}".format(  # NOQA: E501
        dataset_id=dataset_id,
        page="{page}"
    )

    def get_result(xml, page_num):
        """Fetch one page of results, recursing while a 'next' link exists."""
        print('making request, page: ' + str(page_num))
        rf = RequestFactory()
        req = rf.get(base_url.format(page=page_num))
        view = IATIActivityList.as_view()(req).render()
        xml.extend(etree.fromstring(view.content).getchildren())
        link_header = view.get('link')
        if not link_header:
            return xml
        link = requests.utils.parse_header_links(link_header)
        has_next = reduce(lambda acc, x: acc or (
            x['rel'] == 'next'), link, False)
        if has_next:
            return get_result(xml, page_num + 1)
        else:
            return xml

    xml = E('iati-activities', version="2.02")
    final_xml = get_result(xml, 1)
    final_xml.attrib[
        'generated-datetime'
    ] = datetime.datetime.now().isoformat()

    from django.core.files.base import File
    from django.conf import settings
    import uuid
    file_name = "{}.xml".format(uuid.uuid4())
    path = "{}/{}".format(settings.MEDIA_ROOT, file_name)
    # etree.tostring() returns bytes, so the file must be opened in binary
    # mode (text mode raises TypeError on Python 3); the context manager
    # also guarantees the handle is closed.
    with open(path, mode='wb') as fh:
        File(fh).write(etree.tostring(final_xml, pretty_print=True))
    return file_name
def _open(self, name, mode="rb"):
    """
    Return a File object.
    """
    attachment = Attachment.objects.using(self.using).get(attachment__exact=name)
    blob_file = File(StringIO(attachment.blob), attachment.filename)
    # Refuse to hand out data that does not match the stored checksum.
    if md5buffer(blob_file) != attachment.checksum:
        raise IntegrityError("Checksum mismatch")
    blob_file.size = attachment.size
    blob_file.mode = mode
    return blob_file
def testSaveFileToUploadDirectory(self):
    """Saving an uploaded file writes (and overwrites) it in UPLOAD_DIR."""
    from django.conf import settings
    upload_dir = settings.UPLOAD_DIR
    # Make sure upload dir can be written to.
    self.assertTrue(os.access(upload_dir, os.W_OK))
    # Create a temporary file. The original used mode "wr", which is not a
    # valid mode (rejected on Python 3); plain write mode is sufficient
    # since the file only needs to exist.
    tempFilePath = os.getcwd() + "/assignments/tempFile.txt"
    with open(tempFilePath, "w") as tmpFile:
        # `uploaded` instead of `file` to avoid shadowing the builtin.
        uploaded = File(tmpFile)
        uploaded.name = "tempFile.txt"
        target = upload_dir + uploaded.__unicode__()
        self.assertEqual(target, save_file_to_upload_dir(uploaded))
        # File was written to upload directory.
        self.assertTrue(os.path.exists(target))
        # Saving again overwrites in place.
        self.assertEqual(target, save_file_to_upload_dir(uploaded))
        self.assertTrue(os.path.exists(target))
        os.remove(target)
        self.assertFalse(os.path.exists(target))
    os.remove(tempFilePath)
    self.assertFalse(os.path.exists(tempFilePath))
def __init__(self, content='', parent=None, name=None):
    """In-memory file node that also tracks creation/access timestamps."""
    # InMemoryNode state. Three separate now() calls, matching the
    # original timestamp semantics.
    self.parent = parent
    self.created_at = timezone.now()
    self.last_modified = timezone.now()
    self.last_accessed = timezone.now()
    # File state: pick a stream type matching interpreter and content type.
    if not six.PY3:
        content = force_bytes(content)
        stream = six.BytesIO(content)
    elif isinstance(content, six.text_type):
        stream = StringIO(content)
    else:
        stream = BytesIO(content)
    File.__init__(self, stream, name=name)
def job_save_e(request, job_id):
    """Re-save an edited mail job and regenerate its shell script."""
    if request.POST:
        a = Job.objects.get(id=job_id)
        # NOTE(review): the old script is removed from a hard-coded
        # '/scripts' directory while the new one is written under
        # settings.SCRIPTS_DIR - confirm these are meant to differ.
        os.remove('/scripts/%s' % (a.name))
        form = add_job_form(request.POST, instance=a)
        job = form.save(request.POST)
        script_path = '%s/%s' % (settings.SCRIPTS_DIR, a.name)
        # The context manager guarantees a single close even on error
        # (the original closed the same handle twice).
        with open(script_path, 'w') as script:
            myfile = File(script)
            content = '#!/bin/sh \nRecips=" %s " \nSubj=" %s " \nBody="%s" \nFromDir="%s" \n source sys/mailer.sh \n ' % (a.recips, a.subj, a.body, a.fromdir)
            myfile.write(content)
        # Make the regenerated script executable by its owner.
        os.chmod(script_path, stat.S_IRWXU)
        return redirect('/mailer/viewjob/')
def get(self, request, *args, **kwargs):
    """Serve the first existing file matching ``fcode`` among known extensions.

    Raises :class:`Http404` when no candidate file exists.
    """
    fcode = kwargs["fcode"]
    # `found` instead of `file` to avoid shadowing the builtin; the
    # single-argument print() calls behave identically on Python 2 and 3
    # (the original py2 print statements were py2-only).
    found = None
    for ext in self.ext_list:
        filename = self.root_dir + self.base_dir + fcode + "." + ext
        if os.path.exists(filename):
            found = File(None, filename)
            found.open('r')
            print("OK: %s" % filename)
            break
        else:
            print("Failed: %s" % filename)
    if not found:
        raise Http404
    return serve_file(request, found, save_as=False)
def test_self_dependency(self):
    """A self-dependent field is processed into both renditions."""
    instance = DependencyTesting.objects.create()
    source = File(open(add_base("static/images/lenna_rect.jpg"), 'rb'))
    instance.image_1 = source
    instance.save()
    source.close()
    base_url = "/media/tests/dependencytesting/%s" % instance.pk
    self.assertEqual(instance.image_1.width, 50)
    self.assertEqual(instance.image_1.url, base_url + "/image_1.bmp")
    self.assertEqual(instance.image_1_gif.width, 50)
    self.assertEqual(instance.image_1_gif.url, base_url + "/image_1_gif.gif")
    instance.delete()
def test_file_cleanup_after_delete(self):
    """Deleting the instance removes files unless keep_orphans is set."""
    instance = FileTesting.objects.create()
    attachment = File(open(add_base("static/defaults/foo-bar.txt"), 'r'))
    instance.field_3 = attachment
    instance.field_4 = attachment
    instance.save()
    attachment.close()
    field_3_path = instance.field_3.path
    field_4_path = instance.field_4.path
    self.assertTrue(os.path.isfile(field_3_path))
    self.assertTrue(os.path.isfile(field_4_path))
    instance.delete()
    # Cleanup without dependencies removes the file...
    self.assertFalse(os.path.isfile(field_3_path))
    # ...while keep_orphans=True preserves it.
    self.assertTrue(os.path.isfile(field_4_path))
def test_auto_cast_material_type_other(self):
    """
    Tests cast material model to suitable type when other file was uploaded.
    """
    from apps.materials.models import Material
    uploaded = File(tempfile.NamedTemporaryFile(
        mode="r+w+t",
        suffix=".hoge",
        dir=settings.TEST_TEMPORARY_FILE_DIR,
    ))
    uploaded.write("hello!hello!")
    material_file = MaterialFile.objects.create(file=uploaded)
    material = Material.objects.create(
        _file=material_file,
        description="description",
        category=Category.objects.get(pk=1),
    )
    # An unknown extension must fall back to the base Material type.
    ok_(isinstance(material, Material))
    uploaded.close()
def rename_file(self, new_file_name,sep='/') : # NB: only changes the actual file name file = self.resource.file file_path = file.name.split(sep) old_name = file_path[-1] print old_name, ' to ', new_file_name file_path[-1]=new_file_name new_path = sep.join(file_path) try : f = File(open(file.name,'rb')) self.resource = f self.resource.name = new_path self.save() f.close() except Exception, e : print e
def _open(self, name, mode="rb"):
    """
    Read the file from the database, and return as a File instance.
    """
    attachment = Attachment.objects.using(self.using).get(attachment__exact=name)
    cursor = connections[self.using].cursor()
    # Fetch the large object's bytes, then release it immediately.
    lobject = cursor.db.connection.lobject(attachment.blob, "r")
    blob_file = File(StringIO(lobject.read()), attachment.filename)
    lobject.close()
    # Refuse to return data whose checksum does not match the stored one.
    if md5buffer(blob_file) != attachment.checksum:
        raise IntegrityError("Checksum mismatch")
    blob_file.size = attachment.size
    blob_file.mode = mode
    return blob_file
def test_file_field(self):
    """Defaults, assignment, reset-to-default and delete-time cleanup."""
    instance = FileTesting.objects.create()
    # Defaults resolve to static URLs.
    self.assertEqual(instance.field_1_foo.url, "/static/defaults/foo.txt")
    self.assertEqual(instance.bar.url, "/static/defaults/bar.txt")
    # Default FieldFile is set and processed.
    self.assertEqual(instance.field_1_foo.read(), force_bytes("FOO\n"))
    self.assertEqual(instance.bar.read(), force_bytes("BAR\n"))
    self.assertEqual(instance.field_2.read(), force_bytes("foo\n"))
    field_2_path = instance.field_2.path
    self.assertTrue(os.path.isfile(field_2_path))
    # Assign a real file.
    replacement = File(open(add_base("static/defaults/foo-bar.txt"), 'r'))
    instance.field_1 = replacement
    instance.save()
    replacement.close()
    # The default-backed file must survive the save.
    self.assertTrue(os.path.isfile(field_2_path))
    # New content is visible through both accessors.
    self.assertEqual(instance.field_1.read(), force_bytes("FOO BAR\n"))
    self.assertEqual(instance.field_1_foo.read(), force_bytes("FOO BAR\n"))
    instance.field_2.seek(0)
    self.assertEqual(instance.field_2.read(), force_bytes("foo\n"))
    # Setting the default value again...
    instance.field_2 = None
    instance.save()
    # ...removes the previous file and re-processes the default.
    self.assertFalse(os.path.isfile(field_2_path))
    self.assertEqual(instance.field_2.read(), force_bytes("foo bar\n"))
    # Deleting the instance deletes all of its files.
    field_1_path = instance.field_1.path
    field_1_foo_path = instance.field_1_foo.path
    field_2_path = instance.field_2.path
    for present in (field_1_path, field_1_foo_path, field_2_path):
        self.assertTrue(os.path.isfile(present))
    instance.delete()
    for removed in (field_1_path, field_1_foo_path, field_2_path):
        self.assertFalse(os.path.isfile(removed))
def test_image_processor(self):
    """Processors run on save; a failing processor reports error status."""
    instance = ImageTesting.objects.create()
    lenna_rect = File(open(add_base("static/images/lenna_rect.jpg"), 'rb'))
    instance.image_3 = lenna_rect
    instance.save()
    # make sure conversion went through properly
    # (assertEquals is a deprecated alias; assertEqual is the supported API)
    self.assertEqual(instance.image_3_png.width, 200)
    self.assertEqual(instance.image_3_png.height, 112)
    # save instance, so files get commited to storage
    path = instance.image_3.path
    path_png = instance.image_3_png.path
    # check to see that files got commited
    self.assertTrue(os.path.isfile(path))
    self.assertTrue(os.path.isfile(path_png))
    # make sure dependency gets reattached as expected
    instance = ImageTesting.objects.get(pk=instance.pk)
    self.assertEqual(instance.image_3_png.width, 200)
    self.assertEqual(instance.image_3_png.height, 112)
    self.assertTrue(os.path.isfile(path))
    self.assertTrue(os.path.isfile(path_png))
    # test problematic processor (JPEG2000 is missing a required library)
    instance.image_4 = lenna_rect
    instance.save()
    self.assertEqual(instance.smartfields_get_field_status('image_4'), {
        'state': 'error',
        'messages': [
            'ProcessingError: There was a problem with image conversion: encoder '
            'jpeg2k not available'
        ],
        'app_label': 'tests',
        'pk': 1,
        'field_name': 'image_4',
        'model_name': 'imagetesting'
    })
    lenna_rect.close()
    # delete instance and check if everything is cleaned up
    instance.delete()
    self.assertFalse(os.path.isfile(path))
    self.assertFalse(os.path.isfile(path_png))
def save_small(self, img):
    """Create and attach a small JPEG rendition of ``img``."""
    rgb = img.convert('RGB')
    # Resize in place to the configured maximum dimensions.
    rgb.thumbnail(self.SMALL_IMAGE_MAX_DIMENSIONS)
    buffer = BytesIO()
    rgb.save(buffer, format='JPEG')  # serialize the resized image
    buffer.seek(0)
    wrapped = File(ContentFile(buffer.read()))
    target_name = f'{self.vanity}-small.jpg'
    self.small.save(target_name, wrapped, save=False)
def _migrate_uploaded_files(project, project_path):
    """Migrate files uploaded by user"""
    source_upload_path = project_path / 'upload'
    if not source_upload_path.exists():
        return
    target_upload_path = pathlib.Path(
        get_env('LABEL_STUDIO_BASE_DATA_DIR', get_data_dir())) / 'upload'
    # mkdir(parents=True, exist_ok=True) replaces the racy
    # exists()/makedirs() pair in a single call.
    target_upload_path.mkdir(parents=True, exist_ok=True)
    for file_name in os.listdir(str(source_upload_path)):
        full_file_name = os.path.join(str(source_upload_path), file_name)
        with open(full_file_name, 'rb') as f:
            FileUpload.objects.create(user=project.created_by,
                                      project=project,
                                      file=File(f, name=file_name))
def set_from_file(self, uri):
    """Set the image from a local file URI (``file://`` scheme or bare path)."""
    parts = parse.urlparse(uri)
    if parts.scheme in {'', 'file'}:
        path = parts.path
        filename = os.path.basename(path)
        try:
            with open(path, 'rb') as descriptor:
                self.image.save(filename, File(descriptor))
            self.title = filename
            self.save()
        except Exception:
            # traceback.print_exc() takes no exception argument - passing
            # one (as the original did) misuses the 'limit' parameter and
            # raises TypeError on Python 3. It prints the current
            # exception by itself; errors are deliberately swallowed.
            traceback.print_exc()
def update_file(id, resp):
    """Replace the stored config file of SwitchConfig ``id`` with the
    lines from ``resp['result']['msg']``."""
    obj = SwitchConfig.objects.get(id=id)
    obj.path.delete(save=False)
    temp_path = os.path.join(MEDIA_ROOT, 'switch', 'temp.cfg')
    # Write the device response to a temp file, then attach it to the
    # model; context managers close both handles even on error.
    with open(temp_path, 'w') as fo:
        for line in resp['result']['msg']:
            fo.write(line)
    from django.core.files.base import File
    with open(temp_path, 'r') as fo:
        obj.path = File(fo)
        obj.save()
    remove_temp(temp_path)
def test_clean_singularity_image_with_parent(self):
    """
    A Singularity container should not have a parent.
    :return:
    """
    parent = Container(id=41)
    child = Container(id=42, parent=parent, file_type=Container.SIMG)
    expected_message = Container.DEFAULT_ERROR_MESSAGES[
        "singularity_cannot_have_parent"]
    with open(self.alpine_path, "rb") as alpine_file:
        child.file = File(alpine_file)
        with self.assertRaisesMessage(ValidationError, expected_message):
            child.clean()
def from_native(self, data):
    """Decode a base64 ``data:`` URI into a temp file and validate it
    as an image, raising ValidationError on malformed input."""
    import base64
    if not data.startswith('data:'):
        raise serializers.ValidationError('Not a data URI.')
    metadata, encoded = data.rsplit(',', 1)
    parts = metadata.rsplit(';', 1)
    if parts[-1] != 'base64':
        raise serializers.ValidationError('Not a base64 data URI.')
    # str.decode('base64') is Python-2-only; base64.b64decode works on
    # both Python 2 and 3.
    content = base64.b64decode(encoded)
    tmp_dst = os.path.join(settings.TMP_PATH, 'icon', uuid.uuid4().hex)
    with storage.open(tmp_dst, 'wb') as f:
        f.write(content)
    tmp = File(storage.open(tmp_dst))
    return serializers.ImageField().from_native(tmp)
def admissible_driver_test_helper(self, archive_type, driver_type):
    """
    Helper for testing archive containers' drivers' admissibility.
    :return:
    """
    pipeline_dict = {"steps": [{"driver": "foobarbaz"}]}
    # Archives that contain a mix of files, including one driver.
    fd, archive = tempfile.mkstemp()
    # mkstemp() returns an *open* OS-level descriptor; the original never
    # closed it, leaking one fd per call. We reopen by path below.
    os.close(fd)
    try:
        with open(archive, mode="wb") as f:
            if archive_type == Container.ZIP:
                archive_handler = ZipHandler(f, mode="w")
            else:
                archive_handler = TarHandler(f, mode="w")
            archive_handler.write(u"hello_world.sh", self.hello_world_script)
            archive_handler.write(u"useless.lib", self.useless)
            archive_handler.write(u"not_a_script", self.not_a_script)
            if driver_type == "good":
                pipeline_dict["steps"][0]["driver"] = "hello_world.sh"
            elif driver_type == "bad":
                pipeline_dict["steps"][0]["driver"] = "not_a_script"
            archive_handler.write("kive/pipeline0.json", json.dumps(pipeline_dict))
            archive_handler.close()
        parent = Container(id=41, file_type=Container.SIMG)
        container = Container(id=42, file_type=archive_type, parent=parent)
        with open(archive, "rb") as f:
            container.file = File(f)
            if driver_type == "good":
                container.clean()
            elif driver_type == "bad":
                with self.assertRaisesMessage(
                        ValidationError,
                        Container.DEFAULT_ERROR_MESSAGES["inadmissible_driver"]):
                    container.clean()
            elif driver_type == "nonexistent":
                with self.assertRaisesMessage(
                        ValidationError,
                        Container.DEFAULT_ERROR_MESSAGES["driver_not_in_archive"]):
                    container.clean()
    finally:
        os.remove(archive)
def _process_save_original_message(self, message, msg):
    """Attach the raw source of ``message`` to ``msg.eml``, gzipped when
    the 'compress_original_message' setting is enabled."""
    raw = message.as_string()
    if utils.get_settings()['compress_original_message']:
        # Spool the gzipped source into a named temp file before attaching;
        # closing the GzipFile flushes everything into fp_tmp.
        with NamedTemporaryFile(suffix=".eml.gz") as fp_tmp:
            with gzip.GzipFile(fileobj=fp_tmp, mode="w") as fp:
                fp.write(raw.encode('utf-8'))
            msg.eml.save("%s.eml.gz" % (uuid.uuid4(),), File(fp_tmp), save=False)
    else:
        msg.eml.save('%s.eml' % uuid.uuid4(), ContentFile(raw), save=False)
def test_media_file_hash(self):
    """The stored hash of a media MetaData row matches the file's MD5."""
    name = "screenshot.png"
    media_file = os.path.join(
        self.this_directory, 'fixtures', 'transportation', name)
    # Binary mode is required: PNG data cannot be decoded as text on
    # Python 3, and md5 must be fed bytes. Context managers also close
    # the handles the original leaked.
    with open(media_file, 'rb') as f:
        m = MetaData.objects.create(data_type='media',
                                    xform=self.xform,
                                    data_value=name,
                                    data_file=File(f, name),
                                    data_file_type='image/png')
    with open(media_file, 'rb') as f:
        media_hash = hashlib.md5(f.read()).hexdigest()
    self.assertEqual(m.hash, media_hash)
def setUpTestData(cls):
    """Create a printer, a bare check, and a rendered check with a PDF."""
    cls.printer: Printer = mommy.make(
        Printer, check_type=Printer.KITCHEN, point_id=2)
    cls.check: Check = mommy.make(Check, printer=cls.printer)
    pdf_handle = open('forfar/tests/test_check.pdf', 'rb')
    cls.test_file: File = File(pdf_handle)
    cls.test_file.name = 'test_check.pdf'
    cls.check_with_file: Check = mommy.make(
        Check,
        printer=cls.printer,
        pdf_file=cls.test_file,
        status=Check.RENDERED,
        _create_files=True,
    )
def test_wand_image_processor(self):
    """The Wand-based processor produces the expected JPEG rendition."""
    instance = ImageTesting.objects.create()
    lenna_square = File(open(add_base("static/images/lenna_square.png"), 'rb'))
    instance.image_5 = lenna_square
    instance.save()
    # make sure conversion went through properly
    # (assertEquals is a deprecated alias; assertEqual is the supported API)
    self.assertEqual(instance.image_5_jpeg.width, 150)
    self.assertEqual(instance.image_5_jpeg.height, 150)
    # save instance, so files get commited to storage
    path = instance.image_5.path
    path_jpeg = instance.image_5_jpeg.path
    # check to see that files got commited
    self.assertTrue(os.path.isfile(path))
    self.assertTrue(os.path.isfile(path_jpeg))
def test_flood_report_serializer(self):
    """Test for Report Serializer"""
    # Binary mode: these fixtures are PDFs, not text.
    report_dict = {
        'event_id': u'2015112518-3-rw',
        'language': u'id',
        'impact_report': File(
            open(self.data_path('impact-table-id.pdf'), 'rb')
        ),
        'impact_map': File(
            open(self.data_path('impact-map-id.pdf'), 'rb')
        )
    }
    serializer = FloodReportSerializer(data=report_dict)
    # Test that Serializer from dict to model works
    self.assertTrue(serializer.is_valid())
    flood = Flood.objects.get(event_id=u'2015112518-3-rw')
    serializer.validated_data['flood'] = flood
    serializer.save()
    # Pre-initialize so a failed lookup yields a clean assertion failure
    # instead of the NameError the original would have raised.
    report = None
    try:
        report = FloodReport.objects.get(
            flood__event_id=u'2015112518-3-rw',
            language=u'id'
        )
    except FloodReport.DoesNotExist:
        pass
    # Test that report exists
    self.assertTrue(report)
    flood = Flood.objects.get(event_id=u'2015112518-3-rw')
    # Test that flood report is in the Floods model reports member
    # property
    self.assertIn(report, flood.reports.all())
    serializer = FloodReportSerializer(report)
    # Test member of the serialized model equals dict value
    self.assertEqualDictionaryWithFiles(serializer.data, report_dict)
def test_document_creation(self):
    """Initial metadata is correct and new versions bump the major version.

    (failUnlessEqual is a long-deprecated alias; assertEqual is the
    supported API. Fixture files are binary, so they are opened 'rb'.)
    """
    self.assertEqual(self.document_type.name, 'test doc type')
    self.assertEqual(self.document.exists(), True)
    self.assertEqual(self.document.size, 272213)
    self.assertEqual(self.document.file_mimetype, 'application/pdf')
    self.assertEqual(self.document.file_mime_encoding, 'binary')
    self.assertEqual(self.document.file_filename, 'mayan_11_1.pdf')
    self.assertEqual(
        self.document.checksum,
        'c637ffab6b8bb026ed3784afdb07663fddc60099853fae2be93890852a69ecf3')
    self.assertEqual(self.document.page_count, 47)
    self.assertEqual(
        self.document.latest_version.get_formated_version(), '1.0')
    # self.failUnlessEqual(self.document.has_detached_signature(), False)
    with open(TEST_SIGNED_DOCUMENT_PATH, 'rb') as file_object:
        new_version_data = {
            'comment': 'test comment 1',
            'version_update': VERSION_UPDATE_MAJOR,
            'release_level': RELEASE_LEVEL_FINAL,
            'serial': 0,
        }
        new_version = self.document.new_version(
            file=File(file_object, name='mayan_11_1.pdf.gpg'),
            **new_version_data)
    self.assertEqual(
        self.document.latest_version.get_formated_version(), '2.0')
    new_version_data = {
        'comment': 'test comment 2',
        'version_update': VERSION_UPDATE_MAJOR,
        'release_level': RELEASE_LEVEL_FINAL,
        'serial': 0,
    }
    with open(TEST_DOCUMENT_PATH, 'rb') as file_object:
        new_version = self.document.new_version(
            file=File(file_object), **new_version_data)
    self.assertEqual(
        self.document.latest_version.get_formated_version(), '3.0')
def clean_community_advprofile_avatar(self):
    """Validate the uploaded avatar, downscaling it when oversized."""
    f = self.cleaned_data['community_advprofile_avatar']
    if f is None:
        return f
    # Verify file size ..
    size = len(self.cleaned_data['community_advprofile_avatar'])
    max_size = get_sph_setting('community_avatar_max_size')
    if size > max_size:
        raise djangoforms.ValidationError(
            _(u"Max upload filesize of %(max_size)d bytes exceeded. (Your file had %(size)d bytes)"
              ) % {
                  'max_size': max_size,
                  'size': size
              })
    try:
        from PIL import Image
    except ImportError:
        import Image
    try:
        # Verify image dimensions ..
        image = Image.open(f)
        format = image.format
        width = image.size[0]
        height = image.size[1]
        # Rewind so later consumers can re-read the upload. The original
        # `f.seek(-f.tell())` is a negative *absolute* seek, which only
        # worked by accident when the position was 0.
        f.seek(0)
        max_width = get_sph_setting('community_avatar_max_width')
        max_height = get_sph_setting('community_avatar_max_height')
        if width > max_width or height > max_height:
            # Instead of creating a validation error, simply resize the image.
            image.thumbnail((max_width, max_height), Image.ANTIALIAS)
            from tempfile import NamedTemporaryFile
            from django.core.files.base import File
            tmpfile = NamedTemporaryFile()
            image.save(tmpfile, format=format)
            f = File(tmpfile, f.name)
            # raise djangoforms.ValidationError( "Max size of %dx%d exceeded (Your upload was %dx%d)" % (max_width, max_height, width, height) )
    except IOError as e:
        print(e)
        raise djangoforms.ValidationError(_(u"Uploaded an invalid image."))
    return f
def test_storage_celery_save(self):
    """
    Make sure it actually works when using Celery as a task queue
    """
    queued = QueuedStorage(
        local='django.core.files.storage.FileSystemStorage',
        remote='django.core.files.storage.FileSystemStorage',
        local_options=dict(location=self.local_dir),
        remote_options=dict(location=self.remote_dir))

    file_field = TestModel._meta.get_field('file')
    file_field.storage = queued

    instance = TestModel(file=File(self.test_file))
    instance.save()

    # The transfer task must have completed and the file must exist on
    # both the local and remote storages.
    self.assertTrue(instance.file.storage.result.get())
    self.assertTrue(path.isfile(path.join(self.local_dir, instance.file.name)))
    self.assertTrue(
        path.isfile(path.join(self.remote_dir, instance.file.name)),
        "Remote file is not available.")
    self.assertFalse(queued.using_local(instance.file.name))
    self.assertTrue(queued.using_remote(instance.file.name))

    self.assertEqual(self.test_file_name,
                     queued.get_valid_name(self.test_file_name))
    self.assertEqual(self.test_file_name,
                     queued.get_available_name(self.test_file_name))

    subdir_path = os.path.join('test', self.test_file_name)
    self.assertTrue(queued.exists(subdir_path))
    self.assertEqual(queued.path(self.test_file_name),
                     path.join(self.local_dir, self.test_file_name))
    self.assertEqual(queued.listdir('test')[1], [self.test_file_name])
    self.assertEqual(queued.size(subdir_path),
                     os.stat(self.test_file_path).st_size)
    self.assertEqual(queued.url(self.test_file_name), self.test_file_name)

    self.assertIsInstance(queued.accessed_time(subdir_path), datetime)
    self.assertIsInstance(queued.created_time(subdir_path), datetime)
    self.assertIsInstance(queued.modified_time(subdir_path), datetime)

    # Round-trip a second file through open/write/delete.
    subdir_name = 'queued_storage_2.txt'
    scratch = queued.open(subdir_name, 'w')
    try:
        scratch.write('test')
    finally:
        scratch.close()
    self.assertTrue(queued.exists(subdir_name))
    queued.delete(subdir_name)
    self.assertFalse(queued.exists(subdir_name))
def handle_uploaded_file(f: File, path: Optional[List[str]] = None, name: Optional[str] = None, max_length: Optional[int] = None) -> str:
    """Handle a file uploaded by a form and return its stored relative path.

    Args:
        f: The uploaded file object.
        path: Iterable of path components under MEDIA_ROOT; when not
            iterable (e.g. None) the 'upload' directory is used.
        name: Explicit file name; derived from the file when omitted.
        max_length: Maximum total length of the stored relative path.

    Raises:
        ValueError: If max_length cannot even hold the directory part.
    """
    def get_name(file: File) -> str:
        # Fall back to a random name when the file object carries none.
        if hasattr(file, 'name'):
            name = file.name
        elif hasattr(file, '_name'):
            name = file._name
        else:
            name = 'file_{:08x}'.format(randint(0, MAXINT))
        # Force a harmless extension for disallowed file types.
        if name.rpartition('.')[2] not in settings.ALLOWED_EXTENSIONS:
            name = f'{name}.txt'
        return name

    dir_path_length = 1  # For the final '/'

    if not hasattr(path, '__iter__'):
        # TODO: path is None (or add support for only one string)
        relative_dir_path = 'upload'
        dir_path = join(settings.MEDIA_ROOT, relative_dir_path)
        dir_path_length += len(relative_dir_path)
    else:
        relative_dir_path = join(*path)
        dir_path = join(settings.MEDIA_ROOT, *path)

        # The storage uses '/' even on Windows. Join the path
        # *components*; the previous code joined the characters of the
        # already-joined string, inflating the computed length.
        dir_path_length += len('/'.join(path))

    if not name:
        name = get_name(f)

    if max_length:
        max_length -= dir_path_length
        if max_length <= 0:
            raise ValueError('The max length is too small.')

    final_path = FileCreator(dir_path=dir_path, name=name, max_length=max_length).create()
    # NB: open()'s third positional argument is *buffering*, not file
    # permissions — the previous `open(..., 'wb', 0o755)` silently set a
    # 493-byte buffer. Use the default buffering and a context manager.
    with open(final_path, 'wb') as destination:
        for chunk in f.chunks():
            destination.write(chunk)

    return join(relative_dir_path, basename(final_path))
def write_temp_file_to_path(suffix, content, file_path):
    """
    Write a temp file and return the name of the file.

    :param suffix: The file suffix
    :param content: The content to write
    :param file_path: The path to write the temp file to
    :return: The filename written to
    """
    # The context manager guarantees the temp file is removed even if
    # the storage save raises (the original manual close leaked it).
    with NamedTemporaryFile(suffix=suffix) as temp_file:
        temp_file.write(content)
        temp_file.seek(0)
        export_filename = default_storage.save(
            file_path, File(temp_file, file_path))
    return export_filename
def test_media_file_hash(self):
    """The stored MetaData hashes must match the hash of the source media file."""
    name = "screenshot.png"
    media_file = os.path.join(
        self.this_directory, 'fixtures', 'transportation', name)
    # Close the upload handle deterministically instead of leaking it.
    with open(media_file, 'rb') as media_handle:
        m = MetaData.objects.create(
            data_type='media', xform=self.xform, data_value=name,
            data_file=File(media_handle, name), data_file_type='image/png')
    with open(media_file, 'rb') as f:
        media_hash = get_hash(f, prefix=True)
    meta_hash = m.md5_hash
    self.assertEqual(meta_hash, media_hash)
    self.assertEqual(m.file_hash, media_hash)
def save(self, *args, **kwargs):
    """Ensure a committee data file is attached before saving the model.

    Accessing ``.path`` raises when the FileField has no file attached;
    in that case a JSON snapshot of all Committee rows is written to a
    temp file and attached to the field.
    """
    try:
        os.path.isfile(self.data_file_committee.path)
    except Exception:  # narrow from a bare except: e.g. ValueError for an empty FileField
        # Serialize all committees as the initial content of the file.
        fo1 = tempfile.NamedTemporaryFile()
        data = serializers.serialize("json", Committee.objects.all())
        # Write via the file's *name* so fo1's own handle stays at
        # position 0 for the subsequent FieldFile.save() read.
        with open(fo1.name, "w") as out:
            out.write(data)
        self.data_file_committee.save(os.path.basename(fo1.name), File(fo1))
    super().save(*args, **kwargs)
def test_create_attachment_with_mimetype_more_than_50(self):
    """A 120-char mimetype is rejected by the column; 100 chars is accepted."""
    media_file_path = os.path.join(
        self.this_directory, 'fixtures', 'transportation', 'instances',
        self.surveys[0], self.media_file)
    # Binary mode (the fixture is a media file, not text) and a context
    # manager so the handle is closed instead of leaked.
    with open(media_file_path, 'rb') as media_handle:
        media_file = File(media_handle, media_file_path)
        with self.assertRaises(DataError):
            Attachment.objects.create(
                instance=self.instance, mimetype='a' * 120,
                media_file=media_file)
        pre_count = Attachment.objects.count()
        Attachment.objects.create(
            instance=self.instance, mimetype='a' * 100,
            media_file=media_file)
        self.assertEqual(pre_count + 1, Attachment.objects.count())
def setUpTestData(cls):
    """Build shared fixtures: a user with a profile, an order backed by
    a hash file, a user-order link and a single comment."""
    author = User.objects.create(
        username='******', first_name='Андрей', last_name='Петров')
    author.set_password('password')
    author.save()

    profile = UserProfile.objects.create(user=author, middle_name='Сергеевич')
    profile.save()

    # Write the hash file, then reopen it read-only for the order.
    hash_file = File(open(MEDIA_ROOT + '/test_file_1', 'w'))
    hash_file.write('Хеш')
    hash_file.close()
    hash_file.open('r')
    new_order = Order.objects.create(
        title='Название',
        text='Текст',
        date=timezone.now(),
        author=author,
        order_hash=hash_file,
        is_closed=False)
    hash_file.close()

    UserOrder.objects.create(
        user=author, order=new_order, is_accepted=False, is_completed=False)
    Comment.objects.create(
        user=author,
        order=new_order,
        date=timezone.now(),
        text='Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod')
def test_flood_report_detail(self):
    """The flood report detail endpoint returns the expected report files."""
    # Binary mode and guaranteed close: the fixtures are PDF files and
    # the original handles were never closed.
    with open(self.data_path('impact-table-en.pdf'), 'rb') as table_file, \
            open(self.data_path('impact-map-en.pdf'), 'rb') as map_file:
        report_dict = {
            'event_id': u'2015112518-3-rw',
            'language': u'en',
            'impact_report': File(table_file),
            'impact_map': File(map_file),
        }
        response = self.client.get(
            reverse(
                'realtime:flood_report_detail',
                kwargs={
                    'event_id': report_dict['event_id'],
                    'language': report_dict['language']
                }))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqualDictionaryWithFiles(response.data, report_dict)
def test_upload_compressed_file(self):
    """Uploading an archive with uncompress enabled yields one document per member."""
    source = WebFormSource(
        label='test source', uncompress=SOURCE_UNCOMPRESS_CHOICE_Y)
    # Binary mode: the fixture is a compressed archive.
    with open(TEST_COMPRESSED_DOCUMENT_PATH, 'rb') as file_object:
        source.handle_upload(
            document_type=self.document_type,
            file_object=File(file_object),
            expand=(source.uncompress == SOURCE_UNCOMPRESS_CHOICE_Y))
    self.assertEqual(Document.objects.count(), 2)
    self.assertTrue(
        'first document.pdf' in Document.objects.values_list(
            'label', flat=True))
    self.assertTrue(
        'second document.pdf' in Document.objects.values_list(
            'label', flat=True))
def save_image(image, storage_path, format=DEFAULT_FORMAT, storage=default_storage):
    """
    Saves a PIL image file to the given storage_path using the given storage.
    Returns the final storage path of the saved file.
    """
    if format not in KEEP_FORMATS:
        format = DEFAULT_FORMAT
    with NamedTemporaryFile() as temp:
        image.save(temp, format=format)
        # Rewind so the storage backend reads the image from the start,
        # regardless of whether its save() rewinds the file itself.
        temp.seek(0)
        return storage.save(storage_path, File(temp))
def cache(self):
    """Store image locally if we have a URL.

    No-op when an image is already stored, or when there is no usable
    URL ('n/a' is treated as missing).
    """
    if self.image or not self.image_url or self.image_url.lower() == 'n/a':
        return
    logging.info("Caching Image for Movie: %s... URL: %s" % (self.title, self.image_url))
    result, headers = urllib.urlretrieve(self.image_url)
    logging.debug('HEADERS: %s' % headers)
    logging.info("Successfully downloaded image for %s" % self.title)
    image_name = os.path.basename(self.image_url)
    # Open as binary and close the handle deterministically — the
    # original handle was never closed.
    f = open(result, 'rb')
    try:
        self.image.save(image_name, File(f))
    finally:
        f.close()
    logging.info("Saved Image for Movie: %s" % self.title)
    self.save()
def test_earthquake_report_serializer(self):
    """Round-trip an EarthquakeReport through its serializer."""
    # Binary mode and guaranteed close for the PDF/PNG fixtures; the
    # original handles were opened in text mode and leaked.
    with open(self.data_path(u'20150619200628-en.pdf'), 'rb') as pdf_file, \
            open(self.data_path(u'20150619200628-en.png'), 'rb') as image_file, \
            open(self.data_path(u'20150619200628-thumb-en.png'), 'rb') as thumb_file:
        report_dict = {
            'shake_id': u'20150619200628',
            'language': u'en',
            'report_pdf': File(pdf_file),
            'report_image': File(image_file),
            'report_thumbnail': File(thumb_file),
        }
        serializer = EarthquakeReportSerializer(data=report_dict)
        self.assertTrue(serializer.is_valid())
        earthquake = Earthquake.objects.get(shake_id=u'20150619200628')
        serializer.validated_data['earthquake'] = earthquake
        serializer.save()
    report = EarthquakeReport.objects.get(
        earthquake__shake_id=u'20150619200628', language=u'en')
    self.assertTrue(report)
    serializer = EarthquakeReportSerializer(report)
    self.assertEqual(serializer.data['shake_id'], u'20150619200628')
    self.assertEqual(serializer.data['language'], u'en')
def setUp(self):
    """Create an admin user, a document type and a document with one version."""
    self.admin_user = User.objects.create_superuser(
        username=TEST_ADMIN_USERNAME, email=TEST_ADMIN_EMAIL,
        password=TEST_ADMIN_PASSWORD)
    self.document_type = DocumentType(name=TEST_DOCUMENT_TYPE)
    self.document_type.save()
    self.document = Document(document_type=self.document_type)
    self.document.save()
    # Binary mode: the fixture is a PDF.
    with open(TEST_DOCUMENT_PATH, 'rb') as file_object:
        self.document.new_version(
            file_object=File(file_object, name='mayan_11_1.pdf'))
def test_get_zipfiles(self):
    """get_old_version returns the latest earlier version that has a source file."""
    metadata = {
        "uuid": "*****@*****.**",
        "name": "Test Metadata"
    }
    extension = models.Extension.objects.create_from_metadata(
        metadata, creator=self.user)

    first = models.ExtensionVersion.objects.create(
        extension=extension,
        source=File(ContentFile("doot doo"), name="aa"),
        status=models.STATUS_UNREVIEWED)
    # Nothing precedes the first version.
    self.assertEqual(None, get_old_version(first))

    # This one is broken...
    broken = models.ExtensionVersion.objects.create(
        extension=extension,
        source="",
        status=models.STATUS_UNREVIEWED)
    self.assertEqual(first, get_old_version(broken))

    third = models.ExtensionVersion.objects.create(
        extension=extension,
        source=File(ContentFile("doot doo"), name="bb"),
        status=models.STATUS_UNREVIEWED)
    # The broken (sourceless) version is skipped over.
    self.assertEqual(first, get_old_version(third))
def handle(self, *args, **options):
    """Serialize every model to the given outfile, mirroring media
    referenced by file fields into a 'media' directory beside it."""
    serializer_format = options.get('format')
    output_path = options.get('outfile')
    if output_path is None:
        raise CommandError('No --outfile specified (this is a required option)')

    # Media files are copied next to the dump file.
    self.target_dir = join(dirname(abspath(output_path)), 'media')

    # Hook the pre_dump signal for every model that has file fields.
    for model_class in models_with_filefields():
        pre_dump.connect(self.save_images_for_signal, sender=model_class)

    self.set_up_serializer(serializer_format)

    # Redirect the command's stdout into the dump file for the duration
    # of the dump; File closes it on exit.
    with File(open(output_path, 'w')) as self.stdout:
        super(Command, self).handle(*args, **options)
def test_multiple_document_metadata_edit(self):
    """Editing metadata for multiple documents updates every selected one."""
    self.login_user()
    self.grant_permission(permission=permission_document_view)
    self.grant_permission(permission=permission_metadata_document_add)
    self.grant_permission(permission=permission_metadata_document_edit)

    # Binary mode: the fixture is a binary document.
    with open(TEST_SMALL_DOCUMENT_PATH, 'rb') as file_object:
        document_2 = self.document_type.new_document(
            file_object=File(file_object))

    document_metadata = self.document.metadata.create(
        metadata_type=self.metadata_type)
    document_2.metadata.create(metadata_type=self.metadata_type)

    response = self.get(
        'metadata:metadata_multiple_edit',
        data={'id_list': '{},{}'.format(self.document.pk, document_2.pk)})
    self.assertContains(response, 'Edit', status_code=200)

    # Test post to metadata removal view
    response = self.post('metadata:metadata_multiple_edit', data={
        'id_list': '{},{}'.format(self.document.pk, document_2.pk),
        'form-0-id': document_metadata.metadata_type.pk,
        'form-0-value': TEST_METADATA_VALUE_EDITED,
        'form-0-update': True,
        'form-TOTAL_FORMS': '1',
        'form-INITIAL_FORMS': '0',
        'form-MAX_NUM_FORMS': '',
    }, follow=True)
    self.assertEqual(response.status_code, 200)

    self.assertEqual(
        self.document.metadata.first().value, TEST_METADATA_VALUE_EDITED)
    self.assertEqual(
        document_2.metadata.first().value, TEST_METADATA_VALUE_EDITED)