def _upload_instances(self, path):
    """Upload every XML submission found under ``path``.

    Each subdirectory of ``path`` is expected to hold one instance; if it
    contains a ``submission.xml`` file, that file is passed to
    ``self._upload_instance`` along with its sibling files (attachments).

    :param path: storage directory whose subdirectories hold instances.
    :return: number of instances uploaded successfully.
    """
    instances_count = 0
    dirs, not_in_use = default_storage.listdir(path)
    for instance_dir in dirs:
        instance_dir_path = os.path.join(path, instance_dir)
        i_dirs, files = default_storage.listdir(instance_dir_path)
        xml_file = None
        if 'submission.xml' in files:
            file_obj = default_storage.open(
                os.path.join(instance_dir_path, 'submission.xml'))
            xml_file = file_obj
        if xml_file:
            try:
                self._upload_instance(xml_file, instance_dir_path, files)
            except ExpatError:
                # Malformed XML: skip this instance entirely.
                continue
            except Exception:
                # FIX: was a silent ``pass`` that hid every failure; log it
                # instead (matching the logging variant of this method used
                # elsewhere in the project) while remaining best-effort so
                # one bad submission does not abort the batch.
                logging.exception(
                    'Ignoring exception raised while processing XML '
                    'submission in %s', instance_dir_path)
            else:
                instances_count += 1
    return instances_count
def _upload_instances(self, path):
    """Upload every XML submission found under ``path``.

    Each subdirectory of ``path`` is expected to hold one instance; if it
    contains a ``submission.xml`` file, that file is passed to
    ``self._upload_instance`` along with its sibling files (attachments).

    :param path: storage directory whose subdirectories hold instances.
    :return: number of instances uploaded successfully.
    """
    instances_count = 0
    dirs, not_in_use = default_storage.listdir(path)
    for instance_dir in dirs:
        instance_dir_path = os.path.join(path, instance_dir)
        i_dirs, files = default_storage.listdir(instance_dir_path)
        xml_file = None
        if 'submission.xml' in files:
            file_obj = default_storage.open(
                os.path.join(instance_dir_path, 'submission.xml'))
            xml_file = file_obj
        if xml_file:
            try:
                self._upload_instance(xml_file, instance_dir_path, files)
            except ExpatError:
                # Malformed XML: skip this instance entirely.
                continue
            except Exception as e:
                # Best-effort: log and keep going so one bad submission does
                # not abort the whole batch.
                # NOTE(review): the string is %-formatted *before* being
                # passed to _(), which defeats translation lookup; the
                # literal should be translated first and formatted after.
                logging.exception(_(
                    u'Ignoring exception, processing XML submission '
                    'raised exception: %s' % str(e)))
            else:
                # Only count instances that uploaded without error.
                instances_count += 1
    return instances_count
def __init__(self, path, sort='size', reverse=False):
    """Collect the visible contents of ``path`` from the default storage.

    Entries whose names start with a dot are skipped.  After construction,
    ``self.subdirectories`` holds ``(name, item_count)`` pairs and
    ``self.files`` holds ``(name, size)`` pairs; both lists are ordered by
    lower-cased name when ``sort == 'name'`` and by the numeric second
    element otherwise, honouring ``reverse``.
    """
    self.path = path
    subdirectories, files = default_storage.listdir(path)

    self.subdirectories = []
    for name in subdirectories:
        if name.startswith('.'):
            continue
        nested_dirs, nested_files = default_storage.listdir(
            os.path.join(self.path, name))
        item_count = len(nested_dirs) + len(nested_files)
        self.subdirectories.append((name, item_count))

    self.files = [
        (name, default_storage.size(os.path.join(self.path, name)))
        for name in files
        if not name.startswith('.')
    ]

    if sort == 'name':
        sort_key = lambda entry: entry[0].lower()
    else:
        sort_key = lambda entry: entry[1]
    self.subdirectories.sort(key=sort_key, reverse=reverse)
    self.files.sort(key=sort_key, reverse=reverse)
def list_static_resources(self):
    """
    Returns dict representing the contents of the experiment's static
    resources directory. Limited to 1-lvl deep
    WARNING works only with local filesystem storage!!!!
    returns: dict with one entry per subfolder, entry 'root' represents
    the top level '.'. Each entry is a list of files contained there
    """
    resource_dict = {'root': []}
    exp_root = os.path.join(settings.MEDIA_ROOT, self.label)
    # Local-filesystem assumption: os.path.exists on a storage path.
    if os.path.exists(exp_root):
        entries = default_storage.listdir(exp_root)
        if entries[1]:
            #only write an entry if list is not empty
            resource_dict['root'] = entries[1]
        for folder in entries[0]:
            # A real subfolder named 'root' would clash with the synthetic
            # top-level key, so it is reported under 'root1'.
            # NOTE(review): after this rename the listdir below looks up
            # 'root1', which does not exist on disk -- confirm intended.
            if folder == 'root':
                folder = 'root1'
            subfiles = default_storage.listdir(os.path.join(exp_root, folder))
            #guard against empty directories
            resource_dict[folder] = subfiles[1]
        return resource_dict
    else:
        return {}
def test_listdir_should_return_proper_paths(self):
    """listdir() must report directories and files correctly at every level
    of a nested path, with leading/trailing slashes tolerated."""
    file_name = '/a/b/test.txt'
    new_file = files.create_inmemory_file(file_name, b'Avada Kedavra')
    default_storage.save(file_name, new_file)
    dirs, files_list = default_storage.listdir('/')
    sub_dirs, sub_files_list = default_storage.listdir('/a/')
    sub_sub_dirs, sub_sub_files_list = default_storage.listdir('/a/b/')
    # Root contains only the 'a' directory, no files.
    self.assertEqual(dirs, ['a'])
    self.assertFalse(files_list)
    # '/a/' contains only the 'b' directory, no files.
    self.assertEqual(sub_dirs, ['b'])
    self.assertFalse(sub_files_list)
    # '/a/b/' contains only the saved file.
    self.assertFalse(sub_sub_dirs)
    self.assertEqual(sub_sub_files_list, ['test.txt'])
def move_file(self, source_path, destination_path, log_message):
    """Move a file from `source_path` to `destination_path` and delete the
    source directory if it's empty once the file has been successfully
    moved.

    Meant to move files from/to the guarded file path as they are disabled
    or re-enabled.

    IOError and UnicodeEncodeError are caught and logged.

    ``log_message`` is a format string accepting ``source`` and
    ``destination`` keyword placeholders."""
    log_message = force_text(log_message)
    try:
        # Nothing to do when the source is already gone (e.g. a repeated
        # task run).
        if storage.exists(source_path):
            source_parent_path = os.path.dirname(source_path)
            log.info(log_message.format(
                source=source_path, destination=destination_path))
            move_stored_file(source_path, destination_path)
            # Now that the file has been deleted, remove the directory if
            # it exists to prevent the main directory from growing too
            # much (#11464)
            remaining_dirs, remaining_files = storage.listdir(
                source_parent_path)
            if len(remaining_dirs) == len(remaining_files) == 0:
                storage.delete(source_parent_path)
    except (UnicodeEncodeError, IOError):
        msg = u'Move Failure: {} {}'.format(source_path, destination_path)
        log.exception(msg)
def get_image_files(user=None, path=''):
    """Yield the full storage path of every image under the upload dir.

    Walks the CKEditor upload directory recursively.  Thumbnail files
    (``*_thumb``) and hidden files are skipped.  When
    ``CKEDITOR_RESTRICT_BY_USER`` is enabled, non-superusers are confined
    to a subdirectory named after their username.
    """
    restrict = getattr(settings, 'CKEDITOR_RESTRICT_BY_USER', False)
    if user and not user.is_superuser and restrict:
        user_path = user.username
    else:
        user_path = ''
    browse_path = os.path.join(
        settings.CKEDITOR_UPLOAD_PATH, user_path, path)

    try:
        directories, file_names = default_storage.listdir(browse_path)
    except (NotImplementedError, OSError):
        # Backend cannot list, or the directory is missing: nothing to yield.
        return

    for name in file_names:
        stem = os.path.splitext(name)[0]
        if stem.endswith('_thumb') or os.path.basename(name).startswith('.'):
            continue
        yield os.path.join(browse_path, name)

    for directory in directories:
        subpath = os.path.join(path, directory)
        for element in get_image_files(user=user, path=subpath):
            yield element
def rmtree(prefix):
    """Recursively delete every file under ``prefix`` in storage, then
    delete the prefix itself."""
    subdirs, filenames = storage.listdir(prefix)
    for name in filenames:
        storage.delete(os.path.join(prefix, name))
    for subdir in subdirs:
        rmtree(os.path.join(prefix, subdir))
    storage.delete(prefix)
def cleanup_extracted_file():
    """Periodic task: delete file-viewer extractions older than one hour and
    invalidate their memoize cache entries."""
    log.info('Removing extracted files for file viewer.')
    root = os.path.join(settings.TMP_PATH, 'file_viewer')
    # Local storage uses local time for file modification. S3 uses UTC time.
    now = datetime.utcnow if storage_is_remote() else datetime.now
    for path in storage.listdir(root)[0]:
        full = os.path.join(root, path)
        # Age is measured from the extraction's manifest mtime.
        age = now() - storage.modified_time(
            os.path.join(full, 'manifest.webapp'))
        if age.total_seconds() > (60 * 60):
            log.debug('Removing extracted files: %s, %dsecs old.' %
                      (full, age.total_seconds()))
            for subroot, dirs, files in walk_storage(full):
                for f in files:
                    storage.delete(os.path.join(subroot, f))
            # Nuke out the file and diff caches when the file gets removed.
            id = os.path.basename(path)
            try:
                int(id)
            except ValueError:
                # Directory name is not a numeric file id; nothing cached.
                continue
            # NOTE(review): md5-of-id mirrors the memoize key scheme;
            # key.update(str(id)) is Python 2 style (bytes needed on 3).
            key = hashlib.md5()
            key.update(str(id))
            cache.delete('%s:memoize:%s:%s' % (settings.CACHE_PREFIX,
                                               'file-viewer',
                                               key.hexdigest()))
def import_all():
    """Import every pending ARP report file, then unlock the import dir.

    Does nothing when another import holds the lock.  Each file named like
    ``arp-YYMMDD-HHMM.txt`` is parsed, imported, deleted from storage, and
    its ImportLog row marked successful.
    """
    if import_dir_locked():
        return

    # Lock the import directory
    lock_import_dir()

    file_list = default_storage.listdir(settings.ARP_ROOT)[1]
    for file_name in file_list:
        # Expects filename like: arp-111101-0006.txt
        if file_name.find("arp-") < 0:
            continue
        # BUG FIX: str.lstrip/rstrip strip *character sets*, not prefixes,
        # so lstrip(settings.ARP_ROOT)/lstrip("arp-")/rstrip(".txt") could
        # eat legitimate leading/trailing characters of the timestamp.
        # Slice the timestamp out by position instead.
        start = file_name.find("arp-") + len("arp-")
        runtime_str = file_name[start:]
        if runtime_str.endswith(".txt"):
            runtime_str = runtime_str[:-len(".txt")]
        runtime = timezone.make_aware(
            datetime.strptime(runtime_str, "%y%m%d-%H%M"),
            timezone.get_current_timezone())
        full_path = settings.ARP_ROOT + file_name
        file = default_storage.open(full_path)
        log_message("importing %s" % file_name)
        ImportLog.objects.create(file_name=file_name, success=False)
        import_file(file, runtime)
        default_storage.delete(full_path)
        log = ImportLog.objects.filter(
            file_name=file_name).order_by('created')[0]
        log.success = True
        log.save()

    # Unlock the import directory
    unlock_import_dir()
def exists(path):
    """Return True if ``path`` exists in storage, directly or as an entry
    of its parent's listing.

    Some backends (e.g. S3) have no real directories, so ``storage.exists``
    may return False for a "directory" prefix; fall back to checking the
    parent listing for the name.
    """
    if storage.exists(path):
        return True
    # FIX: a top-level path without '/' previously crashed while unpacking
    # rsplit's single-element result; treat the parent as the root then.
    trimmed = path.rstrip('/')
    if '/' in trimmed:
        parent, name = trimmed.rsplit('/', 1)
    else:
        parent, name = '', trimmed
    res = storage.listdir(parent)
    return name in res[0] or name in res[1]
def import_all():
    """Import every pending ARP report file, then unlock the import dir.

    Does nothing when another import holds the lock.  Files named like
    ``arp-YYMMDD-HHMM.txt`` are parsed and imported; an AmbiguousTimeError
    (DST transition) skips the import but still deletes the file, and the
    ImportLog row is saved either way.
    """
    if import_dir_locked():
        return

    # Lock the import directory
    lock_import_dir()

    file_list = default_storage.listdir(settings.ARP_ROOT)[1]
    for file_name in file_list:
        log = ImportLog.objects.create(file_name=file_name, success=False)
        # Expects filename like: arp-111101-0006.txt
        if file_name.find("arp-") < 0:
            continue
        full_path = settings.ARP_ROOT + file_name
        try:
            # Extract our runtime from the file name.
            # BUG FIX: lstrip/rstrip strip character sets, not prefixes,
            # and could eat legitimate timestamp characters; slice the
            # timestamp out by position instead.
            start = file_name.find("arp-") + len("arp-")
            runtime_str = file_name[start:]
            if runtime_str.endswith(".txt"):
                runtime_str = runtime_str[:-len(".txt")]
            runtime = timezone.make_aware(
                datetime.strptime(runtime_str, "%y%m%d-%H%M"),
                timezone.get_current_timezone())

            # Import the data
            log_message("importing %s" % file_name)
            file_data = default_storage.open(full_path)
            import_file(file_data, runtime)
            log.success = True
        except AmbiguousTimeError:
            log_message("Caught AmbiguousTimeError. This must be daylight savings. Deleting file")
        finally:
            # Always consume the file and record the outcome.
            default_storage.delete(full_path)
            log.save()

    # Unlock the import directory
    unlock_import_dir()
def backup_database(request):
    """On POST, dump the default MySQL database, bzip2-compress it and save
    it under BACKUP_DIR with a timestamped name; always render the list of
    existing backups (newest first)."""
    if request.method == 'POST':
        # Locate mysqldump on PATH.
        output = Popen(['which', 'mysqldump'], stdout=PIPE,
                       close_fds=True).communicate()[0]
        mysqldump_bin = output.replace('\n','')
        # BUG FIX: settings.DATABASES['default'] is a dict, so the old
        # getattr(..., 'HOST', 'localhost') could never find the key and
        # always used 'localhost'; use dict.get instead.
        # SECURITY NOTE(review): the password appears on the command line
        # (visible in the process list) and cmd.split(" ") breaks on
        # credentials containing spaces -- consider an arg list plus
        # --defaults-extra-file.
        cmd = mysqldump_bin+' -h %s --opt --compact --skip-add-locks -u %s -p%s %s' % \
            (settings.DATABASES['default'].get('HOST', 'localhost'),
             settings.DATABASES['default']['USER'],
             settings.DATABASES['default']['PASSWORD'],
             settings.DATABASES['default']['NAME'])
        pop1 = Popen(cmd.split(" "), stdout=PIPE, close_fds=True)
        pop2 = Popen(["bzip2", "-c"], stdin=pop1.stdout, stdout=PIPE,
                     close_fds=True)
        output = pop2.communicate()[0]
        # Timestamped archive, e.g. 2024-01-01_12:00:00_db.sql.bz2
        default_storage.save(
            BACKUP_DIR+"/"+datetime.today().strftime("%Y-%m-%d_%H:%M:%S")+"_db.sql.bz2",
            ContentFile(output))
    files = default_storage.listdir(BACKUP_DIR)[1]
    files.sort(reverse=True)
    return render_to_response('diagnostic/backupdb.html', {'files':files,},
                              context_instance=RequestContext(request))
def visual_translation_map(request, term): dirs, files = default_storage.listdir('visual_translations/{}/'.format(term)) time = request.GET.dict().get("t", None) if time is not None and time not in dirs: raise Http404("Visual translation with t={} not found or not ready".format(time)) elif time is None: time = str(max((int(dir) for dir in dirs))) locales_info = [ { "locale": "{}_{}".format(language, country), "small_image_file": "visual_translations/{}/{}/S_{}_{}.jpg".format(term, time, language, country), "large_image_file": "visual_translations/{}/{}/L_{}_{}.jpg".format(term, time, language, country), "xlarge_image_file": "visual_translations/{}/{}/XL_{}_{}.jpg".format(term, time, language, country), "grid": { "width": grid["cell_width"] * grid["columns"], "height": grid["cell_height"] * grid["rows"], "width_xl": grid["cell_width"] * grid["columns"] * factor, "height_xl": grid["cell_height"] * grid["rows"] * factor, "width_2": int(grid["cell_width"] * grid["columns"] / 2), "height_2": int(grid["cell_height"] * grid["rows"] / 2), "width_20": int(grid["cell_width"] * grid["columns"] / 20), "height_20": int(grid["cell_height"] * grid["rows"] / 20) } } for language, country, grid, factor in VisualTranslationsEUCommunity.LOCALES ] context = { "region_topo_json": "visual_translations/geo/europe.topo.json", "locales": locales_info, } return TemplateResponse(request, "visual_translations/map.html", context=context)
def get_context_data(self, **kwargs):
    """Build the template context for the S3 beach-cam listing: every video
    file in the bucket folder, whether each is already imported, and the
    subset still awaiting import.  The POST parameter ``limit`` caps the
    ``video_list`` entries shown."""
    context = super(S3View, self).get_context_data(**kwargs)
    raw_limit = self.request.POST.get('limit')
    limit = int(raw_limit) if raw_limit else 0

    folder = "beach_return_cams_2"
    VIDEO_FORMATS = ('.mp4', '.avi', '.mkv')
    videos = [name for name in default_storage.listdir(folder)[1]
              if name.endswith(VIDEO_FORMATS)]

    vlist = []
    unimported = []
    for name in videos:
        already_imported = True if Video.objects.filter(
            file__icontains=name) else False
        vlist.append({'video': name, 'imported': already_imported})
        if not already_imported:
            unimported.append(name)

    context['title'] = 'S3 Amazon - Beach Return Cams View'
    context['videos'] = videos
    context['video_list'] = vlist[:limit] if limit else vlist
    context['unimported_videos'] = unimported
    return context
def show_gallery(path, template="_gallery.html"):
    """Template helper: collect every image file directly under ``path``
    and hand the list to ``template`` as ``image_list``."""
    gallery = []
    for file_name in default_storage.listdir(path)[1]:
        title, extension = os.path.splitext(file_name)
        if extension not in IMAGE_EXTENSTIONS:
            continue
        gallery.append({
            "title": title,
            "url": default_storage.url(posixpath.join(path, file_name)),
        })
    return {"image_list": gallery, "template": template}
def push(self):
    """Publish every form directory found under ``self.forms_path``.

    Each subdirectory ``<form>`` is expected to contain ``<form>.xml``;
    that XForm is uploaded, and if an ``instances`` subdirectory exists its
    submissions are uploaded as well.
    """
    dirs, files = default_storage.listdir(self.forms_path)
    for form_dir in dirs:
        dir_path = os.path.join(self.forms_path, form_dir)
        form_dirs, form_files = default_storage.listdir(dir_path)
        form_xml = "%s.xml" % form_dir
        if form_xml in form_files:
            form_xml_path = os.path.join(dir_path, form_xml)
            x = self._upload_xform(form_xml_path, form_xml)
            # _upload_xform returns a dict on failure.
            if isinstance(x, dict):
                self.logger.error("Failed to publish %s" % form_dir)
            else:
                self.logger.debug("Successfully published %s" % form_dir)
        if "instances" in form_dirs:
            self.logger.debug("Uploading instances")
            c = self._upload_instances(os.path.join(dir_path, "instances"))
            self.logger.debug("Published %d instances for %s" % (c, form_dir))
def cleanup_uploads(self):
    """
    Remove all files uploaded as badges.
    """
    upload_to = BadgeClass._meta.get_field('image').upload_to  # pylint: disable=protected-access
    # FIX: guard against the upload directory not existing yet (no badge
    # was ever uploaded) -- listdir() fails on a missing directory.  This
    # mirrors the guarded variant of this helper used elsewhere.
    if default_storage.exists(upload_to):
        (_, files) = default_storage.listdir(upload_to)
        for uploaded_file in files:
            default_storage.delete(upload_to + '/' + uploaded_file)
def search_entries(query):
    """
    Returns a list of encyclopedia entries with query in entry name
    (case-insensitive substring match).
    """
    _, filenames = default_storage.listdir("entries")
    # FIX: escape the user-supplied query before handing it to the regex
    # engine -- the docstring promises a literal substring match, and an
    # unescaped query such as "c++" previously raised re.error (or matched
    # unintended names).
    pattern = re.escape(query)
    return list(
        sorted(
            re.sub(r"\.md$", "", filename)
            for filename in filenames
            if re.search(pattern, filename, re.IGNORECASE)))
def push(self):
    """Publish each form directory under ``self.forms_path``, together with
    any collected instances it contains."""
    form_dir_names, _top_files = default_storage.listdir(self.forms_path)
    for form_dir in form_dir_names:
        dir_path = os.path.join(self.forms_path, form_dir)
        subdirs, subfiles = default_storage.listdir(dir_path)
        form_xml = '%s.xml' % form_dir
        if form_xml in subfiles:
            upload_result = self._upload_xform(
                os.path.join(dir_path, form_xml), form_xml)
            # A dict result signals a failed upload.
            if isinstance(upload_result, dict):
                self.logger.error("Failed to publish %s" % form_dir)
            else:
                self.logger.debug("Successfully published %s" % form_dir)
        if 'instances' in subdirs:
            self.logger.debug("Uploading instances")
            published = self._upload_instances(
                os.path.join(dir_path, 'instances'))
            self.logger.debug(
                "Published %d instances for %s" % (published, form_dir))
def cleanup_uploads(self):
    """
    Remove all files uploaded as badges.
    """
    upload_to = BadgeClass._meta.get_field('image').upload_to
    # Skip entirely when no badge was ever uploaded (directory missing);
    # listdir() would fail on a nonexistent directory.
    if default_storage.exists(upload_to):
        (_, files) = default_storage.listdir(upload_to)
        for uploaded_file in files:
            default_storage.delete(upload_to + '/' + uploaded_file)
def list_entries():
    """
    Returns a list of all names of encyclopedia entries.
    """
    _, filenames = default_storage.listdir("entries")
    # Entries are the markdown files, reported without their extension.
    names = [name[:-len(".md")] for name in filenames if name.endswith(".md")]
    return sorted(names)
def handle(self, *args, **options):
    """Management command: delete exported files older than one day."""
    for exported_file in default_storage.listdir(EXPORTS_SAVE_PATH)[1]:
        # Sentinel file that keeps the exports folder present in storage;
        # never delete it (name typo is intentional -- it matches the
        # actual file).
        if exported_file == 'DONOTDELTETHISFOLDER':
            continue
        exported_file = '%s%s' % (EXPORTS_SAVE_PATH, exported_file)
        # NOTE(review): naive datetime.now() is compared against the
        # backend's modified_time; if the backend reports UTC this is off
        # by the local offset -- confirm backend behaviour.
        modified_time_delta = datetime.datetime.now() - default_storage.modified_time(exported_file)
        if modified_time_delta > datetime.timedelta(days=1):
            default_storage.delete(exported_file)
def list_files(root_dir):
    """Recursively collect the full paths of every file under ``root_dir``
    in the default storage; returns an empty list when the directory does
    not exist."""
    # Trying to list a dir that doesn't exist can cause errors.
    if not default_storage.exists(root_dir):
        return []
    subdirs, filenames = default_storage.listdir(root_dir)
    collected = [os.path.join(root_dir, name) for name in filenames]
    for subdir in subdirs:
        collected += list_files(os.path.join(root_dir, subdir))
    return collected
def list_entries():
    '''
    Returns a list of all names of encyclopedia entries.
    '''
    _, filenames = default_storage.listdir('entries')
    markdown_names = (name for name in filenames if name.endswith('.md'))
    return sorted(re.sub(r'\.md$', '', name) for name in markdown_names)
def import_folder(cls, folder=settings.S3_FOLDER):
    """Create a Video row for every un-imported video file in ``folder``.

    File names are expected to look like ``YYYY-MM-DD_HH_<camera_key>.ext``
    (the hour part optional).  Returns the number of videos created.
    """
    logger = logging.getLogger('videos')
    logger.debug('Started import_folder method.')
    VIDEO_FORMATS = ('.mp4', '.avi', '.mkv')
    videos = [
        v for v in default_storage.listdir(folder)[1]
        if v.endswith(VIDEO_FORMATS)]
    count = 0
    for video in videos:
        logger.debug("Checking {0}".format(video))
        # At most 4 parts: date, hour, camera key, remainder.
        nameparts = video.split("_", 3)
        # if len(nameparts) != 2:
        #     logger.debug("Error: can't parse {0}".format(nameparts))
        #     continue
        filename = os.path.join(folder, video)
        # Skip files already imported.
        if cls.objects.filter(file=filename).exists():
            continue
        # If video doesn't exist and filename splits nicely, create it.
        logger.debug("Importing {0}".format(video))
        datestr = '_'.join(nameparts[0:2])
        try:
            video_datetime = datetime.datetime.strptime(
                datestr, "%Y-%m-%d_%H")
        except:
            # NOTE(review): bare except -- presumably meant to catch the
            # ValueError from strptime when the name has no hour part.
            datestr = '_'.join(nameparts[0:1])
            video_datetime = datetime.datetime.strptime(
                datestr, "%Y-%m-%d")
        date = video_datetime.date()
        start_time = video_datetime.time()
        camstr = nameparts[-1]
        camstr = camstr.split(".")[0]  # Remove the extension.
        # assume each video is 60 mins long (video times are
        # inaccurate/halved?)
        end_time = (video_datetime + datetime.timedelta(minutes=60)).time()
        logger.debug(
            "Finding camera name closest to {} str:{}*".format(camstr, camstr.split("_")[0]))
        try:
            # use filter()[0] rather than get if theres dupes in the db.
            camera = Camera.objects.filter(
                camera_key__icontains=camstr.split("_")[0])[0]
            cls.objects.create(
                date=date, start_time=start_time, end_time=end_time,
                camera=camera, file=os.path.join(folder, video))
            count += 1
        except:
            # NOTE(review): bare except also hides DB errors from create();
            # the log message assumes an IndexError from filter()[0].
            logger.error(
                'No matching camera found, skipping video name {}'.format(nameparts[-1]))
    logger.debug("Import task completed.")
    return count
def suffix_search(title):
    """
    Retrieves encyclopedia entries whose filename matches ``title`` at the
    *beginning* -- ``re.match`` anchors at the start of the string, so
    despite this function's name this is a prefix (regex) match, not a
    suffix match.  ``title`` is interpreted as a regular expression.
    """
    _, filenames = default_storage.listdir("entries")
    searchResults = list(
        re.sub(r"\.md$", "", filename)
        for filename in filenames
        if re.match(title, filename) is not None
    )
    return searchResults
def iterate(path):
    # Depth-first walk of `path` in storage: for each level, recurse into
    # the sorted subdirectories first, then record the sorted files.
    # Appends every visited path to the enclosing scope's `all_files` list
    # (closure variable), producing a stable, fully ordered listing.
    path_dirs, path_files = storage.listdir(path)
    for dirname in sorted(path_dirs):
        full = os.path.join(path, dirname)
        all_files.append(full)
        iterate(full)
    for filename in sorted(path_files):
        full = os.path.join(path, filename)
        all_files.append(full)
def list_entries():
    """
    Returns a list of all names of encyclopedia entries.
    """
    _, filenames = default_storage.listdir("entries")
    # Only markdown files count as entries; strip the extension.
    # (Changing the substitution to emit markup, e.g.
    # (r"(.*)\.md$", r"<a href='wiki/\1'>\1</a>"), would inject links
    # directly into HTML -- kept as a plain strip on purpose.)
    entry_names = [re.sub(r"\.md$", "", name)
                   for name in filenames if name.endswith(".md")]
    entry_names.sort()
    return entry_names
def list_entries():
    """Return the sorted names of all entry files (``*.md``) with the
    extension removed."""
    _, filenames = default_storage.listdir("entries")
    return sorted(
        re.sub(r"\.md$", "", filename)
        for filename in filenames
        if filename.endswith(".md")
    )
def list_entries_html():
    """
    Returns a list of all names of encyclopedia entries.
    """
    _, filenames = default_storage.listdir(
        "encyclopedia/templates/encyclopedia")
    html_files = [name for name in filenames if name.endswith(".html")]
    return sorted(re.sub(r"\.html$", "", name) for name in html_files)
def run_listdir_test(self, folder):
    """Exercise listdir() on ``folder`` (with and without a trailing slash)
    against a small fixture tree, then clean the fixtures up."""
    content = ('testsdir/file3.txt', 'testsdir/file4.txt',
               'testsdir/sub/file5.txt')
    for file in content:
        default_storage.save(file,
                             UnicodeContentFile('Lorem ipsum dolor sit amet'))
        # FIX: assertTrue/assertFalse replace the deprecated assert_ alias
        # (removed from unittest in Python 3.12).
        self.assertTrue(default_storage.exists(file))

    dirs, files = default_storage.listdir(folder)
    self.assertEqual(dirs, ['sub'])
    self.assertEqual(files, ['file3.txt', 'file4.txt'])

    # Listing the subdirectory must work regardless of trailing slash.
    if not folder.endswith('/'):
        folder = folder+'/'
    dirs, files = default_storage.listdir(folder+dirs[0])
    self.assertEqual(dirs, [])
    self.assertEqual(files, ['file5.txt'])

    for file in content:
        default_storage.delete(file)
        self.assertFalse(default_storage.exists(file))
def is_empty(self):
    """Return True when this item is a folder containing no files or
    subfolders; False otherwise (including plain files)."""
    if not self.is_folder:
        return False
    try:
        dirs, files = default_storage.listdir(self.name)
    except UnicodeDecodeError:
        # Filesystem encoding changed underneath us; surface a dedicated
        # error so the caller can explain the situation.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()
    return not dirs and not files
def list_entries():
    """
    Returns a list of all names of encyclopedia entries.
    """
    _, filenames = default_storage.listdir("entries")
    names = [name[:-len(".md")] for name in filenames if name.endswith(".md")]
    # Case-insensitive alphabetical ordering.
    names.sort(key=str.lower)
    return names
def _is_empty(self):
    """True iff this item is a folder with no contents; False for
    non-empty folders and for plain files."""
    if not self.is_folder:
        return False
    try:
        subdirs, subfiles = default_storage.listdir(self.path)
    except UnicodeDecodeError:
        # Filesystem encoding changed underneath us; raise the dedicated
        # exception so callers can report it meaningfully.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()
    return not subdirs and not subfiles
def delete_all_files(anonid, rootdir=CONF["media_base_dir"]):
    """used to IRREVERSIBLY clear all data from a user.
    will completely purge the directory of given anonid
    anonid = anonid of the directory to be purged
    rootdir = the root of all media storage sent to the default_storage class"""
    # The on-disk directory name is the SHA1 of the anonid.
    dirname = get_sha1(anonid)
    if default_storage.exists(f"{rootdir}/{dirname}"):
        # [1] -> files only; subdirectories are not descended into.
        files = default_storage.listdir(f"{rootdir}/{dirname}")[1]
        for file in files:
            # NOTE(review): the existence check above prepends rootdir but
            # this path does not -- presumably delete_file() applies the
            # same rootdir default internally; confirm.
            fullpath = f"{dirname}/{file}"
            delete_file(fullpath)
def random_entry():
    """
    Returns a random entry.
    """
    _, filenames = default_storage.listdir("entries")
    entries = sorted(
        re.sub(r"\.md$", "", name)
        for name in filenames
        if name.endswith(".md")
    )
    return random.choice(entries)
def search_entries(texto):
    """
    Returns a list of all names of encyclopedia entries that contains the
    text (case-insensitive substring match on the entry name).
    """
    matches = []
    _, filenames = default_storage.listdir("entries")
    # Hoist invariants: uppercase the needle once, and strip the .md
    # extension once per file instead of running re.sub twice.
    needle = texto.upper()
    for filename in filenames:
        name = re.sub(r"\.md$", "", filename)
        if needle in name.upper():
            matches.append(name)
    return matches
def _delete_directory(path):
    """Recursively delete ``path`` and everything below it from the
    default storage (files first, then subdirectories)."""
    if default_storage.exists(path):
        directories, filenames = default_storage.listdir(path)
        for filename in filenames:
            # BUG FIX: the file name was previously interpolated as the
            # literal text "(unknown)" in both the log line and the delete
            # call, so no real file was ever removed; use the actual
            # listing entry.
            _log.info(f'Deleting file {path}/{filename}...')
            default_storage.delete(f'{path}/{filename}')
        for directory in directories:
            _log.info(f'Deleting directory {path}/{directory}...')
            _delete_directory(f'{path}/{directory}')
            default_storage.delete(f'{path}/{directory}')
def list_similar(query):
    """
    Returns a list of names of encyclopedia entries that contains given
    query as a substring.
    """
    _, filenames = default_storage.listdir("entries")
    similar = [re.sub(r"\.md$", "", name)
               for name in filenames
               if contains(name, query)]
    similar.sort()
    return similar
def request_files(key: bytes, anonid, rootdir=CONF["media_base_dir"], decompress=True) -> bytes:
    """generator that yields file by file"""
    # The on-disk directory name is the SHA1 of the anonid.
    dirname = get_sha1(anonid)
    # [1] -> files only; subdirectories are ignored.
    files = default_storage.listdir(f"{rootdir}/{dirname}")[1]
    for file in files:
        # NOTE(review): this path omits rootdir, unlike the listdir call
        # above -- presumably open_file applies the same rootdir default
        # (it is passed explicitly below); confirm.
        fullpath = f"{dirname}/{file}"
        yield open_file(key, fullpath, rootdir=rootdir, decompress=decompress)
def import_folder(cls, folder=settings.S3_FOLDER):
    """Create a Video row for every un-imported video file in ``folder``.

    File names are expected to look like ``YYYY-MM-DD_HH_<camera_key>.ext``
    (the hour part optional).  Returns the number of videos created.
    """
    logger = logging.getLogger('videos')
    logger.debug('Started import_folder method.')
    VIDEO_FORMATS = ('.mp4', '.avi', '.mkv')
    videos = [
        v for v in default_storage.listdir(folder)[1]
        if v.endswith(VIDEO_FORMATS)
    ]
    count = 0
    for video in videos:
        logger.debug("Checking {0}".format(video))
        # At most 4 parts: date, hour, camera key, remainder.
        nameparts = video.split("_", 3)
        filename = os.path.join(folder, video)
        # Skip files already imported.
        if cls.objects.filter(file=filename).exists():
            continue
        # If video doesn't exist and filename splits nicely, create it.
        logger.debug("Importing {0}".format(video))
        datestr = '_'.join(nameparts[0:2])
        # FIX: the bare ``except:`` clauses below also swallowed
        # SystemExit/KeyboardInterrupt and hid unrelated errors; catch only
        # the exceptions these operations are expected to raise.
        try:
            video_datetime = datetime.datetime.strptime(
                datestr, "%Y-%m-%d_%H")
        except ValueError:
            # No hour component; fall back to date-only names.
            datestr = '_'.join(nameparts[0:1])
            video_datetime = datetime.datetime.strptime(
                datestr, "%Y-%m-%d")
        date = video_datetime.date()
        start_time = video_datetime.time()
        camstr = nameparts[-1]
        camstr = camstr.split(".")[0]  # Remove the extension.
        # assume each video is 60 mins long (video times are
        # inaccurate/halved?)
        end_time = (video_datetime + datetime.timedelta(minutes=60)).time()
        logger.debug("Finding camera name closest to {} str:{}*".format(
            camstr, camstr.split("_")[0]))
        try:
            # use filter()[0] rather than get if theres dupes in the db.
            camera = Camera.objects.filter(
                camera_key__icontains=camstr.split("_")[0])[0]
            cls.objects.create(date=date, start_time=start_time,
                               end_time=end_time, camera=camera,
                               file=os.path.join(folder, video))
            count += 1
        except IndexError:
            # filter()[0] found no camera matching the key prefix.
            logger.error(
                'No matching camera found, skipping video name {}'.format(
                    nameparts[-1]))
    logger.debug("Import task completed.")
    return count
def testListdir(self):
    """listdir() should accept paths with or without leading/trailing
    slashes and return ([directories], [files]) pairs."""
    # Empty storage: nothing listed at the root.
    self.assertEqual(default_storage.listdir(""), ([], []))
    self.assertEqual(default_storage.listdir("/"), ([], []))
    with self.save_file(), self.save_file(name="bar/bat.txt"):
        # Root shows the 'bar' dir and the default 'foo.txt' fixture.
        self.assertEqual(default_storage.listdir(""), (["bar"], ["foo.txt"]))
        self.assertEqual(default_storage.listdir("/"), (["bar"], ["foo.txt"]))
        # The subdirectory listing is slash-insensitive too.
        self.assertEqual(default_storage.listdir("bar"), ([], ["bat.txt"]))
        self.assertEqual(default_storage.listdir("/bar"), ([], ["bat.txt"]))
        self.assertEqual(default_storage.listdir("bar/"), ([], ["bat.txt"]))
def browse_images(request):
    """Return a JSON array describing every image in each configured browse
    folder: full-size URL, title, 150x150 thumbnail URL and folder key."""
    result = []
    for fname, folder in BROWSE_FOLDERS.items():
        for name in default_storage.listdir(folder)[1]:
            full_path = os.path.join(folder, name)
            result.append({
                'image': default_storage.url(full_path),
                'title': name,
                'thumb': get_thumbnail(full_path, '150x150',
                                       upscale=False).url,
                'folder': fname,
            })
    return HttpResponse(content_type='text/javascript',
                        content=json.dumps(result))
def delete_meme_file(post):
    """Delete the post's meme file from storage, then remove its parent
    directory if the deletion left it empty."""
    name = post.meme_file.name
    if not default_storage.exists(name):
        return
    default_storage.delete(name)
    parent = os.path.dirname(name)
    if default_storage.exists(parent):
        remaining_dirs, remaining_files = default_storage.listdir(parent)
        if not (remaining_dirs or remaining_files):
            default_storage.delete(parent)
def sub_entries(stringname):
    """
    Returns a list of names of sub string entries.
    """
    _, filenames = default_storage.listdir("entries")
    # Keep the files whose name contains the search string.
    matching = [name for name in filenames if stringname in name]
    return sorted(
        re.sub(r"\.md$", "", name)
        for name in matching
        if name.endswith(".md")
    )
def recursive_delete(root):
    """
    Recursively delete the contents of a directory in the Django default
    storage.

    Unfortunately, this will not delete empty folders, as the default
    FileSystemStorage implementation does not allow it.
    """
    subdirs, filenames = default_storage.listdir(root)
    # Depth-first: clear subdirectories before this level's files.
    for name in subdirs:
        recursive_delete(os.path.join(root, name))
    for name in filenames:
        default_storage.delete(os.path.join(root, name))
def show_gallery(path, template="_gallery.html"):
    """Template helper: list the image files directly under ``path`` as
    dicts with ``title`` and storage ``url``, for rendering by
    ``template``."""
    _, file_names = default_storage.listdir(path)
    images = [
        {'title': os.path.splitext(name)[0],
         'url': default_storage.url(posixpath.join(path, name))}
        for name in file_names
        if os.path.splitext(name)[1] in IMAGE_EXTENSTIONS
    ]
    return {'image_list': images, 'template': template}
def _get_files(self): all_files, res = [], OrderedDict() # Not using os.path.walk so we get just the right order. def iterate(path): path_dirs, path_files = storage.listdir(path) for dirname in sorted(path_dirs): full = os.path.join(path, dirname) all_files.append(full) iterate(full) for filename in sorted(path_files): full = os.path.join(path, filename) all_files.append(full) iterate(self.dest) for path in all_files: filename = smart_unicode(os.path.basename(path), errors='replace') short = smart_unicode(path[len(self.dest) + 1:], errors='replace') mime, encoding = mimetypes.guess_type(filename) if not mime and filename == 'manifest.webapp': mime = 'application/x-web-app-manifest+json' if storage_is_remote(): # S3 doesn't have directories, so we check for names with this # prefix and call it a directory if there are some. subdirs, subfiles = storage.listdir(path) directory = bool(subdirs or subfiles) else: directory = os.path.isdir(path) res[short] = { 'binary': self._is_binary(mime, path), 'depth': short.count(os.sep), 'directory': directory, 'filename': filename, 'full': path, 'md5': get_md5(path) if not directory else '', 'mimetype': mime or 'application/octet-stream', 'syntax': self.get_syntax(filename), 'modified': ( time.mktime(storage.modified_time(path).timetuple()) if not directory else 0), 'short': short, 'size': storage.size(path) if not directory else 0, 'truncated': self.truncate(filename), 'url': reverse('mkt.files.list', args=[self.file.id, 'file', short]), 'url_serve': reverse('mkt.files.redirect', args=[self.file.id, short]), 'version': self.file.version.version, } return res
def list_entries():
    """
    Returns a list of all names of encyclopedia entries.
    """
    # Entries live in md_files/entries two levels above this module.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    entries_dir = os.path.join(base_dir, 'md_files/entries')
    _, filenames = default_storage.listdir(entries_dir)
    return sorted(
        re.sub(r"\.md$", "", name)
        for name in filenames
        if name.endswith(".md")
    )
def icons():
    """
    Generates a list of tuples for the default icons for add-ons, in the
    format (pseudo-mime-type, description).
    """
    icons = [("image/jpeg", "jpeg"), ("image/png", "png"), ("", "default")]
    _dirs, files = storage.listdir(settings.ADDON_ICONS_DEFAULT_PATH)
    for fname in files:
        # Only 32px, non-default variants define selectable icon names.
        if "32" not in fname or "default" in fname:
            continue
        icon_name = fname.split("-")[0]
        icons.append(("icon/%s" % icon_name, icon_name))
    return icons
def dokuman(request, isim):
    """Render the catalog (swf) view of a document: resolve it by slug,
    count its rendered page files and build the swf base URL."""
    dokuman = get_object_or_404(Dokuman, slug=isim)
    dosya = dokuman.dosya
    # Rendered pages live in '<name>-katalog-gorunumu-dosyalari' next to
    # the original file; each page is one file, so the page count is the
    # number of files in that folder.
    klasor = dosya.filename_root + '-katalog-gorunumu-dosyalari'
    swfKlasor = os.path.dirname(dosya.path) + '/' + klasor
    sayfaSayi = len(default_storage.listdir(swfKlasor)[1])
    swfUrl = STATIC_URL + swfKlasor
    return render_to_response('dokuman.html',
                              {'dokuman':dokuman, 'sayfa':sayfaSayi, 'swfUrl':swfUrl},
                              context_instance = RequestContext(request))
def icons():
    """
    Generates a list of tuples for the default icons for add-ons, in the
    format (pseudo-mime-type, description).
    """
    defaults = [('image/jpeg', 'jpeg'), ('image/png', 'png'),
                ('', 'default')]
    _subdirs, filenames = storage.listdir(settings.ADDON_ICONS_DEFAULT_PATH)
    # Only 32px, non-default variants contribute selectable icon names.
    extra = [
        ('icon/%s' % fname.split('-')[0], fname.split('-')[0])
        for fname in filenames
        if '32' in fname and 'default' not in fname
    ]
    return defaults + extra
def list_entries():
    """
    Returns a list of all names of encyclopedia entries.
    """
    _, filenames = default_storage.listdir("entries")
    # Keep only markdown files and report them without the ".md" suffix.
    markdown = filter(lambda name: name.endswith(".md"), filenames)
    return sorted(re.sub(r"\.md$", "", name) for name in markdown)
def cleanup_media_files():
    """
    Delete MEDIA_ROOT directory with media files
    :return:
    """
    location = default_storage.base_location
    try:
        # [0] -> only the top-level directories; loose files directly in
        # MEDIA_ROOT are left in place.
        listdir = default_storage.listdir(location)[0]
        for dir in listdir:
            # Local-filesystem only: .path() raises on non-FS backends.
            rmtree(default_storage.path(dir))
        print("The MEDIA_ROOT directory was cleared.")
    except OSError as e:
        print(f'Error: {e.strerror}')
def generate_url(namespace, file_format):
    """
    Finds file by namespace and file format and returns url on it.
    If file is not found, returns empty string.
    """
    try:
        _folders, files = default_storage.listdir(namespace)
    except OSError:
        # The namespace directory does not exist in storage.
        raise RuntimeError(u'Unknown namespace `{}`'.format(namespace))
    match = next(
        (name for name in files if name.endswith(file_format)), None)
    if match is None:
        return ''
    return default_storage.url(os.path.join(namespace, match))