def flush_rclone(self):
    """Copy all queued upload directories to their buckets via rclone.

    No-op unless an rclone remote is configured and there is queued data.
    Ensures every target bucket exists first, then shells out to
    ``rclone copy`` once per queued bucket, removing the local link tree
    after each successful copy.  Re-raises ``CalledProcessError`` if
    rclone fails, leaving the queue intact so the run can be retried.
    """
    if not self.rclone_remote or not self.rclone_queue:
        return
    click.echo()
    # Create any buckets that have not been created yet.
    # (dict.viewitems() => this module targets Python 2.)
    for name, data in self.buckets.viewitems():
        if not data['exists']:
            self.create_bucket(name)
            data['exists'] = True
    for bucket, data in self.rclone_queue.viewitems():
        click.echo(cformat('Copying %{cyan}{}%{reset} files (%{cyan}{}%{reset}) to %{cyan}{}%{reset} via rclone')
                   .format(data['files'], do_filesizeformat(data['bytes']), bucket))
        start = datetime.now()
        try:
            # --copy-links: the staging dir is a tree of symlinks; follow them.
            subprocess.check_call([
                'rclone', 'copy', '--copy-links', data['path'],
                '{}:{}'.format(self.rclone_remote, bucket)
            ])
        except subprocess.CalledProcessError:
            click.secho('\nError while running rclone', fg='red')
            raise  # abort without clearing the queue
        duration = (datetime.now() - start)
        click.echo('...finished after {}'.format(
            format_human_timedelta(duration, 'minutes', narrow=True)))
        # The local staging tree is no longer needed once the copy succeeded.
        rmlinktree(data['path'])
    self.rclone_queue.clear()
def _size_formatter(view, context, model, name):
    """Admin column formatter: human-readable size with the raw byte count
    shown underneath in light gray.  Returns "" when the value is None.
    """
    size = getattr(model, name)
    if size is None:
        return ""
    # NOTE(review): both replace() arguments look identical here — the target
    # is presumably a non-breaking space lost in transcription; confirm.
    pretty = filters.do_filesizeformat(size).replace(' ', ' ')
    return Markup("%s<div style='color: lightgray;'>%s</div>" % (pretty, size))
def _log_image_created(image, user, **kwargs):
    """Add a positive management event-log entry for a newly uploaded image."""
    details = {
        'File name': image.filename,
        'File type': image.content_type,
        'File size': do_filesizeformat(image.size),
    }
    image.event_new.log(EventLogRealm.management, EventLogKind.positive, 'Layout',
                        'Added image "{}"'.format(image.filename), user,
                        data=details)
def upload_file(self, entry, session):
    """Upload one attachment's file to the Citadel search app.

    Streams the attachment file via HTTP PUT and, on success, records the
    uploaded file id on *entry* and commits.  Returns ``True`` on success,
    ``False`` on an HTTP error.  Open/upload timings are logged at debug
    level.
    """
    self.logger.debug('Uploading attachment %d (%s) [%s]',
                      entry.attachment.file.id, entry.attachment.file.filename,
                      do_filesizeformat(entry.attachment.file.size))
    ts = time.time()
    with entry.attachment.file.open() as file:
        delta = time.time() - ts
        self.logger.debug('File opened: %d (%s) [%.03fs]',
                          entry.attachment.file.id,
                          entry.attachment.file.filename, delta)
        ts = time.time()
        # Stream the open file object directly as the PUT request body.
        resp = session.put(url_join(
            self.search_app,
            f'api/record/{entry.citadel_id}/files/attachment'), data=file)
        delta = time.time() - ts
        self.logger.debug('Upload finished: %d (%s) [%.03fs]',
                          entry.attachment.file.id,
                          entry.attachment.file.filename, delta)
        if resp.ok:
            # Remember which file revision was uploaded, then persist.
            entry.attachment_file_id = entry.attachment.file.id
            db.session.merge(entry)
            db.session.commit()
            resp.close()
            return True
        else:
            self.logger.error('Failed uploading attachment %d: [%d] %s',
                              entry.attachment.id, resp.status_code, resp.text)
            resp.close()
            return False
def _log_image_created(image, user, **kwargs):
    """Write a positive 'Layout' event-log entry for an added image."""
    log_data = {
        'File name': image.filename,
        'File type': image.content_type,
        'File size': do_filesizeformat(image.size),
    }
    message = 'Added image "{}"'.format(image.filename)
    image.event.log(EventLogRealm.management, EventLogKind.positive, 'Layout',
                    message, user, data=log_data)
def human_size(value, binary=False):
    """Wrap jinja2's do_filesizeformat, abbreviating "Byte(s)" to "B".

    `value` - size in bytes
    `binary` - use binary (KiB/MiB) units when True
    """
    formatted = do_filesizeformat(value, binary=binary)
    # Replace the plural form first so the singular pass cannot mangle it.
    return formatted.replace('Bytes', 'B').replace('Byte', 'B')
def clean(self, *args, **kwargs):
    """Validate the form, rejecting attachments larger than max_upload_size.

    Returns the cleaned data dict; raises ``ValidationError`` when the
    uploaded attachment exceeds the configured maximum size.
    """
    data = super(CommAttachmentForm, self).clean(*args, **kwargs)
    attachment = data.get('attachment')
    max_size = self.max_upload_size
    if attachment and attachment.size > max_size:
        # L10n: error raised when review attachment is too large.
        # Interpolate *after* translation: the original code formatted the
        # size into the string before calling _(), so the msgid could never
        # match the catalog entry and the message was untranslatable.
        exc = _('Attachment exceeds maximum size of %s.') % do_filesizeformat(max_size)
        raise ValidationError(exc)
    return data
def clean(self, *args, **kwargs):
    """Validate the form, rejecting attachments larger than max_upload_size.

    Returns the cleaned data dict; raises ``ValidationError`` when the
    uploaded attachment exceeds the configured maximum size.
    """
    data = super(CommAttachmentForm, self).clean(*args, **kwargs)
    attachment = data.get('attachment')
    max_size = self.max_upload_size
    if attachment and attachment.size > max_size:
        # L10n: error raised when review attachment is too large.
        # Interpolate *after* translation: formatting the size into the
        # string before calling _() makes the msgid dynamic, so gettext can
        # never find it in the translation catalog.
        exc = _('Attachment exceeds maximum size of %s.') % do_filesizeformat(max_size)
        raise ValidationError(exc)
    return data
def _get_attachment_data(attachment):
    """Build a log-friendly dict describing *attachment*.

    Starts from the folder data and adds either the link URL (for link
    attachments) or the file name/size/type (for file attachments).
    """
    data = _get_folder_data(attachment.folder, True)
    data['Type'] = unicode(attachment.type.title)
    data['Title'] = attachment.title
    if attachment.type == AttachmentType.link:
        data['URL'] = attachment.link_url
        return data
    file_ = attachment.file
    data['File name'] = file_.filename
    data['File size'] = do_filesizeformat(file_.size)
    data['File type'] = file_.content_type
    return data
def _get_attachment_data(attachment):
    """Return a dict of human-readable attachment details for logging.

    Folder data is merged in first; link attachments contribute a URL,
    file attachments contribute name, size and content type.
    """
    info = _get_folder_data(attachment.folder, True)
    info['Type'] = unicode(attachment.type.title)
    info['Title'] = attachment.title
    if attachment.type == AttachmentType.link:
        info['URL'] = attachment.link_url
    else:
        info['File name'] = attachment.file.filename
        info['File size'] = do_filesizeformat(attachment.file.size)
        info['File type'] = attachment.file.content_type
    return info
def support():
    """Render the support contact form and handle its submission.

    GET renders the form (pre-filled for authenticated users, who also
    skip the captcha); a valid POST sends the support and confirmation
    emails and redirects to the front page.  Category choices, description
    length limits and the attachment size cap come from the app config.
    """
    form = ContactForm()
    if current_user.is_authenticated:
        user_id = current_user.id
        form.email.data = current_user.email
        form.name.data = form.name.data or (current_user.profile.full_name
                                            if current_user.profile else None)
        # Authenticated users do not need to solve the captcha.
        form.recaptcha.validators = []
    else:
        user_id = None
    uap = user_agent_information()
    # Load form choices and validation from config
    categories = OrderedDict(
        (c['key'], c) for c in current_app.config['SUPPORT_ISSUE_CATEGORIES'])
    form.issue_category.choices = \
        [(c['key'], c['title']) for c in categories.values()]
    form.description.validators.append(
        Length(
            min=current_app.config['SUPPORT_DESCRIPTION_MIN_LENGTH'],
            max=current_app.config['SUPPORT_DESCRIPTION_MAX_LENGTH'],
        ))
    form.attachments.description = 'Optional. Max attachments size: ' + \
        do_filesizeformat(current_app.config['SUPPORT_ATTACHMENT_MAX_SIZE'])
    if form.validate_on_submit():
        attachments = request.files.getlist("attachments")
        if attachments and not check_attachment_size(attachments):
            form.attachments.errors.append('File size exceeded. '
                                           'Please add URLs to the files '
                                           'or make a smaller selection.')
        else:
            context = dict(user_id=user_id, info=form.data, uap=uap)
            # Recipients are configured per issue category.
            recipients = categories[form.issue_category.data]['recipients']
            send_support_email(context, recipients)
            send_confirmation_email(context)
            flash(_(
                'Request sent successfully, '
                'You should receive a confirmation email within several '
                'minutes - if this does not happen you should retry or send '
                'us an email directly to [email protected].'), category='success')
            return redirect(url_for('zenodo_frontpage.index'))
    # GET, or POST that failed validation: re-render the form.
    return render_template(
        'zenodo_support/contact_form.html',
        uap=uap,
        form=form,
        categories=categories,
        max_file_size=current_app.config['SUPPORT_ATTACHMENT_MAX_SIZE'],
    )
def gui_representation(self):
    '''
    This returns a dictionary giving the representation in the UI.
    The structure is dictated by the UI-- e.g. the `text` key is used as
    the display shown.  `pk` is not shown, but is carried around with the
    node such that it can be returned to the backend.
    '''
    label = '%s (%s)' % (self.name, do_filesizeformat(self.size, binary=True))
    return {
        'text': label,
        'pk': self.pk,
        'href': '/resources/%d' % self.pk,
    }
def files(shellid):
    """Webshell file-manager endpoint.

    GET renders the file browser page.  POST asks the remote shell for the
    directory listing of the requested path and returns JSON containing the
    parsed file list plus a jstree-compatible folder tree.
    """
    shells = db.session.query(Shells).filter_by(id=shellid, uid=current_user.id).first()
    if request.method == 'GET':
        return render_template('files.html')
    else:
        req = dataRequest(shells)
        # NOTE(review): p is bytes after .encode(); the p == 'false'
        # comparisons below only match on Python 2 — confirm interpreter.
        p = request.form.get('path').encode(shells.coding)
        # 'false' means no path was supplied: fall back to the default path
        # stored at index 5 of shells.info (presumably the shell's cwd).
        path = shells.info.splitlines()[5] if p == 'false' else p
        # Get the directory listing from the remote shell
        files = req.files(path)
        if not files:
            return jsonify({"status": -1, "msg": '连接失败!'})
        files = files.decode(shells.coding).splitlines()
        # Parse the tab-separated listing, skipping '.' and '..'
        fileslist = []
        for i in files:
            i = i.split('\t')
            if i[1] not in ('.', '..'):
                # Column 4 is the size in bytes; '-' when empty.
                i[4] = do_filesizeformat(i[4]) if i[4] else '-'
                fileslist.append(i)
        # jstree node list (entries whose first column is truthy, i.e. folders)
        lists = [{'text': i[1], 'children': True} for i in fileslist if i[1] not in ['.', '..'] and i[0]]
        if p == 'false':
            # Split the shell path into components
            path = re.split(r'[/\\]', path)
            path[0] = path[0] if path[0] else '/'
            # Nest the node list once per path component, innermost first, so
            # the current directory ends up opened (and selected at the leaf).
            # NOTE(review): path.index(i) picks the *first* occurrence, which
            # misidentifies the leaf when components repeat — confirm.
            for i in path[::-1]:
                if path.index(i) == len(path) - 1:
                    lists = [{'text': i, 'children': lists, "state": {"opened": True, "selected": True}}]
                else:
                    lists = [{'text': i, 'children': lists, "state": {"opened": True}}]
            # Get the drive list from the remote shell
            disk = req.disk().splitlines()
            # jstree nodes for the other drives
            disk = [{'text': i, 'children': True} for i in disk if i != path[0]]
            # Insert the expanded tree among the drives, keeping them ordered
            for k, v in enumerate(disk):
                if ord(lists[0]['text'][0]) >= ord(v['text'][0]):
                    disk.insert(k + 1, lists[0])
                    break
            else:
                # for/else: no other drives, the expanded tree is the result.
                disk = lists[0]
        else:
            disk = lists
        return jsonify({"status": 1, "msg": '打开文件夹成功!', "data": {"lists": disk, "files": fileslist}})
def get(self, *args, **kwargs):
    """List files in the application's root directory as JSON.

    The optional ``type`` query argument ('audio', 'features' or
    'predictions'; anything else falls back to 'audio') selects which
    extensions are included: 'wav' for audio, 'csv' for the others.
    """
    root_dir = self.application.root_dir
    dir_list = os.listdir(root_dir)
    browse_type = self.request.query_arguments.get('type')
    if browse_type:
        # Tornado query arguments arrive as lists of byte strings.
        browse_type = browse_type[0].decode()
    if not browse_type or browse_type not in [
            'audio', 'features', 'predictions'
    ]:
        browse_type = 'audio'
    # Filter based on the requested type.
    ext_filter = []
    if browse_type == 'audio':
        ext_filter += ['wav']
    elif browse_type == 'features' or browse_type == 'predictions':
        ext_filter += ['csv']
    # Get files.
    files = list()
    for filename in dir_list:
        file_path = os.path.join(root_dir, filename)
        # Skip directories, extension-less files and hidden files.
        if os.path.isdir(
                file_path) or '.' not in filename or filename.startswith(
                    '.'):
            continue
        else:
            _, file_ext = filename.rsplit('.', maxsplit=1)
            if file_ext not in ext_filter:
                continue
            stat = os.stat(file_path)
            files.append(
                dict(
                    path=file_path,
                    name=filename,
                    size=stat.st_size,
                    human_size=do_filesizeformat(stat.st_size),
                    extension=file_ext,
                ))
    self.add_header('Content-Type', 'application/json')
    self.write(dict(files=files))
def find_orphan_files():
    """Finds unused files in the given project.

    This is a heavy operation that inspects *everything* in MongoDB. Use with
    care.

    Returns 1 if the output file already exists, 0 when no orphans were
    found; otherwise writes the orphan Object IDs to orphan-files.txt in
    the configured storage directory and logs summary statistics.
    """
    from jinja2.filters import do_filesizeformat
    from pathlib import Path

    output_fpath = Path(current_app.config['STORAGE_DIR']) / 'orphan-files.txt'
    # Refuse to clobber the results of a previous run.
    if output_fpath.exists():
        log.error('Output filename %s already exists, remove it first.', output_fpath)
        return 1

    start_timestamp = datetime.datetime.now()
    orphans = _find_orphan_files()
    if not orphans:
        log.info('No orphan files found, congratulations.')
        return 0

    files_coll = current_app.db('files')
    # Sum the byte sizes of all orphans in a single aggregation round-trip.
    aggr = files_coll.aggregate([
        {'$match': {'_id': {'$in': list(orphans)}}},
        {'$group': {
            '_id': None,
            'size': {'$sum': '$length_aggregate_in_bytes'},
        }}
    ])
    total_size = list(aggr)[0]['size']
    log.info('Total orphan file size: %s',
             do_filesizeformat(total_size, binary=True))
    orphan_count = len(orphans)
    total_count = files_coll.count()
    log.info('Total nr of orphan files: %d', orphan_count)
    log.info('Total nr of files : %d', total_count)
    log.info('Orphan percentage : %d%%', 100 * orphan_count / total_count)
    end_timestamp = datetime.datetime.now()
    duration = end_timestamp - start_timestamp
    log.info('Finding orphans took %s', duration)
    log.info('Writing Object IDs to %s', output_fpath)
    # One Object ID per line, sorted, ASCII-only.
    with output_fpath.open('w', encoding='ascii') as outfile:
        outfile.write('\n'.join(str(oid) for oid in sorted(orphans)) + '\n')
def flush_rclone(self):
    """Copy all queued upload directories to their buckets via rclone.

    Does nothing unless an rclone remote is configured and data is queued.
    Creates any missing buckets, then runs ``rclone copy`` once per queued
    bucket and removes the local link tree after each successful copy.
    Re-raises ``CalledProcessError`` on rclone failure, leaving the queue
    intact for a retry.
    """
    if not self.rclone_remote or not self.rclone_queue:
        return
    click.echo()
    # Ensure every target bucket exists before any copy starts.
    # (dict.viewitems() => this module targets Python 2.)
    for name, data in self.buckets.viewitems():
        if not data['exists']:
            self.create_bucket(name)
            data['exists'] = True
    for bucket, data in self.rclone_queue.viewitems():
        click.echo(cformat('Copying %{cyan}{}%{reset} files (%{cyan}{}%{reset}) to %{cyan}{}%{reset} via rclone')
                   .format(data['files'], do_filesizeformat(data['bytes']), bucket))
        start = datetime.now()
        try:
            # --copy-links: follow the symlinks in the local staging tree.
            subprocess.check_call([
                'rclone', 'copy', '--copy-links', data['path'],
                '{}:{}'.format(self.rclone_remote, bucket)
            ])
        except subprocess.CalledProcessError:
            click.secho('\nError while running rclone', fg='red')
            raise  # abort without clearing the queue
        duration = (datetime.now() - start)
        click.echo('...finished after {}'.format(
            format_human_timedelta(duration, 'minutes', narrow=True)))
        # Drop the local staging tree once it has been copied.
        rmlinktree(data['path'])
    self.rclone_queue.clear()
def format_post_image(self, request, obj, fieldname, *args, **kwargs):
    """Render the image field as '<filename> - <human size>' in a bordered span."""
    image = getattr(obj, fieldname).image
    label = "{} - {}".format(image.filename, do_filesizeformat(image.gridout.size))
    return html.span(style='border:1px solid black;')(label)
def size(pth):
    """Return the human-readable size of *pth*, or False if it does not exist."""
    if not op.exists(pth):
        return False
    nbytes = os.stat(pth).st_size
    return do_filesizeformat(nbytes)
def size(pth):
    """Human-readable file size of *pth*; False when the path is missing."""
    exists = op.exists(pth)
    return do_filesizeformat(os.stat(pth).st_size) if exists else False
def _size_formatter(view, context, model, name):
    """Admin column formatter: pretty size, raw bytes below in light gray.

    Yields "" for None values.
    """
    raw = getattr(model, name)
    if raw is None:
        return ""
    # NOTE(review): the replace() arguments appear identical — the second is
    # presumably meant to be a non-breaking space; verify against upstream.
    human = filters.do_filesizeformat(raw).replace(' ', ' ')
    markup = "%s<div style='color: lightgray;'>%s</div>" % (human, raw)
    return Markup(markup)
import pygal
from pygal.style import RotateStyle
from jinja2.filters import do_filesizeformat

# Formatting functions
# number_formatter: thousands-separated integer, e.g. 1234567 -> '1,234,567'
number_formatter = lambda v: '{:,}'.format(v)
# bytes_formatter: human-readable filesize (second arg True -> binary units)
bytes_formatter = lambda v: do_filesizeformat(v, True)


def tables_piechart(db, by_field, value_formatter):
    '''
    Generate a pie chart of the top n tables in the database.
    `db` - the database instance
    `by_field` - the field name to sort by
    `value_formatter` - a function to use for formatting the numeric values
    '''
    # Query the system `tables` table, skipping temporary tables and
    # Buffer-engine tables (which mirror data held elsewhere).
    Tables = db.get_model_for_table('tables', system_table=True)
    qs = Tables.objects_in(db).filter(
        database=db.db_name, is_temporary=False).exclude(engine='Buffer')
    tuples = [(getattr(table, by_field), table.name) for table in qs]
    return _generate_piechart(tuples, value_formatter)


def columns_piechart(db, tbl_name, by_field, value_formatter):
    '''
    Generate a pie chart of the top n columns in the table.
    `db` - the database instance
    `tbl_name` - the table name
    `by_field` - the field name to sort by
    `value_formatter` - a function to use for formatting the numeric values
    '''