def __init__(self, communications_file, max_file_size=1073741824, add_references=True):
    """
    Args:
        communications_file (str): String specifying name of Communications file
        max_file_size (int): Maximum file size, in bytes
        add_references (bool): If True, calls
            :func:`concrete.util.references.add_references_to_communication`
            on any retrieved :class:`.Communication`
    """
    self._add_references = add_references
    self.comm_id_to_comm = {}
    comm_file_size = os.path.getsize(communications_file)
    if comm_file_size > max_file_size:
        raise Exception(
            ("MemoryBackedCommunicationContainer will not open the file "
             "'%s' because the file's size (%s) is larger than the "
             "maximum specified file size of %s. If you would like to "
             "read a file this large into memory, you will need to "
             "specify a larger value for the 'max_file_size' parameter.")
            % (communications_file,
               humanfriendly.format_size(comm_file_size, binary=True),
               humanfriendly.format_size(max_file_size, binary=True)))
    logging.info("Reading in Communications from file '%s'" % communications_file)
    logging.debug("Communication IDs:")
    for (comm, _) in CommunicationReader(communications_file,
                                         add_references=self._add_references):
        self.comm_id_to_comm[comm.id] = comm
        logging.debug("  %s" % comm.id)
    logging.info("Finished reading communications.\n")
def partitions(self):
    '''
    Collects all mounted partitions on the system. Iterates through each
    partition, ignoring devices which cannot be written to. Returns a
    dictionary object with the mount point as its key and disk attributes
    as its value.
    '''
    try:
        partitions = p.disk_partitions()
        for partition in partitions:
            mount_point, file_sys = partition.mountpoint, partition.fstype
            if mount_point.startswith('C:'):
                # Only select partitions that have a file system attribute;
                # this ignores other mounted devices such as disc drives.
                if len(file_sys) > 0:
                    stats = p.disk_usage("%s" % (str(mount_point)))
                    total, used = bc.format_size(stats[0]), bc.format_size(stats[1])
                    free, percent = bc.format_size(stats[2]), stats[3]
                    partition_handler = {'mount_point': mount_point,
                                         'total': total,
                                         'used': used,
                                         'free': free,
                                         'percent_used': percent}
                    return partition_handler
    except Exception, error:
        return int(4), str(error)
def _log_measure_comparison(self, info_measure, du_measure):
    logging.info('Additional disk that Docker demanded (measured with docker.info()): '
                 + format_size(info_measure) + '.')
    logging.info('Additional disk that Docker demanded (measured with du): '
                 + format_size(du_measure) + '.')
    difference = du_measure - info_measure
    if difference > 1024:  # du measures more than 1 KB above docker.info()
        logging.warning('du measures %s more than docker.info().' % format_size(difference))
    elif difference < -1024:  # du measures more than 1 KB below docker.info()
        logging.warning('du measures %s less than docker.info().' % format_size(difference * -1))
def report_memory_usage(lines, label, memory_usage):
    """Create a textual summary of Apache worker memory usage."""
    lines.append("")
    workers = pluralize(len(memory_usage), "worker")
    lines.append("Memory usage of %s (%s):" % (label, workers))
    lines.append(" - Minimum: %s" % format_size(memory_usage.min))
    lines.append(" - Average: %s" % format_size(memory_usage.average))
    lines.append(" - Maximum: %s" % format_size(memory_usage.max))
def beautify_image(image):
    new_image = remove_keys_from_dict(
        [u'RepoDigests', u'ParentId', u'Labels'], image)
    new_image[u'Created'] = datetime.fromtimestamp(
        image[u'Created']).isoformat(' ')
    new_image[u'Size'] = format_size(image[u'Size'])
    new_image[u'VirtualSize'] = format_size(image[u'VirtualSize'])
    return new_image
def max_size(directory):
    """Test directory for maximum size

    returns true if directory size is larger than spec property "max_size"
    """
    try:
        max_size = humanfriendly.parse_size(directory.spec['max_size'])
    except KeyError:
        raise UnregulatableError('No "max_size" configured in specification!')
    size = directory.size
    logger.debug("Directory size is %s", humanfriendly.format_size(size))
    logger.debug("max_size set to: %s", humanfriendly.format_size(max_size))
    return size > max_size
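# Editor's note: a minimal, hedged sketch of the parse_size()/format_size()
# pair used by max_size() above. Exact unit handling depends on the installed
# humanfriendly version; recent releases treat "KB"/"GB" as decimal and
# "KiB"/"GiB" as binary multiples.
import humanfriendly

limit = humanfriendly.parse_size('5 GB')       # e.g. 5000000000 (decimal suffix)
print(humanfriendly.format_size(limit))        # e.g. '5 GB'
print(humanfriendly.parse_size('1 KiB'))       # 1024 (binary suffix)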
def mem_data(self):
    '''
    Collects memory attributes from the system. Returns a dictionary
    object with total memory and available memory.
    '''
    try:
        phys_mem = p.phymem_usage()
        mem_total, mem_avail = bc.format_size(phys_mem[0]), bc.format_size(phys_mem[1])
        mem_stats = {'total_memory': mem_total, 'available_memory': mem_avail}
        return mem_stats
    except Exception, error:
        return int(5), str(error)
def fetch_worker(url):
    """
    Fetch the given URL for :func:`fetch_concurrent()`.

    :param url: The URL to fetch (a string).
    :returns: A tuple of three values:

              1. The URL that was fetched (a string).
              2. The data that was fetched (a string or :data:`None`).
              3. The number of seconds it took to fetch the URL (a number).
    """
    # Ignore Control-C instead of raising KeyboardInterrupt because (due to a
    # quirk in multiprocessing) this can cause the parent and child processes
    # to get into a deadlock kind of state where only Control-Z will get you
    # your precious terminal back; super annoying IMHO.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    timer = Timer()
    try:
        data = fetch_url(url, retry=False)
    except Exception as e:
        logger.debug("Failed to fetch %s! (%s)", url, e)
        data = None
    else:
        # The transfer rate is measured in bytes per second, not kbps.
        transfer_rate = format_size(round(len(data) / timer.elapsed_time, 2))
        logger.debug("Downloaded %s at %s per second.", url, transfer_rate)
    return url, data, timer.elapsed_time
def disk_to_dict(disk):
    """Converts a lxml.objectify.ObjectifiedElement disk object to a dict.

    :param lxml.objectify.ObjectifiedElement disk: an object containing
        EntityType.DISK XML data.

    :return: dictionary representation of disk object.

    :rtype: dict
    """
    result = {}
    result['name'] = disk.get('name')
    result['id'] = extract_id(disk.get('id'))
    result['status'] = disk.get('status')
    result['size'] = humanfriendly.format_size(int(disk.get('size')))
    result['size_bytes'] = disk.get('size')
    result['busType'] = disk.get('busType')
    result['busSubType'] = disk.get('busSubType')
    result['iops'] = disk.get('iops')
    if hasattr(disk, 'Owner'):
        result['owner'] = disk.Owner.User.get('name')
    if hasattr(disk, 'Description'):
        result['description'] = disk.Description
    if hasattr(disk, 'StorageProfile'):
        result['storageProfile'] = disk.StorageProfile.get('name')
    if hasattr(disk, 'attached_vms') and \
            hasattr(disk.attached_vms, 'VmReference'):
        result['vms_attached'] = disk.attached_vms.VmReference.get('name')
        result['vms_attached_id'] = disk.attached_vms.VmReference.get(
            'href').split('/vm-')[-1]
    return result
def _download(self):
    # Download partial content while the queue is not empty.
    while not self.configer.down_queue.empty():
        data_range = self.configer.down_queue.get()
        headers = {
            'Range': 'bytes={}-{}'.format(*data_range)
        }
        self.start_time[threading.current_thread().name] = time.time()
        self.downloaded[threading.current_thread().name] = 0
        response = requests.get(
            self.configer.url, stream=True, headers=headers
        )
        start_point = data_range[0]
        for bunch in response.iter_content(self.block_size):
            with self.file_lock:
                self.downloaded_size += len(bunch)
                with open(self.configer.path, 'r+b', buffering=1) as f:
                    f.seek(start_point)
                    self.downloaded[threading.current_thread().name] += f.write(bunch)
                    f.flush()
            # Advance by the number of bytes actually received; iter_content
            # may yield chunks shorter than block_size.
            start_point += len(bunch)
            self.status[threading.current_thread().name] = \
                humanfriendly.format_size(
                    int(self.downloaded[threading.current_thread().name]
                        / (time.time()
                           - self.start_time[threading.current_thread().name]))
                ) + '/s'
        self.configer.down_queue.task_done()
    self.status[threading.current_thread().name] = 'Done'
def list_disks(ctx):
    try:
        restore_session(ctx, vdc_required=True)
        client = ctx.obj['client']
        vdc_href = ctx.obj['profiles'].get('vdc_href')
        vdc = VDC(client, href=vdc_href)
        disks = vdc.get_disks()
        result = []
        for disk in disks:
            attached_vms = ''
            if hasattr(disk, 'attached_vms') and \
                    hasattr(disk.attached_vms, 'VmReference'):
                attached_vms = disk.attached_vms.VmReference.get('name')
            result.append({
                'name': disk.get('name'),
                'id': extract_id(disk.get('id')),
                'owner': disk.Owner.User.get('name'),
                'size': humanfriendly.format_size(int(disk.get('size'))),
                'size_bytes': disk.get('size'),
                'status': VCLOUD_STATUS_MAP.get(int(disk.get('status'))),
                'vms_attached': attached_vms
            })
        stdout(result, ctx, show_id=True)
    except Exception as e:
        stderr(e, ctx)
def render(self):
    file_size = os.path.getsize(self.file_path)
    if file_size > settings.MAX_FILE_SIZE:
        raise exceptions.FileTooLargeError(
            'Tabular files larger than {} are not rendered. Please download '
            'the file to view.'.format(format_size(settings.MAX_FILE_SIZE, binary=True)),
            file_size=file_size,
            max_size=settings.MAX_FILE_SIZE,
            extension=self.metadata.ext,
        )

    with open(self.file_path, errors='replace') as fp:
        sheets, size, nbr_rows, nbr_cols = self._render_grid(fp, self.metadata.ext)

    # Force GC
    gc.collect()

    if sheets and size:
        return self.TEMPLATE.render(
            base=self.assets_url,
            width=settings.TABLE_WIDTH,
            height=settings.TABLE_HEIGHT,
            sheets=json.dumps(sheets),
            options=json.dumps(size),
        )

    assert nbr_rows and nbr_cols
    raise exceptions.TableTooBigError(
        'Table is too large to render.',
        extension=self.metadata.ext,
        nbr_cols=nbr_cols,
        nbr_rows=nbr_rows,
    )
def close(self):
    assert not self._closed
    self.archive.close()
    self.archive_file.seek(0)
    self._closed = True
    size = os.path.getsize(self.archive_file.name)
    logger.info('Archive: {}'.format(format_size(size)))
def galleries_list(fspath, stat=False, **args):
    term.banner("LIST OF GALLERIES")
    compact = not stat
    galleries = search_galleries(fspath)
    if compact:
        print(term.em('# '), end='')
        print_gallery_name('Name', term.em)
        print(term.em("{1}{0}".format('Albums', SYMBOL_SEPARATOR_CLEAR)))
    for i, gallery in enumerate(galleries):
        if compact:
            print(term.p("{:<5d}".format(i)), end='')
            print_gallery_name(gallery.name)
            print(term.p("{1}{0}".format(
                ', '.join(str(x) for x in gallery.albums),
                SYMBOL_SEPARATOR
            )))
        else:
            print_gallery_name(gallery.name, term.em, end='\n')
            print(term.p("{0:>20}: {1}".format(
                'Albums', ', '.join(str(x) for x in gallery.albums))))
            nop, notp, sod = gallery_stat(gallery.path)
            print(term.p("{0:>20}: {1!s}".format("Number of pictures", nop)))
            print(term.p("{0:>20}: {1}".format(
                "Size on disk", humanfriendly.format_size(sod))))
            print()
def healthcheck():
    import psutil
    import humanfriendly

    now = time.time()
    pid = os.getpid()
    ppid = os.getppid()
    # Inspect the parent process; how about launching under gunicorn?
    current_process = psutil.Process(pid=ppid)
    process_uptime = current_process.create_time()
    process_uptime_delta = now - process_uptime
    process_uptime_human = humanfriendly.format_timespan(process_uptime_delta)

    system_uptime = psutil.boot_time()
    system_uptime_delta = now - system_uptime
    system_uptime_human = humanfriendly.format_timespan(system_uptime_delta)

    # This measures free disk space on '/', not memory.
    free_disk_space = psutil.disk_usage('/').free
    free_disk_space_human = humanfriendly.format_size(free_disk_space)

    return {
        'status': 'Operational',
        'free_disk_space': free_disk_space_human,
        'system_uptime': system_uptime_human,
        'process_uptime': process_uptime_human,
    }
def test_format_size(self):
    self.assertEqual('0 bytes', humanfriendly.format_size(0))
    self.assertEqual('1 byte', humanfriendly.format_size(1))
    self.assertEqual('42 bytes', humanfriendly.format_size(42))
    self.assertEqual('1 KB', humanfriendly.format_size(1024 ** 1))
    self.assertEqual('1 MB', humanfriendly.format_size(1024 ** 2))
    self.assertEqual('1 GB', humanfriendly.format_size(1024 ** 3))
    self.assertEqual('1 TB', humanfriendly.format_size(1024 ** 4))
    self.assertEqual('1 PB', humanfriendly.format_size(1024 ** 5))
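# Editor's note: the assertions above match older humanfriendly releases,
# where format_size() defaulted to binary multiples. On humanfriendly >= 4.0
# decimal units are the default, so roughly equivalent checks would be:
def test_format_size_binary(self):
    self.assertEqual('1.02 KB', humanfriendly.format_size(1024))
    self.assertEqual('1 KiB', humanfriendly.format_size(1024, binary=True))
    self.assertEqual('1 MiB', humanfriendly.format_size(1024 ** 2, binary=True))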
def download_url(result, directory, max_bytes):
    """
    Download a file.
    """
    response = requests.get(result['download_url'], stream=True)

    # TODO: make this more robust by parsing the URL
    filename = response.url.split('/')[-1]
    filename = re.sub(r'\?.*$', '', filename)
    filename = '{}-{}'.format(result['user']['id'], filename)

    size = int(response.headers['Content-Length'])
    if size > max_bytes:
        print 'Skipping {}, {} > {}'.format(filename, format_size(size),
                                            format_size(max_bytes))
        return

    print 'Downloading {} ({})'.format(filename, format_size(size))
    output_path = os.path.join(directory, filename)
    try:
        stat = os.stat(output_path)
        if stat.st_size == size:
            print 'Skipping "{}"; file exists and is the right size'.format(
                filename)
            return
        else:
            print 'Removing "{}"; file exists and is the wrong size'.format(
                filename)
            os.remove(output_path)
    except OSError:
        # TODO: check errno here?
        pass

    with open(output_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                f.write(chunk)
    print 'Downloaded {}'.format(filename)
def format_stats(stats):
    result = " \n"
    result += "File types | Count | Total Size\n"
    result += ":-- | :-- | :-- \n"
    counter = 0
    for mime in stats["mime_stats"]:
        result += mime[2]
        result += " | " + str(mime[1])
        result += " | " + humanfriendly.format_size(mime[0]) + " \n"
        counter += 1
        if counter >= 3:
            break
    result += "**Total** | **" + str(stats["total_count"]) + "** | **"
    result += humanfriendly.format_size(stats["total_size"]) + "** \n\n"
    return result
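# Editor's note: with a hypothetical stats dict, format_stats() above emits a
# small Markdown table shaped like this (values illustrative only):
#
#   File types | Count | Total Size
#   :-- | :-- | :--
#   application/pdf | 12 | 3.4 MB
#   image/png | 8 | 1.2 MB
#   **Total** | **40** | **9.1 MB**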
def runTests(self):
    """
    Run the tests prescribed in the configuration
    """
    sites = self.get_sites()
    templates_dir = os.path.join(sys.prefix, "etc/stashcache-tester/templates")

    # Parse the size of the test in bytes
    raw_testsize = humanfriendly.parse_size(get_option("testsize"))
    md5sum = self.createTestFile(raw_testsize, get_option("stashdir"))

    # Create the site specific tests
    env = Environment(loader=FileSystemLoader(templates_dir))
    env.globals = {
        "config_location": self.config_location,
        "stash_test_location": os.path.abspath(sys.argv[0]),
        "pythonpath": ":".join(sys.path),
        "testurl": get_option("testurl"),
        "localpath": get_option("stashdir"),
        "testsize": raw_testsize,
        "humantestsize": humanfriendly.format_size(raw_testsize)
    }

    test_dirs = []
    testingdir = get_option("testingdir")
    for site in sites:
        tmp_site = Site(site)
        test_dir = tmp_site.createTest(testingdir, env)
        test_dirs.append(test_dir)

    # Create the DAG from the template
    dag_template = env.get_template("dag.tmpl")
    test_dag = os.path.join(testingdir, "submit.dag")
    with open(test_dag, 'w') as f:
        f.write(dag_template.render(sites=sites, md5sum=md5sum))

    reduce_template = env.get_template("test_reduce.tmpl")
    reduce_submit = os.path.join(testingdir, "reduce.submit")
    with open(reduce_submit, 'w') as f:
        f.write(reduce_template.render())

    shutil.copyfile(os.path.join(templates_dir, "site_post.py"),
                    os.path.join(get_option("testingdir"), "site_post.py"))
    os.chmod(os.path.join(get_option("testingdir"), "site_post.py"), 0755)

    # Start the DAG
    (stdout, stderr) = RunExternal("cd %s; condor_submit_dag submit.dag" % testingdir)
    logging.debug("output from condor_submit_dag: %s" % stdout)
    if stderr is not None and stderr != "":
        logging.error("Error from condor_submit_dag: %s" % stderr)
def get_dir_size(self):
    """Will go one level deeper and sum the sizes of all objects.
    If the objects are directories, you'll have to do the iteration
    yourself!"""
    total = humanfriendly.parse_size(self.size)
    for f in self.list_dir():
        total += humanfriendly.parse_size(f.size)
    # total is already a byte count; no need to round-trip it through
    # parse_size() again before formatting.
    return humanfriendly.format_size(total)
def run(self):
    p_color = ""
    disk = ps.disk_usage(self.instance)
    if disk.percent >= 90:
        p_color = " color='#f44'"
    self.full_text = "{} (<span{}>{}%</span>)".format(
        format_size(disk.free), p_color, disk.percent)
    self.short_text = "<span{}>{}%</span>".format(p_color, disk.percent)
    self._to_json()
def test_beautify_image(self):
    image = {
        u'Id': u'1',
        u'RepoDigests': 'digest',
        u'ParentId': u'0',
        u'Labels': 'labels',
        u'Created': 1441050417,
        u'Size': 1048576,
        u'VirtualSize': 1024
    }
    exp_image = {
        u'Id': u'1',
        u'Created': datetime.fromtimestamp(
            image[u'Created']).isoformat(' '),
        u'Size': format_size(image[u'Size']),
        u'VirtualSize': format_size(image[u'VirtualSize'])
    }
    self.assertEqual(di_cleaner.beautify_image(image), exp_image,
                     msg='Unexpected result')
def show_package_metadata(archive):
    control_fields, contents = inspect_package(archive)
    print("Package metadata from %s:" % format_path(archive))
    for field_name in sorted(control_fields.keys()):
        value = control_fields[field_name]
        if field_name == 'Installed-Size':
            value = format_size(int(value) * 1024)
        print(" - %s: %s" % (field_name, value))
    print("Package contents from %s:" % format_path(archive))
    for pathname, entry in sorted(contents.items()):
        size = format_size(entry.size, keep_width=True)
        if len(size) < 10:
            size = ' ' * (10 - len(size)) + size
        if entry.target:
            pathname += ' -> ' + entry.target
        print("{permissions} {owner} {group} {size} {modified} {pathname}".format(
            permissions=entry.permissions, owner=entry.owner, group=entry.group,
            size=size, modified=entry.modified, pathname=pathname))
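# Editor's note (assumption about the humanfriendly API): keep_width=True
# pads the formatted number with trailing zeros (e.g. '1.50 MB' rather than
# '1.5 MB') so the right-aligned size column above keeps a constant width.
#
#   format_size(1500000)                   # '1.5 MB'
#   format_size(1500000, keep_width=True)  # '1.50 MB'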
def render(self):
    file_size = os.path.getsize(self.file_path)
    if file_size > settings.MAX_SIZE:
        raise cp_exceptions.FileTooLargeException(
            'Text files larger than {} are not rendered. Please download the '
            'file to view.'.format(format_size(settings.MAX_SIZE, binary=True))
        )
    with open(self.file_path, 'rb') as fp:
        body = self._render_html(fp, self.metadata.ext)
    return self.TEMPLATE.render(base=self.assets_url, body=body)
def initializeTorrent(self):
    self.torrent = dottorrent.Torrent(self.inputEdit.text())
    try:
        t_info = self.torrent.get_info()
    except Exception as e:
        self.torrent = None
        self._showError(str(e))
        return
    ptail = os.path.split(self.torrent.path)[1]
    if self.inputMode == 'file':
        self._statusBarMsg("{}: {}".format(
            ptail, humanfriendly.format_size(t_info[0], binary=True)))
    else:
        self._statusBarMsg("{}: {} files, {}".format(
            ptail, t_info[1], humanfriendly.format_size(t_info[0], binary=True)))
    self.pieceSizeComboBox.setCurrentIndex(0)
    self.updatePieceCountLabel(t_info[2], t_info[3])
    self.pieceCountLabel.show()
    self.createButton.setEnabled(True)
def getFilesize(myfile=''):
    if not os.path.isfile(myfile):
        debug_msg("File: " + myfile + " does not exist")
        close()
    size = os.path.getsize(myfile)
    sizestr = humanfriendly.format_size(size)
    debug_msg(" - Size: " + str(size) + " bytes or " + sizestr)
    return size
def main():
    mystr = "63.3k"
    print("mystr is a " + str(type(mystr)) + " and holds the value: " + mystr)
    # myconvstr = float(mystr)
    myconvstr = humanfriendly.parse_size(mystr)
    print("myconvstr is a " + str(type(myconvstr)) + " and holds the value: " + str(myconvstr))
    print("lets see if we can convert it back again...")
    mystr2 = humanfriendly.format_size(myconvstr)
    print("mystr2 is of type " + str(type(mystr2)) + " and holds the value: " + mystr2)
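# Editor's note: worked values for the round trip above, assuming a recent
# humanfriendly release where "k" is a decimal (x1000) suffix:
#
#   humanfriendly.parse_size("63.3k")   # 63300
#   humanfriendly.format_size(63300)    # '63.3 KB'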
def images(client, _args):
    image_list = client.images()
    print('Available images: ')
    for image in image_list:
        print(' - Fingerprint: {}'.format(image['fingerprint']))
        if image['aliases']:
            aliases = map(lambda a: a['name'], image['aliases'])
            print('   Aliases: {}'.format(', '.join(aliases)))
        if 'description' in image['properties']:
            print('   Description: {}'.format(image['properties']['description']))
        print('   Size: {}'.format(format_size(image['size'], binary=True)))
def report(upload_pipe, verify_pipe, start_time, wait_for_verify):
    report = {}
    upload_done = False
    verify_done = not wait_for_verify
    while True:
        if wait_for_verify:
            v = verify_pipe.recv()
            if v:
                if v == 'DONE':
                    print('Verify DONE')
                    verify_done = True
                else:
                    if v[0] not in report:
                        report[v[0]] = {}
                    report[v[0]]['s3_done_time'] = v[1]
        u = upload_pipe.recv()
        if u == 'DONE':
            print('Upload DONE')
            upload_done = True
        elif u:
            if u[0] not in report:
                report[u[0]] = {}
            report[u[0]]['size'] = u[1]
            report[u[0]]['copy_done_time'] = u[2]
        if verify_done and upload_done:
            break
    total_size = 0
    for k in report:
        if wait_for_verify:
            report[k]['s3_latency'] = (
                report[k]['s3_done_time'] -
                report[k]['copy_done_time']).total_seconds() * 1000
        total_size = total_size + report[k]['size']
    write_report(report, wait_for_verify)
    time_in_secs = (datetime.datetime.now() - start_time).total_seconds()
    print('Total Size: {}'.format(humanfriendly.format_size(total_size)))
    print('File Count: {}'.format(len(report)))
    print('Execution Time: {} second(s)'.format(time_in_secs))
    print('Performance: {}/sec'.format(
        humanfriendly.format_size(total_size / time_in_secs)))
def convert_to_html(filename, input_encoding='UTF-8'):
    """
    Convert a file with Markdown or reStructuredText markup to HTML.

    :param filename: The filename of the text file to convert (a string).
    :param input_encoding: The encoding of the text file (a string).
    :returns: A tuple of two strings:

              1. The HTML to embed in the ``<head>``.
              2. The HTML to embed in the ``<body>``.
    """
    # Determine the filename extension.
    basename, extension = os.path.splitext(filename)
    extension = extension.lower()
    # Read the input file into a Unicode string.
    with codecs.open(filename, encoding=input_encoding) as handle:
        text = handle.read()
    # Convert the input file.
    timer = Timer()
    if extension in MARKDOWN_EXTENSIONS:
        logger.debug("Filename extension of input file (%s) indicates Markdown.", extension)
        converter = Markdown(HtmlRenderer())
        head = ''
        body = converter.render(text)
    elif extension in RESTRUCTUREDTEXT_EXTENSIONS:
        logger.debug("Filename extension of input file (%s) indicates reStructuredText.", extension)
        parts = publish_parts(source=text,
                              writer_name='html',
                              settings_overrides=dict(doctitle_xform=False))
        head = parts['stylesheet']
        body = parts['html_body']
    else:
        msg = "Input file not supported! (filename extension %s not recognized)"
        raise ValueError(msg % extension)
    logger.debug("Converted %s input text to %s HTML in %s.",
                 format_size(len(text)), format_size(len(head) + len(body)), timer)
    return head, body
def render_tab(self, ctx):
    # Update system info
    self.update_sysinfo()

    # Print CPU usage
    for i, cpu_usage in enumerate(self.cpu_usages):
        ctx.fg_color(Screen.WHITE)
        ctx.write("CPU %d:" % i).fg_color(Screen.YELLOW) \
           .write_line(" %.2f %%" % (cpu_usage * 100)) \
           .fg_color(Screen.WHITE).write("[")
        if cpu_usage < self.YELLOW_THRESHOLD:
            ctx.fg_color(Screen.GREEN)
        elif cpu_usage >= self.YELLOW_THRESHOLD and cpu_usage <= self.RED_THRESHOLD:
            ctx.fg_color(Screen.YELLOW)
        else:
            ctx.fg_color(Screen.RED)
        ctx.write(get_progress_bar(ctx.get_columns() - 2, cpu_usage)) \
           .fg_color(Screen.WHITE).write("]")

    # Print RAM
    used = humanfriendly.format_size(self.used_ram)
    total = humanfriendly.format_size(self.total_ram)
    ctx.linebreak().write_line("RAM").fg_color(Screen.YELLOW) \
       .write_line("%s / %s" % (used, total)).fg_color(Screen.WHITE)
    ram_usage = float(self.used_ram) / float(self.total_ram)
    ctx.write("[")
    if ram_usage < 0.33:
        ctx.fg_color(Screen.GREEN)
    elif ram_usage >= 0.33 and ram_usage <= 0.66:
        ctx.fg_color(Screen.YELLOW)
    else:
        ctx.fg_color(Screen.RED)
    ctx.write(get_progress_bar(ctx.get_columns() - 2, ram_usage)) \
       .fg_color(Screen.WHITE).write("]")

    # Print uptime
    ctx.linebreak().write_line("Uptime:").fg_color(Screen.YELLOW) \
       .write_line("%s" % format_timespan(self.uptime)).fg_color(Screen.WHITE)
def print_export_list(data):
    colnames = ['Filename', 'Owner', 'Modified', 'Size', 'Type', 'Exportable']
    values = []
    for entry in data['files']:
        size = humanfriendly.format_size(
            entry['size'] if entry['size'] is not None else 0)
        row = [
            entry['filename'],
            entry['owner'],
            entry['modified_date'],
            size,
            entry['mime-type'],
            'No' if entry['exportable'] is None else 'Yes'
        ]
        values.append(row)
    print(humanfriendly.tables.format_pretty_table(sorted(values), colnames))
def build(self):
    """Call docker build to create an image for the compose service
    (e.g. gcc54) that this instance was configured with."""
    logging.info("Starting build for service %s." % self.service)
    subprocess.check_call("docker-compose build --no-cache %s" % self.service,
                          shell=True)
    output = subprocess.check_output(
        "docker image inspect %s --format '{{.Size}}'" % self.created_image_name,
        shell=True)
    size = int(output.decode().strip())
    logging.info("%s image size: %s" % (self.created_image_name, format_size(size)))
def to_size(value, binary=True):
    if binary is True:
        # Measure the magnitude in binary (1024) steps...
        count = 0
        while value >= 1024:
            value /= 1024
            count += 1
        # ...then re-scale using decimal (1000) steps before formatting.
        for i in range(count):
            value *= 1000
    out = humanfriendly.format_size(value)
    # Drop the space between number and unit, e.g. '2 KB' -> '2KB'.
    out = ''.join(out.split(' '))
    return out
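# Editor's note: a worked trace of to_size() (output assumes a humanfriendly
# version that formats in decimal units by default):
#
#   to_size(2048)  ->  2048 / 1024 = 2, count = 1   (binary magnitude)
#                  ->  2 * 1000 = 2000               (decimal re-scale)
#                  ->  format_size(2000) = '2 KB'    ->  '2KB'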
def check_sync(self):
    rrd_size_alert_threshold = 1073741824  # 1 GiB

    try:
        used = psutil.disk_usage('/var/db/collectd/rrd').used
    except FileNotFoundError:
        raise UnavailableException()

    if used > rrd_size_alert_threshold:
        return Alert(ReportingDbAlertClass,
                     args=humanfriendly.format_size(used),
                     key=None)
async def info_bot(self, ctx: cmd.Context):
    "Get information about the bot!"
    emoji = self.bot.get_cog("TapTitansModule").emoji("elixum")
    embed = await self.bot.embed()
    embed.title = f"{emoji} {self.bot.user}"
    embed.description = "Please insert coin to continue."
    embed.color = 0x473080
    embed.set_thumbnail(url=emoji.url)
    embed.add_field(
        name="Author",
        value=str(self.bot.get_user(305879281580638228)),
        inline=False,
    )
    embed.add_field(
        name="Memory",
        value=humanfriendly.format_size(self.process.memory_full_info().uss),
    )
    embed.add_field(
        name="CPU",
        value="{:.2f}%".format(self.process.cpu_percent() / psutil.cpu_count()),
    )
    total_members = sum(1 for _ in self.bot.get_all_members())
    total_online = len({
        m.id for m in self.bot.get_all_members()
        if m.status is not discord.Status.offline
    })
    total_unique = len(self.bot.users)
    embed.add_field(name="Guilds", value=len(self.bot.guilds))
    prem = await self.bot.db.hgetall("premium")
    embed.add_field(name="Premium Servers", value=len(prem))
    text_channels = []
    voice_channels = []
    for guild in self.bot.guilds:
        voice_channels.extend(guild.voice_channels)
        text_channels.extend(guild.text_channels)
    text = len(text_channels)
    voice = len(voice_channels)
    embed.add_field(
        name="Channels",
        value=f"{text + voice:,} total - {text:,} text - {voice:,} voice",
    )
    embed.add_field(
        name="Members",
        value=f"{total_members} total - {total_unique} unique - {total_online} online",
        inline=False,
    )
    await ctx.send(embed=embed)
def __str__(self):
    return "Duplicate File Scan using {!r} ({}): " \
           "{} folders discovered, {} files scanned totaling {}".format(
               self.hash_type,
               "complete" if self.complete else "incomplete",
               self.num_discovered_folders,
               self.num_scanned_files,
               humanfriendly.format_size(self.total_bytes_scanned_files, binary=True)
           )
def status(client, _args):
    info = client.status()
    print('Container status: {}'.format(info['status']))
    if info['disk']:
        print('Disks:')
        for name, data in info['disk'].items():
            print(' - {}: Used {}'.format(name, format_size(data['usage'], binary=True)))
    print('Memory use: {}'.format(format_size(info['memory']['usage'], binary=True)))
    print('Running processes: {}'.format(info['processes']))
    if info['network'] and not (len(info['network']) == 1 and 'lo' in info['network']):
        print('Network interfaces:')
        for name, data in info['network'].items():
            if name == 'lo':
                continue
            print(' - {} ({}):'.format(name, data['hwaddr']))
            print('   Sent/received: {}/{}'.format(
                format_size(data['counters']['bytes_sent'], binary=True),
                format_size(data['counters']['bytes_received'], binary=True)))
            for addr in data['addresses']:
                print('   IPv{} address: {}/{}'.format(
                    '6' if addr['family'] == 'inet6' else '4',
                    addr['address'], addr['netmask']))
def size(self, *, human_readable=False):
    # du -h could pretty-print this directly, but its unit conventions may
    # not match XlPath.size, so we recompute with humanfriendly instead.
    try:
        res = self.client.exec(f'du "{self}" -s -b')
    except SshCommandError:
        res = '0'
    sz = int(re.match(r'\d+', res).group())
    if human_readable:
        return humanfriendly.format_size(sz, binary=True)
    else:
        return sz
def check_sync(self):
    rrd_size_alert_threshold = 1610611911  # bytes

    try:
        used = shutil.disk_usage('/var/db/collectd/rrd').used
    except FileNotFoundError:
        raise UnavailableException()

    if used > rrd_size_alert_threshold:
        # zfs list reports in kibi/mebi/gibi(bytes) but format_size()
        # calculates in kilo/mega/giga by default, so the report that we
        # send the user needs to match up with what zfs list reports as
        # to not confuse anyone.
        used = format_size(used, binary=True)
        threshold = format_size(rrd_size_alert_threshold, binary=True)
        return Alert(ReportingDbAlertClass,
                     {'used': used, 'threshold': threshold},
                     key=None)
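# Editor's note: an illustration of why binary=True matters above (values
# approximate): the same byte count reads differently under decimal and
# binary prefixes, and zfs list reports binary multiples.
#
#   format_size(1610611911)               # ~'1.61 GB' (decimal)
#   format_size(1610611911, binary=True)  # ~'1.5 GiB' (binary)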
def run(self):
    """
    The resource monitor loop itself
    """
    # Determine which filesystem the Docker daemon uses for storing its data directory
    dockerInfo = DockerUtils.info()
    rootDir = dockerInfo["DockerRootDir"]

    # If we cannot access the Docker data directory (e.g. when the daemon is
    # in a Moby VM), don't report disk space
    reportDisk = os.path.exists(rootDir)

    # Sample the CPU usage using an interval of 1 second the first time to prime the system
    # (See: <https://psutil.readthedocs.io/en/latest/#psutil.cpu_percent>)
    psutil.cpu_percent(1.0)

    # Loop until asked to stop
    while True:
        # Check that the thread has not been asked to stop
        with self._lock:
            if self._shouldStop == True:
                return

        # Format the timestamp for the current time in ISO 8601 format
        # (albeit without the "T" separator)
        isoTime = datetime.datetime.now().replace(microsecond=0).isoformat(" ")

        # We format data sizes using binary units (KiB, MiB, GiB, etc.)
        formatSize = lambda size: humanfriendly.format_size(size, binary=True, keep_width=True)

        # Format the current quantity of available disk space on the Docker
        # data directory's filesystem
        diskSpace = (formatSize(shutil.disk_usage(rootDir).free)
                     if reportDisk == True else "Unknown")

        # Format the current quantity of available system memory
        physicalMemory = formatSize(psutil.virtual_memory().free)
        virtualMemory = formatSize(psutil.swap_memory().free)

        # Format the current CPU usage levels
        cpu = psutil.cpu_percent()

        # Report the current levels of our available resources
        self._logger.info(
            "[{}] [Available disk: {}] [Available memory: {} physical, {} virtual] [CPU usage: {:.2f}%]"
            .format(isoTime, diskSpace, physicalMemory, virtualMemory, cpu),
            False,
        )

        # Sleep until the next sampling interval
        time.sleep(self._interval)
def get_used_avg_blob_capacity(credentials, subscription_id):
    resource_client = ResourceManagementClient(credentials, subscription_id)
    storage_client = StorageManagementClient(credentials, subscription_id)
    lst = []
    count = 0
    resource_groups = resource_client.resource_groups.list()
    for group in resource_groups:
        storage_accounts = storage_client.storage_accounts.list_by_resource_group(
            group.name)
        for storage_account in storage_accounts:
            print("Reading metric data from storage account: " + storage_account.name)
            count += 1
            blob_size = get_metric_data_capacity(group.name, storage_account.name,
                                                 subscription_id,
                                                 Metric_type.blob_capacity)
            file_size = get_metric_data_capacity(group.name, storage_account.name,
                                                 subscription_id,
                                                 Metric_type.fileshare_capacity)
            total_size = blob_size + file_size
            if math.isnan(total_size):
                total_size_friendly = ''
            else:
                total_size_friendly = humanfriendly.format_size(total_size)
            lst.append([storage_account.name, group.name, blob_size, file_size,
                        total_size, total_size_friendly])
    print("Total number of storage accounts: " + str(count))
    cols = ['Storage account', 'Resource group', 'Blob capacity',
            'File capacity', 'Total capacity', 'Total capacity (friendly)']
    df = pd.DataFrame(lst, columns=cols)
    file_name = 'metrics_' + datetime.datetime.now().strftime('%m-%d-%y-%H%M%S') + '.csv'
    df.to_csv(file_name, header=cols, index=False)
    print("\n")
    print("Metrics saved to file: " + file_name)
    return file_name
def download_media(plex, sync_title, media, part, opts, downloaded_callback,
                   max_allowed_size_diff_percent=0):
    log.debug('Checking media#%d %s', media.ratingKey, media.title)
    filename = sanitize_filename(pretty_filename(media, part))
    filename_tmp = filename + '.part'
    if '#' in sync_title:
        sync_title, _ = sync_title.split('#', 1)
        sync_title = sync_title.strip()
    savepath = os.path.join(opts.destination, sanitize_filename(sync_title, True))
    if os.sep.join(os.path.join(savepath, filename).split(os.sep)[-2:]) in opts.skip:
        log.info('Skipping file %s from %s due to cli arguments', filename, savepath)
        return
    if media.TYPE == 'movie' and opts.subdir:
        savepath = os.path.join(savepath, sanitize_filename(os.path.splitext(filename)[0]))
    part_key = part.key
    if part.decision == 'directplay':
        part_key = '/' + '/'.join(part_key.split('/')[3:])
    url = part._server.url(part_key)
    log.info('Downloading %s to %s, file size is %s', filename, savepath,
             format_size(part.size, binary=True))
    makedirs(savepath, exist_ok=True)
    path = os.path.join(savepath, filename)
    path_tmp = os.path.join(savepath, filename_tmp)
    if not opts.resume_downloads and os.path.isfile(path_tmp) \
            and os.path.getsize(path_tmp) != part.size:
        os.unlink(path_tmp)
    if os.path.isfile(path_tmp) and os.path.getsize(path_tmp) > part.size:
        log.error('File "%s" has an unexpected size (actual: %d, expected: %d), removing it',
                  path_tmp, os.path.getsize(path_tmp), part.size)
        os.unlink(path_tmp)
    if not os.path.isfile(path_tmp) or os.path.getsize(path_tmp) != part.size:
        try:
            download(url, token=plex.authenticationToken, session=media._server._session,
                     filename=filename_tmp, savepath=savepath, showstatus=True,
                     rate_limit=opts.rate_limit)
        except BaseException:  # handle all exceptions, anyway we'll re-raise them
            if os.path.isfile(path_tmp) and os.path.getsize(path_tmp) != part.size \
                    and not opts.resume_downloads:
                os.unlink(path_tmp)
            raise
    if not os.path.isfile(path_tmp) \
            or abs(1 - os.path.getsize(path_tmp) / part.size) > max_allowed_size_diff_percent:
        log.error('File "%s" has an unexpected size (actual: %d, expected: %d)',
                  path_tmp, os.path.getsize(path_tmp), part.size)
        raise ValueError('Downloaded file size is not the same as expected')
    downloaded_callback(media, part, filename)
    os.rename(path_tmp, path)
def directory_manage(dir_id):
    if "admin" in session and session["admin"]:
        directory = storage.dirs()[dir_id]
        tn_size = get_dir_size("static/thumbnails/" + str(dir_id))
        tn_size_formatted = humanfriendly.format_size(tn_size)
        return render_template("directory_manage.html", directory=directory,
                               tn_size=tn_size,
                               tn_size_formatted=tn_size_formatted)
    flash("You are not authorized to access this page", "warning")
    return redirect("/")
def render_tab(self, ctx):
    self.update_disk_usage()
    for device_name, usage in self.disk_usage.iteritems():
        ctx.write_line("%s" % device_name)
        ctx.fg_color(Screen.YELLOW).write_line(
            "%s / %s" % (humanfriendly.format_size(usage["used"]),
                         humanfriendly.format_size(usage["total"])))
        ctx.fg_color(Screen.WHITE).write("[")
        usage_percent = float(usage["used"]) / float(usage["total"])
        if usage_percent < self.YELLOW_THRESHOLD:
            ctx.fg_color(Screen.GREEN)
        elif usage_percent >= self.YELLOW_THRESHOLD and usage_percent <= self.RED_THRESHOLD:
            ctx.fg_color(Screen.YELLOW)
        else:
            ctx.fg_color(Screen.RED)
        ctx.write(get_progress_bar(ctx.get_columns() - 2, usage_percent)) \
           .fg_color(Screen.WHITE).write("]").linebreak()
def df(self):
    # Create tmp megarc file
    tmp = tempfile.NamedTemporaryFile(delete=True)
    tmp.write('[Login]\nUsername = %s\nPassword = %s\n' % (str(self.email), str(self.passwd)))
    tmp.flush()
    command = 'megadf --reload --config=%s' % tmp.name
    try:
        subprocess.check_call(command, stdout=FNULL, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError:
        tmp.close()
        return 1
    result = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    output, err = result.communicate()
    tmp.close()  # deletes tmp megarc file
    # Get data
    df_data_raw = output.split('\n')
    df_total_raw = str(df_data_raw[0]).split(':')
    df_total_bytes = int(str(df_total_raw[1]).strip())
    df_used_raw = str(df_data_raw[1]).split(':')
    df_used_bytes = int(str(df_used_raw[1]).strip())
    df_free_raw = str(df_data_raw[2]).split(':')
    df_free_bytes = int(str(df_free_raw[1]).strip())
    df_total = humanfriendly.format_size(df_total_bytes, binary=True)
    df_used = humanfriendly.format_size(df_used_bytes, binary=True)
    df_free = humanfriendly.format_size(df_free_bytes, binary=True)
    # Return values ready to insert in database
    return (self.id, self.name, df_total_bytes, df_free_bytes, df_used_bytes,
            df_total, df_free, df_used)
def upload_file(target_filepath, metadata, access_token, project_member_id,
                remote_file_info=None, base_url=OH_BASE_URL,
                max_bytes=MAX_FILE_DEFAULT):
    """
    Upload a file.
    """
    filesize = os.stat(target_filepath).st_size
    if filesize > max_bytes:
        logging.info('Skipping {}, {} > {}'.format(
            target_filepath, format_size(filesize), format_size(max_bytes)))
        return
    if remote_file_info:
        response = requests.get(remote_file_info['download_url'], stream=True)
        remote_size = int(response.headers['Content-Length'])
        if remote_size == filesize:
            logging.info('Skipping {}, remote exists with matching name and '
                         'file size'.format(target_filepath))
            return
    url = urlparse.urljoin(
        base_url,
        '/api/direct-sharing/project/files/upload/?{}'.format(
            urlparse.urlencode({'access_token': access_token})))
    logging.info('Uploading {} ({})'.format(target_filepath, format_size(filesize)))
    r = requests.post(url,
                      files={'data_file': open(target_filepath, 'rb')},
                      data={'project_member_id': project_member_id,
                            'metadata': json.dumps(metadata)})
    logging.info('Upload complete: {}'.format(target_filepath))
def net_data(self):
    '''
    Collects network details from all interfaces on the system. Returns a
    dictionary object of the network statistics collected.
    '''
    try:
        net_stat = p.net_io_counters()
        bytes_sent, bytes_recvd = bc.format_size(net_stat[0]), bc.format_size(net_stat[1])
        packets_sent, packets_recvd = net_stat[2], net_stat[3]
        err_pkt_in, err_pkt_out = net_stat[4], net_stat[5]
        drpd_pkt_in, drpd_pkt_out = net_stat[6], net_stat[7]
        pkt_data = {'bytes_sent': bytes_sent, 'bytes_recvd': bytes_recvd,
                    'packets_sent': packets_sent, 'packets_recvd': packets_recvd,
                    'err_pkt_in': err_pkt_in, 'err_pkt_out': err_pkt_out,
                    'drpd_pkt_in': drpd_pkt_in, 'drpd_pkt_out': drpd_pkt_out}
        return pkt_data
    except Exception, error:
        return int(7), str(error)
def get_handler(self, method, filename, size=None, progress=None, download_rate=None):
    if self._current_package_index is None or self._packages_count is None or not progress:
        return
    if size:
        try:
            size = humanfriendly.format_size(int(size))
        except Exception:
            pass
    if download_rate:
        try:
            download_rate = humanfriendly.format_size(int(download_rate)) + "/s"
        except Exception:
            pass
    job_progress = (
        ((self._current_package_index + progress / 100) / self._packages_count)
        * self.download_proportion)
    filename = filename.rsplit('/', 1)[-1]
    if size and download_rate:
        self.job.set_progress(
            job_progress,
            'Downloading {}: {} ({}%) at {}'.format(filename, size, progress, download_rate))
    else:
        self.job.set_progress(
            job_progress,
            'Downloading {} ({}%)'.format(filename, progress))
def cal_vol(ts):
    d = ts[0].tail(21)['volume']
    v = d[-1]
    avgv = d[-21:-1].mean()
    ret0 = humanfriendly.format_size(v)
    ret1 = COLOR_DEFAULT
    if v > 1.3 * avgv:
        ret1 = COLOR_GREEN
    elif v < 0.7 * avgv:
        ret1 = COLOR_RED
    return ret0, ret1
def parallel_download(self, urls):
    totalsize = 0
    pb = Progress(
        len(urls), 'Downloading files',
        callback=lambda: 'Size {}'.format(format_size(totalsize)))
    try:
        with Pool(min(self.processes, len(urls))) as pool:
            for size in pb.iterator(pool.imap_unordered(self.download, urls)):
                totalsize += size
    except KeyboardInterrupt:
        print('Ending prematurely.')
def get_file(self, path):
    local_path = os.path.basename(path)
    self.bunga_client.wait_for_connection()
    LOGGER.debug("Getting file '%s' to '%s'.", path, local_path)
    self.bunga_client.get_file(path, local_path)

    with open(local_path, 'rb') as fin:
        data = fin.read()

    LOGGER.debug("'%s' is %s with sha256 %s.",
                 local_path,
                 format_size(os.stat(local_path).st_size),
                 sha256(data).hexdigest())
def build(self):
    """Call docker build to create an image for the compose service
    (e.g. gcc54) that this instance was configured with."""
    logging.info("Starting build for service %s." % self.service)
    no_cache = "" if self.variables.docker_cache else "--no-cache"
    subprocess.check_call("docker-compose build %s %s" % (no_cache, self.service),
                          shell=True)
    output = subprocess.check_output(
        "docker image inspect %s --format '{{.Size}}'" % self.created_image_name,
        shell=True)
    size = int(output.decode().strip())
    logging.info("%s image size: %s" % (self.created_image_name, format_size(size)))
def size(self, *, human_readable=False):
    if self.is_file():
        sz = os.path.getsize(self)
    elif self.is_dir():
        sz = sum(os.path.getsize(p) for p in self.rglob('*') if p.is_file())
    else:
        sz = 0
    if human_readable:
        return humanfriendly.format_size(sz, binary=True)
    else:
        return sz
def print_resumables_list(data, filename=None, upload_id=None):
    if filename and upload_id:
        pass  # not implemented
    else:
        the_list = data['resumables']
        the_list.sort(key=cmp_to_key(resumables_cmp))
        colnames = ['Upload ID', 'Server-side data size', 'Filename']
        values = []
        for r in the_list:
            # next_offset is a byte count; format it for display.
            size = humanfriendly.format_size(r['next_offset'])
            row = [r['id'], size, r['filename']]
            values.append(row)
        print(humanfriendly.tables.format_pretty_table(values, colnames))