def configure(self, do_packages=0):
    """
    Configures the MySQL server: optionally installs packages and sets the
    root password, renders a custom my.cnf, and enables remote access when
    requested, then restarts the service.
    """
    # Work on a copy of the global env so local changes don't leak.
    _env = type(self.genv)(self.genv)
    _env = set_root_login(db_type='mysql', db_host=self.genv.db_host, e=_env)
    if int(do_packages):
        self.prep_root_password()
        self.install_packages()
        self.set_root_password()
    if _env.mysql_custom_mycnf:
        fn = self.render_to_file('my.template.cnf', extra=_env)
        self.put_or_dryrun(
            local_path=fn,
            remote_path=_env.mysql_conf,
            use_sudo=True,
        )
    if _env.mysql_allow_remote_connections:
        # Enable remote connections.
        self.sudo_or_dryrun("sed -i 's/127.0.0.1/0.0.0.0/g' %(mysql_conf)s" % _env)
        # Enable root logins from remote connections.
        cmd = 'mysql -u %(db_root_user)s -p"%(mysql_root_password)s" --execute="USE mysql; GRANT ALL ON *.* to %(db_root_user)s@\'%%\' IDENTIFIED BY \'%(db_root_password)s\'; FLUSH PRIVILEGES;"' % _env
        # Fix: was a bare sudo_or_dryrun(cmd); use the instance method for
        # consistency with every other command issued by this method.
        self.sudo_or_dryrun(cmd)
    self.restart()
def force_stop():
    """
    Forcibly terminates all Celery processes.
    """
    with settings(warn_only=True):
        #sudo_or_dryrun(env.celery_force_stop_command % env)#fails?
        # Kill every worker process, then remove stale PID files so a
        # subsequent start does not think workers are still running.
        run('sudo pkill -9 -f celery')
        sudo_or_dryrun('rm -f /tmp/celery*.pid')
def purge():
    """
    Clears all pending tasks in the Celery queue.
    """
    render_paths()
    # Export the site/role context before invoking the management command.
    purge_cmd = ('export SITE=%(SITE)s; export ROLE=%(ROLE)s; '
                 '%(celery_supervisor_django_manage)s celeryctl purge') % env
    sudo_or_dryrun(purge_cmd)
def clean_virtualenv(virtualenv_dir=None):
    """
    Deletes the virtual environment directory and verifies it is gone.

    If virtualenv_dir is not given, falls back to env.pip_virtual_env_dir.
    """
    render_paths()
    env.pip_virtual_env_dir = virtualenv_dir or env.pip_virtual_env_dir
    with settings(warn_only=True):
        print('Deleting old virtual environment...')
        sudo_or_dryrun('rm -Rf %(pip_virtual_env_dir)s' % env)
        # Confirm deletion actually happened (rm ran with warn_only).
        assert not files.exists(env.pip_virtual_env_dir), \
            'Unable to delete pre-existing environment.'
def set_permissions():
    """
    Sets ownership and permissions for Celery-related files.
    """
    # Chown each configured path to the Celery daemon user.
    for owned_path in env.celery_paths_owned:
        env.celery_path_owned = owned_path
        sudo_or_dryrun(
            'chown %(celery_daemon_user)s:%(celery_daemon_user)s '
            '%(celery_path_owned)s' % env)
def make_dir(d):
    """
    Ensures directory ``d`` exists (remotely or locally depending on
    env.plan_storage), memoizing so each path is only created once.
    """
    cache = _fs_cache['make_dir']
    if d not in cache:
        if env.plan_storage == STORAGE_REMOTE:
            sudo_or_dryrun('mkdir -p "%s"' % d)
        elif not os.path.isdir(d):
            os.makedirs(d)
        cache[d] = True
    return cache[d]
def backup(fn, ext='bak'):
    """
    Makes a timestamped copy of a file in the same directory.

    Returns the name of the backup file (<fn>.<YYYYMMDD>.<ext>).
    """
    stamp = date.today().strftime('%Y%m%d')
    fn_bak = '%s.%s.%s' % (fn, stamp, ext)
    sudo_or_dryrun('cp "%s" "%s"' % (fn, fn_bak))
    return fn_bak
def test_create_user():
    """Verifies user creation without a home directory, cleaning up afterwards."""
    from burlap.user import create, exists
    try:
        create('user1', create_home=False)
        assert exists('user1')
        assert not is_dir('/home/user1')
    finally:
        # Always remove the test user, even when an assertion fails.
        sudo_or_dryrun('userdel -r user1')
def update(package='', ignore_errors=0, no_deps=0, all=0, mirrors=1):
    """
    Updates the local cache of pip packages.

    If all=1, skips check of host and simply updates everything.
    """
    assert env[ROLE]
    ignore_errors = int(ignore_errors)
    env.pip_path_versioned = env.pip_path % env
    env.pip_local_cache_dir = env.pip_local_cache_dir_template % env
    env.pip_cache_dir = env.pip_local_cache_dir
    if not os.path.isdir(env.pip_cache_dir):
        os.makedirs(env.pip_cache_dir)
    env.pip_package = (package or '').strip()
    env.pip_no_deps = '--no-deps' if int(no_deps) else ''
    env.pip_build_dir = tempfile.mkdtemp()
    # Clear build directory in case it wasn't properly cleaned up previously.
    # NOTE(review): this removes env.pip_build_directory while the line above
    # sets env.pip_build_dir -- confirm these refer to the same setting.
    cmd = 'rm -Rf %(pip_build_directory)s' % env
    if env.is_local:
        run_or_dryrun(cmd)
    else:
        sudo_or_dryrun(cmd)
    with settings(warn_only=ignore_errors):
        if package:
            # Download a single specific package.
            cmd = env.pip_update_command % env
            if not int(mirrors):
                cmd = cmd.replace('--use-mirrors', '')
            local_or_dryrun(cmd)
        else:
            # Download each package in a requirements file.
            # Note, specifying the requirements file in the command isn't properly
            # supported by pip, thus we have to parse the file itself and send each
            # to pip separately.
            if int(all):
                packages = list(iter_pip_requirements())
            else:
                packages = [k for k, v in check()]
            for package in packages:
                env.pip_package = package.strip()
                cmd = env.pip_update_command % env
                if not int(mirrors):
                    cmd = cmd.replace('--use-mirrors', '')
                local_or_dryrun(cmd)
def reset():
    """
    Deletes all recorded plan executions.

    This will cause the planner to think everything needs to be re-deployed.
    """
    d = os.path.join(init_plan_data_dir(), env.ROLE)
    # Choose the executor based on where plan data is stored.
    if env.plan_storage == STORAGE_REMOTE:
        runner = sudo_or_dryrun
    elif env.plan_storage == STORAGE_LOCAL:
        runner = local_or_dryrun
    else:
        raise NotImplementedError
    runner('rm -Rf "%s"' % d)
    runner('mkdir -p "%s"' % d)
def dump(dest_dir=None, to_local=None, from_local=0, archive=0, dump_fn=None):
    """
    Exports the target database to a single transportable file on the
    localhost, appropriate for loading using load().

    dest_dir overrides the dump destination; to_local copies the dump to the
    local host (defaults to 1 when dumping a remote host); archive moves the
    local copy into the archive directory. Returns the dump filename.
    """
    from burlap.dj import set_db
    from_local = int(from_local)
    set_db()
    if dest_dir:
        env.db_dump_dest_dir = dest_dir
    env.db_date = datetime.date.today().strftime('%Y%m%d')
    #env.db_dump_fn = dump_fn or (env.db_dump_fn_template % env)
    env.db_dump_fn = get_default_db_fn(dump_fn or env.db_dump_fn_template).strip()
    # When dumping a remote database, default to retrieving the dump locally.
    if to_local is None and not env.is_local:
        to_local = 1
    if env.db_dump_command:
        run_or_dryrun(env.db_dump_command % env)
    elif 'postgres' in env.db_engine or 'postgis' in env.db_engine:
        assert env.db_schemas, \
            'Please specify the list of schemas to dump in db_schemas.'
        env.db_schemas_str = ' '.join('-n %s' % _ for _ in env.db_schemas)
        cmd = env.db_postgresql_dump_command % env
        if env.is_local or from_local:
            local_or_dryrun(cmd)
        else:
            sudo_or_dryrun(cmd)
    elif 'mysql' in env.db_engine:
        cmd = env.db_mysql_dump_command % env
        if env.is_local:
            local_or_dryrun(cmd)
        else:
            sudo_or_dryrun(cmd)
    else:
        # Fix: was `raise NotImplemented`, which raises a TypeError under
        # Python 3 because NotImplemented is not an exception class.
        raise NotImplementedError
    # Download the database dump file on the remote host to localhost.
    if not from_local and (0 if to_local is None else int(to_local)) and not env.is_local:
        cmd = ('rsync -rvz --progress --recursive --no-p --no-g '
               '--rsh "ssh -o StrictHostKeyChecking=no -i %(key_filename)s" '
               '%(user)s@%(host_string)s:%(db_dump_fn)s %(db_dump_fn)s') % env
        local_or_dryrun(cmd)
    if to_local and int(archive):
        db_fn = render_fn(env.db_dump_fn)
        env.db_archive_fn = '%s/%s' % (env.db_dump_archive_dir, os.path.split(db_fn)[-1])
        local_or_dryrun('mv %s %s' % (db_fn, env.db_archive_fn))
    return env.db_dump_fn
def uninstall(package):
    """
    Uninstalls the given pip package from the virtual environment.
    """
    from burlap.dj import render_remote_paths
    render_remote_paths()
    if env.pip_virtual_env_dir_template:
        env.pip_virtual_env_dir = env.pip_virtual_env_dir_template % env
    env.pip_local_cache_dir = env.pip_local_cache_dir_template % env
    env.pip_package = package
    # Local hosts run directly; remote hosts need sudo.
    executor = run_or_dryrun if env.is_local else sudo_or_dryrun
    executor(env.pip_uninstall_command % env)
def update_install(clean=0, pip_requirements_fn=None, virtualenv_dir=None, user=None, group=None, perms=None):
    """
    Bootstraps pip, (re)creates the virtual environment, uploads the
    requirements file, installs everything it lists, and optionally fixes
    ownership/permissions on the environment afterwards.
    """
    try:
        from burlap.dj import render_remote_paths
    except ImportError:
        # Django support is optional; skip path rendering if unavailable.
        render_remote_paths = None
    # Work on a copy of env so local changes don't leak.
    _env = type(env)(env)
    pip_requirements_fn = pip_requirements_fn or env.pip_requirements_fn
    bootstrap(force=clean)
    init(clean=clean, virtualenv_dir=virtualenv_dir, check_permissions=False)
    req_fn = find_template(pip_requirements_fn)
    assert req_fn, 'Could not find file: %s' % pip_requirements_fn
    _env.pip_remote_requirements_fn = '/tmp/pip-requirements.txt'
    put_or_dryrun(
        local_path=req_fn,
        remote_path=_env.pip_remote_requirements_fn,
    )
    if render_remote_paths:
        render_remote_paths()
    # Resolve the virtualenv location: explicit argument wins, else template.
    if virtualenv_dir:
        _env.virtualenv_dir = virtualenv_dir
    elif _env.pip_virtual_env_dir_template:
        _env.virtualenv_dir = _env.pip_virtual_env_dir_template % _env
    _env.pip_update_install_command = "%(virtualenv_dir)s/bin/pip install -r %(pip_remote_requirements_fn)s"
    if _env.is_local:
        run_or_dryrun(_env.pip_update_install_command % _env)
    else:
        sudo_or_dryrun(_env.pip_update_install_command % _env)
    # Remote installs run privileged, so hand ownership back if requested.
    if not _env.is_local and (_env.pip_check_permissions or user or group or perms):
        set_virtualenv_permissions(
            user=user,
            group=group,
            perms=perms,
            virtualenv_dir=virtualenv_dir,
        )
def drop_views(self, name=None, site=None):
    """
    Drops all views.

    Currently only implemented for MySQL; the PostgreSQL branch raises
    NotImplementedError (notes on a possible implementation are kept below).
    """
    from burlap.dj import set_db
    set_db(name=name, site=site)
    if 'postgres' in env.db_engine or 'postgis' in env.db_engine:
        # SELECT 'DROP VIEW ' || table_name || ';'
        # FROM information_schema.views
        # WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
        # AND table_name !~ '^pg_';
        # http://stackoverflow.com/questions/13643831/drop-all-views-postgresql
        # DO$$
        # BEGIN
        #
        # EXECUTE (
        # SELECT string_agg('DROP VIEW ' || t.oid::regclass || ';', ' ') -- CASCADE?
        # FROM pg_class t
        # JOIN pg_namespace n ON n.oid = t.relnamespace
        # WHERE t.relkind = 'v'
        # AND n.nspname = 'my_messed_up_schema'
        # );
        #
        # END
        # $$
        # Fix: the original left a bare `todo` name here, which raised a
        # NameError at runtime; raise an explicit error instead (consistent
        # with the final else branch).
        raise NotImplementedError
    elif 'mysql' in env.db_engine:
        set_root_login()
        # List all views in the target schema.
        cmd = ("mysql --batch -v -h %(db_host)s "
               #"-u %(db_root_user)s -p'%(db_root_password)s' "
               "-u %(db_user)s -p'%(db_password)s' "
               "--execute=\"SELECT GROUP_CONCAT(CONCAT(TABLE_SCHEMA,'.',table_name) SEPARATOR ', ') AS views FROM INFORMATION_SCHEMA.views WHERE TABLE_SCHEMA = '%(db_name)s' ORDER BY table_name DESC;\"") % env
        result = sudo_or_dryrun(cmd)
        # Raw strings so the regex escapes are not interpreted by Python
        # (non-raw \s etc. are deprecated escape sequences in Python 3).
        result = re.findall(
            r'^views[\s\t\r\n]+(.*)',
            result,
            flags=re.IGNORECASE | re.DOTALL | re.MULTILINE)
        if not result:
            return
        env.db_view_list = result[0]
        #cmd = ("mysql -v -h %(db_host)s -u %(db_root_user)s -p'%(db_root_password)s' " \
        cmd = ("mysql -v -h %(db_host)s -u %(db_user)s -p'%(db_password)s' "
               "--execute=\"DROP VIEW %(db_view_list)s CASCADE;\"") % env
        sudo_or_dryrun(cmd)
    else:
        raise NotImplementedError
def write_pgpass(self, name=None, use_sudo=0, verbose=1, commands_only=0):
    """
    Write the file used to store login credentials for PostgreSQL.

    Builds the touch/chmod/append commands for ~/.pgpass; executes them
    unless commands_only is set. Returns the list of commands.
    """
    from burlap.dj import set_db
    from burlap.file import appendline
    use_sudo = int(use_sudo)
    # NOTE(review): the `verbose` parameter is immediately overwritten by the
    # global verbosity setting -- confirm this is intentional.
    verbose = common.get_verbose()
    commands_only = int(commands_only)
    if name:
        set_db(name=name)
    cmds = []
    cmds.append(
        'touch {db_postgresql_pgass_path}'.format(
            db_postgresql_pgass_path=env.db_postgresql_pgass_path))
    cmds.append(
        'chmod {db_postgresql_pgpass_chmod} {db_postgresql_pgass_path}'.format(
            db_postgresql_pgass_path=env.db_postgresql_pgass_path,
            db_postgresql_pgpass_chmod=env.db_postgresql_pgpass_chmod))
    pgpass_kwargs = dict(
        db_host=env.db_host,
        db_port=env.db_postgresql_port,
        db_user=env.db_user,
        db_password=env.db_password,
    )
    # Standard .pgpass line: host:port:database:user:password.
    pgpass_line = '{db_host}:{db_port}:*:{db_user}:{db_password}'\
        .format(**pgpass_kwargs)
    # appendline with commands_only=1 just returns the commands to run.
    cmds.extend(appendline(
        fqfn=env.db_postgresql_pgass_path,
        line=pgpass_line,
        use_sudo=use_sudo,
        commands_only=1,
        verbose=0))
    if not commands_only:
        for cmd in cmds:
            if verbose:
                print(cmd)
            if use_sudo:
                sudo_or_dryrun(cmd)
            else:
                run_or_dryrun(cmd)
    return cmds
def test_sudo(self):
    """Checks that sudo runs as root and as a specified target user."""
    all_users = run_or_dryrun('cut -d: -f1 /etc/passwd')
    print('all users:', all_users)
    ret = sudo_or_dryrun('whoami')
    print('ret0:', ret)
    self.assertEqual(ret, 'root')
    # Pick a user that exists on the current platform.
    target_user = '******' if 'travis' in all_users else '******'
    ret = sudo_or_dryrun('whoami', user=target_user)
    print('ret1:', ret)
    self.assertEqual(ret, target_user)
def configure():
    """
    Configures rules for IPTables.
    """
    # Guard clause: when disabled, tear down and exit.
    if not env.iptables_enabled:
        disable()
        stop()
        return
    fn = common.render_to_file(env.iptables_rules_template)
    put(local_path=fn)
    cmd = ('iptables-restore < %(put_remote_path)s; '
           'iptables-save > /etc/iptables.up.rules') % env
    sudo_or_dryrun(cmd)
    enable()
    restart()
def test_sudo(self):
    """Checks sudo identity, tolerating multi-line command output."""
    all_users = run_or_dryrun('cut -d: -f1 /etc/passwd')
    print('all users:', all_users)
    # Only the last line of output holds the actual username.
    ret = (sudo_or_dryrun('whoami') or '').split('\n')[-1]
    print('ret0:', ret)
    self.assertEqual(ret, 'root')
    # Pick a user that exists on the current platform.
    target_user = '******' if 'travis' in all_users else '******'
    ret = (sudo_or_dryrun('whoami', user=target_user) or '').split('\n')[-1]
    print('ret1:', ret)
    self.assertEqual(ret, target_user)
def upgrade_pip():
    """
    Upgrades setuptools, distribute and pip inside the virtual environment.
    """
    from burlap.dj import render_remote_paths
    render_remote_paths()
    if env.pip_virtual_env_dir_template:
        env.pip_virtual_env_dir = env.pip_virtual_env_dir_template % env
    sudo_or_dryrun(
        "chown -R %(pip_user)s:%(pip_group)s %(pip_virtual_env_dir)s" % env)
    # All upgrades run inside the activated virtualenv.
    activate = ". %(pip_virtual_env_dir)s/bin/activate; " % env
    run_or_dryrun(activate + "pip install --upgrade setuptools")
    run_or_dryrun(activate + "pip install --upgrade distribute")
    with settings(warn_only=True):
        # Upgrading pip in-place can fail harmlessly; tolerate errors.
        run_or_dryrun(activate + "pip install --upgrade pip")
def deploy_cura():
    """
    Updates files for the Printrbot manager.

    e.g. fab printer deploy_cura
    """
    # Ensure our 3d configuration options are up-to-date.
    run_or_dryrun(
        'mkdir -p ~/git; cd ~/git; git clone https://github.com/chrisspen/3d-printer-profiles.git; cd 3d-printer-profiles; git pull'
    )
    # Ensure our 3d models are up-to-date.
    for tmpl in ('mkdir -p %(project_home)s/models/printable',
                 'chown -R %(user)s:%(user)s %(project_home)s'):
        sudo_or_dryrun(tmpl % env)
    local_or_dryrun(
        'rsync -avz --delete --rsh "ssh -t -o StrictHostKeyChecking=no -i %(key_filename)s" models/printable %(user)s@%(host_string)s:%(project_home)s/models/' % env)
def appendline(fqfn, line, use_sudo=0, verbose=1, commands_only=0):
    """
    Appends the given line to the given file only if the line does not
    already exist in the file.

    Returns the list of shell commands used (a single grep||echo command).
    """
    use_sudo = int(use_sudo)
    commands_only = int(commands_only)
    cmd = 'grep -qF "{line}" {fqfn} || echo "{line}" >> {fqfn}'.format(
        fqfn=fqfn, line=line)
    if int(verbose):
        print(cmd)
    if not commands_only:
        executor = sudo_or_dryrun if use_sudo else run_or_dryrun
        executor(cmd)
    return [cmd]
def appendline(fqfn, line, use_sudo=0, verbose=1, commands_only=0):
    """
    Appends the given line to the given file only if the line does not
    already exist in the file.

    Returns the single-command list so callers can batch the commands.
    """
    verbose = int(verbose)
    commands_only = int(commands_only)
    cmd = 'grep -qF "{line}" {fqfn} || echo "{line}" >> {fqfn}'.format(
        line=line, fqfn=fqfn)
    if verbose:
        print(cmd)
    if not commands_only:
        if int(use_sudo):
            sudo_or_dryrun(cmd)
        else:
            run_or_dryrun(cmd)
    return [cmd]
def sync():
    """
    Uploads sets of files to the host.
    """
    for sync_set in env.file_sync_sets:
        env.file_src = src = sync_set['src']
        assert os.path.isfile(src), 'File %s does not exist.' % (src,)
        env.file_dst = dst = sync_set['dst']
        env.file_dst_dir, env.file_dst_file = os.path.split(dst)
        # Create the destination directory before uploading.
        sudo_or_dryrun('mkdir -p %(file_dst_dir)s' % env)
        put_or_dryrun(local_path=src, remote_path=dst, use_sudo=True)
        # Apply per-set or default ownership.
        env.file_user = sync_set.get('user', env.file_default_user)
        env.file_group = sync_set.get('group', env.file_default_group)
        sudo_or_dryrun('chown %(file_user)s:%(file_group)s %(file_dst)s' % env)
def sync():
    """
    Uploads sets of files to the host.
    """
    for entry in env.file_sync_sets:
        src = entry['src']
        dst = entry['dst']
        env.file_src = src
        assert os.path.isfile(src), 'File %s does not exist.' % (src, )
        env.file_dst = dst
        env.file_dst_dir, env.file_dst_file = os.path.split(dst)
        # Make sure the target directory exists, then push the file.
        sudo_or_dryrun('mkdir -p %(file_dst_dir)s' % env)
        put_or_dryrun(local_path=src, remote_path=dst, use_sudo=True)
        env.file_user = entry.get('user', env.file_default_user)
        env.file_group = entry.get('group', env.file_default_group)
        sudo_or_dryrun('chown %(file_user)s:%(file_group)s %(file_dst)s' % env)
def loaddata(path, site=None):
    """
    Runs the Django loaddata management command.

    By default, runs on only the current site.
    Pass site=all to run on all sites.
    """
    render_remote_paths()
    site = site or env.SITE
    env._loaddata_path = path
    for site, site_data in common.iter_sites(site=site, no_secure=True):
        try:
            set_db(site=site)
            env.SITE = site
            cmd = ('export SITE=%(SITE)s; export ROLE=%(ROLE)s; '
                   'cd %(shell_default_dir)s; '
                   './manage loaddata %(_loaddata_path)s') % env
            sudo_or_dryrun(cmd)
        except KeyError:
            # Sites without a matching database configuration are skipped.
            pass
def bootstrap(force=0):
    """
    Installs all the necessary packages necessary for managing virtual
    environments with pip.
    """
    force = int(force)
    # Nothing to do if pip and virtualenv are already present.
    if has_pip() and has_virtualenv() and not force:
        return
    _env = type(env)(env)
    _env.pip_path_versioned = _env.pip_path % _env
    run_or_dryrun(
        'wget http://peak.telecommunity.com/dist/ez_setup.py -O /tmp/ez_setup.py'
    )
    with settings(warn_only=True):
        sudo_or_dryrun(
            'python{pip_python_version} /tmp/ez_setup.py -U setuptools'.format(
                **_env))
    sudo_or_dryrun('easy_install -U pip')
    if env.pip_bootstrap_packages:
        for pkg in _env.pip_bootstrap_packages:
            _env.package = pkg
            sudo_or_dryrun(
                '{pip_path_versioned} install --upgrade {package}'.format(
                    **_env))
def set_virtualenv_permissions(user=None, group=None, perms=None, virtualenv_dir=None):
    """
    Applies ownership (chown) and mode (chmod) settings to the virtualenv
    directory; arguments default to the corresponding pip_* env settings.
    """
    from burlap.dj import render_remote_paths
    cfg = type(env)(env)
    cfg.pip_user = user or cfg.pip_user
    cfg.pip_group = group or cfg.pip_group
    cfg.pip_chmod = perms or cfg.pip_chmod
    cfg.pip_virtual_env_dir = virtualenv_dir or cfg.pip_virtual_env_dir
    render_remote_paths()
    # Explicit argument wins; otherwise expand the template if configured.
    if virtualenv_dir:
        cfg.pip_virtual_env_dir = virtualenv_dir
    elif cfg.pip_virtual_env_dir_template:
        cfg.pip_virtual_env_dir = cfg.pip_virtual_env_dir_template % cfg
    sudo_or_dryrun(
        'chown -R %(pip_user)s:%(pip_group)s %(pip_virtual_env_dir)s' % cfg)
    sudo_or_dryrun('chmod -R %(pip_chmod)s %(pip_virtual_env_dir)s' % cfg)
def save_db_password(user, password):
    """
    Writes the database user's password to a file, allowing automatic login
    from a secure location.

    Currently, only PostgreSQL is supported.
    """
    from burlap.dj import set_db
    set_db(name='default')
    # Guard clause: only PostgreSQL-family engines are supported.
    if 'postgres' not in env.db_engine and 'postgis' not in env.db_engine:
        raise NotImplementedError
    env.db_save_user = user
    env.db_save_password = password
    # Update the password server-side, then refresh the ~/.pgpass entry.
    sudo_or_dryrun('sudo -u postgres psql -c "ALTER USER %(db_save_user)s PASSWORD \'%(db_save_password)s\';"' % env)
    sudo_or_dryrun("sed -i '/%(db_save_user)s/d' ~/.pgpass" % env)
    sudo_or_dryrun('echo "localhost:5432:*:%(db_save_user)s:%(db_save_password)s" >> ~/.pgpass' % env)
    sudo_or_dryrun('chmod 600 ~/.pgpass')
def init(clean=0, check_global=0, virtualenv_dir=None, check_permissions=None):
    """
    Creates the virtual environment.
    """
    assert env[ROLE]
    render_paths()
    # Delete any pre-existing environment.
    if int(clean):
        clean_virtualenv(virtualenv_dir=virtualenv_dir)
    if virtualenv_exists(virtualenv_dir=virtualenv_dir):
        print('virtualenv exists')
        return
    # Important. Default Ubuntu 12.04 package uses Pip 1.0, which
    # is horribly buggy. Should use 1.3 or later.
    if int(check_global):
        print('Ensuring the global pip install is up-to-date.')
        sudo_or_dryrun('pip install --upgrade pip')
    virtualenv_dir = virtualenv_dir or env.pip_virtual_env_dir
    print('Creating new virtual environment...')
    with settings(warn_only=True):
        create_cmd = 'virtualenv --no-site-packages %s' % virtualenv_dir
        executor = run_or_dryrun if env.is_local else sudo_or_dryrun
        executor(create_cmd)
    if check_permissions is None:
        check_permissions = env.pip_check_permissions
    if not env.is_local and check_permissions:
        sudo_or_dryrun(
            'chown -R %(pip_user)s:%(pip_group)s %(remote_app_dir)s' % env)
        sudo_or_dryrun('chmod -R %(pip_chmod)s %(remote_app_dir)s' % env)
def restart():
    """Restarts the service."""
    sudo_or_dryrun(get_service_command(common.RESTART))
def stop():
    """Stops the service."""
    sudo_or_dryrun(get_service_command(common.STOP))
def disable():
    """Disables the service so it does not start at boot."""
    sudo_or_dryrun(get_service_command(common.DISABLE))
def enable():
    """Enables the service so it starts at boot."""
    sudo_or_dryrun(get_service_command(common.ENABLE))
def status():
    """Reports the service status."""
    sudo_or_dryrun(get_service_command(common.STATUS))
def reload():
    """Reloads the service configuration."""
    sudo_or_dryrun(get_service_command(common.RELOAD))
def list_server_specs(cpu=1, memory=1, hdd=1):
    """
    Displays a list of common servers characteristics, like number
    of CPU cores, amount of memory and hard drive capacity.
    """
    cpu = int(cpu)
    memory = int(memory)
    hdd = int(hdd)

    # CPU
    if cpu:
        cmd = 'cat /proc/cpuinfo | grep -i "model name"'
        ret = run_or_dryrun(cmd)
        # Raw string for the regex; list() so the result is a concrete
        # sequence under Python 3 (map returns a lazy iterator there).
        matches = list(map(str.strip,
            re.findall(r'model name\s+:\s*([^\n]+)', ret, re.DOTALL | re.I)))
        cores = {}
        for match in matches:
            cores.setdefault(match, 0)
            cores[match] += 1

    # Memory
    if memory:
        cmd = 'dmidecode --type 17'
        ret = sudo_or_dryrun(cmd)
        matches = re.findall(
            r'Memory\s+Device\r\n(.*?)(?:\r\n\r\n|$)', ret,
            flags=re.DOTALL | re.I)
        memory_slot_dicts = []
        for match in matches:
            # Parse "Key: Value" lines into a dict per memory slot.
            attrs = dict([(_a.strip(), _b.strip())
                for _a, _b in re.findall(
                    r'^([^:]+):\s+(.*)$', match, flags=re.MULTILINE)])
            memory_slot_dicts.append(attrs)
        total_memory_gb = 0
        total_slots_filled = 0
        total_slots = len(memory_slot_dicts)
        memory_types = set()
        memory_forms = set()
        memory_speeds = set()
        for memory_dict in memory_slot_dicts:
            try:
                size = int(round(float(
                    re.findall(r'([0-9]+)\s+MB', memory_dict['Size'])[0]) / 1024.))
                total_memory_gb += size
                total_slots_filled += 1
            except IndexError:
                # Empty slots report no "<n> MB" size; skip them.
                pass
            _v = memory_dict['Type']
            if _v != 'Unknown':
                memory_types.add(_v)
            _v = memory_dict['Form Factor']
            if _v != 'Unknown':
                memory_forms.add(_v)
            _v = memory_dict['Speed']
            if _v != 'Unknown':
                memory_speeds.add(_v)

    # Storage
    if hdd:
        #cmd = 'ls /dev/*d* | grep "/dev/[a-z]+d[a-z]$"'
        cmd = 'find /dev -maxdepth 1 | grep -E "/dev/[a-z]+d[a-z]$"'
        # Fix: materialize the map() result -- len() below raises TypeError
        # on the lazy iterator that map() returns under Python 3.
        devices = list(map(str.strip, run_or_dryrun(cmd).split('\n')))
        total_drives = len(devices)
        total_physical_storage_gb = 0
        total_logical_storage_gb = 0
        drive_transports = set()
        for device in devices:
            cmd = 'udisks --show-info %s |grep -i " size:"' % (device)
            ret = run_or_dryrun(cmd)
            size_bytes = float(re.findall(r'size:\s*([0-9]+)', ret)[0].strip())
            size_gb = int(round(size_bytes / 1024 / 1024 / 1024))
            #print(device, size_gb)
            total_physical_storage_gb += size_gb
            with settings(warn_only=True):
                # hdparm may fail on virtual devices; tolerate errors.
                cmd = 'hdparm -I %s|grep -i "Transport:"' % device
                ret = sudo_or_dryrun(cmd)
                if ret and not ret.return_code:
                    drive_transports.add(ret.split('Transport:')[-1].strip())
        cmd = "df | grep '^/dev/[mhs]d*' | awk '{s+=$2} END {print s/1048576}'"
        ret = run_or_dryrun(cmd)
        total_logical_storage_gb = float(ret)

    if cpu:
        print('-'*80)
        print('CPU')
        print('-'*80)
        type_str = ', '.join(['%s x %i' % (_type, _count)
            for _type, _count in cores.items()])
        print('Cores: %i' % sum(cores.values()))
        print('Types: %s' % type_str)

    if memory:
        print('-'*80)
        print('MEMORY')
        print('-'*80)
        print('Total: %s GB' % total_memory_gb)
        print('Type: %s' % list_to_str_or_unknown(memory_types))
        print('Form: %s' % list_to_str_or_unknown(memory_forms))
        print('Speed: %s' % list_to_str_or_unknown(memory_speeds))
        print('Slots: %i (%i filled, %i empty)' % (
            total_slots, total_slots_filled, total_slots - total_slots_filled))

    if hdd:
        print('-'*80)
        print('STORAGE')
        print('-'*80)
        print('Total physical drives: %i' % total_drives)
        print('Total physical storage: %s GB' % total_physical_storage_gb)
        print('Total logical storage: %s GB' % total_logical_storage_gb)
        print('Types: %s' % list_to_str_or_unknown(drive_transports))