def get_linux_distribution_from_distro(get_full_name):
    """Get the distribution information from the distro Python module.

    :param get_full_name: when truthy, the first element of the returned
        list is the full distribution name rather than the short id.
    :returns: list of ``[name, version, codename, full_name]`` where
        ``full_name`` is always appended last.
    """
    # If we get here we have to have the distro module, thus we do
    # not wrap the call in a try-except block as it would mask the problem
    # and result in a broken agent installation
    osinfo = list(
        distro.linux_distribution(
            full_distribution_name=get_full_name
        )
    )
    # Reuse the result above when it already carries the full name instead
    # of querying distro a second time (the original always re-queried).
    if get_full_name:
        full_name = osinfo[0].strip()
    else:
        full_name = distro.linux_distribution()[0].strip()
    osinfo.append(full_name)
    return osinfo
def package_manager(config_opts, chroot, plugins):
    """Instantiate the package manager selected by the mock configuration.

    Returns a Yum or Dnf instance; falls back to Yum on EL < 8 hosts that
    lack a usable dnf binary (optionally after warning the user).
    """
    requested = config_opts.get('package_manager', 'yum')
    if requested == 'yum':
        return Yum(config_opts, chroot, plugins)
    if requested != 'dnf':
        # TODO specific exception type
        raise Exception('Unrecognized package manager')

    # 'dnf' requested: prefer the real binary when it exists.
    if os.path.isfile(config_opts['dnf_command']):
        return Dnf(config_opts, chroot, plugins)

    # RHEL without DNF
    name, release = distro.linux_distribution(full_distribution_name=False)[0:2]
    if name in ('redhat', 'centos'):
        major = int(release.split('.')[0])
        if major < 8:
            if config_opts.get('dnf_warning'):
                print("""WARNING! WARNING! WARNING!
You are building package for distribution which use DNF. However your
system does not support DNF. You can continue with YUM, which will
likely succeed, but the result may be little different.
You can suppress this warning when you put
config_opts['dnf_warning'] = False
in Mock config.""")
                input("Press Enter to continue.")
            return Yum(config_opts, chroot, plugins)
    # something else then EL, and no dnf_command exist
    # This will likely mean some error later.
    # Either user is smart or let him shot in his foot.
    return Dnf(config_opts, chroot, plugins)
def _prepare_nspawn_command(chrootPath, user, cmd, nspawn_args=None, env=None, cwd=None):
    """Build the systemd-nspawn invocation that runs *cmd* inside the chroot.

    Returns a list when *cmd* was a list, otherwise a single shell string.
    """
    cmd_is_list = isinstance(cmd, list)
    if nspawn_args is None:
        nspawn_args = []

    if user:
        # user can be either id or name
        if not cmd_is_list:
            raise exception.Error('Internal Error: command must be list or shell=True.')
        cmd = ['-u', str(user)] + cmd
    elif not cmd_is_list:
        cmd = [cmd]

    argv = ['/usr/bin/systemd-nspawn', '-q', '-M', uuid.uuid4().hex, '-D', chrootPath]
    distro_id = distro.linux_distribution(full_distribution_name=False)[0]
    if distro_id not in ('centos', 'rhel'):
        # EL7 does not support it (yet). See BZ 1417387
        argv += ['-a']
    argv.extend(nspawn_args)
    if cwd:
        argv.append('--chdir={0}'.format(cwd))
    if env:
        # BZ 1312384 workaround
        env['PROMPT_COMMAND'] = r'printf "\033]0;<mock-chroot>\007"'
        env['PS1'] = r'<mock-chroot> \s-\v\$ '
        argv.extend('--setenv={0}={1}'.format(key, value) for key, value in env.items())

    full_cmd = argv + cmd
    return full_cmd if cmd_is_list else " ".join(full_cmd)
def captureException(self, exception, value, tb):
    """Send an unhandled exception (with traceback) to Sentry.

    Reporting is skipped when the user disabled it, when raven is not
    installed, when the application was run as root, or from a developer
    (.git) checkout — the latter two cases terminate the process.

    :param exception: exception class
    :param value: exception instance
    :param tb: traceback object
    """
    from .local_server import LocalServer
    from .local_config import LocalConfig
    from .controller import Controller
    from .compute_manager import ComputeManager

    local_server = LocalServer.instance().localServerSettings()
    if local_server["report_errors"]:

        if not RAVEN_AVAILABLE:
            return

        if os.path.exists(LocalConfig.instance().runAsRootPath()):
            log.warning("User has run application as root. Crash reports are disabled.")
            sys.exit(1)
            return  # defensive: only reached if sys.exit is patched (e.g. in tests)

        if os.path.exists(".git"):
            log.warning("A .git directory exist crash report is turn off for developers. Instant exit")
            sys.exit(1)
            return  # defensive: only reached if sys.exit is patched (e.g. in tests)

        if hasattr(exception, "fingerprint"):
            # Group this event under the exception's own fingerprint.
            client = raven.Client(CrashReport.DSN, release=__version__,
                                  fingerprint=['{{ default }}', exception.fingerprint],
                                  transport=HTTPTransport)
        else:
            client = raven.Client(CrashReport.DSN, release=__version__,
                                  transport=HTTPTransport)

        # Environment tags attached to every report.
        context = {
            "os:name": platform.system(),
            "os:release": platform.release(),
            "os:win_32": " ".join(platform.win32_ver()),
            "os:mac": "{} {}".format(platform.mac_ver()[0], platform.mac_ver()[2]),
            "os:linux": " ".join(distro.linux_distribution()),
            "python:version": "{}.{}.{}".format(sys.version_info[0],
                                                sys.version_info[1],
                                                sys.version_info[2]),
            "python:bit": struct.calcsize("P") * 8,
            "python:encoding": sys.getdefaultencoding(),
            "python:frozen": "{}".format(hasattr(sys, "frozen")),
        }

        # extra controller and compute information
        extra_context = {"controller:version": Controller.instance().version(),
                         "controller:host": Controller.instance().host(),
                         "controller:connected": Controller.instance().connected()}
        for index, compute in enumerate(ComputeManager.instance().computes()):
            extra_context["compute{}:id".format(index)] = compute.id()
            # BUGFIX: the name/host assignments previously ended with a stray
            # trailing comma, which stored 1-tuples instead of the strings.
            extra_context["compute{}:name".format(index)] = compute.name()
            extra_context["compute{}:host".format(index)] = compute.host()
            extra_context["compute{}:connected".format(index)] = compute.connected()
            extra_context["compute{}:platform".format(index)] = compute.capabilities().get("platform")
            extra_context["compute{}:version".format(index)] = compute.capabilities().get("version")

        context = self._add_qt_information(context)
        client.tags_context(context)
        client.extra_context(extra_context)
        try:
            report = client.captureException((exception, value, tb))
        except Exception as e:
            # Best effort only: crash reporting must never crash the app.
            log.error("Can't send crash report to Sentry: {}".format(e))
            return
        log.debug("Crash report sent with event ID: {}".format(client.get_ident(report)))
def _describe_system():
    """Collect OS/platform facts for the datalad environment report."""
    import platform as pl
    from datalad import get_encoding_info

    if hasattr(pl, 'dist'):
        dist = pl.dist()
    else:
        # Python 3.8 removed .dist but recommended "distro" is slow, so we
        # try it only if needed
        dist = tuple()
        try:
            import distro
            dist = distro.linux_distribution(full_distribution_name=False)
        except ImportError:
            lgr.info(
                "Please install 'distro' package to obtain distribution information"
            )
        except Exception as exc:
            lgr.warning(
                "No distribution information will be provided since 'distro' "
                "fails to import/run: %s", exc_str(exc)
            )

    distribution = ' '.join(
        [_t2s(dist), _t2s(pl.mac_ver()), _t2s(pl.win32_ver())]
    ).rstrip()

    return {
        'type': os.name,
        'name': pl.system(),
        'release': pl.release(),
        'version': pl.version(),
        'distribution': distribution,
        'max_path_length': get_max_path_length(getpwd()),
        'encoding': get_encoding_info(),
    }
def capture_exception(self, request=None):
    """Report the currently-handled exception to Sentry, if enabled.

    Does nothing when raven is unavailable, when running from a developer
    (.git) checkout, or when the server config disables error reporting.

    :param request: optional aiohttp request whose method/url/body are
        attached to the report as HTTP context
    """
    if not RAVEN_AVAILABLE:
        return
    if os.path.exists(".git"):
        # Developer working tree: never send reports from a checkout.
        log.warning("A .git directory exist crash report is turn off for developers")
        return
    server_config = Config.instance().get_section_config("Server")
    if server_config.getboolean("report_errors"):
        if self._client is None:
            # Lazily create and cache the Sentry client on first use.
            self._client = raven.Client(CrashReport.DSN, release=__version__,
                                        raise_send_errors=True, transport=HTTPTransport)
        if request is not None:
            self._client.http_context({
                "method": request.method,
                "url": request.path,
                "data": request.json,
            })
        # Environment tags sent with the report.
        context = {
            "os:name": platform.system(),
            "os:release": platform.release(),
            "os:win_32": " ".join(platform.win32_ver()),
            "os:mac": "{} {}".format(platform.mac_ver()[0], platform.mac_ver()[2]),
            "os:linux": " ".join(distro.linux_distribution()),
            "aiohttp:version": aiohttp.__version__,
            "python:version": "{}.{}.{}".format(sys.version_info[0],
                                                sys.version_info[1],
                                                sys.version_info[2]),
            "python:bit": struct.calcsize("P") * 8,
            "python:encoding": sys.getdefaultencoding(),
            "python:frozen": "{}".format(hasattr(sys, "frozen"))
        }
        if sys.platform.startswith("linux") and not hasattr(sys, "frozen"):
            # add locale information
            try:
                language, encoding = locale.getlocale()
                context["locale:language"] = language
                context["locale:encoding"] = encoding
            except ValueError:
                # locale can be unparseable/unset; skip it silently
                pass
        # add GNS3 VM version if it exists
        home = os.path.expanduser("~")
        gns3vm_version = os.path.join(home, ".config", "GNS3", "gns3vm_version")
        if os.path.isfile(gns3vm_version):
            try:
                with open(gns3vm_version) as fd:
                    context["gns3vm:version"] = fd.readline().strip()
            except OSError:
                pass
        self._client.tags_context(context)
        try:
            report = self._client.captureException()
        except Exception as e:
            # Best effort only: a failed upload must not crash the server.
            log.error("Can't send crash report to Sentry: {}".format(e))
            return
        log.info("Crash report sent with event ID: {}".format(self._client.get_ident(report)))
def get_distro():
    """Return ``(name, version, codename)``-style distribution information.

    macOS gets a fixed triple; Linux uses ``distro`` when available, else
    the legacy ``platform.linux_distribution``. Any lookup failure yields
    the ``["N/A", "N/A", "N/A"]`` placeholder.
    """
    if is_mac():
        return ["osx", "", "darwin"]
    try:
        if distro_available:
            return distro.linux_distribution(full_distribution_name=False)
        else:
            return platform.linux_distribution()
    except Exception:
        # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any real failure still degrades to N/A.
        return ["N/A", "N/A", "N/A"]
def get_newest_version(huuid):
    """Get the newest Home Assistant version."""
    # Anonymous system profile submitted alongside the update check.
    info_object = {
        'arch': platform.machine(),
        'dev': ('dev' in CURRENT_VERSION),
        'docker': False,
        'os_name': platform.system(),
        'python_version': platform.python_version(),
        'timezone': dt_util.DEFAULT_TIME_ZONE.zone,
        'uuid': huuid,
        'version': CURRENT_VERSION,
        'virtualenv': (os.environ.get('VIRTUAL_ENV') is not None),
    }

    if platform.system() == 'Windows':
        info_object['os_version'] = platform.win32_ver()[0]
    elif platform.system() == 'Darwin':
        info_object['os_version'] = platform.mac_ver()[0]
    elif platform.system() == 'FreeBSD':
        info_object['os_version'] = platform.release()
    elif platform.system() == 'Linux':
        import distro
        linux_dist = distro.linux_distribution(full_distribution_name=False)
        info_object['distribution'] = linux_dist[0]
        info_object['os_version'] = linux_dist[1]
        # Presence of /.dockerenv marks a Docker container.
        info_object['docker'] = os.path.isfile('/.dockerenv')

    # No uuid means the user opted out of analytics: submit nothing.
    if not huuid:
        info_object = {}

    res = None
    try:
        req = requests.post(UPDATER_URL, json=info_object, timeout=5)
        res = req.json()
        res = RESPONSE_SCHEMA(res)
        _LOGGER.info(("Submitted analytics to Home Assistant servers. "
                      "Information submitted includes %s"), info_object)
        return (res['version'], res['release-notes'])
    except requests.RequestException:
        _LOGGER.error("Could not contact Home Assistant Update to check "
                      "for updates")
        return None
    except ValueError:
        # Response body was not valid JSON.
        _LOGGER.error("Received invalid response from Home Assistant Update")
        return None
    except vol.Invalid:
        # JSON was valid but did not match RESPONSE_SCHEMA.
        _LOGGER.error('Got unexpected response: %s', res)
        return None
def get_linux_distro():
    """
    Get the linux distribution and check if it is supported

    :returns: linux distro, None if the distro is not supported
    :rtype: list
    """
    dist = distro.linux_distribution()
    # BUGFIX: the original compared ``dist[:7]`` (a slice of the tuple) to
    # the string 'Red Hat', which can never be equal — Red Hat hosts were
    # always rejected. Compare the distribution *name* prefix instead.
    if dist[0] == 'Ubuntu' or \
            dist[0] == 'CentOS Linux' or \
            dist[0][:7] == 'Red Hat':
        return dist
    else:
        raise RuntimeError(
            'Linux Distribution {} is not supported'.format(dist[0]))
def main():
    """Run the listing function appropriate for this distribution."""
    name, version, _ = distro.linux_distribution()
    try:
        handler = list_fn[name]
        # CentOS >= 8 behaves like Fedora; very old Fedora (< 23) like CentOS.
        if handler is list_centos and LooseVersion(version) >= LooseVersion('8.0'):
            list_fedora()
        elif handler is list_fedora and LooseVersion(version) < LooseVersion('23'):
            list_centos()
        else:
            handler()
    except KeyError:
        # Unknown distribution name: Termux is detectable by its prefix dir.
        if os.path.exists('/data/data/com.termux'):
            list_termux()
        else:
            raise RuntimeError('Unknown system (distribution: {})'.format(name))
    return 0
def get_os_environment():
    """Return ``(platform_description, os_description)`` for this host.

    The platform description is the first line of the Red Hat release
    file, falling back to the ``distro`` module, then to ``"unknown"``.
    The OS description is ``"<kernel release> <machine>"`` from uname.
    """
    try:
        # ``with`` fixes the original's leaked file handle.
        with open(redhat_release_path) as release_file:
            myplatform = release_file.readlines()[0].strip()
    except Exception:
        # Narrowed from bare ``except``: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        try:
            import distro
            myplatform = ' '.join(distro.linux_distribution())
        except Exception:
            myplatform = "unknown"
    # uname returns (sysname, nodename, release, version, machine)
    uname = os.uname()
    kernel_release = uname[2]
    cpu = uname[4]
    os_desc = "%s %s" % (kernel_release, cpu)
    return (myplatform, os_desc)
def get_current_os_full() -> Tuple[Any, ...]:
    """ Detect the full OS version for log debugging. """
    device = get_device()
    if device == "GNU/Linux":
        import distro
        return distro.linux_distribution()
    if device == "macOS":
        from platform import mac_ver
        return mac_ver()
    from platform import win32_ver
    return win32_ver()
def update_permissions(self, repo_path):
    """
    Verifies that permissions and contexts after an rsync are as expected.
    Sending proper rsync flags should prevent the need for this, though
    this is largely a safeguard.
    """
    # all_path = os.path.join(repo_path, "*")
    # Pick the web-server group for this distribution family.
    dist_name = distro.linux_distribution()[0].lower()
    if dist_name in ("sles", "opensuse leap", "opensuse tumbleweed"):
        owner = "root:www"
    elif "debian" in dist_name:
        owner = "root:www-data"
    else:
        owner = "root:apache"
    utils.subprocess_call(self.logger, "chown -R " + owner + " %s" % repo_path)
    utils.subprocess_call(self.logger, "chmod -R 755 %s" % repo_path)
def get_distro():
    """Return a short ``name:version`` string identifying the host OS.

    On Linux the optional ``distro`` package supplies the name/version;
    previously its absence caused a TypeError (``None`` was called) —
    now we degrade to ``system:kernel-release`` instead.
    """
    try:
        from distro import linux_distribution
    except ImportError:
        linux_distribution = None
    system = platform.system()
    if system == "Linux":
        if linux_distribution is None:
            # Graceful fallback when the 'distro' package is not installed.
            return "{}:{}".format(system, platform.release())
        dist = linux_distribution()
        return "{}:{}".format(dist[0], dist[1])
    elif system == "Windows":
        dist = platform.win32_ver()
        return "{}:{}".format(dist[0], dist[1])
    elif system == "Java":
        dist = platform.java_ver()
        return "{}:{}".format(dist[0], dist[1])
    elif system == "Darwin":
        dist = platform.mac_ver()
        return "{}".format(dist[0])
    else:
        return ":".join(platform.uname()[0:1])
def platDesc():
    """Return a short ``os:version`` descriptor for the running platform.

    The platform calls can fail with an interrupted system call, so the
    lookup is retried up to 100 times; persistent failure yields "unknown".
    """
    # we may get an interrupted system call, so try this in a loop
    n = 0
    theos = "unknown"
    while n < 100:
        n += 1
        try:
            system = platform.system()
            if isMac:
                theos = "mac:%s" % (platform.mac_ver()[0])
            elif isWin:
                theos = "win:%s" % (platform.win32_ver()[0])
            elif system == "Linux":
                import distro
                r = distro.linux_distribution(full_distribution_name=False)
                theos = "lin:%s:%s" % (r[0], r[1])
            else:
                theos = system
            break
        except Exception:
            # Narrowed from bare ``except`` so Ctrl-C is not swallowed
            # while still retrying on EINTR-style failures.
            continue
    return theos
def get_newest_version(huuid):
    """Get the newest Home Assistant version."""
    # Anonymous system profile submitted alongside the update check.
    info_object = {'uuid': huuid, 'version': CURRENT_VERSION,
                   'timezone': dt_util.DEFAULT_TIME_ZONE.zone,
                   'os_name': platform.system(),
                   "arch": platform.machine(),
                   'python_version': platform.python_version(),
                   'virtualenv': (os.environ.get('VIRTUAL_ENV') is not None),
                   'docker': False}

    if platform.system() == 'Windows':
        info_object['os_version'] = platform.win32_ver()[0]
    elif platform.system() == 'Darwin':
        info_object['os_version'] = platform.mac_ver()[0]
    elif platform.system() == 'Linux':
        import distro
        linux_dist = distro.linux_distribution(full_distribution_name=False)
        info_object['distribution'] = linux_dist[0]
        info_object['os_version'] = linux_dist[1]
        # Presence of /.dockerenv marks a Docker container.
        info_object['docker'] = os.path.isfile('/.dockerenv')

    # No uuid means the user opted out of analytics: submit nothing.
    if not huuid:
        info_object = {}

    try:
        req = requests.post(UPDATER_URL, json=info_object)
        res = req.json()
        _LOGGER.info(('The latest version is %s. '
                      'Information submitted includes %s'),
                     res['version'], info_object)
        return (res['version'], res['release-notes'])
    except requests.RequestException:
        _LOGGER.exception('Could not contact HASS Update to check for updates')
        return None
    except ValueError:
        # Response body was not valid JSON.
        _LOGGER.exception('Received invalid response from HASS Update')
        return None
    except KeyError:
        # JSON was valid but missing the expected 'version' key.
        _LOGGER.exception('Response from HASS Update did not include version')
        return None
def _guess_chroot(chroot_config): """ Guess which chroot is equivalent to this machine """ # FIXME Copr should generate non-specific arch repo dist = chroot_config if dist is None or (dist[0] is False) or (dist[1] is False): dist = linux_distribution() if "Fedora" in dist: # x86_64 because repo-file is same for all arch # ($basearch is used) if "Rawhide" in dist: chroot = ("fedora-rawhide-x86_64") # workaround for enabling repos in Rawhide when VERSION in os-release # contains a name other than Rawhide elif "rawhide" in os_release_attr("redhat_support_product_version"): chroot = ("fedora-rawhide-x86_64") else: chroot = ("fedora-{}-x86_64".format(dist[1])) elif "Mageia" in dist: # Get distribution architecture (Mageia does not use $basearch) distarch = rpm.expandMacro("%{distro_arch}") # Set the chroot if "Cauldron" in dist: chroot = ("mageia-cauldron-{}".format(distarch)) else: chroot = ("mageia-{0}-{1}".format(dist[1], distarch)) elif "openSUSE" in dist: # Get distribution architecture (openSUSE does not use $basearch) distarch = rpm.expandMacro("%{_target_cpu}") # Set the chroot if "Tumbleweed" in dist: chroot = ("opensuse-tumbleweed-{}".format(distarch)) else: chroot = ("opensuse-leap-{0}-{1}".format(dist[1], distarch)) else: chroot = ("epel-%s-x86_64" % dist[1].split(".", 1)[0]) return chroot
def get_current_os() -> Tuple[str, str]:
    """ Detect the OS version. """
    device = get_device()
    if device == "macOS":
        from platform import mac_ver
        # Ex: macOS 10.12.6
        version = mac_ver()[0]
    elif device == "GNU/Linux":
        import distro
        # Ex: Debian GNU/Linux testing buster
        name, *rest = distro.linux_distribution()
        device = name
        version = " ".join(rest)
    else:
        from platform import win32_ver
        # Ex: Windows 7
        version = win32_ver()[0]
    return (device, version.strip())
# pylint: disable=invalid-name from setuptools import setup, find_packages import platform try: import distro distro, version, _ = distro.linux_distribution( full_distribution_name=False) except ImportError: distro = version = "" platform_name = platform.system() # Default to CentOS7 data_files = [ ("/usr/lib/systemd/system", ["pkg/source/hubble.service"]), ("/etc/hubble", ["conf/hubble"]), ] build_dependencies = [ "distro", "psutil", "defusedxml", "msgpack", "pyyaml", "objgraph", "pycryptodome", "cryptography", "pyopenssl",
# -*- coding: utf-8 -*- from setuptools import find_packages from setuptools import setup import distro import subprocess import re import platform os_name = distro.linux_distribution()[0] if not os_name: if 'amzn' in platform.uname()[2]: os_name = 'centos' files_definition = [ ('/etc/iemlav', ['iemlav.conf']), ('', ['iemlav.conf']), ('/etc/iemlav/asp', [ 'iemlav/lib/auto_server_patcher/configs/commands.json', 'iemlav/lib/auto_server_patcher/configs/config.json' ]), ('/etc/iemlav/log_monitor/server_log/payloads', [ 'iemlav/lib/log_monitor/server_log/rules/payloads/bad_ua.txt', 'iemlav/lib/log_monitor/server_log/rules/payloads/lfi.txt', 'iemlav/lib/log_monitor/server_log/rules/payloads/port_scan_ua.txt', 'iemlav/lib/log_monitor/server_log/rules/payloads/sqli.txt', 'iemlav/lib/log_monitor/server_log/rules/payloads/web_shell.txt', 'iemlav/lib/log_monitor/server_log/rules/payloads/xss.txt' ]), ('/etc/iemlav/log_monitor/server_log/regex', [ 'iemlav/lib/log_monitor/server_log/rules/regex/sqli.txt', 'iemlav/lib/log_monitor/server_log/rules/regex/xss.txt'
def get_dist_info():
    """Return distribution information"""
    # linux_distribution is an optionally-imported module-level callable.
    return linux_distribution() if linux_distribution else "unknown"
try: import distro except ImportError: sys.path.insert(0, os.path.join( os.path.dirname(os.path.dirname( os.path.dirname(__file__))), 'requirements', 'quickstart', 'distro-1.0.4-py2.py3-none-any.whl')) import distro parser = argparse.ArgumentParser() parser.add_argument("--mk-virtualenv", default="../cfme_venv") parser.add_argument("--system-site-packages", action="store_true") parser.add_argument("--config-path", default="../cfme-qe-yamls/complete/") DISTRO_DATA = distro.linux_distribution()[:2] IS_SCRIPT = sys.argv[0] == __file__ CWD = os.getcwd() # we expect to be in the workdir IS_ROOT = os.getuid() == 0 REDHAT_BASED = os.path.isfile('/etc/redhat-release') CREATED = object() REQUIREMENT_FILE = 'requirements/frozen.txt' HAS_DNF = os.path.exists('/usr/bin/dnf') IN_VIRTUALENV = getattr(sys, 'real_prefix', None) is not None PRISTINE_ENV = dict(os.environ) REDHAT_PACKAGES_OLD = ( " python-virtualenv gcc postgresql-devel libxml2-devel" " libxslt-devel zeromq3-devel libcurl-devel" " redhat-rpm-config gcc-c++ openssl-devel"
def get_libcarla_extensions():
    """Build the setuptools Extension list for the carla.libcarla module.

    Collects include dirs, static libraries and compiler/linker flags for
    the current platform (POSIX: Ubuntu/Debian/Deepin only; Windows: MSVC).
    """
    include_dirs = ['dependencies/include']
    library_dirs = ['dependencies/lib']
    libraries = []

    sources = ['source/libcarla/libcarla.cpp']

    def walk(folder, file_filter='*'):
        # Yield every file path under *folder* matching *file_filter*.
        for root, _, filenames in os.walk(folder):
            for filename in fnmatch.filter(filenames, file_filter):
                yield os.path.join(root, filename)

    if os.name == "posix":
        # @todo Replace deprecated method.
        # linux_distro = platform.dist()[0]  # pylint: disable=W1505
        # platform.distro was removed in Python3.8
        # changed by Jiechi Zhang on Jun 22 2020
        linux_distro = distro.linux_distribution()[0]
        if linux_distro.lower() in ["ubuntu", "debian", "deepin"]:
            pwd = os.path.dirname(os.path.realpath(__file__))
            # Boost.Python static lib matching the running interpreter.
            pylib = "libboost_python%d%d.a" % (sys.version_info.major,
                                               sys.version_info.minor)
            if is_rss_variant_enabled():
                print('Building AD RSS variant.')
                extra_link_args = [
                    os.path.join(pwd, 'dependencies/lib/libcarla_client_rss.a')
                ]
            else:
                extra_link_args = [
                    os.path.join(pwd, 'dependencies/lib/libcarla_client.a')
                ]
            extra_link_args += [
                os.path.join(pwd, 'dependencies/lib/librpc.a'),
                os.path.join(pwd, 'dependencies/lib/libboost_filesystem.a'),
                os.path.join(pwd, 'dependencies/lib/libRecast.a'),
                os.path.join(pwd, 'dependencies/lib/libDetour.a'),
                os.path.join(pwd, 'dependencies/lib/libDetourCrowd.a')
            ]
            extra_compile_args = [
                '-isystem', 'dependencies/include/system', '-fPIC', '-std=c++14',
                '-Werror', '-Wall', '-Wextra', '-Wpedantic',
                '-Wno-self-assign-overloaded', '-Wdeprecated', '-Wno-shadow',
                '-Wuninitialized', '-Wunreachable-code', '-Wpessimizing-move',
                '-Wold-style-cast', '-Wnull-dereference', '-Wduplicate-enum',
                '-Wnon-virtual-dtor', '-Wheader-hygiene', '-Wconversion',
                '-Wfloat-overflow-conversion',
                '-DBOOST_ERROR_CODE_HEADER_ONLY', '-DLIBCARLA_WITH_PYTHON_SUPPORT'
            ]
            if is_rss_variant_enabled():
                extra_compile_args += ['-DLIBCARLA_RSS_ENABLED']
                extra_compile_args += [
                    '-DLIBCARLA_PYTHON_MAJOR_' + str(sys.version_info.major)
                ]
                # NOTE(review): the collapsed formatting of the original makes
                # the nesting of the following AD/RSS link libraries ambiguous;
                # upstream CARLA links them only for the RSS variant, so they
                # are kept under this guard — confirm against the project repo.
                extra_link_args += [
                    os.path.join(pwd,
                                 'dependencies/lib/libad_rss_map_integration_python'
                                 + str(sys.version_info.major) + '.a')
                ]
                extra_link_args += [
                    os.path.join(pwd,
                                 'dependencies/lib/libad_rss_map_integration.a')
                ]
                extra_link_args += [
                    os.path.join(pwd,
                                 'dependencies/lib/libad_map_access_python'
                                 + str(sys.version_info.major) + '.a')
                ]
                extra_link_args += [
                    os.path.join(pwd, 'dependencies/lib/libad_map_access.a')
                ]
                extra_link_args += [
                    os.path.join(pwd,
                                 'dependencies/lib/libad_rss_python'
                                 + str(sys.version_info.major) + '.a')
                ]
                extra_link_args += [
                    os.path.join(pwd, 'dependencies/lib/libad_rss.a')
                ]
                extra_link_args += [
                    os.path.join(pwd,
                                 'dependencies/lib/libad_physics_python'
                                 + str(sys.version_info.major) + '.a')
                ]
                extra_link_args += [
                    os.path.join(pwd, 'dependencies/lib/libad_physics.a')
                ]
                extra_link_args += [
                    os.path.join(pwd,
                                 'dependencies/lib/libad_map_opendrive_reader.a')
                ]
                extra_link_args += [
                    os.path.join(pwd,
                                 'dependencies/lib/libboost_program_options.a')
                ]
                extra_link_args += [
                    os.path.join(pwd, 'dependencies/lib/libspdlog.a')
                ]
                extra_link_args += [
                    os.path.join(pwd, 'dependencies/lib/libboost_system.so')
                ]
                extra_link_args += ['-ltbb']
                extra_link_args += ['-lrt']
                extra_link_args += ['-lproj']
            extra_link_args += [os.path.join(pwd, 'dependencies/lib', pylib)]

            if 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true':
                # CI images lack libpng: link without PNG support.
                print('Travis CI build detected: disabling PNG support.')
                extra_link_args += ['-ljpeg', '-ltiff']
                extra_compile_args += [
                    '-DLIBCARLA_IMAGE_WITH_PNG_SUPPORT=false'
                ]
            else:
                extra_link_args += ['-lpng', '-ljpeg', '-ltiff']
                extra_compile_args += [
                    '-DLIBCARLA_IMAGE_WITH_PNG_SUPPORT=true'
                ]
            # @todo Why would we need this?
            # include_dirs += ['/usr/lib/gcc/x86_64-linux-gnu/7/include']
            # library_dirs += ['/usr/lib/gcc/x86_64-linux-gnu/7']
            # extra_link_args += ['/usr/lib/gcc/x86_64-linux-gnu/7/libstdc++.a']
            extra_link_args += ['-lstdc++']
        else:
            raise NotImplementedError
    elif os.name == "nt":
        sources += [x for x in walk('dependencies/include/carla', '*.cpp')]
        pwd = os.path.dirname(os.path.realpath(__file__))
        pylib = 'libboost_python%d%d' % (sys.version_info.major,
                                         sys.version_info.minor)
        extra_link_args = ['shlwapi.lib']
        required_libs = [
            pylib, 'libboost_filesystem', 'rpc.lib', 'carla_client.lib',
            'libpng.lib', 'zlib.lib', 'Recast.lib', 'Detour.lib',
            'DetourCrowd.lib'
        ]
        # Search for files in 'PythonAPI\carla\dependencies\lib' that contains
        # the names listed in required_libs in it's file name
        libs = [
            x for x in os.listdir('dependencies/lib')
            if any(d in x for d in required_libs)
        ]
        for lib in libs:
            extra_link_args.append(os.path.join(pwd, 'dependencies/lib', lib))
        # https://docs.microsoft.com/es-es/cpp/porting/modifying-winver-and-win32-winnt
        extra_compile_args = [
            '/experimental:external', '/external:I', 'dependencies/include/system',
            '/DBOOST_ALL_NO_LIB', '/DBOOST_PYTHON_STATIC_LIB',
            '/DBOOST_ERROR_CODE_HEADER_ONLY', '/D_WIN32_WINNT=0x0600',
            '/DHAVE_SNPRINTF', '/DLIBCARLA_WITH_PYTHON_SUPPORT',
            '-DLIBCARLA_IMAGE_WITH_PNG_SUPPORT=true'
        ]
    else:
        raise NotImplementedError

    depends = [x for x in walk('source/libcarla')]
    depends += [x for x in walk('dependencies')]

    def make_extension(name, sources):
        # All extensions share the dirs/flags computed above.
        return Extension(
            name,
            sources=sources,
            include_dirs=include_dirs,
            library_dirs=library_dirs,
            libraries=libraries,
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
            language='c++14',
            depends=depends)

    print('compiling:\n - %s' % '\n - '.join(sources))

    return [make_extension('carla.libcarla', sources)]
# -*- coding: utf-8 from __future__ import unicode_literals, absolute_import import distro import django DEBUG = True USE_TZ = True # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = "!2*nm%ps%x8!ykyb^s9+!l1vcmeh+(f&de%br=js*7(5i_rmet" # needed since travis uses Ubuntu 14.04 if distro.linux_distribution() == ('Ubuntu', '16.04', 'Xenial Xerus'): SPATIALITE_LIBRARY_PATH = 'mod_spatialite' DATABASES = { "default": { "ENGINE": 'django.contrib.gis.db.backends.spatialite', "NAME": ":memory:", } } ROOT_URLCONF = "tests.urls" INSTALLED_APPS = [ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sites", 'cities',
async def stats(self, ctx):
    _("""Statistics on the bot.""")
    # Gather database counts and the PostgreSQL server version.
    async with self.bot.pool.acquire() as conn:
        characters = await conn.fetchval("SELECT COUNT(*) FROM profile;")
        items = await conn.fetchval("SELECT COUNT(*) FROM allitems;")
        pg_version = conn.get_server_version()
        pg_version = f"{pg_version.major}.{pg_version.micro} {pg_version.releaselevel}"
    d0 = self.bot.user.created_at
    d1 = datetime.datetime.now()
    delta = d1 - d0
    # Rough "hours of work" heuristic: 1.5 hours per day since bot creation.
    myhours = delta.days * 1.5
    sysinfo = distro.linux_distribution()
    # owner = await self.bot.get_user_global(self.bot.owner_id)
    # sad that teams are fucky
    owner = " and ".join(
        [str(await self.bot.get_user_global(u)) for u in self.bot.config.owners]
    )
    # Sum guild counts reported by every shard via the Sharding cog.
    guild_count = sum(
        await self.bot.cogs["Sharding"].handler("guild_count", self.bot.shard_count)
    )
    embed = discord.Embed(
        title=_("IdleRPG Statistics"),
        colour=0xB8BBFF,
        url=self.bot.BASE_URL,
        description=_("Official Support Server Invite: https://discord.gg/MSBatf6"),
    )
    embed.set_thumbnail(url=self.bot.user.avatar_url)
    embed.set_footer(
        text=f"IdleRPG {self.bot.version} | By {owner}",
        icon_url=self.bot.user.avatar_url,
    )
    embed.add_field(
        name=_("Hosting Statistics"),
        value=_(
            """\
CPU Usage: **{cpu}%**
RAM Usage: **{ram}%**
Python Version **{python}** <:python:445247273065250817>
discord.py Version **{dpy}**
Operating System: **{osname} {osversion}**
Kernel Version: **{kernel}**
PostgreSQL Version **{pg_version}**"""
        ).format(
            cpu=psutil.cpu_percent(),
            ram=psutil.virtual_memory().percent,
            python=platform.python_version(),
            dpy=pkg.get_distribution("discord.py").version,
            osname=sysinfo[0].title(),
            osversion=sysinfo[1],
            kernel=os.uname().release,
            pg_version=pg_version,
        ),
        inline=False,
    )
    embed.add_field(
        name=_("Bot Statistics"),
        value=_(
            """\
Code lines written: **{lines}**
Shards: **{shards}**
Servers: **{guild_count}**
Characters: **{characters}**
Items: **{items}**
Average hours of work: **{hours}**"""
        ).format(
            lines=self.bot.linecount,
            shards=self.bot.shard_count,
            guild_count=guild_count,
            characters=characters,
            items=items,
            hours=myhours,
        ),
        inline=False,
    )
    await ctx.send(embed=embed)
def upgrade_all(self):
    """ upgrade all packages

    This method checks if there are packages available for updating and
    update the packages if updating them won't remove any packages from
    the system. Often times when a bad source is added to the system,
    APT tends to remove a number of packages from the system when
    upgrading which is very risky.
    """
    Avalon.info('Starting automatic upgrade')
    Avalon.info('Updating APT cache')
    # Sanity check: warn when an enabled (non-commented) Ubuntu mirror is
    # present in sources.list on a non-Ubuntu system, since upgrading from
    # it can remove packages.
    with open('/etc/apt/sources.list', 'r') as aptlist:
        for line in aptlist:
            if 'ubuntu.com' in line and distro.linux_distribution(
            )[0] != 'Ubuntu' and line.replace(' ', '')[0] != '#':
                Avalon.warning('Ubuntu source detected in source.list!')
                Avalon.warning(
                    'Continue upgrading might cause severe consequences!')

                if Avalon.ask('Are you sure that you want to continue?', False):
                    break
                else:
                    Avalon.warning('Aborting system upgrade..')
                    exit(0)
    self.update()
    Avalon.info('APT cache updated')

    # Offer to import any missing signing keys discovered during update.
    if len(self.import_list) != 0:
        if Avalon.ask('Detected unimported keys, import?', True):
            if shutil.which('dirmngr') is None:
                # dirmngr is required by apt-key/gpg to retrieve keys.
                Avalon.warning('dirmngr Not installed')
                Avalon.warning('It is required for importing keys')

                # ask if user wants to install dirmngr
                if Avalon.ask('Install Now?'):
                    # NOTE(review): 'dirnmgr' looks like a typo for 'dirmngr'
                    # — confirm the intended package name before changing it.
                    self.install('dirnmgr')

                    # check dirmngr after package installation
                    if isinstance(shutil.which('dirmngr'), str):
                        Avalon.info('Installation successful')
                        self.import_keys(self.import_list)
                        Avalon.info('Keys imported')
                        Avalon.info('Updating APT cache after key importing')
                        self.update()
                    else:
                        Avalon.error('Installation Failed')
                        Avalon.error('Please check your settings')
                        Avalon.warning(
                            'dirmngr not available. Continuing without importing keys')
                else:
                    Avalon.warning('dirmngr not available')
                    Avalon.warning('Continuing without importing keys')
            else:
                self.import_keys(self.import_list)
                Avalon.info('Keys imported')
                Avalon.info('Updating APT cache after key importing')
                self.update()

        # Second update after keys are imported
        self.update()

    # if there are no upgrades available
    Avalon.debug_info('Checking package updates')
    if self.no_upgrades():
        Avalon.info('No upgrades available')
    # if upgrades are available
    else:
        Avalon.debug_info('Checking if full upgrade is safe')
        # if upgrade is safe, use -y flag on apt-get full-upgrade
        # otherwise, let user confirm the upgrade
        if self.full_upgrade_safe():
            Avalon.info('Full upgrade is safe')
            Avalon.info('Starting APT full upgrade')
            self.full_upgrade()
        else:
            Avalon.warning('Full upgrade is NOT safe')
            Avalon.warning('Requiring human confirmation')
            self.manual_full_upgrade()
def __define_linux_os():
    """Build a Version object from the short distro id and release string."""
    name, release = distro.linux_distribution(full_distribution_name=False)[:2]
    return Version(name, release)
def system_info():
    '''
    Get the system information.
    Return a tuple with the platform type, the architecture, the
    distribution, the distribution version and the number of CPUs.
    '''
    # Get the platform info: prefer the OS env var (set on Windows),
    # fall back to sys.platform.
    platform = os.environ.get('OS', '').lower()
    if not platform:
        platform = sys.platform
    if platform.startswith('win'):
        platform = Platform.WINDOWS
    elif platform.startswith('darwin'):
        platform = Platform.DARWIN
    elif platform.startswith('linux'):
        platform = Platform.LINUX
    else:
        raise FatalError(_("Platform %s not supported") % platform)

    # Get the architecture info
    if platform == Platform.WINDOWS:
        arch = windows_arch()
        if arch in ('x64', 'amd64'):
            arch = Architecture.X86_64
        elif arch == 'x86':
            arch = Architecture.X86
        else:
            raise FatalError(_("Windows arch %s is not supported") % arch)
    else:
        # uname()[4] is the machine field, e.g. 'x86_64' or 'armv7l'.
        uname = os.uname()
        arch = uname[4]
        if arch == 'x86_64':
            arch = Architecture.X86_64
        elif arch.endswith('86'):
            arch = Architecture.X86
        elif arch.startswith('armv7'):
            arch = Architecture.ARMv7
        elif arch.startswith('arm'):
            arch = Architecture.ARM
        else:
            raise FatalError(_("Architecture %s not supported") % arch)

    # Get the distro info
    if platform == Platform.LINUX:
        # platform.linux_distribution() was removed in Python 3.8; use the
        # third-party 'distro' package there, stdlib platform otherwise.
        if sys.version_info >= (3, 8, 0):
            try:
                import distro
            except ImportError:
                print('''Python >= 3.8 detected and the 'distro' python package was not found. Please install the 'python3-distro' or 'python-distro' package from your linux package manager or from pypi using pip. Terminating.''',
                      file=sys.stderr)
                sys.exit(1)
            d = distro.linux_distribution()
        else:
            d = pplatform.linux_distribution()

        # Empty triple => detection failed; probe well-known release files.
        if d[0] == '' and d[1] == '' and d[2] == '':
            if os.path.exists('/etc/arch-release'):
                # FIXME: the python2.7 platform module does not support Arch Linux.
                # Mimic python3.4 platform.linux_distribution() output.
                d = ('arch', 'Arch', 'Linux')
            elif os.path.exists('/etc/os-release'):
                with open('/etc/os-release', 'r') as f:
                    # readlines() consumes the file; seek(0) below rewinds
                    # for the fallback parse.
                    if 'ID="amzn"\n' in f.readlines():
                        d = ('RedHat', 'amazon', '')
                    else:
                        f.seek(0, 0)
                        for line in f:
                            # skip empty lines and comment lines
                            if line.strip() and not line.lstrip().startswith('#'):
                                k, v = line.rstrip().split("=")
                                if k == 'NAME':
                                    name = v.strip('"')
                                elif k == 'VERSION_ID':
                                    version = v.strip('"')
                        d = (name, version, '')

        # NOTE: 'distro' below shadows the module imported above; it holds
        # the Distro enum value from here on.
        if d[0] in ['Ubuntu', 'debian', 'LinuxMint']:
            distro = Distro.DEBIAN
            # d[2] is the codename (Ubuntu + matching Mint codenames);
            # d[1] is the numeric version (Debian).
            if d[2] in ['maverick', 'isadora']:
                distro_version = DistroVersion.UBUNTU_MAVERICK
            elif d[2] in ['lucid', 'julia']:
                distro_version = DistroVersion.UBUNTU_LUCID
            elif d[2] in ['natty', 'katya']:
                distro_version = DistroVersion.UBUNTU_NATTY
            elif d[2] in ['oneiric', 'lisa']:
                distro_version = DistroVersion.UBUNTU_ONEIRIC
            elif d[2] in ['precise', 'maya']:
                distro_version = DistroVersion.UBUNTU_PRECISE
            elif d[2] in ['quantal', 'nadia']:
                distro_version = DistroVersion.UBUNTU_QUANTAL
            elif d[2] in ['raring', 'olivia']:
                distro_version = DistroVersion.UBUNTU_RARING
            elif d[2] in ['saucy', 'petra']:
                distro_version = DistroVersion.UBUNTU_SAUCY
            elif d[2] in ['trusty', 'qiana', 'rebecca']:
                distro_version = DistroVersion.UBUNTU_TRUSTY
            elif d[2] in ['utopic']:
                distro_version = DistroVersion.UBUNTU_UTOPIC
            elif d[2] in ['vivid']:
                distro_version = DistroVersion.UBUNTU_VIVID
            elif d[2] in ['wily']:
                distro_version = DistroVersion.UBUNTU_WILY
            elif d[2] in ['xenial', 'sarah', 'serena', 'sonya', 'sylvia']:
                distro_version = DistroVersion.UBUNTU_XENIAL
            elif d[2] in ['artful']:
                distro_version = DistroVersion.UBUNTU_ARTFUL
            elif d[2] in ['bionic', 'tara', 'tessa', 'tina', 'tricia']:
                distro_version = DistroVersion.UBUNTU_BIONIC
            elif d[2] in ['cosmic']:
                distro_version = DistroVersion.UBUNTU_COSMIC
            elif d[2] in ['disco']:
                distro_version = DistroVersion.UBUNTU_DISCO
            elif d[2] in ['eoan']:
                distro_version = DistroVersion.UBUNTU_EOAN
            elif d[1].startswith('6.'):
                distro_version = DistroVersion.DEBIAN_SQUEEZE
            elif d[1].startswith('7.') or d[1].startswith('wheezy'):
                distro_version = DistroVersion.DEBIAN_WHEEZY
            elif d[1].startswith('8.') or d[1].startswith('jessie'):
                distro_version = DistroVersion.DEBIAN_JESSIE
            elif d[1].startswith('9.') or d[1].startswith('stretch'):
                distro_version = DistroVersion.DEBIAN_STRETCH
            elif d[1].startswith('10.') or d[1].startswith('buster'):
                distro_version = DistroVersion.DEBIAN_BUSTER
            elif d[1].startswith('11.') or d[1].startswith('bullseye'):
                distro_version = DistroVersion.DEBIAN_BULLSEYE
            else:
                raise FatalError("Distribution '%s' not supported" % str(d))
        elif d[0] in [
                'RedHat', 'Fedora', 'CentOS',
                'Red Hat Enterprise Linux Server', 'CentOS Linux'
        ]:
            distro = Distro.REDHAT
            # Fedora uses a bare integer version; RHEL/CentOS use dotted.
            if d[1] == '16':
                distro_version = DistroVersion.FEDORA_16
            elif d[1] == '17':
                distro_version = DistroVersion.FEDORA_17
            elif d[1] == '18':
                distro_version = DistroVersion.FEDORA_18
            elif d[1] == '19':
                distro_version = DistroVersion.FEDORA_19
            elif d[1] == '20':
                distro_version = DistroVersion.FEDORA_20
            elif d[1] == '21':
                distro_version = DistroVersion.FEDORA_21
            elif d[1] == '22':
                distro_version = DistroVersion.FEDORA_22
            elif d[1] == '23':
                distro_version = DistroVersion.FEDORA_23
            elif d[1] == '24':
                distro_version = DistroVersion.FEDORA_24
            elif d[1] == '25':
                distro_version = DistroVersion.FEDORA_25
            elif d[1] == '26':
                distro_version = DistroVersion.FEDORA_26
            elif d[1] == '27':
                distro_version = DistroVersion.FEDORA_27
            elif d[1] == '28':
                distro_version = DistroVersion.FEDORA_28
            elif d[1] == '29':
                distro_version = DistroVersion.FEDORA_29
            elif d[1] == '30':
                distro_version = DistroVersion.FEDORA_30
            elif d[1] == '31':
                distro_version = DistroVersion.FEDORA_31
            elif d[1].startswith('6.'):
                distro_version = DistroVersion.REDHAT_6
            elif d[1].startswith('7.'):
                distro_version = DistroVersion.REDHAT_7
            elif d[1].startswith('8.'):
                distro_version = DistroVersion.REDHAT_8
            elif d[1] == 'amazon':
                distro_version = DistroVersion.AMAZON_LINUX
            else:
                # FIXME Fill this
                raise FatalError("Distribution '%s' not supported" % str(d))
        elif d[0].strip() in ['openSUSE']:
            distro = Distro.SUSE
            if d[1] == '42.2':
                distro_version = DistroVersion.OPENSUSE_42_2
            elif d[1] == '42.3':
                distro_version = DistroVersion.OPENSUSE_42_3
            else:
                # FIXME Fill this
                raise FatalError("Distribution OpenSuse '%s' "
                                 "not supported" % str(d))
        elif d[0].strip() in ['openSUSE Tumbleweed']:
            distro = Distro.SUSE
            distro_version = DistroVersion.OPENSUSE_TUMBLEWEED
        elif d[0].strip() in ['arch', 'Arch Linux']:
            distro = Distro.ARCH
            distro_version = DistroVersion.ARCH_ROLLING
        elif d[0].strip() in ['Gentoo Base System']:
            distro = Distro.GENTOO
            distro_version = DistroVersion.GENTOO_VERSION
        else:
            raise FatalError("Distribution '%s' not supported" % str(d))
    elif platform == Platform.WINDOWS:
        distro = Distro.WINDOWS
        win32_ver = pplatform.win32_ver()[0]
        dmap = {
            'xp': DistroVersion.WINDOWS_XP,
            'vista': DistroVersion.WINDOWS_VISTA,
            '7': DistroVersion.WINDOWS_7,
            'post2008Server': DistroVersion.WINDOWS_8,
            '8': DistroVersion.WINDOWS_8,
            'post2012Server': DistroVersion.WINDOWS_8_1,
            '8.1': DistroVersion.WINDOWS_8_1,
            '10': DistroVersion.WINDOWS_10
        }
        if win32_ver in dmap:
            distro_version = dmap[win32_ver]
        else:
            raise FatalError("Windows version '%s' not supported" % win32_ver)
    elif platform == Platform.DARWIN:
        distro = Distro.OS_X
        ver = pplatform.mac_ver()[0]
        if ver.startswith('10.15'):
            distro_version = DistroVersion.OS_X_CATALINA
        elif ver.startswith('10.14'):
            distro_version = DistroVersion.OS_X_MOJAVE
        elif ver.startswith('10.13'):
            distro_version = DistroVersion.OS_X_HIGH_SIERRA
        elif ver.startswith('10.12'):
            distro_version = DistroVersion.OS_X_SIERRA
        elif ver.startswith('10.11'):
            distro_version = DistroVersion.OS_X_EL_CAPITAN
        elif ver.startswith('10.10'):
            distro_version = DistroVersion.OS_X_YOSEMITE
        elif ver.startswith('10.9'):
            distro_version = DistroVersion.OS_X_MAVERICKS
        elif ver.startswith('10.8'):
            distro_version = DistroVersion.OS_X_MOUNTAIN_LION
        else:
            raise FatalError("Mac version %s not supported" % ver)

    num_of_cpus = determine_num_of_cpus()

    return platform, arch, distro, distro_version, num_of_cpus
def get(self, container=False): """ Get host stats (uptime, cpu, ram, etc) """ host_cpu_infos = lwp.host_cpu_infos() cpu_count_logical = psutil.cpu_count() cpu_count_physical = psutil.cpu_count(logical=False) cpu_percent = lwp.host_cpu_percent() virtual_memory = psutil.virtual_memory() swap_memory = psutil.swap_memory() disk_partitions = psutil.disk_partitions() disk_partitions_usage = [] for partition in disk_partitions: partition_data = psutil.disk_usage(partition.mountpoint) disk_partitions_usage.append({ 'name': partition.mountpoint, 'total': partition_data.total, 'used': partition_data.used, 'free': partition_data.free, 'percent': partition_data.percent }) net_if_addrs = psutil.net_if_addrs() adapters = [] for adapter in net_if_addrs: adapters.append({ 'name': adapter, 'ipv4': None, 'ipv6': None }) index = len(adapters) - 1 for snic in net_if_addrs[adapter]: if snic.family.name == 'AF_INET': adapters[index]['ipv4'] = snic.address if snic.family.name == 'AF_INET6': adapters[index]['ipv6'] = snic.address json_output = { 'uptime': lwp.host_uptime(), 'hostname': socket.gethostname(), 'distrib': ' '.join(linux_distribution()), 'disk': disk_partitions_usage, 'cpu': { 'usage': cpu_percent, 'model': host_cpu_infos['name'], 'physical': cpu_count_physical, 'logical': cpu_count_logical }, 'memory': { 'virtual': { 'total': virtual_memory.total, 'used': virtual_memory.used, 'free': virtual_memory.free, 'percent': virtual_memory.percent }, 'swap': { 'total': swap_memory.total, 'used': swap_memory.used, 'free': swap_memory.free, 'percent': swap_memory.percent } }, 'adapters': adapters, 'kernel': platform.release(), 'lxc': { 'version': lxc.version, 'lxcpath': lxc.get_global_config_item('lxc.lxcpath'), 'default_config': lxc.get_global_config_item('lxc.default_config') } } if not container: output = { 'attributes': json_output } else: output = json_output return {'data': output}
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import distro print 'os_release_info: {0}'.format(distro.os_release_info()) print 'lsb_release_info: {0}'.format(distro.lsb_release_info()) print 'distro_release_info: {0}'.format(distro.distro_release_info()) print 'id: {0}'.format(distro.id()) print 'name: {0}'.format(distro.name()) print 'name_pretty: {0}'.format(distro.name(True)) print 'version: {0}'.format(distro.version()) print 'version_pretty: {0}'.format(distro.version(True)) print 'like: {0}'.format(distro.like()) print 'codename: {0}'.format(distro.codename()) print 'linux_distribution_full: {0}'.format(distro.linux_distribution()) print 'linux_distribution: {0}'.format(distro.linux_distribution(False)) print 'major_version: {0}'.format(distro.major_version()) print 'minor_version: {0}'.format(distro.minor_version()) print 'build_number: {0}'.format(distro.build_number())
if platform == "linux" or platform == "linux2": # linux... # OS Support has not been Tested # please report if your os has problems print("Installing nodejs and npm on you OSX, might take a while: ") print("--> INSTALLATION STARTS HERE <--\n\n") psxmlgen = subprocess.Popen([ 'bash' + pkg_resources.resource_filename('pynodejs', 'data/Linux.sh'), ], cwd=os.getcwd(), shell=True).wait() linux_disto = distro.linux_distribution( full_distribution_name=False)[0] # OS Details if (type(psxmlgen) != int): raise Exception( f'Please an issue in for your os[{linux_disto}] https://github.com/hidely/pynodejs' ) print("--> INSTALLATION COMPLETES HERE <--\n") elif platform == "darwin": # OSX... # OS Support has not been Tested # please report if your os has problems print("Installing nodejs and npm on you OSX, might take a while: ") print("--> INSTALLATION STARTS HERE <--\n\n")
def is_host_rh_family(): distro_name = distro.linux_distribution(full_distribution_name=False)[0] return distro_name in RHEL_CLONES + ['fedora']
def show_install(show_nvidia_smi:bool=False):
    "Print user's setup information: python -c 'import fastai; fastai.show_install()'"

    import platform, fastai.version, subprocess

    # `rep` collects [label, value] rows that get printed at the end.
    rep = []
    rep.append(["platform", platform.platform()])
    # Optional packages whose absence is reported at the end.
    opt_mods = []

    if platform.system() == 'Linux':
        try:
            import distro
        except ImportError:
            opt_mods.append('distro');
            # partial distro info
            rep.append(["distro", platform.uname().version])
        else:
            # full distro info
            rep.append(["distro", ' '.join(distro.linux_distribution())])

    rep.append(["python", platform.python_version()])
    rep.append(["fastai", fastai.__version__])
    rep.append(["torch", torch.__version__])

    # nvidia-smi: probe for the driver by running the tool.
    cmd = "nvidia-smi"
    have_nvidia_smi = False
    try:
        result = subprocess.run(cmd.split(), shell=False, check=False, stdout=subprocess.PIPE)
    except:
        pass
    else:
        if result.returncode == 0 and result.stdout:
            have_nvidia_smi = True

    # XXX: if nvidia-smi is not available, another check could be:
    # /proc/driver/nvidia/version on most systems, since it's the
    # currently active version

    if have_nvidia_smi:
        smi = result.stdout.decode('utf-8')
        # matching: "Driver Version: 396.44"
        match = re.findall(r'Driver Version: +(\d+\.\d+)', smi)
        if match:
            rep.append(["nvidia dr.", match[0]])

    # nvcc: probe for the CUDA toolkit compiler.
    cmd = "nvcc --version"
    have_nvcc = False
    try:
        result = subprocess.run(cmd.split(), shell=False, check=False, stdout=subprocess.PIPE)
    except:
        pass
    else:
        if result.returncode == 0 and result.stdout:
            have_nvcc = True

    nvcc_cuda_ver = "Unknown"
    if have_nvcc:
        nvcc = result.stdout.decode('utf-8')
        # matching: "Cuda compilation tools, release 9.2, V9.2.148"
        match = re.findall(r'V(\d+\.\d+.\d+)', nvcc)
        if match:
            nvcc_cuda_ver = match[0]

    cuda_is_available = torch.cuda.is_available()
    if not cuda_is_available:
        rep.append(["torch cuda", "Not available"])
    # NOTE(review): this append runs unconditionally, so when CUDA is not
    # available BOTH a "Not available" row and a torch.version.cuda row are
    # emitted — an `else:` here looks intended; confirm against upstream.
    rep.append(["torch cuda", torch.version.cuda])

    rep.append(["nvcc cuda", nvcc_cuda_ver])

    # disable this info for now, seems to be available even on cpu-only systems
    #rep.append(["cudnn", torch.backends.cudnn.version()])
    #rep.append(["cudnn avail", torch.backends.cudnn.enabled])

    gpu_cnt = torch.cuda.device_count()
    rep.append(["torch gpus", gpu_cnt])

    # it's possible that torch might not see what nvidia-smi sees?
    gpu_total_mem = []
    if have_nvidia_smi:
        try:
            cmd = "nvidia-smi --query-gpu=memory.total --format=csv,nounits,noheader"
            result = subprocess.run(cmd.split(), shell=False, check=False, stdout=subprocess.PIPE)
        except:
            print("have nvidia-smi, but failed to query it")
        else:
            if result.returncode == 0 and result.stdout:
                output = result.stdout.decode('utf-8')
                gpu_total_mem = [int(x) for x in output.strip().split('\n')]

    # information for each gpu
    for i in range(gpu_cnt):
        rep.append([f" [gpu{i}]", None])
        rep.append([" name", torch.cuda.get_device_name(i)])
        if gpu_total_mem:
            rep.append([" total mem", f"{gpu_total_mem[i]}MB"])

    print("\n\n```")

    # Align the label column to the widest label.
    keylen = max([len(e[0]) for e in rep])
    for e in rep:
        print(f"{e[0]:{keylen}}", (f": {e[1]}" if e[1] else ""))

    if have_nvidia_smi:
        if show_nvidia_smi == True:
            print(f"\n{smi}")
    else:
        if gpu_cnt:
            # have gpu, but no nvidia-smi
            print("no nvidia-smi is found")
        else:
            print("no supported gpus found on this system")

    print("```\n")

    print("Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n")

    if opt_mods:
        print("Optional package(s) to enhance the diagnostics can be installed with:")
        print(f"pip install {' '.join(opt_mods)}")
        print("Once installed, re-run this utility to get the additional information")
if sys.version_info >= (3, 6): import distro else: import platform as distro if 'freebsd' in sys.platform: # Note: Do not use .version() method which is from either platform or distro. # platform.version() and distro.version() return different values. # platform.version() returns 'FreeBSD 11.2-RELEASE-p9.....'. # distro.version() returns '11.2'. distro_name = 'freebsd' # distro_version is not used for FreeBSD later in this code. distro_version = None else: distribution = distro.linux_distribution() distro_name = distribution[0].lower() distro_version = distribution[1].split('.')[0] distro_utils = None if 'centos' in distro_name and distro_version == '6': import google_compute_engine.distro_lib.el_6.utils as distro_utils elif 'centos' in distro_name: import google_compute_engine.distro_lib.el_7.utils as distro_utils elif 'red hat enterprise linux' in distro_name and distro_version == '6': import google_compute_engine.distro_lib.el_6.utils as distro_utils elif 'red hat enterprise linux' in distro_name: import google_compute_engine.distro_lib.el_7.utils as distro_utils elif 'fedora' in distro_name: import google_compute_engine.distro_lib.el_7.utils as distro_utils elif 'debian' in distro_name and distro_version == '8':
import os import distro import pytest @pytest.mark.skipif(os.getuid() != 0, reason='User is not a root') @pytest.mark.skipif( distro.linux_distribution()[0] == 'Fedora', reason='default.target.wants does not exists on Fedora distro', ) def test_create_resume_service(current_actor_context): current_actor_context.run() service_name = 'leapp_resume.service' service_path = '/etc/systemd/system/{}'.format(service_name) symlink_path = '/etc/systemd/system/default.target.wants/{}'.format( service_name) try: assert os.path.isfile(service_path) assert os.path.isfile(symlink_path) finally: os.unlink(service_path) os.unlink(symlink_path)
def __init__(
    self,
    choice=None,
    no_interactive=False,
    hg_configure=False,
    no_system_changes=False,
    mach_context=None,
):
    """Select and instantiate the bootstrapper class for the current OS.

    The chosen instance is stored in self.instance; raises
    NotImplementedError when the platform/distro is unsupported.
    """
    self.instance = None
    self.choice = choice
    self.hg_configure = hg_configure
    self.no_system_changes = no_system_changes
    self.mach_context = mach_context
    cls = None
    args = {
        "no_interactive": no_interactive,
        "no_system_changes": no_system_changes,
    }

    if sys.platform.startswith("linux"):
        # distro package provides reliable ids for popular distributions so
        # we use those instead of the full distribution name
        dist_id, version, codename = distro.linux_distribution(
            full_distribution_name=False)

        if dist_id in ("centos", "fedora"):
            cls = CentOSFedoraBootstrapper
            args["distro"] = dist_id
        elif dist_id in DEBIAN_DISTROS:
            cls = DebianBootstrapper
            args["distro"] = dist_id
            args["codename"] = codename
        elif dist_id in ("gentoo", "funtoo"):
            cls = GentooBootstrapper
        # BUGFIX: ("solus"), ("arch") and ("void") were plain strings
        # (missing trailing comma), so `dist_id in (...)` performed a
        # SUBSTRING test — e.g. dist_id == "us" matched "solus".  Use
        # one-element tuples for real membership tests.
        elif dist_id in ("solus",):
            cls = SolusBootstrapper
        elif dist_id in ("arch",) or os.path.exists("/etc/arch-release"):
            cls = ArchlinuxBootstrapper
        elif dist_id in ("void",):
            cls = VoidBootstrapper
        elif dist_id in (
            "opensuse",
            "opensuse-leap",
            "opensuse-tumbleweed",
            "suse",
        ):
            cls = OpenSUSEBootstrapper
        else:
            raise NotImplementedError("Bootstrap support for this Linux "
                                      "distro not yet available: " + dist_id)
        args["version"] = version
        args["dist_id"] = dist_id
    elif sys.platform.startswith("darwin"):
        # TODO Support Darwin platforms that aren't OS X.
        osx_version = platform.mac_ver()[0]
        if platform.machine() == "arm64":
            cls = OSXBootstrapperLight
        else:
            cls = OSXBootstrapper
        args["version"] = osx_version
    elif sys.platform.startswith("openbsd"):
        cls = OpenBSDBootstrapper
        args["version"] = platform.uname()[2]
    elif sys.platform.startswith("dragonfly") or sys.platform.startswith(
            "freebsd"):
        cls = FreeBSDBootstrapper
        args["version"] = platform.release()
        args["flavor"] = platform.system()
    elif sys.platform.startswith("win32") or sys.platform.startswith(
            "msys"):
        if "MOZILLABUILD" in os.environ:
            cls = MozillaBuildBootstrapper
        else:
            cls = WindowsBootstrapper

    if cls is None:
        raise NotImplementedError("Bootstrap support is not yet available "
                                  "for your OS.")

    self.instance = cls(**args)
def start_retrace(self, custom_arch=None):
    """Run the full retrace pipeline for self.task.

    Reads the crash data, builds a mock chroot config, runs mock and gdb,
    stores the backtrace and crash statistics.  Calls self._fail() on any
    unrecoverable error; returns True on success.
    """
    self.hook_start()

    task = self.task
    crashdir = os.path.join(task.get_savedir(), "crash")
    corepath = os.path.join(crashdir, "coredump")

    # Core size is best-effort statistics only; ignore a missing core here.
    try:
        self.stats["coresize"] = os.path.getsize(corepath)
    except:
        pass

    arch = self.read_architecture(custom_arch, corepath)
    self.stats["arch"] = arch

    crash_package, pkgdata = self.read_package_file(crashdir)
    self.stats["package"] = pkgdata["name"]
    # Include the epoch in the recorded version only when it is non-zero.
    if pkgdata["epoch"] != 0:
        self.stats["version"] = "%s:%s-%s" % (pkgdata["epoch"], pkgdata["version"], pkgdata["release"])
    else:
        self.stats["version"] = "%s-%s" % (pkgdata["version"], pkgdata["release"])

    release, distribution, version = self.read_release_file(crashdir, crash_package)

    releaseid = "%s-%s-%s" % (distribution, version, arch)
    if not releaseid in get_supported_releases():
        log_error("Release '%s' is not supported" % releaseid)
        self._fail()

    if not is_package_known(crash_package, arch, releaseid):
        log_error("Package '%s.%s' was not recognized.\nIs it a part of "
                  "official %s repositories?" % (crash_package, arch, release))
        self._fail()

    self.hook_pre_prepare_debuginfo()

    packages, missing, self.fafrepo = self.read_packages(crashdir, releaseid, crash_package, distribution)

    self.hook_post_prepare_debuginfo()
    self.hook_pre_prepare_mock()

    # create mock config file
    try:
        repopath = os.path.join(CONFIG["RepoDir"], releaseid)
        linux_dist = distro.linux_distribution(full_distribution_name=False)
        with open(os.path.join(task.get_savedir(), RetraceTask.MOCK_DEFAULT_CFG), "w") as mockcfg:
            mockcfg.write("config_opts['root'] = '%d'\n" % task.get_taskid())
            mockcfg.write("config_opts['target_arch'] = '%s'\n" % arch)
            mockcfg.write("config_opts['chroot_setup_cmd'] = '")
            # Fedora's dnf and el-yum take different "keep going" flags.
            if linux_dist[0] == "fedora":
                mockcfg.write("--setopt=strict=0")
            else:
                mockcfg.write("--skip-broken")
            mockcfg.write(" install %s abrt-addon-ccpp shadow-utils %s rpm'\n" % (" ".join(packages), self.plugin.gdb_package))
            mockcfg.write("config_opts['releasever'] = '%s'\n" % linux_dist[1])
            if linux_dist[0] == "fedora":
                mockcfg.write("config_opts['package_manager'] = 'dnf'\n")
            mockcfg.write("config_opts['plugin_conf']['ccache_enable'] = False\n")
            mockcfg.write("config_opts['plugin_conf']['yum_cache_enable'] = False\n")
            mockcfg.write("config_opts['plugin_conf']['root_cache_enable'] = False\n")
            mockcfg.write("config_opts['plugin_conf']['bind_mount_enable'] = True\n")
            mockcfg.write("config_opts['plugin_conf']['bind_mount_opts'] = { 'create_dirs': True,\n")
            mockcfg.write(" 'dirs': [\n")
            # Bind-mount the local repo and the crash dir into the chroot.
            mockcfg.write(" ('%s', '%s'),\n" % (repopath, repopath))
            mockcfg.write(" ('%s', '/var/spool/abrt/crash'),\n" % crashdir)
            if CONFIG["UseFafPackages"]:
                mockcfg.write(" ('%s', '/packages'),\n" % self.fafrepo)
            mockcfg.write(" ] }\n")
            mockcfg.write("\n")
            mockcfg.write("config_opts['yum.conf'] = \"\"\"\n")
            mockcfg.write("[main]\n")
            mockcfg.write("cachedir=/var/cache/yum\n")
            mockcfg.write("debuglevel=1\n")
            mockcfg.write("reposdir=%s\n" % os.devnull)
            mockcfg.write("logfile=/var/log/yum.log\n")
            mockcfg.write("retries=20\n")
            mockcfg.write("obsoletes=1\n")
            # GPG checking is skipped for rawhide, which has no stable key.
            if version != "rawhide" and CONFIG["RequireGPGCheck"]:
                mockcfg.write("gpgcheck=1\n")
            else:
                mockcfg.write("gpgcheck=0\n")
            mockcfg.write("assumeyes=1\n")
            mockcfg.write("syslog_ident=mock\n")
            mockcfg.write("syslog_device=\n")
            mockcfg.write("\n")
            mockcfg.write("#repos\n")
            mockcfg.write("\n")
            mockcfg.write("[%s]\n" % distribution)
            mockcfg.write("name=%s\n" % releaseid)
            mockcfg.write("baseurl=file://%s/\n" % repopath)
            mockcfg.write("failovermethod=priority\n")
            if version != "rawhide" and CONFIG["RequireGPGCheck"]:
                mockcfg.write("gpgkey=file:///usr/share/retrace-server/gpg/%s-%s\n" % (distribution, version))
            mockcfg.write("\"\"\"\n")

        # symlink defaults from /etc/mock
        os.symlink("/etc/mock/site-defaults.cfg",
                   os.path.join(task.get_savedir(), RetraceTask.MOCK_SITE_DEFAULTS_CFG))
        os.symlink("/etc/mock/logging.ini",
                   os.path.join(task.get_savedir(), RetraceTask.MOCK_LOGGING_INI))
    except Exception as ex:
        log_error("Unable to create mock config file: %s" % ex)
        self._fail()

    # run retrace
    task.set_status(STATUS_INIT)
    log_info(STATUS[STATUS_INIT])

    self._retrace_run(25, ["/usr/bin/mock", "init", "--resultdir", task.get_savedir() + "/log", "--configdir", task.get_savedir()])

    self.hook_post_prepare_mock()
    self.hook_pre_retrace()

    # When faf packages are used, unpack them inside the chroot first.
    if CONFIG["UseFafPackages"]:
        self._retrace_run(26, ["/usr/bin/mock", "--configdir", task.get_savedir(), "shell", "--", "bash -c 'for PKG in /packages/*; do rpm2cpio $PKG | cpio -muid --quiet; done'"])
    self._retrace_run(27, ["/usr/bin/mock", "--configdir", task.get_savedir(), "shell", "--", "chgrp -R mock /var/spool/abrt/crash"])

    # generate backtrace
    task.set_status(STATUS_BACKTRACE)
    log_info(STATUS[STATUS_BACKTRACE])

    try:
        backtrace, exploitable = run_gdb(task.get_savedir(), self.plugin)
    except Exception as ex:
        log_error(str(ex))
        self._fail()

    task.set_backtrace(backtrace)
    if exploitable is not None:
        task.add_misc("exploitable", exploitable)

    self.hook_post_retrace()

    # does not work at the moment
    rootsize = 0

    if not task.get_type() in [TASK_DEBUG, TASK_RETRACE_INTERACTIVE]:
        # clean up temporary data
        task.set_status(STATUS_CLEANUP)
        log_info(STATUS[STATUS_CLEANUP])

        self.clean_task()

        # ignore error: workdir = savedir => workdir is not empty
        # NOTE(review): `workdir` is not defined anywhere in this method —
        # presumably set elsewhere on the instance/module; verify.
        if CONFIG["UseWorkDir"]:
            try:
                os.rmdir(workdir)
            except:
                pass

    # save crash statistics
    task.set_status(STATUS_STATS)
    log_info(STATUS[STATUS_STATS])

    task.set_finished_time(int(time.time()))
    self.stats["duration"] = int(time.time()) - self.stats["starttime"]
    self.stats["status"] = STATUS_SUCCESS

    # Statistics are best-effort: failures here only produce a warning.
    try:
        con = init_crashstats_db()
        statsid = save_crashstats(self.stats, con)
        save_crashstats_success(statsid, self.prerunning, len(get_active_tasks()), rootsize, con)
        save_crashstats_packages(statsid, packages[1:], con)
        if missing:
            save_crashstats_build_ids(statsid, missing, con)
        con.close()
    except Exception as ex:
        log_warn(str(ex))

    # publish log => finish task
    log_info("Retrace took %d seconds" % self.stats["duration"])
    log_info(STATUS[STATUS_SUCCESS])

    task.set_status(STATUS_SUCCESS)

    self.hook_success()

    return True
def __virtual__(): """Check for Gentoo family.""" if "Gentoo" in distro.linux_distribution()[0]: return "eselect_grain" return (False, "This is not a Gentoo family OS")
def get_linux_distribution_name(): linux_distribution = distro.linux_distribution(full_distribution_name=False) linux_distribution_name = linux_distribution[0] return linux_distribution_name
# Needed to check version of python from varken import structures # noqa from varken.ombi import OmbiAPI from varken.unifi import UniFiAPI from varken import VERSION, BRANCH, BUILD_DATE from varken.sonarr import SonarrAPI from varken.radarr import RadarrAPI from varken.lidarr import LidarrAPI from varken.iniparser import INIParser from varken.dbmanager import DBManager from varken.helpers import GeoIPHandler from varken.tautulli import TautulliAPI from varken.sickchill import SickChillAPI from varken.varkenlogger import VarkenLogger PLATFORM_LINUX_DISTRO = ' '.join(x for x in linux_distribution() if x) def thread(job, **kwargs): worker = Thread(target=job, kwargs=dict(**kwargs)) worker.start() if __name__ == "__main__": parser = ArgumentParser( prog='varken', description= 'Command-line utility to aggregate data from the plex ecosystem into InfluxDB', formatter_class=RawTextHelpFormatter) parser.add_argument("-d",
return True # --- main ----------------------------------------------------------------------------------------- if __name__ == '__main__': # setup logging facility logger = getLogger('1.0') if platform.system() == 'Linux': logger.info('Operating System type identified: Linux, {}'.format( os_type())) try: linux_distro = distro.linux_distribution()[0].lower() logger.info( 'Linux distribution identified as {}'.format(linux_distro)) except Exception: logger.exception( 'Unable to id distribution using python distro library') linux_distro = os_type() # start configuration local_profile_setup(linux_distro) else: logger.info('Operating System type identified: {}'.format(os_type())) sys.exit(0)
def system_information():
    """
    Report system versions.

    Generator yielding (name, value) pairs: system, dist, release,
    machine, version and locale.
    """
    # Late import so that when getting called from setup.py does not break
    from distro import linux_distribution

    def system_version():
        """
        Return host system version.
        """
        lin_ver = linux_distribution()
        mac_ver = platform.mac_ver()
        win_ver = platform.win32_ver()

        # linux_distribution() will return a
        # distribution on OS X and Windows.
        # Check mac_ver and win_ver first,
        # then lin_ver.
        if mac_ver[0]:
            if isinstance(mac_ver[1], (tuple, list)) and "".join(mac_ver[1]):
                return " ".join([mac_ver[0], ".".join(mac_ver[1]), mac_ver[2]])
            else:
                return " ".join([mac_ver[0], mac_ver[2]])
        elif win_ver[0]:
            return " ".join(win_ver)
        elif lin_ver[0]:
            return " ".join(lin_ver)
        else:
            return ""

    if platform.win32_ver()[0]:
        # Get the version and release info based on the Windows Operating
        # System Product Name. As long as Microsoft maintains a similar format
        # this should be future proof
        import win32api  # pylint: disable=3rd-party-module-not-gated
        import win32con  # pylint: disable=3rd-party-module-not-gated

        # Get the product name from the registry
        hkey = win32con.HKEY_LOCAL_MACHINE
        key = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion"
        value_name = "ProductName"
        reg_handle = win32api.RegOpenKey(hkey, key)

        # Returns a tuple of (product_name, value_type)
        product_name, _ = win32api.RegQueryValueEx(reg_handle, value_name)

        version = "Unknown"
        release = ""
        if "Server" in product_name:
            for item in product_name.split(" "):
                # If it's all digits, then it's version
                if re.match(r"\d+", item):
                    version = item
                # If it starts with R and then numbers, it's the release
                # ie: R2
                if re.match(r"^R\d+$", item):
                    release = item
            release = "{}Server{}".format(version, release)
        else:
            for item in product_name.split(" "):
                # If it's a number, decimal number, Thin or Vista, then it's the
                # version
                if re.match(r"^(\d+(\.\d+)?)|Thin|Vista$", item):
                    version = item
            release = version

        _, ver, service_pack, extra = platform.win32_ver()
        version = " ".join([release, ver, service_pack, extra])
    else:
        version = system_version()
        release = platform.release()

    system = [
        ("system", platform.system()),
        ("dist", " ".join(linux_distribution(full_distribution_name=False))),
        ("release", release),
        ("machine", platform.machine()),
        ("version", version),
        # __salt_system_encoding__ is injected by the salt loader.
        ("locale", __salt_system_encoding__),
    ]

    for name, attr in system:
        yield name, attr
        # NOTE: this `continue` is a no-op (kept from the original).
        continue
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import platform if platform.system() == 'Linux': import distro print(distro.linux_distribution())
def build_env(self, hosts_file_path=None, target=None, is_build=False, test_unit=False, uwp=False, features=None):
    """Return an extended environment dictionary.

    Starts from a copy of ``os.environ`` and layers on everything a build
    or test invocation needs: MSVC/vcpkg tool paths and OpenSSL/moztools/
    LLVM locations on Windows, GStreamer paths when required, Android
    SDK/NDK variables, and accumulated ``RUSTFLAGS``.  The caller's
    environment object is never mutated.
    """
    env = os.environ.copy()
    if sys.platform == "win32" and type(env['PATH']) == six.text_type:
        # On win32, the virtualenv's activate_this.py script sometimes ends up
        # turning os.environ['PATH'] into a unicode string. This doesn't work
        # for passing env vars in to a process, so we force it back to ascii.
        # We don't use UTF8 since that won't be correct anyway; if you actually
        # have unicode stuff in your path, all this PATH munging would have broken
        # it in any case.
        env['PATH'] = env['PATH'].encode('ascii', 'ignore')
    # Extra directories to prepend/append to PATH and the dynamic-linker
    # search path; filled in by the branches below.
    extra_path = []
    extra_lib = []
    if "msvc" in (target or host_triple()):
        extra_path += [path.join(self.msvc_package_dir("cmake"), "bin")]
        extra_path += [path.join(self.msvc_package_dir("llvm"), "bin")]
        extra_path += [path.join(self.msvc_package_dir("ninja"), "bin")]
        extra_path += [self.msvc_package_dir("nuget")]
        extra_path += [path.join(self.msvc_package_dir("xargo"))]

        # Map the triple's CPU architecture to the vcpkg directory name.
        arch = (target or host_triple()).split('-')[0]
        vcpkg_arch = {
            "x86_64": "x64-windows",
            "i686": "x86-windows",
            "aarch64": "arm64-windows",
        }
        target_arch = vcpkg_arch[arch]
        if uwp:
            target_arch += "-uwp"
        openssl_base_dir = path.join(self.msvc_package_dir("openssl"), target_arch)

        # Link openssl
        env["OPENSSL_INCLUDE_DIR"] = path.join(openssl_base_dir, "include")
        env["OPENSSL_LIB_DIR"] = path.join(openssl_base_dir, "lib")
        env["OPENSSL_LIBS"] = "libssl:libcrypto"
        # Link moztools, used for building SpiderMonkey
        moztools_paths = [
            path.join(self.msvc_package_dir("moztools"), "bin"),
            path.join(self.msvc_package_dir("moztools"), "msys", "bin"),
        ]
        # In certain cases we need to ensure that tools with conflicting MSYS versions
        # can be placed in the PATH ahead of the moztools directories.
        moztools_path_prepend = env.get("MOZTOOLS_PATH_PREPEND", None)
        if moztools_path_prepend:
            moztools_paths.insert(0, moztools_path_prepend)
        env["MOZTOOLS_PATH"] = os.pathsep.join(moztools_paths)
        # Link autoconf 2.13, used for building SpiderMonkey
        env["AUTOCONF"] = path.join(self.msvc_package_dir("moztools"), "msys", "local", "bin", "autoconf-2.13")
        # Link LLVM
        env["LIBCLANG_PATH"] = path.join(self.msvc_package_dir("llvm"), "lib")

        if not os.environ.get("NATIVE_WIN32_PYTHON"):
            env["NATIVE_WIN32_PYTHON"] = sys.executable
        # Always build harfbuzz from source
        env["HARFBUZZ_SYS_NO_PKG_CONFIG"] = "true"

    if is_build and self.needs_gstreamer_env(target or host_triple(), env, uwp, features):
        gstpath = gstreamer_root(target or host_triple(), env, self.get_top_dir())
        extra_path += [path.join(gstpath, "bin")]
        libpath = path.join(gstpath, "lib")
        # we append in the reverse order so that system gstreamer libraries
        # do not get precedence
        extra_path = [libpath] + extra_path
        extra_lib = [libpath] + extra_lib
        append_to_path_env(path.join(libpath, "pkgconfig"), env, "PKG_CONFIG_PATH")

    if is_linux():
        distrib, version, _ = distro.linux_distribution()
        distrib = six.ensure_str(distrib)
        version = six.ensure_str(version)
        # Old Ubuntu releases need harfbuzz built from source as well.
        if distrib == "Ubuntu" and (version == "16.04" or version == "14.04"):
            env["HARFBUZZ_SYS_NO_PKG_CONFIG"] = "true"

    if extra_path:
        append_to_path_env(os.pathsep.join(extra_path), env, "PATH")

    # Three-state config: True -> "1", False -> "0", None/unset -> leave
    # CARGO_INCREMENTAL alone.
    if self.config["build"]["incremental"]:
        env["CARGO_INCREMENTAL"] = "1"
    elif self.config["build"]["incremental"] is not None:
        env["CARGO_INCREMENTAL"] = "0"

    if extra_lib:
        path_var = "DYLD_LIBRARY_PATH" if sys.platform == "darwin" else "LD_LIBRARY_PATH"
        append_to_path_env(os.pathsep.join(extra_lib), env, path_var)

    # Paths to Android build tools:
    if self.config["android"]["sdk"]:
        env["ANDROID_SDK"] = self.config["android"]["sdk"]
    if self.config["android"]["ndk"]:
        env["ANDROID_NDK"] = self.config["android"]["ndk"]
    if self.config["android"]["toolchain"]:
        env["ANDROID_TOOLCHAIN"] = self.config["android"]["toolchain"]
    if self.config["android"]["platform"]:
        env["ANDROID_PLATFORM"] = self.config["android"]["platform"]

    # Fall back to toolchains checked out under the source tree when the
    # config did not set the variables above.
    toolchains = path.join(self.context.topdir, "android-toolchains")
    for kind in ["sdk", "ndk"]:
        default = os.path.join(toolchains, kind)
        if os.path.isdir(default):
            env.setdefault("ANDROID_" + kind.upper(), default)

    tools = os.path.join(toolchains, "sdk", "platform-tools")
    if os.path.isdir(tools):
        env["PATH"] = "%s%s%s" % (tools, os.pathsep, env["PATH"])

    # These are set because they are the variable names that build-apk
    # expects. However, other submodules have makefiles that reference
    # the env var names above. Once glutin is enabled and set as the
    # default, we could modify the subproject makefiles to use the names
    # below and remove the vars above, to avoid duplication.
    if "ANDROID_SDK" in env:
        env["ANDROID_HOME"] = env["ANDROID_SDK"]
    if "ANDROID_NDK" in env:
        env["NDK_HOME"] = env["ANDROID_NDK"]
    if "ANDROID_TOOLCHAIN" in env:
        env["NDK_STANDALONE"] = env["ANDROID_TOOLCHAIN"]

    if hosts_file_path:
        env['HOST_FILE'] = hosts_file_path

    if not test_unit:
        # This wrapper script is in bash and doesn't work on Windows
        # where we want to run doctests as part of `./mach test-unit`
        env['RUSTDOC'] = path.join(self.context.topdir, 'etc', 'rustdoc-with-private')

    if self.config["build"]["rustflags"]:
        env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " " + self.config["build"]["rustflags"]

    # Don't run the gold linker if on Windows https://github.com/servo/servo/issues/9499
    if self.config["tools"]["rustc-with-gold"] and sys.platform != "win32":
        if subprocess.call(['which', 'ld.gold'], stdout=PIPE, stderr=PIPE) == 0:
            env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C link-args=-fuse-ld=gold"

    if not (self.config["build"]["ccache"] == ""):
        env['CCACHE'] = self.config["build"]["ccache"]

    # Ensure Rust uses hard floats and SIMD on ARM devices
    if target:
        if target.startswith('arm') or target.startswith('aarch64'):
            env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C target-feature=+neon"

    env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -W unused-extern-crates"

    # Embed "-<sha>[-dirty]" build metadata when building from a git checkout.
    git_info = []
    if os.path.isdir('.git') and is_build:
        git_sha = subprocess.check_output([
            'git', 'rev-parse', '--short', 'HEAD'
        ]).strip()
        git_is_dirty = bool(subprocess.check_output([
            'git', 'status', '--porcelain'
        ]).strip())

        # Leading empty entry so the joined string starts with '-'.
        git_info.append('')
        git_info.append(six.ensure_str(git_sha))
        if git_is_dirty:
            git_info.append('dirty')

        env['GIT_INFO'] = '-'.join(git_info)

    if self.config["build"]["thinlto"]:
        env['RUSTFLAGS'] += " -Z thinlto"

    # Work around https://github.com/servo/servo/issues/24446
    # Argument-less str.split normalizes leading, trailing, and double spaces
    env['RUSTFLAGS'] = " ".join(env['RUSTFLAGS'].split())

    return env
import sys
import platform
import distro

# Print a short tag identifying the host platform; exit with status 1
# for anything unrecognized.
system = platform.system()

if system == 'Darwin':
    print('mac')
elif system == 'Linux':
    dist = distro.linux_distribution()
    distro_name = dist[0]
    if distro_name == 'Ubuntu':
        print('ubuntu')
    elif distro_name == 'debian':
        print('debian')
    else:
        sys.exit(1)
elif system == 'cygwin':
    print('cygwin')
else:
    sys.exit(1)
class WOVar():
    """Initialization of core variables.

    NOTE: almost everything here runs at class-definition time (file
    reads, ``linux_distribution()`` calls, and even interactive
    ``input()`` prompts when no git identity is configured), so merely
    importing this module has side effects.
    """
    # WordOps version
    wo_version = "3.10.2"
    # WordOps packages versions
    wo_wp_cli = "2.3.0"
    wo_adminer = "4.7.3"
    wo_phpmyadmin = "4.9.1"
    wo_extplorer = "2.1.13"
    wo_dashboard = "1.2"

    # Get WPCLI path
    wo_wpcli_path = '/usr/local/bin/wp'

    # Current date and time of System
    wo_date = datetime.now().strftime('%d%b%Y-%H-%M-%S')

    # WordOps core variables
    # linux distribution
    wo_distro = linux_distribution(full_distribution_name=False)[0].lower()
    wo_platform_version = linux_distribution(
        full_distribution_name=False)[1].lower()
    # distro codename (bionic, xenial, stretch ...)
    wo_platform_codename = linux_distribution(
        full_distribution_name=False)[2].lower()

    # Get timezone of system
    if os.path.isfile('/etc/timezone'):
        with open("/etc/timezone", mode='r', encoding='utf-8') as tzfile:
            wo_timezone = tzfile.read().replace('\n', '')
            if wo_timezone == "Etc/UTC":
                wo_timezone = "UTC"
    else:
        # No /etc/timezone on this host; fall back to a fixed default.
        wo_timezone = "Europe/Amsterdam"

    # Get FQDN of system
    wo_fqdn = getfqdn()

    # WordOps default webroot path
    wo_webroot = '/var/www/'

    # WordOps default renewal SSL certificates path
    wo_ssl_archive = '/etc/letsencrypt/renewal'

    # WordOps default live SSL certificates path
    wo_ssl_live = '/etc/letsencrypt/live'

    # PHP user
    wo_php_user = '******'

    # WordOps git configuration management
    config = configparser.ConfigParser()
    config.read(os.path.expanduser("~") + '/.gitconfig')
    try:
        wo_user = config['user']['name']
        wo_email = config['user']['email']
    except Exception:
        # No git identity found: prompt interactively and persist it
        # via `git config --global`.
        print("WordOps (wo) require an username & and an email "
              "address to configure Git (used to save server configurations)")
        print("Your informations will ONLY be stored locally")

        wo_user = input("Enter your name: ")
        while wo_user == "":
            print("Unfortunately, this can't be left blank")
            wo_user = input("Enter your name: ")

        wo_email = input("Enter your email: ")
        while not match(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$",
                        wo_email):
            print("Whoops, seems like you made a typo - "
                  "the e-mailaddress is invalid...")
            wo_email = input("Enter your email: ")

        git.config("--global", "user.name", "{0}".format(wo_user))
        git.config("--global", "user.email", "{0}".format(wo_email))

    # Keep root's gitconfig in sync with the invoking user's.
    if not os.path.isfile('/root/.gitconfig'):
        copy2(os.path.expanduser("~") + '/.gitconfig', '/root/.gitconfig')

    # MySQL hostname
    wo_mysql_host = ""
    config = configparser.RawConfigParser()
    if os.path.exists('/etc/mysql/conf.d/my.cnf'):
        cnfpath = "/etc/mysql/conf.d/my.cnf"
    else:
        cnfpath = os.path.expanduser("~") + "/.my.cnf"
    # config.read() returns the list of files successfully parsed; only
    # trust the [client] host when the cnf file actually loaded.
    if [cnfpath] == config.read(cnfpath):
        try:
            wo_mysql_host = config.get('client', 'host')
        except configparser.NoOptionError:
            wo_mysql_host = "localhost"
    else:
        wo_mysql_host = "localhost"

    # WordOps stack installation variables
    # Nginx repo and packages
    if wo_distro == 'ubuntu':
        wo_nginx_repo = "ppa:wordops/nginx-wo"
    else:
        if wo_distro == 'debian':
            if wo_platform_codename == 'jessie':
                wo_deb_repo = "Debian_8.0"
            elif wo_platform_codename == 'stretch':
                wo_deb_repo = "Debian_9.0"
            elif wo_platform_codename == 'buster':
                wo_deb_repo = "Debian_10"
        elif wo_distro == 'raspbian':
            if wo_platform_codename == 'stretch':
                wo_deb_repo = "Raspbian_9.0"
            elif wo_platform_codename == 'buster':
                wo_deb_repo = "Raspbian_10"
        # debian/raspbian nginx repository
        # NOTE(review): wo_deb_repo is unbound here on an unrecognized
        # debian/raspbian codename — confirm upstream handling.
        wo_nginx_repo = ("deb http://download.opensuse.org"
                         "/repositories/home:"
                         "/virtubox:/WordOps/{0}/ /".format(wo_deb_repo))
    wo_nginx = ["nginx-custom", "nginx-wo"]
    wo_nginx_key = '188C9FB063F0247A'

    # PHP 7.2 / 7.3 package sets
    wo_php = [
        "php7.2-fpm", "php7.2-curl", "php7.2-gd", "php7.2-imap",
        "php7.2-readline", "php7.2-common", "php7.2-recode",
        "php7.2-cli", "php7.2-mbstring", "php7.2-intl",
        "php7.2-bcmath", "php7.2-mysql", "php7.2-opcache",
        "php7.2-zip", "php7.2-xml", "php7.2-soap"
    ]
    wo_php73 = [
        "php7.3-fpm", "php7.3-curl", "php7.3-gd", "php7.3-imap",
        "php7.3-readline", "php7.3-common", "php7.3-recode",
        "php7.3-cli", "php7.3-mbstring", "php7.3-intl",
        "php7.3-bcmath", "php7.3-mysql", "php7.3-opcache",
        "php7.3-zip", "php7.3-xml", "php7.3-soap"
    ]
    wo_php_extra = [
        "php-memcached", "php-imagick", "graphviz",
        "php-xdebug", "php-msgpack", "php-redis"
    ]

    # MariaDB packages; the python connector package differs per
    # distro/codename.
    wo_mysql = ["mariadb-server", "percona-toolkit"]
    if wo_distro == 'raspbian':
        wo_mysql = wo_mysql + ["python3-mysqldb"]
        if wo_platform_codename == 'stretch':
            mariadb_ver = '10.1'
        else:
            mariadb_ver = '10.3'
    else:
        mariadb_ver = '10.3'
        if wo_platform_codename == 'jessie':
            wo_mysql = wo_mysql + ["python3-mysql.connector"]
        else:
            wo_mysql = wo_mysql + ["python3-mysqldb", "mariadb-backup"]

    wo_mysql_client = ["mariadb-client"]
    if wo_platform_codename == 'jessie':
        wo_mysql_client = wo_mysql_client + ["python3-mysqldb"]
    else:
        wo_mysql_client = wo_mysql_client + ["python3-mysql.connector"]

    wo_fail2ban = ["fail2ban"]
    wo_clamav = ["clamav", "clamav-freshclam"]
    wo_ubuntu_backports = 'ppa:jonathonf/backports'

    # Redis repo details
    if wo_distro == 'ubuntu':
        wo_php_repo = "ppa:ondrej/php"
        wo_redis_repo = ("ppa:chris-lea/redis-server")
        wo_goaccess_repo = ("ppa:alex-p/goaccess")
        wo_mysql_repo = ("deb [arch=amd64,ppc64el] "
                         "http://mariadb.mirrors.ovh.net/MariaDB/repo/"
                         "10.3/ubuntu {codename} main".format(
                             codename=wo_platform_codename))
    else:
        wo_php_repo = (
            "deb https://packages.sury.org/php/ {codename} main".format(
                codename=wo_platform_codename))
        wo_php_key = 'AC0E47584A7A714D'
        wo_redis_repo = (
            "deb https://packages.sury.org/php/ {codename} all".format(
                codename=wo_platform_codename))
        wo_mysql_repo = ("deb [arch=amd64,ppc64el] "
                         "http://mariadb.mirrors.ovh.net/MariaDB/repo/"
                         "10.3/debian {codename} main".format(
                             codename=wo_platform_codename))

    wo_redis = ['redis-server']

    # Repo path
    wo_repo_file = "wo-repo.list"
    wo_repo_file_path = ("/etc/apt/sources.list.d/" + wo_repo_file)

    # Application dabase file path
    basedir = os.path.abspath(os.path.dirname('/var/lib/wo/'))
    wo_db_uri = 'sqlite:///' + os.path.join(basedir, 'dbase.db')

    def __init__(self):
        # All state lives on the class; instances carry nothing extra.
        pass
def __init__(self, choice=None, no_interactive=False, hg_configure=False,
             no_system_changes=False, mach_context=None):
    """Select and instantiate the bootstrapper class for the host OS.

    Inspects ``sys.platform`` (and, on Linux, the ``distro`` id) to pick
    the right ``*Bootstrapper`` class, then constructs it with the
    collected ``args`` and stores it on ``self.instance``.

    Raises NotImplementedError when the platform or Linux distribution
    is not supported.
    """
    self.instance = None
    self.choice = choice
    self.hg_configure = hg_configure
    self.no_system_changes = no_system_changes
    self.mach_context = mach_context
    cls = None
    args = {
        'no_interactive': no_interactive,
        'no_system_changes': no_system_changes
    }

    if sys.platform.startswith('linux'):
        # distro package provides reliable ids for popular distributions so
        # we use those instead of the full distribution name
        dist_id, version, codename = distro.linux_distribution(
            full_distribution_name=False)

        if dist_id in ('centos', 'fedora'):
            cls = CentOSFedoraBootstrapper
            args['distro'] = dist_id
        elif dist_id in DEBIAN_DISTROS:
            cls = DebianBootstrapper
            args['distro'] = dist_id
            args['codename'] = codename
        elif dist_id in ('gentoo', 'funtoo'):
            cls = GentooBootstrapper
        # Fix: the original wrote ``dist_id in ('solus')`` (and similarly
        # for 'arch'/'void'); parentheses around a single string do not
        # make a tuple, so that was a *substring* test (e.g. 'ol' would
        # match 'solus'). One-element tuples restore true membership.
        elif dist_id in ('solus',):
            cls = SolusBootstrapper
        elif dist_id in ('arch',) or os.path.exists('/etc/arch-release'):
            cls = ArchlinuxBootstrapper
        elif dist_id in ('void',):
            cls = VoidBootstrapper
        elif os.path.exists('/etc/SUSE-brand'):
            cls = OpenSUSEBootstrapper
        else:
            raise NotImplementedError('Bootstrap support for this Linux '
                                      'distro not yet available: ' + dist_id)

        args['version'] = version
        args['dist_id'] = dist_id
    elif sys.platform.startswith('darwin'):
        # TODO Support Darwin platforms that aren't OS X.
        osx_version = platform.mac_ver()[0]
        cls = OSXBootstrapper
        args['version'] = osx_version
    elif sys.platform.startswith('openbsd'):
        cls = OpenBSDBootstrapper
        args['version'] = platform.uname()[2]
    elif sys.platform.startswith('dragonfly') or \
            sys.platform.startswith('freebsd'):
        cls = FreeBSDBootstrapper
        args['version'] = platform.release()
        args['flavor'] = platform.system()
    elif sys.platform.startswith('win32') or sys.platform.startswith('msys'):
        if 'MOZILLABUILD' in os.environ:
            cls = MozillaBuildBootstrapper
        else:
            cls = WindowsBootstrapper

    if cls is None:
        raise NotImplementedError('Bootstrap support is not yet available '
                                  'for your OS.')

    self.instance = cls(**args)
import os
import sys
import distro
import platform
import numpy
import warnings
import cv2
import pdb
from enum import Enum
from ctypes import *

# Low-level API
# x86_64, Raspberry Pi or OSX
dll_file = "libhs.so"

# Fix: query the distribution once instead of three separate
# distro.linux_distribution() calls; the result is constant for the
# lifetime of the process.
dist_name, dist_version = distro.linux_distribution()[0:2]

if dist_name == 'Ubuntu' \
        and dist_version == '16.04' \
        and platform.machine() == 'x86_64':
    arch_path = 'libs/linux/x86_64'
elif 'Raspbian' in dist_name \
        and platform.machine() == 'armv7l':
    arch_path = 'libs/linux/armv7l'
elif platform.system() == 'Darwin':
    arch_path = 'libs/macos'
    dll_file = "libhs.dylib"
else:
    raise Exception("Unsupported operating system")

# Load the native library that sits next to this module.
f = CDLL(os.path.join(os.path.dirname(__file__), arch_path, dll_file))

warnings.simplefilter('default', DeprecationWarning)
def system_information():
    '''
    Report system versions.

    Generator yielding ``(name, value)`` pairs describing the host:
    ``system``, ``dist``, ``release``, ``machine``, ``version`` and
    ``locale``.
    '''
    def system_version():
        '''
        Return host system version.
        '''
        lin_ver = linux_distribution()
        mac_ver = platform.mac_ver()
        win_ver = platform.win32_ver()

        # Fix: linux_distribution() can report a distribution even on
        # OS X and Windows, so check mac_ver and win_ver first and fall
        # back to lin_ver last (consistent with the sibling
        # implementation of this function in this file).
        if mac_ver[0]:
            if isinstance(mac_ver[1], (tuple, list)) and ''.join(mac_ver[1]):
                return ' '.join([mac_ver[0], '.'.join(mac_ver[1]), mac_ver[2]])
            else:
                return ' '.join([mac_ver[0], mac_ver[2]])
        elif win_ver[0]:
            return ' '.join(win_ver)
        elif lin_ver[0]:
            return ' '.join(lin_ver)
        else:
            return ''

    if platform.win32_ver()[0]:
        # Get the version and release info based on the Windows Operating
        # System Product Name. As long as Microsoft maintains a similar format
        # this should be future proof
        import win32api  # pylint: disable=3rd-party-module-not-gated
        import win32con  # pylint: disable=3rd-party-module-not-gated

        # Get the product name from the registry
        hkey = win32con.HKEY_LOCAL_MACHINE
        key = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
        value_name = 'ProductName'
        reg_handle = win32api.RegOpenKey(hkey, key)

        # Returns a tuple of (product_name, value_type)
        product_name, _ = win32api.RegQueryValueEx(reg_handle, value_name)

        version = 'Unknown'
        release = ''
        if 'Server' in product_name:
            for item in product_name.split(' '):
                # If it's all digits, then it's version
                if re.match(r'\d+', item):
                    version = item
                # If it starts with R and then numbers, it's the release
                # ie: R2
                if re.match(r'^R\d+$', item):
                    release = item
            release = '{0}Server{1}'.format(version, release)
        else:
            for item in product_name.split(' '):
                # If it's a number, decimal number, Thin or Vista, then it's the
                # version
                if re.match(r'^(\d+(\.\d+)?)|Thin|Vista$', item):
                    version = item
            release = version

        _, ver, service_pack, extra = platform.win32_ver()
        version = ' '.join([release, ver, service_pack, extra])
    else:
        version = system_version()
        release = platform.release()

    system = [
        ('system', platform.system()),
        ('dist', ' '.join(linux_distribution(full_distribution_name=False))),
        ('release', release),
        ('machine', platform.machine()),
        ('version', version),
        ('locale', __salt_system_encoding__),
    ]

    # Fix: dropped the dead ``continue`` that followed the yield in the
    # original loop body; it had no effect.
    for name, attr in system:
        yield name, attr
def __init__(self):
    """Cache the tuple reported by distro for later queries."""
    # full_distribution_name=False asks distro for the short id rather
    # than the pretty name.
    dist_info = distro.linux_distribution(full_distribution_name=False)
    self.nomdist = dist_info
def __init__(self, args):
    """Resolve the local Qt install location and the download URL.

    Sets up the versioned Qt path under HIFI_QT_BASE (creating
    directories and a lock-file path as needed).  Unless
    VIRCADIA_USE_PREBUILT_QT is set, also selects the platform/distro
    specific ``qtUrl`` to download, raising for unsupported systems.
    """
    self.args = args
    self.configFilePath = os.path.join(args.build_root, 'qt.cmake')
    self.version = os.getenv('VIRCADIA_USE_QT_VERSION', '5.12.3')
    self.assets_url = hifi_utils.readEnviromentVariableFromFile(
        args.build_root, 'EXTERNAL_BUILD_ASSETS')

    defaultBasePath = os.path.expanduser('~/hifi/qt')
    self.basePath = os.getenv('HIFI_QT_BASE', defaultBasePath)
    if (not os.path.isdir(self.basePath)):
        os.makedirs(self.basePath)
    self.path = os.path.join(self.basePath, self.version)
    self.fullPath = os.path.join(self.path, 'qt5-install')
    self.cmakePath = os.path.join(self.fullPath, 'lib/cmake')

    print("Using qt path {}".format(self.path))
    lockDir, lockName = os.path.split(self.path)
    lockName += '.lock'
    if not os.path.isdir(lockDir):
        os.makedirs(lockDir)
    self.lockFile = os.path.join(lockDir, lockName)

    if (os.getenv('VIRCADIA_USE_PREBUILT_QT')):
        print("Using pre-built Qt5")
        return

    # OS dependent information
    system = platform.system()

    if 'Windows' == system:
        self.qtUrl = self.assets_url + '/dependencies/vcpkg/qt5-install-5.12.3-windows3.tar.gz%3FversionId=5ADqP0M0j5ZfimUHrx4zJld6vYceHEsI'
    elif 'Darwin' == system:
        self.qtUrl = self.assets_url + '/dependencies/vcpkg/qt5-install-5.12.3-macos.tar.gz%3FversionId=bLAgnoJ8IMKpqv8NFDcAu8hsyQy3Rwwz'
    elif 'Linux' == system:
        import distro
        # Fix: removed the unused ``dist = distro.linux_distribution()``
        # local — the call is deprecated in the distro package and its
        # result was never read; the distro.id()/major/minor accessors
        # below drive the selection.
        if distro.id() == 'ubuntu':
            u_major = int(distro.major_version())
            u_minor = int(distro.minor_version())
            if u_major == 16:
                self.qtUrl = self.assets_url + '/dependencies/vcpkg/qt5-install-5.12.3-ubuntu-16.04-with-symbols.tar.gz'
            elif u_major == 18:
                self.qtUrl = self.assets_url + '/dependencies/vcpkg/qt5-install-5.12.3-ubuntu-18.04.tar.gz'
            elif u_major == 19 and u_minor == 10:
                self.qtUrl = self.assets_url + '/dependencies/vcpkg/qt5-install-5.12.6-ubuntu-19.10.tar.xz'
            # NOTE(review): this condition looks suspicious (the u_minor
            # clause seems aimed at excluding 19.x builds) — preserved
            # exactly as written.
            elif u_major > 18 and (u_major != 19 and u_minor != 4):
                print("We don't support " + distro.name(pretty=True) + " yet. Perhaps consider helping us out?")
                raise Exception('LINUX DISTRO IS NOT SUPPORTED YET!!!')
            else:
                print(
                    "Sorry, " + distro.name(pretty=True) + " is old and won't be officially supported. Please consider upgrading."
                )
                raise Exception('UNKNOWN LINUX DISTRO VERSION!!!')
        else:
            print("Sorry, " + distro.name(pretty=True) + " is not supported. Please consider helping us out.")
            print(
                "It's also possible to build Qt for your distribution, please see the documentation at:"
            )
            print(
                "https://github.com/kasenvr/project-athena/tree/kasen/core/tools/qt-builder"
            )
            raise Exception('UNKNOWN LINUX VERSION!!!')
    else:
        print("System : " + platform.system())
        print("Architecture: " + platform.architecture())
        print("Machine : " + platform.machine())
        raise Exception('UNKNOWN OPERATING SYSTEM!!!')
def sysinfo():
    """Print a human-readable system report: distro, kernel, cpufreq
    driver, CPU architecture/model/core count, then per-core frequency
    and per-core temperature.

    NOTE(review): ``pl``, ``s`` and ``p`` are module aliases defined at
    the top of the file (not visible here); from usage they appear to be
    platform, subprocess and psutil respectively — confirm.
    """
    # added as a temp fix for issue: https://github.com/giampaolo/psutil/issues/1650
    import warnings
    warnings.filterwarnings("ignore")

    print("\n" + "-" * 29 + " System information " + "-" * 30 + "\n")

    import distro
    # get info about linux distro
    fdist = distro.linux_distribution()
    dist = " ".join(x for x in fdist)
    print("Linux distro: " + dist)
    print("Linux kernel: " + pl.release())

    # driver check (shells out to the cpufreqctl helper)
    driver = s.getoutput("cpufreqctl --driver")
    print("Driver: " + driver)

    # get cpu architecture
    cpu_arch = pl.machine()
    # get number of cores/logical CPU's
    cpu_count = p.cpu_count()
    print("Architecture:", cpu_arch)

    # get processor model name from the first matching /proc/cpuinfo line
    with open("/proc/cpuinfo", "r") as f:
        line = f.readline()
        while line:
            if "model name" in line:
                print("Processor:" + line.split(':')[1].rstrip())
                break
            line = f.readline()

    print("Cores:", cpu_count)

    print("\n" + "-" * 30 + " Current CPU states " + "-" * 30 + "\n")

    # print cpu max frequency
    max_cpu_freq = p.cpu_freq().max
    print("CPU max frequency: " + "\n{:.0f}".format(max_cpu_freq) + " MHz\n")

    # get current cpu frequency per core
    core_usage = p.cpu_freq(percpu=True)
    # print current cpu frequency per core
    print("CPU frequency for each core:\n")
    core_num = 0
    while core_num < cpu_count:
        print("CPU" + str(core_num) + ": {:.0f}".format(core_usage[core_num].current) + " MHz")
        core_num += 1

    # get number of core temp sensors (physical cores)
    core_temp_num = p.cpu_count(logical=False)
    # get hardware temperatures
    core_temp = p.sensors_temperatures()
    # print temperature for each physical core
    # NOTE(review): indexes the 'coretemp' sensor directly — raises
    # KeyError on hosts without that sensor; confirm intended.
    print("\nTemperature for each physical core:\n")
    core_num = 0
    while core_num < core_temp_num:
        print("CPU" + str(core_num) + " temp: {:.0f}".format(core_temp['coretemp'][core_num].current) + "°C")
        core_num += 1
server.verbose = verbose # Start a thread with the server -- that thread will then start one # or more threads for each request server_thread = threading.Thread(target=server.serve_forever) # Exit the server thread when the main thread terminates server_thread.verbose = verbose server_thread.setDaemon(True) server_thread.start() if not quiet: strx = "Win or Unkn." try: import distro for aa in distro.linux_distribution(): strx = str(aa) + " " except: pass print("MainSiteID:", pyservsup.globals.siteid) print("Server running:", server.server_address) pyver = support.list2str(sys.version_info[0:3], ".") print("Running python", platform.python_version(), "on", platform.system(), strx) if conf.pglog > 0: pysyslog.syslog("Started Server") # Block
def start_vmcore(self, custom_kernelver=None):
    """Retrace a kernel vmcore: run crash(8) against it and collect output.

    Determines the kernel version (or uses *custom_kernelver*), prepares
    debuginfo, runs a series of crash commands ('log', 'bt -a', optional
    'kmem', 'sys', 'sys -c', 'foreach bt') either inside a mock chroot or
    directly on the host, stores results on the task, and fires the
    start/retrace/success hooks.  Raises Exception on any fatal step.
    """
    self.hook_start()

    task = self.task
    vmcore = os.path.join(task.get_savedir(), "crash", "vmcore")

    # Best-effort stat; a missing/unreadable vmcore fails later anyway.
    try:
        self.stats["coresize"] = os.path.getsize(vmcore)
    except:
        pass

    if custom_kernelver is not None:
        kernelver = custom_kernelver
        kernelver_str = custom_kernelver.kernelver_str
    else:
        # Auto-detect the kernel release from the vmcore itself.
        kernelver = get_kernel_release(vmcore, task.get_crash_cmd().split())
        if not kernelver:
            raise Exception("Unable to determine kernel version")
        log_debug("Determined kernel version: %s" % kernelver)
        task.set_kernelver(str(kernelver))
        kernelver_str = kernelver.kernelver_str

    self.stats["package"] = "kernel"
    self.stats["version"] = "%s-%s" % (kernelver.version, kernelver.release)
    self.stats["arch"] = kernelver.arch

    # NOTE(review): kernelcache/kerneltmp/kernelver_str are computed but not
    # used in this method as visible here — possibly leftovers; confirm.
    kernelcache = os.path.join(CONFIG["RepoDir"], "kernel")
    kerneltmp = os.path.join(kernelcache, "%s.tmp" % kernelver)

    log_info(STATUS[STATUS_INIT])
    task.set_status(STATUS_INIT)
    vmlinux = ""

    if task.use_mock(kernelver):
        # NOTE(review): hook_post_prepare_mock() is also called after mock
        # init below — this first call may have been meant to be a
        # *pre*-prepare hook; confirm against the hook API.
        self.hook_post_prepare_mock()

        # we don't save config into task.get_savedir() because it is only
        # readable by user/group retrace/CONFIG["AuthGroup"].
        # if a non-retrace user in group mock executes
        # setgid /usr/bin/mock, he gets permission denied.
        # this is not a security thing - using mock gives you root anyway
        cfgdir = os.path.join(CONFIG["SaveDir"], "%d-kernel" % task.get_taskid())

        # if the directory exists, it is orphaned - nuke it
        if os.path.isdir(cfgdir):
            shutil.rmtree(cfgdir)

        # Config dir must be group-readable by 'mock'; restrict others.
        mockgid = grp.getgrnam("mock").gr_gid
        old_umask = os.umask(0o027)
        os.mkdir(cfgdir)
        os.chown(cfgdir, -1, mockgid)

        try:
            cfgfile = os.path.join(cfgdir, RetraceTask.MOCK_DEFAULT_CFG)
            # (name, version, codename) of the host distribution; version
            # becomes the chroot's releasever.
            linux_dist = distro.linux_distribution(full_distribution_name=False)
            with open(cfgfile, "w") as mockcfg:
                mockcfg.write("config_opts['root'] = '%d-kernel'\n" % task.get_taskid())
                mockcfg.write("config_opts['target_arch'] = '%s'\n" % kernelver.arch)
                mockcfg.write("config_opts['chroot_setup_cmd'] = 'install bash coreutils cpio " "crash findutils rpm shadow-utils'\n")
                mockcfg.write("config_opts['releasever'] = '%s'\n" % linux_dist[1])
                if linux_dist[0] == "fedora":
                    mockcfg.write("config_opts['package_manager'] = 'dnf'\n")
                mockcfg.write("config_opts['plugin_conf']['ccache_enable'] = False\n")
                mockcfg.write("config_opts['plugin_conf']['yum_cache_enable'] = False\n")
                mockcfg.write("config_opts['plugin_conf']['root_cache_enable'] = False\n")
                mockcfg.write("config_opts['plugin_conf']['bind_mount_enable'] = True\n")
                # Bind-mount the repo and the task's save dir into the chroot.
                mockcfg.write("config_opts['plugin_conf']['bind_mount_opts'] = { \n")
                mockcfg.write(" 'dirs': [('%s', '%s'),\n" % (CONFIG["RepoDir"], CONFIG["RepoDir"]))
                mockcfg.write(" ('%s', '%s'),],\n" % (task.get_savedir(), task.get_savedir()))
                mockcfg.write(" 'create_dirs': True, }\n")
                mockcfg.write("\n")
                mockcfg.write("config_opts['yum.conf'] = \"\"\"\n")
                mockcfg.write("[main]\n")
                mockcfg.write("cachedir=/var/cache/yum\n")
                mockcfg.write("debuglevel=1\n")
                mockcfg.write("reposdir=%s\n" % os.devnull)
                mockcfg.write("logfile=/var/log/yum.log\n")
                mockcfg.write("retries=20\n")
                mockcfg.write("obsoletes=1\n")
                mockcfg.write("assumeyes=1\n")
                mockcfg.write("syslog_ident=mock\n")
                mockcfg.write("syslog_device=\n")
                mockcfg.write("\n")
                mockcfg.write("#repos\n")
                mockcfg.write("\n")
                # Single repo serving the kernel packages for this arch.
                mockcfg.write("[kernel-%s]\n" % kernelver.arch)
                mockcfg.write("name=kernel-%s\n" % kernelver.arch)
                mockcfg.write("baseurl=%s\n" % CONFIG["KernelChrootRepo"].replace("$ARCH", kernelver.arch))
                mockcfg.write("failovermethod=priority\n")
                mockcfg.write("\"\"\"\n")

            os.chown(cfgfile, -1, mockgid)

            # symlink defaults from /etc/mock
            os.symlink("/etc/mock/site-defaults.cfg",
                       os.path.join(task.get_savedir(), RetraceTask.MOCK_SITE_DEFAULTS_CFG))
            os.symlink("/etc/mock/logging.ini",
                       os.path.join(task.get_savedir(), RetraceTask.MOCK_LOGGING_INI))
        except Exception as ex:
            raise Exception("Unable to create mock config file: %s" % ex)
        finally:
            # Always restore the process umask.
            os.umask(old_umask)

        # Initialize the mock chroot before running crash inside it.
        child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "init"],
                      stdout=PIPE, stderr=STDOUT, encoding='utf-8')
        stdout = child.communicate()[0]
        if child.wait():
            raise Exception("mock exitted with %d:\n%s" % (child.returncode, stdout))

        self.hook_post_prepare_mock()

        # no locks required, mock locks itself
        try:
            self.hook_pre_prepare_debuginfo()
            vmlinux = task.prepare_debuginfo(vmcore, cfgdir,
                                             kernelver=kernelver,
                                             crash_cmd=task.get_crash_cmd().split())
            self.hook_post_prepare_debuginfo()
        except Exception as ex:
            raise Exception("prepare_debuginfo failed: %s" % str(ex))

        self.hook_pre_retrace()

        # generate the log
        # All crash runs below discard stderr; on failure the command's
        # output variable is reset to None so it is not saved as misc data
        # ('log' output is kept even on failure and becomes the backtrace).
        with open(os.devnull, "w") as null:
            child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "shell", "--",
                           "crash --minimal -s %s %s" % (vmcore, vmlinux)],
                          stdin=PIPE, stdout=PIPE, stderr=null, encoding='utf-8')
            kernellog = child.communicate("log\nquit\n")[0]
            if child.wait():
                log_warn("crash 'log' exitted with %d" % child.returncode)

            child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "shell", "--",
                           "crash -s %s %s" % (vmcore, vmlinux)],
                          stdin=PIPE, stdout=PIPE, stderr=null, encoding='utf-8')
            crash_bt_a = child.communicate("set hex\nbt -a\nquit\n")[0]
            if child.wait():
                log_warn("crash 'bt -a' exitted with %d" % child.returncode)
                crash_bt_a = None

            # VmcoreRunKmem selects which (if any) kmem variant to run:
            # 1 = 'kmem -f', 2 = 'kmem -f' with hashing off, 3 = 'kmem -z'.
            crash_kmem_f = None
            if CONFIG["VmcoreRunKmem"] == 1:
                child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "shell", "--",
                               "crash -s %s %s" % (vmcore, vmlinux)],
                              stdin=PIPE, stdout=PIPE, stderr=null, encoding='utf-8')
                crash_kmem_f = child.communicate("kmem -f\nquit\n")[0]
                if child.wait():
                    log_warn("crash 'kmem -f' exitted with %d" % child.returncode)
                    crash_kmem_f = None

            if CONFIG["VmcoreRunKmem"] == 2:
                child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "shell", "--",
                               "crash -s %s %s" % (vmcore, vmlinux)],
                              stdin=PIPE, stdout=PIPE, stderr=null, encoding='utf-8')
                crash_kmem_f = child.communicate("set hash off\nkmem -f\nset hash on\nquit\n")[0]
                if child.wait():
                    log_warn("crash 'kmem -f' exitted with %d" % child.returncode)
                    crash_kmem_f = None

            crash_kmem_z = None
            if CONFIG["VmcoreRunKmem"] == 3:
                child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "shell", "--",
                               "crash -s %s %s" % (vmcore, vmlinux)],
                              stdin=PIPE, stdout=PIPE, stderr=null, encoding='utf-8')
                crash_kmem_z = child.communicate("kmem -z\nquit\n")[0]
                if child.wait():
                    log_warn("crash 'kmem -z' exitted with %d" % child.returncode)
                    crash_kmem_z = None

            child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "shell", "--",
                           "crash -s %s %s" % (vmcore, vmlinux)],
                          stdin=PIPE, stdout=PIPE, stderr=null, encoding='utf-8')
            crash_sys = child.communicate("sys\nquit\n")[0]
            if child.wait():
                log_warn("crash 'sys' exitted with %d" % child.returncode)
                crash_sys = None

            child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "shell", "--",
                           "crash -s %s %s" % (vmcore, vmlinux)],
                          stdin=PIPE, stdout=PIPE, stderr=null, encoding='utf-8')
            crash_sys_c = child.communicate("sys -c\nquit\n")[0]
            if child.wait():
                log_warn("crash 'sys -c' exitted with %d" % child.returncode)
                crash_sys_c = None

            child = Popen(["/usr/bin/mock", "--configdir", cfgdir, "shell", "--",
                           "crash -s %s %s" % (vmcore, vmlinux)],
                          stdin=PIPE, stdout=PIPE, stderr=null, encoding='utf-8')
            crash_foreach_bt = child.communicate("set hex\nforeach bt\nquit\n")[0]
            if child.wait():
                log_warn("crash 'foreach bt' exitted with %d" % child.returncode)
                crash_foreach_bt = None
    else:
        # Non-mock path: run crash directly on the host with the task's
        # configured crash command.
        try:
            self.hook_pre_prepare_debuginfo()
            crash_cmd = task.get_crash_cmd().split()
            vmlinux = task.prepare_debuginfo(vmcore, kernelver=kernelver, crash_cmd=crash_cmd)
            # prepare_debuginfo may mutate crash_cmd in place; persist it.
            task.set_crash_cmd(' '.join(crash_cmd))
            self.hook_post_prepare_debuginfo()
        except Exception as ex:
            raise Exception("prepare_debuginfo failed: %s" % str(ex))

        self.hook_pre_retrace()
        task.set_status(STATUS_BACKTRACE)
        log_info(STATUS[STATUS_BACKTRACE])

        child = Popen(task.get_crash_cmd().split() + ["--minimal", "-s", vmcore, vmlinux],
                      stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
        kernellog = child.communicate("log\nquit\n")[0]
        if child.wait():
            log_warn("crash 'log' exited with %d" % child.returncode)

        child = Popen(task.get_crash_cmd().split() + ["-s", vmcore, vmlinux],
                      stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
        crash_bt_a = child.communicate("set hex\nbt -a\nquit\n")[0]
        if child.wait():
            log_warn("crash 'bt -a' exited with %d" % child.returncode)
            crash_bt_a = None

        # Same VmcoreRunKmem selection as the mock path above.
        crash_kmem_f = None
        if CONFIG["VmcoreRunKmem"] == 1:
            child = Popen(task.get_crash_cmd().split() + ["-s", vmcore, vmlinux],
                          stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
            crash_kmem_f = child.communicate("kmem -f\nquit\n")[0]
            if child.wait():
                log_warn("crash 'kmem -f' exited with %d" % child.returncode)
                crash_kmem_f = None

        if CONFIG["VmcoreRunKmem"] == 2:
            child = Popen(task.get_crash_cmd().split() + ["-s", vmcore, vmlinux],
                          stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
            crash_kmem_f = child.communicate("set hash off\nkmem -f\nset hash on\nquit\n")[0]
            if child.wait():
                log_warn("crash 'kmem -f' exited with %d" % child.returncode)
                crash_kmem_f = None

        crash_kmem_z = None
        if CONFIG["VmcoreRunKmem"] == 3:
            child = Popen(task.get_crash_cmd().split() + ["-s", vmcore, vmlinux],
                          stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
            crash_kmem_z = child.communicate("kmem -z\nquit\n")[0]
            if child.wait():
                log_warn("crash 'kmem -z' exited with %d" % child.returncode)
                crash_kmem_z = None

        child = Popen(task.get_crash_cmd().split() + ["-s", vmcore, vmlinux],
                      stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
        crash_sys = child.communicate("sys\nquit\n")[0]
        if child.wait():
            log_warn("crash 'sys' exited with %d" % child.returncode)
            crash_sys = None

        child = Popen(task.get_crash_cmd().split() + ["-s", vmcore, vmlinux],
                      stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
        crash_sys_c = child.communicate("sys -c\nquit\n")[0]
        if child.wait():
            log_warn("crash 'sys -c' exited with %d" % child.returncode)
            crash_sys_c = None

        child = Popen(task.get_crash_cmd().split() + ["-s", vmcore, vmlinux],
                      stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
        crash_foreach_bt = child.communicate("set hex\nforeach bt\nquit\n")[0]
        if child.wait():
            log_warn("crash 'foreach bt' exited with %d" % child.returncode)
            crash_foreach_bt = None

    # The 'log' output is saved as the task backtrace regardless of path.
    task.set_backtrace(kernellog)

    # If crash sys command exited with non-zero status, we likely have a semi-useful vmcore
    if not crash_sys_c:
        # FIXME: Probably a better hueristic can be done here
        if len(kernellog) < 1024:
            # If log is less than 1024 bytes, probably it is not useful at all so fail it
            raise Exception("Failing task due to crash exiting with non-zero status and "
                            "small kernellog size = %d bytes" % len(kernellog))
        else:
            # If log is 1024 bytes or above, try 'crash --minimal'
            task.set_crash_cmd("crash --minimal")

    # Attach whatever crash output we managed to collect as misc files.
    if crash_bt_a:
        task.add_misc("bt-a", crash_bt_a)
    if crash_kmem_f:
        task.add_misc("kmem-f", crash_kmem_f)
    if crash_kmem_z:
        task.add_misc("kmem-z", crash_kmem_z)
    if crash_sys:
        task.add_misc("sys", crash_sys)
    if crash_sys_c:
        task.add_misc("sys-c", crash_sys_c)

    if crash_foreach_bt:
        # Post-process 'foreach bt' through the external bt_filter tool;
        # on failure, prepend a note but keep the (unfiltered) output.
        child = Popen(["bt_filter"], stdin=PIPE, stdout=PIPE, stderr=STDOUT, encoding='utf-8')
        bt_filter = child.communicate(crash_foreach_bt)[0]
        if child.wait():
            bt_filter = "bt_filter exitted with %d\n\n%s" % (child.returncode, bt_filter)
        task.add_misc("bt-filter", bt_filter)

    # Build a .crashrc so interactive crash sessions load modules from the
    # debuginfo dir and start in the misc directory.
    crashrc_lines = []
    if "/" in vmlinux:
        crashrc_lines.append("mod -S %s > %s" % (vmlinux.rsplit("/", 1)[0], os.devnull))
    miscdir = os.path.join(task.get_savedir(), RetraceTask.MISC_DIR)
    crashrc_lines.append("cd %s" % miscdir)
    if len(crashrc_lines) > 0:
        task.set_crashrc("%s\n" % "\n".join(crashrc_lines))

    self.hook_post_retrace()

    # Finalize stats and record them (best-effort; errors only logged).
    task.set_finished_time(int(time.time()))
    self.stats["duration"] = int(time.time()) - self.stats["starttime"]
    self.stats["status"] = STATUS_SUCCESS
    log_info(STATUS[STATUS_STATS])
    try:
        save_crashstats(self.stats)
    except Exception as ex:
        log_error(str(ex))

    # clean up temporary data
    task.set_status(STATUS_CLEANUP)
    log_info(STATUS[STATUS_CLEANUP])
    # Interactive vmcore tasks keep their data for the user session.
    if not task.get_type() in [TASK_VMCORE_INTERACTIVE]:
        self.clean_task()

    log_info("Retrace took %d seconds" % self.stats["duration"])
    log_info(STATUS[STATUS_SUCCESS])
    self._symlink_log()
    task.set_status(STATUS_SUCCESS)
    self.notify_email()
    self.hook_success()
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import distro
import unittest

import pandas

from nimbusml import Pipeline
from nimbusml.preprocessing.normalization import RobustScaler


@unittest.skipIf(
    'centos' in distro.linux_distribution(full_distribution_name=False)[0].lower(),
    "centos is not supported")
class TestRobustScaler(unittest.TestCase):
    """Unit tests for the RobustScaler preprocessing transform."""

    def test_with_integer_inputs(self):
        """Centered+scaled integer column matches the known scaled values."""
        frame = pandas.DataFrame(data=dict(c0=[1, 3, 5, 7, 9]))
        scaler = RobustScaler(columns='c0', center=True, scale=True)
        transformed = Pipeline([scaler]).fit_transform(frame)
        expected = pandas.Series([-1.0, -0.5, 0.0, 0.5, 1.0])
        self.assertTrue(transformed.loc[:, 'c0'].equals(expected))


if __name__ == '__main__':
    unittest.main()