def get_arch_triplet():
    """Return the Debian multiarch triplet for the current machine.

    Looks up platform.machine() in the module-level _DEB_TRANSLATIONS table.

    Raises:
        EnvironmentError: if the current machine has no known triplet.
    """
    try:
        return _DEB_TRANSLATIONS[platform.machine()]['triplet']
    except KeyError:
        # BUG FIX: the two concatenated message parts were missing a
        # separating space, producing "...log a bug athttps://...".
        raise EnvironmentError(
            '{} is not supported, please log a bug at '
            'https://bugs.launchpad.net/snapcraft'.format(platform.machine()))
def build_c_unit_tests():
    """ Builds unit tests in C """
    # Pick the prebuilt cmocka library matching the host OS/arch, if any.
    library_path = ''
    file_name = ''
    if 'win32' in sys.platform:
        file_name = 'cmocka.dll'
        if '64' in platform.machine():
            library_path = 'code-experiments/test/unit-test/lib/win64'
        elif '32' in platform.machine() or 'x86' in platform.machine():
            if 'cygwin' in os.environ['PATH']:
                library_path = 'code-experiments/test/unit-test/lib/win32_cygwin'
            else:
                library_path = 'code-experiments/test/unit-test/lib/win32_mingw'
    elif 'linux' in sys.platform:
        file_name = 'libcmocka.so'
        if 'Ubuntu' in platform.linux_distribution():
            library_path = 'code-experiments/test/unit-test/lib/linux_ubuntu'
        elif 'Fedora' in platform.linux_distribution():
            library_path = 'code-experiments/test/unit-test/lib/linux_fedora'
    elif 'darwin' in sys.platform:  # Mac
        library_path = 'code-experiments/test/unit-test/lib/macosx'
        file_name = 'libcmocka.dylib'
    # Stage the library (when one was selected) and the sources, then build.
    if library_path:
        copy_file(os.path.join(library_path, file_name),
                  os.path.join('code-experiments/test/unit-test', file_name))
    copy_file('code-experiments/build/c/coco.c', 'code-experiments/test/unit-test/coco.c')
    copy_file('code-experiments/src/coco.h', 'code-experiments/test/unit-test/coco.h')
    make("code-experiments/test/unit-test", "clean")
    make("code-experiments/test/unit-test", "all")
def get_system_type():
    """Print the platform-identification values for the current host.

    Purely informational; prints to stdout and returns None.
    """
    # BUG FIX: the original used Python-2 print statements; use the
    # function form, which is valid on both Python 2 and 3.
    print(platform.system())
    print(platform.machine())
    print(platform.mac_ver())
    print(platform.win32_ver())
    # platform.linux_distribution() was removed in Python 3.8 — only call
    # it where it still exists instead of crashing.
    if hasattr(platform, 'linux_distribution'):
        print(platform.linux_distribution())
    print(platform.platform())
def DetectarSistema():
    """Detect the host OS and CPU architecture.

    Returns:
        (OS, arch): OS is "windows" or "linux" (None on other os.name
        values); arch is the raw machine string for ARM/MIPS boards, or
        "x64"/"x86" for PCs (None if undeterminable).
    """
    import platform
    import os
    arch = None  # default so we never hit UnboundLocalError below
    # ARM / MIPS boards.
    # BUG FIX: "armv71" was a typo for "armv7l" (lowercase L, as reported
    # by platform.machine() on 32-bit ARM); the old spelling is kept for
    # backward compatibility.
    if platform.machine() in ["armv7l", "armv71", "mips", "mipsel"]:
        arch = platform.machine()
    # PC
    elif platform.machine() in ["i686", "AMD64"]:
        if platform.architecture()[0] == "64bit":
            arch = "x64"
        elif platform.architecture()[0] == "32bit":
            arch = "x86"
    else:
        if platform.architecture()[0] == "64bit":
            arch = "x64"
        elif platform.architecture()[0] == "32bit":
            arch = "x86"
    OS = None  # default so the return never raises
    # Windows
    if os.name == "nt":
        OS = "windows"
    # Linux (any POSIX)
    if os.name == "posix":
        OS = "linux"
    return OS, arch
def check():
    """Print and return a normalized architecture name.

    Returns "amd64" for x86_64, "x86" for i686, and None for any other
    machine type (preserving the original implicit-None behavior).
    """
    # BUG FIX: Python-2 print statements replaced with the function form.
    if platform.machine() == 'x86_64':
        print("amd64")
        return "amd64"
    elif platform.machine() == 'i686':
        print("x86")
        return "x86"
    return None
def _init_posix():
    """Initialize the module as appropriate for POSIX systems."""
    # Hard-coded build configuration variables (mirrors distutils'
    # sysconfig globals); the result is stored in module-global
    # _config_vars at the end.
    g = {}
    g['CC'] = "gcc -pthread"
    g['CXX'] = "g++ -pthread"
    g['OPT'] = "-DNDEBUG -O2"
    g['CFLAGS'] = "-DNDEBUG -O2"
    g['CCSHARED'] = "-fPIC"
    g['LDSHARED'] = "gcc -pthread -shared"
    # First C-extension filename suffix reported by the (deprecated) imp
    # module, e.g. ".so".
    g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0]
    g['AR'] = "ar"
    g['ARFLAGS'] = "rc"
    g['EXE'] = ""
    g['LIBDIR'] = os.path.join(sys.prefix, 'lib')
    g['VERSION'] = get_python_version()
    if sys.platform[:6] == "darwin":
        # macOS needs the target arch pinned and dynamic_lookup linking.
        import platform
        if platform.machine() == 'i386':
            if platform.architecture()[0] == '32bit':
                arch = 'i386'
            else:
                arch = 'x86_64'
        else:
            # just a guess
            arch = platform.machine()
        g['LDSHARED'] += ' -undefined dynamic_lookup'
        g['CC'] += ' -arch %s' % (arch,)
    # Publish the computed table for the rest of the module.
    global _config_vars
    _config_vars = g
def findRPMFileForGivenPackage(self, package, version="*", index=0):
    """Locate the built RPM file for *package*.

    Searches constants.rpmPath (and constants.inputRPMSPath when set) for
    both an arch-specific and a noarch RPM named
    package-version-release.<arch>.rpm.

    Returns the single matching path, None when nothing matches, and
    raises Exception when more than one candidate is found.
    """
    cmdUtils = CommandUtils()
    release = "*"
    if version == "*":
        # Resolve the concrete version/release from the spec data.
        version = SPECS.getData().getVersion(package, index)
        release = SPECS.getData().getRelease(package, index)
    listFoundRPMFiles = sum([cmdUtils.findFile(package + "-" + version + "-" + release + "." + platform.machine() + ".rpm",
                                               constants.rpmPath),
                             cmdUtils.findFile(package + "-" + version + "-" + release + ".noarch.rpm",
                                               constants.rpmPath)], [])
    if constants.inputRPMSPath is not None:
        listFoundRPMFiles = sum([cmdUtils.findFile(package + "-" + version + "-" + release + "." + platform.machine() + ".rpm",
                                                   constants.inputRPMSPath),
                                 cmdUtils.findFile(package + "-" + version + "-" + release + ".noarch.rpm",
                                                   constants.inputRPMSPath)], listFoundRPMFiles)
    if len(listFoundRPMFiles) == 1:
        return listFoundRPMFiles[0]
    if len(listFoundRPMFiles) == 0:
        return None
    if len(listFoundRPMFiles) > 1:
        # BUG FIX: the two concatenated message parts were missing a
        # separating space ("directory.Unable").
        self.logger.error("Found multiple rpm files for given package in rpm directory. " +
                          "Unable to determine the rpm file for package:" + package)
        raise Exception("Multiple rpm files found")
def __init__(self, ip=None, username=None, config_file_path=None):
    """ Initialization function.

    Parameters:
    ------------
    ip : string
        IP address as dotted quad
    username : string, optional
    config_file_path : string, optional
        Explicit location of the '.python_hue' registration file.
    """
    # Pick where the bridge-registration file lives: explicit path, then a
    # writable home directory, then iOS Documents, then the CWD.
    if config_file_path is not None:
        self.config_file_path = config_file_path
    elif os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME), os.W_OK):
        self.config_file_path = os.path.join(os.getenv(USER_HOME), '.python_hue')
    elif 'iPad' in platform.machine() or 'iPhone' in platform.machine() or 'iPod' in platform.machine():
        # BUG FIX: the third check was a duplicate 'iPad'; 'iPod' was the
        # intended third iOS device family.
        self.config_file_path = os.path.join(os.getenv(USER_HOME), 'Documents', '.python_hue')
    else:
        self.config_file_path = os.path.join(os.getcwd(), '.python_hue')
    self.ip = ip
    self.username = username
    self.lights_by_id = {}
    self.lights_by_name = {}
    self._name = None
    # self.minutes = 600 # these do not seem to be used anywhere?
    # self.seconds = 10
    self.connect()
def get_flac_data(self):
    """
    Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance.

    Writing these bytes directly to a file results in a valid FLAC file.
    """
    wav_data = self.get_wav_data()
    # determine which converter executable to use
    system = platform.system()
    path = os.path.dirname(os.path.abspath(__file__))  # directory of the current module file, where all the FLAC bundled binaries are stored
    flac_converter = shutil_which("flac")  # check for installed version first
    if flac_converter is None:  # flac utility is not installed
        # Fall back to the binary bundled with this module for the host OS.
        if system == "Windows" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]:  # Windows NT, use the bundled FLAC conversion utility
            flac_converter = os.path.join(path, "flac-win32.exe")
        elif system == "Linux" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]:
            flac_converter = os.path.join(path, "flac-linux-i386")
        elif system == "Darwin" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]:
            flac_converter = os.path.join(path, "flac-mac")
        else:
            raise OSError("FLAC conversion utility not available - consider installing the FLAC command line application using `brew install flac` or your operating system's equivalent")
    # mark FLAC converter as executable (best effort; may fail on
    # read-only installs, which is fine if it is already executable)
    try:
        stat_info = os.stat(flac_converter)
        os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC)
    except OSError:
        pass
    # run the FLAC converter with the WAV data to get the FLAC data
    # NOTE(review): shell=True with an interpolated path assumes the module
    # directory contains no quote characters — confirm.
    process = subprocess.Popen("\"{0}\" --stdout --totally-silent --best -".format(flac_converter), stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    flac_data, stderr = process.communicate(wav_data)
    return flac_data
def performance(attribute):
    """Return a JSON ``Response`` describing one (or all) host metrics.

    Supported attributes: system, processor, cpu_count/cpucount, machine,
    virtual_mem/virtualmem, virtual_mem_gb/virtualmemgb, all.  An unknown
    attribute produces a JSON ``null`` body.
    """
    # Lazy builders keyed by attribute name — only the requested metric
    # is actually queried.
    builders = {
        'system': lambda: {'system': platform.system()},
        'processor': lambda: {'processor': platform.processor()},
        'cpu_count': lambda: {'cpu_count': psutil.cpu_count()},
        'cpucount': lambda: {'cpu_count': psutil.cpu_count()},
        'machine': lambda: {'machine': platform.machine()},
        'virtual_mem': lambda: {'virtual_mem': psutil.virtual_memory().total},
        'virtualmem': lambda: {'virtual_mem': psutil.virtual_memory().total},
        'virtual_mem_gb': lambda: {'virtual_mem_gb': psutil.virtual_memory().total / (1024.0 ** 3)},
        'virtualmemgb': lambda: {'virtual_mem_gb': psutil.virtual_memory().total / (1024.0 ** 3)},
        'all': lambda: {
            'system': platform.system(),
            'processor': platform.processor(),
            'cpu_count': psutil.cpu_count(),
            'machine': platform.machine(),
            'virtual_mem': psutil.virtual_memory().total,
            'virtual_mem_gb': psutil.virtual_memory().total / (1024.0 ** 3),
        },
    }
    build = builders.get(attribute)
    data = build() if build is not None else None
    packet = json.dumps(data)
    resp = Response(packet, status=200, mimetype='application/json')
    return resp
def setUserAgent(wuuversion):
    """ Sets the user agent to use for web requests. Also sets the socket timeout, in lieu of a better place. """
    tracer.debug("setUserAgent")
    global useragent
    # Build a browser-style platform token for the UA string.
    wuuplatform = "Unknown"
    if sys.platform[:5] == 'linux':
        wuuplatform = 'X11; U; Linux %s; Python %s' % (platform.machine(), platform.python_version())
    elif sys.platform == 'darwin':
        maccpu = 'PPC'
        if platform.machine() == 'i386':
            maccpu = 'Intel'
        wuuplatform = 'Macintosh; U; %s Mac OS X' % (maccpu)
    elif sys.platform == 'win32':
        winver = platform.version()[:3]  # should grab only the major.minor of the OS version
        wuuplatform = 'Windows; U; Windows NT %s; Python %s' % (winver, platform.python_version())
    useragent = "WUU/%s (%s)" % (wuuversion, wuuplatform)  # sets the user agent used for web requests
    logger.log(DEBUG5, 'User agent set to %s' % (useragent))
    # Try to set global socket timeout - if unparseable as an integer, use blocking mode
    # NOTE(review): the bare except deliberately swallows any failure here
    # (missing/invalid setting) and leaves sockets in blocking mode.
    try:
        timeout = int(getGlobalSetting(":SocketTimeout"))
        if timeout > 0:
            socket.setdefaulttimeout(timeout)
            outDebug("Socket timeout set to %d seconds" % (timeout))
    except:
        pass  # blocking mode (the all-round default)
def detect(conf):
    """Configure the (pre-Go-1) Go toolchain for this waf build.

    Chooses compiler/linker/extension names from platform.machine() unless
    the user already set them, then locates the programs.
    """
    def set_def(var, val):
        # Only set the variable when the user/environment has not.
        if not conf.env[var]:
            conf.env[var] = val
    set_def("GO_PLATFORM", platform.machine())
    # 6* tools target amd64, 8* tools target 386 (old gc naming scheme).
    if conf.env.GO_PLATFORM == "x86_64":
        set_def("GO_COMPILER", "6g")
        set_def("GO_LINKER", "6l")
        set_def("GO_EXTENSION", ".6")
    elif conf.env.GO_PLATFORM == "i386":
        set_def("GO_COMPILER", "8g")
        set_def("GO_LINKER", "8l")
        set_def("GO_EXTENSION", ".8")
    # No tool names at all means the platform was not recognized above.
    if not (conf.env.GO_COMPILER or conf.env.GO_LINKER or conf.env.GO_EXTENSION):
        raise conf.fatal("Unsupported platform " + platform.machine())
    set_def("GO_PACK", "gopack")
    set_def("GO_PACK_EXTENSION", ".a")
    conf.find_program(conf.env.GO_COMPILER, var="GOC", mandatory=True)
    conf.find_program(conf.env.GO_LINKER, var="GOL", mandatory=True)
    conf.find_program(conf.env.GO_PACK, var="GOP", mandatory=True)
def samples_to_flac(self, source, frame_data):
    """Encode raw audio frames from *source* as FLAC bytes.

    Writes *frame_data* into an in-memory WAV container, then pipes it
    through the `flac` command-line encoder (installed or bundled).
    """
    assert isinstance(source, AudioSource), "Source must be an audio source"
    import platform, os, stat
    with io.BytesIO() as wav_file:
        wav_writer = wave.open(wav_file, "wb")
        try:  # note that we can't use context manager due to Python 2 not supporting it
            wav_writer.setsampwidth(source.SAMPLE_WIDTH)
            wav_writer.setnchannels(source.CHANNELS)
            wav_writer.setframerate(source.RATE)
            wav_writer.writeframes(frame_data)
        finally:  # make sure resources are cleaned up
            wav_writer.close()
        wav_data = wav_file.getvalue()
    # determine which converter executable to use
    system = platform.system()
    path = os.path.dirname(os.path.abspath(__file__))  # directory of the current module file, where all the FLAC bundled binaries are stored
    flac_converter = shutil_which("flac")  # check for installed version first
    if flac_converter is None:  # flac utility is not installed
        if system == "Windows" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]:  # Windows NT, use the bundled FLAC conversion utility
            flac_converter = os.path.join(path, "flac-win32.exe")
        elif system == "Linux" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]:
            flac_converter = os.path.join(path, "flac-linux-i386")
        else:
            raise OSError("FLAC conversion utility not available - consider installing the FLAC command line application using `brew install flac` or your operating system's equivalent")
    # mark covnerter as executable (best effort)
    try:
        stat_info = os.stat(flac_converter)
        os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC)
    except OSError:
        pass
    # Pipe the WAV bytes through the encoder and collect stdout.
    process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    flac_data, stderr = process.communicate(wav_data)
    return flac_data
def detect_platform():
    """Populate the HOST/TARGET/DEVNULL globals from the running system.

    HOST/TARGET become [os_family, arch]; TARGET additionally gets the raw
    machine string appended for optional auto-tuning.
    """
    global HOST, TARGET, DEVNULL
    if sys.platform.startswith("win"):
        family, DEVNULL = "windows", "nul"
    elif sys.platform.startswith("darwin"):
        family, DEVNULL = "mac", "/dev/null"
    else:
        # Default to POSIX
        family, DEVNULL = "posix", "/dev/null"
    HOST = [family]
    TARGET = [family]
    machine = platform.machine()
    # Python 2.5 on windows returns empty string here; also normalize any
    # four-character "i?86" spelling (i386/i486/i586/i686) to plain x86.
    if machine == "" or (len(machine) == 4 and machine[0] == "i" and machine[2:4] == "86"):
        machine = "x86"
    HOST.append(machine)
    TARGET.append(machine)
    # Earlier code here would automatically tune for host processor.
    # This is not good for package maintainers, since the machine on which
    # packages are built aren't related in any way to machines, on which
    # users will run the binary packages. So we'll stick to generic tuning
    # by default, and autodetect if user requires "auto" tuning.
    TARGET.append(platform.machine())
def stop_sonar(sonarhome):
    """Stop the SonarQube instance installed at *sonarhome*.

    On Windows the NT service is uninstalled first.  Returns True when the
    web UI went down within the timeout, False otherwise.
    """
    if platform.system() == "Windows":
        # Uninstall the NT service matching the host architecture.
        # NOTE(review): platform.machine() on Windows usually reports
        # "AMD64", so the "x86_64" branch looks dead — confirm.
        if platform.machine() == "x86_64":
            command = ["cmd", "/c", os.path.join(sonarhome, "bin", "windows-x86-64", "UninstallNTService.bat")]
            check_call(command, stdout=PIPE, shell=os.name == "nt")
        elif platform.machine() == "i686":
            command = ["cmd", "/c", os.path.join(sonarhome, "bin", "windows-x86-32", "UninstallNTService.bat")]
            check_call(command, stdout=PIPE, shell=os.name == "nt")
        elif platform.machine() == "AMD64":
            command = ["cmd", "/c", os.path.join(sonarhome, "bin", "windows-x86-64", "UninstallNTService.bat")]
            check_call(command, stdout=PIPE, shell=os.name == "nt")
        if not wait_for_sonar(30, is_webui_down):
            sys.stdout.write(RED + "FAILED\n" + RESET)
            return False
    sys.stdout.write(INDENT + "stopping SonarQube ... ")
    sys.stdout.flush()
    # Run the platform stop script and wait for the web UI to go down.
    rc = check_call(stop_script(sonarhome))
    if rc != 0 or not wait_for_sonar(30, is_webui_down):
        sys.stdout.write(RED + "FAILED\n" + RESET)
        return False
    sys.stdout.write(GREEN + "OK\n\n" + RESET)
    return True
def sizeof(arch, t):
    """Return the size in bytes of type *t* when compiled for *arch*.

    *t* may be a Parameter (its .type is used) or a type-name string.
    Unknown sizes are probed via the C compiler and cached in _sizes.
    """
    assert isinstance(t, (Parameter,) + six.string_types)
    if isinstance(t, Parameter):
        # Recurse on the parameter's underlying type name.
        return sizeof(arch, t.type)
    size = _sizes.get(t)
    if size is None:
        # We don't know the size of this type, so we'll ask the C compiler.
        toolprefix = os.environ.get('TOOLPREFIX', '')
        compiler = '%sgcc' % toolprefix
        extra_flags = []
        # Account for the fact that we may be cross-compiling using our native
        # compiler.
        if arch == 'ia32' and platform.machine() == 'x86_64':
            extra_flags.append('-m32')
        elif arch == 'x86_64' and platform.machine() == 'i386':
            extra_flags.append('-m64')
        # Determine the size by invoking the c compiler
        size = sizeof_probe.probe_sizeof(t, compiler, extra_flags)
        # Cache the result for next time.
        _sizes[t] = size
    assert size is not None
    return size
def samples_to_flac(self, source, frame_data):
    """Encode raw audio frames from *source* as FLAC bytes.

    Builds an in-memory WAV container from *frame_data*, then pipes it
    through the `flac` command-line encoder (installed or bundled).
    """
    import platform, os
    with io.BytesIO() as wav_file:
        wav_writer = wave.open(wav_file, "wb")
        try:
            wav_writer.setsampwidth(source.SAMPLE_WIDTH)
            wav_writer.setnchannels(source.CHANNELS)
            wav_writer.setframerate(source.RATE)
            wav_writer.writeframes(frame_data)
        finally:  # make sure resources are cleaned up
            wav_writer.close()
        wav_data = wav_file.getvalue()
    # determine which converter executable to use
    system = platform.system()
    path = os.path.dirname(os.path.abspath(__file__))  # directory of the current module file, where all the FLAC bundled binaries are stored
    flac_converter = shutil_which("flac")  # check for installed version first
    if flac_converter is None:  # flac utility is not installed
        if system == "Windows" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]:  # Windows NT, use the bundled FLAC conversion utility
            flac_converter = os.path.join(path, "flac-win32.exe")
        elif system == "Linux" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]:
            flac_converter = os.path.join(path, "flac-linux-i386")
        else:
            # NOTE(review): ChildProcessError is Python-3-only and an odd
            # fit here (no child process was involved) — confirm intent.
            raise ChildProcessError("FLAC conversion utility not available - consider installing the FLAC command line application using brew install flac")
    # Pipe the WAV bytes through the encoder and collect stdout.
    process = subprocess.Popen("\"%s\" --stdout --totally-silent --best -" % flac_converter, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
    flac_data, stderr = process.communicate(wav_data)
    return flac_data
def ansFile(fname):
    """Resolve the platform-specific expected-results file for *fname*.

    Picks an extension (.ans.sun/.ans.osx/.ans.suse/.ans.linux-*) based on
    the host platform, falling back to plain ".ans" when no such file
    exists next to *fname*.
    """
    # Uses Jeff's new gpdiff.pl/atmsort.pl compatible with pyodbc
    # For pyODB, we have to use a different expected result
    # Check for platform dependent expected results
    curplatform = sys.platform
    ext = ".ans"
    # BUG FIX: the Python-2-only `string.find(s, sub) >= 0` idiom was
    # replaced by the `in` operator, which works on Python 2 and 3.
    if 'sun' in curplatform:
        ext = ".ans.sun"
    elif 'darwin' in curplatform:
        ext = ".ans.osx"
    # RH64 and SUSE return "linux2" for sys.platform
    # Will nee to use /etc/SuSE-release or /etc/redhat-release
    # Note: http://docs.python.org/library/sys.html
    # Linux (2.x and 3.x), platform value = linux2
    elif curplatform.startswith("linux"):
        if os.path.exists("/etc/SuSE-release"):
            ext = ".ans.suse"
        elif os.path.exists("/etc/redhat-release"):
            if platform.machine() == "i686":
                ext = ".ans.linux-i686"
            elif platform.machine() == "x86_64":
                ext = ".ans.linux-x86_64"
    # Fall back to the generic answer file when the specific one is absent.
    if not os.path.exists(os.path.splitext(fname)[0] + ext):
        ext = ".ans"
    return os.path.splitext(fname)[0] + ext
def probe_user(cls):
    """Determine (and cache in cls.user) the user QEMU guests run as.

    Boots a throwaway auto-destroy VM via libvirt, then inspects the
    owner of its qemu process.
    """
    with cls.lock:
        if cls.user:
            return cls.user
    # ppc64le hosts still use the 'ppc64' arch string in the domain XML.
    arch = 'ppc64' if platform.machine() == 'ppc64le' \
        else platform.machine()
    xml = cls.SIMPLE_VM_XML % {'name': KVMUSERTEST_VM_NAME, 'arch': arch}
    with RollbackContext() as rollback:
        with cls.lock:
            conn = libvirt.open(None)
            rollback.prependDefer(conn.close)
            # Auto-destroy guarantees the probe VM dies with the connection.
            f = libvirt.VIR_DOMAIN_START_AUTODESTROY
            dom = conn.createXML(xml, flags=f)
            rollback.prependDefer(dom.destroy)
            filename = '/var/run/libvirt/qemu/%s.pid' % KVMUSERTEST_VM_NAME
            with open(filename) as f:
                pidStr = f.read()
            p = psutil.Process(int(pidStr))
            # bug fix #357
            # in psutil 2.0 and above versions, username will be a method,
            # not a string
            if callable(p.username):
                cls.user = p.username()
            else:
                cls.user = p.username
    return cls.user
def get_properties(group):
    """Collect host metrics as a dict.

    group selects which set to gather: 'fresh' (fast-changing values),
    'seldom' (mostly-static values), or None for both.
    """
    mem = psutil.virtual_memory()
    disk = psutil.disk_usage('/')
    properties = {}
    if group is None or group == 'fresh':
        if platform.machine().startswith('arm') and platform.system() == 'Linux':
            # raspberry pi
            properties["cpuTemp"] = get_rpi_cpu_temperature()
        properties["ramAvailable"] = int(mem.available / (1024 * 1024))
        properties["usedDiskSpaceRoot"] = int(disk.used / (1024 * 1024))
        properties["bootTime"] = datetime.datetime.fromtimestamp(psutil.boot_time()).strftime(Publisher.DATE_FORMAT)
        # Blocks ~3 seconds to sample CPU load.
        properties["cpuLoad"] = psutil.cpu_percent(interval=3)
    if group is None or group == 'seldom':
        if platform.system() == 'Darwin':
            # mac
            properties["release"] = platform.mac_ver()[0]
        elif platform.machine().startswith('arm') and platform.system() == 'Linux':
            # raspberry pi
            # NOTE(review): platform.dist() was removed in Python 3.8 —
            # this branch will fail there; confirm target interpreter.
            properties["distribution"] = "{} {}".format(platform.dist()[0], platform.dist()[1])
        for i in NET_INTERFACES:
            properties["{}IpAddress".format(i)] = get_ip(i)
        properties["totalDiskSpaceRoot"] = int(disk.total / (1024 * 1024))
        properties["hostname"] = platform.node()
        properties["machine"] = platform.machine()
        properties["system"] = platform.system()
        properties["cpuProcessorCount"] = psutil.cpu_count()
        properties["ramTotal"] = int(mem.total / (1024 * 1024))
    return properties
def SystemSummary(self):
    """Gather a string describing intrinsic properties of the current machine.

    Ideally this would capture anything relevant about the current machine
    that would cause build output to vary (other than build recipe + inputs).
    The result is computed once and cached on self._system_summary.
    """
    if self._system_summary is None:
        # Note there is no attempt to canonicalize these values. If two
        # machines that would in fact produce identical builds differ in
        # these values, it just means that a superfluous build will be
        # done once to get the mapping from new input hash to preexisting
        # output hash into the cache.
        # BUG FIX: the original wrote
        #     assert len(sys.platform) != 0, len(platform.machine()) != 0
        # which made the second comparison the assert *message*, so it was
        # never actually checked.  Assert each condition separately.
        assert len(sys.platform) != 0
        assert len(platform.machine()) != 0
        # Use environment from command so we can access MinGW on windows.
        env = command.PlatformEnvironment([])
        gcc = pynacl.file_tools.Which('gcc', paths=env['PATH'].split(os.pathsep))
        p = subprocess.Popen(
            [gcc, '-v'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        _, gcc_version = p.communicate()
        assert p.returncode == 0
        items = [
            ('platform', sys.platform),
            ('machine', platform.machine()),
            ('gcc-v', gcc_version),
        ]
        self._system_summary = str(items)
    return self._system_summary
def get_cpu_arch():
    """
    Work out which CPU architecture we're running on
    """
    # (regex, arch) pairs matched in order against /proc/cpuinfo-style
    # lines returned by _get_cpu_info(); first match wins.
    cpu_table = [('^cpu.*(RS64|POWER3|Broadband Engine)', 'power'),
                 ('^cpu.*POWER4', 'power4'),
                 ('^cpu.*POWER5', 'power5'),
                 ('^cpu.*POWER6', 'power6'),
                 ('^cpu.*POWER7', 'power7'),
                 ('^cpu.*POWER8', 'power8'),
                 ('^cpu.*POWER9', 'power9'),
                 ('^cpu.*PPC970', 'power970'),
                 ('(ARM|^CPU implementer|^CPU part|^CPU variant'
                  '|^Features|^BogoMIPS|^CPU revision)', 'arm'),
                 ('(^cpu MHz dynamic|^cpu MHz static|^features'
                  '|^bogomips per cpu|^max thread id)', 's390'),
                 ('^type', 'sparc64'),
                 ('^flags.*:.* lm .*', 'x86_64'),
                 ('^flags', 'i386'),
                 ('^hart\\s*: 1$', 'riscv')]
    cpuinfo = _get_cpu_info()
    for (pattern, arch) in cpu_table:
        if _list_matches(cpuinfo, pattern):
            # ARM is a special situation, which matches both 32 bits
            # (v7) and 64 bits (v8).
            if arch == 'arm':
                arm_v8_arch_name = 'aarch64'
                if arm_v8_arch_name == platform.machine():
                    return arm_v8_arch_name
            return arch
    # Nothing in the table matched — fall back to the interpreter's view.
    return platform.machine()
def get_os_word_size_guess(self):
    """Guess the OS word size from substrings of platform.machine().

    Returns "(32 bit)" or "(64 bit)" when the machine string mentions
    exactly one of "32"/"64", otherwise an empty string.
    """
    machine = platform.machine()
    has_32 = "32" in machine
    has_64 = "64" in machine
    if has_32 and not has_64:
        return "(32 bit)"
    if has_64 and not has_32:
        return "(64 bit)"
    return ""
def test_upgrade_changes_schema(admin_mc, wait_remove_resource):
    """Integration test: updating a kontainer driver's URL refreshes its
    dynamically generated engine-config schema."""
    client = admin_mc.client
    # Pick the driver binary matching the host architecture.
    URL = DRIVER_AMD64_URL
    if platform.machine() == "aarch64":
        URL = DRIVER_ARM64_URL
    kd = client.create_kontainerDriver(
        createDynamicSchema=True,
        active=True,
        url=URL
    )
    wait_remove_resource(kd)
    kd = wait_for_condition('Active', 'True', admin_mc.client, kd, timeout=90)
    verify_driver_in_types(client, kd)
    # The pre-upgrade schema must not have the marker field yet.
    kdSchema = client.schema.types[kd.name + 'EngineConfig']
    assert 'specialTestingField' not in kdSchema.resourceFields
    # Point the driver at the new binary (again arch-dependent).
    NEW_URL = NEW_DRIVER_URL
    if platform.machine() == "aarch64":
        NEW_URL = NEW_DRIVER_ARM64_URL
    kd.url = NEW_URL
    kd = client.update_by_id_kontainerDriver(kd.id, kd)

    def schema_updated():
        # Polled until the regenerated schema contains the marker field.
        client.reload_schema()
        kdSchema = client.schema.types[kd.name + 'EngineConfig']
        return 'specialTestingField' in kdSchema.resourceFields

    wait_until(schema_updated)
    kdSchema = client.schema.types[kd.name + 'EngineConfig']
    assert 'specialTestingField' in kdSchema.resourceFields
def verify(prog_filename, log_filename=None):
    """Run the Maude verifier on *prog_filename* and report the outcome.

    Exits the process with Maude's return code on failure; otherwise
    prints colored success/failure plus the collected statistics.
    """
    global is_verified
    is_verified = True
    # Select the Maude binary for the host OS/architecture (None for
    # unsupported hosts; run_maude.run decides how to handle that).
    cmd = None
    if platform.system() == 'Linux':
        if platform.machine() == 'i686':
            cmd = 'maude.linux'
        elif platform.machine() == 'x86_64':
            cmd = 'maude.linux64'
    elif platform.system() == 'Darwin':
        cmd = 'maude.intelDarwin'
    args = ['-no-prelude', '-no-banner', '-no-wrap', '-no-ansi-color']
    if log_filename != None:
        args += ['-xml-log=' + log_filename]
    args += [prog_filename]
    retcode = run_maude.run(
        cmd=cmd,
        cmd_args=args,
        path=maude_dir,
        filter=output_filter,
        epilog='DONE!')
    if retcode != 0:
        sys.exit(retcode)
    # output_filter is expected to have updated is_verified / statistics /
    # output_stream as module globals while the run was streaming.
    if is_verified:
        print(green_color + 'Verification succeeded!' + no_color, statistics)
    else:
        print(red_color + 'Verification failed!' + no_color, statistics)
    if output_stream != None:
        if output_stream != "" and output_stream != "epsilon":
            print('Output:', output_stream)
def play_mp3(self, mp3_data): import platform, os, stat # determine which player executable to use system = platform.system() path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored player = shutil_which("mpg123") # check for installed version first if player is None: # flac utility is not installed if system == "Windows" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]: # Windows NT, use the bundled FLAC conversion utility player = os.path.join(path, "player", "mpg123-win32.exe") elif system == "Linux" and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]: player = os.path.join(path, "player", "mpg123-linux") elif system == 'Darwin' and platform.machine() in ["i386", "x86", "x86_64", "AMD64"]: player = os.path.join(path, "player", "mpg123-mac") else: raise OSError("MP3 player utility not available - consider installing the MPG123 command line application using `brew install mpg123` or your operating system's equivalent") try: stat_info = os.stat(player) os.chmod(player, stat_info.st_mode | stat.S_IEXEC) except OSError: pass process = subprocess.Popen("\"%s\" -q -" % player, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) play_info, stderr = process.communicate(mp3_data) return play_info
def download_compiled_archive(self, package, version):
    """
    Download compiled package archive, look for locations specified using --find-links.
    Store archive in current working folder.

    :param package: package name
    :param version: package version
    :return: path to archive or None if not found
    """
    if self.settings["no_remote_cache"]:
        return None
    # Archives are arch-specific; bail out if the arch is unknown.
    if not platform.machine():
        logging.warn('Cannot determine architecture from "platform.machine()".')
        return None
    archive_base_name = "%s-%s-%s" % (package, version, platform.machine())
    logging.info("Searching for compiled package archive %s" % archive_base_name)
    extensions = [".compiled.tar.gz", ".compiled.tar.bz2", ".compiled.zip"]
    # Try each index URL with each compression extension; first hit wins.
    # NOTE(review): urllib2 is Python-2-only — confirm target interpreter.
    for index in self.settings["find_links"]:
        for archive_name in [archive_base_name + ext for ext in extensions]:
            try:
                download(os.path.join(index, archive_name), archive_name,
                         verbose=self.settings["verbosity"] >= 2)
                return os.path.abspath(archive_name)
            except urllib2.URLError:
                pass
    logging.info("Failed to find compiled package archive %s" % archive_base_name)
    return None
def get_arch():
    """Return the machine architecture string (e.g. "x86_64").

    BUG FIX: the original branched on ``sys.maxsize > 2**32`` ("check if
    arch is real 64-bit") but both branches returned the same value, so
    the pointer-width test was dead code.  Behavior is unchanged.
    """
    return platform.machine()
def detected_architecture():
    # FIXME: Very weak check but not very common to run conan in other architectures
    # (e.g. "aarch64" contains "64" and therefore maps to "x86_64").
    machine = platform.machine()
    if "64" in machine:
        return "x86_64"
    if "86" in machine:
        return "x86"
    return None
def _GetD8BinaryPathForPlatform():
    """Return the path of the bundled d8 binary for this host.

    Only Linux/x86_64 and macOS/x86_64 are supported; anything else
    raises NotImplementedError.
    """
    system = platform.system()
    machine = platform.machine()
    if machine == "x86_64":
        if system == "Linux":
            return os.path.join(_V8_DIR, "linux", "x86_64", "d8")
        if system == "Darwin":
            return os.path.join(_V8_DIR, "mac", "x86_64", "d8")
    raise NotImplementedError("d8 binary for this platform and architecture is not yet supported")
# Tail of a DLL allow-list literal (the opening brace is outside this chunk).
    "VCRUNTIME140_1.dll",
    "VCRUNTIME140.dll",
    "api-ms-win-crt-heap-l1-1-0.dll",
    "api-ms-win-crt-runtime-l1-1-0.dll",
    "api-ms-win-crt-stdio-l1-1-0.dll",
    "api-ms-win-crt-filesystem-l1-1-0.dll",
    "api-ms-win-crt-string-l1-1-0.dll",
    "api-ms-win-crt-environment-l1-1-0.dll",
    "api-ms-win-crt-math-l1-1-0.dll",
    "api-ms-win-crt-convert-l1-1-0.dll",
}

# Filesystem and platform constants used by this build script.
HERE = osp.dirname(osp.abspath(__file__))  # directory containing this module
PACKAGE_ROOT = osp.dirname(osp.dirname(HERE))  # two levels up
PLATFORM_ARCH = platform.machine()
PYTHON_VERSION = sys.version_info


def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
    """Yield pieces of data from a file-like object until EOF."""
    while True:
        chunk = file.read(size)
        if not chunk:
            break
        yield chunk


def rehash(path, blocksize=1 << 20):
    """Return (hash, length) for path using hashlib.sha256()"""
    # Truncated in this chunk: the hashing loop continues past this view.
    h = hashlib.sha256()
def test_interpret(self):
    """Exercise the environment-marker interpreter against the values of
    the running interpreter: truthy markers, syntax errors, boolean
    combinations, comparison operators, and explicit execution contexts."""
    sys_platform = sys.platform
    version = sys.version.split()[0]
    os_name = os.name
    platform_version = platform.version()
    platform_machine = platform.machine()
    platform_python_implementation = python_implementation()
    # Markers built from the live values must all evaluate true.
    self.assertTrue(interpret("sys_platform == '%s'" % sys_platform))
    self.assertTrue(
        interpret("sys_platform == '%s' and python_full_version == '%s'"
                  % (sys_platform, version)))
    self.assertTrue(interpret("'%s' == sys_platform" % sys_platform))
    self.assertTrue(interpret('os_name == "%s"' % os_name))
    self.assertTrue(
        interpret('platform_version == "%s" and platform_machine == "%s"'
                  % (platform_version, platform_machine)))
    self.assertTrue(
        interpret('platform_python_implementation == "%s"'
                  % platform_python_implementation))
    self.assertTrue(interpret('platform_in_venv == "%s"' % in_venv()))
    # stuff that need to raise a syntax error
    ops = ('os_name == 2', "'2' == '2'", 'okpjonon', '', 'os_name ==',
           'python_version == 2.4')
    for op in ops:
        self.assertRaises(SyntaxError, interpret, op)
    # combined operations
    OP = 'os_name == "%s"' % os_name
    FALSEOP = 'os_name == "buuuu"'
    AND = ' and '
    OR = ' or '
    self.assertTrue(interpret(OP + AND + OP))
    self.assertTrue(interpret(OP + AND + OP + AND + OP))
    self.assertTrue(interpret(OP + OR + OP))
    self.assertTrue(interpret(OP + OR + FALSEOP))
    self.assertTrue(interpret(OP + OR + OP + OR + FALSEOP))
    self.assertTrue(interpret(OP + OR + FALSEOP + OR + FALSEOP))
    self.assertTrue(interpret(FALSEOP + OR + OP))
    self.assertFalse(interpret(FALSEOP + AND + FALSEOP))
    self.assertFalse(interpret(FALSEOP + OR + FALSEOP))
    # other operators
    self.assertTrue(interpret("os_name != 'buuuu'"))
    self.assertTrue(interpret("python_version > '1.0'"))
    self.assertTrue(interpret("python_version < '5.0'"))
    self.assertTrue(interpret("python_version <= '5.0'"))
    self.assertTrue(interpret("python_version >= '1.0'"))
    self.assertTrue(interpret("'%s' in os_name" % os_name))
    self.assertTrue(interpret("'buuuu' not in os_name"))
    self.assertTrue(interpret("'buu' in os_name", {'os_name': 'buuu'}))
    self.assertTrue(
        interpret("'buuuu' not in os_name and '%s' in os_name" % os_name))
    # execution context
    self.assertTrue(
        interpret('python_version == "0.1"', {'python_version': '0.1'}))
    # parentheses and extra
    if sys.platform != 'win32':
        relop = '!='
    else:
        relop = '=='
    expression = ("(sys_platform %s 'win32' or python_version == '2.4') "
                  "and extra == 'quux'" % relop)
    self.assertTrue(interpret(expression, {'extra': 'quux'}))
# Tail of a wget-style argument list (the opening bracket is outside this chunk).
    '--read-timeout=20',
    '--timeout=15',
    '--tries=4'
]

# Arguments for applying source patches with `git apply`.
git_apply_args = [
    'apply',
    '--reject',
    '--ignore-whitespace',
    '--whitespace=fix'
]

# Arguments for the POSIX `patch` utility.
patch_args = ['-flp1', '-i']

# vs help
win_sdk_default = '10.0.16299.0'
win_toolset_default = '142'

# Location of vswhere.exe, used to find Visual Studio installations.
vs_where_path = os.path.join(os.environ['ProgramFiles(x86)'], 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
# Bitness of the host OS and of the running Python interpreter.
host_is_64bit = (False, True)[platform.machine().endswith('64')]
python_is_64bit = (False, True)[8 * struct.calcsize("P") == 64]


def shellquote(s, windows=False):
    """Quote *s* for use on a POSIX shell (single quotes) or on a
    Windows command line (double quotes)."""
    if not windows:
        return "'" + s.replace("'", "'\''") + "'"
    else:
        return '\"' + s + '\"'


def getLatestVSVersion():
    # Truncated in this chunk: the vswhere invocation continues past this view.
    args = [
        '-latest',
        '-products *',
        '-requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64',
        '-property installationVersion'
def setup_bitcoind_thread(specter=None, internal_bitcoind_version=""):
    """Unpack the bundled Bitcoin Core release and write its bitcoin.conf.

    Runs in a background thread: any failure is logged and reported via
    specter.update_setup_error instead of being raised to the caller.
    """
    try:
        # Archive suffix of the official Bitcoin Core release per OS.
        BITCOIND_OS_SUFFIX = {
            "Windows": "win64.zip",
            "Linux": "x86_64-linux-gnu.tar.gz",
            "Darwin": "osx64.tar.gz",
        }
        # ARM Linux devices (e.g. Raspberry Pi 4 == armv7l) need ARM binary
        if platform.system() == "Linux" and "armv" in platform.machine():
            BITCOIND_OS_SUFFIX["Linux"] = "arm-linux-gnueabihf.tar.gz"
        # Frozen (PyInstaller) builds ship the archive inside the bundle;
        # otherwise it lives in the repository's pyinstaller folder.
        packed_name = (
            os.path.join(
                sys._MEIPASS,
                f"bitcoind/bitcoin-{internal_bitcoind_version}-{BITCOIND_OS_SUFFIX[platform.system()]}",
            )
            if getattr(sys, "frozen", False)
            else Path(__file__).parent
            / f"../../../../pyinstaller/bitcoind/bitcoin-{internal_bitcoind_version}-{BITCOIND_OS_SUFFIX[platform.system()]}"
        )
        bitcoin_binaries_folder = os.path.join(specter.data_folder, "bitcoin-binaries")
        logger.info(f"Unpacking binaries to {bitcoin_binaries_folder}")
        # Extract the archive into the data folder (format depends on OS).
        if BITCOIND_OS_SUFFIX[platform.system()].endswith("tar.gz"):
            with tarfile.open(packed_name, "r:gz") as so:
                so.extractall(specter.data_folder)
        else:
            with zipfile.ZipFile(packed_name, "r") as zip_ref:
                zip_ref.extractall(specter.data_folder)
        # Replace any previously unpacked binaries with the fresh ones.
        if os.path.exists(bitcoin_binaries_folder):
            shutil.rmtree(bitcoin_binaries_folder)
        os.rename(
            os.path.join(specter.data_folder, f"bitcoin-{internal_bitcoind_version}"),
            bitcoin_binaries_folder,
        )
        if not os.path.exists(specter.config["internal_node"]["datadir"]):
            logger.info(
                f"Creating bitcoin datadir: {specter.config['internal_node']['datadir']}"
            )
            os.makedirs(specter.config["internal_node"]["datadir"])
        logger.info(f"Writing bitcoin.conf")
        with open(
            os.path.join(specter.config["internal_node"]["datadir"], "bitcoin.conf"),
            "w",
        ) as file:
            # rpcauth line: salted HMAC of the configured RPC password.
            salt = generate_salt(16)
            password_hmac = password_to_hmac(
                salt, specter.config["internal_node"]["password"]
            )
            file.write(
                f'\nrpcauth={specter.config["internal_node"]["user"]}:{salt}${password_hmac}'
            )
            # Node settings: RPC server on, listening via the local Tor proxy.
            file.write(f"\nserver=1")
            file.write(f"\nlisten=1")
            file.write(f"\nproxy=127.0.0.1:9050")
            file.write(f"\nbind=127.0.0.1")
            file.write(f"\ntorcontrol=127.0.0.1:9051")
            file.write(f"\ntorpassword={specter.config['torrc_password']}")
        specter.config["bitcoind_internal_version"] = internal_bitcoind_version
        specter._save()
        specter.reset_setup("bitcoind")
    except Exception as e:
        logger.error(f"Failed to install Bitcoin Core. Error: {e}")
        handle_exception(e)
        specter.update_setup_error("bitcoind", str(e))
import weakref _builtin_compile = compile from platform import python_implementation # restricted set of variables _VARS = { 'sys.platform': sys.platform, 'python_version': '%s.%s' % sys.version_info[:2], # FIXME parsing sys.platform is not reliable, but there is no other # way to get e.g. 2.7.2+, and the PEP is defined with sys.version 'python_full_version': sys.version.split(' ', 1)[0], 'os.name': os.name, 'platform.version': platform.version(), 'platform.machine': platform.machine(), 'platform.python_implementation': python_implementation(), 'extra': None # wheel extension } for var in list(_VARS.keys()): if '.' in var: _VARS[var.replace('.', '_')] = _VARS[var] def default_environment(): """Return copy of default PEP 385 globals dictionary.""" return dict(_VARS) class ASTWhitelist(NodeTransformer):
import numpy as np import time import sys import os import cv2 # OpenCV 画像を読み込む #imageFormat=1 #cv_bgr = cv2.imread(os.path.join(path, file_name), imageFormat) # OpenCV Webカメラ準備 import platform vid = None if platform.machine() == 'aarch64': vid = cv2.VideoCapture(1) # WebCam Jetson TX2 /dev/video1 else: # armv7l vid = cv2.VideoCapture(0) # WebCam Raspberry Pi3 /dev/video0 print(vid.isOpened()) if not vid.isOpened(): raise IOError( ("Couldn't open video file or webcam. If you're " "trying to open a webcam, make sure you video_path is an integer!")) # カメラ画像サイズ image_height = 120 image_width = 160 image_depth = 3 # BGRの3bit vidw = None
import platform import shutil import os import sys import subprocess version = "0.6.0-beta" packages = ["s", "files", "sockets"] source = sys.argv[1] dist_name = "Emojicode-{0}-{1}-{2}".format(version, platform.system(), platform.machine()) path = os.path.abspath(dist_name) def copy_packages(destination, source): for package in packages: dir_path = os.path.join(destination, package) make_dir(dir_path) shutil.copy2(os.path.join(package, "interface.emojii"), dir_path) shutil.copy2(os.path.join(package, "lib" + package + ".a"), dir_path) def make_dir(path): if not os.path.exists(path): os.makedirs(path) def copy_header(*args): make_dir(os.path.join(path, "include", *args[:-1])) shutil.copy2(os.path.join(source, *args),
def __init__(self, diverter_config, listeners_config, logging_level=logging.INFO):
    """Initialize the Windows traffic diverter.

    Parses listener and diverter configuration, verifies the local network
    setup, then locates and opens a handle to the WinDivert driver.
    Exits the process (sys.exit(1)) when the driver cannot be opened.
    """
    self.logger = logging.getLogger('Diverter')
    self.logger.setLevel(logging_level)

    self.diverter_config = diverter_config
    self.listeners_config = listeners_config

    # Local IP address
    self.external_ip = socket.gethostbyname(socket.gethostname())
    self.loopback_ip = socket.gethostbyname('localhost')

    # Sessions cache
    # NOTE: A dictionary of source ports mapped to destination address, port tuples
    self.sessions = dict()

    #######################################################################
    # Listener specific configuration
    # NOTE: All of these definitions have protocol as the first key
    # followed by a list or another nested dict with the actual definitions

    # Diverted ports
    self.diverted_ports = dict()

    # Listener Port Process filtering
    # TODO: Allow PIDs
    self.port_process_whitelist = dict()
    self.port_process_blacklist = dict()

    # Listener Port Host filtering
    # TODO: Allow domain name resolution
    self.port_host_whitelist = dict()
    self.port_host_blacklist = dict()

    # Execute command list
    self.port_execute = dict()

    # Parse listener configurations
    self.parse_listeners_config(listeners_config)

    #######################################################################
    # Diverter settings and filters

    # Intercept filter
    # NOTE: All relevant connections are recorded as outbound by WinDivert
    # so additional filtering based on destination port will need to be
    # performed in order to determine the correct traffic direction.
    self.filter = None

    # Default TCP/UDP listeners
    self.default_listener_tcp_port = None
    self.default_listener_udp_port = None

    # Global TCP/UDP port blacklist
    self.blacklist_ports_tcp = []
    self.blacklist_ports_udp = []

    # Global process blacklist
    # TODO: Allow PIDs
    self.blacklist_processes = []

    # Global host blacklist
    # TODO: Allow domain resolution
    self.blacklist_hosts = []

    # Parse diverter config
    self.parse_diverter_config()

    #######################################################################
    # Network verification

    # Check active interfaces
    if not self.check_active_ethernet_adapters():
        self.logger.warning(
            'WARNING: No active ethernet interfaces detected!')
        self.logger.warning(' Please enable a network interface.')

    # Check configured gateways
    if not self.check_gateways():
        self.logger.warning('WARNING: No gateways configured!')
        self.logger.warning(
            ' Please configure a default gateway or route in order to intercept external traffic.'
        )

    # Check configured DNS servers
    if not self.check_dns_servers():
        self.logger.warning('WARNING: No DNS servers configured!')
        self.logger.warning(
            ' Please configure a DNS server in order to allow network resolution.'
        )

    #######################################################################
    # Initialize WinDivert

    # Locate the WinDivert driver
    # NOTE: This is necessary to work in scenarios where the applications is
    # executed as a python script, installed as an egg or with the pyinstaller
    dll_arch = "64" if platform.machine() == 'AMD64' else "32"
    dll_path = os.path.join('lib', dll_arch, 'WinDivert.dll')
    if not os.path.exists(dll_path):
        # Fall back to a path relative to this module's parent directory.
        dll_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                'lib', dll_arch, 'WinDivert.dll')
    if not os.path.exists(dll_path):
        self.logger.error('Could not open bundled WinDivert.dll')
        sys.exit(1)

    # Divert handle
    driver = None
    driver = WinDivert(dll_path=dll_path)
    try:
        self.handle = Handle(driver, filter=self.filter)
        self.handle.open()
    except WindowsError, e:
        # winerror 5 == access denied, 3 == path not found.
        if e.winerror == 5:
            self.logger.error(
                'ERROR: Insufficient privileges to run windows diverter.')
            self.logger.error(
                ' Please restart with Administrator privileges.')
            sys.exit(1)
        elif e.winerror == 3:
            self.logger.error(
                'ERROR: Could not locate WinDivert DLL or one of its components.'
            )
            self.logger.error(
                ' Please make sure you have copied FakeNet-NG to the C: drive.'
            )
            sys.exit(1)
        else:
            self.logger.error(
                'ERROR: Failed to open a handle to the WinDivert driver: %s',
                e)
            sys.exit(1)
else: copy_file(abs_file, os.path.join(build_root_dir, abs_file)) if abs_file.endswith('__init__.py'): copy_file(init_file, os.path.join(build_root_dir, abs_file)) else: if os.path.basename(abs_file) in ignore_folders: continue if os.path.basename(abs_file) in conf_folders: copy_file(abs_file, os.path.join(build_root_dir, abs_file)) compose_extensions(abs_file) if __name__ == '__main__': build_root_dir = 'build/lib.' + platform.system().lower( ) + '-' + platform.machine() + '-' + str( sys.version_info.major) + '.' + str(sys.version_info.minor) print(build_root_dir) extensions = [] ignore_folders = ['build'] # , 'test', 'tests'] conf_folders = ['conf'] compose_extensions(os.path.abspath('..')) os.remove(init_file) setup(name='my_project', version='1.0', ext_modules=cythonize( extensions, nthreads=16,
except ImportError: pip = None REQS_DIR = "lib" sys.path.insert(0, REQS_DIR) REQS_TXT = "requirements.txt" REQS_NO_AUDIO_TXT = "requirements_no_audio.txt" FFMPEG_BUILDS_URL = "https://ffmpeg.zeranoe.com/builds/" INTRO = ("==========================\n" "Elven - Discord Bot\n" "==========================\n") IS_WINDOWS = os.name == "nt" IS_MAC = sys.platform == "darwin" IS_64BIT = platform.machine().endswith("64") INTERACTIVE_MODE = not len(sys.argv) > 1 # CLI flags = non-interactive PYTHON_OK = sys.version_info >= (3, 5) FFMPEG_FILES = { "ffmpeg.exe" : "e0d60f7c0d27ad9d7472ddf13e78dc89", "ffplay.exe" : "d100abe8281cbcc3e6aebe550c675e09", "ffprobe.exe" : "0e84b782c0346a98434ed476e937764f" } def parse_cli_arguments(): parser = argparse.ArgumentParser(description="Elven Discord Bot Launcher") parser.add_argument("--start", "-s", help="Starts Elven", action="store_true")
def is_aarch64(): """ Simple function to return if host is AArch64 or not """ return platform.machine().startswith("aarch64")
import os import os.path import platform from shutil import copyfile TOOLS_PREFIX='' OS_NAME = platform.system(); MACH = platform.machine(); ARCH = platform.architecture(); is32bit = (ARCH[0] == '32bit'); if is32bit: if MACH == 'i686' or MACH == 'i386': TARGET_ARCH='x86' else: TARGET_ARCH='arm' else: TARGET_ARCH='' print('MACH=' + MACH + ' ARCH=' + str(ARCH) + ' TARGET_ARCH=' + TARGET_ARCH) def joinPath(root, subdir): return os.path.normpath(os.path.join(root, subdir)) TK_ROOT=os.path.dirname(os.path.normpath(os.path.abspath(__file__))) print('TK_ROOT: ' + TK_ROOT); TK_SRC = joinPath(TK_ROOT, 'src') TK_BIN_DIR = joinPath(TK_ROOT, 'bin') TK_LIB_DIR = joinPath(TK_ROOT, 'lib')
def _more_tags(self): return {'non_deterministic': ( _IS_32BIT or platform.machine().startswith(('ppc', 'powerpc')))}
(options, args) = parser.parse_args() if args: print "Unrecognized argument(s) %s." % args sys.exit(1) if not os.path.isdir(options.dest_dir): print "Error: %s is not a directory." % options.dest_dir sys.exit(1) if not os.path.isdir(options.gem5_dir): print "Error: %s is not a directory." % options.gem5_dir sys.exit(1) if machine() != "x86_64": print "Error: This script should run in a x86_64 machine" sys.exit(1) binaries_dir = options.dest_dir + "/binaries" if os.path.exists(binaries_dir): print "Error: %s already exists." % binaries_dir sys.exit(1) revisions_dir = options.dest_dir + "/revisions" if os.path.exists(revisions_dir): print "Error: %s already exists." % revisions_dir sys.exit(1)
'nppif', 'nppig', 'nppim', 'nppist', 'nppisu', 'nppitc', 'npps', 'nvToolsExt', 'nvblas', 'nvgraph', 'nvrtc', 'nvrtc-builtins', ] config['cuda_static_libraries'] = ['cudadevrt'] # nvjpeg is only available on linux if sys.platform.startswith('linux') and platform.machine() != 'aarch64': config['cuda_libraries'].append('nvjpeg') config['libdevice_versions'] = ['10'] config['linux'] = { 'blob': 'cuda_10.2.89_440.33.01_rhel6.run', 'ppc64le_blob': 'cuda_10.2.89_440.33.01_linux_ppc64le.run', 'embedded_blob': 'cuda-linux.10.2.89-27506705.run', 'ppc64le_embedded_blob': None, 'patches': [], # need globs to handle symlinks 'cuda_lib_fmt': 'lib{0}.so*', 'cuda_static_lib_fmt': 'lib{0}.a', 'nvtoolsext_fmt': 'lib{0}.so*', 'nvvm_lib_fmt': 'lib{0}.so*', 'libdevice_lib_fmt': 'libdevice.{0}.bc'
import warnings import os import os.path import time from enum import Enum from ctypes import * import re import platform sys.path.insert(0, '../hsapi') from core import * filepath = "../../SungemSDK/lib/" filename = "libhs.so" if platform.system() == "Linux": filepath += "linux/%s" % (platform.machine()) elif platform.system() == "Darwin": filepath += "macos" filename = "libhs.dylib" else: raise Exception("Unsupported operating system") dll = CDLL(os.path.join(os.path.dirname(__file__), filepath, filename)) from Controllers.FileIO import * from Controllers.DataTransforms import * myriad_debug_size = 120 handler = None no_conf_warning_thrown = False
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Usando o pyinstaller para gerar um binario pip install pyinstaller pyinstaller file.py -F rm -rf build *.spec *.pyc """ import os import sys import platform os_plat = platform.system().lower() bits = platform.machine()[-2:] def compile_py(file_name): if os.path.exists(file_name): bin_name = file_name.split(".")[0] os.system("find . -type f -iname *.pyc -exec rm -f {} \;") os.system("pyinstaller %s --clean --add-data 'app:app' -F -n %s" % (file_name, bin_name)) os.system("rm -rf build *.spec *.pyc") os.system("find . -type f -iname *.pyc -exec rm -f {} \;") os.system("rm -rf dist/ssl dist/static dist/templates") os.system("cp -aR ssl themes/AdminLTE/* dist/") else: print "File %s NotFound !!!" % (file_name)
def get_sys_info():
    """Print a best-effort diagnostic report (hardware, OS, Python, LLVM,
    CUDA, ROC, SVML and conda state) to stdout for bug reports."""
    # delay these imports until now as they are only needed in this
    # function which then exits.
    import platform
    import json
    from numba import config
    from numba import cuda as cu
    from numba.cuda import cudadrv
    from numba.cuda.cudadrv.driver import driver as cudriver
    from numba import roc
    from numba.roc.hlc import hlc, libhlc
    import textwrap as tw
    import ctypes as ct
    import llvmlite.binding as llvmbind
    import locale
    from datetime import datetime
    from itertools import chain
    from subprocess import check_output, CalledProcessError

    try:
        fmt = "%-35s : %-s"
        print("-" * 80)
        print("__Time Stamp__")
        print(datetime.utcnow())
        print("")
        print("__Hardware Information__")
        print(fmt % ("Machine", platform.machine()))
        print(fmt % ("CPU Name", llvmbind.get_host_cpu_name()))
        try:
            featuremap = llvmbind.get_host_cpu_features()
        except RuntimeError:
            print(fmt % ("CPU Features", "NA"))
        else:
            features = sorted(
                [key for key, value in featuremap.items() if value])
            cpu_feat = tw.fill(' '.join(features), 80)
            print(fmt % ("CPU Features", ""))
            print(cpu_feat)
        print("")
        print("__OS Information__")
        print(fmt % ("Platform", platform.platform(aliased=True)))
        print(fmt % ("Release", platform.release()))
        system_name = platform.system()
        print(fmt % ("System Name", system_name))
        print(fmt % ("Version", platform.version()))
        try:
            # OS-specific version probing per platform.system().
            if system_name == 'Linux':
                info = platform.linux_distribution()
            elif system_name == 'Windows':
                info = platform.win32_ver()
            elif system_name == 'Darwin':
                info = platform.mac_ver()
            else:
                raise RuntimeError("Unknown system.")
            buf = ''.join([
                x if x != '' else ' '
                for x in list(chain.from_iterable(info))
            ])
            print(fmt % ("OS specific info", buf))

            if system_name == 'Linux':
                print(fmt % ("glibc info", ' '.join(platform.libc_ver())))
        except:
            print("Error: System name incorrectly identified or unknown.")
        print("")
        print("__Python Information__")
        print(fmt % ("Python Compiler", platform.python_compiler()))
        print(fmt % ("Python Implementation",
                     platform.python_implementation()))
        print(fmt % ("Python Version", platform.python_version()))
        print(fmt % ("Python Locale ", ' '.join(
            [x for x in locale.getdefaultlocale() if x is not None])))
        print("")
        print("__LLVM information__")
        print(fmt % ("LLVM version", '.'.join(
            [str(k) for k in llvmbind.llvm_version_info])))
        print("")
        print("__CUDA Information__")
        # Look for GPUs
        try:
            cu.list_devices()[0]  # will a device initialise?
        except BaseException as e:
            # Classify the failure into one of the known message buckets.
            msg_not_found = "CUDA driver library cannot be found"
            msg_disabled_by_user = "******"
            msg_end = " or no CUDA enabled devices are present."
            msg_generic_problem = "Error: CUDA device intialisation problem."
            msg = getattr(e, 'msg', None)
            if msg is not None:
                if msg_not_found in msg:
                    err_msg = msg_not_found + msg_end
                elif msg_disabled_by_user in msg:
                    err_msg = msg_disabled_by_user + msg_end
                else:
                    err_msg = msg_generic_problem + " Message:" + msg
            else:
                err_msg = msg_generic_problem + " " + str(e)
            # Best effort error report
            print("%s\nError class: %s" % (err_msg, str(type(e))))
        else:
            try:
                cu.detect()
                dv = ct.c_int(0)
                cudriver.cuDriverGetVersion(ct.byref(dv))
                print(fmt % ("CUDA driver version", dv.value))
                print("CUDA libraries:")
                cudadrv.libs.test(sys.platform, print_paths=False)
            except:
                print(
                    "Error: Probing CUDA failed (device and driver present, runtime problem?)\n"
                )
        print("")
        print("__ROC Information__")
        roc_is_available = roc.is_available()
        print(fmt % ("ROC available", roc_is_available))

        # Probe both known ROC toolchains; each probe is best-effort.
        toolchains = []
        try:
            libhlc.HLC()
            toolchains.append('librocmlite library')
        except:
            pass
        try:
            cmd = hlc.CmdLine().check_tooling()
            toolchains.append('ROC command line tools')
        except:
            pass

        # if no ROC try and report why
        if not roc_is_available:
            from numba.roc.hsadrv.driver import hsa
            try:
                hsa.is_available
            except BaseException as e:
                msg = str(e)
            else:
                msg = 'No ROC toolchains found.'
            print(fmt % ("Error initialising ROC due to", msg))

        if toolchains:
            print(fmt % ("Available Toolchains", ', '.join(toolchains)))

        try:
            # ROC might not be available due to lack of tool chain, but HSA
            # agents may be listed
            from numba.roc.hsadrv.driver import hsa, dgpu_count
            decode = lambda x: x.decode('utf-8') if isinstance(x, bytes) else x
            print("\nFound %s HSA Agents:" % len(hsa.agents))
            for i, agent in enumerate(hsa.agents):
                print('Agent id : %s' % i)
                print(' vendor: %s' % decode(agent.vendor_name))
                print(' name: %s' % decode(agent.name))
                print(' type: %s' % agent.device)
                print("")

            _dgpus = []
            for a in hsa.agents:
                if a.is_component and a.device == 'GPU':
                    _dgpus.append(decode(a.name))
            print(fmt % ("Found %s discrete GPU(s)" % dgpu_count(),
                         ', '.join(_dgpus)))
        except Exception as e:
            print("No HSA Agents found, encountered exception when searching:")
            print(e)

        print("")
        print("__SVML Information__")
        # replicate some SVML detection logic from numba.__init__ here.
        # if SVML load fails in numba.__init__ the splitting of the logic
        # here will help diagnosis of the underlying issue
        have_svml_library = True
        try:
            if sys.platform.startswith('linux'):
                llvmbind.load_library_permanently("libsvml.so")
            elif sys.platform.startswith('darwin'):
                llvmbind.load_library_permanently("libsvml.dylib")
            elif sys.platform.startswith('win'):
                llvmbind.load_library_permanently("svml_dispmd")
            else:
                have_svml_library = False
        except:
            have_svml_library = False
        func = getattr(llvmbind.targets, "has_svml", None)
        llvm_svml_patched = func() if func is not None else False
        # SVML is usable only when enabled, patched into LLVM AND loadable.
        svml_operational = (config.USING_SVML and llvm_svml_patched
                            and have_svml_library)
        print(fmt % ("SVML state, config.USING_SVML", config.USING_SVML))
        print(fmt % ("SVML library found and loaded", have_svml_library))
        print(fmt % ("llvmlite using SVML patched LLVM", llvm_svml_patched))
        print(fmt % ("SVML operational:", svml_operational))

        # Look for conda and conda information
        print("")
        print("__Conda Information__")
        cmd = ["conda", "info", "--json"]
        try:
            conda_out = check_output(cmd)
        except Exception as e:
            print("Conda not present/not working.\nError was %s\n" % e)
        else:
            data = ''.join(conda_out.decode("utf-8").splitlines())
            jsond = json.loads(data)
            keys = [
                'conda_build_version', 'conda_env_version', 'platform',
                'python_version', 'root_writable'
            ]
            for k in keys:
                try:
                    print(fmt % (k, jsond[k]))
                except KeyError:
                    pass

            # get info about current environment
            cmd = ["conda", "list"]
            try:
                conda_out = check_output(cmd)
            except CalledProcessError as e:
                print("Error: Conda command failed. Error was %s\n" % e.output)
            else:
                print("")
                print("__Current Conda Env__")
                data = conda_out.decode("utf-8").splitlines()
                for k in data:
                    if k[0] != '#':  # don't show where the env is, personal data
                        print(k)

        print("-" * 80)

    except Exception as e:
        print("Error: The system reporting tool has failed unexpectedly.")
        print("Exception was:")
        print(e)

    finally:
        print(
            "%s" %
            "If requested, please copy and paste the information between\n"
            "the dashed (----) lines, or from a given specific section as\n"
            "appropriate.\n\n"
            "=============================================================\n"
            "IMPORTANT: Please ensure that you are happy with sharing the\n"
            "contents of the information present, any information that you\n"
            "wish to keep private you should remove before sharing.\n"
            "=============================================================\n")
def get_machine(): '''计算机类型''' return platform.machine()
def host_arch(): machine = platform.machine() if machine == 'i386': return 'ia32' return 'x64'
def start_script(sonarhome):
    """Start the SonarQube server installed under *sonarhome*.

    Rewrites the wrapper/sonar configuration files, then launches the
    platform-specific start script (on Windows this installs and starts
    the NT service).  Returns the command list that was launched.

    Raises UnsupportedPlatform when no start script is known for the
    current platform/machine combination.
    """
    command = None
    # Point the service wrapper at the JAVA_HOME JVM (forward slashes keep
    # the path valid inside wrapper.conf on Windows).
    replace(
        os.path.join(sonarhome, "conf", "wrapper.conf"),
        "wrapper.java.command=java",
        "wrapper.java.command=" + (os.environ['JAVA_HOME'] + '/bin/java').replace("\\", "/"))
    if platform.system() == "Linux":
        script = linux_script(sonarhome)
        if script:
            command = [script, "start"]
        Popen(command, stdout=PIPE, shell=os.name == "nt")
    elif platform.system() == "Windows":
        # Uncomment and point the data/temp paths at this install.
        replace(
            os.path.join(sonarhome, "conf", "sonar.properties"),
            "#sonar.path.data=data",
            "sonar.path.data=" + os.path.join(sonarhome, "data").replace("\\", "/"))
        replace(
            os.path.join(sonarhome, "conf", "sonar.properties"),
            "#sonar.path.temp=temp",
            "sonar.path.temp=" + os.path.join(sonarhome, "temp").replace("\\", "/"))
        replace(
            os.path.join(sonarhome, "conf", "wrapper.conf"),
            "wrapper.java.additional.1=-Djava.awt.headless=true",
            "wrapper.java.additional.1=-Djava.awt.headless=true -Djava.io.tmpdir=" +
            os.path.join(sonarhome, "temp").replace("\\", "/"))
        # NOTE(review): the three machine() branches below are near-duplicates
        # ("x86_64" and "AMD64" both use the 64-bit scripts); kept verbatim.
        if platform.machine() == "x86_64":
            sys.stdout.write(GREEN + "Install Service...\n")
            command = [
                "cmd", "/c",
                os.path.join(sonarhome, "bin", "windows-x86-64",
                             "InstallNTService.bat")
            ]
            check_call(command, stdout=PIPE, shell=os.name == "nt")
            sys.stdout.write(GREEN + "Install Service... Ok\n" + RESET)
            sys.stdout.write(GREEN + "Start Service... \n")
            command = [
                "cmd", "/c",
                os.path.join(sonarhome, "bin", "windows-x86-64",
                             "StartNTService.bat")
            ]
            Popen(command, stdout=PIPE, shell=os.name == "nt")
            sys.stdout.write(GREEN + "Start Service... Ok \n")
        elif platform.machine() == "i686":
            sys.stdout.write(GREEN + "Install Service...\n")
            command = [
                "cmd", "/c",
                os.path.join(sonarhome, "bin", "windows-x86-32",
                             "InstallNTService.bat")
            ]
            check_call(command, stdout=PIPE, shell=os.name == "nt")
            sys.stdout.write(GREEN + "Install Service... Ok\n" + RESET)
            sys.stdout.write(GREEN + "Start Service... \n")
            command = [
                "cmd", "/c",
                os.path.join(sonarhome, "bin", "windows-x86-32",
                             "StartNTService.bat")
            ]
            Popen(command, stdout=PIPE, shell=os.name == "nt")
            sys.stdout.write(GREEN + "Start Service... Ok \n" + RESET)
        elif platform.machine() == "AMD64":
            sys.stdout.write(GREEN + "Install Service...\n")
            command = [
                "cmd", "/c",
                os.path.join(sonarhome, "bin", "windows-x86-64",
                             "InstallNTService.bat")
            ]
            check_call(command, stdout=PIPE, shell=os.name == "nt")
            sys.stdout.write(GREEN + "Install Service... Ok\n" + RESET)
            sys.stdout.write(GREEN + "Start Service... \n")
            command = [
                "cmd", "/c",
                os.path.join(sonarhome, "bin", "windows-x86-64",
                             "StartNTService.bat")
            ]
            Popen(command, stdout=PIPE, shell=os.name == "nt")
            sys.stdout.write(GREEN + "Start Service... Ok \n" + RESET)
        sys.stdout.write(GREEN + "Start on windows done... Ok \n" + RESET)
    elif platform.system() == "Darwin":
        command = [
            os.path.join(sonarhome, "bin/macosx-universal-64/sonar.sh"),
            "start"
        ]
        Popen(command, stdout=PIPE, shell=os.name == "nt")
    if command is None:
        msg = "Dont know how to find the start script for the platform %s-%s" % (
            platform.system(), platform.machine())
        raise UnsupportedPlatform(msg)

    return command
def add_upstream_patches(patches, image, tmp_dir,
                         default_branch=os.path.join('stable',
                                                     con.DEFAULT_OS_VERSION),
                         uc_ip=None, docker_tag=None):
    """
    Adds patches from upstream OpenStack gerrit to Undercloud for deployment
    :param patches: list of patches
    :param image: undercloud image
    :param tmp_dir: to store temporary patch files
    :param default_branch: default branch to fetch commit (if not specified
        in patch)
    :param uc_ip: undercloud IP (required only for docker patches)
    :param docker_tag: Docker Tag (required only for docker patches)
    :return: Set of docker services patched (if applicable)
    """
    virt_ops = [{con.VIRT_INSTALL: 'patch'}]
    logging.debug("Evaluating upstream patches:\n{}".format(patches))
    docker_services = set()
    for patch in patches:
        assert isinstance(patch, dict)
        assert all(i in patch.keys() for i in ['project', 'change-id'])
        # Per-patch branch override falls back to the default branch.
        if 'branch' in patch.keys():
            branch = patch['branch']
        else:
            branch = default_branch
        patch_diff = build_utils.get_patch(patch['change-id'],
                                           patch['project'], branch)
        project_path = project_to_path(patch['project'], patch_diff)
        # If docker tag and python we know this patch belongs on docker
        # container for a docker service. Therefore we build the dockerfile
        # and move the patch into the containers directory. We also assume
        # this builder call is for overcloud, because we do not support
        # undercloud containers
        if platform.machine() == 'aarch64':
            docker_url = con.DOCKERHUB_AARCH64
        else:
            docker_url = con.DOCKERHUB_OOO
        if docker_tag and 'python' in project_path:
            # Projects map to multiple THT services, need to check which
            # are supported
            ooo_docker_services = project_to_docker_image(patch['project'],
                                                          docker_url)
            docker_img = ooo_docker_services[0]
        else:
            ooo_docker_services = []
            docker_img = None
        change = build_utils.get_change(con.OPENSTACK_GERRIT,
                                        patch['project'], branch,
                                        patch['change-id'])
        patch_promoted = is_patch_promoted(change,
                                           branch.replace('stable/', ''),
                                           docker_url,
                                           docker_img)

        # Only apply patches that exist and are not already promoted.
        if patch_diff and not patch_promoted:
            patch_file = "{}.patch".format(patch['change-id'])
            # If we found services, then we treat the patch like it applies to
            # docker only
            if ooo_docker_services:
                os_version = default_branch.replace('stable/', '')
                for service in ooo_docker_services:
                    docker_services = docker_services.union({service})
                    docker_cmds = [
                        "WORKDIR {}".format(project_path),
                        "ADD {} {}".format(patch_file, project_path),
                        "RUN patch -p1 < {}".format(patch_file)
                    ]
                    src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
                                  "{}".format(uc_ip, os_version, service,
                                              docker_tag)
                    oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
                                                src_img_uri)
                patch_file_path = os.path.join(tmp_dir, 'containers',
                                               patch_file)
            else:
                # Non-container patch: upload it into the image and apply it
                # with virt-customize.
                patch_file_path = os.path.join(tmp_dir, patch_file)
                virt_ops.extend([
                    {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
                                                     project_path)},
                    {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
                        project_path, patch_file)}])
                logging.info("Adding patch {} to {}".format(patch_file,
                                                            image))
            with open(patch_file_path, 'w') as fh:
                fh.write(patch_diff)
        else:
            logging.info("Ignoring patch:\n{}".format(patch))
    # Only customize the image when at least one patch op was added.
    if len(virt_ops) > 1:
        virt_utils.virt_customize(virt_ops, image)
    return docker_services
# coding=utf-8 # 위의 coding=utf-8 은 한글 입력을 가능하게 해 줍니다. 아님 comment out 문장도 입력이 안됨. # paltform 은 python 을 실행하는 운영체제를 보여줍니다. # multiprocessing 은 CPU 개수를 보여줌. import platform import multiprocessing print "운영체제: ", platform.system() print "상세 정보: ", platform.platform() print "version: ", platform.version() print "프로세서: ", platform.processor() print "CPU 수: ", multiprocessing.cpu_count() print("python 2.7 과 3.0 모두 됨") print platform.machine() print platform.node() print platform.python_build() print platform.python_compiler() print platform.python_version() print platform.uname() print multiprocessing.current_process()
def linux_script(sonarhome): if platform.machine() == "x86_64": return os.path.join(sonarhome, "bin/linux-x86-64/sonar.sh") elif platform.machine() == "i686": return os.path.join(sonarhome, "bin/linux-x86-32/sonar.sh")
def discover_processor(): return platform.machine()
from Adafruit_MotorHAT import Adafruit_MotorHAT as MotorHAT def _setup(motor_num: int = 1): mh = MotorHAT() # recommended for auto-disabling motors on shutdown! def turn_off_motors(): mh.getMotor(1).run(MotorHAT.RELEASE) mh.getMotor(2).run(MotorHAT.RELEASE) mh.getMotor(3).run(MotorHAT.RELEASE) mh.getMotor(4).run(MotorHAT.RELEASE) atexit.register(turn_off_motors) stepper = mh.getStepper(200, motor_num) # stepper.setSpeed(30) return stepper class FakeStepper: def __init__(self, motor_num: int = 1): pass def oneStep(self, *args): pass setup = _setup if platform.machine().startswith("arm") else FakeStepper
def install(self, spec, prefix):
    """Configure, build and install Charm++ for the requested
    (platform, machine, backend) combination via its own build script."""
    target = spec.variants["build-target"].value

    # Normalize sys.platform to the short names used in the versions table.
    plat = sys.platform
    if plat.startswith("linux"):
        plat = "linux"
    elif plat.startswith("win"):
        plat = "win"
    elif plat.startswith("cnl"):
        plat = "cnl"
    elif plat.startswith("cnk"):
        plat = "cnk"

    # Collapse machine families: any ppc*/arm* maps to "ppc"/"arm".
    mach = platform.machine()
    if mach.startswith("ppc"):
        mach = "ppc"
    elif mach.startswith("arm"):
        mach = "arm"

    comm = spec.variants['backend'].value

    # Define Charm++ version names for various (plat, mach, comm)
    # combinations. Note that not all combinations are supported.
    versions = {
        ("darwin", "x86_64", "mpi"): "mpi-darwin-x86_64",
        ("darwin", "x86_64", "multicore"): "multicore-darwin-x86_64",
        ("darwin", "x86_64", "netlrts"): "netlrts-darwin-x86_64",
        ("linux", "i386", "mpi"): "mpi-linux",
        ("linux", "i386", "multicore"): "multicore-linux",
        ("linux", "i386", "netlrts"): "netlrts-linux",
        ("linux", "i386", "uth"): "uth-linux",
        ("linux", "x86_64", "mpi"): "mpi-linux-x86_64",
        ("linux", "x86_64", "multicore"): "multicore-linux-x86_64",
        ("linux", "x86_64", "netlrts"): "netlrts-linux-x86_64",
        ("linux", "x86_64", "verbs"): "verbs-linux-x86_64",
        ("linux", "x86_64", "ofi"): "ofi-linux-x86_64",
        ("linux", "x86_64", "uth"): "uth-linux-x86_64",
        ("linux", "ppc", "mpi"): "mpi-linux-ppc",
        ("linux", "ppc", "multicore"): "multicore-linux-ppc",
        ("linux", "ppc", "netlrts"): "netlrts-linux-ppc",
        ("linux", "ppc", "pami"): "pami-linux-ppc64le",
        ("linux", "ppc", "verbs"): "verbs-linux-ppc64le",
        ("linux", "arm", "netlrts"): "netlrts-linux-arm7",
        ("linux", "arm", "multicore"): "multicore-arm7",
        ("win", "x86_64", "mpi"): "mpi-win-x86_64",
        ("win", "x86_64", "multicore"): "multicore-win-x86_64",
        ("win", "x86_64", "netlrts"): "netlrts-win-x86_64",
        ("cnl", "x86_64", "gni"): "gni-crayxc",
        ("cnl", "x86_64", "mpi"): "mpi-crayxc",
        ("cnk", "x86_64", "mpi"): "mpi-bluegeneq",
        ("cnk", "x86_64", "pami"): "pami-bluegeneq",
        ("cnk", "x86_64", "pamilrts"): "pamilrts-bluegeneq",
    }
    if (plat, mach, comm) not in versions:
        raise InstallError(
            "The communication mechanism %s is not supported "
            "on a %s platform with a %s CPU" % (comm, plat, mach))

    version = versions[(plat, mach, comm)]

    # We assume that Spack's compiler wrappers make this work. If
    # not, then we need to query the compiler vendor from Spack
    # here.
    options = [
        os.path.basename(self.compiler.cc),
        os.path.basename(self.compiler.fc),
        "-j%d" % make_jobs,
        "--destination=%s" % prefix,
    ]

    if 'backend=mpi' in spec:
        # in intelmpi <prefix>/include and <prefix>/lib fails so --basedir
        # cannot be used
        options.extend([
            '--incdir={0}'.format(incdir)
            for incdir in spec["mpi"].headers.directories
        ])
        options.extend([
            '--libdir={0}'.format(libdir)
            for libdir in spec["mpi"].libs.directories
        ])
    if "+papi" in spec:
        options.extend(["papi", "--basedir=%s" % spec["papi"].prefix])
    if "+smp" in spec:
        options.append("smp")
    if "+tcp" in spec:
        if 'backend=netlrts' not in spec:
            # This is a Charm++ limitation; it would lead to a
            # build error
            raise InstallError(
                "The +tcp variant requires "
                "the backend=netlrts communication mechanism")
        options.append("tcp")
    if "+omp" in spec:
        options.append("omp")
    if "+pthreads" in spec:
        options.append("pthreads")
    if "+cuda" in spec:
        options.append("cuda")
    if "+shared" in spec:
        options.append("--build-shared")
    if "+production" in spec:
        options.append("--with-production")
    if "+tracing" in spec:
        options.append("--enable-tracing")

    # Call "make" via the build script
    # Note: This builds Charm++ in the "tmp" subdirectory of the
    # install directory. Maybe we could set up a symbolic link
    # back to the build tree to prevent this? Alternatively, we
    # could dissect the build script; the build instructions say
    # this wouldn't be difficult.
    build = Executable(join_path(".", "build"))
    build(target, version, *options)

    # Charm++'s install script does not copy files, it only creates
    # symbolic links. Fix this.
    for dirpath, dirnames, filenames in os.walk(prefix):
        for filename in filenames:
            filepath = join_path(dirpath, filename)
            if os.path.islink(filepath):
                tmppath = filepath + ".tmp"
                # Skip dangling symbolic links
                try:
                    copy(filepath, tmppath)
                    os.remove(filepath)
                    os.rename(tmppath, filepath)
                except (IOError, OSError):
                    pass
    shutil.rmtree(join_path(prefix, "tmp"))
class Openjdk(Package):
    """The free and opensource java implementation"""

    homepage = "https://jdk.java.net"

    # Register one version() directive per entry of the module-level
    # _versions table (defined elsewhere in this file), keyed by
    # "<system>-<machine>", e.g. "Linux-x86_64".  The first registered
    # version whose number starts with preferred_prefix is marked
    # preferred; preferred_defined guarantees at most one gets the flag.
    preferred_prefix = "11."
    preferred_defined = False
    for ver, packages in _versions.items():
        key = "{0}-{1}".format(platform.system(), platform.machine())
        pkg = packages.get(key)
        if pkg:
            is_preferred = not preferred_defined and ver.startswith(preferred_prefix)
            if is_preferred:
                preferred_defined = True
            # pkg[0] is the sha256 checksum, pkg[1] the download URL.
            version(ver, sha256=pkg[0], url=pkg[1], preferred=is_preferred)

    # Map openjdk release ranges onto the virtual 'java' spec they satisfy.
    provides('java@17', when='@17.0:17')
    provides('java@16', when='@16.0:16')
    provides('java@11', when='@11.0:11')
    provides('java@10', when='@10.0:10')
    provides('java@9', when='@9.0:9')
    provides('java@8', when='@1.8.0:1.8')

    conflicts('target=ppc64:', msg='openjdk is not available for ppc64 (big endian)')

    # FIXME:
    # 1. `extends('java')` doesn't work, you need to use `extends('openjdk')`
    # 2. Packages cannot extend multiple packages, see #987
    # 3. Update `YamlFilesystemView.merge` to allow a Package to completely
    #    override how it is symlinked into a view prefix. Then, spack activate
    #    can symlink all *.jar files to `prefix.lib.ext`
    extendable = True

    # Regex used by Spack's external detection to find java binaries on PATH.
    executables = ['^java$']

    @classmethod
    def determine_version(cls, exe):
        """Extract the version from ``java -version`` output.

        Returns the build string with '+' normalized to '_', or None when
        the executable is not an OpenJDK build (or output is unparsable).
        """
        output = Executable(exe)('-version', output=str, error=str)

        # Make sure this is actually OpenJDK, not Oracle JDK
        if 'openjdk' not in output:
            return None

        match = re.search(r'\(build (\S+)\)', output)
        return match.group(1).replace('+', '_') if match else None

    @property
    def home(self):
        """Most of the time, ``JAVA_HOME`` is simply ``spec['java'].prefix``.
        However, if the user is using an externally installed JDK, it may be
        symlinked. For example, on macOS, the ``java`` executable can be found
        in ``/usr/bin``, but ``JAVA_HOME`` is actually
        ``/Library/Java/JavaVirtualMachines/jdk-10.0.1.jdk/Contents/Home``.
        Users may not know the actual installation directory and add ``/usr``
        to their ``packages.yaml`` unknowingly. Run ``java_home`` if it exists
        to determine exactly where it is installed.

        Specify which version we are expecting in case multiple Java versions
        are installed. See ``man java_home`` for more details."""
        prefix = self.prefix
        java_home = prefix.libexec.java_home
        if os.path.exists(java_home):
            # macOS only: /usr/libexec/java_home reports the real JDK root.
            java_home = Executable(java_home)
            version = str(self.version.up_to(2))
            prefix = java_home('--version', version, output=str).strip()
            prefix = Prefix(prefix)

        return prefix

    @property
    def libs(self):
        """Depending on the version number and whether the full JDK or just
        the JRE was installed, Java libraries can be in several locations:

        * ``lib/libjvm.so``
        * ``jre/lib/libjvm.dylib``

        Search recursively to find the correct library location."""
        return find_libraries(['libjvm'], root=self.home, recursive=True)

    def install(self, spec, prefix):
        # macOS tarballs nest the JDK under Contents/Home; elsewhere the
        # archive root is the JDK root.
        top_dir = 'Contents/Home/' if platform.system() == "Darwin" else '.'
        install_tree(top_dir, prefix)

    def setup_run_environment(self, env):
        """Set JAVA_HOME."""
        env.set('JAVA_HOME', self.home)

    def setup_dependent_build_environment(self, env, dependent_spec):
        """Set JAVA_HOME and CLASSPATH.

        CLASSPATH contains the installation prefix for the extension and any
        other Java extensions it depends on."""
        env.set('JAVA_HOME', self.home)

        # Collect every *.jar from all Java extensions in the dependent's
        # build/run/test closure.
        class_paths = []
        for d in dependent_spec.traverse(deptype=('build', 'run', 'test')):
            if d.package.extends(self.spec):
                class_paths.extend(find(d.prefix, '*.jar'))

        classpath = os.pathsep.join(class_paths)
        env.set('CLASSPATH', classpath)

    def setup_dependent_run_environment(self, env, dependent_spec):
        """Set CLASSPATH.

        CLASSPATH contains the installation prefix for the extension and any
        other Java extensions it depends on."""
        # For runtime environment set only the path for
        # dependent_spec and prepend it to CLASSPATH
        if dependent_spec.package.extends(self.spec):
            class_paths = find(dependent_spec.prefix, '*.jar')
            classpath = os.pathsep.join(class_paths)
            env.prepend_path('CLASSPATH', classpath)

    def setup_dependent_package(self, module, dependent_spec):
        """Allows spec['java'].home to work."""
        self.spec.home = self.home
def main():
    """Interactive build/packaging menu for this project.

    Displays a numbered menu in a loop until option 6 (Exit) is chosen:
    1: pip-install runtime dependencies (Tsinghua PyPI mirror)
    2: pip-install pyinstaller
    3: git pull
    4: pyinstaller build, copy docs into dist/ and zip the result
    5: best-effort cleanup of generated files and directories
    """
    import shutil  # rmtree(): os.rmdir() cannot remove non-empty directories

    while True:  # iterative loop replaces the original unbounded recursion
        print('''make:
[1]:下载Python库Lib依赖
[2]:安装pyinstaller
[3]:同步源代码(需要先安装git)
[4]:编译可执行文件
[5]:清理工作目录
[6]:Exit
''')
        try:
            choice = int(input('>>>'))
        except ValueError:
            continue  # non-numeric input: redisplay the menu instead of crashing
        if choice == 1:
            deps = [
                'lz4', 'ConfigParser', 'protobuf', 'brotli', 'pycryptodome',
                'docopt', 'Crypto', 'zstandard', 'google', 'checker', 'glob2'
            ]
            for pkg in deps:
                os.system(
                    'pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple '
                    + pkg)
        elif choice == 2:
            os.system(
                'pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple pyinstaller'
            )
        elif choice == 3:
            os.system('git pull')
        elif choice == 4:
            os.system('pyinstaller -F main.py')
            # Ship the documentation next to the built executable.
            for doc in ('Project.txt', 'about_pycrypto.md', 'README.md',
                        'requirements.txt', 'README_LGKDZ.txt',
                        'README_ozip.md', 'README_unpayload.md',
                        'README_simg2img.txt'):
                copy(doc, 'dist' + os.sep + doc)
            copytree('pic', 'dist' + os.sep + 'pic')
            zip_name = platform.system() + '_' + platform.machine() + '.zip'
            with zipfile.ZipFile(zip_name, 'w',
                                 compression=zipfile.ZIP_DEFLATED,
                                 allowZip64=True) as z:
                for d in os.listdir('dist'):
                    z.write('dist' + os.sep + d)
                for c in os.listdir('dist' + os.sep + 'pic'):
                    z.write('dist' + os.sep + 'pic' + os.sep + c)
        elif choice == 5:
            for name in ('main.spec', 'main.zip'):
                try:
                    os.remove(name)
                except OSError:
                    pass  # best-effort: the file may not exist
            for name in ('__pycache__', 'build', 'dist', 'logs', 'temp'):
                # BUG FIX: the original used os.rmdir(), which fails on
                # non-empty directories (dist/, build/...), so cleanup
                # silently did nothing under the bare except.
                shutil.rmtree(name, ignore_errors=True)
        elif choice == 6:
            return
        else:
            return  # original behavior: an unknown number ends the program
# Tiny TCP server answering platform queries ('machine', 'release', 'system',
# 'version', 'node') until some client sends 'quit'.
# Modernized to Python 3 (the rest of this file uses print()/input()):
# sockets carry bytes, so payloads are decoded/encoded explicitly.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 50008))
s.listen(5)
print("Server sudah siap")

# Dispatch table replaces the original if/elif chain.
_COMMANDS = {
    'machine': platform.machine,
    'release': platform.release,
    'system': platform.system,
    'version': platform.version,
    'node': platform.node,
}

running = True
while running:
    komm, addr = s.accept()
    try:
        while True:
            raw = komm.recv(1024)
            if not raw:
                break  # BUG FIX: empty recv means the client disconnected;
                       # the original looped forever sending to a dead socket.
            data = raw.decode('utf-8', 'replace').strip()
            if data.lower() == 'quit':
                running = False
                break
            print('perintah: ', data)
            handler = _COMMANDS.get(data.lower())
            respon = handler() if handler else 'unknown command'
            komm.send(respon.encode('utf-8'))
    finally:
        komm.close()  # BUG FIX: connection sockets were never closed
s.close()  # close the listening socket once, after shutdown