def run(args):
    logFormat = "(%(relativeCreated)5d) %(levelname)-5s %(threadName)s.%(name)s.%(funcName)s: %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=logFormat)
    _moduleLogger.debug("%s", __file__)
    _moduleLogger.debug("OS: %s", os.uname()[0])
    _moduleLogger.debug("Kernel: %s (%s) for %s", *os.uname()[2:])
    _moduleLogger.debug("Hostname: %s", os.uname()[1])

    app = QtGui.QApplication(args)
    controller = Controller()

    view = QtDeclarative.QDeclarativeView()
    view.setResizeMode(QtDeclarative.QDeclarativeView.SizeRootObjectToView)
    view.setWindowTitle(__file__)

    engine = view.engine()
    context = view.rootContext()
    context.setContextProperty("controller", controller)

    topLevelQMLFile = __file__.replace(".py", ".qml")
    view.setSource(topLevelQMLFile)
    view.show()

    return app.exec_()
def createTags(self, instanceId, machineName, machinetypeName):

    if self.machinetypes[machinetypeName].remote_joboutputs_url:
        joboutputsURL = self.machinetypes[machinetypeName].remote_joboutputs_url + machineName
    else:
        joboutputsURL = 'https://' + os.uname()[1] + ':' + str(self.https_port) + '/machines/' + machineName + '/joboutputs'

    try:
        result = self.ec2Request(
            formRequest = {
                'Action'       : 'CreateTags',
                'Version'      : self.version,
                'ResourceId.1' : instanceId,
                'Tag.1.Key'    : 'name',
                'Tag.1.Value'  : machineName,
                'Tag.2.Key'    : 'machinetype',
                'Tag.2.Value'  : machinetypeName,
                'Tag.3.Key'    : 'machinefeatures',
                'Tag.3.Value'  : 'https://' + os.uname()[1] + ':' + str(self.https_port) + '/machines/' + machineName + '/machinefeatures',
                'Tag.4.Key'    : 'jobfeatures',
                'Tag.4.Value'  : 'https://' + os.uname()[1] + ':' + str(self.https_port) + '/machines/' + machineName + '/jobfeatures',
                'Tag.5.Key'    : 'machineoutputs',
                'Tag.5.Value'  : joboutputsURL,
                'Tag.6.Key'    : 'joboutputs',
                'Tag.6.Value'  : joboutputsURL
            },
            verbose = False
        )
    except Exception as e:
        raise Ec2Error('Adding tags to ' + machineName + ' (' + instanceId + ') fails with ' + str(e))
def test_example(): global compiler,examplesDir,parentdir if (not examplesDir.endswith("/")) : rootdir = examplesDir + "/" rootdir = rootdir+parentdir for root, subFolders, files in os.walk(rootdir) : if root.find(".svn")==-1 : for f in files : if (size=='small' and f=='nightlytest.prop') or (size=='large' and (f=='nightlytest.prop' or f=='weeklytest.prop')) : os.chdir(root) yield check_build, 'build',root,compiler system = os.uname()[0].lower() arch = os.uname()[4] propFile= file(f, "r") properties = dict() load_prop(propFile,properties) if '42TestingPointsPATH' in properties : testingPointsPath = examplesDir+"/"+properties['42TestingPointsPATH'] else: testingPointsPath = "" testpoints = properties['TestingPoint'] for testpoint in testpoints : if (testpoint[0].find("${")!=-1) : testingPointPath = properties[testpoint[0][2:testpoint[0].find("}")]]+testpoint[0][testpoint[0].find("}")+1:] else: testingPointPath = testpoint[0] os.chdir(testingPointsPath + testingPointPath) if len(testpoint)<=2 : yield check_run, 'run', os.getcwd(), system, arch, compiler, root, testpoint[1], testingPointPath, False else : yield check_run, 'run', os.getcwd(), system, arch, compiler, root, testpoint[1], testingPointPath if len(testpoint)<=4 : yield check_output,'check',os.getcwd(), testpoint[2], testpoint[3] else : yield check_output,'check',os.getcwd(), testpoint[2], testpoint[3], testpoint[4]
def find_library(cls, name):
    if name in cls._libsearch_cache:
        return cls._libsearch_cache[name]

    if struct.calcsize("l") == 4:
        machine = os.uname()[4] + "-32"
    else:
        machine = os.uname()[4] + "-64"
    mach_map = {
        "x86_64-64": "libc6,x86-64",
        "ppc64-64": "libc6,64bit",
        "sparc64-64": "libc6,64bit",
        "s390x-64": "libc6,64bit",
        "ia64-64": "libc6,IA-64",
    }
    abi_type = mach_map.get(machine, "libc6")

    expr = r"\s+lib%s\.[^\s]+\s+\(%s, [^)]+[^/]+([^\s]+)" % (name, abi_type)
    with os.popen("/sbin/ldconfig -p 2>/dev/null") as f:
        data = f.read()
    res = re.search(expr, data)
    if not res:
        return None

    path = res.group(1)
    cls._libsearch_cache[name] = path
    return path
def __doOSG( self ):
    """ Treat the OSG case """
    osgDir = ''
    if self.pp.flavour == "OSG":
        vo = self.pp.releaseProject.replace( 'DIRAC', '' ).upper()
        if not vo:
            vo = 'DIRAC'
        osgDir = os.environ['OSG_WN_TMP']
        # Make a separate directory per Project if it is defined
        jobDir = os.path.basename( self.pp.pilotReference )
        if not jobDir:  # just in case
            import random
            jobDir = str( random.randint( 1000, 10000 ) )
        osgDir = os.path.join( osgDir, vo, jobDir )
        if not os.path.isdir( osgDir ):
            os.makedirs( osgDir )
        os.chdir( osgDir )
        try:
            import shutil
            shutil.copy( self.pp.installScript, os.path.join( osgDir, self.pp.installScriptName ) )
        except Exception, x:
            print sys.executable
            print sys.version
            print os.uname()
            print x
            raise x
def get_os_type(self):
    '''
    os.uname()
    Returns information identifying the current operating system.
    The return value is an object with five attributes:
        sysname  - operating system name
        nodename - name of machine on network (implementation-defined)
        release  - operating system release
        version  - operating system version
        machine  - hardware identifier

    Example:
    {'nodename': 'OSXAir.home.home', 'machine': 'x86_64', 'release': '15.6.0',
     'sysname': 'Darwin',
     'version': 'Darwin Kernel Version 15.6.0: Thu Jun 23 18:25:34 PDT 2016; root:xnu-3248.60.10~1/RELEASE_X86_64'}
    '''
    systemType = {}
    # Python dictionaries:
    # http://www.python-course.eu/python3_dictionaries.php
    # https://docs.python.org/3/library/collections.html
    systemType.update({'sysname': os.uname().sysname})
    systemType.update({'nodename': os.uname().nodename})
    systemType.update({'release': os.uname().release})
    systemType.update({'machine': os.uname().machine})
    systemType.update({'version': os.uname().version})
    return systemType
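# A minimal alternative sketch (an assumption, not part of the original module):
# on Python 3, os.uname() returns a struct sequence whose fields can be read by
# name, so the same dictionary can be built from a single call instead of five.
import os

def get_os_type_once():
    info = os.uname()  # fields: sysname, nodename, release, version, machine
    return {field: getattr(info, field)
            for field in ('sysname', 'nodename', 'release', 'version', 'machine')}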
def get_hostname(self):
    """
    Returns a hostname as configured by the user
    """
    if 'hostname' in self.config:
        return self.config['hostname']

    if ('hostname_method' not in self.config
            or self.config['hostname_method'] == 'fqdn_short'):
        return socket.getfqdn().split('.')[0]

    if self.config['hostname_method'] == 'fqdn':
        return socket.getfqdn().replace('.', '_')

    if self.config['hostname_method'] == 'fqdn_rev':
        hostname = socket.getfqdn().split('.')
        hostname.reverse()
        hostname = '.'.join(hostname)
        return hostname

    if self.config['hostname_method'] == 'uname_short':
        return os.uname()[1].split('.')[0]

    if self.config['hostname_method'] == 'uname_rev':
        hostname = os.uname()[1].split('.')
        hostname.reverse()
        hostname = '.'.join(hostname)
        return hostname

    if self.config['hostname_method'].lower() == 'none':
        return None

    raise NotImplementedError(self.config['hostname_method'])
def parseloginfo(self, loginfo): if self.visualization_units == 1: unit = "°F" else: unit = "°C" loginfo = loginfo.replace('{temp}', str(self.data)) loginfo = loginfo.replace('{unit}', unit) loginfo = loginfo.replace('{applet}', self.gconf.get_applet_name()) loginfo = loginfo.replace('{sensor}', self.tempmon.get_sensor_name()) loginfo = loginfo.replace('{zone}', self.tempmon.get_zone_display_name(self.thermalzone)) loginfo = loginfo.replace('{compdate}', time.asctime(time.localtime())) loginfo = loginfo.replace('{date}', time.strftime("%d%b%Y", time.localtime())) loginfo = loginfo.replace('{time}', time.strftime("%H:%M:%S", time.localtime())) loginfo = loginfo.replace('{year}', time.strftime("%Y", time.localtime())) loginfo = loginfo.replace('{month}', time.strftime("%m", time.localtime())) loginfo = loginfo.replace('{monthname}', time.strftime("%B", time.localtime())) loginfo = loginfo.replace('{monthsimp}', time.strftime("%b", time.localtime())) loginfo = loginfo.replace('{day}', time.strftime("%d", time.localtime())) loginfo = loginfo.replace('{dayname}', time.strftime("%A", time.localtime())) loginfo = loginfo.replace('{daysimp}', time.strftime("%a", time.localtime())) loginfo = loginfo.replace('{user}', os.environ["USER"]) loginfo = loginfo.replace('{sysname}', os.uname()[0]) loginfo = loginfo.replace('{hostname}', os.uname()[1]) loginfo = loginfo.replace('{kernel}', os.uname()[2]) loginfo = loginfo.replace('{arch}', os.uname()[4]) loginfo = loginfo.replace('#', '\t') return loginfo
def GET (self, name): """ HTTP GET method. """ if not session.login: return render.login() header = genpage.get_header() headernavbar = genpage.get_headernavbar() footer = genpage.get_footer() if name == "" or name == "rainbow.html": osversion = os.uname()[0] + " " + os.uname()[2] + " " + os.uname()[3] uptime = systat.get_uptime() plateform = systat.get_plateform() loadavg = systat.get_loadavg() return render.rainbow(osversion, plateform, uptime, loadavg, header, headernavbar, footer) if name == "changepass.html": return render.changepass(header, headernavbar, footer) if name == "selectlang.html": return render.selectlang(header, headernavbar, footer) if name == "reboot.html": return render.reboot(header, headernavbar, footer) if name == "shutdown.html": return render.shutdown(header, headernavbar, footer) if name == "logout": logger.info ('logout from %s' % web.ctx.ip) session.login = 0 session.kill() raise web.seeother('/')
def available():
    '''
    Return a list of all available kernel modules

    CLI Example:

    .. code-block:: bash

        salt '*' kmod.available
    '''
    ret = []
    mod_dir = os.path.join('/lib/modules/', os.uname()[2])
    for root, dirs, files in os.walk(mod_dir):
        for fn_ in files:
            if '.ko' in fn_:
                ret.append(fn_[:fn_.index('.ko')].replace('-', '_'))
    if 'Arch' in __grains__['os_family']:
        # Sadly this path is relative to kernel major version but ignores minor version
        mod_dir_arch = '/lib/modules/extramodules-' + os.uname()[2][0:3] + '-ARCH'
        for root, dirs, files in os.walk(mod_dir_arch):
            for fn_ in files:
                if '.ko' in fn_:
                    ret.append(fn_[:fn_.index('.ko')].replace('-', '_'))
    return sorted(list(ret))
def _findSoname_ldconfig(name):
    import struct
    if struct.calcsize('l') == 4:
        machine = os.uname()[4] + '-32'
    else:
        machine = os.uname()[4] + '-64'
    mach_map = {
        'x86_64-64': 'libc6,x86-64',
        'ppc64-64': 'libc6,64bit',
        'sparc64-64': 'libc6,64bit',
        's390x-64': 'libc6,64bit',
        'ia64-64': 'libc6,IA-64',
        }
    abi_type = mach_map.get(machine, 'libc6')
    # XXX assuming GLIBC's ldconfig (with option -p)
    expr = r'(\S+)\s+\((%s(?:, OS ABI:[^\)]*)?)\)[^/]*(/[^\(\)\s]*lib%s\.[^\(\)\s]*)' \
           % (abi_type, re.escape(name))
    f = os.popen('/sbin/ldconfig -p 2>/dev/null')
    try:
        data = f.read()
    finally:
        f.close()
    res = re.search(expr, data)
    if not res:
        return None
    return res.group(1)
def sys_info():
    sysname, nodename, release, version, machine = os.uname()
    system, node, release, version, machine, processor = platform.uname()
    distname, distver, distid = platform.linux_distribution()
    hostname = os.uname()[1]
    uptime = open('/proc/uptime').read().split()
    # /proc/loadavg: the first three columns measure CPU and IO utilization of the
    # last one, five, and 15 minute periods. The fourth column shows the number of
    # currently running processes and the total number of processes. The last
    # column displays the last process ID used.
    loadavg = open('/proc/loadavg').read().split()
    return {'system': system,
            'node': node,
            'release': release,
            'version': version,
            'machine': machine,
            'processor': processor,
            'distname': distname,
            'distver': distver,
            'distid': distid,
            'user': os.getlogin(),
            'hostname': hostname,
            'uptime': uptime[0],
            'idletime': uptime[1],
            'loadavg': loadavg[2]  # 15 minute average
            }
def plotdu(): stats = diskused() figure(1, figsize=(7,7)) ax = axes([0.1, 0.1, 0.8, 0.8]) stage = os.environ['STAGE'] id = subprocess.Popen('du -s '+stage+'/data/'+os.uname()[1]+'_data0/*', shell=True, stdout=subprocess.PIPE) duout = id.stdout.readlines() p = subprocess.Popen('ls '+stage+'/data/'+os.uname()[1]+'_data0/', shell=True, stdout=subprocess.PIPE) out = p.stdout.readlines() labels = ['free'] dubyid = [stats['kb-free']] for i in range(0, len(out)): labels.append(out[i].split('\n')[0]) dubyid.append(int(duout[i].split('\t')[0])) labels.append(os.uname()[1]+'_odexport/') od = subprocess.Popen('du -s '+stage+'/data/'+os.uname()[1]+'_odexport/', shell=True, stdout=subprocess.PIPE) odout = od.stdout.readlines() dubyid.append(int(odout[0].split('\t')[0])) fracs = dubyid #explode=(0, 0.05, 0, 0) pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True) title(stats['project']+' Allocation', bbox={'facecolor':'0.8', 'pad':5}) show()
def setUp(self):
    self.mox = mox.Mox()
    self.mox.StubOutWithMock(ds.gmacpyutil, 'RunProcess')
    if os.uname()[0] == 'Linux':
        self.InitMockFoundation()
    elif os.uname()[0] == 'Darwin':
        self.StubFoundation()
def is_windows(self):
    try:
        os.uname()
    except AttributeError:
        return True
    return False
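# An equivalent standalone sketch (an assumption, not part of the original class):
# hasattr() expresses the same "os.uname is missing on Windows" check without
# try/except.
import os

def looks_like_windows():
    return not hasattr(os, 'uname')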
def codegen(filename, start=0, duration=30):
    # Run codegen on the file and return the json. If start or duration is -1, ignore them.
    cmd = config.CODEGEN_BINARY_OVERRIDE
    if not cmd:
        # Is this a posix platform, or is it windows?
        if hasattr(os, 'uname'):
            if os.uname()[0] == "Darwin":
                cmd = "codegen.Darwin"
            else:
                cmd = 'codegen.' + os.uname()[0] + '-' + os.uname()[4]
        else:
            cmd = "codegen.windows.exe"

    if not os.path.exists(cmd):
        raise Exception("Codegen binary not found.")

    command = cmd + " \"" + filename + "\" "
    if start >= 0:
        command = command + str(start) + " "
    if duration >= 0:
        command = command + str(duration)

    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (json_block, errs) = p.communicate()
    json_block = reallyUTF8(json_block)

    try:
        return json.loads(json_block)
    except ValueError:
        logger.debug("No JSON object came out of codegen: error was %s" % (errs))
        return None
def _get_sys_information(self): """ Get relevant system information. """ if hasattr(os, "uname"): operatingsystem = os.uname()[0] distribution = os.uname()[2] else: operatingsystem = sys.platform distribution = " " return "Python version: %s \n"\ "BSDDB version: %s \n"\ "Gramps version: %s \n"\ "LANG: %s\n"\ "OS: %s\n"\ "Distribution: %s\n\n"\ "GTK version : %s\n"\ "gobject version: %s\n"\ "cairo version : %s"\ % (str(sys.version).replace('\n',''), BSDDB_STR, str(VERSION), get_env_var('LANG',''), operatingsystem, distribution, '%d.%d.%d' % (Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version()), '%d.%d.%d' % GObject.pygobject_version, cairo.version_info)
def __init__( self, options ): """ Initialise the snoing package manager.""" import sys super( snoing, self ).__init__() Util.CheckSystem() PackageUtil.kCachePath = Util.BuildDirectory( options.cachePath ) PackageUtil.kInstallPath = Util.BuildDirectory( options.installPath ) Log.kInfoFile = Log.LogFile( os.path.join( PackageUtil.kInstallPath, "README.md" ), True ) # Set the local details file Log.kDetailsFile = Log.LogFile( os.path.join( os.path.dirname( __file__ ), "snoing.log" ) ) Log.Detail( options ) Log.Detail( os.uname()[0] ) Log.Detail( os.uname()[2] ) Log.Header( "Caching to %s, installing to %s" % ( PackageUtil.kCachePath, PackageUtil.kInstallPath ) ) # Now check the install options are compatible with install directory if options.graphical == True and options.grid == True: Log.Error( "Cannot be both graphical and grid." ) self.PrintErrorMessage() snoingSettingsPath = os.path.join( PackageUtil.kInstallPath, "snoing.pkl" ) installModes = Util.DeSerialise( snoingSettingsPath ) # Set the file options if not set if installModes is None: installModes = { "Graphical" : options.graphical, "Grid" : options.grid } Util.Serialise( snoingSettingsPath, installModes ) # Check the options match if installModes["Graphical"] != options.graphical or installModes["Grid"] != options.grid: Log.Error( "Install mode for install directory does not match that specified. Install path is graphical %s and grid %s" \ % ( options.graphical, options.grid ) ) self.PrintErrorMessage() PackageUtil.kGraphical = options.graphical PackageUtil.kGrid = options.grid # First import all register all packages in the versions folder self.RegisterPackagesInDirectory( os.path.join( os.path.dirname( __file__ ), "versions" ) ) return
def _findSoname_ldconfig(name): import struct if struct.calcsize('l') == 4: machine = os.uname()[4] + '-32' else: machine = os.uname()[4] + '-64' mach_map = { 'x86_64-64': 'libc6,x86-64', 'ppc64-64': 'libc6,64bit', 'sparc64-64': 'libc6,64bit', 's390x-64': 'libc6,64bit', 'ia64-64': 'libc6,IA-64', } abi_type = mach_map.get(machine, 'libc6') # XXX assuming GLIBC's ldconfig (with option -p) expr = r'\s+(lib%s\.[^\s]+)\s+\(%s' % (re.escape(name), abi_type) env = dict(os.environ) env['LC_ALL'] = 'C' env['LANG'] = 'C' null = open(os.devnull, 'wb') try: with null: p = subprocess.Popen(['/sbin/ldconfig', '-p'], stderr=null, stdout=subprocess.PIPE, env=env) except OSError: # E.g. command not found return None [data, _] = p.communicate() res = re.search(expr, data) if not res: return None return res.group(1)
def osparameters():
    "Return OS parameters as expected in CMS, osx/slc and ia32/amd64"
    osname = os.uname()[0].replace('Darwin', 'osx').replace('Linux', 'slc5')
    osarch = os.uname()[-1].replace('x86_64', 'amd64')
    for intel in ['i386', 'i486', 'i586', 'i686']:
        osarch = osarch.replace(intel, 'ia32')
    return osname, osarch
def createSiteElement( self ): ''' Creates the top level CDash Site Element. Returns the <Site> element. ''' # grab the current xml structure xml = self.__xml # # create <Site> # # grab the system information os_platform = os.uname()[0] os_version = os.uname()[2] hostname = getfqdn() # grab the current buildtime buildtime = datetime.now().strftime( "%Y%m%d-%H%M" ) siteElement = xml.createElement( 'Site' ) # .. and the attributes for it siteElement.setAttribute( 'BuildName', os_platform + '-' + os_version ) siteElement.setAttribute( 'BuildStamp', buildtime + '-${BUILDTYPE}' ) siteElement.setAttribute( 'Hostname', hostname ) siteElement.setAttribute( 'Name', hostname ) return siteElement
def _get_target_platform(self):
    is64bit = False
    if sys.platform.startswith('linux'):
        sys_release = os.uname()[2]
        if os.uname()[-1] == 'x86_64':
            is64bit = True
        if os.path.exists('/usr/bin/emerge'):
            return TP_GENTOO, sys_release, is64bit
        if os.path.exists('/usr/bin/yum'):
            return TP_RHEL, sys_release, is64bit
        if os.path.exists('/usr/bin/apt-get'):
            return TP_DEBIAN, sys_release, is64bit
        raise Exception('Unsupported linux distribution detected!')
    elif sys.platform == 'darwin':
        sys_release = os.uname()[2]
        if os.uname()[-1] == 'x86_64':
            is64bit = True
        return TP_MACOS, sys_release, is64bit
    elif sys.platform == 'win':
        is64bit = 'PROGRAMFILES(X86)' in os.environ
        return TP_WIN, platform.release(), is64bit
    else:
        raise Exception('Unknown platform: %s' % sys.platform)
def get_platform(): ret = { "arch": sys.maxsize > 2 ** 32 and "x64" or "x86", "os": "", } if xbmc.getCondVisibility("system.platform.android"): ret["os"] = "android" if "arm" in os.uname()[4] or "aarch" in os.uname()[4]: ret["arch"] = "arm" elif xbmc.getCondVisibility("system.platform.linux"): ret["os"] = "linux" if "arm" in os.uname()[4]: ret["arch"] = "arm" elif xbmc.getCondVisibility("system.platform.xbox"): ret["os"] = "windows" ret["arch"] = "x64" elif xbmc.getCondVisibility("system.platform.windows"): ret["os"] = "windows" if platform.machine().endswith('64'): ret["arch"] = "x64" elif xbmc.getCondVisibility("system.platform.osx"): ret["os"] = "darwin" ret["arch"] = "x64" elif xbmc.getCondVisibility("system.platform.ios"): ret["os"] = "ios" ret["arch"] = "arm" return ret
def is_vamps_prod(self):
    print(os.uname()[1])
    dev_comps = ['bpcweb8', 'bpcweb8.bpcservers.private']
    if os.uname()[1] in dev_comps:
        return True
    else:
        return False
def get_hostname(config, method=None): """ Returns a hostname as configured by the user """ if 'hostname' in config: return config['hostname'] if method is None: if 'hostname_method' in config: method = config['hostname_method'] else: method = 'smart' # case insensitive method method = method.lower() if method == 'smart': hostname = get_hostname(config, 'fqdn_short') if hostname != 'localhost': return hostname hostname = get_hostname(config, 'hostname_short') return hostname if method == 'fqdn_short': return socket.gethostname().split('.')[0] if method == 'fqdn': return socket.gethostname().replace('.', '_') if method == 'fqdn_rev': hostname = socket.gethostname().split('.') hostname.reverse() hostname = '.'.join(hostname) return hostname if method == 'uname_short': return os.uname()[1].split('.')[0] if method == 'uname_rev': hostname = os.uname()[1].split('.') hostname.reverse() hostname = '.'.join(hostname) return hostname if method == 'hostname': return socket.gethostname() if method == 'hostname_short': return socket.gethostname().split('.')[0] if method == 'hostname_rev': hostname = socket.gethostname().split('.') hostname.reverse() hostname = '.'.join(hostname) return hostname if method == 'none': return None raise NotImplementedError(config['hostname_method'])
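# Hypothetical usage sketch for the get_hostname() helper above; the config
# dictionaries shown here are illustrative only.
if __name__ == '__main__':
    print(get_hostname({}))                                  # 'smart' default
    print(get_hostname({'hostname_method': 'uname_short'}))  # first label of os.uname()[1]
    print(get_hostname({'hostname': 'forced-name'}))         # explicit override wins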
def _install_android_ndk(self): ndk_dir = self.android_ndk_dir if self.buildozer.file_exists(ndk_dir): self.buildozer.info('Android NDK found at {0}'.format(ndk_dir)) return ndk_dir self.buildozer.info('Android NDK is missing, downloading') if platform in ('win32', 'cygwin'): #FIXME find a way of checking 32/64 bits os (not sys.maxint) archive = 'android-ndk-r{0}-windows-{1}.zip' is_64 = False elif platform in ('darwin', ): archive = 'android-ndk-r{0}-darwin-{1}.tar.bz2' is_64 = (os.uname()[4] == 'x86_64') elif platform.startswith('linux'): archive = 'android-ndk-r{0}-linux-{1}.tar.bz2' is_64 = (os.uname()[4] == 'x86_64') else: raise SystemError('Unsupported platform: {0}'.format(platform)) architecture = 'x86_64' if is_64 else 'x86' unpacked = 'android-ndk-r{0}' archive = archive.format(self.android_ndk_version, architecture) unpacked = unpacked.format(self.android_ndk_version) url = 'http://dl.google.com/android/ndk/' self.buildozer.download(url, archive, cwd=self.buildozer.global_platform_dir) self.buildozer.info('Unpacking Android NDK') self.buildozer.file_extract(archive, cwd=self.buildozer.global_platform_dir) self.buildozer.file_rename(unpacked, ndk_dir, cwd=self.buildozer.global_platform_dir) self.buildozer.info('Android NDK installation done.') return ndk_dir
def platform(): ret = { "arch": sys.maxsize > 2**32 and "x64" or "x86", } if xbmc.getCondVisibility("system.platform.android"): ret["os"] = "android" if "arm" in os.uname()[4]: ret["arch"] = "arm" elif xbmc.getCondVisibility("system.platform.linux"): ret["os"] = "linux" if "arm" in os.uname()[4]: ret["arch"] = "arm" elif xbmc.getCondVisibility("system.platform.xbox"): system_platform = "xbox" ret["arch"] = "" elif xbmc.getCondVisibility("system.platform.windows"): ret["os"] = "windows" elif xbmc.getCondVisibility("system.platform.osx"): ret["os"] = "darwin" elif xbmc.getCondVisibility("system.platform.ios"): ret["os"] = "ios" ret["arch"] = "arm" return ret
def downloadConfig(config, configFor):
    if os.uname()[1] == "raspbmc":
        lightberryRepoAddress = "http://lightberry.eu/download/General/"
        configFolder = "/etc/"
        replaceEffectsInd = False
    elif os.uname()[1] == "OpenELEC":
        lightberryRepoAddress = "http://lightberry.eu/download/General/"
        configFolder = "/storage/.config/"
        replaceEffectsInd = True
    else:
        lightberryRepoAddress = "http://lightberry.eu/download/General/"
        configFolder = "/etc/"
        replaceEffectsInd = False
    fileAddress = lightberryRepoAddress + config + "/" + configOptions[configFor]
    tempFile = tempFolder + configOptions[configFor]
    destFile = configFolder + configOptions[configFor]
    bkpFile = destFile + "_bkp"
    urllib.urlretrieve(fileAddress, tempFile)
    if replaceEffectsInd:
        replaceEffectsFolder(tempFile)
    mvFromTo(destFile, bkpFile)
    mvFromTo(tempFile, destFile)
def get_proxy_status(self, msg):
    """
    Get status information about the vPoller Proxy

    Args:
        msg (dict): The client message for processing (ignored)

    Returns:
        Status information about the vPoller Proxy

    """
    logging.debug('Getting vPoller Proxy status')

    result = {
        'success': 0,
        'msg': 'vPoller Proxy Status',
        'result': {
            'status': 'running',
            'hostname': os.uname()[1],
            'frontend_endpoint': self.frontend_endpoint,
            'backend_endpoint': self.backend_endpoint,
            'mgmt_endpoint': self.mgmt_endpoint,
            'running_since': self.running_since,
            'uname': ' '.join(os.uname()),
        }
    }

    logging.debug('Returning result to client: %s', result)

    return result
def _findSoname_ldconfig(name):
    import struct
    if struct.calcsize('l') == 4:
        machine = os.uname().machine + '-32'
    else:
        machine = os.uname().machine + '-64'
    mach_map = {
        'x86_64-64': 'libc6,x86-64',
        'ppc64-64': 'libc6,64bit',
        'sparc64-64': 'libc6,64bit',
        's390x-64': 'libc6,64bit',
        'ia64-64': 'libc6,IA-64',
        }
    abi_type = mach_map.get(machine, 'libc6')
    # XXX assuming GLIBC's ldconfig (with option -p)
    regex = r'\s+(lib%s\.[^\s]+)\s+\(%s'
    regex = os.fsencode(regex % (re.escape(name), abi_type))
    try:
        with subprocess.Popen(['/sbin/ldconfig', '-p'],
                              stdin=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL,
                              stdout=subprocess.PIPE,
                              env={'LC_ALL': 'C', 'LANG': 'C'}) as p:
            res = re.search(regex, p.stdout.read())
            if res:
                return os.fsdecode(res.group(1))
    except OSError:
        pass
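# For comparison, a short usage sketch of the public ctypes.util.find_library()
# entry point that sits in front of helpers like _findSoname_ldconfig on Linux;
# the printed sonames vary by system (e.g. 'libc.so.6', 'libm.so.6' on glibc).
from ctypes.util import find_library

print(find_library('c'))
print(find_library('m'))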
build_ext.finalize_options(self) self.include_dirs.append(self.numpy_include_dir) if self.get_compiler() == 'msvc': return True self.gdaldir = self.get_gdal_config('prefix') self.library_dirs.append(os.path.join(self.gdaldir, 'lib')) self.include_dirs.append(os.path.join(self.gdaldir, 'include')) extra_link_args = [] extra_compile_args = [] if sys.platform == 'darwin' and [int(x) for x in os.uname()[2].split('.') ] >= [11, 0, 0]: # since MacOS X 10.9, clang no longer accepts -mno-fused-madd #extra_compile_args.append('-Qunused-arguments') os.environ[ 'ARCHFLAGS'] = '-Wno-error=unused-command-line-argument-hard-error-in-future' gdal_module = Extension('osgeo._gdal', sources=['extensions/gdal_wrap.cpp'], extra_compile_args=extra_compile_args, extra_link_args=extra_link_args) gdalconst_module = Extension('osgeo._gdalconst', sources=['extensions/gdalconst_wrap.c'], extra_compile_args=extra_compile_args, extra_link_args=extra_link_args)
def sendMessage(self, address, message): cmd = 0 data = '' if message == "wakeup": cmd = 0x04 elif message == "sourceactive": address = 0x0f # use broadcast for active source command cmd = 0x82 physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress() data = str( struct.pack('BB', int(physicaladdress / 256), int(physicaladdress % 256))) elif message == "standby": cmd = 0x36 elif message == "sourceinactive": address = 0x0f # use broadcast for inactive source command physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress() cmd = 0x9d data = str( struct.pack('BB', int(physicaladdress / 256), int(physicaladdress % 256))) elif message == "menuactive": cmd = 0x8e data = str(struct.pack('B', 0x00)) elif message == "menuinactive": cmd = 0x8e data = str(struct.pack('B', 0x01)) elif message == "givesystemaudiostatus": cmd = 0x7d address = 0x05 elif message == "setsystemaudiomode": cmd = 0x70 address = 0x05 physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress() data = str( struct.pack('BB', int(physicaladdress / 256), int(physicaladdress % 256))) elif message == "osdname": cmd = 0x47 data = os.uname()[1] data = data[:14] elif message == "poweractive": cmd = 0x90 data = str(struct.pack('B', 0x00)) elif message == "powerinactive": cmd = 0x90 data = str(struct.pack('B', 0x01)) elif message == "reportaddress": address = 0x0f # use broadcast address cmd = 0x84 physicaladdress = eHdmiCEC.getInstance().getPhysicalAddress() devicetype = eHdmiCEC.getInstance().getDeviceType() data = str( struct.pack('BBB', int(physicaladdress / 256), int(physicaladdress % 256), devicetype)) elif message == "vendorid": cmd = 0x87 data = '\x00\x00\x00' elif message == "keypoweron": cmd = 0x44 data = str(struct.pack('B', 0x6d)) elif message == "keypoweroff": cmd = 0x44 data = str(struct.pack('B', 0x6c)) if cmd: eHdmiCEC.getInstance().sendMessage(address, cmd, data, len(data))
def read_cpuinfo(): def get_entry(a, entry): e = entry.lower() if not a.has_key(e): return "" return a[e] # read cpu list and return number of cpus and list as dictionary def get_cpulist_as_dict(cpulist): count = 0 tmpdict = {} for cpu in cpulist.split("\n\n"): if not len(cpu): continue count = count + 1 if count > 1: break # no need to parse rest for cpu_attr in cpu.split("\n"): if not len(cpu_attr): continue vals = cpu_attr.split(":") if len(vals) != 2: # XXX: make at least some effort to recover this data... continue name, value = vals[0].strip(), vals[1].strip() tmpdict[name.lower()] = value return tmpdict if not os.access("/proc/cpuinfo", os.R_OK): return {} # Okay, the kernel likes to give us the information we need in the # standard "C" locale. if locale: # not really needed if you don't plan on using atof() locale.setlocale(locale.LC_NUMERIC, "C") cpulist = open("/proc/cpuinfo", "r").read() uname = os.uname()[4].lower() count = cpu_count() # This thing should return a hwdict that has the following # members: # # class, desc (required to identify the hardware device) # count, type, model, model_number, model_ver, model_rev # bogomips, platform, speed, cache hwdict = { 'class': "CPU", "desc": "Processor", } if uname[0] == "i" and uname[-2:] == "86" or (uname == "x86_64"): # IA32 compatible enough tmpdict = get_cpulist_as_dict(cpulist) if uname == "x86_64": hwdict['platform'] = 'x86_64' else: hwdict['platform'] = "i386" hwdict['count'] = count hwdict['type'] = get_entry(tmpdict, 'vendor_id') hwdict['model'] = get_entry(tmpdict, 'model name') hwdict['model_number'] = get_entry(tmpdict, 'cpu family') hwdict['model_ver'] = get_entry(tmpdict, 'model') hwdict['model_rev'] = get_entry(tmpdict, 'stepping') hwdict['cache'] = get_entry(tmpdict, 'cache size') hwdict['bogomips'] = get_entry(tmpdict, 'bogomips') hwdict['other'] = get_entry(tmpdict, 'flags') mhz_speed = get_entry(tmpdict, 'cpu mhz') if mhz_speed == "": # damn, some machines don't report this mhz_speed = "-1" try: hwdict['speed'] = int(round(float(mhz_speed)) - 1) except ValueError: hwdict['speed'] = -1 elif uname in ["alpha", "alphaev6"]: # Treat it as an an Alpha tmpdict = get_cpulist_as_dict(cpulist) hwdict['platform'] = "alpha" hwdict['count'] = get_entry(tmpdict, 'cpus detected') hwdict['type'] = get_entry(tmpdict, 'cpu') hwdict['model'] = get_entry(tmpdict, 'cpu model') hwdict['model_number'] = get_entry(tmpdict, 'cpu variation') hwdict['model_version'] = "%s/%s" % (get_entry( tmpdict, 'system type'), get_entry(tmpdict, 'system variation')) hwdict['model_rev'] = get_entry(tmpdict, 'cpu revision') hwdict['cache'] = "" # pitty the kernel doesn't tell us this. hwdict['bogomips'] = get_entry(tmpdict, 'bogomips') hwdict['other'] = get_entry(tmpdict, 'platform string') hz_speed = get_entry(tmpdict, 'cycle frequency [Hz]') # some funky alphas actually report in the form "462375000 est." 
hz_speed = hz_speed.split() try: hwdict['speed'] = int(round(float(hz_speed[0]))) / 1000000 except ValueError: hwdict['speed'] = -1 elif uname in ["ia64"]: tmpdict = get_cpulist_as_dict(cpulist) hwdict['platform'] = uname hwdict['count'] = count hwdict['type'] = get_entry(tmpdict, 'vendor') hwdict['model'] = get_entry(tmpdict, 'family') hwdict['model_ver'] = get_entry(tmpdict, 'archrev') hwdict['model_rev'] = get_entry(tmpdict, 'revision') hwdict['bogomips'] = get_entry(tmpdict, 'bogomips') mhz_speed = tmpdict['cpu mhz'] try: hwdict['speed'] = int(round(float(mhz_speed)) - 1) except ValueError: hwdict['speed'] = -1 hwdict['other'] = get_entry(tmpdict, 'features') elif uname in ['ppc64']: tmpdict = get_cpulist_as_dict(cpulist) hwdict['platform'] = uname hwdict['count'] = count hwdict['model'] = get_entry(tmpdict, "cpu") hwdict['model_ver'] = get_entry(tmpdict, 'revision') hwdict['bogomips'] = get_entry(tmpdict, 'bogomips') hwdict['vendor'] = get_entry(tmpdict, 'machine') # strings are postpended with "mhz" mhz_speed = get_entry(tmpdict, 'clock')[:-3] try: hwdict['speed'] = int(round(float(mhz_speed)) - 1) except ValueError: hwdict['speed'] = -1 elif uname in ['s390', 's390x']: tmpdict = {} for cpu in cpulist.split("\n"): vals = cpu.split(cpu, ": ") if len(vals) != 2: continue tmpdict[vals[0].strip()] = vals[1].strip() hwdict['platform'] = uname hwdict['type'] = get_entry(tmpdict, 'vendor_id') hwdict['model'] = uname hwdict['count'] = count hwdict['bogomips'] = get_entry(tmpdict, 'bogomips per cpu') hwdict['model_number'] = "" hwdict['model_ver'] = "" hwdict['model_rev'] = "" hwdict['cache'] = "" hwdict['other'] = get_entry(tmpdict, 'features') hwdict['speed'] = 0 else: # XXX: expand me. Be nice to others hwdict['platform'] = uname hwdict['count'] = count hwdict['type'] = uname hwdict['model'] = uname hwdict['model_number'] = "" hwdict['model_ver'] = "" hwdict['model_rev'] = "" hwdict['cache'] = "" hwdict['bogomips'] = "" hwdict['other'] = "" hwdict['speed'] = 0 # make sure we get the right number here if not hwdict["count"]: hwdict["count"] = 1 else: try: hwdict["count"] = int(hwdict["count"]) except: hwdict["count"] = 1 else: if hwdict["count"] == 0: # we have at least one hwdict["count"] = 1 # This whole things hurts a lot. return hwdict
from rpython.jit.metainterp.test import support from rpython.rlib.jit import JitDriver class JitARMMixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() def check_jumps(self, maxcount): pass if not getattr(os, 'uname', None): pytest.skip('cannot run arm tests on non-posix platform') if os.uname()[1] == 'llaima.local': AS = '~/Code/arm-jit/android/android-ndk-r4b//build/prebuilt/darwin-x86/arm-eabi-4.4.0/arm-eabi/bin/as' else: AS = 'as' def run_asm(asm): BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) addr = asm.mc.materialize(asm.cpu.asmmemmgr, [], None) assert addr % 8 == 0 func = rffi.cast(lltype.Ptr(BOOTSTRAP_TP), addr) asm.mc._dump_trace(addr, 'test.asm') return func() def skip_unless_run_slow_tests():
# -*- Mode: Python -*- from distutils.core import setup from distutils.extension import Extension from Cython.Distutils import build_ext exts = [ Extension('caesure.proto', ['caesure/proto.pyx']), Extension('caesure._script', ['caesure/_script.pyx']), Extension('caesure.txfaa', ['caesure/txfaa.pyx'], language="c++"), ] import os if os.path.isfile('/usr/local/lib/libsecp256k1.a'): if os.uname()[0] == 'Darwin': # needed for -rpath to work. os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9' exts.append( Extension( "caesure.secp256k1", ["caesure/secp256k1.pyx"], include_dirs=['/usr/local/include'], libraries=['secp256k1', 'gmp', 'z'], extra_link_args=['-Wl,-rpath,/usr/local/lib'], )) ## edit bitcoin.py as well (search for 'ecdsa_cryptopp'). # cryptopp = '/home/rushing/src/crypto++' # exts.append ( # Extension (
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Verifies that Google Test correctly parses environment variables.""" __author__ = '[email protected] (Zhanyong Wan)' import os import gtest_test_utils IS_WINDOWS = os.name == 'nt' IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_') def AssertEq(expected, actual): if expected != actual: print 'Expected: %s' % (expected,) print ' Actual: %s' % (actual,) raise AssertionError def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None:
""" This test need a set of pins which can be set as inputs and have no external pull up or pull down connected. GP12 and GP17 must be connected together """ from machine import Pin import os mch = os.uname().machine if 'LaunchPad' in mch: pin_map = ['GP24', 'GP12', 'GP14', 'GP15', 'GP16', 'GP17', 'GP28', 'GP8', 'GP6', 'GP30', 'GP31', 'GP3', 'GP0', 'GP4', 'GP5'] max_af_idx = 15 elif 'WiPy' in mch: pin_map = ['GP23', 'GP24', 'GP12', 'GP13', 'GP14', 'GP9', 'GP17', 'GP28', 'GP22', 'GP8', 'GP30', 'GP31', 'GP0', 'GP4', 'GP5'] max_af_idx = 15 else: raise Exception('Board not supported!') # test initial value p = Pin('GP12', Pin.IN) Pin('GP17', Pin.OUT, value=1) print(p() == 1) Pin('GP17', Pin.OUT, value=0) print(p() == 0) def test_noinit(): for p in pin_map: pin = Pin(p) pin.value() def test_pin_read(pull):
def sys_os():
    os_name, _, _, _, _ = os.uname()
    return os_name.lower()
required=False, type=str, default=None, help="Specify the path of the temporary directory use by Ray process.") args = parser.parse_args() ray.utils.setup_logger(args.logging_level, args.logging_format) metrics_export_address = os.environ.get("METRICS_EXPORT_ADDRESS") try: dashboard = Dashboard(args.host, args.port, args.redis_address, args.temp_dir, redis_password=args.redis_password, metrics_export_address=metrics_export_address) dashboard.run() except Exception as e: # Something went wrong, so push an error to all drivers. redis_client = ray.services.create_redis_client( args.redis_address, password=args.redis_password) traceback_str = ray.utils.format_error_message(traceback.format_exc()) message = ("The dashboard on node {} failed with the following " "error:\n{}".format(os.uname()[1], traceback_str)) ray.utils.push_error_to_driver_through_redis( redis_client, ray_constants.DASHBOARD_DIED_ERROR, message) if isinstance(e, OSError) and e.errno == errno.ENOENT: logger.warning(message) else: raise e
def is_non_x86_64():
    return os.uname()[4] != 'x86_64'
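# A cross-platform sketch of a similar check (an assumption, not part of the
# original code): platform.machine() also works where os.uname() is unavailable,
# but note that 64-bit Windows reports 'AMD64' rather than 'x86_64'.
import platform

def is_non_x86_64_portable():
    return platform.machine() not in ('x86_64', 'AMD64')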
from pxStats.lib.RrdUtilities import RrdUtilities from pxStats.lib.MemoryManagement import MemoryManagement from pxStats.lib.LanguageTools import LanguageTools """ - Small function that adds pxLib to sys path. """ STATSPATHS = StatsPaths() STATSPATHS.setBasicPaths() sys.path.append(STATSPATHS.PXLIB) """ - These imports require PXLIB """ from Logger import * from PXManager import * LOCAL_MACHINE = os.uname()[1] CURRENT_MODULE_ABS_PATH = os.path.abspath(__file__).replace(".pyc", ".py") ################################################################# # # #################PARSER AND OPTIONS SECTION###################### # # ################################################################# class _Infos: def __init__(self, endTime, clients, fileTypes, machines, products="all",
def sys_arch():
    _, _, _, _, arch = os.uname()
    return arch.lower()
def run_test(test): current_kernel = LooseVersion(uname()[2]) if test.kernel and LooseVersion(test.kernel) > current_kernel: print(warn("[ SKIP ] ") + "%s.%s" % (test.suite, test.name)) return Utils.SKIP_KERNEL_VERSION full_test_name = test.suite + "." + test.name if full_test_name in os.getenv("RUNTIME_TEST_DISABLE", "").split(","): print(warn("[ SKIP ] ") + "%s.%s" % (test.suite, test.name)) return Utils.SKIP_ENVIRONMENT_DISABLED signal.signal(signal.SIGALRM, Utils.__handler) try: before = None after = None print(ok("[ RUN ] ") + "%s.%s" % (test.suite, test.name)) if test.requirement: with open(devnull, 'w') as dn: if subprocess.call( test.requirement, shell=True, stdout=dn, stderr=dn, env={'PATH': "{}:{}".format(BPF_PATH, ENV_PATH)}, ) != 0: print( warn("[ SKIP ] ") + "%s.%s" % (test.suite, test.name)) return Utils.SKIP_REQUIREMENT_UNSATISFIED if test.before: before = subprocess.Popen(test.before, shell=True, preexec_fn=os.setsid) waited = 0 with open(devnull, 'w') as dn: # This might not work for complicated cases, such as if # a test program needs to accept arguments. It covers the # current simple calls with no arguments child_name = os.path.basename(test.before.split()[-1]) while subprocess.call( ["pidof", child_name], stdout=dn, stderr=dn) != 0: time.sleep(0.1) waited += 0.1 if waited > test.timeout: raise TimeoutError( 'Timed out waiting for BEFORE %s ', test.before) bpf_call = Utils.prepare_bpf_call(test) env = {'test': test.name} env.update(test.env) p = subprocess.Popen(bpf_call, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env, preexec_fn=os.setsid, universal_newlines=True, bufsize=1) signal.alarm(ATTACH_TIMEOUT) output = "" while p.poll() is None: nextline = p.stdout.readline() output += nextline if nextline == "Running...\n": signal.alarm(test.timeout or DEFAULT_TIMEOUT) if not after and test.after: after = subprocess.Popen(test.after, shell=True, preexec_fn=os.setsid) break output += p.communicate()[0] signal.alarm(0) result = re.search(test.expect, output, re.M) except (TimeoutError): # Give it a last chance, the test might have worked but the # bpftrace process might still be alive if p.poll() is None: os.killpg(os.getpgid(p.pid), signal.SIGKILL) output += p.communicate()[0] result = re.search(test.expect, output) if not result: print( fail("[ TIMEOUT ] ") + "%s.%s" % (test.suite, test.name)) print('\tCommand: %s' % bpf_call) print('\tTimeout: %s' % test.timeout) print('\tCurrent output: %s' % output) return Utils.TIMEOUT finally: if before and before.poll() is None: os.killpg(os.getpgid(before.pid), signal.SIGKILL) if after and after.poll() is None: os.killpg(os.getpgid(after.pid), signal.SIGKILL) if result: print(ok("[ OK ] ") + "%s.%s" % (test.suite, test.name)) return Utils.PASS else: print(fail("[ FAILED ] ") + "%s.%s" % (test.suite, test.name)) print('\tCommand: ' + bpf_call) print('\tExpected: ' + test.expect) print('\tFound: ' + output) return Utils.FAIL
def this_node():
    'returns name of this node (hostname)'
    return os.uname()[1]
def get(self, item): if item == "hostname": hostname = os.uname()[1] return os.uname()[1] pass elif item == "CPUload": return "CPU: " + str(psutil.cpu_percent(interval=0)) + "%" pass elif item == "CPUpercent": cputimes = psutil.cpu_times_percent(interval=0) return "us: " + str(int(cputimes.user)) + " ni: " + str( int(cputimes.nice)) + " sy: " + str(int(cputimes.system)) pass elif item == "mem": cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%sMB %.2f%%\", $3,$2,$3*100/$2 }'" return str(subprocess.check_output(cmd, shell=True)) pass elif item == "disk": cmd = "df -h | awk '$NF==\"/\"{printf \"Disk: %d/%dGB %s\", $3,$2,$5}'" return str(subprocess.check_output(cmd, shell=True)) pass elif item == "dline": return "====================" pass elif item == "line": return "--------------------" pass elif item == "blank": return " " pass elif item == "nicv4": nics = netifaces.interfaces() numNics = len(nics) if self.nicIndex >= numNics: self.nicIndex = 0 while nics[self.nicIndex] == "lo" or nics[ self.nicIndex] == "bond0": self.nicIndex += 1 if self.nicIndex >= numNics: self.nicIndex = 0 nicName = nics[self.nicIndex] + " " address = netifaces.ifaddresses( nics[self.nicIndex])[AF_INET][0]['addr'] self.nicIndex += 1 return nicName[:5] + ":" + str(address) pass elif item == "nicv6": nics = netifaces.interfaces() numNics = len(nics) if self.nicIndex >= numNics: self.nicIndex = 0 while nics[self.nicIndex] == "lo" or nics[ self.nicIndex] == "bond0": self.nicIndex += 1 if self.nicIndex >= numNics: self.nicIndex = 0 nicName = nics[self.nicIndex] + " " address = netifaces.ifaddresses( nics[self.nicIndex])[AF_INET6][0]['addr'] self.nicIndex += 1 return nicName[:5] + ":" + str(address) pass else: return "no such item"
async def getBukkensFromYamlInPage(yaml, pageUrl): # 物件情報のDBIOインスタンスを作成 dbio = dbiomaker() # webサイトから取得した物件リストを格納 bukkens = [] #開発環境と本番環境でPhantomJSの呼び出し方が異なるため、ホスト名で振り分け if os.uname()[1] == "kira-no-MacBook-Air.local": driver = webdriver.PhantomJS( executable_path='/Applications/phantomjs-1.9.2-macosx/bin/phantomjs' ) else: driver = webdriver.PhantomJS() # 新規タブをあけるキー操作を設定 newtab = Keys.CONTROL + 't' # Mac かどうかの判定、キーがMac だと違う if sys.platform == 'darwin': newtab = Keys.COMMAND + 't' #webサイトからデータ取得 print("start driver") #open tab #driver.find_element_by_tag_name('body').send_keys(newtab) driver.get(pageUrl) print("end driver") #HTMLは未使用にみえるが、文字列指定の形でevalで使用している HTML = lxml.html.fromstring(driver.page_source) #登録用物件辞書 bukkenDic = {} bukkenSetter = BukkenSetter() #mainルーチン # g is GROUP # u is UNIT # pcs is UNIT item #共通情報設定 yamlid = "website" bukkenDic.update({yamlid: yaml[yamlid]}) yamlid = "websiteURL" bukkenDic.update({yamlid: yaml[yamlid]}) #print("G1 --YAML[GROUPS] => YAML[GROUP]--:YAMLファイルからGROUPの検索条件を取得") for g in yaml: if g == "GROUP": gp = yaml[g]["PROTOCOL"] gc = yaml[g]["COMMAND"] gs = yaml[g]["SELECTOR"] #print("G2 --YAML[GROUP] => HTML[GROUPS]--:GROUP検索条件よりHTMLのGROUP群を抽出") groups = eval("HTML" + "." + gp + '("' + gc + '")' + gs) #print("G3 --HTML[GROUPS] => HTML[GROUP]--:HTMLのGROUP群を1つづつループ処理") for group in groups: #print("U1 --YAML[GROUP] => YAML[UNIT]--:YAMLファイルからUNITの検索条件を取得") for u in yaml[g]: if u == "UNIT": up = yaml[g][u]["PROTOCOL"] uc = yaml[g][u]["COMMAND"] us = yaml[g][u]["SELECTOR"] #print("U2 --YAML[UNIT] => HTML[UNITS]--:UNIT検索条件よりHTMLのUNIT群を抽出") #<div class="article-box clearfix"> units = eval("group" + "." + up + '("' + uc + '")' + us) #print("U3 --HTML[UNITS] => HTML[UNIT]--:HTMLのUNIT群を1つづつループ処理") for unit in units: #print("UI1--YAML[UNIT] => YAML[UNITITEMS]--:YAMLファイルからUNITITEM群の検索条件を取得") for uis in yaml[g][u]: if uis == "UNITITEMS": #print("UI2--YAML[UNITITEMS] => YAML[UNITITEM]--:YAMLファイルからUNITITEMの検索条件を取得") for ui in yaml[g][u][uis]: if ui != "IGNORE": p = yaml[g][u][uis][ui]["PROTOCOL"] c = yaml[g][u][uis][ui]["COMMAND"] s = yaml[g][u][uis][ui]["SELECTOR"] h = yaml[g][u][uis][ui]["HEADER"] #print("UI3 --YAML[UNITITEM] => HTML[UNITITEM]--:UNITITEM検索条件よりHTMLのUNITITEM情報を抽出") #print(ui+":"+htmlItemSelector(unit,p,c,s)) #登録用物件辞書に追加 bukkenDic.update({ ui: htmlItemSelector( unit, p, c, s, h) }) #物件情報設定 bukkeninfo = bukkenSetter.getBukkenInfoByDic( bukkenDic) bukkens.append(bukkeninfo) #️DBへ格納 dbio.insert(bukkens)
""" from OSC3 import OSCServer, OSCClient, OSCMessage import time import numpy as np import rtmidi from rtmidi.midiutil import open_midiinput from threading import Thread from rtmidi.midiconstants import (CHANNEL_PRESSURE, CONTROLLER_CHANGE, NOTE_ON, NOTE_OFF, PITCH_BEND, POLY_PRESSURE, PROGRAM_CHANGE) import os, json import midi3 if os.uname()[1]=='raspberrypi': pass port = 8090 ip = "127.0.0.1" mididest = 'Session 1' djdest = 'Port' midichannel = 1 computerIP = ['127.0.0.1','192.168.2.95','192.168.2.52','127.0.0.1', '127.0.0.1','127.0.0.1','127.0.0.1','127.0.0.1'] computer = 0 # store current value for computer 1 cc1 =[0]*140
os.putenv('ENDDATE',enddate.strftime('%Y%m%d%H')) # Clear out the log files os.system('rm -f *.log') os.system('rm -f *.err') # Loop through each member for mem in range(1,int(Ne)+1): #for mem in [46]: #for mem in [6,16,27,28,39]: print "Member %d..." % (mem), # Check for restart if restart_flag: restart_mem(mem) # Copy the run_member script as run_member_%d.py in each directory os.system('cp run_member.py %s/m%d/m%d_run_member.py' % (dir_members,mem,mem)) # Submit this member to the queue # Check for which system we are on if os.uname()[0] == 'AIX' and os.uname()[1].startswith('be'): # We're on bluefire bluefire_submit(mem) elif os.uname()[1].startswith('ys'): # We're on Yellowstone. Use the Bluefire submission. bluefire_submit(mem) else: os.system('qsub -pe ompi %d -q %s -V -o %s/m%d -e %s/m%d -wd %s/m%d %s/m%d/m%d_run_member.py' \ % (mpi_numprocs_member,queue_members,dir_members,mem,dir_members,mem,dir_members,mem,\ dir_members,mem,mem)) print "Done."
#
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <*****@*****.**>
#
'''
quagga.py: defines routing services provided by Quagga.
'''

import os

if os.uname()[0] == "Linux":
    from core.netns import nodes
elif os.uname()[0] == "FreeBSD":
    from core.bsd import nodes
from core.service import CoreService, addservice
from core.misc.ipaddr import IPv4Prefix, isIPv4Address, isIPv6Address
from core.api import coreapi
from core.constants import *

QUAGGA_USER = "******"
QUAGGA_GROUP = "root"
if os.uname()[0] == "FreeBSD":
    QUAGGA_GROUP = "wheel"


class Zebra(CoreService):
    '''
    '''
    _name = "zebra"
def openstack_cleanup(file_path, os_name): """ This function is only run if openstack is one of the builders. If the image is to be stored then the image will be shrunk and the original image deleted, if there were any other images in openstack of the same type they will be removed. """ nova, glance = authenticate() large_image = nova.images.find(name=environ.get('IMAGE_NAME')) downloaded_file = file_path + ''.join(random.choice(string.lowercase) for i in range(20)) + ".raw" local_qcow = file_path + ''.join(random.choice(string.lowercase) for i in range(20)) + ".qcow" try: subprocess.check_call(['glance', 'image-download', '--progress', '--file', downloaded_file, large_image.id]) except subprocess.CalledProcessError as e: print(e.output) sys.stdout.flush() try: debug(" ".join(['openstack', 'image', 'delete', large_image])) subprocess.check_call(['openstack', 'image', 'delete', large_image]) except subprocess.CalledProcessError as f: print(f.output) print("Failed to remove the uncompressed image from openstack, you will need to clean this up manually.") sys.exit(3) if os.stat(downloaded_file).st_size == 0: print(f.output) print("Downloaded file ({hostname}:{path}) empty".format(path=downloaded_file,hostname=os.uname()[1])) sys.exit(4) debug("Download file ({hostname}:{path}) size={size}".format(path=downloaded_file,hostname=os.uname()[1],size=os.stat(downloaded_file).st_size)) try: subprocess.check_call(['qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', downloaded_file, local_qcow]) except subprocess.CalledProcessError as e: print(e.output) sys.exit(5) os.remove(downloaded_file) debug("Converted file ({hostname}:{path}) size={size}".format(path=local_qcow,hostname=os.uname()[1],size=os.stat(local_qcow).st_size)) try: debug(" ".join(['glance', 'image-create', '--file', local_qcow, '--disk-format', 'qcow2', '--container-format', 'bare', '--progress', '--name', os_name])) subprocess.check_call(['glance', 'image-create', '--file', local_qcow, '--disk-format', 'qcow2', '--container-format', 'bare', '--progress', '--name', os_name]) print(os_name) sys.stdout.flush() final_image = nova.images.find(name=os_name) environ['OS_IMAGE_ID'] = final_image.id print("Image created and compressed with id: " + final_image.id) sys.stdout.flush() except subprocess.CalledProcessError as e: print(e.output) sys.stdout.flush() sys.exit(6) os.remove(local_qcow) try: subprocess.check_call(['openstack', 'image', 'delete', large_image.id]) except subprocess.CalledProcessError as e: print(e.output) print('The large image could not be destroyed, please run this manually') sys.exit(7)
def get_platform(): """Return a string that identifies the current platform. This is used mainly to distinguish platform-specific build directories and platform-specific built distributions. Typically includes the OS name and version and the architecture (as supplied by 'os.uname()'), although the exact information included depends on the OS; eg. for IRIX the architecture isn't particularly important (IRIX only runs on SGI hardware), but for Linux the kernel version isn't particularly important. Examples of returned values: linux-i586 linux-alpha (?) solaris-2.6-sun4u irix-5.3 irix64-6.2 Windows will return one of: win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) win-ia64 (64bit Windows on Itanium) win32 (all others - specifically, sys.platform is returned) For other non-POSIX platforms, currently just returns 'sys.platform'. """ if os.name == 'nt': # sniff sys.version for architecture. prefix = " bit (" i = sys.version.find(prefix) if i == -1: return sys.platform j = sys.version.find(")", i) look = sys.version[i + len(prefix):j].lower() if look == 'amd64': return 'win-amd64' if look == 'itanium': return 'win-ia64' return sys.platform if os.name != "posix" or not hasattr(os, 'uname'): # XXX what about the architecture? NT is Intel or Alpha, # Mac OS is M68k or PPC, etc. return sys.platform # Try to distinguish various flavours of Unix osname, host, release, version, machine = os.uname() # Convert the OS name to lowercase, remove '/' characters # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") osname = osname.lower().replace('/', '') machine = machine.replace(' ', '_') machine = machine.replace('/', '-') if osname[:5] == "linux": # At least on Linux/Intel, 'machine' is the processor -- # i386, etc. # XXX what about Alpha, SPARC, etc? return "%s-%s" % (osname, machine) elif osname[:5] == "sunos": if release[0] >= "5": # SunOS 5 == Solaris 2 osname = "solaris" release = "%d.%s" % (int(release[0]) - 3, release[2:]) # fall through to standard osname-release-machine representation elif osname[:4] == "irix": # could be "irix64"! return "%s-%s" % (osname, release) elif osname[:3] == "aix": return "%s-%s.%s" % (osname, version, release) elif osname[:6] == "cygwin": osname = "cygwin" rel_re = re.compile(r'[\d.]+') m = rel_re.match(release) if m: release = m.group() elif osname[:6] == "darwin": # # For our purposes, we'll assume that the system version from # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set # to. This makes the compatibility story a bit more sane because the # machine is going to compile and link as if it were # MACOSX_DEPLOYMENT_TARGET. cfgvars = get_config_vars() macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') if True: # Always calculate the release of the running machine, # needed to determine if we can build fat binaries or not. macrelease = macver # Get the system version. Reading this plist is a documented # way to get the system version (see the documentation for # the Gestalt Manager) try: f = open('/System/Library/CoreServices/SystemVersion.plist') except IOError: # We're on a plain darwin box, fall back to the default # behaviour. pass else: try: m = re.search( r'<key>ProductUserVisibleVersion</key>\s*' r'<string>(.*?)</string>', f.read()) finally: f.close() if m is not None: macrelease = '.'.join(m.group(1).split('.')[:2]) # else: fall back to the default behaviour if not macver: macver = macrelease if macver: release = macver osname = "macosx" if ((macrelease + '.') >= '10.4.' 
and '-arch' in get_config_vars().get('CFLAGS', '').strip()): # The universal build will build fat binaries, but not on # systems before 10.4 # # Try to detect 4-way universal builds, those have machine-type # 'universal' instead of 'fat'. machine = 'fat' cflags = get_config_vars().get('CFLAGS') archs = re.findall(r'-arch\s+(\S+)', cflags) archs = tuple(sorted(set(archs))) if len(archs) == 1: machine = archs[0] elif archs == ('i386', 'ppc'): machine = 'fat' elif archs == ('i386', 'x86_64'): machine = 'intel' elif archs == ('i386', 'ppc', 'x86_64'): machine = 'fat3' elif archs == ('ppc64', 'x86_64'): machine = 'fat64' elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): machine = 'universal' else: raise ValueError("Don't know machine value for archs=%r" % (archs, )) elif machine == 'i386': # On OSX the machine type returned by uname is always the # 32-bit variant, even if the executable architecture is # the 64-bit variant if sys.maxsize >= 2**32: machine = 'x86_64' elif machine in ('PowerPC', 'Power_Macintosh'): # Pick a sane name for the PPC architecture. # See 'i386' case if sys.maxsize >= 2**32: machine = 'ppc64' else: machine = 'ppc' return "%s-%s-%s" % (osname, release, machine)
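# A short, hypothetical usage sketch for get_platform() above; the exact string
# depends on the host, e.g. 'linux-x86_64' or 'macosx-10.15-x86_64'.
if __name__ == '__main__':
    print(get_platform())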
from sys import byteorder from ctypes import sizeof, c_void_p CPU_BIGENDIAN = (byteorder == 'big') CPU_64BITS = (sizeof(c_void_p) == 8) if CPU_64BITS: CPU_WORD_SIZE = 8 # bytes CPU_MAX_UINT = 0xffffffffffffffff else: CPU_WORD_SIZE = 4 # bytes CPU_MAX_UINT = 0xffffffff if HAS_UNAME: # guess machine type using uname() _machine = uname()[4] CPU_PPC32 = (_machine == 'ppc') CPU_PPC64 = (_machine == 'ppc64') CPU_I386 = (_machine in ("i386", "i686")) # compatible Intel 32 bits CPU_X86_64 = (_machine == "x86_64") # compatible Intel 64 bits del _machine else: # uname() fallback for Windows # I hope that your Windows doesn't run on PPC32/PPC64 CPU_PPC32 = False CPU_PPC64 = False CPU_I386 = False CPU_X86_64 = False bits, linkage = architecture() if bits == '32bit': CPU_I386 = True
# coding=utf-8
import os, fnmatch
from win32com import client
from win32com.client import Dispatch

'''
Format conversion with Pywin32
Package: win32com
Install: pip install pywin32
Import:  from win32com import client as wc
'''

'''
Working with files and directories
os.name                                   operating system type: 'posix' means Linux, Unix or Mac OS X; 'nt' means Windows
os.uname()                                detailed system information (not available on Windows)
os.path.abspath(r"relative path")         convert a relative path to an absolute path
os.path.abspath('.')                      absolute path of the current directory
os.path.split("absolute path")            split a path into directory part and file name
os.path.join(A, B)                        join paths A and B
os.path.exists("absolute path")           check whether the path exists
os.makedirs("absolute path"), os.mkdir("absolute path")   create a directory
os.rmdir("absolute path")                 remove a directory
os.path.isdir("absolute path")            check whether the path is a folder/directory
os.path.isfile("absolute path")           check whether the path is a file
fnmatch.fnmatch("text", "pattern")        match file names against a pattern
client.Dispatch('Word.Application').Documents.Open("file").SaveAs("new path", 4)   open a document, extract its content and save it
client.Dispatch('Word.Application').Documents.Open("file").Close()                 close the document
'''

def File2Txt(filePath, savepath=''):
    try:
'haproxy', 'horizon', 'keepalived', 'keystone', 'kolla-toolbox', 'mariadb', 'memcached', 'neutron', 'nova-', 'openvswitch', 'rabbitmq', ], help='Gate images') ] hostarch = os.uname()[4] _CLI_OPTS = [ cfg.StrOpt('base', short='b', default='centos', choices=BASE_OS_DISTRO, help='The distro type of the base image.'), cfg.StrOpt('base-tag', default='latest', help='The base distro image tag'), cfg.StrOpt('base-image', help='The base image name. Default is the same with base.'), cfg.StrOpt('base-arch', default=hostarch, choices=BASE_ARCH, help='The base architecture. Default is same as host'), cfg.BoolOpt('debug', short='d', default=False, help='Turn on debugging log level'), cfg.BoolOpt('skip-parents', default=False,
################# SKIMS ########################
################################################

dataReco = 'Run2018_102X_nAODv7_Full2018v7'
dataSteps = 'DATAl1loose2018v7__DATACombJJLNu2018'
fakeSteps = 'DATAl1loose2018v7__DATACombJJLNu2018'

mcProduction = 'Autumn18_102X_nAODv7_Full2018v7'
mcSteps = 'MCl1loose2018v7__MCCorr2018v7__MCCombJJLNu2018'

##############################################
###### Tree base directory for the site ######
##############################################

SITE = os.uname()[1]
if 'iihe' in SITE:
    treeBaseDir = '/pnfs/iihe/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano/'
elif 'cern' in SITE:
    treeBaseDir = '/eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano'
    # treeBaseDir = '/eos/user/s/ssiebert/HWWNano'

def makeMCDirectory(var=None):
    if var is not None:
        return os.path.join(treeBaseDir, mcProduction, mcSteps + '_' + var)
    else:
        return os.path.join(treeBaseDir, mcProduction, mcSteps)

mcDirectory = makeMCDirectory()
# mcDirectory = os.path.join(treeBaseDir, mcProduction, mcSteps)
VBSDirectory = os.path.join('/eos/cms/store/group/phys_smp/VJets_NLO_VBSanalyses',
                            mcProduction, mcSteps)
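# Usage sketch (illustrative only): makeMCDirectory() resolves the nominal
# tree path for the detected site; the 'JESup' suffix below is a hypothetical
# systematic-variation name, not one defined in this configuration.
nominalDirectory = makeMCDirectory()
variedDirectory = makeMCDirectory('JESup')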
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.

    On Unix, this means every variable defined in Python's installed Makefile;
    On Windows and Mac OS it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.
    """
    global _CONFIG_VARS
    if _CONFIG_VARS is None:
        _CONFIG_VARS = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # distutils2 module.
        _CONFIG_VARS['prefix'] = _PREFIX
        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
        _CONFIG_VARS['py_version'] = _PY_VERSION
        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
        _CONFIG_VARS['base'] = _PREFIX
        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
        _CONFIG_VARS['projectbase'] = _PROJECT_BASE
        try:
            _CONFIG_VARS['abiflags'] = sys.abiflags
        except AttributeError:
            # sys.abiflags may not be defined on all platforms.
            _CONFIG_VARS['abiflags'] = ''

        if os.name in ('nt', 'os2'):
            _init_non_posix(_CONFIG_VARS)
        if os.name == 'posix':
            _init_posix(_CONFIG_VARS)

        # Setting 'userbase' is done below the call to the
        # init function to enable using 'get_config_var' in
        # the init-function.
        if sys.version >= '2.6':
            _CONFIG_VARS['userbase'] = _getuserbase()

        if 'srcdir' not in _CONFIG_VARS:
            _CONFIG_VARS['srcdir'] = _PROJECT_BASE
        else:
            _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])

        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory. However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if _PYTHON_BUILD and os.name == "posix":
            base = _PROJECT_BASE
            try:
                cwd = os.getcwd()
            except OSError:
                cwd = None
            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
                    base != cwd):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)

        if sys.platform == 'darwin':
            kernel_version = os.uname()[2]  # Kernel version (8.4.3)
            major_version = int(kernel_version.split('.')[0])

            if major_version < 8:
                # On Mac OS X before 10.4, check if -arch and -isysroot
                # are in CFLAGS or LDFLAGS and remove them if they are.
                # This is needed when building extensions on a 10.3 system
                # using a universal build of python.
                for key in ('LDFLAGS', 'BASECFLAGS',
                            # a number of derived variables. These need to be
                            # patched up as well.
                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                    flags = _CONFIG_VARS[key]
                    flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                    flags = re.sub('-isysroot [^ \t]*', ' ', flags)
                    _CONFIG_VARS[key] = flags
            else:
                # Allow the user to override the architecture flags using
                # an environment variable.
                # NOTE: This name was introduced by Apple in OSX 10.5 and
                # is used by several scripting languages distributed with
                # that OS release.
                if 'ARCHFLAGS' in os.environ:
                    arch = os.environ['ARCHFLAGS']
                    for key in ('LDFLAGS', 'BASECFLAGS',
                                # a number of derived variables. These need to be
                                # patched up as well.
                                'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                        flags = _CONFIG_VARS[key]
                        flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                        flags = flags + ' ' + arch
                        _CONFIG_VARS[key] = flags

                # If we're on OSX 10.5 or later and the user tries to
                # compile an extension using an SDK that is not present
                # on the current machine it is better to not use an SDK
                # than to fail.
                #
                # The major use case for this is users using a Python.org
                # binary installer on OSX 10.6: that installer uses
                # the 10.4u SDK, but that SDK is not installed by default
                # when you install Xcode.
                #
                CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
                if m is not None:
                    sdk = m.group(1)
                    if not os.path.exists(sdk):
                        for key in ('LDFLAGS', 'BASECFLAGS',
                                    # a number of derived variables. These need to be
                                    # patched up as well.
                                    'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                            flags = _CONFIG_VARS[key]
                            flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
                            _CONFIG_VARS[key] = flags

    if args:
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS
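# Usage sketch: with arguments the function returns a list of looked-up
# values, without arguments the full dictionary, as described in the
# docstring above. The variable names queried here are just examples.
cc, cflags = get_config_vars('CC', 'CFLAGS')
all_vars = get_config_vars()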
'''
Python mapping for the CoreLocation framework.

This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
'''
import os
import sys

import Foundation
import objc
from CoreLocation import _metadata
from CoreLocation._CoreLocation import *

if int(os.uname()[2].split('.')[0]) < 12:
    # OSX <= 10.7
    kCLErrorGeocodeFoundNoResult = 7
    kCLErrorGeocodeCanceled = 8
else:
    # OSX 10.8 or later
    kCLErrorGeocodeFoundNoResult = 8
    kCLErrorGeocodeCanceled = 10

sys.modules['CoreLocation'] = mod = objc.ObjCLazyModule(
    "CoreLocation",
    "com.apple.corelocation",
    objc.pathForFramework("/System/Library/Frameworks/CoreLocation.framework"),
    _metadata.__dict__, None, {
        '__doc__': __doc__,
        '__path__': __path__,
        '__loader__': globals().get('__loader__', None),
def __init__(self, basedir, configFileName=None, umask=None, reactor=None,
             config_loader=None):
    service.AsyncMultiService.__init__(self)

    if reactor is None:
        from twisted.internet import reactor
    self.reactor = reactor

    self.setName("buildmaster")

    self.umask = umask

    self.basedir = basedir
    if basedir is not None:  # None is used in tests
        assert os.path.isdir(self.basedir)

    if config_loader is not None and configFileName is not None:
        raise config.ConfigErrors([
            "Can't specify both `config_loader` and `configFilename`.",
        ])
    elif config_loader is None:
        if configFileName is None:
            configFileName = 'master.cfg'
        config_loader = config.FileLoader(self.basedir, configFileName)
    self.config_loader = config_loader
    self.configFileName = configFileName

    # flag so we don't try to do fancy things before the master is ready
    self._master_initialized = False
    self.initLock = defer.DeferredLock()

    # set up child services
    self.create_child_services()

    # db configured values
    self.configured_db_url = None

    # configuration / reconfiguration handling
    self.config = config.MasterConfig()
    self.reconfig_active = False
    self.reconfig_requested = False
    self.reconfig_notifier = None

    # this stores parameters used in the tac file, and is accessed by the
    # WebStatus to duplicate those values.
    self.log_rotation = LogRotation()

    # local cache for this master's object ID
    self._object_id = None

    # Check environment is sensible
    check_functional_environment(self.config)

    # figure out local hostname
    try:
        self.hostname = os.uname()[1]  # only on unix
    except AttributeError:
        self.hostname = socket.getfqdn()

    # public attributes
    self.name = ("%s:%s" % (self.hostname,
                            os.path.abspath(self.basedir or '.')))
    if isinstance(self.name, bytes):
        self.name = self.name.decode('ascii', 'replace')
    self.masterid = None
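# Standalone sketch of the hostname fallback used above (the helper name is
# hypothetical): os.uname() only exists on POSIX, so fall back to
# socket.getfqdn() on platforms such as Windows.
import os
import socket

def _local_hostname():
    try:
        return os.uname()[1]
    except AttributeError:
        return socket.getfqdn()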
from machine import Pin, SPI
from micropython import const
import os

import glcdfont
import tt14
import tt24
import tt32
import time

SCR_WIDTH = const(320)
SCR_HEIGHT = const(240)
SCR_ROT = const(2)
CENTER_Y = int(SCR_WIDTH / 2)
CENTER_X = int(SCR_HEIGHT / 2)

print(os.uname())

'''
TFT_CLK_PIN = const(10)
TFT_MOSI_PIN = const(11)
TFT_MISO_PIN = const(12)
TFT_CS_PIN = const(13)
TFT_RST_PIN = const(14)
TFT_DC_PIN = const(15)
'''

# TFT_CLK_PIN = Pin(10, Pin.OUT, Pin.PULL_UP)
TFT_CLK_PIN = Pin(10)
TFT_MOSI_PIN = Pin(11)
TFT_MISO_PIN = Pin(12)

TFT_CS_PIN = Pin(13, Pin.OUT, Pin.PULL_UP)
TFT_RST_PIN = Pin(14)
TFT_DC_PIN = Pin(15, Pin.OUT, Pin.PULL_UP)
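# Hedged sketch: wiring the pins above into a hardware SPI bus. The bus id
# and baudrate are assumptions for an RP2-style port; check the
# print(os.uname()) output above for the actual board before reusing them.
spi = SPI(1, baudrate=20000000,
          sck=TFT_CLK_PIN, mosi=TFT_MOSI_PIN, miso=TFT_MISO_PIN)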
def killHandler(sig, frame):
    hostname = os.uname()[1]
    logging.getLogger().warning("%s: got SIGTERM on %s :-O" % (sys.argv[0], hostname))
    logging.shutdown()
    os._exit(-1)
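# Usage sketch: installing the handler so SIGTERM triggers the shutdown
# logging above (POSIX only, matching the os.uname() call); `os`, `sys` and
# `logging` are assumed to be imported by the surrounding module.
import signal

signal.signal(signal.SIGTERM, killHandler)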