def CheckRequiredExecutables(PATH=os.getcwd(), datatype='nuc', mpi=False):
    '''Check that the required executables (jModelTest/ProtTest and GARLI)
    are installed and discoverable, exiting the process (-1) on failure.

    PATH     -- NOTE(review): unused in the body; the default is evaluated
                once at import time -- confirm it can be dropped.
    datatype -- 'nuc' requires $JMODELTEST_HOME, anything else $PROTTEST_HOME
    mpi      -- additionally require mpirun and $GARLIMPI_HOME
    '''
    print 'Checking required executables and paths . . .'
    if datatype == 'nuc':
        # Nucleotide data -> model selection via jModelTest.
        jm = os.getenv('JMODELTEST_HOME')
        if not jm:
            print 'jModelTest not in your PATH. Please set the PATH to $JMODELTEST_HOME with export'
            print 'or set it in your bash file (.bashrc in Unix) e.g. export JMODELTEST_HOME=<PATH_TO_JMODELTEST>'
            sys.exit(-1)
        print jm
    else:
        # Protein data -> ProtTest instead.
        pt = os.getenv('PROTTEST_HOME')
        if not pt:
            print 'ProtTest not in your PATH. Please set the PATH to $PROTTEST_HOME with export'
            print 'or set it in your bash file (.bashrc in Unix)e.g. export PROTTEST_HOME=<PATH_TO_PROTTEST>'
            sys.exit(-1)
        print pt
    # GARLI 2.0 must be resolvable via `which`.
    p,f = Popen('which Garli-2.0',shell=True, stdout=PIPE).communicate()
    if not p.strip():
        print 'No executable of GARLI in your PATH (or older version of Garli). Please copy the executable to the bin folder or assign the PATH with export'
        print 'or set it in your bash file (.bashrc in Unix)'
        sys.exit(-1)
    print p
    if mpi:
        # MPI mode additionally needs mpirun plus a GARLI binary built for MPI.
        garlimpi = os.getenv('GARLIMPI_HOME')
        q,g = Popen('which mpirun',shell=True, stdout=PIPE).communicate()
        if not q.strip() or not garlimpi:
            print 'Trying to use MPI with MPI applications. Install mpirun and related resources. Set a GARLIMPI_HOME to the executable with export'
            print 'e.g. export GARLIMPIHOME=[PATH_TO_GARLI], or set it in your bash file (.bashrc in Unix)'
            print 'MAKE SURE YOUR GARLI IS COMPILED FOR MPI'
            sys.exit(-1)
def get_umi_reads_and_sites(sample):
    """Return (total UMI reads, number of sites) for one sample.

    The sample's S-number (second ``_``-separated field) selects the
    ``*<S>.bg`` bedgraph files in the working directory: column 4 is
    summed with awk for the read total, and ``wc -l`` counts the sites.
    """
    s_tag = sample.split("_")[1]
    sum_cmd = "awk '{sum+=$4} END{print sum}' *%s.bg" % (s_tag)
    total_reads = Popen(sum_cmd, stdout=PIPE, shell=True).stdout.read()
    count_cmd = "wc -l *%s.bg" % (s_tag)
    raw_count = Popen(count_cmd, stdout=PIPE, shell=True).stdout.read()
    site_count = raw_count.strip().split(" ")[0]
    return total_reads.strip(), site_count
def stop():
    """Kill the vsftpd daemon if it is running.

    Looks the process up with ``pidof`` and returns a short status line.
    """
    pid = Popen(['pidof','/opt/vsftpd-2.3.4/vsftpd'],stdout=PIPE).stdout.read()
    if not pid:
        return "not running"
    Popen(['kill', pid.strip()])
    return "killed pid: %s" % pid.strip()
def add_ipv6_vxlan_flows(self,**args):
    """ Compute node only.

    Install OpenFlow rules on br-wan wiring a domain's IPv6 address into
    the VXLAN overlay: a flow template is filled with the domain's OVS
    ofport, MAC, IP, VXLAN port and VLAN id, then applied via ovs-ofctl.
    Returns {"code":1,...} on success, {"code":0,"message":...} on error.
    """
    # Required argument: virtual machine (domain) name.
    if "instance_name" not in args:
        return {"code":0,"message":"Domain name not specified !"}
    # Required argument: VLAN id of the VM's IPv6 address.
    if "vlan_id" not in args:
        return {"code":0,"message":"Ip vlan id not specified !"}
    # Required argument: the VM's IPv6 address.
    if "ip" not in args:
        return {"code":0,"message":"Domain Ip not specified !"}
    # Required argument: the VXLAN port number.
    if "vxlan_port" not in args:
        return {"code":0,"message":"Domain vxlan_port not specified !"}
    # MAC address: ignore anything the caller sent and read it from the
    # local libvirt domain definition instead.
    domifinfo = "virsh domiflist %s" % args["instance_name"]
    devices = Popen(domifinfo,shell=True,stdout=PIPE).stdout.read()
    if "bridge" not in devices:
        return {"code":0,"message":"Domain or ovs bridge may not exist!"}
    keyword = "br0" #if args["bridge"] == "br-wan" else "br1"
    line = [qline for qline in devices.split("\n") if keyword in qline ][0]
    mac = line.split()[4].strip()
    # Extra local parameter: the OVS ofport used to reach the network node.
    ovs_port_prefix = "wan" #if args["bridge"] == "br-wan" else "lan"
    device = "%s%s" % (ovs_port_prefix,args["instance_name"])
    desc = "ovs-vsctl get Interface %s ofport"%device
    js = Popen(desc,shell=True,stdout=PIPE).stdout.read()
    ofport = js.strip()
    # Remove any flow file left behind by a previous run.
    tmpfile = "/tmp/%s_add_ipv6_%s_vxlan_flows" % (args["instance_name"],"wan")
    if os.path.exists(tmpfile):
        os.remove(tmpfile)
    # Load the flow template.
    template_path = "%s%s" % (self.network_dir,"apply_ipv6_wan_flows_template")
    fd = open(template_path, "r")
    flows = fd.read()
    fd.close()
    # Substitute the placeholders and write the concrete flow file.
    with open(tmpfile,"a") as fd:
        qflows = flows.replace('QPORT','%s'%ofport)\
            .replace('QMAC','%s'%mac)\
            .replace('SRC_IP','%s'%args["ip"])\
            .replace('DEFAULT_VXLAN_PORT','%s'%args["vxlan_port"])\
            .replace('VLAN_ID', '%s'%args["vlan_id"])
        fd.write(qflows)
    # Apply the generated flow file; IPv6 is only used on the WAN side.
    apply_flows = "ovs-ofctl add-flows %s %s" % ("br-wan",tmpfile)
    errinfo = Popen(apply_flows,shell=True,stderr=PIPE).stderr.read()
    # Report the result.
    if errinfo is not None and len(errinfo.strip()) > 0:
        return {"code":0,"message":"Fail to add flows with file '%s' : %s"%(tmpfile,errinfo)}
    else:
        return {"code":1,"message":"success"}
def find_jstack():
    """Locate the jstack binary.

    Prefers the one on $PATH, then falls back to $JAVA_HOME/bin/jstack.
    Raises RuntimeError when neither is available.
    """
    which_out = Popen("which jstack", stdout=PIPE, stdin=DEVNULL,
                      shell=True).communicate()[0]
    located = which_out.strip()
    if located:
        return located
    if os.environ.get('JAVA_HOME'):
        return os.path.join(os.environ['JAVA_HOME'], 'bin', 'jstack')
    raise RuntimeError("Could not find jstack - do you have it installed in $JAVA_HOME?")
def get_host_port(host):
    """Return the OVS ofport of the ``vxlan<host>`` interface, or None
    when ovs-vsctl reports that no such interface row exists."""
    iface = "vxlan%s"%host
    query = "ovs-vsctl get Interface %s ofport" % iface
    answer = Popen(query,shell=True,stdout=PIPE).stdout.read().strip()
    if "no row" in answer:
        return None
    return answer
def _supported_lxc():
    """Report whether an lxc installation of at least version 0.7.5 is
    present (lxc-start on PATH and version check via _supported_tuple)."""
    which_out = Popen("which lxc-start 2>/dev/null", stdout=PIPE, shell=True).stdout.read()
    if not which_out.strip():
        return False
    version_out = Popen("(lxc-version 2>/dev/null || lxc-start --version) | sed 's/.* //'", stdout=PIPE, shell=True).stdout.read()
    ok, _ = _supported_tuple(version_out.strip(), (0, 7, 5))
    return ok
def upgrade(request):
    """Django view driving the multi-step firmware-upgrade wizard.

    Each POST flag advances the flow: check the USB stick -> format it ->
    back up to it -> download the new image -> ensure the stick has been
    unplugged -> kick off the upgrade script in the background.
    Renders ``upgrade/<step>.html`` with any error code in ``errormsg``.
    """
    step = 'overview'
    show_output = False
    errormsg = ''
    if request.POST.get('start') == '1':
        step = 'check_usb'
    if request.POST.get('check_usb') == '1':
        # The upgrader helper prints yes / sizefail / nodrive for the stick state.
        result = Popen(["sudo /bin/busybox sh /usr/sbin/upgrader check_usb"], shell=True, stdout=PIPE, close_fds=True).communicate()[0]
        result = result.strip()
        if result == 'yes':
            step = 'format_usb'
        elif result == 'sizefail':
            step = 'check_usb'
            errormsg = 'sizefail'
        elif result == 'nodrive':
            step = 'check_usb'
            errormsg = 'nodrive'
    if request.POST.get('format_usb') == '1':
        Popen(["sudo /bin/busybox sh /usr/sbin/upgrader format_usb"], shell=True, stdout=PIPE, close_fds=True).communicate()[0]
        step = 'backup_to_usb'
    if request.POST.get('backup_to_usb') == '1':
        # Backup runs asynchronously (no communicate()); its output is
        # redirected to /tmp/backup_output for display.
        Popen(["sudo /bin/busybox sh /usr/sbin/upgrader backup_to_usb 2>&1 > /tmp/backup_output"], shell=True, stdout=PIPE, close_fds=True)
        show_output = True
        step = 'backup_to_usb'
    if request.POST.get('proceed_to_step_4') == '1':
        step = 'download_image'
    if request.POST.get('download_image') == '1':
        result = Popen(["sudo /bin/busybox sh /usr/sbin/upgrader download_image"], shell=True, stdout=PIPE, close_fds=True).communicate()[0]
        result = result.strip()
        if result == 'yes':
            step = 'ensure_usb_unplugged'
        else:
            step = 'download_image'
            errormsg = 'imagefail'
    if request.POST.get('ensure_usb_unplugged') == '1':
        # Re-run check_usb: 'nodrive' now means the stick was removed, as required.
        result = Popen(["sudo /bin/busybox sh /usr/sbin/upgrader check_usb"], shell=True, stdout=PIPE, close_fds=True).communicate()[0]
        result = result.strip()
        if result == 'nodrive':
            step = 'start_upgrade'
        else:
            step = 'ensure_usb_unplugged'
            errormsg = 'usbfail'
    if request.POST.get('start_upgrade') == '1':
        import os
        # Fire and forget: the upgrader takes over the box, so don't wait on it.
        os.system("sudo /bin/busybox sh /usr/sbin/upgrader run_upgrade &")
    return render_to_response('upgrade/' + step + '.html', {
        'show_output': show_output,
        'errormsg': errormsg,
    }, context_instance=RequestContext(request))
def find_jstack(pid):
    """Locate jstack for the JVM of *pid*.

    Order of preference: $PATH, then $JAVA_HOME/bin, finally the
    directory of the JVM binary resolved via /proc/<pid>/exe.
    Raises RuntimeError when nothing is found.
    """
    which_out = Popen("which jstack", stdout=PIPE, stdin=DEVNULL, shell=True).communicate()[0]
    located = which_out.strip()
    if located:
        return located
    if os.environ.get('JAVA_HOME'):
        return os.path.join(os.environ['JAVA_HOME'], 'bin', 'jstack')
    jvm_bin_path = os.path.dirname(os.readlink('/proc/{pid}/exe'.format(pid=pid)))
    jvm_dir = jvm_bin_path.strip()
    if jvm_dir:
        return os.path.join(jvm_dir, 'jstack')
    raise RuntimeError("Could not find jstack - do you have it installed in $JAVA_HOME?")
def _is_running_btrfs(self):
    """True when any btrfs mount point listed in /etc/mtab is a parent
    directory of DOCKER_DIR."""
    logging.warning("checking for btrfs")
    cmd = 'cat /etc/mtab | grep btrfs | awk \'{print $2}\''
    mounts = Popen(cmd, stdout=PIPE, shell=True).stdout.read().strip()
    if not mounts:
        return False
    return any(self._is_parent_dir(mount, DOCKER_DIR)
               for mount in mounts.split("\n"))
def add_vxlan_flows(self,**args):
    """ Compute node only.

    Install OpenFlow rules on the given OVS bridge (br-wan or a lan
    bridge) connecting a domain's interface to the VXLAN overlay. The
    bridge-specific flow template is filled with the domain's ofport,
    MAC, IP, VXLAN port, VLAN id (plus VNI for lan bridges) and applied
    with ``ovs-ofctl add-flows``.
    Returns {"code":1,...} on success, {"code":0,"message":...} on error.
    """
    # Validate the required arguments.
    if "bridge" not in args:
        return {"code":0,"message":"OVS bridge not specified !"}
    if "instance_name" not in args:
        return {"code":0,"message":"Domain name not specified !"}
    if "vlan_id" not in args:
        return {"code":0,"message":"Ip vlan id not specified !"}
    if "ip" not in args:
        return {"code":0,"message":"Domain Ip not specified !"}
    if "vxlan_port" not in args:
        return {"code":0,"message":"Domain vxlan_port not specified !"}
    vxlan_port = args["vxlan_port"]
    vlan_id = args["vlan_id"]
    # Read the domain's interface list to find its MAC on the right bridge.
    domifinfo = "virsh domiflist %s" % args["instance_name"]
    devices = Popen(domifinfo,shell=True,stdout=PIPE).stdout.read()
    if "bridge" not in devices:
        return {"code":0,"message":"Domain or ovs bridge may not exist!"}
    keyword = "br0" if args["bridge"] == "br-wan" else "br1"
    line = [qline for qline in devices.split("\n") if keyword in qline ][0]
    mac = line.split()[4].strip()
    # Resolve the OVS ofport of the domain's wan/lan port.
    ovs_port_prefix = "wan" if args["bridge"] == "br-wan" else "lan"
    device = "%s%s" % (ovs_port_prefix,args["instance_name"])
    desc = "ovs-vsctl get Interface %s ofport"%device
    js = Popen(desc,shell=True,stdout=PIPE).stdout.read()
    ofport = js.strip()
    # Pick the template for the bridge type and a per-domain temp file.
    template_path = "%s%s" % (self.network_dir,"apply_%s_flows_template"%("wan" if args["bridge"] == "br-wan" else "lan"))
    tmpfile = "/tmp/%s_add_%s_vxlan_flows" % (args["instance_name"],"wan" if args["bridge"] == "br-wan" else "lan")
    fd = open(template_path, "r")
    flows = fd.read()
    fd.close()
    if os.path.exists(tmpfile):
        os.remove(tmpfile)
    # Fill the placeholders and write the concrete flow file.
    with open(tmpfile,"a") as fd:
        qflows = flows.replace('QPORT','%s'%ofport)\
            .replace('QMAC','%s'%mac)\
            .replace('SRC_IP','%s'%args["ip"])\
            .replace('DEFAULT_VXLAN_PORT','%s'%vxlan_port)\
            .replace('VLAN_ID', '%s'%vlan_id)
        if "br-lan" in args["bridge"]:
            # Lan templates additionally carry the VXLAN network identifier.
            qflows = qflows.replace('VNI','%s'%args["vni"])
        fd.write(qflows)
    # Apply the flows and report the result.
    apply_flows = "ovs-ofctl add-flows %s %s" % (args["bridge"],tmpfile)
    errinfo = Popen(apply_flows,shell=True,stderr=PIPE).stderr.read()
    if errinfo is not None and len(errinfo.strip()) > 0:
        return {"code":0,"message":"Fail to add flows with file '%s' : %s"%(tmpfile,errinfo)}
    else:
        return {"code":1,"message":"success"}
def check(self,):
    """Evaluate every configured 'tail' and 'call' probe and return the
    list of diff_value results, tails first."""
    results = []
    for entry in self.tail:
        # Payload after the first ':' is a base64-encoded file path
        # handed to `tail -n 1`.
        target = base64.b64decode(entry.split(':')[1])
        tail_out = Popen(['tail', '-n', '1', target], stdout=PIPE).communicate()[0]
        results.append(self.diff_value(entry, tail_out.strip()))
    for entry in self.call:
        # Payload is a base64-encoded shell command line.
        shell_cmd = base64.b64decode(entry.split(':')[1])
        call_out = Popen(shell_cmd, shell=True, stdin=PIPE, stdout=PIPE).stdout.read()
        results.append(self.diff_value(entry, call_out.strip()))
    return results
def get_selection(aps, vpns, gsms, others):
    """Combine the arg lists and send to dmenu for selection.

    Also executes the associated action.

    Args: args - aps: list of Actions
                 vpns: list of Actions
                 gsms: list of Actions
                 others: list of Actions
    """
    conf = configparser.ConfigParser()
    conf.read(expanduser("~/.config/networkmanager-dmenu/config.ini"))
    try:
        rofi_highlight = conf.getboolean('dmenu', 'rofi_highlight')
    except (configparser.NoOptionError, configparser.NoSectionError):
        rofi_highlight = False
    inp = []
    # Blank separator entry appended after each non-empty section.
    empty_action = [Action('', None)]
    all_actions = []
    all_actions += aps + empty_action if aps else []
    all_actions += vpns + empty_action if vpns else []
    all_actions += gsms + empty_action if gsms else []
    all_actions += others
    if rofi_highlight is True:
        # Rofi highlights the active rows itself via the index list below.
        inp = [str(action) for action in all_actions]
    else:
        # Plain dmenu: mark active connections with a '** ' prefix instead.
        inp = [('** ' if action.is_active else ' ') + str(action)
               for action in all_actions]
    active_lines = [index for index, action in enumerate(all_actions)
                    if action.is_active]
    inp_bytes = "\n".join([i for i in inp]).encode(ENC)
    command = dmenu_cmd(len(inp), active_lines=active_lines)
    sel = Popen(command, stdin=PIPE, stdout=PIPE,
                env=ENV).communicate(input=inp_bytes)[0].decode(ENC)
    if not sel.rstrip():
        sys.exit()
    if rofi_highlight is False:
        # Match the selection back to an Action, stripping the '** '
        # marker that was added for active entries.
        action = [i for i in aps + vpns + gsms + others
                  if ((str(i).strip() == str(sel.strip()) and not i.is_active) or
                      ('** ' + str(i) == str(sel.rstrip('\n')) and i.is_active))]
    else:
        action = [i for i in aps + vpns + gsms + others
                  if str(i).strip() == sel.strip()]
    assert len(action) == 1, \
        u"Selection was ambiguous: '{}'".format(str(sel.strip()))
    return action[0]
def login(self, user, password, email, registry):
    """ Login to a remote registry; True when docker reports success. """
    cmd = self.docker + ' ' + self.login_cmd + ' -u %s -p %s -e %s %s' % (user, password, email, registry)
    logging.warning(cmd)
    result = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
    if result.strip() != "Login Succeeded":
        logging.error(result.strip())
        return False
    return True
def main():
    """Print virtualenv and python version.

    Walks $WORKON_HOME; for each virtualenv directory it inspects bin/:
    the ``python`` entry yields the interpreter version, the ``pip``
    entry yields the ``pip freeze`` package list and appends a rendered
    section to a per-host markdown file.

    NOTE(review): the write in the pip branch reuses
    ``virtual_environment`` / ``python_version`` set in the python
    branch -- this depends on iterdir() yielding ``python`` before
    ``pip`` (ordering is not guaranteed); confirm.
    """
    workon_home = os.environ.get('WORKON_HOME')
    workon_home = Path(workon_home)
    for virtualenv in workon_home.iterdir():
        if virtualenv.is_dir():
            for python_bin in Path(f'{virtualenv}/bin/').iterdir():
                if python_bin.name == 'python':
                    virtual_environment = str(virtualenv).rpartition('/')[-1]
                    # Ask the venv's own interpreter for its version.
                    command = [f'{python_bin}', '-c',
                               "import sys;print(sys.version.split()[0]);"
                               ]
                    stdout, _ = Popen(command, stdout=PIPE).communicate()
                    stdout = stdout.decode('utf-8')
                    python_version = stdout.strip()
                if python_bin.name == 'pip':
                    command = [f'{python_bin}', 'freeze'
                               ]
                    stdout, _ = Popen(command, stdout=PIPE).communicate()
                    stdout = stdout.decode('utf-8')
                    packages = [p.strip() for p in stdout.split()]
                    # Append a section per venv to virtualenvs-<host>.md.
                    with open(f'virtualenvs-{os.uname()[1].split(".")[0]}.md', 'a') as f:
                        f.write(template.render(virtualenv=virtual_environment,
                                                version=python_version,
                                                packages=packages))
def get_config():
    """Return {'user': ..., 'token': ...} for GitHub access.

    Each value comes from its environment variable when set, otherwise
    from the global git config (github.user / github.token). Prints an
    explanatory message and exits(1) when a value cannot be found.
    """
    required_keys = { 'user': '******', 'token': 'GITHUB_TOKEN' }
    config = {}
    for key, env_key in required_keys.items():
        value = os.environ.get(env_key)
        if not value:
            # Fall back to the global git config.
            command = "git config --global github.%s" % key
            stdout, stderr = Popen(command, shell=True, stdin=PIPE,
                                   stdout=PIPE, stderr=PIPE).communicate()
            if stderr:
                for line in stderr.splitlines():
                    print line
                sys.exit(1)
            value = stdout.strip()
        if value:
            config[key] = value
        else:
            # Neither in the environment nor in git config: tell the user
            # how to set it up, then bail out.
            alt_help_names = {'user': '******'}
            help_name = alt_help_names.get(key, key)
            print "error: required GitHub entry '%s' not found in global "\
                "git config" % key
            print "please add it to the global git config by doing this:"
            print
            print " git config --global github.%s <your GitHub %s>" % (key, help_name)
            print
            print "or by specifying env vars GITHUB_USER and GITHUB_TOKEN"
            sys.exit(1)
    return config
def __init__(self, options):
    """Build the apt Cache wrapper from parsed command-line *options*.

    Honors --quiet (silent progress), --rootdir (uses the chroot's dpkg
    to pin APT::Architecture) and repeated name=value apt options.
    Exits(1) with a message on malformed or unsettable options.
    """
    # fixme, do graphic cache check
    self.options = options
    if options.quiet:
        tp = apt.progress.base.OpProgress()
    else:
        tp = apt.progress.text.OpProgress()
    # set architecture to architecture in root-dir
    if options.rootdir and os.path.exists(options.rootdir+"/usr/bin/dpkg"):
        arch = Popen([options.rootdir+"/usr/bin/dpkg", "--print-architecture"],
                     stdout=PIPE, universal_newlines=True).communicate()[0]
        if arch:
            apt_pkg.config.set("APT::Architecture", arch.strip())
    if options.apt_opts:
        for o in options.apt_opts:
            if o.find('=') < 0:
                sys.stderr.write(_("Configuration items must be specified with a =<value>\n"))
                sys.exit(1)
            (name, value) = o.split('=', 1)
            try:
                apt_pkg.config.set(name, value)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                sys.stderr.write(_("Couldn't set APT option %s to %s\n") % (name, value))
                sys.exit(1)
    self._cache = Cache(tp, rootdir=options.rootdir)
def _check_image_installed(self, image_name):
    """True when `docker inspect` knows *image_name* (i.e. it does not
    return the empty JSON array)."""
    cmd = DOCKER_CMD + ' -H=' + DOCKER_SOCK + ' inspect %s 2> /dev/null' % image_name
    inspect_out = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
    return inspect_out.strip() != '[]'
def ensure_access_for_clouddb(self, **args):
    """Install flows on br-lan that steer traffic towards each IP in the
    comma-separated ``ip_list`` argument out the internal uplink port,
    addressed to the internal gateway MAC."""
    if "ip_list" not in args or not args["ip_list"]:
        return {"code":0,"message":"black-list ip(s) needed specified !"}
    raw_list = args["ip_list"]
    try:
        addresses = raw_list.split(",")
    except Exception:
        return {"code":0,"message":"black-list string malformed. !"}
    tmp_flows = "/tmp/clouddb_access"
    if os.path.exists(tmp_flows):
        os.remove(tmp_flows)
    flow_fmt = "table=0,priority=100,ip,nw_dst=%s,dl_dst=%s actions=output:%d"
    # One flow line per non-blank address.
    with open(tmp_flows, "a") as handle:
        for address in addresses:
            if not address.strip():
                continue
            handle.write("%s\n" % (flow_fmt % (address, self.int_gw_mac,
                                               self.int_uplink_port)))
    apply_flows = "ovs-ofctl add-flows br-lan %s" % tmp_flows
    errinfo = Popen(apply_flows, shell=True, stderr=PIPE).stderr.read()
    if errinfo is not None and len(errinfo.strip()) > 0:
        return {"code":0,"message":"Fail to add flows with command '%s' : %s"%(apply_flows, errinfo)}
    return {"code":1,"message":"success"}
def fetch(self, device):
    """Query ``acpi -<device>`` and return {'status', 'percent'}, or
    None when the device is absent or the output cannot be parsed.

    BUGFIX: the original tested ``startswith('Discharging')`` twice (a
    redundant duplicate `if`); folded into the single off-line check.
    The bare excepts are narrowed to the exceptions actually possible.
    """
    # launch acpi bin with device parameter.
    output = Popen([self.ACPI, '-%s' % device], stdout=PIPE).communicate()[0]
    output = output.strip().split('\n')[0]
    # acpi returns nothing if device is not present
    if not ':' in output:
        return None
    # result line
    try:
        status_str = output.split(':')[1].strip()
    except IndexError:
        return None
    status = self.UNKNOWN
    percent = self.P_UNKNOWN
    if status_str == 'on-line' or status_str.startswith('Charging') \
            or status_str.startswith('Full'):
        status = self.ONLINE
    if status_str == 'off-line' or status_str.startswith('Discharging'):
        status = self.OFFLINE
    # we have a %age and time
    if ',' in status_str:
        try:
            percent = int(status_str.split(',')[1].strip().replace('%', ''))
        except (IndexError, ValueError):
            pass
    return {'status': status, 'percent': percent}
def get_num_cores(self):
    """ Get total number of cores (the "cpu cores" value from
    /proc/cpuinfo).

    BUGFIX: the grep emits one "cpu cores : N" value per logical CPU, so
    the original ``int(output.strip())`` raised ValueError on any
    multi-core machine; parse the first reported value instead.
    """
    cmd = "cat /proc/cpuinfo | grep cores | awk '{print $4}'"
    output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
    values = output.split()
    # Preserve the original ValueError when grep found nothing at all.
    return int(values[0]) if values else int(output.strip())
def get_remote_info():
    """Return (user, repo) parsed from the first configured git/hg remote
    that points at github.com.

    Tries git origin/github remotes, then hg default/github paths; URL
    alias prefixes from get_aliases() are expanded before matching.
    Raises when a remote is found but its name is malformed, or when no
    github.com remote exists at all.
    """
    commands = (
        "git config --get remote.origin.url",
        "git config --get remote.github.url",
        "hg paths default",
        "hg paths github")
    aliases = get_aliases()
    for command in commands:
        stdout, stderr = Popen(command, shell=True, stdin=PIPE,
                               stdout=PIPE, stderr=PIPE).communicate()
        if stdout:
            line = stdout.strip()
            if not "github.com" in line:
                # check if it's using an alias
                for alias in aliases:
                    if line.startswith(alias):
                        line = line.replace(alias, aliases[alias])
                        break
                else:
                    continue
            pattern = re.compile(r'([^:/]+)/([^/]+).git$')
            result = pattern.search(line)
            if result:
                return result.groups()
            else:
                raise Exception("invalid user and repo name")
        elif stderr:
            for line in stderr.splitlines():
                line = line.lower()
                # a bit hackish: hg paths <path> returns 'not found!' when
                # <path> is not in .hg/hgrc; this is to avoid showing it
                if not 'not found' in line:
                    print line
    raise Exception("not a valid repository or not hosted on github.com")
def SpawnSlaves(cluster_name, slave_template, num_slaves):
    """Instantiate *num_slaves* OpenNebula VMs from *slave_template*.

    VMs are named ``slave<i>.<cluster_name>``; returns {vm_id: first_ip}.

    BUGFIX: the VM id was extracted with ``result.strip('VM ID: ')``,
    which strips a *character set* from both ends (not the literal
    prefix) -- parse the id with a regex instead. The no-op
    ``try/except: raise`` wrapper was removed (identical behavior).
    """
    slaves_list = {}
    print("Creating Slave Nodes...")
    for i in range(1, num_slaves + 1):
        # name the slave
        slave_name = "slave" + str(i) + "." + cluster_name
        result = Popen(["onetemplate", "instantiate", slave_template,
                        "--name", slave_name], stdout=PIPE).communicate()[0]
        # onetemplate prints e.g. "VM ID: 42"
        slave_id = re.search(r'VM ID:\s*(\d+)', result).group(1)
        vm_info = Popen(["onevm", "show", str(slave_id)],
                        stdout=PIPE).communicate()[0]
        ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', vm_info)
        slaves_list[slave_id] = ip_list[0]
        print(slaves_list)
    print("Slaves Spawned...")
    return slaves_list
def get_battery_time():
    """Returns battery remaining/charging time. Uses `upower` shell command.

    :return: battery time to full or time to empty (in hours), or None
        when no battery device reports a "time to ..." line
    :rtype: float or NoneType
    """
    std, err = Popen(['upower', '-e'], stdout=PIPE).communicate()
    if not err:
        for device in std.split():
            if 'battery_' in device:
                upower = Popen(
                    ['upower', '-i', device], stdout=PIPE
                )
                # Pipe the device details through grep for the time row.
                grep = Popen(
                    ['grep', '-E', 'time\ to'],
                    stdin=upower.stdout, stdout=PIPE
                )
                std, err = grep.communicate()
                if not err:
                    # NOTE(review): the pattern expects a comma decimal
                    # separator ("1,5 hours") -- locale-dependent; confirm.
                    regexp = '^time to \w+:\s+?(\d+,\d+) hours$'
                    match = re.match(regexp, std.strip())
                    groups = match.groups()
                    if groups:
                        # assuming only one battery device
                        return float(groups[0].replace(',', '.'))
def detect_ise_version(path):
    """Return the Xilinx ISE version as "major.minor", or None.

    Looks for a version number in the resolved location of the ``xst``
    binary (``which xst``); when that path carries no version, parses
    the first line of ``xst -h`` ("Release M.m ...").

    NOTE(review): the *path* parameter is never used -- the sibling
    detect_version() matches against its path argument instead; confirm
    which behavior is intended here.
    """
    xst = Popen('which xst', shell=True, stdin=PIPE,
                stdout=PIPE, close_fds=True)
    lines = xst.stdout.readlines()
    if not lines:
        # xst not installed / not on PATH.
        return None
    xst = str(lines[0].strip())
    version_pattern = re.compile('.*?(?P<major>\d|\d\d)[^\d](?P<minor>\d|\d\d).*')
    # First check if we have version in path
    match = re.match(version_pattern, xst)
    if match:
        ise_version = "%s.%s" % (match.group('major'), match.group('minor'))
    else:
        # If it is not the case call the "xst -h" to get version
        xst_output = Popen('xst -h', shell=True, stdin=PIPE,
                           stdout=PIPE, close_fds=True)
        xst_output = xst_output.stdout.readlines()[0]
        xst_output = xst_output.strip()
        version_pattern = re.compile('Release\s(?P<major>\d|\d\d)[^\d](?P<minor>\d|\d\d)\s.*')
        match = re.match(version_pattern, xst_output)
        if match:
            ise_version = "%s.%s" % (match.group('major'), match.group('minor'))
        else:
            logging.error("xst output is not in expected format: %s\n" % xst_output + "Can't determine ISE version")
            return None
    return ise_version
def get_lsb_distributor():
    """Return the distributor ID as received from lsb_release."""
    try:
        distributor = Popen(["lsb_release", "-s", "-i"], stdout=PIPE).communicate()[0]
    except OSError:
        # lsb_release binary not present: assume Debian.
        return 'Debian'
    return distributor.strip()
def run_job(task_portion, wall, auto = False, extra = '', prefix='vrptw'):
    """Submit a batch of pygrout tasks as a single PBS job via qsub.

    task_portion -- task file names run sequentially inside the job
    wall         -- walltime (seconds) requested per task
    auto         -- skip the interactive confirmation prompt when True
    extra        -- extra arguments passed through to ./pygrout.py
    prefix       -- job name prefix
    Returns (command, script, jobname, qsub stdout).
    """
    from subprocess import Popen, PIPE
    from os import getcwd
    # some task preparation (if it wasn't a file, but a test name?)
    # Shell script piped to qsub: cd into cwd, then one pygrout run per task.
    script = """
cd %s
pwd
date
""" % getcwd() + "".join("""
./pygrout.py %s --wall %d %s
date
""" % (extra, wall, task) for task in task_portion)
    # prepare jobname
    jobname = re.sub('.txt|hombergers/|solomons/', '', prefix+'_' + task_portion[0])
    # Total walltime: per-task wall plus a 60 s margin, times the batch size.
    command = 'qsub -l nodes=1:nehalem -l walltime=%d -N %s -e /tmp' % (
        (wall+60)*len(task_portion), jobname)
    if not auto:
        print "About to pipe: \n%s\n to the command: \n%s\n\nPress Enter" % (
            script, command)
        raw_input()
    output, errors = Popen(command, shell=True, stdin=PIPE, stdout=PIPE,
                           stderr=PIPE).communicate(script)
    print "Process returned", repr((output, errors))
    return command, script, jobname, output.strip()
def test_dump(self):
    '''Write a known dict to self.file with write_json, run the
    helpers/json/dump.py script on it, and check both a zero exit status
    and the emitted KEY:"VALUE" lines.'''
    print('Testing json DUMP')
    from sutils import write_json, read_json
    print('Case 1: Dumping file.')
    jsondump = {'HELLO':'KITTY', 'BATZ':'MARU', 'MY':'MELODY' }
    write_json(jsondump,self.file)
    self.assertTrue(os.path.exists(self.file))
    script_path = "%s/helpers/json/dump.py" %(self.here)
    # Run the dump helper with the interpreter matching the version under test.
    if VERSION == 2:
        testing_command = ["python2",script_path,'--file',self.file]
    else:
        testing_command = ["python3",script_path,'--file',self.file]
    output = Popen(testing_command,stderr=PIPE,stdout=PIPE)
    t = output.communicate()[0],output.returncode
    result = {'message':t[0], 'return_code':t[1]}
    self.assertEqual(result['return_code'],0)
    output = result['message']
    if isinstance(output,bytes):
        output = output.decode(encoding='UTF-8')
    # The script prints one KEY:"VALUE" line per entry; compare the last
    # three output lines against the expected set (order-independent).
    dump = ['HELLO:"KITTY"', 'BATZ:"MARU"', 'MY:"MELODY"']
    result = output.strip('\n').split('\n')[-3:]
    for res in result:
        self.assertTrue(res in dump)
def get_free_memory(self):
    """ Get free memory of current system (MemFree from /proc/meminfo,
    in kB, divided by 1000). """
    cmd = "cat /proc/meminfo | grep MemFree | awk '{print $2}'"
    meminfo_out = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
    free_kb = int(meminfo_out.strip())
    return free_kb / 1000
def remove_ipv6_flows(self,**args): """ Work for network node and compute node. """ # 从参数中读取要取消流表的 IPv6 地址 if "domain_ip" not in args or not args["domain_ip"]: return {"code":0,"message":"Domain IP need to be specified !"} # 检测要执行的刷流表命令文件是否已存在 tmp_flows = "/tmp/remove_all_ipv6_%s_flows.txt" % (args["domain_ip"]) if os.path.exists(tmp_flows): os.remove(tmp_flows) # 读取刷流表命令文件的模板 remove_flow_template = "%s%s"%(self.network_dir, "remove_all_ipv6_template") with open(remove_flow_template, "r") as fd: flows = fd.read() fd.close() # 根据实际参数替换模板中的占位符 with open(tmp_flows,"a") as fd: flows = flows.replace('SRC_IP','%s'%args["domain_ip"])\ .replace('BRIDGE', 'br-wan') # IPv6 只适用于 br-wan fd.write(flows) fd.close() # 应用流表命令文件 apply_flows = "/bin/bash %s" % tmp_flows errinfo = Popen(apply_flows,shell=True,stderr=PIPE).stderr.read() if errinfo is not None and len(errinfo.strip()) > 0: return {"code":0,"message":"Fail to add flows with file '%s' : %s"%(tmp_flows,errinfo)} else: return {"code":1,"message":"success"}
def get_cluster(self, commit_id, conf_dir, merge_commit_into_master=False,
                is_yarn_mode=False, additional_make_distribution_args=""):
    """Return a Cluster for the given Spark commit, building it if needed.

    Resolves *commit_id* to a SHA in the master checkout; a distribution
    already cached under root_dir/<sha> is reused, otherwise one is
    built with make_spark_distribution and moved into the cache.
    Configuration from *conf_dir* is copied into the cluster dir.
    """
    if not os.path.isdir(self._master_spark):
        clone_spark(self._master_spark, self.spark_git_repo)
    # Get the SHA corresponding to the commit and check if we've already built this version:
    checkout_version(self._master_spark, commit_id, merge_commit_into_master)
    sha = Popen("cd %s; git rev-parse --verify HEAD" % self._master_spark,
                shell=True, stdout=PIPE).communicate()[0]
    sha = sha.strip()
    logger.debug("Requested version %s corresponds to SHA %s" % (commit_id, sha))
    cluster_dir = os.path.join(self.root_dir, sha)
    # TODO: detect and recover from failed builds?
    logger.debug("Searching for dir %s" % cluster_dir)
    if os.path.exists(cluster_dir):
        logger.info(
            "Found pre-compiled Spark with SHA %s; skipping build" % sha)
    else:
        logger.info("Could not find pre-compiled Spark with SHA %s" % sha)
        # Check out and build the requested version of Spark in the master spark directory
        make_spark_distribution(
            commit_id=commit_id,
            target_dir=self._master_spark,
            spark_git_repo=self.spark_git_repo,
            merge_commit_into_master=merge_commit_into_master,
            is_yarn_mode=is_yarn_mode,
            additional_make_distribution_args=additional_make_distribution_args)
        # Copy the completed build to a directory named after the SHA.
        run_cmd("mv %s %s" % (os.path.join(self._master_spark, "dist"),
                              cluster_dir))
    copy_configuration(conf_dir, cluster_dir)
    return Cluster(spark_home=cluster_dir, spark_conf_dir=conf_dir,
                   commit_sha=sha)
def get_remote_info():
    """Return (user, repo) parsed from the first configured git/hg remote
    that points at github.com.

    Tries git origin/github remotes, then hg default/github paths; URL
    alias prefixes from get_aliases() are expanded before matching.
    Unlike the strict variant, a remote without the conventional ".git"
    suffix is also accepted. Raises when nothing usable is found.
    """
    commands = (
        "git config --get remote.origin.url",
        "git config --get remote.github.url",
        "hg paths default",
        "hg paths github")
    aliases = get_aliases()
    for command in commands:
        stdout, stderr = Popen(command, shell=True, stdin=PIPE,
                               stdout=PIPE, stderr=PIPE).communicate()
        if stdout:
            line = stdout.strip()
            if not "github.com" in line:
                # check if it's using an alias
                for alias in aliases:
                    if line.startswith(alias):
                        line = line.replace(alias, aliases[alias])
                        break
                else:
                    continue
            pattern = re.compile(r'([^:/]+)/([^/]+).git$')
            result = pattern.search(line)
            if result:
                return result.groups()
            else:
                # Whilst repos are usually configured with a postfix of ".git"
                # this is by convention only. Github happily handles requests
                # without the postfix.
                pattern = re.compile(r'([^:/]+)/([^/]+)')
                result = pattern.search(line)
                if result:
                    return result.groups()
                raise Exception("invalid user and repo name")
        elif stderr:
            for line in stderr.splitlines():
                line = line.lower()
                # a bit hackish: hg paths <path> returns 'not found!' when
                # <path> is not in .hg/hgrc; this is to avoid showing it
                if not 'not found' in line:
                    print line
    raise Exception("not a valid repository or not hosted on github.com")
def __create(self):
    """
    Create a ramdisk device via `hdiutil attach -nomount ram://<size>`.

    Stores the new device node in self.myRamdiskDev and returns True;
    raises Exception when hdiutil reports anything on stderr.

    @author: Roy Nielsen
    """
    cmd = [self.hdiutil, "attach", "-nomount", "ram://" + self.diskSize]
    retval, reterr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
    if reterr:
        raise Exception("Error trying to create ramdisk(" + str(reterr).strip() + ")")
    self.myRamdiskDev = retval.strip()
    log_message("Device: \"" + str(self.myRamdiskDev) + "\"", "debug", self.message_level)
    success = True
    log_message("Success: " + str(success) + " in __create", "debug", self.message_level)
    return success
def get_aliases():
    """
    Return a dict of global git URL aliases regarding github, e.g.:

        { "alias": "http://...", "alias2": "git://...it" }

    Built from ``git config --global --get-regexp url..*github.com.*``;
    returns [] when nothing is configured.
    """
    cmd = "git config --global --get-regexp url\..*github.com.*"
    stdout, stderr = Popen(cmd, shell=True, stdin=PIPE,
                           stdout=PIPE, stderr=PIPE).communicate()
    if not stdout:
        return []
    mapping = {}
    for entry in stdout.strip().split('\n'):
        url, alias = entry.split()
        # "url.<base>.insteadof" -> strip the "url." prefix and the
        # ".insteadof" suffix to recover the base URL.
        mapping[alias] = url.split('.', 1)[1].rsplit('.', 1)[0]
    return mapping
def run(self, script, mem="2g", err=None, log=None, hold=None, **kwargs):
    """Submit *script* to SGE via qsub and record the resulting job.

    mem  -- value for the vf resource request
    err  -- stderr file path (-e)
    log  -- stdout file path (-o)
    hold -- job id this submission must wait on (-hold_jid)
    Returns (job_id, script_basename) and records the pair in self.jobs;
    raises IOError when qsub's output does not match the expected
    submission banner.
    """
    cmd = "{0} -cwd -l vf={1} {2} ".format(self.qsub, mem, self.queue_parameter)
    if err:
        cmd += "-e %s " % os.path.abspath(err)
    if log:
        cmd += "-o %s " % os.path.abspath(log)
    if hold:
        cmd += "-hold_jid %s " % hold
    cmd += os.path.abspath(script)
    # NOTE(review): `err` is rebound here from the parameter to qsub's
    # stderr, and is later interpolated into the IOError message.
    out, err = Popen(cmd.split(), stdout=PIPE, stderr=PIPE).communicate()
    try:
        # qsub prints: Your job <id> ("<name>") has been submitted
        job_id = re.compile(
            "^Your job ([0-9]+) \(\"([^\)]*)\"\) has been submitted$"
        ).search(out.strip()).groups()
        ids = int(job_id[0])
        scripts = os.path.basename(job_id[1])
        self.jobs[scripts] = str(ids)
        return ids, scripts
    except:
        raise IOError("%s\nExecuted command:\n%s\n" % (err, cmd))
def read_arptable(eth_addr: str = None, ipv4_addr: str = None) -> str:
    '''
    universal way to get ip address and mac from the system ARP table
    select one of the parameters, eth_addr has priority if you fill up both
    '''
    # addr_type 0: look up by MAC (return the other column);
    # addr_type 1: look up by IPv4.
    if eth_addr and isinstance(eth_addr, str):
        addr_type: int = 0
        delimiter = ':' if eth_addr[2] == ':' else '-'
        searchstr: str = eth_addr.replace(delimiter, '.?')
    elif ipv4_addr and isinstance(ipv4_addr, str):
        addr_type = 1
        searchstr = ipv4_addr.replace('.', '.?')
    else:
        raise ValueError('ipv4 addres and mac addres must be a string')
    if system() == "Windows":
        output = Popen(['arp', '-a'], stdout=PIPE,
                       stderr=PIPE).communicate()[0].decode('utf-8')
        for row in output.strip().split('\n'):
            if search(searchstr, row, IGNORECASE):
                fields = row.split()
                return fields[addr_type] if addr_type == 0 else fields[addr_type + 2]
        # No match on Windows: fall through and return None implicitly.
        return None
    arp_cache: str = '/proc/net/arp'
    if not exists(arp_cache):
        return None
    with open(arp_cache, 'r') as file:
        for row in file.readlines()[1:]:
            row = row.strip()
            if search(searchstr, row):
                # for posix systems in this case addr_type for ipv4 must be
                # addr_type + 2 -- see /proc/net/arp
                fields = row.split()
                return fields[addr_type] if addr_type == 0 else fields[addr_type + 2]
    raise NameError('Record not found in /proc/net/arp')
def connected_devices_arp(measurements=None, device_file="/home/pi/chalk/unique_mac_addr.csv"):
    """Scan the LAN, merge newly seen MAC addresses into *device_file*
    and add device counters (active/total/1day/1week) to *measurements*,
    which is also returned.

    BUGFIX: the original used a mutable default ``measurements={}``,
    so the same dict was shared and mutated across calls; default to
    None and create a fresh dict per call (backward-compatible).
    """
    import pandas as pd
    if measurements is None:
        measurements = {}
    print("connected devices")
    ts = int(time.time())
    # Ping-sweep the subnet so the kernel ARP table gets populated...
    nmap_cmd = "/usr/bin/nmap -sn 192.168.1.0/24"
    Popen(nmap_cmd, shell=True, stdout=PIPE)
    # ...then collect the unique MACs seen on eth0 (gateway excluded).
    arp_cmd = "/usr/sbin/arp -e -i eth0 | grep : | grep -v '192.168.1.1 ' | tr -s ' ' | cut -f3 -d' ' | sort | uniq"
    arp_res = Popen(arp_cmd, shell=True, stdout=PIPE).stdout.read().decode('utf-8')
    devices = set(arp_res.strip().split("\n"))
    new_devices = [[dev, ts, 1] for dev in devices]
    new_devices = pd.DataFrame(columns=["mac_addr", "last_seen", "N"], data=new_devices)
    # Create the CSV with its header on first run.
    if not os.path.exists(device_file):
        with open(device_file, "w") as out:
            out.write("mac_addr,last_seen,N\n")
    existing_devices = pd.read_csv(device_file)
    devices = pd.concat([existing_devices, new_devices])
    devices = devices.groupby("mac_addr").agg({"N": sum, "last_seen": max})
    devices[["last_seen", "N"]].to_csv(device_file, index=True)
    measurements["devices_active"] = new_devices.shape[0]
    measurements["devices_total"] = devices.shape[0]
    measurements["devices_1day"] = devices.query(
        "last_seen > {:d}".format(ts - 86400)).shape[0]
    measurements["devices_1week"] = devices.query(
        "last_seen > {:d}".format(ts - 86400 * 7)).shape[0]
    return measurements
def remove_ipv6_flows(self, **args):
    """ Work for network node and compute node.

    Render remove_all_ipv6_template for the given domain IP into a temp
    shell script and execute it, stripping the IPv6 flows on br-wan.
    """
    if "domain_ip" not in args or not args["domain_ip"]:
        return {"code": 0, "message": "Domain IP need to be specified !"}
    domain_ip = args["domain_ip"]
    script_path = "/tmp/remove_all_ipv6_%s_flows.txt" % (domain_ip)
    if os.path.exists(script_path):
        os.remove(script_path)
    template_file = "%s%s" % (self.network_dir, "remove_all_ipv6_template")
    with open(template_file, "r") as handle:
        template_text = handle.read()
    # IPv6 only applies to br-wan.
    rendered = template_text.replace('SRC_IP', '%s' % domain_ip)\
        .replace('BRIDGE', 'br-wan')
    with open(script_path, "a") as handle:
        handle.write(rendered)
    run_script = "/bin/bash %s" % script_path
    errinfo = Popen(run_script, shell=True, stderr=PIPE).stderr.read()
    if errinfo is None or not errinfo.strip():
        return {"code": 1, "message": "success"}
    return {
        "code": 0,
        "message": "Fail to add flows with file '%s' : %s" % (script_path, errinfo)
    }
def restore(srcDir, destDir): backupDir = srcDir # from where to backup apks restoreDir = destDir # where to restore apks # check for backup dir if not os.path.exists(backupDir): droid.makeToast("Backup dir does not exist!") sys.exit("Backup dir does not exist!") else: # count number of files in src dir os.chdir(backupDir) file_count = Popen('ls | wc -l', shell=True, stdout=PIPE).stdout.readline() src_file_count = file_count.strip() # check if the destination dir path if not os.path.exists(restoreDir): sys.exit("Restore Dir does not exist!") else: # call progress bar progress_bar(srcDir, destDir, src_file_count)
def addToCrontab(self):
    """Append this job's schedule line to /etc/crontab if not present.

    Locates the python interpreter via `which`, builds a crontab record
    from the schedule fields on self (minute/hour/day-of-month/month/
    day-of-week), and appends it to /etc/crontab unless an identical
    record already exists (idempotent).
    """
    # Resolve the python interpreter the cron job should run with.
    out, err = Popen("which python".split(), stdout=PIPE, stderr=PIPE).communicate()
    # When the cwd is not already the libs dir, cd into its libs subdir
    # before running refreshPhoneBook.py.
    record = " ".join([
        self.__minute, self.__hour, self.__dayOfMonth, self.__month,
        self.__dayOfWeek, "root",
        "cd %s &&" % os.path.join(
            os.getcwd(), ("", "libs")["libs" not in os.getcwd()]),
        out.strip(),
        os.path.join(os.getcwd(), ("", "libs")["libs" not in os.getcwd()],
                     "refreshPhoneBook.py")
    ]) + "\n"
    fpath = os.path.join(os.path.sep, "etc", "crontab")
    # BUG FIX: use context managers so the file handles are closed even
    # when read()/write() raises (the originals leaked on error).
    with open(fpath, "r") as fp:
        rd = fp.read()
    # Only append when the exact record is not present yet.
    if record not in rd:
        with open(fpath, "a") as fp:
            fp.write(record)
def detect_version(self, path):
    """Return the ISE version as "major.minor" for the given install path.

    First tries to parse the version out of *path* itself; when that
    fails, runs `xst -h` and parses its "Release X.Y" banner line.
    Returns None when neither source yields a version.
    """
    # FIX: raw strings for the regex patterns ("\d"/"\s" are invalid
    # escape sequences in plain strings); dead commented-out code removed.
    version_pattern = re.compile(
        r'.*?(?P<major>\d|\d\d)[^\d](?P<minor>\d|\d\d).*')
    # First check if we have the version in the path itself.
    match = re.match(version_pattern, path)
    if match:
        ise_version = "%s.%s" % (match.group('major'), match.group('minor'))
    else:
        # If it is not the case, call "xst -h" to get the version from
        # the first line of its output.
        xst_output = Popen('xst -h', shell=True, stdin=PIPE,
                           stdout=PIPE, close_fds=True)
        xst_output = xst_output.stdout.readlines()[0]
        xst_output = xst_output.strip()
        version_pattern = re.compile(
            r'Release\s(?P<major>\d|\d\d)[^\d](?P<minor>\d|\d\d)\s.*')
        match = re.match(version_pattern, xst_output)
        if match:
            ise_version = "%s.%s" % (match.group('major'), match.group('minor'))
        else:
            logging.error("xst output is not in expected format: %s\n"
                          % xst_output + "Can't determine ISE version")
            return None
    return ise_version
def set_disk_iops(self, **args):
    """Apply a read/write IOPS limit to one disk of a libvirt domain.

    Expects args: instance_name (domain), disk_uuid (or a name containing
    "system" for the root disk -> vda), iops (int limit applied to both
    read and write).  Uses `virsh blkdeviotune`; --live is added only
    when the domain is currently running.

    Returns {"code": 1, "message": "SUCCESS"} on success, or
    {"code": 0, "message": ...} when the device is missing or virsh
    prints anything (virsh is silent on success).
    """
    instance_name = args["instance_name"]
    disk = args["disk_uuid"]
    iops = int(args["iops"])
    dominfo = "virsh dominfo %s" % instance_name
    domdetails = Popen(dominfo, stdout=PIPE, shell=True).stdout.read()
    live = "running" in domdetails
    # BUG FIX: initialise device, otherwise the "not found" check below
    # raised NameError when no domblklist line matched the disk uuid.
    device = None
    if "system" in disk:
        device = "vda"
    else:
        # Look the device name up in the domain's block-device table.
        get_blks = "virsh domblklist %s" % instance_name
        blks = Popen(get_blks, stdout=PIPE, shell=True).stdout.read()
        for line in blks.split("\n"):
            if disk in line:
                device = line.split()[0].strip()
                break
    if device is None:
        return {"message": "Device %s not found " % disk, "code": 0}
    setblktune = "virsh blkdeviotune \
        --domain %(domain)s \
        --device %(device)s \
        --read-iops-sec %(riops)d \
        --write-iops-sec %(wiops)d \
        %(live)s --config" % {
        "domain": instance_name,
        "device": device,
        "riops": iops,
        "wiops": iops,
        "live": "--live" if live else ""
    }
    log(setblktune)
    retv = Popen(setblktune, stdout=PIPE, shell=True).stdout.read()
    log(retv)
    # virsh prints nothing on success; any output means failure.
    success = len(retv.strip()) == 0
    return {
        "message": "SUCCESS" if success else retv,
        "code": 1 if success else 0
    }
def possibleToolLocations():
    """Candidate install paths for the `plotdevice` command-line tool.

    Starts with ~/bin and /usr/local/bin, then asks the user's login
    shell for its $PATH and appends <dir>/plotdevice for every usable
    entry (system bin/sbin dirs and duplicates are skipped).
    """
    candidates = [
        '%s/bin/plotdevice' % os.environ['HOME'],
        '/usr/local/bin/plotdevice',
    ]
    # Look up the user's login shell through Directory Services.
    out, _ = Popen(
        ['dscl', '.', '-read', '/Users/' + os.environ['USER'], 'UserShell'],
        stdout=PIPE).communicate()
    login_shell = out.replace('UserShell:', '').strip()
    if login_shell:
        # Run a login shell so its rc files populate $PATH, then echo it.
        out, _ = Popen([login_shell, "-l"], stdout=PIPE, stderr=PIPE,
                       stdin=PIPE).communicate("echo $PATH")
        for entry in out.strip().split(':'):
            candidate = entry + '/plotdevice'
            # Skip system directories and entries we already have.
            unwanted = ('/sbin' in candidate
                        or candidate.startswith('/bin')
                        or candidate.startswith('/usr/bin')
                        or candidate in candidates)
            if not unwanted:
                candidates.append(candidate)
    return candidates
def get_git_repo_dir():
    """ Get the directory of the current git project

    Returns:
        str: The top level directory of the current git project, or
        None when the current directory is not inside a git work tree.
    """
    repo_dir, err = Popen(['git', 'rev-parse', '--show-toplevel'],
                          stdin=PIPE, stderr=PIPE, stdout=PIPE).communicate()
    repo_dir = repo_dir.strip()
    # git prints nothing on stdout when we are not in a repository.
    if not repo_dir:
        repo_dir = None
    if repo_dir and not isinstance(repo_dir, str):
        # Python 3 communicate() yields bytes; decode to str.
        # BUG FIX: a falsy preferred encoding previously made the
        # function return raw bytes — fall back to UTF-8 instead.
        encoding = locale.getpreferredencoding() or 'utf-8'
        return repo_dir.decode(encoding)
    return repo_dir
def platform(self):
    """Return a normalized platform identifier string.

    Linux: the LSB distributor id (lowercased) when self.lsb is set,
    otherwise a guess from the classic /etc release marker files.
    Darwin: the ProductName reported by `sw_vers`, lowercased with
    spaces replaced by underscores.  Anything else: "unknown".
    """
    if self.os == "linux":
        lsb = self.lsb
        if lsb:
            return lsb['id'].lower()
        # No LSB data — probe the well-known distro marker files.
        for marker, distro in (("/etc/redhat-release", "redhat"),
                               ("/etc/fedora-release", "fedora"),
                               ("/etc/debian_version", "debian"),
                               ("/etc/gentoo-release", "gentoo")):
            if os.path.exists(marker):
                return distro
        return "unknown"
    if self.os == "darwin":
        out = Popen("/usr/bin/sw_vers", stdout=PIPE).communicate()[0]
        # Parse "Key: Value" lines into a dict
        # (ProductName, ProductVersion, BuildVersion).
        sw_vers = dict([y.strip() for y in x.split(':', 1)]
                       for x in out.strip().split('\n'))
        return sw_vers['ProductName'].lower().replace(' ', '_')
    return "unknown"
def get_battery_time():
    """Returns battery remaining/charging time.

    Uses `upower` shell command.

    :return: battery time to full or time to empty, in hours; None when
        no battery is found or the output cannot be parsed
    :rtype: float or NoneType
    """
    std, err = Popen(['upower', '-e'], stdout=PIPE).communicate()
    if not err:
        for device in std.split():
            if 'battery_' in device:
                upower = Popen(['upower', '-i', device], stdout=PIPE)
                # grep the "time to full/empty" line out of the details.
                grep = Popen(['grep', '-E', r'time\ to'],
                             stdin=upower.stdout, stdout=PIPE)
                std, err = grep.communicate()
                if not err:
                    # upower reports e.g. "time to empty:   1,5 hours"
                    # FIX: raw string for the regex.
                    regexp = r'^time to \w+:\s+?(\d+,\d+) hours$'
                    match = re.match(regexp, std.strip())
                    # BUG FIX: guard against a non-matching line (other
                    # locale / output format) instead of crashing with
                    # AttributeError on match.groups().
                    if match is None:
                        return None
                    groups = match.groups()
                    if groups:
                        # assuming only one battery device
                        return float(groups[0].replace(',', '.'))
def ChangeUser(self, UserName, Password): ''' Used to add/edit the user of the system ''' #If the User exists then Edit the password only UserExists = False Users = Popen("sudo -S pdbedit -Lw".split(), stdout=PIPE, stdin=PIPE).communicate(self.ROOTPASS)[0] Users = Users.strip().split("\n") for item in Users: Users = item[:item.find(":")].strip() if (Users == UserName): UserExists = True print("User Already Exists") break #Add User to the system if (UserExists == False): print "------Add User-------" Popen(("sudo -S useradd -r -s /bin/false " + UserName).split(), stdin=PIPE).communicate(self.ROOTPASS) Popen(("sudo -S smbpasswd -n -a " + UserName).split(), stdin=PIPE).communicate(self.ROOTPASS)[0] #Change Password of the User if (Password == self.HIDEPASS ): #If no change is made to the window's password print("---------Nothing Changed--------") elif (Password == ""): #If no password is requested print "------Change UserName-------" Popen(("sudo -S smbpasswd -n -a " + UserName).split(), stdin=PIPE).communicate(self.ROOTPASS)[0] print("Password Changed to None") else: #If password is used print("-------Changing Password-------") smbpasswd = Popen(("sudo -S smbpasswd -s -a " + UserName).split(), stdin=PIPE) smbpasswd.communicate(b'\n'.join([Password, Password])) print("Password Changed to: " + Password)
def reportSummaries(d):
    # Cross-references each summary listed in <d>/summaries.txt against
    # <d>/similarities.txt and prints, per summary file, a histogram of
    # similarity scores (bucketed into tenths) plus an overall count of
    # functions that matched at all.
    sums = join(d, 'summaries.txt')
    sims = join(d, 'similarities.txt')
    if exists(sums):
        with open(sums) as sin:
            for summary in sin:
                # Each line: "<summary-file> <pattern>".
                summary = summary.strip().split(' ')
                if not summary or len(summary) != 2:
                    continue
                with open(join(d, summary[0])) as fin:
                    count = 0            # total functions examined
                    overall = [0] * 10   # best (lowest) bucket per function
                    matches = [0] * 10   # every similarity bucket seen
                    for line in fin:
                        line = line.rstrip()
                        count += 1
                        n = 10           # best bucket for this function
                        # grep the similarities file for this function,
                        # then filter those lines by the summary pattern.
                        p = Popen(['grep', line + ' ', sims], stdout=PIPE)
                        pin = Popen(['grep', summary[1], '-'], stdin=p.stdout,
                                    stdout=PIPE).communicate()[0]
                        for match in pin.strip().split('\n'):
                            if match.strip():
                                # The score follows the ':'; bucket it
                                # into tenths (0.0-1.0 -> 0-10).
                                i = match.find(':')
                                m = int(float(match[i + 2:].strip()) * 10)
                                matches[m] += 1
                                n = min(n, m)
                        if n < 10:
                            overall[n] += 1
                    print 'Functions in', summary[0], 'matching', summary[
                        1], 'with similarity in', sims
                    print matches, '...', sum(matches)
                    s = 0
                    for n in overall:
                        s += n
                    print s,
                    print '/', count
def test_text_regex(self):
    """Text.textRegex predicate should match vertices by regex on 'name'."""
    self.container.start()
    # Resolve the docker host address the gremlin server listens on.
    raw_ip = Popen(["docker-machine", "ip"], stdout=PIPE).communicate()[0]
    host = raw_ip.strip().decode("utf-8")
    connection = JanusGraphClient().connect(
        host=str(host), port="8182",
        traversal_source="gods_traversal").get_connection()
    g = Graph().traversal().withRemote(connection)
    # pattern -> expected number of matching vertices
    expectations = {"s.{2}": 2, "shouldNotBeFound": 0}
    for pattern, expected in expectations.items():
        actual = g.V().has("name", Text.textRegex(pattern)).count().next()
        self.assertEqual(actual, expected)
    connection.close()
    self.container.stop()
def spawn_slaves(cluster_name, slave_template, num_slaves):
    """Instantiate *num_slaves* OpenNebula VMs from *slave_template*.

    Each slave is named "slave<i>.<cluster_name>".  The VM id is parsed
    from `onetemplate instantiate` output ("VM ID: <id>") and its first
    IPv4 address is read from `onevm show`.

    Returns:
        dict: slave name -> first IPv4 address of that VM.
    """
    slaves_dict = {}
    print("Creating Slave Nodes...")
    try:
        for i in range(1, num_slaves + 1):
            # name the slave
            slave_name = "slave" + str(i) + "." + cluster_name
            result = Popen([
                "onetemplate", "instantiate", slave_template, "--name",
                slave_name
            ], stdout=PIPE).communicate()[0]
            # BUG FIX: str.strip('VM ID: ') strips a *character set*, not
            # a prefix, and could eat characters of the id itself.  Split
            # on the literal prefix instead.
            slave_id = result.partition('VM ID: ')[2].strip()
            vm_info = Popen(["onevm", "show", str(slave_id)],
                            stdout=PIPE).communicate()[0]
            ip_list = re.findall(r'[0-9]+(?:\.[0-9]+){3}', vm_info)
            slaves_dict[slave_name] = ip_list[0]
    except:
        # Preserved from the original: surface any failure to the caller.
        raise
    print("Slaves Spawned!")
    return slaves_dict
def task1():
    """Invoke `call --wait wait_two_ints 1 2` and verify the reported sum."""
    result = Popen([cmd, 'call', '--wait', 'wait_two_ints', '1', '2'],
                   stdout=PIPE).communicate()[0]
    self.assertEquals('sum: 3', result.strip())
break else: raise Exception('%s not found' % lib) include_files.append((path, lib)) ssl_paths = ssl.get_default_verify_paths() include_files.append( (ssl_paths.openssl_cafile, os.path.join('etc', 'ssl', 'cert.pem'))) if os.path.exists(ssl_paths.openssl_capath): include_files.append( (ssl_paths.openssl_capath, os.path.join('etc', 'ssl', 'certs'))) version = Popen( 'python setup.py --version', stdout=PIPE, shell=True, encoding='utf-8' ).stdout.read() version = version.strip() setup(name='tryton', version=version, options={ 'build_exe': { 'no_compress': True, 'include_files': include_files, 'excludes': ['tkinter'], 'silent': True, 'packages': ['gi'], 'include_msvcr': True, }, 'bdist_mac': { 'iconfile': os.path.join( 'tryton', 'data', 'pixmaps', 'tryton', 'tryton.icns'),
def getSPIVendorLegacy(spi_id):
    """Return the vendor name for SPI flash *spi_id*.

    Reads the first two characters (the manufacturer id) from
    /tmp/spi0.<spi_id>_vendor.dat and maps them to a vendor name via
    SPIVendorID2Name().
    """
    # FIX: let cut read the file directly — the `cat |` pipeline added an
    # extra process for nothing.
    cmd = "cut -c1-2 /tmp/spi0.%d_vendor.dat" % (spi_id)
    data = Popen(cmd, shell=True, stdout=PIPE).stdout.read().decode()
    manufacturer_id = data.strip("\n")
    return SPIVendorID2Name(manufacturer_id)
def getInformation(self, param=None):
    # Collect a dict of BMC identity/health information: platform name,
    # MAC address, reset reason, uptime, resource usage, firmware and
    # TPM versions, SPI vendors, ASD status and verified-boot state.
    # Get Platform Name
    name = pal.pal_get_platform_name()
    # Get MAC Address
    eth_intf = pal.pal_get_eth_intf_name()
    mac_path = "/sys/class/net/%s/address" % (eth_intf)
    if os.path.isfile(mac_path):
        mac = open(mac_path).read()
        mac_addr = mac[0:17].upper()
    else:
        # Fall back to get_mac() (returns an int) and format as AA:BB:...
        mac = get_mac()
        mac_addr = ":".join(
            ("%012X" % mac)[i:i + 2] for i in range(0, 12, 2))
    # Get BMC Reset Reason
    # Read the watchdog counter register: a zero high byte means
    # power-on reset; non-zero means a user- or WDT-initiated reset.
    wdt_counter = (Popen("devmem 0x1e785010", shell=True,
                         stdout=PIPE).stdout.read().decode())
    wdt_counter = int(wdt_counter, 0)
    wdt_counter &= 0xFF00
    if wdt_counter:
        por_flag = 0
    else:
        por_flag = 1
    if por_flag:
        reset_reason = "Power ON Reset"
    else:
        reset_reason = "User Initiated Reset or WDT Reset"
    # Get BMC's Up Time
    data = Popen("uptime", shell=True, stdout=PIPE).stdout.read().decode()
    uptime = data.strip()
    # Use another method, ala /proc, but keep the old one for backwards
    # compat.
    # See http://man7.org/linux/man-pages/man5/proc.5.html for details
    # on full contents of proc endpoints.
    uptime_seconds = read_file_contents("/proc/uptime")[0].split()[0]
    # Pull load average directory from proc instead of processing it from
    # the contents of uptime command output later.
    load_avg = read_file_contents("/proc/loadavg")[0].split()[0:3]
    # Get Usage information (first two lines of `top`).
    data = Popen("top -b n1", shell=True,
                 stdout=PIPE).stdout.read().decode()
    adata = data.split("\n")
    mem_usage = adata[0]
    cpu_usage = adata[1]
    # Get OpenBMC version
    obc_version = ""
    data = Popen("cat /etc/issue", shell=True,
                 stdout=PIPE).stdout.read().decode()
    # OpenBMC Version
    ver = re.search(r"[v|V]([\w\d._-]*)\s", data)
    if ver:
        obc_version = ver.group(1)
    # U-Boot version
    uboot_version = self.getUbootVer()
    if uboot_version is None:
        uboot_version = "NA"
    # Get kernel release and kernel version
    kernel_release = ""
    data = Popen("uname -r", shell=True,
                 stdout=PIPE).stdout.read().decode()
    kernel_release = data.strip("\n")
    kernel_version = ""
    data = Popen("uname -v", shell=True,
                 stdout=PIPE).stdout.read().decode()
    kernel_version = data.strip("\n")
    # Get TPM version
    tpm_tcg_version = "NA"
    tpm_fw_version = "NA"
    if os.path.exists("/sys/class/tpm/tpm0"):
        # TPM 1.x exposes its versions through the sysfs caps file.
        tpm1_caps = "/sys/class/tpm/tpm0/device/caps"
        if os.path.isfile(tpm1_caps):
            with open(tpm1_caps) as f:
                for line in f:
                    if "TCG version:" in line:
                        tpm_tcg_version = line.strip(
                            "TCG version: ").strip("\n")
                    elif "Firmware version:" in line:
                        tpm_fw_version = line.strip(
                            "Firmware version: ").strip("\n")
    elif os.path.isfile("/usr/bin/tpm2_getcap"):
        # TPM 2.0: parse the fixed properties from tpm2_getcap.
        # NOTE(review): relies on fixed line numbers (2 and 21) of the
        # tool's output — fragile across tpm2-tools versions; confirm.
        with Popen(
                "/usr/bin/tpm2_getcap -c properties-fixed 2>/dev/null",
                shell=True, stdout=PIPE) as f:
            tpm_tcg_version = f.stdout.readlines()[2].decode().split(
                "\"")[1]
        with Popen(
                "/usr/bin/tpm2_getcap -c properties-fixed 2>/dev/null",
                shell=True, stdout=PIPE) as f:
            value = f.stdout.readlines()[21].decode().strip(
                "\n").split(":")[1]
            value = int(value, 16)
            # Firmware version is packed as major<<16 | minor.
            tpm_fw_version = "%d.%d" % (value >> 16, value & 0xFFFF)
    spi0_vendor = getSPIVendor(0)
    spi1_vendor = getSPIVendor(1)
    # ASD status - check if ASD daemon/asd-test is currently running
    asd_status = bool(
        Popen("ps | grep -i [a]sd", shell=True, stdout=PIPE).stdout.read())
    vboot_info = get_vboot_status()
    used_fd_count = read_file_contents(
        "/proc/sys/fs/file-nr")[0].split()[0]
    info = {
        "Description": name + " BMC",
        "MAC Addr": mac_addr,
        "Reset Reason": reset_reason,
        # Upper case Uptime is for legacy
        # API support
        "Uptime": uptime,
        # Lower case Uptime is for simpler
        # more pass-through proxy
        "uptime": uptime_seconds,
        "Memory Usage": mem_usage,
        "CPU Usage": cpu_usage,
        "OpenBMC Version": obc_version,
        "u-boot version": uboot_version,
        "kernel version": kernel_release + " " + kernel_version,
        "TPM TCG version": tpm_tcg_version,
        "TPM FW version": tpm_fw_version,
        "SPI0 Vendor": spi0_vendor,
        "SPI1 Vendor": spi1_vendor,
        "At-Scale-Debug Running": asd_status,
        "vboot": vboot_info,
        "load-1": load_avg[0],
        "load-5": load_avg[1],
        "load-15": load_avg[2],
        "open-fds": used_fd_count,
    }
    return info
ldflags += config_env['LIBMAPNIK_LINKFLAGS'] dep_libs = ''.join([' -l%s' % i for i in env['LIBMAPNIK_LIBS']]) # remove local agg from public linking dep_libs = dep_libs.replace('-lagg', '') git_revision = 'N/A' git_describe = config_env['MAPNIK_VERSION_STRING'] try: git_cmd = "git rev-list --max-count=1 HEAD" stdin, stderr = Popen(git_cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate() if not stderr: git_revision = stdin.strip() git_cmd = "git describe" stdin, stderr = Popen(git_cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate() if not stderr: git_describe = stdin.strip() except: pass # for fonts and input plugins we should try # to store the relative path, if feasible fontspath = config_env['MAPNIK_FONTS'] lib_root = os.path.join(config_env['PREFIX'], config_env['LIBDIR_SCHEMA']) if lib_root in fontspath: fontspath = "${CONFIG_PREFIX}/" + os.path.relpath(fontspath,
[dirpath, [os.path.join(dirpath, f) for f in filenames]]) # Small hack for working with bdist_wininst. # See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst': for file_info in data_files: file_info[0] = '\\PURELIB\\%s' % file_info[0] scripts = ['bin/scrapy'] if os.name == 'nt': scripts.append('extras/scrapy.bat') if os.environ.get('SCRAPY_VERSION_FROM_GIT'): v = Popen("git describe", shell=True, stdout=PIPE).communicate()[0] with open('scrapy/VERSION', 'w+') as f: f.write(v.strip()) with open(os.path.join(os.path.dirname(__file__), 'scrapy/VERSION')) as f: version = f.read().strip() setup_args = { 'name': 'Scrapy', 'version': version, 'url': 'http://scrapy.org', 'description': 'A high-level Python Screen Scraping framework', 'long_description': open('README.rst').read(), 'author':
return 0 for search_dir in DIRECTORIES_TO_SEARCH_FORM: for dir_name in os.listdir(search_dir): dir_path = os.path.abspath(os.path.join(search_dir, dir_name)) if not os.path.isdir(dir_path): continue setup_filepath = os.path.join(dir_path, 'setup.py') if not os.path.isfile(setup_filepath): continue os.chdir(dir_path) version = Popen(["python", setup_filepath, '--version'], stdout=PIPE).stdout.read() version = version.strip().decode() package_name = Popen(["python", setup_filepath, '--name'], stdout=PIPE).stdout.read() package_name = package_name.strip().decode().replace(' ', '-') remote_version = pypi.package_releases(package_name) print(dir_name, version, remote_version) if len(remote_version) == 0 or version_compare(version, remote_version[0]) < 0: print('----- UPLOADING PYPI -----', package_name) if os.path.isdir('./dist'): shutil.rmtree('./dist') Popen(['python', 'setup.py', 'sdist', 'bdist_wheel'],
def upgrade(request):
    # Django view driving a multi-step USB firmware-upgrade wizard.
    # Each POST flag advances to the next template under
    # upgrade/<step>.html; the actual work is delegated to the
    # /usr/sbin/upgrader shell script run through sudo + busybox sh.
    step = 'overview'
    show_output = False
    errormsg = ''
    # Step 1 -> 2: begin the wizard.
    if request.POST.get('start') == '1':
        step = 'check_usb'
    # Step 2: verify a suitable USB drive is attached
    # (upgrader prints "yes", "sizefail" or "nodrive").
    if request.POST.get('check_usb') == '1':
        result = Popen(
            ["sudo /bin/busybox sh /usr/sbin/upgrader check_usb"],
            shell=True, stdout=PIPE, close_fds=True).communicate()[0]
        result = result.strip()
        if result == 'yes':
            step = 'format_usb'
        elif result == 'sizefail':
            step = 'check_usb'
            errormsg = 'sizefail'
        elif result == 'nodrive':
            step = 'check_usb'
            errormsg = 'nodrive'
    # Step 3: format the drive, then back the system up onto it.
    if request.POST.get('format_usb') == '1':
        Popen(["sudo /bin/busybox sh /usr/sbin/upgrader format_usb"],
              shell=True, stdout=PIPE, close_fds=True).communicate()[0]
        step = 'backup_to_usb'
    if request.POST.get('backup_to_usb') == '1':
        # The backup runs asynchronously; its output is redirected to
        # /tmp/backup_output so the template can display it.
        Popen([
            "sudo /bin/busybox sh /usr/sbin/upgrader backup_to_usb 2>&1 > /tmp/backup_output"
        ], shell=True, stdout=PIPE, close_fds=True)
        show_output = True
        step = 'backup_to_usb'
    if request.POST.get('proceed_to_step_4') == '1':
        step = 'download_image'
    # Step 4: fetch the new firmware image.
    if request.POST.get('download_image') == '1':
        result = Popen(
            ["sudo /bin/busybox sh /usr/sbin/upgrader download_image"],
            shell=True, stdout=PIPE, close_fds=True).communicate()[0]
        result = result.strip()
        if result == 'yes':
            step = 'ensure_usb_unplugged'
        else:
            step = 'download_image'
            errormsg = 'imagefail'
    # Step 5: the USB drive must be unplugged before flashing starts.
    if request.POST.get('ensure_usb_unplugged') == '1':
        result = Popen(
            ["sudo /bin/busybox sh /usr/sbin/upgrader check_usb"],
            shell=True, stdout=PIPE, close_fds=True).communicate()[0]
        result = result.strip()
        if result == 'nodrive':
            step = 'start_upgrade'
        else:
            step = 'ensure_usb_unplugged'
            errormsg = 'usbfail'
    # Final step: kick the upgrade off in the background and return.
    if request.POST.get('start_upgrade') == '1':
        import os
        os.system("sudo /bin/busybox sh /usr/sbin/upgrader run_upgrade &")
    return render_to_response('upgrade/' + step + '.html', {
        'show_output': show_output,
        'errormsg': errormsg,
    }, context_instance=RequestContext(request))
def call(cmd, silent=True):
    """Run *cmd* through the shell and return its stripped stdout.

    When the command writes to stderr nothing is returned (implicitly
    None); unless *silent* is set, the captured stderr is printed.
    """
    proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if not err:
        return out.strip()
    if not silent:
        print (err)
def test_rosservice(self):
    # End-to-end exercise of the `rosservice` command line tool against
    # the AddTwoInts test services: list/args/type/find/uri/call, header
    # handling, ROS_NAMESPACE resolution, and `call --wait`.
    # wait for network to initialize
    services = ['/add_two_ints', '/foo/add_two_ints', '/bar/add_two_ints']
    for s in services:
        rospy.wait_for_service(s)
    cmd = 'rosservice'
    names = [
        'add_two_ints', '/add_two_ints', 'foo/add_two_ints',
        '/bar/add_two_ints'
    ]
    # list
    # - hard to exact match as we are still adding builtin services to nodes (e.g. set_logger_level)
    output = Popen([cmd, 'list'], stdout=PIPE).communicate()[0]
    l = set(output.split())
    for s in services:
        self.assert_(s in l)
    for name in names:
        # args
        output = Popen([cmd, 'args', name], stdout=PIPE).communicate()[0]
        self.assertEquals('a b', output.strip())
        # type
        output = Popen([cmd, 'type', name], stdout=PIPE).communicate()[0]
        self.assertEquals('test_rosmaster/AddTwoInts', output.strip())
        # find
        output = Popen([cmd, 'find', 'test_rosmaster/AddTwoInts'],
                       stdout=PIPE).communicate()[0]
        values = [v.strip() for v in output.split('\n') if v.strip()]
        self.assertEquals(set(values), set(services))
        # uri
        output = Popen([cmd, 'uri', name], stdout=PIPE).communicate()[0]
        # - no exact answer
        self.assert_(output.startswith('rosrpc://'), output)
        # call (both with and without --wait)
        output = Popen([cmd, 'call', '--wait', name, '1', '2'],
                       stdout=PIPE).communicate()[0]
        self.assertEquals('sum: 3', output.strip())
        output = Popen([cmd, 'call', name, '1', '2'],
                       stdout=PIPE).communicate()[0]
        self.assertEquals('sum: 3', output.strip())
    name = 'header_echo'
    # test with a Header so we can validate keyword args
    import yaml
    import time
    t = time.time()
    # test with empty headers
    for v in ['{}', '{header: {}}', '{header: {seq: 0}}']:
        output = Popen([cmd, 'call', name, v],
                       stdout=PIPE).communicate()[0]
        output = output.strip()
        self.assert_(output, output)
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated in PyYAML >= 5.1; acceptable
        # here since the YAML comes from our own service call.
        val = yaml.load(output)['header']
        self.assertEquals('', val['frame_id'])
        self.assert_(val['seq'] >= 0)
        self.assertEquals(0, val['stamp']['secs'])
        self.assertEquals(0, val['stamp']['nsecs'])
    # test with auto headers (stamp should be filled in with "now")
    for v in ['{header: auto}', '{header: {stamp: now}}']:
        output = Popen([cmd, 'call',
                        name, v], stdout=PIPE).communicate()[0]
        val = yaml.load(output.strip())['header']
        self.assertEquals('', val['frame_id'])
        self.assert_(val['seq'] >= 0)
        self.assert_(val['stamp']['secs'] >= int(t))
    # verify that it respects ROS_NS
    # - the uris should be different as the names should resolve differently
    env = os.environ.copy()
    env['ROS_NAMESPACE'] = 'foo'
    uri1 = Popen([cmd, 'uri', 'add_two_ints'],
                 stdout=PIPE).communicate()[0]
    uri2 = Popen([cmd, 'uri', 'add_two_ints'], env=env,
                 stdout=PIPE).communicate()[0]
    self.assert_(uri2.startswith('rosrpc://'))
    self.assertNotEquals(uri1, uri2)
    # test_call_wait
    def task1():
        # Background call that blocks until wait_two_ints is advertised.
        output = Popen([cmd, 'call', '--wait', 'wait_two_ints', '1', '2'],
                       stdout=PIPE).communicate()[0]
        self.assertEquals('sum: 3', output.strip())
    timeout_t = time.time() + 5.
    t1 = TestTask(task1)
    t1.start()
    rospy.init_node('test_call_wait')
    rospy.Service("wait_two_ints", test_rosmaster.srv.AddTwoInts,
                  lambda x: x.a + x.b)
    # Poll until the background call completes or the 5s timeout expires.
    while not t1.done and time.time() < timeout_t:
        time.sleep(0.5)
    self.assert_(t1.success)