def bsh(self, cmd, out=False):
    """Run a bash command, raising on a non-zero exit code.

    Wraps the bash.bash method. If the command's exit code is not 0 this
    raises BashFailedWithNonZeroExit; handle that error if you don't want
    your script to fail on failed bash commands.

    :param str cmd: A bash shell command. Eg: "dig A {0}".format(domain).
    :param bool out: Print stdout to tty 1 instead of capturing it.
    :return: The bash method instance.
    """
    if out:
        # stdout=1 leaves the command's stdout attached to the terminal.
        b = bash(cmd, stdout=1)
        if b.code != 0:
            raise BashFailedWithNonZeroExit(cmd, b.stderr)
        else:
            return b
    else:
        b = bash(cmd)
        if b.code != 0:
            # Python 2 print statement — this module predates Python 3.
            print b.stderr
            raise BashFailedWithNonZeroExit(cmd, b.stderr)
        else:
            return b
def restartGSMModule():
    """Power-cycle the GSM module over the serial line.

    NOTE(review): the bare `return;` on the first line deliberately disables
    this whole function — everything below is dead code. Remove the early
    return to re-enable it (presumably it was parked during debugging;
    confirm before deleting).
    """
    return;
    print "(Presumably) Shutting off GSM Module"
    ser = serial.Serial('/dev/ttyAMA0', 9600, timeout = 1)
    # Flush aggressively; the original author noted stale bytes sometimes
    # remain in the input buffer.
    ser.flush()
    ser.flushInput()
    ser.flushInput()
    ser.flushInput()  #I feel there is need of more flushing as sometimes th$
    ser.flushOutput()
    time.sleep(1)
    bash("/home/pi/weatherStation/startGSMModule.py")
    sleep(0.1)
    resp = ser.read(200)
    print resp
    log.write(resp)
    if "DOWN" in resp:  #i.e. it contains NORMAL POWER DOWN
        log.write("GSM Module successfully NORMALLY POWERED DOWN")
        sleep(5)
        bash("/home/pi/weatherStation/startGSMModule.py")
        sleep(30)
    else:
        print "Looks like GSM Module was already turned off and now is turned on"
        log.write("Looks like GSM Module was already turned off and now is turned on")
        sleep(30)
    ser.close()
    return
def initMachine(machineInfo):
    """Recreate machineStatusDir/machineStatus.config marking all machines free.

    Each line of the config is "<machineName> 0"; 0 means available,
    1 means occupied. Always returns None.

    :param machineInfo: dict keyed by machine name.
    """
    #create a dir and a file for machineStatus
    #0 means available, 1 means occupied
    try:
        # Remove any previous status directory, then start fresh.
        bash('test -d machineStatusDir && rm -r machineStatusDir')
        bash('mkdir machineStatusDir')
        with open("machineStatusDir/machineStatus.config",'w') as f:
            for machineName in machineInfo.keys():
                f.write(machineName + " 0\n")
    except:
        # NOTE(review): bare except swallows every error (IOError, typos, …)
        # — consider narrowing and logging the actual exception.
        print "encounter troubles when rm machineStatusDir or mkdir machineStatusDir"
    return None
def run():
    """Main game REPL: read a command and act on it until the user exits.

    Depends on module globals: `running` (loop flag), `check_guide()` and
    `bash()`. Python 2 module (print statements, raw_input).
    """
    while running:
        # NOTE: 'input' shadows the builtin; kept as-is.
        input = raw_input("\nWhat would you like to do? >")
        print ""
        if input == "check guide":
            check_guide()
        elif input == "look":
            print "You quickly look at your surroundings. On first glance you see:"
            print bash('ls')
        elif input == "look up":
            print "You look up above you and see:"
            print bash('ls ..')
        elif input == "look around":
            print "You take a good look around you, stopping to make note of what you see:"
            print bash('ls -a')
        elif input == "bash":
            # Nested mini-shell: run raw bash commands until the user types "exit".
            bash_input = True
            while bash_input:
                bash_command = raw_input("$")
                if bash_command == "exit":
                    bash_input = False
                else:
                    print bash(bash_command)
        elif input == "exit":
            print "there is no escape"
            sys.exit()
        else:
            print "Please enter a valid command"
def run_and_get_return_code(command):
    """Execute *command* through bash() and return its exit status code."""
    return bash(command).code
def get_files(commit_only=True, copy_dest=None):
    """Get copies of files for analysis.

    With commit_only, only staged (non-deleted) paths are considered;
    otherwise every tracked file in HEAD. Returns a list of copied paths,
    or [] when nothing matches.
    """
    if commit_only:
        # Staged, non-deleted paths from the index.
        listing_cmd = ("git diff --cached --name-status | "
                       "grep -v -E '^D' | "
                       "awk '{ print ( $(NF) ) }' ")
    else:
        listing_cmd = "git ls-tree --name-only --full-tree -r HEAD"
    listing = bash(listing_cmd).value().strip()
    if not listing:
        return []
    return create_fake_copies(listing.split('\n'), copy_dest)
def run_and_return_output(command, command_for_print=None, print_command=False, confirm_prompt=False, print_output=False):
    """Run *command* via bash() and return its output string.

    Optionally echoes a display version of the command, asks for
    confirmation first (returning None when declined), and pretty-prints
    the captured output.
    """
    if command_for_print:
        print('\n> Running command:\n')
        print(command_for_print + '\n')
    if print_command:
        print('\n> Running command:\n')
        print(command + '\n')
    # Guard clause: bail out early when the user declines.
    if confirm_prompt and not confirm('>> Do you want to continue?'):
        print('## Command did not run.')
        return
    output = bash(command).value()
    if print_output:
        rule = '\n' + 44 * '-'
        print('\n## Output:')
        print(rule)
        print('')
        print(output)
        print(rule)
    return output
def __init__(self, bash_cmd):
    """Execute *bash_cmd* immediately and store its result on the object.

    The result code and stdout/stderr are kept inside the object. You can
    use this object like this::

        bash_wrapper = BashWrapper("ls /tmp")
        if bash_wrapper:
            print("execution was ok (status_code == 0)")
        else:
            print("execution was not ok (status_code != 0)")
        status_code = bash_wrapper.code
        stdout_output = bash_wrapper.stdout
        stderr_output = bash_wrapper.stderr
        print("full representation with command/code/stdout/stderr: %s"
              % bash_wrapper)

    Args:
        bash_cmd (string): complete bash command to execute.
    """
    self.__bash_cmd = bash_cmd
    # The command runs synchronously here, at construction time.
    self.__bash_result_object = bash.bash(bash_cmd)
def test_sync_false_does_not_wait(self):
    """bash(sync=False) must return immediately; .sync() then blocks."""
    started = datetime.now()
    proc = bash('sleep 0.5; echo 1', sync=False)
    elapsed = (datetime.now() - started).total_seconds()
    # The constructor must not have waited for the sleep to finish.
    self.assertTrue(elapsed < 0.5)
    proc.sync()
    self.assertEqual(proc.stdout, b'1\n')
def pipe():
    """Feed FQ_FILE_NAME to every command in PIPES; map command -> output.

    The value is the command's stderr when it failed, stdout otherwise.
    """
    results = {}
    for command in PIPES:
        proc = bash("{0} << {1}".format(command, FQ_FILE_NAME))
        results[command] = proc.stderr if proc.code != 0 else proc.stdout
    return results
def retrieve_commits(path, mode="stat"):
    """
    Takes a file path string and a mode string and produces the git log for
    the specified directory.

    **Parameters** :

    > *path* : `string` — path to the target git repository.

    > *mode* : `string` — one of "basic" (git log), "raw" (git log --raw),
    > "stat" (git log --stat, the default).

    **Return** :

    > A large string containing the raw output from the repository's git log,
    > prefixed with a "Mode =" header.
    """
    print("Attempting local git log retrieval...")
    # Log command modes, referenced by "mode" input.
    log_commands = {"basic": "git log",
                    "raw": "git log --raw",
                    "stat": "git log --stat"}
    if mode not in log_commands:
        raise InputError("{} is not a valid retrieval mode.".format(mode))
    # Save the current directory and navigate to the target; the finally
    # block guarantees we return even when an error is raised (the original
    # left the process stranded in `path` on failure).
    work_dir = os.getcwd()
    os.chdir(path)
    try:
        raw_logs = sh.bash(log_commands[mode]).stdout.decode("utf-8", "ignore")
        if len(raw_logs) == 0:
            print("Raising error.")
            # BUG FIX: the repo check previously ran after chdir-ing back to
            # work_dir (so it inspected the wrong directory) and the condition
            # was inverted — "true" from rev-parse means path IS a repository.
            in_repo = "true" in str(
                sh.bash("git rev-parse --is-inside-work-tree").stdout)
            if not in_repo:
                raise RepositoryError("{} is not a Git repository.".format(path))
            raise RepositoryError("{} has no commits.".format(path))
    finally:
        os.chdir(work_dir)
    # If the retrieval was successful, print a summary.
    print("Got {} characters from: {}".format(len(raw_logs), path))
    # Record the retrieval mode.
    return "Mode =\n{}\n".format(mode) + raw_logs
def write_with_curl_from(self, url):
    """Download *url* into self.file_path by shelling out to curl."""
    from bash import bash
    command = "curl '{url}' -o {file_path}".format(url=url, file_path=self.file_path)
    # Echo the command and curl's result for operator visibility.
    print(command)
    print(bash(command))
def parseInput(input): """translates user input into (logical notation) - parse works recursively, applying functions and operators - variables in logical form are stored in local_vars - these variables are tied to an attribute list - a variable can represent an existing shape or a hypothetical shape """ # parse=bash("sh ../bitpar/parse '"+input+"'") # ouput: [.VP [.V draw][.NP [.D a][.N-bar [.N square]]]] bash("java -jar ../lambda/lambda-auto.jar ../lambda/input.txt > ../lambda/input.tex") fml=bash("make -C ../lambda input.fml") print fml cmd=`fml`.split('true ')[1] # TEST CASES # cmd="draw(Gy[red(y) & square(y)])" cmd="draw(\gamma y(red(y) & square(y)))." print cmd parse(cmd)
def test_bash_stderr(self):
    """A missing command yields empty stdout, a shell error, and code 127."""
    outcome = bash('./missing_command')
    self.assertEqual(outcome.stdout, b'')
    expected_errors = [
        # Mac OSX
        b'/bin/sh: ./missing_command: No such file or directory\n',
        # Travis
        b'/bin/sh: 1: ./missing_command: not found\n',
    ]
    self.assertTrue(outcome.stderr in expected_errors)
    self.assertEqual(outcome.code, 127)
def scan_for_requiredbranches_list(reponame, requiredbranches, WD):
    """Report pass/fail for each required branch of the cloned repo.

    Lists all branches of {tempdir}{reponame}/{reponame} and checks each
    entry of *requiredbranches* against that listing via printer().
    """
    print(spacer)
    print("scanning requiredbranches_list")
    for required in requiredbranches:
        listing = str(bash(f"cd {tempdir}{reponame}/{reponame}; git branch -a"))
        verdict = "pass" if required in listing else "fail"
        printer(["branch", required, verdict])
# NOTE(review): this block is corrupted in the source — the literal '******'
# runs mark places where string arguments (console prompts and password
# sendline() calls) were redacted, leaving the code syntactically invalid.
# It is kept verbatim below; restore the redacted expect()/sendline()
# arguments from the original project before using it.
def follow_node_installation(self): """This function is intended to follow nodes installation""" user_name = config.get('credentials', 'STX_DEPLOY_USER_NAME') password = config.get('credentials', 'STX_DEPLOY_USER_PSWD') LOG.info('Node %s: Following node installation.', self.name) installation = pexpect.spawn( ('ipmitool -I lanplus -H {node_bmc_ip} ' '-U {node_bmc_user} -P {node_bmc_pswd} ' 'sol activate').format(node_bmc_ip=self.bmc_ip, node_bmc_user=self.bmc_user, node_bmc_pswd=self.bmc_pswd)) installation.logfile = open( '{}/iso_setup_installation.txt'.format(LOG_PATH), 'wb') installation.timeout = int(config.get('iso_installer', 'BOOT_TIMEOUT')) installation.expect('Start PXE over IPv4.') LOG.info('Node %s: Trying to boot using PXE', self.name) installation.expect('Linux version') LOG.info('Node %s: Loading Linux Kernel', self.name) installation.expect('Welcome to') LOG.info('Node %s: CentOS have been loaded', self.name) installation.expect('Starting installer, one moment...') LOG.info('Node %s: Starting installer ...', self.name) installation.expect('Performing post-installation setup tasks') LOG.info('Node %s: Performing post-installation setup tasks', self.name) installation.expect('login:'******'Node %s: the system boot up correctly', self.name) LOG.info('Node %s: logging into the system', self.name) installation.sendline(user_name) installation.expect('Password:'******'Node %s: setting a new password', self.name) installation.expect('UNIX password:'******'New password:'******'Retype new password:'******'$') LOG.info('Node %s: the password was changed successfully', self.name) installation.close() LOG.info( 'Node %s: Closing SOL session after successfully ' 'installation', self.name) deactivate_sol = bash( ('ipmitool -I lanplus -H {node_bmc_ip} ' '-U {node_bmc_user} -P {node_bmc_pswd} ' 'sol deactivate').format(node_bmc_ip=self.bmc_ip, node_bmc_user=self.bmc_user, node_bmc_pswd=self.bmc_pswd)) if not deactivate_sol.stderr: LOG.info('Node %s: SOL session
closed successfully', self.name)
def check_guest_kernel_bridge_result():
    """Dump br0/eth1/eth2 interface state inside the guest and log it.

    Runs the diagnostic commands over the guest domain "gg"'s virsh tty
    console.
    """
    # NOTE(review): the original layout of this multi-line command string
    # was lost in the source; one command per line is assumed.
    cmd = f"""
    ip -d link show br0
    ifconfig br0
    ifconfig eth1
    ifconfig eth2
    """
    pts = bash("virsh ttyconsole gg").value()
    ret = my_tool.run_cmd_get_output(pts, cmd)
    log(ret)
    pass
def sriov_pci_passthrough_test(q_num, pkt_size, cont_time):
    """Run the SR-IOV PCI passthrough PVP performance test.

    :param q_num: queue count; 1 pins 3 vcpus and uses the one-queue image,
        anything else pins 5 vcpus and uses the two-queue image.
    :param pkt_size: packet size handed to the trex traffic generator.
    :param cont_time: duration of the traffic run.
    :return: 0 on completion.
    """
    clear_env()
    numa_node = bash("cat /sys/class/net/{}/device/numa_node".format(
        get_env("NIC1_VF"))).value()
    vcpu_list = [get_env("VCPU1"), get_env("VCPU2"), get_env("VCPU3")]
    if q_num != 1:
        vcpu_list = [
            get_env("VCPU1"),
            get_env("VCPU2"),
            get_env("VCPU3"),
            get_env("VCPU4"),
            get_env("VCPU5")
        ]
    new_xml = "g1.xml"
    vcpupin_in_xml(numa_node, "guest.xml", new_xml, vcpu_list)
    # clear the old hostdev config and update xml file
    xml_tool.remove_item_from_xml(new_xml,
                                  "./devices/interface[@type='hostdev']")
    # Because of the limit of the p35 architecture two VFs can not be added
    # to the VM at once, so as a workaround the VF is attached afterwards
    # with virsh attach-device.
    one_queue_image_name = os.path.basename(get_env("ONE_QUEUE_IMAGE"))
    two_queue_image_name = os.path.basename(get_env("TWO_QUEUE_IMAGE"))
    if q_num == 1:
        xml_tool.update_image_source(new_xml,
                                     image_dir + "/" + one_queue_image_name)
    else:
        xml_tool.update_image_source(new_xml,
                                     image_dir + "/" + two_queue_image_name)
    start_guest(new_xml)
    # Attach the VF to the running VM.
    attach_sriov_vf_to_vm(new_xml, "gg")
    # BUG FIX: `configure_guest` was referenced without parentheses, so the
    # guest was never actually configured.
    configure_guest()
    log("guest start testpmd test Now")
    if q_num == 1:
        guest_cpu_list = "0,1,2"
    else:
        guest_cpu_list = "0,1,2,3,4"
    guest_start_testpmd(q_num, guest_cpu_list, get_env("SRIOV_RXD_SIZE"),
                        get_env("SRIOV_TXD_SIZE"), pkt_size, "mac")
    log("sriov pci passthrough PVP performance test Begin Now")
    bonding_test_trex(cont_time, pkt_size, "52:54:00:11:8f:ea",
                      "52:54:00:11:8f:eb")
    check_guest_testpmd_result()
    return 0
def conf_checks():
    """Verify host kernel/tuned configuration required for the tests.

    Checks intel_iommu=on on the kernel command line, the active
    cpu-partitioning tuned profile, and nohz_full isolation. Returns 0 when
    everything passes, 1 at the first failed check.
    """
    proc_cmdline_info = local.path("/proc/cmdline").read()
    log(proc_cmdline_info)
    # Guard clauses: log and bail at the first misconfiguration.
    if "intel_iommu=on" not in proc_cmdline_info:
        log("Iommu Enablement" "Please enable IOMMU mode in your grub config")
        return 1
    log("Check intel_iommu=on SUCCESS")
    if bash("tuned-adm active | grep cpu-partitioning").value() == '':
        log("Tuned-adm cpu-partitioning profile must be active")
        return 1
    log("tuned-adm active OK")
    if bash(""" cat /proc/cmdline | grep "nohz_full=[0-9]" """).value() == '':
        log("Tuned Config Must set cores to isolate in tuned-adm profile")
        return 1
    log("nohz_full flag check is OK")
    return 0
def network_connection_check():
    """Ping people.redhat.com to verify internet connectivity.

    :return: 0 when the ping succeeds, 1 otherwise.
    """
    log("*** Checking connection to people.redhat.com ***")
    ret = bash("ping -c 10 people.redhat.com")
    log(ret)
    if ret.code == 0:
        log("*** Connection to server succesful ***")
        return 0
    log("People.redhat.com connection fail !!!!")
    log("Cannot connect to people.redhat.com, please verify internet connection !!!")
    return 1
    # CLEANUP: a trailing unreachable `return 0` was removed.
def create_fake_copies(files, destination):
    """Create copies of the given list of files in the destination given.

    Creates copies of the actual (staged) file contents using
    ``git show :<filename>`` so analysis sees what will be committed, not
    what is on disk.

    :param files: iterable of repo-relative file paths.
    :param destination: directory to place the copies in.
    :return: list of absolute paths to the copies.
    """
    dest_files = []
    for filename in files:
        leaf_dest_folder = os.path.join(destination, os.path.dirname(filename))
        if not os.path.exists(leaf_dest_folder):
            os.makedirs(leaf_dest_folder)
        dest_file = os.path.join(destination, filename)
        # BUG FIX: the format string previously read "git show :(unknown)" —
        # the {filename} placeholder was missing, so the staged blob was
        # never selected.
        bash("git show :{filename} > {dest_file}".format(
            filename=filename, dest_file=dest_file)
        )
        dest_files.append(os.path.realpath(dest_file))
    return dest_files
def configure_network_interfaces():
    """Configure network interfaces.

    Brings up the virbr[1-4] bridges and installs the NAT POSTROUTING rule.

    Raises EnvironmentError when any command reports 'ERROR' on stderr.
    """
    networks = ['virbr1 10.10.10.1/24', 'virbr2 192.168.204.1/24',
                'virbr3', 'virbr4']
    for net in networks:
        result = bash('sudo ifconfig {} up'.format(net))
        if 'ERROR' in result.stderr:
            LOG.error(result.stderr)
            raise EnvironmentError(result.stderr)
    # setting the ip tables
    nat_rule = ('sudo iptables -t nat -A POSTROUTING -s 10.10.10.0/24 -j '
                'MASQUERADE')
    result = bash(nat_rule)
    if 'ERROR' in result.stderr:
        LOG.error(result.stderr)
        raise EnvironmentError(result.stderr)
def make_systemd_service(name, description, service_user, start_command, service_type='simple', restart_on_fail=True, target='multi-user'):
    """Write /etc/systemd/system/<name>.service and enable/start it.

    Renders the module-level ``base_str`` template with this function's
    locals, refuses to overwrite an existing unit, then runs systemctl
    enable/start/status, echoing each command's output.

    :raises AssertionError: if the unit file already exists.
    """
    # BUG FIX: restart_block was only assigned when restart_on_fail was True,
    # so base_str.format(**locals()) raised KeyError for restart_on_fail=False.
    restart_block = "Restart=on-failure" if restart_on_fail else ""
    service_str = base_str.format(**locals())
    path = f"/etc/systemd/system/{name}.service"
    assert not os.path.exists(
        path), f"Service at {path} already exists, won't overwrite."
    with open(path, 'w') as ff:
        ff.write(service_str)
    # enable, start, then report status — echoing stdout/stderr of each.
    for verb, action in (("enabling", "enable"),
                         ("starting", "start"),
                         ("statusing", "status")):
        print(f"{verb} service")
        result = bash(f'sudo systemctl {action} {name}')
        print(result.stdout)
        print(result.stderr)
def run_in_container(cmd):
    """Run *cmd* inside the test container.

    :param cmd: the command that will be run inside the container.
    :return: tuple of (stdout, stderr) from the command.
    """
    docker_cmd = ('docker exec --interactive --user={0} -e MYUNAME={0} {1} script '
                  '-q -c "{2}" /dev/null'.format(
                      CURRENT_USER, TC_CONTAINER_NAME, cmd))
    proc = bash(docker_cmd)
    return proc.stdout, proc.stderr
def test_encrypt__specific_file(self):
    """Encrypting one explicit file produces <name>.szczyp beside it.

    Also checks that the encryption key file is created and that nothing
    unexpected appears in the working directory.
    """
    # NOTE(review): the original indentation inside this YAML literal was
    # lost in the source; a plausible nesting is assumed — verify against
    # the upstream project.
    self.root_dir.join('a.yml').write(
        textwrap.dedent('''
            a:
              b: true
              c: 12
        ''').strip())
    bash('git init')
    self.root_dir.join('.gitignore').write('.szczypiorek_encryption_key\n'
                                           'a.yml\n')
    result = self.runner.invoke(
        cli, ['encrypt', str(self.root_dir.join('a.yml'))])
    assert result.exit_code == 0
    assert sorted(self.root_dir.listdir()) == [
        str(self.root_dir.join('.git')),
        str(self.root_dir.join('.gitignore')),
        str(self.root_dir.join('.szczypiorek_encryption_key')),
        str(self.root_dir.join('a.szczyp')),
        str(self.root_dir.join('a.yml')),
    ]
def write_from_file(self, file_path, header=None):
    """Upload *file_path* to this object's GCS url via ``gsutil cp``.

    :param file_path: local file to upload.
    :param header: optional extra gsutil flags (e.g. '-h "..."'); omitted
        from the command when None.
    """
    from bash import bash
    # BUG FIX: a None header was previously interpolated as the literal
    # string "None" into the gsutil command line.
    command = 'gsutil {header} cp {file_path} {gcs_url}'.format(
        header=header or '',
        file_path=file_path,
        gcs_url=self.url
    )
    print(command)
    bash_result = bash(command)
    print(bash_result)
def gen_pdm_and_pack_ref(input_file_name, output_file_name):
    """Generate PDM mic signals from a wav file and pack them with a reference.

    Upsamples wav channels 1-2 to the PDM rate and sigma-delta modulates
    them (via pcm2pdm), extracts channels 3-4 as a raw 32-bit reference,
    packs everything into a raw file, and converts that to *output_file_name*.

    Depends on module globals: pdm_rate, pcm2pdm(), pack_pdm_and_ref(),
    delete_temp_files, and the sox/soxi binaries on PATH.
    """
    # Just grab the wav sample rate
    _data, input_rate = sf.read(input_file_name, frames=1)
    cmd = f"soxi -r {input_file_name}"
    # NOTE(review): float(...stdout) assumes this bash wrapper's stdout is
    # str-like (float() rejects bytes) — confirm against the wrapper in use.
    input_rate = float(bash(cmd).stdout)
    cmd = f"soxi -D {input_file_name}"
    len_s = float(bash(cmd).stdout)
    # turn ref into raw 2ch 32b PCM files for later
    print("Extracting reference signal..")
    reference_file = "reference.raw"
    cmd = f"sox {input_file_name} -b 32 -e signed-integer {reference_file} remix 3 4"
    bash(cmd)
    # Generate PDM by upsampling then modulating, 1 channel at a time to
    # avoid large TMP files
    pdm_files = []
    for channel in [1, 2]:
        print(f"Upsampling mic channel {channel}..")
        # assert len_s <= 359.0, f"Sox limitation on 2^30 samples which is 349s 3072000MHz mono wav (len: {len_s})"
        # pcm_file = f"pcm_upsampled_ch_{channel}.wav"
        # cmd = f"sox {input_file_name} -r {pdm_rate} {pcm_file} remix {channel}"
        pcm_file = f"pcm_upsampled_ch_{channel}.raw"
        cmd = f"sox {input_file_name} -r {pdm_rate} -t s32 {pcm_file} remix {channel}"
        bash(cmd)
        print(f"Converting mic channel {channel} to PDM..")
        pdm_file = f"pcm_ch_{channel}.pdm"
        pcm2pdm(pcm_file, pdm_file)
        pdm_files.append(pdm_file)
        if delete_temp_files:
            os.remove(pcm_file)
    print("Packing PCM and PDM files to raw..")
    output_raw_file = "boggled.raw"
    pack_pdm_and_ref(input_rate, pdm_files, reference_file, output_raw_file)
    for pdm_file in pdm_files:
        if delete_temp_files:
            os.remove(pdm_file)
    if delete_temp_files:
        os.remove(reference_file)
    # 16 kHz input packs 26 output channels, otherwise 10.
    out_chans = 26 if input_rate == 16000 else 10
    print("Converting raw to wav..")
    cmd = f"sox -b 32 -e signed-integer -c {out_chans} -r {input_rate} {output_raw_file} {output_file_name}"
    bash(cmd)
    if delete_temp_files:
        os.remove(output_raw_file)
def create_usr(usr_dic):
    """Interactively register a new user: unique name, md5 password, home dir.

    :param usr_dic: dict of existing user names (keys) used for the
        duplicate check. (Prompts remain in Chinese — they are user-facing
        runtime strings.)
    """
    #! Reject duplicate user names.
    new_id = input('请输入用户名')
    while new_id in usr_dic.keys():
        print("用户名重复")
        new_id = input("请输入用户名")
    #! Hash the password.
    # NOTE(review): unsalted md5 is not a safe password hash.
    new_pwd = input("请输入密码")
    new_md5 = hashlib.md5()
    new_md5.update(new_pwd.encode('utf-8'))
    md_pwd = new_md5.hexdigest()
    #! Append "name hash" to the user DB file.
    new_usr = new_id + " " + md_pwd +'\n'
    with open(r'../DB/user', 'a', encoding='utf-8', newline=None) as f:
        f.write(new_usr)
    print("注册成功!")
    #! Create the user's home directory.
    # NOTE(review): new_id is interpolated unsanitized into a shell command.
    cmd = 'mkdir ../user_dir/' + new_id
    bash.bash(cmd)
    print("创建用户目录")
def delete_network_interfaces(): """Delete network interfaces This function performs a clean up for the following network interfaces stxbr[1-4] """ # elevate module re-launches the current process with root/admin privileges # using one of the following mechanisms : sudo (Linux, macOS) # becoming in root elevate(graphical=False) ifdata = NetIfs() # Destroy NAT network if exist try: bash('sudo virsh net-destroy {}'.format('stx-nat')) bash('sudo virsh net-undefine {}'.format('stx-nat')) except IOError: LOG.warning('NAT network not found') for interface in range(1, 5): net_if = 'stxbr{}'.format(interface) if net_if in ifdata.interfaces: if ifdata.is_up(net_if) or \ ifdata.is_running(net_if): # the network interface is up or running try: # down and delete the network interface ifconfig.Interface(net_if).down() brctl.Bridge(net_if).delete() except IOError: LOG.warning('[Errno 19] No such device: ' '%s', net_if)
def ovs_kernel_datapath_test(q_num, pkt_size, cont_time):
    """Run the OVS kernel datapath PVP performance test.

    :param q_num: queue count; 1 pins 3 vcpus and uses the one-queue image,
        anything else pins 5 vcpus and uses the two-queue image.
    :param pkt_size: packet size for the trex traffic run.
    :param cont_time: traffic duration.
    :return: 0 on completion.
    """
    clear_env()
    nic1_name = get_env("NIC1")
    nic2_name = get_env("NIC2")
    numa_node = bash(
        f"cat /sys/class/net/{nic1_name}/device/numa_node").value()
    # CLEANUP: both branches previously made the identical
    # ovs_bridge_with_kernel() call (and one ended in a stray `pass`);
    # only the vcpu list actually depends on q_num.
    if q_num == 1:
        vcpu_list = [get_env("VCPU1"), get_env("VCPU2"), get_env("VCPU3")]
    else:
        vcpu_list = [
            get_env("VCPU1"),
            get_env("VCPU2"),
            get_env("VCPU3"),
            get_env("VCPU4"),
            get_env("VCPU5")
        ]
    ovs_bridge_with_kernel(nic1_name, nic2_name)
    new_xml = "g1.xml"
    vcpupin_in_xml(numa_node, "guest.xml", new_xml, vcpu_list)
    update_xml_vnet_port(new_xml)
    one_queue_image_name = os.path.basename(get_env("ONE_QUEUE_IMAGE"))
    two_queue_image_name = os.path.basename(get_env("TWO_QUEUE_IMAGE"))
    if q_num == 1:
        xml_tool.update_image_source(new_xml,
                                     image_dir + "/" + one_queue_image_name)
    else:
        xml_tool.update_image_source(new_xml,
                                     image_dir + "/" + two_queue_image_name)
    start_guest(new_xml)
    configure_guest()
    guest_start_kernel_bridge()
    log("ovs kernel datapath PVP performance test Begin Now")
    trex_port_1 = get_env("TRAFFICGEN_TREX_PORT1")
    trex_port_2 = get_env("TRAFFICGEN_TREX_PORT2")
    bonding_test_trex(cont_time, pkt_size, trex_port_1, trex_port_2)
    check_guest_kernel_bridge_result()
    return 0
def configure_guest():
    """Open every firewall policy and flush all tables inside the guest.

    Detaches eth1/eth2 from NetworkManager, stops firewalld, sets every
    iptables/ip6tables chain policy to ACCEPT and flushes/deletes all
    rules, then dumps addresses. Runs over the guest domain "gg"'s virsh
    tty console. Returns 0.
    """
    # NOTE(review): one command per line assumed; original string layout
    # was lost in the source.
    cmd = """
    stty rows 24 cols 120
    nmcli dev set eth1 managed no
    nmcli dev set eth2 managed no
    systemctl stop firewalld
    iptables -t filter -P INPUT ACCEPT
    iptables -t filter -P FORWARD ACCEPT
    iptables -t filter -P OUTPUT ACCEPT
    iptables -t mangle -P PREROUTING ACCEPT
    iptables -t mangle -P INPUT ACCEPT
    iptables -t mangle -P FORWARD ACCEPT
    iptables -t mangle -P OUTPUT ACCEPT
    iptables -t mangle -P POSTROUTING ACCEPT
    iptables -t nat -P PREROUTING ACCEPT
    iptables -t nat -P INPUT ACCEPT
    iptables -t nat -P OUTPUT ACCEPT
    iptables -t nat -P POSTROUTING ACCEPT
    iptables -t filter -F
    iptables -t filter -X
    iptables -t mangle -F
    iptables -t mangle -X
    iptables -t nat -F
    iptables -t nat -X
    ip6tables -t filter -P INPUT ACCEPT
    ip6tables -t filter -P FORWARD ACCEPT
    ip6tables -t filter -P OUTPUT ACCEPT
    ip6tables -t mangle -P PREROUTING ACCEPT
    ip6tables -t mangle -P INPUT ACCEPT
    ip6tables -t mangle -P FORWARD ACCEPT
    ip6tables -t mangle -P OUTPUT ACCEPT
    ip6tables -t mangle -P POSTROUTING ACCEPT
    ip6tables -t nat -P PREROUTING ACCEPT
    ip6tables -t nat -P INPUT ACCEPT
    ip6tables -t nat -P OUTPUT ACCEPT
    ip6tables -t nat -P POSTROUTING ACCEPT
    ip6tables -t filter -F
    ip6tables -t filter -X
    ip6tables -t mangle -F
    ip6tables -t mangle -X
    ip6tables -t nat -F
    ip6tables -t nat -X
    ip -d addr show
    """
    pts = bash("virsh ttyconsole gg").value()
    ret = my_tool.run_cmd_get_output(pts, cmd)
    log(ret)
    return 0
def rpm_check():
    """Check that all RPMs needed for the DPDK/qemu environment are present.

    Returns 0 when everything is installed, 1 at the first missing package.
    On system_version_id < 80 (pre-RHEL8) qemu-kvm-tools is required,
    otherwise kernel-tools.
    """
    log("*** Checking for installed RPMS ***")
    if bash("rpm -qa | grep ^openvswitch").value() == "":
        log("Openvswitch rpm" "Please install Openvswitch rpm")
        return 1
    else:
        log("Openvswitch rpm check OK")
    if bash("rpm -qa | grep dpdk-tools").value() == "":
        log("Please install dpdk tools rpm ")
        return 1
    else:
        log("dpdk tools check OK ")
    if bash("rpm -qa | grep dpdk-[0-9]").value() == "":
        log("Please install dpdk package rpm ")
        return 1
    else:
        log("dpdk package check OK")
    # Logged three times — presumably deliberate emphasis for the operator.
    log("Please make sure qemu-kvm qemu-kvm-tools version >= 2.12 !!!!")
    log("Please make sure qemu-kvm qemu-kvm-tools version >= 2.12 !!!!")
    log("Please make sure qemu-kvm qemu-kvm-tools version >= 2.12 !!!!")
    if system_version_id < 80:
        if bash("rpm -qa | grep qemu-kvm-tools").value() == "":
            log("Please install qemu-kvm-tools rpm ")
            return 1
        else:
            log("qemu-kvm-tools check OK")
    else:
        if bash("rpm -qa | grep kernel-tools").value() == "":
            log("Please install kernel-tools rpm ")
            return 1
        else:
            log("kernel-tools check OK")
    if bash("rpm -qa | grep qemu-img").value() == "":
        log("Please install qemu-img rpm ")
        return 1
    else:
        log("qemu-img package check OK")
    if bash("rpm -qa | grep qemu-kvm").value() == "":
        log("Please install qemu-kmv rpm ")
        return 1
    else:
        log("qemu-kvm package check OK")
    return 0
def bonding_test_trex(t_time, pkt_size):
    """Drive a trex traffic run from case_path.

    Downloads and unpacks trex v2.49 on first use, then launches
    trex_sport.py against the configured trex server for *t_time* seconds
    at *pkt_size* bytes.
    """
    trex_server_ip = get_env("TRAFFICGEN_TREX_HOST_IP_ADDR")
    with pushd(case_path):
        # Warn (but continue) if the trex server does not answer pings.
        ret = bash(f"ping {trex_server_ip} -c 3")
        if ret.code != 0:
            log("Trex server {} not up please check ".format(trex_server_ip))
        trex_url = "http://netqe-bj.usersys.redhat.com/share/wanghekai/v2.49.tar.gz"
        trex_dir = os.path.basename(trex_url).replace(".tar.gz", "")
        trex_name = os.path.basename(trex_url)
        if not os.path.exists(trex_dir):
            run(f"wget {trex_url} > /dev/null 2>&1")
            run(f"tar -xvf {trex_name} > /dev/null 2>&1")
        log(f"python ./trex_sport.py -c {trex_server_ip} -t {t_time} --pkt_size={pkt_size} -m 10")
        # BUG FIX: the executed command previously had mangled flags
        # ("- c", "- t", "- -pkt_size") that did not match the logged
        # command and would not be parsed by trex_sport.py.
        run(f"python ./trex_sport.py -c {trex_server_ip} -t {t_time} --pkt_size={pkt_size} -m 10")
def assert_is_git_ignored(filepath):
    """Return True when *filepath* is empty/None or git-ignored.

    :raises FileNotIgnoredError: when git does not ignore the path — a
        tracked context file may leak sensitive data.
    """
    if not filepath:
        return True
    result = bash(f'git check-ignore {filepath}')
    # git check-ignore prints the path on stdout when it IS ignored.
    if result.stdout:
        return True
    else:
        # NOTE(review): the original line wrapping of this message was lost
        # in the source; content preserved.
        raise FileNotIgnoredError(f"""
            Well it seems that the '{filepath}' is not git ignored. Since it
            appears in the context there's a big chance that it contains some
            sensitive data. Please add it to the '.gitignore' and stop
            tracking it.
        """)
def filter_python_files(files):
    """Get all python files from the list of files.

    A file qualifies when it ends in .py, or — for extensionless files —
    when its first line mentions python (shebang) or file(1) classifies it
    as a python script.

    :param files: iterable of file paths.
    :return: list of the paths that look like Python source.
    """
    py_files = []
    for f in files:
        extension = os.path.splitext(f)[-1]
        if extension:
            if extension == '.py':
                py_files.append(f)
        else:
            # Extensionless: sniff the shebang line first, then fall back
            # to file(1).
            # BUG FIX: the file handle from open(f).readline() was never
            # closed; use a context manager.
            with open(f, 'r') as handle:
                first_line = handle.readline()
            if 'python' in first_line:
                py_files.append(f)
            elif 'python script' in bash('file {}'.format(f)).value().lower():
                py_files.append(f)
    return py_files
def guest_start_testpmd(queue_num, cpu_list, rxd_size, txd_size):
    """Bind guest NICs to vfio-pci and launch testpmd over the guest console.

    Installs the dpdk rpms, reserves a 1G hugepage, rebinds eth1/eth2 to
    vfio-pci, then starts testpmd in io forward mode with the given queue
    count, ring sizes, and cpu list. Runs over domain "gg"'s tty console.
    """
    dpdk_ver = get_env("dpdk_ver")
    # NOTE(review): one command per line assumed; original string layout
    # was lost in the source.
    cmd = f"""
    /root/one_gig_hugepages.sh 1
    rpm -ivh /root/{dpdk_ver}/dpdk*.rpm
    modprobe -r vfio_iommu_type1
    modprobe -r vfio
    modprobe vfio
    modprobe vfio-pci
    ip link set eth1 down
    ip link set eth2 down
    dpdk-devbind -b vfio-pci 0000:03:00.0
    dpdk-devbind -b vfio-pci 0000:04:00.0
    dpdk-devbind --status
    """
    pts = bash("virsh ttyconsole gg").value()
    my_tool.run_cmd_get_output(pts, cmd)
    num_core = 2
    if queue_num == 1:
        num_core = 2
    else:
        num_core = 4
    hw_vlan_flag = "--disable-hw-vlan"
    legacy_mem = ""
    # NOTE(review): the quote after --auto-start leaves an unmatched double
    # quote in the shell command — looks like a typo; confirm upstream.
    cmd_test = f"""testpmd -l {cpu_list} \
    --socket-mem 1024 \
    {legacy_mem} \
    -n 4 \
    -- \
    --forward-mode=io \
    --port-topology=pair \
    {hw_vlan_flag} \
    --disable-rss \
    -i \
    --rxq={queue_num} \
    --txq={queue_num} \
    --rxd={rxd_size} \
    --txd={txd_size} \
    --nb-cores={num_core} \
    --auto-start" """
    my_tool.run_cmd_get_output(pts,cmd_test,"testpmd>")
    pass
def delete_slice(self, **kwargs):
    """Remove a client's slice and revert it to the default configuration.

    :param kwargs: expects 's_id', the client/slice identifier keyed into
        self.s_ids.
    :return: (True, {"s_id": s_id}) on success, (False, reason) on failure.
    """
    # Extract parameters from keyword arguments
    s_id = kwargs.get('s_id', None)
    # Get the client's MAC address
    s_mac = self.s_ids[s_id]["mac"]
    # Get the slice number
    i_sln = self.s_ids[s_id]["slice"]["number"]
    # Remove host from the DHCP subnet
    self.omapi.del_host(s_mac)
    # Set the slice in question
    bash("sdrctl dev {0} set slice_idx {1}".format(self.sdr_dev, i_sln))
    # Try to clear the slice
    cls = bash("sdrctl dev {0} set addr {1}".format(
        self.sdr_dev, "00000000")).code
    # If the last command failed
    if cls:
        # BUG FIX: previously referenced the undefined name `i_lsn`,
        # raising NameError instead of returning the error tuple.
        return False, "Could not remove MAC from slice #" + str(i_sln)
    # Set the default slice configuration
    s = bash("sdrctl dev {0} set slice_start {1}".format(self.sdr_dev,
                                                         0)).code
    e = bash("sdrctl dev {0} set slice_end {1}".format(
        self.sdr_dev, 49999)).code
    t = bash("sdrctl dev {0} set slice_total {1}".format(
        self.sdr_dev, 50000)).code
    # Sync all commands
    sync = bash("sdrctl dev {0} set slice_idx 4".format(self.sdr_dev)).code
    # If any of the previous commands failed
    if any([s, e, t, sync]):
        return False, "Failed reverting slice to default parameters."
    # Iterate over the slice list
    for i, x in enumerate(self.ran_slice_list):
        # If matching the slice number
        if x["index"] == i_sln:
            # Toggle flag
            self.ran_slice_list[i]['available'] = True
    # Return state
    return True, {"s_id": s_id}
def sriov_pci_passthrough_test(q_num, pkt_size, cont_time):
    """Run the SR-IOV PCI passthrough performance test (legacy variant).

    :param q_num: queue count; 1 pins 3 vcpus and uses the one-queue image,
        anything else pins 5 vcpus and uses the two-queue image.
    :param pkt_size: packet size for the trex traffic run.
    :param cont_time: traffic duration.
    """
    clear_env()
    numa_node = bash("cat /sys/class/net/{}/device/numa_node".format(
        get_env("NIC1_VF"))).value()
    vcpu_list = [get_env("VCPU1"), get_env("VCPU2"), get_env("VCPU3")]
    if q_num != 1:
        vcpu_list = [get_env("VCPU1"), get_env("VCPU2"), get_env("VCPU3"),
                     get_env("VCPU4"), get_env("VCPU5")]
    new_xml = "g1.xml"
    vcpupin_in_xml(numa_node, "guest.xml", new_xml, vcpu_list)
    update_xml_sriov_vf_port(new_xml, 0)
    if q_num == 1:
        xml_tool.update_image_source(new_xml,
                                     case_path + "/" + get_env("one_queue_image"))
    else:
        xml_tool.update_image_source(new_xml,
                                     case_path + "/" + get_env("two_queue_image"))
    start_guest(new_xml)
    # BUG FIX: `configure_guest` was referenced without parentheses, so the
    # guest was never actually configured.
    configure_guest()
    guest_start_testpmd(q_num, vcpu_list, get_env("SRIOV_RXD_SIZE"),
                        get_env("SRIOV_TXD_SIZE"))
    bonding_test_trex(cont_time, pkt_size)
def bonding_test_trex(t_time, pkt_size, dst_mac_one, dst_mac_two):
    """Run trex traffic for *t_time* at *pkt_size* toward the two dest MACs.

    Downloads and unpacks the trex bundle from TREX_URL on first use inside
    case_path, then launches trex_sport.py. Returns 0.
    """
    trex_server_ip = get_env("TRAFFICGEN_TREX_HOST_IP_ADDR")
    with pushd(case_path):
        # Warn (but continue) if the trex server does not answer pings.
        ret = bash(f"ping {trex_server_ip} -c 3")
        if ret.code != 0:
            log("Trex server {} not up please check ".format(trex_server_ip))
        trex_url = get_env("TREX_URL")
        trex_dir = os.path.basename(trex_url).replace(".tar.gz", "")
        trex_name = os.path.basename(trex_url)
        if not os.path.exists(trex_dir):
            # NOTE(review): command layout reconstructed; original lost.
            cmd = f"""
            wget {trex_url} > /dev/null 2>&1
            tar -xvf {trex_name} > /dev/null 2>&1
            """
            log_and_run(cmd)
        # NOTE(review): mid-function import kept as in the original;
        # statement nesting after the download block is an assumption.
        import time
        time.sleep(3)
        log_and_run(
            f"""
            python ./trex_sport.py -c {trex_server_ip} -d '{dst_mac_one} {dst_mac_two}' -t {t_time} --pkt_size={pkt_size} -m 10
            """
        )
    return 0
def process(event): """Process the event type :param event: this param has the following attributes event.event_type 'modified' | 'created' | 'moved' | 'deleted' event.is_directory True | False event.src_path path/to/observed/file """ # the file will be processed there # print event.src_path, event.event_type # print now only for debug last_line = bash('tail -2 {}'.format(event.src_path)) if 'LAST_CONSOLE_LINE' not in os.environ: os.environ['LAST_CONSOLE_LINE'] = last_line.stdout print('{}'.format(last_line.stdout)) elif os.environ.get('LAST_CONSOLE_LINE') != last_line.stdout: os.environ['LAST_CONSOLE_LINE'] = last_line.stdout print('{}'.format(last_line.stdout))
def config_controller(config_file):
    """Configure the master controller using its corresponding init file.

    Opens a serial-over-LAN console to controller-0 via ipmitool, runs
    `sudo config_controller --force --config-file <config_file>` and waits
    (up to CONFIG_CONTROLLER_TIMEOUT) for "Configuration was applied",
    then deactivates the SOL session.
    """
    config_controller_timeout = int(
        config.get('iso_installer', 'CONFIG_CONTROLLER_TIMEOUT'))
    nodes_file = os.path.join(os.environ['PYTHONPATH'], 'baremetal',
                              'baremetal_setup.yaml')
    # NOTE(review): the file handle passed to yaml.safe_load is never closed.
    nodes = yaml.safe_load(open(nodes_file))
    controller_0 = nodes['nodes']['controller-0']
    master_controller = Node(controller_0)
    # Attach to the controller's serial console through the BMC.
    serial_cmd = ('ipmitool -I lanplus -H {node_bmc_ip} -U {node_bmc_user} '
                  '-P {node_bmc_pswd} sol activate'.format(
                      node_bmc_ip=master_controller.bmc_ip,
                      node_bmc_user=master_controller.bmc_user,
                      node_bmc_pswd=master_controller.bmc_pswd))
    configuring_controller = pexpect.spawn(serial_cmd)
    configuring_controller.logfile = open(
        '{}/iso_setup_installation.txt'.format(LOG_PATH), 'wb')
    configuring_controller.sendline('\r')
    configuring_controller.expect(PROMPT)
    LOG.info('Applying configuration (this will take several minutes)')
    configuring_controller.sendline(
        'sudo config_controller --force --config-file {}'.format(config_file))
    configuring_controller.timeout = config_controller_timeout
    configuring_controller.expect('Configuration was applied')
    LOG.info(configuring_controller.before)
    configuring_controller.logfile.close()
    LOG.info('Closing the log')
    configuring_controller.close()
    # Deactivate SOL so the console is free for later sessions.
    closing_serial_connection = (bash(
        'ipmitool -I lanplus -H {node_bmc_ip} -U {node_bmc_user} '
        '-P {node_bmc_pswd} sol deactivate'.format(
            node_bmc_ip=master_controller.bmc_ip,
            node_bmc_user=master_controller.bmc_user,
            node_bmc_pswd=master_controller.bmc_pswd)))
    if closing_serial_connection.stderr:
        LOG.info(closing_serial_connection.stderr)
def guest_start_kernel_bridge():
    """Build a br0 kernel bridge over eth1/eth2 inside the guest and tune it.

    Enables ip_forward, applies the network-latency tuned profile and
    disables rp_filter. Runs over domain "gg"'s virsh tty console.
    """
    # NOTE(review): one command per line assumed; original string layout
    # was lost in the source.
    cmd = f"""
    brctl addbr br0
    ip addr add 192.168.1.2/24 dev eth1
    ip link set dev eth1 up
    brctl addif br0 eth1
    ip addr add 192.168.1.3/24 dev eth2
    ip link set dev eth2 up
    brctl addif br0 eth2
    ip addr add 1.1.1.5/16 dev br0
    ip link set dev br0 up
    # arp -s 1.1.1.10 3c:fd:fe:ad:bc:e8
    # arp -s 1.1.2.10 3c:fd:fe:ad:bc:e9
    sysctl -w net.ipv4.ip_forward=1
    yum install -y tuna
    tuned-adm profile network-latency
    sysctl -w net.ipv4.conf.all.rp_filter=0
    sysctl -w net.ipv4.conf.eth0.rp_filter=0
    """
    pts = bash("virsh ttyconsole gg").value()
    ret = my_tool.run_cmd_get_output(pts, cmd)
    log(ret)
    pass
def rpm_check():
    """Check that all RPMs needed for the DPDK/qemu environment are present.

    Probes each required package with `rpm -qa | grep`; logs the outcome of
    every check and returns 1 at the first missing package, 0 when all are
    installed.
    """
    log("*** Checking for installed RPMS ***")
    # (probe command, failure message, success message) — checked in order.
    checks = (
        ("rpm -qa | grep ^openvswitch-[0-9]",
         "Openvswitch rpm" "Please install Openvswitch rpm",
         "Openvswitch rpm check OK"),
        ("rpm -qa | grep dpdk-tools",
         "Please install dpdk tools rpm ",
         "dpdk tools check OK "),
        ("rpm -qa | grep dpdk-[0-9]",
         "Please install dpdk package rpm ",
         "dpdk package check OK"),
        ("rpm -qa | grep qemu-kvm-tools",
         "Please install qemu-kvm-tools rpm ",
         "qemu-kvm-tools check OK"),
        ("rpm -qa | grep qemu-img",
         "Please install qemu-img rpm ",
         "qemu-img package check OK"),
        ("rpm -qa | grep qemu-kvm",
         "Please install qemu-kmv rpm ",
         "qemu-kvm package check OK"),
    )
    for probe, missing_msg, ok_msg in checks:
        if bash(probe).value() == "":
            log(missing_msg)
            return 1
        log(ok_msg)
    return 0
def test_bash_concatenation_by_method(self):
    """Chaining .bash() pipes the previous output into the next command."""
    listing = bash('ls .')
    filtered = listing.bash('grep "tests.py$"')
    self.assertEqual(filtered.value(), 'tests.py')
def test_output_to_stdout(self):
    """With stdout=None the output goes to the tty, so nothing is captured."""
    uncaptured = bash('ls .', stdout=None)
    self.assertEqual(str(uncaptured), '')
    # Shouldn't find anything because we haven't piped it.
    piped = uncaptured.bash('grep setup')
    self.assertEqual(str(piped), '')
def test_passing_env(self):
    """Variables passed via env= are visible to the spawned shell."""
    echoed = bash('echo $NAME', env={'NAME': 'Fred'})
    self.assertEqual(echoed.stdout, b'Fred\n')
def notify(pipe_status):
    """Mail the pipeline status to every admin, passing FQ_FILE_NAME via -a."""
    # NOTE(review): the command is built by plain string interpolation; if
    # pipe_status can contain single quotes the shell command breaks —
    # confirm callers only pass sanitized text.
    message_body = " \n" + pipe_status
    for recipient in ADMIN_EMAILS:
        bash("echo '%s' | mail -s %s -a %s %s"
             % (message_body, SUBJECT_LINE, FQ_FILE_NAME, recipient))
def test_bash_stdout(self):
    """A piped command captures stdout as bytes and exits with code 0."""
    proc = bash('ls . | grep "tests.py$"')
    self.assertEqual(proc.stdout, b'tests.py\n')
    self.assertEqual(proc.code, 0)
def test_bash_repr(self):
    """repr() of a bash result is its trimmed stdout."""
    proc = bash('ls . | grep "tests.py$"')
    self.assertEqual(repr(proc), 'tests.py')
def test_bash_concatenation_within_command(self):
    """Piping inside a single command string yields the filtered value."""
    self.assertEqual(bash('ls . | grep "tests.py$"').value(), 'tests.py')
def generate_ssh_key():
    """Create an RSA key pair without a passphrase, then export the public
    key in PEM format next to it."""
    key_dir = settings.SSH_KEY_LOCATION
    bash("ssh-keygen -t rsa -N '' -q -f {0}/id_rsa".format(key_dir))
    bash("ssh-keygen -f {0}/id_rsa -e -m pem > {0}/id_rsa.pub.pem"
         .format(key_dir))
# Progress marker for the resumable upload loop.
print "Line to be uploaded Next: " + str(lineToBeUploadedNext)

#################
## Phant Stuff ##
#################
server = "54.86.132.254" # This is the IP of data.sparkfun.com. Using IP instead of hostname may cause trouble in future if data.sparkfun.com changes their IP
# NOTE(review): Phant credentials are hard-coded in source; consider moving
# them to a config file kept out of version control.
publicKey = "G2J96qjGw6INyx9vadov"
privateKey = "NWPjb4elJbI1YV805rM0"

#Calls the script which resets the GSM Module.
def resetGSMModule():
    print "Resetting GSM Module"
    log.write("Resetting GSM Module\n")
    bash("sudo python /home/pi/weatherStation/resetGSM.py pl2303")

#Turns off the GSM Module and starts it back again
def restartGSMModule():
    print "(Presumably) Shutting off GSM Module"
    # 9600-baud serial link to the GSM module on the Pi's UART.
    ser = serial.Serial('/dev/ttyAMA0', 9600, timeout = 1)
    ser.flush()
    ser.flushInput()
    ser.flushInput()
    ser.flushInput() #I feel there is need of more flushing as sometimes th$
    ser.flushOutput()
    time.sleep(1)
    bash("/home/pi/weatherStation/startGSMModule.py")
    sleep(0.1)
    # Read back up to 200 bytes of the module's power-off/on response.
    resp = ser.read(200)
    from mfutil.net import get_simple_hostname, get_full_hostname, \
        get_domainname, get_real_ip
except ImportError:
    print("mfutil not found", file=sys.stderr)
    sys.exit(1)


def profile_error(message):
    """Print an error, export PROFILE_ERROR=1 and abort the profile script.

    :param message: human-readable description of the failure.
    """
    print("ERROR: %s" % message, file=sys.stderr)
    print("export PROFILE_ERROR=1")
    sys.exit(1)


# cpu
# Count physical sockets: unique SOCKET ids reported by lscpu.
cmd = "lscpu --parse=SOCKET |grep -v '^#' |sort |uniq |wc -l"
physical_processors_result = bash.bash(cmd)
if physical_processors_result.code != 0:
    profile_error("can't execute %s" % cmd)
physical_processors = int(physical_processors_result.stdout)
# Count physical cores: unique CORE ids reported by lscpu.
cmd = "lscpu --parse=CORE |grep -v '^#' |sort |uniq |wc -l"
physical_cores_result = bash.bash(cmd)
if physical_cores_result.code != 0:
    profile_error("can't execute %s" % cmd)
physical_cores = int(physical_cores_result.stdout)
print("export MFCOM_HARDWARE_NUMBER_OF_PHYSICAL_PROCESSORS=%i"
      % physical_processors)
print("export MFCOM_HARDWARE_NUMBER_OF_CPU_CORES=%i" % physical_cores)
# NOTE(review): the variable is named ..._PLUS_1 but the value is
# max(physical_cores / 2, 1), and the same name is exported again just
# below — looks like a copy/paste issue; confirm the intended formula.
print("export MFCOM_HARDWARE_NUMBER_OF_CPU_CORES_PLUS_1=%i" %
      max(physical_cores / 2, 1))
print("export MFCOM_HARDWARE_NUMBER_OF_CPU_CORES_PLUS_1=%i" %
def __init__(self, bash_cmd):
    """Remember *bash_cmd* and run it through bash immediately, keeping
    the resulting bash object for later inspection."""
    self.__bash_cmd = bash_cmd
    executed = bash.bash(bash_cmd)
    self.__bash_result_object = executed
    print("ERROR : cookiecutter result is not a valid directory")
    parser.exit(1)
if MFMODULE == "MFDATA":
    # FIXME: why chmod +x *.py ???
    get_bash_output_or_die("cd %s && chmod +x *.py && "
                           "remove_empty.sh" % args.plugin)
else:
    get_bash_output_or_die("cd %s && remove_empty.sh" % args.plugin)
# Post-bootstrap hook run inside the freshly created plugin directory.
get_bash_output_or_die("cd %s && bootstrap_plugin.post" % args.plugin)
if args.make:
    print("Make plugin on directory %s" % args.plugin)
    os.chdir("%s" % args.plugin)
    b = bash("make release")
    print("%s" % b.stdout)
    if b.code == 0:
        if args.install:
            # Install every built *.plugin artefact.
            for fic in glob.glob("*.plugin"):
                print("Installing plugin %s" % fic)
                b = bash("plugins.install %s" % fic)
                print("%s" % b.stdout)
                if b.code != 0:
                    # The plugin directory itself was created fine, but
                    # installing the built artefact failed.
                    print("Plugin directory %s successfully created" % res)
                    print("but PLUGIN INSTALL ERROR")
                    print("%s" % b.stderr)
                    sys.exit(1)
if args.delete:
def resetGSMModule():
    """Reset the GSM module (currently disabled via the early return)."""
    # NOTE(review): this early return deliberately short-circuits the
    # function, leaving the body below as unreachable dead code — same
    # stubbing pattern as restartGSMModule elsewhere in this project.
    return
    print "Resetting GSM Module"
    log.write("Resetting GSM Module\n")
    bash("sudo python /home/pi/weatherStation/resetGSM.py pl2303")